Bug Summary

File: llvm/lib/Analysis/ScalarEvolution.cpp
Warning: line 10457, column 35
Called C++ object pointer is null

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name ScalarEvolution.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Analysis -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Analysis -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Analysis -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/include -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include -D NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Analysis -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-09-04-040900-46481-1 -x c++ /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Analysis/ScalarEvolution.cpp

/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Analysis/ScalarEvolution.cpp

1//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the implementation of the scalar evolution analysis
10// engine, which is used primarily to analyze expressions involving induction
11// variables in loops.
12//
13// There are several aspects to this library. First is the representation of
14// scalar expressions, which are represented as subclasses of the SCEV class.
15// These classes are used to represent certain types of subexpressions that we
16// can handle. We only create one SCEV of a particular shape, so
17// pointer-comparisons for equality are legal.
18//
19// One important aspect of the SCEV objects is that they are never cyclic, even
20// if there is a cycle in the dataflow for an expression (i.e., a PHI node). If
21// the PHI node is one of the idioms that we can represent (e.g., a polynomial
22// recurrence) then we represent it directly as a recurrence node, otherwise we
23// represent it as a SCEVUnknown node.
24//
25// In addition to being able to represent expressions of various types, we also
26// have folders that are used to build the *canonical* representation for a
27// particular expression. These folders are capable of using a variety of
28// rewrite rules to simplify the expressions.
29//
30// Once the folders are defined, we can implement the more interesting
31// higher-level code, such as the code that recognizes PHI nodes of various
32// types, computes the execution count of a loop, etc.
33//
34// TODO: We should use these routines and value representations to implement
35// dependence analysis!
36//
37//===----------------------------------------------------------------------===//
38//
39// There are several good references for the techniques used in this analysis.
40//
41// Chains of recurrences -- a method to expedite the evaluation
42// of closed-form functions
43// Olaf Bachmann, Paul S. Wang, Eugene V. Zima
44//
45// On computational properties of chains of recurrences
46// Eugene V. Zima
47//
48// Symbolic Evaluation of Chains of Recurrences for Loop Optimization
49// Robert A. van Engelen
50//
51// Efficient Symbolic Analysis for Optimizing Compilers
52// Robert A. van Engelen
53//
54// Using the chains of recurrences algebra for data dependence testing and
55// induction variable substitution
56// MS Thesis, Johnie Birch
57//
58//===----------------------------------------------------------------------===//
59
60#include "llvm/Analysis/ScalarEvolution.h"
61#include "llvm/ADT/APInt.h"
62#include "llvm/ADT/ArrayRef.h"
63#include "llvm/ADT/DenseMap.h"
64#include "llvm/ADT/DepthFirstIterator.h"
65#include "llvm/ADT/EquivalenceClasses.h"
66#include "llvm/ADT/FoldingSet.h"
67#include "llvm/ADT/None.h"
68#include "llvm/ADT/Optional.h"
69#include "llvm/ADT/STLExtras.h"
70#include "llvm/ADT/ScopeExit.h"
71#include "llvm/ADT/Sequence.h"
72#include "llvm/ADT/SetVector.h"
73#include "llvm/ADT/SmallPtrSet.h"
74#include "llvm/ADT/SmallSet.h"
75#include "llvm/ADT/SmallVector.h"
76#include "llvm/ADT/Statistic.h"
77#include "llvm/ADT/StringRef.h"
78#include "llvm/Analysis/AssumptionCache.h"
79#include "llvm/Analysis/ConstantFolding.h"
80#include "llvm/Analysis/InstructionSimplify.h"
81#include "llvm/Analysis/LoopInfo.h"
82#include "llvm/Analysis/ScalarEvolutionDivision.h"
83#include "llvm/Analysis/ScalarEvolutionExpressions.h"
84#include "llvm/Analysis/TargetLibraryInfo.h"
85#include "llvm/Analysis/ValueTracking.h"
86#include "llvm/Config/llvm-config.h"
87#include "llvm/IR/Argument.h"
88#include "llvm/IR/BasicBlock.h"
89#include "llvm/IR/CFG.h"
90#include "llvm/IR/Constant.h"
91#include "llvm/IR/ConstantRange.h"
92#include "llvm/IR/Constants.h"
93#include "llvm/IR/DataLayout.h"
94#include "llvm/IR/DerivedTypes.h"
95#include "llvm/IR/Dominators.h"
96#include "llvm/IR/Function.h"
97#include "llvm/IR/GlobalAlias.h"
98#include "llvm/IR/GlobalValue.h"
99#include "llvm/IR/GlobalVariable.h"
100#include "llvm/IR/InstIterator.h"
101#include "llvm/IR/InstrTypes.h"
102#include "llvm/IR/Instruction.h"
103#include "llvm/IR/Instructions.h"
104#include "llvm/IR/IntrinsicInst.h"
105#include "llvm/IR/Intrinsics.h"
106#include "llvm/IR/LLVMContext.h"
107#include "llvm/IR/Metadata.h"
108#include "llvm/IR/Operator.h"
109#include "llvm/IR/PatternMatch.h"
110#include "llvm/IR/Type.h"
111#include "llvm/IR/Use.h"
112#include "llvm/IR/User.h"
113#include "llvm/IR/Value.h"
114#include "llvm/IR/Verifier.h"
115#include "llvm/InitializePasses.h"
116#include "llvm/Pass.h"
117#include "llvm/Support/Casting.h"
118#include "llvm/Support/CommandLine.h"
119#include "llvm/Support/Compiler.h"
120#include "llvm/Support/Debug.h"
121#include "llvm/Support/ErrorHandling.h"
122#include "llvm/Support/KnownBits.h"
123#include "llvm/Support/SaveAndRestore.h"
124#include "llvm/Support/raw_ostream.h"
125#include <algorithm>
126#include <cassert>
127#include <climits>
128#include <cstddef>
129#include <cstdint>
130#include <cstdlib>
131#include <map>
132#include <memory>
133#include <tuple>
134#include <utility>
135#include <vector>
136
137using namespace llvm;
138using namespace PatternMatch;
139
140#define DEBUG_TYPE "scalar-evolution"
141
142STATISTIC(NumArrayLenItCounts,
143 "Number of trip counts computed with array length");
144STATISTIC(NumTripCountsComputed,
145 "Number of loops with predictable loop counts");
146STATISTIC(NumTripCountsNotComputed,
147 "Number of loops without predictable loop counts");
148STATISTIC(NumBruteForceTripCountsComputed,
149 "Number of loops with trip counts computed by force");
150
151static cl::opt<unsigned>
152MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
153 cl::ZeroOrMore,
154 cl::desc("Maximum number of iterations SCEV will "
155 "symbolically execute a constant "
156 "derived loop"),
157 cl::init(100));
158
159// FIXME: Enable this with EXPENSIVE_CHECKS when the test suite is clean.
160static cl::opt<bool> VerifySCEV(
161 "verify-scev", cl::Hidden,
162 cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
163static cl::opt<bool> VerifySCEVStrict(
164 "verify-scev-strict", cl::Hidden,
165 cl::desc("Enable stricter verification when -verify-scev is passed"));
166static cl::opt<bool>
167 VerifySCEVMap("verify-scev-maps", cl::Hidden,
168 cl::desc("Verify no dangling value in ScalarEvolution's "
169 "ExprValueMap (slow)"));
170
171static cl::opt<bool> VerifyIR(
172 "scev-verify-ir", cl::Hidden,
173 cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"),
174 cl::init(false));
175
176static cl::opt<unsigned> MulOpsInlineThreshold(
177 "scev-mulops-inline-threshold", cl::Hidden,
178 cl::desc("Threshold for inlining multiplication operands into a SCEV"),
179 cl::init(32));
180
181static cl::opt<unsigned> AddOpsInlineThreshold(
182 "scev-addops-inline-threshold", cl::Hidden,
183 cl::desc("Threshold for inlining addition operands into a SCEV"),
184 cl::init(500));
185
186static cl::opt<unsigned> MaxSCEVCompareDepth(
187 "scalar-evolution-max-scev-compare-depth", cl::Hidden,
188 cl::desc("Maximum depth of recursive SCEV complexity comparisons"),
189 cl::init(32));
190
191static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth(
192 "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden,
193 cl::desc("Maximum depth of recursive SCEV operations implication analysis"),
194 cl::init(2));
195
196static cl::opt<unsigned> MaxValueCompareDepth(
197 "scalar-evolution-max-value-compare-depth", cl::Hidden,
198 cl::desc("Maximum depth of recursive value complexity comparisons"),
199 cl::init(2));
200
201static cl::opt<unsigned>
202 MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden,
203 cl::desc("Maximum depth of recursive arithmetics"),
204 cl::init(32));
205
206static cl::opt<unsigned> MaxConstantEvolvingDepth(
207 "scalar-evolution-max-constant-evolving-depth", cl::Hidden,
208 cl::desc("Maximum depth of recursive constant evolving"), cl::init(32));
209
210static cl::opt<unsigned>
211 MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden,
212 cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"),
213 cl::init(8));
214
215static cl::opt<unsigned>
216 MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden,
217 cl::desc("Max coefficients in AddRec during evolving"),
218 cl::init(8));
219
220static cl::opt<unsigned>
221 HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden,
222 cl::desc("Size of the expression which is considered huge"),
223 cl::init(4096));
224
225static cl::opt<bool>
226ClassifyExpressions("scalar-evolution-classify-expressions",
227 cl::Hidden, cl::init(true),
228 cl::desc("When printing analysis, include information on every instruction"));
229
230static cl::opt<bool> UseExpensiveRangeSharpening(
231 "scalar-evolution-use-expensive-range-sharpening", cl::Hidden,
232 cl::init(false),
233 cl::desc("Use more powerful methods of sharpening expression ranges. May "
234 "be costly in terms of compile time"));
235
236//===----------------------------------------------------------------------===//
237// SCEV class definitions
238//===----------------------------------------------------------------------===//
239
240//===----------------------------------------------------------------------===//
241// Implementation of the SCEV class.
242//
243
244#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
245LLVM_DUMP_METHOD void SCEV::dump() const {
246 print(dbgs());
247 dbgs() << '\n';
248}
249#endif
250
251void SCEV::print(raw_ostream &OS) const {
252 switch (getSCEVType()) {
253 case scConstant:
254 cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
255 return;
256 case scPtrToInt: {
257 const SCEVPtrToIntExpr *PtrToInt = cast<SCEVPtrToIntExpr>(this);
258 const SCEV *Op = PtrToInt->getOperand();
259 OS << "(ptrtoint " << *Op->getType() << " " << *Op << " to "
260 << *PtrToInt->getType() << ")";
261 return;
262 }
263 case scTruncate: {
264 const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
265 const SCEV *Op = Trunc->getOperand();
266 OS << "(trunc " << *Op->getType() << " " << *Op << " to "
267 << *Trunc->getType() << ")";
268 return;
269 }
270 case scZeroExtend: {
271 const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
272 const SCEV *Op = ZExt->getOperand();
273 OS << "(zext " << *Op->getType() << " " << *Op << " to "
274 << *ZExt->getType() << ")";
275 return;
276 }
277 case scSignExtend: {
278 const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
279 const SCEV *Op = SExt->getOperand();
280 OS << "(sext " << *Op->getType() << " " << *Op << " to "
281 << *SExt->getType() << ")";
282 return;
283 }
284 case scAddRecExpr: {
285 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
286 OS << "{" << *AR->getOperand(0);
287 for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
288 OS << ",+," << *AR->getOperand(i);
289 OS << "}<";
290 if (AR->hasNoUnsignedWrap())
291 OS << "nuw><";
292 if (AR->hasNoSignedWrap())
293 OS << "nsw><";
294 if (AR->hasNoSelfWrap() &&
295 !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
296 OS << "nw><";
297 AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
298 OS << ">";
299 return;
300 }
301 case scAddExpr:
302 case scMulExpr:
303 case scUMaxExpr:
304 case scSMaxExpr:
305 case scUMinExpr:
306 case scSMinExpr: {
307 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
308 const char *OpStr = nullptr;
309 switch (NAry->getSCEVType()) {
310 case scAddExpr: OpStr = " + "; break;
311 case scMulExpr: OpStr = " * "; break;
312 case scUMaxExpr: OpStr = " umax "; break;
313 case scSMaxExpr: OpStr = " smax "; break;
314 case scUMinExpr:
315 OpStr = " umin ";
316 break;
317 case scSMinExpr:
318 OpStr = " smin ";
319 break;
320 default:
321 llvm_unreachable("There are no other nary expression types.");
322 }
323 OS << "(";
324 ListSeparator LS(OpStr);
325 for (const SCEV *Op : NAry->operands())
326 OS << LS << *Op;
327 OS << ")";
328 switch (NAry->getSCEVType()) {
329 case scAddExpr:
330 case scMulExpr:
331 if (NAry->hasNoUnsignedWrap())
332 OS << "<nuw>";
333 if (NAry->hasNoSignedWrap())
334 OS << "<nsw>";
335 break;
336 default:
337 // Nothing to print for other nary expressions.
338 break;
339 }
340 return;
341 }
342 case scUDivExpr: {
343 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
344 OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
345 return;
346 }
347 case scUnknown: {
348 const SCEVUnknown *U = cast<SCEVUnknown>(this);
349 Type *AllocTy;
350 if (U->isSizeOf(AllocTy)) {
351 OS << "sizeof(" << *AllocTy << ")";
352 return;
353 }
354 if (U->isAlignOf(AllocTy)) {
355 OS << "alignof(" << *AllocTy << ")";
356 return;
357 }
358
359 Type *CTy;
360 Constant *FieldNo;
361 if (U->isOffsetOf(CTy, FieldNo)) {
362 OS << "offsetof(" << *CTy << ", ";
363 FieldNo->printAsOperand(OS, false);
364 OS << ")";
365 return;
366 }
367
368 // Otherwise just print it normally.
369 U->getValue()->printAsOperand(OS, false);
370 return;
371 }
372 case scCouldNotCompute:
373 OS << "***COULDNOTCOMPUTE***";
374 return;
375 }
376 llvm_unreachable("Unknown SCEV kind!");
377}
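// Illustrative note (hypothetical printer output, not part of this file):
// a constant prints as a bare operand such as "42"; an addrec over a loop
// with header %loop prints as "{0,+,1}<nuw><%loop>"; and an add with the
// nsw flag prints as "(%a + %b)<nsw>".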
378
379Type *SCEV::getType() const {
380 switch (getSCEVType()) {
381 case scConstant:
382 return cast<SCEVConstant>(this)->getType();
383 case scPtrToInt:
384 case scTruncate:
385 case scZeroExtend:
386 case scSignExtend:
387 return cast<SCEVCastExpr>(this)->getType();
388 case scAddRecExpr:
389 return cast<SCEVAddRecExpr>(this)->getType();
390 case scMulExpr:
391 return cast<SCEVMulExpr>(this)->getType();
392 case scUMaxExpr:
393 case scSMaxExpr:
394 case scUMinExpr:
395 case scSMinExpr:
396 return cast<SCEVMinMaxExpr>(this)->getType();
397 case scAddExpr:
398 return cast<SCEVAddExpr>(this)->getType();
399 case scUDivExpr:
400 return cast<SCEVUDivExpr>(this)->getType();
401 case scUnknown:
402 return cast<SCEVUnknown>(this)->getType();
403 case scCouldNotCompute:
404 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
405 }
406 llvm_unreachable("Unknown SCEV kind!");
407}
408
409bool SCEV::isZero() const {
410 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
411 return SC->getValue()->isZero();
412 return false;
413}
414
415bool SCEV::isOne() const {
416 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
417 return SC->getValue()->isOne();
418 return false;
419}
420
421bool SCEV::isAllOnesValue() const {
422 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
423 return SC->getValue()->isMinusOne();
424 return false;
425}
426
427bool SCEV::isNonConstantNegative() const {
428 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
429 if (!Mul) return false;
430
431 // If there is a constant factor, it will be first.
432 const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
433 if (!SC) return false;
434
435 // Return true if the value is negative, this matches things like (-42 * V).
436 return SC->getAPInt().isNegative();
437}
438
439SCEVCouldNotCompute::SCEVCouldNotCompute() :
440 SCEV(FoldingSetNodeIDRef(), scCouldNotCompute, 0) {}
441
442bool SCEVCouldNotCompute::classof(const SCEV *S) {
443 return S->getSCEVType() == scCouldNotCompute;
444}
445
446const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
447 FoldingSetNodeID ID;
448 ID.AddInteger(scConstant);
449 ID.AddPointer(V);
450 void *IP = nullptr;
451 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
452 SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
453 UniqueSCEVs.InsertNode(S, IP);
454 return S;
455}
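// A brief note on the uniquing idiom above (used throughout this file):
// FindNodeOrInsertPos both looks up an existing node with this profile and,
// on a miss, returns an insertion hint in IP, so the newly allocated node
// can be inserted without a second hash lookup. This uniquing is what makes
// pointer equality a valid SCEV equality test.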
456
457const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
458 return getConstant(ConstantInt::get(getContext(), Val));
459}
460
461const SCEV *
462ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
463 IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
464 return getConstant(ConstantInt::get(ITy, V, isSigned));
465}
466
467SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
468 const SCEV *op, Type *ty)
469 : SCEV(ID, SCEVTy, computeExpressionSize(op)), Ty(ty) {
470 Operands[0] = op;
471}
472
473SCEVPtrToIntExpr::SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, const SCEV *Op,
474 Type *ITy)
475 : SCEVCastExpr(ID, scPtrToInt, Op, ITy) {
476 assert(getOperand()->getType()->isPointerTy() && Ty->isIntegerTy() &&
477 "Must be a non-bit-width-changing pointer-to-integer cast!");
478}
479
480SCEVIntegralCastExpr::SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID,
481 SCEVTypes SCEVTy, const SCEV *op,
482 Type *ty)
483 : SCEVCastExpr(ID, SCEVTy, op, ty) {}
484
485SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID, const SCEV *op,
486 Type *ty)
487 : SCEVIntegralCastExpr(ID, scTruncate, op, ty) {
488 assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
489 "Cannot truncate non-integer value!");
490}
491
492SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
493 const SCEV *op, Type *ty)
494 : SCEVIntegralCastExpr(ID, scZeroExtend, op, ty) {
495 assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
496 "Cannot zero extend non-integer value!");
497}
498
499SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
500 const SCEV *op, Type *ty)
501 : SCEVIntegralCastExpr(ID, scSignExtend, op, ty) {
502 assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
503 "Cannot sign extend non-integer value!");
504}
505
506void SCEVUnknown::deleted() {
507 // Clear this SCEVUnknown from various maps.
508 SE->forgetMemoizedResults(this);
509
510 // Remove this SCEVUnknown from the uniquing map.
511 SE->UniqueSCEVs.RemoveNode(this);
512
513 // Release the value.
514 setValPtr(nullptr);
515}
516
517void SCEVUnknown::allUsesReplacedWith(Value *New) {
518 // Remove this SCEVUnknown from the uniquing map.
519 SE->UniqueSCEVs.RemoveNode(this);
520
521 // Update this SCEVUnknown to point to the new value. This is needed
522 // because there may still be outstanding SCEVs which still point to
523 // this SCEVUnknown.
524 setValPtr(New);
525}
526
527bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
528 if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
529 if (VCE->getOpcode() == Instruction::PtrToInt)
530 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
531 if (CE->getOpcode() == Instruction::GetElementPtr &&
532 CE->getOperand(0)->isNullValue() &&
533 CE->getNumOperands() == 2)
534 if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
535 if (CI->isOne()) {
536 AllocTy = cast<GEPOperator>(CE)->getSourceElementType();
537 return true;
538 }
539
540 return false;
541}
542
543bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
544 if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
545 if (VCE->getOpcode() == Instruction::PtrToInt)
546 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
547 if (CE->getOpcode() == Instruction::GetElementPtr &&
548 CE->getOperand(0)->isNullValue()) {
549 Type *Ty = cast<GEPOperator>(CE)->getSourceElementType();
550 if (StructType *STy = dyn_cast<StructType>(Ty))
551 if (!STy->isPacked() &&
552 CE->getNumOperands() == 3 &&
553 CE->getOperand(1)->isNullValue()) {
554 if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
555 if (CI->isOne() &&
556 STy->getNumElements() == 2 &&
557 STy->getElementType(0)->isIntegerTy(1)) {
558 AllocTy = STy->getElementType(1);
559 return true;
560 }
561 }
562 }
563
564 return false;
565}
566
567bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
568 if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
569 if (VCE->getOpcode() == Instruction::PtrToInt)
570 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
571 if (CE->getOpcode() == Instruction::GetElementPtr &&
572 CE->getNumOperands() == 3 &&
573 CE->getOperand(0)->isNullValue() &&
574 CE->getOperand(1)->isNullValue()) {
575 Type *Ty = cast<GEPOperator>(CE)->getSourceElementType();
576 // Ignore vector types here so that ScalarEvolutionExpander doesn't
577 // emit getelementptrs that index into vectors.
578 if (Ty->isStructTy() || Ty->isArrayTy()) {
579 CTy = Ty;
580 FieldNo = CE->getOperand(2);
581 return true;
582 }
583 }
584
585 return false;
586}
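// Illustrative examples (hypothetical, schematic LLVM IR) of the idioms the
// three predicates above recognize, all ptrtoint'd GEPs off a null base:
//   sizeof(T):      ptrtoint(getelementptr(T, T* null, i32 1))
//   alignof(T):     ptrtoint(getelementptr({i1, T}, {i1, T}* null, i64 0, i32 1))
//   offsetof(S, f): ptrtoint(getelementptr(S, S* null, i64 0, i32 f))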
587
588//===----------------------------------------------------------------------===//
589// SCEV Utilities
590//===----------------------------------------------------------------------===//
591
592/// Compare the two values \p LV and \p RV in terms of their "complexity" where
593/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
594/// operands in SCEV expressions. \p EqCache is a set of pairs of values that
595/// have been previously deemed to be "equally complex" by this routine. It is
596/// intended to avoid exponential time complexity in cases like:
597///
598/// %a = f(%x, %y)
599/// %b = f(%a, %a)
600/// %c = f(%b, %b)
601///
602/// %d = f(%x, %y)
603/// %e = f(%d, %d)
604/// %f = f(%e, %e)
605///
606/// CompareValueComplexity(%f, %c)
607///
608/// Since we do not continue running this routine on expression trees once we
609/// have seen unequal values, there is no need to track them in the cache.
610static int
611CompareValueComplexity(EquivalenceClasses<const Value *> &EqCacheValue,
612 const LoopInfo *const LI, Value *LV, Value *RV,
613 unsigned Depth) {
614 if (Depth > MaxValueCompareDepth || EqCacheValue.isEquivalent(LV, RV))
615 return 0;
616
617 // Order pointer values after integer values. This helps SCEVExpander form
618 // GEPs.
619 bool LIsPointer = LV->getType()->isPointerTy(),
620 RIsPointer = RV->getType()->isPointerTy();
621 if (LIsPointer != RIsPointer)
622 return (int)LIsPointer - (int)RIsPointer;
623
624 // Compare getValueID values.
625 unsigned LID = LV->getValueID(), RID = RV->getValueID();
626 if (LID != RID)
627 return (int)LID - (int)RID;
628
629 // Sort arguments by their position.
630 if (const auto *LA = dyn_cast<Argument>(LV)) {
631 const auto *RA = cast<Argument>(RV);
632 unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
633 return (int)LArgNo - (int)RArgNo;
634 }
635
636 if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
637 const auto *RGV = cast<GlobalValue>(RV);
638
639 const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
640 auto LT = GV->getLinkage();
641 return !(GlobalValue::isPrivateLinkage(LT) ||
642 GlobalValue::isInternalLinkage(LT));
643 };
644
645 // Use the names to distinguish the two values, but only if the
646 // names are semantically important.
647 if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
648 return LGV->getName().compare(RGV->getName());
649 }
650
651 // For instructions, compare their loop depth, and their operand count. This
652 // is pretty loose.
653 if (const auto *LInst = dyn_cast<Instruction>(LV)) {
654 const auto *RInst = cast<Instruction>(RV);
655
656 // Compare loop depths.
657 const BasicBlock *LParent = LInst->getParent(),
658 *RParent = RInst->getParent();
659 if (LParent != RParent) {
660 unsigned LDepth = LI->getLoopDepth(LParent),
661 RDepth = LI->getLoopDepth(RParent);
662 if (LDepth != RDepth)
663 return (int)LDepth - (int)RDepth;
664 }
665
666 // Compare the number of operands.
667 unsigned LNumOps = LInst->getNumOperands(),
668 RNumOps = RInst->getNumOperands();
669 if (LNumOps != RNumOps)
670 return (int)LNumOps - (int)RNumOps;
671
672 for (unsigned Idx : seq(0u, LNumOps)) {
673 int Result =
674 CompareValueComplexity(EqCacheValue, LI, LInst->getOperand(Idx),
675 RInst->getOperand(Idx), Depth + 1);
676 if (Result != 0)
677 return Result;
678 }
679 }
680
681 EqCacheValue.unionSets(LV, RV);
682 return 0;
683}
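// For example (hypothetical values): an i64 argument orders before an i8*
// argument (negative result from the pointer check), while two arguments of
// the same type fall through to the position rule, e.g. %arg0 vs. %arg1
// yields 0 - 1 = -1.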
684
685// Return negative, zero, or positive, if LHS is less than, equal to, or greater
686// than RHS, respectively. A three-way result allows recursive comparisons to be
687// more efficient.
688// If the max analysis depth was reached, return None, assuming we do not know
689// if they are equivalent for sure.
690static Optional<int>
691CompareSCEVComplexity(EquivalenceClasses<const SCEV *> &EqCacheSCEV,
692 EquivalenceClasses<const Value *> &EqCacheValue,
693 const LoopInfo *const LI, const SCEV *LHS,
694 const SCEV *RHS, DominatorTree &DT, unsigned Depth = 0) {
695 // Fast-path: SCEVs are uniqued so we can do a quick equality check.
696 if (LHS == RHS)
697 return 0;
698
699 // Primarily, sort the SCEVs by their getSCEVType().
700 SCEVTypes LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
701 if (LType != RType)
702 return (int)LType - (int)RType;
703
704 if (EqCacheSCEV.isEquivalent(LHS, RHS))
705 return 0;
706
707 if (Depth > MaxSCEVCompareDepth)
708 return None;
709
710 // Aside from the getSCEVType() ordering, the particular ordering
711 // isn't very important except that it's beneficial to be consistent,
712 // so that (a + b) and (b + a) don't end up as different expressions.
713 switch (LType) {
714 case scUnknown: {
715 const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
716 const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);
717
718 int X = CompareValueComplexity(EqCacheValue, LI, LU->getValue(),
719 RU->getValue(), Depth + 1);
720 if (X == 0)
721 EqCacheSCEV.unionSets(LHS, RHS);
722 return X;
723 }
724
725 case scConstant: {
726 const SCEVConstant *LC = cast<SCEVConstant>(LHS);
727 const SCEVConstant *RC = cast<SCEVConstant>(RHS);
728
729 // Compare constant values.
730 const APInt &LA = LC->getAPInt();
731 const APInt &RA = RC->getAPInt();
732 unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
733 if (LBitWidth != RBitWidth)
734 return (int)LBitWidth - (int)RBitWidth;
735 return LA.ult(RA) ? -1 : 1;
736 }
737
738 case scAddRecExpr: {
739 const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
740 const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);
741
742 // There is always a dominance between two recs that are used by one SCEV,
743 // so we can safely sort recs by loop header dominance. We require such
744 // order in getAddExpr.
745 const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
746 if (LLoop != RLoop) {
747 const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
748 assert(LHead != RHead && "Two loops share the same header?");
749 if (DT.dominates(LHead, RHead))
750 return 1;
751 else
752 assert(DT.dominates(RHead, LHead) &&
753 "No dominance between recurrences used by one SCEV?");
754 return -1;
755 }
756
757 // Addrec complexity grows with operand count.
758 unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
759 if (LNumOps != RNumOps)
760 return (int)LNumOps - (int)RNumOps;
761
762 // Lexicographically compare.
763 for (unsigned i = 0; i != LNumOps; ++i) {
764 auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
765 LA->getOperand(i), RA->getOperand(i), DT,
766 Depth + 1);
767 if (X != 0)
768 return X;
769 }
770 EqCacheSCEV.unionSets(LHS, RHS);
771 return 0;
772 }
773
774 case scAddExpr:
775 case scMulExpr:
776 case scSMaxExpr:
777 case scUMaxExpr:
778 case scSMinExpr:
779 case scUMinExpr: {
780 const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
781 const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);
782
783 // Lexicographically compare n-ary expressions.
784 unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
785 if (LNumOps != RNumOps)
786 return (int)LNumOps - (int)RNumOps;
787
788 for (unsigned i = 0; i != LNumOps; ++i) {
789 auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
790 LC->getOperand(i), RC->getOperand(i), DT,
791 Depth + 1);
792 if (X != 0)
793 return X;
794 }
795 EqCacheSCEV.unionSets(LHS, RHS);
796 return 0;
797 }
798
799 case scUDivExpr: {
800 const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
801 const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);
802
803 // Lexicographically compare udiv expressions.
804 auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getLHS(),
805 RC->getLHS(), DT, Depth + 1);
806 if (X != 0)
807 return X;
808 X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getRHS(),
809 RC->getRHS(), DT, Depth + 1);
810 if (X == 0)
811 EqCacheSCEV.unionSets(LHS, RHS);
812 return X;
813 }
814
815 case scPtrToInt:
816 case scTruncate:
817 case scZeroExtend:
818 case scSignExtend: {
819 const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
820 const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);
821
822 // Compare cast expressions by operand.
823 auto X =
824 CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getOperand(),
825 RC->getOperand(), DT, Depth + 1);
826 if (X == 0)
827 EqCacheSCEV.unionSets(LHS, RHS);
828 return X;
829 }
830
831 case scCouldNotCompute:
832 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
833 }
834 llvm_unreachable("Unknown SCEV kind!");
835}
836
837/// Given a list of SCEV objects, order them by their complexity, and group
838/// objects of the same complexity together by value. When this routine is
839/// finished, we know that any duplicates in the vector are consecutive and that
840/// complexity is monotonically increasing.
841///
842/// Note that we take special precautions to ensure that we get deterministic
843/// results from this routine. In other words, we don't want the results of
844/// this to depend on where the addresses of various SCEV objects happened to
845/// land in memory.
846static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
847 LoopInfo *LI, DominatorTree &DT) {
848 if (Ops.size() < 2) return; // Noop
849
850 EquivalenceClasses<const SCEV *> EqCacheSCEV;
851 EquivalenceClasses<const Value *> EqCacheValue;
852
853 // Whether LHS has provably less complexity than RHS.
854 auto IsLessComplex = [&](const SCEV *LHS, const SCEV *RHS) {
855 auto Complexity =
856 CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LHS, RHS, DT);
857 return Complexity && *Complexity < 0;
858 };
859 if (Ops.size() == 2) {
860 // This is the common case, which also happens to be trivially simple.
861 // Special case it.
862 const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
863 if (IsLessComplex(RHS, LHS))
864 std::swap(LHS, RHS);
865 return;
866 }
867
868 // Do the rough sort by complexity.
869 llvm::stable_sort(Ops, [&](const SCEV *LHS, const SCEV *RHS) {
870 return IsLessComplex(LHS, RHS);
871 });
872
873 // Now that we are sorted by complexity, group elements of the same
874 // complexity. Note that this is, at worst, N^2, but the vector is likely to
875 // be extremely short in practice. Note that we take this approach because we
876 // do not want to depend on the addresses of the objects we are grouping.
877 for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
878 const SCEV *S = Ops[i];
879 unsigned Complexity = S->getSCEVType();
880
881 // If there are any objects of the same complexity and same value as this
882 // one, group them.
883 for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
884 if (Ops[j] == S) { // Found a duplicate.
885 // Move it to immediately after i'th element.
886 std::swap(Ops[i+1], Ops[j]);
887 ++i; // no need to rescan it.
888 if (i == e-2) return; // Done!
889 }
890 }
891 }
892}
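// A worked example of the contract above (hypothetical operands): for
// Ops = [%a, 5, %b, %a], the stable sort moves the constant first, giving
// [5, %a, %b, %a], and the grouping pass then swaps the duplicate next to
// its twin, yielding [5, %a, %a, %b], so callers can fold duplicates with a
// single linear scan.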
893
894/// Returns true if \p Ops contains a huge SCEV (the subtree of S contains at
895/// least HugeExprThreshold nodes).
896static bool hasHugeExpression(ArrayRef<const SCEV *> Ops) {
897 return any_of(Ops, [](const SCEV *S) {
898 return S->getExpressionSize() >= HugeExprThreshold;
899 });
900}
901
902//===----------------------------------------------------------------------===//
903// Simple SCEV method implementations
904//===----------------------------------------------------------------------===//
905
906/// Compute BC(It, K). The result has width W. Assume K > 0.
907static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
908 ScalarEvolution &SE,
909 Type *ResultTy) {
910 // Handle the simplest case efficiently.
911 if (K == 1)
912 return SE.getTruncateOrZeroExtend(It, ResultTy);
913
914 // We are using the following formula for BC(It, K):
915 //
916 // BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
917 //
918 // Suppose, W is the bitwidth of the return value. We must be prepared for
919 // overflow. Hence, we must assure that the result of our computation is
920 // equal to the accurate one modulo 2^W. Unfortunately, division isn't
921 // safe in modular arithmetic.
922 //
923 // However, this code doesn't use exactly that formula; the formula it uses
924 // is something like the following, where T is the number of factors of 2 in
925 // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
926 // exponentiation:
927 //
928 // BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
929 //
930 // This formula is trivially equivalent to the previous formula. However,
931 // this formula can be implemented much more efficiently. The trick is that
932 // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
933 // arithmetic. To do exact division in modular arithmetic, all we have
934 // to do is multiply by the inverse. Therefore, this step can be done at
935 // width W.
936 //
937 // The next issue is how to safely do the division by 2^T. The way this
938 // is done is by doing the multiplication step at a width of at least W + T
939 // bits. This way, the bottom W+T bits of the product are accurate. Then,
940 // when we perform the division by 2^T (which is equivalent to a right shift
941 // by T), the bottom W bits are accurate. Extra bits are okay; they'll get
942 // truncated out after the division by 2^T.
943 //
944 // In comparison to just directly using the first formula, this technique
945 // is much more efficient; using the first formula requires W * K bits,
946 // but this formula needs less than W + K bits. Also, the first formula requires
947 // a division step, whereas this formula only requires multiplies and shifts.
948 //
949 // It doesn't matter whether the subtraction step is done in the calculation
950 // width or the input iteration count's width; if the subtraction overflows,
951 // the result must be zero anyway. We prefer here to do it in the width of
952 // the induction variable because it helps a lot for certain cases; CodeGen
953 // isn't smart enough to ignore the overflow, which leads to much less
954 // efficient code if the width of the subtraction is wider than the native
955 // register width.
956 //
957 // (It's possible to not widen at all by pulling out factors of 2 before
958 // the multiplication; for example, K=2 can be calculated as
959 // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
960 // extra arithmetic, so it's not an obvious win, and it gets
961 // much more complicated for K > 3.)
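// (A worked instance of the scheme above, assuming W = 32 and K = 3:
// K! = 6 = 2^1 * 3, so T = 1 and the odd factor K!/2^T is 3. The product
// It*(It-1)*(It-2) is formed at W+T = 33 bits, divided by 2^1, truncated
// to 32 bits, and multiplied by the inverse of 3 modulo 2^32, which is
// 0xAAAAAAAB because 3 * 0xAAAAAAAB = 2^33 + 1 = 1 (mod 2^32).)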
962
963 // Protection from insane SCEVs; this bound is conservative,
964 // but it probably doesn't matter.
965 if (K > 1000)
966 return SE.getCouldNotCompute();
967
968 unsigned W = SE.getTypeSizeInBits(ResultTy);
969
970 // Calculate K! / 2^T and T; we divide out the factors of two before
971 // multiplying for calculating K! / 2^T to avoid overflow.
972 // Other overflow doesn't matter because we only care about the bottom
973 // W bits of the result.
974 APInt OddFactorial(W, 1);
975 unsigned T = 1;
976 for (unsigned i = 3; i <= K; ++i) {
977 APInt Mult(W, i);
978 unsigned TwoFactors = Mult.countTrailingZeros();
979 T += TwoFactors;
980 Mult.lshrInPlace(TwoFactors);
981 OddFactorial *= Mult;
982 }
983
984 // We need at least W + T bits for the multiplication step
985 unsigned CalculationBits = W + T;
986
987 // Calculate 2^T, at width T+W.
988 APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);
989
990 // Calculate the multiplicative inverse of K! / 2^T;
991 // this multiplication factor will perform the exact division by
992 // K! / 2^T.
993 APInt Mod = APInt::getSignedMinValue(W+1);
994 APInt MultiplyFactor = OddFactorial.zext(W+1);
995 MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
996 MultiplyFactor = MultiplyFactor.trunc(W);
997
998 // Calculate the product, at width T+W
999 IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
1000 CalculationBits);
1001 const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
1002 for (unsigned i = 1; i != K; ++i) {
1003 const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
1004 Dividend = SE.getMulExpr(Dividend,
1005 SE.getTruncateOrZeroExtend(S, CalculationTy));
1006 }
1007
1008 // Divide by 2^T
1009 const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));
1010
1011 // Truncate the result, and divide by K! / 2^T.
1012
1013 return SE.getMulExpr(SE.getConstant(MultiplyFactor),
1014 SE.getTruncateOrZeroExtend(DivResult, ResultTy));
1015}
1016
1017/// Return the value of this chain of recurrences at the specified iteration
1018/// number. We can evaluate this recurrence by multiplying each element in the
1019/// chain by the binomial coefficient corresponding to it. In other words, we
1020/// can evaluate {A,+,B,+,C,+,D} as:
1021///
1022/// A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
1023///
1024/// where BC(It, k) stands for binomial coefficient.
1025const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
1026 ScalarEvolution &SE) const {
1027 return evaluateAtIteration(makeArrayRef(op_begin(), op_end()), It, SE);
1028}
1029
1030const SCEV *
1031SCEVAddRecExpr::evaluateAtIteration(ArrayRef<const SCEV *> Operands,
1032 const SCEV *It, ScalarEvolution &SE) {
1033 assert(Operands.size() > 0);
1034 const SCEV *Result = Operands[0];
1035 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
1036 // The computation is correct in the face of overflow provided that the
1037 // multiplication is performed _after_ the evaluation of the binomial
1038 // coefficient.
1039 const SCEV *Coeff = BinomialCoefficient(It, i, SE, Result->getType());
1040 if (isa<SCEVCouldNotCompute>(Coeff))
1041 return Coeff;
1042
1043 Result = SE.getAddExpr(Result, SE.getMulExpr(Operands[i], Coeff));
1044 }
1045 return Result;
1046}
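// A worked example (hypothetical addrec): {0,+,1,+,2} evaluated at
// iteration n is 0*BC(n,0) + 1*BC(n,1) + 2*BC(n,2) = n + n*(n-1) = n^2,
// e.g. the value at iteration 5 is 25.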
1047
1048//===----------------------------------------------------------------------===//
1049// SCEV Expression folder implementations
1050//===----------------------------------------------------------------------===//
1051
1052const SCEV *ScalarEvolution::getLosslessPtrToIntExpr(const SCEV *Op,
1053 unsigned Depth) {
1054 assert(Depth <= 1 &&
1055 "getLosslessPtrToIntExpr() should self-recurse at most once.");
1056
1057 // We could be called with an integer-typed operand during SCEV rewrites.
1058 // Since the operand is an integer already, just perform zext/trunc/self cast.
1059 if (!Op->getType()->isPointerTy())
1060 return Op;
1061
1062 // What would be an ID for such a SCEV cast expression?
1063 FoldingSetNodeID ID;
1064 ID.AddInteger(scPtrToInt);
1065 ID.AddPointer(Op);
1066
1067 void *IP = nullptr;
1068
1069 // Is there already an expression for such a cast?
1070 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
1071 return S;
1072
1073 // It isn't legal for optimizations to construct new ptrtoint expressions
1074 // for non-integral pointers.
1075 if (getDataLayout().isNonIntegralPointerType(Op->getType()))
1076 return getCouldNotCompute();
1077
1078 Type *IntPtrTy = getDataLayout().getIntPtrType(Op->getType());
1079
1080 // We can only trivially model ptrtoint if SCEV's effective (integer) type
1081 // is sufficiently wide to represent all possible pointer values.
1082 // We could theoretically teach SCEV to truncate wider pointers, but
1083 // that isn't implemented for now.
1084 if (getDataLayout().getTypeSizeInBits(getEffectiveSCEVType(Op->getType())) !=
1085 getDataLayout().getTypeSizeInBits(IntPtrTy))
1086 return getCouldNotCompute();
1087
1088 // If not, is this expression something we can't reduce any further?
1089 if (auto *U = dyn_cast<SCEVUnknown>(Op)) {
1090 // Perform some basic constant folding. If the operand of the ptr2int cast
1091 // is a null pointer, don't create a ptr2int SCEV expression (that will be
1092 // left as-is), but produce a zero constant.
1093 // NOTE: We could handle a more general case, but lack motivational cases.
1094 if (isa<ConstantPointerNull>(U->getValue()))
1095 return getZero(IntPtrTy);
1096
1097 // Create an explicit cast node.
1098 // We can reuse the existing insert position since if we get here,
1099 // we won't have made any changes which would invalidate it.
1100 SCEV *S = new (SCEVAllocator)
1101 SCEVPtrToIntExpr(ID.Intern(SCEVAllocator), Op, IntPtrTy);
1102 UniqueSCEVs.InsertNode(S, IP);
1103 addToLoopUseLists(S);
1104 return S;
1105 }
1106
1107 assert(Depth == 0 && "getLosslessPtrToIntExpr() should not self-recurse for "
1108 "non-SCEVUnknown's.");
1109
1110 // Otherwise, we've got some expression that is more complex than just a
1111 // single SCEVUnknown. But we don't want to have a SCEVPtrToIntExpr of an
1112 // arbitrary expression, we want to have SCEVPtrToIntExpr of an SCEVUnknown
1113 // only, and the expressions must otherwise be integer-typed.
1114 // So sink the cast down to the SCEVUnknown's.
1115
1116 /// The SCEVPtrToIntSinkingRewriter takes a scalar evolution expression,
1117 /// which computes a pointer-typed value, and rewrites the whole expression
1118 /// tree so that *all* the computations are done on integers, and the only
1119 /// pointer-typed operands in the expression are SCEVUnknown.
1120 class SCEVPtrToIntSinkingRewriter
1121 : public SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter> {
1122 using Base = SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter>;
1123
1124 public:
1125 SCEVPtrToIntSinkingRewriter(ScalarEvolution &SE) : SCEVRewriteVisitor(SE) {}
1126
1127 static const SCEV *rewrite(const SCEV *Scev, ScalarEvolution &SE) {
1128 SCEVPtrToIntSinkingRewriter Rewriter(SE);
1129 return Rewriter.visit(Scev);
1130 }
1131
1132 const SCEV *visit(const SCEV *S) {
1133 Type *STy = S->getType();
1134 // If the expression is not pointer-typed, just keep it as-is.
1135 if (!STy->isPointerTy())
1136 return S;
1137 // Else, recursively sink the cast down into it.
1138 return Base::visit(S);
1139 }
1140
1141 const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
1142 SmallVector<const SCEV *, 2> Operands;
1143 bool Changed = false;
1144 for (auto *Op : Expr->operands()) {
1145 Operands.push_back(visit(Op));
1146 Changed |= Op != Operands.back();
1147 }
1148 return !Changed ? Expr : SE.getAddExpr(Operands, Expr->getNoWrapFlags());
1149 }
1150
1151 const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
1152 SmallVector<const SCEV *, 2> Operands;
1153 bool Changed = false;
1154 for (auto *Op : Expr->operands()) {
1155 Operands.push_back(visit(Op));
1156 Changed |= Op != Operands.back();
1157 }
1158 return !Changed ? Expr : SE.getMulExpr(Operands, Expr->getNoWrapFlags());
1159 }
1160
1161 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
1162 assert(Expr->getType()->isPointerTy() &&
1163 "Should only reach pointer-typed SCEVUnknown's.");
1164 return SE.getLosslessPtrToIntExpr(Expr, /*Depth=*/1);
1165 }
1166 };
1167
1168 // And actually perform the cast sinking.
1169 const SCEV *IntOp = SCEVPtrToIntSinkingRewriter::rewrite(Op, *this);
1170 assert(IntOp->getType()->isIntegerTy() &&
1171 "We must have succeeded in sinking the cast, "
1172 "and ending up with an integer-typed expression!");
1173 return IntOp;
1174}
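// A small example of the sinking above (hypothetical %p with 64-bit
// pointers): getLosslessPtrToIntExpr of (4 + %p) does not wrap the whole
// sum in a cast; it rewrites the tree to (4 + (ptrtoint %p to i64)), so
// ptrtoint appears only on the SCEVUnknown leaf.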
1175
1176const SCEV *ScalarEvolution::getPtrToIntExpr(const SCEV *Op, Type *Ty) {
1177 assert(Ty->isIntegerTy() && "Target type must be an integer type!");
1178
1179 const SCEV *IntOp = getLosslessPtrToIntExpr(Op);
1180 if (isa<SCEVCouldNotCompute>(IntOp))
1181 return IntOp;
1182
1183 return getTruncateOrZeroExtend(IntOp, Ty);
1184}
1185
1186const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty,
1187 unsigned Depth) {
1188 assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
1189 "This is not a truncating conversion!");
1190 assert(isSCEVable(Ty) &&
1191 "This is not a conversion to a SCEVable type!");
1192 assert(!Op->getType()->isPointerTy() && "Can't truncate pointer!");
1193 Ty = getEffectiveSCEVType(Ty);
1194
1195 FoldingSetNodeID ID;
1196 ID.AddInteger(scTruncate);
1197 ID.AddPointer(Op);
1198 ID.AddPointer(Ty);
1199 void *IP = nullptr;
1200 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1201
1202 // Fold if the operand is constant.
1203 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1204 return getConstant(
1205 cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));
1206
1207 // trunc(trunc(x)) --> trunc(x)
1208 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
1209 return getTruncateExpr(ST->getOperand(), Ty, Depth + 1);
1210
1211 // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
1212 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
1213 return getTruncateOrSignExtend(SS->getOperand(), Ty, Depth + 1);
1214
1215 // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
1216 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
1217 return getTruncateOrZeroExtend(SZ->getOperand(), Ty, Depth + 1);
1218
1219 if (Depth > MaxCastDepth) {
1220 SCEV *S =
1221 new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), Op, Ty);
1222 UniqueSCEVs.InsertNode(S, IP);
1223 addToLoopUseLists(S);
1224 return S;
1225 }
1226
1227 // trunc(x1 + ... + xN) --> trunc(x1) + ... + trunc(xN) and
1228 // trunc(x1 * ... * xN) --> trunc(x1) * ... * trunc(xN),
1229 // if after transforming we have at most one truncate, not counting truncates
1230 // that replace other casts.
1231 if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) {
1232 auto *CommOp = cast<SCEVCommutativeExpr>(Op);
1233 SmallVector<const SCEV *, 4> Operands;
1234 unsigned numTruncs = 0;
1235 for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
1236 ++i) {
1237 const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty, Depth + 1);
1238 if (!isa<SCEVIntegralCastExpr>(CommOp->getOperand(i)) &&
1239 isa<SCEVTruncateExpr>(S))
1240 numTruncs++;
1241 Operands.push_back(S);
1242 }
1243 if (numTruncs < 2) {
1244 if (isa<SCEVAddExpr>(Op))
1245 return getAddExpr(Operands);
1246 else if (isa<SCEVMulExpr>(Op))
1247 return getMulExpr(Operands);
1248 else
1249 llvm_unreachable("Unexpected SCEV type for Op.");
1250 }
1251 // Although we checked at the beginning that ID is not in the cache, it is
1252 // possible that during recursion and subsequent modification ID was
1253 // inserted into the cache. So if we find it, just return it.
1254 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
1255 return S;
1256 }
1257
1258 // If the input value is a chrec scev, truncate the chrec's operands.
1259 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
1260 SmallVector<const SCEV *, 4> Operands;
1261 for (const SCEV *Op : AddRec->operands())
1262 Operands.push_back(getTruncateExpr(Op, Ty, Depth + 1));
1263 return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
1264 }
1265
1266 // Return zero if truncating to known zeros.
1267 uint32_t MinTrailingZeros = GetMinTrailingZeros(Op);
1268 if (MinTrailingZeros >= getTypeSizeInBits(Ty))
1269 return getZero(Ty);
1270
1271 // The cast wasn't folded; create an explicit cast node. We can reuse
1272 // the existing insert position since if we get here, we won't have
1273 // made any changes which would invalidate it.
1274 SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
1275 Op, Ty);
1276 UniqueSCEVs.InsertNode(S, IP);
1277 addToLoopUseLists(S);
1278 return S;
1279}
1280
1281// Get the limit of a recurrence such that incrementing by Step cannot cause
1282// signed overflow as long as the value of the recurrence within the
1283// loop does not exceed this limit before incrementing.
1284static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
1285 ICmpInst::Predicate *Pred,
1286 ScalarEvolution *SE) {
1287 unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
1288 if (SE->isKnownPositive(Step)) {
1289 *Pred = ICmpInst::ICMP_SLT;
1290 return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
1291 SE->getSignedRangeMax(Step));
1292 }
1293 if (SE->isKnownNegative(Step)) {
1294 *Pred = ICmpInst::ICMP_SGT;
1295 return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
1296 SE->getSignedRangeMin(Step));
1297 }
1298 return nullptr;
1299}
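// For instance (hypothetical 8-bit step): with Step = 1 the limit is
// INT8_MIN - 1 == 127 (mod 2^8) and *Pred is ICMP_SLT, i.e. {X,+,1} cannot
// sign-overflow while its value stays slt 127 before each increment.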
1300
1301// Get the limit of a recurrence such that incrementing by Step cannot cause
1302// unsigned overflow as long as the value of the recurrence within the loop does
1303// not exceed this limit before incrementing.
1304static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
1305 ICmpInst::Predicate *Pred,
1306 ScalarEvolution *SE) {
1307 unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
1308 *Pred = ICmpInst::ICMP_ULT;
1309
1310 return SE->getConstant(APInt::getMinValue(BitWidth) -
1311 SE->getUnsignedRangeMax(Step));
1312}
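// Analogously (hypothetical 8-bit step): with a step whose unsigned range
// max is 3, the limit is 0 - 3 == 253 (mod 2^8) and *Pred is ICMP_ULT: no
// unsigned overflow while the value stays ult 253 before incrementing.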
1313
1314namespace {
1315
1316struct ExtendOpTraitsBase {
1317 typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *,
1318 unsigned);
1319};
1320
1321// Used to make code generic over signed and unsigned overflow.
1322template <typename ExtendOp> struct ExtendOpTraits {
1323 // Members present:
1324 //
1325 // static const SCEV::NoWrapFlags WrapType;
1326 //
1327 // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
1328 //
1329 // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
1330 // ICmpInst::Predicate *Pred,
1331 // ScalarEvolution *SE);
1332};
1333
1334template <>
1335struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
1336 static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;
1337
1338 static const GetExtendExprTy GetExtendExpr;
1339
1340 static const SCEV *getOverflowLimitForStep(const SCEV *Step,
1341 ICmpInst::Predicate *Pred,
1342 ScalarEvolution *SE) {
1343 return getSignedOverflowLimitForStep(Step, Pred, SE);
1344 }
1345};
1346
1347const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1348 SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;
1349
1350template <>
1351struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
1352 static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;
1353
1354 static const GetExtendExprTy GetExtendExpr;
1355
1356 static const SCEV *getOverflowLimitForStep(const SCEV *Step,
1357 ICmpInst::Predicate *Pred,
1358 ScalarEvolution *SE) {
1359 return getUnsignedOverflowLimitForStep(Step, Pred, SE);
1360 }
1361};
1362
1363const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1364 SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;
1365
1366} // end anonymous namespace
1367
1368// The recurrence AR has been shown to have no signed/unsigned wrap or something
1369// close to it. Typically, if we can prove NSW/NUW for AR, then we can just as
1370// easily prove NSW/NUW for its preincrement or postincrement sibling. This
1371// allows normalizing a sign/zero extended AddRec as such:
1372// {sext/zext(Step + Start),+,Step} => {Step + sext/zext(Start),+,Step}.
1373// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
1374// "sext/zext(PostIncAR)".
1375template <typename ExtendOpTy>
1376static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
1377 ScalarEvolution *SE, unsigned Depth) {
1378 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1379 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1380
1381 const Loop *L = AR->getLoop();
1382 const SCEV *Start = AR->getStart();
1383 const SCEV *Step = AR->getStepRecurrence(*SE);
1384
1385 // Check for a simple looking step prior to loop entry.
1386 const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
1387 if (!SA)
1388 return nullptr;
1389
1390 // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
1391 // subtraction is expensive. For this purpose, perform a quick and dirty
1392 // difference, by checking for Step in the operand list.
1393 SmallVector<const SCEV *, 4> DiffOps;
1394 for (const SCEV *Op : SA->operands())
1395 if (Op != Step)
1396 DiffOps.push_back(Op);
1397
1398 if (DiffOps.size() == SA->getNumOperands())
1399 return nullptr;
1400
1401 // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
1402 // `Step`:
1403
1404 // 1. NSW/NUW flags on the step increment.
1405 auto PreStartFlags =
1406 ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
1407 const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
1408 const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
1409 SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));
1410
1411 // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
1412 // "S+X does not sign/unsign-overflow".
1413 //
1414
1415 const SCEV *BECount = SE->getBackedgeTakenCount(L);
1416 if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
1417 !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
1418 return PreStart;
1419
1420 // 2. Direct overflow check on the step operation's expression.
1421 unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
1422 Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
1423 const SCEV *OperandExtendedStart =
1424 SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth),
1425 (SE->*GetExtendExpr)(Step, WideTy, Depth));
1426 if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {
1427 if (PreAR && AR->getNoWrapFlags(WrapType)) {
1428 // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
1429 // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
1430 // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`. Cache this fact.
1431 SE->setNoWrapFlags(const_cast<SCEVAddRecExpr *>(PreAR), WrapType);
1432 }
1433 return PreStart;
1434 }
1435
1436 // 3. Loop precondition.
1437 ICmpInst::Predicate Pred;
1438 const SCEV *OverflowLimit =
1439 ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);
1440
1441 if (OverflowLimit &&
1442 SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
1443 return PreStart;
1444
1445 return nullptr;
1446}
1447
1448// Get the normalized zero or sign extended expression for this AddRec's Start.
1449template <typename ExtendOpTy>
1450static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
1451 ScalarEvolution *SE,
1452 unsigned Depth) {
1453 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1454
1455 const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
1456 if (!PreStart)
1457 return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth);
1458
1459 return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty,
1460 Depth),
1461 (SE->*GetExtendExpr)(PreStart, Ty, Depth));
1462}
1463
1464// Try to prove away overflow by looking at "nearby" add recurrences. A
1465// motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
1466// does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
1467//
1468// Formally:
1469//
1470// {S,+,X} == {S-T,+,X} + T
1471// => Ext({S,+,X}) == Ext({S-T,+,X} + T)
1472//
1473// If ({S-T,+,X} + T) does not overflow ... (1)
1474//
1475// RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
1476//
1477// If {S-T,+,X} does not overflow ... (2)
1478//
1479// RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
1480// == {Ext(S-T)+Ext(T),+,Ext(X)}
1481//
1482// If (S-T)+T does not overflow ... (3)
1483//
1484// RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
1485// == {Ext(S),+,Ext(X)} == LHS
1486//
1487// Thus, if (1), (2) and (3) are true for some T, then
1488// Ext({S,+,X}) == {Ext(S),+,Ext(X)}
1489//
1490// (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T)
1491// does not overflow" restricted to the 0th iteration. Therefore we only need
1492// to check for (1) and (2).
1493//
1494// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
1495// is `Delta` (defined below).
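// Tying this back to the motivating example: with Delta = 1 the code below
// looks up PreAR = {0,+,4}; its cached <nuw> proves (2), and (1) is checked
// by comparing PreAR against getOverflowLimitForStep(1, ...), which in the
// unsigned case is exactly the "{0,+,4} ult -1" test above.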
1496template <typename ExtendOpTy>
1497bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
1498 const SCEV *Step,
1499 const Loop *L) {
1500 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1501
1502 // We restrict `Start` to a constant to prevent SCEV from spending too much
1503 // time here. It is correct (but more expensive) to continue with a
1504 // non-constant `Start` and do a general SCEV subtraction to compute
1505 // `PreStart` below.
1506 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
1507 if (!StartC)
1508 return false;
1509
1510 APInt StartAI = StartC->getAPInt();
1511
1512 for (unsigned Delta : {-2, -1, 1, 2}) {
1513 const SCEV *PreStart = getConstant(StartAI - Delta);
1514
1515 FoldingSetNodeID ID;
1516 ID.AddInteger(scAddRecExpr);
1517 ID.AddPointer(PreStart);
1518 ID.AddPointer(Step);
1519 ID.AddPointer(L);
1520 void *IP = nullptr;
1521 const auto *PreAR =
1522 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
1523
1524 // Give up if we don't already have the add recurrence we need because
1525 // actually constructing an add recurrence is relatively expensive.
1526 if (PreAR && PreAR->getNoWrapFlags(WrapType)) { // proves (2)
1527 const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
1528 ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
1529 const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
1530 DeltaS, &Pred, this);
1531 if (Limit && isKnownPredicate(Pred, PreAR, Limit)) // proves (1)
1532 return true;
1533 }
1534 }
1535
1536 return false;
1537}
1538
1539// Finds an integer D for an expression (C + x + y + ...) such that the top
1540// level addition in (D + (C - D + x + y + ...)) would not wrap (signed or
1541// unsigned) and the number of trailing zeros of (C - D + x + y + ...) is
1542// maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs, and
1543// the (C + x + y + ...) expression is \p WholeAddExpr.
1544static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
1545 const SCEVConstant *ConstantTerm,
1546 const SCEVAddExpr *WholeAddExpr) {
1547 const APInt &C = ConstantTerm->getAPInt();
1548 const unsigned BitWidth = C.getBitWidth();
1549 // Find number of trailing zeros of (x + y + ...) w/o the C first:
1550 uint32_t TZ = BitWidth;
1551 for (unsigned I = 1, E = WholeAddExpr->getNumOperands(); I < E && TZ; ++I)
1552 TZ = std::min(TZ, SE.GetMinTrailingZeros(WholeAddExpr->getOperand(I)));
1553 if (TZ) {
1554 // Set D to be as many least significant bits of C as possible while still
1555 // guaranteeing that adding D to (C - D + x + y + ...) won't cause a wrap:
1556 return TZ < BitWidth ? C.trunc(TZ).zext(BitWidth) : C;
1557 }
1558 return APInt(BitWidth, 0);
1559}
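// Worked example (illustrative numbers): with BitWidth = 8, C = 0b10110101
// and TZ = 3 for the remaining operands, D = C.trunc(3).zext(8) = 0b101.
// (C - D + x + y + ...) then has at least 3 trailing zeros, and adding
// D < 2^3 back only fills those zero low bits, so the addition cannot carry
// and therefore cannot wrap, signed or unsigned.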
1560
1561// Finds an integer D for an affine AddRec expression {C,+,x} such that the top
1562// level addition in (D + {C-D,+,x}) would not wrap (signed or unsigned) and the
1563// number of trailing zeros of (C - D + x * n) is maximized, where C is the \p
1564// ConstantStart, x is an arbitrary \p Step, and n is the loop trip count.
1565static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
1566 const APInt &ConstantStart,
1567 const SCEV *Step) {
1568 const unsigned BitWidth = ConstantStart.getBitWidth();
1569 const uint32_t TZ = SE.GetMinTrailingZeros(Step);
1570 if (TZ)
1571 return TZ < BitWidth ? ConstantStart.trunc(TZ).zext(BitWidth)
1572 : ConstantStart;
1573 return APInt(BitWidth, 0);
1574}
1575
1576const SCEV *
1577ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
1578 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1579 "This is not an extending conversion!");
1580 assert(isSCEVable(Ty) &&
1581 "This is not a conversion to a SCEVable type!");
1582 assert(!Op->getType()->isPointerTy() && "Can't extend pointer!");
1583 Ty = getEffectiveSCEVType(Ty);
1584
1585 // Fold if the operand is constant.
1586 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1587 return getConstant(
1588 cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));
1589
1590 // zext(zext(x)) --> zext(x)
1591 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
1592 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);
1593
1594 // Before doing any expensive analysis, check to see if we've already
1595 // computed a SCEV for this Op and Ty.
1596 FoldingSetNodeID ID;
1597 ID.AddInteger(scZeroExtend);
1598 ID.AddPointer(Op);
1599 ID.AddPointer(Ty);
1600 void *IP = nullptr;
1601 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1602 if (Depth > MaxCastDepth) {
1603 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
1604 Op, Ty);
1605 UniqueSCEVs.InsertNode(S, IP);
1606 addToLoopUseLists(S);
1607 return S;
1608 }
1609
1610 // zext(trunc(x)) --> zext(x) or x or trunc(x)
1611 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
1612 // It's possible the bits taken off by the truncate were all zero bits. If
1613 // so, we should be able to simplify this further.
1614 const SCEV *X = ST->getOperand();
1615 ConstantRange CR = getUnsignedRange(X);
1616 unsigned TruncBits = getTypeSizeInBits(ST->getType());
1617 unsigned NewBits = getTypeSizeInBits(Ty);
1618 if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
1619 CR.zextOrTrunc(NewBits)))
1620 return getTruncateOrZeroExtend(X, Ty, Depth);
1621 }
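// For instance (illustrative): if X is an i32 with unsigned range [0, 200],
// then zext(trunc(X) to i8) to i64 drops only zero bits, the containment
// check above succeeds, and the expression folds to zext(X) to i64.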
1622
1623 // If the input value is a chrec scev, and we can prove that the value
1624 // did not overflow the old, smaller, value, we can zero extend all of the
1625 // operands (often constants). This allows analysis of something like
1626 // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
1627 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
1628 if (AR->isAffine()) {
1629 const SCEV *Start = AR->getStart();
1630 const SCEV *Step = AR->getStepRecurrence(*this);
1631 unsigned BitWidth = getTypeSizeInBits(AR->getType());
1632 const Loop *L = AR->getLoop();
1633
1634 if (!AR->hasNoUnsignedWrap()) {
1635 auto NewFlags = proveNoWrapViaConstantRanges(AR);
1636 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
1637 }
1638
1639 // If we have special knowledge that this addrec won't overflow,
1640 // we don't need to do any further analysis.
1641 if (AR->hasNoUnsignedWrap())
1642 return getAddRecExpr(
1643 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
1644 getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
1645
1646 // Check whether the backedge-taken count is SCEVCouldNotCompute.
1647 // Note that this serves two purposes: It filters out loops that are
1648 // simply not analyzable, and it covers the case where this code is
1649 // being called from within backedge-taken count analysis, such that
1650 // attempting to ask for the backedge-taken count would likely result
1651 // in infinite recursion. In the latter case, the analysis code will
1652 // cope with a conservative value, and it will take care to purge
1653 // that value once it has finished.
1654 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
1655 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1656 // Manually compute the final value for AR, checking for overflow.
1657
1658 // Check whether the backedge-taken count can be losslessly cast to
1659 // the addrec's type. The count is always unsigned.
1660 const SCEV *CastedMaxBECount =
1661 getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
1662 const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
1663 CastedMaxBECount, MaxBECount->getType(), Depth);
1664 if (MaxBECount == RecastedMaxBECount) {
1665 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
1666 // Check whether Start+Step*MaxBECount has no unsigned overflow.
1667 const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step,
1668 SCEV::FlagAnyWrap, Depth + 1);
1669 const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul,
1670 SCEV::FlagAnyWrap,
1671 Depth + 1),
1672 WideTy, Depth + 1);
1673 const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1);
1674 const SCEV *WideMaxBECount =
1675 getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
1676 const SCEV *OperandExtendedAdd =
1677 getAddExpr(WideStart,
1678 getMulExpr(WideMaxBECount,
1679 getZeroExtendExpr(Step, WideTy, Depth + 1),
1680 SCEV::FlagAnyWrap, Depth + 1),
1681 SCEV::FlagAnyWrap, Depth + 1);
1682 if (ZAdd == OperandExtendedAdd) {
1683 // Cache knowledge of AR NUW, which is propagated to this AddRec.
1684 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW);
1685 // Return the expression with the addrec on the outside.
1686 return getAddRecExpr(
1687 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1688 Depth + 1),
1689 getZeroExtendExpr(Step, Ty, Depth + 1), L,
1690 AR->getNoWrapFlags());
1691 }
1692 // Similar to above, only this time treat the step value as signed.
1693 // This covers loops that count down.
1694 OperandExtendedAdd =
1695 getAddExpr(WideStart,
1696 getMulExpr(WideMaxBECount,
1697 getSignExtendExpr(Step, WideTy, Depth + 1),
1698 SCEV::FlagAnyWrap, Depth + 1),
1699 SCEV::FlagAnyWrap, Depth + 1);
1700 if (ZAdd == OperandExtendedAdd) {
1701 // Cache knowledge of AR NW, which is propagated to this AddRec.
1702 // Negative step causes unsigned wrap, but it still can't self-wrap.
1703 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
1704 // Return the expression with the addrec on the outside.
1705 return getAddRecExpr(
1706 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1707 Depth + 1),
1708 getSignExtendExpr(Step, Ty, Depth + 1), L,
1709 AR->getNoWrapFlags());
1710 }
1711 }
1712 }
1713
1714 // Normally, in the cases we can prove no-overflow via a
1715 // backedge guarding condition, we can also compute a backedge
1716 // taken count for the loop. The exceptions are assumptions and
1717 // guards present in the loop -- SCEV is not great at exploiting
1718 // these to compute max backedge taken counts, but can still use
1719 // these to prove lack of overflow. Use this fact to avoid
1720 // doing extra work that may not pay off.
1721 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
1722 !AC.assumptions().empty()) {
1723
1724 auto NewFlags = proveNoUnsignedWrapViaInduction(AR);
1725 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
1726 if (AR->hasNoUnsignedWrap()) {
1727 // Same as nuw case above - duplicated here to avoid a compile time
1728 // issue. It's not clear that the order of checks matters, but
1729 // it's one of two possible causes for a change which was
1730 // reverted. Be conservative for the moment.
1731 return getAddRecExpr(
1732 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1733 Depth + 1),
1734 getZeroExtendExpr(Step, Ty, Depth + 1), L,
1735 AR->getNoWrapFlags());
1736 }
1737
1738 // For a negative step, we can extend the operands iff doing so only
1739 // traverses values in the range zext([0,UINT_MAX]).
1740 if (isKnownNegative(Step)) {
1741 const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
1742 getSignedRangeMin(Step));
1743 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
1744 isKnownOnEveryIteration(ICmpInst::ICMP_UGT, AR, N)) {
1745 // Cache knowledge of AR NW, which is propagated to this
1746 // AddRec. Negative step causes unsigned wrap, but it
1747 // still can't self-wrap.
1748 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
1749 // Return the expression with the addrec on the outside.
1750 return getAddRecExpr(
1751 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1752 Depth + 1),
1753 getSignExtendExpr(Step, Ty, Depth + 1), L,
1754 AR->getNoWrapFlags());
1755 }
1756 }
1757 }
1758
1759 // zext({C,+,Step}) --> (zext(D) + zext({C-D,+,Step}))<nuw><nsw>
1760 // if D + (C - D + Step * n) could be proven to not unsigned wrap
1761 // where D maximizes the number of trailing zeros of (C - D + Step * n)
1762 if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
1763 const APInt &C = SC->getAPInt();
1764 const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
1765 if (D != 0) {
1766 const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
1767 const SCEV *SResidual =
1768 getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
1769 const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
1770 return getAddExpr(SZExtD, SZExtR,
1771 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
1772 Depth + 1);
1773 }
1774 }
1775
1776 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
1777 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW);
1778 return getAddRecExpr(
1779 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
1780 getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
1781 }
1782 }
1783
1784 // zext(A % B) --> zext(A) % zext(B)
1785 {
1786 const SCEV *LHS;
1787 const SCEV *RHS;
1788 if (matchURem(Op, LHS, RHS))
1789 return getURemExpr(getZeroExtendExpr(LHS, Ty, Depth + 1),
1790 getZeroExtendExpr(RHS, Ty, Depth + 1));
1791 }
1792
1793 // zext(A / B) --> zext(A) / zext(B).
1794 if (auto *Div = dyn_cast<SCEVUDivExpr>(Op))
1795 return getUDivExpr(getZeroExtendExpr(Div->getLHS(), Ty, Depth + 1),
1796 getZeroExtendExpr(Div->getRHS(), Ty, Depth + 1));
1797
1798 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
1799 // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw>
1800 if (SA->hasNoUnsignedWrap()) {
1801 // If the addition does not unsign overflow then we can, by definition,
1802 // commute the zero extension with the addition operation.
1803 SmallVector<const SCEV *, 4> Ops;
1804 for (const auto *Op : SA->operands())
1805 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
1806 return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1);
1807 }
1808
1809 // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...))
1810 // if D + (C - D + x + y + ...) could be proven to not unsigned wrap
1811 // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
1812 //
1813 // Often address arithmetic contains expressions like
1814 // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))).
1815 // This transformation is useful while proving that such expressions are
1816 // equal or differ by a small constant amount, see LoadStoreVectorizer pass.
1817 if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
1818 const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
1819 if (D != 0) {
1820 const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
1821 const SCEV *SResidual =
1822 getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
1823 const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
1824 return getAddExpr(SZExtD, SZExtR,
1825 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
1826 Depth + 1);
1827 }
1828 }
1829 }
1830
1831 if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) {
1832 // zext((A * B * ...)<nuw>) --> (zext(A) * zext(B) * ...)<nuw>
1833 if (SM->hasNoUnsignedWrap()) {
1834 // If the multiply does not unsign overflow then we can, by definition,
1835 // commute the zero extension with the multiply operation.
1836 SmallVector<const SCEV *, 4> Ops;
1837 for (const auto *Op : SM->operands())
1838 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
1839 return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1);
1840 }
1841
1842 // zext(2^K * (trunc X to iN)) to iM ->
1843 // 2^K * (zext(trunc X to i{N-K}) to iM)<nuw>
1844 //
1845 // Proof:
1846 //
1847 // zext(2^K * (trunc X to iN)) to iM
1848 // = zext((trunc X to iN) << K) to iM
1849 // = zext((trunc X to i{N-K}) << K)<nuw> to iM
1850 // (because shl removes the top K bits)
1851 // = zext((2^K * (trunc X to i{N-K}))<nuw>) to iM
1852 // = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>.
1853 //
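// For example, with K = 2, N = 8, M = 32 (illustrative numbers):
// zext(4 * (trunc X to i8)) to i32 == 4 * (zext(trunc X to i6) to i32),
// matching NewTruncBits = 8 - log2(4) = 6 in the code below.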
1854 if (SM->getNumOperands() == 2)
1855 if (auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0)))
1856 if (MulLHS->getAPInt().isPowerOf2())
1857 if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) {
1858 int NewTruncBits = getTypeSizeInBits(TruncRHS->getType()) -
1859 MulLHS->getAPInt().logBase2();
1860 Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits);
1861 return getMulExpr(
1862 getZeroExtendExpr(MulLHS, Ty),
1863 getZeroExtendExpr(
1864 getTruncateExpr(TruncRHS->getOperand(), NewTruncTy), Ty),
1865 SCEV::FlagNUW, Depth + 1);
1866 }
1867 }
1868
1869 // The cast wasn't folded; create an explicit cast node.
1870 // Recompute the insert position, as it may have been invalidated.
1871 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1872 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
1873 Op, Ty);
1874 UniqueSCEVs.InsertNode(S, IP);
1875 addToLoopUseLists(S);
1876 return S;
1877}
1878
1879const SCEV *
1880ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
1881 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1882 "This is not an extending conversion!");
1883 assert(isSCEVable(Ty) &&
1884 "This is not a conversion to a SCEVable type!");
1885 assert(!Op->getType()->isPointerTy() && "Can't extend pointer!");
1886 Ty = getEffectiveSCEVType(Ty);
1887
1888 // Fold if the operand is constant.
1889 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1890 return getConstant(
1891 cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty)));
1892
1893 // sext(sext(x)) --> sext(x)
1894 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
1895 return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1);
1896
1897 // sext(zext(x)) --> zext(x)
1898 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
1899 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);
1900
1901 // Before doing any expensive analysis, check to see if we've already
1902 // computed a SCEV for this Op and Ty.
1903 FoldingSetNodeID ID;
1904 ID.AddInteger(scSignExtend);
1905 ID.AddPointer(Op);
1906 ID.AddPointer(Ty);
1907 void *IP = nullptr;
1908 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1909 // Limit recursion depth.
1910 if (Depth > MaxCastDepth) {
1911 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
1912 Op, Ty);
1913 UniqueSCEVs.InsertNode(S, IP);
1914 addToLoopUseLists(S);
1915 return S;
1916 }
1917
1918 // sext(trunc(x)) --> sext(x) or x or trunc(x)
1919 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
1920 // It's possible the bits taken off by the truncate were all sign bits. If
1921 // so, we should be able to simplify this further.
1922 const SCEV *X = ST->getOperand();
1923 ConstantRange CR = getSignedRange(X);
1924 unsigned TruncBits = getTypeSizeInBits(ST->getType());
1925 unsigned NewBits = getTypeSizeInBits(Ty);
1926 if (CR.truncate(TruncBits).signExtend(NewBits).contains(
1927 CR.sextOrTrunc(NewBits)))
1928 return getTruncateOrSignExtend(X, Ty, Depth);
1929 }
1930
1931 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
1932 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
1933 if (SA->hasNoSignedWrap()) {
1934 // If the addition does not sign overflow then we can, by definition,
1935 // commute the sign extension with the addition operation.
1936 SmallVector<const SCEV *, 4> Ops;
1937 for (const auto *Op : SA->operands())
1938 Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1));
1939 return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1);
1940 }
1941
1942 // sext(C + x + y + ...) --> (sext(D) + sext((C - D) + x + y + ...))
1943 // if D + (C - D + x + y + ...) could be proven to not signed wrap
1944 // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
1945 //
1946 // For instance, this will bring two seemingly different expressions:
1947 // 1 + sext(5 + 20 * %x + 24 * %y) and
1948 // sext(6 + 20 * %x + 24 * %y)
1949 // to the same form:
1950 // 2 + sext(4 + 20 * %x + 24 * %y)
1951 if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
1952 const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
1953 if (D != 0) {
1954 const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
1955 const SCEV *SResidual =
1956 getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
1957 const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
1958 return getAddExpr(SSExtD, SSExtR,
1959 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
1960 Depth + 1);
1961 }
1962 }
1963 }
1964 // If the input value is a chrec scev, and we can prove that the value
1965 // did not overflow the old, smaller, value, we can sign extend all of the
1966 // operands (often constants). This allows analysis of something like
1967 // this: for (signed char X = 0; X < 100; ++X) { int Y = X; }
1968 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
1969 if (AR->isAffine()) {
1970 const SCEV *Start = AR->getStart();
1971 const SCEV *Step = AR->getStepRecurrence(*this);
1972 unsigned BitWidth = getTypeSizeInBits(AR->getType());
1973 const Loop *L = AR->getLoop();
1974
1975 if (!AR->hasNoSignedWrap()) {
1976 auto NewFlags = proveNoWrapViaConstantRanges(AR);
1977 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
1978 }
1979
1980 // If we have special knowledge that this addrec won't overflow,
1981 // we don't need to do any further analysis.
1982 if (AR->hasNoSignedWrap())
1983 return getAddRecExpr(
1984 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
1985 getSignExtendExpr(Step, Ty, Depth + 1), L, SCEV::FlagNSW);
1986
1987 // Check whether the backedge-taken count is SCEVCouldNotCompute.
1988 // Note that this serves two purposes: It filters out loops that are
1989 // simply not analyzable, and it covers the case where this code is
1990 // being called from within backedge-taken count analysis, such that
1991 // attempting to ask for the backedge-taken count would likely result
1992 // in infinite recursion. In the latter case, the analysis code will
1993 // cope with a conservative value, and it will take care to purge
1994 // that value once it has finished.
1995 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
1996 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1997 // Manually compute the final value for AR, checking for
1998 // overflow.
1999
2000 // Check whether the backedge-taken count can be losslessly cast to
2001 // the addrec's type. The count is always unsigned.
2002 const SCEV *CastedMaxBECount =
2003 getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
2004 const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
2005 CastedMaxBECount, MaxBECount->getType(), Depth);
2006 if (MaxBECount == RecastedMaxBECount) {
2007 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
2008 // Check whether Start+Step*MaxBECount has no signed overflow.
2009 const SCEV *SMul = getMulExpr(CastedMaxBECount, Step,
2010 SCEV::FlagAnyWrap, Depth + 1);
2011 const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul,
2012 SCEV::FlagAnyWrap,
2013 Depth + 1),
2014 WideTy, Depth + 1);
2015 const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1);
2016 const SCEV *WideMaxBECount =
2017 getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
2018 const SCEV *OperandExtendedAdd =
2019 getAddExpr(WideStart,
2020 getMulExpr(WideMaxBECount,
2021 getSignExtendExpr(Step, WideTy, Depth + 1),
2022 SCEV::FlagAnyWrap, Depth + 1),
2023 SCEV::FlagAnyWrap, Depth + 1);
2024 if (SAdd == OperandExtendedAdd) {
2025 // Cache knowledge of AR NSW, which is propagated to this AddRec.
2026 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW);
2027 // Return the expression with the addrec on the outside.
2028 return getAddRecExpr(
2029 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
2030 Depth + 1),
2031 getSignExtendExpr(Step, Ty, Depth + 1), L,
2032 AR->getNoWrapFlags());
2033 }
2034 // Similar to above, only this time treat the step value as unsigned.
2035 // This covers loops that count up with an unsigned step.
2036 OperandExtendedAdd =
2037 getAddExpr(WideStart,
2038 getMulExpr(WideMaxBECount,
2039 getZeroExtendExpr(Step, WideTy, Depth + 1),
2040 SCEV::FlagAnyWrap, Depth + 1),
2041 SCEV::FlagAnyWrap, Depth + 1);
2042 if (SAdd == OperandExtendedAdd) {
2043 // If AR wraps around then
2044 //
2045 // abs(Step) * MaxBECount > unsigned-max(AR->getType())
2046 // => SAdd != OperandExtendedAdd
2047 //
2048 // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=>
2049 // (SAdd == OperandExtendedAdd => AR is NW)
2050
2051 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
2052
2053 // Return the expression with the addrec on the outside.
2054 return getAddRecExpr(
2055 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
2056 Depth + 1),
2057 getZeroExtendExpr(Step, Ty, Depth + 1), L,
2058 AR->getNoWrapFlags());
2059 }
2060 }
2061 }
2062
2063 auto NewFlags = proveNoSignedWrapViaInduction(AR);
2064 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
2065 if (AR->hasNoSignedWrap()) {
2066 // Same as nsw case above - duplicated here to avoid a compile time
2067 // issue. It's not clear that the order of checks matters, but
2068 // it's one of two possible causes for a change which was
2069 // reverted. Be conservative for the moment.
2070 return getAddRecExpr(
2071 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
2072 getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
2073 }
2074
2075 // sext({C,+,Step}) --> (sext(D) + sext({C-D,+,Step}))<nuw><nsw>
2076 // if D + (C - D + Step * n) could be proven to not signed wrap
2077 // where D maximizes the number of trailing zeros of (C - D + Step * n)
2078 if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
2079 const APInt &C = SC->getAPInt();
2080 const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
2081 if (D != 0) {
2082 const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
2083 const SCEV *SResidual =
2084 getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
2085 const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
2086 return getAddExpr(SSExtD, SSExtR,
2087 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
2088 Depth + 1);
2089 }
2090 }
2091
2092 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
2093 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW);
2094 return getAddRecExpr(
2095 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
2096 getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
2097 }
2098 }
2099
2100 // If the input value is provably positive and we could not simplify
2101 // away the sext, build a zext instead.
2102 if (isKnownNonNegative(Op))
2103 return getZeroExtendExpr(Op, Ty, Depth + 1);
2104
2105 // The cast wasn't folded; create an explicit cast node.
2106 // Recompute the insert position, as it may have been invalidated.
2107 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2108 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
2109 Op, Ty);
2110 UniqueSCEVs.InsertNode(S, IP);
2111 addToLoopUseLists(S);
2112 return S;
2113}
2114
2115/// getAnyExtendExpr - Return a SCEV for the given operand extended with
2116/// unspecified bits out to the given type.
2117const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
2118 Type *Ty) {
2119 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
2120 "This is not an extending conversion!");
2121 assert(isSCEVable(Ty) &&
2122 "This is not a conversion to a SCEVable type!");
2123 Ty = getEffectiveSCEVType(Ty);
2124
2125 // Sign-extend negative constants.
2126 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
2127 if (SC->getAPInt().isNegative())
2128 return getSignExtendExpr(Op, Ty);
2129
2130 // Peel off a truncate cast.
2131 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
2132 const SCEV *NewOp = T->getOperand();
2133 if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
2134 return getAnyExtendExpr(NewOp, Ty);
2135 return getTruncateOrNoop(NewOp, Ty);
2136 }
2137
2138 // Next try a zext cast. If the cast is folded, use it.
2139 const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
2140 if (!isa<SCEVZeroExtendExpr>(ZExt))
2141 return ZExt;
2142
2143 // Next try a sext cast. If the cast is folded, use it.
2144 const SCEV *SExt = getSignExtendExpr(Op, Ty);
2145 if (!isa<SCEVSignExtendExpr>(SExt))
2146 return SExt;
2147
2148 // Force the cast to be folded into the operands of an addrec.
2149 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
2150 SmallVector<const SCEV *, 4> Ops;
2151 for (const SCEV *Op : AR->operands())
2152 Ops.push_back(getAnyExtendExpr(Op, Ty));
2153 return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
2154 }
2155
2156 // If the expression is obviously signed, use the sext cast value.
2157 if (isa<SCEVSMaxExpr>(Op))
2158 return SExt;
2159
2160 // Absent any other information, use the zext cast value.
2161 return ZExt;
2162}
2163
2164/// Process the given Ops list, which is a list of operands to be added under
2165/// the given scale, and update the given map. This is a helper function for
2166/// getAddExpr. As an example of what it does, given a sequence of operands
2167/// that would form an add expression like this:
2168///
2169/// m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
2170///
2171/// where A and B are constants, update the map with these values:
2172///
2173/// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
2174///
2175/// and add 13 + A*B*29 to AccumulatedConstant.
2176/// This will allow getAddExpr to produce this:
2177///
2178/// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
2179///
2180/// This form often exposes folding opportunities that are hidden in
2181/// the original operand list.
2182///
2183/// Return true iff it appears that any interesting folding opportunities
2184/// may be exposed. This helps getAddRecExpr short-circuit extra work in
2185/// the common case where no interesting opportunities are present, and
2186/// is also used as a check to avoid infinite recursion.
2187static bool
2188CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
2189 SmallVectorImpl<const SCEV *> &NewOps,
2190 APInt &AccumulatedConstant,
2191 const SCEV *const *Ops, size_t NumOperands,
2192 const APInt &Scale,
2193 ScalarEvolution &SE) {
2194 bool Interesting = false;
2195
2196 // Iterate over the add operands. They are sorted, with constants first.
2197 unsigned i = 0;
2198 while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
2199 ++i;
2200 // Pull a buried constant out to the outside.
2201 if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
2202 Interesting = true;
2203 AccumulatedConstant += Scale * C->getAPInt();
2204 }
2205
2206 // Next comes everything else. We're especially interested in multiplies
2207 // here, but they're in the middle, so just visit the rest with one loop.
2208 for (; i != NumOperands; ++i) {
2209 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
2210 if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
2211 APInt NewScale =
2212 Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt();
2213 if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
2214 // A multiplication of a constant with another add; recurse.
2215 const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
2216 Interesting |=
2217 CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
2218 Add->op_begin(), Add->getNumOperands(),
2219 NewScale, SE);
2220 } else {
2221 // A multiplication of a constant with some other value. Update
2222 // the map.
2223 SmallVector<const SCEV *, 4> MulOps(drop_begin(Mul->operands()));
2224 const SCEV *Key = SE.getMulExpr(MulOps);
2225 auto Pair = M.insert({Key, NewScale});
2226 if (Pair.second) {
2227 NewOps.push_back(Pair.first->first);
2228 } else {
2229 Pair.first->second += NewScale;
2230 // The map already had an entry for this value, which may indicate
2231 // a folding opportunity.
2232 Interesting = true;
2233 }
2234 }
2235 } else {
2236 // An ordinary operand. Update the map.
2237 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
2238 M.insert({Ops[i], Scale});
2239 if (Pair.second) {
2240 NewOps.push_back(Pair.first->first);
2241 } else {
2242 Pair.first->second += Scale;
2243 // The map already had an entry for this value, which may indicate
2244 // a folding opportunity.
2245 Interesting = true;
2246 }
2247 }
2248 }
2249
2250 return Interesting;
2251}
2252
2253bool ScalarEvolution::willNotOverflow(Instruction::BinaryOps BinOp, bool Signed,
2254 const SCEV *LHS, const SCEV *RHS) {
2255 const SCEV *(ScalarEvolution::*Operation)(const SCEV *, const SCEV *,
2256 SCEV::NoWrapFlags, unsigned);
2257 switch (BinOp) {
2258 default:
2259 llvm_unreachable("Unsupported binary op");
2260 case Instruction::Add:
2261 Operation = &ScalarEvolution::getAddExpr;
2262 break;
2263 case Instruction::Sub:
2264 Operation = &ScalarEvolution::getMinusSCEV;
2265 break;
2266 case Instruction::Mul:
2267 Operation = &ScalarEvolution::getMulExpr;
2268 break;
2269 }
2270
2271 const SCEV *(ScalarEvolution::*Extension)(const SCEV *, Type *, unsigned) =
2272 Signed ? &ScalarEvolution::getSignExtendExpr
2273 : &ScalarEvolution::getZeroExtendExpr;
2274
2275 // Check ext(LHS op RHS) == ext(LHS) op ext(RHS)
2276 auto *NarrowTy = cast<IntegerType>(LHS->getType());
2277 auto *WideTy =
2278 IntegerType::get(NarrowTy->getContext(), NarrowTy->getBitWidth() * 2);
2279
2280 const SCEV *A = (this->*Extension)(
2281 (this->*Operation)(LHS, RHS, SCEV::FlagAnyWrap, 0), WideTy, 0);
2282 const SCEV *B = (this->*Operation)((this->*Extension)(LHS, WideTy, 0),
2283 (this->*Extension)(RHS, WideTy, 0),
2284 SCEV::FlagAnyWrap, 0);
2285 return A == B;
2286}
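// Illustrative use: for an i8 addition with Signed == true, this compares
// sext(LHS + RHS) against sext(LHS) + sext(RHS), both in i16. The two SCEVs
// only fold to the same expression when the narrow add provably cannot
// sign-wrap, e.g. when both operands are known to lie in [-64, 63].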
2287
2288std::pair<SCEV::NoWrapFlags, bool /*Deduced*/>
2289ScalarEvolution::getStrengthenedNoWrapFlagsFromBinOp(
2290 const OverflowingBinaryOperator *OBO) {
2291 SCEV::NoWrapFlags Flags = SCEV::NoWrapFlags::FlagAnyWrap;
2292
2293 if (OBO->hasNoUnsignedWrap())
2294 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
2295 if (OBO->hasNoSignedWrap())
2296 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
2297
2298 bool Deduced = false;
2299
2300 if (OBO->hasNoUnsignedWrap() && OBO->hasNoSignedWrap())
2301 return {Flags, Deduced};
2302
2303 if (OBO->getOpcode() != Instruction::Add &&
2304 OBO->getOpcode() != Instruction::Sub &&
2305 OBO->getOpcode() != Instruction::Mul)
2306 return {Flags, Deduced};
2307
2308 const SCEV *LHS = getSCEV(OBO->getOperand(0));
2309 const SCEV *RHS = getSCEV(OBO->getOperand(1));
2310
2311 if (!OBO->hasNoUnsignedWrap() &&
2312 willNotOverflow((Instruction::BinaryOps)OBO->getOpcode(),
2313 /* Signed */ false, LHS, RHS)) {
2314 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
2315 Deduced = true;
2316 }
2317
2318 if (!OBO->hasNoSignedWrap() &&
2319 willNotOverflow((Instruction::BinaryOps)OBO->getOpcode(),
2320 /* Signed */ true, LHS, RHS)) {
2321 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
2322 Deduced = true;
2323 }
2324
2325 return {Flags, Deduced};
2326}
2327
2328// We're trying to construct a SCEV of type `Type' with `Ops' as operands and
2329// `Flags' as can't-wrap behavior. Infer a more aggressive set of
2330// can't-overflow flags for the operation if possible.
2331static SCEV::NoWrapFlags
2332StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type,
2333 const ArrayRef<const SCEV *> Ops,
2334 SCEV::NoWrapFlags Flags) {
2335 using namespace std::placeholders;
2336
2337 using OBO = OverflowingBinaryOperator;
2338
2339 bool CanAnalyze =
2340 Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr;
2341 (void)CanAnalyze;
2342 assert(CanAnalyze && "don't call from other places!");
2343
2344 int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
2345 SCEV::NoWrapFlags SignOrUnsignWrap =
2346 ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);
2347
2348 // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
2349 auto IsKnownNonNegative = [&](const SCEV *S) {
2350 return SE->isKnownNonNegative(S);
2351 };
2352
2353 if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative))
2354 Flags =
2355 ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
2356
2357 SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);
2358
2359 if (SignOrUnsignWrap != SignOrUnsignMask &&
2360 (Type == scAddExpr || Type == scMulExpr) && Ops.size() == 2 &&
2361 isa<SCEVConstant>(Ops[0])) {
2362
2363 auto Opcode = [&] {
2364 switch (Type) {
2365 case scAddExpr:
2366 return Instruction::Add;
2367 case scMulExpr:
2368 return Instruction::Mul;
2369 default:
2370 llvm_unreachable("Unexpected SCEV op.");
2371 }
2372 }();
2373
2374 const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt();
2375
2376 // (A <opcode> C) --> (A <opcode> C)<nsw> if the op doesn't sign overflow.
2377 if (!(SignOrUnsignWrap & SCEV::FlagNSW)) {
2378 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
2379 Opcode, C, OBO::NoSignedWrap);
2380 if (NSWRegion.contains(SE->getSignedRange(Ops[1])))
2381 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
2382 }
2383
2384 // (A <opcode> C) --> (A <opcode> C)<nuw> if the op doesn't unsign overflow.
2385 if (!(SignOrUnsignWrap & SCEV::FlagNUW)) {
2386 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
2387 Opcode, C, OBO::NoUnsignedWrap);
2388 if (NUWRegion.contains(SE->getUnsignedRange(Ops[1])))
2389 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
2390 }
2391 }
2392
2393 // <0,+,nonnegative><nw> is also nuw
2394 // TODO: Add corresponding nsw case
2395 if (Type == scAddRecExpr && ScalarEvolution::hasFlags(Flags, SCEV::FlagNW) &&
2396 !ScalarEvolution::hasFlags(Flags, SCEV::FlagNUW) && Ops.size() == 2 &&
2397 Ops[0]->isZero() && IsKnownNonNegative(Ops[1]))
2398 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
2399
2400 return Flags;
2401}
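// Sketch of the constant-range rule above (illustrative values): for
// Ops = {5, X} in i8 with the signed range of X inside [-100, 100], the
// guaranteed no-wrap region for "add 5" is [INT8_MIN, 122], which contains
// the range of X, so FlagNSW is inferred even without an nsw flag in the IR.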
2402
2403bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) {
2404 return isLoopInvariant(S, L) && properlyDominates(S, L->getHeader());
2405}
2406
2407/// Get a canonical add expression, or something simpler if possible.
2408const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
2409 SCEV::NoWrapFlags OrigFlags,
2410 unsigned Depth) {
2411 assert(!(OrigFlags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
2412 "only nuw or nsw allowed");
2413 assert(!Ops.empty() && "Cannot get empty add!");
2414 if (Ops.size() == 1) return Ops[0];
2415#ifndef NDEBUG
2416 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2417 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2418 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2419 "SCEVAddExpr operand types don't match!");
2420 unsigned NumPtrs = count_if(
2421 Ops, [](const SCEV *Op) { return Op->getType()->isPointerTy(); });
2422 assert(NumPtrs <= 1 && "add has at most one pointer operand");
2423#endif
2424
2425 // Sort by complexity; this groups all similar expression types together.
2426 GroupByComplexity(Ops, &LI, DT);
2427
2428 // If there are any constants, fold them together.
2429 unsigned Idx = 0;
2430 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2431 ++Idx;
2432 assert(Idx < Ops.size());
2433 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2434 // We found two constants, fold them together!
2435 Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt());
2436 if (Ops.size() == 2) return Ops[0];
2437 Ops.erase(Ops.begin()+1); // Erase the folded element
2438 LHSC = cast<SCEVConstant>(Ops[0]);
2439 }
2440
2441 // If we are left with a constant zero being added, strip it off.
2442 if (LHSC->getValue()->isZero()) {
2443 Ops.erase(Ops.begin());
2444 --Idx;
2445 }
2446
2447 if (Ops.size() == 1) return Ops[0];
2448 }
2449
2450 // Delay expensive flag strengthening until necessary.
2451 auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) {
2452 return StrengthenNoWrapFlags(this, scAddExpr, Ops, OrigFlags);
2453 };
2454
2455 // Limit recursion depth.
2456 if (Depth > MaxArithDepth || hasHugeExpression(Ops))
2457 return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
2458
2459 if (SCEV *S = std::get<0>(findExistingSCEVInCache(scAddExpr, Ops))) {
2460 // Don't strengthen flags if we have no new information.
2461 SCEVAddExpr *Add = static_cast<SCEVAddExpr *>(S);
2462 if (Add->getNoWrapFlags(OrigFlags) != OrigFlags)
2463 Add->setNoWrapFlags(ComputeFlags(Ops));
2464 return S;
2465 }
2466
2467 // Okay, check to see if the same value occurs in the operand list more than
2468 // once. If so, merge them together into a multiply expression. Since we
2469 // sorted the list, these values are required to be adjacent.
2470 Type *Ty = Ops[0]->getType();
2471 bool FoundMatch = false;
2472 for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
2473 if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2
2474 // Scan ahead to count how many equal operands there are.
2475 unsigned Count = 2;
2476 while (i+Count != e && Ops[i+Count] == Ops[i])
2477 ++Count;
2478 // Merge the values into a multiply.
2479 const SCEV *Scale = getConstant(Ty, Count);
2480 const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
2481 if (Ops.size() == Count)
2482 return Mul;
2483 Ops[i] = Mul;
2484 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
2485 --i; e -= Count - 1;
2486 FoundMatch = true;
2487 }
2488 if (FoundMatch)
2489 return getAddExpr(Ops, OrigFlags, Depth + 1);
2490
2491 // Check for truncates. If all the operands are truncated from the same
2492 // type, see if factoring out the truncate would permit the result to be
2493 // folded. eg., n*trunc(x) + m*trunc(y) --> trunc(trunc(n)*x + trunc(m)*y)
2494 // if the contents of the resulting outer trunc fold to something simple.
2495 auto FindTruncSrcType = [&]() -> Type * {
2496 // We're ultimately looking to fold an addrec of truncs and muls of only
2497 // constants and truncs, so if we find any other types of SCEV
2498 // as operands of the addrec then we bail and return nullptr here.
2499 // Otherwise, we return the type of the operand of a trunc that we find.
2500 if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx]))
2501 return T->getOperand()->getType();
2502 if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
2503 const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1);
2504 if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp))
2505 return T->getOperand()->getType();
2506 }
2507 return nullptr;
2508 };
2509 if (auto *SrcType = FindTruncSrcType()) {
2510 SmallVector<const SCEV *, 8> LargeOps;
2511 bool Ok = true;
2512 // Check all the operands to see if they can be represented in the
2513 // source type of the truncate.
2514 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
2515 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
2516 if (T->getOperand()->getType() != SrcType) {
2517 Ok = false;
2518 break;
2519 }
2520 LargeOps.push_back(T->getOperand());
2521 } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
2522 LargeOps.push_back(getAnyExtendExpr(C, SrcType));
2523 } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
2524 SmallVector<const SCEV *, 8> LargeMulOps;
2525 for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
2526 if (const SCEVTruncateExpr *T =
2527 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
2528 if (T->getOperand()->getType() != SrcType) {
2529 Ok = false;
2530 break;
2531 }
2532 LargeMulOps.push_back(T->getOperand());
2533 } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) {
2534 LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
2535 } else {
2536 Ok = false;
2537 break;
2538 }
2539 }
2540 if (Ok)
2541 LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1));
2542 } else {
2543 Ok = false;
2544 break;
2545 }
2546 }
2547 if (Ok) {
2548 // Evaluate the expression in the larger type.
2549 const SCEV *Fold = getAddExpr(LargeOps, SCEV::FlagAnyWrap, Depth + 1);
2550 // If it folds to something simple, use it. Otherwise, don't.
2551 if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
2552 return getTruncateExpr(Fold, Ty);
2553 }
2554 }
2555
2556 if (Ops.size() == 2) {
2557 // Check if we have an expression of the form ((X + C1) - C2), where C1 and
2558 // C2 can be folded in a way that allows retaining wrapping flags of (X +
2559 // C1).
2560 const SCEV *A = Ops[0];
2561 const SCEV *B = Ops[1];
2562 auto *AddExpr = dyn_cast<SCEVAddExpr>(B);
2563 auto *C = dyn_cast<SCEVConstant>(A);
2564 if (AddExpr && C && isa<SCEVConstant>(AddExpr->getOperand(0))) {
2565 auto C1 = cast<SCEVConstant>(AddExpr->getOperand(0))->getAPInt();
2566 auto C2 = C->getAPInt();
2567 SCEV::NoWrapFlags PreservedFlags = SCEV::FlagAnyWrap;
2568
2569 APInt ConstAdd = C1 + C2;
2570 auto AddFlags = AddExpr->getNoWrapFlags();
2571 // Adding a smaller constant is NUW if the original AddExpr was NUW.
2572 if (ScalarEvolution::hasFlags(AddFlags, SCEV::FlagNUW) &&
2573 ConstAdd.ule(C1)) {
2574 PreservedFlags =
2575 ScalarEvolution::setFlags(PreservedFlags, SCEV::FlagNUW);
2576 }
2577
2578 // Adding a constant with the same sign and small magnitude is NSW, if the
2579 // original AddExpr was NSW.
2580 if (ScalarEvolution::hasFlags(AddFlags, SCEV::FlagNSW) &&
2581 C1.isSignBitSet() == ConstAdd.isSignBitSet() &&
2582 ConstAdd.abs().ule(C1.abs())) {
2583 PreservedFlags =
2584 ScalarEvolution::setFlags(PreservedFlags, SCEV::FlagNSW);
2585 }
2586
2587 if (PreservedFlags != SCEV::FlagAnyWrap) {
2588 SmallVector<const SCEV *, 4> NewOps(AddExpr->op_begin(),
2589 AddExpr->op_end());
2590 NewOps[0] = getConstant(ConstAdd);
2591 return getAddExpr(NewOps, PreservedFlags);
2592 }
2593 }
2594 }
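// Worked example for the ((X + C1) - C2) fold above (illustrative):
// Ops = {-2, (3 + X)<nuw>} gives C1 = 3, C2 = -2 and ConstAdd = 1; since
// 1 ule 3, the nuw flag is preserved and the result is (1 + X)<nuw>.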
2595
2596 // Skip past any other cast SCEVs.
2597 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
2598 ++Idx;
2599
2600 // If there are add operands they would be next.
2601 if (Idx < Ops.size()) {
2602 bool DeletedAdd = false;
2603 // If the original flags and all inlined SCEVAddExprs are NUW, use the
2604 // common NUW flag for the expression after inlining. Other flags cannot be
2605 // preserved, because they may depend on the original order of operations.
2606 SCEV::NoWrapFlags CommonFlags = maskFlags(OrigFlags, SCEV::FlagNUW);
2607 while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
2608 if (Ops.size() > AddOpsInlineThreshold ||
2609 Add->getNumOperands() > AddOpsInlineThreshold)
2610 break;
2611 // If we have an add, expand the add operands onto the end of the operands
2612 // list.
2613 Ops.erase(Ops.begin()+Idx);
2614 Ops.append(Add->op_begin(), Add->op_end());
2615 DeletedAdd = true;
2616 CommonFlags = maskFlags(CommonFlags, Add->getNoWrapFlags());
2617 }
2618
2619 // If we deleted at least one add, we added operands to the end of the list,
2620 // and they are not necessarily sorted. Recurse to resort and resimplify
2621 // any operands we just acquired.
2622 if (DeletedAdd)
2623 return getAddExpr(Ops, CommonFlags, Depth + 1);
2624 }
2625
2626 // Skip over the add expression until we get to a multiply.
2627 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
2628 ++Idx;
2629
2630 // Check to see if there are any folding opportunities present with
2631 // operands multiplied by constant values.
2632 if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
2633 uint64_t BitWidth = getTypeSizeInBits(Ty);
2634 DenseMap<const SCEV *, APInt> M;
2635 SmallVector<const SCEV *, 8> NewOps;
2636 APInt AccumulatedConstant(BitWidth, 0);
2637 if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
2638 Ops.data(), Ops.size(),
2639 APInt(BitWidth, 1), *this)) {
2640 struct APIntCompare {
2641 bool operator()(const APInt &LHS, const APInt &RHS) const {
2642 return LHS.ult(RHS);
2643 }
2644 };
2645
2646 // Some interesting folding opportunity is present, so it's worthwhile to
2647 // re-generate the operands list. Group the operands by constant scale,
2648 // to avoid multiplying by the same constant scale multiple times.
2649 std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
2650 for (const SCEV *NewOp : NewOps)
2651 MulOpLists[M.find(NewOp)->second].push_back(NewOp);
2652 // Re-generate the operands list.
2653 Ops.clear();
2654 if (AccumulatedConstant != 0)
2655 Ops.push_back(getConstant(AccumulatedConstant));
2656 for (auto &MulOp : MulOpLists) {
2657 if (MulOp.first == 1) {
2658 Ops.push_back(getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1));
2659 } else if (MulOp.first != 0) {
2660 Ops.push_back(getMulExpr(
2661 getConstant(MulOp.first),
2662 getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1),
2663 SCEV::FlagAnyWrap, Depth + 1));
2664 }
2665 }
2666 if (Ops.empty())
2667 return getZero(Ty);
2668 if (Ops.size() == 1)
2669 return Ops[0];
2670 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2671 }
2672 }
2673
2674 // If we are adding something to a multiply expression, make sure the
2675 // something is not already an operand of the multiply. If so, merge it into
2676 // the multiply.
2677 for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
2678 const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
2679 for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
2680 const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
2681 if (isa<SCEVConstant>(MulOpSCEV))
2682 continue;
2683 for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
2684 if (MulOpSCEV == Ops[AddOp]) {
2685 // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1))
2686 const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
2687 if (Mul->getNumOperands() != 2) {
2688 // If the multiply has more than two operands, we must get the
2689 // Y*Z term.
2690 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
2691 Mul->op_begin()+MulOp);
2692 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
2693 InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
2694 }
2695 SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul};
2696 const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
2697 const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV,
2698 SCEV::FlagAnyWrap, Depth + 1);
2699 if (Ops.size() == 2) return OuterMul;
2700 if (AddOp < Idx) {
2701 Ops.erase(Ops.begin()+AddOp);
2702 Ops.erase(Ops.begin()+Idx-1);
2703 } else {
2704 Ops.erase(Ops.begin()+Idx);
2705 Ops.erase(Ops.begin()+AddOp-1);
2706 }
2707 Ops.push_back(OuterMul);
2708 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2709 }
2710
2711 // Check this multiply against other multiplies being added together.
2712 for (unsigned OtherMulIdx = Idx+1;
2713 OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
2714 ++OtherMulIdx) {
2715 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
2716 // If MulOp occurs in OtherMul, we can fold the two multiplies
2717 // together.
2718 for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
2719 OMulOp != e; ++OMulOp)
2720 if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
2721 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
2722 const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
2723 if (Mul->getNumOperands() != 2) {
2724 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
2725 Mul->op_begin()+MulOp);
2726 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
2727 InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
2728 }
2729 const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
2730 if (OtherMul->getNumOperands() != 2) {
2731 SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
2732 OtherMul->op_begin()+OMulOp);
2733 MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end());
2734 InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
2735 }
2736 SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2};
2737 const SCEV *InnerMulSum =
2738 getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
2739 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum,
2740 SCEV::FlagAnyWrap, Depth + 1);
2741 if (Ops.size() == 2) return OuterMul;
2742 Ops.erase(Ops.begin()+Idx);
2743 Ops.erase(Ops.begin()+OtherMulIdx-1);
2744 Ops.push_back(OuterMul);
2745 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2746 }
2747 }
2748 }
2749 }
2750
2751 // If there are any add recurrences in the operands list, see if any other
2752 // added values are loop invariant. If so, we can fold them into the
2753 // recurrence.
2754 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
2755 ++Idx;
2756
2757 // Scan over all recurrences, trying to fold loop invariants into them.
2758 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
2759 // Scan all of the other operands to this add and add them to the vector if
2760 // they are loop invariant w.r.t. the recurrence.
2761 SmallVector<const SCEV *, 8> LIOps;
2762 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
2763 const Loop *AddRecLoop = AddRec->getLoop();
2764 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2765 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
2766 LIOps.push_back(Ops[i]);
2767 Ops.erase(Ops.begin()+i);
2768 --i; --e;
2769 }
2770
2771 // If we found some loop invariants, fold them into the recurrence.
2772 if (!LIOps.empty()) {
2773 // Compute nowrap flags for the addition of the loop-invariant ops and
2774 // the addrec. Temporarily push it as an operand for that purpose.
2775 LIOps.push_back(AddRec);
2776 SCEV::NoWrapFlags Flags = ComputeFlags(LIOps);
2777 LIOps.pop_back();
2778
2779 // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step}
2780 LIOps.push_back(AddRec->getStart());
2781
2782 SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands());
2783 // This follows from the fact that the no-wrap flags on the outer add
2784 // expression are applicable on the 0th iteration, when the add recurrence
2785 // will be equal to its start value.
2786 AddRecOps[0] = getAddExpr(LIOps, Flags, Depth + 1);
2787
2788 // Build the new addrec. Propagate the NUW and NSW flags if both the
2789 // outer add and the inner addrec are guaranteed to have no overflow.
2790 // Always propagate NW.
2791 Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
2792 const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);
2793
2794 // If all of the other operands were loop invariant, we are done.
2795 if (Ops.size() == 1) return NewRec;
2796
2797 // Otherwise, add the folded AddRec by the non-invariant parts.
2798 for (unsigned i = 0;; ++i)
2799 if (Ops[i] == AddRec) {
2800 Ops[i] = NewRec;
2801 break;
2802 }
2803 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2804 }
2805
2806 // Okay, if there weren't any loop invariants to be folded, check to see if
2807 // there are multiple AddRec's with the same loop induction variable being
2808 // added together. If so, we can fold them.
2809 for (unsigned OtherIdx = Idx+1;
2810 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2811 ++OtherIdx) {
2812 // We expect the AddRecExpr's to be sorted in reverse dominance order,
2813 // so that the 1st found AddRecExpr is dominated by all others.
2814 assert(DT.dominates(
2815 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(),
2816 AddRec->getLoop()->getHeader()) &&
2817 "AddRecExprs are not sorted in reverse dominance order?");
2818 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
2819 // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L>
2820 SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands());
2821 for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2822 ++OtherIdx) {
2823 const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
2824 if (OtherAddRec->getLoop() == AddRecLoop) {
2825 for (unsigned i = 0, e = OtherAddRec->getNumOperands();
2826 i != e; ++i) {
2827 if (i >= AddRecOps.size()) {
2828 AddRecOps.append(OtherAddRec->op_begin()+i,
2829 OtherAddRec->op_end());
2830 break;
2831 }
2832 SmallVector<const SCEV *, 2> TwoOps = {
2833 AddRecOps[i], OtherAddRec->getOperand(i)};
2834 AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
2835 }
2836 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
2837 }
2838 }
2839 // Step size has changed, so we cannot guarantee no self-wraparound.
2840 Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
2841 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2842 }
2843 }
2844
2845 // Otherwise couldn't fold anything into this recurrence. Move onto the
2846 // next one.
2847 }
2848
2849 // Okay, it looks like we really DO need an add expr. Check to see if we
2850 // already have one, otherwise create a new one.
2851 return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
2852}
2853
2854const SCEV *
2855ScalarEvolution::getOrCreateAddExpr(ArrayRef<const SCEV *> Ops,
2856 SCEV::NoWrapFlags Flags) {
2857 FoldingSetNodeID ID;
2858 ID.AddInteger(scAddExpr);
2859 for (const SCEV *Op : Ops)
2860 ID.AddPointer(Op);
2861 void *IP = nullptr;
2862 SCEVAddExpr *S =
2863 static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2864 if (!S) {
2865 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2866 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2867 S = new (SCEVAllocator)
2868 SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size());
2869 UniqueSCEVs.InsertNode(S, IP);
2870 addToLoopUseLists(S);
2871 }
2872 S->setNoWrapFlags(Flags);
2873 return S;
2874}
2875
2876const SCEV *
2877ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops,
2878 const Loop *L, SCEV::NoWrapFlags Flags) {
2879 FoldingSetNodeID ID;
2880 ID.AddInteger(scAddRecExpr);
2881 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2882 ID.AddPointer(Ops[i]);
2883 ID.AddPointer(L);
2884 void *IP = nullptr;
2885 SCEVAddRecExpr *S =
2886 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2887 if (!S) {
2888 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2889 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2890 S = new (SCEVAllocator)
2891 SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Ops.size(), L);
2892 UniqueSCEVs.InsertNode(S, IP);
2893 addToLoopUseLists(S);
2894 }
2895 setNoWrapFlags(S, Flags);
2896 return S;
2897}
2898
2899const SCEV *
2900ScalarEvolution::getOrCreateMulExpr(ArrayRef<const SCEV *> Ops,
2901 SCEV::NoWrapFlags Flags) {
2902 FoldingSetNodeID ID;
2903 ID.AddInteger(scMulExpr);
2904 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2905 ID.AddPointer(Ops[i]);
2906 void *IP = nullptr;
2907 SCEVMulExpr *S =
2908 static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2909 if (!S) {
2910 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2911 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2912 S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
2913 O, Ops.size());
2914 UniqueSCEVs.InsertNode(S, IP);
2915 addToLoopUseLists(S);
2916 }
2917 S->setNoWrapFlags(Flags);
2918 return S;
2919}
2920
2921static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
2922 uint64_t k = i*j;
2923 if (j > 1 && k / j != i) Overflow = true;
2924 return k;
2925}
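// A hand-checked sketch of the contract above. Overflow is sticky: it is
// only ever set, never cleared, so callers must initialize it themselves.
//
//   bool Overflow = false;
//   uint64_t K = umul_ov(uint64_t(1) << 32, uint64_t(1) << 32, Overflow);
//   // K wraps to 0 and Overflow becomes true, since 2^64 needs 65 bits.
//   K = umul_ov(42, 1, Overflow);
//   // j <= 1 skips the divide-based check (which also avoids dividing by
//   // zero when j == 0); Overflow remains true from the earlier call.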
2926
2927/// Compute the result of "n choose k", the binomial coefficient. If an
2928/// intermediate computation overflows, Overflow will be set and the return will
2929/// be garbage. Overflow is not cleared on absence of overflow.
2930static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
2931 // We use the multiplicative formula:
2932 // n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
2933 // At each iteration i, we multiply by the i-th term of the numerator and
2934 // divide by i. This division will always produce an
2935 // integral result, and helps reduce the chance of overflow in the
2936 // intermediate computations. However, we can still overflow even when the
2937 // final result would fit.
2938
2939 if (k > n) return 0;
2940 if (k == 0 || k == n) return 1;
2941
2942 if (k > n/2)
2943 k = n-k;
2944
2945 uint64_t r = 1;
2946 for (uint64_t i = 1; i <= k; ++i) {
2947 r = umul_ov(r, n-(i-1), Overflow);
2948 r /= i;
2949 }
2950 return r;
2951}
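// Hand-computed sanity check: Choose(6, 2, Ov) leaves k alone (2 <= 6/2),
// then iterates r = 1*6/1 = 6 and r = 6*5/2 = 15, i.e. C(6,2) = 15. Each
// intermediate r equals C(n, i), which is why the division by i is exact.
// Choose(6, 4, Ov) first becomes Choose(6, 2, Ov) via the symmetry k -> n-k.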
2952
2953/// Determine if any of the operands in this SCEV are a constant or if
2954/// any of the add or multiply expressions in this SCEV contain a constant.
2955static bool containsConstantInAddMulChain(const SCEV *StartExpr) {
2956 struct FindConstantInAddMulChain {
2957 bool FoundConstant = false;
2958
2959 bool follow(const SCEV *S) {
2960 FoundConstant |= isa<SCEVConstant>(S);
2961 return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S);
2962 }
2963
2964 bool isDone() const {
2965 return FoundConstant;
2966 }
2967 };
2968
2969 FindConstantInAddMulChain F;
2970 SCEVTraversal<FindConstantInAddMulChain> ST(F);
2971 ST.visitAll(StartExpr);
2972 return F.FoundConstant;
2973}
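// For example, for getSCEV of (2 + %x) * %y the traversal descends through
// the mul and the add and reports the constant 2; a constant hidden under
// any other node kind (say, inside a zext operand) is not found, because
// follow() only keeps walking through add and mul expressions.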
2974
2975/// Get a canonical multiply expression, or something simpler if possible.
2976const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
2977 SCEV::NoWrapFlags OrigFlags,
2978 unsigned Depth) {
2979 assert(OrigFlags == maskFlags(OrigFlags, SCEV::FlagNUW | SCEV::FlagNSW) &&
2980 "only nuw or nsw allowed");
2981 assert(!Ops.empty() && "Cannot get empty mul!");
2982 if (Ops.size() == 1) return Ops[0];
2983#ifndef NDEBUG
2984 Type *ETy = Ops[0]->getType();
2985 assert(!ETy->isPointerTy());
2986 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2987 assert(Ops[i]->getType() == ETy &&
2988 "SCEVMulExpr operand types don't match!");
2989#endif
2990
2991 // Sort by complexity, this groups all similar expression types together.
2992 GroupByComplexity(Ops, &LI, DT);
2993
2994 // If there are any constants, fold them together.
2995 unsigned Idx = 0;
2996 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2997 ++Idx;
2998 assert(Idx < Ops.size());
2999 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
3000 // We found two constants, fold them together!
3001 Ops[0] = getConstant(LHSC->getAPInt() * RHSC->getAPInt());
3002 if (Ops.size() == 2) return Ops[0];
3003 Ops.erase(Ops.begin()+1); // Erase the folded element
3004 LHSC = cast<SCEVConstant>(Ops[0]);
3005 }
3006
3007 // If we have a multiply of zero, it will always be zero.
3008 if (LHSC->getValue()->isZero())
3009 return LHSC;
3010
3011 // If we are left with a constant one being multiplied, strip it off.
3012 if (LHSC->getValue()->isOne()) {
3013 Ops.erase(Ops.begin());
3014 --Idx;
3015 }
3016
3017 if (Ops.size() == 1)
3018 return Ops[0];
3019 }
3020
3021 // Delay expensive flag strengthening until necessary.
3022 auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) {
3023 return StrengthenNoWrapFlags(this, scMulExpr, Ops, OrigFlags);
3024 };
3025
3026 // Limit recursion calls depth.
3027 if (Depth > MaxArithDepth || hasHugeExpression(Ops))
3028 return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
3029
3030 if (SCEV *S = std::get<0>(findExistingSCEVInCache(scMulExpr, Ops))) {
3031 // Don't strengthen flags if we have no new information.
3032 SCEVMulExpr *Mul = static_cast<SCEVMulExpr *>(S);
3033 if (Mul->getNoWrapFlags(OrigFlags) != OrigFlags)
3034 Mul->setNoWrapFlags(ComputeFlags(Ops));
3035 return S;
3036 }
3037
3038 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3039 if (Ops.size() == 2) {
3040 // C1*(C2+V) -> C1*C2 + C1*V
3041 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
3042 // If any of Add's ops are Adds or Muls with a constant, apply this
3043 // transformation as well.
3044 //
3045 // TODO: There are some cases where this transformation is not
3046 // profitable; for example, Add = (C0 + X) * Y + Z. Maybe the scope of
3047 // this transformation should be narrowed down.
3048 if (Add->getNumOperands() == 2 && containsConstantInAddMulChain(Add))
3049 return getAddExpr(getMulExpr(LHSC, Add->getOperand(0),
3050 SCEV::FlagAnyWrap, Depth + 1),
3051 getMulExpr(LHSC, Add->getOperand(1),
3052 SCEV::FlagAnyWrap, Depth + 1),
3053 SCEV::FlagAnyWrap, Depth + 1);
3054
3055 if (Ops[0]->isAllOnesValue()) {
3056 // If we have a mul by -1 of an add, try distributing the -1 among the
3057 // add operands.
3058 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
3059 SmallVector<const SCEV *, 4> NewOps;
3060 bool AnyFolded = false;
3061 for (const SCEV *AddOp : Add->operands()) {
3062 const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap,
3063 Depth + 1);
3064 if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
3065 NewOps.push_back(Mul);
3066 }
3067 if (AnyFolded)
3068 return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1);
3069 } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
3070 // Negation preserves a recurrence's no self-wrap property.
3071 SmallVector<const SCEV *, 4> Operands;
3072 for (const SCEV *AddRecOp : AddRec->operands())
3073 Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap,
3074 Depth + 1));
3075
3076 return getAddRecExpr(Operands, AddRec->getLoop(),
3077 AddRec->getNoWrapFlags(SCEV::FlagNW));
3078 }
3079 }
3080 }
3081 }
3082
3083 // Skip over the add expression until we get to a multiply.
3084 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
3085 ++Idx;
3086
3087 // If there are mul operands inline them all into this expression.
3088 if (Idx < Ops.size()) {
3089 bool DeletedMul = false;
3090 while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
3091 if (Ops.size() > MulOpsInlineThreshold)
3092 break;
3093 // If we have a mul, expand the mul operands onto the end of the
3094 // operands list.
3095 Ops.erase(Ops.begin()+Idx);
3096 Ops.append(Mul->op_begin(), Mul->op_end());
3097 DeletedMul = true;
3098 }
3099
3100 // If we deleted at least one mul, we added operands to the end of the
3101 // list, and they are not necessarily sorted. Recurse to re-sort and
3102 // re-simplify any operands we just acquired.
3103 if (DeletedMul)
3104 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
3105 }
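// E.g. mul(mul(%a, %b), %c) is flattened here to mul(%a, %b, %c); the
// recursive call then re-sorts and re-simplifies the combined operand list.
// The MulOpsInlineThreshold guard keeps pathological inputs from exploding.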
3106
3107 // If there are any add recurrences in the operands list, see if any other
3108 // multiplied values are loop invariant. If so, we can fold them into the
3109 // recurrence.
3110 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
3111 ++Idx;
3112
3113 // Scan over all recurrences, trying to fold loop invariants into them.
3114 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
3115 // Scan all of the other operands to this mul and add them to the vector
3116 // if they are loop invariant w.r.t. the recurrence.
3117 SmallVector<const SCEV *, 8> LIOps;
3118 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
3119 const Loop *AddRecLoop = AddRec->getLoop();
3120 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
3121 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
3122 LIOps.push_back(Ops[i]);
3123 Ops.erase(Ops.begin()+i);
3124 --i; --e;
3125 }
3126
3127 // If we found some loop invariants, fold them into the recurrence.
3128 if (!LIOps.empty()) {
3129 // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step}
3130 SmallVector<const SCEV *, 4> NewOps;
3131 NewOps.reserve(AddRec->getNumOperands());
3132 const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1);
3133 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
3134 NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i),
3135 SCEV::FlagAnyWrap, Depth + 1));
3136
3137 // Build the new addrec. Propagate the NUW and NSW flags if both the
3138 // outer mul and the inner addrec are guaranteed to have no overflow.
3139 //
3140 // No self-wrap cannot be guaranteed after changing the step size, but
3141 // will be inferred if either NUW or NSW is true.
3142 SCEV::NoWrapFlags Flags = ComputeFlags({Scale, AddRec});
3143 const SCEV *NewRec = getAddRecExpr(
3144 NewOps, AddRecLoop, AddRec->getNoWrapFlags(Flags));
3145
3146 // If all of the other operands were loop invariant, we are done.
3147 if (Ops.size() == 1) return NewRec;
3148
3149 // Otherwise, multiply the folded AddRec by the non-invariant parts.
3150 for (unsigned i = 0;; ++i)
3151 if (Ops[i] == AddRec) {
3152 Ops[i] = NewRec;
3153 break;
3154 }
3155 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
3156 }
3157
3158 // Okay, if there weren't any loop invariants to be folded, check to see
3159 // if there are multiple AddRec's with the same loop induction variable
3160 // being multiplied together. If so, we can fold them.
3161
3162 // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
3163 // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
3164 // choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z
3165 // ]]],+,...up to x=2n}.
3166 // Note that the arguments to choose() are always integers with values
3167 // known at compile time, never SCEV objects.
3168 //
3169 // The implementation avoids pointless extra computations when the two
3170 // addrec's are of different length (mathematically, it's equivalent to
3171 // an infinite stream of zeros on the right).
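// As a concrete instance for two-operand recurrences in the same loop: the
// value at iteration i is (A + B*i)*(C + D*i) = A*C + (A*D + B*C)*i + B*D*i^2,
// and since i^2 = C(i,1) + 2*C(i,2) in the binomial basis add recurrences
// use, the product folds to {A*C,+,A*D+B*C+B*D,+,2*B*D}<L>, which is what
// the coefficient loop below computes for this case.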
3172 bool OpsModified = false;
3173 for (unsigned OtherIdx = Idx+1;
3174 OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
3175 ++OtherIdx) {
3176 const SCEVAddRecExpr *OtherAddRec =
3177 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
3178 if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop)
3179 continue;
3180
3181 // Limit max number of arguments to avoid creation of unreasonably big
3182 // SCEVAddRecs with very complex operands.
3183 if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 >
3184 MaxAddRecSize || hasHugeExpression({AddRec, OtherAddRec}))
3185 continue;
3186
3187 bool Overflow = false;
3188 Type *Ty = AddRec->getType();
3189 bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
3190 SmallVector<const SCEV*, 7> AddRecOps;
3191 for (int x = 0, xe = AddRec->getNumOperands() +
3192 OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
3193 SmallVector <const SCEV *, 7> SumOps;
3194 for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
3195 uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
3196 for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
3197 ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
3198 z < ze && !Overflow; ++z) {
3199 uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
3200 uint64_t Coeff;
3201 if (LargerThan64Bits)
3202 Coeff = umul_ov(Coeff1, Coeff2, Overflow);
3203 else
3204 Coeff = Coeff1*Coeff2;
3205 const SCEV *CoeffTerm = getConstant(Ty, Coeff);
3206 const SCEV *Term1 = AddRec->getOperand(y-z);
3207 const SCEV *Term2 = OtherAddRec->getOperand(z);
3208 SumOps.push_back(getMulExpr(CoeffTerm, Term1, Term2,
3209 SCEV::FlagAnyWrap, Depth + 1));
3210 }
3211 }
3212 if (SumOps.empty())
3213 SumOps.push_back(getZero(Ty));
3214 AddRecOps.push_back(getAddExpr(SumOps, SCEV::FlagAnyWrap, Depth + 1));
3215 }
3216 if (!Overflow) {
3217 const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRecLoop,
3218 SCEV::FlagAnyWrap);
3219 if (Ops.size() == 2) return NewAddRec;
3220 Ops[Idx] = NewAddRec;
3221 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
3222 OpsModified = true;
3223 AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
3224 if (!AddRec)
3225 break;
3226 }
3227 }
3228 if (OpsModified)
3229 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
3230
3231 // Otherwise couldn't fold anything into this recurrence. Move onto the
3232 // next one.
3233 }
3234
3235 // Okay, it looks like we really DO need a mul expr. Check to see if we
3236 // already have one, otherwise create a new one.
3237 return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
3238}
3239
3240/// Represents an unsigned remainder expression based on unsigned division.
3241const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS,
3242 const SCEV *RHS) {
3243 assert(getEffectiveSCEVType(LHS->getType()) ==
3244 getEffectiveSCEVType(RHS->getType()) &&
3245 "SCEVURemExpr operand types don't match!");
3246
3247 // Short-circuit easy cases
3248 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
3249 // If constant is one, the result is trivial
3250 if (RHSC->getValue()->isOne())
3251 return getZero(LHS->getType()); // X urem 1 --> 0
3252
3253 // If constant is a power of two, fold into a zext(trunc(LHS)).
3254 if (RHSC->getAPInt().isPowerOf2()) {
3255 Type *FullTy = LHS->getType();
3256 Type *TruncTy =
3257 IntegerType::get(getContext(), RHSC->getAPInt().logBase2());
3258 return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy);
3259 }
3260 }
3261
3262 // Fall back to the identity %x urem %y == %x -<nuw> ((%x udiv %y) *<nuw> %y)
3263 const SCEV *UDiv = getUDivExpr(LHS, RHS);
3264 const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW);
3265 return getMinusSCEV(LHS, Mult, SCEV::FlagNUW);
3266}
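// For example, with i32 %x: %x urem 16 becomes zext i4 (trunc %x to i4) to
// i32, keeping exactly the low log2(16) = 4 bits, while %x urem %y for
// non-constant %y takes the fallback path %x - (%x udiv %y) * %y. Both the
// multiply and the subtraction carry nuw, since (x/y)*y never exceeds x.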
3267
3268/// Get a canonical unsigned division expression, or something simpler if
3269/// possible.
3270const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
3271 const SCEV *RHS) {
3272 assert(!LHS->getType()->isPointerTy() &&
3273 "SCEVUDivExpr operand can't be pointer!");
3274 assert(LHS->getType() == RHS->getType() &&
3275 "SCEVUDivExpr operand types don't match!");
3276
3277 FoldingSetNodeID ID;
3278 ID.AddInteger(scUDivExpr);
3279 ID.AddPointer(LHS);
3280 ID.AddPointer(RHS);
3281 void *IP = nullptr;
3282 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
3283 return S;
3284
3285 // 0 udiv Y == 0
3286 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS))
3287 if (LHSC->getValue()->isZero())
3288 return LHS;
3289
3290 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
3291 if (RHSC->getValue()->isOne())
3292 return LHS; // X udiv 1 --> X
3293 // If the denominator is zero, the result of the udiv is undefined. Don't
3294 // try to analyze it, because the resolution chosen here may differ from
3295 // the resolution chosen in other parts of the compiler.
3296 if (!RHSC->getValue()->isZero()) {
3297 // Determine if the division can be distributed over the operands of
3298 // the LHS expression.
3299 // TODO: Generalize this to non-constants by using known-bits information.
3300 Type *Ty = LHS->getType();
3301 unsigned LZ = RHSC->getAPInt().countLeadingZeros();
3302 unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
3303 // For non-power-of-two values, effectively round the value up to the
3304 // nearest power of two.
3305 if (!RHSC->getAPInt().isPowerOf2())
3306 ++MaxShiftAmt;
3307 IntegerType *ExtTy =
3308 IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
3309 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
3310 if (const SCEVConstant *Step =
3311 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
3312 // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
3313 const APInt &StepInt = Step->getAPInt();
3314 const APInt &DivInt = RHSC->getAPInt();
3315 if (!StepInt.urem(DivInt) &&
3316 getZeroExtendExpr(AR, ExtTy) ==
3317 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
3318 getZeroExtendExpr(Step, ExtTy),
3319 AR->getLoop(), SCEV::FlagAnyWrap)) {
3320 SmallVector<const SCEV *, 4> Operands;
3321 for (const SCEV *Op : AR->operands())
3322 Operands.push_back(getUDivExpr(Op, RHS));
3323 return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW);
3324 }
3325 // Get a canonical UDivExpr for a recurrence.
3326 // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
3327 // We can currently only fold X%N if X is constant.
3328 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
3329 if (StartC && !DivInt.urem(StepInt) &&
3330 getZeroExtendExpr(AR, ExtTy) ==
3331 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
3332 getZeroExtendExpr(Step, ExtTy),
3333 AR->getLoop(), SCEV::FlagAnyWrap)) {
3334 const APInt &StartInt = StartC->getAPInt();
3335 const APInt &StartRem = StartInt.urem(StepInt);
3336 if (StartRem != 0) {
3337 const SCEV *NewLHS =
3338 getAddRecExpr(getConstant(StartInt - StartRem), Step,
3339 AR->getLoop(), SCEV::FlagNW);
3340 if (LHS != NewLHS) {
3341 LHS = NewLHS;
3342
3343 // Reset the ID to include the new LHS, and check if it is
3344 // already cached.
3345 ID.clear();
3346 ID.AddInteger(scUDivExpr);
3347 ID.AddPointer(LHS);
3348 ID.AddPointer(RHS);
3349 IP = nullptr;
3350 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
3351 return S;
3352 }
3353 }
3354 }
3355 }
3356 // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
3357 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
3358 SmallVector<const SCEV *, 4> Operands;
3359 for (const SCEV *Op : M->operands())
3360 Operands.push_back(getZeroExtendExpr(Op, ExtTy));
3361 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
3362 // Find an operand that's safely divisible.
3363 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
3364 const SCEV *Op = M->getOperand(i);
3365 const SCEV *Div = getUDivExpr(Op, RHSC);
3366 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
3367 Operands = SmallVector<const SCEV *, 4>(M->operands());
3368 Operands[i] = Div;
3369 return getMulExpr(Operands);
3370 }
3371 }
3372 }
3373
3374 // (A/B)/C --> A/(B*C) if safe and B*C can be folded.
3375 if (const SCEVUDivExpr *OtherDiv = dyn_cast<SCEVUDivExpr>(LHS)) {
3376 if (auto *DivisorConstant =
3377 dyn_cast<SCEVConstant>(OtherDiv->getRHS())) {
3378 bool Overflow = false;
3379 APInt NewRHS =
3380 DivisorConstant->getAPInt().umul_ov(RHSC->getAPInt(), Overflow);
3381 if (Overflow) {
3382 return getConstant(RHSC->getType(), 0, false);
3383 }
3384 return getUDivExpr(OtherDiv->getLHS(), getConstant(NewRHS));
3385 }
3386 }
3387
3388 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
3389 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
3390 SmallVector<const SCEV *, 4> Operands;
3391 for (const SCEV *Op : A->operands())
3392 Operands.push_back(getZeroExtendExpr(Op, ExtTy));
3393 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
3394 Operands.clear();
3395 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
3396 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
3397 if (isa<SCEVUDivExpr>(Op) ||
3398 getMulExpr(Op, RHS) != A->getOperand(i))
3399 break;
3400 Operands.push_back(Op);
3401 }
3402 if (Operands.size() == A->getNumOperands())
3403 return getAddExpr(Operands);
3404 }
3405 }
3406
3407 // Fold if both operands are constant.
3408 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
3409 Constant *LHSCV = LHSC->getValue();
3410 Constant *RHSCV = RHSC->getValue();
3411 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
3412 RHSCV)));
3413 }
3414 }
3415 }
3416
3417 // The Insertion Point (IP) might be invalid by now (due to UniqueSCEVs
3418 // changes). Make sure we get a new one.
3419 IP = nullptr;
3420 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
3421 SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
3422 LHS, RHS);
3423 UniqueSCEVs.InsertNode(S, IP);
3424 addToLoopUseLists(S);
3425 return S;
3426}
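// Two illustrative folds from the cases above, assuming the zero-extend
// comparisons prove that the intermediate values cannot wrap:
//   {0,+,4}<L> udiv 2  -->  {0,+,2}<L>   (step divisible by the divisor)
//   (8 * %x) udiv 4    -->  2 * %x       (a mul operand divides exactly)
// When no fold applies, a uniqued SCEVUDivExpr node is created instead.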
3427
3428static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) {
3429 APInt A = C1->getAPInt().abs();
3430 APInt B = C2->getAPInt().abs();
3431 uint32_t ABW = A.getBitWidth();
3432 uint32_t BBW = B.getBitWidth();
3433
3434 if (ABW > BBW)
3435 B = B.zext(ABW);
3436 else if (ABW < BBW)
3437 A = A.zext(BBW);
3438
3439 return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B));
3440}
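// A minimal sketch of the width handling: for an i8 constant 36 and an i16
// constant 24, the i8 value is zero-extended to 16 bits first, and the
// result is GreatestCommonDivisor(36, 24) = 12 as a 16-bit APInt. Taking
// abs() up front keeps the result meaningful for negative signed constants.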
3441
3442/// Get a canonical unsigned division expression, or something simpler if
3443/// possible. There is no representation for an exact udiv in SCEV IR, but we
3444/// can attempt to remove factors from the LHS and RHS. We can't do this when
3445/// it's not exact because the udiv may be clearing bits.
3446const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
3447 const SCEV *RHS) {
3448 // TODO: we could try to find factors in all sorts of things, but for now we
3449 // just deal with u/exact (multiply, constant). See SCEVDivision towards the
3450 // end of this file for inspiration.
3451
3452 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS);
3453 if (!Mul || !Mul->hasNoUnsignedWrap())
3454 return getUDivExpr(LHS, RHS);
3455
3456 if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) {
3457 // If the mulexpr multiplies by a constant, then that constant must be the
3458 // first element of the mulexpr.
3459 if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
3460 if (LHSCst == RHSCst) {
3461 SmallVector<const SCEV *, 2> Operands(drop_begin(Mul->operands()));
3462 return getMulExpr(Operands);
3463 }
3464
3465 // We can't just assume that LHSCst divides RHSCst cleanly, it could be
3466 // that there's a factor provided by one of the other terms. We need to
3467 // check.
3468 APInt Factor = gcd(LHSCst, RHSCst);
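// The isIntN(1) test below holds only for the values 0 and 1, so with a
// gcd in hand it effectively asks whether Factor is greater than 1.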
3469 if (!Factor.isIntN(1)) {
3470 LHSCst =
3471 cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor)));
3472 RHSCst =
3473 cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor)));
3474 SmallVector<const SCEV *, 2> Operands;
3475 Operands.push_back(LHSCst);
3476 Operands.append(Mul->op_begin() + 1, Mul->op_end());
3477 LHS = getMulExpr(Operands);
3478 RHS = RHSCst;
3479 Mul = dyn_cast<SCEVMulExpr>(LHS);
3480 if (!Mul)
3481 return getUDivExactExpr(LHS, RHS);
3482 }
3483 }
3484 }
3485
3486 for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
3487 if (Mul->getOperand(i) == RHS) {
3488 SmallVector<const SCEV *, 2> Operands;
3489 Operands.append(Mul->op_begin(), Mul->op_begin() + i);
3490 Operands.append(Mul->op_begin() + i + 1, Mul->op_end());
3491 return getMulExpr(Operands);
3492 }
3493 }
3494
3495 return getUDivExpr(LHS, RHS);
3496}
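// Example folds, assuming the <nuw> requirement on the mul is met:
//   (6 * %x) /u 3   -->  2 * %x   (gcd(6, 3) = 3 cancels into the constants)
//   (%x * %y) /u %y -->  %x       (exact operand match in the loop above)
// Without <nuw> the udiv may be clearing low bits, so the routine
// conservatively falls back to a plain getUDivExpr.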
3497
3498/// Get an add recurrence expression for the specified loop. Simplify the
3499/// expression as much as possible.
3500const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
3501 const Loop *L,
3502 SCEV::NoWrapFlags Flags) {
3503 SmallVector<const SCEV *, 4> Operands;
3504 Operands.push_back(Start);
3505 if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
3506 if (StepChrec->getLoop() == L) {
3507 Operands.append(StepChrec->op_begin(), StepChrec->op_end());
3508 return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
3509 }
3510
3511 Operands.push_back(Step);
3512 return getAddRecExpr(Operands, L, Flags);
3513}
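// The StepChrec case flattens a step that is itself a recurrence over the
// same loop: getAddRecExpr(X, {Y,+,Z}<L>, L, Flags) yields {X,+,Y,+,Z}<L>.
// Only FlagNW is allowed to survive the flattening, because the caller's
// NUW/NSW were stated for the two-operand form, not the flattened one.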
3514
3515/// Get an add recurrence expression for the specified loop. Simplify the
3516/// expression as much as possible.
3517const SCEV *
3518ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
3519 const Loop *L, SCEV::NoWrapFlags Flags) {
3520 if (Operands.size() == 1) return Operands[0];
3521#ifndef NDEBUG
3522 Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
3523 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
3524 assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
3525 "SCEVAddRecExpr operand types don't match!");
3526 assert(!Operands[i]->getType()->isPointerTy() && "Step must be integer");
3527 }
3528 for (unsigned i = 0, e = Operands.size(); i != e; ++i)
3529 assert(isLoopInvariant(Operands[i], L) &&
3530 "SCEVAddRecExpr operand is not loop-invariant!");
3531#endif
3532
3533 if (Operands.back()->isZero()) {
3534 Operands.pop_back();
3535 return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
3536 }
3537
3538 // It's tempting to call getConstantMaxBackedgeTakenCount here and
3539 // use that information to infer NUW and NSW flags. However, computing a
3540 // BE count requires calling getAddRecExpr, so we may not yet have a
3541 // meaningful BE count at this point (and if we don't, we'd be stuck
3542 // with a SCEVCouldNotCompute as the cached BE count).
3543
3544 Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);
3545
3546 // Canonicalize nested AddRecs by nesting them in order of loop depth.
3547 if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
3548 const Loop *NestedLoop = NestedAR->getLoop();
3549 if (L->contains(NestedLoop)
3550 ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
3551 : (!NestedLoop->contains(L) &&
3552 DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
3553 SmallVector<const SCEV *, 4> NestedOperands(NestedAR->operands());
3554 Operands[0] = NestedAR->getStart();
3555 // AddRecs require their operands be loop-invariant with respect to their
3556 // loops. Don't perform this transformation if it would break this
3557 // requirement.
3558 bool AllInvariant = all_of(
3559 Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); });
3560
3561 if (AllInvariant) {
3562 // Create a recurrence for the outer loop with the same step size.
3563 //
3564 // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
3565 // inner recurrence has the same property.
3566 SCEV::NoWrapFlags OuterFlags =
3567 maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());
3568
3569 NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
3570 AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {
3571 return isLoopInvariant(Op, NestedLoop);
3572 });
3573
3574 if (AllInvariant) {
3575 // Ok, both add recurrences are valid after the transformation.
3576 //
3577 // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
3578 // the outer recurrence has the same property.
3579 SCEV::NoWrapFlags InnerFlags =
3580 maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
3581 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
3582 }
3583 }
3584 // Reset Operands to its original state.
3585 Operands[0] = NestedAR;
3586 }
3587 }
3588
3589 // Okay, it looks like we really DO need an addrec expr. Check to see if we
3590 // already have one, otherwise create a new one.
3591 return getOrCreateAddRecExpr(Operands, L, Flags);
3592}
3593
3594const SCEV *
3595ScalarEvolution::getGEPExpr(GEPOperator *GEP,
3596 const SmallVectorImpl<const SCEV *> &IndexExprs) {
3597 const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand());
3598 // getSCEV(Base)->getType() has the same address space as Base->getType()
3599 // because SCEV::getType() preserves the address space.
3600 Type *IntIdxTy = getEffectiveSCEVType(BaseExpr->getType());
3601 // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP
3602 // instruction to its SCEV, because the Instruction may be guarded by control
3603 // flow and the no-overflow bits may not be valid for the expression in any
3604 // context. This can be fixed similarly to how these flags are handled for
3605 // adds.
3606 SCEV::NoWrapFlags OffsetWrap =
3607 GEP->isInBounds() ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
3608
3609 Type *CurTy = GEP->getType();
3610 bool FirstIter = true;
3611 SmallVector<const SCEV *, 4> Offsets;
3612 for (const SCEV *IndexExpr : IndexExprs) {
3613 // Compute the (potentially symbolic) offset in bytes for this index.
3614 if (StructType *STy = dyn_cast<StructType>(CurTy)) {
3615 // For a struct, add the member offset.
3616 ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue();
3617 unsigned FieldNo = Index->getZExtValue();
3618 const SCEV *FieldOffset = getOffsetOfExpr(IntIdxTy, STy, FieldNo);
3619 Offsets.push_back(FieldOffset);
3620
3621 // Update CurTy to the type of the field at Index.
3622 CurTy = STy->getTypeAtIndex(Index);
3623 } else {
3624 // Update CurTy to its element type.
3625 if (FirstIter) {
3626 assert(isa<PointerType>(CurTy) &&
3627 "The first index of a GEP indexes a pointer");
3628 CurTy = GEP->getSourceElementType();
3629 FirstIter = false;
3630 } else {
3631 CurTy = GetElementPtrInst::getTypeAtIndex(CurTy, (uint64_t)0);
3632 }
3633 // For an array, add the element offset, explicitly scaled.
3634 const SCEV *ElementSize = getSizeOfExpr(IntIdxTy, CurTy);
3635 // Getelementptr indices are signed.
3636 IndexExpr = getTruncateOrSignExtend(IndexExpr, IntIdxTy);
3637
3638 // Multiply the index by the element size to compute the element offset.
3639 const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, OffsetWrap);
3640 Offsets.push_back(LocalOffset);
3641 }
3642 }
3643
3644 // Handle degenerate case of GEP without offsets.
3645 if (Offsets.empty())
3646 return BaseExpr;
3647
3648 // Add the offsets together, assuming nsw if inbounds.
3649 const SCEV *Offset = getAddExpr(Offsets, OffsetWrap);
3650 // Add the base address and the offset. We cannot use the nsw flag, as the
3651 // base address is unsigned. However, if we know that the offset is
3652 // non-negative, we can use nuw.
3653 SCEV::NoWrapFlags BaseWrap = GEP->isInBounds() && isKnownNonNegative(Offset)
3654 ? SCEV::FlagNUW : SCEV::FlagAnyWrap;
3655 auto *GEPExpr = getAddExpr(BaseExpr, Offset, BaseWrap);
3656 assert(BaseExpr->getType() == GEPExpr->getType() &&
3657 "GEP should not change type mid-flight.");
3658 return GEPExpr;
3659}
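// For a GEP such as (types and names purely illustrative):
//   getelementptr inbounds {i32, [10 x i32]}, {i32, [10 x i32]}* %p,
//                 i64 %i, i32 1, i64 %j
// the loop above collects Offsets = { %i * 44, 4, %j * 4 } under a typical
// data layout (44 = struct size, 4 = field offset and i32 size), sums them
// with nsw from inbounds, and adds the sum to getSCEV(%p), upgrading the
// outer add to nuw only when the offset is provably non-negative.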
3660
3661std::tuple<SCEV *, FoldingSetNodeID, void *>
3662ScalarEvolution::findExistingSCEVInCache(SCEVTypes SCEVType,
3663 ArrayRef<const SCEV *> Ops) {
3664 FoldingSetNodeID ID;
3665 void *IP = nullptr;
3666 ID.AddInteger(SCEVType);
3667 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
3668 ID.AddPointer(Ops[i]);
3669 return std::tuple<SCEV *, FoldingSetNodeID, void *>(
3670 UniqueSCEVs.FindNodeOrInsertPos(ID, IP), std::move(ID), IP);
3671}
3672
3673const SCEV *ScalarEvolution::getAbsExpr(const SCEV *Op, bool IsNSW) {
3674 SCEV::NoWrapFlags Flags = IsNSW ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
3675 return getSMaxExpr(Op, getNegativeSCEV(Op, Flags));
3676}
3677
3678const SCEV *ScalarEvolution::getMinMaxExpr(SCEVTypes Kind,
3679 SmallVectorImpl<const SCEV *> &Ops) {
3680 assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!");
3681 if (Ops.size() == 1) return Ops[0];
3682#ifndef NDEBUG
3683 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
3684 for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
3685 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
3686 "Operand types don't match!");
3687 assert(Ops[0]->getType()->isPointerTy() ==
3688 Ops[i]->getType()->isPointerTy() &&
3689 "min/max should be consistently pointerish");
3690 }
3691#endif
3692
3693 bool IsSigned = Kind == scSMaxExpr || Kind == scSMinExpr;
3694 bool IsMax = Kind == scSMaxExpr || Kind == scUMaxExpr;
3695
3696 // Sort by complexity, this groups all similar expression types together.
3697 GroupByComplexity(Ops, &LI, DT);
3698
3699 // Check if we have created the same expression before.
3700 if (const SCEV *S = std::get<0>(findExistingSCEVInCache(Kind, Ops))) {
3701 return S;
3702 }
3703
3704 // If there are any constants, fold them together.
3705 unsigned Idx = 0;
3706 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3707 ++Idx;
3708 assert(Idx < Ops.size());
3709 auto FoldOp = [&](const APInt &LHS, const APInt &RHS) {
3710 if (Kind == scSMaxExpr)
3711 return APIntOps::smax(LHS, RHS);
3712 else if (Kind == scSMinExpr)
3713 return APIntOps::smin(LHS, RHS);
3714 else if (Kind == scUMaxExpr)
3715 return APIntOps::umax(LHS, RHS);
3716 else if (Kind == scUMinExpr)
3717 return APIntOps::umin(LHS, RHS);
3718 llvm_unreachable("Unknown SCEV min/max opcode")__builtin_unreachable();
3719 };
3720
3721 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
3722 // We found two constants, fold them together!
3723 ConstantInt *Fold = ConstantInt::get(
3724 getContext(), FoldOp(LHSC->getAPInt(), RHSC->getAPInt()));
3725 Ops[0] = getConstant(Fold);
3726 Ops.erase(Ops.begin()+1); // Erase the folded element
3727 if (Ops.size() == 1) return Ops[0];
3728 LHSC = cast<SCEVConstant>(Ops[0]);
3729 }
3730
3731 bool IsMinV = LHSC->getValue()->isMinValue(IsSigned);
3732 bool IsMaxV = LHSC->getValue()->isMaxValue(IsSigned);
3733
3734 if (IsMax ? IsMinV : IsMaxV) {
3735 // If we are left with a constant minimum(/maximum)-int, strip it off.
3736 Ops.erase(Ops.begin());
3737 --Idx;
3738 } else if (IsMax ? IsMaxV : IsMinV) {
3739 // If we have a max(/min) with a constant maximum(/minimum)-int,
3740 // it will always be the extremum.
3741 return LHSC;
3742 }
3743
3744 if (Ops.size() == 1) return Ops[0];
3745 }
3746
3747 // Find the first operation of the same kind
3748 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < Kind)
3749 ++Idx;
3750
3751 // Check to see if one of the operands is of the same kind. If so, expand its
3752 // operands onto our operand list, and recurse to simplify.
3753 if (Idx < Ops.size()) {
3754 bool DeletedAny = false;
3755 while (Ops[Idx]->getSCEVType() == Kind) {
3756 const SCEVMinMaxExpr *SMME = cast<SCEVMinMaxExpr>(Ops[Idx]);
3757 Ops.erase(Ops.begin()+Idx);
3758 Ops.append(SMME->op_begin(), SMME->op_end());
3759 DeletedAny = true;
3760 }
3761
3762 if (DeletedAny)
3763 return getMinMaxExpr(Kind, Ops);
3764 }
3765
3766 // Okay, check to see if the same value occurs in the operand list twice. If
3767 // so, delete one. Since we sorted the list, these values are required to
3768 // be adjacent.
3769 llvm::CmpInst::Predicate GEPred =
3770 IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
3771 llvm::CmpInst::Predicate LEPred =
3772 IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
3773 llvm::CmpInst::Predicate FirstPred = IsMax ? GEPred : LEPred;
3774 llvm::CmpInst::Predicate SecondPred = IsMax ? LEPred : GEPred;
3775 for (unsigned i = 0, e = Ops.size() - 1; i != e; ++i) {
3776 if (Ops[i] == Ops[i + 1] ||
3777 isKnownViaNonRecursiveReasoning(FirstPred, Ops[i], Ops[i + 1])) {
3778 // X op Y op Y --> X op Y
3779 // X op Y --> X, if we know X, Y are ordered appropriately
3780 Ops.erase(Ops.begin() + i + 1, Ops.begin() + i + 2);
3781 --i;
3782 --e;
3783 } else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[i],
3784 Ops[i + 1])) {
3785 // X op Y --> Y, if we know X, Y are ordered appropriately
3786 Ops.erase(Ops.begin() + i, Ops.begin() + i + 1);
3787 --i;
3788 --e;
3789 }
3790 }
3791
3792 if (Ops.size() == 1) return Ops[0];
3793
3794 assert(!Ops.empty() && "Reduced smax down to nothing!");
3795
3796 // Okay, it looks like we really DO need an expr. Check to see if we
3797 // already have one, otherwise create a new one.
3798 const SCEV *ExistingSCEV;
3799 FoldingSetNodeID ID;
3800 void *IP;
3801 std::tie(ExistingSCEV, ID, IP) = findExistingSCEVInCache(Kind, Ops);
3802 if (ExistingSCEV)
3803 return ExistingSCEV;
3804 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
3805 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
3806 SCEV *S = new (SCEVAllocator)
3807 SCEVMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size());
3808
3809 UniqueSCEVs.InsertNode(S, IP);
3810 addToLoopUseLists(S);
3811 return S;
3812}
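// A worked pass through the rules above for Kind == scSMaxExpr: given
// smax(3, smax(%x, 5), %x), the nested smax is inlined and the routine
// recurses; the recursive pass folds smax(3, 5) to 5 and the adjacent
// duplicate scan drops the repeated %x, leaving smax(5, %x), which is then
// uniqued through the folding set.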
3813
3814const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, const SCEV *RHS) {
3815 SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
3816 return getSMaxExpr(Ops);
3817}
3818
3819const SCEV *ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
3820 return getMinMaxExpr(scSMaxExpr, Ops);
3821}
3822
3823const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, const SCEV *RHS) {
3824 SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
3825 return getUMaxExpr(Ops);
3826}
3827
3828const SCEV *ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
3829 return getMinMaxExpr(scUMaxExpr, Ops);
3830}
3831
3832const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
3833 const SCEV *RHS) {
3834 SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
3835 return getSMinExpr(Ops);
3836}
3837
3838const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
3839 return getMinMaxExpr(scSMinExpr, Ops);
3840}
3841
3842const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
3843 const SCEV *RHS) {
3844 SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
3845 return getUMinExpr(Ops);
3846}
3847
3848const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
3849 return getMinMaxExpr(scUMinExpr, Ops);
3850}
3851
3852const SCEV *
3853ScalarEvolution::getSizeOfScalableVectorExpr(Type *IntTy,
3854 ScalableVectorType *ScalableTy) {
3855 Constant *NullPtr = Constant::getNullValue(ScalableTy->getPointerTo());
3856 Constant *One = ConstantInt::get(IntTy, 1);
3857 Constant *GEP = ConstantExpr::getGetElementPtr(ScalableTy, NullPtr, One);
3858 // Note that the expression we created is the final expression; we don't
3859 // want to simplify it any further. Also, if we call a normal getSCEV(),
3860 // we'll end up in an endless recursion. So just create an SCEVUnknown.
3861 return getUnknown(ConstantExpr::getPtrToInt(GEP, IntTy));
3862}
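// The constant built here is the classic "sizeof via gep" idiom: for
// <vscale x 4 x i32> it is ptrtoint(getelementptr(null, 1)), a
// target-independent spelling of 16 * vscale bytes. Wrapping it in a
// SCEVUnknown keeps SCEV from trying to decompose it further.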
3863
3864const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
3865 if (auto *ScalableAllocTy = dyn_cast<ScalableVectorType>(AllocTy))
3866 return getSizeOfScalableVectorExpr(IntTy, ScalableAllocTy);
3867 // We can bypass creating a target-independent constant expression and then
3868 // folding it back into a ConstantInt. This is just a compile-time
3869 // optimization.
3870 return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy));
3871}
3872
3873const SCEV *ScalarEvolution::getStoreSizeOfExpr(Type *IntTy, Type *StoreTy) {
3874 if (auto *ScalableStoreTy = dyn_cast<ScalableVectorType>(StoreTy))
3875 return getSizeOfScalableVectorExpr(IntTy, ScalableStoreTy);
3876 // We can bypass creating a target-independent constant expression and then
3877 // folding it back into a ConstantInt. This is just a compile-time
3878 // optimization.
3879 return getConstant(IntTy, getDataLayout().getTypeStoreSize(StoreTy));
3880}
3881
3882const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
3883 StructType *STy,
3884 unsigned FieldNo) {
3885 // We can bypass creating a target-independent constant expression and then
3886 // folding it back into a ConstantInt. This is just a compile-time
3887 // optimization.
3888 return getConstant(
3889 IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo));
3890}
3891
3892const SCEV *ScalarEvolution::getUnknown(Value *V) {
3893 // Don't attempt to do anything other than create a SCEVUnknown object
3894 // here. createSCEV only calls getUnknown after checking for all other
3895 // interesting possibilities, and any other code that calls getUnknown
3896 // is doing so in order to hide a value from SCEV canonicalization.
3897
3898 FoldingSetNodeID ID;
3899 ID.AddInteger(scUnknown);
3900 ID.AddPointer(V);
3901 void *IP = nullptr;
3902 if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
3903 assert(cast<SCEVUnknown>(S)->getValue() == V &&
3904 "Stale SCEVUnknown in uniquing map!");
3905 return S;
3906 }
3907 SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
3908 FirstUnknown);
3909 FirstUnknown = cast<SCEVUnknown>(S);
3910 UniqueSCEVs.InsertNode(S, IP);
3911 return S;
3912}
3913
3914//===----------------------------------------------------------------------===//
3915// Basic SCEV Analysis and PHI Idiom Recognition Code
3916//
3917
3918/// Test if values of the given type are analyzable within the SCEV
3919/// framework. This primarily includes integer types, and it can optionally
3920/// include pointer types if the ScalarEvolution class has access to
3921/// target-specific information.
3922bool ScalarEvolution::isSCEVable(Type *Ty) const {
3923 // Integers and pointers are always SCEVable.
3924 return Ty->isIntOrPtrTy();
3925}
3926
3927/// Return the size in bits of the specified type, for which isSCEVable must
3928/// return true.
3929uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
3930 assert(isSCEVable(Ty) && "Type is not SCEVable!");
3931 if (Ty->isPointerTy())
3932 return getDataLayout().getIndexTypeSizeInBits(Ty);
3933 return getDataLayout().getTypeSizeInBits(Ty);
3934}
3935
3936/// Return a type with the same bitwidth as the given type and which represents
3937/// how SCEV will treat the given type, for which isSCEVable must return
3938/// true. For pointer types, this is the pointer index sized integer type.
3939Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
3940 assert(isSCEVable(Ty) && "Type is not SCEVable!");
3941
3942 if (Ty->isIntegerTy())
3943 return Ty;
3944
3945 // The only other supported type is pointer.
3946 assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
3947 return getDataLayout().getIndexType(Ty);
3948}
3949
3950Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const {
3951 return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ? T1 : T2;
3952}
3953
3954const SCEV *ScalarEvolution::getCouldNotCompute() {
3955 return CouldNotCompute.get();
3956}
3957
3958bool ScalarEvolution::checkValidity(const SCEV *S) const {
3959 bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) {
3960 auto *SU = dyn_cast<SCEVUnknown>(S);
3961 return SU && SU->getValue() == nullptr;
3962 });
3963
3964 return !ContainsNulls;
3965}
3966
3967bool ScalarEvolution::containsAddRecurrence(const SCEV *S) {
3968 HasRecMapType::iterator I = HasRecMap.find(S);
3969 if (I != HasRecMap.end())
3970 return I->second;
3971
3972 bool FoundAddRec =
3973 SCEVExprContains(S, [](const SCEV *S) { return isa<SCEVAddRecExpr>(S); });
3974 HasRecMap.insert({S, FoundAddRec});
3975 return FoundAddRec;
3976}
3977
3978/// Try to split a SCEVAddExpr into a pair of {SCEV, ConstantInt}.
3979/// If \p S is a SCEVAddExpr and is composed of a sub SCEV S' and an
3980/// offset I, then return {S', I}, else return {\p S, nullptr}.
3981static std::pair<const SCEV *, ConstantInt *> splitAddExpr(const SCEV *S) {
3982 const auto *Add = dyn_cast<SCEVAddExpr>(S);
3983 if (!Add)
3984 return {S, nullptr};
3985
3986 if (Add->getNumOperands() != 2)
3987 return {S, nullptr};
3988
3989 auto *ConstOp = dyn_cast<SCEVConstant>(Add->getOperand(0));
3990 if (!ConstOp)
3991 return {S, nullptr};
3992
3993 return {Add->getOperand(1), ConstOp->getValue()};
3994}
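// Sketch: for S = (42 + %x) this returns {getSCEV(%x), 42}, relying on the
// canonical form that places a constant, if present, as operand 0 of the
// add. Any other shape (not an add, more than two operands, or no leading
// constant) comes back unchanged as {S, nullptr}.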
3995
3996/// Return the ValueOffsetPair set for \p S. \p S can be represented
3997/// by the value and offset from any ValueOffsetPair in the set.
3998ScalarEvolution::ValueOffsetPairSetVector *
3999ScalarEvolution::getSCEVValues(const SCEV *S) {
4000 ExprValueMapType::iterator SI = ExprValueMap.find_as(S);
4001 if (SI == ExprValueMap.end())
4002 return nullptr;
4003#ifndef NDEBUG
4004 if (VerifySCEVMap) {
4005 // Check there is no dangling Value in the set returned.
4006 for (const auto &VE : SI->second)
4007 assert(ValueExprMap.count(VE.first));
4008 }
4009#endif
4010 return &SI->second;
4011}
4012
4013/// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V)
4014/// cannot be used separately. eraseValueFromMap should be used to remove
4015/// V from ValueExprMap and ExprValueMap at the same time.
4016void ScalarEvolution::eraseValueFromMap(Value *V) {
4017 ValueExprMapType::iterator I = ValueExprMap.find_as(V);
4018 if (I != ValueExprMap.end()) {
4019 const SCEV *S = I->second;
4020 // Remove {V, 0} from the set of ExprValueMap[S]
4021 if (auto *SV = getSCEVValues(S))
4022 SV->remove({V, nullptr});
4023
4024 // Remove {V, Offset} from the set of ExprValueMap[Stripped]
4025 const SCEV *Stripped;
4026 ConstantInt *Offset;
4027 std::tie(Stripped, Offset) = splitAddExpr(S);
4028 if (Offset != nullptr) {
4029 if (auto *SV = getSCEVValues(Stripped))
4030 SV->remove({V, Offset});
4031 }
4032 ValueExprMap.erase(V);
4033 }
4034}
4035
4036/// Check whether value has nuw/nsw/exact set but SCEV does not.
4037/// TODO: In reality it is better to check the poison recursively
4038/// but this is better than nothing.
4039static bool SCEVLostPoisonFlags(const SCEV *S, const Value *V) {
4040 if (auto *I = dyn_cast<Instruction>(V)) {
4041 if (isa<OverflowingBinaryOperator>(I)) {
4042 if (auto *NS = dyn_cast<SCEVNAryExpr>(S)) {
4043 if (I->hasNoSignedWrap() && !NS->hasNoSignedWrap())
4044 return true;
4045 if (I->hasNoUnsignedWrap() && !NS->hasNoUnsignedWrap())
4046 return true;
4047 }
4048 } else if (isa<PossiblyExactOperator>(I) && I->isExact())
4049 return true;
4050 }
4051 return false;
4052}
4053
4054/// Return an existing SCEV if it exists, otherwise analyze the expression and
4055/// create a new one.
4056const SCEV *ScalarEvolution::getSCEV(Value *V) {
4057 assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
4058
4059 const SCEV *S = getExistingSCEV(V);
4060 if (S == nullptr) {
4061 S = createSCEV(V);
4062 // During PHI resolution, it is possible to create two SCEVs for the same
4063 // V, so we need to double-check whether V->S was inserted into
4064 // ValueExprMap before inserting S->{V, 0} into ExprValueMap.
4065 std::pair<ValueExprMapType::iterator, bool> Pair =
4066 ValueExprMap.insert({SCEVCallbackVH(V, this), S});
4067 if (Pair.second && !SCEVLostPoisonFlags(S, V)) {
4068 ExprValueMap[S].insert({V, nullptr});
4069
4070 // If S == Stripped + Offset, add Stripped -> {V, Offset} into
4071 // ExprValueMap.
4072 const SCEV *Stripped = S;
4073 ConstantInt *Offset = nullptr;
4074 std::tie(Stripped, Offset) = splitAddExpr(S);
4075 // If stripped is SCEVUnknown, don't bother to save
4076 // Stripped -> {V, offset}. It doesn't simplify and sometimes even
4077 // increase the complexity of the expansion code.
4078 // If V is GetElementPtrInst, don't save Stripped -> {V, offset}
4079 // because it may generate add/sub instead of GEP in SCEV expansion.
4080 if (Offset != nullptr && !isa<SCEVUnknown>(Stripped) &&
4081 !isa<GetElementPtrInst>(V))
4082 ExprValueMap[Stripped].insert({V, Offset});
4083 }
4084 }
4085 return S;
4086}
4087
4088const SCEV *ScalarEvolution::getExistingSCEV(Value *V) {
4089 assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
4090
4091 ValueExprMapType::iterator I = ValueExprMap.find_as(V);
4092 if (I != ValueExprMap.end()) {
4093 const SCEV *S = I->second;
4094 if (checkValidity(S))
4095 return S;
4096 eraseValueFromMap(V);
4097 forgetMemoizedResults(S);
4098 }
4099 return nullptr;
4100}
4101
4102/// Return a SCEV corresponding to -V = -1*V
4103const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V,
4104 SCEV::NoWrapFlags Flags) {
4105 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
4106 return getConstant(
4107 cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));
4108
4109 Type *Ty = V->getType();
4110 Ty = getEffectiveSCEVType(Ty);
4111 return getMulExpr(V, getMinusOne(Ty), Flags);
4112}
4113
4114/// If Expr computes ~A, return A; otherwise return nullptr.
4115static const SCEV *MatchNotExpr(const SCEV *Expr) {
4116 const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr);
4117 if (!Add || Add->getNumOperands() != 2 ||
4118 !Add->getOperand(0)->isAllOnesValue())
4119 return nullptr;
4120
4121 const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1));
4122 if (!AddRHS || AddRHS->getNumOperands() != 2 ||
4123 !AddRHS->getOperand(0)->isAllOnesValue())
4124 return nullptr;
4125
4126 return AddRHS->getOperand(1);
4127}
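// SCEV has no dedicated "not" node; ~A is canonically built as
// (-1) + (-1) * A. The matcher above therefore looks for exactly
// add(allones, mul(allones, A)) and returns A, or nullptr on any mismatch.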
4128
4129/// Return a SCEV corresponding to ~V = -1-V
4130const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
4131 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
4132 return getConstant(
4133 cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));
4134
4135 // Fold ~(u|s)(min|max)(~x, ~y) to (u|s)(max|min)(x, y)
4136 if (const SCEVMinMaxExpr *MME = dyn_cast<SCEVMinMaxExpr>(V)) {
4137 auto MatchMinMaxNegation = [&](const SCEVMinMaxExpr *MME) {
4138 SmallVector<const SCEV *, 2> MatchedOperands;
4139 for (const SCEV *Operand : MME->operands()) {
4140 const SCEV *Matched = MatchNotExpr(Operand);
4141 if (!Matched)
4142 return (const SCEV *)nullptr;
4143 MatchedOperands.push_back(Matched);
4144 }
4145 return getMinMaxExpr(SCEVMinMaxExpr::negate(MME->getSCEVType()),
4146 MatchedOperands);
4147 };
4148 if (const SCEV *Replaced = MatchMinMaxNegation(MME))
4149 return Replaced;
4150 }
4151
4152 Type *Ty = V->getType();
4153 Ty = getEffectiveSCEVType(Ty);
4154 return getMinusSCEV(getMinusOne(Ty), V);
4155}
4156
4157const SCEV *ScalarEvolution::removePointerBase(const SCEV *P) {
4158 assert(P->getType()->isPointerTy());
4159
4160 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(P)) {
4161 // The base of an AddRec is the first operand.
4162 SmallVector<const SCEV *> Ops{AddRec->operands()};
4163 Ops[0] = removePointerBase(Ops[0]);
4164 // Don't try to transfer nowrap flags for now. We could in some cases
4165 // (for example, if pointer operand of the AddRec is a SCEVUnknown).
4166 return getAddRecExpr(Ops, AddRec->getLoop(), SCEV::FlagAnyWrap);
4167 }
4168 if (auto *Add = dyn_cast<SCEVAddExpr>(P)) {
4169 // The base of an Add is the pointer operand.
4170 SmallVector<const SCEV *> Ops{Add->operands()};
4171 const SCEV **PtrOp = nullptr;
4172 for (const SCEV *&AddOp : Ops) {
4173 if (AddOp->getType()->isPointerTy()) {
4174 assert(!PtrOp && "Cannot have multiple pointer ops")(static_cast<void> (0));
4175 PtrOp = &AddOp;
4176 }
4177 }
4178 *PtrOp = removePointerBase(*PtrOp);
4179 // Don't try to transfer nowrap flags for now. We could in some cases
4180 // (for example, if the pointer operand of the Add is a SCEVUnknown).
4181 return getAddExpr(Ops);
4182 }
4183 // Any other expression must be a pointer base.
4184 return getZero(P->getType());
4185}
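// Illustrative sketch (hypothetical pointer %p and index %i):
//   removePointerBase((4 + %i + %p))    ==> (4 + %i)
//   removePointerBase({%p,+,8}<%loop>)  ==> {0,+,8}<%loop>
//   removePointerBase(%p)               ==> 0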
4186
4187const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
4188 SCEV::NoWrapFlags Flags,
4189 unsigned Depth) {
4190 // Fast path: X - X --> 0.
4191 if (LHS == RHS)
4192 return getZero(LHS->getType());
4193
4194 // If we subtract two pointers with different pointer bases, bail.
4195 // Eventually, we're going to add an assertion to getMulExpr that we
4196 // can't multiply by a pointer.
4197 if (RHS->getType()->isPointerTy()) {
4198 if (!LHS->getType()->isPointerTy() ||
4199 getPointerBase(LHS) != getPointerBase(RHS))
4200 return getCouldNotCompute();
4201 LHS = removePointerBase(LHS);
4202 RHS = removePointerBase(RHS);
4203 }
4204
4205 // We represent LHS - RHS as LHS + (-1)*RHS. This transformation
4206 // makes it so that we cannot make much use of NUW.
4207 auto AddFlags = SCEV::FlagAnyWrap;
4208 const bool RHSIsNotMinSigned =
4209 !getSignedRangeMin(RHS).isMinSignedValue();
4210 if (hasFlags(Flags, SCEV::FlagNSW)) {
4211 // Let M be the minimum representable signed value. Then (-1)*RHS
4212 // signed-wraps if and only if RHS is M. That can happen even for
4213 // a NSW subtraction because e.g. (-1)*M signed-wraps even though
4214 // -1 - M does not. So to transfer NSW from LHS - RHS to LHS +
4215 // (-1)*RHS, we need to prove that RHS != M.
4216 //
4217 // If LHS is non-negative and we know that LHS - RHS does not
4218 // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap
4219 // either by proving that RHS > M or that LHS >= 0.
4220 if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) {
4221 AddFlags = SCEV::FlagNSW;
4222 }
4223 }
4224
4225 // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS -
4226 // RHS is NSW and LHS >= 0.
4227 //
4228 // The difficulty here is that the NSW flag may have been proven
4229 // relative to a loop that is to be found in a recurrence in LHS and
4230 // not in RHS. Applying NSW to (-1)*M may then let the NSW have a
4231 // larger scope than intended.
4232 auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
4233
4234 return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
4235}
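// Minimal usage sketch (assumes integer SCEVs LHS and RHS of the same
// type and a ScalarEvolution &SE):
//   const SCEV *Diff = SE.getMinusSCEV(LHS, RHS, SCEV::FlagNSW);
// Internally this builds LHS + (-1)*RHS; FlagNSW survives only if RHS is
// provably not the minimum signed value or LHS is known non-negative,
// because (-1)*INT_MIN itself signed-wraps.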
4236
4237const SCEV *ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty,
4238 unsigned Depth) {
4239 Type *SrcTy = V->getType();
4240 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&(static_cast<void> (0))
4241 "Cannot truncate or zero extend with non-integer arguments!")(static_cast<void> (0));
4242 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4243 return V; // No conversion
4244 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
4245 return getTruncateExpr(V, Ty, Depth);
4246 return getZeroExtendExpr(V, Ty, Depth);
4247}
4248
4249const SCEV *ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, Type *Ty,
4250 unsigned Depth) {
4251 Type *SrcTy = V->getType();
4252 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&(static_cast<void> (0))
4253         "Cannot truncate or sign extend with non-integer arguments!")(static_cast<void> (0));
4254 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4255 return V; // No conversion
4256 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
4257 return getTruncateExpr(V, Ty, Depth);
4258 return getSignExtendExpr(V, Ty, Depth);
4259}
4260
4261const SCEV *
4262ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
4263 Type *SrcTy = V->getType();
4264 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&(static_cast<void> (0))
4265 "Cannot noop or zero extend with non-integer arguments!")(static_cast<void> (0));
4266 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&(static_cast<void> (0))
4267 "getNoopOrZeroExtend cannot truncate!")(static_cast<void> (0));
4268 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4269 return V; // No conversion
4270 return getZeroExtendExpr(V, Ty);
4271}
4272
4273const SCEV *
4274ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
4275 Type *SrcTy = V->getType();
4276 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&(static_cast<void> (0))
4277 "Cannot noop or sign extend with non-integer arguments!")(static_cast<void> (0));
4278 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&(static_cast<void> (0))
4279 "getNoopOrSignExtend cannot truncate!")(static_cast<void> (0));
4280 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4281 return V; // No conversion
4282 return getSignExtendExpr(V, Ty);
4283}
4284
4285const SCEV *
4286ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
4287 Type *SrcTy = V->getType();
4288 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&(static_cast<void> (0))
4289 "Cannot noop or any extend with non-integer arguments!")(static_cast<void> (0));
4290 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&(static_cast<void> (0))
4291 "getNoopOrAnyExtend cannot truncate!")(static_cast<void> (0));
4292 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4293 return V; // No conversion
4294 return getAnyExtendExpr(V, Ty);
4295}
4296
4297const SCEV *
4298ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
4299 Type *SrcTy = V->getType();
4300 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&(static_cast<void> (0))
4301 "Cannot truncate or noop with non-integer arguments!")(static_cast<void> (0));
4302 assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&(static_cast<void> (0))
4303 "getTruncateOrNoop cannot extend!")(static_cast<void> (0));
4304 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4305 return V; // No conversion
4306 return getTruncateExpr(V, Ty);
4307}
4308
4309const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
4310 const SCEV *RHS) {
4311 const SCEV *PromotedLHS = LHS;
4312 const SCEV *PromotedRHS = RHS;
4313
4314 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
4315 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
4316 else
4317 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
4318
4319 return getUMaxExpr(PromotedLHS, PromotedRHS);
4320}
4321
4322const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
4323 const SCEV *RHS) {
4324 SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
4325 return getUMinFromMismatchedTypes(Ops);
4326}
4327
4328const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(
4329 SmallVectorImpl<const SCEV *> &Ops) {
4330 assert(!Ops.empty() && "At least one operand must be!")(static_cast<void> (0));
4331 // Trivial case.
4332 if (Ops.size() == 1)
4333 return Ops[0];
4334
4335 // Find the max type first.
4336 Type *MaxType = nullptr;
4337 for (auto *S : Ops)
4338 if (MaxType)
4339 MaxType = getWiderType(MaxType, S->getType());
4340 else
4341 MaxType = S->getType();
4342 assert(MaxType && "Failed to find maximum type!")(static_cast<void> (0));
4343
4344 // Extend all ops to max type.
4345 SmallVector<const SCEV *, 2> PromotedOps;
4346 for (auto *S : Ops)
4347 PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType));
4348
4349 // Generate umin.
4350 return getUMinExpr(PromotedOps);
4351}
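// Example (hypothetical %a : i32, %b : i64): the widest operand type is
// i64, so the result is umin((zext i32 %a to i64), %b). Zero extension
// preserves the unsigned ordering that umin relies on.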
4352
4353const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
4354 // A pointer operand may evaluate to a nonpointer expression, such as null.
4355 if (!V->getType()->isPointerTy())
4356 return V;
4357
4358 while (true) {
4359 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
4360 V = AddRec->getStart();
4361 } else if (auto *Add = dyn_cast<SCEVAddExpr>(V)) {
4362 const SCEV *PtrOp = nullptr;
4363 for (const SCEV *AddOp : Add->operands()) {
4364 if (AddOp->getType()->isPointerTy()) {
4365 assert(!PtrOp && "Cannot have multiple pointer ops")(static_cast<void> (0));
4366 PtrOp = AddOp;
4367 }
4368 }
4369 assert(PtrOp && "Must have pointer op")(static_cast<void> (0));
4370 V = PtrOp;
4371 } else // Not something we can look further into.
4372 return V;
4373 }
4374}
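// Example peeling (hypothetical pointer %p): for {(4 + %p),+,8}<%loop>,
// getPointerBase first takes the AddRec start (4 + %p), then the pointer
// operand %p of the Add, and returns %p, which is neither an Add nor an
// AddRec.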
4375
4376/// Push users of the given Instruction onto the given Worklist.
4377static void
4378PushDefUseChildren(Instruction *I,
4379 SmallVectorImpl<Instruction *> &Worklist) {
4380 // Push the def-use children onto the Worklist stack.
4381 for (User *U : I->users())
4382 Worklist.push_back(cast<Instruction>(U));
4383}
4384
4385void ScalarEvolution::forgetSymbolicName(Instruction *PN, const SCEV *SymName) {
4386 SmallVector<Instruction *, 16> Worklist;
4387 PushDefUseChildren(PN, Worklist);
4388
4389 SmallPtrSet<Instruction *, 8> Visited;
4390 Visited.insert(PN);
4391 while (!Worklist.empty()) {
4392 Instruction *I = Worklist.pop_back_val();
4393 if (!Visited.insert(I).second)
4394 continue;
4395
4396 auto It = ValueExprMap.find_as(static_cast<Value *>(I));
4397 if (It != ValueExprMap.end()) {
4398 const SCEV *Old = It->second;
4399
4400 // Short-circuit the def-use traversal if the symbolic name
4401 // ceases to appear in expressions.
4402 if (Old != SymName && !hasOperand(Old, SymName))
4403 continue;
4404
4405 // SCEVUnknown for a PHI either means that it has an unrecognized
4406      // structure, it's a PHI that's in the process of being computed
4407 // by createNodeForPHI, or it's a single-value PHI. In the first case,
4408 // additional loop trip count information isn't going to change anything.
4409 // In the second case, createNodeForPHI will perform the necessary
4410 // updates on its own when it gets to that point. In the third, we do
4411 // want to forget the SCEVUnknown.
4412 if (!isa<PHINode>(I) ||
4413 !isa<SCEVUnknown>(Old) ||
4414 (I != PN && Old == SymName)) {
4415 eraseValueFromMap(It->first);
4416 forgetMemoizedResults(Old);
4417 }
4418 }
4419
4420 PushDefUseChildren(I, Worklist);
4421 }
4422}
4423
4424namespace {
4425
4426/// Takes SCEV S and Loop L. For each AddRec sub-expression, use its start
4427/// expression if its Loop is L. If its Loop is not L, use the AddRec itself
4428/// when IgnoreOtherLoops is true; otherwise the rewrite cannot be done.
4429/// The rewrite also cannot be done if the SCEV contains a SCEVUnknown that
4430/// is not invariant in L.
4431class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> {
4432public:
4433 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
4434 bool IgnoreOtherLoops = true) {
4435 SCEVInitRewriter Rewriter(L, SE);
4436 const SCEV *Result = Rewriter.visit(S);
4437 if (Rewriter.hasSeenLoopVariantSCEVUnknown())
4438 return SE.getCouldNotCompute();
4439 return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
4440 ? SE.getCouldNotCompute()
4441 : Result;
4442 }
4443
4444 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
4445 if (!SE.isLoopInvariant(Expr, L))
4446 SeenLoopVariantSCEVUnknown = true;
4447 return Expr;
4448 }
4449
4450 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
4451 // Only re-write AddRecExprs for this loop.
4452 if (Expr->getLoop() == L)
4453 return Expr->getStart();
4454 SeenOtherLoops = true;
4455 return Expr;
4456 }
4457
4458 bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }
4459
4460 bool hasSeenOtherLoops() { return SeenOtherLoops; }
4461
4462private:
4463 explicit SCEVInitRewriter(const Loop *L, ScalarEvolution &SE)
4464 : SCEVRewriteVisitor(SE), L(L) {}
4465
4466 const Loop *L;
4467 bool SeenLoopVariantSCEVUnknown = false;
4468 bool SeenOtherLoops = false;
4469};
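// Example: for S = (%inv + {0,+,1}<%L>) with %inv invariant in %L,
// SCEVInitRewriter::rewrite(S, L, SE) yields (%inv + 0) == %inv, i.e.
// the value of S on entry to %L. A loop-variant SCEVUnknown in S would
// instead make the rewrite return SCEVCouldNotCompute.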
4470
4471/// Takes SCEV S and Loop L. For each AddRec sub-expression, use its post-
4472/// increment expression if its Loop is L; otherwise use the AddRec itself.
4473/// The rewrite cannot be done if the SCEV contains a SCEVUnknown that is
4474/// not invariant in L.
4475class SCEVPostIncRewriter : public SCEVRewriteVisitor<SCEVPostIncRewriter> {
4476public:
4477 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE) {
4478 SCEVPostIncRewriter Rewriter(L, SE);
4479 const SCEV *Result = Rewriter.visit(S);
4480 return Rewriter.hasSeenLoopVariantSCEVUnknown()
4481 ? SE.getCouldNotCompute()
4482 : Result;
4483 }
4484
4485 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
4486 if (!SE.isLoopInvariant(Expr, L))
4487 SeenLoopVariantSCEVUnknown = true;
4488 return Expr;
4489 }
4490
4491 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
4492 // Only re-write AddRecExprs for this loop.
4493 if (Expr->getLoop() == L)
4494 return Expr->getPostIncExpr(SE);
4495 SeenOtherLoops = true;
4496 return Expr;
4497 }
4498
4499 bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }
4500
4501 bool hasSeenOtherLoops() { return SeenOtherLoops; }
4502
4503private:
4504 explicit SCEVPostIncRewriter(const Loop *L, ScalarEvolution &SE)
4505 : SCEVRewriteVisitor(SE), L(L) {}
4506
4507 const Loop *L;
4508 bool SeenLoopVariantSCEVUnknown = false;
4509 bool SeenOtherLoops = false;
4510};
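// Example: SCEVPostIncRewriter::rewrite({0,+,1}<%L>, L, SE) yields
// {1,+,1}<%L>, the value of the recurrence after one backedge traversal.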
4511
4512/// This class evaluates the compare condition by matching it against the
4513/// condition of the loop latch. If there is a match, we assume a true value
4514/// for the condition while building SCEV nodes.
4515class SCEVBackedgeConditionFolder
4516 : public SCEVRewriteVisitor<SCEVBackedgeConditionFolder> {
4517public:
4518 static const SCEV *rewrite(const SCEV *S, const Loop *L,
4519 ScalarEvolution &SE) {
4520 bool IsPosBECond = false;
4521 Value *BECond = nullptr;
4522 if (BasicBlock *Latch = L->getLoopLatch()) {
4523 BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator());
4524 if (BI && BI->isConditional()) {
4525 assert(BI->getSuccessor(0) != BI->getSuccessor(1) &&(static_cast<void> (0))
4526 "Both outgoing branches should not target same header!")(static_cast<void> (0));
4527 BECond = BI->getCondition();
4528 IsPosBECond = BI->getSuccessor(0) == L->getHeader();
4529 } else {
4530 return S;
4531 }
4532 }
4533 SCEVBackedgeConditionFolder Rewriter(L, BECond, IsPosBECond, SE);
4534 return Rewriter.visit(S);
4535 }
4536
4537 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
4538 const SCEV *Result = Expr;
4539 bool InvariantF = SE.isLoopInvariant(Expr, L);
4540
4541 if (!InvariantF) {
4542 Instruction *I = cast<Instruction>(Expr->getValue());
4543 switch (I->getOpcode()) {
4544 case Instruction::Select: {
4545 SelectInst *SI = cast<SelectInst>(I);
4546 Optional<const SCEV *> Res =
4547 compareWithBackedgeCondition(SI->getCondition());
4548 if (Res.hasValue()) {
4549 bool IsOne = cast<SCEVConstant>(Res.getValue())->getValue()->isOne();
4550 Result = SE.getSCEV(IsOne ? SI->getTrueValue() : SI->getFalseValue());
4551 }
4552 break;
4553 }
4554 default: {
4555 Optional<const SCEV *> Res = compareWithBackedgeCondition(I);
4556 if (Res.hasValue())
4557 Result = Res.getValue();
4558 break;
4559 }
4560 }
4561 }
4562 return Result;
4563 }
4564
4565private:
4566 explicit SCEVBackedgeConditionFolder(const Loop *L, Value *BECond,
4567 bool IsPosBECond, ScalarEvolution &SE)
4568 : SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond),
4569 IsPositiveBECond(IsPosBECond) {}
4570
4571 Optional<const SCEV *> compareWithBackedgeCondition(Value *IC);
4572
4573 const Loop *L;
4574 /// Loop back condition.
4575 Value *BackedgeCond = nullptr;
4576 /// Set to true if loop back is on positive branch condition.
4577 bool IsPositiveBECond;
4578};
4579
4580Optional<const SCEV *>
4581SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) {
4582
4583  // If the value matches the backedge condition of the loop latch,
4584  // return a constant evolution node based on whether the loopback
4585  // branch is taken.
4586 if (BackedgeCond == IC)
4587 return IsPositiveBECond ? SE.getOne(Type::getInt1Ty(SE.getContext()))
4588 : SE.getZero(Type::getInt1Ty(SE.getContext()));
4589 return None;
4590}
4591
4592class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> {
4593public:
4594 static const SCEV *rewrite(const SCEV *S, const Loop *L,
4595 ScalarEvolution &SE) {
4596 SCEVShiftRewriter Rewriter(L, SE);
4597 const SCEV *Result = Rewriter.visit(S);
4598 return Rewriter.isValid() ? Result : SE.getCouldNotCompute();
4599 }
4600
4601 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
4602 // Only allow AddRecExprs for this loop.
4603 if (!SE.isLoopInvariant(Expr, L))
4604 Valid = false;
4605 return Expr;
4606 }
4607
4608 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
4609 if (Expr->getLoop() == L && Expr->isAffine())
4610 return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE));
4611 Valid = false;
4612 return Expr;
4613 }
4614
4615 bool isValid() { return Valid; }
4616
4617private:
4618 explicit SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE)
4619 : SCEVRewriteVisitor(SE), L(L) {}
4620
4621 const Loop *L;
4622 bool Valid = true;
4623};
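// Example: SCEVShiftRewriter::rewrite({3,+,5}<%L>, L, SE) yields
// {3,+,5} - 5 == {-2,+,5}<%L>; at iteration i+1 the shifted recurrence
// takes the value the original recurrence had at iteration i.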
4624
4625} // end anonymous namespace
4626
4627SCEV::NoWrapFlags
4628ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) {
4629 if (!AR->isAffine())
4630 return SCEV::FlagAnyWrap;
4631
4632 using OBO = OverflowingBinaryOperator;
4633
4634 SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap;
4635
4636 if (!AR->hasNoSignedWrap()) {
4637 ConstantRange AddRecRange = getSignedRange(AR);
4638 ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this));
4639
4640 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
4641 Instruction::Add, IncRange, OBO::NoSignedWrap);
4642 if (NSWRegion.contains(AddRecRange))
4643 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW);
4644 }
4645
4646 if (!AR->hasNoUnsignedWrap()) {
4647 ConstantRange AddRecRange = getUnsignedRange(AR);
4648 ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this));
4649
4650 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
4651 Instruction::Add, IncRange, OBO::NoUnsignedWrap);
4652 if (NUWRegion.contains(AddRecRange))
4653 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW);
4654 }
4655
4656 return Result;
4657}
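// Worked example (i8 for brevity, hypothetical ranges): if the AddRec's
// signed range is [0, 100) and the step's signed range is [1, 4), then
// makeGuaranteedNoWrapRegion(Add, [1, 4), NoSignedWrap) is [-128, 125),
// which contains [0, 100), so FlagNSW is proven.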
4658
4659SCEV::NoWrapFlags
4660ScalarEvolution::proveNoSignedWrapViaInduction(const SCEVAddRecExpr *AR) {
4661 SCEV::NoWrapFlags Result = AR->getNoWrapFlags();
4662
4663 if (AR->hasNoSignedWrap())
4664 return Result;
4665
4666 if (!AR->isAffine())
4667 return Result;
4668
4669 const SCEV *Step = AR->getStepRecurrence(*this);
4670 const Loop *L = AR->getLoop();
4671
4672 // Check whether the backedge-taken count is SCEVCouldNotCompute.
4673 // Note that this serves two purposes: It filters out loops that are
4674 // simply not analyzable, and it covers the case where this code is
4675 // being called from within backedge-taken count analysis, such that
4676 // attempting to ask for the backedge-taken count would likely result
4677  // in infinite recursion. In the latter case, the analysis code will
4678 // cope with a conservative value, and it will take care to purge
4679 // that value once it has finished.
4680 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
4681
4682 // Normally, in the cases we can prove no-overflow via a
4683 // backedge guarding condition, we can also compute a backedge
4684 // taken count for the loop. The exceptions are assumptions and
4685 // guards present in the loop -- SCEV is not great at exploiting
4686 // these to compute max backedge taken counts, but can still use
4687 // these to prove lack of overflow. Use this fact to avoid
4688 // doing extra work that may not pay off.
4689
4690 if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards &&
4691 AC.assumptions().empty())
4692 return Result;
4693
4694 // If the backedge is guarded by a comparison with the pre-inc value the
4695 // addrec is safe. Also, if the entry is guarded by a comparison with the
4696 // start value and the backedge is guarded by a comparison with the post-inc
4697 // value, the addrec is safe.
4698 ICmpInst::Predicate Pred;
4699 const SCEV *OverflowLimit =
4700 getSignedOverflowLimitForStep(Step, &Pred, this);
4701 if (OverflowLimit &&
4702 (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
4703 isKnownOnEveryIteration(Pred, AR, OverflowLimit))) {
4704 Result = setFlags(Result, SCEV::FlagNSW);
4705 }
4706 return Result;
4707}
4708SCEV::NoWrapFlags
4709ScalarEvolution::proveNoUnsignedWrapViaInduction(const SCEVAddRecExpr *AR) {
4710 SCEV::NoWrapFlags Result = AR->getNoWrapFlags();
4711
4712 if (AR->hasNoUnsignedWrap())
4713 return Result;
4714
4715 if (!AR->isAffine())
4716 return Result;
4717
4718 const SCEV *Step = AR->getStepRecurrence(*this);
4719 unsigned BitWidth = getTypeSizeInBits(AR->getType());
4720 const Loop *L = AR->getLoop();
4721
4722 // Check whether the backedge-taken count is SCEVCouldNotCompute.
4723 // Note that this serves two purposes: It filters out loops that are
4724 // simply not analyzable, and it covers the case where this code is
4725 // being called from within backedge-taken count analysis, such that
4726 // attempting to ask for the backedge-taken count would likely result
4727  // in infinite recursion. In the latter case, the analysis code will
4728 // cope with a conservative value, and it will take care to purge
4729 // that value once it has finished.
4730 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
4731
4732 // Normally, in the cases we can prove no-overflow via a
4733 // backedge guarding condition, we can also compute a backedge
4734 // taken count for the loop. The exceptions are assumptions and
4735 // guards present in the loop -- SCEV is not great at exploiting
4736 // these to compute max backedge taken counts, but can still use
4737 // these to prove lack of overflow. Use this fact to avoid
4738 // doing extra work that may not pay off.
4739
4740 if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards &&
4741 AC.assumptions().empty())
4742 return Result;
4743
4744 // If the backedge is guarded by a comparison with the pre-inc value the
4745 // addrec is safe. Also, if the entry is guarded by a comparison with the
4746 // start value and the backedge is guarded by a comparison with the post-inc
4747 // value, the addrec is safe.
4748 if (isKnownPositive(Step)) {
4749 const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
4750 getUnsignedRangeMax(Step));
4751 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
4752 isKnownOnEveryIteration(ICmpInst::ICMP_ULT, AR, N)) {
4753 Result = setFlags(Result, SCEV::FlagNUW);
4754 }
4755 }
4756
4757 return Result;
4758}
4759
4760namespace {
4761
4762/// Represents an abstract binary operation. This may exist as a
4763/// normal instruction or constant expression, or may have been
4764/// derived from an expression tree.
4765struct BinaryOp {
4766 unsigned Opcode;
4767 Value *LHS;
4768 Value *RHS;
4769 bool IsNSW = false;
4770 bool IsNUW = false;
4771
4772 /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or
4773 /// constant expression.
4774 Operator *Op = nullptr;
4775
4776 explicit BinaryOp(Operator *Op)
4777 : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)),
4778 Op(Op) {
4779 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) {
4780 IsNSW = OBO->hasNoSignedWrap();
4781 IsNUW = OBO->hasNoUnsignedWrap();
4782 }
4783 }
4784
4785 explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false,
4786 bool IsNUW = false)
4787 : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
4788};
4789
4790} // end anonymous namespace
4791
4792/// Try to map \p V into a BinaryOp, and return \c None on failure.
4793static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
4794 auto *Op = dyn_cast<Operator>(V);
4795 if (!Op)
4796 return None;
4797
4798 // Implementation detail: all the cleverness here should happen without
4799  // creating new SCEV expressions -- our caller knows tricks to avoid creating
4800 // SCEV expressions when possible, and we should not break that.
4801
4802 switch (Op->getOpcode()) {
4803 case Instruction::Add:
4804 case Instruction::Sub:
4805 case Instruction::Mul:
4806 case Instruction::UDiv:
4807 case Instruction::URem:
4808 case Instruction::And:
4809 case Instruction::Or:
4810 case Instruction::AShr:
4811 case Instruction::Shl:
4812 return BinaryOp(Op);
4813
4814 case Instruction::Xor:
4815 if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1)))
4816 // If the RHS of the xor is a signmask, then this is just an add.
4817 // Instcombine turns add of signmask into xor as a strength reduction step.
4818 if (RHSC->getValue().isSignMask())
4819 return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1));
4820 return BinaryOp(Op);
4821
4822 case Instruction::LShr:
4823    // Turn a logical shift right by a constant into an unsigned divide.
4824 if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) {
4825 uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth();
4826
4827 // If the shift count is not less than the bitwidth, the result of
4828 // the shift is undefined. Don't try to analyze it, because the
4829 // resolution chosen here may differ from the resolution chosen in
4830 // other parts of the compiler.
4831 if (SA->getValue().ult(BitWidth)) {
4832 Constant *X =
4833 ConstantInt::get(SA->getContext(),
4834 APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
4835 return BinaryOp(Instruction::UDiv, Op->getOperand(0), X);
4836 }
4837 }
4838 return BinaryOp(Op);
4839
4840 case Instruction::ExtractValue: {
4841 auto *EVI = cast<ExtractValueInst>(Op);
4842 if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
4843 break;
4844
4845 auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand());
4846 if (!WO)
4847 break;
4848
4849 Instruction::BinaryOps BinOp = WO->getBinaryOp();
4850 bool Signed = WO->isSigned();
4851 // TODO: Should add nuw/nsw flags for mul as well.
4852 if (BinOp == Instruction::Mul || !isOverflowIntrinsicNoWrap(WO, DT))
4853 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS());
4854
4855 // Now that we know that all uses of the arithmetic-result component of
4856    // WO are guarded by the overflow check, we can go ahead and pretend
4857 // that the arithmetic is non-overflowing.
4858 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(),
4859 /* IsNSW = */ Signed, /* IsNUW = */ !Signed);
4860 }
4861
4862 default:
4863 break;
4864 }
4865
4866 // Recognise intrinsic loop.decrement.reg, and as this has exactly the same
4867 // semantics as a Sub, return a binary sub expression.
4868 if (auto *II = dyn_cast<IntrinsicInst>(V))
4869 if (II->getIntrinsicID() == Intrinsic::loop_decrement_reg)
4870 return BinaryOp(Instruction::Sub, II->getOperand(0), II->getOperand(1));
4871
4872 return None;
4873}
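// Example for the Xor case above: instcombine strength-reduces
// (add i32 %x, -2147483648) to (xor i32 %x, -2147483648), since
// -2147483648 is the i32 sign mask; MatchBinaryOp maps the xor back to
// an Add so SCEV can reason about it arithmetically.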
4874
4875/// Helper function to createAddRecFromPHIWithCasts. We have a phi
4876/// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via
4877/// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the
4878/// way. This function checks if \p Op, an operand of this SCEVAddExpr,
4879/// follows one of the following patterns:
4880/// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
4881/// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
4882/// If the SCEV expression of \p Op conforms with one of the expected patterns
4883/// we return the type of the truncation operation, and indicate whether the
4884/// truncated type should be treated as signed/unsigned by setting
4885/// \p Signed to true/false, respectively.
4886static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI,
4887 bool &Signed, ScalarEvolution &SE) {
4888 // The case where Op == SymbolicPHI (that is, with no type conversions on
4889 // the way) is handled by the regular add recurrence creating logic and
4890  // would have already been triggered in createAddRecFromPHI. Reaching it here
4891 // means that createAddRecFromPHI had failed for this PHI before (e.g.,
4892 // because one of the other operands of the SCEVAddExpr updating this PHI is
4893 // not invariant).
4894 //
4895 // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in
4896 // this case predicates that allow us to prove that Op == SymbolicPHI will
4897 // be added.
4898 if (Op == SymbolicPHI)
4899 return nullptr;
4900
4901 unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType());
4902 unsigned NewBits = SE.getTypeSizeInBits(Op->getType());
4903 if (SourceBits != NewBits)
4904 return nullptr;
4905
4906 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op);
4907 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op);
4908 if (!SExt && !ZExt)
4909 return nullptr;
4910 const SCEVTruncateExpr *Trunc =
4911 SExt ? dyn_cast<SCEVTruncateExpr>(SExt->getOperand())
4912 : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand());
4913 if (!Trunc)
4914 return nullptr;
4915 const SCEV *X = Trunc->getOperand();
4916 if (X != SymbolicPHI)
4917 return nullptr;
4918 Signed = SExt != nullptr;
4919 return Trunc->getType();
4920}
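// Example (hypothetical i64 phi %X): for
//   Op == (SExt i32 (Trunc i64 (%X) to i32) to i64)
// this returns the truncated type i32 and sets Signed to true; the ZExt
// form returns i32 with Signed set to false; any other shape yields
// nullptr.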
4921
4922static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) {
4923 if (!PN->getType()->isIntegerTy())
4924 return nullptr;
4925 const Loop *L = LI.getLoopFor(PN->getParent());
4926 if (!L || L->getHeader() != PN->getParent())
4927 return nullptr;
4928 return L;
4929}
4930
4931// Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the
4932// computation that updates the phi follows the following pattern:
4933// (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum
4934// which corresponds to a phi->trunc->sext/zext->add->phi update chain.
4935// If so, try to see if it can be rewritten as an AddRecExpr under some
4936// Predicates. If successful, return them as a pair. Also cache the results
4937// of the analysis.
4938//
4939// Example usage scenario:
4940// Say the Rewriter is called for the following SCEV:
4941// 8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
4942// where:
4943// %X = phi i64 (%Start, %BEValue)
4944// It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X),
4945// and call this function with %SymbolicPHI = %X.
4946//
4947// The analysis will find that the value coming around the backedge has
4948// the following SCEV:
4949// BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
4950// Upon concluding that this matches the desired pattern, the function
4951// will return the pair {NewAddRec, SmallPredsVec} where:
4952// NewAddRec = {%Start,+,%Step}
4953// SmallPredsVec = {P1, P2, P3} as follows:
4954// P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw>
4955// P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64)
4956// P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64)
4957// The returned pair means that SymbolicPHI can be rewritten into NewAddRec
4958// under the predicates {P1,P2,P3}.
4959// This predicated rewrite will be cached in PredicatedSCEVRewrites:
4960// PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3}}
4961//
4962// TODOs:
4963//
4964// 1) Extend the Induction descriptor to also support inductions that involve
4965// casts: When needed (namely, when we are called in the context of the
4966// vectorizer induction analysis), a Set of cast instructions will be
4967// populated by this method, and provided back to isInductionPHI. This is
4968// needed to allow the vectorizer to properly record them to be ignored by
4969// the cost model and to avoid vectorizing them (otherwise these casts,
4970// which are redundant under the runtime overflow checks, will be
4971// vectorized, which can be costly).
4972//
4973// 2) Support additional induction/PHISCEV patterns: We also want to support
4974// inductions where the sext-trunc / zext-trunc operations (partly) occur
4975// after the induction update operation (the induction increment):
4976//
4977// (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix)
4978// which corresponds to a phi->add->trunc->sext/zext->phi update chain.
4979//
4980// (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix)
4981// which corresponds to a phi->trunc->add->sext/zext->phi update chain.
4982//
4983// 3) Outline common code with createAddRecFromPHI to avoid duplication.
4984Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
4985ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) {
4986 SmallVector<const SCEVPredicate *, 3> Predicates;
4987
4988 // *** Part1: Analyze if we have a phi-with-cast pattern for which we can
4989 // return an AddRec expression under some predicate.
4990
4991 auto *PN = cast<PHINode>(SymbolicPHI->getValue());
4992 const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
4993 assert(L && "Expecting an integer loop header phi")(static_cast<void> (0));
4994
4995 // The loop may have multiple entrances or multiple exits; we can analyze
4996 // this phi as an addrec if it has a unique entry value and a unique
4997 // backedge value.
4998 Value *BEValueV = nullptr, *StartValueV = nullptr;
4999 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
5000 Value *V = PN->getIncomingValue(i);
5001 if (L->contains(PN->getIncomingBlock(i))) {
5002 if (!BEValueV) {
5003 BEValueV = V;
5004 } else if (BEValueV != V) {
5005 BEValueV = nullptr;
5006 break;
5007 }
5008 } else if (!StartValueV) {
5009 StartValueV = V;
5010 } else if (StartValueV != V) {
5011 StartValueV = nullptr;
5012 break;
5013 }
5014 }
5015 if (!BEValueV || !StartValueV)
5016 return None;
5017
5018 const SCEV *BEValue = getSCEV(BEValueV);
5019
5020 // If the value coming around the backedge is an add with the symbolic
5021 // value we just inserted, possibly with casts that we can ignore under
5022 // an appropriate runtime guard, then we found a simple induction variable!
5023 const auto *Add = dyn_cast<SCEVAddExpr>(BEValue);
5024 if (!Add)
5025 return None;
5026
5027 // If there is a single occurrence of the symbolic value, possibly
5028 // casted, replace it with a recurrence.
5029 unsigned FoundIndex = Add->getNumOperands();
5030 Type *TruncTy = nullptr;
5031 bool Signed;
5032 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
5033 if ((TruncTy =
5034 isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this)))
5035 if (FoundIndex == e) {
5036 FoundIndex = i;
5037 break;
5038 }
5039
5040 if (FoundIndex == Add->getNumOperands())
5041 return None;
5042
5043 // Create an add with everything but the specified operand.
5044 SmallVector<const SCEV *, 8> Ops;
5045 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
5046 if (i != FoundIndex)
5047 Ops.push_back(Add->getOperand(i));
5048 const SCEV *Accum = getAddExpr(Ops);
5049
5050 // The runtime checks will not be valid if the step amount is
5051 // varying inside the loop.
5052 if (!isLoopInvariant(Accum, L))
5053 return None;
5054
5055 // *** Part2: Create the predicates
5056
5057 // Analysis was successful: we have a phi-with-cast pattern for which we
5058 // can return an AddRec expression under the following predicates:
5059 //
5060 // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum)
5061 // fits within the truncated type (does not overflow) for i = 0 to n-1.
5062 // P2: An Equal predicate that guarantees that
5063 // Start = (Ext ix (Trunc iy (Start) to ix) to iy)
5064 // P3: An Equal predicate that guarantees that
5065 // Accum = (Ext ix (Trunc iy (Accum) to ix) to iy)
5066 //
5067 // As we next prove, the above predicates guarantee that:
5068 // Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy)
5069 //
5070 //
5071 // More formally, we want to prove that:
5072 // Expr(i+1) = Start + (i+1) * Accum
5073 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum
5074 //
5075 // Given that:
5076 // 1) Expr(0) = Start
5077 // 2) Expr(1) = Start + Accum
5078 // = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2
5079 // 3) Induction hypothesis (step i):
5080 // Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum
5081 //
5082 // Proof:
5083 // Expr(i+1) =
5084 // = Start + (i+1)*Accum
5085 // = (Start + i*Accum) + Accum
5086 // = Expr(i) + Accum
5087 // = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum
5088 // :: from step i
5089 //
5090 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) + Accum + Accum
5091 //
5092 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy)
5093 // + (Ext ix (Trunc iy (Accum) to ix) to iy)
5094 // + Accum :: from P3
5095 //
5096 // = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy)
5097 // + Accum :: from P1: Ext(x)+Ext(y)=>Ext(x+y)
5098 //
5099 // = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum
5100 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum
5101 //
5102 // By induction, the same applies to all iterations 1<=i<n:
5103 //
5104
5105 // Create a truncated addrec for which we will add a no overflow check (P1).
5106 const SCEV *StartVal = getSCEV(StartValueV);
5107 const SCEV *PHISCEV =
5108 getAddRecExpr(getTruncateExpr(StartVal, TruncTy),
5109 getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap);
5110
5111 // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr.
5112 // ex: If truncated Accum is 0 and StartVal is a constant, then PHISCEV
5113 // will be constant.
5114 //
5115 // If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't
5116 // add P1.
5117 if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) {
5118 SCEVWrapPredicate::IncrementWrapFlags AddedFlags =
5119 Signed ? SCEVWrapPredicate::IncrementNSSW
5120 : SCEVWrapPredicate::IncrementNUSW;
5121 const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags);
5122 Predicates.push_back(AddRecPred);
5123 }
5124
5125 // Create the Equal Predicates P2,P3:
5126
5127 // It is possible that the predicates P2 and/or P3 are computable at
5128 // compile time due to StartVal and/or Accum being constants.
5129 // If either one is, then we can check that now and escape if either P2
5130 // or P3 is false.
5131
5132 // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy)
5133 // for each of StartVal and Accum
5134 auto getExtendedExpr = [&](const SCEV *Expr,
5135 bool CreateSignExtend) -> const SCEV * {
5136 assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant")(static_cast<void> (0));
5137 const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy);
5138 const SCEV *ExtendedExpr =
5139 CreateSignExtend ? getSignExtendExpr(TruncatedExpr, Expr->getType())
5140 : getZeroExtendExpr(TruncatedExpr, Expr->getType());
5141 return ExtendedExpr;
5142 };
5143
5144 // Given:
5145 // ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy
5146 // = getExtendedExpr(Expr)
5147 // Determine whether the predicate P: Expr == ExtendedExpr
5148 // is known to be false at compile time
5149 auto PredIsKnownFalse = [&](const SCEV *Expr,
5150 const SCEV *ExtendedExpr) -> bool {
5151 return Expr != ExtendedExpr &&
5152 isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr);
5153 };
5154
5155 const SCEV *StartExtended = getExtendedExpr(StartVal, Signed);
5156 if (PredIsKnownFalse(StartVal, StartExtended)) {
5157 LLVM_DEBUG(dbgs() << "P2 is compile-time false\n";)do { } while (false);
5158 return None;
5159 }
5160
5161 // The Step is always Signed (because the overflow checks are either
5162 // NSSW or NUSW)
5163 const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true);
5164 if (PredIsKnownFalse(Accum, AccumExtended)) {
5165 LLVM_DEBUG(dbgs() << "P3 is compile-time false\n";)do { } while (false);
5166 return None;
5167 }
5168
5169 auto AppendPredicate = [&](const SCEV *Expr,
5170 const SCEV *ExtendedExpr) -> void {
5171 if (Expr != ExtendedExpr &&
5172 !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) {
5173 const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr);
5174 LLVM_DEBUG(dbgs() << "Added Predicate: " << *Pred)do { } while (false);
5175 Predicates.push_back(Pred);
5176 }
5177 };
5178
5179 AppendPredicate(StartVal, StartExtended);
5180 AppendPredicate(Accum, AccumExtended);
5181
5182 // *** Part3: Predicates are ready. Now go ahead and create the new addrec in
5183 // which the casts had been folded away. The caller can rewrite SymbolicPHI
5184 // into NewAR if it will also add the runtime overflow checks specified in
5185 // Predicates.
5186 auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap);
5187
5188 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
5189 std::make_pair(NewAR, Predicates);
5190  // Remember the result of the analysis for this SCEV at this location.
5191 PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite;
5192 return PredRewrite;
5193}
5194
5195Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5196ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
5197 auto *PN = cast<PHINode>(SymbolicPHI->getValue());
5198 const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
5199 if (!L)
5200 return None;
5201
5202 // Check to see if we already analyzed this PHI.
5203 auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
5204 if (I != PredicatedSCEVRewrites.end()) {
5205 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
5206 I->second;
5207 // Analysis was done before and failed to create an AddRec:
5208 if (Rewrite.first == SymbolicPHI)
5209 return None;
5210 // Analysis was done before and succeeded to create an AddRec under
5211 // a predicate:
5212 assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec")(static_cast<void> (0));
5213 assert(!(Rewrite.second).empty() && "Expected to find Predicates")(static_cast<void> (0));
5214 return Rewrite;
5215 }
5216
5217 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5218 Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);
5219
5220 // Record in the cache that the analysis failed
5221 if (!Rewrite) {
5222 SmallVector<const SCEVPredicate *, 3> Predicates;
5223 PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
5224 return None;
5225 }
5226
5227 return Rewrite;
5228}
5229
5230// FIXME: This utility is currently required because the Rewriter currently
5231// does not rewrite this expression:
5232// {0, +, (sext ix (trunc iy to ix) to iy)}
5233// into {0, +, %step},
5234// even when the following Equal predicate exists:
5235// "%step == (sext ix (trunc iy to ix) to iy)".
5236bool PredicatedScalarEvolution::areAddRecsEqualWithPreds(
5237 const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const {
5238 if (AR1 == AR2)
5239 return true;
5240
5241 auto areExprsEqual = [&](const SCEV *Expr1, const SCEV *Expr2) -> bool {
5242 if (Expr1 != Expr2 && !Preds.implies(SE.getEqualPredicate(Expr1, Expr2)) &&
5243 !Preds.implies(SE.getEqualPredicate(Expr2, Expr1)))
5244 return false;
5245 return true;
5246 };
5247
5248 if (!areExprsEqual(AR1->getStart(), AR2->getStart()) ||
5249 !areExprsEqual(AR1->getStepRecurrence(SE), AR2->getStepRecurrence(SE)))
5250 return false;
5251 return true;
5252}
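// Example: {%s1,+,%step}<%L> and {%s2,+,%step}<%L> are distinct SCEV
// objects, but compare equal here whenever the predicate set already
// implies "%s1 == %s2".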
5253
5254/// A helper function for createAddRecFromPHI to handle simple cases.
5255///
5256/// This function tries to find an AddRec expression for the simplest (yet most
5257/// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)).
5258/// If it fails, createAddRecFromPHI will use a more general, but slow,
5259/// technique for finding the AddRec expression.
5260const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN,
5261 Value *BEValueV,
5262 Value *StartValueV) {
5263 const Loop *L = LI.getLoopFor(PN->getParent());
5264 assert(L && L->getHeader() == PN->getParent())(static_cast<void> (0));
5265 assert(BEValueV && StartValueV)(static_cast<void> (0));
5266
5267 auto BO = MatchBinaryOp(BEValueV, DT);
5268 if (!BO)
5269 return nullptr;
5270
5271 if (BO->Opcode != Instruction::Add)
5272 return nullptr;
5273
5274 const SCEV *Accum = nullptr;
5275 if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
5276 Accum = getSCEV(BO->RHS);
5277 else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
5278 Accum = getSCEV(BO->LHS);
5279
5280 if (!Accum)
5281 return nullptr;
5282
5283 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
5284 if (BO->IsNUW)
5285 Flags = setFlags(Flags, SCEV::FlagNUW);
5286 if (BO->IsNSW)
5287 Flags = setFlags(Flags, SCEV::FlagNSW);
5288
5289 const SCEV *StartVal = getSCEV(StartValueV);
5290 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
5291
5292 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
5293
5294 // We can add Flags to the post-inc expression only if we
5295 // know that it is *undefined behavior* for BEValueV to
5296 // overflow.
5297 if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
5298 if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
5299 (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
5300
5301 return PHISCEV;
5302}
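// Example of the simple affine case handled above (hypothetical loop %L):
//   %iv = phi i64 [ 0, %entry ], [ %iv.next, %L ]
//   %iv.next = add nuw nsw i64 %iv, 4
// yields PHISCEV = {0,+,4}<nuw><nsw><%L>, with the wrap flags taken from
// the add instruction.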
5303
5304const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
5305 const Loop *L = LI.getLoopFor(PN->getParent());
5306 if (!L || L->getHeader() != PN->getParent())
5307 return nullptr;
5308
5309 // The loop may have multiple entrances or multiple exits; we can analyze
5310 // this phi as an addrec if it has a unique entry value and a unique
5311 // backedge value.
5312 Value *BEValueV = nullptr, *StartValueV = nullptr;
5313 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
5314 Value *V = PN->getIncomingValue(i);
5315 if (L->contains(PN->getIncomingBlock(i))) {
5316 if (!BEValueV) {
5317 BEValueV = V;
5318 } else if (BEValueV != V) {
5319 BEValueV = nullptr;
5320 break;
5321 }
5322 } else if (!StartValueV) {
5323 StartValueV = V;
5324 } else if (StartValueV != V) {
5325 StartValueV = nullptr;
5326 break;
5327 }
5328 }
5329 if (!BEValueV || !StartValueV)
5330 return nullptr;
5331
5332 assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&(static_cast<void> (0))
5333 "PHI node already processed?")(static_cast<void> (0));
5334
5335  // First, try to find an AddRec expression without creating a fictitious
5336  // symbolic value for PN.
5337 if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
5338 return S;
5339
5340 // Handle PHI node value symbolically.
5341 const SCEV *SymbolicName = getUnknown(PN);
5342 ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName});
5343
5344 // Using this symbolic name for the PHI, analyze the value coming around
5345 // the back-edge.
5346 const SCEV *BEValue = getSCEV(BEValueV);
5347
5348 // NOTE: If BEValue is loop invariant, we know that the PHI node just
5349 // has a special value for the first iteration of the loop.
5350
5351 // If the value coming around the backedge is an add with the symbolic
5352 // value we just inserted, then we found a simple induction variable!
5353 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
5354 // If there is a single occurrence of the symbolic value, replace it
5355 // with a recurrence.
5356 unsigned FoundIndex = Add->getNumOperands();
5357 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
5358 if (Add->getOperand(i) == SymbolicName)
5359 if (FoundIndex == e) {
5360 FoundIndex = i;
5361 break;
5362 }
5363
5364 if (FoundIndex != Add->getNumOperands()) {
5365 // Create an add with everything but the specified operand.
5366 SmallVector<const SCEV *, 8> Ops;
5367 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
5368 if (i != FoundIndex)
5369 Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i),
5370 L, *this));
5371 const SCEV *Accum = getAddExpr(Ops);
5372
5373 // This is not a valid addrec if the step amount is varying each
5374 // loop iteration, but is not itself an addrec in this loop.
5375 if (isLoopInvariant(Accum, L) ||
5376 (isa<SCEVAddRecExpr>(Accum) &&
5377 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
5378 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
5379
5380 if (auto BO = MatchBinaryOp(BEValueV, DT)) {
5381 if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
5382 if (BO->IsNUW)
5383 Flags = setFlags(Flags, SCEV::FlagNUW);
5384 if (BO->IsNSW)
5385 Flags = setFlags(Flags, SCEV::FlagNSW);
5386 }
5387 } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
5388 // If the increment is an inbounds GEP, then we know the address
5389 // space cannot be wrapped around. We cannot make any guarantee
5390 // about signed or unsigned overflow because pointers are
5391 // unsigned but we may have a negative index from the base
5392 // pointer. We can guarantee that no unsigned wrap occurs if the
5393 // indices form a positive value.
5394 if (GEP->isInBounds() && GEP->getOperand(0) == PN) {
5395 Flags = setFlags(Flags, SCEV::FlagNW);
5396
5397 const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
5398 if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
5399 Flags = setFlags(Flags, SCEV::FlagNUW);
5400 }
5401
5402 // We cannot transfer nuw and nsw flags from subtraction
5403 // operations -- sub nuw X, Y is not the same as add nuw X, -Y
5404 // for instance.
5405 }
5406
5407 const SCEV *StartVal = getSCEV(StartValueV);
5408 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
5409
5410 // Okay, for the entire analysis of this edge we assumed the PHI
5411 // to be symbolic. We now need to go back and purge all of the
5412 // entries for the scalars that use the symbolic expression.
5413 forgetSymbolicName(PN, SymbolicName);
5414 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
5415
5416 // We can add Flags to the post-inc expression only if we
5417 // know that it is *undefined behavior* for BEValueV to
5418 // overflow.
5419 if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
5420 if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
5421 (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
5422
5423 return PHISCEV;
5424 }
5425 }
5426 } else {
5427 // Otherwise, this could be a loop like this:
5428 // i = 0; for (j = 1; ..; ++j) { .... i = j; }
5429 // In this case, j = {1,+,1} and BEValue is j.
5430    // Because the other in-value of i (0) fits the evolution of BEValue,
5431    // i really is an addrec evolution.
5432 //
5433    // We can generalize this by saying that i is the shifted value of BEValue
5434 // by one iteration:
5435 // PHI(f(0), f({1,+,1})) --> f({0,+,1})
5436 const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
5437 const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this, false);
5438 if (Shifted != getCouldNotCompute() &&
5439 Start != getCouldNotCompute()) {
5440 const SCEV *StartVal = getSCEV(StartValueV);
5441 if (Start == StartVal) {
5442 // Okay, for the entire analysis of this edge we assumed the PHI
5443 // to be symbolic. We now need to go back and purge all of the
5444 // entries for the scalars that use the symbolic expression.
5445 forgetSymbolicName(PN, SymbolicName);
5446 ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted;
5447 return Shifted;
5448 }
5449 }
5450 }
5451
5452 // Remove the temporary PHI node SCEV that has been inserted while intending
5453  // to create an AddRecExpr for this PHI node. We cannot keep this temporary,
5454  // as it would prevent later (possibly simpler) SCEV expressions from being
5455  // added to the ValueExprMap.
5456 eraseValueFromMap(PN);
5457
5458 return nullptr;
5459}
5460
5461// Checks if the SCEV S is available at BB. S is considered available at BB
5462// if S can be materialized at BB without introducing a fault.
5463static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S,
5464 BasicBlock *BB) {
5465 struct CheckAvailable {
5466 bool TraversalDone = false;
5467 bool Available = true;
5468
5469 const Loop *L = nullptr; // The loop BB is in (can be nullptr)
5470 BasicBlock *BB = nullptr;
5471 DominatorTree &DT;
5472
5473 CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT)
5474 : L(L), BB(BB), DT(DT) {}
5475
5476 bool setUnavailable() {
5477 TraversalDone = true;
5478 Available = false;
5479 return false;
5480 }
5481
5482 bool follow(const SCEV *S) {
5483 switch (S->getSCEVType()) {
5484 case scConstant:
5485 case scPtrToInt:
5486 case scTruncate:
5487 case scZeroExtend:
5488 case scSignExtend:
5489 case scAddExpr:
5490 case scMulExpr:
5491 case scUMaxExpr:
5492 case scSMaxExpr:
5493 case scUMinExpr:
5494 case scSMinExpr:
5495        // These expressions are available if all of their operands are.
5496 return true;
5497
5498 case scAddRecExpr: {
5499        // We allow add recurrences on the loop that BB is in, or on some
5500        // outer loop. This guarantees availability because the value of the
5501 // add recurrence at BB is simply the "current" value of the induction
5502 // variable. We can relax this in the future; for instance an add
5503 // recurrence on a sibling dominating loop is also available at BB.
5504 const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop();
5505 if (L && (ARLoop == L || ARLoop->contains(L)))
5506 return true;
5507
5508 return setUnavailable();
5509 }
5510
5511 case scUnknown: {
5512 // For SCEVUnknown, we check for simple dominance.
5513 const auto *SU = cast<SCEVUnknown>(S);
5514 Value *V = SU->getValue();
5515
5516 if (isa<Argument>(V))
5517 return false;
5518
5519 if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB))
5520 return false;
5521
5522 return setUnavailable();
5523 }
5524
5525 case scUDivExpr:
5526 case scCouldNotCompute:
5527        // We do not try to be smart about these at all.
5528 return setUnavailable();
5529 }
5530 llvm_unreachable("Unknown SCEV kind!")__builtin_unreachable();
5531 }
5532
5533 bool isDone() { return TraversalDone; }
5534 };
5535
5536 CheckAvailable CA(L, BB, DT);
5537 SCEVTraversal<CheckAvailable> ST(CA);
5538
5539 ST.visitAll(S);
5540 return CA.Available;
5541}
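// Example (hypothetical): for S = (%n + {0,+,1}<%L>) with BB inside %L
// and %n an instruction dominating BB, S is available on entry to BB.
// Any SCEV containing a udiv is treated as unavailable, since
// materializing it could introduce a fault (division by zero).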
5542
5543// Try to match a control flow sequence that branches out at BI and merges back
5544// at Merge into a "C ? LHS : RHS" select pattern. Return true on a successful
5545// match.
5546static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge,
5547 Value *&C, Value *&LHS, Value *&RHS) {
5548 C = BI->getCondition();
5549
5550 BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0));
5551 BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1));
5552
5553 if (!LeftEdge.isSingleEdge())
5554 return false;
5555
5556 assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()")(static_cast<void> (0));
5557
5558 Use &LeftUse = Merge->getOperandUse(0);
5559 Use &RightUse = Merge->getOperandUse(1);
5560
5561 if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) {
5562 LHS = LeftUse;
5563 RHS = RightUse;
5564 return true;
5565 }
5566
5567 if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) {
5568 LHS = RightUse;
5569 RHS = LeftUse;
5570 return true;
5571 }
5572
5573 return false;
5574}
5575
5576const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) {
5577 auto IsReachable =
5578 [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); };
5579 if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) {
5580 const Loop *L = LI.getLoopFor(PN->getParent());
5581
5582 // We don't want to break LCSSA, even in a SCEV expression tree.
5583 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
5584 if (LI.getLoopFor(PN->getIncomingBlock(i)) != L)
5585 return nullptr;
5586
5587 // Try to match
5588 //
5589 // br %cond, label %left, label %right
5590 // left:
5591 // br label %merge
5592 // right:
5593 // br label %merge
5594 // merge:
5595 // V = phi [ %x, %left ], [ %y, %right ]
5596 //
5597 // as "select %cond, %x, %y"
5598
5599 BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock();
5600 assert(IDom && "At least the entry block should dominate PN")(static_cast<void> (0));
5601
5602 auto *BI = dyn_cast<BranchInst>(IDom->getTerminator());
5603 Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr;
5604
5605 if (BI && BI->isConditional() &&
5606 BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) &&
5607 IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) &&
5608 IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent()))
5609 return createNodeForSelectOrPHI(PN, Cond, LHS, RHS);
5610 }
5611
5612 return nullptr;
5613}
5614
5615const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
5616 if (const SCEV *S = createAddRecFromPHI(PN))
5617 return S;
5618
5619 if (const SCEV *S = createNodeFromSelectLikePHI(PN))
5620 return S;
5621
5622 // If the PHI has a single incoming value, follow that value, unless the
5623 // PHI's incoming blocks are in a different loop, in which case doing so
5624 // risks breaking LCSSA form. Instcombine would normally zap these, but
5625 // it doesn't have DominatorTree information, so it may miss cases.
5626 if (Value *V = SimplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC}))
5627 if (LI.replacementPreservesLCSSAForm(PN, V))
5628 return getSCEV(V);
5629
5630 // If it's not a loop phi, we can't handle it yet.
5631 return getUnknown(PN);
5632}
5633
5634const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Instruction *I,
5635 Value *Cond,
5636 Value *TrueVal,
5637 Value *FalseVal) {
5638 // Handle "constant" branch or select. This can occur for instance when a
5639 // loop pass transforms an inner loop and moves on to process the outer loop.
5640 if (auto *CI = dyn_cast<ConstantInt>(Cond))
5641 return getSCEV(CI->isOne() ? TrueVal : FalseVal);
5642
5643 // Try to match some simple smax or umax patterns.
5644 auto *ICI = dyn_cast<ICmpInst>(Cond);
5645 if (!ICI)
5646 return getUnknown(I);
5647
5648 Value *LHS = ICI->getOperand(0);
5649 Value *RHS = ICI->getOperand(1);
5650
5651 switch (ICI->getPredicate()) {
5652 case ICmpInst::ICMP_SLT:
5653 case ICmpInst::ICMP_SLE:
5654 case ICmpInst::ICMP_ULT:
5655 case ICmpInst::ICMP_ULE:
5656 std::swap(LHS, RHS);
5657 LLVM_FALLTHROUGH;
5658 case ICmpInst::ICMP_SGT:
5659 case ICmpInst::ICMP_SGE:
5660 case ICmpInst::ICMP_UGT:
5661 case ICmpInst::ICMP_UGE:
5662 // a > b ? a+x : b+x -> max(a, b)+x
5663 // a > b ? b+x : a+x -> min(a, b)+x
5664 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) {
5665 bool Signed = ICI->isSigned();
5666 const SCEV *LA = getSCEV(TrueVal);
5667 const SCEV *RA = getSCEV(FalseVal);
5668 const SCEV *LS = getSCEV(LHS);
5669 const SCEV *RS = getSCEV(RHS);
5670 if (LA->getType()->isPointerTy()) {
5671 // FIXME: Handle cases where LS/RS are pointers not equal to LA/RA.
5672 // Need to make sure we can't produce weird expressions involving
5673 // negated pointers.
5674 if (LA == LS && RA == RS)
5675 return Signed ? getSMaxExpr(LS, RS) : getUMaxExpr(LS, RS);
5676 if (LA == RS && RA == LS)
5677 return Signed ? getSMinExpr(LS, RS) : getUMinExpr(LS, RS);
5678 }
5679 auto CoerceOperand = [&](const SCEV *Op) -> const SCEV * {
5680 if (Op->getType()->isPointerTy()) {
5681 Op = getLosslessPtrToIntExpr(Op);
5682 if (isa<SCEVCouldNotCompute>(Op))
5683 return Op;
5684 }
5685 if (Signed)
5686 Op = getNoopOrSignExtend(Op, I->getType());
5687 else
5688 Op = getNoopOrZeroExtend(Op, I->getType());
5689 return Op;
5690 };
5691 LS = CoerceOperand(LS);
5692 RS = CoerceOperand(RS);
5693 if (isa<SCEVCouldNotCompute>(LS) || isa<SCEVCouldNotCompute>(RS))
5694 break;
5695 const SCEV *LDiff = getMinusSCEV(LA, LS);
5696 const SCEV *RDiff = getMinusSCEV(RA, RS);
5697 if (LDiff == RDiff)
5698 return getAddExpr(Signed ? getSMaxExpr(LS, RS) : getUMaxExpr(LS, RS),
5699 LDiff);
5700 LDiff = getMinusSCEV(LA, RS);
5701 RDiff = getMinusSCEV(RA, LS);
5702 if (LDiff == RDiff)
5703 return getAddExpr(Signed ? getSMinExpr(LS, RS) : getUMinExpr(LS, RS),
5704 LDiff);
5705 }
5706 break;
5707 case ICmpInst::ICMP_NE:
5708 // n != 0 ? n+x : 1+x -> umax(n, 1)+x
5709 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) &&
5710 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
5711 const SCEV *One = getOne(I->getType());
5712 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
5713 const SCEV *LA = getSCEV(TrueVal);
5714 const SCEV *RA = getSCEV(FalseVal);
5715 const SCEV *LDiff = getMinusSCEV(LA, LS);
5716 const SCEV *RDiff = getMinusSCEV(RA, One);
5717 if (LDiff == RDiff)
5718 return getAddExpr(getUMaxExpr(One, LS), LDiff);
5719 }
5720 break;
5721 case ICmpInst::ICMP_EQ:
5722 // n == 0 ? 1+x : n+x -> umax(n, 1)+x
5723 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) &&
5724 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
5725 const SCEV *One = getOne(I->getType());
5726 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
5727 const SCEV *LA = getSCEV(TrueVal);
5728 const SCEV *RA = getSCEV(FalseVal);
5729 const SCEV *LDiff = getMinusSCEV(LA, One);
5730 const SCEV *RDiff = getMinusSCEV(RA, LS);
5731 if (LDiff == RDiff)
5732 return getAddExpr(getUMaxExpr(One, LS), LDiff);
5733 }
5734 break;
5735 default:
5736 break;
5737 }
5738
5739 return getUnknown(I);
5740}
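// Illustrative sketch of the SGT arm above (hypothetical values %a, %b,
// %x, not taken from this file):
//
//   %c = icmp sgt i32 %a, %b
//   %t = add i32 %a, %x
//   %f = add i32 %b, %x
//   %s = select i1 %c, i32 %t, i32 %f
//
// Here LA = (%a + %x), RA = (%b + %x), LS = %a, RS = %b, so LDiff and
// RDiff both equal %x, and %s is modeled as (smax(%a, %b) + %x).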
5741
5742/// Expand GEP instructions into add and multiply operations. This allows them
5743/// to be analyzed by regular SCEV code.
5744const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
5745 // Don't attempt to analyze GEPs over unsized objects.
5746 if (!GEP->getSourceElementType()->isSized())
5747 return getUnknown(GEP);
5748
5749 SmallVector<const SCEV *, 4> IndexExprs;
5750 for (Value *Index : GEP->indices())
5751 IndexExprs.push_back(getSCEV(Index));
5752 return getGEPExpr(GEP, IndexExprs);
5753}
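// Illustrative sketch (hypothetical IR, assuming a 4-byte i32): for
//   %p = getelementptr i32, i32* %base, i64 %i
// getGEPExpr scales the index by the element size, so %p is modeled
// roughly as the SCEV (%base + (4 * %i)).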
5754
5755uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) {
5756 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
5757 return C->getAPInt().countTrailingZeros();
5758
5759 if (const SCEVPtrToIntExpr *I = dyn_cast<SCEVPtrToIntExpr>(S))
5760 return GetMinTrailingZeros(I->getOperand());
5761
5762 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
5763 return std::min(GetMinTrailingZeros(T->getOperand()),
5764 (uint32_t)getTypeSizeInBits(T->getType()));
5765
5766 if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
5767 uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
5768 return OpRes == getTypeSizeInBits(E->getOperand()->getType())
5769 ? getTypeSizeInBits(E->getType())
5770 : OpRes;
5771 }
5772
5773 if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
5774 uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
5775 return OpRes == getTypeSizeInBits(E->getOperand()->getType())
5776 ? getTypeSizeInBits(E->getType())
5777 : OpRes;
5778 }
5779
5780 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
5781 // The result is the min of all operands' results.
5782 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
5783 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
5784 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
5785 return MinOpRes;
5786 }
5787
5788 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
5789 // The result is the sum of all operands' results.
5790 uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
5791 uint32_t BitWidth = getTypeSizeInBits(M->getType());
5792 for (unsigned i = 1, e = M->getNumOperands();
5793 SumOpRes != BitWidth && i != e; ++i)
5794 SumOpRes =
5795 std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth);
5796 return SumOpRes;
5797 }
5798
5799 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
5800 // The result is the min of all operands' results.
5801 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
5802 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
5803 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
5804 return MinOpRes;
5805 }
5806
5807 if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
5808 // The result is the min of all operands' results.
5809 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
5810 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
5811 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
5812 return MinOpRes;
5813 }
5814
5815 if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
5816 // The result is the min of all operands' results.
5817 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
5818 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
5819 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
5820 return MinOpRes;
5821 }
5822
5823 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
5824 // For a SCEVUnknown, ask ValueTracking.
5825 KnownBits Known = computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT);
5826 return Known.countMinTrailingZeros();
5827 }
5828
5829 // SCEVUDivExpr
5830 return 0;
5831}
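// A quick check of the add/mul rules above, with made-up operands: if
// X = (4 * %a) and Y = (8 * %b), X has at least 2 and Y at least 3
// trailing zero bits, so (X + Y) is guaranteed min(2, 3) = 2 and
// (X * Y) is guaranteed 2 + 3 = 5 (capped at the bit width).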
5832
5833uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
5834 auto I = MinTrailingZerosCache.find(S);
5835 if (I != MinTrailingZerosCache.end())
5836 return I->second;
5837
5838 uint32_t Result = GetMinTrailingZerosImpl(S);
5839 auto InsertPair = MinTrailingZerosCache.insert({S, Result});
5840 assert(InsertPair.second && "Should insert a new key");
5841 return InsertPair.first->second;
5842}
5843
5844/// Helper method to assign a range to V from metadata present in the IR.
5845static Optional<ConstantRange> GetRangeFromMetadata(Value *V) {
5846 if (Instruction *I = dyn_cast<Instruction>(V))
5847 if (MDNode *MD = I->getMetadata(LLVMContext::MD_range))
5848 return getConstantRangeFromMetadata(*MD);
5849
5850 return None;
5851}
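// Illustrative sketch (hypothetical IR): a load annotated as
//   %v = load i64, i64* %p, !range !0   ; with !0 = !{i64 0, i64 100}
// yields the half-open ConstantRange [0, 100) here.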
5852
5853void ScalarEvolution::setNoWrapFlags(SCEVAddRecExpr *AddRec,
5854 SCEV::NoWrapFlags Flags) {
5855 if (AddRec->getNoWrapFlags(Flags) != Flags) {
5856 AddRec->setNoWrapFlags(Flags);
5857 UnsignedRanges.erase(AddRec);
5858 SignedRanges.erase(AddRec);
5859 }
5860}
5861
5862ConstantRange ScalarEvolution::
5863getRangeForUnknownRecurrence(const SCEVUnknown *U) {
5864 const DataLayout &DL = getDataLayout();
5865
5866 unsigned BitWidth = getTypeSizeInBits(U->getType());
5867 const ConstantRange FullSet(BitWidth, /*isFullSet=*/true);
5868
5869 // Match a simple recurrence of the form: <start, ShiftOp, Step>, and then
5870 // use information about the trip count to improve our available range. Note
5871 // that the trip count independent cases are already handled by known bits.
5872 // WARNING: The definition of recurrence used here is subtly different than
5873 // the one used by AddRec (and thus most of this file). Step is allowed to
5874 // be arbitrarily loop varying here, where AddRec allows only loop invariant
5875 // and other addrecs in the same loop (for non-affine addrecs). The code
5876 // below intentionally handles the case where step is not loop invariant.
5877 auto *P = dyn_cast<PHINode>(U->getValue());
5878 if (!P)
5879 return FullSet;
5880
5881 // Make sure that no Phi input comes from an unreachable block. Otherwise,
5882 // even the values that are not available in these blocks may come from them,
5883 // and this leads to a false-positive recurrence test.
5884 for (auto *Pred : predecessors(P->getParent()))
5885 if (!DT.isReachableFromEntry(Pred))
5886 return FullSet;
5887
5888 BinaryOperator *BO;
5889 Value *Start, *Step;
5890 if (!matchSimpleRecurrence(P, BO, Start, Step))
5891 return FullSet;
5892
5893 // If we found a recurrence in reachable code, we must be in a loop. Note
5894 // that BO might be in some subloop of L, and that's completely okay.
5895 auto *L = LI.getLoopFor(P->getParent());
5896 assert(L && L->getHeader() == P->getParent());
5897 if (!L->contains(BO->getParent()))
5898 // NOTE: This bailout should be an assert instead. However, asserting
5899 // the condition here exposes a case where LoopFusion is querying SCEV
5900 // with malformed loop information in the midst of the transform.
5901 // There doesn't appear to be an obvious fix, so for the moment bailout
5902 // until the caller issue can be fixed. PR49566 tracks the bug.
5903 return FullSet;
5904
5905 // TODO: Extend to other opcodes such as mul, and div
5906 switch (BO->getOpcode()) {
5907 default:
5908 return FullSet;
5909 case Instruction::AShr:
5910 case Instruction::LShr:
5911 case Instruction::Shl:
5912 break;
5913 }
5914
5915 if (BO->getOperand(0) != P)
5916 // TODO: Handle the power function forms some day.
5917 return FullSet;
5918
5919 unsigned TC = getSmallConstantMaxTripCount(L);
5920 if (!TC || TC >= BitWidth)
5921 return FullSet;
5922
5923 auto KnownStart = computeKnownBits(Start, DL, 0, &AC, nullptr, &DT);
5924 auto KnownStep = computeKnownBits(Step, DL, 0, &AC, nullptr, &DT);
5925 assert(KnownStart.getBitWidth() == BitWidth &&
5926        KnownStep.getBitWidth() == BitWidth);
5927
5928 // Compute total shift amount, being careful of overflow and bitwidths.
5929 auto MaxShiftAmt = KnownStep.getMaxValue();
5930 APInt TCAP(BitWidth, TC-1);
5931 bool Overflow = false;
5932 auto TotalShift = MaxShiftAmt.umul_ov(TCAP, Overflow);
5933 if (Overflow)
5934 return FullSet;
5935
5936 switch (BO->getOpcode()) {
5937 default:
5938 llvm_unreachable("filtered out above");
5939 case Instruction::AShr: {
5940 // For each ashr, three cases:
5941 // shift = 0 => unchanged value
5942 // saturation => 0 or -1
5943 // other => a value closer to zero (of the same sign)
5944 // Thus, the end value is closer to zero than the start.
5945 auto KnownEnd = KnownBits::ashr(KnownStart,
5946 KnownBits::makeConstant(TotalShift));
5947 if (KnownStart.isNonNegative())
5948 // Analogous to lshr (simply not yet canonicalized)
5949 return ConstantRange::getNonEmpty(KnownEnd.getMinValue(),
5950 KnownStart.getMaxValue() + 1);
5951 if (KnownStart.isNegative())
5952 // End >=u Start && End <=s Start
5953 return ConstantRange::getNonEmpty(KnownStart.getMinValue(),
5954 KnownEnd.getMaxValue() + 1);
5955 break;
5956 }
5957 case Instruction::LShr: {
5958 // For each lshr, three cases:
5959 // shift = 0 => unchanged value
5960 // saturation => 0
5961 // other => a smaller positive number
5962 // Thus, the low end of the unsigned range is the last value produced.
5963 auto KnownEnd = KnownBits::lshr(KnownStart,
5964 KnownBits::makeConstant(TotalShift));
5965 return ConstantRange::getNonEmpty(KnownEnd.getMinValue(),
5966 KnownStart.getMaxValue() + 1);
5967 }
5968 case Instruction::Shl: {
5969 // Iff no bits are shifted out, value increases on every shift.
5970 auto KnownEnd = KnownBits::shl(KnownStart,
5971 KnownBits::makeConstant(TotalShift));
5972 if (TotalShift.ult(KnownStart.countMinLeadingZeros()))
5973 return ConstantRange(KnownStart.getMinValue(),
5974 KnownEnd.getMaxValue() + 1);
5975 break;
5976 }
5977 }
5978 return FullSet;
5979}
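// The shape this matches, as a hypothetical sketch:
//
//   loop:
//     %iv     = phi i8 [ %start, %entry ], [ %iv.shr, %loop ]
//     %iv.shr = lshr i8 %iv, %step
//
// With a constant max trip count TC, the value is shifted right by at
// most (TC - 1) * umax(%step) bits in total, so KnownBits::lshr on the
// start bounds the final unsigned range even if %step varies per
// iteration.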
5980
5981/// Determine the range for a particular SCEV. If SignHint is
5982/// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges
5983/// with a "cleaner" unsigned (resp. signed) representation.
5984const ConstantRange &
5985ScalarEvolution::getRangeRef(const SCEV *S,
5986 ScalarEvolution::RangeSignHint SignHint) {
5987 DenseMap<const SCEV *, ConstantRange> &Cache =
5988 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
5989 : SignedRanges;
5990 ConstantRange::PreferredRangeType RangeType =
5991 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED
5992 ? ConstantRange::Unsigned : ConstantRange::Signed;
5993
5994 // See if we've computed this range already.
5995 DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S);
5996 if (I != Cache.end())
5997 return I->second;
5998
5999 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
6000 return setRange(C, SignHint, ConstantRange(C->getAPInt()));
6001
6002 unsigned BitWidth = getTypeSizeInBits(S->getType());
6003 ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
6004 using OBO = OverflowingBinaryOperator;
6005
6006 // If the value has known zeros, the maximum value will have those known zeros
6007 // as well.
6008 uint32_t TZ = GetMinTrailingZeros(S);
6009 if (TZ != 0) {
6010 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED)
6011 ConservativeResult =
6012 ConstantRange(APInt::getMinValue(BitWidth),
6013 APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1);
6014 else
6015 ConservativeResult = ConstantRange(
6016 APInt::getSignedMinValue(BitWidth),
6017 APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1);
6018 }
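  // As a concrete instance of the sharpening above (example numbers, not
  // from the source): with BitWidth = 8 and TZ = 2, the unsigned hint
  // gives [0, 0b11111100 + 1) and the signed hint [-128, 0b01111100 + 1).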
6019
6020 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
6021 ConstantRange X = getRangeRef(Add->getOperand(0), SignHint);
6022 unsigned WrapType = OBO::AnyWrap;
6023 if (Add->hasNoSignedWrap())
6024 WrapType |= OBO::NoSignedWrap;
6025 if (Add->hasNoUnsignedWrap())
6026 WrapType |= OBO::NoUnsignedWrap;
6027 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
6028 X = X.addWithNoWrap(getRangeRef(Add->getOperand(i), SignHint),
6029 WrapType, RangeType);
6030 return setRange(Add, SignHint,
6031 ConservativeResult.intersectWith(X, RangeType));
6032 }
6033
6034 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
6035 ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint);
6036 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
6037 X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint));
6038 return setRange(Mul, SignHint,
6039 ConservativeResult.intersectWith(X, RangeType));
6040 }
6041
6042 if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
6043 ConstantRange X = getRangeRef(SMax->getOperand(0), SignHint);
6044 for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
6045 X = X.smax(getRangeRef(SMax->getOperand(i), SignHint));
6046 return setRange(SMax, SignHint,
6047 ConservativeResult.intersectWith(X, RangeType));
6048 }
6049
6050 if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
6051 ConstantRange X = getRangeRef(UMax->getOperand(0), SignHint);
6052 for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
6053 X = X.umax(getRangeRef(UMax->getOperand(i), SignHint));
6054 return setRange(UMax, SignHint,
6055 ConservativeResult.intersectWith(X, RangeType));
6056 }
6057
6058 if (const SCEVSMinExpr *SMin = dyn_cast<SCEVSMinExpr>(S)) {
6059 ConstantRange X = getRangeRef(SMin->getOperand(0), SignHint);
6060 for (unsigned i = 1, e = SMin->getNumOperands(); i != e; ++i)
6061 X = X.smin(getRangeRef(SMin->getOperand(i), SignHint));
6062 return setRange(SMin, SignHint,
6063 ConservativeResult.intersectWith(X, RangeType));
6064 }
6065
6066 if (const SCEVUMinExpr *UMin = dyn_cast<SCEVUMinExpr>(S)) {
6067 ConstantRange X = getRangeRef(UMin->getOperand(0), SignHint);
6068 for (unsigned i = 1, e = UMin->getNumOperands(); i != e; ++i)
6069 X = X.umin(getRangeRef(UMin->getOperand(i), SignHint));
6070 return setRange(UMin, SignHint,
6071 ConservativeResult.intersectWith(X, RangeType));
6072 }
6073
6074 if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
6075 ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint);
6076 ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint);
6077 return setRange(UDiv, SignHint,
6078 ConservativeResult.intersectWith(X.udiv(Y), RangeType));
6079 }
6080
6081 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
6082 ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint);
6083 return setRange(ZExt, SignHint,
6084 ConservativeResult.intersectWith(X.zeroExtend(BitWidth),
6085 RangeType));
6086 }
6087
6088 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
6089 ConstantRange X = getRangeRef(SExt->getOperand(), SignHint);
6090 return setRange(SExt, SignHint,
6091 ConservativeResult.intersectWith(X.signExtend(BitWidth),
6092 RangeType));
6093 }
6094
6095 if (const SCEVPtrToIntExpr *PtrToInt = dyn_cast<SCEVPtrToIntExpr>(S)) {
6096 ConstantRange X = getRangeRef(PtrToInt->getOperand(), SignHint);
6097 return setRange(PtrToInt, SignHint, X);
6098 }
6099
6100 if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
6101 ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint);
6102 return setRange(Trunc, SignHint,
6103 ConservativeResult.intersectWith(X.truncate(BitWidth),
6104 RangeType));
6105 }
6106
6107 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
6108 // If there's no unsigned wrap, the value will never be less than its
6109 // initial value.
6110 if (AddRec->hasNoUnsignedWrap()) {
6111 APInt UnsignedMinValue = getUnsignedRangeMin(AddRec->getStart());
6112 if (!UnsignedMinValue.isNullValue())
6113 ConservativeResult = ConservativeResult.intersectWith(
6114 ConstantRange(UnsignedMinValue, APInt(BitWidth, 0)), RangeType);
6115 }
6116
6117 // If there's no signed wrap, and all the operands except the initial value
6118 // have the same sign or zero, the value won't ever be:
6119 // 1: smaller than the initial value if the operands are non-negative,
6120 // 2: bigger than the initial value if the operands are non-positive.
6121 // In both cases, the value cannot cross the signed min/max boundary.
6122 if (AddRec->hasNoSignedWrap()) {
6123 bool AllNonNeg = true;
6124 bool AllNonPos = true;
6125 for (unsigned i = 1, e = AddRec->getNumOperands(); i != e; ++i) {
6126 if (!isKnownNonNegative(AddRec->getOperand(i)))
6127 AllNonNeg = false;
6128 if (!isKnownNonPositive(AddRec->getOperand(i)))
6129 AllNonPos = false;
6130 }
6131 if (AllNonNeg)
6132 ConservativeResult = ConservativeResult.intersectWith(
6133 ConstantRange::getNonEmpty(getSignedRangeMin(AddRec->getStart()),
6134 APInt::getSignedMinValue(BitWidth)),
6135 RangeType);
6136 else if (AllNonPos)
6137 ConservativeResult = ConservativeResult.intersectWith(
6138 ConstantRange::getNonEmpty(
6139 APInt::getSignedMinValue(BitWidth),
6140 getSignedRangeMax(AddRec->getStart()) + 1),
6141 RangeType);
6142 }
6143
6144 // TODO: non-affine addrec
6145 if (AddRec->isAffine()) {
6146 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(AddRec->getLoop());
6147 if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
6148 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
6149 auto RangeFromAffine = getRangeForAffineAR(
6150 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
6151 BitWidth);
6152 ConservativeResult =
6153 ConservativeResult.intersectWith(RangeFromAffine, RangeType);
6154
6155 auto RangeFromFactoring = getRangeViaFactoring(
6156 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
6157 BitWidth);
6158 ConservativeResult =
6159 ConservativeResult.intersectWith(RangeFromFactoring, RangeType);
6160 }
6161
6162 // Now try symbolic BE count and more powerful methods.
6163 if (UseExpensiveRangeSharpening) {
6164 const SCEV *SymbolicMaxBECount =
6165 getSymbolicMaxBackedgeTakenCount(AddRec->getLoop());
6166 if (!isa<SCEVCouldNotCompute>(SymbolicMaxBECount) &&
6167 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth &&
6168 AddRec->hasNoSelfWrap()) {
6169 auto RangeFromAffineNew = getRangeForAffineNoSelfWrappingAR(
6170 AddRec, SymbolicMaxBECount, BitWidth, SignHint);
6171 ConservativeResult =
6172 ConservativeResult.intersectWith(RangeFromAffineNew, RangeType);
6173 }
6174 }
6175 }
6176
6177 return setRange(AddRec, SignHint, std::move(ConservativeResult));
6178 }
6179
6180 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
6181
6182 // Check if the IR explicitly contains !range metadata.
6183 Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue());
6184 if (MDRange.hasValue())
6185 ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue(),
6186 RangeType);
6187
6188 // Use facts about recurrences in the underlying IR. Note that add
6189 // recurrences are AddRecExprs and thus don't hit this path. This
6190 // primarily handles shift recurrences.
6191 auto CR = getRangeForUnknownRecurrence(U);
6192 ConservativeResult = ConservativeResult.intersectWith(CR);
6193
6194 // See if ValueTracking can give us a useful range.
6195 const DataLayout &DL = getDataLayout();
6196 KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
6197 if (Known.getBitWidth() != BitWidth)
6198 Known = Known.zextOrTrunc(BitWidth);
6199
6200 // ValueTracking may be able to compute a tighter result for the number of
6201 // sign bits than for the value of those sign bits.
6202 unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
6203 if (U->getType()->isPointerTy()) {
6204 // If the pointer size is larger than the index type size, this can cause
6205 // NS to be larger than BitWidth. So compensate for this.
6206 unsigned ptrSize = DL.getPointerTypeSizeInBits(U->getType());
6207 int ptrIdxDiff = ptrSize - BitWidth;
6208 if (ptrIdxDiff > 0 && ptrSize > BitWidth && NS > (unsigned)ptrIdxDiff)
6209 NS -= ptrIdxDiff;
6210 }
6211
6212 if (NS > 1) {
6213 // If we know any of the sign bits, we know all of the sign bits.
6214 if (!Known.Zero.getHiBits(NS).isNullValue())
6215 Known.Zero.setHighBits(NS);
6216 if (!Known.One.getHiBits(NS).isNullValue())
6217 Known.One.setHighBits(NS);
6218 }
6219
6220 if (Known.getMinValue() != Known.getMaxValue() + 1)
6221 ConservativeResult = ConservativeResult.intersectWith(
6222 ConstantRange(Known.getMinValue(), Known.getMaxValue() + 1),
6223 RangeType);
6224 if (NS > 1)
6225 ConservativeResult = ConservativeResult.intersectWith(
6226 ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
6227 APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1),
6228 RangeType);
6229
6230 // A range of Phi is a subset of union of all ranges of its input.
6231 if (const PHINode *Phi = dyn_cast<PHINode>(U->getValue())) {
6232 // Make sure that we do not run over cycled Phis.
6233 if (PendingPhiRanges.insert(Phi).second) {
6234 ConstantRange RangeFromOps(BitWidth, /*isFullSet=*/false);
6235 for (auto &Op : Phi->operands()) {
6236 auto OpRange = getRangeRef(getSCEV(Op), SignHint);
6237 RangeFromOps = RangeFromOps.unionWith(OpRange);
6238 // No point to continue if we already have a full set.
6239 if (RangeFromOps.isFullSet())
6240 break;
6241 }
6242 ConservativeResult =
6243 ConservativeResult.intersectWith(RangeFromOps, RangeType);
6244 bool Erased = PendingPhiRanges.erase(Phi);
6245 assert(Erased && "Failed to erase Phi properly?");
6246 (void) Erased;
6247 }
6248 }
6249
6250 return setRange(U, SignHint, std::move(ConservativeResult));
6251 }
6252
6253 return setRange(S, SignHint, std::move(ConservativeResult));
6254}
6255
6256// Given a StartRange, Step and MaxBECount for an expression compute a range of
6257// values that the expression can take. Initially, the expression has a value
6258// from StartRange and then is changed by Step up to MaxBECount times. Signed
6259// argument defines if we treat Step as signed or unsigned.
6260static ConstantRange getRangeForAffineARHelper(APInt Step,
6261 const ConstantRange &StartRange,
6262 const APInt &MaxBECount,
6263 unsigned BitWidth, bool Signed) {
6264 // If either Step or MaxBECount is 0, then the expression won't change, and we
6265 // just need to return the initial range.
6266 if (Step == 0 || MaxBECount == 0)
6267 return StartRange;
6268
6269 // If we don't know anything about the initial value (i.e. StartRange is
6270 // FullRange), then we don't know anything about the final range either.
6271 // Return FullRange.
6272 if (StartRange.isFullSet())
6273 return ConstantRange::getFull(BitWidth);
6274
6275 // If Step is signed and negative, then we use its absolute value, but we also
6276 // note that we're moving in the opposite direction.
6277 bool Descending = Signed && Step.isNegative();
6278
6279 if (Signed)
6280 // This is correct even for INT_SMIN. Let's look at i8 to illustrate this:
6281 // abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128.
6282 // These equations hold true due to the well-defined wrap-around behavior of
6283 // APInt.
6284 Step = Step.abs();
6285
6286 // Check if Offset is more than the full span of BitWidth. If it is, the
6287 // expression is guaranteed to overflow.
6288 if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount))
6289 return ConstantRange::getFull(BitWidth);
6290
6291 // Offset is by how much the expression can change. Checks above guarantee no
6292 // overflow here.
6293 APInt Offset = Step * MaxBECount;
6294
6295 // Minimum value of the final range will match the minimal value of StartRange
6296 // if the expression is increasing and will be decreased by Offset otherwise.
6297 // Maximum value of the final range will match the maximal value of StartRange
6298 // if the expression is decreasing and will be increased by Offset otherwise.
6299 APInt StartLower = StartRange.getLower();
6300 APInt StartUpper = StartRange.getUpper() - 1;
6301 APInt MovedBoundary = Descending ? (StartLower - std::move(Offset))
6302 : (StartUpper + std::move(Offset));
6303
6304 // It's possible that the new minimum/maximum value will fall into the initial
6305 // range (due to wrap around). This means that the expression can take any
6306 // value in this bitwidth, and we have to return full range.
6307 if (StartRange.contains(MovedBoundary))
6308 return ConstantRange::getFull(BitWidth);
6309
6310 APInt NewLower =
6311 Descending ? std::move(MovedBoundary) : std::move(StartLower);
6312 APInt NewUpper =
6313 Descending ? std::move(StartUpper) : std::move(MovedBoundary);
6314 NewUpper += 1;
6315
6316 // No overflow detected, return [StartLower, StartUpper + Offset + 1) range.
6317 return ConstantRange::getNonEmpty(std::move(NewLower), std::move(NewUpper));
6318}
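// A numeric check of the helper with made-up values: Step = 2,
// MaxBECount = 10, StartRange = [0, 5), ascending. Offset = 20, and the
// result is [StartLower, StartUpper + Offset + 1) = [0, 4 + 20 + 1) =
// [0, 25).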
6319
6320ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start,
6321 const SCEV *Step,
6322 const SCEV *MaxBECount,
6323 unsigned BitWidth) {
6324 assert(!isa<SCEVCouldNotCompute>(MaxBECount) &&
6325        getTypeSizeInBits(MaxBECount->getType()) <= BitWidth &&
6326        "Precondition!");
6327
6328 MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType());
6329 APInt MaxBECountValue = getUnsignedRangeMax(MaxBECount);
6330
6331 // First, consider step signed.
6332 ConstantRange StartSRange = getSignedRange(Start);
6333 ConstantRange StepSRange = getSignedRange(Step);
6334
6335 // If Step can be both positive and negative, we need to find ranges for the
6336 // maximum absolute step values in both directions and union them.
6337 ConstantRange SR =
6338 getRangeForAffineARHelper(StepSRange.getSignedMin(), StartSRange,
6339 MaxBECountValue, BitWidth, /* Signed = */ true);
6340 SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(),
6341 StartSRange, MaxBECountValue,
6342 BitWidth, /* Signed = */ true));
6343
6344 // Next, consider step unsigned.
6345 ConstantRange UR = getRangeForAffineARHelper(
6346 getUnsignedRangeMax(Step), getUnsignedRange(Start),
6347 MaxBECountValue, BitWidth, /* Signed = */ false);
6348
6349 // Finally, intersect signed and unsigned ranges.
6350 return SR.intersectWith(UR, ConstantRange::Smallest);
6351}
6352
6353ConstantRange ScalarEvolution::getRangeForAffineNoSelfWrappingAR(
6354 const SCEVAddRecExpr *AddRec, const SCEV *MaxBECount, unsigned BitWidth,
6355 ScalarEvolution::RangeSignHint SignHint) {
6356 assert(AddRec->isAffine() && "Non-affine AddRecs are not supported!\n");
6357 assert(AddRec->hasNoSelfWrap() &&
6358        "This only works for non-self-wrapping AddRecs!");
6359 const bool IsSigned = SignHint == HINT_RANGE_SIGNED;
6360 const SCEV *Step = AddRec->getStepRecurrence(*this);
6361 // Only deal with constant step to save compile time.
6362 if (!isa<SCEVConstant>(Step))
6363 return ConstantRange::getFull(BitWidth);
6364 // Let's make sure that we can prove that we do not self-wrap during
6365 // MaxBECount iterations. We need this because MaxBECount is a maximum
6366 // iteration count estimate, and we might infer nw from some exit for which we
6367 // do not know max exit count (or any other side reasoning).
6368 // TODO: Turn into assert at some point.
6369 if (getTypeSizeInBits(MaxBECount->getType()) >
6370 getTypeSizeInBits(AddRec->getType()))
6371 return ConstantRange::getFull(BitWidth);
6372 MaxBECount = getNoopOrZeroExtend(MaxBECount, AddRec->getType());
6373 const SCEV *RangeWidth = getMinusOne(AddRec->getType());
6374 const SCEV *StepAbs = getUMinExpr(Step, getNegativeSCEV(Step));
6375 const SCEV *MaxItersWithoutWrap = getUDivExpr(RangeWidth, StepAbs);
6376 if (!isKnownPredicateViaConstantRanges(ICmpInst::ICMP_ULE, MaxBECount,
6377 MaxItersWithoutWrap))
6378 return ConstantRange::getFull(BitWidth);
6379
6380 ICmpInst::Predicate LEPred =
6381 IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
6382 ICmpInst::Predicate GEPred =
6383 IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
6384 const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);
6385
6386 // We know that there is no self-wrap. Let's take Start and End values and
6387 // look at all intermediate values V1, V2, ..., Vn that IndVar takes during
6388 // the iteration. They either lie inside the range [Min(Start, End),
6389 // Max(Start, End)] or outside it:
6390 //
6391 // Case 1: RangeMin ... Start V1 ... VN End ... RangeMax;
6392 // Case 2: RangeMin Vk ... V1 Start ... End Vn ... Vk + 1 RangeMax;
6393 //
6394 // No self wrap flag guarantees that the intermediate values cannot be BOTH
6395 // outside and inside the range [Min(Start, End), Max(Start, End)]. Using that
6396 // knowledge, let's try to prove that we are dealing with Case 1. It is so if
6397 // Start <= End and step is positive, or Start >= End and step is negative.
6398 const SCEV *Start = AddRec->getStart();
6399 ConstantRange StartRange = getRangeRef(Start, SignHint);
6400 ConstantRange EndRange = getRangeRef(End, SignHint);
6401 ConstantRange RangeBetween = StartRange.unionWith(EndRange);
6402 // If they already cover full iteration space, we will know nothing useful
6403 // even if we prove what we want to prove.
6404 if (RangeBetween.isFullSet())
6405 return RangeBetween;
6406 // Only deal with ranges that do not wrap (i.e. RangeMin < RangeMax).
6407 bool IsWrappedSet = IsSigned ? RangeBetween.isSignWrappedSet()
6408 : RangeBetween.isWrappedSet();
6409 if (IsWrappedSet)
6410 return ConstantRange::getFull(BitWidth);
6411
6412 if (isKnownPositive(Step) &&
6413 isKnownPredicateViaConstantRanges(LEPred, Start, End))
6414 return RangeBetween;
6415 else if (isKnownNegative(Step) &&
6416 isKnownPredicateViaConstantRanges(GEPred, Start, End))
6417 return RangeBetween;
6418 return ConstantRange::getFull(BitWidth);
6419}
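// Hypothetical numbers for Case 1 above: if Start has range [10, 20),
// End has range [50, 60), and Step is known positive with Start <= End
// provable, RangeBetween = [10, 20) union [50, 60) = [10, 60) is
// returned as the AddRec's range.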
6420
6421ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start,
6422 const SCEV *Step,
6423 const SCEV *MaxBECount,
6424 unsigned BitWidth) {
6425 // RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q})
6426 // == RangeOf({A,+,P}) union RangeOf({B,+,Q})
6427
6428 struct SelectPattern {
6429 Value *Condition = nullptr;
6430 APInt TrueValue;
6431 APInt FalseValue;
6432
6433 explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth,
6434 const SCEV *S) {
6435 Optional<unsigned> CastOp;
6436 APInt Offset(BitWidth, 0);
6437
6438 assert(SE.getTypeSizeInBits(S->getType()) == BitWidth &&
6439        "Should be!");
6440
6441 // Peel off a constant offset:
6442 if (auto *SA = dyn_cast<SCEVAddExpr>(S)) {
6443 // In the future we could consider being smarter here and handle
6444 // {Start+Step,+,Step} too.
6445 if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0)))
6446 return;
6447
6448 Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt();
6449 S = SA->getOperand(1);
6450 }
6451
6452 // Peel off a cast operation
6453 if (auto *SCast = dyn_cast<SCEVIntegralCastExpr>(S)) {
6454 CastOp = SCast->getSCEVType();
6455 S = SCast->getOperand();
6456 }
6457
6458 using namespace llvm::PatternMatch;
6459
6460 auto *SU = dyn_cast<SCEVUnknown>(S);
6461 const APInt *TrueVal, *FalseVal;
6462 if (!SU ||
6463 !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal),
6464 m_APInt(FalseVal)))) {
6465 Condition = nullptr;
6466 return;
6467 }
6468
6469 TrueValue = *TrueVal;
6470 FalseValue = *FalseVal;
6471
6472 // Re-apply the cast we peeled off earlier
6473 if (CastOp.hasValue())
6474 switch (*CastOp) {
6475 default:
6476 llvm_unreachable("Unknown SCEV cast type!");
6477
6478 case scTruncate:
6479 TrueValue = TrueValue.trunc(BitWidth);
6480 FalseValue = FalseValue.trunc(BitWidth);
6481 break;
6482 case scZeroExtend:
6483 TrueValue = TrueValue.zext(BitWidth);
6484 FalseValue = FalseValue.zext(BitWidth);
6485 break;
6486 case scSignExtend:
6487 TrueValue = TrueValue.sext(BitWidth);
6488 FalseValue = FalseValue.sext(BitWidth);
6489 break;
6490 }
6491
6492 // Re-apply the constant offset we peeled off earlier
6493 TrueValue += Offset;
6494 FalseValue += Offset;
6495 }
6496
6497 bool isRecognized() { return Condition != nullptr; }
6498 };
6499
6500 SelectPattern StartPattern(*this, BitWidth, Start);
6501 if (!StartPattern.isRecognized())
6502 return ConstantRange::getFull(BitWidth);
6503
6504 SelectPattern StepPattern(*this, BitWidth, Step);
6505 if (!StepPattern.isRecognized())
6506 return ConstantRange::getFull(BitWidth);
6507
6508 if (StartPattern.Condition != StepPattern.Condition) {
6509 // We don't handle this case today; but we could, by considering four
6510 // possibilities below instead of two. I'm not sure if there are cases where
6511 // that will help over what getRange already does, though.
6512 return ConstantRange::getFull(BitWidth);
6513 }
6514
6515 // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to
6516 // construct arbitrary general SCEV expressions here. This function is called
6517 // from deep in the call stack, and calling getSCEV (on a sext instruction,
6518 // say) can end up caching a suboptimal value.
6519
6520 // FIXME: without the explicit `this` receiver below, MSVC errors out with
6521 // C2352 and C2512 (otherwise it isn't needed).
6522
6523 const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue);
6524 const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue);
6525 const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue);
6526 const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue);
6527
6528 ConstantRange TrueRange =
6529 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth);
6530 ConstantRange FalseRange =
6531 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth);
6532
6533 return TrueRange.unionWith(FalseRange);
6534}
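// A worked instance of the identity above (hypothetical select %c): for
// Start = (%c ? 0 : 10) and Step = (%c ? 1 : 2),
//   RangeOf({Start,+,Step}) == RangeOf({0,+,1}) union RangeOf({10,+,2}),
// and each side is computed with getRangeForAffineAR.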
6535
6536SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) {
6537 if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap;
6538 const BinaryOperator *BinOp = cast<BinaryOperator>(V);
6539
6540 // Return early if there are no flags to propagate to the SCEV.
6541 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
6542 if (BinOp->hasNoUnsignedWrap())
6543 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
6544 if (BinOp->hasNoSignedWrap())
6545 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
6546 if (Flags == SCEV::FlagAnyWrap)
6547 return SCEV::FlagAnyWrap;
6548
6549 return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap;
6550}
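// Sketch of the intended behavior: for  %a = add nuw nsw i32 %x, %y
// this returns FlagNUW | FlagNSW, but only if isSCEVExprNeverPoison(%a)
// holds; otherwise the flags degrade to FlagAnyWrap, since other
// instructions mapping to the same SCEV might wrap.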
6551
6552bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) {
6553 // Here we check that I is in the header of the innermost loop containing I,
6554 // since we only deal with instructions in the loop header. The actual loop we
6555 // need to check later will come from an add recurrence, but getting that
6556 // requires computing the SCEV of the operands, which can be expensive. This
6557 // check we can do cheaply to rule out some cases early.
6558 Loop *InnermostContainingLoop = LI.getLoopFor(I->getParent());
6559 if (InnermostContainingLoop == nullptr ||
6560 InnermostContainingLoop->getHeader() != I->getParent())
6561 return false;
6562
6563 // Only proceed if we can prove that I does not yield poison.
6564 if (!programUndefinedIfPoison(I))
6565 return false;
6566
6567 // At this point we know that if I is executed, then it does not wrap
6568 // according to at least one of NSW or NUW. If I is not executed, then we do
6569 // not know if the calculation that I represents would wrap. Multiple
6570 // instructions can map to the same SCEV. If we apply NSW or NUW from I to
6571 // the SCEV, we must guarantee no wrapping for that SCEV also when it is
6572 // derived from other instructions that map to the same SCEV. We cannot make
6573 // that guarantee for cases where I is not executed. So we need to find the
6574 // loop that I is considered in relation to and prove that I is executed for
6575 // every iteration of that loop. That implies that the value that I
6576 // calculates does not wrap anywhere in the loop, so then we can apply the
6577 // flags to the SCEV.
6578 //
6579 // We check isLoopInvariant to disambiguate in case we are adding recurrences
6580 // from different loops, so that we know which loop to prove that I is
6581 // executed in.
6582 for (unsigned OpIndex = 0; OpIndex < I->getNumOperands(); ++OpIndex) {
6583 // I could be an extractvalue from a call to an overflow intrinsic.
6584 // TODO: We can do better here in some cases.
6585 if (!isSCEVable(I->getOperand(OpIndex)->getType()))
6586 return false;
6587 const SCEV *Op = getSCEV(I->getOperand(OpIndex));
6588 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
6589 bool AllOtherOpsLoopInvariant = true;
6590 for (unsigned OtherOpIndex = 0; OtherOpIndex < I->getNumOperands();
6591 ++OtherOpIndex) {
6592 if (OtherOpIndex != OpIndex) {
6593 const SCEV *OtherOp = getSCEV(I->getOperand(OtherOpIndex));
6594 if (!isLoopInvariant(OtherOp, AddRec->getLoop())) {
6595 AllOtherOpsLoopInvariant = false;
6596 break;
6597 }
6598 }
6599 }
6600 if (AllOtherOpsLoopInvariant &&
6601 isGuaranteedToExecuteForEveryIteration(I, AddRec->getLoop()))
6602 return true;
6603 }
6604 }
6605 return false;
6606}
6607
6608bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) {
6609 // If we know that \c I can never be poison period, then that's enough.
6610 if (isSCEVExprNeverPoison(I))
6611 return true;
6612
6613 // For an add recurrence specifically, we assume that infinite loops without
6614 // side effects are undefined behavior, and then reason as follows:
6615 //
6616 // If the add recurrence is poison in any iteration, it is poison on all
6617 // future iterations (since incrementing poison yields poison). If the result
6618 // of the add recurrence is fed into the loop latch condition and the loop
6619 // does not contain any throws or exiting blocks other than the latch, we now
6620 // have the ability to "choose" whether the backedge is taken or not (by
6621 // choosing a sufficiently evil value for the poison feeding into the branch)
6622 // for every iteration including and after the one in which \p I first became
6623 // poison. There are two possibilities (let's call the iteration in which \p
6624 // I first became poison as K):
6625 //
6626 // 1. In the set of iterations including and after K, the loop body executes
6627 // no side effects. In this case executing the backedge an infinite number
6628 // of times will yield undefined behavior.
6629 //
6630 // 2. In the set of iterations including and after K, the loop body executes
6631 // at least one side effect. In this case, that specific instance of side
6632 // effect is control dependent on poison, which also yields undefined
6633 // behavior.
6634
6635 auto *ExitingBB = L->getExitingBlock();
6636 auto *LatchBB = L->getLoopLatch();
6637 if (!ExitingBB || !LatchBB || ExitingBB != LatchBB)
6638 return false;
6639
6640 SmallPtrSet<const Instruction *, 16> Pushed;
6641 SmallVector<const Instruction *, 8> PoisonStack;
6642
6643 // We start by assuming \c I, the post-inc add recurrence, is poison. Only
6644 // things that are known to be poison under that assumption go on the
6645 // PoisonStack.
6646 Pushed.insert(I);
6647 PoisonStack.push_back(I);
6648
6649 bool LatchControlDependentOnPoison = false;
6650 while (!PoisonStack.empty() && !LatchControlDependentOnPoison) {
6651 const Instruction *Poison = PoisonStack.pop_back_val();
6652
6653 for (auto *PoisonUser : Poison->users()) {
6654 if (propagatesPoison(cast<Operator>(PoisonUser))) {
6655 if (Pushed.insert(cast<Instruction>(PoisonUser)).second)
6656 PoisonStack.push_back(cast<Instruction>(PoisonUser));
6657 } else if (auto *BI = dyn_cast<BranchInst>(PoisonUser)) {
6658 assert(BI->isConditional() && "Only possibility!");
6659 if (BI->getParent() == LatchBB) {
6660 LatchControlDependentOnPoison = true;
6661 break;
6662 }
6663 }
6664 }
6665 }
6666
6667 return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L);
6668}
6669
6670ScalarEvolution::LoopProperties
6671ScalarEvolution::getLoopProperties(const Loop *L) {
6672 using LoopProperties = ScalarEvolution::LoopProperties;
6673
6674 auto Itr = LoopPropertiesCache.find(L);
6675 if (Itr == LoopPropertiesCache.end()) {
6676 auto HasSideEffects = [](Instruction *I) {
6677 if (auto *SI = dyn_cast<StoreInst>(I))
6678 return !SI->isSimple();
6679
6680 return I->mayThrow() || I->mayWriteToMemory();
6681 };
6682
6683 LoopProperties LP = {/* HasNoAbnormalExits */ true,
6684 /*HasNoSideEffects*/ true};
6685
6686 for (auto *BB : L->getBlocks())
6687 for (auto &I : *BB) {
6688 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
6689 LP.HasNoAbnormalExits = false;
6690 if (HasSideEffects(&I))
6691 LP.HasNoSideEffects = false;
6692 if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
6693 break; // We're already as pessimistic as we can get.
6694 }
6695
6696 auto InsertPair = LoopPropertiesCache.insert({L, LP});
6697 assert(InsertPair.second && "We just checked!");
6698 Itr = InsertPair.first;
6699 }
6700
6701 return Itr->second;
6702}
6703
6704bool ScalarEvolution::loopIsFiniteByAssumption(const Loop *L) {
6705 // A mustprogress loop without side effects must be finite.
6706 // TODO: The check used here is very conservative. It's only *specific*
6707 // side effects which are well defined in infinite loops.
6708 return isMustProgress(L) && loopHasNoSideEffects(L);
6709}
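// For illustration: a loop in a function marked mustprogress whose body
// only reads memory passes both checks above and is assumed finite.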
6710
6711const SCEV *ScalarEvolution::createSCEV(Value *V) {
6712 if (!isSCEVable(V->getType()))
6713 return getUnknown(V);
6714
6715 if (Instruction *I = dyn_cast<Instruction>(V)) {
6716 // Don't attempt to analyze instructions in blocks that aren't
6717 // reachable. Such instructions don't matter, and they aren't required
6718 // to obey basic rules for definitions dominating uses which this
6719 // analysis depends on.
6720 if (!DT.isReachableFromEntry(I->getParent()))
6721 return getUnknown(UndefValue::get(V->getType()));
6722 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
6723 return getConstant(CI);
6724 else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
6725 return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee());
6726 else if (!isa<ConstantExpr>(V))
6727 return getUnknown(V);
6728
6729 Operator *U = cast<Operator>(V);
6730 if (auto BO = MatchBinaryOp(U, DT)) {
6731 switch (BO->Opcode) {
6732 case Instruction::Add: {
6733 // The simple thing to do would be to just call getSCEV on both operands
6734 // and call getAddExpr with the result. However if we're looking at a
6735 // bunch of things all added together, this can be quite inefficient,
6736 // because it leads to N-1 getAddExpr calls for N ultimate operands.
6737 // Instead, gather up all the operands and make a single getAddExpr call.
6738 // LLVM IR canonical form means we need only traverse the left operands.
6739 SmallVector<const SCEV *, 4> AddOps;
6740 do {
6741 if (BO->Op) {
6742 if (auto *OpSCEV = getExistingSCEV(BO->Op)) {
6743 AddOps.push_back(OpSCEV);
6744 break;
6745 }
6746
6747 // If a NUW or NSW flag can be applied to the SCEV for this
6748 // addition, then compute the SCEV for this addition by itself
6749 // with a separate call to getAddExpr. We need to do that
6750 // instead of pushing the operands of the addition onto AddOps,
6751 // since the flags are only known to apply to this particular
6752 // addition - they may not apply to other additions that can be
6753 // formed with operands from AddOps.
6754 const SCEV *RHS = getSCEV(BO->RHS);
6755 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op);
6756 if (Flags != SCEV::FlagAnyWrap) {
6757 const SCEV *LHS = getSCEV(BO->LHS);
6758 if (BO->Opcode == Instruction::Sub)
6759 AddOps.push_back(getMinusSCEV(LHS, RHS, Flags));
6760 else
6761 AddOps.push_back(getAddExpr(LHS, RHS, Flags));
6762 break;
6763 }
6764 }
6765
6766 if (BO->Opcode == Instruction::Sub)
6767 AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS)));
6768 else
6769 AddOps.push_back(getSCEV(BO->RHS));
6770
6771 auto NewBO = MatchBinaryOp(BO->LHS, DT);
6772 if (!NewBO || (NewBO->Opcode != Instruction::Add &&
6773 NewBO->Opcode != Instruction::Sub)) {
6774 AddOps.push_back(getSCEV(BO->LHS));
6775 break;
6776 }
6777 BO = NewBO;
6778 } while (true);
6779
6780 return getAddExpr(AddOps);
6781 }
6782
6783 case Instruction::Mul: {
6784 SmallVector<const SCEV *, 4> MulOps;
6785 do {
6786 if (BO->Op) {
6787 if (auto *OpSCEV = getExistingSCEV(BO->Op)) {
6788 MulOps.push_back(OpSCEV);
6789 break;
6790 }
6791
6792 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op);
6793 if (Flags != SCEV::FlagAnyWrap) {
6794 MulOps.push_back(
6795 getMulExpr(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags));
6796 break;
6797 }
6798 }
6799
6800 MulOps.push_back(getSCEV(BO->RHS));
6801 auto NewBO = MatchBinaryOp(BO->LHS, DT);
6802 if (!NewBO || NewBO->Opcode != Instruction::Mul) {
6803 MulOps.push_back(getSCEV(BO->LHS));
6804 break;
6805 }
6806 BO = NewBO;
6807 } while (true);
6808
6809 return getMulExpr(MulOps);
6810 }
6811 case Instruction::UDiv:
6812 return getUDivExpr(getSCEV(BO->LHS), getSCEV(BO->RHS));
6813 case Instruction::URem:
6814 return getURemExpr(getSCEV(BO->LHS), getSCEV(BO->RHS));
6815 case Instruction::Sub: {
6816 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
6817 if (BO->Op)
6818 Flags = getNoWrapFlagsFromUB(BO->Op);
6819 return getMinusSCEV(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags);
6820 }
6821 case Instruction::And:
6822 // For an expression like x&255 that merely masks off the high bits,
6823 // use zext(trunc(x)) as the SCEV expression.
6824 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
6825 if (CI->isZero())
6826 return getSCEV(BO->RHS);
6827 if (CI->isMinusOne())
6828 return getSCEV(BO->LHS);
6829 const APInt &A = CI->getValue();
6830
6831 // Instcombine's ShrinkDemandedConstant may strip bits out of
6832 // constants, obscuring what would otherwise be a low-bits mask.
6833 // Use computeKnownBits to compute what ShrinkDemandedConstant
6834 // knew about to reconstruct a low-bits mask value.
6835 unsigned LZ = A.countLeadingZeros();
6836 unsigned TZ = A.countTrailingZeros();
6837 unsigned BitWidth = A.getBitWidth();
6838 KnownBits Known(BitWidth);
6839 computeKnownBits(BO->LHS, Known, getDataLayout(),
6840 0, &AC, nullptr, &DT);
6841
6842 APInt EffectiveMask =
6843 APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ);
6844 if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) {
6845 const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ));
6846 const SCEV *LHS = getSCEV(BO->LHS);
6847 const SCEV *ShiftedLHS = nullptr;
6848 if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) {
6849 if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) {
6850 // For an expression like (x * 8) & 8, simplify the multiply.
6851 unsigned MulZeros = OpC->getAPInt().countTrailingZeros();
6852 unsigned GCD = std::min(MulZeros, TZ);
6853 APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD);
6854 SmallVector<const SCEV*, 4> MulOps;
6855 MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD)));
6856 MulOps.append(LHSMul->op_begin() + 1, LHSMul->op_end());
6857 auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags());
6858 ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt));
6859 }
6860 }
6861 if (!ShiftedLHS)
6862 ShiftedLHS = getUDivExpr(LHS, MulCount);
6863 return getMulExpr(
6864 getZeroExtendExpr(
6865 getTruncateExpr(ShiftedLHS,
6866 IntegerType::get(getContext(), BitWidth - LZ - TZ)),
6867 BO->LHS->getType()),
6868 MulCount);
6869 }
6870 }
6871 break;
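      // Two instances of the mask rewrite above, with made-up operands:
      //   and i32 %x, 255   (LZ = 24, TZ = 0) is modeled as
      //     (zext i8 (trunc %x to i8) to i32), and
      //   and i32 %x, 48    (mask 0b110000, LZ = 26, TZ = 4) as
      //     (16 * (zext i2 (trunc (%x /u 16) to i2) to i32)).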
6872
6873 case Instruction::Or:
6874 // If the RHS of the Or is a constant, we may have something like:
6875 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop
6876 // optimizations will transparently handle this case.
6877 //
6878 // In order for this transformation to be safe, the LHS must be of the
6879 // form X*(2^n) and the Or constant must be less than 2^n.
6880 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
6881 const SCEV *LHS = getSCEV(BO->LHS);
6882 const APInt &CIVal = CI->getValue();
6883 if (GetMinTrailingZeros(LHS) >=
6884 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) {
6885 // Build a plain add SCEV.
6886 return getAddExpr(LHS, getSCEV(CI),
6887 (SCEV::NoWrapFlags)(SCEV::FlagNUW | SCEV::FlagNSW));
6888 }
6889 }
6890 break;
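      // Sketch with made-up operands: for  %t = shl i32 %x, 2  and
      // %r = or i32 %t, 1,  SCEV(%t) = (4 * %x) has at least 2 trailing
      // zero bits and the constant 1 needs only 1 bit, so %r is modeled
      // as the add (1 + (4 * %x)).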
6891
6892 case Instruction::Xor:
6893 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
6894 // If the RHS of xor is -1, then this is a not operation.
6895 if (CI->isMinusOne())
6896 return getNotSCEV(getSCEV(BO->LHS));
6897
6898 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
6899 // This is a variant of the check for xor with -1, and it handles
6900 // the case where instcombine has trimmed non-demanded bits out
6901 // of an xor with -1.
6902 if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS))
6903 if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1)))
6904 if (LBO->getOpcode() == Instruction::And &&
6905 LCI->getValue() == CI->getValue())
6906 if (const SCEVZeroExtendExpr *Z =
6907 dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) {
6908 Type *UTy = BO->LHS->getType();
6909 const SCEV *Z0 = Z->getOperand();
6910 Type *Z0Ty = Z0->getType();
6911 unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
6912
6913 // If C is a low-bits mask, the zero extend is serving to
6914 // mask off the high bits. Complement the operand and
6915 // re-apply the zext.
6916 if (CI->getValue().isMask(Z0TySize))
6917 return getZeroExtendExpr(getNotSCEV(Z0), UTy);
6918
6919 // If C is a single bit, it may be in the sign-bit position
6920 // before the zero-extend. In this case, represent the xor
6921 // using an add, which is equivalent, and re-apply the zext.
6922 APInt Trunc = CI->getValue().trunc(Z0TySize);
6923 if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
6924 Trunc.isSignMask())
6925 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
6926 UTy);
6927 }
6928 }
6929 break;
6930
6931 case Instruction::Shl:
6932 // Turn shift left of a constant amount into a multiply.
6933 if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) {
6934 uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth();
6935
6936 // If the shift count is not less than the bitwidth, the result of
6937 // the shift is undefined. Don't try to analyze it, because the
6938 // resolution chosen here may differ from the resolution chosen in
6939 // other parts of the compiler.
6940 if (SA->getValue().uge(BitWidth))
6941 break;
6942
6943 // We can safely preserve the nuw flag in all cases. It's also safe to
6944 // turn a nuw nsw shl into a nuw nsw mul. However, nsw in isolation
6945 // requires special handling. It can be preserved as long as we're not
6946 // left shifting by bitwidth - 1.
6947 auto Flags = SCEV::FlagAnyWrap;
6948 if (BO->Op) {
6949 auto MulFlags = getNoWrapFlagsFromUB(BO->Op);
6950 if ((MulFlags & SCEV::FlagNSW) &&
6951 ((MulFlags & SCEV::FlagNUW) || SA->getValue().ult(BitWidth - 1)))
6952 Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNSW);
6953 if (MulFlags & SCEV::FlagNUW)
6954 Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNUW);
6955 }
6956
6957 Constant *X = ConstantInt::get(
6958 getContext(), APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
6959 return getMulExpr(getSCEV(BO->LHS), getSCEV(X), Flags);
6960 }
6961 break;
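      // Sketch:  shl nuw nsw i32 %x, 3  is modeled as (8 * %x)<nuw><nsw>.
      // With only nsw and a shift amount of 31 (bitwidth - 1), nsw is
      // deliberately dropped, as the comment above explains.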
6962
6963 case Instruction::AShr: {
6964 // AShr X, C, where C is a constant.
6965 ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS);
6966 if (!CI)
6967 break;
6968
6969 Type *OuterTy = BO->LHS->getType();
6970 uint64_t BitWidth = getTypeSizeInBits(OuterTy);
6971 // If the shift count is not less than the bitwidth, the result of
6972 // the shift is undefined. Don't try to analyze it, because the
6973 // resolution chosen here may differ from the resolution chosen in
6974 // other parts of the compiler.
6975 if (CI->getValue().uge(BitWidth))
6976 break;
6977
6978 if (CI->isZero())
6979 return getSCEV(BO->LHS); // shift by zero --> noop
6980
6981 uint64_t AShrAmt = CI->getZExtValue();
6982 Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt);
6983
6984 Operator *L = dyn_cast<Operator>(BO->LHS);
6985 if (L && L->getOpcode() == Instruction::Shl) {
6986 // X = Shl A, n
6987 // Y = AShr X, m
6988 // Both n and m are constant.
6989
6990 const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0));
6991 if (L->getOperand(1) == BO->RHS)
6992 // For a two-shift sext-inreg, i.e. n = m,
6993 // use sext(trunc(x)) as the SCEV expression.
6994 return getSignExtendExpr(
6995 getTruncateExpr(ShlOp0SCEV, TruncTy), OuterTy);
6996
6997 ConstantInt *ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1));
6998 if (ShlAmtCI && ShlAmtCI->getValue().ult(BitWidth)) {
6999 uint64_t ShlAmt = ShlAmtCI->getZExtValue();
7000 if (ShlAmt > AShrAmt) {
7001          // When n > m, use sext(mul(trunc(x), 2^(n-m))) as the SCEV
7002          // expression. We already checked that ShlAmt < BitWidth, so
7003          // the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy as
7004          // ShlAmt - AShrAmt < BitWidth - AShrAmt, the width of TruncTy.
7005 APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt,
7006 ShlAmt - AShrAmt);
7007 return getSignExtendExpr(
7008 getMulExpr(getTruncateExpr(ShlOp0SCEV, TruncTy),
7009 getConstant(Mul)), OuterTy);
7010 }
7011 }
7012 }
7013 break;
7014 }
7015 }
7016 }
7017
7018 switch (U->getOpcode()) {
7019 case Instruction::Trunc:
7020 return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
7021
7022 case Instruction::ZExt:
7023 return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
7024
7025 case Instruction::SExt:
7026 if (auto BO = MatchBinaryOp(U->getOperand(0), DT)) {
7027 // The NSW flag of a subtract does not always survive the conversion to
7028 // A + (-1)*B. By pushing sign extension onto its operands we are much
7029 // more likely to preserve NSW and allow later AddRec optimisations.
7030 //
7031 // NOTE: This is effectively duplicating this logic from getSignExtend:
7032 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
7033 // but by that point the NSW information has potentially been lost.
7034 if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
7035 Type *Ty = U->getType();
7036 auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty);
7037 auto *V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty);
7038 return getMinusSCEV(V1, V2, SCEV::FlagNSW);
7039 }
7040 }
7041 return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
7042
7043 case Instruction::BitCast:
7044 // BitCasts are no-op casts so we just eliminate the cast.
7045 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
7046 return getSCEV(U->getOperand(0));
7047 break;
7048
7049 case Instruction::PtrToInt: {
7050    // A pointer-to-integer cast is straightforward, so we do model it.
7051 const SCEV *Op = getSCEV(U->getOperand(0));
7052 Type *DstIntTy = U->getType();
7053    // But only if the effective SCEV (integer) type is wide enough to
7054    // represent all possible pointer values.
7055 const SCEV *IntOp = getPtrToIntExpr(Op, DstIntTy);
7056 if (isa<SCEVCouldNotCompute>(IntOp))
7057 return getUnknown(V);
7058 return IntOp;
7059 }
7060 case Instruction::IntToPtr:
7061 // Just don't deal with inttoptr casts.
7062 return getUnknown(V);
7063
7064 case Instruction::SDiv:
7065    // If both operands are non-negative, this is just a udiv.
7066 if (isKnownNonNegative(getSCEV(U->getOperand(0))) &&
7067 isKnownNonNegative(getSCEV(U->getOperand(1))))
7068 return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1)));
7069 break;
7070
7071 case Instruction::SRem:
7072    // If both operands are non-negative, this is just a urem.
7073 if (isKnownNonNegative(getSCEV(U->getOperand(0))) &&
7074 isKnownNonNegative(getSCEV(U->getOperand(1))))
7075 return getURemExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1)));
7076 break;
7077
7078 case Instruction::GetElementPtr:
7079 return createNodeForGEP(cast<GEPOperator>(U));
7080
7081 case Instruction::PHI:
7082 return createNodeForPHI(cast<PHINode>(U));
7083
7084 case Instruction::Select:
7085    // U can also be a select constant expr, which we let fall through. Since
7086    // createNodeForSelect only works for a condition that is an `ICmpInst`, and
7087    // constant expressions cannot have instructions as operands, we'd have
7088    // returned getUnknown for a select constant expression anyway.
7089 if (isa<Instruction>(U))
7090 return createNodeForSelectOrPHI(cast<Instruction>(U), U->getOperand(0),
7091 U->getOperand(1), U->getOperand(2));
7092 break;
7093
7094 case Instruction::Call:
7095 case Instruction::Invoke:
7096 if (Value *RV = cast<CallBase>(U)->getReturnedArgOperand())
7097 return getSCEV(RV);
7098
7099 if (auto *II = dyn_cast<IntrinsicInst>(U)) {
7100 switch (II->getIntrinsicID()) {
7101 case Intrinsic::abs:
7102 return getAbsExpr(
7103 getSCEV(II->getArgOperand(0)),
7104 /*IsNSW=*/cast<ConstantInt>(II->getArgOperand(1))->isOne());
7105 case Intrinsic::umax:
7106 return getUMaxExpr(getSCEV(II->getArgOperand(0)),
7107 getSCEV(II->getArgOperand(1)));
7108 case Intrinsic::umin:
7109 return getUMinExpr(getSCEV(II->getArgOperand(0)),
7110 getSCEV(II->getArgOperand(1)));
7111 case Intrinsic::smax:
7112 return getSMaxExpr(getSCEV(II->getArgOperand(0)),
7113 getSCEV(II->getArgOperand(1)));
7114 case Intrinsic::smin:
7115 return getSMinExpr(getSCEV(II->getArgOperand(0)),
7116 getSCEV(II->getArgOperand(1)));
7117 case Intrinsic::usub_sat: {
7118 const SCEV *X = getSCEV(II->getArgOperand(0));
7119 const SCEV *Y = getSCEV(II->getArgOperand(1));
7120 const SCEV *ClampedY = getUMinExpr(X, Y);
7121 return getMinusSCEV(X, ClampedY, SCEV::FlagNUW);
7122 }
7123 case Intrinsic::uadd_sat: {
7124 const SCEV *X = getSCEV(II->getArgOperand(0));
7125 const SCEV *Y = getSCEV(II->getArgOperand(1));
7126 const SCEV *ClampedX = getUMinExpr(X, getNotSCEV(Y));
7127 return getAddExpr(ClampedX, Y, SCEV::FlagNUW);
7128 }
7129 case Intrinsic::start_loop_iterations:
7130 // A start_loop_iterations is just equivalent to the first operand for
7131 // SCEV purposes.
7132 return getSCEV(II->getArgOperand(0));
7133 default:
7134 break;
7135 }
7136 }
7137 break;
7138 }
7139
7140 return getUnknown(V);
7141}
7142
7143//===----------------------------------------------------------------------===//
7144// Iteration Count Computation Code
7145//
7146
7147const SCEV *ScalarEvolution::getTripCountFromExitCount(const SCEV *ExitCount) {
7148  // Get the trip count from the BE count by adding 1. Overflow results
7149  // in zero, which means "unknown".
7150 return getAddExpr(ExitCount, getOne(ExitCount->getType()));
7151}
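Note (worked example, hypothetical values): an i32 backedge-taken count of 7 maps to a trip count of 8; a backedge-taken count of 0xFFFFFFFF makes the add wrap to 0, which callers of this API read as "unknown" -- the wrap is the intended encoding, not a bug.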
7152
7153static unsigned getConstantTripCount(const SCEVConstant *ExitCount) {
7154 if (!ExitCount)
7155 return 0;
7156
7157 ConstantInt *ExitConst = ExitCount->getValue();
7158
7159 // Guard against huge trip counts.
7160 if (ExitConst->getValue().getActiveBits() > 32)
7161 return 0;
7162
7163 // In case of integer overflow, this returns 0, which is correct.
7164 return ((unsigned)ExitConst->getZExtValue()) + 1;
7165}
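Note (hypothetical values, for illustration): both "give up" paths above yield 0. An i64 exit count of 2^40 has 41 active bits and trips the huge-count guard; an i32 exit count of 0xFFFFFFFF passes the guard, but ((unsigned)0xFFFFFFFF) + 1 wraps to 0, which is also the correct "unknown" answer for a trip count of 2^32.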
7166
7167unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) {
7168 auto *ExitCount = dyn_cast<SCEVConstant>(getBackedgeTakenCount(L, Exact));
7169 return getConstantTripCount(ExitCount);
7170}
7171
7172unsigned
7173ScalarEvolution::getSmallConstantTripCount(const Loop *L,
7174 const BasicBlock *ExitingBlock) {
7175  assert(ExitingBlock && "Must pass a non-null exiting block!");
7176  assert(L->isLoopExiting(ExitingBlock) &&
7177         "Exiting block must actually branch out of the loop!");
7178 const SCEVConstant *ExitCount =
7179 dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
7180 return getConstantTripCount(ExitCount);
7181}
7182
7183unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) {
7184 const auto *MaxExitCount =
7185 dyn_cast<SCEVConstant>(getConstantMaxBackedgeTakenCount(L));
7186 return getConstantTripCount(MaxExitCount);
7187}
7188
7189unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) {
7190 SmallVector<BasicBlock *, 8> ExitingBlocks;
7191 L->getExitingBlocks(ExitingBlocks);
7192
7193 Optional<unsigned> Res = None;
7194 for (auto *ExitingBB : ExitingBlocks) {
7195 unsigned Multiple = getSmallConstantTripMultiple(L, ExitingBB);
7196 if (!Res)
7197 Res = Multiple;
7198 Res = (unsigned)GreatestCommonDivisor64(*Res, Multiple);
7199 }
7200 return Res.getValueOr(1);
7201}
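Note (worked example with assumed values): with two exiting blocks reporting trip multiples 8 and 12, the loop folds them as gcd(8, 12) == 4; with no exiting blocks at all, Res stays None and the function falls back to 1.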
7202
7203unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
7204 const SCEV *ExitCount) {
7205 if (ExitCount == getCouldNotCompute())
7206 return 1;
7207
7208 // Get the trip count
7209 const SCEV *TCExpr = getTripCountFromExitCount(ExitCount);
7210
7211 const SCEVConstant *TC = dyn_cast<SCEVConstant>(TCExpr);
7212 if (!TC)
7213 // Attempt to factor more general cases. Returns the greatest power of
7214 // two divisor. If overflow happens, the trip count expression is still
7215 // divisible by the greatest power of 2 divisor returned.
7216 return 1U << std::min((uint32_t)31,
7217 GetMinTrailingZeros(applyLoopGuards(TCExpr, L)));
7218
7219 ConstantInt *Result = TC->getValue();
7220
7221 // Guard against huge trip counts (this requires checking
7222 // for zero to handle the case where the trip count == -1 and the
7223 // addition wraps).
7224 if (!Result || Result->getValue().getActiveBits() > 32 ||
7225 Result->getValue().getActiveBits() == 0)
7226 return 1;
7227
7228 return (unsigned)Result->getZExtValue();
7229}
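Note (worked example with an assumed trip count expression): for TCExpr = (8 + 4 * %n) the SCEVConstant cast fails, but the expression has at least 2 trailing zero bits for every value of %n (min of 3 for the constant 8 and 2 for 4 * %n), so the factoring path returns 1 << 2 == 4 -- the trip count is always divisible by 4.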
7230
7231/// Returns the largest constant divisor of the trip count of this loop as a
7232/// normal unsigned value, if possible. This means that the actual trip count is
7233/// always a multiple of the returned value (don't forget the trip count could
7234/// very well be zero as well!).
7235///
7236/// Returns 1 if the trip count is unknown or not guaranteed to be a
7237/// multiple of a constant (which is also the case if the trip count is simply
7238/// constant; use getSmallConstantTripCount for that case). It will also
7239/// return 1 if the trip count is very large (>= 2^32).
7240///
7241/// As explained in the comments for getSmallConstantTripCount, this assumes
7242/// that control exits the loop via ExitingBlock.
7243unsigned
7244ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
7245 const BasicBlock *ExitingBlock) {
7246  assert(ExitingBlock && "Must pass a non-null exiting block!");
7247  assert(L->isLoopExiting(ExitingBlock) &&
7248         "Exiting block must actually branch out of the loop!");
7249 const SCEV *ExitCount = getExitCount(L, ExitingBlock);
7250 return getSmallConstantTripMultiple(L, ExitCount);
7251}
7252
7253const SCEV *ScalarEvolution::getExitCount(const Loop *L,
7254 const BasicBlock *ExitingBlock,
7255 ExitCountKind Kind) {
7256 switch (Kind) {
7257 case Exact:
7258 case SymbolicMaximum:
7259 return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
7260 case ConstantMaximum:
7261 return getBackedgeTakenInfo(L).getConstantMax(ExitingBlock, this);
7262 };
7263  llvm_unreachable("Invalid ExitCountKind!");
7264}
7265
7266const SCEV *
7267ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L,
7268 SCEVUnionPredicate &Preds) {
7269 return getPredicatedBackedgeTakenInfo(L).getExact(L, this, &Preds);
7270}
7271
7272const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L,
7273 ExitCountKind Kind) {
7274 switch (Kind) {
7275 case Exact:
7276 return getBackedgeTakenInfo(L).getExact(L, this);
7277 case ConstantMaximum:
7278 return getBackedgeTakenInfo(L).getConstantMax(this);
7279 case SymbolicMaximum:
7280 return getBackedgeTakenInfo(L).getSymbolicMax(L, this);
7281 };
7282  llvm_unreachable("Invalid ExitCountKind!");
7283}
7284
7285bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) {
7286 return getBackedgeTakenInfo(L).isConstantMaxOrZero(this);
7287}
7288
7289/// Push PHI nodes in the header of the given loop onto the given Worklist.
7290static void
7291PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
7292 BasicBlock *Header = L->getHeader();
7293
7294 // Push all Loop-header PHIs onto the Worklist stack.
7295 for (PHINode &PN : Header->phis())
7296 Worklist.push_back(&PN);
7297}
7298
7299const ScalarEvolution::BackedgeTakenInfo &
7300ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) {
7301 auto &BTI = getBackedgeTakenInfo(L);
7302 if (BTI.hasFullInfo())
7303 return BTI;
7304
7305 auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
7306
7307 if (!Pair.second)
7308 return Pair.first->second;
7309
7310 BackedgeTakenInfo Result =
7311 computeBackedgeTakenCount(L, /*AllowPredicates=*/true);
7312
7313 return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result);
7314}
7315
7316ScalarEvolution::BackedgeTakenInfo &
7317ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
7318 // Initially insert an invalid entry for this loop. If the insertion
7319 // succeeds, proceed to actually compute a backedge-taken count and
7320 // update the value. The temporary CouldNotCompute value tells SCEV
7321 // code elsewhere that it shouldn't attempt to request a new
7322 // backedge-taken count, which could result in infinite recursion.
7323 std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
7324 BackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
7325 if (!Pair.second)
7326 return Pair.first->second;
7327
7328 // computeBackedgeTakenCount may allocate memory for its result. Inserting it
7329 // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result
7330 // must be cleared in this scope.
7331 BackedgeTakenInfo Result = computeBackedgeTakenCount(L);
7332
7333  // In a production build, the statistics are unused.
7334 (void)NumTripCountsComputed;
7335 (void)NumTripCountsNotComputed;
7336#if LLVM_ENABLE_STATS || !defined(NDEBUG)
7337 const SCEV *BEExact = Result.getExact(L, this);
7338 if (BEExact != getCouldNotCompute()) {
7339    assert(isLoopInvariant(BEExact, L) &&
7340           isLoopInvariant(Result.getConstantMax(this), L) &&
7341           "Computed backedge-taken count isn't loop invariant for loop!");
7342 ++NumTripCountsComputed;
7343 } else if (Result.getConstantMax(this) == getCouldNotCompute() &&
7344 isa<PHINode>(L->getHeader()->begin())) {
7345 // Only count loops that have phi nodes as not being computable.
7346 ++NumTripCountsNotComputed;
7347 }
7348#endif // LLVM_ENABLE_STATS || !defined(NDEBUG)
7349
7350 // Now that we know more about the trip count for this loop, forget any
7351 // existing SCEV values for PHI nodes in this loop since they are only
7352 // conservative estimates made without the benefit of trip count
7353 // information. This is similar to the code in forgetLoop, except that
7354 // it handles SCEVUnknown PHI nodes specially.
7355 if (Result.hasAnyInfo()) {
7356 SmallVector<Instruction *, 16> Worklist;
7357 PushLoopPHIs(L, Worklist);
7358
7359 SmallPtrSet<Instruction *, 8> Discovered;
7360 while (!Worklist.empty()) {
7361 Instruction *I = Worklist.pop_back_val();
7362
7363 ValueExprMapType::iterator It =
7364 ValueExprMap.find_as(static_cast<Value *>(I));
7365 if (It != ValueExprMap.end()) {
7366 const SCEV *Old = It->second;
7367
7368 // SCEVUnknown for a PHI either means that it has an unrecognized
7369        // structure, or it's a PHI that's in the process of being computed
7370        // by createNodeForPHI. In the former case, additional loop trip
7371        // count information isn't going to change anything. In the latter
7372        // case, createNodeForPHI will perform the necessary updates on its
7373 // own when it gets to that point.
7374 if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
7375 eraseValueFromMap(It->first);
7376 forgetMemoizedResults(Old);
7377 }
7378 if (PHINode *PN = dyn_cast<PHINode>(I))
7379 ConstantEvolutionLoopExitValue.erase(PN);
7380 }
7381
7382 // Since we don't need to invalidate anything for correctness and we're
7383 // only invalidating to make SCEV's results more precise, we get to stop
7384 // early to avoid invalidating too much. This is especially important in
7385 // cases like:
7386 //
7387 // %v = f(pn0, pn1) // pn0 and pn1 used through some other phi node
7388 // loop0:
7389 // %pn0 = phi
7390 // ...
7391 // loop1:
7392 // %pn1 = phi
7393 // ...
7394 //
7395    // where both loop0's and loop1's backedge-taken counts use the SCEV
7396 // expression for %v. If we don't have the early stop below then in cases
7397 // like the above, getBackedgeTakenInfo(loop1) will clear out the trip
7398 // count for loop0 and getBackedgeTakenInfo(loop0) will clear out the trip
7399 // count for loop1, effectively nullifying SCEV's trip count cache.
7400 for (auto *U : I->users())
7401 if (auto *I = dyn_cast<Instruction>(U)) {
7402 auto *LoopForUser = LI.getLoopFor(I->getParent());
7403 if (LoopForUser && L->contains(LoopForUser) &&
7404 Discovered.insert(I).second)
7405 Worklist.push_back(I);
7406 }
7407 }
7408 }
7409
7410 // Re-lookup the insert position, since the call to
7411 // computeBackedgeTakenCount above could result in a
7412  // recursive call to getBackedgeTakenInfo (on a different
7413 // loop), which would invalidate the iterator computed
7414 // earlier.
7415 return BackedgeTakenCounts.find(L)->second = std::move(Result);
7416}
7417
7418void ScalarEvolution::forgetAllLoops() {
7419 // This method is intended to forget all info about loops. It should
7420 // invalidate caches as if the following happened:
7421 // - The trip counts of all loops have changed arbitrarily
7422 // - Every llvm::Value has been updated in place to produce a different
7423 // result.
7424 BackedgeTakenCounts.clear();
7425 PredicatedBackedgeTakenCounts.clear();
7426 LoopPropertiesCache.clear();
7427 ConstantEvolutionLoopExitValue.clear();
7428 ValueExprMap.clear();
7429 ValuesAtScopes.clear();
7430 LoopDispositions.clear();
7431 BlockDispositions.clear();
7432 UnsignedRanges.clear();
7433 SignedRanges.clear();
7434 ExprValueMap.clear();
7435 HasRecMap.clear();
7436 MinTrailingZerosCache.clear();
7437 PredicatedSCEVRewrites.clear();
7438}
7439
7440void ScalarEvolution::forgetLoop(const Loop *L) {
7441 SmallVector<const Loop *, 16> LoopWorklist(1, L);
7442 SmallVector<Instruction *, 32> Worklist;
7443 SmallPtrSet<Instruction *, 16> Visited;
7444
7445 // Iterate over all the loops and sub-loops to drop SCEV information.
7446 while (!LoopWorklist.empty()) {
7447 auto *CurrL = LoopWorklist.pop_back_val();
7448
7449 // Drop any stored trip count value.
7450 BackedgeTakenCounts.erase(CurrL);
7451 PredicatedBackedgeTakenCounts.erase(CurrL);
7452
7453 // Drop information about predicated SCEV rewrites for this loop.
7454 for (auto I = PredicatedSCEVRewrites.begin();
7455 I != PredicatedSCEVRewrites.end();) {
7456 std::pair<const SCEV *, const Loop *> Entry = I->first;
7457 if (Entry.second == CurrL)
7458 PredicatedSCEVRewrites.erase(I++);
7459 else
7460 ++I;
7461 }
7462
7463 auto LoopUsersItr = LoopUsers.find(CurrL);
7464 if (LoopUsersItr != LoopUsers.end()) {
7465 for (auto *S : LoopUsersItr->second)
7466 forgetMemoizedResults(S);
7467 LoopUsers.erase(LoopUsersItr);
7468 }
7469
7470 // Drop information about expressions based on loop-header PHIs.
7471 PushLoopPHIs(CurrL, Worklist);
7472
7473 while (!Worklist.empty()) {
7474 Instruction *I = Worklist.pop_back_val();
7475 if (!Visited.insert(I).second)
7476 continue;
7477
7478 ValueExprMapType::iterator It =
7479 ValueExprMap.find_as(static_cast<Value *>(I));
7480 if (It != ValueExprMap.end()) {
7481 eraseValueFromMap(It->first);
7482 forgetMemoizedResults(It->second);
7483 if (PHINode *PN = dyn_cast<PHINode>(I))
7484 ConstantEvolutionLoopExitValue.erase(PN);
7485 }
7486
7487 PushDefUseChildren(I, Worklist);
7488 }
7489
7490 LoopPropertiesCache.erase(CurrL);
7491 // Forget all contained loops too, to avoid dangling entries in the
7492 // ValuesAtScopes map.
7493 LoopWorklist.append(CurrL->begin(), CurrL->end());
7494 }
7495}
7496
7497void ScalarEvolution::forgetTopmostLoop(const Loop *L) {
7498 while (Loop *Parent = L->getParentLoop())
7499 L = Parent;
7500 forgetLoop(L);
7501}
7502
7503void ScalarEvolution::forgetValue(Value *V) {
7504 Instruction *I = dyn_cast<Instruction>(V);
7505 if (!I) return;
7506
7507 // Drop information about expressions based on loop-header PHIs.
7508 SmallVector<Instruction *, 16> Worklist;
7509 Worklist.push_back(I);
7510
7511 SmallPtrSet<Instruction *, 8> Visited;
7512 while (!Worklist.empty()) {
7513 I = Worklist.pop_back_val();
7514 if (!Visited.insert(I).second)
7515 continue;
7516
7517 ValueExprMapType::iterator It =
7518 ValueExprMap.find_as(static_cast<Value *>(I));
7519 if (It != ValueExprMap.end()) {
7520 eraseValueFromMap(It->first);
7521 forgetMemoizedResults(It->second);
7522 if (PHINode *PN = dyn_cast<PHINode>(I))
7523 ConstantEvolutionLoopExitValue.erase(PN);
7524 }
7525
7526 PushDefUseChildren(I, Worklist);
7527 }
7528}
7529
7530void ScalarEvolution::forgetLoopDispositions(const Loop *L) {
7531 LoopDispositions.clear();
7532}
7533
7534/// Get the exact loop backedge taken count considering all loop exits. A
7535/// computable result can only be returned for loops with all exiting blocks
7536/// dominating the latch. howFarToZero assumes that the limit of each loop test
7537/// is never skipped. This is a valid assumption as long as the loop exits via
7538/// that test. For precise results, it is the caller's responsibility to specify
7539/// the relevant loop exiting block using getExact(ExitingBlock, SE).
7540const SCEV *
7541ScalarEvolution::BackedgeTakenInfo::getExact(const Loop *L, ScalarEvolution *SE,
7542 SCEVUnionPredicate *Preds) const {
7543 // If any exits were not computable, the loop is not computable.
7544 if (!isComplete() || ExitNotTaken.empty())
7545 return SE->getCouldNotCompute();
7546
7547 const BasicBlock *Latch = L->getLoopLatch();
7548 // All exiting blocks we have collected must dominate the only backedge.
7549 if (!Latch)
7550 return SE->getCouldNotCompute();
7551
7552  // All exiting blocks we have gathered dominate the loop's latch, so the
7553  // exact trip count is simply the minimum of all these calculated exit counts.
7554 SmallVector<const SCEV *, 2> Ops;
7555 for (auto &ENT : ExitNotTaken) {
7556 const SCEV *BECount = ENT.ExactNotTaken;
7557    assert(BECount != SE->getCouldNotCompute() && "Bad exit SCEV!");
7558    assert(SE->DT.dominates(ENT.ExitingBlock, Latch) &&
7559           "We should only have known counts for exiting blocks that dominate "
7560           "latch!");
7561
7562 Ops.push_back(BECount);
7563
7564 if (Preds && !ENT.hasAlwaysTruePredicate())
7565 Preds->add(ENT.Predicate.get());
7566
7567    assert((Preds || ENT.hasAlwaysTruePredicate()) &&
7568           "Predicate should be always true!");
7569 }
7570
7571 return SE->getUMinFromMismatchedTypes(Ops);
7572}
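Note (hypothetical counts, for illustration): for a loop with two computable exits that both dominate the latch, one taken after %n iterations and the other after a constant 10, the expression built here is (10 umin %n) in SCEV notation -- whichever exit is reached first ends the loop, so the minimum of the per-exit counts is the exact backedge-taken count.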
7573
7574/// Get the exact not taken count for this loop exit.
7575const SCEV *
7576ScalarEvolution::BackedgeTakenInfo::getExact(const BasicBlock *ExitingBlock,
7577 ScalarEvolution *SE) const {
7578 for (auto &ENT : ExitNotTaken)
7579 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate())
7580 return ENT.ExactNotTaken;
7581
7582 return SE->getCouldNotCompute();
7583}
7584
7585const SCEV *ScalarEvolution::BackedgeTakenInfo::getConstantMax(
7586 const BasicBlock *ExitingBlock, ScalarEvolution *SE) const {
7587 for (auto &ENT : ExitNotTaken)
7588 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate())
7589 return ENT.MaxNotTaken;
7590
7591 return SE->getCouldNotCompute();
7592}
7593
7594/// getConstantMax - Get the constant max backedge taken count for the loop.
7595const SCEV *
7596ScalarEvolution::BackedgeTakenInfo::getConstantMax(ScalarEvolution *SE) const {
7597 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) {
7598 return !ENT.hasAlwaysTruePredicate();
7599 };
7600
7601 if (any_of(ExitNotTaken, PredicateNotAlwaysTrue) || !getConstantMax())
7602 return SE->getCouldNotCompute();
7603
7604  assert((isa<SCEVCouldNotCompute>(getConstantMax()) ||
7605          isa<SCEVConstant>(getConstantMax())) &&
7606         "No point in having a non-constant max backedge taken count!");
7607 return getConstantMax();
7608}
7609
7610const SCEV *
7611ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(const Loop *L,
7612 ScalarEvolution *SE) {
7613 if (!SymbolicMax)
7614 SymbolicMax = SE->computeSymbolicMaxBackedgeTakenCount(L);
7615 return SymbolicMax;
7616}
7617
7618bool ScalarEvolution::BackedgeTakenInfo::isConstantMaxOrZero(
7619 ScalarEvolution *SE) const {
7620 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) {
7621 return !ENT.hasAlwaysTruePredicate();
7622 };
7623 return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue);
7624}
7625
7626bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S) const {
7627 return Operands.contains(S);
7628}
7629
7630ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E)
7631 : ExitLimit(E, E, false, None) {
7632}
7633
7634ScalarEvolution::ExitLimit::ExitLimit(
7635 const SCEV *E, const SCEV *M, bool MaxOrZero,
7636 ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList)
7637 : ExactNotTaken(E), MaxNotTaken(M), MaxOrZero(MaxOrZero) {
7638 // If we prove the max count is zero, so is the symbolic bound. This happens
7639  // in practice due to differences in a) how context-sensitive we've chosen
7640  // to be and b) how we reason about bounds implied by UB.
7641 if (MaxNotTaken->isZero())
7642 ExactNotTaken = MaxNotTaken;
7643
7644  assert((isa<SCEVCouldNotCompute>(ExactNotTaken) ||
7645          !isa<SCEVCouldNotCompute>(MaxNotTaken)) &&
7646         "Exact is not allowed to be less precise than Max");
7647  assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
7648          isa<SCEVConstant>(MaxNotTaken)) &&
7649         "No point in having a non-constant max backedge taken count!");
7650 for (auto *PredSet : PredSetList)
7651 for (auto *P : *PredSet)
7652 addPredicate(P);
7653  assert((isa<SCEVCouldNotCompute>(E) || !E->getType()->isPointerTy()) &&
7654         "Backedge count should be int");
7655  assert((isa<SCEVCouldNotCompute>(M) || !M->getType()->isPointerTy()) &&
7656         "Max backedge count should be int");
7657}
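Note (hypothetical call, for illustration): the zero clamp above means that constructing an ExitLimit whose MaxNotTaken is a constant zero forces ExactNotTaken to zero as well, even if the caller passed a symbolic exact count -- a provably-zero maximum leaves no room for any other exact value.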
7658
7659ScalarEvolution::ExitLimit::ExitLimit(
7660 const SCEV *E, const SCEV *M, bool MaxOrZero,
7661 const SmallPtrSetImpl<const SCEVPredicate *> &PredSet)
7662 : ExitLimit(E, M, MaxOrZero, {&PredSet}) {
7663}
7664
7665ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E, const SCEV *M,
7666 bool MaxOrZero)
7667 : ExitLimit(E, M, MaxOrZero, None) {
7668}
7669
7670class SCEVRecordOperands {
7671 SmallPtrSetImpl<const SCEV *> &Operands;
7672
7673public:
7674 SCEVRecordOperands(SmallPtrSetImpl<const SCEV *> &Operands)
7675 : Operands(Operands) {}
7676 bool follow(const SCEV *S) {
7677 Operands.insert(S);
7678 return true;
7679 }
7680 bool isDone() { return false; }
7681};
7682
7683/// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each
7684/// computable exit into a persistent ExitNotTakenInfo array.
7685ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
7686 ArrayRef<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo> ExitCounts,
7687 bool IsComplete, const SCEV *ConstantMax, bool MaxOrZero)
7688 : ConstantMax(ConstantMax), IsComplete(IsComplete), MaxOrZero(MaxOrZero) {
7689 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
7690
7691 ExitNotTaken.reserve(ExitCounts.size());
7692 std::transform(
7693 ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken),
7694 [&](const EdgeExitInfo &EEI) {
7695 BasicBlock *ExitBB = EEI.first;
7696 const ExitLimit &EL = EEI.second;
7697 if (EL.Predicates.empty())
7698 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken,
7699 nullptr);
7700
7701 std::unique_ptr<SCEVUnionPredicate> Predicate(new SCEVUnionPredicate);
7702 for (auto *Pred : EL.Predicates)
7703 Predicate->add(Pred);
7704
7705 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken,
7706 std::move(Predicate));
7707 });
7708  assert((isa<SCEVCouldNotCompute>(ConstantMax) ||
7709          isa<SCEVConstant>(ConstantMax)) &&
7710         "No point in having a non-constant max backedge taken count!");
7711
7712 SCEVRecordOperands RecordOperands(Operands);
7713 SCEVTraversal<SCEVRecordOperands> ST(RecordOperands);
7714 if (!isa<SCEVCouldNotCompute>(ConstantMax))
7715 ST.visitAll(ConstantMax);
7716 for (auto &ENT : ExitNotTaken)
7717 if (!isa<SCEVCouldNotCompute>(ENT.ExactNotTaken))
7718 ST.visitAll(ENT.ExactNotTaken);
7719}
7720
7721/// Compute the number of times the backedge of the specified loop will execute.
7722ScalarEvolution::BackedgeTakenInfo
7723ScalarEvolution::computeBackedgeTakenCount(const Loop *L,
7724 bool AllowPredicates) {
7725 SmallVector<BasicBlock *, 8> ExitingBlocks;
7726 L->getExitingBlocks(ExitingBlocks);
7727
7728 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
7729
7730 SmallVector<EdgeExitInfo, 4> ExitCounts;
7731 bool CouldComputeBECount = true;
7732 BasicBlock *Latch = L->getLoopLatch(); // may be NULL.
7733 const SCEV *MustExitMaxBECount = nullptr;
7734 const SCEV *MayExitMaxBECount = nullptr;
7735 bool MustExitMaxOrZero = false;
7736
7737 // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts
7738 // and compute maxBECount.
7739 // Do a union of all the predicates here.
7740 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
7741 BasicBlock *ExitBB = ExitingBlocks[i];
7742
7743    // We canonicalize untaken exits to br (constant); ignore them so that
7744    // proving an exit untaken doesn't negatively impact our ability to reason
7745    // about the loop as a whole.
7746 if (auto *BI = dyn_cast<BranchInst>(ExitBB->getTerminator()))
7747 if (auto *CI = dyn_cast<ConstantInt>(BI->getCondition())) {
7748 bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
7749 if ((ExitIfTrue && CI->isZero()) || (!ExitIfTrue && CI->isOne()))
7750 continue;
7751 }
7752
7753 ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates);
7754
7755    assert((AllowPredicates || EL.Predicates.empty()) &&
7756           "Predicated exit limit when predicates are not allowed!");
7757
7758 // 1. For each exit that can be computed, add an entry to ExitCounts.
7759 // CouldComputeBECount is true only if all exits can be computed.
7760 if (EL.ExactNotTaken == getCouldNotCompute())
7761 // We couldn't compute an exact value for this exit, so
7762 // we won't be able to compute an exact value for the loop.
7763 CouldComputeBECount = false;
7764 else
7765 ExitCounts.emplace_back(ExitBB, EL);
7766
7767 // 2. Derive the loop's MaxBECount from each exit's max number of
7768 // non-exiting iterations. Partition the loop exits into two kinds:
7769 // LoopMustExits and LoopMayExits.
7770 //
7771 // If the exit dominates the loop latch, it is a LoopMustExit otherwise it
7772 // is a LoopMayExit. If any computable LoopMustExit is found, then
7773 // MaxBECount is the minimum EL.MaxNotTaken of computable
7774 // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum
7775 // EL.MaxNotTaken, where CouldNotCompute is considered greater than any
7776 // computable EL.MaxNotTaken.
7777 if (EL.MaxNotTaken != getCouldNotCompute() && Latch &&
7778 DT.dominates(ExitBB, Latch)) {
7779 if (!MustExitMaxBECount) {
7780 MustExitMaxBECount = EL.MaxNotTaken;
7781 MustExitMaxOrZero = EL.MaxOrZero;
7782 } else {
7783 MustExitMaxBECount =
7784 getUMinFromMismatchedTypes(MustExitMaxBECount, EL.MaxNotTaken);
7785 }
7786 } else if (MayExitMaxBECount != getCouldNotCompute()) {
7787 if (!MayExitMaxBECount || EL.MaxNotTaken == getCouldNotCompute())
7788 MayExitMaxBECount = EL.MaxNotTaken;
7789 else {
7790 MayExitMaxBECount =
7791 getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.MaxNotTaken);
7792 }
7793 }
7794 }
7795 const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
7796 (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute());
7797 // The loop backedge will be taken the maximum or zero times if there's
7798 // a single exit that must be taken the maximum or zero times.
7799 bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1);
7800 return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount,
7801 MaxBECount, MaxOrZero);
7802}
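Note (illustrative sketch of the partition above, with assumed exits):

  exit A: dominates the latch, MaxNotTaken = 100            (a LoopMustExit)
  exit B: behind a data-dependent branch, limit not computed (a LoopMayExit)

A is tested on every iteration that reaches the backedge, so MaxBECount = 100; B may never be reached, so once any computable must-exit exists, only the must-exit bounds are trusted.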
7803
7804ScalarEvolution::ExitLimit
7805ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock,
7806 bool AllowPredicates) {
7807  assert(L->contains(ExitingBlock) && "Exit count for non-loop block?");
7808 // If our exiting block does not dominate the latch, then its connection with
7809  // the loop's exit limit may be far from trivial.
7810 const BasicBlock *Latch = L->getLoopLatch();
7811 if (!Latch || !DT.dominates(ExitingBlock, Latch))
7812 return getCouldNotCompute();
7813
7814 bool IsOnlyExit = (L->getExitingBlock() != nullptr);
7815 Instruction *Term = ExitingBlock->getTerminator();
7816 if (BranchInst *BI = dyn_cast<BranchInst>(Term)) {
7817    assert(BI->isConditional() && "If unconditional, it can't be in loop!");
7818 bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
7819    assert(ExitIfTrue == L->contains(BI->getSuccessor(1)) &&
7820           "It should have one successor in loop and one exit block!");
7821 // Proceed to the next level to examine the exit condition expression.
7822 return computeExitLimitFromCond(
7823 L, BI->getCondition(), ExitIfTrue,
7824 /*ControlsExit=*/IsOnlyExit, AllowPredicates);
7825 }
7826
7827 if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) {
7828 // For switch, make sure that there is a single exit from the loop.
7829 BasicBlock *Exit = nullptr;
7830 for (auto *SBB : successors(ExitingBlock))
7831 if (!L->contains(SBB)) {
7832 if (Exit) // Multiple exit successors.
7833 return getCouldNotCompute();
7834 Exit = SBB;
7835 }
7836    assert(Exit && "Exiting block must have at least one exit");
7837 return computeExitLimitFromSingleExitSwitch(L, SI, Exit,
7838 /*ControlsExit=*/IsOnlyExit);
7839 }
7840
7841 return getCouldNotCompute();
7842}
7843
7844ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond(
7845 const Loop *L, Value *ExitCond, bool ExitIfTrue,
7846 bool ControlsExit, bool AllowPredicates) {
7847 ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates);
7848 return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue,
7849 ControlsExit, AllowPredicates);
7850}
7851
7852Optional<ScalarEvolution::ExitLimit>
7853ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond,
7854 bool ExitIfTrue, bool ControlsExit,
7855 bool AllowPredicates) {
7856 (void)this->L;
7857 (void)this->ExitIfTrue;
7858 (void)this->AllowPredicates;
7859
7860  assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
7861         this->AllowPredicates == AllowPredicates &&
7862         "Variance in assumed invariant key components!");
7863 auto Itr = TripCountMap.find({ExitCond, ControlsExit});
7864 if (Itr == TripCountMap.end())
7865 return None;
7866 return Itr->second;
7867}
7868
7869void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond,
7870 bool ExitIfTrue,
7871 bool ControlsExit,
7872 bool AllowPredicates,
7873 const ExitLimit &EL) {
7874  assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
7875         this->AllowPredicates == AllowPredicates &&
7876         "Variance in assumed invariant key components!");
7877
7878 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL});
7879  assert(InsertResult.second && "Expected successful insertion!");
7880 (void)InsertResult;
7881 (void)ExitIfTrue;
7882}
7883
7884ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached(
7885 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
7886 bool ControlsExit, bool AllowPredicates) {
7887
7888 if (auto MaybeEL =
7889 Cache.find(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates))
7890 return *MaybeEL;
7891
7892 ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, ExitIfTrue,
7893 ControlsExit, AllowPredicates);
7894 Cache.insert(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates, EL);
7895 return EL;
7896}
7897
7898ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl(
7899 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
7900 bool ControlsExit, bool AllowPredicates) {
7901 // Handle BinOp conditions (And, Or).
7902 if (auto LimitFromBinOp = computeExitLimitFromCondFromBinOp(
7903 Cache, L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates))
7904 return *LimitFromBinOp;
7905
7906 // With an icmp, it may be feasible to compute an exact backedge-taken count.
7907 // Proceed to the next level to examine the icmp.
7908 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) {
7909 ExitLimit EL =
7910 computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit);
7911 if (EL.hasFullInfo() || !AllowPredicates)
7912 return EL;
7913
7914 // Try again, but use SCEV predicates this time.
7915 return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit,
7916 /*AllowPredicates=*/true);
7917 }
7918
7919 // Check for a constant condition. These are normally stripped out by
7920 // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to
7921 // preserve the CFG and is temporarily leaving constant conditions
7922 // in place.
7923 if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
7924 if (ExitIfTrue == !CI->getZExtValue())
7925 // The backedge is always taken.
7926 return getCouldNotCompute();
7927 else
7928 // The backedge is never taken.
7929 return getZero(CI->getType());
7930 }
7931
7932 // If it's not an integer or pointer comparison then compute it the hard way.
7933 return computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
7934}
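Note (hypothetical branch, for illustration): for the constant-condition case above, take a constant %c in "br i1 %c, label %exit, label %loop", i.e. ExitIfTrue == true. If %c is false, the exit edge is never taken and the function returns getCouldNotCompute() (the backedge is always taken); if %c is true, the loop exits immediately and the count is zero.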
7935
7936Optional<ScalarEvolution::ExitLimit>
7937ScalarEvolution::computeExitLimitFromCondFromBinOp(
7938 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
7939 bool ControlsExit, bool AllowPredicates) {
7940 // Check if the controlling expression for this loop is an And or Or.
7941 Value *Op0, *Op1;
7942 bool IsAnd = false;
7943 if (match(ExitCond, m_LogicalAnd(m_Value(Op0), m_Value(Op1))))
7944 IsAnd = true;
7945 else if (match(ExitCond, m_LogicalOr(m_Value(Op0), m_Value(Op1))))
7946 IsAnd = false;
7947 else
7948 return None;
7949
7950 // EitherMayExit is true in these two cases:
7951 // br (and Op0 Op1), loop, exit
7952 // br (or Op0 Op1), exit, loop
7953 bool EitherMayExit = IsAnd ^ ExitIfTrue;
7954 ExitLimit EL0 = computeExitLimitFromCondCached(Cache, L, Op0, ExitIfTrue,
7955 ControlsExit && !EitherMayExit,
7956 AllowPredicates);
7957 ExitLimit EL1 = computeExitLimitFromCondCached(Cache, L, Op1, ExitIfTrue,
7958 ControlsExit && !EitherMayExit,
7959 AllowPredicates);
7960
7961 // Be robust against unsimplified IR for the form "op i1 X, NeutralElement"
7962 const Constant *NeutralElement = ConstantInt::get(ExitCond->getType(), IsAnd);
7963 if (isa<ConstantInt>(Op1))
7964 return Op1 == NeutralElement ? EL0 : EL1;
7965 if (isa<ConstantInt>(Op0))
7966 return Op0 == NeutralElement ? EL1 : EL0;
7967
7968 const SCEV *BECount = getCouldNotCompute();
7969 const SCEV *MaxBECount = getCouldNotCompute();
7970 if (EitherMayExit) {
7971    // Both conditions must be true for the loop to continue executing.
7972 // Choose the less conservative count.
7973 // If ExitCond is a short-circuit form (select), using
7974 // umin(EL0.ExactNotTaken, EL1.ExactNotTaken) is unsafe in general.
7975 // To see the detailed examples, please see
7976 // test/Analysis/ScalarEvolution/exit-count-select.ll
7977 bool PoisonSafe = isa<BinaryOperator>(ExitCond);
7978 if (!PoisonSafe)
7979 // Even if ExitCond is select, we can safely derive BECount using both
7980 // EL0 and EL1 in these cases:
7981 // (1) EL0.ExactNotTaken is non-zero
7982 // (2) EL1.ExactNotTaken is non-poison
7983 // (3) EL0.ExactNotTaken is zero (BECount should be simply zero and
7984 // it cannot be umin(0, ..))
7985 // The PoisonSafe assignment below is simplified and the assertion after
7986 // BECount calculation fully guarantees the condition (3).
7987 PoisonSafe = isa<SCEVConstant>(EL0.ExactNotTaken) ||
7988 isa<SCEVConstant>(EL1.ExactNotTaken);
7989 if (EL0.ExactNotTaken != getCouldNotCompute() &&
7990 EL1.ExactNotTaken != getCouldNotCompute() && PoisonSafe) {
7991 BECount =
7992 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken);
7993
7994 // If EL0.ExactNotTaken was zero and ExitCond was a short-circuit form,
7995 // it should have been simplified to zero (see the condition (3) above)
7996      assert(!isa<BinaryOperator>(ExitCond) || !EL0.ExactNotTaken->isZero() ||
7997             BECount->isZero());
7998 }
7999 if (EL0.MaxNotTaken == getCouldNotCompute())
8000 MaxBECount = EL1.MaxNotTaken;
8001 else if (EL1.MaxNotTaken == getCouldNotCompute())
8002 MaxBECount = EL0.MaxNotTaken;
8003 else
8004 MaxBECount = getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken);
8005 } else {
8006    // Both conditions must be true at the same time for the loop to exit.
8007 // For now, be conservative.
8008 if (EL0.ExactNotTaken == EL1.ExactNotTaken)
8009 BECount = EL0.ExactNotTaken;
8010 }
8011
8012 // There are cases (e.g. PR26207) where computeExitLimitFromCond is able
8013 // to be more aggressive when computing BECount than when computing
8014 // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and
8015 // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken
8016 // to not.
8017 if (isa<SCEVCouldNotCompute>(MaxBECount) &&
8018 !isa<SCEVCouldNotCompute>(BECount))
8019 MaxBECount = getConstant(getUnsignedRangeMax(BECount));
8020
8021 return ExitLimit(BECount, MaxBECount, false,
8022 { &EL0.Predicates, &EL1.Predicates });
8023}
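Note (illustrative IR, assumed names): a sketch of the EitherMayExit case above:

  %c = and i1 %c0, %c1
  br i1 %c, label %loop, label %exit

The loop keeps running only while both %c0 and %c1 hold, so when both limits are computable the exact count is umin(EL0.ExactNotTaken, EL1.ExactNotTaken); for the short-circuit select form, that umin is only taken under the PoisonSafe conditions checked in the code.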
8024
8025ScalarEvolution::ExitLimit
8026ScalarEvolution::computeExitLimitFromICmp(const Loop *L,
8027 ICmpInst *ExitCond,
8028 bool ExitIfTrue,
8029 bool ControlsExit,
8030 bool AllowPredicates) {
8031 // If the condition was exit on true, convert the condition to exit on false
8032 ICmpInst::Predicate Pred;
8033 if (!ExitIfTrue)
8034 Pred = ExitCond->getPredicate();
8035 else
8036 Pred = ExitCond->getInversePredicate();
8037 const ICmpInst::Predicate OriginalPred = Pred;
8038
8039 // Handle common loops like: for (X = "string"; *X; ++X)
8040 if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
8041 if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
8042 ExitLimit ItCnt =
8043 computeLoadConstantCompareExitLimit(LI, RHS, L, Pred);
8044 if (ItCnt.hasAnyInfo())
8045 return ItCnt;
8046 }
8047
8048 const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
8049 const SCEV *RHS = getSCEV(ExitCond->getOperand(1));
8050
8051 // Try to evaluate any dependencies out of the loop.
8052 LHS = getSCEVAtScope(LHS, L);
8053 RHS = getSCEVAtScope(RHS, L);
8054
8055 // At this point, we would like to compute how many iterations of the
8056 // loop the predicate will return true for these inputs.
8057 if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) {
8058 // If there is a loop-invariant, force it into the RHS.
8059 std::swap(LHS, RHS);
8060 Pred = ICmpInst::getSwappedPredicate(Pred);
8061 }
8062
8063 // Simplify the operands before analyzing them.
8064 (void)SimplifyICmpOperands(Pred, LHS, RHS);
8065
8066 // If we have a comparison of a chrec against a constant, try to use value
8067 // ranges to answer this query.
8068 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
8069 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
8070 if (AddRec->getLoop() == L) {
8071 // Form the constant range.
8072 ConstantRange CompRange =
8073 ConstantRange::makeExactICmpRegion(Pred, RHSC->getAPInt());
8074
8075 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
8076 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
8077 }
8078
8079 switch (Pred) {
8080 case ICmpInst::ICMP_NE: { // while (X != Y)
8081 // Convert to: while (X-Y != 0)
8082 if (LHS->getType()->isPointerTy()) {
8083 LHS = getLosslessPtrToIntExpr(LHS);
8084 if (isa<SCEVCouldNotCompute>(LHS))
8085 return LHS;
8086 }
8087 if (RHS->getType()->isPointerTy()) {
8088 RHS = getLosslessPtrToIntExpr(RHS);
8089 if (isa<SCEVCouldNotCompute>(RHS))
8090 return RHS;
8091 }
8092 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit,
8093 AllowPredicates);
8094 if (EL.hasAnyInfo()) return EL;
8095 break;
8096 }
8097 case ICmpInst::ICMP_EQ: { // while (X == Y)
8098 // Convert to: while (X-Y == 0)
8099 if (LHS->getType()->isPointerTy()) {
8100 LHS = getLosslessPtrToIntExpr(LHS);
8101 if (isa<SCEVCouldNotCompute>(LHS))
8102 return LHS;
8103 }
8104 if (RHS->getType()->isPointerTy()) {
8105 RHS = getLosslessPtrToIntExpr(RHS);
8106 if (isa<SCEVCouldNotCompute>(RHS))
8107 return RHS;
8108 }
8109 ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L);
8110 if (EL.hasAnyInfo()) return EL;
8111 break;
8112 }
8113 case ICmpInst::ICMP_SLT:
8114 case ICmpInst::ICMP_ULT: { // while (X < Y)
8115 bool IsSigned = Pred == ICmpInst::ICMP_SLT;
8116 ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit,
8117 AllowPredicates);
8118 if (EL.hasAnyInfo()) return EL;
8119 break;
8120 }
8121 case ICmpInst::ICMP_SGT:
8122 case ICmpInst::ICMP_UGT: { // while (X > Y)
8123 bool IsSigned = Pred == ICmpInst::ICMP_SGT;
8124 ExitLimit EL =
8125 howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit,
8126 AllowPredicates);
8127 if (EL.hasAnyInfo()) return EL;
8128 break;
8129 }
8130 default:
8131 break;
8132 }
8133
8134 auto *ExhaustiveCount =
8135 computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
8136
8137 if (!isa<SCEVCouldNotCompute>(ExhaustiveCount))
8138 return ExhaustiveCount;
8139
8140 return computeShiftCompareExitLimit(ExitCond->getOperand(0),
8141 ExitCond->getOperand(1), L, OriginalPred);
8142}
8143
8144ScalarEvolution::ExitLimit
8145ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L,
8146 SwitchInst *Switch,
8147 BasicBlock *ExitingBlock,
8148 bool ControlsExit) {
8149  assert(!L->contains(ExitingBlock) && "Not an exiting block!");
8150
8151 // Give up if the exit is the default dest of a switch.
8152 if (Switch->getDefaultDest() == ExitingBlock)
8153 return getCouldNotCompute();
8154
8155  assert(L->contains(Switch->getDefaultDest()) &&
8156         "Default case must not exit the loop!");
8157 const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L);
8158 const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock));
8159
8160 // while (X != Y) --> while (X-Y != 0)
8161 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit);
8162 if (EL.hasAnyInfo())
8163 return EL;
8164
8165 return getCouldNotCompute();
8166}
8167
8168static ConstantInt *
8169EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
8170 ScalarEvolution &SE) {
8171 const SCEV *InVal = SE.getConstant(C);
8172 const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
8173  assert(isa<SCEVConstant>(Val) &&
8174         "Evaluation of SCEV at constant didn't fold correctly?");
8175 return cast<SCEVConstant>(Val)->getValue();
8176}
8177
8178/// Given an exit condition of 'icmp op load X, cst', try to see if we can
8179/// compute the backedge execution count.
8180ScalarEvolution::ExitLimit
8181ScalarEvolution::computeLoadConstantCompareExitLimit(
8182 LoadInst *LI,
8183 Constant *RHS,
8184 const Loop *L,
8185 ICmpInst::Predicate predicate) {
8186 if (LI->isVolatile()) return getCouldNotCompute();
8187
8188 // Check to see if the loaded pointer is a getelementptr of a global.
8189 // TODO: Use SCEV instead of manually grubbing with GEPs.
8190 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
8191 if (!GEP) return getCouldNotCompute();
8192
8193 // Make sure that it is really a constant global we are gepping, with an
8194 // initializer, and make sure the first IDX is really 0.
8195 GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
8196 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
8197 GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
8198 !cast<Constant>(GEP->getOperand(1))->isNullValue())
8199 return getCouldNotCompute();
8200
8201 // Okay, we allow one non-constant index into the GEP instruction.
8202 Value *VarIdx = nullptr;
8203 std::vector<Constant*> Indexes;
8204 unsigned VarIdxNum = 0;
8205 for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
8206 if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
8207 Indexes.push_back(CI);
8208 } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
8209 if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's.
8210 VarIdx = GEP->getOperand(i);
8211 VarIdxNum = i-2;
8212 Indexes.push_back(nullptr);
8213 }
8214
8215 // Loop-invariant loads may be a byproduct of loop optimization. Skip them.
8216 if (!VarIdx)
8217 return getCouldNotCompute();
8218
8219 // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
8220 // Check to see if X is a loop variant variable value now.
8221 const SCEV *Idx = getSCEV(VarIdx);
8222 Idx = getSCEVAtScope(Idx, L);
8223
8224 // We can only recognize very limited forms of loop index expressions, in
8225 // particular, only affine AddRec's like {C1,+,C2}<L>.
8226 const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
8227 if (!IdxExpr || IdxExpr->getLoop() != L || !IdxExpr->isAffine() ||
8228 isLoopInvariant(IdxExpr, L) ||
8229 !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
8230 !isa<SCEVConstant>(IdxExpr->getOperand(1)))
8231 return getCouldNotCompute();
8232
8233 unsigned MaxSteps = MaxBruteForceIterations;
8234 for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
8235 ConstantInt *ItCst = ConstantInt::get(
8236 cast<IntegerType>(IdxExpr->getType()), IterationNum);
8237 ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);
8238
8239 // Form the GEP offset.
8240 Indexes[VarIdxNum] = Val;
8241
8242 Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(),
8243 Indexes);
8244 if (!Result) break; // Cannot compute!
8245
8246 // Evaluate the condition for this iteration.
8247 Result = ConstantExpr::getICmp(predicate, Result, RHS);
8248 if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure
8249 if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
8250 ++NumArrayLenItCounts;
8251 return getConstant(ItCst); // Found terminating iteration!
8252 }
8253 }
8254 return getCouldNotCompute();
8255}
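Note (hypothetical source, for illustration): the shape being brute-forced here is essentially "for (i = 0; Table[i] != 0; ++i)" over "static const int Table[] = {3, 1, 4, 1, 0};". The index evolves as the affine AddRec {0,+,1} and the load folds through the GEP into @Table's initializer, so evaluating the exit comparison at iterations 0, 1, 2, ... finds that it first leaves the loop at i == 4, giving a constant backedge-taken count of 4.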
8256
8257ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
8258 Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) {
8259 ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV);
8260 if (!RHS)
8261 return getCouldNotCompute();
8262
8263 const BasicBlock *Latch = L->getLoopLatch();
8264 if (!Latch)
8265 return getCouldNotCompute();
8266
8267 const BasicBlock *Predecessor = L->getLoopPredecessor();
8268 if (!Predecessor)
8269 return getCouldNotCompute();
8270
8271 // Return true if V is of the form "LHS `shift_op` <positive constant>".
8272  // Return LHS in OutLHS and the shift opcode in OutOpCode.
8273 auto MatchPositiveShift =
8274 [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) {
8275
8276 using namespace PatternMatch;
8277
8278 ConstantInt *ShiftAmt;
8279 if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
8280 OutOpCode = Instruction::LShr;
8281 else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
8282 OutOpCode = Instruction::AShr;
8283 else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
8284 OutOpCode = Instruction::Shl;
8285 else
8286 return false;
8287
8288 return ShiftAmt->getValue().isStrictlyPositive();
8289 };
8290
8291 // Recognize a "shift recurrence" either of the form %iv or of %iv.shifted in
8292 //
8293 // loop:
8294 // %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ]
8295 // %iv.shifted = lshr i32 %iv, <positive constant>
8296 //
8297 // Return true on a successful match. Return the corresponding PHI node (%iv
8298 // above) in PNOut and the opcode of the shift operation in OpCodeOut.
8299 auto MatchShiftRecurrence =
8300 [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) {
8301 Optional<Instruction::BinaryOps> PostShiftOpCode;
8302
8303 {
8304 Instruction::BinaryOps OpC;
8305 Value *V;
8306
8307 // If we encounter a shift instruction, "peel off" the shift operation,
8308 // and remember that we did so. Later when we inspect %iv's backedge
8309 // value, we will make sure that the backedge value uses the same
8310 // operation.
8311 //
8312 // Note: the peeled shift operation does not have to be the same
8313 // instruction as the one feeding into the PHI's backedge value. We only
8314 // really care about it being the same *kind* of shift instruction --
8315 // that's all that is required for our later inferences to hold.
8316 if (MatchPositiveShift(LHS, V, OpC)) {
8317 PostShiftOpCode = OpC;
8318 LHS = V;
8319 }
8320 }
8321
8322 PNOut = dyn_cast<PHINode>(LHS);
8323 if (!PNOut || PNOut->getParent() != L->getHeader())
8324 return false;
8325
8326 Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
8327 Value *OpLHS;
8328
8329 return
8330 // The backedge value for the PHI node must be a shift by a positive
8331 // amount
8332 MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&
8333
8334 // of the PHI node itself
8335 OpLHS == PNOut &&
8336
8337        // and the kind of shift should match the kind of shift we peeled
8338 // off, if any.
8339 (!PostShiftOpCode.hasValue() || *PostShiftOpCode == OpCodeOut);
8340 };
8341
8342 PHINode *PN;
8343 Instruction::BinaryOps OpCode;
8344 if (!MatchShiftRecurrence(LHS, PN, OpCode))
8345 return getCouldNotCompute();
8346
8347 const DataLayout &DL = getDataLayout();
8348
8349 // The key rationale for this optimization is that for some kinds of shift
8350 // recurrences, the value of the recurrence "stabilizes" to either 0 or -1
8351 // within a finite number of iterations. If the condition guarding the
8352 // backedge (in the sense that the backedge is taken if the condition is true)
8353 // is false for the value the shift recurrence stabilizes to, then we know
8354 // that the backedge is taken only a finite number of times.
8355
8356 ConstantInt *StableValue = nullptr;
8357 switch (OpCode) {
8358 default:
8359    llvm_unreachable("Impossible case!");
8360
8361 case Instruction::AShr: {
8362 // {K,ashr,<positive-constant>} stabilizes to signum(K) in at most
8363 // bitwidth(K) iterations.
8364 Value *FirstValue = PN->getIncomingValueForBlock(Predecessor);
8365 KnownBits Known = computeKnownBits(FirstValue, DL, 0, &AC,
8366 Predecessor->getTerminator(), &DT);
8367 auto *Ty = cast<IntegerType>(RHS->getType());
8368 if (Known.isNonNegative())
8369 StableValue = ConstantInt::get(Ty, 0);
8370 else if (Known.isNegative())
8371 StableValue = ConstantInt::get(Ty, -1, true);
8372 else
8373 return getCouldNotCompute();
8374
8375 break;
8376 }
8377 case Instruction::LShr:
8378 case Instruction::Shl:
8379 // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>}
8380 // stabilize to 0 in at most bitwidth(K) iterations.
8381 StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0);
8382 break;
8383 }
8384
8385 auto *Result =
8386 ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI);
8387  assert(Result->getType()->isIntegerTy(1) &&
8388         "Otherwise cannot be an operand to a branch instruction");
8389
8390 if (Result->isZeroValue()) {
8391 unsigned BitWidth = getTypeSizeInBits(RHS->getType());
8392 const SCEV *UpperBound =
8393 getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth);
8394 return ExitLimit(getCouldNotCompute(), UpperBound, false);
8395 }
8396
8397 return getCouldNotCompute();
8398}
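Note (illustrative IR, assumed names): a shift recurrence the stabilization argument above covers:

  loop:
    %iv = phi i32 [ %iv.shr, %loop ], [ %val, %preheader ]
    %iv.shr = lshr i32 %iv, 1
    %cmp = icmp ne i32 %iv.shr, 0
    br i1 %cmp, label %loop, label %exit

{%val,lshr,1} stabilizes to 0 within 32 iterations, and the guard "ne 0" is false for that stable value, so an ExitLimit with constant max 32 is returned even though the exact count depends on %val.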
8399
8400/// Return true if we can constant fold an instruction of the specified type,
8401/// assuming that all operands were constants.
8402static bool CanConstantFold(const Instruction *I) {
8403 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
8404 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
8405 isa<LoadInst>(I) || isa<ExtractValueInst>(I))
8406 return true;
8407
8408 if (const CallInst *CI = dyn_cast<CallInst>(I))
8409 if (const Function *F = CI->getCalledFunction())
8410 return canConstantFoldCallTo(CI, F);
8411 return false;
8412}
8413
8414/// Determine whether this instruction can constant evolve within this loop
8415/// assuming its operands can all constant evolve.
8416static bool canConstantEvolve(Instruction *I, const Loop *L) {
8417 // An instruction outside of the loop can't be derived from a loop PHI.
8418 if (!L->contains(I)) return false;
8419
8420 if (isa<PHINode>(I)) {
8421 // We don't currently keep track of the control flow needed to evaluate
8422 // PHIs, so we cannot handle PHIs inside of loops.
8423 return L->getHeader() == I->getParent();
8424 }
8425
8426 // If we won't be able to constant fold this expression even if the operands
8427 // are constants, bail early.
8428 return CanConstantFold(I);
8429}
8430
8431/// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by
8432/// recursing through each instruction operand until reaching a loop header phi.
8433static PHINode *
8434getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L,
8435 DenseMap<Instruction *, PHINode *> &PHIMap,
8436 unsigned Depth) {
8437 if (Depth > MaxConstantEvolvingDepth)
8438 return nullptr;
8439
8440 // Otherwise, we can evaluate this instruction if all of its operands are
8441 // constant or derived from a PHI node themselves.
8442 PHINode *PHI = nullptr;
8443 for (Value *Op : UseInst->operands()) {
8444 if (isa<Constant>(Op)) continue;
8445
8446 Instruction *OpInst = dyn_cast<Instruction>(Op);
8447 if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr;
8448
8449 PHINode *P = dyn_cast<PHINode>(OpInst);
8450 if (!P)
8451 // If this operand is already visited, reuse the prior result.
8452 // We may have P != PHI if this is the deepest point at which the
8453 // inconsistent paths meet.
8454 P = PHIMap.lookup(OpInst);
8455 if (!P) {
8456 // Recurse and memoize the results, whether a phi is found or not.
8457 // This recursive call invalidates pointers into PHIMap.
8458 P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1);
8459 PHIMap[OpInst] = P;
8460 }
8461 if (!P)
8462 return nullptr; // Not evolving from PHI
8463 if (PHI && PHI != P)
8464 return nullptr; // Evolving from multiple different PHIs.
8465 PHI = P;
8466 }
8467 // This is an expression evolving from a constant PHI!
8468 return PHI;
8469}
8470
8471/// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
8472/// in the loop that V is derived from. We allow arbitrary operations along the
8473/// way, but the operands of an operation must either be constants or a value
8474/// derived from a constant PHI. If this expression does not fit with these
8475/// constraints, return null.
8476static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
8477 Instruction *I = dyn_cast<Instruction>(V);
8478 if (!I || !canConstantEvolve(I, L)) return nullptr;
8479
8480 if (PHINode *PN = dyn_cast<PHINode>(I))
8481 return PN;
8482
8483 // Record non-constant instructions contained by the loop.
8484 DenseMap<Instruction *, PHINode *> PHIMap;
8485 return getConstantEvolvingPHIOperands(I, L, PHIMap, 0);
8486}
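// Source-level intuition (hypothetical example): after loop rotation, 'i'
// and 'acc' below become loop-header PHIs. An expression like acc * 2 + 1
// evolves from the single PHI 'acc' and is accepted by the routines above,
// whereas an expression mixing 'acc' and 'i' would evolve from two
// different PHIs and be rejected.
static int constantEvolvingSketch(int n) {
  int i = 0, acc = 1;    // both become header PHIs
  while (i != n) {
    acc = acc * 2 + 1;   // derived from the single PHI 'acc'
    i = i + 1;           // derived from the single PHI 'i'
  }
  return acc;
}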
8487
8488/// EvaluateExpression - Given an expression that passes the
8489/// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
8490/// in the loop has the value PHIVal. If we can't fold this expression for some
8491/// reason, return null.
8492static Constant *EvaluateExpression(Value *V, const Loop *L,
8493 DenseMap<Instruction *, Constant *> &Vals,
8494 const DataLayout &DL,
8495 const TargetLibraryInfo *TLI) {
8496 // Convenient constant check, but redundant for recursive calls.
8497 if (Constant *C = dyn_cast<Constant>(V)) return C;
8498 Instruction *I = dyn_cast<Instruction>(V);
8499 if (!I) return nullptr;
8500
8501 if (Constant *C = Vals.lookup(I)) return C;
8502
8503 // An instruction inside the loop depends on a value outside the loop that we
8504 // weren't given a mapping for, or a value such as a call inside the loop.
8505 if (!canConstantEvolve(I, L)) return nullptr;
8506
8507 // An unmapped PHI can be due to a branch or another loop inside this loop,
8508 // or due to this not being the initial iteration through a loop where we
8509 // couldn't compute the evolution of this particular PHI last time.
8510 if (isa<PHINode>(I)) return nullptr;
8511
8512 std::vector<Constant*> Operands(I->getNumOperands());
8513
8514 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
8515 Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
8516 if (!Operand) {
8517 Operands[i] = dyn_cast<Constant>(I->getOperand(i));
8518 if (!Operands[i]) return nullptr;
8519 continue;
8520 }
8521 Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
8522 Vals[Operand] = C;
8523 if (!C) return nullptr;
8524 Operands[i] = C;
8525 }
8526
8527 if (CmpInst *CI = dyn_cast<CmpInst>(I))
8528 return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
8529 Operands[1], DL, TLI);
8530 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
8531 if (!LI->isVolatile())
8532 return ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL);
8533 }
8534 return ConstantFoldInstOperands(I, Operands, DL, TLI);
8535}
8536
8537
8538// If every incoming value to PN except the one for BB is a specific Constant,
8539// return that, else return nullptr.
8540static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) {
8541 Constant *IncomingVal = nullptr;
8542
8543 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
8544 if (PN->getIncomingBlock(i) == BB)
8545 continue;
8546
8547 auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i));
8548 if (!CurrentVal)
8549 return nullptr;
8550
8551 if (IncomingVal != CurrentVal) {
8552 if (IncomingVal)
8553 return nullptr;
8554 IncomingVal = CurrentVal;
8555 }
8556 }
8557
8558 return IncomingVal;
8559}
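// Hypothetical example: for "%p = phi [5, %preheader], [%next, %latch]"
// with BB == %latch, this returns the constant 5; if a second non-latch
// edge carried a different constant, it would return null.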
8560
8561/// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
8562/// in the header of its containing loop, we know the loop executes a
8563/// constant number of times, and the PHI node is just a recurrence
8564/// involving constants, fold it.
8565Constant *
8566ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
8567 const APInt &BEs,
8568 const Loop *L) {
8569 auto I = ConstantEvolutionLoopExitValue.find(PN);
8570 if (I != ConstantEvolutionLoopExitValue.end())
8571 return I->second;
8572
8573 if (BEs.ugt(MaxBruteForceIterations))
8574 return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it.
8575
8576 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
8577
8578 DenseMap<Instruction *, Constant *> CurrentIterVals;
8579 BasicBlock *Header = L->getHeader();
8580 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
8581
8582 BasicBlock *Latch = L->getLoopLatch();
8583 if (!Latch)
8584 return nullptr;
8585
8586 for (PHINode &PHI : Header->phis()) {
8587 if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
8588 CurrentIterVals[&PHI] = StartCST;
8589 }
8590 if (!CurrentIterVals.count(PN))
8591 return RetVal = nullptr;
8592
8593 Value *BEValue = PN->getIncomingValueForBlock(Latch);
8594
8595 // Execute the loop symbolically to determine the exit value.
8596 assert(BEs.getActiveBits() < CHAR_BIT * sizeof(unsigned) &&
8597 "BEs is <= MaxBruteForceIterations which is an 'unsigned'!");
8598
8599 unsigned NumIterations = BEs.getZExtValue(); // must be in range
8600 unsigned IterationNum = 0;
8601 const DataLayout &DL = getDataLayout();
8602 for (; ; ++IterationNum) {
8603 if (IterationNum == NumIterations)
8604 return RetVal = CurrentIterVals[PN]; // Got exit value!
8605
8606 // Compute the value of the PHIs for the next iteration.
8607 // EvaluateExpression adds non-phi values to the CurrentIterVals map.
8608 DenseMap<Instruction *, Constant *> NextIterVals;
8609 Constant *NextPHI =
8610 EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
8611 if (!NextPHI)
8612 return nullptr; // Couldn't evaluate!
8613 NextIterVals[PN] = NextPHI;
8614
8615 bool StoppedEvolving = NextPHI == CurrentIterVals[PN];
8616
8617 // Also evaluate the other PHI nodes. However, we don't get to stop if we
8618 // cease to be able to evaluate one of them or if they stop evolving,
8619 // because that doesn't necessarily prevent us from computing PN.
8620 SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute;
8621 for (const auto &I : CurrentIterVals) {
8622 PHINode *PHI = dyn_cast<PHINode>(I.first);
8623 if (!PHI || PHI == PN || PHI->getParent() != Header) continue;
8624 PHIsToCompute.emplace_back(PHI, I.second);
8625 }
8626 // We use two distinct loops because EvaluateExpression may invalidate any
8627 // iterators into CurrentIterVals.
8628 for (const auto &I : PHIsToCompute) {
8629 PHINode *PHI = I.first;
8630 Constant *&NextPHI = NextIterVals[PHI];
8631 if (!NextPHI) { // Not already computed.
8632 Value *BEValue = PHI->getIncomingValueForBlock(Latch);
8633 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
8634 }
8635 if (NextPHI != I.second)
8636 StoppedEvolving = false;
8637 }
8638
8639 // If all entries in CurrentIterVals == NextIterVals then we can stop
8640 // iterating, the loop can't continue to change.
8641 if (StoppedEvolving)
8642 return RetVal = CurrentIterVals[PN];
8643
8644 CurrentIterVals.swap(NextIterVals);
8645 }
8646}
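// Standalone sketch of the brute-force idea above (hypothetical recurrence,
// not LLVM API): with a known backedge-taken count BEs, step the header PHI
// one iteration at a time and read off the exit value.
static unsigned long long exitValueSketch(unsigned long long Start,
                                          unsigned BEs) {
  unsigned long long PN = Start; // CurrentIterVals[PN] on loop entry
  for (unsigned It = 0; It != BEs; ++It)
    PN = PN * 3 + 1;             // stand-in for EvaluateExpression(BEValue)
  return PN;                     // the value the function caches as RetVal
}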
8647
8648const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L,
8649 Value *Cond,
8650 bool ExitWhen) {
8651 PHINode *PN = getConstantEvolvingPHI(Cond, L);
8652 if (!PN) return getCouldNotCompute();
8653
8654 // If the loop is canonicalized, the PHI will have exactly two entries.
8655 // That's the only form we support here.
8656 if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();
8657
8658 DenseMap<Instruction *, Constant *> CurrentIterVals;
8659 BasicBlock *Header = L->getHeader();
8660 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
8661
8662 BasicBlock *Latch = L->getLoopLatch();
8663 assert(Latch && "Should follow from NumIncomingValues == 2!");
8664
8665 for (PHINode &PHI : Header->phis()) {
8666 if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
8667 CurrentIterVals[&PHI] = StartCST;
8668 }
8669 if (!CurrentIterVals.count(PN))
8670 return getCouldNotCompute();
8671
8672 // Okay, we found a PHI node that defines the trip count of this loop. Execute
8673 // the loop symbolically to determine when the condition gets a value of
8674 // "ExitWhen".
8675 unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis.
8676 const DataLayout &DL = getDataLayout();
8677 for (unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){
8678 auto *CondVal = dyn_cast_or_null<ConstantInt>(
8679 EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI));
8680
8681 // Couldn't symbolically evaluate.
8682 if (!CondVal) return getCouldNotCompute();
8683
8684 if (CondVal->getValue() == uint64_t(ExitWhen)) {
8685 ++NumBruteForceTripCountsComputed;
8686 return getConstant(Type::getInt32Ty(getContext()), IterationNum);
8687 }
8688
8689 // Update all the PHI nodes for the next iteration.
8690 DenseMap<Instruction *, Constant *> NextIterVals;
8691
8692 // Create a list of which PHIs we need to compute. We want to do this before
8693 // calling EvaluateExpression on them because that may invalidate iterators
8694 // into CurrentIterVals.
8695 SmallVector<PHINode *, 8> PHIsToCompute;
8696 for (const auto &I : CurrentIterVals) {
8697 PHINode *PHI = dyn_cast<PHINode>(I.first);
8698 if (!PHI || PHI->getParent() != Header) continue;
8699 PHIsToCompute.push_back(PHI);
8700 }
8701 for (PHINode *PHI : PHIsToCompute) {
8702 Constant *&NextPHI = NextIterVals[PHI];
8703 if (NextPHI) continue; // Already computed!
8704
8705 Value *BEValue = PHI->getIncomingValueForBlock(Latch);
8706 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
8707 }
8708 CurrentIterVals.swap(NextIterVals);
8709 }
8710
8711 // Too many iterations were needed to evaluate.
8712 return getCouldNotCompute();
8713}
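// Standalone sketch of the exhaustive probe above (hypothetical condition,
// not LLVM API): advance the PHIs at most MaxIterations times and report
// the first iteration at which the exit condition equals ExitWhen.
static int exitCountSketch(bool ExitWhen, unsigned MaxIterations) {
  unsigned X = 1;          // hypothetical header PHI
  for (unsigned It = 0; It != MaxIterations; ++It) {
    bool Cond = X > 100;   // stand-in for EvaluateExpression(Cond)
    if (Cond == ExitWhen)
      return int(It);      // trip count found
    X *= 2;                // one symbolic iteration
  }
  return -1;               // too many iterations: CouldNotCompute
}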
8714
8715const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
8716 SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values =
8717 ValuesAtScopes[V];
8718 // Check to see if we've folded this expression at this loop before.
8719 for (auto &LS : Values)
8720 if (LS.first == L)
8721 return LS.second ? LS.second : V;
8722
8723 Values.emplace_back(L, nullptr);
8724
8725 // Otherwise compute it.
8726 const SCEV *C = computeSCEVAtScope(V, L);
8727 for (auto &LS : reverse(ValuesAtScopes[V]))
8728 if (LS.first == L) {
8729 LS.second = C;
8730 break;
8731 }
8732 return C;
8733}
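// Note on the (L, nullptr) placeholder above: it is recorded before the
// recursive computeSCEVAtScope call, so a re-entrant query for the same
// (V, L) pair returns V itself rather than recursing forever; the reverse
// scan afterwards patches the placeholder with the computed result.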
8734
8735/// This builds up a Constant using the ConstantExpr interface. That way, we
8736/// will return Constants for objects which aren't represented by a
8737/// SCEVConstant, because SCEVConstant is restricted to ConstantInt.
8738/// Returns NULL if the SCEV isn't representable as a Constant.
8739static Constant *BuildConstantFromSCEV(const SCEV *V) {
8740 switch (V->getSCEVType()) {
8741 case scCouldNotCompute:
8742 case scAddRecExpr:
8743 return nullptr;
8744 case scConstant:
8745 return cast<SCEVConstant>(V)->getValue();
8746 case scUnknown:
8747 return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue());
8748 case scSignExtend: {
8749 const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V);
8750 if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand()))
8751 return ConstantExpr::getSExt(CastOp, SS->getType());
8752 return nullptr;
8753 }
8754 case scZeroExtend: {
8755 const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V);
8756 if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand()))
8757 return ConstantExpr::getZExt(CastOp, SZ->getType());
8758 return nullptr;
8759 }
8760 case scPtrToInt: {
8761 const SCEVPtrToIntExpr *P2I = cast<SCEVPtrToIntExpr>(V);
8762 if (Constant *CastOp = BuildConstantFromSCEV(P2I->getOperand()))
8763 return ConstantExpr::getPtrToInt(CastOp, P2I->getType());
8764
8765 return nullptr;
8766 }
8767 case scTruncate: {
8768 const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V);
8769 if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand()))
8770 return ConstantExpr::getTrunc(CastOp, ST->getType());
8771 return nullptr;
8772 }
8773 case scAddExpr: {
8774 const SCEVAddExpr *SA = cast<SCEVAddExpr>(V);
8775 if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) {
8776 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
8777 unsigned AS = PTy->getAddressSpace();
8778 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
8779 C = ConstantExpr::getBitCast(C, DestPtrTy);
8780 }
8781 for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) {
8782 Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i));
8783 if (!C2)
8784 return nullptr;
8785
8786 // First pointer!
8787 if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) {
8788 unsigned AS = C2->getType()->getPointerAddressSpace();
8789 std::swap(C, C2);
8790 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
8791 // The offsets have been converted to bytes. We can add bytes to an
8792 // i8* by GEP with the byte count in the first index.
8793 C = ConstantExpr::getBitCast(C, DestPtrTy);
8794 }
8795
8796 // Don't bother trying to sum two pointers. We probably can't
8797 // statically compute a load that results from it anyway.
8798 if (C2->getType()->isPointerTy())
8799 return nullptr;
8800
8801 if (C->getType()->isPointerTy()) {
8802 C = ConstantExpr::getGetElementPtr(Type::getInt8Ty(C->getContext()),
8803 C, C2);
8804 } else {
8805 C = ConstantExpr::getAdd(C, C2);
8806 }
8807 }
8808 return C;
8809 }
8810 return nullptr;
8811 }
8812 case scMulExpr: {
8813 const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
8814 if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
8815 // Don't bother with pointers at all.
8816 if (C->getType()->isPointerTy())
8817 return nullptr;
8818 for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) {
8819 Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i));
8820 if (!C2 || C2->getType()->isPointerTy())
8821 return nullptr;
8822 C = ConstantExpr::getMul(C, C2);
8823 }
8824 return C;
8825 }
8826 return nullptr;
8827 }
8828 case scUDivExpr: {
8829 const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V);
8830 if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS()))
8831 if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS()))
8832 if (LHS->getType() == RHS->getType())
8833 return ConstantExpr::getUDiv(LHS, RHS);
8834 return nullptr;
8835 }
8836 case scSMaxExpr:
8837 case scUMaxExpr:
8838 case scSMinExpr:
8839 case scUMinExpr:
8840 return nullptr; // TODO: smax, umax, smin, umin.
8841 }
8842 llvm_unreachable("Unknown SCEV kind!");
8843}
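// Illustration of the scAddExpr pointer handling above (hypothetical
// operands): a SCEV such as (16 + %gv) is materialized as a byte GEP,
// roughly "getelementptr i8, i8* bitcast %gv, i64 16", rather than as an
// integer add; a sum of two pointers is rejected outright.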
8844
8845const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
8846 if (isa<SCEVConstant>(V)) return V;
8847
8848 // If this instruction is evolved from a constant-evolving PHI, compute the
8849 // exit value from the loop without using SCEVs.
8850 if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
8851 if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
8852 if (PHINode *PN = dyn_cast<PHINode>(I)) {
8853 const Loop *CurrLoop = this->LI[I->getParent()];
8854 // Looking for loop exit value.
8855 if (CurrLoop && CurrLoop->getParentLoop() == L &&
8856 PN->getParent() == CurrLoop->getHeader()) {
8857 // Okay, there is no closed form solution for the PHI node. Check
8858 // to see if the loop that contains it has a known backedge-taken
8859 // count. If so, we may be able to force computation of the exit
8860 // value.
8861 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(CurrLoop);
8862 // This trivial case can show up in some degenerate cases where
8863 // the incoming IR has not yet been fully simplified.
8864 if (BackedgeTakenCount->isZero()) {
8865 Value *InitValue = nullptr;
8866 bool MultipleInitValues = false;
8867 for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
8868 if (!CurrLoop->contains(PN->getIncomingBlock(i))) {
8869 if (!InitValue)
8870 InitValue = PN->getIncomingValue(i);
8871 else if (InitValue != PN->getIncomingValue(i)) {
8872 MultipleInitValues = true;
8873 break;
8874 }
8875 }
8876 }
8877 if (!MultipleInitValues && InitValue)
8878 return getSCEV(InitValue);
8879 }
8880 // Do we have a loop invariant value flowing around the backedge
8881 // for a loop which must execute the backedge?
8882 if (!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
8883 isKnownPositive(BackedgeTakenCount) &&
8884 PN->getNumIncomingValues() == 2) {
8885
8886 unsigned InLoopPred =
8887 CurrLoop->contains(PN->getIncomingBlock(0)) ? 0 : 1;
8888 Value *BackedgeVal = PN->getIncomingValue(InLoopPred);
8889 if (CurrLoop->isLoopInvariant(BackedgeVal))
8890 return getSCEV(BackedgeVal);
8891 }
8892 if (auto *BTCC = dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
8893 // Okay, we know how many times the containing loop executes. If
8894 // this is a constant evolving PHI node, get the final value at
8895 // the specified iteration number.
8896 Constant *RV = getConstantEvolutionLoopExitValue(
8897 PN, BTCC->getAPInt(), CurrLoop);
8898 if (RV) return getSCEV(RV);
8899 }
8900 }
8901
8902 // If there is a single-input Phi, evaluate it at our scope. If we can
8903 // prove that this replacement does not break LCSSA form, use the new value.
8904 if (PN->getNumOperands() == 1) {
8905 const SCEV *Input = getSCEV(PN->getOperand(0));
8906 const SCEV *InputAtScope = getSCEVAtScope(Input, L);
8907 // TODO: We can generalize it using LI.replacementPreservesLCSSAForm,
8908 // for the simplest case just support constants.
8909 if (isa<SCEVConstant>(InputAtScope)) return InputAtScope;
8910 }
8911 }
8912
8913 // Okay, this is an expression that we cannot symbolically evaluate
8914 // into a SCEV. Check to see if it's possible to symbolically evaluate
8915 // the arguments into constants, and if so, try to constant propagate the
8916 // result. This is particularly useful for computing loop exit values.
8917 if (CanConstantFold(I)) {
8918 SmallVector<Constant *, 4> Operands;
8919 bool MadeImprovement = false;
8920 for (Value *Op : I->operands()) {
8921 if (Constant *C = dyn_cast<Constant>(Op)) {
8922 Operands.push_back(C);
8923 continue;
8924 }
8925
8926 // If any of the operands is non-constant, and is neither an integer
8927 // nor a pointer, don't even try to analyze it with SCEV
8928 // techniques.
8929 if (!isSCEVable(Op->getType()))
8930 return V;
8931
8932 const SCEV *OrigV = getSCEV(Op);
8933 const SCEV *OpV = getSCEVAtScope(OrigV, L);
8934 MadeImprovement |= OrigV != OpV;
8935
8936 Constant *C = BuildConstantFromSCEV(OpV);
8937 if (!C) return V;
8938 if (C->getType() != Op->getType())
8939 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
8940 Op->getType(),
8941 false),
8942 C, Op->getType());
8943 Operands.push_back(C);
8944 }
8945
8946 // Check to see if getSCEVAtScope actually made an improvement.
8947 if (MadeImprovement) {
8948 Constant *C = nullptr;
8949 const DataLayout &DL = getDataLayout();
8950 if (const CmpInst *CI = dyn_cast<CmpInst>(I))
8951 C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
8952 Operands[1], DL, &TLI);
8953 else if (const LoadInst *Load = dyn_cast<LoadInst>(I)) {
8954 if (!Load->isVolatile())
8955 C = ConstantFoldLoadFromConstPtr(Operands[0], Load->getType(),
8956 DL);
8957 } else
8958 C = ConstantFoldInstOperands(I, Operands, DL, &TLI);
8959 if (!C) return V;
8960 return getSCEV(C);
8961 }
8962 }
8963 }
8964
8965 // This is some other type of SCEVUnknown, just return it.
8966 return V;
8967 }
8968
8969 if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
8970 // Avoid performing the look-up in the common case where the specified
8971 // expression has no loop-variant portions.
8972 for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
8973 const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
8974 if (OpAtScope != Comm->getOperand(i)) {
8975 // Okay, at least one of these operands is loop variant but might be
8976 // foldable. Build a new instance of the folded commutative expression.
8977 SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
8978 Comm->op_begin()+i);
8979 NewOps.push_back(OpAtScope);
8980
8981 for (++i; i != e; ++i) {
8982 OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
8983 NewOps.push_back(OpAtScope);
8984 }
8985 if (isa<SCEVAddExpr>(Comm))
8986 return getAddExpr(NewOps, Comm->getNoWrapFlags());
8987 if (isa<SCEVMulExpr>(Comm))
8988 return getMulExpr(NewOps, Comm->getNoWrapFlags());
8989 if (isa<SCEVMinMaxExpr>(Comm))
8990 return getMinMaxExpr(Comm->getSCEVType(), NewOps);
8991 llvm_unreachable("Unknown commutative SCEV type!");
8992 }
8993 }
8994 // If we got here, all operands are loop invariant.
8995 return Comm;
8996 }
8997
8998 if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
8999 const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
9000 const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
9001 if (LHS == Div->getLHS() && RHS == Div->getRHS())
9002 return Div; // must be loop invariant
9003 return getUDivExpr(LHS, RHS);
9004 }
9005
9006 // If this is a loop recurrence for a loop that does not contain L, then we
9007 // are dealing with the final value computed by the loop.
9008 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
9009 // First, attempt to evaluate each operand.
9010 // Avoid performing the look-up in the common case where the specified
9011 // expression has no loop-variant portions.
9012 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
9013 const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
9014 if (OpAtScope == AddRec->getOperand(i))
9015 continue;
9016
9017 // Okay, at least one of these operands is loop variant but might be
9018 // foldable. Build a new instance of the folded commutative expression.
9019 SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(),
9020 AddRec->op_begin()+i);
9021 NewOps.push_back(OpAtScope);
9022 for (++i; i != e; ++i)
9023 NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));
9024
9025 const SCEV *FoldedRec =
9026 getAddRecExpr(NewOps, AddRec->getLoop(),
9027 AddRec->getNoWrapFlags(SCEV::FlagNW));
9028 AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec);
9029 // The addrec may be folded to a nonrecurrence, for example, if the
9030 // induction variable is multiplied by zero after constant folding. Go
9031 // ahead and return the folded value.
9032 if (!AddRec)
9033 return FoldedRec;
9034 break;
9035 }
9036
9037 // If the scope is outside the addrec's loop, evaluate it by using the
9038 // loop exit value of the addrec.
9039 if (!AddRec->getLoop()->contains(L)) {
9040 // To evaluate this recurrence, we need to know how many times the AddRec
9041 // loop iterates. Compute this now.
9042 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
9043 if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;
9044
9045 // Then, evaluate the AddRec.
9046 return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
9047 }
9048
9049 return AddRec;
9050 }
9051
9052 if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
9053 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
9054 if (Op == Cast->getOperand())
9055 return Cast; // must be loop invariant
9056 return getZeroExtendExpr(Op, Cast->getType());
9057 }
9058
9059 if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
9060 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
9061 if (Op == Cast->getOperand())
9062 return Cast; // must be loop invariant
9063 return getSignExtendExpr(Op, Cast->getType());
9064 }
9065
9066 if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
9067 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
9068 if (Op == Cast->getOperand())
9069 return Cast; // must be loop invariant
9070 return getTruncateExpr(Op, Cast->getType());
9071 }
9072
9073 if (const SCEVPtrToIntExpr *Cast = dyn_cast<SCEVPtrToIntExpr>(V)) {
9074 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
9075 if (Op == Cast->getOperand())
9076 return Cast; // must be loop invariant
9077 return getPtrToIntExpr(Op, Cast->getType());
9078 }
9079
9080 llvm_unreachable("Unknown SCEV type!");
9081}
9082
9083const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
9084 return getSCEVAtScope(getSCEV(V), L);
9085}
9086
9087const SCEV *ScalarEvolution::stripInjectiveFunctions(const SCEV *S) const {
9088 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S))
9089 return stripInjectiveFunctions(ZExt->getOperand());
9090 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S))
9091 return stripInjectiveFunctions(SExt->getOperand());
9092 return S;
9093}
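// Why stripping is sound here: zext and sext are injective and map zero to
// zero, so in the "V != 0" analysis below, zext(X) == 0 holds exactly when
// X == 0, and removing the cast can expose an addrec the solvers handle.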
9094
9095/// Finds the minimum unsigned root of the following equation:
9096///
9097/// A * X = B (mod N)
9098///
9099/// where N = 2^BW and BW is the common bit width of A and B. The signedness of
9100/// A and B isn't important.
9101///
9102/// If the equation does not have a solution, SCEVCouldNotCompute is returned.
9103static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B,
9104 ScalarEvolution &SE) {
9105 uint32_t BW = A.getBitWidth();
9106 assert(BW == SE.getTypeSizeInBits(B->getType()));
9107 assert(A != 0 && "A must be non-zero.");
9108
9109 // 1. D = gcd(A, N)
9110 //
9111 // The gcd of A and N may have only one prime factor: 2. The number of
9112 // trailing zeros in A is its multiplicity.
9113 uint32_t Mult2 = A.countTrailingZeros();
9114 // D = 2^Mult2
9115
9116 // 2. Check if B is divisible by D.
9117 //
9118 // B is divisible by D if and only if the multiplicity of prime factor 2 for B
9119 // is not less than multiplicity of this prime factor for D.
9120 if (SE.GetMinTrailingZeros(B) < Mult2)
9121 return SE.getCouldNotCompute();
9122
9123 // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
9124 // modulo (N / D).
9125 //
9126 // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent
9127 // (N / D) in general. The inverse itself always fits into BW bits, though,
9128 // so we immediately truncate it.
9129 APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D
9130 APInt Mod(BW + 1, 0);
9131 Mod.setBit(BW - Mult2); // Mod = N / D
9132 APInt I = AD.multiplicativeInverse(Mod).trunc(BW);
9133
9134 // 4. Compute the minimum unsigned root of the equation:
9135 // I * (B / D) mod (N / D)
9136 // To simplify the computation, we factor out the divide by D:
9137 // (I * B mod N) / D
9138 const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2));
9139 return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D);
9140}
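// Hand-checked instance of the steps above (hypothetical numbers): solve
// 4 * X = 8 (mod 16), i.e. A = 4, B = 8, BW = 4. Then D = gcd(4, 16) = 4,
// B is divisible by D, A/D = 1 with inverse I = 1 modulo N/D = 4, and the
// minimum root is (I * B mod N) / D = (8 mod 16) / 4 = 2; indeed 4*2 == 8.
static bool solveLinEquationSketch() {
  const unsigned A = 4, B = 8, N = 16, D = 4, I = 1;
  unsigned X = (I * B % N) / D;          // the formula from step 4
  return X == 2 && (A * X) % N == B % N; // holds
}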
9141
9142/// For a given quadratic addrec, generate coefficients of the corresponding
9143/// quadratic equation, multiplied by a common value to ensure that they are
9144/// integers.
9145/// The returned value is a tuple { A, B, C, M, BitWidth }, where
9146/// Ax^2 + Bx + C is the quadratic function, M is the value that A, B and C
9147/// were multiplied by, and BitWidth is the bit width of the original addrec
9148/// coefficients.
9149/// This function returns None if the addrec coefficients are not compile-
9150/// time constants.
9151static Optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>>
9152GetQuadraticEquation(const SCEVAddRecExpr *AddRec) {
9153 assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
9154 const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
9155 const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
9156 const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
9157 LLVM_DEBUG(dbgs() << __func__ << ": analyzing quadratic addrec: "
9158 << *AddRec << '\n');
9159
9160 // We currently can only solve this if the coefficients are constants.
9161 if (!LC || !MC || !NC) {
9162 LLVM_DEBUG(dbgs() << __func__ << ": coefficients are not constant\n");
9163 return None;
9164 }
9165
9166 APInt L = LC->getAPInt();
9167 APInt M = MC->getAPInt();
9168 APInt N = NC->getAPInt();
9169 assert(!N.isNullValue() && "This is not a quadratic addrec");
9170
9171 unsigned BitWidth = LC->getAPInt().getBitWidth();
9172 unsigned NewWidth = BitWidth + 1;
9173 LLVM_DEBUG(dbgs() << __func__ << ": addrec coeff bw: "
9174 << BitWidth << '\n');
9175 // The sign-extension (as opposed to a zero-extension) here matches the
9176 // extension used in SolveQuadraticEquationWrap (with the same motivation).
9177 N = N.sext(NewWidth);
9178 M = M.sext(NewWidth);
9179 L = L.sext(NewWidth);
9180
9181 // The increments are M, M+N, M+2N, ..., so the accumulated values are
9182 // L+M, (L+M)+(M+N), (L+M)+(M+N)+(M+2N), ..., that is,
9183 // L+M, L+2M+N, L+3M+3N, ...
9184 // After n iterations the accumulated value Acc is L + nM + n(n-1)/2 N.
9185 //
9186 // The equation Acc = 0 is then
9187 // L + nM + n(n-1)/2 N = 0, or 2L + 2M n + n(n-1) N = 0.
9188 // In a quadratic form it becomes:
9189 // N n^2 + (2M-N) n + 2L = 0.
9190
9191 APInt A = N;
9192 APInt B = 2 * M - A;
9193 APInt C = 2 * L;
9194 APInt T = APInt(NewWidth, 2);
9195 LLVM_DEBUG(dbgs() << __func__ << ": equation " << A << "x^2 + " << B
9196 << "x + " << C << ", coeff bw: " << NewWidth
9197 << ", multiplied by " << T << '\n');
9198 return std::make_tuple(A, B, C, T, BitWidth);
9199}
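// Worked instance (hypothetical addrec {0,+,1,+,2}): after n iterations the
// accumulated value is L + n*M + n*(n-1)/2 * N = n*n, and the generated
// equation N*n^2 + (2M-N)*n + 2L reduces to 2*n^2, i.e. the chrec value
// multiplied by the returned factor T = 2.
static bool quadraticCoeffSketch() {
  const int L = 0, M = 1, N = 2;
  for (int n = 0; n < 8; ++n) {
    int Acc = L + n * M + n * (n - 1) / 2 * N;        // chrec value c(n)
    if (Acc != n * n || N * n * n + (2 * M - N) * n + 2 * L != 2 * Acc)
      return false;
  }
  return true; // coefficients check out
}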
9200
9201/// Helper function to compare optional APInts:
9202/// (a) if X and Y both exist, return min(X, Y),
9203/// (b) if neither X nor Y exist, return None,
9204/// (c) if exactly one of X and Y exists, return that value.
9205static Optional<APInt> MinOptional(Optional<APInt> X, Optional<APInt> Y) {
9206 if (X.hasValue() && Y.hasValue()) {
9207 unsigned W = std::max(X->getBitWidth(), Y->getBitWidth());
9208 APInt XW = X->sextOrSelf(W);
9209 APInt YW = Y->sextOrSelf(W);
9210 return XW.slt(YW) ? *X : *Y;
9211 }
9212 if (!X.hasValue() && !Y.hasValue())
9213 return None;
9214 return X.hasValue() ? *X : *Y;
9215}
9216
9217/// Helper function to truncate an optional APInt to a given BitWidth.
9218/// When solving addrec-related equations, it is preferable to return a value
9219/// that has the same bit width as the original addrec's coefficients. If the
9220/// solution fits in the original bit width, truncate it (except for i1).
9221/// Returning a value of a different bit width may inhibit some optimizations.
9222///
9223/// In general, a solution to a quadratic equation generated from an addrec
9224/// may require BW+1 bits, where BW is the bit width of the addrec's
9225/// coefficients. The reason is that the coefficients of the quadratic
9226/// equation are BW+1 bits wide (to avoid truncation when converting from
9227/// the addrec to the equation).
9228static Optional<APInt> TruncIfPossible(Optional<APInt> X, unsigned BitWidth) {
9229 if (!X.hasValue())
9230 return None;
9231 unsigned W = X->getBitWidth();
9232 if (BitWidth > 1 && BitWidth < W && X->isIntN(BitWidth))
9233 return X->trunc(BitWidth);
9234 return X;
9235}
9236
9237/// Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n
9238/// iterations. The values L, M, N are assumed to be signed, and they
9239/// should all have the same bit widths.
9240/// Find the least n >= 0 such that c(n) = 0 in the arithmetic modulo 2^BW,
9241/// where BW is the bit width of the addrec's coefficients.
9242/// If the calculated value is a BW-bit integer (for BW > 1), it will be
9243/// returned as such, otherwise the bit width of the returned value may
9244/// be greater than BW.
9245///
9246/// This function returns None if
9247/// (a) the addrec coefficients are not constant, or
9248/// (b) SolveQuadraticEquationWrap was unable to find a solution. For cases
9249/// like x^2 = 5, no integer solutions exist, in other cases an integer
9250/// solution may exist, but SolveQuadraticEquationWrap may fail to find it.
9251static Optional<APInt>
9252SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
9253 APInt A, B, C, M;
9254 unsigned BitWidth;
9255 auto T = GetQuadraticEquation(AddRec);
9256 if (!T.hasValue())
9257 return None;
9258
9259 std::tie(A, B, C, M, BitWidth) = *T;
9260 LLVM_DEBUG(dbgs() << __func__ << ": solving for unsigned overflow\n");
9261 Optional<APInt> X = APIntOps::SolveQuadraticEquationWrap(A, B, C, BitWidth+1);
9262 if (!X.hasValue())
9263 return None;
9264
9265 ConstantInt *CX = ConstantInt::get(SE.getContext(), *X);
9266 ConstantInt *V = EvaluateConstantChrecAtConstant(AddRec, CX, SE);
9267 if (!V->isZero())
9268 return None;
9269
9270 return TruncIfPossible(X, BitWidth);
9271}
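// Hand-checked instance (hypothetical addrec {6,+,-4,+,2}): here
// c(n) = 6 - 4n + n(n-1) = (n-2)(n-3), so the least root is n = 2, and it
// is an exact zero of the chrec, which is what the V->isZero() check above
// demands before a root is accepted.
static bool quadraticExactSketch() {
  for (int n = 0; n <= 4; ++n) {
    int C = 6 + n * -4 + n * (n - 1) / 2 * 2; // chrec value c(n)
    if (C != (n - 2) * (n - 3))
      return false;
  }
  return true; // and c(2) == 0 is the least root
}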
9272
9273/// Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n
9274/// iterations. The values M, N are assumed to be signed, and they
9275/// should all have the same bit widths.
9276/// Find the least n such that c(n) does not belong to the given range,
9277/// while c(n-1) does.
9278///
9279/// This function returns None if
9280/// (a) the addrec coefficients are not constant, or
9281/// (b) SolveQuadraticEquationWrap was unable to find a solution for the
9282/// bounds of the range.
9283static Optional<APInt>
9284SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec,
9285 const ConstantRange &Range, ScalarEvolution &SE) {
9286 assert(AddRec->getOperand(0)->isZero() &&
9287 "Starting value of addrec should be 0");
9288 LLVM_DEBUG(dbgs() << __func__ << ": solving boundary crossing for range "
9289 << Range << ", addrec " << *AddRec << '\n');
9290 // This case is handled in getNumIterationsInRange. Here we can assume that
9291 // we start in the range.
9292 assert(Range.contains(APInt(SE.getTypeSizeInBits(AddRec->getType()), 0)) &&
9293 "Addrec's initial value should be in range");
9294
9295 APInt A, B, C, M;
9296 unsigned BitWidth;
9297 auto T = GetQuadraticEquation(AddRec);
9298 if (!T.hasValue())
9299 return None;
9300
9301 // Be careful about the return value: there can be two reasons for not
9302 // returning an actual number. First, if no solutions to the equations
9303 // were found, and second, if the solutions don't leave the given range.
9304 // The first case means that the actual solution is "unknown", the second
9305 // means that it's known, but not valid. If the solution is unknown, we
9306 // cannot make any conclusions.
9307 // Return a pair: the optional solution and a flag indicating if the
9308 // solution was found.
9309 auto SolveForBoundary = [&](APInt Bound) -> std::pair<Optional<APInt>,bool> {
9310 // Solve for signed overflow and unsigned overflow, pick the lower
9311 // solution.
9312 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: checking boundary "
9313 << Bound << " (before multiplying by " << M << ")\n");
9314 Bound *= M; // The quadratic equation multiplier.
9315
9316 Optional<APInt> SO = None;
9317 if (BitWidth > 1) {
9318 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
9319 "signed overflow\n");
9320 SO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, BitWidth);
9321 }
9322 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
9323 "unsigned overflow\n");
9324 Optional<APInt> UO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound,
9325 BitWidth+1);
9326
9327 auto LeavesRange = [&] (const APInt &X) {
9328 ConstantInt *C0 = ConstantInt::get(SE.getContext(), X);
9329 ConstantInt *V0 = EvaluateConstantChrecAtConstant(AddRec, C0, SE);
9330 if (Range.contains(V0->getValue()))
9331 return false;
9332 // X should be at least 1, so X-1 is non-negative.
9333 ConstantInt *C1 = ConstantInt::get(SE.getContext(), X-1);
9334 ConstantInt *V1 = EvaluateConstantChrecAtConstant(AddRec, C1, SE);
9335 if (Range.contains(V1->getValue()))
9336 return true;
9337 return false;
9338 };
9339
9340 // If SolveQuadraticEquationWrap returns None, it means that there can
9341 // be a solution, but the function failed to find it. We cannot treat it
9342 // as "no solution".
9343 if (!SO.hasValue() || !UO.hasValue())
9344 return { None, false };
9345
9346 // Check the smaller value first to see if it leaves the range.
9347 // At this point, both SO and UO must have values.
9348 Optional<APInt> Min = MinOptional(SO, UO);
9349 if (LeavesRange(*Min))
9350 return { Min, true };
9351 Optional<APInt> Max = Min == SO ? UO : SO;
9352 if (LeavesRange(*Max))
9353 return { Max, true };
9354
9355 // Solutions were found, but were eliminated, hence the "true".
9356 return { None, true };
9357 };
9358
9359 std::tie(A, B, C, M, BitWidth) = *T;
9360 // Lower bound is inclusive, subtract 1 to represent the exiting value.
9361 APInt Lower = Range.getLower().sextOrSelf(A.getBitWidth()) - 1;
9362 APInt Upper = Range.getUpper().sextOrSelf(A.getBitWidth());
9363 auto SL = SolveForBoundary(Lower);
9364 auto SU = SolveForBoundary(Upper);
9365 // If any of the solutions was unknown, no meaningful conclusions can
9366 // be made.
9367 if (!SL.second || !SU.second)
9368 return None;
9369
9370 // Claim: The correct solution is not some value between Min and Max.
9371 //
9372 // Justification: Assuming that Min and Max are different values, one of
9373 // them is when the first signed overflow happens, the other is when the
9374 // first unsigned overflow happens. Crossing the range boundary is only
9375 // possible via an overflow (treating 0 as a special case of it, modeling
9376 // an overflow as crossing k*2^W for some k).
9377 //
9378 // The interesting case here is when Min was eliminated as an invalid
9379 // solution, but Max was not. The argument is that if there was another
9380 // overflow between Min and Max, it would also have been eliminated if
9381 // it was considered.
9382 //
9383 // For a given boundary, it is possible to have two overflows of the same
9384 // type (signed/unsigned) without having the other type in between: this
9385 // can happen when the vertex of the parabola is between the iterations
9386 // corresponding to the overflows. This is only possible when the two
9387 // overflows cross k*2^W for the same k. In such case, if the second one
9388 // left the range (and was the first one to do so), the first overflow
9389 // would have to enter the range, which would mean that either we had left
9390 // the range before or that we started outside of it. Both of these cases
9391 // are contradictions.
9392 //
9393 // Claim: In the case where SolveForBoundary returns None, the correct
9394 // solution is not some value between the Max for this boundary and the
9395 // Min of the other boundary.
9396 //
9397 // Justification: Assume that we had such Max_A and Min_B corresponding
9398 // to range boundaries A and B and such that Max_A < Min_B. If there was
9399 // a solution between Max_A and Min_B, it would have to be caused by an
9400 // overflow corresponding to either A or B. It cannot correspond to B,
9401 // since Min_B is the first occurrence of such an overflow. If it
9402 // corresponded to A, it would have to be either a signed or an unsigned
9403 // overflow that is larger than both eliminated overflows for A. But
9404 // between the eliminated overflows and this overflow, the values would
9405 // cover the entire value space, thus crossing the other boundary, which
9406 // is a contradiction.
9407
9408 return TruncIfPossible(MinOptional(SL.first, SU.first), BitWidth);
9409}
9410
9411ScalarEvolution::ExitLimit
9412ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
9413 bool AllowPredicates) {
9414
9415 // This is only used for loops with an "x != y" exit test. The exit condition
9416 // is now expressed as a single expression, V = x-y. So the exit test is
9417 // effectively V != 0. We know, and take advantage of, the fact that this
9418 // expression is only ever used in a compare-against-zero context.
9419
9420 SmallPtrSet<const SCEVPredicate *, 4> Predicates;
9421 // If the value is a constant
9422 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
9423 // If the value is already zero, the branch will execute zero times.
9424 if (C->getValue()->isZero()) return C;
9425 return getCouldNotCompute(); // Otherwise it will loop infinitely.
9426 }
9427
9428 const SCEVAddRecExpr *AddRec =
9429 dyn_cast<SCEVAddRecExpr>(stripInjectiveFunctions(V));
9430
9431 if (!AddRec && AllowPredicates)
9432 // Try to make this an AddRec using runtime tests, in the first X
9433 // iterations of this loop, where X is the SCEV expression found by the
9434 // algorithm below.
9435 AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates);
9436
9437 if (!AddRec || AddRec->getLoop() != L)
9438 return getCouldNotCompute();
9439
9440 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
9441 // the quadratic equation to solve it.
9442 if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
9443 // We can only use this value if the chrec ends up with an exact zero
9444 // value at this index. When solving for "X*X != 5", for example, we
9445 // should not accept a root of 2.
9446 if (auto S = SolveQuadraticAddRecExact(AddRec, *this)) {
9447 const auto *R = cast<SCEVConstant>(getConstant(S.getValue()));
9448 return ExitLimit(R, R, false, Predicates);
9449 }
9450 return getCouldNotCompute();
9451 }
9452
9453 // Otherwise we can only handle this if it is affine.
9454 if (!AddRec->isAffine())
9455 return getCouldNotCompute();
9456
9457 // If this is an affine expression, the execution count of this branch is
9458 // the minimum unsigned root of the following equation:
9459 //
9460 // Start + Step*N = 0 (mod 2^BW)
9461 //
9462 // equivalent to:
9463 //
9464 // Step*N = -Start (mod 2^BW)
9465 //
9466 // where BW is the common bit width of Start and Step.
9467
9468 // Get the initial value for the loop.
9469 const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
9470 const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());
9471
9472 // For now we handle only constant steps.
9473 //
9474 // TODO: Handle a nonconstant Step given AddRec<NUW>. If the
9475 // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap
9476 // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step.
9477 // We have not yet seen any such cases.
9478 const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
9479 if (!StepC || StepC->getValue()->isZero())
9480 return getCouldNotCompute();
9481
9482 // For positive steps (counting up until unsigned overflow):
9483 // N = -Start/Step (as unsigned)
9484 // For negative steps (counting down to zero):
9485 // N = Start/-Step
9486 // First compute the unsigned distance from zero in the direction of Step.
9487 bool CountDown = StepC->getAPInt().isNegative();
9488 const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start);
9489
9490 // Handle unitary steps, which cannot wrap around.
9491 // 1*N = -Start; -1*N = Start (mod 2^BW), so:
9492 // N = Distance (as unsigned)
9493 if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) {
9494 APInt MaxBECount = getUnsignedRangeMax(applyLoopGuards(Distance, L));
9495 APInt MaxBECountBase = getUnsignedRangeMax(Distance);
9496 if (MaxBECountBase.ult(MaxBECount))
9497 MaxBECount = MaxBECountBase;
9498
9499 // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is rotated,
9500 // we end up with a loop whose backedge-taken count is n - 1. Detect this
9501 // case, and see if we can improve the bound.
9502 //
9503 // Explicitly handling this here is necessary because getUnsignedRange
9504 // isn't context-sensitive; it doesn't know that we only care about the
9505 // range inside the loop.
9506 const SCEV *Zero = getZero(Distance->getType());
9507 const SCEV *One = getOne(Distance->getType());
9508 const SCEV *DistancePlusOne = getAddExpr(Distance, One);
9509 if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) {
9510 // If Distance + 1 doesn't overflow, we can compute the maximum distance
9511 // as "unsigned_max(Distance + 1) - 1".
9512 ConstantRange CR = getUnsignedRange(DistancePlusOne);
9513 MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1);
9514 }
9515 return ExitLimit(Distance, getConstant(MaxBECount), false, Predicates);
9516 }
9517
9518 // If the condition controls loop exit (the loop exits only if the expression
9519 // is true) and the addition is no-wrap we can use unsigned divide to
9520 // compute the backedge count. In this case, the step may not divide the
9521 // distance, but we don't care because if the condition is "missed" the loop
9522 // will have undefined behavior due to wrapping.
9523 if (ControlsExit && AddRec->hasNoSelfWrap() &&
9524 loopHasNoAbnormalExits(AddRec->getLoop())) {
9525 const SCEV *Exact =
9526 getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
9527 const SCEV *Max = getCouldNotCompute();
9528 if (Exact != getCouldNotCompute()) {
9529 APInt MaxInt = getUnsignedRangeMax(applyLoopGuards(Exact, L));
9530 APInt BaseMaxInt = getUnsignedRangeMax(Exact);
9531 if (BaseMaxInt.ult(MaxInt))
9532 Max = getConstant(BaseMaxInt);
9533 else
9534 Max = getConstant(MaxInt);
9535 }
9536 return ExitLimit(Exact, Max, false, Predicates);
9537 }
9538
9539 // Solve the general equation.
9540 const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(),
9541 getNegativeSCEV(Start), *this);
9542 const SCEV *M = E == getCouldNotCompute()
9543 ? E
9544 : getConstant(getUnsignedRangeMax(E));
9545 return ExitLimit(E, M, false, Predicates);
9546}
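// Hand-checked instance of "Step*N = -Start (mod 2^BW)" above (hypothetical
// 8-bit values): Start = 250, Step = 2. Then -Start = 6 (mod 256),
// N = 6 / 2 = 3, and 250 + 2*3 indeed wraps to 0 in 8 bits.
static bool howFarToZeroSketch() {
  unsigned char Start = 250, Step = 2;
  unsigned char N = (unsigned char)(0 - Start) / Step;
  return N == 3 && (unsigned char)(Start + Step * N) == 0;
}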
9547
9548ScalarEvolution::ExitLimit
9549ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) {
9550 // Loops that look like: while (X == 0) are very strange indeed. We don't
9551 // handle them yet except for the trivial case. This could be expanded in the
9552 // future as needed.
9553
9554 // If the value is a constant, check to see if it is known to be non-zero
9555 // already. If so, the backedge will execute zero times.
9556 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
9557 if (!C->getValue()->isZero())
9558 return getZero(C->getType());
9559 return getCouldNotCompute(); // Otherwise it will loop infinitely.
9560 }
9561
9562 // We could implement others, but I really doubt anyone writes loops like
9563 // this, and if they did, they would already be constant folded.
9564 return getCouldNotCompute();
9565}
9566
9567std::pair<const BasicBlock *, const BasicBlock *>
9568ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(const BasicBlock *BB)
9569 const {
9570 // If the block has a unique predecessor, then there is no path from the
9571 // predecessor to the block that does not go through the direct edge
9572 // from the predecessor to the block.
9573 if (const BasicBlock *Pred = BB->getSinglePredecessor())
9574 return {Pred, BB};
9575
9576 // A loop's header is defined to be a block that dominates the loop.
9577 // If the header has a unique predecessor outside the loop, it must be
9578 // a block that has exactly one successor that can reach the loop.
9579 if (const Loop *L = LI.getLoopFor(BB))
9580 return {L->getLoopPredecessor(), L->getHeader()};
9581
9582 return {nullptr, nullptr};
9583}
9584
9585/// SCEV structural equivalence is usually sufficient for testing whether two
9586/// expressions are equal, however for the purposes of looking for a condition
9587/// guarding a loop, it can be useful to be a little more general, since a
9588/// front-end may have replicated the controlling expression.
9589static bool HasSameValue(const SCEV *A, const SCEV *B) {
9590 // Quick check to see if they are the same SCEV.
9591 if (A == B) return true;
9592
9593 auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) {
9594 // Not all instructions that are "identical" compute the same value. For
9595 // instance, two distinct alloca instructions allocating the same type are
9596 // identical and do not read memory, but compute distinct values.
9597 return A->isIdenticalTo(B) && (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A));
9598 };
9599
9600 // Otherwise, if they're both SCEVUnknown, it's possible that they hold
9601 // two different instructions with the same value. Check for this case.
9602 if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
9603 if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
9604 if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
9605 if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
9606 if (ComputesEqualValues(AI, BI))
9607 return true;
9608
9609 // Otherwise assume they may have a different value.
9610 return false;
9611}
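// Hypothetical illustration of the restriction above: two textually
// identical "alloca i32" instructions satisfy isIdenticalTo() yet yield
// two distinct addresses, so only side-effect-free value computations
// (binary operators and GEPs) are treated as computing equal values.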
9612
9613bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
9614 const SCEV *&LHS, const SCEV *&RHS,
9615 unsigned Depth) {
9616 bool Changed = false;
9617 // Simplifies ICMP to trivial true or false by turning it into '0 == 0' or
9618 // '0 != 0'.
9619 auto TrivialCase = [&](bool TriviallyTrue) {
9620 LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
9621 Pred = TriviallyTrue ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
9622 return true;
9623 };
9624 // If we hit the max recursion limit bail out.
9625 if (Depth >= 3)
9626 return false;
9627
9628 // Canonicalize a constant to the right side.
9629 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
9630 // Check for both operands constant.
9631 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
9632 if (ConstantExpr::getICmp(Pred,
9633 LHSC->getValue(),
9634 RHSC->getValue())->isNullValue())
9635 return TrivialCase(false);
9636 else
9637 return TrivialCase(true);
9638 }
9639 // Otherwise swap the operands to put the constant on the right.
9640 std::swap(LHS, RHS);
9641 Pred = ICmpInst::getSwappedPredicate(Pred);
9642 Changed = true;
9643 }
9644
9645 // If we're comparing an addrec with a value which is loop-invariant in the
9646 // addrec's loop, put the addrec on the left. Also make a dominance check,
9647 // as both operands could be addrecs loop-invariant in each other's loop.
9648 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
9649 const Loop *L = AR->getLoop();
9650 if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) {
9651 std::swap(LHS, RHS);
9652 Pred = ICmpInst::getSwappedPredicate(Pred);
9653 Changed = true;
9654 }
9655 }
9656
9657 // If there's a constant operand, canonicalize comparisons with boundary
9658 // cases, and canonicalize *-or-equal comparisons to regular comparisons.
9659 if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
9660 const APInt &RA = RC->getAPInt();
9661
9662 bool SimplifiedByConstantRange = false;
9663
9664 if (!ICmpInst::isEquality(Pred)) {
9665 ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA);
9666 if (ExactCR.isFullSet())
9667 return TrivialCase(true);
9668 else if (ExactCR.isEmptySet())
9669 return TrivialCase(false);
9670
9671 APInt NewRHS;
9672 CmpInst::Predicate NewPred;
9673 if (ExactCR.getEquivalentICmp(NewPred, NewRHS) &&
9674 ICmpInst::isEquality(NewPred)) {
9675 // We were able to convert an inequality to an equality.
9676 Pred = NewPred;
9677 RHS = getConstant(NewRHS);
9678 Changed = SimplifiedByConstantRange = true;
9679 }
9680 }
9681
9682 if (!SimplifiedByConstantRange) {
9683 switch (Pred) {
9684 default:
9685 break;
9686 case ICmpInst::ICMP_EQ:
9687 case ICmpInst::ICMP_NE:
9688 // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b.
9689 if (!RA)
9690 if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS))
9691 if (const SCEVMulExpr *ME =
9692 dyn_cast<SCEVMulExpr>(AE->getOperand(0)))
9693 if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 &&
9694 ME->getOperand(0)->isAllOnesValue()) {
9695 RHS = AE->getOperand(1);
9696 LHS = ME->getOperand(1);
9697 Changed = true;
9698 }
9699 break;
9700
9701
9702 // The "Should have been caught earlier!" messages refer to the fact
9703 // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above
9704 // should have fired on the corresponding cases, and canonicalized the
9705 // check to the trivial case.
9706
9707 case ICmpInst::ICMP_UGE:
9708 assert(!RA.isMinValue() && "Should have been caught earlier!");
9709 Pred = ICmpInst::ICMP_UGT;
9710 RHS = getConstant(RA - 1);
9711 Changed = true;
9712 break;
9713 case ICmpInst::ICMP_ULE:
9714 assert(!RA.isMaxValue() && "Should have been caught earlier!");
9715 Pred = ICmpInst::ICMP_ULT;
9716 RHS = getConstant(RA + 1);
9717 Changed = true;
9718 break;
9719 case ICmpInst::ICMP_SGE:
9720 assert(!RA.isMinSignedValue() && "Should have been caught earlier!");
9721 Pred = ICmpInst::ICMP_SGT;
9722 RHS = getConstant(RA - 1);
9723 Changed = true;
9724 break;
9725 case ICmpInst::ICMP_SLE:
9726 assert(!RA.isMaxSignedValue() && "Should have been caught earlier!");
9727 Pred = ICmpInst::ICMP_SLT;
9728 RHS = getConstant(RA + 1);
9729 Changed = true;
9730 break;
9731 }
9732 }
9733 }
9734
9735 // Check for obvious equality.
9736 if (HasSameValue(LHS, RHS)) {
9737 if (ICmpInst::isTrueWhenEqual(Pred))
9738 return TrivialCase(true);
9739 if (ICmpInst::isFalseWhenEqual(Pred))
9740 return TrivialCase(false);
9741 }
9742
9743 // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by
9744 // adding or subtracting 1 from one of the operands.
9745 switch (Pred) {
9746 case ICmpInst::ICMP_SLE:
9747 if (!getSignedRangeMax(RHS).isMaxSignedValue()) {
9748 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
9749 SCEV::FlagNSW);
9750 Pred = ICmpInst::ICMP_SLT;
9751 Changed = true;
9752 } else if (!getSignedRangeMin(LHS).isMinSignedValue()) {
9753 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
9754 SCEV::FlagNSW);
9755 Pred = ICmpInst::ICMP_SLT;
9756 Changed = true;
9757 }
9758 break;
9759 case ICmpInst::ICMP_SGE:
9760 if (!getSignedRangeMin(RHS).isMinSignedValue()) {
9761 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
9762 SCEV::FlagNSW);
9763 Pred = ICmpInst::ICMP_SGT;
9764 Changed = true;
9765 } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) {
9766 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
9767 SCEV::FlagNSW);
9768 Pred = ICmpInst::ICMP_SGT;
9769 Changed = true;
9770 }
9771 break;
9772 case ICmpInst::ICMP_ULE:
9773 if (!getUnsignedRangeMax(RHS).isMaxValue()) {
9774 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
9775 SCEV::FlagNUW);
9776 Pred = ICmpInst::ICMP_ULT;
9777 Changed = true;
9778 } else if (!getUnsignedRangeMin(LHS).isMinValue()) {
9779 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS);
9780 Pred = ICmpInst::ICMP_ULT;
9781 Changed = true;
9782 }
9783 break;
9784 case ICmpInst::ICMP_UGE:
9785 if (!getUnsignedRangeMin(RHS).isMinValue()) {
9786 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS);
9787 Pred = ICmpInst::ICMP_UGT;
9788 Changed = true;
9789 } else if (!getUnsignedRangeMax(LHS).isMaxValue()) {
9790 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
9791 SCEV::FlagNUW);
9792 Pred = ICmpInst::ICMP_UGT;
9793 Changed = true;
9794 }
9795 break;
9796 default:
9797 break;
9798 }
9799
9800 // TODO: More simplifications are possible here.
9801
9802 // Recursively simplify until we either hit a recursion limit or nothing
9803 // changes.
9804 if (Changed)
9805 return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1);
9806
9807 return Changed;
9808}
9809
9810bool ScalarEvolution::isKnownNegative(const SCEV *S) {
9811 return getSignedRangeMax(S).isNegative();
9812}
9813
9814bool ScalarEvolution::isKnownPositive(const SCEV *S) {
9815 return getSignedRangeMin(S).isStrictlyPositive();
9816}
9817
9818bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
9819 return !getSignedRangeMin(S).isNegative();
9820}
9821
9822bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
9823 return !getSignedRangeMax(S).isStrictlyPositive();
9824}
9825
9826bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
9827 return getUnsignedRangeMin(S) != 0;
9828}
9829
9830std::pair<const SCEV *, const SCEV *>
9831ScalarEvolution::SplitIntoInitAndPostInc(const Loop *L, const SCEV *S) {
9832 // Compute SCEV on entry of loop L.
9833 const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *this);
9834 if (Start == getCouldNotCompute())
9835 return { Start, Start };
9836 // Compute post increment SCEV for loop L.
9837 const SCEV *PostInc = SCEVPostIncRewriter::rewrite(S, L, *this);
9838 assert(PostInc != getCouldNotCompute() && "Unexpected could not compute")(static_cast<void> (0));
9839 return { Start, PostInc };
9840}
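// Illustrative (names hypothetical): for S = {A,+,1}<nsw> in L this yields
// { A, {A+1,+,1} } -- the value of S on entry to L and its value after the
// current iteration's increment. If the init rewrite fails, both components
// are getCouldNotCompute().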
9841
9842bool ScalarEvolution::isKnownViaInduction(ICmpInst::Predicate Pred,
9843 const SCEV *LHS, const SCEV *RHS) {
9844 // First collect all loops.
9845 SmallPtrSet<const Loop *, 8> LoopsUsed;
9846 getUsedLoops(LHS, LoopsUsed);
9847 getUsedLoops(RHS, LoopsUsed);
9848
9849 if (LoopsUsed.empty())
9850 return false;
9851
9852 // Domination relationship must be a linear order on collected loops.
9853#ifndef NDEBUG
9854 for (auto *L1 : LoopsUsed)
9855 for (auto *L2 : LoopsUsed)
9856 assert((DT.dominates(L1->getHeader(), L2->getHeader()) ||(static_cast<void> (0))
9857 DT.dominates(L2->getHeader(), L1->getHeader())) &&(static_cast<void> (0))
9858 "Domination relationship is not a linear order")(static_cast<void> (0));
9859#endif
9860
9861 const Loop *MDL =
9862 *std::max_element(LoopsUsed.begin(), LoopsUsed.end(),
9863 [&](const Loop *L1, const Loop *L2) {
9864 return DT.properlyDominates(L1->getHeader(), L2->getHeader());
9865 });
9866
9867 // Get init and post increment value for LHS.
9868 auto SplitLHS = SplitIntoInitAndPostInc(MDL, LHS);
9869 // If LHS contains an unknown non-invariant SCEV then bail out.
9870 if (SplitLHS.first == getCouldNotCompute())
9871 return false;
9872 assert (SplitLHS.second != getCouldNotCompute() && "Unexpected CNC")(static_cast<void> (0));
9873 // Get init and post increment value for RHS.
9874 auto SplitRHS = SplitIntoInitAndPostInc(MDL, RHS);
9875 // If RHS contains an unknown non-invariant SCEV then bail out.
9876 if (SplitRHS.first == getCouldNotCompute())
9877 return false;
9878 assert (SplitRHS.second != getCouldNotCompute() && "Unexpected CNC")(static_cast<void> (0));
9879 // It is possible that init SCEV contains an invariant load but it does
9880 // not dominate MDL and is not available at MDL loop entry, so we should
9881 // check it here.
9882 if (!isAvailableAtLoopEntry(SplitLHS.first, MDL) ||
9883 !isAvailableAtLoopEntry(SplitRHS.first, MDL))
9884 return false;
9885
9886 // The backedge guard check seems to be faster than the entry one, so in
9887 // some cases it can short-circuit and speed up the whole estimation.
9888 return isLoopBackedgeGuardedByCond(MDL, Pred, SplitLHS.second,
9889 SplitRHS.second) &&
9890 isLoopEntryGuardedByCond(MDL, Pred, SplitLHS.first, SplitRHS.first);
9891}
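// Sketch of the proof shape (illustrative): for LHS = {0,+,1}<nuw> in MDL
// and an MDL-invariant RHS = %n, SplitIntoInitAndPostInc yields (0, {1,+,1});
// the entry guard must establish "0 Pred %n" and the backedge guard
// "{1,+,1} Pred %n", which together cover every iteration.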
9892
9893bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
9894 const SCEV *LHS, const SCEV *RHS) {
9895 // Canonicalize the inputs first.
9896 (void)SimplifyICmpOperands(Pred, LHS, RHS);
9897
9898 if (isKnownViaInduction(Pred, LHS, RHS))
9899 return true;
9900
9901 if (isKnownPredicateViaSplitting(Pred, LHS, RHS))
9902 return true;
9903
9904 // Otherwise see what can be done with some simple reasoning.
9905 return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS);
9906}
9907
9908Optional<bool> ScalarEvolution::evaluatePredicate(ICmpInst::Predicate Pred,
9909 const SCEV *LHS,
9910 const SCEV *RHS) {
9911 if (isKnownPredicate(Pred, LHS, RHS))
9912 return true;
9913 else if (isKnownPredicate(ICmpInst::getInversePredicate(Pred), LHS, RHS))
9914 return false;
9915 return None;
9916}
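// Usage note: the result is tri-state. E.g. for LHS = {0,+,1}<nuw> and
// RHS = 0, ICMP_UGE evaluates to true and ICMP_ULT to false, while a
// predicate that SCEV can prove in neither direction yields None.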
9917
9918bool ScalarEvolution::isKnownPredicateAt(ICmpInst::Predicate Pred,
9919 const SCEV *LHS, const SCEV *RHS,
9920 const Instruction *Context) {
9921 // TODO: Analyze guards and assumes from Context's block.
9922 return isKnownPredicate(Pred, LHS, RHS) ||
9923 isBasicBlockEntryGuardedByCond(Context->getParent(), Pred, LHS, RHS);
9924}
9925
9926Optional<bool>
9927ScalarEvolution::evaluatePredicateAt(ICmpInst::Predicate Pred, const SCEV *LHS,
9928 const SCEV *RHS,
9929 const Instruction *Context) {
9930 Optional<bool> KnownWithoutContext = evaluatePredicate(Pred, LHS, RHS);
9931 if (KnownWithoutContext)
[1] Calling 'Optional::operator bool'
[9] Returning from 'Optional::operator bool'
[10] Taking false branch
9932 return KnownWithoutContext;
9933
9934 if (isBasicBlockEntryGuardedByCond(Context->getParent(), Pred, LHS, RHS))
[11] Passing value via 1st parameter 'BB'
[12] Calling 'ScalarEvolution::isBasicBlockEntryGuardedByCond'
9935 return true;
9936 else if (isBasicBlockEntryGuardedByCond(Context->getParent(),
9937 ICmpInst::getInversePredicate(Pred),
9938 LHS, RHS))
9939 return false;
9940 return None;
9941}
9942
9943bool ScalarEvolution::isKnownOnEveryIteration(ICmpInst::Predicate Pred,
9944 const SCEVAddRecExpr *LHS,
9945 const SCEV *RHS) {
9946 const Loop *L = LHS->getLoop();
9947 return isLoopEntryGuardedByCond(L, Pred, LHS->getStart(), RHS) &&
9948 isLoopBackedgeGuardedByCond(L, Pred, LHS->getPostIncExpr(*this), RHS);
9949}
9950
9951Optional<ScalarEvolution::MonotonicPredicateType>
9952ScalarEvolution::getMonotonicPredicateType(const SCEVAddRecExpr *LHS,
9953 ICmpInst::Predicate Pred) {
9954 auto Result = getMonotonicPredicateTypeImpl(LHS, Pred);
9955
9956#ifndef NDEBUG
9957 // Verify an invariant: inverting the predicate should turn a monotonically
9958 // increasing change to a monotonically decreasing one, and vice versa.
9959 if (Result) {
9960 auto ResultSwapped =
9961 getMonotonicPredicateTypeImpl(LHS, ICmpInst::getSwappedPredicate(Pred));
9962
9963 assert(ResultSwapped.hasValue() && "should be able to analyze both!")(static_cast<void> (0));
9964 assert(ResultSwapped.getValue() != Result.getValue() &&(static_cast<void> (0))
9965 "monotonicity should flip as we flip the predicate")(static_cast<void> (0));
9966 }
9967#endif
9968
9969 return Result;
9970}
9971
9972Optional<ScalarEvolution::MonotonicPredicateType>
9973ScalarEvolution::getMonotonicPredicateTypeImpl(const SCEVAddRecExpr *LHS,
9974 ICmpInst::Predicate Pred) {
9975 // A zero step value for LHS means the induction variable is essentially a
9976 // loop invariant value. We don't really depend on the predicate actually
9977 // flipping from false to true (for increasing predicates, and the other way
9978 // around for decreasing predicates), all we care about is that *if* the
9979 // around for decreasing predicates); all we care about is that *if* the
9980 //
9981 // A zero step value in itself is not very useful, but there may be places
9982 // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be
9983 // as general as possible.
9984
9985 // Only handle LE/LT/GE/GT predicates.
9986 if (!ICmpInst::isRelational(Pred))
9987 return None;
9988
9989 bool IsGreater = ICmpInst::isGE(Pred) || ICmpInst::isGT(Pred);
9990 assert((IsGreater || ICmpInst::isLE(Pred) || ICmpInst::isLT(Pred)) &&(static_cast<void> (0))
9991 "Should be greater or less!")(static_cast<void> (0));
9992
9993 // Check that AR does not wrap.
9994 if (ICmpInst::isUnsigned(Pred)) {
9995 if (!LHS->hasNoUnsignedWrap())
9996 return None;
9997 return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing;
9998 } else {
9999 assert(ICmpInst::isSigned(Pred) &&(static_cast<void> (0))
10000 "Relational predicate is either signed or unsigned!")(static_cast<void> (0));
10001 if (!LHS->hasNoSignedWrap())
10002 return None;
10003
10004 const SCEV *Step = LHS->getStepRecurrence(*this);
10005
10006 if (isKnownNonNegative(Step))
10007 return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing;
10008
10009 if (isKnownNonPositive(Step))
10010 return !IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing;
10011
10012 return None;
10013 }
10014}
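// Worked example: for LHS = {0,+,1}<nsw> and Pred = ICMP_SLT, IsGreater is
// false and the step is known non-negative, so the result is
// MonotonicallyDecreasing: "{0,+,1} s< N" can only change from true to
// false as the loop iterates. Swapping the predicate to ICMP_SGT flips the
// answer, which is the invariant verified in getMonotonicPredicateType.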
10015
10016Optional<ScalarEvolution::LoopInvariantPredicate>
10017ScalarEvolution::getLoopInvariantPredicate(ICmpInst::Predicate Pred,
10018 const SCEV *LHS, const SCEV *RHS,
10019 const Loop *L) {
10020
10021 // If there is a loop-invariant, force it into the RHS, otherwise bail out.
10022 if (!isLoopInvariant(RHS, L)) {
10023 if (!isLoopInvariant(LHS, L))
10024 return None;
10025
10026 std::swap(LHS, RHS);
10027 Pred = ICmpInst::getSwappedPredicate(Pred);
10028 }
10029
10030 const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS);
10031 if (!ArLHS || ArLHS->getLoop() != L)
10032 return None;
10033
10034 auto MonotonicType = getMonotonicPredicateType(ArLHS, Pred);
10035 if (!MonotonicType)
10036 return None;
10037 // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to
10038 // true as the loop iterates, and the backedge is control dependent on
10039 // "ArLHS `Pred` RHS" == true then we can reason as follows:
10040 //
10041 // * if the predicate was false in the first iteration then the predicate
10042 // is never evaluated again, since the loop exits without taking the
10043 // backedge.
10044 // * if the predicate was true in the first iteration then it will
10045 // continue to be true for all future iterations since it is
10046 // monotonically increasing.
10047 //
10048 // For both the above possibilities, we can replace the loop varying
10049 // predicate with its value on the first iteration of the loop (which is
10050 // loop invariant).
10051 //
10052 // A similar reasoning applies for a monotonically decreasing predicate, by
10053 // replacing true with false and false with true in the above two bullets.
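// Concrete instance (illustrative): with ArLHS = {0,+,1}<nsw>, Pred =
// ICMP_SLT and RHS = %n, the predicate "iv s< %n" is monotonically
// decreasing, so P below becomes ICMP_SGE; if the backedge is guarded by
// that condition, the loop-varying check is replaced by the loop-invariant
// "0 s< %n" on the start value.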
10054 bool Increasing = *MonotonicType == ScalarEvolution::MonotonicallyIncreasing;
10055 auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred);
10056
10057 if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS))
10058 return None;
10059
10060 return ScalarEvolution::LoopInvariantPredicate(Pred, ArLHS->getStart(), RHS);
10061}
10062
10063Optional<ScalarEvolution::LoopInvariantPredicate>
10064ScalarEvolution::getLoopInvariantExitCondDuringFirstIterations(
10065 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L,
10066 const Instruction *Context, const SCEV *MaxIter) {
10067 // Try to prove the following set of facts:
10068 // - The predicate is monotonic in the iteration space.
10069 // - If the check does not fail on the 1st iteration:
10070 // - No overflow will happen during first MaxIter iterations;
10071 // - It will not fail on the MaxIter'th iteration.
10072 // If the check does fail on the 1st iteration, we leave the loop and no
10073 // other checks matter.
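// Illustrative instance: with AR = {0,+,1}, MaxIter = %n and Pred =
// ICMP_ULT against an invariant %len, the code below checks that the IV
// value at iteration %n (i.e. %n itself) is still u< %len under the
// backedge guard, and that "0 u<= %n" holds at Context; if so, the exit
// check can be replaced by the invariant "0 u< %len" during the first %n
// iterations.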
10074
10075 // If there is a loop-invariant, force it into the RHS, otherwise bail out.
10076 if (!isLoopInvariant(RHS, L)) {
10077 if (!isLoopInvariant(LHS, L))
10078 return None;
10079
10080 std::swap(LHS, RHS);
10081 Pred = ICmpInst::getSwappedPredicate(Pred);
10082 }
10083
10084 auto *AR = dyn_cast<SCEVAddRecExpr>(LHS);
10085 if (!AR || AR->getLoop() != L)
10086 return None;
10087
10088 // The predicate must be relational (i.e. <, <=, >=, >).
10089 if (!ICmpInst::isRelational(Pred))
10090 return None;
10091
10092 // TODO: Support steps other than +/- 1.
10093 const SCEV *Step = AR->getStepRecurrence(*this);
10094 auto *One = getOne(Step->getType());
10095 auto *MinusOne = getNegativeSCEV(One);
10096 if (Step != One && Step != MinusOne)
10097 return None;
10098
10099 // Type mismatch here means that MaxIter is potentially larger than max
10100 // unsigned value in start type, which means we cannot prove no wrap for the
10101 // indvar.
10102 if (AR->getType() != MaxIter->getType())
10103 return None;
10104
10105 // Value of IV on suggested last iteration.
10106 const SCEV *Last = AR->evaluateAtIteration(MaxIter, *this);
10107 // Does it still meet the requirement?
10108 if (!isLoopBackedgeGuardedByCond(L, Pred, Last, RHS))
10109 return None;
10110 // Because step is +/- 1 and MaxIter has same type as Start (i.e. it does
10111 // not exceed max unsigned value of this type), this effectively proves
10112 // that there is no wrap during the iteration. To prove that there is no
10113 // signed/unsigned wrap, we need to check that
10114 // Start <= Last for step = 1 or Start >= Last for step = -1.
10115 ICmpInst::Predicate NoOverflowPred =
10116 CmpInst::isSigned(Pred) ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
10117 if (Step == MinusOne)
10118 NoOverflowPred = CmpInst::getSwappedPredicate(NoOverflowPred);
10119 const SCEV *Start = AR->getStart();
10120 if (!isKnownPredicateAt(NoOverflowPred, Start, Last, Context))
10121 return None;
10122
10123 // Everything is fine.
10124 return ScalarEvolution::LoopInvariantPredicate(Pred, Start, RHS);
10125}
10126
10127bool ScalarEvolution::isKnownPredicateViaConstantRanges(
10128 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) {
10129 if (HasSameValue(LHS, RHS))
10130 return ICmpInst::isTrueWhenEqual(Pred);
10131
10132 // This code is split out from isKnownPredicate because it is called from
10133 // within isLoopEntryGuardedByCond.
10134
10135 auto CheckRanges = [&](const ConstantRange &RangeLHS,
10136 const ConstantRange &RangeRHS) {
10137 return RangeLHS.icmp(Pred, RangeRHS);
10138 };
10139
10140 // The check at the top of the function catches the case where the values are
10141 // known to be equal.
10142 if (Pred == CmpInst::ICMP_EQ)
10143 return false;
10144
10145 if (Pred == CmpInst::ICMP_NE) {
10146 if (CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) ||
10147 CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)))
10148 return true;
10149 auto *Diff = getMinusSCEV(LHS, RHS);
10150 return !isa<SCEVCouldNotCompute>(Diff) && isKnownNonZero(Diff);
10151 }
10152
10153 if (CmpInst::isSigned(Pred))
10154 return CheckRanges(getSignedRange(LHS), getSignedRange(RHS));
10155
10156 return CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS));
10157}
10158
10159bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred,
10160 const SCEV *LHS,
10161 const SCEV *RHS) {
10162 // Match X to (A + C1)<ExpectedFlags> and Y to (A + C2)<ExpectedFlags>, where
10163 // C1 and C2 are constant integers. If either X or Y are not add expressions,
10164 // consider them as X + 0 and Y + 0 respectively. C1 and C2 are returned via
10165 // OutC1 and OutC2.
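// E.g. X = (%a + 5)<nsw> and Y = (%a + 7)<nsw> match with OutC1 = 5 and
// OutC2 = 7; a bare Y = %a is treated as (%a + 0) and assumed to carry the
// expected flags.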
10166 auto MatchBinaryAddToConst = [this](const SCEV *X, const SCEV *Y,
10167 APInt &OutC1, APInt &OutC2,
10168 SCEV::NoWrapFlags ExpectedFlags) {
10169 const SCEV *XNonConstOp, *XConstOp;
10170 const SCEV *YNonConstOp, *YConstOp;
10171 SCEV::NoWrapFlags XFlagsPresent;
10172 SCEV::NoWrapFlags YFlagsPresent;
10173
10174 if (!splitBinaryAdd(X, XConstOp, XNonConstOp, XFlagsPresent)) {
10175 XConstOp = getZero(X->getType());
10176 XNonConstOp = X;
10177 XFlagsPresent = ExpectedFlags;
10178 }
10179 if (!isa<SCEVConstant>(XConstOp) ||
10180 (XFlagsPresent & ExpectedFlags) != ExpectedFlags)
10181 return false;
10182
10183 if (!splitBinaryAdd(Y, YConstOp, YNonConstOp, YFlagsPresent)) {
10184 YConstOp = getZero(Y->getType());
10185 YNonConstOp = Y;
10186 YFlagsPresent = ExpectedFlags;
10187 }
10188
10189 if (!isa<SCEVConstant>(YConstOp) ||
10190 (YFlagsPresent & ExpectedFlags) != ExpectedFlags)
10191 return false;
10192
10193 if (YNonConstOp != XNonConstOp)
10194 return false;
10195
10196 OutC1 = cast<SCEVConstant>(XConstOp)->getAPInt();
10197 OutC2 = cast<SCEVConstant>(YConstOp)->getAPInt();
10198
10199 return true;
10200 };
10201
10202 APInt C1;
10203 APInt C2;
10204
10205 switch (Pred) {
10206 default:
10207 break;
10208
10209 case ICmpInst::ICMP_SGE:
10210 std::swap(LHS, RHS);
10211 LLVM_FALLTHROUGH[[gnu::fallthrough]];
10212 case ICmpInst::ICMP_SLE:
10213 // (X + C1)<nsw> s<= (X + C2)<nsw> if C1 s<= C2.
10214 if (MatchBinaryAddToConst(LHS, RHS, C1, C2, SCEV::FlagNSW) && C1.sle(C2))
10215 return true;
10216
10217 break;
10218
10219 case ICmpInst::ICMP_SGT:
10220 std::swap(LHS, RHS);
10221 LLVM_FALLTHROUGH[[gnu::fallthrough]];
10222 case ICmpInst::ICMP_SLT:
10223 // (X + C1)<nsw> s< (X + C2)<nsw> if C1 s< C2.
10224 if (MatchBinaryAddToConst(LHS, RHS, C1, C2, SCEV::FlagNSW) && C1.slt(C2))
10225 return true;
10226
10227 break;
10228
10229 case ICmpInst::ICMP_UGE:
10230 std::swap(LHS, RHS);
10231 LLVM_FALLTHROUGH[[gnu::fallthrough]];
10232 case ICmpInst::ICMP_ULE:
10233 // (X + C1)<nuw> u<= (X + C2)<nuw> for C1 u<= C2.
10234 if (MatchBinaryAddToConst(RHS, LHS, C2, C1, SCEV::FlagNUW) && C1.ule(C2))
10235 return true;
10236
10237 break;
10238
10239 case ICmpInst::ICMP_UGT:
10240 std::swap(LHS, RHS);
10241 LLVM_FALLTHROUGH[[gnu::fallthrough]];
10242 case ICmpInst::ICMP_ULT:
10243 // (X + C1)<nuw> u< (X + C2)<nuw> if C1 u< C2.
10244 if (MatchBinaryAddToConst(RHS, LHS, C2, C1, SCEV::FlagNUW) && C1.ult(C2))
10245 return true;
10246 break;
10247 }
10248
10249 return false;
10250}
10251
10252bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred,
10253 const SCEV *LHS,
10254 const SCEV *RHS) {
10255 if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate)
10256 return false;
10257
10258 // Allowing an arbitrary number of activations of isKnownPredicateViaSplitting
10259 // on the stack can result in exponential time complexity.
10260 SaveAndRestore<bool> Restore(ProvingSplitPredicate, true);
10261
10262 // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L
10263 //
10264 // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use
10265 // isKnownPredicate. isKnownPredicate is more powerful, but also more
10266 // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the
10267 // interesting cases seen in practice. We can consider "upgrading" L >= 0 to
10268 // use isKnownPredicate later if needed.
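// Example instance of the rule (illustrative): to show %i u< %len where
// %len is known non-negative, it suffices that %i s>= 0 and %i s< %len,
// because the unsigned and signed orderings agree on [0, %len).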
10269 return isKnownNonNegative(RHS) &&
10270 isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) &&
10271 isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS);
10272}
10273
10274bool ScalarEvolution::isImpliedViaGuard(const BasicBlock *BB,
10275 ICmpInst::Predicate Pred,
10276 const SCEV *LHS, const SCEV *RHS) {
10277 // No need to even try if we know the module has no guards.
10278 if (!HasGuards)
10279 return false;
10280
10281 return any_of(*BB, [&](const Instruction &I) {
10282 using namespace llvm::PatternMatch;
10283
10284 Value *Condition;
10285 return match(&I, m_Intrinsic<Intrinsic::experimental_guard>(
10286 m_Value(Condition))) &&
10287 isImpliedCond(Pred, LHS, RHS, Condition, false);
10288 });
10289}
10290
10291/// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
10292/// protected by a conditional between LHS and RHS. This is used to
10293/// eliminate casts.
10294bool
10295ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
10296 ICmpInst::Predicate Pred,
10297 const SCEV *LHS, const SCEV *RHS) {
10298 // Interpret a null as meaning no loop, where there is obviously no guard
10299 // (interprocedural conditions notwithstanding).
10300 if (!L) return true;
10301
10302 if (VerifyIR)
10303 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()) &&(static_cast<void> (0))
10304 "This cannot be done on broken IR!")(static_cast<void> (0));
10305
10306
10307 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
10308 return true;
10309
10310 BasicBlock *Latch = L->getLoopLatch();
10311 if (!Latch)
10312 return false;
10313
10314 BranchInst *LoopContinuePredicate =
10315 dyn_cast<BranchInst>(Latch->getTerminator());
10316 if (LoopContinuePredicate && LoopContinuePredicate->isConditional() &&
10317 isImpliedCond(Pred, LHS, RHS,
10318 LoopContinuePredicate->getCondition(),
10319 LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
10320 return true;
10321
10322 // We don't want more than one activation of the following loops on the stack
10323 // -- that can lead to O(n!) time complexity.
10324 if (WalkingBEDominatingConds)
10325 return false;
10326
10327 SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true);
10328
10329 // See if we can exploit a trip count to prove the predicate.
10330 const auto &BETakenInfo = getBackedgeTakenInfo(L);
10331 const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this);
10332 if (LatchBECount != getCouldNotCompute()) {
10333 // We know that Latch branches back to the loop header exactly
10334 // LatchBECount times. This means the backedge condition at Latch is
10335 // equivalent to "{0,+,1} u< LatchBECount".
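// E.g. for a loop whose latch is known to branch back exactly 7 times,
// this materializes LoopCounter = {0,+,1}<nuw> and asks whether the
// backedge condition "{0,+,1} u< 7" implies "LHS Pred RHS".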
10336 Type *Ty = LatchBECount->getType();
10337 auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW);
10338 const SCEV *LoopCounter =
10339 getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags);
10340 if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter,
10341 LatchBECount))
10342 return true;
10343 }
10344
10345 // Check conditions due to any @llvm.assume intrinsics.
10346 for (auto &AssumeVH : AC.assumptions()) {
10347 if (!AssumeVH)
10348 continue;
10349 auto *CI = cast<CallInst>(AssumeVH);
10350 if (!DT.dominates(CI, Latch->getTerminator()))
10351 continue;
10352
10353 if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
10354 return true;
10355 }
10356
10357 // If the loop is not reachable from the entry block, we risk running into an
10358 // infinite loop as we walk up into the dom tree. These loops do not matter
10359 // anyway, so we just return a conservative answer when we see them.
10360 if (!DT.isReachableFromEntry(L->getHeader()))
10361 return false;
10362
10363 if (isImpliedViaGuard(Latch, Pred, LHS, RHS))
10364 return true;
10365
10366 for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()];
10367 DTN != HeaderDTN; DTN = DTN->getIDom()) {
10368 assert(DTN && "should reach the loop header before reaching the root!")(static_cast<void> (0));
10369
10370 BasicBlock *BB = DTN->getBlock();
10371 if (isImpliedViaGuard(BB, Pred, LHS, RHS))
10372 return true;
10373
10374 BasicBlock *PBB = BB->getSinglePredecessor();
10375 if (!PBB)
10376 continue;
10377
10378 BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator());
10379 if (!ContinuePredicate || !ContinuePredicate->isConditional())
10380 continue;
10381
10382 Value *Condition = ContinuePredicate->getCondition();
10383
10384 // If we have an edge `E` within the loop body that dominates the only
10385 // latch, the condition guarding `E` also guards the backedge. This
10386 // reasoning works only for loops with a single latch.
10387
10388 BasicBlockEdge DominatingEdge(PBB, BB);
10389 if (DominatingEdge.isSingleEdge()) {
10390 // We're constructively (and conservatively) enumerating edges within the
10391 // loop body that dominate the latch. The dominator tree better agree
10392 // with us on this:
10393 assert(DT.dominates(DominatingEdge, Latch) && "should be!")(static_cast<void> (0));
10394
10395 if (isImpliedCond(Pred, LHS, RHS, Condition,
10396 BB != ContinuePredicate->getSuccessor(0)))
10397 return true;
10398 }
10399 }
10400
10401 return false;
10402}
10403
10404bool ScalarEvolution::isBasicBlockEntryGuardedByCond(const BasicBlock *BB,
10405 ICmpInst::Predicate Pred,
10406 const SCEV *LHS,
10407 const SCEV *RHS) {
10408 if (VerifyIR)
[13] Assuming the condition is false
[14] Taking false branch
10409 assert(!verifyFunction(*BB->getParent(), &dbgs()) &&(static_cast<void> (0))
10410 "This cannot be done on broken IR!")(static_cast<void> (0));
10411
10412 // If we cannot prove strict comparison (e.g. a > b), maybe we can prove
10413 // the facts (a >= b && a != b) separately. A typical situation is when the
10414 // non-strict comparison is known from ranges and non-equality is known from
10415 // dominating predicates. If we are proving strict comparison, we always try
10416 // to prove non-equality and non-strict comparison separately.
10417 auto NonStrictPredicate = ICmpInst::getNonStrictPredicate(Pred);
10418 const bool ProvingStrictComparison = (Pred != NonStrictPredicate);
[15] Assuming 'Pred' is equal to 'NonStrictPredicate'
10419 bool ProvedNonStrictComparison = false;
10420 bool ProvedNonEquality = false;
10421
10422 auto SplitAndProve =
10423 [&](std::function<bool(ICmpInst::Predicate)> Fn) -> bool {
10424 if (!ProvedNonStrictComparison)
10425 ProvedNonStrictComparison = Fn(NonStrictPredicate);
10426 if (!ProvedNonEquality)
10427 ProvedNonEquality = Fn(ICmpInst::ICMP_NE);
10428 if (ProvedNonStrictComparison && ProvedNonEquality)
10429 return true;
10430 return false;
10431 };
10432
10433 if (ProvingStrictComparison) {
[15.1] 'ProvingStrictComparison' is false
[16] Taking false branch
10434 auto ProofFn = [&](ICmpInst::Predicate P) {
10435 return isKnownViaNonRecursiveReasoning(P, LHS, RHS);
10436 };
10437 if (SplitAndProve(ProofFn))
10438 return true;
10439 }
10440
10441 // Try to prove (Pred, LHS, RHS) using isImpliedViaGuard.
10442 auto ProveViaGuard = [&](const BasicBlock *Block) {
10443 if (isImpliedViaGuard(Block, Pred, LHS, RHS))
10444 return true;
10445 if (ProvingStrictComparison) {
10446 auto ProofFn = [&](ICmpInst::Predicate P) {
10447 return isImpliedViaGuard(Block, P, LHS, RHS);
10448 };
10449 if (SplitAndProve(ProofFn))
10450 return true;
10451 }
10452 return false;
10453 };
10454
10455 // Try to prove (Pred, LHS, RHS) using isImpliedCond.
10456 auto ProveViaCond = [&](const Value *Condition, bool Inverse) {
10457 const Instruction *Context = &BB->front();
[26] Called C++ object pointer is null
10458 if (isImpliedCond(Pred, LHS, RHS, Condition, Inverse, Context))
10459 return true;
10460 if (ProvingStrictComparison) {
10461 auto ProofFn = [&](ICmpInst::Predicate P) {
10462 return isImpliedCond(P, LHS, RHS, Condition, Inverse, Context);
10463 };
10464 if (SplitAndProve(ProofFn))
10465 return true;
10466 }
10467 return false;
10468 };
10469
10470 // Starting at the block's predecessor, climb up the predecessor chain as
10471 // long as we can find predecessors that have unique successors leading to
10472 // the original block.
10473 const Loop *ContainingLoop = LI.getLoopFor(BB);
10474 const BasicBlock *PredBB;
10475 if (ContainingLoop && ContainingLoop->getHeader() == BB)
[17] Assuming 'ContainingLoop' is non-null
[18] Assuming the condition is true
[19] Taking true branch
10476 PredBB = ContainingLoop->getLoopPredecessor();
10477 else
10478 PredBB = BB->getSinglePredecessor();
10479 for (std::pair<const BasicBlock *, const BasicBlock *> Pair(PredBB, BB);
[20] Loop condition is true. Entering loop body
10480 Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
10481 if (ProveViaGuard(Pair.first))
[21] Taking false branch
10482 return true;
10483
10484 const BranchInst *LoopEntryPredicate =
10485 dyn_cast<BranchInst>(Pair.first->getTerminator());
[22] Assuming the object is a 'BranchInst'
10486 if (!LoopEntryPredicate ||
[22.1] 'LoopEntryPredicate' is non-null
[23] Taking false branch
10487 LoopEntryPredicate->isUnconditional())
10488 continue;
10489
10490 if (ProveViaCond(LoopEntryPredicate->getCondition(),
[25] Calling 'operator()'
10491 LoopEntryPredicate->getSuccessor(0) != Pair.second))
[24] Assuming pointer value is null
10492 return true;
10493 }
10494
10495 // Check conditions due to any @llvm.assume intrinsics.
10496 for (auto &AssumeVH : AC.assumptions()) {
10497 if (!AssumeVH)
10498 continue;
10499 auto *CI = cast<CallInst>(AssumeVH);
10500 if (!DT.dominates(CI, BB))
10501 continue;
10502
10503 if (ProveViaCond(CI->getArgOperand(0), false))
10504 return true;
10505 }
10506
10507 return false;
10508}
10509
10510bool ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
10511 ICmpInst::Predicate Pred,
10512 const SCEV *LHS,
10513 const SCEV *RHS) {
10514 // Interpret a null as meaning no loop, where there is obviously no guard
10515 // (interprocedural conditions notwithstanding).
10516 if (!L)
10517 return false;
10518
10519 // Both LHS and RHS must be available at loop entry.
10520 assert(isAvailableAtLoopEntry(LHS, L) &&(static_cast<void> (0))
10521 "LHS is not available at Loop Entry")(static_cast<void> (0));
10522 assert(isAvailableAtLoopEntry(RHS, L) &&(static_cast<void> (0))
10523 "RHS is not available at Loop Entry")(static_cast<void> (0));
10524
10525 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
10526 return true;
10527
10528 return isBasicBlockEntryGuardedByCond(L->getHeader(), Pred, LHS, RHS);
10529}
10530
10531bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
10532 const SCEV *RHS,
10533 const Value *FoundCondValue, bool Inverse,
10534 const Instruction *Context) {
10535 // False conditions implies anything. Do not bother analyzing it further.
10536 if (FoundCondValue ==
10537 ConstantInt::getBool(FoundCondValue->getContext(), Inverse))
10538 return true;
10539
10540 if (!PendingLoopPredicates.insert(FoundCondValue).second)
10541 return false;
10542
10543 auto ClearOnExit =
10544 make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); });
10545
10546 // Recursively handle And and Or conditions.
10547 const Value *Op0, *Op1;
10548 if (match(FoundCondValue, m_LogicalAnd(m_Value(Op0), m_Value(Op1)))) {
10549 if (!Inverse)
10550 return isImpliedCond(Pred, LHS, RHS, Op0, Inverse, Context) ||
10551 isImpliedCond(Pred, LHS, RHS, Op1, Inverse, Context);
10552 } else if (match(FoundCondValue, m_LogicalOr(m_Value(Op0), m_Value(Op1)))) {
10553 if (Inverse)
10554 return isImpliedCond(Pred, LHS, RHS, Op0, Inverse, Context) ||
10555 isImpliedCond(Pred, LHS, RHS, Op1, Inverse, Context);
10556 }
10557
10558 const ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
10559 if (!ICI) return false;
10560
10561 // Now that we have found a conditional branch that dominates the loop or
10562 // controls the loop latch, check whether it is the comparison we are looking for.
10563 ICmpInst::Predicate FoundPred;
10564 if (Inverse)
10565 FoundPred = ICI->getInversePredicate();
10566 else
10567 FoundPred = ICI->getPredicate();
10568
10569 const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
10570 const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));
10571
10572 return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS, Context);
10573}
10574
10575bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
10576 const SCEV *RHS,
10577 ICmpInst::Predicate FoundPred,
10578 const SCEV *FoundLHS, const SCEV *FoundRHS,
10579 const Instruction *Context) {
10580 // Balance the types.
10581 if (getTypeSizeInBits(LHS->getType()) <
10582 getTypeSizeInBits(FoundLHS->getType())) {
10583 // For unsigned and equality predicates, try to prove that both found
10584 // operands fit into narrow unsigned range. If so, try to prove facts in
10585 // narrow types.
10586 if (!CmpInst::isSigned(FoundPred) && !FoundLHS->getType()->isPointerTy()) {
10587 auto *NarrowType = LHS->getType();
10588 auto *WideType = FoundLHS->getType();
10589 auto BitWidth = getTypeSizeInBits(NarrowType);
10590 const SCEV *MaxValue = getZeroExtendExpr(
10591 getConstant(APInt::getMaxValue(BitWidth)), WideType);
10592 if (isKnownPredicate(ICmpInst::ICMP_ULE, FoundLHS, MaxValue) &&
10593 isKnownPredicate(ICmpInst::ICMP_ULE, FoundRHS, MaxValue)) {
10594 const SCEV *TruncFoundLHS = getTruncateExpr(FoundLHS, NarrowType);
10595 const SCEV *TruncFoundRHS = getTruncateExpr(FoundRHS, NarrowType);
10596 if (isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, TruncFoundLHS,
10597 TruncFoundRHS, Context))
10598 return true;
10599 }
10600 }
10601
10602 if (LHS->getType()->isPointerTy())
10603 return false;
10604 if (CmpInst::isSigned(Pred)) {
10605 LHS = getSignExtendExpr(LHS, FoundLHS->getType());
10606 RHS = getSignExtendExpr(RHS, FoundLHS->getType());
10607 } else {
10608 LHS = getZeroExtendExpr(LHS, FoundLHS->getType());
10609 RHS = getZeroExtendExpr(RHS, FoundLHS->getType());
10610 }
10611 } else if (getTypeSizeInBits(LHS->getType()) >
10612 getTypeSizeInBits(FoundLHS->getType())) {
10613 if (FoundLHS->getType()->isPointerTy())
10614 return false;
10615 if (CmpInst::isSigned(FoundPred)) {
10616 FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
10617 FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
10618 } else {
10619 FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
10620 FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
10621 }
10622 }
10623 return isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, FoundLHS,
10624 FoundRHS, Context);
10625}
10626
10627bool ScalarEvolution::isImpliedCondBalancedTypes(
10628 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
10629 ICmpInst::Predicate FoundPred, const SCEV *FoundLHS, const SCEV *FoundRHS,
10630 const Instruction *Context) {
10631 assert(getTypeSizeInBits(LHS->getType()) ==(static_cast<void> (0))
10632 getTypeSizeInBits(FoundLHS->getType()) &&(static_cast<void> (0))
10633 "Types should be balanced!")(static_cast<void> (0));
10634 // Canonicalize the query to match the way instcombine will have
10635 // canonicalized the comparison.
10636 if (SimplifyICmpOperands(Pred, LHS, RHS))
10637 if (LHS == RHS)
10638 return CmpInst::isTrueWhenEqual(Pred);
10639 if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
10640 if (FoundLHS == FoundRHS)
10641 return CmpInst::isFalseWhenEqual(FoundPred);
10642
10643 // Check to see if we can make the LHS or RHS match.
10644 if (LHS == FoundRHS || RHS == FoundLHS) {
10645 if (isa<SCEVConstant>(RHS)) {
10646 std::swap(FoundLHS, FoundRHS);
10647 FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
10648 } else {
10649 std::swap(LHS, RHS);
10650 Pred = ICmpInst::getSwappedPredicate(Pred);
10651 }
10652 }
10653
10654 // Check whether the found predicate is the same as the desired predicate.
10655 if (FoundPred == Pred)
10656 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, Context);
10657
10658 // Check whether swapping the found predicate makes it the same as the
10659 // desired predicate.
10660 if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
10661 // We can write the implication
10662 // 0. LHS Pred RHS <- FoundLHS SwapPred FoundRHS
10663 // using one of the following ways:
10664 // 1. LHS Pred RHS <- FoundRHS Pred FoundLHS
10665 // 2. RHS SwapPred LHS <- FoundLHS SwapPred FoundRHS
10666 // 3. LHS Pred RHS <- ~FoundLHS Pred ~FoundRHS
10667 // 4. ~LHS SwapPred ~RHS <- FoundLHS SwapPred FoundRHS
10668 // Forms 1. and 2. require swapping the operands of one condition. Don't
10669 // do this if it would break canonical constant/addrec ordering.
10670 if (!isa<SCEVConstant>(RHS) && !isa<SCEVAddRecExpr>(LHS))
10671 return isImpliedCondOperands(FoundPred, RHS, LHS, FoundLHS, FoundRHS,
10672 Context);
10673 if (!isa<SCEVConstant>(FoundRHS) && !isa<SCEVAddRecExpr>(FoundLHS))
10674 return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS, Context);
10675
10676 // Don't try to getNotSCEV pointers.
10677 if (LHS->getType()->isPointerTy() || FoundLHS->getType()->isPointerTy())
10678 return false;
10679
10680 // There's no clear preference between forms 3. and 4., try both.
10681 return isImpliedCondOperands(FoundPred, getNotSCEV(LHS), getNotSCEV(RHS),
10682 FoundLHS, FoundRHS, Context) ||
10683 isImpliedCondOperands(Pred, LHS, RHS, getNotSCEV(FoundLHS),
10684 getNotSCEV(FoundRHS), Context);
10685 }
10686
10687 // Unsigned comparison is the same as signed comparison when both the operands
10688 // are non-negative.
10689 if (CmpInst::isUnsigned(FoundPred) &&
10690 CmpInst::getSignedPredicate(FoundPred) == Pred &&
10691 isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS))
10692 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, Context);
10693
10694 // Check if we can make progress by sharpening ranges.
10695 if (FoundPred == ICmpInst::ICMP_NE &&
10696 (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) {
10697
10698 const SCEVConstant *C = nullptr;
10699 const SCEV *V = nullptr;
10700
10701 if (isa<SCEVConstant>(FoundLHS)) {
10702 C = cast<SCEVConstant>(FoundLHS);
10703 V = FoundRHS;
10704 } else {
10705 C = cast<SCEVConstant>(FoundRHS);
10706 V = FoundLHS;
10707 }
10708
10709 // The guarding predicate tells us that C != V. If the known range
10710 // of V is [C, t), we can sharpen the range to [C + 1, t). The
10711 // range we consider has to correspond to same signedness as the
10712 // predicate we're interested in folding.
10713
10714 APInt Min = ICmpInst::isSigned(Pred) ?
10715 getSignedRangeMin(V) : getUnsignedRangeMin(V);
10716
10717 if (Min == C->getAPInt()) {
10718 // Given (V >= Min && V != Min) we conclude V >= (Min + 1).
10719 // This is true even if (Min + 1) wraps around -- in case of
10720 // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)).
10721
10722 APInt SharperMin = Min + 1;
10723
10724 switch (Pred) {
10725 case ICmpInst::ICMP_SGE:
10726 case ICmpInst::ICMP_UGE:
10727 // We know V `Pred` SharperMin. If this implies LHS `Pred`
10728 // RHS, we're done.
10729 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(SharperMin),
10730 Context))
10731 return true;
10732 LLVM_FALLTHROUGH[[gnu::fallthrough]];
10733
10734 case ICmpInst::ICMP_SGT:
10735 case ICmpInst::ICMP_UGT:
10736 // We know from the range information that (V `Pred` Min ||
10737 // V == Min). We know from the guarding condition that !(V
10738 // == Min). This gives us
10739 //
10740 // V `Pred` Min || V == Min && !(V == Min)
10741 // => V `Pred` Min
10742 //
10743 // If V `Pred` Min implies LHS `Pred` RHS, we're done.
10744
10745 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min),
10746 Context))
10747 return true;
10748 break;
10749
10750 // `LHS < RHS` and `LHS <= RHS` are handled in the same way as `RHS > LHS` and `RHS >= LHS` respectively.
10751 case ICmpInst::ICMP_SLE:
10752 case ICmpInst::ICMP_ULE:
10753 if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS,
10754 LHS, V, getConstant(SharperMin), Context))
10755 return true;
10756 LLVM_FALLTHROUGH[[gnu::fallthrough]];
10757
10758 case ICmpInst::ICMP_SLT:
10759 case ICmpInst::ICMP_ULT:
10760 if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS,
10761 LHS, V, getConstant(Min), Context))
10762 return true;
10763 break;
10764
10765 default:
10766 // No change
10767 break;
10768 }
10769 }
10770 }
10771
10772 // Check whether the actual condition is beyond sufficient.
10773 if (FoundPred == ICmpInst::ICMP_EQ)
10774 if (ICmpInst::isTrueWhenEqual(Pred))
10775 if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, Context))
10776 return true;
10777 if (Pred == ICmpInst::ICMP_NE)
10778 if (!ICmpInst::isTrueWhenEqual(FoundPred))
10779 if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS,
10780 Context))
10781 return true;
10782
10783 // Otherwise assume the worst.
10784 return false;
10785}
10786
10787bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr,
10788 const SCEV *&L, const SCEV *&R,
10789 SCEV::NoWrapFlags &Flags) {
10790 const auto *AE = dyn_cast<SCEVAddExpr>(Expr);
10791 if (!AE || AE->getNumOperands() != 2)
10792 return false;
10793
10794 L = AE->getOperand(0);
10795 R = AE->getOperand(1);
10796 Flags = AE->getNoWrapFlags();
10797 return true;
10798}
10799
10800Optional<APInt> ScalarEvolution::computeConstantDifference(const SCEV *More,
10801 const SCEV *Less) {
10802 // We avoid subtracting expressions here because this function is usually
10803 // fairly deep in the call stack (i.e. is called many times).
10804
10805 // X - X = 0.
10806 if (More == Less)
10807 return APInt(getTypeSizeInBits(More->getType()), 0);
10808
10809 if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) {
10810 const auto *LAR = cast<SCEVAddRecExpr>(Less);
10811 const auto *MAR = cast<SCEVAddRecExpr>(More);
10812
10813 if (LAR->getLoop() != MAR->getLoop())
10814 return None;
10815
10816 // We look at affine expressions only; not for correctness but to keep
10817 // getStepRecurrence cheap.
10818 if (!LAR->isAffine() || !MAR->isAffine())
10819 return None;
10820
10821 if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this))
10822 return None;
10823
10824 Less = LAR->getStart();
10825 More = MAR->getStart();
10826
10827 // fall through
10828 }
10829
10830 if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) {
10831 const auto &M = cast<SCEVConstant>(More)->getAPInt();
10832 const auto &L = cast<SCEVConstant>(Less)->getAPInt();
10833 return M - L;
10834 }
10835
10836 SCEV::NoWrapFlags Flags;
10837 const SCEV *LLess = nullptr, *RLess = nullptr;
10838 const SCEV *LMore = nullptr, *RMore = nullptr;
10839 const SCEVConstant *C1 = nullptr, *C2 = nullptr;
10840 // Compare (X + C1) vs X.
10841 if (splitBinaryAdd(Less, LLess, RLess, Flags))
10842 if ((C1 = dyn_cast<SCEVConstant>(LLess)))
10843 if (RLess == More)
10844 return -(C1->getAPInt());
10845
10846 // Compare X vs (X + C2).
10847 if (splitBinaryAdd(More, LMore, RMore, Flags))
10848 if ((C2 = dyn_cast<SCEVConstant>(LMore)))
10849 if (RMore == Less)
10850 return C2->getAPInt();
10851
10852 // Compare (X + C1) vs (X + C2).
10853 if (C1 && C2 && RLess == RMore)
10854 return C2->getAPInt() - C1->getAPInt();
10855
10856 return None;
10857}
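// Illustrative results: computeConstantDifference((%x + 10), (%x + 4)) is
// 6, and computeConstantDifference(%x, (%x + 3)) is -3; operands that
// differ by anything non-constant yield None, since only cheap syntactic
// matching is done here by design.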
10858
10859bool ScalarEvolution::isImpliedCondOperandsViaAddRecStart(
10860 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
10861 const SCEV *FoundLHS, const SCEV *FoundRHS, const Instruction *Context) {
10862 // Try to recognize the following pattern:
10863 //
10864 // FoundRHS = ...
10865 // ...
10866 // loop:
10867 // FoundLHS = {Start,+,W}
10868 // context_bb: // Basic block from the same loop
10869 // known(Pred, FoundLHS, FoundRHS)
10870 //
10871 // If some predicate is known in the context of a loop, it is also known on
10872 // each iteration of this loop, including the first iteration. Therefore, in
10873 // this case, `FoundLHS Pred FoundRHS` implies `Start Pred FoundRHS`. Try to
10874 // prove the original pred using this fact.
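// E.g. if "{%start,+,1} u< %len" is known in a block that executes on the
// first iteration, then "%start u< %len" holds on loop entry, and the code
// below tries to derive "LHS Pred RHS" from that entry fact instead.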
10875 if (!Context)
10876 return false;
10877 const BasicBlock *ContextBB = Context->getParent();
10878 // Make sure AR varies in the context block.
10879 if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundLHS)) {
10880 const Loop *L = AR->getLoop();
10881 // Make sure that context belongs to the loop and executes on 1st iteration
10882 // (if it ever executes at all).
10883 if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch()))
10884 return false;
10885 if (!isAvailableAtLoopEntry(FoundRHS, AR->getLoop()))
10886 return false;
10887 return isImpliedCondOperands(Pred, LHS, RHS, AR->getStart(), FoundRHS);
10888 }
10889
10890 if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundRHS)) {
10891 const Loop *L = AR->getLoop();
10892 // Make sure that context belongs to the loop and executes on 1st iteration
10893 // (if it ever executes at all).
10894 if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch()))
10895 return false;
10896 if (!isAvailableAtLoopEntry(FoundLHS, AR->getLoop()))
10897 return false;
10898 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, AR->getStart());
10899 }
10900
10901 return false;
10902}
10903
10904bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow(
10905 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
10906 const SCEV *FoundLHS, const SCEV *FoundRHS) {
10907 if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT)
10908 return false;
10909
10910 const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS);
10911 if (!AddRecLHS)
10912 return false;
10913
10914 const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS);
10915 if (!AddRecFoundLHS)
10916 return false;
10917
10918 // We'd like to let SCEV reason about control dependencies, so we constrain
10919 // both the inequalities to be about add recurrences on the same loop. This
10920 // way we can use isLoopEntryGuardedByCond later.
10921
10922 const Loop *L = AddRecFoundLHS->getLoop();
10923 if (L != AddRecLHS->getLoop())
10924 return false;
10925
10926 // FoundLHS u< FoundRHS u< -C => (FoundLHS + C) u< (FoundRHS + C) ... (1)
10927 //
10928 // FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C)
10929 // ... (2)
10930 //
10931 // Informal proof for (2), assuming (1) [*]:
10932 //
10933 // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)[**]
10934 //
10935 // Then
10936 //
10937 // FoundLHS s< FoundRHS s< INT_MIN - C
10938 // <=> (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C [ using (3) ]
10939 // <=> (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ]
10940 // <=> (FoundLHS + INT_MIN + C + INT_MIN) s<
10941 // (FoundRHS + INT_MIN + C + INT_MIN) [ using (3) ]
10942 // <=> FoundLHS + C s< FoundRHS + C
10943 //
10944 // [*]: (1) can be proved by ruling out overflow.
10945 //
10946 // [**]: This can be proved by analyzing all the four possibilities:
10947 // (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and
10948 // (A s>= 0, B s>= 0).
10949 //
10950 // Note:
10951 // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C"
10952 // will not sign underflow. For instance, say FoundLHS = (i8 -128), FoundRHS
10953 // = (i8 -127) and C = (i8 -100). Then INT_MIN - C = (i8 -28), and FoundRHS
10954 // s< (INT_MIN - C). Lack of sign overflow / underflow in "FoundRHS + C" is
10955 // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS +
10956 // C)".
10957
10958 Optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS);
10959 Optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS);
10960 if (!LDiff || !RDiff || *LDiff != *RDiff)
10961 return false;
10962
10963 if (LDiff->isMinValue())
10964 return true;
10965
10966 APInt FoundRHSLimit;
10967
10968 if (Pred == CmpInst::ICMP_ULT) {
10969 FoundRHSLimit = -(*RDiff);
10970 } else {
10971 assert(Pred == CmpInst::ICMP_SLT && "Checked above!")(static_cast<void> (0));
10972 FoundRHSLimit = APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff;
10973 }
10974
10975 // Try to prove (1) or (2), as needed.
10976 return isAvailableAtLoopEntry(FoundRHS, L) &&
10977 isLoopEntryGuardedByCond(L, Pred, FoundRHS,
10978 getConstant(FoundRHSLimit));
10979}
10980
10981bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred,
10982 const SCEV *LHS, const SCEV *RHS,
10983 const SCEV *FoundLHS,
10984 const SCEV *FoundRHS, unsigned Depth) {
10985 const PHINode *LPhi = nullptr, *RPhi = nullptr;
10986
10987 auto ClearOnExit = make_scope_exit([&]() {
10988 if (LPhi) {
10989 bool Erased = PendingMerges.erase(LPhi);
10990 assert(Erased && "Failed to erase LPhi!")(static_cast<void> (0));
10991 (void)Erased;
10992 }
10993 if (RPhi) {
10994 bool Erased = PendingMerges.erase(RPhi);
10995 assert(Erased && "Failed to erase RPhi!")(static_cast<void> (0));
10996 (void)Erased;
10997 }
10998 });
10999
11000 // Find the respective Phis and check that they are not already pending.
11001 if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS))
11002 if (auto *Phi = dyn_cast<PHINode>(LU->getValue())) {
11003 if (!PendingMerges.insert(Phi).second)
11004 return false;
11005 LPhi = Phi;
11006 }
11007 if (const SCEVUnknown *RU = dyn_cast<SCEVUnknown>(RHS))
11008 if (auto *Phi = dyn_cast<PHINode>(RU->getValue())) {
11009 // If we detect a loop of Phi nodes being processed by this method, for
11010 // example:
11011 //
11012 // %a = phi i32 [ %some1, %preheader ], [ %b, %latch ]
11013 // %b = phi i32 [ %some2, %preheader ], [ %a, %latch ]
11014 //
11015 // we don't want to deal with a case that complex, so return conservative
11016 // answer false.
11017 if (!PendingMerges.insert(Phi).second)
11018 return false;
11019 RPhi = Phi;
11020 }
11021
11022 // If none of LHS, RHS is a Phi, nothing to do here.
11023 if (!LPhi && !RPhi)
11024 return false;
11025
11026 // If there is a SCEVUnknown Phi we are interested in, make it left.
11027 if (!LPhi) {
11028 std::swap(LHS, RHS);
11029 std::swap(FoundLHS, FoundRHS);
11030 std::swap(LPhi, RPhi);
11031 Pred = ICmpInst::getSwappedPredicate(Pred);
11032 }
11033
11034 assert(LPhi && "LPhi should definitely be a SCEVUnknown Phi!")(static_cast<void> (0));
11035 const BasicBlock *LBB = LPhi->getParent();
11036 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
11037
11038 auto ProvedEasily = [&](const SCEV *S1, const SCEV *S2) {
11039 return isKnownViaNonRecursiveReasoning(Pred, S1, S2) ||
11040 isImpliedCondOperandsViaRanges(Pred, S1, S2, FoundLHS, FoundRHS) ||
11041 isImpliedViaOperations(Pred, S1, S2, FoundLHS, FoundRHS, Depth);
11042 };
11043
11044 if (RPhi && RPhi->getParent() == LBB) {
11045 // Case one: RHS is also a SCEVUnknown Phi from the same basic block.
11046 // If we compare two Phis from the same block, and for each entry block
11047 // the predicate is true for incoming values from this block, then the
11048 // predicate is also true for the Phis.
11049 for (const BasicBlock *IncBB : predecessors(LBB)) {
11050 const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
11051 const SCEV *R = getSCEV(RPhi->getIncomingValueForBlock(IncBB));
11052 if (!ProvedEasily(L, R))
11053 return false;
11054 }
11055 } else if (RAR && RAR->getLoop()->getHeader() == LBB) {
11056 // Case two: RHS is also a Phi from the same basic block, and it is an
11057 // AddRec. It means that there is a loop which has both AddRec and Unknown
11058 // PHIs; for it we can compare incoming values of AddRec from above the loop
11059 // and latch with their respective incoming values of LPhi.
11060 // TODO: Generalize to handle loops with many inputs in a header.
11061 if (LPhi->getNumIncomingValues() != 2) return false;
11062
11063 auto *RLoop = RAR->getLoop();
11064 auto *Predecessor = RLoop->getLoopPredecessor();
11065 assert(Predecessor && "Loop with AddRec with no predecessor?")(static_cast<void> (0));
11066 const SCEV *L1 = getSCEV(LPhi->getIncomingValueForBlock(Predecessor));
11067 if (!ProvedEasily(L1, RAR->getStart()))
11068 return false;
11069 auto *Latch = RLoop->getLoopLatch();
11070 assert(Latch && "Loop with AddRec with no latch?")(static_cast<void> (0));
11071 const SCEV *L2 = getSCEV(LPhi->getIncomingValueForBlock(Latch));
11072 if (!ProvedEasily(L2, RAR->getPostIncExpr(*this)))
11073 return false;
11074 } else {
11075 // In all other cases go over the inputs of LHS and compare each of them to
11076 // RHS; the predicate is true for (LHS, RHS) if it is true for all such pairs.
11077 // At this point RHS is either a non-Phi, or it is a Phi from some block
11078 // different from LBB.
11079 for (const BasicBlock *IncBB : predecessors(LBB)) {
11080 // Check that RHS is available in this block.
11081 if (!dominates(RHS, IncBB))
11082 return false;
11083 const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
11084 // Make sure L does not refer to a value from a potentially previous
11085 // iteration of a loop.
11086 if (!properlyDominates(L, IncBB))
11087 return false;
11088 if (!ProvedEasily(L, RHS))
11089 return false;
11090 }
11091 }
11092 return true;
11093}
11094
11095bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
11096 const SCEV *LHS, const SCEV *RHS,
11097 const SCEV *FoundLHS,
11098 const SCEV *FoundRHS,
11099 const Instruction *Context) {
11100 if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS))
11101 return true;
11102
11103 if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS))
11104 return true;
11105
11106 if (isImpliedCondOperandsViaAddRecStart(Pred, LHS, RHS, FoundLHS, FoundRHS,
11107 Context))
11108 return true;
11109
11110 return isImpliedCondOperandsHelper(Pred, LHS, RHS,
11111 FoundLHS, FoundRHS);
11112}
11113
11114/// Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values?
11115template <typename MinMaxExprType>
11116static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr,
11117 const SCEV *Candidate) {
11118 const MinMaxExprType *MinMaxExpr = dyn_cast<MinMaxExprType>(MaybeMinMaxExpr);
11119 if (!MinMaxExpr)
11120 return false;
11121
11122 return is_contained(MinMaxExpr->operands(), Candidate);
11123}
11124
11125static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE,
11126 ICmpInst::Predicate Pred,
11127 const SCEV *LHS, const SCEV *RHS) {
11128 // If both sides are affine addrecs for the same loop, with equal
11129 // steps, and we know the recurrences don't wrap, then we only
11130 // need to check the predicate on the starting values.
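// E.g. {%a,+,4}<nsw> s< {%b,+,4}<nsw> over the same loop reduces to
// "%a s< %b": both sides advance in lockstep and neither can wrap past the
// other.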
11131
11132 if (!ICmpInst::isRelational(Pred))
11133 return false;
11134
11135 const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS);
11136 if (!LAR)
11137 return false;
11138 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
11139 if (!RAR)
11140 return false;
11141 if (LAR->getLoop() != RAR->getLoop())
11142 return false;
11143 if (!LAR->isAffine() || !RAR->isAffine())
11144 return false;
11145
11146 if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE))
11147 return false;
11148
11149 SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ?
11150 SCEV::FlagNSW : SCEV::FlagNUW;
11151 if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW))
11152 return false;
11153
11154 return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart());
11155}
11156
11157/// Is LHS `Pred` RHS true by virtue of LHS or RHS being a Min or Max
11158/// expression?
11159static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
11160 ICmpInst::Predicate Pred,
11161 const SCEV *LHS, const SCEV *RHS) {
11162 switch (Pred) {
11163 default:
11164 return false;
11165
11166 case ICmpInst::ICMP_SGE:
11167 std::swap(LHS, RHS);
11168 LLVM_FALLTHROUGH[[gnu::fallthrough]];
11169 case ICmpInst::ICMP_SLE:
11170 return
11171 // min(A, ...) <= A
11172 IsMinMaxConsistingOf<SCEVSMinExpr>(LHS, RHS) ||
11173 // A <= max(A, ...)
11174 IsMinMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS);
11175
11176 case ICmpInst::ICMP_UGE:
11177 std::swap(LHS, RHS);
11178 LLVM_FALLTHROUGH[[gnu::fallthrough]];
11179 case ICmpInst::ICMP_ULE:
11180 return
11181 // min(A, ...) <= A
11182 IsMinMaxConsistingOf<SCEVUMinExpr>(LHS, RHS) ||
11183 // A <= max(A, ...)
11184 IsMinMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS);
11185 }
11186
11187 llvm_unreachable("covered switch fell through?!")__builtin_unreachable();
11188}
11189
11190bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
11191 const SCEV *LHS, const SCEV *RHS,
11192 const SCEV *FoundLHS,
11193 const SCEV *FoundRHS,
11194 unsigned Depth) {
11195 assert(getTypeSizeInBits(LHS->getType()) ==(static_cast<void> (0))
11196 getTypeSizeInBits(RHS->getType()) &&(static_cast<void> (0))
11197 "LHS and RHS have different sizes?")(static_cast<void> (0));
11198 assert(getTypeSizeInBits(FoundLHS->getType()) ==(static_cast<void> (0))
11199 getTypeSizeInBits(FoundRHS->getType()) &&(static_cast<void> (0))
11200 "FoundLHS and FoundRHS have different sizes?")(static_cast<void> (0));
11201 // We want to avoid hurting compile time with the analysis of overly large trees.
11202 if (Depth > MaxSCEVOperationsImplicationDepth)
11203 return false;
11204
11205 // We only want to work with GT comparison so far.
11206 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SLT) {
11207 Pred = CmpInst::getSwappedPredicate(Pred);
11208 std::swap(LHS, RHS);
11209 std::swap(FoundLHS, FoundRHS);
11210 }
11211
11212 // For unsigned, try to reduce it to corresponding signed comparison.
11213 if (Pred == ICmpInst::ICMP_UGT)
11214 // We can replace unsigned predicate with its signed counterpart if all
11215 // involved values are non-negative.
11216 // TODO: We could have better support for unsigned.
11217 if (isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) {
11218 // Knowing that both FoundLHS and FoundRHS are non-negative, and knowing
11219 // FoundLHS >u FoundRHS, we also know that FoundLHS >s FoundRHS. Let us
11220 // use this fact to prove that LHS and RHS are non-negative.
11221 const SCEV *MinusOne = getMinusOne(LHS->getType());
11222 if (isImpliedCondOperands(ICmpInst::ICMP_SGT, LHS, MinusOne, FoundLHS,
11223 FoundRHS) &&
11224 isImpliedCondOperands(ICmpInst::ICMP_SGT, RHS, MinusOne, FoundLHS,
11225 FoundRHS))
11226 Pred = ICmpInst::ICMP_SGT;
11227 }
11228
11229 if (Pred != ICmpInst::ICMP_SGT)
11230 return false;
11231
11232 auto GetOpFromSExt = [&](const SCEV *S) {
11233 if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
11234 return Ext->getOperand();
11235 // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off
11236 // the constant in some cases.
11237 return S;
11238 };
11239
11240 // Acquire values from extensions.
11241 auto *OrigLHS = LHS;
11242 auto *OrigFoundLHS = FoundLHS;
11243 LHS = GetOpFromSExt(LHS);
11244 FoundLHS = GetOpFromSExt(FoundLHS);
11245
11246 // Can the SGT predicate be proved trivially or using the found context?
11247 auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) {
11248 return isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGT, S1, S2) ||
11249 isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS,
11250 FoundRHS, Depth + 1);
11251 };
11252
11253 if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) {
11254 // We want to avoid creation of any new non-constant SCEV. Since we are
11255 // going to compare the operands to RHS, we should be certain that we don't
11256 // need any size extensions for this. So let's decline all cases when the
11257 // sizes of types of LHS and RHS do not match.
11258 // TODO: Maybe try to get RHS from sext to catch more cases?
11259 if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType()))
11260 return false;
11261
11262 // Should not overflow.
11263 if (!LHSAddExpr->hasNoSignedWrap())
11264 return false;
11265
11266 auto *LL = LHSAddExpr->getOperand(0);
11267 auto *LR = LHSAddExpr->getOperand(1);
11268 auto *MinusOne = getMinusOne(RHS->getType());
11269
11270 // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context.
11271 auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) {
11272 return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS);
11273 };
11274 // Try to prove the following rule:
11275 // (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS).
11276 // (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS).
11277 if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
11278 return true;
11279 } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) {
11280 Value *LL, *LR;
11281 // FIXME: Once we have SDiv implemented, we can get rid of this matching.
11282
11283 using namespace llvm::PatternMatch;
11284
11285 if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) {
11286 // Rules for division.
11287 // We are going to perform some comparisons with Denominator and its
11288 // derivative expressions. In the general case, creating a SCEV for it may
11289 // lead to a complex analysis of the entire graph, and in particular it
11290 // can request trip count recalculation for the same loop, which would be
11291 // cached as SCEVCouldNotCompute to avoid infinite recursion. To avoid
11292 // this, we only want to create SCEVs that are constants in this section.
11293 // So we bail if Denominator is not a constant.
11294 if (!isa<ConstantInt>(LR))
11295 return false;
11296
11297 auto *Denominator = cast<SCEVConstant>(getSCEV(LR));
11298
11299 // We want to make sure that LHS = FoundLHS / Denominator. If it is so,
11300 // then a SCEV for the numerator already exists and matches with FoundLHS.
11301 auto *Numerator = getExistingSCEV(LL);
11302 if (!Numerator || Numerator->getType() != FoundLHS->getType())
11303 return false;
11304
11305 // Make sure that the numerator matches with FoundLHS and the denominator
11306 // is positive.
11307 if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator))
11308 return false;
11309
11310 auto *DTy = Denominator->getType();
11311 auto *FRHSTy = FoundRHS->getType();
11312 if (DTy->isPointerTy() != FRHSTy->isPointerTy())
11313 // One of the types is a pointer and the other is not. We cannot extend
11314 // them properly to a wider type, so let us just reject this case.
11315 // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc should help
11316 // to avoid this check.
11317 return false;
11318
11319 // Given that:
11320 // FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0.
11321 auto *WTy = getWiderType(DTy, FRHSTy);
11322 auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy);
11323 auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy);
11324
11325 // Try to prove the following rule:
11326 // (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS).
11327 // For example, given that FoundLHS > 2, FoundLHS is at least 3. If we
11328 // divide it by Denominator < 4, the result is at least 1.
11329 auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2));
11330 if (isKnownNonPositive(RHS) &&
11331 IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
11332 return true;
11333
11334 // Try to prove the following rule:
11335 // (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS).
11336 // For example, given that FoundLHS > -3, FoundLHS is at least -2.
11337 // If we divide it by Denominator > 2, then:
11338 // 1. If FoundLHS is negative, then the result is 0.
11339 // 2. If FoundLHS is non-negative, then the result is non-negative.
11340 // Either way, the result is non-negative.
11341 auto *MinusOne = getMinusOne(WTy);
11342 auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt);
11343 if (isKnownNegative(RHS) &&
11344 IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
11345 return true;
11346 }
11347 }
11348
11349 // If our expression contained SCEVUnknown Phis, and we split it down and now
11350 // need to prove something for them, try to prove the predicate for every
11351 // possible incoming value of those Phis.
11352 if (isImpliedViaMerge(Pred, OrigLHS, RHS, OrigFoundLHS, FoundRHS, Depth + 1))
11353 return true;
11354
11355 return false;
11356}
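
// Worked example for the first division rule above, with illustrative
// values: let Denominator == 4 and FoundRHS == 3, so the guard
// "FoundRHS > Denominator - 2" holds (3 > 2). The context FoundLHS > 3
// gives FoundLHS >= 4, hence LHS = FoundLHS / 4 >= 1, and with RHS known
// non-positive we conclude LHS >= 1 > 0 >= RHS, i.e. LHS > RHS.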
11357
11358static bool isKnownPredicateExtendIdiom(ICmpInst::Predicate Pred,
11359 const SCEV *LHS, const SCEV *RHS) {
11360 // zext x u<= sext x, sext x s<= zext x
11361 switch (Pred) {
11362 case ICmpInst::ICMP_SGE:
11363 std::swap(LHS, RHS);
11364 LLVM_FALLTHROUGH;
11365 case ICmpInst::ICMP_SLE: {
11366 // If operand >=s 0 then ZExt == SExt. If operand <s 0 then SExt <s ZExt.
11367 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(LHS);
11368 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(RHS);
11369 if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand())
11370 return true;
11371 break;
11372 }
11373 case ICmpInst::ICMP_UGE:
11374 std::swap(LHS, RHS);
11375 LLVM_FALLTHROUGH;
11376 case ICmpInst::ICMP_ULE: {
11377 // If operand >=s 0 then ZExt == SExt. If operand <s 0 then ZExt <u SExt.
11378 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS);
11379 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(RHS);
11380 if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand())
11381 return true;
11382 break;
11383 }
11384 default:
11385 break;
11386 }
11387 return false;
11388}
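
// Worked example (illustrative i8 value %x widened to i16): if %x == -1,
// then sext(%x) == -1 (0xFFFF) and zext(%x) == 255 (0x00FF), so
// "sext(%x) s<= zext(%x)" and "zext(%x) u<= sext(%x)" both hold; if
// %x >=s 0 the two extensions are equal, so both idioms hold for every %x.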
11389
11390bool
11391ScalarEvolution::isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred,
11392 const SCEV *LHS, const SCEV *RHS) {
11393 return isKnownPredicateExtendIdiom(Pred, LHS, RHS) ||
11394 isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
11395 IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) ||
11396 IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) ||
11397 isKnownPredicateViaNoOverflow(Pred, LHS, RHS);
11398}
11399
11400bool
11401ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
11402 const SCEV *LHS, const SCEV *RHS,
11403 const SCEV *FoundLHS,
11404 const SCEV *FoundRHS) {
11405 switch (Pred) {
11406 default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
11407 case ICmpInst::ICMP_EQ:
11408 case ICmpInst::ICMP_NE:
11409 if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
11410 return true;
11411 break;
11412 case ICmpInst::ICMP_SLT:
11413 case ICmpInst::ICMP_SLE:
11414 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
11415 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS))
11416 return true;
11417 break;
11418 case ICmpInst::ICMP_SGT:
11419 case ICmpInst::ICMP_SGE:
11420 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
11421 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS))
11422 return true;
11423 break;
11424 case ICmpInst::ICMP_ULT:
11425 case ICmpInst::ICMP_ULE:
11426 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
11427 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS))
11428 return true;
11429 break;
11430 case ICmpInst::ICMP_UGT:
11431 case ICmpInst::ICMP_UGE:
11432 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
11433 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS))
11434 return true;
11435 break;
11436 }
11437
11438 // Maybe it can be proved via operations?
11439 if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS))
11440 return true;
11441
11442 return false;
11443}
11444
11445bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
11446 const SCEV *LHS,
11447 const SCEV *RHS,
11448 const SCEV *FoundLHS,
11449 const SCEV *FoundRHS) {
11450 if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
11451 // The restriction on `FoundRHS` can be lifted easily -- it exists only to
11452 // reduce the compile time impact of this optimization.
11453 return false;
11454
11455 Optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS);
11456 if (!Addend)
11457 return false;
11458
11459 const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();
11460
11461 // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
11462 // antecedent "`FoundLHS` `Pred` `FoundRHS`".
11463 ConstantRange FoundLHSRange =
11464 ConstantRange::makeExactICmpRegion(Pred, ConstFoundRHS);
11465
11466 // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`:
11467 ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend));
11468
11469 // We can also compute the range of values for `LHS` that satisfy the
11470 // consequent, "`LHS` `Pred` `RHS`":
11471 const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt();
11472 // The antecedent implies the consequent if every value of `LHS` that
11473 // satisfies the antecedent also satisfies the consequent.
11474 return LHSRange.icmp(Pred, ConstRHS);
11475}
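
// Worked example with illustrative constants: to prove "LHS <u 10" from
// "FoundLHS <u 8" where LHS == FoundLHS + 2, we get Addend == 2,
// FoundLHSRange == [0, 8), and LHSRange == [2, 10); every value in
// [2, 10) satisfies "<u 10", so the implication holds.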
11476
11477bool ScalarEvolution::canIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
11478 bool IsSigned) {
11479 assert(isKnownPositive(Stride) && "Positive stride expected!");
11480
11481 unsigned BitWidth = getTypeSizeInBits(RHS->getType());
11482 const SCEV *One = getOne(Stride->getType());
11483
11484 if (IsSigned) {
11485 APInt MaxRHS = getSignedRangeMax(RHS);
11486 APInt MaxValue = APInt::getSignedMaxValue(BitWidth);
11487 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));
11488
11489 // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow!
11490 return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS);
11491 }
11492
11493 APInt MaxRHS = getUnsignedRangeMax(RHS);
11494 APInt MaxValue = APInt::getMaxValue(BitWidth);
11495 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));
11496
11497 // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow!
11498 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS);
11499}
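
// Worked example (illustrative 8-bit unsigned IV): if MaxRHS == 200 and
// the stride may be as large as 100, then MaxStrideMinusOne == 99 and
// MaxValue - MaxStrideMinusOne == 255 - 99 == 156 <u 200, so the IV could
// step past RHS and wrap; the function reports possible overflow.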
11500
11501bool ScalarEvolution::canIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
11502 bool IsSigned) {
11503
11504 unsigned BitWidth = getTypeSizeInBits(RHS->getType());
11505 const SCEV *One = getOne(Stride->getType());
11506
11507 if (IsSigned) {
11508 APInt MinRHS = getSignedRangeMin(RHS);
11509 APInt MinValue = APInt::getSignedMinValue(BitWidth);
11510 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));
11511
11512 // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow!
11513 return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS);
11514 }
11515
11516 APInt MinRHS = getUnsignedRangeMin(RHS);
11517 APInt MinValue = APInt::getMinValue(BitWidth);
11518 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));
11519
11520 // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow!
11521 return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS);
11522}
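
// Worked example (illustrative 8-bit unsigned down-counting IV): with
// MinRHS == 5 and a stride of at most 10, MinValue + MaxStrideMinusOne ==
// 0 + 9 == 9 >u 5, so the IV could step below RHS past 0 and wrap; the
// function reports possible overflow.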
11523
11524const SCEV *ScalarEvolution::getUDivCeilSCEV(const SCEV *N, const SCEV *D) {
11525 // umin(N, 1) + floor((N - umin(N, 1)) / D)
11526 // This is equivalent to "1 + floor((N - 1) / D)" for N != 0. The umin
11527 // expression fixes the case of N=0.
11528 const SCEV *MinNOne = getUMinExpr(N, getOne(N->getType()));
11529 const SCEV *NMinusOne = getMinusSCEV(N, MinNOne);
11530 return getAddExpr(MinNOne, getUDivExpr(NMinusOne, D));
11531}
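
// Worked example: for N == 7, D == 3, this computes umin(7, 1) == 1 plus
// (7 - 1) /u 3 == 2, i.e. 3 == ceil(7 / 3); for N == 0 both terms are 0,
// so the result is 0 rather than the wrapped value of "1 + floor((N - 1) / D)".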
11532
11533const SCEV *ScalarEvolution::computeMaxBECountForLT(const SCEV *Start,
11534 const SCEV *Stride,
11535 const SCEV *End,
11536 unsigned BitWidth,
11537 bool IsSigned) {
11538 // The logic in this function assumes we can represent a positive stride.
11539 // If we can't, the backedge-taken count must be zero.
11540 if (IsSigned && BitWidth == 1)
11541 return getZero(Stride->getType());
11542
11543 // Calculate the maximum backedge count based on the range of values
11544 // permitted by Start, End, and Stride.
11545 APInt MinStart =
11546 IsSigned ? getSignedRangeMin(Start) : getUnsignedRangeMin(Start);
11547
11548 APInt MinStride =
11549 IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride);
11550
11551 // We assume either the stride is positive, or the backedge-taken count
11552 // is zero. So force StrideForMaxBECount to be at least one.
11553 APInt One(BitWidth, 1);
11554 APInt StrideForMaxBECount = IsSigned ? APIntOps::smax(One, MinStride)
11555 : APIntOps::umax(One, MinStride);
11556
11557 APInt MaxValue = IsSigned ? APInt::getSignedMaxValue(BitWidth)
11558 : APInt::getMaxValue(BitWidth);
11559 APInt Limit = MaxValue - (StrideForMaxBECount - 1);
11560
11561 // Although End can be a MAX expression we estimate MaxEnd considering only
11562 // the case End = RHS of the loop termination condition. This is safe because
11563 // in the other case (End - Start) is zero, leading to a zero maximum backedge
11564 // taken count.
11565 APInt MaxEnd = IsSigned ? APIntOps::smin(getSignedRangeMax(End), Limit)
11566 : APIntOps::umin(getUnsignedRangeMax(End), Limit);
11567
11568 // MaxBECount = ceil((max(MaxEnd, MinStart) - MinStart) / Stride)
11569 MaxEnd = IsSigned ? APIntOps::smax(MaxEnd, MinStart)
11570 : APIntOps::umax(MaxEnd, MinStart);
11571
11572 return getUDivCeilSCEV(getConstant(MaxEnd - MinStart) /* Delta */,
11573 getConstant(StrideForMaxBECount) /* Step */);
11574}
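
// Worked example (illustrative 8-bit unsigned values): with MinStart == 0,
// MinStride == 4, and an End whose unsigned range max is 10, we get
// Limit == 255 - 3 == 252 and MaxEnd == umin(10, 252) == 10, so the
// returned bound is ceil((10 - 0) / 4) == 3 backedges at most.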
11575
11576ScalarEvolution::ExitLimit
11577ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
11578 const Loop *L, bool IsSigned,
11579 bool ControlsExit, bool AllowPredicates) {
11580 SmallPtrSet<const SCEVPredicate *, 4> Predicates;
11581
11582 const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
11583 bool PredicatedIV = false;
11584
11585 if (!IV && AllowPredicates) {
11586 // Try to make this an AddRec using runtime tests, in the first X
11587 // iterations of this loop, where X is the SCEV expression found by the
11588 // algorithm below.
11589 IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);
11590 PredicatedIV = true;
11591 }
11592
11593 // Avoid weird loops
11594 if (!IV || IV->getLoop() != L || !IV->isAffine())
11595 return getCouldNotCompute();
11596
11597 // A precondition of this method is that the condition being analyzed
11598 // reaches an exiting branch which dominates the latch. Given that, we can
11599 // assume that an increment which violates the nowrap specification and
11600 // produces poison must cause undefined behavior when the resulting poison
11601 // value is branched upon and thus we can conclude that the backedge is
11602 // taken no more often than would be required to produce that poison value.
11603 // Note that a well defined loop can exit on the iteration which violates
11604 // the nowrap specification if there is another exit (either explicit or
11605 // implicit/exceptional) which causes the loop to execute before the
11606 // exiting instruction we're analyzing would trigger UB.
11607 auto WrapType = IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW;
11608 bool NoWrap = ControlsExit && IV->getNoWrapFlags(WrapType);
11609 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
11610
11611 const SCEV *Stride = IV->getStepRecurrence(*this);
11612
11613 bool PositiveStride = isKnownPositive(Stride);
11614
11615 // Avoid negative or zero stride values.
11616 if (!PositiveStride) {
11617 // We can compute the correct backedge taken count for loops with unknown
11618 // strides if we can prove that the loop is not an infinite loop with side
11619 // effects. Here's the loop structure we are trying to handle -
11620 //
11621 // i = start
11622 // do {
11623 // A[i] = i;
11624 // i += s;
11625 // } while (i < end);
11626 //
11627 // The backedge taken count for such loops is evaluated as -
11628 // (max(end, start + stride) - start - 1) /u stride
11629 //
11630 // The additional preconditions that we need to check to prove correctness
11631 // of the above formula are as follows -
11632 //
11633 // a) IV is either nuw or nsw depending upon signedness (indicated by the
11634 // NoWrap flag).
11635 // b) loop is single exit with no side effects.
11636 // c) loop has no abnormal exits
11637 //
11638 //
11639 // Precondition a) implies that if the stride is negative, this is a single
11640 // trip loop. The backedge taken count formula reduces to zero in this case.
11641 //
11642 // Preconditions b) and c) combine to imply that if RHS is invariant in L,
11643 // then a zero stride means the backedge can't be taken without executing
11644 // undefined behavior.
11645 //
11646 // The positive stride case is the same as isKnownPositive(Stride) returning
11647 // true (original behavior of the function).
11648 //
11649 // We want to make sure that the stride is truly unknown as there are edge
11650 // cases where ScalarEvolution propagates no wrap flags to the
11651 // post-increment/decrement IV even though the increment/decrement operation
11652 // itself is wrapping. The computed backedge taken count may be wrong in
11653 // such cases. This is prevented by checking that the stride is not known to
11654 // be either positive or non-positive. For example, no wrap flags are
11655 // propagated to the post-increment IV of this loop with a trip count of 2 -
11656 //
11657 // unsigned char i;
11658 // for(i=127; i<128; i+=129)
11659 // A[i] = i;
11660 //
11661 if (PredicatedIV || !NoWrap || isKnownNonPositive(Stride) ||
11662 !loopIsFiniteByAssumption(L) || !loopHasNoAbnormalExits(L))
11663 return getCouldNotCompute();
11664
11665 if (!isKnownNonZero(Stride)) {
11666 // If we have a step of zero, and RHS isn't invariant in L, we don't know
11667 // if it might eventually be greater than start and if so, on which
11668 // iteration. We can't even produce a useful upper bound.
11669 if (!isLoopInvariant(RHS, L))
11670 return getCouldNotCompute();
11671
11672 // We allow a potentially zero stride, but we need to divide by stride
11673 // below. Since the loop can't be infinite and this check must control
11674 // the sole exit, we can infer the exit must be taken on the first
11675 // iteration (e.g. backedge count = 0) if the stride is zero. Given that,
11676 // we know the numerator in the divides below must be zero, so we can
11677 // pick an arbitrary non-zero value for the denominator (e.g. stride)
11678 // and produce the right result.
11679 // FIXME: Handle the case where Stride is poison?
11680 auto wouldZeroStrideBeUB = [&]() {
11681 // Proof by contradiction. Suppose the stride were zero. If we can
11682 // prove that the backedge *is* taken on the first iteration, then since
11683 // we know this condition controls the sole exit, we must have an
11684 // infinite loop. We can't have a (well defined) infinite loop per
11685 // check just above.
11686 // Note: The (Start - Stride) term is used to get the start' term from
11687 // (start' + stride,+,stride). Remember that we only care about the
11688 // result of this expression when stride == 0 at runtime.
11689 auto *StartIfZero = getMinusSCEV(IV->getStart(), Stride);
11690 return isLoopEntryGuardedByCond(L, Cond, StartIfZero, RHS);
11691 };
11692 if (!wouldZeroStrideBeUB()) {
11693 Stride = getUMaxExpr(Stride, getOne(Stride->getType()));
11694 }
11695 }
11696 } else if (!Stride->isOne() && !NoWrap) {
11697 auto isUBOnWrap = [&]() {
11698 // Can we prove this loop *must* be UB if overflow of IV occurs?
11699 // Reasoning goes as follows:
11700 // * Suppose the IV did self wrap.
11701 // * If Stride evenly divides the iteration space, then once wrap
11702 // occurs, the loop must revisit the same values.
11703 // * We know that RHS is invariant, and that none of those values
11704 // caused this exit to be taken previously. Thus, this exit is
11705 // dynamically dead.
11706 // * If this is the sole exit, then a dead exit implies the loop
11707 // must be infinite if there are no abnormal exits.
11708 // * If the loop were infinite, then it must either not be mustprogress
11709 // or have side effects. Otherwise, it must be UB.
11710 // * It can't (by assumption), be UB so we have contradicted our
11711 // premise and can conclude the IV did not in fact self-wrap.
11712 // From no-self-wrap, we then need to prove no-(un)signed-wrap. This
11713 // follows trivially from the fact that every (un)signed-wrapped, but
11714 // not self-wrapped, value must be less than the last value before the
11715 // (un)signed wrap. Since we know that last value didn't take the exit,
11716 // no smaller one will either.
11717
11718 if (!isLoopInvariant(RHS, L))
11719 return false;
11720
11721 auto *StrideC = dyn_cast<SCEVConstant>(Stride);
11722 if (!StrideC || !StrideC->getAPInt().isPowerOf2())
11723 return false;
11724
11725 if (!ControlsExit || !loopHasNoAbnormalExits(L))
11726 return false;
11727
11728 return loopIsFiniteByAssumption(L);
11729 };
11730
11731 // Avoid proven overflow cases: this will ensure that the backedge taken
11732 // count will not generate any unsigned overflow. Relaxed no-overflow
11733 // conditions exploit NoWrapFlags, allowing us to optimize in the presence
11734 // of undefined behavior, as in C.
11735 if (canIVOverflowOnLT(RHS, Stride, IsSigned) && !isUBOnWrap())
11736 return getCouldNotCompute();
11737 }
11738
11739 // On all paths just preceding, we established the following invariant:
11740 // IV can be assumed not to overflow up to and including the exiting
11741 // iteration. We proved this in one of two ways:
11742 // 1) We can show overflow doesn't occur before the exiting iteration
11743 // 1a) canIVOverflowOnLT, or 1b) a step of one
11744 // 2) We can show that if overflow occurs, the loop must execute UB
11745 // before any possible exit.
11746 // Note that we have not yet proved RHS invariant (in general).
11747
11748 const SCEV *Start = IV->getStart();
11749
11750 // Preserve pointer-typed Start/RHS to pass to isLoopEntryGuardedByCond.
11751 // Use integer-typed versions for actual computation.
11752 const SCEV *OrigStart = Start;
11753 const SCEV *OrigRHS = RHS;
11754 if (Start->getType()->isPointerTy()) {
11755 Start = getLosslessPtrToIntExpr(Start);
11756 if (isa<SCEVCouldNotCompute>(Start))
11757 return Start;
11758 }
11759 if (RHS->getType()->isPointerTy()) {
11760 RHS = getLosslessPtrToIntExpr(RHS);
11761 if (isa<SCEVCouldNotCompute>(RHS))
11762 return RHS;
11763 }
11764
11765 // When the RHS is not invariant, we do not know the end bound of the loop and
11766 // cannot calculate the ExactBECount needed by ExitLimit. However, we can
11767 // calculate the MaxBECount, given the start, stride and max value for the end
11768 // bound of the loop (RHS), and the fact that IV does not overflow (which is
11769 // checked above).
11770 if (!isLoopInvariant(RHS, L)) {
11771 const SCEV *MaxBECount = computeMaxBECountForLT(
11772 Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
11773 return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount,
11774 false /*MaxOrZero*/, Predicates);
11775 }
11776
11777 // We use the expression (max(End,Start)-Start)/Stride to describe the
11778 // backedge count: if the backedge is taken at least once, max(End,Start)
11779 // is End and the result is as above; if not, max(End,Start) is Start,
11780 // so we get a backedge count of zero.
11781 const SCEV *BECount = nullptr;
11782 auto *StartMinusStride = getMinusSCEV(OrigStart, Stride);
11783 // Can we prove max(RHS,Start) > Start - Stride?
11784 if (isLoopEntryGuardedByCond(L, Cond, StartMinusStride, Start) &&
11785 isLoopEntryGuardedByCond(L, Cond, StartMinusStride, RHS)) {
11786 // In this case, we can use a refined formula for computing backedge taken
11787 // count. The general formula remains:
11788 // "End-Start /uceiling Stride" where "End = max(RHS,Start)"
11789 // We want to use the alternate formula:
11790 // "((End - 1) - (Start - Stride)) /u Stride"
11791 // Let's do a quick case analysis to show these are equivalent under
11792 // our precondition that max(RHS,Start) > Start - Stride.
11793 // * For RHS <= Start, the backedge-taken count must be zero.
11794 // "((End - 1) - (Start - Stride)) /u Stride" reduces to
11795 // "((Start - 1) - (Start - Stride)) /u Stride" which simplies to
11796 // "Stride - 1 /u Stride" which is indeed zero for all non-zero values
11797 // of Stride. For 0 stride, we've use umin(1,Stride) above, reducing
11798 // this to the stride of 1 case.
11799 // * For RHS >= Start, the backedge count must be "RHS-Start /uceil Stride".
11800 // "((End - 1) - (Start - Stride)) /u Stride" reduces to
11801 // "((RHS - 1) - (Start - Stride)) /u Stride" reassociates to
11802 // "((RHS - (Start - Stride) - 1) /u Stride".
11803 // Our preconditions trivially imply no overflow in that form.
11804 const SCEV *MinusOne = getMinusOne(Stride->getType());
11805 const SCEV *Numerator =
11806 getMinusSCEV(getAddExpr(RHS, MinusOne), StartMinusStride);
11807 if (!isa<SCEVCouldNotCompute>(Numerator)) {
11808 BECount = getUDivExpr(Numerator, Stride);
11809 }
11810 }
11811
11812 const SCEV *BECountIfBackedgeTaken = nullptr;
11813 if (!BECount) {
11814 auto canProveRHSGreaterThanEqualStart = [&]() {
11815 auto CondGE = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
11816 if (isLoopEntryGuardedByCond(L, CondGE, OrigRHS, OrigStart))
11817 return true;
11818
11819 // (RHS > Start - 1) implies RHS >= Start.
11820 // * "RHS >= Start" is trivially equivalent to "RHS > Start - 1" if
11821 // "Start - 1" doesn't overflow.
11822 // * For signed comparison, if Start - 1 does overflow, it's equal
11823 // to INT_MAX, and "RHS >s INT_MAX" is trivially false.
11824 // * For unsigned comparison, if Start - 1 does overflow, it's equal
11825 // to UINT_MAX, and "RHS >u UINT_MAX" is trivially false.
11826 //
11827 // FIXME: Should isLoopEntryGuardedByCond do this for us?
11828 auto CondGT = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
11829 auto *StartMinusOne = getAddExpr(OrigStart,
11830 getMinusOne(OrigStart->getType()));
11831 return isLoopEntryGuardedByCond(L, CondGT, OrigRHS, StartMinusOne);
11832 };
11833
11834 // If we know that RHS >= Start in the context of loop, then we know that
11835 // max(RHS, Start) = RHS at this point.
11836 const SCEV *End;
11837 if (canProveRHSGreaterThanEqualStart()) {
11838 End = RHS;
11839 } else {
11840 // If RHS < Start, the backedge will be taken zero times. So in
11841 // general, we can write the backedge-taken count as:
11842 //
11843 // RHS >= Start ? ceil(RHS - Start) / Stride : 0
11844 //
11845 // We convert it to the following to make it more convenient for SCEV:
11846 //
11847 // ceil(max(RHS, Start) - Start) / Stride
11848 End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start);
11849
11850 // See what would happen if we assume the backedge is taken. This is
11851 // used to compute MaxBECount.
11852 BECountIfBackedgeTaken = getUDivCeilSCEV(getMinusSCEV(RHS, Start), Stride);
11853 }
11854
11855 // At this point, we know:
11856 //
11857 // 1. If IsSigned, Start <=s End; otherwise, Start <=u End
11858 // 2. The index variable doesn't overflow.
11859 //
11860 // Therefore, we know N exists such that
11861 // (Start + Stride * N) >= End, and computing "(Start + Stride * N)"
11862 // doesn't overflow.
11863 //
11864 // Using this information, try to prove whether the addition in
11865 // "(Start - End) + (Stride - 1)" has unsigned overflow.
11866 const SCEV *One = getOne(Stride->getType());
11867 bool MayAddOverflow = [&] {
11868 if (auto *StrideC = dyn_cast<SCEVConstant>(Stride)) {
11869 if (StrideC->getAPInt().isPowerOf2()) {
11870 // Suppose Stride is a power of two, and Start/End are unsigned
11871 // integers. Let UMAX be the largest representable unsigned
11872 // integer.
11873 //
11874 // By the preconditions of this function, we know
11875 // "(Start + Stride * N) >= End", and this doesn't overflow.
11876 // As a formula:
11877 //
11878 // End <= (Start + Stride * N) <= UMAX
11879 //
11880 // Subtracting Start from all the terms:
11881 //
11882 // End - Start <= Stride * N <= UMAX - Start
11883 //
11884 // Since Start is unsigned, UMAX - Start <= UMAX. Therefore:
11885 //
11886 // End - Start <= Stride * N <= UMAX
11887 //
11888 // Stride * N is a multiple of Stride. Therefore,
11889 //
11890 // End - Start <= Stride * N <= UMAX - (UMAX mod Stride)
11891 //
11892 // Since Stride is a power of two, UMAX + 1 is divisible by Stride.
11893 // Therefore, UMAX mod Stride == Stride - 1. So we can write:
11894 //
11895 // End - Start <= Stride * N <= UMAX - (Stride - 1)
11896 //
11897 // Dropping the middle term:
11898 //
11899 // End - Start <= UMAX - (Stride - 1)
11900 //
11901 // Adding Stride - 1 to both sides:
11902 //
11903 // (End - Start) + (Stride - 1) <= UMAX
11904 //
11905 // In other words, the addition doesn't have unsigned overflow.
11906 //
11907 // A similar proof works if we treat Start/End as signed values.
11908 // Just rewrite steps before "End - Start <= Stride * N <= UMAX" to
11909 // use signed max instead of unsigned max. Note that we're trying
11910 // to prove a lack of unsigned overflow in either case.
11911 return false;
11912 }
11913 }
11914 if (Start == Stride || Start == getMinusSCEV(Stride, One)) {
11915 // If Start is equal to Stride, (End - Start) + (Stride - 1) == End - 1.
11916 // If !IsSigned, 0 <u Stride == Start <=u End; so 0 <u End - 1 <u End.
11917 // If IsSigned, 0 <s Stride == Start <=s End; so 0 <s End - 1 <s End.
11918 //
11919 // If Start is equal to Stride - 1, (End - Start) + Stride - 1 == End.
11920 return false;
11921 }
11922 return true;
11923 }();
11924
11925 const SCEV *Delta = getMinusSCEV(End, Start);
11926 if (!MayAddOverflow) {
11927 // floor((D + (S - 1)) / S)
11928 // We prefer this formulation if it's legal because it's fewer operations.
11929 BECount =
11930 getUDivExpr(getAddExpr(Delta, getMinusSCEV(Stride, One)), Stride);
11931 } else {
11932 BECount = getUDivCeilSCEV(Delta, Stride);
11933 }
11934 }
11935
11936 const SCEV *MaxBECount;
11937 bool MaxOrZero = false;
11938 if (isa<SCEVConstant>(BECount)) {
11939 MaxBECount = BECount;
11940 } else if (BECountIfBackedgeTaken &&
11941 isa<SCEVConstant>(BECountIfBackedgeTaken)) {
11942 // If we know exactly how many times the backedge will be taken if it's
11943 // taken at least once, then the backedge count will either be that or
11944 // zero.
11945 MaxBECount = BECountIfBackedgeTaken;
11946 MaxOrZero = true;
11947 } else {
11948 MaxBECount = computeMaxBECountForLT(
11949 Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
11950 }
11951
11952 if (isa<SCEVCouldNotCompute>(MaxBECount) &&
11953 !isa<SCEVCouldNotCompute>(BECount))
11954 MaxBECount = getConstant(getUnsignedRangeMax(BECount));
11955
11956 return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates);
11957}
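
// Worked example for the refined formula "((End - 1) - (Start - Stride))
// /u Stride" used above (illustrative, assuming the entry guards hold):
// with IV == {0,+,4} and latch condition "IV <u 10", Start == 0 and
// Stride == 4, so the numerator is (10 - 1) - (0 - 4) == 13 and
// BECount == 13 /u 4 == 3, matching the three latch evaluations
// (IV == 0, 4, 8) that take the backedge before IV reaches 12.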
11958
11959ScalarEvolution::ExitLimit
11960ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS,
11961 const Loop *L, bool IsSigned,
11962 bool ControlsExit, bool AllowPredicates) {
11963 SmallPtrSet<const SCEVPredicate *, 4> Predicates;
11964 // We handle only IV > Invariant
11965 if (!isLoopInvariant(RHS, L))
11966 return getCouldNotCompute();
11967
11968 const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
11969 if (!IV && AllowPredicates)
11970 // Try to make this an AddRec using runtime tests, in the first X
11971 // iterations of this loop, where X is the SCEV expression found by the
11972 // algorithm below.
11973 IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);
11974
11975 // Avoid weird loops
11976 if (!IV || IV->getLoop() != L || !IV->isAffine())
11977 return getCouldNotCompute();
11978
11979 auto WrapType = IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW;
11980 bool NoWrap = ControlsExit && IV->getNoWrapFlags(WrapType);
11981 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
11982
11983 const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this));
11984
11985 // Avoid negative or zero stride values
11986 if (!isKnownPositive(Stride))
11987 return getCouldNotCompute();
11988
11989 // Avoid proven overflow cases: this will ensure that the backedge taken count
11990 // will not generate any unsigned overflow. Relaxed no-overflow conditions
11991 // exploit NoWrapFlags, allowing us to optimize in the presence of undefined
11992 // behavior, as in C.
11993 if (!Stride->isOne() && !NoWrap)
11994 if (canIVOverflowOnGT(RHS, Stride, IsSigned))
11995 return getCouldNotCompute();
11996
11997 const SCEV *Start = IV->getStart();
11998 const SCEV *End = RHS;
11999 if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS)) {
12000 // If we know that Start >= RHS in the context of loop, then we know that
12001 // min(RHS, Start) = RHS at this point.
12002 if (isLoopEntryGuardedByCond(
12003 L, IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE, Start, RHS))
12004 End = RHS;
12005 else
12006 End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start);
12007 }
12008
12009 if (Start->getType()->isPointerTy()) {
12010 Start = getLosslessPtrToIntExpr(Start);
12011 if (isa<SCEVCouldNotCompute>(Start))
12012 return Start;
12013 }
12014 if (End->getType()->isPointerTy()) {
12015 End = getLosslessPtrToIntExpr(End);
12016 if (isa<SCEVCouldNotCompute>(End))
12017 return End;
12018 }
12019
12020 // Compute ((Start - End) + (Stride - 1)) / Stride.
12021 // FIXME: This can overflow. Holding off on fixing this for now;
12022 // howManyGreaterThans will hopefully be gone soon.
12023 const SCEV *One = getOne(Stride->getType());
12024 const SCEV *BECount = getUDivExpr(
12025 getAddExpr(getMinusSCEV(Start, End), getMinusSCEV(Stride, One)), Stride);
12026
12027 APInt MaxStart = IsSigned ? getSignedRangeMax(Start)
12028 : getUnsignedRangeMax(Start);
12029
12030 APInt MinStride = IsSigned ? getSignedRangeMin(Stride)
12031 : getUnsignedRangeMin(Stride);
12032
12033 unsigned BitWidth = getTypeSizeInBits(LHS->getType());
12034 APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1)
12035 : APInt::getMinValue(BitWidth) + (MinStride - 1);
12036
12037 // Although End can be a MIN expression we estimate MinEnd considering only
12038 // the case End = RHS. This is safe because in the other case (Start - End)
12039 // is zero, leading to a zero maximum backedge taken count.
12040 APInt MinEnd =
12041 IsSigned ? APIntOps::smax(getSignedRangeMin(RHS), Limit)
12042 : APIntOps::umax(getUnsignedRangeMin(RHS), Limit);
12043
12044 const SCEV *MaxBECount = isa<SCEVConstant>(BECount)
12045 ? BECount
12046 : getUDivCeilSCEV(getConstant(MaxStart - MinEnd),
12047 getConstant(MinStride));
12048
12049 if (isa<SCEVCouldNotCompute>(MaxBECount))
12050 MaxBECount = BECount;
12051
12052 return ExitLimit(BECount, MaxBECount, false, Predicates);
12053}
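
// Worked example (illustrative values): for IV == {10,+,-3} compared as
// "IV >s 0" with End == RHS == 0, Stride == 3 after negation, so
// BECount == ((10 - 0) + (3 - 1)) /u 3 == 4, matching the latch values
// 10, 7, 4, 1 that satisfy the condition before IV reaches -2.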
12054
12055const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range,
12056 ScalarEvolution &SE) const {
12057 if (Range.isFullSet()) // Infinite loop.
12058 return SE.getCouldNotCompute();
12059
12060 // If the start is a non-zero constant, shift the range to simplify things.
12061 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
12062 if (!SC->getValue()->isZero()) {
12063 SmallVector<const SCEV *, 4> Operands(operands());
12064 Operands[0] = SE.getZero(SC->getType());
12065 const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(),
12066 getNoWrapFlags(FlagNW));
12067 if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted))
12068 return ShiftedAddRec->getNumIterationsInRange(
12069 Range.subtract(SC->getAPInt()), SE);
12070 // This is strange and shouldn't happen.
12071 return SE.getCouldNotCompute();
12072 }
12073
12074 // The only time we can solve this is when we have all constant indices.
12075 // Otherwise, we cannot determine the overflow conditions.
12076 if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); }))
12077 return SE.getCouldNotCompute();
12078
12079 // Okay at this point we know that all elements of the chrec are constants and
12080 // that the start element is zero.
12081
12082 // First check to see if the range contains zero. If not, the first
12083 // iteration exits.
12084 unsigned BitWidth = SE.getTypeSizeInBits(getType());
12085 if (!Range.contains(APInt(BitWidth, 0)))
12086 return SE.getZero(getType());
12087
12088 if (isAffine()) {
12089 // If this is an affine expression then we have this situation:
12090 // Solve {0,+,A} in Range === Ax in Range
12091
12092 // We know that zero is in the range. If A is positive then we know that
12093 // the upper value of the range must be the first possible exit value.
12094 // If A is negative then the lower of the range is the last possible loop
12095 // value. Also note that we already checked for a full range.
12096 APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt();
12097 APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower();
12098
12099 // The exit value should be (End+A)/A.
12100 APInt ExitVal = (End + A).udiv(A);
12101 ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);
12102
12103 // Evaluate at the exit value. If we really did fall out of the valid
12104 // range, then we computed our trip count, otherwise wrap around or other
12105 // things must have happened.
12106 ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
12107 if (Range.contains(Val->getValue()))
12108 return SE.getCouldNotCompute(); // Something strange happened
12109
12110 // Ensure that the previous value is in the range. This is a sanity check.
12111 assert(Range.contains(
12112 EvaluateConstantChrecAtConstant(this,
12113 ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) &&
12114 "Linear scev computation is off in a bad way!");
12115 return SE.getConstant(ExitValue);
12116 }
12117
12118 if (isQuadratic()) {
12119 if (auto S = SolveQuadraticAddRecRange(this, Range, SE))
12120 return SE.getConstant(S.getValue());
12121 }
12122
12123 return SE.getCouldNotCompute();
12124}
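
// Worked example for the affine case (illustrative): for {0,+,2} with
// Range == [0, 10), A == 2 is positive, so End == 9 and
// ExitVal == (9 + 2) /u 2 == 5. Evaluating the chrec at 5 gives 10,
// which is outside the range, so the recurrence stays in [0, 10) for
// exactly 5 iterations (values 0, 2, 4, 6, 8).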
12125
12126const SCEVAddRecExpr *
12127SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const {
12128 assert(getNumOperands() > 1 && "AddRec with zero step?");
12129 // There is a temptation to just call getAddExpr(this, getStepRecurrence(SE)),
12130 // but in this case we cannot guarantee that the value returned will be an
12131 // AddRec because SCEV does not have a fixed point where it stops
12132 // simplification: it is legal to return ({rec1} + {rec2}). For example, it
12133 // may happen if we reach the arithmetic depth limit while simplifying. So we
12134 // construct the returned value explicitly.
12135 SmallVector<const SCEV *, 3> Ops;
12136 // If this is {A,+,B,+,C,...,+,N}, then its step is {B,+,C,+,...,+,N}, and
12137 // (this + Step) is {A+B,+,B+C,+...,+,N}.
12138 for (unsigned i = 0, e = getNumOperands() - 1; i < e; ++i)
12139 Ops.push_back(SE.getAddExpr(getOperand(i), getOperand(i + 1)));
12140 // We know that the last operand is not a constant zero (otherwise it would
12141 // have been popped out earlier). This guarantees us that if the result has
12142 // the same last operand, then it will also not be popped out, meaning that
12143 // the returned value will be an AddRec.
12144 const SCEV *Last = getOperand(getNumOperands() - 1);
12145 assert(!Last->isZero() && "Recurrence with zero step?");
12146 Ops.push_back(Last);
12147 return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, getLoop(),
12148 SCEV::FlagAnyWrap));
12149}
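
// Worked example: for an affine recurrence {A,+,B}, the loop above builds
// {A+B,+,B}; e.g. the post-increment form of {0,+,4} is {4,+,4}, the same
// sequence advanced by one iteration.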
12150
12151// Return true when S contains at least one undef value.
12152static inline bool containsUndefs(const SCEV *S) {
12153 return SCEVExprContains(S, [](const SCEV *S) {
12154 if (const auto *SU = dyn_cast<SCEVUnknown>(S))
12155 return isa<UndefValue>(SU->getValue());
12156 return false;
12157 });
12158}
12159
12160namespace {
12161
12162// Collect all steps of SCEV expressions.
12163struct SCEVCollectStrides {
12164 ScalarEvolution &SE;
12165 SmallVectorImpl<const SCEV *> &Strides;
12166
12167 SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S)
12168 : SE(SE), Strides(S) {}
12169
12170 bool follow(const SCEV *S) {
12171 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
12172 Strides.push_back(AR->getStepRecurrence(SE));
12173 return true;
12174 }
12175
12176 bool isDone() const { return false; }
12177};
12178
12179// Collect all SCEVUnknown, SCEVMulExpr, and SCEVSignExtendExpr expressions.
12180struct SCEVCollectTerms {
12181 SmallVectorImpl<const SCEV *> &Terms;
12182
12183 SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T) : Terms(T) {}
12184
12185 bool follow(const SCEV *S) {
12186 if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S) ||
12187 isa<SCEVSignExtendExpr>(S)) {
12188 if (!containsUndefs(S))
12189 Terms.push_back(S);
12190
12191 // Stop recursion: once we collected a term, do not walk its operands.
12192 return false;
12193 }
12194
12195 // Keep looking.
12196 return true;
12197 }
12198
12199 bool isDone() const { return false; }
12200};
12201
12202// Check if a SCEV contains an AddRecExpr.
12203struct SCEVHasAddRec {
12204 bool &ContainsAddRec;
12205
12206 SCEVHasAddRec(bool &ContainsAddRec) : ContainsAddRec(ContainsAddRec) {
12207 ContainsAddRec = false;
12208 }
12209
12210 bool follow(const SCEV *S) {
12211 if (isa<SCEVAddRecExpr>(S)) {
12212 ContainsAddRec = true;
12213
12214 // Stop recursion: we found an AddRec, no need to walk its operands.
12215 return false;
12216 }
12217
12218 // Keep looking.
12219 return true;
12220 }
12221
12222 bool isDone() const { return false; }
12223};
12224
12225// Find factors that are multiplied with an expression that (possibly as a
12226// subexpression) contains an AddRecExpr. In the expression:
12227//
12228// 8 * (100 + %p * %q * (%a + {0, +, 1}_loop))
12229//
12230// "%p * %q" are factors multiplied by the expression "(%a + {0, +, 1}_loop)"
12231// that contains the AddRec {0, +, 1}_loop. %p * %q are likely to be array size
12232// parameters as they form a product with an induction variable.
12233//
12234// This collector expects all array size parameters to be in the same MulExpr.
12235// It might be necessary to later add support for collecting parameters that are
12236// spread over different nested MulExpr.
12237struct SCEVCollectAddRecMultiplies {
12238 SmallVectorImpl<const SCEV *> &Terms;
12239 ScalarEvolution &SE;
12240
12241 SCEVCollectAddRecMultiplies(SmallVectorImpl<const SCEV *> &T, ScalarEvolution &SE)
12242 : Terms(T), SE(SE) {}
12243
12244 bool follow(const SCEV *S) {
12245 if (auto *Mul = dyn_cast<SCEVMulExpr>(S)) {
12246 bool HasAddRec = false;
12247 SmallVector<const SCEV *, 0> Operands;
12248 for (auto Op : Mul->operands()) {
12249 const SCEVUnknown *Unknown = dyn_cast<SCEVUnknown>(Op);
12250 if (Unknown && !isa<CallInst>(Unknown->getValue())) {
12251 Operands.push_back(Op);
12252 } else if (Unknown) {
12253 HasAddRec = true;
12254 } else {
12255 bool ContainsAddRec = false;
12256 SCEVHasAddRec HasAddRecVisitor(ContainsAddRec);
12257 visitAll(Op, HasAddRecVisitor);
12258 HasAddRec |= ContainsAddRec;
12259 }
12260 }
12261 if (Operands.empty())
12262 return true;
12263
12264 if (!HasAddRec)
12265 return false;
12266
12267 Terms.push_back(SE.getMulExpr(Operands));
12268 // Stop recursion: once we collected a term, do not walk its operands.
12269 return false;
12270 }
12271
12272 // Keep looking.
12273 return true;
12274 }
12275
12276 bool isDone() const { return false; }
12277};
12278
12279} // end anonymous namespace
12280
12281/// Find parametric terms in this SCEVAddRecExpr. We look for parameters in
12282/// two places:
12283/// 1) The strides of AddRec expressions.
12284/// 2) Unknowns that are multiplied with AddRec expressions.
12285void ScalarEvolution::collectParametricTerms(const SCEV *Expr,
12286 SmallVectorImpl<const SCEV *> &Terms) {
12287 SmallVector<const SCEV *, 4> Strides;
12288 SCEVCollectStrides StrideCollector(*this, Strides);
12289 visitAll(Expr, StrideCollector);
12290
12291 LLVM_DEBUG({
12292 dbgs() << "Strides:\n";
12293 for (const SCEV *S : Strides)
12294 dbgs() << *S << "\n";
12295 });
12296
12297 for (const SCEV *S : Strides) {
12298 SCEVCollectTerms TermCollector(Terms);
12299 visitAll(S, TermCollector);
12300 }
12301
12302 LLVM_DEBUG({
12303 dbgs() << "Terms:\n";
12304 for (const SCEV *T : Terms)
12305 dbgs() << *T << "\n";
12306 });
12307
12308 SCEVCollectAddRecMultiplies MulCollector(Terms, *this);
12309 visitAll(Expr, MulCollector);
12310}
12311
12312static bool findArrayDimensionsRec(ScalarEvolution &SE,
12313 SmallVectorImpl<const SCEV *> &Terms,
12314 SmallVectorImpl<const SCEV *> &Sizes) {
12315 int Last = Terms.size() - 1;
12316 const SCEV *Step = Terms[Last];
12317
12318 // End of recursion.
12319 if (Last == 0) {
12320 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) {
12321 SmallVector<const SCEV *, 2> Qs;
12322 for (const SCEV *Op : M->operands())
12323 if (!isa<SCEVConstant>(Op))
12324 Qs.push_back(Op);
12325
12326 Step = SE.getMulExpr(Qs);
12327 }
12328
12329 Sizes.push_back(Step);
12330 return true;
12331 }
12332
12333 for (const SCEV *&Term : Terms) {
12334 // Normalize the terms before the next call to findArrayDimensionsRec.
12335 const SCEV *Q, *R;
12336 SCEVDivision::divide(SE, Term, Step, &Q, &R);
12337
12338 // Bail out when GCD does not evenly divide one of the terms.
12339 if (!R->isZero())
12340 return false;
12341
12342 Term = Q;
12343 }
12344
12345 // Remove all SCEVConstants.
12346 erase_if(Terms, [](const SCEV *E) { return isa<SCEVConstant>(E); });
12347
12348 if (!Terms.empty())
12349 if (!findArrayDimensionsRec(SE, Terms, Sizes))
12350 return false;
12351
12352 Sizes.push_back(Step);
12353 return true;
12354}
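
// Worked example (illustrative parameters %m, %o): starting from
// Terms == {%m * %o, %o}, Step == %o divides %m * %o with quotient %m and
// zero remainder, the recursive call on {%m} pushes %m into Sizes, and %o
// is pushed on the way back out, yielding Sizes == {%m, %o}.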
12355
12356// Returns true when one of the SCEVs of Terms contains a SCEVUnknown parameter.
12357static inline bool containsParameters(SmallVectorImpl<const SCEV *> &Terms) {
12358 for (const SCEV *T : Terms)
12359 if (SCEVExprContains(T, [](const SCEV *S) { return isa<SCEVUnknown>(S); }))
12360 return true;
12361
12362 return false;
12363}
12364
12365// Return the number of product terms in S.
12366static inline int numberOfTerms(const SCEV *S) {
12367 if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S))
12368 return Expr->getNumOperands();
12369 return 1;
12370}
12371
12372static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) {
12373 if (isa<SCEVConstant>(T))
12374 return nullptr;
12375
12376 if (isa<SCEVUnknown>(T))
12377 return T;
12378
12379 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) {
12380 SmallVector<const SCEV *, 2> Factors;
12381 for (const SCEV *Op : M->operands())
12382 if (!isa<SCEVConstant>(Op))
12383 Factors.push_back(Op);
12384
12385 return SE.getMulExpr(Factors);
12386 }
12387
12388 return T;
12389}
12390
12391/// Return the size of an element read or written by Inst.
12392const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) {
12393 Type *Ty;
12394 if (StoreInst *Store = dyn_cast<StoreInst>(Inst))
12395 Ty = Store->getValueOperand()->getType();
12396 else if (LoadInst *Load = dyn_cast<LoadInst>(Inst))
12397 Ty = Load->getType();
12398 else
12399 return nullptr;
12400
12401 Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty));
12402 return getSizeOfExpr(ETy, Ty);
12403}
12404
12405void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
12406 SmallVectorImpl<const SCEV *> &Sizes,
12407 const SCEV *ElementSize) {
12408 if (Terms.empty() || !ElementSize)
12409 return;
12410
12411 // Early return when Terms do not contain parameters: we do not delinearize
12412 // non-parametric SCEVs.
12413 if (!containsParameters(Terms))
12414 return;
12415
12416 LLVM_DEBUG({
12417 dbgs() << "Terms:\n";
12418 for (const SCEV *T : Terms)
12419 dbgs() << *T << "\n";
12420 });
12421
12422 // Remove duplicates.
12423 array_pod_sort(Terms.begin(), Terms.end());
12424 Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end());
12425
12426 // Put larger terms first.
12427 llvm::sort(Terms, [](const SCEV *LHS, const SCEV *RHS) {
12428 return numberOfTerms(LHS) > numberOfTerms(RHS);
12429 });
12430
12431 // Try to divide all terms by the element size. If a term is not
12432 // divisible by the element size, keep the original term.
12433 for (const SCEV *&Term : Terms) {
12434 const SCEV *Q, *R;
12435 SCEVDivision::divide(*this, Term, ElementSize, &Q, &R);
12436 if (!Q->isZero())
12437 Term = Q;
12438 }
12439
12440 SmallVector<const SCEV *, 4> NewTerms;
12441
12442 // Remove constant factors.
12443 for (const SCEV *T : Terms)
12444 if (const SCEV *NewT = removeConstantFactors(*this, T))
12445 NewTerms.push_back(NewT);
12446
12447 LLVM_DEBUG({
12448 dbgs() << "Terms after sorting:\n";
12449 for (const SCEV *T : NewTerms)
12450 dbgs() << *T << "\n";
12451 });
12452
12453 if (NewTerms.empty() || !findArrayDimensionsRec(*this, NewTerms, Sizes)) {
12454 Sizes.clear();
12455 return;
12456 }
12457
12458 // The last element to be pushed into Sizes is the size of an element.
12459 Sizes.push_back(ElementSize);
12460
12461 LLVM_DEBUG({
12462 dbgs() << "Sizes:\n";
12463 for (const SCEV *S : Sizes)
12464 dbgs() << *S << "\n";
12465 });
12466}
12467
12468void ScalarEvolution::computeAccessFunctions(
12469 const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts,
12470 SmallVectorImpl<const SCEV *> &Sizes) {
12471 // Early exit in case this SCEV is not an affine multivariate function.
12472 if (Sizes.empty())
12473 return;
12474
12475 if (auto *AR = dyn_cast<SCEVAddRecExpr>(Expr))
12476 if (!AR->isAffine())
12477 return;
12478
12479 const SCEV *Res = Expr;
12480 int Last = Sizes.size() - 1;
12481 for (int i = Last; i >= 0; i--) {
12482 const SCEV *Q, *R;
12483 SCEVDivision::divide(*this, Res, Sizes[i], &Q, &R);
12484
12485 LLVM_DEBUG({
12486 dbgs() << "Res: " << *Res << "\n";
12487 dbgs() << "Sizes[i]: " << *Sizes[i] << "\n";
12488 dbgs() << "Res divided by Sizes[i]:\n";
12489 dbgs() << "Quotient: " << *Q << "\n";
12490 dbgs() << "Remainder: " << *R << "\n";
12491 });
12492
12493 Res = Q;
12494
12495 // Do not record the last subscript corresponding to the size of elements in
12496 // the array.
12497 if (i == Last) {
12498
12499 // Bail out if the remainder is too complex.
12500 if (isa<SCEVAddRecExpr>(R)) {
12501 Subscripts.clear();
12502 Sizes.clear();
12503 return;
12504 }
12505
12506 continue;
12507 }
12508
12509 // Record the access function for the current subscript.
12510 Subscripts.push_back(R);
12511 }
12512
12513 // Also push in last position the remainder of the last division: it will be
12514 // the access function of the innermost dimension.
12515 Subscripts.push_back(Res);
12516
12517 std::reverse(Subscripts.begin(), Subscripts.end());
12518
12519 LLVM_DEBUG({
12520 dbgs() << "Subscripts:\n";
12521 for (const SCEV *S : Subscripts)
12522 dbgs() << *S << "\n";
12523 });
12524}
12525
12526/// Splits the SCEV into two vectors of SCEVs representing the subscripts and
12527/// sizes of an array access. Returns the remainder of the delinearization that
12528/// is the offset start of the array. The SCEV->delinearize algorithm computes
12529/// the multiples of SCEV coefficients: that is a pattern matching of sub
12530/// expressions in the stride and base of a SCEV corresponding to the
12531/// computation of a GCD (greatest common divisor) of base and stride. When
12532/// SCEV->delinearize fails, it returns the SCEV unchanged.
12533///
12534/// For example: when analyzing the memory access A[i][j][k] in this loop nest
12535///
12536/// void foo(long n, long m, long o, double A[n][m][o]) {
12537///
12538/// for (long i = 0; i < n; i++)
12539/// for (long j = 0; j < m; j++)
12540/// for (long k = 0; k < o; k++)
12541/// A[i][j][k] = 1.0;
12542/// }
12543///
12544/// the delinearization input is the following AddRec SCEV:
12545///
12546/// AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
12547///
12548/// From this SCEV, we are able to say that the base offset of the access is %A
12549/// because it appears as an offset that does not divide any of the strides in
12550/// the loops:
12551///
12552/// CHECK: Base offset: %A
12553///
12554/// and then SCEV->delinearize determines the size of some of the dimensions of
12555/// the array as these are the multiples by which the strides are happening:
12556///
12557/// CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes.
12558///
12559/// Note that the outermost dimension remains of UnknownSize because there are
12560/// no strides that would help identifying the size of the last dimension: when
12561/// the array has been statically allocated, one could compute the size of that
12562/// dimension by dividing the overall size of the array by the size of the known
12563/// dimensions: %m * %o * 8.
12564///
12565/// Finally delinearize provides the access functions for the array reference
12566/// that does correspond to A[i][j][k] of the above C testcase:
12567///
12568/// CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>]
12569///
12570/// The testcases are checking the output of a function pass:
12571/// DelinearizationPass that walks through all loads and stores of a function
12572/// asking for the SCEV of the memory access with respect to all enclosing
12573/// loops, calling SCEV->delinearize on that and printing the results.
12574void ScalarEvolution::delinearize(const SCEV *Expr,
12575 SmallVectorImpl<const SCEV *> &Subscripts,
12576 SmallVectorImpl<const SCEV *> &Sizes,
12577 const SCEV *ElementSize) {
12578 // First step: collect parametric terms.
12579 SmallVector<const SCEV *, 4> Terms;
12580 collectParametricTerms(Expr, Terms);
12581
12582 if (Terms.empty())
12583 return;
12584
12585 // Second step: find subscript sizes.
12586 findArrayDimensions(Terms, Sizes, ElementSize);
12587
12588 if (Sizes.empty())
12589 return;
12590
12591 // Third step: compute the access functions for each subscript.
12592 computeAccessFunctions(Expr, Subscripts, Sizes);
12593
12594 if (Subscripts.empty())
12595 return;
12596
12597 LLVM_DEBUG({
12598 dbgs() << "succeeded to delinearize " << *Expr << "\n";
12599 dbgs() << "ArrayDecl[UnknownSize]";
12600 for (const SCEV *S : Sizes)
12601 dbgs() << "[" << *S << "]";
12602
12603 dbgs() << "\nArrayRef";
12604 for (const SCEV *S : Subscripts)
12605 dbgs() << "[" << *S << "]";
12606 dbgs() << "\n";
12607 });
12608}
12609
12610bool ScalarEvolution::getIndexExpressionsFromGEP(
12611 const GetElementPtrInst *GEP, SmallVectorImpl<const SCEV *> &Subscripts,
12612 SmallVectorImpl<int> &Sizes) {
12613 assert(Subscripts.empty() && Sizes.empty() &&
12614 "Expected output lists to be empty on entry to this function.");
12615 assert(GEP && "getIndexExpressionsFromGEP called with a null GEP");
12616 Type *Ty = nullptr;
12617 bool DroppedFirstDim = false;
12618 for (unsigned i = 1; i < GEP->getNumOperands(); i++) {
12619 const SCEV *Expr = getSCEV(GEP->getOperand(i));
12620 if (i == 1) {
12621 Ty = GEP->getSourceElementType();
12622 if (auto *Const = dyn_cast<SCEVConstant>(Expr))
12623 if (Const->getValue()->isZero()) {
12624 DroppedFirstDim = true;
12625 continue;
12626 }
12627 Subscripts.push_back(Expr);
12628 continue;
12629 }
12630
12631 auto *ArrayTy = dyn_cast<ArrayType>(Ty);
12632 if (!ArrayTy) {
12633 Subscripts.clear();
12634 Sizes.clear();
12635 return false;
12636 }
12637
12638 Subscripts.push_back(Expr);
12639 if (!(DroppedFirstDim && i == 2))
12640 Sizes.push_back(ArrayTy->getNumElements());
12641
12642 Ty = ArrayTy->getElementType();
12643 }
12644 return !Subscripts.empty();
12645}
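A worked example (illustrative IR, not from this file): given

  %p = getelementptr [8 x [16 x i32]], [8 x [16 x i32]]* %A, i64 0, i64 %i, i64 %j

the leading constant-zero index sets DroppedFirstDim, so the function returns true with Subscripts = {%i, %j} and Sizes = {16}. The extent of the outermost dimension (8) is deliberately not recorded, mirroring the UnknownSize convention of delinearize above.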
12646
12647//===----------------------------------------------------------------------===//
12648// SCEVCallbackVH Class Implementation
12649//===----------------------------------------------------------------------===//
12650
12651void ScalarEvolution::SCEVCallbackVH::deleted() {
12652 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
12653 if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
12654 SE->ConstantEvolutionLoopExitValue.erase(PN);
12655 SE->eraseValueFromMap(getValPtr());
12656 // this now dangles!
12657}
12658
12659void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
12660 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
12661
12662 // Forget all the expressions associated with users of the old value,
12663 // so that future queries will recompute the expressions using the new
12664 // value.
12665 Value *Old = getValPtr();
12666 SmallVector<User *, 16> Worklist(Old->users());
12667 SmallPtrSet<User *, 8> Visited;
12668 while (!Worklist.empty()) {
12669 User *U = Worklist.pop_back_val();
12670 // Deleting the Old value will cause this to dangle. Postpone
12671 // that until everything else is done.
12672 if (U == Old)
12673 continue;
12674 if (!Visited.insert(U).second)
12675 continue;
12676 if (PHINode *PN = dyn_cast<PHINode>(U))
12677 SE->ConstantEvolutionLoopExitValue.erase(PN);
12678 SE->eraseValueFromMap(U);
12679 llvm::append_range(Worklist, U->users());
12680 }
12681 // Delete the Old value.
12682 if (PHINode *PN = dyn_cast<PHINode>(Old))
12683 SE->ConstantEvolutionLoopExitValue.erase(PN);
12684 SE->eraseValueFromMap(Old);
12685 // this now dangles!
12686}
12687
12688ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
12689 : CallbackVH(V), SE(se) {}
12690
12691//===----------------------------------------------------------------------===//
12692// ScalarEvolution Class Implementation
12693//===----------------------------------------------------------------------===//
12694
12695ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI,
12696 AssumptionCache &AC, DominatorTree &DT,
12697 LoopInfo &LI)
12698 : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI),
12699 CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64),
12700 LoopDispositions(64), BlockDispositions(64) {
12701 // To use guards for proving predicates, we need to scan every instruction in
12702 // relevant basic blocks, and not just terminators. Doing this is a waste of
12703 // time if the IR does not actually contain any calls to
12704 // @llvm.experimental.guard, so do a quick check and remember this beforehand.
12705 //
12706 // This pessimizes the case where a pass that preserves ScalarEvolution wants
12707 // to _add_ guards to the module when there weren't any before, and wants
12708 // ScalarEvolution to optimize based on those guards. For now we prefer to be
12709 // efficient in lieu of being smart in that rather obscure case.
12710
12711 auto *GuardDecl = F.getParent()->getFunction(
12712 Intrinsic::getName(Intrinsic::experimental_guard));
12713 HasGuards = GuardDecl && !GuardDecl->use_empty();
12714}
12715
12716ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg)
12717 : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT),
12718 LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)),
12719 ValueExprMap(std::move(Arg.ValueExprMap)),
12720 PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)),
12721 PendingPhiRanges(std::move(Arg.PendingPhiRanges)),
12722 PendingMerges(std::move(Arg.PendingMerges)),
12723 MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)),
12724 BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)),
12725 PredicatedBackedgeTakenCounts(
12726 std::move(Arg.PredicatedBackedgeTakenCounts)),
12727 ConstantEvolutionLoopExitValue(
12728 std::move(Arg.ConstantEvolutionLoopExitValue)),
12729 ValuesAtScopes(std::move(Arg.ValuesAtScopes)),
12730 LoopDispositions(std::move(Arg.LoopDispositions)),
12731 LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)),
12732 BlockDispositions(std::move(Arg.BlockDispositions)),
12733 UnsignedRanges(std::move(Arg.UnsignedRanges)),
12734 SignedRanges(std::move(Arg.SignedRanges)),
12735 UniqueSCEVs(std::move(Arg.UniqueSCEVs)),
12736 UniquePreds(std::move(Arg.UniquePreds)),
12737 SCEVAllocator(std::move(Arg.SCEVAllocator)),
12738 LoopUsers(std::move(Arg.LoopUsers)),
12739 PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)),
12740 FirstUnknown(Arg.FirstUnknown) {
12741 Arg.FirstUnknown = nullptr;
12742}
12743
12744ScalarEvolution::~ScalarEvolution() {
12745 // Iterate through all the SCEVUnknown instances and call their
12746 // destructors, so that they release their references to their values.
12747 for (SCEVUnknown *U = FirstUnknown; U;) {
12748 SCEVUnknown *Tmp = U;
12749 U = U->Next;
12750 Tmp->~SCEVUnknown();
12751 }
12752 FirstUnknown = nullptr;
12753
12754 ExprValueMap.clear();
12755 ValueExprMap.clear();
12756 HasRecMap.clear();
12757 BackedgeTakenCounts.clear();
12758 PredicatedBackedgeTakenCounts.clear();
12759
12760 assert(PendingLoopPredicates.empty() && "isImpliedCond garbage");
12761 assert(PendingPhiRanges.empty() && "getRangeRef garbage");
12762 assert(PendingMerges.empty() && "isImpliedViaMerge garbage");
12763 assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!");
12764 assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!");
12765}
12766
12767bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
12768 return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
12769}
12770
12771static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
12772 const Loop *L) {
12773 // Print all inner loops first
12774 for (Loop *I : *L)
12775 PrintLoopInfo(OS, SE, I);
12776
12777 OS << "Loop ";
12778 L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
12779 OS << ": ";
12780
12781 SmallVector<BasicBlock *, 8> ExitingBlocks;
12782 L->getExitingBlocks(ExitingBlocks);
12783 if (ExitingBlocks.size() != 1)
12784 OS << "<multiple exits> ";
12785
12786 if (SE->hasLoopInvariantBackedgeTakenCount(L))
12787 OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L) << "\n";
12788 else
12789 OS << "Unpredictable backedge-taken count.\n";
12790
12791 if (ExitingBlocks.size() > 1)
12792 for (BasicBlock *ExitingBlock : ExitingBlocks) {
12793 OS << " exit count for " << ExitingBlock->getName() << ": "
12794 << *SE->getExitCount(L, ExitingBlock) << "\n";
12795 }
12796
12797 OS << "Loop ";
12798 L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
12799 OS << ": ";
12800
12801 if (!isa<SCEVCouldNotCompute>(SE->getConstantMaxBackedgeTakenCount(L))) {
12802 OS << "max backedge-taken count is " << *SE->getConstantMaxBackedgeTakenCount(L);
12803 if (SE->isBackedgeTakenCountMaxOrZero(L))
12804 OS << ", actual taken count either this or zero.";
12805 } else {
12806 OS << "Unpredictable max backedge-taken count. ";
12807 }
12808
12809 OS << "\n"
12810 "Loop ";
12811 L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
12812 OS << ": ";
12813
12814 SCEVUnionPredicate Pred;
12815 auto PBT = SE->getPredicatedBackedgeTakenCount(L, Pred);
12816 if (!isa<SCEVCouldNotCompute>(PBT)) {
12817 OS << "Predicated backedge-taken count is " << *PBT << "\n";
12818 OS << " Predicates:\n";
12819 Pred.print(OS, 4);
12820 } else {
12821 OS << "Unpredictable predicated backedge-taken count. ";
12822 }
12823 OS << "\n";
12824
12825 if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
12826 OS << "Loop ";
12827 L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
12828 OS << ": ";
12829 OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n";
12830 }
12831}
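For orientation, on a single-exit loop such as `for (long i = 0; i < n; ++i)` this routine produces output of roughly the following shape (illustrative; exact SCEVs and constants vary):

Loop %for.body: backedge-taken count is (-1 + %n)
Loop %for.body: max backedge-taken count is 9223372036854775806
Loop %for.body: Predicated backedge-taken count is (-1 + %n)
 Predicates:
Loop %for.body: Trip multiple is 1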
12832
12833static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) {
12834 switch (LD) {
12835 case ScalarEvolution::LoopVariant:
12836 return "Variant";
12837 case ScalarEvolution::LoopInvariant:
12838 return "Invariant";
12839 case ScalarEvolution::LoopComputable:
12840 return "Computable";
12841 }
12842 llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!")__builtin_unreachable();
12843}
12844
12845void ScalarEvolution::print(raw_ostream &OS) const {
12846 // ScalarEvolution's implementation of the print method is to print
12847 // out SCEV values of all instructions that are interesting. Doing
12848 // this potentially causes it to create new SCEV objects though,
12849 // which technically conflicts with the const qualifier. This isn't
12850 // observable from outside the class though, so casting away the
12851 // const isn't dangerous.
12852 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
12853
12854 if (ClassifyExpressions) {
12855 OS << "Classifying expressions for: ";
12856 F.printAsOperand(OS, /*PrintType=*/false);
12857 OS << "\n";
12858 for (Instruction &I : instructions(F))
12859 if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) {
12860 OS << I << '\n';
12861 OS << " --> ";
12862 const SCEV *SV = SE.getSCEV(&I);
12863 SV->print(OS);
12864 if (!isa<SCEVCouldNotCompute>(SV)) {
12865 OS << " U: ";
12866 SE.getUnsignedRange(SV).print(OS);
12867 OS << " S: ";
12868 SE.getSignedRange(SV).print(OS);
12869 }
12870
12871 const Loop *L = LI.getLoopFor(I.getParent());
12872
12873 const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
12874 if (AtUse != SV) {
12875 OS << " --> ";
12876 AtUse->print(OS);
12877 if (!isa<SCEVCouldNotCompute>(AtUse)) {
12878 OS << " U: ";
12879 SE.getUnsignedRange(AtUse).print(OS);
12880 OS << " S: ";
12881 SE.getSignedRange(AtUse).print(OS);
12882 }
12883 }
12884
12885 if (L) {
12886 OS << "\t\t" "Exits: ";
12887 const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
12888 if (!SE.isLoopInvariant(ExitValue, L)) {
12889 OS << "<<Unknown>>";
12890 } else {
12891 OS << *ExitValue;
12892 }
12893
12894 bool First = true;
12895 for (auto *Iter = L; Iter; Iter = Iter->getParentLoop()) {
12896 if (First) {
12897 OS << "\t\t" "LoopDispositions: { ";
12898 First = false;
12899 } else {
12900 OS << ", ";
12901 }
12902
12903 Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false);
12904 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter));
12905 }
12906
12907 for (auto *InnerL : depth_first(L)) {
12908 if (InnerL == L)
12909 continue;
12910 if (First) {
12911 OS << "\t\t" "LoopDispositions: { ";
12912 First = false;
12913 } else {
12914 OS << ", ";
12915 }
12916
12917 InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false);
12918 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL));
12919 }
12920
12921 OS << " }";
12922 }
12923
12924 OS << "\n";
12925 }
12926 }
12927
12928 OS << "Determining loop execution counts for: ";
12929 F.printAsOperand(OS, /*PrintType=*/false);
12930 OS << "\n";
12931 for (Loop *I : LI)
12932 PrintLoopInfo(OS, &SE, I);
12933}
12934
12935ScalarEvolution::LoopDisposition
12936ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) {
12937 auto &Values = LoopDispositions[S];
12938 for (auto &V : Values) {
12939 if (V.getPointer() == L)
12940 return V.getInt();
12941 }
12942 Values.emplace_back(L, LoopVariant);
12943 LoopDisposition D = computeLoopDisposition(S, L);
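// Note: computeLoopDisposition can recurse back into getLoopDisposition and
// grow LoopDispositions, invalidating the Values reference captured above;
// hence the fresh lookup below before the computed result is cached.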
12944 auto &Values2 = LoopDispositions[S];
12945 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) {
12946 if (V.getPointer() == L) {
12947 V.setInt(D);
12948 break;
12949 }
12950 }
12951 return D;
12952}
12953
12954ScalarEvolution::LoopDisposition
12955ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
12956 switch (S->getSCEVType()) {
12957 case scConstant:
12958 return LoopInvariant;
12959 case scPtrToInt:
12960 case scTruncate:
12961 case scZeroExtend:
12962 case scSignExtend:
12963 return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L);
12964 case scAddRecExpr: {
12965 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
12966
12967 // If L is the addrec's loop, it's computable.
12968 if (AR->getLoop() == L)
12969 return LoopComputable;
12970
12971 // Add recurrences are never invariant in the function-body (null loop).
12972 if (!L)
12973 return LoopVariant;
12974
12975 // Everything that is not defined at loop entry is variant.
12976 if (DT.dominates(L->getHeader(), AR->getLoop()->getHeader()))
12977 return LoopVariant;
12978 assert(!L->contains(AR->getLoop()) && "Containing loop's header does not"
12979 " dominate the contained loop's header?");
12980
12981 // This recurrence is invariant w.r.t. L if AR's loop contains L.
12982 if (AR->getLoop()->contains(L))
12983 return LoopInvariant;
12984
12985 // This recurrence is variant w.r.t. L if any of its operands
12986 // are variant.
12987 for (auto *Op : AR->operands())
12988 if (!isLoopInvariant(Op, L))
12989 return LoopVariant;
12990
12991 // Otherwise it's loop-invariant.
12992 return LoopInvariant;
12993 }
12994 case scAddExpr:
12995 case scMulExpr:
12996 case scUMaxExpr:
12997 case scSMaxExpr:
12998 case scUMinExpr:
12999 case scSMinExpr: {
13000 bool HasVarying = false;
13001 for (auto *Op : cast<SCEVNAryExpr>(S)->operands()) {
13002 LoopDisposition D = getLoopDisposition(Op, L);
13003 if (D == LoopVariant)
13004 return LoopVariant;
13005 if (D == LoopComputable)
13006 HasVarying = true;
13007 }
13008 return HasVarying ? LoopComputable : LoopInvariant;
13009 }
13010 case scUDivExpr: {
13011 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
13012 LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L);
13013 if (LD == LoopVariant)
13014 return LoopVariant;
13015 LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L);
13016 if (RD == LoopVariant)
13017 return LoopVariant;
13018 return (LD == LoopInvariant && RD == LoopInvariant) ?
13019 LoopInvariant : LoopComputable;
13020 }
13021 case scUnknown:
13022 // All non-instruction values are loop invariant. All instructions are loop
13023 // invariant if they are not contained in the specified loop.
13024 // Instructions are never considered invariant in the function body
13025 // (null loop) because they are defined within the "loop".
13026 if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue()))
13027 return (L && !L->contains(I)) ? LoopInvariant : LoopVariant;
13028 return LoopInvariant;
13029 case scCouldNotCompute:
13030 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
13031 }
13032 llvm_unreachable("Unknown SCEV kind!");
13033}
13034
13035bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) {
13036 return getLoopDisposition(S, L) == LoopInvariant;
13037}
13038
13039bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) {
13040 return getLoopDisposition(S, L) == LoopComputable;
13041}
13042
13043ScalarEvolution::BlockDisposition
13044ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) {
13045 auto &Values = BlockDispositions[S];
13046 for (auto &V : Values) {
13047 if (V.getPointer() == BB)
13048 return V.getInt();
13049 }
13050 Values.emplace_back(BB, DoesNotDominateBlock);
13051 BlockDisposition D = computeBlockDisposition(S, BB);
13052 auto &Values2 = BlockDispositions[S];
13053 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) {
13054 if (V.getPointer() == BB) {
13055 V.setInt(D);
13056 break;
13057 }
13058 }
13059 return D;
13060}
13061
13062ScalarEvolution::BlockDisposition
13063ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
13064 switch (S->getSCEVType()) {
13065 case scConstant:
13066 return ProperlyDominatesBlock;
13067 case scPtrToInt:
13068 case scTruncate:
13069 case scZeroExtend:
13070 case scSignExtend:
13071 return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB);
13072 case scAddRecExpr: {
13073 // This uses a "dominates" query instead of "properly dominates" query
13074 // to test for proper dominance too, because the instruction which
13075 // produces the addrec's value is a PHI, and a PHI effectively properly
13076 // dominates its entire containing block.
13077 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
13078 if (!DT.dominates(AR->getLoop()->getHeader(), BB))
13079 return DoesNotDominateBlock;
13080
13081 // Fall through into SCEVNAryExpr handling.
13082 LLVM_FALLTHROUGH;
13083 }
13084 case scAddExpr:
13085 case scMulExpr:
13086 case scUMaxExpr:
13087 case scSMaxExpr:
13088 case scUMinExpr:
13089 case scSMinExpr: {
13090 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
13091 bool Proper = true;
13092 for (const SCEV *NAryOp : NAry->operands()) {
13093 BlockDisposition D = getBlockDisposition(NAryOp, BB);
13094 if (D == DoesNotDominateBlock)
13095 return DoesNotDominateBlock;
13096 if (D == DominatesBlock)
13097 Proper = false;
13098 }
13099 return Proper ? ProperlyDominatesBlock : DominatesBlock;
13100 }
13101 case scUDivExpr: {
13102 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
13103 const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
13104 BlockDisposition LD = getBlockDisposition(LHS, BB);
13105 if (LD == DoesNotDominateBlock)
13106 return DoesNotDominateBlock;
13107 BlockDisposition RD = getBlockDisposition(RHS, BB);
13108 if (RD == DoesNotDominateBlock)
13109 return DoesNotDominateBlock;
13110 return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ?
13111 ProperlyDominatesBlock : DominatesBlock;
13112 }
13113 case scUnknown:
13114 if (Instruction *I =
13115 dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
13116 if (I->getParent() == BB)
13117 return DominatesBlock;
13118 if (DT.properlyDominates(I->getParent(), BB))
13119 return ProperlyDominatesBlock;
13120 return DoesNotDominateBlock;
13121 }
13122 return ProperlyDominatesBlock;
13123 case scCouldNotCompute:
13124 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
13125 }
13126 llvm_unreachable("Unknown SCEV kind!");
13127}
13128
13129bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
13130 return getBlockDisposition(S, BB) >= DominatesBlock;
13131}
13132
13133bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
13134 return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
13135}
13136
13137bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
13138 return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; });
13139}
13140
13141void
13142ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
13143 ValuesAtScopes.erase(S);
13144 LoopDispositions.erase(S);
13145 BlockDispositions.erase(S);
13146 UnsignedRanges.erase(S);
13147 SignedRanges.erase(S);
13148 ExprValueMap.erase(S);
13149 HasRecMap.erase(S);
13150 MinTrailingZerosCache.erase(S);
13151
13152 for (auto I = PredicatedSCEVRewrites.begin();
13153 I != PredicatedSCEVRewrites.end();) {
13154 std::pair<const SCEV *, const Loop *> Entry = I->first;
13155 if (Entry.first == S)
13156 PredicatedSCEVRewrites.erase(I++);
13157 else
13158 ++I;
13159 }
13160
13161 auto RemoveSCEVFromBackedgeMap =
13162 [S](DenseMap<const Loop *, BackedgeTakenInfo> &Map) {
13163 for (auto I = Map.begin(), E = Map.end(); I != E;) {
13164 BackedgeTakenInfo &BEInfo = I->second;
13165 if (BEInfo.hasOperand(S))
13166 Map.erase(I++);
13167 else
13168 ++I;
13169 }
13170 };
13171
13172 RemoveSCEVFromBackedgeMap(BackedgeTakenCounts);
13173 RemoveSCEVFromBackedgeMap(PredicatedBackedgeTakenCounts);
13174}
13175
13176void
13177ScalarEvolution::getUsedLoops(const SCEV *S,
13178 SmallPtrSetImpl<const Loop *> &LoopsUsed) {
13179 struct FindUsedLoops {
13180 FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed)
13181 : LoopsUsed(LoopsUsed) {}
13182 SmallPtrSetImpl<const Loop *> &LoopsUsed;
13183 bool follow(const SCEV *S) {
13184 if (auto *AR = dyn_cast<SCEVAddRecExpr>(S))
13185 LoopsUsed.insert(AR->getLoop());
13186 return true;
13187 }
13188
13189 bool isDone() const { return false; }
13190 };
13191
13192 FindUsedLoops F(LoopsUsed);
13193 SCEVTraversal<FindUsedLoops>(F).visitAll(S);
13194}
13195
13196void ScalarEvolution::addToLoopUseLists(const SCEV *S) {
13197 SmallPtrSet<const Loop *, 8> LoopsUsed;
13198 getUsedLoops(S, LoopsUsed);
13199 for (auto *L : LoopsUsed)
13200 LoopUsers[L].push_back(S);
13201}
13202
13203void ScalarEvolution::verify() const {
13204 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
13205 ScalarEvolution SE2(F, TLI, AC, DT, LI);
13206
13207 SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end());
13208
13209 // Maps SCEV expressions from one ScalarEvolution "universe" to another.
13210 struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> {
13211 SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {}
13212
13213 const SCEV *visitConstant(const SCEVConstant *Constant) {
13214 return SE.getConstant(Constant->getAPInt());
13215 }
13216
13217 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
13218 return SE.getUnknown(Expr->getValue());
13219 }
13220
13221 const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
13222 return SE.getCouldNotCompute();
13223 }
13224 };
13225
13226 SCEVMapper SCM(SE2);
13227
13228 while (!LoopStack.empty()) {
13229 auto *L = LoopStack.pop_back_val();
13230 llvm::append_range(LoopStack, *L);
13231
13232 auto *CurBECount = SCM.visit(
13233 const_cast<ScalarEvolution *>(this)->getBackedgeTakenCount(L));
13234 auto *NewBECount = SE2.getBackedgeTakenCount(L);
13235
13236 if (CurBECount == SE2.getCouldNotCompute() ||
13237 NewBECount == SE2.getCouldNotCompute()) {
13238 // NB! This situation is legal, but is very suspicious -- whatever pass
13239 // changed the loop to make a trip count go from could not compute to
13240 // computable or vice-versa *should have* invalidated SCEV. However, we
13241 // choose not to assert here (for now) since we don't want false
13242 // positives.
13243 continue;
13244 }
13245
13246 if (containsUndefs(CurBECount) || containsUndefs(NewBECount)) {
13247 // SCEV treats "undef" as an unknown but consistent value (i.e. it does
13248 // not propagate undef aggressively). This means we can (and do) fail
13249 // verification in cases where a transform makes the trip count of a loop
13250 // go from "undef" to "undef+1" (say). The transform is fine, since in
13251 // both cases the loop iterates "undef" times, but SCEV thinks we
13252 // increased the trip count of the loop by 1 incorrectly.
13253 continue;
13254 }
13255
13256 if (SE.getTypeSizeInBits(CurBECount->getType()) >
13257 SE.getTypeSizeInBits(NewBECount->getType()))
13258 NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType());
13259 else if (SE.getTypeSizeInBits(CurBECount->getType()) <
13260 SE.getTypeSizeInBits(NewBECount->getType()))
13261 CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType());
13262
13263 const SCEV *Delta = SE2.getMinusSCEV(CurBECount, NewBECount);
13264
13265 // Unless VerifySCEVStrict is set, we only compare constant deltas.
13266 if ((VerifySCEVStrict || isa<SCEVConstant>(Delta)) && !Delta->isZero()) {
13267 dbgs() << "Trip Count for " << *L << " Changed!\n";
13268 dbgs() << "Old: " << *CurBECount << "\n";
13269 dbgs() << "New: " << *NewBECount << "\n";
13270 dbgs() << "Delta: " << *Delta << "\n";
13271 std::abort();
13272 }
13273 }
13274
13275 // Collect all valid loops currently in LoopInfo.
13276 SmallPtrSet<Loop *, 32> ValidLoops;
13277 SmallVector<Loop *, 32> Worklist(LI.begin(), LI.end());
13278 while (!Worklist.empty()) {
13279 Loop *L = Worklist.pop_back_val();
13280 if (ValidLoops.contains(L))
13281 continue;
13282 ValidLoops.insert(L);
13283 Worklist.append(L->begin(), L->end());
13284 }
13285 // Check for SCEV expressions referencing invalid/deleted loops.
13286 for (auto &KV : ValueExprMap) {
13287 auto *AR = dyn_cast<SCEVAddRecExpr>(KV.second);
13288 if (!AR)
13289 continue;
13290 assert(ValidLoops.contains(AR->getLoop()) &&
13291 "AddRec references invalid loop");
13292 }
13293}
13294
13295bool ScalarEvolution::invalidate(
13296 Function &F, const PreservedAnalyses &PA,
13297 FunctionAnalysisManager::Invalidator &Inv) {
13298 // Invalidate the ScalarEvolution object whenever it isn't preserved or one
13299 // of its dependencies is invalidated.
13300 auto PAC = PA.getChecker<ScalarEvolutionAnalysis>();
13301 return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
13302 Inv.invalidate<AssumptionAnalysis>(F, PA) ||
13303 Inv.invalidate<DominatorTreeAnalysis>(F, PA) ||
13304 Inv.invalidate<LoopAnalysis>(F, PA);
13305}
13306
13307AnalysisKey ScalarEvolutionAnalysis::Key;
13308
13309ScalarEvolution ScalarEvolutionAnalysis::run(Function &F,
13310 FunctionAnalysisManager &AM) {
13311 return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F),
13312 AM.getResult<AssumptionAnalysis>(F),
13313 AM.getResult<DominatorTreeAnalysis>(F),
13314 AM.getResult<LoopAnalysis>(F));
13315}
13316
13317PreservedAnalyses
13318ScalarEvolutionVerifierPass::run(Function &F, FunctionAnalysisManager &AM) {
13319 AM.getResult<ScalarEvolutionAnalysis>(F).verify();
13320 return PreservedAnalyses::all();
13321}
13322
13323PreservedAnalyses
13324ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) {
13325 // For compatibility with opt's -analyze feature under legacy pass manager
13326 // which was not ported to NPM. This keeps tests using
13327 // update_analyze_test_checks.py working.
13328 OS << "Printing analysis 'Scalar Evolution Analysis' for function '"
13329 << F.getName() << "':\n";
13330 AM.getResult<ScalarEvolutionAnalysis>(F).print(OS);
13331 return PreservedAnalyses::all();
13332}
13333
13334INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution",
13335 "Scalar Evolution Analysis", false, true)
13336INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
13337INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
13338INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
13339INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
13340INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution",
13341 "Scalar Evolution Analysis", false, true)
13342
13343char ScalarEvolutionWrapperPass::ID = 0;
13344
13345ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) {
13346 initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry());
13347}
13348
13349bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) {
13350 SE.reset(new ScalarEvolution(
13351 F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
13352 getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
13353 getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
13354 getAnalysis<LoopInfoWrapperPass>().getLoopInfo()));
13355 return false;
13356}
13357
13358void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); }
13359
13360void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const {
13361 SE->print(OS);
13362}
13363
13364void ScalarEvolutionWrapperPass::verifyAnalysis() const {
13365 if (!VerifySCEV)
13366 return;
13367
13368 SE->verify();
13369}
13370
13371void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
13372 AU.setPreservesAll();
13373 AU.addRequiredTransitive<AssumptionCacheTracker>();
13374 AU.addRequiredTransitive<LoopInfoWrapperPass>();
13375 AU.addRequiredTransitive<DominatorTreeWrapperPass>();
13376 AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
13377}
13378
13379const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS,
13380 const SCEV *RHS) {
13381 FoldingSetNodeID ID;
13382 assert(LHS->getType() == RHS->getType() &&
13383 "Type mismatch between LHS and RHS");
13384 // Unique this node based on the arguments
13385 ID.AddInteger(SCEVPredicate::P_Equal);
13386 ID.AddPointer(LHS);
13387 ID.AddPointer(RHS);
13388 void *IP = nullptr;
13389 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
13390 return S;
13391 SCEVEqualPredicate *Eq = new (SCEVAllocator)
13392 SCEVEqualPredicate(ID.Intern(SCEVAllocator), LHS, RHS);
13393 UniquePreds.InsertNode(Eq, IP);
13394 return Eq;
13395}
13396
13397const SCEVPredicate *ScalarEvolution::getWrapPredicate(
13398 const SCEVAddRecExpr *AR,
13399 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
13400 FoldingSetNodeID ID;
13401 // Unique this node based on the arguments
13402 ID.AddInteger(SCEVPredicate::P_Wrap);
13403 ID.AddPointer(AR);
13404 ID.AddInteger(AddedFlags);
13405 void *IP = nullptr;
13406 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
13407 return S;
13408 auto *OF = new (SCEVAllocator)
13409 SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags);
13410 UniquePreds.InsertNode(OF, IP);
13411 return OF;
13412}
13413
13414namespace {
13415
13416class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> {
13417public:
13418
13419 /// Rewrites \p S in the context of a loop L and the SCEV predication
13420 /// infrastructure.
13421 ///
13422 /// If \p Pred is non-null, the SCEV expression is rewritten to respect the
13423 /// equivalences present in \p Pred.
13424 ///
13425 /// If \p NewPreds is non-null, rewrite is free to add further predicates to
13426 /// \p NewPreds such that the result will be an AddRecExpr.
13427 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
13428 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
13429 SCEVUnionPredicate *Pred) {
13430 SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred);
13431 return Rewriter.visit(S);
13432 }
13433
13434 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
13435 if (Pred) {
13436 auto ExprPreds = Pred->getPredicatesForExpr(Expr);
13437 for (auto *Pred : ExprPreds)
13438 if (const auto *IPred = dyn_cast<SCEVEqualPredicate>(Pred))
13439 if (IPred->getLHS() == Expr)
13440 return IPred->getRHS();
13441 }
13442 return convertToAddRecWithPreds(Expr);
13443 }
13444
13445 const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
13446 const SCEV *Operand = visit(Expr->getOperand());
13447 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
13448 if (AR && AR->getLoop() == L && AR->isAffine()) {
13449 // This couldn't be folded because the operand didn't have the nuw
13450 // flag. Add the nusw flag as an assumption that we could make.
13451 const SCEV *Step = AR->getStepRecurrence(SE);
13452 Type *Ty = Expr->getType();
13453 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW))
13454 return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty),
13455 SE.getSignExtendExpr(Step, Ty), L,
13456 AR->getNoWrapFlags());
13457 }
13458 return SE.getZeroExtendExpr(Operand, Expr->getType());
13459 }
13460
13461 const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
13462 const SCEV *Operand = visit(Expr->getOperand());
13463 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
13464 if (AR && AR->getLoop() == L && AR->isAffine()) {
13465 // This couldn't be folded because the operand didn't have the nsw
13466 // flag. Add the nssw flag as an assumption that we could make.
13467 const SCEV *Step = AR->getStepRecurrence(SE);
13468 Type *Ty = Expr->getType();
13469 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW))
13470 return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty),
13471 SE.getSignExtendExpr(Step, Ty), L,
13472 AR->getNoWrapFlags());
13473 }
13474 return SE.getSignExtendExpr(Operand, Expr->getType());
13475 }
13476
13477private:
13478 explicit SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE,
13479 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
13480 SCEVUnionPredicate *Pred)
13481 : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {}
13482
13483 bool addOverflowAssumption(const SCEVPredicate *P) {
13484 if (!NewPreds) {
13485 // Check if we've already made this assumption.
13486 return Pred && Pred->implies(P);
13487 }
13488 NewPreds->insert(P);
13489 return true;
13490 }
13491
13492 bool addOverflowAssumption(const SCEVAddRecExpr *AR,
13493 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
13494 auto *A = SE.getWrapPredicate(AR, AddedFlags);
13495 return addOverflowAssumption(A);
13496 }
13497
13498 // If \p Expr represents a PHINode, we try to see if it can be represented
13499 // as an AddRec, possibly under a predicate (PHISCEVPred). If it is possible
13500 // to add this predicate as a runtime overflow check, we return the AddRec.
13501 // If \p Expr does not meet these conditions (is not a PHI node, or we
13502 // couldn't create an AddRec for it, or couldn't add the predicate), we just
13503 // return \p Expr.
13504 const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) {
13505 if (!isa<PHINode>(Expr->getValue()))
13506 return Expr;
13507 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
13508 PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr);
13509 if (!PredicatedRewrite)
13510 return Expr;
13511 for (auto *P : PredicatedRewrite->second) {
13512 // Wrap predicates from outer loops are not supported.
13513 if (auto *WP = dyn_cast<const SCEVWrapPredicate>(P)) {
13514 auto *AR = cast<const SCEVAddRecExpr>(WP->getExpr());
13515 if (L != AR->getLoop())
13516 return Expr;
13517 }
13518 if (!addOverflowAssumption(P))
13519 return Expr;
13520 }
13521 return PredicatedRewrite->first;
13522 }
13523
13524 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds;
13525 SCEVUnionPredicate *Pred;
13526 const Loop *L;
13527};
13528
13529} // end anonymous namespace
13530
13531const SCEV *ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L,
13532 SCEVUnionPredicate &Preds) {
13533 return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds);
13534}
13535
13536const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates(
13537 const SCEV *S, const Loop *L,
13538 SmallPtrSetImpl<const SCEVPredicate *> &Preds) {
13539 SmallPtrSet<const SCEVPredicate *, 4> TransformPreds;
13540 S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr);
13541 auto *AddRec = dyn_cast<SCEVAddRecExpr>(S);
13542
13543 if (!AddRec)
13544 return nullptr;
13545
13546 // Since the transformation was successful, we can now transfer the SCEV
13547 // predicates.
13548 for (auto *P : TransformPreds)
13549 Preds.insert(P);
13550
13551 return AddRec;
13552}
13553
13554/// SCEV predicates
13555SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID,
13556 SCEVPredicateKind Kind)
13557 : FastID(ID), Kind(Kind) {}
13558
13559SCEVEqualPredicate::SCEVEqualPredicate(const FoldingSetNodeIDRef ID,
13560 const SCEV *LHS, const SCEV *RHS)
13561 : SCEVPredicate(ID, P_Equal), LHS(LHS), RHS(RHS) {
13562 assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match");
13563 assert(LHS != RHS && "LHS and RHS are the same SCEV");
13564}
13565
13566bool SCEVEqualPredicate::implies(const SCEVPredicate *N) const {
13567 const auto *Op = dyn_cast<SCEVEqualPredicate>(N);
13568
13569 if (!Op)
13570 return false;
13571
13572 return Op->LHS == LHS && Op->RHS == RHS;
13573}
13574
13575bool SCEVEqualPredicate::isAlwaysTrue() const { return false; }
13576
13577const SCEV *SCEVEqualPredicate::getExpr() const { return LHS; }
13578
13579void SCEVEqualPredicate::print(raw_ostream &OS, unsigned Depth) const {
13580 OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n";
13581}
13582
13583SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID,
13584 const SCEVAddRecExpr *AR,
13585 IncrementWrapFlags Flags)
13586 : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {}
13587
13588const SCEV *SCEVWrapPredicate::getExpr() const { return AR; }
13589
13590bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const {
13591 const auto *Op = dyn_cast<SCEVWrapPredicate>(N);
13592
13593 return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags;
13594}
13595
13596bool SCEVWrapPredicate::isAlwaysTrue() const {
13597 SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags();
13598 IncrementWrapFlags IFlags = Flags;
13599
13600 if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags)
13601 IFlags = clearFlags(IFlags, IncrementNSSW);
13602
13603 return IFlags == IncrementAnyWrap;
13604}
13605
13606void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const {
13607 OS.indent(Depth) << *getExpr() << " Added Flags: ";
13608 if (SCEVWrapPredicate::IncrementNUSW & getFlags())
13609 OS << "<nusw>";
13610 if (SCEVWrapPredicate::IncrementNSSW & getFlags())
13611 OS << "<nssw>";
13612 OS << "\n";
13613}
13614
13615SCEVWrapPredicate::IncrementWrapFlags
13616SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR,
13617 ScalarEvolution &SE) {
13618 IncrementWrapFlags ImpliedFlags = IncrementAnyWrap;
13619 SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags();
13620
13621 // We can safely transfer the NSW flag as NSSW.
13622 if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags)
13623 ImpliedFlags = IncrementNSSW;
13624
13625 if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) {
13626 // If the increment is positive, the SCEV NUW flag will also imply the
13627 // WrapPredicate NUSW flag.
13628 if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE)))
13629 if (Step->getValue()->getValue().isNonNegative())
13630 ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW);
13631 }
13632
13633 return ImpliedFlags;
13634}
13635
13636/// Union predicates don't get cached, so create a dummy set ID for them.
13637SCEVUnionPredicate::SCEVUnionPredicate()
13638 : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) {}
13639
13640bool SCEVUnionPredicate::isAlwaysTrue() const {
13641 return all_of(Preds,
13642 [](const SCEVPredicate *I) { return I->isAlwaysTrue(); });
13643}
13644
13645ArrayRef<const SCEVPredicate *>
13646SCEVUnionPredicate::getPredicatesForExpr(const SCEV *Expr) {
13647 auto I = SCEVToPreds.find(Expr);
13648 if (I == SCEVToPreds.end())
13649 return ArrayRef<const SCEVPredicate *>();
13650 return I->second;
13651}
13652
13653bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const {
13654 if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N))
13655 return all_of(Set->Preds,
13656 [this](const SCEVPredicate *I) { return this->implies(I); });
13657
13658 auto ScevPredsIt = SCEVToPreds.find(N->getExpr());
13659 if (ScevPredsIt == SCEVToPreds.end())
13660 return false;
13661 auto &SCEVPreds = ScevPredsIt->second;
13662
13663 return any_of(SCEVPreds,
13664 [N](const SCEVPredicate *I) { return I->implies(N); });
13665}
13666
13667const SCEV *SCEVUnionPredicate::getExpr() const { return nullptr; }
13668
13669void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const {
13670 for (auto Pred : Preds)
13671 Pred->print(OS, Depth);
13672}
13673
13674void SCEVUnionPredicate::add(const SCEVPredicate *N) {
13675 if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) {
13676 for (auto Pred : Set->Preds)
13677 add(Pred);
13678 return;
13679 }
13680
13681 if (implies(N))
13682 return;
13683
13684 const SCEV *Key = N->getExpr();
13685 assert(Key && "Only SCEVUnionPredicate doesn't have an "
13686 " associated expression!");
13687
13688 SCEVToPreds[Key].push_back(N);
13689 Preds.push_back(N);
13690}
13691
13692PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE,
13693 Loop &L)
13694 : SE(SE), L(L) {}
13695
13696const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) {
13697 const SCEV *Expr = SE.getSCEV(V);
13698 RewriteEntry &Entry = RewriteMap[Expr];
13699
13700 // If we already have an entry and the version matches, return it.
13701 if (Entry.second && Generation == Entry.first)
13702 return Entry.second;
13703
13704 // We found an entry but it's stale. Rewrite the stale entry
13705 // according to the current predicate.
13706 if (Entry.second)
13707 Expr = Entry.second;
13708
13709 const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, Preds);
13710 Entry = {Generation, NewSCEV};
13711
13712 return NewSCEV;
13713}
13714
13715const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() {
13716 if (!BackedgeCount) {
13717 SCEVUnionPredicate BackedgePred;
13718 BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, BackedgePred);
13719 addPredicate(BackedgePred);
13720 }
13721 return BackedgeCount;
13722}
13723
13724void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) {
13725 if (Preds.implies(&Pred))
13726 return;
13727 Preds.add(&Pred);
13728 updateGeneration();
13729}
13730
13731const SCEVUnionPredicate &PredicatedScalarEvolution::getUnionPredicate() const {
13732 return Preds;
13733}
13734
13735void PredicatedScalarEvolution::updateGeneration() {
13736 // If the generation number wrapped recompute everything.
13737 if (++Generation == 0) {
13738 for (auto &II : RewriteMap) {
13739 const SCEV *Rewritten = II.second.second;
13740 II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, Preds)};
13741 }
13742 }
13743}
13744
13745void PredicatedScalarEvolution::setNoOverflow(
13746 Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
13747 const SCEV *Expr = getSCEV(V);
13748 const auto *AR = cast<SCEVAddRecExpr>(Expr);
13749
13750 auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE);
13751
13752 // Clear the statically implied flags.
13753 Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags);
13754 addPredicate(*SE.getWrapPredicate(AR, Flags));
13755
13756 auto II = FlagsMap.insert({V, Flags});
13757 if (!II.second)
13758 II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second);
13759}
13760
13761bool PredicatedScalarEvolution::hasNoOverflow(
13762 Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
13763 const SCEV *Expr = getSCEV(V);
13764 const auto *AR = cast<SCEVAddRecExpr>(Expr);
13765
13766 Flags = SCEVWrapPredicate::clearFlags(
13767 Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE));
13768
13769 auto II = FlagsMap.find(V);
13770
13771 if (II != FlagsMap.end())
13772 Flags = SCEVWrapPredicate::clearFlags(Flags, II->second);
13773
13774 return Flags == SCEVWrapPredicate::IncrementAnyWrap;
13775}
13776
13777const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) {
13778 const SCEV *Expr = this->getSCEV(V);
13779 SmallPtrSet<const SCEVPredicate *, 4> NewPreds;
13780 auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds);
13781
13782 if (!New)
13783 return nullptr;
13784
13785 for (auto *P : NewPreds)
13786 Preds.add(P);
13787
13788 updateGeneration();
13789 RewriteMap[SE.getSCEV(V)] = {Generation, New};
13790 return New;
13791}
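A hedged sketch of typical client usage (the loop L, value Ptr, and surrounding pass context are assumed, not taken from this file):

// Try to view Ptr's SCEV as an affine AddRec, collecting whatever runtime
// predicates that requires.
PredicatedScalarEvolution PSE(SE, *L);
if (const SCEVAddRecExpr *AR = PSE.getAsAddRec(Ptr)) {
  // AR is only valid under these predicates; a client must materialize
  // them as runtime checks before relying on AR.
  const SCEVUnionPredicate &Preds = PSE.getUnionPredicate();
  Preds.print(dbgs(), 2);
}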
13792
13793PredicatedScalarEvolution::PredicatedScalarEvolution(
13794 const PredicatedScalarEvolution &Init)
13795 : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), Preds(Init.Preds),
13796 Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) {
13797 for (auto I : Init.FlagsMap)
13798 FlagsMap.insert(I);
13799}
13800
13801void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const {
13802 // For each block.
13803 for (auto *BB : L.getBlocks())
13804 for (auto &I : *BB) {
13805 if (!SE.isSCEVable(I.getType()))
13806 continue;
13807
13808 auto *Expr = SE.getSCEV(&I);
13809 auto II = RewriteMap.find(Expr);
13810
13811 if (II == RewriteMap.end())
13812 continue;
13813
13814 // Don't print things that are not interesting.
13815 if (II->second.second == Expr)
13816 continue;
13817
13818 OS.indent(Depth) << "[PSE]" << I << ":\n";
13819 OS.indent(Depth + 2) << *Expr << "\n";
13820 OS.indent(Depth + 2) << "--> " << *II->second.second << "\n";
13821 }
13822}
13823
13824// Match the mathematical pattern A - (A / B) * B, where A and B can be
13825// arbitrary expressions. Also match zext (trunc A to iB) to iY, which is used
13826// for URem with constant power-of-2 second operands.
13827// It's not always easy, as A and B can be folded (imagine A is X / 2, and B is
13828// 4, A / B becomes X / 8).
13829bool ScalarEvolution::matchURem(const SCEV *Expr, const SCEV *&LHS,
13830 const SCEV *&RHS) {
13831 // Try to match 'zext (trunc A to iB) to iY', which is used
13832 // for URem with constant power-of-2 second operands. Make sure the size of
13833 // the operand A matches the size of the whole expressions.
13834 if (const auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(Expr))
13835 if (const auto *Trunc = dyn_cast<SCEVTruncateExpr>(ZExt->getOperand(0))) {
13836 LHS = Trunc->getOperand();
13837 // Bail out if the type of the LHS is larger than the type of the
13838 // expression for now.
13839 if (getTypeSizeInBits(LHS->getType()) >
13840 getTypeSizeInBits(Expr->getType()))
13841 return false;
13842 if (LHS->getType() != Expr->getType())
13843 LHS = getZeroExtendExpr(LHS, Expr->getType());
13844 RHS = getConstant(APInt(getTypeSizeInBits(Expr->getType()), 1)
13845 << getTypeSizeInBits(Trunc->getType()));
13846 return true;
13847 }
13848 const auto *Add = dyn_cast<SCEVAddExpr>(Expr);
13849 if (Add == nullptr || Add->getNumOperands() != 2)
13850 return false;
13851
13852 const SCEV *A = Add->getOperand(1);
13853 const auto *Mul = dyn_cast<SCEVMulExpr>(Add->getOperand(0));
13854
13855 if (Mul == nullptr)
13856 return false;
13857
13858 const auto MatchURemWithDivisor = [&](const SCEV *B) {
13859 // (SomeExpr + (-(SomeExpr / B) * B)).
13860 if (Expr == getURemExpr(A, B)) {
13861 LHS = A;
13862 RHS = B;
13863 return true;
13864 }
13865 return false;
13866 };
13867
13868 // (SomeExpr + (-1 * (SomeExpr / B) * B)).
13869 if (Mul->getNumOperands() == 3 && isa<SCEVConstant>(Mul->getOperand(0)))
13870 return MatchURemWithDivisor(Mul->getOperand(1)) ||
13871 MatchURemWithDivisor(Mul->getOperand(2));
13872
13873 // (SomeExpr + ((-SomeExpr / B) * B)) or (SomeExpr + ((SomeExpr / B) * -B)).
13874 if (Mul->getNumOperands() == 2)
13875 return MatchURemWithDivisor(Mul->getOperand(1)) ||
13876 MatchURemWithDivisor(Mul->getOperand(0)) ||
13877 MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(1))) ||
13878 MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(0)));
13879 return false;
13880}
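Concretely (an illustrative case, not from this file): for A = %x of type i32 and B = 4, the expression %x urem 4 typically reaches this matcher either as (zext i2 (trunc i32 %x to i2) to i32), which the early zext(trunc) case recognizes with RHS reconstructed as 1 << 2 = 4, or as an add of %x with a mul such as (-4 * (%x /u 4)), which the divisor candidates above re-verify by checking Expr against getURemExpr(A, B).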
13881
13882const SCEV *
13883ScalarEvolution::computeSymbolicMaxBackedgeTakenCount(const Loop *L) {
13884 SmallVector<BasicBlock*, 16> ExitingBlocks;
13885 L->getExitingBlocks(ExitingBlocks);
13886
13887 // Form an expression for the maximum exit count possible for this loop. We
13888 // merge the max and exact information to approximate a version of
13889 // getConstantMaxBackedgeTakenCount which isn't restricted to just constants.
13890 SmallVector<const SCEV*, 4> ExitCounts;
13891 for (BasicBlock *ExitingBB : ExitingBlocks) {
13892 const SCEV *ExitCount = getExitCount(L, ExitingBB);
13893 if (isa<SCEVCouldNotCompute>(ExitCount))
13894 ExitCount = getExitCount(L, ExitingBB,
13895 ScalarEvolution::ConstantMaximum);
13896 if (!isa<SCEVCouldNotCompute>(ExitCount)) {
13897 assert(DT.dominates(ExitingBB, L->getLoopLatch()) &&
13898 "We should only have known counts for exiting blocks that "
13899 "dominate latch!");
13900 ExitCounts.push_back(ExitCount);
13901 }
13902 }
13903 if (ExitCounts.empty())
13904 return getCouldNotCompute();
13905 return getUMinFromMismatchedTypes(ExitCounts);
13906}
13907
13908/// This rewriter is similar to SCEVParameterRewriter (it replaces SCEVUnknown
13909/// components following the Map (Value -> SCEV)), but skips AddRecExpr because
13910/// we cannot guarantee that the replacement is loop invariant in the loop of
13911/// the AddRec.
13912class SCEVLoopGuardRewriter : public SCEVRewriteVisitor<SCEVLoopGuardRewriter> {
13913 ValueToSCEVMapTy &Map;
13914
13915public:
13916 SCEVLoopGuardRewriter(ScalarEvolution &SE, ValueToSCEVMapTy &M)
13917 : SCEVRewriteVisitor(SE), Map(M) {}
13918
13919 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; }
13920
13921 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
13922 auto I = Map.find(Expr->getValue());
13923 if (I == Map.end())
13924 return Expr;
13925 return I->second;
13926 }
13927};
13928
13929const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
13930 auto CollectCondition = [&](ICmpInst::Predicate Predicate, const SCEV *LHS,
13931 const SCEV *RHS, ValueToSCEVMapTy &RewriteMap) {
13932 // If we have LHS == 0, check if LHS is computing a property of some unknown
13933 // SCEV %v which we can rewrite %v to express explicitly.
13934 const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS);
13935 if (Predicate == CmpInst::ICMP_EQ && RHSC &&
13936 RHSC->getValue()->isNullValue()) {
13937 // If LHS is A % B, i.e. A % B == 0, rewrite A to (A /u B) * B to
13938 // explicitly express that.
13939 const SCEV *URemLHS = nullptr;
13940 const SCEV *URemRHS = nullptr;
13941 if (matchURem(LHS, URemLHS, URemRHS)) {
13942 if (const SCEVUnknown *LHSUnknown = dyn_cast<SCEVUnknown>(URemLHS)) {
13943 Value *V = LHSUnknown->getValue();
13944 auto Multiple =
13945 getMulExpr(getUDivExpr(URemLHS, URemRHS), URemRHS,
13946 (SCEV::NoWrapFlags)(SCEV::FlagNUW | SCEV::FlagNSW));
13947 RewriteMap[V] = Multiple;
13948 return;
13949 }
13950 }
13951 }
13952
13953 if (!isa<SCEVUnknown>(LHS) && isa<SCEVUnknown>(RHS)) {
13954 std::swap(LHS, RHS);
13955 Predicate = CmpInst::getSwappedPredicate(Predicate);
13956 }
13957
13958 // Check for a condition of the form (-C1 + X < C2). InstCombine will
13959 // create this form when combining two checks of the form (X u< C2 + C1) and
13960 // (X >=u C1).
13961 auto MatchRangeCheckIdiom = [this, Predicate, LHS, RHS, &RewriteMap]() {
13962 auto *AddExpr = dyn_cast<SCEVAddExpr>(LHS);
13963 if (!AddExpr || AddExpr->getNumOperands() != 2)
13964 return false;
13965
13966 auto *C1 = dyn_cast<SCEVConstant>(AddExpr->getOperand(0));
13967 auto *LHSUnknown = dyn_cast<SCEVUnknown>(AddExpr->getOperand(1));
13968 auto *C2 = dyn_cast<SCEVConstant>(RHS);
13969 if (!C1 || !C2 || !LHSUnknown)
13970 return false;
13971
13972 auto ExactRegion =
13973 ConstantRange::makeExactICmpRegion(Predicate, C2->getAPInt())
13974 .sub(C1->getAPInt());
13975
13976 // Bail out, unless we have a non-wrapping, monotonic range.
13977 if (ExactRegion.isWrappedSet() || ExactRegion.isFullSet())
13978 return false;
13979 auto I = RewriteMap.find(LHSUnknown->getValue());
13980 const SCEV *RewrittenLHS = I != RewriteMap.end() ? I->second : LHS;
13981 RewriteMap[LHSUnknown->getValue()] = getUMaxExpr(
13982 getConstant(ExactRegion.getUnsignedMin()),
13983 getUMinExpr(RewrittenLHS, getConstant(ExactRegion.getUnsignedMax())));
13984 return true;
13985 };
13986 if (MatchRangeCheckIdiom())
13987 return;
13988
13989 // For now, limit to conditions that provide information about unknown
13990 // expressions. RHS also cannot contain add recurrences.
13991 auto *LHSUnknown = dyn_cast<SCEVUnknown>(LHS);
13992 if (!LHSUnknown || containsAddRecurrence(RHS))
13993 return;
13994
13995 // Check whether LHS has already been rewritten. In that case we want to
13996 // chain further rewrites onto the already rewritten value.
13997 auto I = RewriteMap.find(LHSUnknown->getValue());
13998 const SCEV *RewrittenLHS = I != RewriteMap.end() ? I->second : LHS;
13999 const SCEV *RewrittenRHS = nullptr;
14000 switch (Predicate) {
14001 case CmpInst::ICMP_ULT:
14002 RewrittenRHS =
14003 getUMinExpr(RewrittenLHS, getMinusSCEV(RHS, getOne(RHS->getType())));
14004 break;
14005 case CmpInst::ICMP_SLT:
14006 RewrittenRHS =
14007 getSMinExpr(RewrittenLHS, getMinusSCEV(RHS, getOne(RHS->getType())));
14008 break;
14009 case CmpInst::ICMP_ULE:
14010 RewrittenRHS = getUMinExpr(RewrittenLHS, RHS);
14011 break;
14012 case CmpInst::ICMP_SLE:
14013 RewrittenRHS = getSMinExpr(RewrittenLHS, RHS);
14014 break;
14015 case CmpInst::ICMP_UGT:
14016 RewrittenRHS =
14017 getUMaxExpr(RewrittenLHS, getAddExpr(RHS, getOne(RHS->getType())));
14018 break;
14019 case CmpInst::ICMP_SGT:
14020 RewrittenRHS =
14021 getSMaxExpr(RewrittenLHS, getAddExpr(RHS, getOne(RHS->getType())));
14022 break;
14023 case CmpInst::ICMP_UGE:
14024 RewrittenRHS = getUMaxExpr(RewrittenLHS, RHS);
14025 break;
14026 case CmpInst::ICMP_SGE:
14027 RewrittenRHS = getSMaxExpr(RewrittenLHS, RHS);
14028 break;
14029 case CmpInst::ICMP_EQ:
14030 if (isa<SCEVConstant>(RHS))
14031 RewrittenRHS = RHS;
14032 break;
14033 case CmpInst::ICMP_NE:
14034 if (isa<SCEVConstant>(RHS) &&
14035 cast<SCEVConstant>(RHS)->getValue()->isNullValue())
14036 RewrittenRHS = getUMaxExpr(RewrittenLHS, getOne(RHS->getType()));
14037 break;
14038 default:
14039 break;
14040 }
14041
14042 if (RewrittenRHS)
14043 RewriteMap[LHSUnknown->getValue()] = RewrittenRHS;
14044 };
14045 // Starting at the loop predecessor, climb up the predecessor chain, as long
14046 // as there are predecessors that can be found that have unique successors
14047 // leading to the original header.
14048 // TODO: share this logic with isLoopEntryGuardedByCond.
14049 ValueToSCEVMapTy RewriteMap;
14050 for (std::pair<const BasicBlock *, const BasicBlock *> Pair(
14051 L->getLoopPredecessor(), L->getHeader());
14052 Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
14053
14054 const BranchInst *LoopEntryPredicate =
14055 dyn_cast<BranchInst>(Pair.first->getTerminator());
14056 if (!LoopEntryPredicate || LoopEntryPredicate->isUnconditional())
14057 continue;
14058
14059 bool EnterIfTrue = LoopEntryPredicate->getSuccessor(0) == Pair.second;
14060 SmallVector<Value *, 8> Worklist;
14061 SmallPtrSet<Value *, 8> Visited;
14062 Worklist.push_back(LoopEntryPredicate->getCondition());
14063 while (!Worklist.empty()) {
14064 Value *Cond = Worklist.pop_back_val();
14065 if (!Visited.insert(Cond).second)
14066 continue;
14067
14068 if (auto *Cmp = dyn_cast<ICmpInst>(Cond)) {
14069 auto Predicate =
14070 EnterIfTrue ? Cmp->getPredicate() : Cmp->getInversePredicate();
14071 CollectCondition(Predicate, getSCEV(Cmp->getOperand(0)),
14072 getSCEV(Cmp->getOperand(1)), RewriteMap);
14073 continue;
14074 }
14075
14076 Value *L, *R;
14077 if (EnterIfTrue ? match(Cond, m_LogicalAnd(m_Value(L), m_Value(R)))
14078 : match(Cond, m_LogicalOr(m_Value(L), m_Value(R)))) {
14079 Worklist.push_back(L);
14080 Worklist.push_back(R);
14081 }
14082 }
14083 }
14084
14085 // Also collect information from assumptions dominating the loop.
14086 for (auto &AssumeVH : AC.assumptions()) {
14087 if (!AssumeVH)
14088 continue;
14089 auto *AssumeI = cast<CallInst>(AssumeVH);
14090 auto *Cmp = dyn_cast<ICmpInst>(AssumeI->getOperand(0));
14091 if (!Cmp || !DT.dominates(AssumeI, L->getHeader()))
14092 continue;
14093 CollectCondition(Cmp->getPredicate(), getSCEV(Cmp->getOperand(0)),
14094 getSCEV(Cmp->getOperand(1)), RewriteMap);
14095 }
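
[Editor's illustration] Assumptions supply the same facts without any branch: an llvm.assume whose condition is an icmp dominating the loop header is handed to CollectCondition exactly like a guarding branch taken on its true edge. At the source level, this is the kind of code that produces such an assumption (a sketch; clang lowers __builtin_assume to llvm.assume):

    unsigned sumPrefix(const unsigned *A, unsigned N) {
      __builtin_assume(N < 64); // emitted as llvm.assume(icmp ult %N, 64)
      unsigned S = 0;
      for (unsigned I = 0; I < N; ++I) // the assume bounds N, hence the trip count
        S += A[I];
      return S;
    }
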
14096
14097 if (RewriteMap.empty())
14098 return Expr;
14099 SCEVLoopGuardRewriter Rewriter(*this, RewriteMap);
14100 return Rewriter.visit(Expr);
14101}
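
[Editor's illustration] Finally, SCEVLoopGuardRewriter walks the expression and substitutes each SCEVUnknown that has an entry in RewriteMap, leaving everything else untouched. A string-keyed miniature of that substitution step, with hypothetical textual stand-ins for SCEV expressions:

    #include <cassert>
    #include <map>
    #include <string>

    // Look up a value in the rewrite map, falling back to the value itself,
    // just as the rewriter leaves unguarded SCEVUnknowns alone.
    std::string rewrite(const std::string &Expr,
                        const std::map<std::string, std::string> &RewriteMap) {
      auto It = RewriteMap.find(Expr);
      return It != RewriteMap.end() ? It->second : Expr;
    }

    int main() {
      // From a guard `n < 16`, the ICMP_ULT case records n -> (15 umin n).
      std::map<std::string, std::string> RewriteMap{{"n", "(15 umin n)"}};
      assert(rewrite("n", RewriteMap) == "(15 umin n)");
      assert(rewrite("m", RewriteMap) == "m"); // no guard, no rewrite
      return 0;
    }
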

/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include/llvm/ADT/Optional.h

1//===- Optional.h - Simple variant for passing optional values --*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file provides Optional, a template class modeled in the spirit of
10// OCaml's 'opt' variant. The idea is to strongly type whether or not
11// a value can be optional.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_ADT_OPTIONAL_H
16#define LLVM_ADT_OPTIONAL_H
17
18#include "llvm/ADT/Hashing.h"
19#include "llvm/ADT/None.h"
20#include "llvm/ADT/STLForwardCompat.h"
21#include "llvm/Support/Compiler.h"
22#include "llvm/Support/type_traits.h"
23#include <cassert>
24#include <memory>
25#include <new>
26#include <utility>
27
28namespace llvm {
29
30class raw_ostream;
31
32namespace optional_detail {
33
34/// Storage for any type.
35//
36// The specialization condition intentionally uses
37// llvm::is_trivially_copy_constructible instead of
38// std::is_trivially_copy_constructible. GCC versions prior to 7.4 may
39// instantiate the copy constructor of `T` when
40// std::is_trivially_copy_constructible is instantiated. This causes
41// compilation to fail if we query the trivially copy constructible property of
42// a class which is not copy constructible.
43//
44// The current implementation of OptionalStorage insists that in order to use
45// the trivial specialization, the value_type must be trivially copy
46// constructible and trivially copy assignable due to =default implementations
47// of the copy/move constructor/assignment. It does not follow that this is
48// necessarily the case when std::is_trivially_copyable is true (hence the
49// expanded specialization condition).
50//
51// The move constructible / assignable conditions emulate the remaining behavior
52// of std::is_trivially_copyable.
53template <typename T, bool = (llvm::is_trivially_copy_constructible<T>::value &&
54 std::is_trivially_copy_assignable<T>::value &&
55 (std::is_trivially_move_constructible<T>::value ||
56 !std::is_move_constructible<T>::value) &&
57 (std::is_trivially_move_assignable<T>::value ||
58 !std::is_move_assignable<T>::value))>
59class OptionalStorage {
60 union {
61 char empty;
62 T value;
63 };
64 bool hasVal;
65
66public:
67 ~OptionalStorage() { reset(); }
68
69 constexpr OptionalStorage() noexcept : empty(), hasVal(false) {}
70
71 constexpr OptionalStorage(OptionalStorage const &other) : OptionalStorage() {
72 if (other.hasValue()) {
73 emplace(other.value);
74 }
75 }
76 constexpr OptionalStorage(OptionalStorage &&other) : OptionalStorage() {
77 if (other.hasValue()) {
78 emplace(std::move(other.value));
79 }
80 }
81
82 template <class... Args>
83 constexpr explicit OptionalStorage(in_place_t, Args &&... args)
84 : value(std::forward<Args>(args)...), hasVal(true) {}
85
86 void reset() noexcept {
87 if (hasVal) {
88 value.~T();
89 hasVal = false;
90 }
91 }
92
93 constexpr bool hasValue() const noexcept { return hasVal; }
94
95 T &getValue() LLVM_LVALUE_FUNCTION noexcept {
96 assert(hasVal);
97 return value;
98 }
99 constexpr T const &getValue() const LLVM_LVALUE_FUNCTION noexcept {
100 assert(hasVal);
101 return value;
102 }
103#if LLVM_HAS_RVALUE_REFERENCE_THIS
104 T &&getValue() && noexcept {
105 assert(hasVal);
106 return std::move(value);
107 }
108#endif
109
110 template <class... Args> void emplace(Args &&... args) {
111 reset();
112 ::new ((void *)std::addressof(value)) T(std::forward<Args>(args)...);
113 hasVal = true;
114 }
115
116 OptionalStorage &operator=(T const &y) {
117 if (hasValue()) {
118 value = y;
119 } else {
120 ::new ((void *)std::addressof(value)) T(y);
121 hasVal = true;
122 }
123 return *this;
124 }
125 OptionalStorage &operator=(T &&y) {
126 if (hasValue()) {
127 value = std::move(y);
128 } else {
129 ::new ((void *)std::addressof(value)) T(std::move(y));
130 hasVal = true;
131 }
132 return *this;
133 }
134
135 OptionalStorage &operator=(OptionalStorage const &other) {
136 if (other.hasValue()) {
137 if (hasValue()) {
138 value = other.value;
139 } else {
140 ::new ((void *)std::addressof(value)) T(other.value);
141 hasVal = true;
142 }
143 } else {
144 reset();
145 }
146 return *this;
147 }
148
149 OptionalStorage &operator=(OptionalStorage &&other) {
150 if (other.hasValue()) {
151 if (hasValue()) {
152 value = std::move(other.value);
153 } else {
154 ::new ((void *)std::addressof(value)) T(std::move(other.value));
155 hasVal = true;
156 }
157 } else {
158 reset();
159 }
160 return *this;
161 }
162};
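
[Editor's illustration] The non-trivial storage above is the classic tagged-union pattern: the union suppresses automatic construction of the payload, placement new creates the value, and an explicit destructor call tears it down. A standalone miniature of the same pattern (not LLVM code; copy operations deleted to keep it short):

    #include <cassert>
    #include <memory>
    #include <string>
    #include <utility>

    template <typename T> class TinyStorage {
      union {
        char Empty;  // active member while disengaged
        T Value;     // active member while engaged
      };
      bool HasVal = false;

    public:
      TinyStorage() : Empty() {}
      ~TinyStorage() { reset(); }
      TinyStorage(const TinyStorage &) = delete;
      TinyStorage &operator=(const TinyStorage &) = delete;

      // Placement-new the value into the union storage.
      template <class... Args> void emplace(Args &&...A) {
        reset();
        ::new ((void *)std::addressof(Value)) T(std::forward<Args>(A)...);
        HasVal = true;
      }
      // Run the destructor by hand; the union won't do it for us.
      void reset() {
        if (HasVal) {
          Value.~T();
          HasVal = false;
        }
      }
      bool hasValue() const { return HasVal; }
      T &getValue() { assert(HasVal); return Value; }
    };

    int main() {
      TinyStorage<std::string> S;
      assert(!S.hasValue());
      S.emplace("hello");
      assert(S.hasValue() && S.getValue() == "hello");
      S.reset(); // destructor runs exactly once
      return 0;
    }
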
163
164template <typename T> class OptionalStorage<T, true> {
165 union {
166 char empty;
167 T value;
168 };
169 bool hasVal = false;
170
171public:
172 ~OptionalStorage() = default;
173
174 constexpr OptionalStorage() noexcept : empty{} {}
175
176 constexpr OptionalStorage(OptionalStorage const &other) = default;
177 constexpr OptionalStorage(OptionalStorage &&other) = default;
178
179 OptionalStorage &operator=(OptionalStorage const &other) = default;
180 OptionalStorage &operator=(OptionalStorage &&other) = default;
181
182 template <class... Args>
183 constexpr explicit OptionalStorage(in_place_t, Args &&... args)
184 : value(std::forward<Args>(args)...), hasVal(true) {}
185
186 void reset() noexcept {
187 if (hasVal) {
188 value.~T();
189 hasVal = false;
190 }
191 }
192
193 constexpr bool hasValue() const noexcept { return hasVal; }
[Step 4] Returning zero, which participates in a condition later
194
195 T &getValue() LLVM_LVALUE_FUNCTION noexcept {
196 assert(hasVal);
197 return value;
198 }
199 constexpr T const &getValue() const LLVM_LVALUE_FUNCTION noexcept {
200 assert(hasVal);
201 return value;
202 }
203#if LLVM_HAS_RVALUE_REFERENCE_THIS
204 T &&getValue() && noexcept {
205 assert(hasVal);
206 return std::move(value);
207 }
208#endif
209
210 template <class... Args> void emplace(Args &&... args) {
211 reset();
212 ::new ((void *)std::addressof(value)) T(std::forward<Args>(args)...);
213 hasVal = true;
214 }
215
216 OptionalStorage &operator=(T const &y) {
217 if (hasValue()) {
218 value = y;
219 } else {
220 ::new ((void *)std::addressof(value)) T(y);
221 hasVal = true;
222 }
223 return *this;
224 }
225 OptionalStorage &operator=(T &&y) {
226 if (hasValue()) {
227 value = std::move(y);
228 } else {
229 ::new ((void *)std::addressof(value)) T(std::move(y));
230 hasVal = true;
231 }
232 return *this;
233 }
234};
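
[Editor's illustration] Which specialization a type selects can be checked directly: int satisfies every trait in the template default argument above, while std::string fails trivial copy construction. A quick check using the standard traits (llvm::is_trivially_copy_constructible tracks std::is_trivially_copy_constructible on modern compilers, as the earlier comment explains):

    #include <string>
    #include <type_traits>

    // int takes OptionalStorage<T, true>: all the trait checks hold,
    // so storage needs no manual destructor or placement new.
    static_assert(std::is_trivially_copy_constructible<int>::value &&
                      std::is_trivially_copy_assignable<int>::value,
                  "int selects the trivial specialization");

    // std::string fails the very first check and falls back to the
    // generic OptionalStorage with explicit lifetime management.
    static_assert(!std::is_trivially_copy_constructible<std::string>::value,
                  "std::string requires the non-trivial storage");
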
235
236} // namespace optional_detail
237
238template <typename T> class Optional {
239 optional_detail::OptionalStorage<T> Storage;
240
241public:
242 using value_type = T;
243
244 constexpr Optional() {}
245 constexpr Optional(NoneType) {}
246
247 constexpr Optional(const T &y) : Storage(in_place, y) {}
248 constexpr Optional(const Optional &O) = default;
249
250 constexpr Optional(T &&y) : Storage(in_place, std::move(y)) {}
251 constexpr Optional(Optional &&O) = default;
252
253 template <typename... ArgTypes>
254 constexpr Optional(in_place_t, ArgTypes &&...Args)
255 : Storage(in_place, std::forward<ArgTypes>(Args)...) {}
256
257 Optional &operator=(T &&y) {
258 Storage = std::move(y);
259 return *this;
260 }
261 Optional &operator=(Optional &&O) = default;
262
263 /// Create a new object by constructing it in place with the given arguments.
264 template <typename... ArgTypes> void emplace(ArgTypes &&... Args) {
265 Storage.emplace(std::forward<ArgTypes>(Args)...);
266 }
267
268 static constexpr Optional create(const T *y) {
269 return y ? Optional(*y) : Optional();
270 }
271
272 Optional &operator=(const T &y) {
273 Storage = y;
274 return *this;
275 }
276 Optional &operator=(const Optional &O) = default;
277
278 void reset() { Storage.reset(); }
279
280 constexpr const T *getPointer() const { return &Storage.getValue(); }
281 T *getPointer() { return &Storage.getValue(); }
282 constexpr const T &getValue() const LLVM_LVALUE_FUNCTION {
283 return Storage.getValue();
284 }
285 T &getValue() LLVM_LVALUE_FUNCTION { return Storage.getValue(); }
286
287 constexpr explicit operator bool() const { return hasValue(); }
[Step 2] Calling 'Optional::hasValue'
[Step 7] Returning from 'Optional::hasValue'
[Step 8] Returning zero, which participates in a condition later
288 constexpr bool hasValue() const { return Storage.hasValue(); }
[Step 3] Calling 'OptionalStorage::hasValue'
[Step 5] Returning from 'OptionalStorage::hasValue'
[Step 6] Returning zero, which participates in a condition later
289 constexpr const T *operator->() const { return getPointer(); }
290 T *operator->() { return getPointer(); }
291 constexpr const T &operator*() const LLVM_LVALUE_FUNCTION {
292 return getValue();
293 }
294 T &operator*() LLVM_LVALUE_FUNCTION { return getValue(); }
295
296 template <typename U>
297 constexpr T getValueOr(U &&value) const LLVM_LVALUE_FUNCTION {
298 return hasValue() ? getValue() : std::forward<U>(value);
299 }
300
301 /// Apply a function to the value if present; otherwise return None.
302 template <class Function>
303 auto map(const Function &F) const LLVM_LVALUE_FUNCTION
304 -> Optional<decltype(F(getValue()))> {
305 if (*this) return F(getValue());
306 return None;
307 }
308
309#if LLVM_HAS_RVALUE_REFERENCE_THIS
310 T &&getValue() && { return std::move(Storage.getValue()); }
311 T &&operator*() && { return std::move(Storage.getValue()); }
312
313 template <typename U>
314 T getValueOr(U &&value) && {
315 return hasValue() ? std::move(getValue()) : std::forward<U>(value);
316 }
317
318 /// Apply a function to the value if present; otherwise return None.
319 template <class Function>
320 auto map(const Function &F) &&
321 -> Optional<decltype(F(std::move(*this).getValue()))> {
322 if (*this) return F(std::move(*this).getValue());
323 return None;
324 }
325#endif
326};
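
[Editor's illustration] For orientation, the analyzer steps above trace hasValue() returning false through operator bool; the report then follows a pointer the analyzer believes is null to the call at ScalarEvolution.cpp:10457. A self-contained sketch of the safe API and of the general shape such a report points at — hypothetical values, not the actual ScalarEvolution code:

    #include "llvm/ADT/None.h"
    #include "llvm/ADT/Optional.h"
    #include <cassert>

    int main() {
      llvm::Optional<int> Empty;    // disengaged: operator bool is false
      llvm::Optional<int> Full(42);

      assert(!Empty && Full);          // the check steps 2-8 above trace
      assert(Full.getValueOr(0) == 42);
      assert(Empty.getValueOr(7) == 7); // safe default for the empty case

      // Hazardous shape: a pointer that is null exactly when the Optional
      // is disengaged, later used. The analyzer wants the re-check below.
      const int *P = Empty ? Empty.getPointer() : nullptr;
      if (P)
        assert(*P == 42);
      return 0;
    }
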
327
328template <class T> llvm::hash_code hash_value(const Optional<T> &O) {
329 return O ? hash_combine(true, *O) : hash_value(false);
330}
331
332template <typename T, typename U>
333constexpr bool operator==(const Optional<T> &X, const Optional<U> &Y) {
334 if (X && Y)
335 return *X == *Y;
336 return X.hasValue() == Y.hasValue();
337}
338
339template <typename T, typename U>
340constexpr bool operator!=(const Optional<T> &X, const Optional<U> &Y) {
341 return !(X == Y);
342}
343
344template <typename T, typename U>
345constexpr bool operator<(const Optional<T> &X, const Optional<U> &Y) {
346 if (X && Y)
347 return *X < *Y;
348 return X.hasValue() < Y.hasValue();
349}
350
351template <typename T, typename U>
352constexpr bool operator<=(const Optional<T> &X, const Optional<U> &Y) {
353 return !(Y < X);
354}
355
356template <typename T, typename U>
357constexpr bool operator>(const Optional<T> &X, const Optional<U> &Y) {
358 return Y < X;
359}
360
361template <typename T, typename U>
362constexpr bool operator>=(const Optional<T> &X, const Optional<U> &Y) {
363 return !(X < Y);
364}
365
366template <typename T>
367constexpr bool operator==(const Optional<T> &X, NoneType) {
368 return !X;
369}
370
371template <typename T>
372constexpr bool operator==(NoneType, const Optional<T> &X) {
373 return X == None;
374}
375
376template <typename T>
377constexpr bool operator!=(const Optional<T> &X, NoneType) {
378 return !(X == None);
379}
380
381template <typename T>
382constexpr bool operator!=(NoneType, const Optional<T> &X) {
383 return X != None;
384}
385
386template <typename T> constexpr bool operator<(const Optional<T> &, NoneType) {
387 return false;
388}
389
390template <typename T> constexpr bool operator<(NoneType, const Optional<T> &X) {
391 return X.hasValue();
392}
393
394template <typename T>
395constexpr bool operator<=(const Optional<T> &X, NoneType) {
396 return !(None < X);
397}
398
399template <typename T>
400constexpr bool operator<=(NoneType, const Optional<T> &X) {
401 return !(X < None);
402}
403
404template <typename T> constexpr bool operator>(const Optional<T> &X, NoneType) {
405 return None < X;
406}
407
408template <typename T> constexpr bool operator>(NoneType, const Optional<T> &X) {
409 return X < None;
410}
411
412template <typename T>
413constexpr bool operator>=(const Optional<T> &X, NoneType) {
414 return None <= X;
415}
416
417template <typename T>
418constexpr bool operator>=(NoneType, const Optional<T> &X) {
419 return X <= None;
420}
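
[Editor's illustration] These operators give Optional a total order in which a disengaged value sorts before any engaged one, mirroring hasValue() comparing as 0 < 1. A quick demonstration against the operators just defined:

    #include "llvm/ADT/None.h"
    #include "llvm/ADT/Optional.h"
    #include <cassert>

    int main() {
      llvm::Optional<int> A;    // disengaged
      llvm::Optional<int> B(1);
      llvm::Optional<int> C(2);
      assert(A < B && B < C);              // None first; engaged compare by *X
      assert(A == llvm::None);             // NoneType overloads above
      assert(B != llvm::None && llvm::None < B);
      return 0;
    }
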
421
422template <typename T>
423constexpr bool operator==(const Optional<T> &X, const T &Y) {
424 return X && *X == Y;
425}
426
427template <typename T>
428constexpr bool operator==(const T &X, const Optional<T> &Y) {
429 return Y && X == *Y;
430}
431
432template <typename T>
433constexpr bool operator!=(const Optional<T> &X, const T &Y) {
434 return !(X == Y);
435}
436
437template <typename T>
438constexpr bool operator!=(const T &X, const Optional<T> &Y) {
439 return !(X == Y);
440}
441
442template <typename T>
443constexpr bool operator<(const Optional<T> &X, const T &Y) {
444 return !X || *X < Y;
445}
446
447template <typename T>
448constexpr bool operator<(const T &X, const Optional<T> &Y) {
449 return Y && X < *Y;
450}
451
452template <typename T>
453constexpr bool operator<=(const Optional<T> &X, const T &Y) {
454 return !(Y < X);
455}
456
457template <typename T>
458constexpr bool operator<=(const T &X, const Optional<T> &Y) {
459 return !(Y < X);
460}
461
462template <typename T>
463constexpr bool operator>(const Optional<T> &X, const T &Y) {
464 return Y < X;
465}
466
467template <typename T>
468constexpr bool operator>(const T &X, const Optional<T> &Y) {
469 return Y < X;
470}
471
472template <typename T>
473constexpr bool operator>=(const Optional<T> &X, const T &Y) {
474 return !(X < Y);
475}
476
477template <typename T>
478constexpr bool operator>=(const T &X, const Optional<T> &Y) {
479 return !(X < Y);
480}
481
482raw_ostream &operator<<(raw_ostream &OS, NoneType);
483
484template <typename T, typename = decltype(std::declval<raw_ostream &>()
485 << std::declval<const T &>())>
486raw_ostream &operator<<(raw_ostream &OS, const Optional<T> &O) {
487 if (O)
488 OS << *O;
489 else
490 OS << None;
491 return OS;
492}
493
494} // end namespace llvm
495
496#endif // LLVM_ADT_OPTIONAL_H