//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library. First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node). If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
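//
// For example, given a loop-header PHI such as (illustrative IR)
//
//   %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
//   %iv.next = add i32 %iv, 1
//
// the analysis represents %iv by the acyclic recurrence {0,+,1}<%loop>
// rather than by a cycle through the PHI.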
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression. These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
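//
// For instance, the folders fold ((%x + 1) + 1) to (%x + 2), and they produce
// the single canonical node (%a + %b) for both (%a + %b) and (%b + %a), so
// equivalent expressions compare equal by pointer.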
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::ZeroOrMore,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));

// FIXME: Enable this with EXPENSIVE_CHECKS when the test suite is clean.
static cl::opt<bool> VerifySCEV(
    "verify-scev", cl::Hidden,
    cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
static cl::opt<bool> VerifySCEVStrict(
    "verify-scev-strict", cl::Hidden,
    cl::desc("Enable stricter verification when -verify-scev is passed"));
static cl::opt<bool>
    VerifySCEVMap("verify-scev-maps", cl::Hidden,
                  cl::desc("Verify no dangling value in ScalarEvolution's "
                           "ExprValueMap (slow)"));

static cl::opt<bool> VerifyIR(
    "scev-verify-ir", cl::Hidden,
    cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"),
    cl::init(false));

static cl::opt<unsigned> MulOpsInlineThreshold(
    "scev-mulops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining multiplication operands into a SCEV"),
    cl::init(32));

static cl::opt<unsigned> AddOpsInlineThreshold(
    "scev-addops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining addition operands into a SCEV"),
    cl::init(500));

static cl::opt<unsigned> MaxSCEVCompareDepth(
    "scalar-evolution-max-scev-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV complexity comparisons"),
    cl::init(32));

static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth(
    "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV operations implication analysis"),
    cl::init(2));

static cl::opt<unsigned> MaxValueCompareDepth(
    "scalar-evolution-max-value-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive value complexity comparisons"),
    cl::init(2));

static cl::opt<unsigned>
    MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden,
                  cl::desc("Maximum depth of recursive arithmetics"),
                  cl::init(32));

static cl::opt<unsigned> MaxConstantEvolvingDepth(
    "scalar-evolution-max-constant-evolving-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive constant evolving"), cl::init(32));

static cl::opt<unsigned>
    MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden,
                 cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"),
                 cl::init(8));

static cl::opt<unsigned>
    MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden,
                  cl::desc("Max coefficients in AddRec during evolving"),
                  cl::init(8));

static cl::opt<unsigned>
    HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden,
                      cl::desc("Size of the expression which is considered huge"),
                      cl::init(4096));

//===----------------------------------------------------------------------===//
// SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

void SCEV::print(raw_ostream &OS) const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->hasNoUnsignedWrap())
      OS << "nuw><";
    if (AR->hasNoSignedWrap())
      OS << "nsw><";
    if (AR->hasNoSelfWrap() &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = nullptr;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    case scUMinExpr:
      OpStr = " umin ";
      break;
    case scSMinExpr:
      OpStr = " smin ";
      break;
    }
    OS << "(";
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      OS << **I;
      if (std::next(I) != E)
        OS << OpStr;
    }
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->hasNoUnsignedWrap())
        OS << "<nuw>";
      if (NAry->hasNoSignedWrap())
        OS << "<nsw>";
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}

Type *SCEV::getType() const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
    return cast<SCEVNAryExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isMinusOne();
  return false;
}

bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative; this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute, 0) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
                           unsigned SCEVTy, const SCEV *op, Type *ty)
  : SCEV(ID, SCEVTy, computeExpressionSize(op)), Op(op), Ty(ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
                                   const SCEV *op, Type *ty)
    : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert(Op->getType()->isIntegerTy() && Ty->isIntegerTy() &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert(Op->getType()->isIntegerTy() && Ty->isIntegerTy() &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert(Op->getType()->isIntegerTy() && Ty->isIntegerTy() &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which still point to
  // this SCEVUnknown.
  setValPtr(New);
}

bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                            ->getElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

//===----------------------------------------------------------------------===//
// SCEV Utilities
//===----------------------------------------------------------------------===//

/// Compare the two values \p LV and \p RV in terms of their "complexity" where
/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
/// operands in SCEV expressions. \p EqCache is a set of pairs of values that
/// have been previously deemed to be "equally complex" by this routine. It is
/// intended to avoid exponential time complexity in cases like:
///
///   %a = f(%x, %y)
///   %b = f(%a, %a)
///   %c = f(%b, %b)
///
///   %d = f(%x, %y)
///   %e = f(%d, %d)
///   %f = f(%e, %e)
///
///   CompareValueComplexity(%f, %c)
///
/// Since we do not continue running this routine on expression trees once we
/// have seen unequal values, there is no need to track them in the cache.
static int
CompareValueComplexity(EquivalenceClasses<const Value *> &EqCacheValue,
                       const LoopInfo *const LI, Value *LV, Value *RV,
                       unsigned Depth) {
  if (Depth > MaxValueCompareDepth || EqCacheValue.isEquivalent(LV, RV))
    return 0;

  // Order pointer values after integer values. This helps SCEVExpander form
  // GEPs.
  bool LIsPointer = LV->getType()->isPointerTy(),
       RIsPointer = RV->getType()->isPointerTy();
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

  // Compare getValueID values.
  unsigned LID = LV->getValueID(), RID = RV->getValueID();
  if (LID != RID)
    return (int)LID - (int)RID;

  // Sort arguments by their position.
  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;
  }

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();
      return !(GlobalValue::isPrivateLinkage(LT) ||
               GlobalValue::isInternalLinkage(LT));
    };

    // Use the names to distinguish the two values, but only if the
    // names are semantically important.
    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());
  }

  // For instructions, compare their loop depth, and their operand count. This
  // is pretty loose.
  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    // Compare loop depths.
    const BasicBlock *LParent = LInst->getParent(),
                     *RParent = RInst->getParent();
    if (LParent != RParent) {
      unsigned LDepth = LI->getLoopDepth(LParent),
               RDepth = LI->getLoopDepth(RParent);
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }

    // Compare the number of operands.
    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(0u, LNumOps)) {
      int Result =
          CompareValueComplexity(EqCacheValue, LI, LInst->getOperand(Idx),
                                 RInst->getOperand(Idx), Depth + 1);
      if (Result != 0)
        return Result;
    }
  }

  EqCacheValue.unionSets(LV, RV);
  return 0;
}

// Return negative, zero, or positive, if LHS is less than, equal to, or greater
// than RHS, respectively. A three-way result allows recursive comparisons to be
// more efficient.
static int CompareSCEVComplexity(
    EquivalenceClasses<const SCEV *> &EqCacheSCEV,
    EquivalenceClasses<const Value *> &EqCacheValue,
    const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS,
    DominatorTree &DT, unsigned Depth = 0) {
  // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  if (LHS == RHS)
    return 0;

  // Primarily, sort the SCEVs by their getSCEVType().
  unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  if (LType != RType)
    return (int)LType - (int)RType;

  if (Depth > MaxSCEVCompareDepth || EqCacheSCEV.isEquivalent(LHS, RHS))
    return 0;
  // Aside from the getSCEVType() ordering, the particular ordering
  // isn't very important except that it's beneficial to be consistent,
  // so that (a + b) and (b + a) don't end up as different expressions.
  switch (static_cast<SCEVTypes>(LType)) {
  case scUnknown: {
    const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

    int X = CompareValueComplexity(EqCacheValue, LI, LU->getValue(),
                                   RU->getValue(), Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scConstant: {
    const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    const SCEVConstant *RC = cast<SCEVConstant>(RHS);

    // Compare constant values.
    const APInt &LA = LC->getAPInt();
    const APInt &RA = RC->getAPInt();
    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;
  }

  case scAddRecExpr: {
    const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

    // There is always a dominance between two recs that are used by one SCEV,
    // so we can safely sort recs by loop header dominance. We require such
    // order in getAddExpr.
    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
      assert(LHead != RHead && "Two loops share the same header?");
      if (DT.dominates(LHead, RHead))
        return 1;
      else
        assert(DT.dominates(RHead, LHead) &&
               "No dominance between recurrences used by one SCEV?");
      return -1;
    }

    // Addrec complexity grows with operand count.
    unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Lexicographically compare.
    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                    LA->getOperand(i), RA->getOperand(i), DT,
                                    Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scAddExpr:
  case scMulExpr:
  case scSMaxExpr:
  case scUMaxExpr:
  case scSMinExpr:
  case scUMinExpr: {
    const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
    const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

    // Lexicographically compare n-ary expressions.
    unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                    LC->getOperand(i), RC->getOperand(i), DT,
                                    Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scUDivExpr: {
    const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
    const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

    // Lexicographically compare udiv expressions.
    int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getLHS(),
                                  RC->getLHS(), DT, Depth + 1);
    if (X != 0)
      return X;
    X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getRHS(),
                              RC->getRHS(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scTruncate:
  case scZeroExtend:
  case scSignExtend: {
    const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
    const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

    // Compare cast expressions by operand.
    int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                  LC->getOperand(), RC->getOperand(), DT,
                                  Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value. When this routine is
/// finished, we know that any duplicates in the vector are consecutive and that
/// complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine. In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
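///
/// For example, given the operand list (%b, 2, %a, %b), sorting places the
/// constant first (constants have the lowest complexity) and grouping makes
/// the duplicate %b operands adjacent -- (2, %a, %b, %b) -- so callers can
/// combine duplicates with a single linear scan.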
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI, DominatorTree &DT) {
  if (Ops.size() < 2) return; // Noop

  EquivalenceClasses<const SCEV *> EqCacheSCEV;
  EquivalenceClasses<const Value *> EqCacheValue;
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, RHS, LHS, DT) < 0)
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  llvm::stable_sort(Ops, [&](const SCEV *LHS, const SCEV *RHS) {
    return CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LHS, RHS, DT) <
           0;
  });

  // Now that we are sorted by complexity, group elements of the same
  // complexity. Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice. Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i; // no need to rescan it.
        if (i == e-2) return; // Done!
      }
    }
  }
}

// Returns the size of the SCEV S.
static inline int sizeOfSCEV(const SCEV *S) {
  struct FindSCEVSize {
    int Size = 0;

    FindSCEVSize() = default;

    bool follow(const SCEV *S) {
      ++Size;
      // Keep looking at all operands of S.
      return true;
    }

    bool isDone() const {
      return false;
    }
  };

  FindSCEVSize F;
  SCEVTraversal<FindSCEVSize> ST(F);
  ST.visitAll(S);
  return F.Size;
}

/// Returns true if the subtree of \p S contains at least HugeExprThreshold
/// nodes.
static bool isHugeExpression(const SCEV *S) {
  return S->getExpressionSize() >= HugeExprThreshold;
}

/// Returns true if \p Ops contains a huge SCEV (see definition above).
static bool hasHugeExpression(ArrayRef<const SCEV *> Ops) {
  return any_of(Ops, isHugeExpression);
}

namespace {

struct SCEVDivision : public SCEVVisitor<SCEVDivision, void> {
public:
  // Computes the Quotient and Remainder of the division of Numerator by
  // Denominator.
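  // For instance, dividing (8 + 4 * %x) by 4 yields quotient (2 + %x) and
  // remainder 0, while dividing (9 + 4 * %x) by 4 yields quotient (2 + %x)
  // and remainder 1.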
  static void divide(ScalarEvolution &SE, const SCEV *Numerator,
                     const SCEV *Denominator, const SCEV **Quotient,
                     const SCEV **Remainder) {
    assert(Numerator && Denominator && "Uninitialized SCEV");

    SCEVDivision D(SE, Numerator, Denominator);

    // Check for the trivial case here to avoid having to check for it in the
    // rest of the code.
    if (Numerator == Denominator) {
      *Quotient = D.One;
      *Remainder = D.Zero;
      return;
    }

    if (Numerator->isZero()) {
      *Quotient = D.Zero;
      *Remainder = D.Zero;
      return;
    }

    // A simple case: N/1. The quotient is N.
    if (Denominator->isOne()) {
      *Quotient = Numerator;
      *Remainder = D.Zero;
      return;
    }

    // Split the Denominator when it is a product.
    if (const SCEVMulExpr *T = dyn_cast<SCEVMulExpr>(Denominator)) {
      const SCEV *Q, *R;
      *Quotient = Numerator;
      for (const SCEV *Op : T->operands()) {
        divide(SE, *Quotient, Op, &Q, &R);
        *Quotient = Q;

        // Bail out when the Numerator is not divisible by one of the terms of
        // the Denominator.
        if (!R->isZero()) {
          *Quotient = D.Zero;
          *Remainder = Numerator;
          return;
        }
      }
      *Remainder = D.Zero;
      return;
    }

    D.visit(Numerator);
    *Quotient = D.Quotient;
    *Remainder = D.Remainder;
  }

  // Except in the trivial case described above, we do not know how to divide
  // Expr by Denominator for the following functions with empty implementation.
  void visitTruncateExpr(const SCEVTruncateExpr *Numerator) {}
  void visitZeroExtendExpr(const SCEVZeroExtendExpr *Numerator) {}
  void visitSignExtendExpr(const SCEVSignExtendExpr *Numerator) {}
  void visitUDivExpr(const SCEVUDivExpr *Numerator) {}
  void visitSMaxExpr(const SCEVSMaxExpr *Numerator) {}
  void visitUMaxExpr(const SCEVUMaxExpr *Numerator) {}
  void visitSMinExpr(const SCEVSMinExpr *Numerator) {}
  void visitUMinExpr(const SCEVUMinExpr *Numerator) {}
  void visitUnknown(const SCEVUnknown *Numerator) {}
  void visitCouldNotCompute(const SCEVCouldNotCompute *Numerator) {}

  void visitConstant(const SCEVConstant *Numerator) {
    if (const SCEVConstant *D = dyn_cast<SCEVConstant>(Denominator)) {
      APInt NumeratorVal = Numerator->getAPInt();
      APInt DenominatorVal = D->getAPInt();
      uint32_t NumeratorBW = NumeratorVal.getBitWidth();
      uint32_t DenominatorBW = DenominatorVal.getBitWidth();

      if (NumeratorBW > DenominatorBW)
        DenominatorVal = DenominatorVal.sext(NumeratorBW);
      else if (NumeratorBW < DenominatorBW)
        NumeratorVal = NumeratorVal.sext(DenominatorBW);

      APInt QuotientVal(NumeratorVal.getBitWidth(), 0);
      APInt RemainderVal(NumeratorVal.getBitWidth(), 0);
      APInt::sdivrem(NumeratorVal, DenominatorVal, QuotientVal, RemainderVal);
      Quotient = SE.getConstant(QuotientVal);
      Remainder = SE.getConstant(RemainderVal);
      return;
    }
  }

  void visitAddRecExpr(const SCEVAddRecExpr *Numerator) {
    const SCEV *StartQ, *StartR, *StepQ, *StepR;
    if (!Numerator->isAffine())
      return cannotDivide(Numerator);
    divide(SE, Numerator->getStart(), Denominator, &StartQ, &StartR);
    divide(SE, Numerator->getStepRecurrence(SE), Denominator, &StepQ, &StepR);
    // Bail out if the types do not match.
    Type *Ty = Denominator->getType();
    if (Ty != StartQ->getType() || Ty != StartR->getType() ||
        Ty != StepQ->getType() || Ty != StepR->getType())
      return cannotDivide(Numerator);
    Quotient = SE.getAddRecExpr(StartQ, StepQ, Numerator->getLoop(),
                                Numerator->getNoWrapFlags());
    Remainder = SE.getAddRecExpr(StartR, StepR, Numerator->getLoop(),
                                 Numerator->getNoWrapFlags());
  }

  void visitAddExpr(const SCEVAddExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs, Rs;
    Type *Ty = Denominator->getType();

    for (const SCEV *Op : Numerator->operands()) {
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);

      // Bail out if types do not match.
      if (Ty != Q->getType() || Ty != R->getType())
        return cannotDivide(Numerator);

      Qs.push_back(Q);
      Rs.push_back(R);
    }

    if (Qs.size() == 1) {
      Quotient = Qs[0];
      Remainder = Rs[0];
      return;
    }

    Quotient = SE.getAddExpr(Qs);
    Remainder = SE.getAddExpr(Rs);
  }

  void visitMulExpr(const SCEVMulExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs;
    Type *Ty = Denominator->getType();

    bool FoundDenominatorTerm = false;
    for (const SCEV *Op : Numerator->operands()) {
      // Bail out if types do not match.
      if (Ty != Op->getType())
        return cannotDivide(Numerator);

      if (FoundDenominatorTerm) {
        Qs.push_back(Op);
        continue;
      }

      // Check whether Denominator divides one of the product operands.
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);
      if (!R->isZero()) {
        Qs.push_back(Op);
        continue;
      }

      // Bail out if types do not match.
      if (Ty != Q->getType())
        return cannotDivide(Numerator);

      FoundDenominatorTerm = true;
      Qs.push_back(Q);
    }

    if (FoundDenominatorTerm) {
      Remainder = Zero;
      if (Qs.size() == 1)
        Quotient = Qs[0];
      else
        Quotient = SE.getMulExpr(Qs);
      return;
    }

    if (!isa<SCEVUnknown>(Denominator))
      return cannotDivide(Numerator);

    // The Remainder is obtained by replacing Denominator by 0 in Numerator.
    ValueToValueMap RewriteMap;
    RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
        cast<SCEVConstant>(Zero)->getValue();
    Remainder = SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);

    if (Remainder->isZero()) {
      // The Quotient is obtained by replacing Denominator by 1 in Numerator.
      RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
          cast<SCEVConstant>(One)->getValue();
      Quotient =
          SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);
      return;
    }

    // Quotient is (Numerator - Remainder) divided by Denominator.
    const SCEV *Q, *R;
    const SCEV *Diff = SE.getMinusSCEV(Numerator, Remainder);
    // This SCEV does not seem to simplify: fail the division here.
    if (sizeOfSCEV(Diff) > sizeOfSCEV(Numerator))
      return cannotDivide(Numerator);
    divide(SE, Diff, Denominator, &Q, &R);
    if (R != Zero)
      return cannotDivide(Numerator);
    Quotient = Q;
  }

private:
  SCEVDivision(ScalarEvolution &S, const SCEV *Numerator,
               const SCEV *Denominator)
      : SE(S), Denominator(Denominator) {
    Zero = SE.getZero(Denominator->getType());
    One = SE.getOne(Denominator->getType());

    // We generally do not know how to divide Expr by Denominator. We
    // initialize the division to a "cannot divide" state to simplify the rest
    // of the code.
    cannotDivide(Numerator);
  }

  // Convenience function for giving up on the division. We set the quotient to
  // be equal to zero and the remainder to be equal to the numerator.
  void cannotDivide(const SCEV *Numerator) {
    Quotient = Zero;
    Remainder = Numerator;
  }

  ScalarEvolution &SE;
  const SCEV *Denominator, *Quotient, *Remainder, *Zero, *One;
};

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// Compute BC(It, K). The result has width W. Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value. We must be prepared for
  // overflow. Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W. Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula. However,
  // this formula can be implemented much more efficiently. The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic. To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse. Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T. The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits. This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate. Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula needs less than W + K bits. Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway. We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)
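  //
  // As a concrete check of the scheme, take K = 3 and W = 8: K! = 6 = 2^1 * 3,
  // so T = 1 and the odd factor is 3. For It = 5, the product 5 * 4 * 3 = 60
  // is formed at W + T = 9 bits, the division by 2^T gives 30, and multiplying
  // by the inverse of 3 modulo 2^8 (which is 171, since 3 * 171 = 513 = 1 mod
  // 256) gives 30 * 171 = 5130 = 10 (mod 256) -- exactly BC(5, 3) = 10.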

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult.lshrInPlace(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step.
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W.
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T.
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.
  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// Return the value of this chain of recurrences at the specified iteration
/// number. We can evaluate this recurrence by multiplying each element in the
/// chain by the binomial coefficient corresponding to it. In other words, we
/// can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
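///
/// For instance, the affine recurrence {5,+,3} evaluates to 5 + 3 * It, and
/// the quadratic recurrence {A,+,B,+,C} evaluates to
/// A + B * It + C * (It * (It - 1) / 2).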
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
// SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty, Depth + 1);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty, Depth + 1);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty, Depth + 1);

  if (Depth > MaxCastDepth) {
    SCEV *S =
        new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // trunc(x1 + ... + xN) --> trunc(x1) + ... + trunc(xN) and
  // trunc(x1 * ... * xN) --> trunc(x1) * ... * trunc(xN),
  // if after transforming we have at most one truncate, not counting truncates
  // that replace other casts.
  if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) {
    auto *CommOp = cast<SCEVCommutativeExpr>(Op);
    SmallVector<const SCEV *, 4> Operands;
    unsigned numTruncs = 0;
    for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
         ++i) {
      const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty, Depth + 1);
      if (!isa<SCEVCastExpr>(CommOp->getOperand(i)) && isa<SCEVTruncateExpr>(S))
        numTruncs++;
      Operands.push_back(S);
    }
    if (numTruncs < 2) {
      if (isa<SCEVAddExpr>(Op))
        return getAddExpr(Operands);
      else if (isa<SCEVMulExpr>(Op))
        return getMulExpr(Operands);
      else
        llvm_unreachable("Unexpected SCEV type for Op.");
    }
    // Although we checked in the beginning that ID is not in the cache, it is
    // possible that, during the recursion and other modifications above, ID
    // was inserted into the cache. So if we find it, just return it.
    if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
      return S;
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty, Depth + 1));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the
// loop does not exceed this limit before incrementing.
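//
// For instance, with 8-bit values and a Step whose signed range is at most 4,
// the limit computed below is SINT_MIN - 4 == 124 (modulo 256): any recurrence
// value signed-less-than 124 can be incremented by up to 4 without exceeding
// SINT_MAX == 127.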
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRangeMax(Step));
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRangeMin(Step));
  }
  return nullptr;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop does
// not exceed this limit before incrementing.
static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
                                                   ICmpInst::Predicate *Pred,
                                                   ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  *Pred = ICmpInst::ICMP_ULT;

  return SE->getConstant(APInt::getMinValue(BitWidth) -
                         SE->getUnsignedRangeMax(Step));
}

namespace {

struct ExtendOpTraitsBase {
  typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *,
                                                          unsigned);
};

// Used to make code generic over signed and unsigned overflow.
template <typename ExtendOp> struct ExtendOpTraits {
  // Members present:
  //
  // static const SCEV::NoWrapFlags WrapType;
  //
  // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  //
  // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  //                                            ICmpInst::Predicate *Pred,
  //                                            ScalarEvolution *SE);
};

template <>
struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getSignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;

template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;

} // end anonymous namespace

// The recurrence AR has been shown to have no signed/unsigned wrap or something
// close to it. Typically, if we can prove NSW/NUW for AR, then we can just as
// easily prove NSW/NUW for its preincrement or postincrement sibling. This
// allows normalizing a sign/zero extended AddRec as such:
//   {sext/zext(Step + Start),+,Step} => {Step + sext/zext(Start),+,Step}
// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)".
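//
// For instance, if {1 + %x,+,1} is known to be <nsw>, this lets
// sext({1 + %x,+,1}) be rewritten as {1 + sext(%x),+,1} once
// "1 + sext(%x)" itself is shown not to overflow.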
template <typename ExtendOpTy>
static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE, unsigned Depth) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return nullptr;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list.
  SmallVector<const SCEV *, 4> DiffOps;
  for (const SCEV *Op : SA->operands())
    if (Op != Step)
      DiffOps.push_back(Op);

  if (DiffOps.size() == SA->getNumOperands())
    return nullptr;

  // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
  // `Step`:

  // 1. NSW/NUW flags on the step increment.
  auto PreStartFlags =
    ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
  const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
      SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
  // "S+X does not sign/unsign-overflow".

  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
      !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
      SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth),
                     (SE->*GetExtendExpr)(Step, WideTy, Depth));
  if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {
    if (PreAR && AR->getNoWrapFlags(WrapType)) {
      // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
      // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
      // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`. Cache this fact.
      const_cast<SCEVAddRecExpr *>(PreAR)->setNoWrapFlags(WrapType);
    }
    return PreStart;
  }

  // 3. Loop precondition.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
    return PreStart;

  return nullptr;
}

// Get the normalized zero or sign extended expression for this AddRec's Start.
template <typename ExtendOpTy>
static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE,
                                        unsigned Depth) {
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
  if (!PreStart)
    return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth);

  return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty,
                                             Depth),
                        (SE->*GetExtendExpr)(PreStart, Ty, Depth));
}

// Try to prove away overflow by looking at "nearby" add recurrences. A
// motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
// does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
//
// Formally:
//
//     {S,+,X} == {S-T,+,X} + T
//  => Ext({S,+,X}) == Ext({S-T,+,X} + T)
//
//  If ({S-T,+,X} + T) does not overflow ... (1)
//
//  RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
//
//  If {S-T,+,X} does not overflow ... (2)
//
//  RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
//      == {Ext(S-T)+Ext(T),+,Ext(X)}
//
//  If (S-T)+T does not overflow ... (3)
//
//  RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
//      == {Ext(S),+,Ext(X)} == LHS
//
// Thus, if (1), (2) and (3) are true for some T, then
//   Ext({S,+,X}) == {Ext(S),+,Ext(X)}
//
// (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T)
// does not overflow" restricted to the 0th iteration. Therefore we only need
// to check for (1) and (2).
//
// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
// is `Delta` (defined below).
template <typename ExtendOpTy>
bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
                                                const SCEV *Step,
                                                const Loop *L) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;

  // We restrict `Start` to a constant to prevent SCEV from spending too much
  // time here. It is correct (but more expensive) to continue with a
  // non-constant `Start` and do a general SCEV subtraction to compute
  // `PreStart` below.
  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
  if (!StartC)
    return false;

  APInt StartAI = StartC->getAPInt();

  for (unsigned Delta : {-2, -1, 1, 2}) {
    const SCEV *PreStart = getConstant(StartAI - Delta);

    FoldingSetNodeID ID;
    ID.AddInteger(scAddRecExpr);
    ID.AddPointer(PreStart);
    ID.AddPointer(Step);
    ID.AddPointer(L);
    void *IP = nullptr;
    const auto *PreAR =
        static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));

    // Give up if we don't already have the add recurrence we need because
    // actually constructing an add recurrence is relatively expensive.
    if (PreAR && PreAR->getNoWrapFlags(WrapType)) { // proves (2)
      const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
      ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
      const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
          DeltaS, &Pred, this);
      if (Limit && isKnownPredicate(Pred, PreAR, Limit)) // proves (1)
        return true;
    }
  }

  return false;
}

1600 // Finds an integer D for an expression (C + x + y + ...) such that the top
1601 // level addition in (D + (C - D + x + y + ...)) would not wrap (signed or
1602 // unsigned) and the number of trailing zeros of (C - D + x + y + ...) is
1603 // maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs, and
1604 // the (C + x + y + ...) expression is \p WholeAddExpr.
1606  const SCEVConstant *ConstantTerm,
1607  const SCEVAddExpr *WholeAddExpr) {
1608  const APInt C = ConstantTerm->getAPInt();
1609  const unsigned BitWidth = C.getBitWidth();
1610  // Find number of trailing zeros of (x + y + ...) w/o the C first:
1611  uint32_t TZ = BitWidth;
1612  for (unsigned I = 1, E = WholeAddExpr->getNumOperands(); I < E && TZ; ++I)
1613  TZ = std::min(TZ, SE.GetMinTrailingZeros(WholeAddExpr->getOperand(I)));
1614  if (TZ) {
1615  // Set D to be as many least significant bits of C as possible while still
1616  // guaranteeing that adding D to (C - D + x + y + ...) won't cause a wrap:
1617  return TZ < BitWidth ? C.trunc(TZ).zext(BitWidth) : C;
1618  }
1619  return APInt(BitWidth, 0);
1620 }
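// A worked instance of the rule above (illustrative values): if C = 5
// (0b101) and every one of x, y, ... is a multiple of 4, then TZ = 2 and
// D = 5 mod 4 = 1. The residual (4 + x + y + ...) keeps two trailing zeros,
// so adding D = 1 back on top can never carry past the bit width.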
1621 
1622 // Finds an integer D for an affine AddRec expression {C,+,x} such that the top
1623 // level addition in (D + {C-D,+,x}) would not wrap (signed or unsigned) and the
1624 // number of trailing zeros of (C - D + x * n) is maximized, where C is the \p
1625 // ConstantStart, x is an arbitrary \p Step, and n is the loop trip count.
1627  const APInt &ConstantStart,
1628  const SCEV *Step) {
1629  const unsigned BitWidth = ConstantStart.getBitWidth();
1630  const uint32_t TZ = SE.GetMinTrailingZeros(Step);
1631  if (TZ)
1632  return TZ < BitWidth ? ConstantStart.trunc(TZ).zext(BitWidth)
1633  : ConstantStart;
1634  return APInt(BitWidth, 0);
1635 }
1636 
1637 const SCEV *
1638 ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
1639  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1640  "This is not an extending conversion!");
1641  assert(isSCEVable(Ty) &&
1642  "This is not a conversion to a SCEVable type!");
1643  Ty = getEffectiveSCEVType(Ty);
1644 
1645  // Fold if the operand is constant.
1646  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1647  return getConstant(
1648  cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));
1649 
1650  // zext(zext(x)) --> zext(x)
1651  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
1652  return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);
1653 
1654  // Before doing any expensive analysis, check to see if we've already
1655  // computed a SCEV for this Op and Ty.
1656  FoldingSetNodeID ID;
1657  ID.AddInteger(scZeroExtend);
1658  ID.AddPointer(Op);
1659  ID.AddPointer(Ty);
1660  void *IP = nullptr;
1661  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1662  if (Depth > MaxCastDepth) {
1663  SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
1664  Op, Ty);
1665  UniqueSCEVs.InsertNode(S, IP);
1666  addToLoopUseLists(S);
1667  return S;
1668  }
1669 
1670  // zext(trunc(x)) --> zext(x) or x or trunc(x)
1671  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
1672  // It's possible the bits taken off by the truncate were all zero bits. If
1673  // so, we should be able to simplify this further.
1674  const SCEV *X = ST->getOperand();
1675  ConstantRange CR = getUnsignedRange(X);
1676  unsigned TruncBits = getTypeSizeInBits(ST->getType());
1677  unsigned NewBits = getTypeSizeInBits(Ty);
1678  if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
1679  CR.zextOrTrunc(NewBits)))
1680  return getTruncateOrZeroExtend(X, Ty, Depth);
1681  }
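  // For example (a sketch with assumed ranges): if X is an i32 whose
  // unsigned range is [0, 200), then for zext(trunc(X) to i8) to i64 the
  // truncation drops only zero bits, the containment check above succeeds,
  // and the whole cast pair folds to zext(X) to i64.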
1682 
1683  // If the input value is a chrec scev and we can prove that the value
1684  // did not overflow in the old, smaller type, we can zero extend all of the
1685  // operands (often constants). This allows analysis of something like
1686  // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
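  // In that loop (illustrative), X is the affine addrec {0,+,1}<%loop> in
  // i8; once the checks below prove it cannot wrap unsigned, zext(X) folds
  // to the addrec {0,+,1}<nuw> in i32 rather than an opaque zext node.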
1687  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
1688  if (AR->isAffine()) {
1689  const SCEV *Start = AR->getStart();
1690  const SCEV *Step = AR->getStepRecurrence(*this);
1691  unsigned BitWidth = getTypeSizeInBits(AR->getType());
1692  const Loop *L = AR->getLoop();
1693 
1694  if (!AR->hasNoUnsignedWrap()) {
1695  auto NewFlags = proveNoWrapViaConstantRanges(AR);
1696  const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags);
1697  }
1698 
1699  // If we have special knowledge that this addrec won't overflow,
1700  // we don't need to do any further analysis.
1701  if (AR->hasNoUnsignedWrap())
1702  return getAddRecExpr(
1703  getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
1704  getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
1705 
1706  // Check whether the backedge-taken count is SCEVCouldNotCompute.
1707  // Note that this serves two purposes: It filters out loops that are
1708  // simply not analyzable, and it covers the case where this code is
1709  // being called from within backedge-taken count analysis, such that
1710  // attempting to ask for the backedge-taken count would likely result
1711  // in infinite recursion. In the latter case, the analysis code will
1712  // cope with a conservative value, and it will take care to purge
1713  // that value once it has finished.
1714  const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
1715  if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1716  // Manually compute the final value for AR, checking for
1717  // overflow.
1718 
1719  // Check whether the backedge-taken count can be losslessly cast to
1720  // the addrec's type. The count is always unsigned.
1721  const SCEV *CastedMaxBECount =
1722  getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
1723  const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
1724  CastedMaxBECount, MaxBECount->getType(), Depth);
1725  if (MaxBECount == RecastedMaxBECount) {
1726  Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
1727  // Check whether Start+Step*MaxBECount has no unsigned overflow.
1728  const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step,
1729  SCEV::FlagAnyWrap, Depth + 1);
1730  const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul,
1732  Depth + 1),
1733  WideTy, Depth + 1);
1734  const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1);
1735  const SCEV *WideMaxBECount =
1736  getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
1737  const SCEV *OperandExtendedAdd =
1738  getAddExpr(WideStart,
1739  getMulExpr(WideMaxBECount,
1740  getZeroExtendExpr(Step, WideTy, Depth + 1),
1741  SCEV::FlagAnyWrap, Depth + 1),
1742  SCEV::FlagAnyWrap, Depth + 1);
1743  if (ZAdd == OperandExtendedAdd) {
1744  // Cache knowledge of AR NUW, which is propagated to this AddRec.
1745  const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
1746  // Return the expression with the addrec on the outside.
1747  return getAddRecExpr(
1748  getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1749  Depth + 1),
1750  getZeroExtendExpr(Step, Ty, Depth + 1), L,
1751  AR->getNoWrapFlags());
1752  }
1753  // Similar to above, only this time treat the step value as signed.
1754  // This covers loops that count down.
1755  OperandExtendedAdd =
1756  getAddExpr(WideStart,
1757  getMulExpr(WideMaxBECount,
1758  getSignExtendExpr(Step, WideTy, Depth + 1),
1759  SCEV::FlagAnyWrap, Depth + 1),
1760  SCEV::FlagAnyWrap, Depth + 1);
1761  if (ZAdd == OperandExtendedAdd) {
1762  // Cache knowledge of AR NW, which is propagated to this AddRec.
1763  // Negative step causes unsigned wrap, but it still can't self-wrap.
1764  const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
1765  // Return the expression with the addrec on the outside.
1766  return getAddRecExpr(
1767  getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1768  Depth + 1),
1769  getSignExtendExpr(Step, Ty, Depth + 1), L,
1770  AR->getNoWrapFlags());
1771  }
1772  }
1773  }
1774 
1775  // Normally, in the cases we can prove no-overflow via a
1776  // backedge guarding condition, we can also compute a backedge
1777  // taken count for the loop. The exceptions are assumptions and
1778  // guards present in the loop -- SCEV is not great at exploiting
1779  // these to compute max backedge taken counts, but can still use
1780  // these to prove lack of overflow. Use this fact to avoid
1781  // doing extra work that may not pay off.
1782  if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
1783  !AC.assumptions().empty()) {
1784  // If the backedge is guarded by a comparison with the pre-inc
1785  // value the addrec is safe. Also, if the entry is guarded by
1786  // a comparison with the start value and the backedge is
1787  // guarded by a comparison with the post-inc value, the addrec
1788  // is safe.
1789  if (isKnownPositive(Step)) {
1790  const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
1791  getUnsignedRangeMax(Step));
1792  if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
1793  isKnownOnEveryIteration(ICmpInst::ICMP_ULT, AR, N)) {
1794  // Cache knowledge of AR NUW, which is propagated to this
1795  // AddRec.
1796  const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
1797  // Return the expression with the addrec on the outside.
1798  return getAddRecExpr(
1799  getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1800  Depth + 1),
1801  getZeroExtendExpr(Step, Ty, Depth + 1), L,
1802  AR->getNoWrapFlags());
1803  }
1804  } else if (isKnownNegative(Step)) {
1805  const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
1806  getSignedRangeMin(Step));
1807  if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
1808  isKnownOnEveryIteration(ICmpInst::ICMP_UGT, AR, N)) {
1809  // Cache knowledge of AR NW, which is propagated to this
1810  // AddRec. Negative step causes unsigned wrap, but it
1811  // still can't self-wrap.
1812  const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
1813  // Return the expression with the addrec on the outside.
1814  return getAddRecExpr(
1815  getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1816  Depth + 1),
1817  getSignExtendExpr(Step, Ty, Depth + 1), L,
1818  AR->getNoWrapFlags());
1819  }
1820  }
1821  }
1822 
1823  // zext({C,+,Step}) --> (zext(D) + zext({C-D,+,Step}))<nuw><nsw>
1824  // if D + (C - D + Step * n) could be proven to not unsigned wrap
1825  // where D maximizes the number of trailing zeros of (C - D + Step * n)
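      // For example (illustrative): for zext({5,+,8}) the choice D = 5 gives
      // the residual {0,+,8}, whose values are all multiples of 8, so adding
      // 5 back can never wrap unsigned and we produce
      // (zext(5) + zext({0,+,8}))<nuw>.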
1826  if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
1827  const APInt &C = SC->getAPInt();
1828  const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
1829  if (D != 0) {
1830  const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
1831  const SCEV *SResidual =
1832  getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
1833  const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
1834  return getAddExpr(SZExtD, SZExtR,
1835  (SCEV::NoWrapFlags)(SCEV::FlagNUW | SCEV::FlagNSW),
1836  Depth + 1);
1837  }
1838  }
1839 
1840  if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
1841  const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
1842  return getAddRecExpr(
1843  getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
1844  getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
1845  }
1846  }
1847 
1848  // zext(A % B) --> zext(A) % zext(B)
1849  {
1850  const SCEV *LHS;
1851  const SCEV *RHS;
1852  if (matchURem(Op, LHS, RHS))
1853  return getURemExpr(getZeroExtendExpr(LHS, Ty, Depth + 1),
1854  getZeroExtendExpr(RHS, Ty, Depth + 1));
1855  }
1856 
1857  // zext(A / B) --> zext(A) / zext(B).
1858  if (auto *Div = dyn_cast<SCEVUDivExpr>(Op))
1859  return getUDivExpr(getZeroExtendExpr(Div->getLHS(), Ty, Depth + 1),
1860  getZeroExtendExpr(Div->getRHS(), Ty, Depth + 1));
1861 
1862  if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
1863  // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw>
1864  if (SA->hasNoUnsignedWrap()) {
1865  // If the addition cannot overflow in the unsigned sense then we can, by
1866  // definition, commute the zero extension with the addition operation.
1867  SmallVector<const SCEV *, 4> Ops;
1868  for (const auto *Op : SA->operands())
1869  Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
1870  return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1);
1871  }
1872 
1873  // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...))
1874  // if D + (C - D + x + y + ...) could be proven to not unsigned wrap
1875  // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
1876  //
1877  // Often address arithmetic contains expressions like
1878  // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))).
1879  // This transformation is useful while proving that such expressions are
1880  // equal or differ by a small constant amount; see the LoadStoreVectorizer pass.
1881  if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
1882  const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
1883  if (D != 0) {
1884  const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
1885  const SCEV *SResidual =
1886  getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
1887  const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
1888  return getAddExpr(SZExtD, SZExtR,
1890  Depth + 1);
1891  }
1892  }
1893  }
1894 
1895  if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) {
1896  // zext((A * B * ...)<nuw>) --> (zext(A) * zext(B) * ...)<nuw>
1897  if (SM->hasNoUnsignedWrap()) {
1898  // If the multiply cannot overflow in the unsigned sense then we can, by
1899  // definition, commute the zero extension with the multiply operation.
1900  SmallVector<const SCEV *, 4> Ops;
1901  for (const auto *Op : SM->operands())
1902  Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
1903  return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1);
1904  }
1905 
1906  // zext(2^K * (trunc X to iN)) to iM ->
1907  // 2^K * (zext(trunc X to i{N-K}) to iM)<nuw>
1908  //
1909  // Proof:
1910  //
1911  // zext(2^K * (trunc X to iN)) to iM
1912  // = zext((trunc X to iN) << K) to iM
1913  // = zext((trunc X to i{N-K}) << K)<nuw> to iM
1914  // (because shl removes the top K bits)
1915  // = zext((2^K * (trunc X to i{N-K}))<nuw>) to iM
1916  // = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>.
1917  //
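  // A concrete instance of the proof (illustrative): with K = 2, N = 8 and
  // M = 32, zext(4 * (trunc X to i8)) to i32 becomes
  // 4 * (zext(trunc X to i6) to i32), since a 6-bit value shifted left by
  // two bits still fits in 8 bits.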
1918  if (SM->getNumOperands() == 2)
1919  if (auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0)))
1920  if (MulLHS->getAPInt().isPowerOf2())
1921  if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) {
1922  int NewTruncBits = getTypeSizeInBits(TruncRHS->getType()) -
1923  MulLHS->getAPInt().logBase2();
1924  Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits);
1925  return getMulExpr(
1926  getZeroExtendExpr(MulLHS, Ty),
1927  getZeroExtendExpr(
1928  getTruncateExpr(TruncRHS->getOperand(), NewTruncTy), Ty),
1929  SCEV::FlagNUW, Depth + 1);
1930  }
1931  }
1932 
1933  // The cast wasn't folded; create an explicit cast node.
1934  // Recompute the insert position, as it may have been invalidated.
1935  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1936  SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
1937  Op, Ty);
1938  UniqueSCEVs.InsertNode(S, IP);
1939  addToLoopUseLists(S);
1940  return S;
1941 }
1942 
1943 const SCEV *
1944 ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
1945  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1946  "This is not an extending conversion!");
1947  assert(isSCEVable(Ty) &&
1948  "This is not a conversion to a SCEVable type!");
1949  Ty = getEffectiveSCEVType(Ty);
1950 
1951  // Fold if the operand is constant.
1952  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1953  return getConstant(
1954  cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty)));
1955 
1956  // sext(sext(x)) --> sext(x)
1957  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
1958  return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1);
1959 
1960  // sext(zext(x)) --> zext(x)
1961  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
1962  return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);
1963 
1964  // Before doing any expensive analysis, check to see if we've already
1965  // computed a SCEV for this Op and Ty.
1966  FoldingSetNodeID ID;
1967  ID.AddInteger(scSignExtend);
1968  ID.AddPointer(Op);
1969  ID.AddPointer(Ty);
1970  void *IP = nullptr;
1971  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1972  // Limit recursion depth.
1973  if (Depth > MaxCastDepth) {
1974  SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
1975  Op, Ty);
1976  UniqueSCEVs.InsertNode(S, IP);
1977  addToLoopUseLists(S);
1978  return S;
1979  }
1980 
1981  // sext(trunc(x)) --> sext(x) or x or trunc(x)
1982  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
1983  // It's possible the bits taken off by the truncate were all sign bits. If
1984  // so, we should be able to simplify this further.
1985  const SCEV *X = ST->getOperand();
1986  ConstantRange CR = getSignedRange(X);
1987  unsigned TruncBits = getTypeSizeInBits(ST->getType());
1988  unsigned NewBits = getTypeSizeInBits(Ty);
1989  if (CR.truncate(TruncBits).signExtend(NewBits).contains(
1990  CR.sextOrTrunc(NewBits)))
1991  return getTruncateOrSignExtend(X, Ty, Depth);
1992  }
1993 
1994  if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
1995  // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
1996  if (SA->hasNoSignedWrap()) {
1997  // If the addition cannot overflow in the signed sense then we can, by
1998  // definition, commute the sign extension with the addition operation.
1999  SmallVector<const SCEV *, 4> Ops;
2000  for (const auto *Op : SA->operands())
2001  Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1));
2002  return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1);
2003  }
2004 
2005  // sext(C + x + y + ...) --> (sext(D) + sext((C - D) + x + y + ...))
2006  // if D + (C - D + x + y + ...) could be proven to not signed wrap
2007  // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
2008  //
2009  // For instance, this will bring two seemingly different expressions:
2010  // 1 + sext(5 + 20 * %x + 24 * %y) and
2011  // sext(6 + 20 * %x + 24 * %y)
2012  // to the same form:
2013  // 2 + sext(4 + 20 * %x + 24 * %y)
2014  if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
2015  const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
2016  if (D != 0) {
2017  const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
2018  const SCEV *SResidual =
2019  getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
2020  const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
2021  return getAddExpr(SSExtD, SSExtR,
2022  (SCEV::NoWrapFlags)(SCEV::FlagNUW | SCEV::FlagNSW),
2023  Depth + 1);
2024  }
2025  }
2026  }
2027  // If the input value is a chrec scev and we can prove that the value
2028  // did not overflow in the old, smaller type, we can sign extend all of the
2029  // operands (often constants). This allows analysis of something like
2030  // this: for (signed char X = 0; X < 100; ++X) { int Y = X; }
2031  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
2032  if (AR->isAffine()) {
2033  const SCEV *Start = AR->getStart();
2034  const SCEV *Step = AR->getStepRecurrence(*this);
2035  unsigned BitWidth = getTypeSizeInBits(AR->getType());
2036  const Loop *L = AR->getLoop();
2037 
2038  if (!AR->hasNoSignedWrap()) {
2039  auto NewFlags = proveNoWrapViaConstantRanges(AR);
2040  const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags);
2041  }
2042 
2043  // If we have special knowledge that this addrec won't overflow,
2044  // we don't need to do any further analysis.
2045  if (AR->hasNoSignedWrap())
2046  return getAddRecExpr(
2047  getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
2048  getSignExtendExpr(Step, Ty, Depth + 1), L, SCEV::FlagNSW);
2049 
2050  // Check whether the backedge-taken count is SCEVCouldNotCompute.
2051  // Note that this serves two purposes: It filters out loops that are
2052  // simply not analyzable, and it covers the case where this code is
2053  // being called from within backedge-taken count analysis, such that
2054  // attempting to ask for the backedge-taken count would likely result
2055  // in infinite recursion. In the latter case, the analysis code will
2056  // cope with a conservative value, and it will take care to purge
2057  // that value once it has finished.
2058  const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
2059  if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
2060  // Manually compute the final value for AR, checking for
2061  // overflow.
2062 
2063  // Check whether the backedge-taken count can be losslessly cast to
2064  // the addrec's type. The count is always unsigned.
2065  const SCEV *CastedMaxBECount =
2066  getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
2067  const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
2068  CastedMaxBECount, MaxBECount->getType(), Depth);
2069  if (MaxBECount == RecastedMaxBECount) {
2070  Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
2071  // Check whether Start+Step*MaxBECount has no signed overflow.
2072  const SCEV *SMul = getMulExpr(CastedMaxBECount, Step,
2073  SCEV::FlagAnyWrap, Depth + 1);
2074  const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul,
2075  SCEV::FlagAnyWrap,
2076  Depth + 1),
2077  WideTy, Depth + 1);
2078  const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1);
2079  const SCEV *WideMaxBECount =
2080  getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
2081  const SCEV *OperandExtendedAdd =
2082  getAddExpr(WideStart,
2083  getMulExpr(WideMaxBECount,
2084  getSignExtendExpr(Step, WideTy, Depth + 1),
2085  SCEV::FlagAnyWrap, Depth + 1),
2086  SCEV::FlagAnyWrap, Depth + 1);
2087  if (SAdd == OperandExtendedAdd) {
2088  // Cache knowledge of AR NSW, which is propagated to this AddRec.
2089  const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
2090  // Return the expression with the addrec on the outside.
2091  return getAddRecExpr(
2092  getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
2093  Depth + 1),
2094  getSignExtendExpr(Step, Ty, Depth + 1), L,
2095  AR->getNoWrapFlags());
2096  }
2097  // Similar to above, only this time treat the step value as unsigned.
2098  // This covers loops that count up with an unsigned step.
2099  OperandExtendedAdd =
2100  getAddExpr(WideStart,
2101  getMulExpr(WideMaxBECount,
2102  getZeroExtendExpr(Step, WideTy, Depth + 1),
2103  SCEV::FlagAnyWrap, Depth + 1),
2104  SCEV::FlagAnyWrap, Depth + 1);
2105  if (SAdd == OperandExtendedAdd) {
2106  // If AR wraps around then
2107  //
2108  // abs(Step) * MaxBECount > unsigned-max(AR->getType())
2109  // => SAdd != OperandExtendedAdd
2110  //
2111  // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=>
2112  // (SAdd == OperandExtendedAdd => AR is NW)
2113 
2114  const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
2115 
2116  // Return the expression with the addrec on the outside.
2117  return getAddRecExpr(
2118  getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
2119  Depth + 1),
2120  getZeroExtendExpr(Step, Ty, Depth + 1), L,
2121  AR->getNoWrapFlags());
2122  }
2123  }
2124  }
2125 
2126  // Normally, in the cases we can prove no-overflow via a
2127  // backedge guarding condition, we can also compute a backedge
2128  // taken count for the loop. The exceptions are assumptions and
2129  // guards present in the loop -- SCEV is not great at exploiting
2130  // these to compute max backedge taken counts, but can still use
2131  // these to prove lack of overflow. Use this fact to avoid
2132  // doing extra work that may not pay off.
2133 
2134  if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
2135  !AC.assumptions().empty()) {
2136  // If the backedge is guarded by a comparison with the pre-inc
2137  // value the addrec is safe. Also, if the entry is guarded by
2138  // a comparison with the start value and the backedge is
2139  // guarded by a comparison with the post-inc value, the addrec
2140  // is safe.
2141  ICmpInst::Predicate Pred;
2142  const SCEV *OverflowLimit =
2143  getSignedOverflowLimitForStep(Step, &Pred, this);
2144  if (OverflowLimit &&
2145  (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
2146  isKnownOnEveryIteration(Pred, AR, OverflowLimit))) {
2147  // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec.
2148  const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
2149  return getAddRecExpr(
2150  getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
2151  getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
2152  }
2153  }
2154 
2155  // sext({C,+,Step}) --> (sext(D) + sext({C-D,+,Step}))<nuw><nsw>
2156  // if D + (C - D + Step * n) could be proven to not signed wrap
2157  // where D maximizes the number of trailing zeros of (C - D + Step * n)
2158  if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
2159  const APInt &C = SC->getAPInt();
2160  const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
2161  if (D != 0) {
2162  const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
2163  const SCEV *SResidual =
2164  getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
2165  const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
2166  return getAddExpr(SSExtD, SSExtR,
2167  (SCEV::NoWrapFlags)(SCEV::FlagNUW | SCEV::FlagNSW),
2168  Depth + 1);
2169  }
2170  }
2171 
2172  if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
2173  const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
2174  return getAddRecExpr(
2175  getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
2176  getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
2177  }
2178  }
2179 
2180  // If the input value is provably non-negative and we could not simplify
2181  // away the sext, build a zext instead.
2182  if (isKnownNonNegative(Op))
2183  return getZeroExtendExpr(Op, Ty, Depth + 1);
2184 
2185  // The cast wasn't folded; create an explicit cast node.
2186  // Recompute the insert position, as it may have been invalidated.
2187  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2188  SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
2189  Op, Ty);
2190  UniqueSCEVs.InsertNode(S, IP);
2191  addToLoopUseLists(S);
2192  return S;
2193 }
2194 
2195 /// getAnyExtendExpr - Return a SCEV for the given operand extended with
2196 /// unspecified bits out to the given type.
2197 const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
2198  Type *Ty) {
2199  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
2200  "This is not an extending conversion!");
2201  assert(isSCEVable(Ty) &&
2202  "This is not a conversion to a SCEVable type!");
2203  Ty = getEffectiveSCEVType(Ty);
2204 
2205  // Sign-extend negative constants.
2206  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
2207  if (SC->getAPInt().isNegative())
2208  return getSignExtendExpr(Op, Ty);
2209 
2210  // Peel off a truncate cast.
2211  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
2212  const SCEV *NewOp = T->getOperand();
2213  if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
2214  return getAnyExtendExpr(NewOp, Ty);
2215  return getTruncateOrNoop(NewOp, Ty);
2216  }
2217 
2218  // Next try a zext cast. If the cast is folded, use it.
2219  const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
2220  if (!isa<SCEVZeroExtendExpr>(ZExt))
2221  return ZExt;
2222 
2223  // Next try a sext cast. If the cast is folded, use it.
2224  const SCEV *SExt = getSignExtendExpr(Op, Ty);
2225  if (!isa<SCEVSignExtendExpr>(SExt))
2226  return SExt;
2227 
2228  // Force the cast to be folded into the operands of an addrec.
2229  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
2230  SmallVector<const SCEV *, 4> Ops;
2231  for (const SCEV *Op : AR->operands())
2232  Ops.push_back(getAnyExtendExpr(Op, Ty));
2233  return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
2234  }
2235 
2236  // If the expression is obviously signed, use the sext cast value.
2237  if (isa<SCEVSMaxExpr>(Op))
2238  return SExt;
2239 
2240  // Absent any other information, use the zext cast value.
2241  return ZExt;
2242 }
2243 
2244 /// Process the given Ops list, which is a list of operands to be added under
2245 /// the given scale, and update the given map. This is a helper function for
2246 /// getAddExpr. As an example of what it does, given a sequence of operands
2247 /// that would form an add expression like this:
2248 ///
2249 /// m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
2250 ///
2251 /// where A and B are constants, update the map with these values:
2252 ///
2253 /// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
2254 ///
2255 /// and add 13 + A*B*29 to AccumulatedConstant.
2256 /// This will allow getAddExpr to produce this:
2257 ///
2258 /// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
2259 ///
2260 /// This form often exposes folding opportunities that are hidden in
2261 /// the original operand list.
2262 ///
2263 /// Return true iff it appears that any interesting folding opportunities
2264 /// may be exposed. This helps getAddExpr short-circuit extra work in
2265 /// the common case where no interesting opportunities are present, and
2266 /// is also used as a check to avoid infinite recursion.
2267 static bool
2268 CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
2269  SmallVectorImpl<const SCEV *> &NewOps,
2270  APInt &AccumulatedConstant,
2271  const SCEV *const *Ops, size_t NumOperands,
2272  const APInt &Scale,
2273  ScalarEvolution &SE) {
2274  bool Interesting = false;
2275 
2276  // Iterate over the add operands. They are sorted, with constants first.
2277  unsigned i = 0;
2278  while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
2279  ++i;
2280  // Pull a buried constant out to the outside.
2281  if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
2282  Interesting = true;
2283  AccumulatedConstant += Scale * C->getAPInt();
2284  }
2285 
2286  // Next comes everything else. We're especially interested in multiplies
2287  // here, but they're in the middle, so just visit the rest with one loop.
2288  for (; i != NumOperands; ++i) {
2289  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
2290  if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
2291  APInt NewScale =
2292  Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt();
2293  if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
2294  // A multiplication of a constant with another add; recurse.
2295  const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
2296  Interesting |=
2297  CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
2298  Add->op_begin(), Add->getNumOperands(),
2299  NewScale, SE);
2300  } else {
2301  // A multiplication of a constant with some other value. Update
2302  // the map.
2303  SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
2304  const SCEV *Key = SE.getMulExpr(MulOps);
2305  auto Pair = M.insert({Key, NewScale});
2306  if (Pair.second) {
2307  NewOps.push_back(Pair.first->first);
2308  } else {
2309  Pair.first->second += NewScale;
2310  // The map already had an entry for this value, which may indicate
2311  // a folding opportunity.
2312  Interesting = true;
2313  }
2314  }
2315  } else {
2316  // An ordinary operand. Update the map.
2317  std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
2318  M.insert({Ops[i], Scale});
2319  if (Pair.second) {
2320  NewOps.push_back(Pair.first->first);
2321  } else {
2322  Pair.first->second += Scale;
2323  // The map already had an entry for this value, which may indicate
2324  // a folding opportunity.
2325  Interesting = true;
2326  }
2327  }
2328  }
2329 
2330  return Interesting;
2331 }
2332 
2333 // We're trying to construct a SCEV of type `Type' with `Ops' as operands and
2334 // `Flags' as can't-wrap behavior. Infer a more aggressive set of
2335 // can't-overflow flags for the operation if possible.
2336 static SCEV::NoWrapFlags
2337 StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type,
2338  const ArrayRef<const SCEV *> Ops,
2339  SCEV::NoWrapFlags Flags) {
2340  using namespace std::placeholders;
2341 
2342  using OBO = OverflowingBinaryOperator;
2343 
2344  bool CanAnalyze =
2345  Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr;
2346  (void)CanAnalyze;
2347  assert(CanAnalyze && "don't call from other places!");
2348 
2349  int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
2350  SCEV::NoWrapFlags SignOrUnsignWrap =
2351  ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);
2352 
2353  // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
2354  auto IsKnownNonNegative = [&](const SCEV *S) {
2355  return SE->isKnownNonNegative(S);
2356  };
2357 
2358  if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative))
2359  Flags =
2360  ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
2361 
2362  SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);
2363 
2364  if (SignOrUnsignWrap != SignOrUnsignMask &&
2365  (Type == scAddExpr || Type == scMulExpr) && Ops.size() == 2 &&
2366  isa<SCEVConstant>(Ops[0])) {
2367 
2368  auto Opcode = [&] {
2369  switch (Type) {
2370  case scAddExpr:
2371  return Instruction::Add;
2372  case scMulExpr:
2373  return Instruction::Mul;
2374  default:
2375  llvm_unreachable("Unexpected SCEV op.");
2376  }
2377  }();
2378 
2379  const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt();
2380 
2381  // (A <opcode> C) --> (A <opcode> C)<nsw> if the op can't overflow signed.
2382  if (!(SignOrUnsignWrap & SCEV::FlagNSW)) {
2383  auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
2384  Opcode, C, OBO::NoSignedWrap);
2385  if (NSWRegion.contains(SE->getSignedRange(Ops[1])))
2386  Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
2387  }
2388 
2389  // (A <opcode> C) --> (A <opcode> C)<nuw> if the op can't overflow unsigned.
2390  if (!(SignOrUnsignWrap & SCEV::FlagNUW)) {
2391  auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
2392  Opcode, C, OBO::NoUnsignedWrap);
2393  if (NUWRegion.contains(SE->getUnsignedRange(Ops[1])))
2394  Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
2395  }
2396  }
2397 
2398  return Flags;
2399 }
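// For example (a sketch of the constant-operand case above): for an add
// (2 + x) with no flags, makeGuaranteedNoWrapRegion(Add, 2, NoSignedWrap)
// describes every value A for which A + 2 cannot overflow signed; if the
// signed range of x, say [0, 100), lies inside it, the add gains <nsw>.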
2400 
2401 bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) {
2402  return isLoopInvariant(S, L) && properlyDominates(S, L->getHeader());
2403 }
2404 
2405 /// Get a canonical add expression, or something simpler if possible.
2406 const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
2407  SCEV::NoWrapFlags Flags,
2408  unsigned Depth) {
2409  assert(!(Flags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
2410  "only nuw or nsw allowed");
2411  assert(!Ops.empty() && "Cannot get empty add!");
2412  if (Ops.size() == 1) return Ops[0];
2413 #ifndef NDEBUG
2414  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2415  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2416  assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2417  "SCEVAddExpr operand types don't match!");
2418 #endif
2419 
2420  // Sort by complexity; this groups all similar expression types together.
2421  GroupByComplexity(Ops, &LI, DT);
2422 
2423  Flags = StrengthenNoWrapFlags(this, scAddExpr, Ops, Flags);
2424 
2425  // If there are any constants, fold them together.
2426  unsigned Idx = 0;
2427  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2428  ++Idx;
2429  assert(Idx < Ops.size());
2430  while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2431  // We found two constants, fold them together!
2432  Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt());
2433  if (Ops.size() == 2) return Ops[0];
2434  Ops.erase(Ops.begin()+1); // Erase the folded element
2435  LHSC = cast<SCEVConstant>(Ops[0]);
2436  }
2437 
2438  // If we are left with a constant zero being added, strip it off.
2439  if (LHSC->getValue()->isZero()) {
2440  Ops.erase(Ops.begin());
2441  --Idx;
2442  }
2443 
2444  if (Ops.size() == 1) return Ops[0];
2445  }
2446 
2447  // Limit recursion depth.
2448  if (Depth > MaxArithDepth || hasHugeExpression(Ops))
2449  return getOrCreateAddExpr(Ops, Flags);
2450 
2451  // Okay, check to see if the same value occurs in the operand list more than
2452  // once. If so, merge them together into a multiply expression. Since we
2453  // sorted the list, these values are required to be adjacent.
2454  Type *Ty = Ops[0]->getType();
2455  bool FoundMatch = false;
2456  for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
2457  if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2
2458  // Scan ahead to count how many equal operands there are.
2459  unsigned Count = 2;
2460  while (i+Count != e && Ops[i+Count] == Ops[i])
2461  ++Count;
2462  // Merge the values into a multiply.
2463  const SCEV *Scale = getConstant(Ty, Count);
2464  const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
2465  if (Ops.size() == Count)
2466  return Mul;
2467  Ops[i] = Mul;
2468  Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
2469  --i; e -= Count - 1;
2470  FoundMatch = true;
2471  }
2472  if (FoundMatch)
2473  return getAddExpr(Ops, Flags, Depth + 1);
2474 
2475  // Check for truncates. If all the operands are truncated from the same
2476  // type, see if factoring out the truncate would permit the result to be
2477  // folded. e.g., n*trunc(x) + m*trunc(y) --> trunc(trunc(n)*x + trunc(m)*y)
2478  // if the contents of the resulting outer trunc fold to something simple.
2479  auto FindTruncSrcType = [&]() -> Type * {
2480  // We're ultimately looking to fold an add of truncs and muls of only
2481  // constants and truncs, so if we find any other types of SCEV
2482  // as operands of the add then we bail and return nullptr here.
2483  // Otherwise, we return the type of the operand of a trunc that we find.
2484  if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx]))
2485  return T->getOperand()->getType();
2486  if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
2487  const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1);
2488  if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp))
2489  return T->getOperand()->getType();
2490  }
2491  return nullptr;
2492  };
2493  if (auto *SrcType = FindTruncSrcType()) {
2494  SmallVector<const SCEV *, 8> LargeOps;
2495  bool Ok = true;
2496  // Check all the operands to see if they can be represented in the
2497  // source type of the truncate.
2498  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
2499  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
2500  if (T->getOperand()->getType() != SrcType) {
2501  Ok = false;
2502  break;
2503  }
2504  LargeOps.push_back(T->getOperand());
2505  } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
2506  LargeOps.push_back(getAnyExtendExpr(C, SrcType));
2507  } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
2508  SmallVector<const SCEV *, 8> LargeMulOps;
2509  for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
2510  if (const SCEVTruncateExpr *T =
2511  dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
2512  if (T->getOperand()->getType() != SrcType) {
2513  Ok = false;
2514  break;
2515  }
2516  LargeMulOps.push_back(T->getOperand());
2517  } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) {
2518  LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
2519  } else {
2520  Ok = false;
2521  break;
2522  }
2523  }
2524  if (Ok)
2525  LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1));
2526  } else {
2527  Ok = false;
2528  break;
2529  }
2530  }
2531  if (Ok) {
2532  // Evaluate the expression in the larger type.
2533  const SCEV *Fold = getAddExpr(LargeOps, SCEV::FlagAnyWrap, Depth + 1);
2534  // If it folds to something simple, use it. Otherwise, don't.
2535  if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
2536  return getTruncateExpr(Fold, Ty);
2537  }
2538  }
2539 
2540  // Skip past any other cast SCEVs.
2541  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
2542  ++Idx;
2543 
2544  // If there are add operands, they would be next.
2545  if (Idx < Ops.size()) {
2546  bool DeletedAdd = false;
2547  while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
2548  if (Ops.size() > AddOpsInlineThreshold ||
2549  Add->getNumOperands() > AddOpsInlineThreshold)
2550  break;
2551  // If we have an add, expand the add operands onto the end of the operands
2552  // list.
2553  Ops.erase(Ops.begin()+Idx);
2554  Ops.append(Add->op_begin(), Add->op_end());
2555  DeletedAdd = true;
2556  }
2557 
2558  // If we deleted at least one add, we added operands to the end of the list,
2559  // and they are not necessarily sorted. Recurse to resort and resimplify
2560  // any operands we just acquired.
2561  if (DeletedAdd)
2562  return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2563  }
2564 
2565  // Skip over the add expression until we get to a multiply.
2566  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
2567  ++Idx;
2568 
2569  // Check to see if there are any folding opportunities present with
2570  // operands multiplied by constant values.
2571  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
2572  uint64_t BitWidth = getTypeSizeInBits(Ty);
2573  DenseMap<const SCEV *, APInt> M;
2574  SmallVector<const SCEV *, 8> NewOps;
2575  APInt AccumulatedConstant(BitWidth, 0);
2576  if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
2577  Ops.data(), Ops.size(),
2578  APInt(BitWidth, 1), *this)) {
2579  struct APIntCompare {
2580  bool operator()(const APInt &LHS, const APInt &RHS) const {
2581  return LHS.ult(RHS);
2582  }
2583  };
2584 
2585  // Some interesting folding opportunity is present, so it's worthwhile to
2586  // re-generate the operands list. Group the operands by constant scale,
2587  // to avoid multiplying by the same constant scale multiple times.
2588  std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
2589  for (const SCEV *NewOp : NewOps)
2590  MulOpLists[M.find(NewOp)->second].push_back(NewOp);
2591  // Re-generate the operands list.
2592  Ops.clear();
2593  if (AccumulatedConstant != 0)
2594  Ops.push_back(getConstant(AccumulatedConstant));
2595  for (auto &MulOp : MulOpLists)
2596  if (MulOp.first != 0)
2597  Ops.push_back(getMulExpr(
2598  getConstant(MulOp.first),
2599  getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1),
2600  SCEV::FlagAnyWrap, Depth + 1));
2601  if (Ops.empty())
2602  return getZero(Ty);
2603  if (Ops.size() == 1)
2604  return Ops[0];
2605  return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2606  }
2607  }
2608 
2609  // If we are adding something to a multiply expression, make sure the
2610  // something is not already an operand of the multiply. If so, merge it into
2611  // the multiply.
2612  for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
2613  const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
2614  for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
2615  const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
2616  if (isa<SCEVConstant>(MulOpSCEV))
2617  continue;
2618  for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
2619  if (MulOpSCEV == Ops[AddOp]) {
2620  // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1))
2621  const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
2622  if (Mul->getNumOperands() != 2) {
2623  // If the multiply has more than two operands, we must get the
2624  // Y*Z term.
2625  SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
2626  Mul->op_begin()+MulOp);
2627  MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
2628  InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
2629  }
2630  SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul};
2631  const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
2632  const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV,
2633  SCEV::FlagAnyWrap, Depth + 1);
2634  if (Ops.size() == 2) return OuterMul;
2635  if (AddOp < Idx) {
2636  Ops.erase(Ops.begin()+AddOp);
2637  Ops.erase(Ops.begin()+Idx-1);
2638  } else {
2639  Ops.erase(Ops.begin()+Idx);
2640  Ops.erase(Ops.begin()+AddOp-1);
2641  }
2642  Ops.push_back(OuterMul);
2643  return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2644  }
2645 
2646  // Check this multiply against other multiplies being added together.
2647  for (unsigned OtherMulIdx = Idx+1;
2648  OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
2649  ++OtherMulIdx) {
2650  const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
2651  // If MulOp occurs in OtherMul, we can fold the two multiplies
2652  // together.
2653  for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
2654  OMulOp != e; ++OMulOp)
2655  if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
2656  // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
2657  const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
2658  if (Mul->getNumOperands() != 2) {
2659  SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
2660  Mul->op_begin()+MulOp);
2661  MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
2662  InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
2663  }
2664  const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
2665  if (OtherMul->getNumOperands() != 2) {
2666  SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
2667  OtherMul->op_begin()+OMulOp);
2668  MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end());
2669  InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
2670  }
2671  SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2};
2672  const SCEV *InnerMulSum =
2673  getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
2674  const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum,
2675  SCEV::FlagAnyWrap, Depth + 1);
2676  if (Ops.size() == 2) return OuterMul;
2677  Ops.erase(Ops.begin()+Idx);
2678  Ops.erase(Ops.begin()+OtherMulIdx-1);
2679  Ops.push_back(OuterMul);
2680  return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2681  }
2682  }
2683  }
2684  }
2685 
2686  // If there are any add recurrences in the operands list, see if any other
2687  // added values are loop invariant. If so, we can fold them into the
2688  // recurrence.
2689  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
2690  ++Idx;
2691 
2692  // Scan over all recurrences, trying to fold loop invariants into them.
2693  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
2694  // Scan all of the other operands to this add and add them to the vector if
2695  // they are loop invariant w.r.t. the recurrence.
2696  SmallVector<const SCEV *, 8> LIOps;
2697  const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
2698  const Loop *AddRecLoop = AddRec->getLoop();
2699  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2700  if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
2701  LIOps.push_back(Ops[i]);
2702  Ops.erase(Ops.begin()+i);
2703  --i; --e;
2704  }
2705 
2706  // If we found some loop invariants, fold them into the recurrence.
2707  if (!LIOps.empty()) {
2708  // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step}
2709  LIOps.push_back(AddRec->getStart());
2710 
2711  SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
2712  AddRec->op_end());
2713  // This follows from the fact that the no-wrap flags on the outer add
2714  // expression are applicable on the 0th iteration, when the add recurrence
2715  // will be equal to its start value.
2716  AddRecOps[0] = getAddExpr(LIOps, Flags, Depth + 1);
2717 
2718  // Build the new addrec. Propagate the NUW and NSW flags if both the
2719  // outer add and the inner addrec are guaranteed to have no overflow.
2720  // Always propagate NW.
2721  Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
2722  const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);
2723 
2724  // If all of the other operands were loop invariant, we are done.
2725  if (Ops.size() == 1) return NewRec;
2726 
2727  // Otherwise, add the folded AddRec by the non-invariant parts.
2728  for (unsigned i = 0;; ++i)
2729  if (Ops[i] == AddRec) {
2730  Ops[i] = NewRec;
2731  break;
2732  }
2733  return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2734  }
2735 
2736  // Okay, if there weren't any loop invariants to be folded, check to see if
2737  // there are multiple AddRec's with the same loop induction variable being
2738  // added together. If so, we can fold them.
2739  for (unsigned OtherIdx = Idx+1;
2740  OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2741  ++OtherIdx) {
2742  // We expect the AddRecExprs to be sorted in reverse dominance order,
2743  // so that the first AddRecExpr found is dominated by all others.
2744  assert(DT.dominates(
2745  cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(),
2746  AddRec->getLoop()->getHeader()) &&
2747  "AddRecExprs are not sorted in reverse dominance order?");
2748  if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
2749  // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L>
2750  SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
2751  AddRec->op_end());
2752  for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2753  ++OtherIdx) {
2754  const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
2755  if (OtherAddRec->getLoop() == AddRecLoop) {
2756  for (unsigned i = 0, e = OtherAddRec->getNumOperands();
2757  i != e; ++i) {
2758  if (i >= AddRecOps.size()) {
2759  AddRecOps.append(OtherAddRec->op_begin()+i,
2760  OtherAddRec->op_end());
2761  break;
2762  }
2763  SmallVector<const SCEV *, 2> TwoOps = {
2764  AddRecOps[i], OtherAddRec->getOperand(i)};
2765  AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
2766  }
2767  Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
2768  }
2769  }
2770  // Step size has changed, so we cannot guarantee no self-wraparound.
2771  Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
2772  return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2773  }
2774  }
2775 
2776  // Otherwise couldn't fold anything into this recurrence. Move onto the
2777  // next one.
2778  }
2779 
2780  // Okay, it looks like we really DO need an add expr. Check to see if we
2781  // already have one, otherwise create a new one.
2782  return getOrCreateAddExpr(Ops, Flags);
2783 }
2784 
2785 const SCEV *
2786 ScalarEvolution::getOrCreateAddExpr(ArrayRef<const SCEV *> Ops,
2787  SCEV::NoWrapFlags Flags) {
2788  FoldingSetNodeID ID;
2789  ID.AddInteger(scAddExpr);
2790  for (const SCEV *Op : Ops)
2791  ID.AddPointer(Op);
2792  void *IP = nullptr;
2793  SCEVAddExpr *S =
2794  static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2795  if (!S) {
2796  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2797  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2798  S = new (SCEVAllocator)
2799  SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size());
2800  UniqueSCEVs.InsertNode(S, IP);
2801  addToLoopUseLists(S);
2802  }
2803  S->setNoWrapFlags(Flags);
2804  return S;
2805 }
2806 
2807 const SCEV *
2808 ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops,
2809  const Loop *L, SCEV::NoWrapFlags Flags) {
2810  FoldingSetNodeID ID;
2811  ID.AddInteger(scAddRecExpr);
2812  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2813  ID.AddPointer(Ops[i]);
2814  ID.AddPointer(L);
2815  void *IP = nullptr;
2816  SCEVAddRecExpr *S =
2817  static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2818  if (!S) {
2819  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2820  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2821  S = new (SCEVAllocator)
2822  SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Ops.size(), L);
2823  UniqueSCEVs.InsertNode(S, IP);
2824  addToLoopUseLists(S);
2825  }
2826  S->setNoWrapFlags(Flags);
2827  return S;
2828 }
2829 
2830 const SCEV *
2831 ScalarEvolution::getOrCreateMulExpr(ArrayRef<const SCEV *> Ops,
2832  SCEV::NoWrapFlags Flags) {
2833  FoldingSetNodeID ID;
2834  ID.AddInteger(scMulExpr);
2835  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2836  ID.AddPointer(Ops[i]);
2837  void *IP = nullptr;
2838  SCEVMulExpr *S =
2839  static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2840  if (!S) {
2841  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2842  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2843  S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
2844  O, Ops.size());
2845  UniqueSCEVs.InsertNode(S, IP);
2846  addToLoopUseLists(S);
2847  }
2848  S->setNoWrapFlags(Flags);
2849  return S;
2850 }
2851 
2852 static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
2853  uint64_t k = i*j;
2854  if (j > 1 && k / j != i) Overflow = true;
2855  return k;
2856 }
2857 
2858 /// Compute the result of "n choose k", the binomial coefficient. If an
2859 /// intermediate computation overflows, Overflow will be set and the return will
2860 /// be garbage. Overflow is not cleared in the absence of overflow.
2861 static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
2862  // We use the multiplicative formula:
2863  // n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
2864  // At each iteration, we take the n-th term of the numeral and divide by the
2865  // (k-n)th term of the denominator. This division will always produce an
2866  // integral result, and helps reduce the chance of overflow in the
2867  // intermediate computations. However, we can still overflow even when the
2868  // final result would fit.
2869 
2870  if (n == 0 || n == k) return 1;
2871  if (k > n) return 0;
2872 
2873  if (k > n/2)
2874  k = n-k;
2875 
2876  uint64_t r = 1;
2877  for (uint64_t i = 1; i <= k; ++i) {
2878  r = umul_ov(r, n-(i-1), Overflow);
2879  r /= i;
2880  }
2881  return r;
2882 }
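// For example, Choose(6, 2, Ov) performs r = (1 * 6) / 1 = 6 and then
// r = (6 * 5) / 2 = 15; each division is exact because the product of i
// consecutive integers is always divisible by i!.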
2883 
2884 /// Determine if any of the operands in this SCEV are a constant or if
2885 /// any of the add or multiply expressions in this SCEV contain a constant.
2886 static bool containsConstantInAddMulChain(const SCEV *StartExpr) {
2887  struct FindConstantInAddMulChain {
2888  bool FoundConstant = false;
2889 
2890  bool follow(const SCEV *S) {
2891  FoundConstant |= isa<SCEVConstant>(S);
2892  return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S);
2893  }
2894 
2895  bool isDone() const {
2896  return FoundConstant;
2897  }
2898  };
2899 
2900  FindConstantInAddMulChain F;
2901  SCEVTraversal<FindConstantInAddMulChain> ST(F);
2902  ST.visitAll(StartExpr);
2903  return F.FoundConstant;
2904 }
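// For example (illustrative), for (3 + %x) * %y the traversal descends
// through the multiply into the inner add and reports the constant 3,
// while for umax(3, %x) it stops at the umax and reports no constant.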
2905 
2906 /// Get a canonical multiply expression, or something simpler if possible.
2907 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
2908  SCEV::NoWrapFlags Flags,
2909  unsigned Depth) {
2910  assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) &&
2911  "only nuw or nsw allowed");
2912  assert(!Ops.empty() && "Cannot get empty mul!");
2913  if (Ops.size() == 1) return Ops[0];
2914 #ifndef NDEBUG
2915  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2916  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2917  assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2918  "SCEVMulExpr operand types don't match!");
2919 #endif
2920 
2921  // Sort by complexity; this groups all similar expression types together.
2922  GroupByComplexity(Ops, &LI, DT);
2923 
2924  Flags = StrengthenNoWrapFlags(this, scMulExpr, Ops, Flags);
2925 
2926  // Limit recursion depth.
2927  if (Depth > MaxArithDepth || hasHugeExpression(Ops))
2928  return getOrCreateMulExpr(Ops, Flags);
2929 
2930  // If there are any constants, fold them together.
2931  unsigned Idx = 0;
2932  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2933 
2934  if (Ops.size() == 2)
2935  // C1*(C2+V) -> C1*C2 + C1*V
2936  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
2937  // If any of Add's ops are Adds or Muls with a constant, apply this
2938  // transformation as well.
2939  //
2940  // TODO: There are some cases where this transformation is not
2941  // profitable; for example, Add = (C0 + X) * Y + Z. Maybe the scope of
2942  // this transformation should be narrowed down.
2943  if (Add->getNumOperands() == 2 && containsConstantInAddMulChain(Add))
2944  return getAddExpr(getMulExpr(LHSC, Add->getOperand(0),
2945  SCEV::FlagAnyWrap, Depth + 1),
2946  getMulExpr(LHSC, Add->getOperand(1),
2947  SCEV::FlagAnyWrap, Depth + 1),
2948  SCEV::FlagAnyWrap, Depth + 1);
2949 
2950  ++Idx;
2951  while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2952  // We found two constants, fold them together!
2953  ConstantInt *Fold =
2954  ConstantInt::get(getContext(), LHSC->getAPInt() * RHSC->getAPInt());
2955  Ops[0] = getConstant(Fold);
2956  Ops.erase(Ops.begin()+1); // Erase the folded element
2957  if (Ops.size() == 1) return Ops[0];
2958  LHSC = cast<SCEVConstant>(Ops[0]);
2959  }
2960 
2961  // If we are left with a constant one being multiplied, strip it off.
2962  if (cast<SCEVConstant>(Ops[0])->getValue()->isOne()) {
2963  Ops.erase(Ops.begin());
2964  --Idx;
2965  } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
2966  // If we have a multiply of zero, it will always be zero.
2967  return Ops[0];
2968  } else if (Ops[0]->isAllOnesValue()) {
2969  // If we have a mul by -1 of an add, try distributing the -1 among the
2970  // add operands.
2971  if (Ops.size() == 2) {
2972  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
2973  SmallVector<const SCEV *, 4> NewOps;
2974  bool AnyFolded = false;
2975  for (const SCEV *AddOp : Add->operands()) {
2976  const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap,
2977  Depth + 1);
2978  if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
2979  NewOps.push_back(Mul);
2980  }
2981  if (AnyFolded)
2982  return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1);
2983  } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
2984  // Negation preserves a recurrence's no self-wrap property.
2985  SmallVector<const SCEV *, 4> Operands;
2986  for (const SCEV *AddRecOp : AddRec->operands())
2987  Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap,
2988  Depth + 1));
2989 
2990  return getAddRecExpr(Operands, AddRec->getLoop(),
2991  AddRec->getNoWrapFlags(SCEV::FlagNW));
2992  }
2993  }
2994  }
2995 
2996  if (Ops.size() == 1)
2997  return Ops[0];
2998  }
2999 
3000  // Skip over the add expressions until we get to a multiply.
3001  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
3002  ++Idx;
3003 
3004  // If there are mul operands inline them all into this expression.
3005  if (Idx < Ops.size()) {
3006  bool DeletedMul = false;
3007  while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
3008  if (Ops.size() > MulOpsInlineThreshold)
3009  break;
3010  // If we have a mul, expand the mul operands onto the end of the
3011  // operands list.
3012  Ops.erase(Ops.begin()+Idx);
3013  Ops.append(Mul->op_begin(), Mul->op_end());
3014  DeletedMul = true;
3015  }
3016 
3017  // If we deleted at least one mul, we added operands to the end of the
3018  // list, and they are not necessarily sorted. Recurse to re-sort and
3019  // resimplify any operands we just acquired.
3020  if (DeletedMul)
3021  return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
3022  }
3023 
3024  // If there are any add recurrences in the operands list, see if any other
3025  // multiplied values are loop invariant. If so, we can fold them into the
3026  // recurrence.
3027  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
3028  ++Idx;
3029 
3030  // Scan over all recurrences, trying to fold loop invariants into them.
3031  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
3032  // Scan all of the other operands to this mul and add them to the vector
3033  // if they are loop invariant w.r.t. the recurrence.
3034  SmallVector<const SCEV *, 8> LIOps;
3035  const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
3036  const Loop *AddRecLoop = AddRec->getLoop();
3037  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
3038  if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
3039  LIOps.push_back(Ops[i]);
3040  Ops.erase(Ops.begin()+i);
3041  --i; --e;
3042  }
3043 
3044  // If we found some loop invariants, fold them into the recurrence.
3045  if (!LIOps.empty()) {
3046  // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step}
3047  SmallVector<const SCEV *, 4> NewOps;
3048  NewOps.reserve(AddRec->getNumOperands());
3049  const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1);
3050  for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
3051  NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i),
3052  SCEV::FlagAnyWrap, Depth + 1));
3053 
3054  // Build the new addrec. Propagate the NUW and NSW flags if both the
3055  // outer mul and the inner addrec are guaranteed to have no overflow.
3056  //
3057  // No self-wrap cannot be guaranteed after changing the step size, but
3058  // will be inferred if either NUW or NSW is true.
3059  Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW));
3060  const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags);
3061 
3062  // If all of the other operands were loop invariant, we are done.
3063  if (Ops.size() == 1) return NewRec;
3064 
3065  // Otherwise, multiply the folded AddRec by the non-invariant parts.
3066  for (unsigned i = 0;; ++i)
3067  if (Ops[i] == AddRec) {
3068  Ops[i] = NewRec;
3069  break;
3070  }
3071  return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
3072  }
3073 
3074  // Okay, if there weren't any loop invariants to be folded, check to see
3075  // if there are multiple AddRec's with the same loop induction variable
3076  // being multiplied together. If so, we can fold them.
3077 
3078  // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
3079  // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
3080  // choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z
3081  // ]]],+,...up to x=2n}.
3082  // Note that the arguments to choose() are always integers with values
3083  // known at compile time, never SCEV objects.
3084  //
3085  // The implementation avoids pointless extra computations when the two
3086  // addrec's are of different length (mathematically, it's equivalent to
3087  // an infinite stream of zeros on the right).
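  // A small worked instance of the formula (the affine case, n = 1):
  //   {A1,+,A2}<L> * {B1,+,B2}<L>
  //     = {A1*B1, +, A1*B2 + A2*B1 + A2*B2, +, 2*A2*B2}<L>
  // since (A1 + A2*i) * (B1 + B2*i) is quadratic in the iteration number i,
  // and a quadratic is exactly a three-operand add recurrence.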
3088  bool OpsModified = false;
3089  for (unsigned OtherIdx = Idx+1;
3090  OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
3091  ++OtherIdx) {
3092  const SCEVAddRecExpr *OtherAddRec =
3093  dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
3094  if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop)
3095  continue;
3096 
3097  // Limit max number of arguments to avoid creation of unreasonably big
3098  // SCEVAddRecs with very complex operands.
3099  if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 >
3100  MaxAddRecSize || isHugeExpression(AddRec) ||
3101  isHugeExpression(OtherAddRec))
3102  continue;
3103 
3104  bool Overflow = false;
3105  Type *Ty = AddRec->getType();
3106  bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
3107  SmallVector<const SCEV*, 7> AddRecOps;
3108  for (int x = 0, xe = AddRec->getNumOperands() +
3109  OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
3110  SmallVector<const SCEV *, 7> SumOps;
3111  for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
3112  uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
3113  for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
3114  ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
3115  z < ze && !Overflow; ++z) {
3116  uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
3117  uint64_t Coeff;
3118  if (LargerThan64Bits)
3119  Coeff = umul_ov(Coeff1, Coeff2, Overflow);
3120  else
3121  Coeff = Coeff1*Coeff2;
3122  const SCEV *CoeffTerm = getConstant(Ty, Coeff);
3123  const SCEV *Term1 = AddRec->getOperand(y-z);
3124  const SCEV *Term2 = OtherAddRec->getOperand(z);
3125  SumOps.push_back(getMulExpr(CoeffTerm, Term1, Term2,
3126  SCEV::FlagAnyWrap, Depth + 1));
3127  }
3128  }
3129  if (SumOps.empty())
3130  SumOps.push_back(getZero(Ty));
3131  AddRecOps.push_back(getAddExpr(SumOps, SCEV::FlagAnyWrap, Depth + 1));
3132  }
3133  if (!Overflow) {
3134  const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRecLoop,
3135  SCEV::FlagAnyWrap);
3136  if (Ops.size() == 2) return NewAddRec;
3137  Ops[Idx] = NewAddRec;
3138  Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
3139  OpsModified = true;
3140  AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
3141  if (!AddRec)
3142  break;
3143  }
3144  }
3145  if (OpsModified)
3146  return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
3147 
3148  // Otherwise we couldn't fold anything into this recurrence. Move on to the
3149  // next one.
3150  }
3151 
3152  // Okay, it looks like we really DO need a mul expr. Check to see if we
3153  // already have one, otherwise create a new one.
3154  return getOrCreateMulExpr(Ops, Flags);
3155 }
3156 
3157 /// Represents an unsigned remainder expression based on unsigned division.
3158 const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS,
3159  const SCEV *RHS) {
3160  assert(getEffectiveSCEVType(LHS->getType()) ==
3161  getEffectiveSCEVType(RHS->getType()) &&
3162  "SCEVURemExpr operand types don't match!");
3163 
3164  // Short-circuit easy cases
3165  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
3166  // If constant is one, the result is trivial
3167  if (RHSC->getValue()->isOne())
3168  return getZero(LHS->getType()); // X urem 1 --> 0
3169 
3170  // If constant is a power of two, fold into a zext(trunc(LHS)).
3171  if (RHSC->getAPInt().isPowerOf2()) {
3172  Type *FullTy = LHS->getType();
3173  Type *TruncTy =
3174  IntegerType::get(getContext(), RHSC->getAPInt().logBase2());
3175  return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy);
3176  }
3177  }
3178 
3179  // Fall back to the identity: %x urem %y == %x -<nuw> ((%x udiv %y) *<nuw> %y)
3180  const SCEV *UDiv = getUDivExpr(LHS, RHS);
3181  const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW);
3182  return getMinusSCEV(LHS, Mult, SCEV::FlagNUW);
3183 }
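 // For illustration, with i64 operands the folds above give roughly:
 //   getURemExpr(X, 1) --> 0
 //   getURemExpr(X, 8) --> (zext i3 (trunc i64 X to i3) to i64),
 //                         i.e. the low log2(8) = 3 bits of X
 //   getURemExpr(X, Y) --> X - ((X /u Y) * Y) in the general case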
3184 
3185 /// Get a canonical unsigned division expression, or something simpler if
3186 /// possible.
3187 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
3188  const SCEV *RHS) {
3189  assert(getEffectiveSCEVType(LHS->getType()) ==
3190  getEffectiveSCEVType(RHS->getType()) &&
3191  "SCEVUDivExpr operand types don't match!");
3192 
3193  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
3194  if (RHSC->getValue()->isOne())
3195  return LHS; // X udiv 1 --> X
3196  // If the denominator is zero, the result of the udiv is undefined. Don't
3197  // try to analyze it, because the resolution chosen here may differ from
3198  // the resolution chosen in other parts of the compiler.
3199  if (!RHSC->getValue()->isZero()) {
3200  // Determine if the division can be folded into the operands
3201  // of LHS.
3202  // TODO: Generalize this to non-constants by using known-bits information.
3203  Type *Ty = LHS->getType();
3204  unsigned LZ = RHSC->getAPInt().countLeadingZeros();
3205  unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
3206  // For non-power-of-two values, effectively round the value up to the
3207  // nearest power of two.
3208  if (!RHSC->getAPInt().isPowerOf2())
3209  ++MaxShiftAmt;
3210  IntegerType *ExtTy =
3211  IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
3212  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
3213  if (const SCEVConstant *Step =
3214  dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
3215  // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
3216  const APInt &StepInt = Step->getAPInt();
3217  const APInt &DivInt = RHSC->getAPInt();
3218  if (!StepInt.urem(DivInt) &&
3219  getZeroExtendExpr(AR, ExtTy) ==
3220  getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
3221  getZeroExtendExpr(Step, ExtTy),
3222  AR->getLoop(), SCEV::FlagAnyWrap)) {
3223  SmallVector<const SCEV *, 4> Operands;
3224  for (const SCEV *Op : AR->operands())
3225  Operands.push_back(getUDivExpr(Op, RHS));
3226  return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW);
3227  }
3228  // Get a canonical UDivExpr for a recurrence.
3229  // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
3230  // We can currently only fold X%N if X is constant.
3231  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
3232  if (StartC && !DivInt.urem(StepInt) &&
3233  getZeroExtendExpr(AR, ExtTy) ==
3234  getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
3235  getZeroExtendExpr(Step, ExtTy),
3236  AR->getLoop(), SCEV::FlagAnyWrap)) {
3237  const APInt &StartInt = StartC->getAPInt();
3238  const APInt &StartRem = StartInt.urem(StepInt);
3239  if (StartRem != 0)
3240  LHS = getAddRecExpr(getConstant(StartInt - StartRem), Step,
3241  AR->getLoop(), SCEV::FlagNW);
3242  }
3243  }
3244  // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
3245  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
3246  SmallVector<const SCEV *, 4> Operands;
3247  for (const SCEV *Op : M->operands())
3248  Operands.push_back(getZeroExtendExpr(Op, ExtTy));
3249  if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
3250  // Find an operand that's safely divisible.
3251  for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
3252  const SCEV *Op = M->getOperand(i);
3253  const SCEV *Div = getUDivExpr(Op, RHSC);
3254  if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
3255  Operands = SmallVector<const SCEV *, 4>(M->op_begin(),
3256  M->op_end());
3257  Operands[i] = Div;
3258  return getMulExpr(Operands);
3259  }
3260  }
3261  }
3262 
3263  // (A/B)/C --> A/(B*C) if safe and B*C can be folded.
3264  if (const SCEVUDivExpr *OtherDiv = dyn_cast<SCEVUDivExpr>(LHS)) {
3265  if (auto *DivisorConstant =
3266  dyn_cast<SCEVConstant>(OtherDiv->getRHS())) {
3267  bool Overflow = false;
3268  APInt NewRHS =
3269  DivisorConstant->getAPInt().umul_ov(RHSC->getAPInt(), Overflow);
3270  if (Overflow) {
3271  return getConstant(RHSC->getType(), 0, false);
3272  }
3273  return getUDivExpr(OtherDiv->getLHS(), getConstant(NewRHS));
3274  }
3275  }
3276 
3277  // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
3278  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
3279  SmallVector<const SCEV *, 4> Operands;
3280  for (const SCEV *Op : A->operands())
3281  Operands.push_back(getZeroExtendExpr(Op, ExtTy));
3282  if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
3283  Operands.clear();
3284  for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
3285  const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
3286  if (isa<SCEVUDivExpr>(Op) ||
3287  getMulExpr(Op, RHS) != A->getOperand(i))
3288  break;
3289  Operands.push_back(Op);
3290  }
3291  if (Operands.size() == A->getNumOperands())
3292  return getAddExpr(Operands);
3293  }
3294  }
3295 
3296  // Fold if both operands are constant.
3297  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
3298  Constant *LHSCV = LHSC->getValue();
3299  Constant *RHSCV = RHSC->getValue();
3300  return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
3301  RHSCV)));
3302  }
3303  }
3304  }
3305 
3306  FoldingSetNodeID ID;
3307  ID.AddInteger(scUDivExpr);
3308  ID.AddPointer(LHS);
3309  ID.AddPointer(RHS);
3310  void *IP = nullptr;
3311  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
3312  SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
3313  LHS, RHS);
3314  UniqueSCEVs.InsertNode(S, IP);
3315  addToLoopUseLists(S);
3316  return S;
3317 }
3318 
3319 static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) {
3320  APInt A = C1->getAPInt().abs();
3321  APInt B = C2->getAPInt().abs();
3322  uint32_t ABW = A.getBitWidth();
3323  uint32_t BBW = B.getBitWidth();
3324 
3325  if (ABW > BBW)
3326  B = B.zext(ABW);
3327  else if (ABW < BBW)
3328  A = A.zext(BBW);
3329 
3330  return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B));
3331 }
3332 
3333 /// Get a canonical unsigned division expression, or something simpler if
3334 /// possible. There is no representation for an exact udiv in SCEV IR, but we
3335 /// can attempt to remove factors from the LHS and RHS. We can't do this when
3336 /// it's not exact because the udiv may be clearing bits.
3337 const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
3338  const SCEV *RHS) {
3339  // TODO: we could try to find factors in all sorts of things, but for now we
3340  // just deal with u/exact (multiply, constant). See SCEVDivision towards the
3341  // end of this file for inspiration.
3342 
3343  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS);
3344  if (!Mul || !Mul->hasNoUnsignedWrap())
3345  return getUDivExpr(LHS, RHS);
3346 
3347  if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) {
3348  // If the mulexpr multiplies by a constant, then that constant must be the
3349  // first element of the mulexpr.
3350  if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
3351  if (LHSCst == RHSCst) {
3352  SmallVector<const SCEV *, 2> Operands;
3353  Operands.append(Mul->op_begin() + 1, Mul->op_end());
3354  return getMulExpr(Operands);
3355  }
3356 
3357  // We can't just assume that LHSCst divides RHSCst cleanly; it could be
3358  // that there's a factor provided by one of the other terms. We need to
3359  // check.
3360  APInt Factor = gcd(LHSCst, RHSCst);
3361  if (!Factor.isIntN(1)) {
3362  LHSCst =
3363  cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor)));
3364  RHSCst =
3365  cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor)));
3366  SmallVector<const SCEV *, 2> Operands;
3367  Operands.push_back(LHSCst);
3368  Operands.append(Mul->op_begin() + 1, Mul->op_end());
3369  LHS = getMulExpr(Operands);
3370  RHS = RHSCst;
3371  Mul = dyn_cast<SCEVMulExpr>(LHS);
3372  if (!Mul)
3373  return getUDivExactExpr(LHS, RHS);
3374  }
3375  }
3376  }
3377 
3378  for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
3379  if (Mul->getOperand(i) == RHS) {
3380  SmallVector<const SCEV *, 2> Operands;
3381  Operands.append(Mul->op_begin(), Mul->op_begin() + i);
3382  Operands.append(Mul->op_begin() + i + 1, Mul->op_end());
3383  return getMulExpr(Operands);
3384  }
3385  }
3386 
3387  return getUDivExpr(LHS, RHS);
3388 }
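 // For illustration, assuming the multiplies below carry <nuw> so factors
 // may legally be cancelled:
 //   getUDivExactExpr(6 * X, 3) --> 2 * X          // gcd(6, 3) divided out
 //   getUDivExactExpr(X * Y, Y) --> X              // matching operand removed
 //   getUDivExactExpr(X + Y, Y) --> (X + Y) /u Y   // no factor; plain udiv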
3389 
3390 /// Get an add recurrence expression for the specified loop. Simplify the
3391 /// expression as much as possible.
3392 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
3393  const Loop *L,
3394  SCEV::NoWrapFlags Flags) {
3395  SmallVector<const SCEV *, 4> Operands;
3396  Operands.push_back(Start);
3397  if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
3398  if (StepChrec->getLoop() == L) {
3399  Operands.append(StepChrec->op_begin(), StepChrec->op_end());
3400  return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
3401  }
3402 
3403  Operands.push_back(Step);
3404  return getAddRecExpr(Operands, L, Flags);
3405 }
3406 
3407 /// Get an add recurrence expression for the specified loop. Simplify the
3408 /// expression as much as possible.
3409 const SCEV *
3410 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
3411  const Loop *L, SCEV::NoWrapFlags Flags) {
3412  if (Operands.size() == 1) return Operands[0];
3413 #ifndef NDEBUG
3414  Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
3415  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
3416  assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
3417  "SCEVAddRecExpr operand types don't match!");
3418  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
3419  assert(isLoopInvariant(Operands[i], L) &&
3420  "SCEVAddRecExpr operand is not loop-invariant!");
3421 #endif
3422 
3423  if (Operands.back()->isZero()) {
3424  Operands.pop_back();
3425  return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
3426  }
3427 
3428  // It's tempting to call getConstantMaxBackedgeTakenCount here and
3429  // use that information to infer NUW and NSW flags. However, computing a
3430  // BE count requires calling getAddRecExpr, so we may not yet have a
3431  // meaningful BE count at this point (and if we don't, we'd be stuck
3432  // with a SCEVCouldNotCompute as the cached BE count).
3433 
3434  Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);
3435 
3436  // Canonicalize nested AddRecs by nesting them in order of loop depth.
3437  if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
3438  const Loop *NestedLoop = NestedAR->getLoop();
3439  if (L->contains(NestedLoop)
3440  ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
3441  : (!NestedLoop->contains(L) &&
3442  DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
3443  SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
3444  NestedAR->op_end());
3445  Operands[0] = NestedAR->getStart();
3446  // AddRecs require their operands be loop-invariant with respect to their
3447  // loops. Don't perform this transformation if it would break this
3448  // requirement.
3449  bool AllInvariant = all_of(
3450  Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); });
3451 
3452  if (AllInvariant) {
3453  // Create a recurrence for the outer loop with the same step size.
3454  //
3455  // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
3456  // inner recurrence has the same property.
3457  SCEV::NoWrapFlags OuterFlags =
3458  maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());
3459 
3460  NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
3461  AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {
3462  return isLoopInvariant(Op, NestedLoop);
3463  });
3464 
3465  if (AllInvariant) {
3466  // Ok, both add recurrences are valid after the transformation.
3467  //
3468  // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
3469  // the outer recurrence has the same property.
3470  SCEV::NoWrapFlags InnerFlags =
3471  maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
3472  return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
3473  }
3474  }
3475  // Reset Operands to its original state.
3476  Operands[0] = NestedAR;
3477  }
3478  }
3479 
3480  // Okay, it looks like we really DO need an addrec expr. Check to see if we
3481  // already have one, otherwise create a new one.
3482  return getOrCreateAddRecExpr(Operands, L, Flags);
3483 }
3484 
3485 const SCEV *
3486 ScalarEvolution::getGEPExpr(GEPOperator *GEP,
3487  const SmallVectorImpl<const SCEV *> &IndexExprs) {
3488  const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand());
3489  // getSCEV(Base)->getType() has the same address space as Base->getType()
3490  // because SCEV::getType() preserves the address space.
3491  Type *IntPtrTy = getEffectiveSCEVType(BaseExpr->getType());
3492  // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP
3493  // instruction to its SCEV, because the Instruction may be guarded by control
3494  // flow and the no-overflow bits may not be valid for the expression in any
3495  // context. This can be fixed similarly to how these flags are handled for
3496  // adds.
3497  SCEV::NoWrapFlags Wrap = GEP->isInBounds() ? SCEV::FlagNSW
3498  : SCEV::FlagAnyWrap;
3499 
3500  const SCEV *TotalOffset = getZero(IntPtrTy);
3501  // The array size is unimportant. The first thing we do with CurTy is get
3502  // its element type.
3503  Type *CurTy = ArrayType::get(GEP->getSourceElementType(), 0);
3504  for (const SCEV *IndexExpr : IndexExprs) {
3505  // Compute the (potentially symbolic) offset in bytes for this index.
3506  if (StructType *STy = dyn_cast<StructType>(CurTy)) {
3507  // For a struct, add the member offset.
3508  ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue();
3509  unsigned FieldNo = Index->getZExtValue();
3510  const SCEV *FieldOffset = getOffsetOfExpr(IntPtrTy, STy, FieldNo);
3511 
3512  // Add the field offset to the running total offset.
3513  TotalOffset = getAddExpr(TotalOffset, FieldOffset);
3514 
3515  // Update CurTy to the type of the field at Index.
3516  CurTy = STy->getTypeAtIndex(Index);
3517  } else {
3518  // Update CurTy to its element type.
3519  CurTy = cast<SequentialType>(CurTy)->getElementType();
3520  // For an array, add the element offset, explicitly scaled.
3521  const SCEV *ElementSize = getSizeOfExpr(IntPtrTy, CurTy);
3522  // Getelementptr indices are signed.
3523  IndexExpr = getTruncateOrSignExtend(IndexExpr, IntPtrTy);
3524 
3525  // Multiply the index by the element size to compute the element offset.
3526  const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, Wrap);
3527 
3528  // Add the element offset to the running total offset.
3529  TotalOffset = getAddExpr(TotalOffset, LocalOffset);
3530  }
3531  }
3532 
3533  // Add the total offset from all the GEP indices to the base.
3534  return getAddExpr(BaseExpr, TotalOffset, Wrap);
3535 }
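 // A sketch of the accumulation above, on an assumed example GEP:
 //   getelementptr { i32, [4 x i16] }, { i32, [4 x i16] }* %base,
 //                 i64 %i, i32 1, i64 %j
 // Here TotalOffset accumulates
 //   %i * 12    // sizeof the struct, scaling the first index
 //   + 4        // byte offset of field 1
 //   + %j * 2   // sizeof(i16), scaling the array index
 // and the result is getAddExpr(BaseExpr, TotalOffset, Wrap).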
3536 
3537 std::tuple<const SCEV *, FoldingSetNodeID, void *>
3538 ScalarEvolution::findExistingSCEVInCache(int SCEVType,
3539  ArrayRef<const SCEV *> Ops) {
3540  FoldingSetNodeID ID;
3541  void *IP = nullptr;
3542  ID.AddInteger(SCEVType);
3543  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
3544  ID.AddPointer(Ops[i]);
3545  return std::tuple<const SCEV *, FoldingSetNodeID, void *>(
3546  UniqueSCEVs.FindNodeOrInsertPos(ID, IP), std::move(ID), IP);
3547 }
3548 
3549 const SCEV *ScalarEvolution::getMinMaxExpr(unsigned Kind,
3550  SmallVectorImpl<const SCEV *> &Ops) {
3551  assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!");
3552  if (Ops.size() == 1) return Ops[0];
3553 #ifndef NDEBUG
3554  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
3555  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
3556  assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
3557  "Operand types don't match!");
3558 #endif
3559 
3560  bool IsSigned = Kind == scSMaxExpr || Kind == scSMinExpr;
3561  bool IsMax = Kind == scSMaxExpr || Kind == scUMaxExpr;
3562 
3563  // Sort by complexity; this groups all similar expression types together.
3564  GroupByComplexity(Ops, &LI, DT);
3565 
3566  // Check if we have created the same expression before.
3567  if (const SCEV *S = std::get<0>(findExistingSCEVInCache(Kind, Ops))) {
3568  return S;
3569  }
3570 
3571  // If there are any constants, fold them together.
3572  unsigned Idx = 0;
3573  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3574  ++Idx;
3575  assert(Idx < Ops.size());
3576  auto FoldOp = [&](const APInt &LHS, const APInt &RHS) {
3577  if (Kind == scSMaxExpr)
3578  return APIntOps::smax(LHS, RHS);
3579  else if (Kind == scSMinExpr)
3580  return APIntOps::smin(LHS, RHS);
3581  else if (Kind == scUMaxExpr)
3582  return APIntOps::umax(LHS, RHS);
3583  else if (Kind == scUMinExpr)
3584  return APIntOps::umin(LHS, RHS);
3585  llvm_unreachable("Unknown SCEV min/max opcode");
3586  };
3587 
3588  while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
3589  // We found two constants, fold them together!
3590  ConstantInt *Fold = ConstantInt::get(
3591  getContext(), FoldOp(LHSC->getAPInt(), RHSC->getAPInt()));
3592  Ops[0] = getConstant(Fold);
3593  Ops.erase(Ops.begin()+1); // Erase the folded element
3594  if (Ops.size() == 1) return Ops[0];
3595  LHSC = cast<SCEVConstant>(Ops[0]);
3596  }
3597 
3598  bool IsMinV = LHSC->getValue()->isMinValue(IsSigned);
3599  bool IsMaxV = LHSC->getValue()->isMaxValue(IsSigned);
3600 
3601  if (IsMax ? IsMinV : IsMaxV) {
3602  // If we are left with a constant minimum(/maximum)-int, strip it off.
3603  Ops.erase(Ops.begin());
3604  --Idx;
3605  } else if (IsMax ? IsMaxV : IsMinV) {
3606  // If we have a max(/min) with a constant maximum(/minimum)-int,
3607  // it will always be the extremum.
3608  return LHSC;
3609  }
3610 
3611  if (Ops.size() == 1) return Ops[0];
3612  }
3613 
3614  // Find the first operation of the same kind
3615  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < Kind)
3616  ++Idx;
3617 
3618  // Check to see if one of the operands is of the same kind. If so, expand its
3619  // operands onto our operand list, and recurse to simplify.
3620  if (Idx < Ops.size()) {
3621  bool DeletedAny = false;
3622  while (Ops[Idx]->getSCEVType() == Kind) {
3623  const SCEVMinMaxExpr *SMME = cast<SCEVMinMaxExpr>(Ops[Idx]);
3624  Ops.erase(Ops.begin()+Idx);
3625  Ops.append(SMME->op_begin(), SMME->op_end());
3626  DeletedAny = true;
3627  }
3628 
3629  if (DeletedAny)
3630  return getMinMaxExpr(Kind, Ops);
3631  }
3632 
3633  // Okay, check to see if the same value occurs in the operand list twice. If
3634  // so, delete one. Since we sorted the list, these values are required to
3635  // be adjacent.
3636  llvm::CmpInst::Predicate GEPred =
3637  IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
3638  llvm::CmpInst::Predicate LEPred =
3639  IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
3640  llvm::CmpInst::Predicate FirstPred = IsMax ? GEPred : LEPred;
3641  llvm::CmpInst::Predicate SecondPred = IsMax ? LEPred : GEPred;
3642  for (unsigned i = 0, e = Ops.size() - 1; i != e; ++i) {
3643  if (Ops[i] == Ops[i + 1] ||
3644  isKnownViaNonRecursiveReasoning(FirstPred, Ops[i], Ops[i + 1])) {
3645  // X op Y op Y --> X op Y
3646  // X op Y --> X, if we know X, Y are ordered appropriately
3647  Ops.erase(Ops.begin() + i + 1, Ops.begin() + i + 2);
3648  --i;
3649  --e;
3650  } else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[i],
3651  Ops[i + 1])) {
3652  // X op Y --> Y, if we know X, Y are ordered appropriately
3653  Ops.erase(Ops.begin() + i, Ops.begin() + i + 1);
3654  --i;
3655  --e;
3656  }
3657  }
3658 
3659  if (Ops.size() == 1) return Ops[0];
3660 
3661  assert(!Ops.empty() && "Reduced smax down to nothing!");
3662 
3663  // Okay, it looks like we really DO need an expr. Check to see if we
3664  // already have one, otherwise create a new one.
3665  const SCEV *ExistingSCEV;
3666  FoldingSetNodeID ID;
3667  void *IP;
3668  std::tie(ExistingSCEV, ID, IP) = findExistingSCEVInCache(Kind, Ops);
3669  if (ExistingSCEV)
3670  return ExistingSCEV;
3671  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
3672  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
3673  SCEV *S = new (SCEVAllocator) SCEVMinMaxExpr(
3674  ID.Intern(SCEVAllocator), static_cast<SCEVTypes>(Kind), O, Ops.size());
3675 
3676  UniqueSCEVs.InsertNode(S, IP);
3677  addToLoopUseLists(S);
3678  return S;
3679 }
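 // For illustration, the folds above give roughly:
 //   umax(X, umax(Y, X)) --> umax(X, Y)           // flatten, then dedupe
 //   smax(C1, C2, X)     --> smax(smax(C1,C2), X) // constants fold first
 //   umin(0, X)          --> 0                    // min-int is the extremum
 //   umax(0, X)          --> X                    // min-int is stripped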
3680 
3681 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, const SCEV *RHS) {
3682  SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
3683  return getSMaxExpr(Ops);
3684 }
3685 
3686 const SCEV *ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
3687  return getMinMaxExpr(scSMaxExpr, Ops);
3688 }
3689 
3690 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, const SCEV *RHS) {
3691  SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
3692  return getUMaxExpr(Ops);
3693 }
3694 
3695 const SCEV *ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
3696  return getMinMaxExpr(scUMaxExpr, Ops);
3697 }
3698 
3699 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
3700  const SCEV *RHS) {
3701  SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
3702  return getSMinExpr(Ops);
3703 }
3704 
3705 const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
3706  return getMinMaxExpr(scSMinExpr, Ops);
3707 }
3708 
3709 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
3710  const SCEV *RHS) {
3711  SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
3712  return getUMinExpr(Ops);
3713 }
3714 
3715 const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
3716  return getMinMaxExpr(scUMinExpr, Ops);
3717 }
3718 
3719 const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
3720  // We can bypass creating a target-independent
3721  // constant expression and then folding it back into a ConstantInt.
3722  // This is just a compile-time optimization.
3723  return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy));
3724 }
3725 
3726 const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
3727  StructType *STy,
3728  unsigned FieldNo) {
3729  // We can bypass creating a target-independent
3730  // constant expression and then folding it back into a ConstantInt.
3731  // This is just a compile-time optimization.
3732  return getConstant(
3733  IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo));
3734 }
3735 
3736 const SCEV *ScalarEvolution::getUnknown(Value *V) {
3737  // Don't attempt to do anything other than create a SCEVUnknown object
3738  // here. createSCEV only calls getUnknown after checking for all other
3739  // interesting possibilities, and any other code that calls getUnknown
3740  // is doing so in order to hide a value from SCEV canonicalization.
3741 
3742  FoldingSetNodeID ID;
3743  ID.AddInteger(scUnknown);
3744  ID.AddPointer(V);
3745  void *IP = nullptr;
3746  if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
3747  assert(cast<SCEVUnknown>(S)->getValue() == V &&
3748  "Stale SCEVUnknown in uniquing map!");
3749  return S;
3750  }
3751  SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
3752  FirstUnknown);
3753  FirstUnknown = cast<SCEVUnknown>(S);
3754  UniqueSCEVs.InsertNode(S, IP);
3755  return S;
3756 }
3757 
3758 //===----------------------------------------------------------------------===//
3759 // Basic SCEV Analysis and PHI Idiom Recognition Code
3760 //
3761 
3762 /// Test if values of the given type are analyzable within the SCEV
3763 /// framework. This primarily includes integer types, and it can optionally
3764 /// include pointer types if the ScalarEvolution class has access to
3765 /// target-specific information.
3766 bool ScalarEvolution::isSCEVable(Type *Ty) const {
3767  // Integers and pointers are always SCEVable.
3768  return Ty->isIntOrPtrTy();
3769 }
3770 
3771 /// Return the size in bits of the specified type, for which isSCEVable must
3772 /// return true.
3773 uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
3774  assert(isSCEVable(Ty) && "Type is not SCEVable!");
3775  if (Ty->isPointerTy())
3776  return getDataLayout().getIndexTypeSizeInBits(Ty);
3777  return getDataLayout().getTypeSizeInBits(Ty);
3778 }
3779 
3780 /// Return a type with the same bitwidth as the given type and which represents
3781 /// how SCEV will treat the given type, for which isSCEVable must return
3782 /// true. For pointer types, this is the pointer-sized integer type.
3783 Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
3784  assert(isSCEVable(Ty) && "Type is not SCEVable!");
3785 
3786  if (Ty->isIntegerTy())
3787  return Ty;
3788 
3789  // The only other supported type is pointer.
3790  assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
3791  return getDataLayout().getIntPtrType(Ty);
3792 }
3793 
3794 Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const {
3795  return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ? T1 : T2;
3796 }
3797 
3798 const SCEV *ScalarEvolution::getCouldNotCompute() {
3799  return CouldNotCompute.get();
3800 }
3801 
3802 bool ScalarEvolution::checkValidity(const SCEV *S) const {
3803  bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) {
3804  auto *SU = dyn_cast<SCEVUnknown>(S);
3805  return SU && SU->getValue() == nullptr;
3806  });
3807 
3808  return !ContainsNulls;
3809 }
3810 
3811 bool ScalarEvolution::containsAddRecurrence(const SCEV *S) {
3812  HasRecMapType::iterator I = HasRecMap.find(S);
3813  if (I != HasRecMap.end())
3814  return I->second;
3815 
3816  bool FoundAddRec = SCEVExprContains(S, isa<SCEVAddRecExpr, const SCEV *>);
3817  HasRecMap.insert({S, FoundAddRec});
3818  return FoundAddRec;
3819 }
3820 
3821 /// Try to split a SCEVAddExpr into a pair of {SCEV, ConstantInt}.
3822 /// If \p S is a SCEVAddExpr and is composed of a sub SCEV S' and an
3823 /// offset I, then return {S', I}, else return {\p S, nullptr}.
3824 static std::pair<const SCEV *, ConstantInt *> splitAddExpr(const SCEV *S) {
3825  const auto *Add = dyn_cast<SCEVAddExpr>(S);
3826  if (!Add)
3827  return {S, nullptr};
3828 
3829  if (Add->getNumOperands() != 2)
3830  return {S, nullptr};
3831 
3832  auto *ConstOp = dyn_cast<SCEVConstant>(Add->getOperand(0));
3833  if (!ConstOp)
3834  return {S, nullptr};
3835 
3836  return {Add->getOperand(1), ConstOp->getValue()};
3837 }
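 // For illustration: splitAddExpr((4 + %x)) returns {%x, 4}, while
 // splitAddExpr(%x), splitAddExpr((%x + %y)) and splitAddExpr((4 + %x + %y))
 // all return the input with a null offset, since only a two-operand add
 // led by a constant is split.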
3838 
3839 /// Return the ValueOffsetPair set for \p S. \p S can be represented
3840 /// by the value and offset from any ValueOffsetPair in the set.
3841 SetVector<ScalarEvolution::ValueOffsetPair> *
3842 ScalarEvolution::getSCEVValues(const SCEV *S) {
3843  ExprValueMapType::iterator SI = ExprValueMap.find_as(S);
3844  if (SI == ExprValueMap.end())
3845  return nullptr;
3846 #ifndef NDEBUG
3847  if (VerifySCEVMap) {
3848  // Check there is no dangling Value in the set returned.
3849  for (const auto &VE : SI->second)
3850  assert(ValueExprMap.count(VE.first));
3851  }
3852 #endif
3853  return &SI->second;
3854 }
3855 
3856 /// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V)
3857 /// cannot be used separately. eraseValueFromMap should be used to remove
3858 /// V from ValueExprMap and ExprValueMap at the same time.
3859 void ScalarEvolution::eraseValueFromMap(Value *V) {
3860  ValueExprMapType::iterator I = ValueExprMap.find_as(V);
3861  if (I != ValueExprMap.end()) {
3862  const SCEV *S = I->second;
3863  // Remove {V, 0} from the set of ExprValueMap[S]
3864  if (SetVector<ValueOffsetPair> *SV = getSCEVValues(S))
3865  SV->remove({V, nullptr});
3866 
3867  // Remove {V, Offset} from the set of ExprValueMap[Stripped]
3868  const SCEV *Stripped;
3869  ConstantInt *Offset;
3870  std::tie(Stripped, Offset) = splitAddExpr(S);
3871  if (Offset != nullptr) {
3872  if (SetVector<ValueOffsetPair> *SV = getSCEVValues(Stripped))
3873  SV->remove({V, Offset});
3874  }
3875  ValueExprMap.erase(V);
3876  }
3877 }
3878 
3879 /// Check whether value has nuw/nsw/exact set but SCEV does not.
3880 /// TODO: In reality it is better to check for poison recursively,
3881 /// but this is better than nothing.
3882 static bool SCEVLostPoisonFlags(const SCEV *S, const Value *V) {
3883  if (auto *I = dyn_cast<Instruction>(V)) {
3884  if (isa<OverflowingBinaryOperator>(I)) {
3885  if (auto *NS = dyn_cast<SCEVNAryExpr>(S)) {
3886  if (I->hasNoSignedWrap() && !NS->hasNoSignedWrap())
3887  return true;
3888  if (I->hasNoUnsignedWrap() && !NS->hasNoUnsignedWrap())
3889  return true;
3890  }
3891  } else if (isa<PossiblyExactOperator>(I) && I->isExact())
3892  return true;
3893  }
3894  return false;
3895 }
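 // For illustration: given
 //   %a = add nuw i32 %x, 1
 // if the SCEV built for %a is (1 + %x) without <nuw>, the instruction's
 // poison-generating flag was lost, and getSCEV below will skip recording
 // %a in ExprValueMap so the flag-less expression is not reused for it.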
3896 
3897 /// Return an existing SCEV if it exists, otherwise analyze the expression and
3898 /// create a new one.
3899 const SCEV *ScalarEvolution::getSCEV(Value *V) {
3900  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
3901 
3902  const SCEV *S = getExistingSCEV(V);
3903  if (S == nullptr) {
3904  S = createSCEV(V);
3905  // During PHI resolution, it is possible to create two SCEVs for the same
3906  // V, so we need to double-check whether V->S was inserted into
3907  // ValueExprMap before inserting S->{V, 0} into ExprValueMap.
3908  std::pair<ValueExprMapType::iterator, bool> Pair =
3909  ValueExprMap.insert({SCEVCallbackVH(V, this), S});
3910  if (Pair.second && !SCEVLostPoisonFlags(S, V)) {
3911  ExprValueMap[S].insert({V, nullptr});
3912 
3913  // If S == Stripped + Offset, add Stripped -> {V, Offset} into
3914  // ExprValueMap.
3915  const SCEV *Stripped = S;
3916  ConstantInt *Offset = nullptr;
3917  std::tie(Stripped, Offset) = splitAddExpr(S);
3918  // If Stripped is a SCEVUnknown, don't bother to save
3919  // Stripped -> {V, offset}. It doesn't simplify and sometimes even
3920  // increases the complexity of the expansion code.
3921  // If V is GetElementPtrInst, don't save Stripped -> {V, offset}
3922  // because it may generate add/sub instead of GEP in SCEV expansion.
3923  if (Offset != nullptr && !isa<SCEVUnknown>(Stripped) &&
3924  !isa<GetElementPtrInst>(V))
3925  ExprValueMap[Stripped].insert({V, Offset});
3926  }
3927  }
3928  return S;
3929 }
3930 
3931 const SCEV *ScalarEvolution::getExistingSCEV(Value *V) {
3932  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
3933 
3934  ValueExprMapType::iterator I = ValueExprMap.find_as(V);
3935  if (I != ValueExprMap.end()) {
3936  const SCEV *S = I->second;
3937  if (checkValidity(S))
3938  return S;
3939  eraseValueFromMap(V);
3940  forgetMemoizedResults(S);
3941  }
3942  return nullptr;
3943 }
3944 
3945 /// Return a SCEV corresponding to -V = -1*V
3946 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V,
3947  SCEV::NoWrapFlags Flags) {
3948  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
3949  return getConstant(
3950  cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));
3951 
3952  Type *Ty = V->getType();
3953  Ty = getEffectiveSCEVType(Ty);
3954  return getMulExpr(
3955  V, getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))), Flags);
3956 }
3957 
3958 /// If Expr computes ~A, return A else return nullptr
3959 static const SCEV *MatchNotExpr(const SCEV *Expr) {
3960  const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr);
3961  if (!Add || Add->getNumOperands() != 2 ||
3962  !Add->getOperand(0)->isAllOnesValue())
3963  return nullptr;
3964 
3965  const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1));
3966  if (!AddRHS || AddRHS->getNumOperands() != 2 ||
3967  !AddRHS->getOperand(0)->isAllOnesValue())
3968  return nullptr;
3969 
3970  return AddRHS->getOperand(1);
3971 }
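 // For illustration: SCEV has no dedicated "not" node, so ~X is built as
 // -1 + (-1 * X), and MatchNotExpr recovers X from exactly that shape:
 //   (-1 + (-1 * X)) --> X
 //   (-1 + (-2 * X)) --> nullptr   // not a bitwise-not pattern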
3972 
3973 /// Return a SCEV corresponding to ~V = -1-V
3974 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
3975  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
3976  return getConstant(
3977  cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));
3978 
3979  // Fold ~(u|s)(min|max)(~x, ~y) to (u|s)(max|min)(x, y)
3980  if (const SCEVMinMaxExpr *MME = dyn_cast<SCEVMinMaxExpr>(V)) {
3981  auto MatchMinMaxNegation = [&](const SCEVMinMaxExpr *MME) {
3982  SmallVector<const SCEV *, 2> MatchedOperands;
3983  for (const SCEV *Operand : MME->operands()) {
3984  const SCEV *Matched = MatchNotExpr(Operand);
3985  if (!Matched)
3986  return (const SCEV *)nullptr;
3987  MatchedOperands.push_back(Matched);
3988  }
3989  return getMinMaxExpr(
3990  SCEVMinMaxExpr::negate(static_cast<SCEVTypes>(MME->getSCEVType())),
3991  MatchedOperands);
3992  };
3993  if (const SCEV *Replaced = MatchMinMaxNegation(MME))
3994  return Replaced;
3995  }
3996 
3997  Type *Ty = V->getType();
3998  Ty = getEffectiveSCEVType(Ty);
3999  const SCEV *AllOnes =
4000  getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)));
4001  return getMinusSCEV(AllOnes, V);
4002 }
4003 
4004 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
4005  SCEV::NoWrapFlags Flags,
4006  unsigned Depth) {
4007  // Fast path: X - X --> 0.
4008  if (LHS == RHS)
4009  return getZero(LHS->getType());
4010 
4011  // We represent LHS - RHS as LHS + (-1)*RHS. This transformation
4012  // makes it so that we cannot make much use of NUW.
4013  auto AddFlags = SCEV::FlagAnyWrap;
4014  const bool RHSIsNotMinSigned =
4015  !getSignedRangeMin(RHS).isMinSignedValue();
4016  if (maskFlags(Flags, SCEV::FlagNSW) == SCEV::FlagNSW) {
4017  // Let M be the minimum representable signed value. Then (-1)*RHS
4018  // signed-wraps if and only if RHS is M. That can happen even for
4019  // a NSW subtraction because e.g. (-1)*M signed-wraps even though
4020  // -1 - M does not. So to transfer NSW from LHS - RHS to LHS +
4021  // (-1)*RHS, we need to prove that RHS != M.
4022  //
4023  // If LHS is non-negative and we know that LHS - RHS does not
4024  // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap
4025  // either by proving that RHS > M or that LHS >= 0.
4026  if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) {
4027  AddFlags = SCEV::FlagNSW;
4028  }
4029  }
4030 
4031  // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS -
4032  // RHS is NSW and LHS >= 0.
4033  //
4034  // The difficulty here is that the NSW flag may have been proven
4035  // relative to a loop that is to be found in a recurrence in LHS and
4036  // not in RHS. Applying NSW to (-1)*M may then let the NSW have a
4037  // larger scope than intended.
4038  auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
4039 
4040  return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
4041 }
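 // A short worked case, on i8 for brevity: if RHS could be -128 (the
 // minimum signed value), then (-1)*RHS is -128 again (a signed wrap), so
 // NSW on LHS - RHS says nothing about LHS + (-1)*RHS. Knowing RHS != -128
 // directly, or LHS >= 0 (which rules out RHS == -128 when the subtraction
 // is NSW), restores the flag; that is the RHSIsNotMinSigned logic above.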
4042 
4043 const SCEV *ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty,
4044  unsigned Depth) {
4045  Type *SrcTy = V->getType();
4046  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4047  "Cannot truncate or zero extend with non-integer arguments!");
4048  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4049  return V; // No conversion
4050  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
4051  return getTruncateExpr(V, Ty, Depth);
4052  return getZeroExtendExpr(V, Ty, Depth);
4053 }
4054 
4055 const SCEV *ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, Type *Ty,
4056  unsigned Depth) {
4057  Type *SrcTy = V->getType();
4058  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4059  "Cannot truncate or zero extend with non-integer arguments!");
4060  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4061  return V; // No conversion
4062  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
4063  return getTruncateExpr(V, Ty, Depth);
4064  return getSignExtendExpr(V, Ty, Depth);
4065 }
4066 
4067 const SCEV *
4068 ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
4069  Type *SrcTy = V->getType();
4070  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4071  "Cannot noop or zero extend with non-integer arguments!");
4072  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
4073  "getNoopOrZeroExtend cannot truncate!");
4074  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4075  return V; // No conversion
4076  return getZeroExtendExpr(V, Ty);
4077 }
4078 
4079 const SCEV *
4080 ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
4081  Type *SrcTy = V->getType();
4082  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4083  "Cannot noop or sign extend with non-integer arguments!");
4084  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
4085  "getNoopOrSignExtend cannot truncate!");
4086  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4087  return V; // No conversion
4088  return getSignExtendExpr(V, Ty);
4089 }
4090 
4091 const SCEV *
4092 ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
4093  Type *SrcTy = V->getType();
4094  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4095  "Cannot noop or any extend with non-integer arguments!");
4096  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
4097  "getNoopOrAnyExtend cannot truncate!");
4098  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4099  return V; // No conversion
4100  return getAnyExtendExpr(V, Ty);
4101 }
4102 
4103 const SCEV *
4104 ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
4105  Type *SrcTy = V->getType();
4106  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4107  "Cannot truncate or noop with non-integer arguments!");
4108  assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
4109  "getTruncateOrNoop cannot extend!");
4110  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4111  return V; // No conversion
4112  return getTruncateExpr(V, Ty);
4113 }
4114 
4115 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
4116  const SCEV *RHS) {
4117  const SCEV *PromotedLHS = LHS;
4118  const SCEV *PromotedRHS = RHS;
4119 
4120  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
4121  PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
4122  else
4123  PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
4124 
4125  return getUMaxExpr(PromotedLHS, PromotedRHS);
4126 }
4127 
4128 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
4129  const SCEV *RHS) {
4130  SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
4131  return getUMinFromMismatchedTypes(Ops);
4132 }
4133 
4134 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(
4135  SmallVectorImpl<const SCEV *> &Ops) {
4136  assert(!Ops.empty() && "At least one operand must be!");
4137  // Trivial case.
4138  if (Ops.size() == 1)
4139  return Ops[0];
4140 
4141  // Find the max type first.
4142  Type *MaxType = nullptr;
4143  for (auto *S : Ops)
4144  if (MaxType)
4145  MaxType = getWiderType(MaxType, S->getType());
4146  else
4147  MaxType = S->getType();
4148 
4149  // Extend all ops to max type.
4150  SmallVector<const SCEV *, 2> PromotedOps;
4151  for (auto *S : Ops)
4152  PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType));
4153 
4154  // Generate umin.
4155  return getUMinExpr(PromotedOps);
4156 }
4157 
4158 const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
4159  // A pointer operand may evaluate to a nonpointer expression, such as null.
4160  if (!V->getType()->isPointerTy())
4161  return V;
4162 
4163  if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) {
4164  return getPointerBase(Cast->getOperand());
4165  } else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) {
4166  const SCEV *PtrOp = nullptr;
4167  for (const SCEV *NAryOp : NAry->operands()) {
4168  if (NAryOp->getType()->isPointerTy()) {
4169  // Cannot find the base of an expression with multiple pointer operands.
4170  if (PtrOp)
4171  return V;
4172  PtrOp = NAryOp;
4173  }
4174  }
4175  if (!PtrOp)
4176  return V;
4177  return getPointerBase(PtrOp);
4178  }
4179  return V;
4180 }
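 // For illustration: getPointerBase((4 * %n) + %base) walks through the
 // add and returns the SCEVUnknown %base, while an expression with two
 // pointer operands, e.g. (%p + %q), is returned unchanged because no
 // single base can be identified.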
4181 
4182 /// Push users of the given Instruction onto the given Worklist.
4183 static void
4184 PushDefUseChildren(Instruction *I,
4185  SmallVectorImpl<Instruction *> &Worklist) {
4186  // Push the def-use children onto the Worklist stack.
4187  for (User *U : I->users())
4188  Worklist.push_back(cast<Instruction>(U));
4189 }
4190 
4191 void ScalarEvolution::forgetSymbolicName(Instruction *PN, const SCEV *SymName) {
4192  SmallVector<Instruction *, 16> Worklist;
4193  PushDefUseChildren(PN, Worklist);
4194 
4195  SmallPtrSet<Instruction *, 8> Visited;
4196  Visited.insert(PN);
4197  while (!Worklist.empty()) {
4198  Instruction *I = Worklist.pop_back_val();
4199  if (!Visited.insert(I).second)
4200  continue;
4201 
4202  auto It = ValueExprMap.find_as(static_cast<Value *>(I));
4203  if (It != ValueExprMap.end()) {
4204  const SCEV *Old = It->second;
4205 
4206  // Short-circuit the def-use traversal if the symbolic name
4207  // ceases to appear in expressions.
4208  if (Old != SymName && !hasOperand(Old, SymName))
4209  continue;
4210 
4211  // SCEVUnknown for a PHI either means that it has an unrecognized
4212  // structure, it's a PHI that's in the process of being computed
4213  // by createNodeForPHI, or it's a single-value PHI. In the first case,
4214  // additional loop trip count information isn't going to change anything.
4215  // In the second case, createNodeForPHI will perform the necessary
4216  // updates on its own when it gets to that point. In the third, we do
4217  // want to forget the SCEVUnknown.
4218  if (!isa<PHINode>(I) ||
4219  !isa<SCEVUnknown>(Old) ||
4220  (I != PN && Old == SymName)) {
4221  eraseValueFromMap(It->first);
4222  forgetMemoizedResults(Old);
4223  }
4224  }
4225 
4226  PushDefUseChildren(I, Worklist);
4227  }
4228 }
4229 
4230 namespace {
4231 
4232 /// Takes SCEV S and Loop L. For each AddRec sub-expression, use its start
4233 /// expression if its Loop is L. If the Loop is not L, then: if
4234 /// IgnoreOtherLoops is true, use the AddRec itself; otherwise the
4235 /// rewrite cannot be done.
4236 /// If the SCEV contains a loop-variant SCEVUnknown, the rewrite cannot be done.
4237 class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> {
4238 public:
4239  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
4240  bool IgnoreOtherLoops = true) {
4241  SCEVInitRewriter Rewriter(L, SE);
4242  const SCEV *Result = Rewriter.visit(S);
4243  if (Rewriter.hasSeenLoopVariantSCEVUnknown())
4244  return SE.getCouldNotCompute();
4245  return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
4246  ? SE.getCouldNotCompute()
4247  : Result;
4248  }
4249 
4250  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
4251  if (!SE.isLoopInvariant(Expr, L))
4252  SeenLoopVariantSCEVUnknown = true;
4253  return Expr;
4254  }
4255 
4256  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
4257  // Only re-write AddRecExprs for this loop.
4258  if (Expr->getLoop() == L)
4259  return Expr->getStart();
4260  SeenOtherLoops = true;
4261  return Expr;
4262  }
4263 
4264  bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }
4265 
4266  bool hasSeenOtherLoops() { return SeenOtherLoops; }
4267 
4268 private:
4269  explicit SCEVInitRewriter(const Loop *L, ScalarEvolution &SE)
4270  : SCEVRewriteVisitor(SE), L(L) {}
4271 
4272  const Loop *L;
4273  bool SeenLoopVariantSCEVUnknown = false;
4274  bool SeenOtherLoops = false;
4275 };
4276 
4277 /// Takes SCEV S and Loop L. For each AddRec sub-expression, use its
4278 /// post-increment expression if its Loop is L; if the Loop is not L,
4279 /// use the AddRec itself.
4280 /// If the SCEV contains a loop-variant SCEVUnknown, the rewrite cannot be done.
4281 class SCEVPostIncRewriter : public SCEVRewriteVisitor<SCEVPostIncRewriter> {
4282 public:
4283  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE) {
4284  SCEVPostIncRewriter Rewriter(L, SE);
4285  const SCEV *Result = Rewriter.visit(S);
4286  return Rewriter.hasSeenLoopVariantSCEVUnknown()
4287  ? SE.getCouldNotCompute()
4288  : Result;
4289  }
4290 
4291  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
4292  if (!SE.isLoopInvariant(Expr, L))
4293  SeenLoopVariantSCEVUnknown = true;
4294  return Expr;
4295  }
4296 
4297  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
4298  // Only re-write AddRecExprs for this loop.
4299  if (Expr->getLoop() == L)
4300  return Expr->getPostIncExpr(SE);
4301  SeenOtherLoops = true;
4302  return Expr;
4303  }
4304 
4305  bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }
4306 
4307  bool hasSeenOtherLoops() { return SeenOtherLoops; }
4308 
4309 private:
4310  explicit SCEVPostIncRewriter(const Loop *L, ScalarEvolution &SE)
4311  : SCEVRewriteVisitor(SE), L(L) {}
4312 
4313  const Loop *L;
4314  bool SeenLoopVariantSCEVUnknown = false;
4315  bool SeenOtherLoops = false;
4316 };
4317 
4318 /// This class evaluates the compare condition by matching it against the
4319 /// condition of the loop latch. If there is a match, we assume a true value
4320 /// for the condition while building SCEV nodes.
4321 class SCEVBackedgeConditionFolder
4322  : public SCEVRewriteVisitor<SCEVBackedgeConditionFolder> {
4323 public:
4324  static const SCEV *rewrite(const SCEV *S, const Loop *L,
4325  ScalarEvolution &SE) {
4326  bool IsPosBECond = false;
4327  Value *BECond = nullptr;
4328  if (BasicBlock *Latch = L->getLoopLatch()) {
4329  BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator());
4330  if (BI && BI->isConditional()) {
4331  assert(BI->getSuccessor(0) != BI->getSuccessor(1) &&
4332  "Both outgoing branches should not target same header!");
4333  BECond = BI->getCondition();
4334  IsPosBECond = BI->getSuccessor(0) == L->getHeader();
4335  } else {
4336  return S;
4337  }
4338  }
4339  SCEVBackedgeConditionFolder Rewriter(L, BECond, IsPosBECond, SE);
4340  return Rewriter.visit(S);
4341  }
4342 
4343  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
4344  const SCEV *Result = Expr;
4345  bool InvariantF = SE.isLoopInvariant(Expr, L);
4346 
4347  if (!InvariantF) {
4348  Instruction *I = cast<Instruction>(Expr->getValue());
4349  switch (I->getOpcode()) {
4350  case Instruction::Select: {
4351  SelectInst *SI = cast<SelectInst>(I);
4352  Optional<const SCEV *> Res =
4353  compareWithBackedgeCondition(SI->getCondition());
4354  if (Res.hasValue()) {
4355  bool IsOne = cast<SCEVConstant>(Res.getValue())->getValue()->isOne();
4356  Result = SE.getSCEV(IsOne ? SI->getTrueValue() : SI->getFalseValue());
4357  }
4358  break;
4359  }
4360  default: {
4361  Optional<const SCEV *> Res = compareWithBackedgeCondition(I);
4362  if (Res.hasValue())
4363  Result = Res.getValue();
4364  break;
4365  }
4366  }
4367  }
4368  return Result;
4369  }
4370 
4371 private:
4372  explicit SCEVBackedgeConditionFolder(const Loop *L, Value *BECond,
4373  bool IsPosBECond, ScalarEvolution &SE)
4374  : SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond),
4375  IsPositiveBECond(IsPosBECond) {}
4376 
4377  Optional<const SCEV *> compareWithBackedgeCondition(Value *IC);
4378 
4379  const Loop *L;
4380  /// Loop back condition.
4381  Value *BackedgeCond = nullptr;
4382  /// Set to true if loop back is on positive branch condition.
4383  bool IsPositiveBECond;
4384 };
4385 
4386 Optional<const SCEV *>
4387 SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) {
4388 
4389  // If the value matches the backedge condition of the loop latch,
4390  // return a constant evolution node based on whether the loopback
4391  // branch is taken.
4392  if (BackedgeCond == IC)
4393  return IsPositiveBECond ? SE.getOne(Type::getInt1Ty(SE.getContext()))
4394  : SE.getZero(Type::getInt1Ty(SE.getContext()));
4395  return None;
4396 }
4397 
4398 class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> {
4399 public:
4400  static const SCEV *rewrite(const SCEV *S, const Loop *L,
4401  ScalarEvolution &SE) {
4402  SCEVShiftRewriter Rewriter(L, SE);
4403  const SCEV *Result = Rewriter.visit(S);
4404  return Rewriter.isValid() ? Result : SE.getCouldNotCompute();
4405  }
4406 
4407  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
4408  // Only allow AddRecExprs for this loop.
4409  if (!SE.isLoopInvariant(Expr, L))
4410  Valid = false;
4411  return Expr;
4412  }
4413 
4414  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
4415  if (Expr->getLoop() == L && Expr->isAffine())
4416  return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE));
4417  Valid = false;
4418  return Expr;
4419  }
4420 
4421  bool isValid() { return Valid; }
4422 
4423 private:
4424  explicit SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE)
4425  : SCEVRewriteVisitor(SE), L(L) {}
4426 
4427  const Loop *L;
4428  bool Valid = true;
4429 };
4430 
4431 } // end anonymous namespace
4432 
4433 SCEV::NoWrapFlags
4434 ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) {
4435  if (!AR->isAffine())
4436  return SCEV::FlagAnyWrap;
4437 
4438  using OBO = OverflowingBinaryOperator;
4439 
4439 
4440  SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap;
4441 
4442  if (!AR->hasNoSignedWrap()) {
4443  ConstantRange AddRecRange = getSignedRange(AR);
4444  ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this));
4445 
4446  auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
4447  Instruction::Add, IncRange, OBO::NoSignedWrap);
4448  if (NSWRegion.contains(AddRecRange))
4449  Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW);
4450  }
4451 
4452  if (!AR->hasNoUnsignedWrap()) {
4453  ConstantRange AddRecRange = getUnsignedRange(AR);
4454  ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this));
4455 
4456  auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
4457  Instruction::Add, IncRange, OBO::NoUnsignedWrap);
4458  if (NUWRegion.contains(AddRecRange))
4459  Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW);
4460  }
4461 
4462  return Result;
4463 }
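 // For illustration: for {0,+,1}<%L> on i8 whose signed range is known to
 // be [0, 100), every value in that range can be incremented by the step 1
 // without signed wrap, so the range sits inside the no-signed-wrap region
 // and FlagNSW is inferred even if the IR carried no nsw.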
4464 
4465 namespace {
4466 
4467 /// Represents an abstract binary operation. This may exist as a
4468 /// normal instruction or constant expression, or may have been
4469 /// derived from an expression tree.
4470 struct BinaryOp {
4471  unsigned Opcode;
4472  Value *LHS;
4473  Value *RHS;
4474  bool IsNSW = false;
4475  bool IsNUW = false;
4476 
4477  /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or
4478  /// constant expression.
4479  Operator *Op = nullptr;
4480 
4481  explicit BinaryOp(Operator *Op)
4482  : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)),
4483  Op(Op) {
4484  if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) {
4485  IsNSW = OBO->hasNoSignedWrap();
4486  IsNUW = OBO->hasNoUnsignedWrap();
4487  }
4488  }
4489 
4490  explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false,
4491  bool IsNUW = false)
4492  : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
4493 };
4494 
4495 } // end anonymous namespace
4496 
4497 /// Try to map \p V into a BinaryOp, and return \c None on failure.
4498 static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
4499  auto *Op = dyn_cast<Operator>(V);
4500  if (!Op)
4501  return None;
4502 
4503  // Implementation detail: all the cleverness here should happen without
4504  // creating new SCEV expressions -- our caller knows tricks to avoid creating
4505  // SCEV expressions when possible, and we should not break that.
4506 
4507  switch (Op->getOpcode()) {
4508  case Instruction::Add:
4509  case Instruction::Sub:
4510  case Instruction::Mul:
4511  case Instruction::UDiv:
4512  case Instruction::URem:
4513  case Instruction::And:
4514  case Instruction::Or:
4515  case Instruction::AShr:
4516  case Instruction::Shl:
4517  return BinaryOp(Op);
4518 
4519  case Instruction::Xor:
4520  if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1)))
4521  // If the RHS of the xor is a signmask, then this is just an add.
4522  // Instcombine turns add of signmask into xor as a strength reduction step.
4523  if (RHSC->getValue().isSignMask())
4524  return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1));
4525  return BinaryOp(Op);
4526 
4527  case Instruction::LShr:
4528  // Turn a logical shift right by a constant into an unsigned divide.
4529  if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) {
4530  uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth();
4531 
4532  // If the shift count is not less than the bitwidth, the result of
4533  // the shift is undefined. Don't try to analyze it, because the
4534  // resolution chosen here may differ from the resolution chosen in
4535  // other parts of the compiler.
4536  if (SA->getValue().ult(BitWidth)) {
4537  Constant *X =
4538  ConstantInt::get(SA->getContext(),
4539  APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
4540  return BinaryOp(Instruction::UDiv, Op->getOperand(0), X);
4541  }
4542  }
4543  return BinaryOp(Op);
4544 
4545  case Instruction::ExtractValue: {
4546  auto *EVI = cast<ExtractValueInst>(Op);
4547  if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
4548  break;
4549 
4550  auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand());
4551  if (!WO)
4552  break;
4553 
4554  Instruction::BinaryOps BinOp = WO->getBinaryOp();
4555  bool Signed = WO->isSigned();
4556  // TODO: Should add nuw/nsw flags for mul as well.
4557  if (BinOp == Instruction::Mul || !isOverflowIntrinsicNoWrap(WO, DT))
4558  return BinaryOp(BinOp, WO->getLHS(), WO->getRHS());
4559 
4560  // Now that we know that all uses of the arithmetic-result component of
4561  // CI are guarded by the overflow check, we can go ahead and pretend
4562  // that the arithmetic is non-overflowing.
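 // Illustrative example (hypothetical IR): for
 //   %s = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
 //   %v = extractvalue { i32, i1 } %s, 0
 // with all uses of %v guarded by the overflow bit, %v maps to
 // BinaryOp{Add, %a, %b} with IsNSW = true.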
4563  return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(),
4564  /* IsNSW = */ Signed, /* IsNUW = */ !Signed);
4565  }
4566 
4567  default:
4568  break;
4569  }
4570 
4571  return None;
4572 }
4573 
4574 /// Helper function to createAddRecFromPHIWithCasts. We have a phi
4575 /// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via
4576 /// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the
4577 /// way. This function checks if \p Op, an operand of this SCEVAddExpr,
4578 /// matches one of the following patterns:
4579 /// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
4580 /// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
4581 /// If the SCEV expression of \p Op conforms with one of the expected patterns
4582 /// we return the type of the truncation operation, and indicate whether the
4583 /// truncated type should be treated as signed/unsigned by setting
4584 /// \p Signed to true/false, respectively.
4585 static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI,
4586  bool &Signed, ScalarEvolution &SE) {
4587  // The case where Op == SymbolicPHI (that is, with no type conversions on
4588  // the way) is handled by the regular add recurrence creating logic and
4589  // would have already been triggered in createAddRecForPHI. Reaching it here
4590  // means that createAddRecFromPHI had failed for this PHI before (e.g.,
4591  // because one of the other operands of the SCEVAddExpr updating this PHI is
4592  // not invariant).
4593  //
4594  // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in
4595  // this case predicates that allow us to prove that Op == SymbolicPHI will
4596  // be added.
4597  if (Op == SymbolicPHI)
4598  return nullptr;
4599 
4600  unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType());
4601  unsigned NewBits = SE.getTypeSizeInBits(Op->getType());
4602  if (SourceBits != NewBits)
4603  return nullptr;
4604 
4605  const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op);
4606  const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op);
4607  if (!SExt && !ZExt)
4608  return nullptr;
4609  const SCEVTruncateExpr *Trunc =
4610  SExt ? dyn_cast<SCEVTruncateExpr>(SExt->getOperand())
4611  : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand());
4612  if (!Trunc)
4613  return nullptr;
4614  const SCEV *X = Trunc->getOperand();
4615  if (X != SymbolicPHI)
4616  return nullptr;
4617  Signed = SExt != nullptr;
4618  return Trunc->getType();
4619 }
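// Illustrative example (hypothetical IR): with %phi of type i64 as
// SymbolicPHI, an operand of the form
//   (sext i32 (trunc i64 %phi to i32) to i64)
// makes this return the i32 truncation type with Signed == true; the
// zext-of-trunc form returns the same type with Signed == false.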
4620 
4621 static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) {
4622  if (!PN->getType()->isIntegerTy())
4623  return nullptr;
4624  const Loop *L = LI.getLoopFor(PN->getParent());
4625  if (!L || L->getHeader() != PN->getParent())
4626  return nullptr;
4627  return L;
4628 }
4629 
4630 // Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the
4631 // computation that updates the phi matches the following pattern:
4632 // (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum
4633 // which corresponds to a phi->trunc->sext/zext->add->phi update chain.
4634 // If so, try to see if it can be rewritten as an AddRecExpr under some
4635 // Predicates. If successful, return them as a pair. Also cache the results
4636 // of the analysis.
4637 //
4638 // Example usage scenario:
4639 // Say the Rewriter is called for the following SCEV:
4640 // 8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
4641 // where:
4642 // %X = phi i64 (%Start, %BEValue)
4643 // It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X),
4644 // and call this function with %SymbolicPHI = %X.
4645 //
4646 // The analysis will find that the value coming around the backedge has
4647 // the following SCEV:
4648 // BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
4649 // Upon concluding that this matches the desired pattern, the function
4650 // will return the pair {NewAddRec, SmallPredsVec} where:
4651 // NewAddRec = {%Start,+,%Step}
4652 // SmallPredsVec = {P1, P2, P3} as follows:
4653 // P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw>
4654 // P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64)
4655 // P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64)
4656 // The returned pair means that SymbolicPHI can be rewritten into NewAddRec
4657 // under the predicates {P1,P2,P3}.
4658 // This predicated rewrite will be cached in PredicatedSCEVRewrites:
4659 // PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3}}
4660 //
4661 // TODO's:
4662 //
4663 // 1) Extend the Induction descriptor to also support inductions that involve
4664 // casts: When needed (namely, when we are called in the context of the
4665 // vectorizer induction analysis), a Set of cast instructions will be
4666 // populated by this method, and provided back to isInductionPHI. This is
4667 // needed to allow the vectorizer to properly record them to be ignored by
4668 // the cost model and to avoid vectorizing them (otherwise these casts,
4669 // which are redundant under the runtime overflow checks, will be
4670 // vectorized, which can be costly).
4671 //
4672 // 2) Support additional induction/PHISCEV patterns: We also want to support
4673 // inductions where the sext-trunc / zext-trunc operations (partly) occur
4674 // after the induction update operation (the induction increment):
4675 //
4676 // (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix)
4677 // which corresponds to a phi->add->trunc->sext/zext->phi update chain.
4678 //
4679 // (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix)
4680 // which corresponds to a phi->trunc->add->sext/zext->phi update chain.
4681 //
4682 // 3) Outline common code with createAddRecFromPHI to avoid duplication.
4683 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
4684 ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) {
4685  SmallVector<const SCEVPredicate *, 3> Predicates;
4686 
4687  // *** Part1: Analyze if we have a phi-with-cast pattern for which we can
4688  // return an AddRec expression under some predicate.
4689 
4690  auto *PN = cast<PHINode>(SymbolicPHI->getValue());
4691  const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
4692  assert(L && "Expecting an integer loop header phi");
4693 
4694  // The loop may have multiple entrances or multiple exits; we can analyze
4695  // this phi as an addrec if it has a unique entry value and a unique
4696  // backedge value.
4697  Value *BEValueV = nullptr, *StartValueV = nullptr;
4698  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
4699  Value *V = PN->getIncomingValue(i);
4700  if (L->contains(PN->getIncomingBlock(i))) {
4701  if (!BEValueV) {
4702  BEValueV = V;
4703  } else if (BEValueV != V) {
4704  BEValueV = nullptr;
4705  break;
4706  }
4707  } else if (!StartValueV) {
4708  StartValueV = V;
4709  } else if (StartValueV != V) {
4710  StartValueV = nullptr;
4711  break;
4712  }
4713  }
4714  if (!BEValueV || !StartValueV)
4715  return None;
4716 
4717  const SCEV *BEValue = getSCEV(BEValueV);
4718 
4719  // If the value coming around the backedge is an add with the symbolic
4720  // value we just inserted, possibly with casts that we can ignore under
4721  // an appropriate runtime guard, then we found a simple induction variable!
4722  const auto *Add = dyn_cast<SCEVAddExpr>(BEValue);
4723  if (!Add)
4724  return None;
4725 
4726  // If there is a single occurrence of the symbolic value, possibly
4727  // casted, replace it with a recurrence.
4728  unsigned FoundIndex = Add->getNumOperands();
4729  Type *TruncTy = nullptr;
4730  bool Signed;
4731  for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
4732  if ((TruncTy =
4733  isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this)))
4734  if (FoundIndex == e) {
4735  FoundIndex = i;
4736  break;
4737  }
4738 
4739  if (FoundIndex == Add->getNumOperands())
4740  return None;
4741 
4742  // Create an add with everything but the specified operand.
4743  SmallVector<const SCEV *, 8> Ops;
4744  for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
4745  if (i != FoundIndex)
4746  Ops.push_back(Add->getOperand(i));
4747  const SCEV *Accum = getAddExpr(Ops);
4748 
4749  // The runtime checks will not be valid if the step amount is
4750  // varying inside the loop.
4751  if (!isLoopInvariant(Accum, L))
4752  return None;
4753 
4754  // *** Part2: Create the predicates
4755 
4756  // Analysis was successful: we have a phi-with-cast pattern for which we
4757  // can return an AddRec expression under the following predicates:
4758  //
4759  // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum)
4760  // fits within the truncated type (does not overflow) for i = 0 to n-1.
4761  // P2: An Equal predicate that guarantees that
4762  // Start = (Ext ix (Trunc iy (Start) to ix) to iy)
4763  // P3: An Equal predicate that guarantees that
4764  // Accum = (Ext ix (Trunc iy (Accum) to ix) to iy)
4765  //
4766  // As we next prove, the above predicates guarantee that:
4767  // Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy)
4768  //
4769  //
4770  // More formally, we want to prove that:
4771  // Expr(i+1) = Start + (i+1) * Accum
4772  // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum
4773  //
4774  // Given that:
4775  // 1) Expr(0) = Start
4776  // 2) Expr(1) = Start + Accum
4777  // = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2
4778  // 3) Induction hypothesis (step i):
4779  // Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum
4780  //
4781  // Proof:
4782  // Expr(i+1) =
4783  // = Start + (i+1)*Accum
4784  // = (Start + i*Accum) + Accum
4785  // = Expr(i) + Accum
4786  // = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum
4787  // :: from step i
4788  //
4789  // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) + Accum + Accum
4790  //
4791  // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy)
4792  // + (Ext ix (Trunc iy (Accum) to ix) to iy)
4793  // + Accum :: from P3
4794  //
4795  // = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy)
4796  // + Accum :: from P1: Ext(x)+Ext(y)=>Ext(x+y)
4797  //
4798  // = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum
4799  // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum
4800  //
4801  // By induction, the same applies to all iterations 1<=i<n:
4802  //
4803 
4804  // Create a truncated addrec for which we will add a no overflow check (P1).
4805  const SCEV *StartVal = getSCEV(StartValueV);
4806  const SCEV *PHISCEV =
4807  getAddRecExpr(getTruncateExpr(StartVal, TruncTy),
4808  getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap);
4809 
4810  // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr.
4811  // ex: If truncated Accum is 0 and StartVal is a constant, then PHISCEV
4812  // will be constant.
4813  //
4814  // If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't
4815  // add P1.
4816  if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) {
4817  SCEVWrapPredicate::IncrementWrapFlags AddedFlags =
4818  Signed ? SCEVWrapPredicate::IncrementNSSW
4819  : SCEVWrapPredicate::IncrementNUSW;
4820  const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags);
4821  Predicates.push_back(AddRecPred);
4822  }
4823 
4824  // Create the Equal Predicates P2,P3:
4825 
4826  // It is possible that the predicates P2 and/or P3 are computable at
4827  // compile time due to StartVal and/or Accum being constants.
4828  // If either one is, then we can check that now and escape if either P2
4829  // or P3 is false.
4830 
4831  // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy)
4832  // for each of StartVal and Accum
4833  auto getExtendedExpr = [&](const SCEV *Expr,
4834  bool CreateSignExtend) -> const SCEV * {
4835  assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant");
4836  const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy);
4837  const SCEV *ExtendedExpr =
4838  CreateSignExtend ? getSignExtendExpr(TruncatedExpr, Expr->getType())
4839  : getZeroExtendExpr(TruncatedExpr, Expr->getType());
4840  return ExtendedExpr;
4841  };
4842 
4843  // Given:
4844  // ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy)
4845  // = getExtendedExpr(Expr)
4846  // Determine whether the predicate P: Expr == ExtendedExpr
4847  // is known to be false at compile time
4848  auto PredIsKnownFalse = [&](const SCEV *Expr,
4849  const SCEV *ExtendedExpr) -> bool {
4850  return Expr != ExtendedExpr &&
4851  isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr);
4852  };
4853 
4854  const SCEV *StartExtended = getExtendedExpr(StartVal, Signed);
4855  if (PredIsKnownFalse(StartVal, StartExtended)) {
4856  LLVM_DEBUG(dbgs() << "P2 is compile-time false\n";);
4857  return None;
4858  }
4859 
4860  // The Step is always Signed (because the overflow checks are either
4861  // NSSW or NUSW)
4862  const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true);
4863  if (PredIsKnownFalse(Accum, AccumExtended)) {
4864  LLVM_DEBUG(dbgs() << "P3 is compile-time false\n";);
4865  return None;
4866  }
4867 
4868  auto AppendPredicate = [&](const SCEV *Expr,
4869  const SCEV *ExtendedExpr) -> void {
4870  if (Expr != ExtendedExpr &&
4871  !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) {
4872  const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr);
4873  LLVM_DEBUG(dbgs() << "Added Predicate: " << *Pred);
4874  Predicates.push_back(Pred);
4875  }
4876  };
4877 
4878  AppendPredicate(StartVal, StartExtended);
4879  AppendPredicate(Accum, AccumExtended);
4880 
4881  // *** Part3: Predicates are ready. Now go ahead and create the new addrec in
4882  // which the casts had been folded away. The caller can rewrite SymbolicPHI
4883  // into NewAR if it will also add the runtime overflow checks specified in
4884  // Predicates.
4885  auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap);
4886 
4887  std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
4888  std::make_pair(NewAR, Predicates);
4889  // Remember the result of the analysis for this SCEV at this location.
4890  PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite;
4891  return PredRewrite;
4892 }
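// Illustrative usage sketch (an assumption about the caller, not spelled
// out here): a client holding the returned pair {NewAR, Preds} may rewrite
// SymbolicPHI into NewAR only if it also arranges for the runtime checks in
// Preds, e.g. by accumulating them into the union predicate maintained by
// PredicatedScalarEvolution.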
4893 
4894 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
4895 ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
4896  auto *PN = cast<PHINode>(SymbolicPHI->getValue());
4897  const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
4898  if (!L)
4899  return None;
4900 
4901  // Check to see if we already analyzed this PHI.
4902  auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
4903  if (I != PredicatedSCEVRewrites.end()) {
4904  std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
4905  I->second;
4906  // Analysis was done before and failed to create an AddRec:
4907  if (Rewrite.first == SymbolicPHI)
4908  return None;
4909  // Analysis was done before and succeeded to create an AddRec under
4910  // a predicate:
4911  assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec");
4912  assert(!(Rewrite.second).empty() && "Expected to find Predicates");
4913  return Rewrite;
4914  }
4915 
4916  Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
4917  Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);
4918 
4919  // Record in the cache that the analysis failed
4920  if (!Rewrite) {
4921  SmallVector<const SCEVPredicate *, 3> Predicates;
4922  PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
4923  return None;
4924  }
4925 
4926  return Rewrite;
4927 }
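// Illustrative note on the cache shape used above: a failure is stored as
// PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, {}}, so the
// "Rewrite.first == SymbolicPHI" test recognizes it on the next query and
// returns None without re-running the Impl.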
4928 
4929 // FIXME: This utility is currently required because the Rewriter currently
4930 // does not rewrite this expression:
4931 // {0, +, (sext ix (trunc iy to ix) to iy)}
4932 // into {0, +, %step},
4933 // even when the following Equal predicate exists:
4934 // "%step == (sext ix (trunc iy to ix) to iy)".
4935 bool PredicatedScalarEvolution::areAddRecsEqualWithPreds(
4936  const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const {
4937  if (AR1 == AR2)
4938  return true;
4939 
4940  auto areExprsEqual = [&](const SCEV *Expr1, const SCEV *Expr2) -> bool {
4941  if (Expr1 != Expr2 && !Preds.implies(SE.getEqualPredicate(Expr1, Expr2)) &&
4942  !Preds.implies(SE.getEqualPredicate(Expr2, Expr1)))
4943  return false;
4944  return true;
4945  };
4946 
4947  if (!areExprsEqual(AR1->getStart(), AR2->getStart()) ||
4948  !areExprsEqual(AR1->getStepRecurrence(SE), AR2->getStepRecurrence(SE)))
4949  return false;
4950  return true;
4951 }
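// Illustrative example: given the predicate
//   "%step == (sext i32 (trunc i64 %step to i32) to i64)"
// in Preds, {0,+,(sext i32 (trunc i64 %step to i32) to i64)} and
// {0,+,%step} are treated as equal here, working around the Rewriter
// limitation described above.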
4952 
4953 /// A helper function for createAddRecFromPHI to handle simple cases.
4954 ///
4955 /// This function tries to find an AddRec expression for the simplest (yet most
4956 /// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)).
4957 /// If it fails, createAddRecFromPHI will use a more general, but slow,
4958 /// technique for finding the AddRec expression.
4959 const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN,
4960  Value *BEValueV,
4961  Value *StartValueV) {
4962  const Loop *L = LI.getLoopFor(PN->getParent());
4963  assert(L && L->getHeader() == PN->getParent());
4964  assert(BEValueV && StartValueV);
4965 
4966  auto BO = MatchBinaryOp(BEValueV, DT);
4967  if (!BO)
4968  return nullptr;
4969 
4970  if (BO->Opcode != Instruction::Add)
4971  return nullptr;
4972 
4973  const SCEV *Accum = nullptr;
4974  if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
4975  Accum = getSCEV(BO->RHS);
4976  else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
4977  Accum = getSCEV(BO->LHS);
4978 
4979  if (!Accum)
4980  return nullptr;
4981 
4982  SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
4983  if (BO->IsNUW)
4984  Flags = setFlags(Flags, SCEV::FlagNUW);
4985  if (BO->IsNSW)
4986  Flags = setFlags(Flags, SCEV::FlagNSW);
4987 
4988  const SCEV *StartVal = getSCEV(StartValueV);
4989  const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
4990 
4991  ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
4992 
4993  // We can add Flags to the post-inc expression only if we
4994  // know that it is *undefined behavior* for BEValueV to
4995  // overflow.
4996  if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
4997  if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
4998  (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
4999 
5000  return PHISCEV;
5001 }
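// Illustrative example (hypothetical IR) of the simple case handled above:
//   loop:
//     %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
//     %iv.next = add nuw nsw i64 %iv, 1
// matches PN = PHI(Start, OP(Self, LoopInvariant)) and produces the addrec
// {0,+,1}<nuw><nsw><%loop> for %iv.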
5002 
5003 const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
5004  const Loop *L = LI.getLoopFor(PN->getParent());
5005  if (!L || L->getHeader() != PN->getParent())
5006  return nullptr;
5007 
5008  // The loop may have multiple entrances or multiple exits; we can analyze
5009  // this phi as an addrec if it has a unique entry value and a unique
5010  // backedge value.
5011  Value *BEValueV = nullptr, *StartValueV = nullptr;
5012  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
5013  Value *V = PN->getIncomingValue(i);
5014  if (L->contains(PN->getIncomingBlock(i))) {
5015  if (!BEValueV) {
5016  BEValueV = V;
5017  } else if (BEValueV != V) {
5018  BEValueV = nullptr;
5019  break;
5020  }
5021  } else if (!StartValueV) {
5022  StartValueV = V;
5023  } else if (StartValueV != V) {
5024  StartValueV = nullptr;
5025  break;
5026  }
5027  }
5028  if (!BEValueV || !StartValueV)
5029  return nullptr;
5030 
5031  assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
5032  "PHI node already processed?");
5033 
5034  // First, try to find an AddRec expression without creating a fictitious
5035  // symbolic value for PN.
5036  if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
5037  return S;
5038 
5039  // Handle PHI node value symbolically.
5040  const SCEV *SymbolicName = getUnknown(PN);
5041  ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName});
5042 
5043  // Using this symbolic name for the PHI, analyze the value coming around
5044  // the back-edge.
5045  const SCEV *BEValue = getSCEV(BEValueV);
5046 
5047  // NOTE: If BEValue is loop invariant, we know that the PHI node just
5048  // has a special value for the first iteration of the loop.
5049 
5050  // If the value coming around the backedge is an add with the symbolic
5051  // value we just inserted, then we found a simple induction variable!
5052  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
5053  // If there is a single occurrence of the symbolic value, replace it
5054  // with a recurrence.
5055  unsigned FoundIndex = Add->getNumOperands();
5056  for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
5057  if (Add->getOperand(i) == SymbolicName)
5058  if (FoundIndex == e) {
5059  FoundIndex = i;
5060  break;
5061  }
5062 
5063  if (FoundIndex != Add->getNumOperands()) {
5064  // Create an add with everything but the specified operand.
5065  SmallVector<const SCEV *, 8> Ops;
5066  for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
5067  if (i != FoundIndex)
5068  Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i),
5069  L, *this));
5070  const SCEV *Accum = getAddExpr(Ops);
5071 
5072  // This is not a valid addrec if the step amount is varying each
5073  // loop iteration, but is not itself an addrec in this loop.
5074  if (isLoopInvariant(Accum, L) ||
5075  (isa<SCEVAddRecExpr>(Accum) &&
5076  cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
5077  SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
5078 
5079  if (auto BO = MatchBinaryOp(BEValueV, DT)) {
5080  if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
5081  if (BO->IsNUW)
5082  Flags = setFlags(Flags, SCEV::FlagNUW);
5083  if (BO->IsNSW)
5084  Flags = setFlags(Flags, SCEV::FlagNSW);
5085  }
5086  } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
5087  // If the increment is an inbounds GEP, then we know the address
5088  // space cannot be wrapped around. We cannot make any guarantee
5089  // about signed or unsigned overflow because pointers are
5090  // unsigned but we may have a negative index from the base
5091  // pointer. We can guarantee that no unsigned wrap occurs if the
5092  // indices form a positive value.
5093  if (GEP->isInBounds() && GEP->getOperand(0) == PN) {
5094  Flags = setFlags(Flags, SCEV::FlagNW);
5095 
5096  const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
5097  if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
5098  Flags = setFlags(Flags, SCEV::FlagNUW);
5099  }
5100 
5101  // We cannot transfer nuw and nsw flags from subtraction
5102  // operations -- sub nuw X, Y is not the same as add nuw X, -Y
5103  // for instance.
5104  }
5105 
5106  const SCEV *StartVal = getSCEV(StartValueV);
5107  const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
5108 
5109  // Okay, for the entire analysis of this edge we assumed the PHI
5110  // to be symbolic. We now need to go back and purge all of the
5111  // entries for the scalars that use the symbolic expression.
5112  forgetSymbolicName(PN, SymbolicName);
5113  ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
5114 
5115  // We can add Flags to the post-inc expression only if we
5116  // know that it is *undefined behavior* for BEValueV to
5117  // overflow.
5118  if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
5119  if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
5120  (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
5121 
5122  return PHISCEV;
5123  }
5124  }
5125  } else {
5126  // Otherwise, this could be a loop like this:
5127  // i = 0; for (j = 1; ..; ++j) { .... i = j; }
5128  // In this case, j = {1,+,1} and BEValue is j.
5129  // Because the other in-value of i (0) fits the evolution of BEValue,
5130  // i really is an addrec evolution.
5131  //
5132  // We can generalize this saying that i is the shifted value of BEValue
5133  // by one iteration:
5134  // PHI(f(0), f({1,+,1})) --> f({0,+,1})
5135  const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
5136  const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this, false);
5137  if (Shifted != getCouldNotCompute() &&
5138  Start != getCouldNotCompute()) {
5139  const SCEV *StartVal = getSCEV(StartValueV);
5140  if (Start == StartVal) {
5141  // Okay, for the entire analysis of this edge we assumed the PHI
5142  // to be symbolic. We now need to go back and purge all of the
5143  // entries for the scalars that use the symbolic expression.
5144  forgetSymbolicName(PN, SymbolicName);
5145  ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted;
5146  return Shifted;
5147  }
5148  }
5149  }
5150 
5151  // Remove the temporary PHI node SCEV that has been inserted while intending
5152  // to create an AddRecExpr for this PHI node. We cannot keep this temporary,
5153  // as it would prevent later (possibly simpler) SCEV expressions from being added
5154  // to the ValueExprMap.
5155  eraseValueFromMap(PN);
5156 
5157  return nullptr;
5158 }
5159 
5160 // Checks if the SCEV S is available at BB. S is considered available at BB
5161 // if S can be materialized at BB without introducing a fault.
5162 static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S,
5163  BasicBlock *BB) {
5164  struct CheckAvailable {
5165  bool TraversalDone = false;
5166  bool Available = true;
5167 
5168  const Loop *L = nullptr; // The loop BB is in (can be nullptr)
5169  BasicBlock *BB = nullptr;
5170  DominatorTree &DT;
5171 
5172  CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT)
5173  : L(L), BB(BB), DT(DT) {}
5174 
5175  bool setUnavailable() {
5176  TraversalDone = true;
5177  Available = false;
5178  return false;
5179  }
5180 
5181  bool follow(const SCEV *S) {
5182  switch (S->getSCEVType()) {
5183  case scConstant: case scTruncate: case scZeroExtend: case scSignExtend:
5184  case scAddExpr: case scMulExpr: case scUMaxExpr: case scSMaxExpr:
5185  case scUMinExpr:
5186  case scSMinExpr:
5187  // These expressions are available if their operand(s) is/are.
5188  return true;
5189 
5190  case scAddRecExpr: {
5191  // We allow add recurrences on the loop that BB is in, or on some
5192  // outer loop. This guarantees availability because the value of the
5193  // add recurrence at BB is simply the "current" value of the induction
5194  // variable. We can relax this in the future; for instance an add
5195  // recurrence on a sibling dominating loop is also available at BB.
5196  const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop();
5197  if (L && (ARLoop == L || ARLoop->contains(L)))
5198  return true;
5199 
5200  return setUnavailable();
5201  }
5202 
5203  case scUnknown: {
5204  // For SCEVUnknown, we check for simple dominance.
5205  const auto *SU = cast<SCEVUnknown>(S);
5206  Value *V = SU->getValue();
5207 
5208  if (isa<Argument>(V))
5209  return false;
5210 
5211  if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB))
5212  return false;
5213 
5214  return setUnavailable();
5215  }
5216 
5217  case scUDivExpr:
5218  case scCouldNotCompute:
5219  // We do not try to be smart about these at all.
5220  return setUnavailable();
5221  }
5222  llvm_unreachable("switch should be fully covered!");
5223  }
5224 
5225  bool isDone() { return TraversalDone; }
5226  };
5227 
5228  CheckAvailable CA(L, BB, DT);
5229  SCEVTraversal<CheckAvailable> ST(CA);
5230 
5231  ST.visitAll(S);
5232  return CA.Available;
5233 }
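// Illustrative example: materializing {0,+,1}<%L> at a block inside %L is
// considered available (the "current" induction value already exists
// there), whereas a SCEVUnknown whose defining instruction does not
// dominate BB, or any SCEVUDivExpr (which could fault), makes the traversal
// above report the expression unavailable.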
5234 
5235 // Try to match a control flow sequence that branches out at BI and merges back
5236 // at Merge into a "C ? LHS : RHS" select pattern. Return true on a successful
5237 // match.
5238 static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge,
5239  Value *&C, Value *&LHS, Value *&RHS) {
5240  C = BI->getCondition();
5241 
5242  BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0));
5243  BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1));
5244 
5245  if (!LeftEdge.isSingleEdge())
5246  return false;
5247 
5248  assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()");
5249 
5250  Use &LeftUse = Merge->getOperandUse(0);
5251  Use &RightUse = Merge->getOperandUse(1);
5252 
5253  if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) {
5254  LHS = LeftUse;
5255  RHS = RightUse;
5256  return true;
5257  }
5258 
5259  if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) {
5260  LHS = RightUse;
5261  RHS = LeftUse;
5262  return true;
5263  }
5264 
5265  return false;
5266 }
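// Illustrative note: the second dominance check above handles merges whose
// PHI lists the incoming values in the opposite order, e.g.
//   %v = phi [ %y, %right ], [ %x, %left ]
// in which case LHS and RHS are swapped so the caller still builds
// "select %cond, %x, %y".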
5267 
5268 const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) {
5269  auto IsReachable =
5270  [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); };
5271  if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) {
5272  const Loop *L = LI.getLoopFor(PN->getParent());
5273 
5274  // We don't want to break LCSSA, even in a SCEV expression tree.
5275  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
5276  if (LI.getLoopFor(PN->getIncomingBlock(i)) != L)
5277  return nullptr;
5278 
5279  // Try to match
5280  //
5281  // br %cond, label %left, label %right
5282  // left:
5283  // br label %merge
5284  // right:
5285  // br label %merge
5286  // merge:
5287  // V = phi [ %x, %left ], [ %y, %right ]
5288  //
5289  // as "select %cond, %x, %y"
5290 
5291  BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock();
5292  assert(IDom && "At least the entry block should dominate PN");
5293 
5294  auto *BI = dyn_cast<BranchInst>(IDom->getTerminator());
5295  Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr;
5296 
5297  if (BI && BI->isConditional() &&
5298  BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) &&
5299  IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) &&
5300  IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent()))
5301  return createNodeForSelectOrPHI(PN, Cond, LHS, RHS);
5302  }
5303 
5304  return nullptr;
5305 }
5306 
5307 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
5308  if (const SCEV *S = createAddRecFromPHI(PN))
5309  return S;
5310 
5311  if (const SCEV *S = createNodeFromSelectLikePHI(PN))
5312  return S;
5313 
5314  // If the PHI has a single incoming value, follow that value, unless the
5315  // PHI's incoming blocks are in a different loop, in which case doing so
5316  // risks breaking LCSSA form. Instcombine would normally zap these, but
5317  // it doesn't have DominatorTree information, so it may miss cases.
5318  if (Value *V = SimplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC}))
5319  if (LI.replacementPreservesLCSSAForm(PN, V))
5320  return getSCEV(V);
5321 
5322  // If it's not a loop phi, we can't handle it yet.
5323  return getUnknown(PN);
5324 }
5325 
5326 const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Instruction *I,
5327  Value *Cond,
5328  Value *TrueVal,
5329  Value *FalseVal) {
5330  // Handle "constant" branch or select. This can occur for instance when a
5331  // loop pass transforms an inner loop and moves on to process the outer loop.
5332  if (auto *CI = dyn_cast<ConstantInt>(Cond))
5333  return getSCEV(CI->isOne() ? TrueVal : FalseVal);
5334 
5335  // Try to match some simple smax or umax patterns.
5336  auto *ICI = dyn_cast<ICmpInst>(Cond);
5337  if (!ICI)
5338  return getUnknown(I);
5339 
5340  Value *LHS = ICI->getOperand(0);
5341  Value *RHS = ICI->getOperand(1);
5342 
5343  switch (ICI->getPredicate()) {
5344  case ICmpInst::ICMP_SLT:
5345  case ICmpInst::ICMP_SLE:
5346  std::swap(LHS, RHS);
5347  LLVM_FALLTHROUGH;
5348  case ICmpInst::ICMP_SGT:
5349  case ICmpInst::ICMP_SGE:
5350  // a >s b ? a+x : b+x -> smax(a, b)+x
5351  // a >s b ? b+x : a+x -> smin(a, b)+x
5352  if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) {
5353  const SCEV *LS = getNoopOrSignExtend(getSCEV(LHS), I->getType());
5354  const SCEV *RS = getNoopOrSignExtend(getSCEV(RHS), I->getType());
5355  const SCEV *LA = getSCEV(TrueVal);
5356  const SCEV *RA = getSCEV(FalseVal);
5357  const SCEV *LDiff = getMinusSCEV(LA, LS);
5358  const SCEV *RDiff = getMinusSCEV(RA, RS);
5359  if (LDiff == RDiff)
5360  return getAddExpr(getSMaxExpr(LS, RS), LDiff);
5361  LDiff = getMinusSCEV(LA, RS);
5362  RDiff = getMinusSCEV(RA, LS);
5363  if (LDiff == RDiff)
5364  return getAddExpr(getSMinExpr(LS, RS), LDiff);
5365  }
5366  break;
5367  case ICmpInst::ICMP_ULT:
5368  case ICmpInst::ICMP_ULE:
5369  std::swap(LHS, RHS);
5370  LLVM_FALLTHROUGH;
5371  case ICmpInst::ICMP_UGT:
5372  case ICmpInst::ICMP_UGE:
5373  // a >u b ? a+x : b+x -> umax(a, b)+x
5374  // a >u b ? b+x : a+x -> umin(a, b)+x
5375  if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) {
5376  const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
5377  const SCEV *RS = getNoopOrZeroExtend(getSCEV(RHS), I->getType());
5378  const SCEV *LA = getSCEV(TrueVal);
5379  const SCEV *RA = getSCEV(FalseVal);
5380  const SCEV *LDiff = getMinusSCEV(LA, LS);
5381  const SCEV *RDiff = getMinusSCEV(RA, RS);
5382  if (LDiff == RDiff)
5383  return getAddExpr(getUMaxExpr(LS, RS), LDiff);
5384  LDiff = getMinusSCEV(LA, RS);
5385  RDiff = getMinusSCEV(RA, LS);
5386  if (LDiff == RDiff)
5387  return getAddExpr(getUMinExpr(LS, RS), LDiff);
5388  }
5389  break;
5390  case ICmpInst::ICMP_NE:
5391  // n != 0 ? n+x : 1+x -> umax(n, 1)+x
5392  if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) &&
5393  isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
5394  const SCEV *One = getOne(I->getType());
5395  const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
5396  const SCEV *LA = getSCEV(TrueVal);
5397  const SCEV *RA = getSCEV(FalseVal);
5398  const SCEV *LDiff = getMinusSCEV(LA, LS);
5399  const SCEV *RDiff = getMinusSCEV(RA, One);
5400  if (LDiff == RDiff)
5401  return getAddExpr(getUMaxExpr(One, LS), LDiff);
5402  }
5403  break;
5404  case ICmpInst::ICMP_EQ:
5405  // n == 0 ? 1+x : n+x -> umax(n, 1)+x
5406  if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->