85#include "llvm/Config/llvm-config.h"
136using namespace PatternMatch;
137using namespace SCEVPatternMatch;
139#define DEBUG_TYPE "scalar-evolution"
142 "Number of loop exits with predictable exit counts");
144 "Number of loop exits without predictable exit counts");
146 "Number of loops with trip counts computed by force");
148#ifdef EXPENSIVE_CHECKS
156 cl::desc(
"Maximum number of iterations SCEV will "
157 "symbolically execute a constant "
163 cl::desc(
"Verify ScalarEvolution's backedge taken counts (slow)"));
166 cl::desc(
"Enable stricter verification with -verify-scev is passed"));
170 cl::desc(
"Verify IR correctness when making sensitive SCEV queries (slow)"),
175 cl::desc(
"Threshold for inlining multiplication operands into a SCEV"),
180 cl::desc(
"Threshold for inlining addition operands into a SCEV"),
184 "scalar-evolution-max-scev-compare-depth",
cl::Hidden,
185 cl::desc(
"Maximum depth of recursive SCEV complexity comparisons"),
189 "scalar-evolution-max-scev-operations-implication-depth",
cl::Hidden,
190 cl::desc(
"Maximum depth of recursive SCEV operations implication analysis"),
194 "scalar-evolution-max-value-compare-depth",
cl::Hidden,
195 cl::desc(
"Maximum depth of recursive value complexity comparisons"),
200 cl::desc(
"Maximum depth of recursive arithmetics"),
204 "scalar-evolution-max-constant-evolving-depth",
cl::Hidden,
209 cl::desc(
"Maximum depth of recursive SExt/ZExt/Trunc"),
214 cl::desc(
"Max coefficients in AddRec during evolving"),
219 cl::desc(
"Size of the expression which is considered huge"),
224 cl::desc(
"Threshold for switching to iteratively computing SCEV ranges"),
228 "scalar-evolution-max-loop-guard-collection-depth",
cl::Hidden,
229 cl::desc(
"Maximum depth for recursive loop guard collection"),
cl::init(1));
234 cl::desc(
"When printing analysis, include information on every instruction"));
237 "scalar-evolution-use-expensive-range-sharpening",
cl::Hidden,
239 cl::desc(
"Use more powerful methods of sharpening expression ranges. May "
240 "be costly in terms of compile time"));
243 "scalar-evolution-max-scc-analysis-depth",
cl::Hidden,
244 cl::desc(
"Maximum amount of nodes to process while searching SCEVUnknown "
245 "Phi strongly connected components"),
250 cl::desc(
"Handle <= and >= in finite loops"),
254 "scalar-evolution-use-context-for-no-wrap-flag-strenghening",
cl::Hidden,
255 cl::desc(
"Infer nuw/nsw flags using context where suitable"),
266#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
276 cast<SCEVConstant>(
this)->getValue()->printAsOperand(
OS,
false);
284 OS <<
"(ptrtoint " << *
Op->getType() <<
" " << *
Op <<
" to "
285 << *PtrToInt->
getType() <<
")";
291 OS <<
"(trunc " << *
Op->getType() <<
" " << *
Op <<
" to "
298 OS <<
"(zext " << *
Op->getType() <<
" " << *
Op <<
" to "
305 OS <<
"(sext " << *
Op->getType() <<
" " << *
Op <<
" to "
334 const char *OpStr =
nullptr;
347 OpStr =
" umin_seq ";
353 ListSeparator LS(OpStr);
377 cast<SCEVUnknown>(
this)->getValue()->printAsOperand(
OS,
false);
380 OS <<
"***COULDNOTCOMPUTE***";
389 return cast<SCEVConstant>(
this)->getType();
391 return cast<SCEVVScale>(
this)->getType();
396 return cast<SCEVCastExpr>(
this)->getType();
398 return cast<SCEVAddRecExpr>(
this)->getType();
400 return cast<SCEVMulExpr>(
this)->getType();
405 return cast<SCEVMinMaxExpr>(
this)->getType();
407 return cast<SCEVSequentialMinMaxExpr>(
this)->getType();
409 return cast<SCEVAddExpr>(
this)->getType();
411 return cast<SCEVUDivExpr>(
this)->getType();
413 return cast<SCEVUnknown>(
this)->getType();
430 return cast<SCEVCastExpr>(
this)->operands();
439 return cast<SCEVNAryExpr>(
this)->operands();
441 return cast<SCEVUDivExpr>(
this)->operands();
456 if (!
Mul)
return false;
460 if (!SC)
return false;
463 return SC->getAPInt().isNegative();
478 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
480 UniqueSCEVs.InsertNode(S, IP);
499 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
502 UniqueSCEVs.InsertNode(S, IP);
521 "Must be a non-bit-width-changing pointer-to-integer cast!");
533 "Cannot truncate non-integer value!");
540 "Cannot zero extend non-integer value!");
547 "Cannot sign extend non-integer value!");
550void SCEVUnknown::deleted() {
552 SE->forgetMemoizedResults(
this);
555 SE->UniqueSCEVs.RemoveNode(
this);
561void SCEVUnknown::allUsesReplacedWith(
Value *New) {
563 SE->forgetMemoizedResults(
this);
566 SE->UniqueSCEVs.RemoveNode(
this);
588 if (LIsPointer != RIsPointer)
589 return (
int)LIsPointer - (int)RIsPointer;
594 return (
int)LID - (int)RID;
597 if (
const auto *LA = dyn_cast<Argument>(LV)) {
598 const auto *
RA = cast<Argument>(RV);
599 unsigned LArgNo = LA->getArgNo(), RArgNo =
RA->getArgNo();
600 return (
int)LArgNo - (int)RArgNo;
603 if (
const auto *LGV = dyn_cast<GlobalValue>(LV)) {
604 const auto *RGV = cast<GlobalValue>(RV);
606 const auto IsGVNameSemantic = [&](
const GlobalValue *GV) {
607 auto LT = GV->getLinkage();
614 if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
615 return LGV->getName().compare(RGV->getName());
620 if (
const auto *LInst = dyn_cast<Instruction>(LV)) {
621 const auto *RInst = cast<Instruction>(RV);
626 if (LParent != RParent) {
629 if (LDepth != RDepth)
630 return (
int)LDepth - (int)RDepth;
634 unsigned LNumOps = LInst->getNumOperands(),
635 RNumOps = RInst->getNumOperands();
636 if (LNumOps != RNumOps)
637 return (
int)LNumOps - (int)RNumOps;
639 for (
unsigned Idx :
seq(LNumOps)) {
655static std::optional<int>
666 return (
int)LType - (int)RType;
696 unsigned LBitWidth = LA.
getBitWidth(), RBitWidth =
RA.getBitWidth();
697 if (LBitWidth != RBitWidth)
698 return (
int)LBitWidth - (int)RBitWidth;
699 return LA.
ult(
RA) ? -1 : 1;
703 const auto *LTy = cast<IntegerType>(cast<SCEVVScale>(
LHS)->
getType());
704 const auto *RTy = cast<IntegerType>(cast<SCEVVScale>(
RHS)->
getType());
705 return LTy->getBitWidth() - RTy->getBitWidth();
716 if (LLoop != RLoop) {
718 assert(LHead != RHead &&
"Two loops share the same header?");
722 "No dominance between recurrences used by one SCEV?");
745 unsigned LNumOps = LOps.
size(), RNumOps = ROps.
size();
746 if (LNumOps != RNumOps)
747 return (
int)LNumOps - (int)RNumOps;
749 for (
unsigned i = 0; i != LNumOps; ++i) {
776 if (Ops.
size() < 2)
return;
783 return Complexity && *Complexity < 0;
785 if (Ops.
size() == 2) {
789 if (IsLessComplex(
RHS,
LHS))
796 return IsLessComplex(
LHS,
RHS);
803 for (
unsigned i = 0, e = Ops.
size(); i != e-2; ++i) {
804 const SCEV *S = Ops[i];
809 for (
unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
814 if (i == e-2)
return;
836template <
typename FoldT,
typename IsIdentityT,
typename IsAbsorberT>
840 IsIdentityT IsIdentity, IsAbsorberT IsAbsorber) {
844 if (
const auto *
C = dyn_cast<SCEVConstant>(
Op)) {
848 Folded = cast<SCEVConstant>(
857 assert(Folded &&
"Must have folded value");
861 if (Folded && IsAbsorber(Folded->
getAPInt()))
865 if (Folded && !IsIdentity(Folded->
getAPInt()))
868 return Ops.
size() == 1 ? Ops[0] :
nullptr;
943 APInt OddFactorial(W, 1);
945 for (
unsigned i = 3; i <= K; ++i) {
948 OddFactorial *= (i >> TwoFactors);
952 unsigned CalculationBits = W +
T;
966 for (
unsigned i = 1; i != K; ++i) {
999 for (
unsigned i = 1, e =
Operands.size(); i != e; ++i) {
1004 if (isa<SCEVCouldNotCompute>(Coeff))
1019 "getLosslessPtrToIntExpr() should self-recurse at most once.");
1023 if (!
Op->getType()->isPointerTy())
1034 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
1053 if (
auto *U = dyn_cast<SCEVUnknown>(
Op)) {
1058 if (isa<ConstantPointerNull>(U->getValue()))
1064 SCEV *S =
new (SCEVAllocator)
1066 UniqueSCEVs.InsertNode(S, IP);
1071 assert(
Depth == 0 &&
"getLosslessPtrToIntExpr() should not self-recurse for "
1072 "non-SCEVUnknown's.");
1084 class SCEVPtrToIntSinkingRewriter
1092 SCEVPtrToIntSinkingRewriter
Rewriter(SE);
1102 return Base::visit(S);
1107 bool Changed =
false;
1117 bool Changed =
false;
1127 "Should only reach pointer-typed SCEVUnknown's.");
1133 const SCEV *IntOp = SCEVPtrToIntSinkingRewriter::rewrite(
Op, *
this);
1135 "We must have succeeded in sinking the cast, "
1136 "and ending up with an integer-typed expression!");
1144 if (isa<SCEVCouldNotCompute>(IntOp))
1153 "This is not a truncating conversion!");
1155 "This is not a conversion to a SCEVable type!");
1156 assert(!
Op->getType()->isPointerTy() &&
"Can't truncate pointer!");
1164 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1186 UniqueSCEVs.InsertNode(S, IP);
1195 if (isa<SCEVAddExpr>(
Op) || isa<SCEVMulExpr>(
Op)) {
1196 auto *CommOp = cast<SCEVCommutativeExpr>(
Op);
1198 unsigned numTruncs = 0;
1199 for (
unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
1202 if (!isa<SCEVIntegralCastExpr>(CommOp->getOperand(i)) &&
1203 isa<SCEVTruncateExpr>(S))
1207 if (numTruncs < 2) {
1208 if (isa<SCEVAddExpr>(
Op))
1210 if (isa<SCEVMulExpr>(
Op))
1217 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
1224 for (
const SCEV *
Op : AddRec->operands())
1239 UniqueSCEVs.InsertNode(S, IP);
1279struct ExtendOpTraitsBase {
1285template <
typename ExtendOp>
struct ExtendOpTraits {
1301 static const GetExtendExprTy GetExtendExpr;
1303 static const SCEV *getOverflowLimitForStep(
const SCEV *Step,
1310const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1317 static const GetExtendExprTy GetExtendExpr;
1319 static const SCEV *getOverflowLimitForStep(
const SCEV *Step,
1326const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1338template <
typename ExtendOpTy>
1341 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1342 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1349 const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
1358 for (
auto It = DiffOps.
begin(); It != DiffOps.
end(); ++It)
1371 auto PreStartFlags =
1389 const SCEV *OperandExtendedStart =
1391 (SE->*GetExtendExpr)(Step, WideTy,
Depth));
1392 if ((SE->*GetExtendExpr)(Start, WideTy,
Depth) == OperandExtendedStart) {
1404 const SCEV *OverflowLimit =
1405 ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);
1407 if (OverflowLimit &&
1415template <
typename ExtendOpTy>
1419 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1421 const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE,
Depth);
1427 (SE->*GetExtendExpr)(PreStart, Ty,
Depth));
1462template <
typename ExtendOpTy>
1463bool ScalarEvolution::proveNoWrapByVaryingStart(
const SCEV *Start,
1466 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1472 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
1478 for (
unsigned Delta : {-2, -1, 1, 2}) {
1483 ID.AddPointer(PreStart);
1484 ID.AddPointer(Step);
1492 if (PreAR && PreAR->getNoWrapFlags(WrapType)) {
1495 const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
1496 DeltaS, &Pred,
this);
1514 const unsigned BitWidth =
C.getBitWidth();
1532 const APInt &ConstantStart,
1551 auto &UserIDs = FoldCacheUser[
I.first->second];
1552 assert(
count(UserIDs,
ID) == 1 &&
"unexpected duplicates in UserIDs");
1553 for (
unsigned I = 0;
I != UserIDs.size(); ++
I)
1554 if (UserIDs[
I] ==
ID) {
1559 I.first->second = S;
1561 FoldCacheUser[S].push_back(
ID);
1567 "This is not an extending conversion!");
1569 "This is not a conversion to a SCEVable type!");
1570 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1574 auto Iter = FoldCache.find(
ID);
1575 if (Iter != FoldCache.end())
1576 return Iter->second;
1579 if (!isa<SCEVZeroExtendExpr>(S))
1587 "This is not an extending conversion!");
1589 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1606 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1610 UniqueSCEVs.InsertNode(S, IP);
1619 const SCEV *
X = ST->getOperand();
1633 if (AR->isAffine()) {
1634 const SCEV *Start = AR->getStart();
1635 const SCEV *Step = AR->getStepRecurrence(*
this);
1637 const Loop *L = AR->getLoop();
1641 if (AR->hasNoUnsignedWrap()) {
1643 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty,
this,
Depth + 1);
1657 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1662 const SCEV *CastedMaxBECount =
1666 if (MaxBECount == RecastedMaxBECount) {
1676 const SCEV *WideMaxBECount =
1678 const SCEV *OperandExtendedAdd =
1684 if (ZAdd == OperandExtendedAdd) {
1688 Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty,
this,
1695 OperandExtendedAdd =
1701 if (ZAdd == OperandExtendedAdd) {
1706 Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty,
this,
1721 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
1724 auto NewFlags = proveNoUnsignedWrapViaInduction(AR);
1726 if (AR->hasNoUnsignedWrap()) {
1732 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty,
this,
Depth + 1);
1749 Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty,
this,
1760 if (
const auto *SC = dyn_cast<SCEVConstant>(Start)) {
1761 const APInt &
C = SC->getAPInt();
1765 const SCEV *SResidual =
1774 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
1777 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty,
this,
Depth + 1);
1793 if (
auto *Div = dyn_cast<SCEVUDivExpr>(
Op))
1797 if (
auto *SA = dyn_cast<SCEVAddExpr>(
Op)) {
1799 if (SA->hasNoUnsignedWrap()) {
1803 for (
const auto *
Op : SA->operands())
1816 if (
const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
1820 const SCEV *SResidual =
1830 if (
auto *SM = dyn_cast<SCEVMulExpr>(
Op)) {
1832 if (SM->hasNoUnsignedWrap()) {
1836 for (
const auto *
Op : SM->operands())
1853 if (SM->getNumOperands() == 2)
1854 if (
auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0)))
1855 if (MulLHS->getAPInt().isPowerOf2())
1856 if (
auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) {
1858 MulLHS->getAPInt().logBase2();
1870 if (isa<SCEVUMinExpr>(
Op) || isa<SCEVUMaxExpr>(
Op)) {
1871 auto *
MinMax = cast<SCEVMinMaxExpr>(
Op);
1873 for (
auto *Operand :
MinMax->operands())
1875 if (isa<SCEVUMinExpr>(
MinMax))
1881 if (
auto *
MinMax = dyn_cast<SCEVSequentialMinMaxExpr>(
Op)) {
1882 assert(isa<SCEVSequentialUMinExpr>(
MinMax) &&
"Not supported!");
1884 for (
auto *Operand :
MinMax->operands())
1891 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1894 UniqueSCEVs.InsertNode(S, IP);
1902 "This is not an extending conversion!");
1904 "This is not a conversion to a SCEVable type!");
1905 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1909 auto Iter = FoldCache.find(
ID);
1910 if (Iter != FoldCache.end())
1911 return Iter->second;
1914 if (!isa<SCEVSignExtendExpr>(S))
1922 "This is not an extending conversion!");
1924 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1946 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1951 UniqueSCEVs.InsertNode(S, IP);
1960 const SCEV *
X = ST->getOperand();
1969 if (
auto *SA = dyn_cast<SCEVAddExpr>(
Op)) {
1971 if (SA->hasNoSignedWrap()) {
1975 for (
const auto *
Op : SA->operands())
1989 if (
const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
1993 const SCEV *SResidual =
2007 if (AR->isAffine()) {
2008 const SCEV *Start = AR->getStart();
2009 const SCEV *Step = AR->getStepRecurrence(*
this);
2011 const Loop *L = AR->getLoop();
2015 if (AR->hasNoSignedWrap()) {
2017 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty,
this,
Depth + 1);
2031 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
2037 const SCEV *CastedMaxBECount =
2041 if (MaxBECount == RecastedMaxBECount) {
2051 const SCEV *WideMaxBECount =
2053 const SCEV *OperandExtendedAdd =
2059 if (SAdd == OperandExtendedAdd) {
2063 Start = getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty,
this,
2070 OperandExtendedAdd =
2076 if (SAdd == OperandExtendedAdd) {
2088 Start = getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty,
this,
2096 auto NewFlags = proveNoSignedWrapViaInduction(AR);
2098 if (AR->hasNoSignedWrap()) {
2104 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty,
this,
Depth + 1);
2112 if (
const auto *SC = dyn_cast<SCEVConstant>(Start)) {
2113 const APInt &
C = SC->getAPInt();
2117 const SCEV *SResidual =
2126 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
2129 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty,
this,
Depth + 1);
2142 if (isa<SCEVSMinExpr>(
Op) || isa<SCEVSMaxExpr>(
Op)) {
2143 auto *
MinMax = cast<SCEVMinMaxExpr>(
Op);
2145 for (
auto *Operand :
MinMax->operands())
2147 if (isa<SCEVSMinExpr>(
MinMax))
2154 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
2157 UniqueSCEVs.InsertNode(S, IP);
2183 "This is not an extending conversion!");
2185 "This is not a conversion to a SCEVable type!");
2190 if (SC->getAPInt().isNegative())
2195 const SCEV *NewOp =
T->getOperand();
2203 if (!isa<SCEVZeroExtendExpr>(ZExt))
2208 if (!isa<SCEVSignExtendExpr>(SExt))
2214 for (
const SCEV *
Op : AR->operands())
2220 if (isa<SCEVSMaxExpr>(
Op))
2253 APInt &AccumulatedConstant,
2256 bool Interesting =
false;
2260 while (
const SCEVConstant *
C = dyn_cast<SCEVConstant>(Ops[i])) {
2263 if (Scale != 1 || AccumulatedConstant != 0 ||
C->getValue()->isZero())
2265 AccumulatedConstant += Scale *
C->getAPInt();
2270 for (; i != Ops.
size(); ++i) {
2272 if (
Mul && isa<SCEVConstant>(
Mul->getOperand(0))) {
2274 Scale * cast<SCEVConstant>(
Mul->getOperand(0))->getAPInt();
2275 if (
Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(
Mul->getOperand(1))) {
2280 Add->operands(), NewScale, SE);
2286 auto Pair = M.insert({Key, NewScale});
2290 Pair.first->second += NewScale;
2298 std::pair<DenseMap<const SCEV *, APInt>::iterator,
bool> Pair =
2299 M.insert({Ops[i], Scale});
2303 Pair.first->second += Scale;
2322 case Instruction::Add:
2325 case Instruction::Sub:
2328 case Instruction::Mul:
2338 auto *NarrowTy = cast<IntegerType>(
LHS->
getType());
2342 const SCEV *
A = (this->*Extension)(
2344 const SCEV *LHSB = (this->*Extension)(
LHS, WideTy, 0);
2345 const SCEV *RHSB = (this->*Extension)(
RHS, WideTy, 0);
2353 if (BinOp == Instruction::Mul)
2355 auto *RHSC = dyn_cast<SCEVConstant>(
RHS);
2359 APInt C = RHSC->getAPInt();
2360 unsigned NumBits =
C.getBitWidth();
2361 bool IsSub = (BinOp == Instruction::Sub);
2362 bool IsNegativeConst = (
Signed &&
C.isNegative());
2364 bool OverflowDown = IsSub ^ IsNegativeConst;
2366 if (IsNegativeConst) {
2379 APInt Limit = Min + Magnitude;
2385 APInt Limit = Max - Magnitude;
2390std::optional<SCEV::NoWrapFlags>
2395 return std::nullopt;
2404 bool Deduced =
false;
2406 if (OBO->
getOpcode() != Instruction::Add &&
2409 return std::nullopt;
2418 false,
LHS,
RHS, CtxI)) {
2432 return std::nullopt;
2442 using namespace std::placeholders;
2449 assert(CanAnalyze &&
"don't call from other places!");
2456 auto IsKnownNonNegative = [&](
const SCEV *S) {
2466 if (SignOrUnsignWrap != SignOrUnsignMask &&
2468 isa<SCEVConstant>(Ops[0])) {
2473 return Instruction::Add;
2475 return Instruction::Mul;
2481 const APInt &
C = cast<SCEVConstant>(Ops[0])->getAPInt();
2486 Opcode,
C, OBO::NoSignedWrap);
2494 Opcode,
C, OBO::NoUnsignedWrap);
2504 Ops[0]->isZero() && IsKnownNonNegative(Ops[1]))
2510 if (
auto *UDiv = dyn_cast<SCEVUDivExpr>(Ops[0]))
2511 if (UDiv->getOperand(1) == Ops[1])
2513 if (
auto *UDiv = dyn_cast<SCEVUDivExpr>(Ops[1]))
2514 if (UDiv->getOperand(1) == Ops[0])
2530 "only nuw or nsw allowed");
2532 if (Ops.
size() == 1)
return Ops[0];
2535 for (
unsigned i = 1, e = Ops.
size(); i != e; ++i)
2537 "SCEVAddExpr operand types don't match!");
2540 assert(NumPtrs <= 1 &&
"add has at most one pointer operand");
2545 [](
const APInt &C1,
const APInt &C2) {
return C1 + C2; },
2546 [](
const APInt &
C) {
return C.isZero(); },
2547 [](
const APInt &
C) {
return false; });
2551 unsigned Idx = isa<SCEVConstant>(Ops[0]) ? 1 : 0;
2560 return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
2565 if (
Add->getNoWrapFlags(OrigFlags) != OrigFlags)
2566 Add->setNoWrapFlags(ComputeFlags(Ops));
2573 Type *Ty = Ops[0]->getType();
2574 bool FoundMatch =
false;
2575 for (
unsigned i = 0, e = Ops.
size(); i != e-1; ++i)
2576 if (Ops[i] == Ops[i+1]) {
2579 while (i+Count != e && Ops[i+Count] == Ops[i])
2584 if (Ops.
size() == Count)
2588 --i; e -= Count - 1;
2598 auto FindTruncSrcType = [&]() ->
Type * {
2603 if (
auto *
T = dyn_cast<SCEVTruncateExpr>(Ops[
Idx]))
2604 return T->getOperand()->getType();
2605 if (
const auto *
Mul = dyn_cast<SCEVMulExpr>(Ops[
Idx])) {
2606 const auto *LastOp =
Mul->getOperand(
Mul->getNumOperands() - 1);
2607 if (
const auto *
T = dyn_cast<SCEVTruncateExpr>(LastOp))
2608 return T->getOperand()->getType();
2612 if (
auto *SrcType = FindTruncSrcType()) {
2617 for (
const SCEV *
Op : Ops) {
2619 if (
T->getOperand()->getType() != SrcType) {
2626 }
else if (
const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(
Op)) {
2628 for (
unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
2630 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
2631 if (
T->getOperand()->getType() != SrcType) {
2636 }
else if (
const auto *
C = dyn_cast<SCEVConstant>(M->getOperand(j))) {
2654 if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
2659 if (Ops.
size() == 2) {
2663 const SCEV *
A = Ops[0];
2664 const SCEV *
B = Ops[1];
2665 auto *AddExpr = dyn_cast<SCEVAddExpr>(
B);
2666 auto *
C = dyn_cast<SCEVConstant>(
A);
2667 if (AddExpr &&
C && isa<SCEVConstant>(AddExpr->getOperand(0))) {
2668 auto C1 = cast<SCEVConstant>(AddExpr->getOperand(0))->getAPInt();
2669 auto C2 =
C->getAPInt();
2672 APInt ConstAdd = C1 + C2;
2673 auto AddFlags = AddExpr->getNoWrapFlags();
2699 if (Ops.
size() == 2) {
2701 if (
Mul &&
Mul->getNumOperands() == 2 &&
2702 Mul->getOperand(0)->isAllOnesValue()) {
2705 if (matchURem(
Mul->getOperand(1),
X,
Y) &&
X == Ops[1]) {
2717 bool DeletedAdd =
false;
2731 CommonFlags =
maskFlags(CommonFlags,
Add->getNoWrapFlags());
2747 if (
Idx < Ops.
size() && isa<SCEVMulExpr>(Ops[
Idx])) {
2754 struct APIntCompare {
2763 std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
2764 for (
const SCEV *NewOp : NewOps)
2765 MulOpLists[M.find(NewOp)->second].push_back(NewOp);
2768 if (AccumulatedConstant != 0)
2770 for (
auto &MulOp : MulOpLists) {
2771 if (MulOp.first == 1) {
2773 }
else if (MulOp.first != 0) {
2782 if (Ops.
size() == 1)
2791 for (;
Idx < Ops.
size() && isa<SCEVMulExpr>(Ops[
Idx]); ++
Idx) {
2793 for (
unsigned MulOp = 0, e =
Mul->getNumOperands(); MulOp != e; ++MulOp) {
2794 const SCEV *MulOpSCEV =
Mul->getOperand(MulOp);
2795 if (isa<SCEVConstant>(MulOpSCEV))
2797 for (
unsigned AddOp = 0, e = Ops.
size(); AddOp != e; ++AddOp)
2798 if (MulOpSCEV == Ops[AddOp]) {
2800 const SCEV *InnerMul =
Mul->getOperand(MulOp == 0);
2801 if (
Mul->getNumOperands() != 2) {
2805 Mul->operands().take_front(MulOp));
2813 if (Ops.
size() == 2)
return OuterMul;
2826 for (
unsigned OtherMulIdx =
Idx+1;
2827 OtherMulIdx < Ops.
size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
2829 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
2833 OMulOp != e; ++OMulOp)
2834 if (OtherMul->
getOperand(OMulOp) == MulOpSCEV) {
2836 const SCEV *InnerMul1 =
Mul->getOperand(MulOp == 0);
2837 if (
Mul->getNumOperands() != 2) {
2839 Mul->operands().take_front(MulOp));
2846 OtherMul->
operands().take_front(OMulOp));
2851 const SCEV *InnerMulSum =
2855 if (Ops.
size() == 2)
return OuterMul;
2872 for (;
Idx < Ops.
size() && isa<SCEVAddRecExpr>(Ops[
Idx]); ++
Idx) {
2878 for (
unsigned i = 0, e = Ops.
size(); i != e; ++i)
2886 if (!LIOps.
empty()) {
2911 auto *DefI = getDefiningScopeBound(LIOps);
2913 if (!isGuaranteedToTransferExecutionTo(DefI, ReachI))
2925 if (Ops.
size() == 1)
return NewRec;
2928 for (
unsigned i = 0;; ++i)
2929 if (Ops[i] == AddRec) {
2939 for (
unsigned OtherIdx =
Idx+1;
2940 OtherIdx < Ops.
size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2945 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(),
2947 "AddRecExprs are not sorted in reverse dominance order?");
2948 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
2951 for (; OtherIdx != Ops.
size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2953 const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
2954 if (OtherAddRec->getLoop() == AddRecLoop) {
2955 for (
unsigned i = 0, e = OtherAddRec->getNumOperands();
2957 if (i >= AddRecOps.
size()) {
2958 append_range(AddRecOps, OtherAddRec->operands().drop_front(i));
2962 AddRecOps[i], OtherAddRec->getOperand(i)};
2965 Ops.
erase(Ops.
begin() + OtherIdx); --OtherIdx;
2980 return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
2988 for (
const SCEV *
Op : Ops)
2992 static_cast<SCEVAddExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
2995 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2996 S =
new (SCEVAllocator)
2998 UniqueSCEVs.InsertNode(S, IP);
3010 for (
const SCEV *
Op : Ops)
3018 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
3019 S =
new (SCEVAllocator)
3021 UniqueSCEVs.InsertNode(S, IP);
3022 LoopUsers[
L].push_back(S);
3034 for (
const SCEV *
Op : Ops)
3038 static_cast<SCEVMulExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
3041 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
3042 S =
new (SCEVAllocator)
SCEVMulExpr(
ID.Intern(SCEVAllocator),
3044 UniqueSCEVs.InsertNode(S, IP);
3053 if (j > 1 && k / j != i) Overflow =
true;
3069 if (n == 0 || n == k)
return 1;
3070 if (k > n)
return 0;
3076 for (
uint64_t i = 1; i <= k; ++i) {
3077 r =
umul_ov(r, n-(i-1), Overflow);
3086 struct FindConstantInAddMulChain {
3087 bool FoundConstant =
false;
3089 bool follow(
const SCEV *S) {
3090 FoundConstant |= isa<SCEVConstant>(S);
3091 return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S);
3094 bool isDone()
const {
3095 return FoundConstant;
3099 FindConstantInAddMulChain
F;
3101 ST.visitAll(StartExpr);
3102 return F.FoundConstant;
3110 "only nuw or nsw allowed");
3112 if (Ops.
size() == 1)
return Ops[0];
3114 Type *ETy = Ops[0]->getType();
3116 for (
unsigned i = 1, e = Ops.
size(); i != e; ++i)
3118 "SCEVMulExpr operand types don't match!");
3123 [](
const APInt &C1,
const APInt &C2) {
return C1 * C2; },
3124 [](
const APInt &
C) {
return C.isOne(); },
3125 [](
const APInt &
C) {
return C.isZero(); });
3136 return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
3141 if (
Mul->getNoWrapFlags(OrigFlags) != OrigFlags)
3142 Mul->setNoWrapFlags(ComputeFlags(Ops));
3146 if (
const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3147 if (Ops.
size() == 2) {
3164 if (Ops[0]->isAllOnesValue()) {
3169 bool AnyFolded =
false;
3170 for (
const SCEV *AddOp :
Add->operands()) {
3173 if (!isa<SCEVMulExpr>(
Mul)) AnyFolded =
true;
3178 }
else if (
const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
3197 AddRec->getNoWrapFlags(FlagsMask));
3210 bool DeletedMul =
false;
3235 for (;
Idx < Ops.
size() && isa<SCEVAddRecExpr>(Ops[
Idx]); ++
Idx) {
3240 for (
unsigned i = 0, e = Ops.
size(); i != e; ++i)
3248 if (!LIOps.
empty()) {
3261 for (
unsigned i = 0, e = AddRec->
getNumOperands(); i != e; ++i) {
3277 if (Ops.
size() == 1)
return NewRec;
3280 for (
unsigned i = 0;; ++i)
3281 if (Ops[i] == AddRec) {
3302 bool OpsModified =
false;
3303 for (
unsigned OtherIdx =
Idx+1;
3304 OtherIdx != Ops.
size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
3307 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
3317 bool Overflow =
false;
3323 SmallVector <const SCEV *, 7> SumOps;
3324 for (
int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
3328 z < ze && !Overflow; ++z) {
3331 if (LargerThan64Bits)
3332 Coeff =
umul_ov(Coeff1, Coeff2, Overflow);
3334 Coeff = Coeff1*Coeff2;
3349 if (Ops.
size() == 2)
return NewAddRec;
3350 Ops[
Idx] = NewAddRec;
3351 Ops.
erase(Ops.
begin() + OtherIdx); --OtherIdx;
3353 AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
3367 return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
3375 "SCEVURemExpr operand types don't match!");
3380 if (RHSC->getValue()->isOne())
3384 if (RHSC->getAPInt().isPowerOf2()) {
3403 "SCEVUDivExpr operand can't be pointer!");
3405 "SCEVUDivExpr operand types don't match!");
3412 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
3420 if (RHSC->getValue()->isOne())
3425 if (!RHSC->getValue()->isZero()) {
3430 unsigned LZ = RHSC->getAPInt().countl_zero();
3434 if (!RHSC->getAPInt().isPowerOf2())
3440 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*
this))) {
3442 const APInt &StepInt = Step->getAPInt();
3443 const APInt &DivInt = RHSC->getAPInt();
3444 if (!StepInt.
urem(DivInt) &&
3450 for (
const SCEV *
Op : AR->operands())
3457 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
3458 if (StartC && !DivInt.
urem(StepInt) &&
3464 const APInt &StartRem = StartInt.
urem(StepInt);
3465 if (StartRem != 0) {
3466 const SCEV *NewLHS =
3469 if (
LHS != NewLHS) {
3479 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
3488 for (
const SCEV *
Op : M->operands())
3492 for (
unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
3493 const SCEV *
Op = M->getOperand(i);
3495 if (!isa<SCEVUDivExpr>(Div) &&
getMulExpr(Div, RHSC) ==
Op) {
3505 if (
auto *DivisorConstant =
3506 dyn_cast<SCEVConstant>(OtherDiv->getRHS())) {
3507 bool Overflow =
false;
3509 DivisorConstant->getAPInt().
umul_ov(RHSC->getAPInt(), Overflow);
3520 for (
const SCEV *
Op :
A->operands())
3524 for (
unsigned i = 0, e =
A->getNumOperands(); i != e; ++i) {
3526 if (isa<SCEVUDivExpr>(
Op) ||
3531 if (
Operands.size() ==
A->getNumOperands())
3538 return getConstant(LHSC->getAPInt().udiv(RHSC->getAPInt()));
3543 if (
const auto *AE = dyn_cast<SCEVAddExpr>(
LHS);
3544 AE && AE->getNumOperands() == 2) {
3545 if (
const auto *VC = dyn_cast<SCEVConstant>(AE->getOperand(0))) {
3546 const APInt &NegC = VC->getAPInt();
3548 const auto *MME = dyn_cast<SCEVSMaxExpr>(AE->getOperand(1));
3549 if (MME && MME->getNumOperands() == 2 &&
3550 isa<SCEVConstant>(MME->getOperand(0)) &&
3551 cast<SCEVConstant>(MME->getOperand(0))->getAPInt() == -NegC &&
3552 MME->getOperand(1) ==
RHS)
3561 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
3564 UniqueSCEVs.InsertNode(S, IP);
3594 if (!
Mul || !
Mul->hasNoUnsignedWrap())
3600 if (
const auto *LHSCst = dyn_cast<SCEVConstant>(
Mul->getOperand(0))) {
3601 if (LHSCst == RHSCst) {
3609 APInt Factor =
gcd(LHSCst, RHSCst);
3612 cast<SCEVConstant>(
getConstant(LHSCst->getAPInt().udiv(Factor)));
3614 cast<SCEVConstant>(
getConstant(RHSCst->getAPInt().udiv(Factor)));
3620 Mul = dyn_cast<SCEVMulExpr>(
LHS);
3627 for (
int i = 0, e =
Mul->getNumOperands(); i != e; ++i) {
3628 if (
Mul->getOperand(i) ==
RHS) {
3646 if (
const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
3647 if (StepChrec->getLoop() == L) {
3666 "SCEVAddRecExpr operand types don't match!");
3667 assert(!
Op->getType()->isPointerTy() &&
"Step must be integer");
3671 "SCEVAddRecExpr operand is not available at loop entry!");
3689 const Loop *NestedLoop = NestedAR->getLoop();
3690 if (L->contains(NestedLoop)
3695 Operands[0] = NestedAR->getStart();
3699 bool AllInvariant =
all_of(
3711 AllInvariant =
all_of(NestedOperands, [&](
const SCEV *
Op) {
3722 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
3732 return getOrCreateAddRecExpr(
Operands, L, Flags);
3749 auto *GEPI = dyn_cast<Instruction>(
GEP);
3750 if (!GEPI || !isSCEVExprNeverPoison(GEPI))
3761 bool FirstIter =
true;
3763 for (
const SCEV *IndexExpr : IndexExprs) {
3765 if (
StructType *STy = dyn_cast<StructType>(CurTy)) {
3767 ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue();
3768 unsigned FieldNo = Index->getZExtValue();
3770 Offsets.push_back(FieldOffset);
3773 CurTy = STy->getTypeAtIndex(Index);
3777 assert(isa<PointerType>(CurTy) &&
3778 "The first index of a GEP indexes a pointer");
3779 CurTy =
GEP->getSourceElementType();
3790 const SCEV *LocalOffset =
getMulExpr(IndexExpr, ElementSize, OffsetWrap);
3791 Offsets.push_back(LocalOffset);
3796 if (Offsets.empty())
3809 "GEP should not change type mid-flight.");
3813SCEV *ScalarEvolution::findExistingSCEVInCache(
SCEVTypes SCEVType,
3816 ID.AddInteger(SCEVType);
3817 for (
const SCEV *
Op : Ops)
3820 return UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
3830 assert(SCEVMinMaxExpr::isMinMaxType(Kind) &&
"Not a SCEVMinMaxExpr!");
3831 assert(!Ops.
empty() &&
"Cannot get empty (u|s)(min|max)!");
3832 if (Ops.
size() == 1)
return Ops[0];
3835 for (
unsigned i = 1, e = Ops.
size(); i != e; ++i) {
3837 "Operand types don't match!");
3840 "min/max should be consistently pointerish");
3866 return IsSigned ?
C.isMinSignedValue() :
C.isMinValue();
3868 return IsSigned ?
C.isMaxSignedValue() :
C.isMaxValue();
3873 return IsSigned ?
C.isMaxSignedValue() :
C.isMaxValue();
3875 return IsSigned ?
C.isMinSignedValue() :
C.isMinValue();
3881 if (
const SCEV *S = findExistingSCEVInCache(Kind, Ops)) {
3887 while (
Idx < Ops.
size() && Ops[
Idx]->getSCEVType() < Kind)
3893 bool DeletedAny =
false;
3894 while (Ops[
Idx]->getSCEVType() == Kind) {
3914 for (
unsigned i = 0, e = Ops.
size() - 1; i != e; ++i) {
3915 if (Ops[i] == Ops[i + 1] ||
3916 isKnownViaNonRecursiveReasoning(FirstPred, Ops[i], Ops[i + 1])) {
3922 }
else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[i],
3931 if (Ops.
size() == 1)
return Ops[0];
3933 assert(!Ops.
empty() &&
"Reduced smax down to nothing!");
3938 ID.AddInteger(Kind);
3939 for (
const SCEV *
Op : Ops)
3942 const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
3944 return ExistingSCEV;
3946 std::uninitialized_copy(Ops.
begin(), Ops.
end(), O);
3947 SCEV *S =
new (SCEVAllocator)
3950 UniqueSCEVs.InsertNode(S, IP);
3957class SCEVSequentialMinMaxDeduplicatingVisitor final
3958 :
public SCEVVisitor<SCEVSequentialMinMaxDeduplicatingVisitor,
3959 std::optional<const SCEV *>> {
3960 using RetVal = std::optional<const SCEV *>;
3968 bool canRecurseInto(
SCEVTypes Kind)
const {
3971 return RootKind == Kind || NonSequentialRootKind == Kind;
3974 RetVal visitAnyMinMaxExpr(
const SCEV *S) {
3975 assert((isa<SCEVMinMaxExpr>(S) || isa<SCEVSequentialMinMaxExpr>(S)) &&
3976 "Only for min/max expressions.");
3979 if (!canRecurseInto(Kind))
3982 auto *NAry = cast<SCEVNAryExpr>(S);
3984 bool Changed =
visit(Kind, NAry->operands(), NewOps);
3989 return std::nullopt;
3991 return isa<SCEVSequentialMinMaxExpr>(S)
3998 if (!SeenOps.
insert(S).second)
3999 return std::nullopt;
4000 return Base::visit(S);
4006 : SE(SE), RootKind(RootKind),
4007 NonSequentialRootKind(
4013 bool Changed =
false;
4017 for (
const SCEV *
Op : OrigOps) {
4026 NewOps = std::move(Ops);
4032 RetVal visitVScale(
const SCEVVScale *VScale) {
return VScale; }
4042 RetVal visitAddExpr(
const SCEVAddExpr *Expr) {
return Expr; }
4044 RetVal visitMulExpr(
const SCEVMulExpr *Expr) {
return Expr; }
4046 RetVal visitUDivExpr(
const SCEVUDivExpr *Expr) {
return Expr; }
4048 RetVal visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
return Expr; }
4051 return visitAnyMinMaxExpr(Expr);
4055 return visitAnyMinMaxExpr(Expr);
4059 return visitAnyMinMaxExpr(Expr);
4063 return visitAnyMinMaxExpr(Expr);
4067 return visitAnyMinMaxExpr(Expr);
4070 RetVal visitUnknown(
const SCEVUnknown *Expr) {
return Expr; }
4114struct SCEVPoisonCollector {
4115 bool LookThroughMaybePoisonBlocking;
4117 SCEVPoisonCollector(
bool LookThroughMaybePoisonBlocking)
4118 : LookThroughMaybePoisonBlocking(LookThroughMaybePoisonBlocking) {}
4120 bool follow(
const SCEV *S) {
4121 if (!LookThroughMaybePoisonBlocking &&
4125 if (
auto *SU = dyn_cast<SCEVUnknown>(S)) {
4131 bool isDone()
const {
return false; }
4141 SCEVPoisonCollector PC1(
true);
4146 if (PC1.MaybePoison.empty())
4152 SCEVPoisonCollector PC2(
false);
4162 SCEVPoisonCollector PC(
false);
4185 while (!Worklist.
empty()) {
4187 if (!Visited.
insert(V).second)
4191 if (Visited.
size() > 16)
4196 if (PoisonVals.
contains(V) || ::isGuaranteedNotToBePoison(V))
4199 auto *
I = dyn_cast<Instruction>(V);
4206 if (
auto *PDI = dyn_cast<PossiblyDisjointInst>(
I))
4207 if (PDI->isDisjoint())
4213 if (
auto *
II = dyn_cast<IntrinsicInst>(
I);
4214 II &&
II->getIntrinsicID() == Intrinsic::vscale)
4221 if (
I->hasPoisonGeneratingAnnotations())
4233 assert(SCEVSequentialMinMaxExpr::isSequentialMinMaxType(Kind) &&
4234 "Not a SCEVSequentialMinMaxExpr!");
4235 assert(!Ops.
empty() &&
"Cannot get empty (u|s)(min|max)!");
4236 if (Ops.
size() == 1)
4240 for (
unsigned i = 1, e = Ops.
size(); i != e; ++i) {
4242 "Operand types don't match!");
4245 "min/max should be consistently pointerish");
4253 if (
const SCEV *S = findExistingSCEVInCache(Kind, Ops))
4260 SCEVSequentialMinMaxDeduplicatingVisitor Deduplicator(*
this, Kind);
4261 bool Changed = Deduplicator.visit(Kind, Ops, Ops);
4270 bool DeletedAny =
false;
4272 if (Ops[
Idx]->getSCEVType() != Kind) {
4276 const auto *SMME = cast<SCEVSequentialMinMaxExpr>(Ops[
Idx]);
4279 SMME->operands().end());
4287 const SCEV *SaturationPoint;
4298 for (
unsigned i = 1, e = Ops.
size(); i != e; ++i) {
4299 if (!isGuaranteedNotToCauseUB(Ops[i]))
4316 if (isKnownViaNonRecursiveReasoning(Pred, Ops[i - 1], Ops[i])) {
4325 ID.AddInteger(Kind);
4326 for (
const SCEV *
Op : Ops)
4329 const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
4331 return ExistingSCEV;
4334 std::uninitialized_copy(Ops.
begin(), Ops.
end(), O);
4335 SCEV *S =
new (SCEVAllocator)
4338 UniqueSCEVs.InsertNode(S, IP);
4386 if (
Size.isScalable())
4407 "Cannot get offset for structure containing scalable vector types");
4421 if (
SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP)) {
4422 assert(cast<SCEVUnknown>(S)->getValue() == V &&
4423 "Stale SCEVUnknown in uniquing map!");
4428 FirstUnknown = cast<SCEVUnknown>(S);
4429 UniqueSCEVs.InsertNode(S, IP);
4477 bool PreciseA, PreciseB;
4478 auto *ScopeA = getDefiningScopeBound({
A}, PreciseA);
4479 auto *ScopeB = getDefiningScopeBound({
B}, PreciseB);
4480 if (!PreciseA || !PreciseB)
4483 return (ScopeA == ScopeB) || DT.
dominates(ScopeA, ScopeB) ||
4488 return CouldNotCompute.get();
4491bool ScalarEvolution::checkValidity(
const SCEV *S)
const {
4493 auto *SU = dyn_cast<SCEVUnknown>(S);
4494 return SU && SU->getValue() ==
nullptr;
4497 return !ContainsNulls;
4502 if (
I != HasRecMap.
end())
4507 HasRecMap.
insert({S, FoundAddRec});
4515 if (SI == ExprValueMap.
end())
4517 return SI->second.getArrayRef();
4523void ScalarEvolution::eraseValueFromMap(
Value *V) {
4525 if (
I != ValueExprMap.
end()) {
4526 auto EVIt = ExprValueMap.
find(
I->second);
4527 bool Removed = EVIt->second.remove(V);
4529 assert(Removed &&
"Value not in ExprValueMap?");
4534void ScalarEvolution::insertValueToMap(
Value *V,
const SCEV *S) {
4538 auto It = ValueExprMap.
find_as(V);
4539 if (It == ValueExprMap.
end()) {
4541 ExprValueMap[S].
insert(V);
4552 return createSCEVIter(V);
4559 if (
I != ValueExprMap.
end()) {
4560 const SCEV *S =
I->second;
4561 assert(checkValidity(S) &&
4562 "existing SCEV has not been properly invalidated");
4571 if (
const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
4575 Type *Ty = V->getType();
4583 if (!
Add ||
Add->getNumOperands() != 2 ||
4584 !
Add->getOperand(0)->isAllOnesValue())
4587 const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(
Add->getOperand(1));
4597 assert(!V->getType()->isPointerTy() &&
"Can't negate pointer");
4599 if (
const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
4610 return (
const SCEV *)
nullptr;
4616 if (
const SCEV *Replaced = MatchMinMaxNegation(MME))
4620 Type *Ty = V->getType();
4626 assert(
P->getType()->isPointerTy());
4628 if (
auto *AddRec = dyn_cast<SCEVAddRecExpr>(
P)) {
4636 if (
auto *
Add = dyn_cast<SCEVAddExpr>(
P)) {
4639 const SCEV **PtrOp =
nullptr;
4640 for (
const SCEV *&AddOp : Ops) {
4641 if (AddOp->getType()->isPointerTy()) {
4642 assert(!PtrOp &&
"Cannot have multiple pointer ops");
4676 const bool RHSIsNotMinSigned =
4707 Type *SrcTy = V->getType();
4709 "Cannot truncate or zero extend with non-integer arguments!");
4719 Type *SrcTy = V->getType();
4721 "Cannot truncate or zero extend with non-integer arguments!");
4731 Type *SrcTy = V->getType();
4733 "Cannot noop or zero extend with non-integer arguments!");
4735 "getNoopOrZeroExtend cannot truncate!");
4743 Type *SrcTy = V->getType();
4745 "Cannot noop or sign extend with non-integer arguments!");
4747 "getNoopOrSignExtend cannot truncate!");
4755 Type *SrcTy = V->getType();
4757 "Cannot noop or any extend with non-integer arguments!");
4759 "getNoopOrAnyExtend cannot truncate!");
4767 Type *SrcTy = V->getType();
4769 "Cannot truncate or noop with non-integer arguments!");
4771 "getTruncateOrNoop cannot extend!");
4779 const SCEV *PromotedLHS =
LHS;
4780 const SCEV *PromotedRHS =
RHS;
4800 assert(!Ops.
empty() &&
"At least one operand must be!");
4802 if (Ops.
size() == 1)
4806 Type *MaxType =
nullptr;
4807 for (
const auto *S : Ops)
4812 assert(MaxType &&
"Failed to find maximum type!");
4816 for (
const auto *S : Ops)
4825 if (!V->getType()->isPointerTy())
4829 if (
auto *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
4830 V = AddRec->getStart();
4831 }
else if (
auto *
Add = dyn_cast<SCEVAddExpr>(V)) {
4832 const SCEV *PtrOp =
nullptr;
4833 for (
const SCEV *AddOp :
Add->operands()) {
4834 if (AddOp->getType()->isPointerTy()) {
4835 assert(!PtrOp &&
"Cannot have multiple pointer ops");
4839 assert(PtrOp &&
"Must have pointer op");
4851 for (
User *U :
I->users()) {
4852 auto *UserInsn = cast<Instruction>(U);
4853 if (Visited.
insert(UserInsn).second)
4868 bool IgnoreOtherLoops =
true) {
4871 if (
Rewriter.hasSeenLoopVariantSCEVUnknown())
4873 return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
4880 SeenLoopVariantSCEVUnknown =
true;
4888 SeenOtherLoops =
true;
4892 bool hasSeenLoopVariantSCEVUnknown() {
return SeenLoopVariantSCEVUnknown; }
4894 bool hasSeenOtherLoops() {
return SeenOtherLoops; }
4901 bool SeenLoopVariantSCEVUnknown =
false;
4902 bool SeenOtherLoops =
false;
4912 SCEVPostIncRewriter
Rewriter(L, SE);
4914 return Rewriter.hasSeenLoopVariantSCEVUnknown()
4921 SeenLoopVariantSCEVUnknown =
true;
4929 SeenOtherLoops =
true;
4933 bool hasSeenLoopVariantSCEVUnknown() {
return SeenLoopVariantSCEVUnknown; }
4935 bool hasSeenOtherLoops() {
return SeenOtherLoops; }
4942 bool SeenLoopVariantSCEVUnknown =
false;
4943 bool SeenOtherLoops =
false;
4949class SCEVBackedgeConditionFolder
4952 static const SCEV *rewrite(
const SCEV *S,
const Loop *L,
4954 bool IsPosBECond =
false;
4955 Value *BECond =
nullptr;
4957 BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator());
4960 "Both outgoing branches should not target same header!");
4967 SCEVBackedgeConditionFolder
Rewriter(L, BECond, IsPosBECond, SE);
4977 switch (
I->getOpcode()) {
4978 case Instruction::Select: {
4980 std::optional<const SCEV *> Res =
4981 compareWithBackedgeCondition(
SI->getCondition());
4983 bool IsOne = cast<SCEVConstant>(*Res)->getValue()->isOne();
4989 std::optional<const SCEV *> Res = compareWithBackedgeCondition(
I);
5000 explicit SCEVBackedgeConditionFolder(
const Loop *L,
Value *BECond,
5003 IsPositiveBECond(IsPosBECond) {}
5005 std::optional<const SCEV *> compareWithBackedgeCondition(
Value *IC);
5009 Value *BackedgeCond =
nullptr;
5011 bool IsPositiveBECond;
5014std::optional<const SCEV *>
5015SCEVBackedgeConditionFolder::compareWithBackedgeCondition(
Value *IC) {
5020 if (BackedgeCond == IC)
5023 return std::nullopt;
5028 static const SCEV *rewrite(
const SCEV *S,
const Loop *L,
5049 bool isValid() {
return Valid; }
5062ScalarEvolution::proveNoWrapViaConstantRanges(
const SCEVAddRecExpr *AR) {
5072 if (
const SCEVConstant *BECountMax = dyn_cast<SCEVConstant>(BECount)) {
5074 const APInt &BECountAP = BECountMax->getAPInt();
5075 unsigned NoOverflowBitWidth =
5087 Instruction::Add, IncRange, OBO::NoSignedWrap);
5088 if (NSWRegion.contains(AddRecRange))
5097 Instruction::Add, IncRange, OBO::NoUnsignedWrap);
5098 if (NUWRegion.contains(AddRecRange))
5106ScalarEvolution::proveNoSignedWrapViaInduction(
const SCEVAddRecExpr *AR) {
5116 if (!SignedWrapViaInductionTried.insert(AR).second)
5140 if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards &&
5149 const SCEV *OverflowLimit =
5151 if (OverflowLimit &&
5159ScalarEvolution::proveNoUnsignedWrapViaInduction(
const SCEVAddRecExpr *AR) {
5169 if (!UnsignedWrapViaInductionTried.insert(AR).second)
5194 if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards &&
5233 if (
auto *OBO = dyn_cast<OverflowingBinaryOperator>(
Op)) {
5234 IsNSW = OBO->hasNoSignedWrap();
5235 IsNUW = OBO->hasNoUnsignedWrap();
5239 explicit BinaryOp(
unsigned Opcode,
Value *
LHS,
Value *
RHS,
bool IsNSW =
false,
5241 : Opcode(Opcode),
LHS(
LHS),
RHS(
RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
5251 auto *
Op = dyn_cast<Operator>(V);
5253 return std::nullopt;
5259 switch (
Op->getOpcode()) {
5260 case Instruction::Add:
5261 case Instruction::Sub:
5262 case Instruction::Mul:
5263 case Instruction::UDiv:
5264 case Instruction::URem:
5265 case Instruction::And:
5266 case Instruction::AShr:
5267 case Instruction::Shl:
5268 return BinaryOp(
Op);
5270 case Instruction::Or: {
5272 if (cast<PossiblyDisjointInst>(
Op)->isDisjoint())
5273 return BinaryOp(Instruction::Add,
Op->getOperand(0),
Op->getOperand(1),
5275 return BinaryOp(
Op);
5278 case Instruction::Xor:
5279 if (
auto *RHSC = dyn_cast<ConstantInt>(
Op->getOperand(1)))
5282 if (RHSC->getValue().isSignMask())
5283 return BinaryOp(Instruction::Add,
Op->getOperand(0),
Op->getOperand(1));
5285 if (V->getType()->isIntegerTy(1))
5286 return BinaryOp(Instruction::Add,
Op->getOperand(0),
Op->getOperand(1));
5287 return BinaryOp(
Op);
5289 case Instruction::LShr:
5291 if (
ConstantInt *SA = dyn_cast<ConstantInt>(
Op->getOperand(1))) {
5298 if (SA->getValue().ult(
BitWidth)) {
5300 ConstantInt::get(SA->getContext(),
5302 return BinaryOp(Instruction::UDiv,
Op->getOperand(0),
X);
5305 return BinaryOp(
Op);
5307 case Instruction::ExtractValue: {
5308 auto *EVI = cast<ExtractValueInst>(
Op);
5309 if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
5312 auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand());
5317 bool Signed = WO->isSigned();
5320 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS());
5325 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(),
5335 if (
auto *
II = dyn_cast<IntrinsicInst>(V))
5336 if (
II->getIntrinsicID() == Intrinsic::loop_decrement_reg)
5337 return BinaryOp(Instruction::Sub,
II->getOperand(0),
II->getOperand(1));
5339 return std::nullopt;
5365 if (
Op == SymbolicPHI)
5370 if (SourceBits != NewBits)
5378 SExt ? dyn_cast<SCEVTruncateExpr>(SExt->
getOperand())
5379 : dyn_cast<SCEVTruncateExpr>(ZExt->
getOperand());
5383 if (
X != SymbolicPHI)
5385 Signed = SExt !=
nullptr;
5393 if (!L || L->getHeader() != PN->
getParent())
5451std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5452ScalarEvolution::createAddRecFromPHIWithCastsImpl(
const SCEVUnknown *SymbolicPHI) {
5458 auto *PN = cast<PHINode>(SymbolicPHI->
getValue());
5460 assert(L &&
"Expecting an integer loop header phi");
5465 Value *BEValueV =
nullptr, *StartValueV =
nullptr;
5466 for (
unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
5467 Value *
V = PN->getIncomingValue(i);
5468 if (
L->contains(PN->getIncomingBlock(i))) {
5471 }
else if (BEValueV != V) {
5475 }
else if (!StartValueV) {
5477 }
else if (StartValueV != V) {
5478 StartValueV =
nullptr;
5482 if (!BEValueV || !StartValueV)
5483 return std::nullopt;
5490 const auto *
Add = dyn_cast<SCEVAddExpr>(BEValue);
5492 return std::nullopt;
5496 unsigned FoundIndex =
Add->getNumOperands();
5497 Type *TruncTy =
nullptr;
5499 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5502 if (FoundIndex == e) {
5507 if (FoundIndex ==
Add->getNumOperands())
5508 return std::nullopt;
5512 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5513 if (i != FoundIndex)
5520 return std::nullopt;
5574 const SCEV *PHISCEV =
5584 if (
const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) {
5601 auto getExtendedExpr = [&](
const SCEV *Expr,
5602 bool CreateSignExtend) ->
const SCEV * {
5605 const SCEV *ExtendedExpr =
5608 return ExtendedExpr;
5616 auto PredIsKnownFalse = [&](
const SCEV *Expr,
5617 const SCEV *ExtendedExpr) ->
bool {
5618 return Expr != ExtendedExpr &&
5622 const SCEV *StartExtended = getExtendedExpr(StartVal,
Signed);
5623 if (PredIsKnownFalse(StartVal, StartExtended)) {
5625 return std::nullopt;
5630 const SCEV *AccumExtended = getExtendedExpr(Accum,
true);
5631 if (PredIsKnownFalse(Accum, AccumExtended)) {
5633 return std::nullopt;
5636 auto AppendPredicate = [&](
const SCEV *Expr,
5637 const SCEV *ExtendedExpr) ->
void {
5638 if (Expr != ExtendedExpr &&
5646 AppendPredicate(StartVal, StartExtended);
5647 AppendPredicate(Accum, AccumExtended);
5655 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
5656 std::make_pair(NewAR, Predicates);
5658 PredicatedSCEVRewrites[{SymbolicPHI,
L}] = PredRewrite;
5662std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5664 auto *PN = cast<PHINode>(SymbolicPHI->
getValue());
5667 return std::nullopt;
5670 auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
5671 if (
I != PredicatedSCEVRewrites.end()) {
5672 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
5675 if (Rewrite.first == SymbolicPHI)
5676 return std::nullopt;
5679 assert(isa<SCEVAddRecExpr>(Rewrite.first) &&
"Expected an AddRec");
5680 assert(!(Rewrite.second).empty() &&
"Expected to find Predicates");
5684 std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5685 Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);
5690 PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
5691 return std::nullopt;
5708 auto areExprsEqual = [&](
const SCEV *Expr1,
const SCEV *Expr2) ->
bool {
5709 if (Expr1 != Expr2 &&
5728const SCEV *ScalarEvolution::createSimpleAffineAddRec(
PHINode *PN,
5730 Value *StartValueV) {
5733 assert(BEValueV && StartValueV);
5739 if (BO->Opcode != Instruction::Add)
5742 const SCEV *Accum =
nullptr;
5743 if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
5745 else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
5759 insertValueToMap(PN, PHISCEV);
5761 if (
auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) {
5764 proveNoWrapViaConstantRanges(AR)));
5770 if (
auto *BEInst = dyn_cast<Instruction>(BEValueV)) {
5772 "Accum is defined outside L, but is not invariant?");
5773 if (isAddRecNeverPoison(BEInst, L))
5780const SCEV *ScalarEvolution::createAddRecFromPHI(
PHINode *PN) {
5788 Value *BEValueV =
nullptr, *StartValueV =
nullptr;
5794 }
else if (BEValueV != V) {
5798 }
else if (!StartValueV) {
5800 }
else if (StartValueV != V) {
5801 StartValueV =
nullptr;
5805 if (!BEValueV || !StartValueV)
5809 "PHI node already processed?");
5813 if (
auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
5818 insertValueToMap(PN, SymbolicName);
5832 unsigned FoundIndex =
Add->getNumOperands();
5833 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5834 if (
Add->getOperand(i) == SymbolicName)
5835 if (FoundIndex == e) {
5840 if (FoundIndex !=
Add->getNumOperands()) {
5843 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5844 if (i != FoundIndex)
5845 Ops.
push_back(SCEVBackedgeConditionFolder::rewrite(
Add->getOperand(i),
5852 (isa<SCEVAddRecExpr>(Accum) &&
5853 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
5857 if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
5864 if (
GEP->getOperand(0) == PN) {
5889 forgetMemoizedResults(SymbolicName);
5890 insertValueToMap(PN, PHISCEV);
5892 if (
auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) {
5895 proveNoWrapViaConstantRanges(AR)));
5901 if (
auto *BEInst = dyn_cast<Instruction>(BEValueV))
5920 if (isGuaranteedNotToCauseUB(BEValue)) {
5921 const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *
this);
5922 const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *
this,
false);
5926 if (Start == StartVal) {
5930 forgetMemoizedResults(SymbolicName);
5931 insertValueToMap(PN, Shifted);
5942 eraseValueFromMap(PN);
5962 Use &LeftUse =
Merge->getOperandUse(0);
5963 Use &RightUse =
Merge->getOperandUse(1);
5980const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(
PHINode *PN) {
5997 assert(IDom &&
"At least the entry block should dominate PN");
6006 return createNodeForSelectOrPHI(PN,
Cond, LHS, RHS);
6022ScalarEvolution::createNodeForPHIWithIdenticalOperands(
PHINode *PN) {
6026 auto *IncomingInst = dyn_cast<BinaryOperator>(
Incoming);
6034 CommonInst = IncomingInst;
6042 bool SCEVExprsIdentical =
6044 [
this, CommonSCEV](
Value *V) { return CommonSCEV == getSCEV(V); });
6045 return SCEVExprsIdentical ? CommonSCEV :
nullptr;
6048const SCEV *ScalarEvolution::createNodeForPHI(
PHINode *PN) {
6049 if (
const SCEV *S = createAddRecFromPHI(PN))
6059 if (
const SCEV *S = createNodeForPHIWithIdenticalOperands(PN))
6062 if (
const SCEV *S = createNodeFromSelectLikePHI(PN))
6071 struct FindClosure {
6072 const SCEV *OperandToFind;
6078 bool canRecurseInto(
SCEVTypes Kind)
const {
6081 return RootKind == Kind || NonSequentialRootKind == Kind ||
6086 : OperandToFind(OperandToFind), RootKind(RootKind),
6087 NonSequentialRootKind(
6091 bool follow(
const SCEV *S) {
6092 Found = S == OperandToFind;
6094 return !isDone() && canRecurseInto(S->
getSCEVType());
6097 bool isDone()
const {
return Found; }
6100 FindClosure FC(OperandToFind, RootKind);
6105std::optional<const SCEV *>
6106ScalarEvolution::createNodeForSelectOrPHIInstWithICmpInstCond(
Type *Ty,
6116 switch (ICI->getPredicate()) {
6130 bool Signed = ICI->isSigned();
6139 if (LA == LS &&
RA == RS)
6141 if (LA == RS &&
RA == LS)
6144 auto CoerceOperand = [&](
const SCEV *
Op) ->
const SCEV * {
6145 if (
Op->getType()->isPointerTy()) {
6147 if (isa<SCEVCouldNotCompute>(
Op))
6156 LS = CoerceOperand(LS);
6157 RS = CoerceOperand(RS);
6158 if (isa<SCEVCouldNotCompute>(LS) || isa<SCEVCouldNotCompute>(RS))
6179 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->
isZero()) {
6185 if (isa<SCEVConstant>(
C) && cast<SCEVConstant>(
C)->getAPInt().ule(1))
6192 if (isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->
isZero() &&
6193 isa<ConstantInt>(TrueVal) && cast<ConstantInt>(TrueVal)->
isZero()) {
6195 while (
auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(
X))
6196 X = ZExt->getOperand();
6209 return std::nullopt;
6212static std::optional<const SCEV *>
6214 const SCEV *TrueExpr,
const SCEV *FalseExpr) {
6218 "Unexpected operands of a select.");
6229 if (!isa<SCEVConstant>(TrueExpr) && !isa<SCEVConstant>(FalseExpr))
6230 return std::nullopt;
6233 if (isa<SCEVConstant>(TrueExpr)) {
6245static std::optional<const SCEV *>
6248 if (!isa<ConstantInt>(TrueVal) && !isa<ConstantInt>(FalseVal))
6249 return std::nullopt;
6252 const auto *SETrue = SE->
getSCEV(TrueVal);
6253 const auto *SEFalse = SE->
getSCEV(FalseVal);
6257const SCEV *ScalarEvolution::createNodeForSelectOrPHIViaUMinSeq(
6259 assert(
Cond->getType()->isIntegerTy(1) &&
"Select condition is not an i1?");
6261 V->getType() ==
TrueVal->getType() &&
6262 "Types of select hands and of the result must match.");
6265 if (!
V->getType()->isIntegerTy(1))
6268 if (std::optional<const SCEV *> S =
6280 if (
auto *CI = dyn_cast<ConstantInt>(
Cond))
6281 return getSCEV(CI->isOne() ? TrueVal : FalseVal);
6283 if (
auto *
I = dyn_cast<Instruction>(V)) {
6284 if (
auto *ICI = dyn_cast<ICmpInst>(
Cond)) {
6285 if (std::optional<const SCEV *> S =
6286 createNodeForSelectOrPHIInstWithICmpInstCond(
I->getType(), ICI,
6292 return createNodeForSelectOrPHIViaUMinSeq(V,
Cond, TrueVal, FalseVal);
6298 assert(
GEP->getSourceElementType()->isSized() &&
6299 "GEP source element type must be sized");
6302 for (
Value *Index :
GEP->indices())
6307APInt ScalarEvolution::getConstantMultipleImpl(
const SCEV *S) {
6317 for (
unsigned I = 1, E =
N->getNumOperands();
I < E && Res != 1; ++
I)
6325 return cast<SCEVConstant>(S)->getAPInt();
6335 return GetShiftedByZeros(TZ);
6345 return GetShiftedByZeros(TZ);
6349 if (
M->hasNoUnsignedWrap()) {
6352 for (
const SCEV *Operand :
M->operands().drop_front())
6360 for (
const SCEV *Operand :
M->operands())
6362 return GetShiftedByZeros(TZ);
6367 if (
N->hasNoUnsignedWrap())
6368 return GetGCDMultiple(
N);
6371 for (
const SCEV *Operand :
N->operands().drop_front())
6373 return GetShiftedByZeros(TZ);
6380 return GetGCDMultiple(cast<SCEVNAryExpr>(S));
6386 .countMinTrailingZeros();
6387 return GetShiftedByZeros(Known);
6396 auto I = ConstantMultipleCache.find(S);
6397 if (
I != ConstantMultipleCache.end())
6400 APInt Result = getConstantMultipleImpl(S);
6401 auto InsertPair = ConstantMultipleCache.insert({S, Result});
6402 assert(InsertPair.second &&
"Should insert a new key");
6403 return InsertPair.first->second;
6419 if (
MDNode *MD =
I->getMetadata(LLVMContext::MD_range))
6421 if (
const auto *CB = dyn_cast<CallBase>(V))
6422 if (std::optional<ConstantRange>
Range = CB->getRange())
6425 if (
auto *
A = dyn_cast<Argument>(V))
6426 if (std::optional<ConstantRange>
Range =
A->getRange())
6429 return std::nullopt;
6436 UnsignedRanges.erase(AddRec);
6437 SignedRanges.erase(AddRec);
6438 ConstantMultipleCache.erase(AddRec);
6443getRangeForUnknownRecurrence(
const SCEVUnknown *U) {
6457 auto *
P = dyn_cast<PHINode>(U->getValue());
6469 Value *Start, *Step;
6476 assert(L && L->getHeader() ==
P->getParent());
6489 case Instruction::AShr:
6490 case Instruction::LShr:
6491 case Instruction::Shl:
6506 KnownStep.getBitWidth() ==
BitWidth);
6509 auto MaxShiftAmt = KnownStep.getMaxValue();
6511 bool Overflow =
false;
6512 auto TotalShift = MaxShiftAmt.umul_ov(TCAP, Overflow);
6519 case Instruction::AShr: {
6527 if (KnownStart.isNonNegative())
6530 KnownStart.getMaxValue() + 1);
6531 if (KnownStart.isNegative())
6534 KnownEnd.getMaxValue() + 1);
6537 case Instruction::LShr: {
6546 KnownStart.getMaxValue() + 1);
6548 case Instruction::Shl: {
6552 if (TotalShift.ult(KnownStart.countMinLeadingZeros()))
6554 KnownEnd.getMaxValue() + 1);
6562ScalarEvolution::getRangeRefIter(
const SCEV *S,
6563 ScalarEvolution::RangeSignHint SignHint) {
6565 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
6572 auto AddToWorklist = [&WorkList, &Seen, &Cache](
const SCEV *Expr) {
6573 if (!Seen.
insert(Expr).second)
6579 if (!isa<PHINode>(cast<SCEVUnknown>(Expr)->getValue()))
6606 for (
unsigned I = 0;
I != WorkList.
size(); ++
I) {
6607 const SCEV *
P = WorkList[
I];
6608 auto *UnknownS = dyn_cast<SCEVUnknown>(
P);
6611 for (
const SCEV *
Op :
P->operands())
6616 if (
const PHINode *
P = dyn_cast<PHINode>(UnknownS->getValue())) {
6617 if (!PendingPhiRangesIter.insert(
P).second)
6624 if (!WorkList.
empty()) {
6629 getRangeRef(
P, SignHint);
6631 if (
auto *UnknownS = dyn_cast<SCEVUnknown>(
P))
6632 if (
const PHINode *
P = dyn_cast<PHINode>(UnknownS->getValue()))
6633 PendingPhiRangesIter.erase(
P);
6637 return getRangeRef(S, SignHint, 0);
6644 const SCEV *S, ScalarEvolution::RangeSignHint SignHint,
unsigned Depth) {
6646 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
6654 if (
I != Cache.
end())
6663 return getRangeRefIter(S, SignHint);
6671 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
6675 ConservativeResult =
6698 ConservativeResult.intersectWith(
X.truncate(
BitWidth), RangeType));
6705 ConservativeResult.intersectWith(
X.zeroExtend(
BitWidth), RangeType));
6712 ConservativeResult.intersectWith(
X.signExtend(
BitWidth), RangeType));
6717 return setRange(PtrToInt, SignHint,
X);
6722 unsigned WrapType = OBO::AnyWrap;
6723 if (
Add->hasNoSignedWrap())
6724 WrapType |= OBO::NoSignedWrap;
6725 if (
Add->hasNoUnsignedWrap())
6726 WrapType |= OBO::NoUnsignedWrap;
6728 X =
X.addWithNoWrap(getRangeRef(
Op, SignHint,
Depth + 1), WrapType,
6730 return setRange(
Add, SignHint,
6731 ConservativeResult.intersectWith(
X, RangeType));
6737 X =
X.multiply(getRangeRef(
Op, SignHint,
Depth + 1));
6738 return setRange(
Mul, SignHint,
6739 ConservativeResult.intersectWith(
X, RangeType));
6745 return setRange(UDiv, SignHint,
6746 ConservativeResult.intersectWith(
X.udiv(
Y), RangeType));
6754 if (!UnsignedMinValue.
isZero())
6755 ConservativeResult = ConservativeResult.intersectWith(
6765 bool AllNonNeg =
true;
6766 bool AllNonPos =
true;
6767 for (
unsigned i = 1, e = AddRec->
getNumOperands(); i != e; ++i) {
6774 ConservativeResult = ConservativeResult.intersectWith(
6779 ConservativeResult = ConservativeResult.intersectWith(
6788 const SCEV *MaxBEScev =
6790 if (!isa<SCEVCouldNotCompute>(MaxBEScev)) {
6791 APInt MaxBECount = cast<SCEVConstant>(MaxBEScev)->getAPInt();
6802 auto RangeFromAffine = getRangeForAffineAR(
6804 ConservativeResult =
6805 ConservativeResult.intersectWith(RangeFromAffine, RangeType);
6807 auto RangeFromFactoring = getRangeViaFactoring(
6809 ConservativeResult =
6810 ConservativeResult.intersectWith(RangeFromFactoring, RangeType);
6816 const SCEV *SymbolicMaxBECount =
6818 if (!isa<SCEVCouldNotCompute>(SymbolicMaxBECount) &&
6821 auto RangeFromAffineNew = getRangeForAffineNoSelfWrappingAR(
6822 AddRec, SymbolicMaxBECount,
BitWidth, SignHint);
6823 ConservativeResult =
6824 ConservativeResult.intersectWith(RangeFromAffineNew, RangeType);
6829 return setRange(AddRec, SignHint, std::move(ConservativeResult));
6839 ID = Intrinsic::umax;
6842 ID = Intrinsic::smax;
6846 ID = Intrinsic::umin;
6849 ID = Intrinsic::smin;
6855 const auto *NAry = cast<SCEVNAryExpr>(S);
6857 for (
unsigned i = 1, e = NAry->getNumOperands(); i != e; ++i)
6859 ID, {
X, getRangeRef(NAry->getOperand(i), SignHint,
Depth + 1)});
6860 return setRange(S, SignHint,
6861 ConservativeResult.intersectWith(
X, RangeType));
6870 ConservativeResult =
6871 ConservativeResult.intersectWith(*MDRange, RangeType);
6876 auto CR = getRangeForUnknownRecurrence(U);
6877 ConservativeResult = ConservativeResult.intersectWith(CR);
6888 if (
U->getType()->isPointerTy()) {
6891 unsigned ptrSize =
DL.getPointerTypeSizeInBits(
U->getType());
6892 int ptrIdxDiff = ptrSize -
BitWidth;
6893 if (ptrIdxDiff > 0 && ptrSize >
BitWidth && NS > (
unsigned)ptrIdxDiff)
6906 ConservativeResult = ConservativeResult.intersectWith(
6910 ConservativeResult = ConservativeResult.intersectWith(
6915 if (
U->getType()->isPointerTy() && SignHint == HINT_RANGE_UNSIGNED) {
6918 bool CanBeNull, CanBeFreed;
6920 V->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
6936 ConservativeResult = ConservativeResult.intersectWith(
6942 if (
PHINode *Phi = dyn_cast<PHINode>(V)) {
6944 if (PendingPhiRanges.insert(Phi).second) {
6947 for (
const auto &
Op :
Phi->operands()) {
6949 RangeFromOps = RangeFromOps.unionWith(OpRange);
6951 if (RangeFromOps.isFullSet())
6954 ConservativeResult =
6955 ConservativeResult.intersectWith(RangeFromOps, RangeType);
6956 bool Erased = PendingPhiRanges.erase(Phi);
6957 assert(Erased &&
"Failed to erase Phi properly?");
6963 if (
const auto *
II = dyn_cast<IntrinsicInst>(V))
6964 if (
II->getIntrinsicID() == Intrinsic::vscale) {
6966 ConservativeResult = ConservativeResult.difference(Disallowed);
6969 return setRange(U, SignHint, std::move(ConservativeResult));
6975 return setRange(S, SignHint, std::move(ConservativeResult));
6984 const APInt &MaxBECount,
6991 if (Step == 0 || MaxBECount == 0)
6998 return ConstantRange::getFull(
BitWidth);
7014 return ConstantRange::getFull(
BitWidth);
7026 APInt MovedBoundary = Descending ? (StartLower - std::move(
Offset))
7027 : (StartUpper + std::move(
Offset));
7032 if (StartRange.
contains(MovedBoundary))
7033 return ConstantRange::getFull(
BitWidth);
7036 Descending ? std::move(MovedBoundary) : std::move(StartLower);
7038 Descending ? std::move(StartUpper) : std::move(MovedBoundary);
7047 const APInt &MaxBECount) {
7051 "mismatched bit widths");
7060 StepSRange.
getSignedMin(), StartSRange, MaxBECount,
true);
7062 StartSRange, MaxBECount,
7074ConstantRange ScalarEvolution::getRangeForAffineNoSelfWrappingAR(
7076 ScalarEvolution::RangeSignHint SignHint) {
7077 assert(AddRec->
isAffine() &&
"Non-affine AddRecs are not suppored!\n");
7079 "This only works for non-self-wrapping AddRecs!");
7080 const bool IsSigned = SignHint == HINT_RANGE_SIGNED;
7083 if (!isa<SCEVConstant>(Step))
7084 return ConstantRange::getFull(
BitWidth);
7092 return ConstantRange::getFull(
BitWidth);
7098 MaxItersWithoutWrap))
7099 return ConstantRange::getFull(
BitWidth);
7126 return RangeBetween;
7131 return ConstantRange::getFull(
BitWidth);
7134 isKnownPredicateViaConstantRanges(LEPred, Start,
End))
7135 return RangeBetween;
7137 isKnownPredicateViaConstantRanges(GEPred, Start,
End))
7138 return RangeBetween;
7139 return ConstantRange::getFull(
BitWidth);
7144 const APInt &MaxBECount) {
7151 "mismatched bit widths");
7153 struct SelectPattern {
7154 Value *Condition =
nullptr;
7160 std::optional<unsigned> CastOp;
7167 if (
auto *SA = dyn_cast<SCEVAddExpr>(S)) {
7170 if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0)))
7173 Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt();
7174 S = SA->getOperand(1);
7178 if (
auto *SCast = dyn_cast<SCEVIntegralCastExpr>(S)) {
7180 S = SCast->getOperand();
7185 auto *SU = dyn_cast<SCEVUnknown>(S);
7190 Condition =
nullptr;
7222 bool isRecognized() {
return Condition !=
nullptr; }
7225 SelectPattern StartPattern(*
this,
BitWidth, Start);
7226 if (!StartPattern.isRecognized())
7227 return ConstantRange::getFull(
BitWidth);
7229 SelectPattern StepPattern(*
this,
BitWidth, Step);
7230 if (!StepPattern.isRecognized())
7231 return ConstantRange::getFull(
BitWidth);
7233 if (StartPattern.Condition != StepPattern.Condition) {
7237 return ConstantRange::getFull(
BitWidth);
7254 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount);
7256 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount);
7278ScalarEvolution::getNonTrivialDefiningScopeBound(
const SCEV *S) {
7279 if (
auto *AddRec = dyn_cast<SCEVAddRecExpr>(S))
7281 if (
auto *U = dyn_cast<SCEVUnknown>(S))
7282 if (
auto *
I = dyn_cast<Instruction>(
U->getValue()))
7294 auto pushOp = [&](
const SCEV *S) {
7295 if (!Visited.
insert(S).second)
7298 if (Visited.
size() > 30) {
7305 for (
const auto *S : Ops)
7309 while (!Worklist.
empty()) {
7311 if (
auto *DefI = getNonTrivialDefiningScopeBound(S)) {
7312 if (!Bound || DT.
dominates(Bound, DefI))
7325 return getDefiningScopeBound(Ops, Discard);
7328bool ScalarEvolution::isGuaranteedToTransferExecutionTo(
const Instruction *
A,
7330 if (
A->getParent() ==
B->getParent() &&
7336 if (BLoop && BLoop->getHeader() ==
B->getParent() &&
7337 BLoop->getLoopPreheader() ==
A->getParent() &&
7339 A->getParent()->end()) &&
7346bool ScalarEvolution::isGuaranteedNotToBePoison(
const SCEV *
Op) {
7347 SCEVPoisonCollector PC(
true);
7349 return PC.MaybePoison.empty();
7352bool ScalarEvolution::isGuaranteedNotToCauseUB(
const SCEV *
Op) {
7354 auto *UDiv = dyn_cast<SCEVUDivExpr>(S);
7358 !isGuaranteedNotToBePoison(UDiv->getOperand(1)));
7362bool ScalarEvolution::isSCEVExprNeverPoison(
const Instruction *
I) {
7379 for (
const Use &
Op :
I->operands()) {
7385 auto *DefI = getDefiningScopeBound(SCEVOps);
7386 return isGuaranteedToTransferExecutionTo(DefI,
I);
7389bool ScalarEvolution::isAddRecNeverPoison(
const Instruction *
I,
const Loop *L) {
7391 if (isSCEVExprNeverPoison(
I))
7402 auto *ExitingBB =
L->getExitingBlock();
7415 while (!Worklist.
empty()) {
7419 const Instruction *PoisonUser = cast<Instruction>(
U.getUser());
7425 if (KnownPoison.
insert(PoisonUser).second)
7433ScalarEvolution::LoopProperties
7434ScalarEvolution::getLoopProperties(
const Loop *L) {
7435 using LoopProperties = ScalarEvolution::LoopProperties;
7437 auto Itr = LoopPropertiesCache.find(L);
7438 if (Itr == LoopPropertiesCache.end()) {
7440 if (
auto *SI = dyn_cast<StoreInst>(
I))
7441 return !
SI->isSimple();
7443 return I->mayThrow() ||
I->mayWriteToMemory();
7446 LoopProperties LP = {
true,
7449 for (
auto *BB :
L->getBlocks())
7450 for (
auto &
I : *BB) {
7452 LP.HasNoAbnormalExits =
false;
7453 if (HasSideEffects(&
I))
7454 LP.HasNoSideEffects =
false;
7455 if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
7459 auto InsertPair = LoopPropertiesCache.insert({
L, LP});
7460 assert(InsertPair.second &&
"We just checked!");
7461 Itr = InsertPair.first;
7474const SCEV *ScalarEvolution::createSCEVIter(
Value *V) {
7480 Stack.emplace_back(V,
true);
7481 Stack.emplace_back(V,
false);
7482 while (!Stack.empty()) {
7483 auto E = Stack.pop_back_val();
7484 Value *CurV = E.getPointer();
7490 const SCEV *CreatedSCEV =
nullptr;
7493 CreatedSCEV = createSCEV(CurV);
7498 CreatedSCEV = getOperandsToCreate(CurV, Ops);
7502 insertValueToMap(CurV, CreatedSCEV);
7506 Stack.emplace_back(CurV,
true);
7508 Stack.emplace_back(
Op,
false);
7527 }
else if (
ConstantInt *CI = dyn_cast<ConstantInt>(V))
7529 else if (isa<GlobalAlias>(V))
7531 else if (!isa<ConstantExpr>(V))
7537 bool IsConstArg = isa<ConstantInt>(BO->RHS);
7538 switch (BO->Opcode) {
7539 case Instruction::Add:
7540 case Instruction::Mul: {
7553 dyn_cast<Instruction>(V));
7555 (BO->Opcode == Instruction::Add &&
7556 (NewBO->Opcode != Instruction::Add &&
7557 NewBO->Opcode != Instruction::Sub)) ||
7558 (BO->Opcode == Instruction::Mul &&
7559 NewBO->Opcode != Instruction::Mul)) {
7565 if (BO->
Op && (BO->IsNSW || BO->IsNUW)) {
7566 auto *
I = dyn_cast<Instruction>(BO->
Op);
7576 case Instruction::Sub:
7577 case Instruction::UDiv:
7578 case Instruction::URem:
7580 case Instruction::AShr:
7581 case Instruction::Shl:
7582 case Instruction::Xor:
7586 case Instruction::And:
7587 case Instruction::Or:
7591 case Instruction::LShr:
7603 switch (
U->getOpcode()) {
7604 case Instruction::Trunc:
7605 case Instruction::ZExt:
7606 case Instruction::SExt:
7607 case Instruction::PtrToInt:
7611 case Instruction::BitCast:
7618 case Instruction::SDiv:
7619 case Instruction::SRem:
7624 case Instruction::GetElementPtr:
7625 assert(cast<GEPOperator>(U)->getSourceElementType()->isSized() &&
7626 "GEP source element type must be sized");
7627 for (
Value *Index :
U->operands())
7631 case Instruction::IntToPtr:
7634 case Instruction::PHI:
7638 case Instruction::Select: {
7640 auto CanSimplifyToUnknown = [
this,
U]() {
7641 if (
U->getType()->isIntegerTy(1) || isa<ConstantInt>(
U->getOperand(0)))
7644 auto *ICI = dyn_cast<ICmpInst>(
U->getOperand(0));
7651 if (!(isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->
isZero()))
7658 if (CanSimplifyToUnknown())
7661 for (
Value *Inc :
U->operands())
7666 case Instruction::Call:
7667 case Instruction::Invoke:
7668 if (
Value *RV = cast<CallBase>(U)->getReturnedArgOperand()) {
7673 if (
auto *
II = dyn_cast<IntrinsicInst>(U)) {
7674 switch (
II->getIntrinsicID()) {
7675 case Intrinsic::abs:
7678 case Intrinsic::umax:
7679 case Intrinsic::umin:
7680 case Intrinsic::smax:
7681 case Intrinsic::smin:
7682 case Intrinsic::usub_sat:
7683 case Intrinsic::uadd_sat:
7687 case Intrinsic::start_loop_iterations:
7688 case Intrinsic::annotation:
7689 case Intrinsic::ptr_annotation:
7702const SCEV *ScalarEvolution::createSCEV(
Value *V) {
7713 }
else if (
ConstantInt *CI = dyn_cast<ConstantInt>(V))
7715 else if (isa<GlobalAlias>(V))
7717 else if (!isa<ConstantExpr>(V))
7726 switch (BO->Opcode) {
7727 case Instruction::Add: {
7753 if (BO->Opcode == Instruction::Sub)
7761 if (BO->Opcode == Instruction::Sub)
7767 dyn_cast<Instruction>(V));
7768 if (!NewBO || (NewBO->Opcode != Instruction::Add &&
7769 NewBO->Opcode != Instruction::Sub)) {
7779 case Instruction::Mul: {
7799 dyn_cast<Instruction>(V));
7800 if (!NewBO || NewBO->Opcode != Instruction::Mul) {
7809 case Instruction::UDiv:
7813 case Instruction::URem:
7817 case Instruction::Sub: {
7820 Flags = getNoWrapFlagsFromUB(BO->
Op);
7825 case Instruction::And:
7828 if (
ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
7831 if (CI->isMinusOne())
7833 const APInt &
A = CI->getValue();
7839 unsigned LZ =
A.countl_zero();
7840 unsigned TZ =
A.countr_zero();
7844 0, &AC,
nullptr, &DT);
7846 APInt EffectiveMask =
7848 if ((LZ != 0 || TZ != 0) && !((~
A & ~Known.
Zero) & EffectiveMask)) {
7851 const SCEV *ShiftedLHS =
nullptr;
7852 if (
auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) {
7853 if (
auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) {
7855 unsigned MulZeros = OpC->getAPInt().countr_zero();
7856 unsigned GCD = std::min(MulZeros, TZ);
7861 auto *NewMul =
getMulExpr(MulOps, LHSMul->getNoWrapFlags());
7883 case Instruction::Or:
7892 case Instruction::Xor:
7893 if (
ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
7895 if (CI->isMinusOne())
7902 if (
auto *LBO = dyn_cast<BinaryOperator>(BO->LHS))
7903 if (
ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1)))
7904 if (LBO->getOpcode() == Instruction::And &&
7905 LCI->getValue() == CI->getValue())
7907 dyn_cast<SCEVZeroExtendExpr>(
getSCEV(BO->LHS))) {
7909 const SCEV *Z0 =
Z->getOperand();
7916 if (CI->getValue().isMask(Z0TySize))
7922 APInt Trunc = CI->getValue().
trunc(Z0TySize);
7931 case Instruction::Shl:
7933 if (
ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) {
7949 auto MulFlags = getNoWrapFlagsFromUB(BO->
Op);
7963 case Instruction::AShr:
7984 Operator *
L = dyn_cast<Operator>(BO->LHS);
7985 const SCEV *AddTruncateExpr =
nullptr;
7987 const SCEV *AddConstant =
nullptr;
7989 if (L &&
L->getOpcode() == Instruction::Add) {
7995 Operator *LShift = dyn_cast<Operator>(
L->getOperand(0));
7996 ConstantInt *AddOperandCI = dyn_cast<ConstantInt>(
L->getOperand(1));
7997 if (LShift && LShift->
getOpcode() == Instruction::Shl) {
8000 ShlAmtCI = dyn_cast<ConstantInt>(LShift->
getOperand(1));
8012 }
else if (L &&
L->getOpcode() == Instruction::Shl) {
8018 ShlAmtCI = dyn_cast<ConstantInt>(
L->getOperand(1));
8022 if (AddTruncateExpr && ShlAmtCI) {
8038 const SCEV *CompositeExpr =
8040 if (
L->getOpcode() != Instruction::Shl)
8041 CompositeExpr =
getAddExpr(CompositeExpr, AddConstant);
8050 switch (
U->getOpcode()) {
8051 case Instruction::Trunc:
8054 case Instruction::ZExt:
8057 case Instruction::SExt:
8059 dyn_cast<Instruction>(V))) {
8067 if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
8068 Type *Ty =
U->getType();
8076 case Instruction::BitCast:
8082 case Instruction::PtrToInt: {
8085 Type *DstIntTy =
U->getType();
8089 if (isa<SCEVCouldNotCompute>(IntOp))
8093 case Instruction::IntToPtr:
8097 case Instruction::SDiv:
8104 case Instruction::SRem:
8111 case Instruction::GetElementPtr:
8112 return createNodeForGEP(cast<GEPOperator>(U));
8114 case Instruction::PHI:
8115 return createNodeForPHI(cast<PHINode>(U));
8117 case Instruction::Select:
8118 return createNodeForSelectOrPHI(U,
U->getOperand(0),
U->getOperand(1),
8121 case Instruction::Call:
8122 case Instruction::Invoke:
8123 if (
Value *RV = cast<CallBase>(U)->getReturnedArgOperand())
8126 if (
auto *
II = dyn_cast<IntrinsicInst>(U)) {
8127 switch (
II->getIntrinsicID()) {
8128 case Intrinsic::abs:
8131 cast<ConstantInt>(
II->getArgOperand(1))->
isOne());
8132 case Intrinsic::umax:
8136 case Intrinsic::umin:
8140 case Intrinsic::smax:
8144 case Intrinsic::smin:
8148 case Intrinsic::usub_sat: {
8154 case Intrinsic::uadd_sat: {
8160 case Intrinsic::start_loop_iterations:
8161 case Intrinsic::annotation:
8162 case Intrinsic::ptr_annotation:
8166 case Intrinsic::vscale:
8183 if (isa<SCEVCouldNotCompute>(ExitCount))
8186 auto *ExitCountType = ExitCount->
getType();
8187 assert(ExitCountType->isIntegerTy());
8189 1 + ExitCountType->getScalarSizeInBits());
8196 if (isa<SCEVCouldNotCompute>(ExitCount))
8202 auto CanAddOneWithoutOverflow = [&]() {
8204 getRangeRef(ExitCount, RangeSignHint::HINT_RANGE_UNSIGNED);
8215 if (EvalSize > ExitCountSize && CanAddOneWithoutOverflow())
8245 assert(ExitingBlock &&
"Must pass a non-null exiting block!");
8246 assert(L->isLoopExiting(ExitingBlock) &&
8247 "Exiting block must actually branch out of the loop!");
8256 const auto *MaxExitCount =
8264 L->getExitingBlocks(ExitingBlocks);
8266 std::optional<unsigned> Res;
8267 for (
auto *ExitingBB : ExitingBlocks) {
8271 Res = (
unsigned)std::gcd(*Res, Multiple);
8273 return Res.value_or(1);
8277 const SCEV *ExitCount) {
8307 assert(ExitingBlock &&
"Must pass a non-null exiting block!");
8308 assert(L->isLoopExiting(ExitingBlock) &&
8309 "Exiting block must actually branch out of the loop!");
8319 return getBackedgeTakenInfo(L).getExact(ExitingBlock,
this);
8321 return getBackedgeTakenInfo(L).getSymbolicMax(ExitingBlock,
this);
8323 return getBackedgeTakenInfo(L).getConstantMax(ExitingBlock,
this);
8333 return getPredicatedBackedgeTakenInfo(L).getExact(ExitingBlock,
this,
8336 return getPredicatedBackedgeTakenInfo(L).getSymbolicMax(ExitingBlock,
this,
8339 return getPredicatedBackedgeTakenInfo(L).getConstantMax(ExitingBlock,
this,
8347 return getPredicatedBackedgeTakenInfo(L).getExact(L,
this, &Preds);
8354 return getBackedgeTakenInfo(L).getExact(L,
this);
8356 return getBackedgeTakenInfo(L).getConstantMax(
this);
8358 return getBackedgeTakenInfo(L).getSymbolicMax(L,
this);
8365 return getPredicatedBackedgeTakenInfo(L).getSymbolicMax(L,
this, &Preds);
8370 return getPredicatedBackedgeTakenInfo(L).getConstantMax(
this, &Preds);
8374 return getBackedgeTakenInfo(L).isConstantMaxOrZero(
this);
8384 for (
PHINode &PN : Header->phis())
8385 if (Visited.
insert(&PN).second)
8389ScalarEvolution::BackedgeTakenInfo &
8390ScalarEvolution::getPredicatedBackedgeTakenInfo(
const Loop *L) {
8391 auto &BTI = getBackedgeTakenInfo(L);
8392 if (BTI.hasFullInfo())
8395 auto Pair = PredicatedBackedgeTakenCounts.insert({
L, BackedgeTakenInfo()});
8398 return Pair.first->second;
8400 BackedgeTakenInfo
Result =
8401 computeBackedgeTakenCount(L,
true);
8403 return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result);
8406ScalarEvolution::BackedgeTakenInfo &
8407ScalarEvolution::getBackedgeTakenInfo(
const Loop *L) {
8413 std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator,
bool> Pair =
8414 BackedgeTakenCounts.insert({
L, BackedgeTakenInfo()});
8416 return Pair.first->second;
8421 BackedgeTakenInfo
Result = computeBackedgeTakenCount(L);
8428 if (
Result.hasAnyInfo()) {
8431 auto LoopUsersIt = LoopUsers.find(L);
8432 if (LoopUsersIt != LoopUsers.end())
8434 forgetMemoizedResults(ToForget);
8437 for (
PHINode &PN :
L->getHeader()->phis())
8438 ConstantEvolutionLoopExitValue.erase(&PN);
8446 return BackedgeTakenCounts.find(L)->second = std::move(Result);
8455 BackedgeTakenCounts.clear();
8456 PredicatedBackedgeTakenCounts.clear();
8457 BECountUsers.clear();
8458 LoopPropertiesCache.clear();
8459 ConstantEvolutionLoopExitValue.clear();
8460 ValueExprMap.
clear();
8461 ValuesAtScopes.clear();
8462 ValuesAtScopesUsers.clear();
8463 LoopDispositions.clear();
8464 BlockDispositions.clear();
8465 UnsignedRanges.clear();
8466 SignedRanges.clear();
8467 ExprValueMap.
clear();
8469 ConstantMultipleCache.clear();
8470 PredicatedSCEVRewrites.clear();
8472 FoldCacheUser.clear();
8474void ScalarEvolution::visitAndClearUsers(
8478 while (!Worklist.
empty()) {
8480 if (!
isSCEVable(
I->getType()) && !isa<WithOverflowInst>(
I))
8485 if (It != ValueExprMap.
end()) {
8486 eraseValueFromMap(It->first);
8488 if (
PHINode *PN = dyn_cast<PHINode>(
I))
8489 ConstantEvolutionLoopExitValue.erase(PN);
8503 while (!LoopWorklist.
empty()) {
8507 forgetBackedgeTakenCounts(CurrL,
false);
8508 forgetBackedgeTakenCounts(CurrL,
true);
8511 for (
auto I = PredicatedSCEVRewrites.begin();
8512 I != PredicatedSCEVRewrites.end();) {
8513 std::pair<const SCEV *, const Loop *> Entry =
I->first;
8514 if (Entry.second == CurrL)
8515 PredicatedSCEVRewrites.erase(
I++);
8520 auto LoopUsersItr = LoopUsers.find(CurrL);
8521 if (LoopUsersItr != LoopUsers.end()) {
8522 ToForget.
insert(ToForget.
end(), LoopUsersItr->second.begin(),
8523 LoopUsersItr->second.end());
8528 visitAndClearUsers(Worklist, Visited, ToForget);
8530 LoopPropertiesCache.erase(CurrL);
8533 LoopWorklist.
append(CurrL->begin(), CurrL->end());
8535 forgetMemoizedResults(ToForget);
8552 visitAndClearUsers(Worklist, Visited, ToForget);
8554 forgetMemoizedResults(ToForget);
8566 struct InvalidationRootCollector {
8570 InvalidationRootCollector(
Loop *L) : L(L) {}
8572 bool follow(
const SCEV *S) {
8573 if (
auto *SU = dyn_cast<SCEVUnknown>(S)) {
8574 if (
auto *
I = dyn_cast<Instruction>(SU->getValue()))
8577 }
else if (
auto *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
8578 if (L->contains(AddRec->
getLoop()))
8583 bool isDone()
const {
return false; }
8586 InvalidationRootCollector
C(L);
8588 forgetMemoizedResults(
C.Roots);
8601 BlockDispositions.clear();
8602 LoopDispositions.clear();
8619 while (!Worklist.
empty()) {
8621 bool LoopDispoRemoved = LoopDispositions.erase(Curr);
8622 bool BlockDispoRemoved = BlockDispositions.erase(Curr);
8623 if (!LoopDispoRemoved && !BlockDispoRemoved)
8625 auto Users = SCEVUsers.find(Curr);
8626 if (
Users != SCEVUsers.end())
8639const SCEV *ScalarEvolution::BackedgeTakenInfo::getExact(
8643 if (!isComplete() || ExitNotTaken.empty())
8654 for (
const auto &ENT : ExitNotTaken) {
8655 const SCEV *BECount = ENT.ExactNotTaken;
8658 "We should only have known counts for exiting blocks that dominate "
8666 assert((Preds || ENT.hasAlwaysTruePredicate()) &&
8667 "Predicate should be always true!");
8676const ScalarEvolution::ExitNotTakenInfo *
8677ScalarEvolution::BackedgeTakenInfo::getExitNotTaken(
8680 for (
const auto &ENT : ExitNotTaken)
8681 if (ENT.ExitingBlock == ExitingBlock) {
8682 if (ENT.hasAlwaysTruePredicate())
8684 else if (Predicates) {
8694const SCEV *ScalarEvolution::BackedgeTakenInfo::getConstantMax(
8697 if (!getConstantMax())
8700 for (
const auto &ENT : ExitNotTaken)
8701 if (!ENT.hasAlwaysTruePredicate()) {
8707 assert((isa<SCEVCouldNotCompute>(getConstantMax()) ||
8708 isa<SCEVConstant>(getConstantMax())) &&
8709 "No point in having a non-constant max backedge taken count!");
8710 return getConstantMax();
8713const SCEV *ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(
8723 for (
const auto &ENT : ExitNotTaken) {
8724 const SCEV *ExitCount = ENT.SymbolicMaxNotTaken;
8725 if (!isa<SCEVCouldNotCompute>(ExitCount)) {
8727 "We should only have known counts for exiting blocks that "
8733 assert((Predicates || ENT.hasAlwaysTruePredicate()) &&
8734 "Predicate should be always true!");
8737 if (ExitCounts.
empty())
8746bool ScalarEvolution::BackedgeTakenInfo::isConstantMaxOrZero(
8748 auto PredicateNotAlwaysTrue = [](
const ExitNotTakenInfo &ENT) {
8749 return !ENT.hasAlwaysTruePredicate();
8751 return MaxOrZero && !
any_of(ExitNotTaken, PredicateNotAlwaysTrue);
8758 const SCEV *E,
const SCEV *ConstantMaxNotTaken,
8759 const SCEV *SymbolicMaxNotTaken,
bool MaxOrZero,
8761 : ExactNotTaken(E), ConstantMaxNotTaken(ConstantMaxNotTaken),
8762 SymbolicMaxNotTaken(SymbolicMaxNotTaken), MaxOrZero(MaxOrZero) {
8773 "Exact is not allowed to be less precise than Constant Max");
8776 "Exact is not allowed to be less precise than Symbolic Max");
8779 "Symbolic Max is not allowed to be less precise than Constant Max");
8782 "No point in having a non-constant max backedge taken count!");
8784 for (
const auto PredList : PredLists)
8785 for (
const auto *
P : PredList) {
8788 assert(!isa<SCEVUnionPredicate>(
P) &&
"Only add leaf predicates here!");
8793 "Backedge count should be int");
8796 "Max backedge count should be int");
8800 const SCEV *ConstantMaxNotTaken,
8801 const SCEV *SymbolicMaxNotTaken,
8804 :
ExitLimit(E, ConstantMaxNotTaken, SymbolicMaxNotTaken, MaxOrZero,
8809ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
8811 bool IsComplete,
const SCEV *ConstantMax,
bool MaxOrZero)
8812 : ConstantMax(ConstantMax), IsComplete(IsComplete), MaxOrZero(MaxOrZero) {
8813 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
8815 ExitNotTaken.reserve(ExitCounts.
size());
8816 std::transform(ExitCounts.
begin(), ExitCounts.
end(),
8817 std::back_inserter(ExitNotTaken),
8818 [&](
const EdgeExitInfo &EEI) {
8819 BasicBlock *ExitBB = EEI.first;
8820 const ExitLimit &EL = EEI.second;
8821 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken,
8822 EL.ConstantMaxNotTaken, EL.SymbolicMaxNotTaken,
8825 assert((isa<SCEVCouldNotCompute>(ConstantMax) ||
8826 isa<SCEVConstant>(ConstantMax)) &&
8827 "No point in having a non-constant max backedge taken count!");
8831ScalarEvolution::BackedgeTakenInfo
8832ScalarEvolution::computeBackedgeTakenCount(
const Loop *L,
8833 bool AllowPredicates) {
8835 L->getExitingBlocks(ExitingBlocks);
8837 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
8840 bool CouldComputeBECount =
true;
8842 const SCEV *MustExitMaxBECount =
nullptr;
8843 const SCEV *MayExitMaxBECount =
nullptr;
8844 bool MustExitMaxOrZero =
false;
8845 bool IsOnlyExit = ExitingBlocks.
size() == 1;
8854 if (
auto *BI = dyn_cast<BranchInst>(ExitBB->getTerminator()))
8855 if (
auto *CI = dyn_cast<ConstantInt>(BI->
getCondition())) {
8857 if (ExitIfTrue == CI->
isZero())
8861 ExitLimit EL = computeExitLimit(L, ExitBB, IsOnlyExit, AllowPredicates);
8863 assert((AllowPredicates || EL.Predicates.empty()) &&
8864 "Predicated exit limit when predicates are not allowed!");
8869 ++NumExitCountsComputed;
8873 CouldComputeBECount =
false;
8880 "Exact is known but symbolic isn't?");
8881 ++NumExitCountsNotComputed;
8897 if (!MustExitMaxBECount) {
8898 MustExitMaxBECount = EL.ConstantMaxNotTaken;
8899 MustExitMaxOrZero = EL.MaxOrZero;
8902 EL.ConstantMaxNotTaken);
8906 MayExitMaxBECount = EL.ConstantMaxNotTaken;
8909 EL.ConstantMaxNotTaken);
8913 const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
8917 bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1);
8923 for (
const auto &Pair : ExitCounts) {
8924 if (!isa<SCEVConstant>(Pair.second.ExactNotTaken))
8925 BECountUsers[Pair.second.ExactNotTaken].insert({L, AllowPredicates});
8926 if (!isa<SCEVConstant>(Pair.second.SymbolicMaxNotTaken))
8927 BECountUsers[Pair.second.SymbolicMaxNotTaken].insert(
8928 {L, AllowPredicates});
8930 return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount,
8931 MaxBECount, MaxOrZero);
8935ScalarEvolution::computeExitLimit(
const Loop *L,
BasicBlock *ExitingBlock,
8936 bool IsOnlyExit,
bool AllowPredicates) {
8937 assert(
L->contains(ExitingBlock) &&
"Exit count for non-loop block?");
8941 if (!Latch || !DT.
dominates(ExitingBlock, Latch))
8945 if (
BranchInst *BI = dyn_cast<BranchInst>(Term)) {
8949 "It should have one successor in loop and one exit block!");
8956 if (
SwitchInst *SI = dyn_cast<SwitchInst>(Term)) {
8960 if (!
L->contains(SBB)) {
8965 assert(Exit &&
"Exiting block must have at least one exit");
8966 return computeExitLimitFromSingleExitSwitch(
8967 L, SI, Exit, IsOnlyExit);
8974 const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
bool ControlsOnlyExit,
8975 bool AllowPredicates) {
8976 ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates);
8977 return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue,
8978 ControlsOnlyExit, AllowPredicates);
8981std::optional<ScalarEvolution::ExitLimit>
8982ScalarEvolution::ExitLimitCache::find(
const Loop *L,
Value *ExitCond,
8983 bool ExitIfTrue,
bool ControlsOnlyExit,
8984 bool AllowPredicates) {
8986 (void)this->ExitIfTrue;
8987 (void)this->AllowPredicates;
8989 assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
8990 this->AllowPredicates == AllowPredicates &&
8991 "Variance in assumed invariant key components!");
8992 auto Itr = TripCountMap.find({ExitCond, ControlsOnlyExit});
8993 if (Itr == TripCountMap.end())
8994 return std::nullopt;
8998void ScalarEvolution::ExitLimitCache::insert(
const Loop *L,
Value *ExitCond,
9000 bool ControlsOnlyExit,
9001 bool AllowPredicates,
9002 const ExitLimit &EL) {
9003 assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
9004 this->AllowPredicates == AllowPredicates &&
9005 "Variance in assumed invariant key components!");
9007 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsOnlyExit}, EL});
9008 assert(InsertResult.second &&
"Expected successful insertion!");
9014 ExitLimitCacheTy &Cache,
const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
9015 bool ControlsOnlyExit,
bool AllowPredicates) {
9017 if (
auto MaybeEL = Cache.find(L, ExitCond, ExitIfTrue, ControlsOnlyExit,
9021 ExitLimit EL = computeExitLimitFromCondImpl(
9022 Cache, L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates);
9023 Cache.insert(L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates, EL);
9028 ExitLimitCacheTy &Cache,
const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
9029 bool ControlsOnlyExit,
bool AllowPredicates) {
9031 if (
auto LimitFromBinOp = computeExitLimitFromCondFromBinOp(
9032 Cache, L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates))
9033 return *LimitFromBinOp;
9037 if (
ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) {
9039 computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsOnlyExit);
9040 if (EL.hasFullInfo() || !AllowPredicates)
9044 return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue,
9053 if (
ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
9079 auto EL = computeExitLimitFromICmp(L, Pred, LHS,
getConstant(NewRHSC),
9080 ControlsOnlyExit, AllowPredicates);
9081 if (EL.hasAnyInfo())
9086 return computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
9089std::optional<ScalarEvolution::ExitLimit>
9090ScalarEvolution::computeExitLimitFromCondFromBinOp(
9091 ExitLimitCacheTy &Cache,
const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
9092 bool ControlsOnlyExit,
bool AllowPredicates) {
9101 return std::nullopt;
9106 bool EitherMayExit = IsAnd ^ ExitIfTrue;
9107 ExitLimit EL0 = computeExitLimitFromCondCached(
9108 Cache, L, Op0, ExitIfTrue, ControlsOnlyExit && !EitherMayExit,
9110 ExitLimit EL1 = computeExitLimitFromCondCached(
9111 Cache, L, Op1, ExitIfTrue, ControlsOnlyExit && !EitherMayExit,
9115 const Constant *NeutralElement = ConstantInt::get(ExitCond->
getType(), IsAnd);
9116 if (isa<ConstantInt>(Op1))
9117 return Op1 == NeutralElement ? EL0 : EL1;
9118 if (isa<ConstantInt>(Op0))
9119 return Op0 == NeutralElement ? EL1 : EL0;
9124 if (EitherMayExit) {
9125 bool UseSequentialUMin = !isa<BinaryOperator>(ExitCond);
9134 ConstantMaxBECount = EL1.ConstantMaxNotTaken;
9136 ConstantMaxBECount = EL0.ConstantMaxNotTaken;
9139 EL1.ConstantMaxNotTaken);
9141 SymbolicMaxBECount = EL1.SymbolicMaxNotTaken;
9143 SymbolicMaxBECount = EL0.SymbolicMaxNotTaken;
9146 EL0.SymbolicMaxNotTaken, EL1.SymbolicMaxNotTaken, UseSequentialUMin);
9150 if (EL0.ExactNotTaken == EL1.ExactNotTaken)
9151 BECount = EL0.ExactNotTaken;
9160 if (isa<SCEVCouldNotCompute>(ConstantMaxBECount) &&
9161 !isa<SCEVCouldNotCompute>(BECount))
9163 if (isa<SCEVCouldNotCompute>(SymbolicMaxBECount))
9164 SymbolicMaxBECount =
9165 isa<SCEVCouldNotCompute>(BECount) ? ConstantMaxBECount : BECount;
9166 return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount,
false,
9171 const Loop *L,
ICmpInst *ExitCond,
bool ExitIfTrue,
bool ControlsOnlyExit,
9172 bool AllowPredicates) {
9184 ExitLimit EL = computeExitLimitFromICmp(L, Pred, LHS, RHS, ControlsOnlyExit,
9186 if (EL.hasAnyInfo())
9189 auto *ExhaustiveCount =
9190 computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
9192 if (!isa<SCEVCouldNotCompute>(ExhaustiveCount))
9193 return ExhaustiveCount;
9195 return computeShiftCompareExitLimit(ExitCond->
getOperand(0),
9200 bool ControlsOnlyExit,
bool AllowPredicates) {
9221 if (
const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
9222 if (
const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
9229 if (!isa<SCEVCouldNotCompute>(Ret))
return Ret;
9241 auto *InnerLHS =
LHS;
9242 if (
auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS))
9243 InnerLHS = ZExt->getOperand();
9244 if (
const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(InnerLHS);
9280 if (isa<SCEVCouldNotCompute>(LHS))
9285 if (isa<SCEVCouldNotCompute>(RHS))
9288 ExitLimit EL = howFarToZero(
getMinusSCEV(LHS, RHS), L, ControlsOnlyExit,
9290 if (EL.hasAnyInfo())
9298 if (isa<SCEVCouldNotCompute>(LHS))
9303 if (isa<SCEVCouldNotCompute>(RHS))
9306 ExitLimit EL = howFarToNonZero(
getMinusSCEV(LHS, RHS), L);
9307 if (EL.hasAnyInfo())
return EL;
9319 auto *OldType = dyn_cast<IntegerType>(
LHS->
getType());
9339 ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsOnlyExit,
9341 if (EL.hasAnyInfo())
9357 ExitLimit EL = howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsOnlyExit,
9359 if (EL.hasAnyInfo())
9371ScalarEvolution::computeExitLimitFromSingleExitSwitch(
const Loop *L,
9374 bool ControlsOnlyExit) {
9375 assert(!
L->contains(ExitingBlock) &&
"Not an exiting block!");
9378 if (
Switch->getDefaultDest() == ExitingBlock)
9382 "Default case must not exit the loop!");
9387 ExitLimit EL = howFarToZero(
getMinusSCEV(LHS, RHS), L, ControlsOnlyExit);
9388 if (EL.hasAnyInfo())
9399 assert(isa<SCEVConstant>(Val) &&
9400 "Evaluation of SCEV at constant didn't fold correctly?");
9401 return cast<SCEVConstant>(Val)->getValue();
9414 const BasicBlock *Predecessor =
L->getLoopPredecessor();
9420 auto MatchPositiveShift =
9423 using namespace PatternMatch;
9427 OutOpCode = Instruction::LShr;
9429 OutOpCode = Instruction::AShr;
9431 OutOpCode = Instruction::Shl;
9446 auto MatchShiftRecurrence =
9448 std::optional<Instruction::BinaryOps> PostShiftOpCode;
9463 if (MatchPositiveShift(LHS, V, OpC)) {
9464 PostShiftOpCode = OpC;
9469 PNOut = dyn_cast<PHINode>(LHS);
9470 if (!PNOut || PNOut->getParent() !=
L->getHeader())
9473 Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
9479 MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&
9486 (!PostShiftOpCode || *PostShiftOpCode == OpCodeOut);
9491 if (!MatchShiftRecurrence(LHS, PN, OpCode))
9508 case Instruction::AShr: {
9514 auto *Ty = cast<IntegerType>(
RHS->
getType());
9516 StableValue = ConstantInt::get(Ty, 0);
9518 StableValue = ConstantInt::get(Ty, -1,
true);
9524 case Instruction::LShr:
9525 case Instruction::Shl:
9528 StableValue = ConstantInt::get(cast<IntegerType>(
RHS->
getType()), 0);
9535 "Otherwise cannot be an operand to a branch instruction");
9537 if (
Result->isZeroValue()) {
9539 const SCEV *UpperBound =
9550 if (isa<BinaryOperator>(
I) || isa<CmpInst>(
I) ||
9551 isa<SelectInst>(
I) || isa<CastInst>(
I) || isa<GetElementPtrInst>(
I) ||
9552 isa<LoadInst>(
I) || isa<ExtractValueInst>(
I))
9555 if (
const CallInst *CI = dyn_cast<CallInst>(
I))
9556 if (
const Function *F = CI->getCalledFunction())
9565 if (!L->contains(
I))
return false;
9567 if (isa<PHINode>(
I)) {
9570 return L->getHeader() ==
I->getParent();
9591 if (isa<Constant>(
Op))
continue;
9596 PHINode *
P = dyn_cast<PHINode>(OpInst);
9627 if (
PHINode *PN = dyn_cast<PHINode>(
I))
9644 if (
Constant *
C = dyn_cast<Constant>(V))
return C;
9646 if (!
I)
return nullptr;
9657 if (isa<PHINode>(
I))
return nullptr;
9659 std::vector<Constant*>
Operands(
I->getNumOperands());
9661 for (
unsigned i = 0, e =
I->getNumOperands(); i != e; ++i) {
9662 Instruction *Operand = dyn_cast<Instruction>(
I->getOperand(i));
9664 Operands[i] = dyn_cast<Constant>(
I->getOperand(i));
9670 if (!
C)
return nullptr;
9692 if (IncomingVal != CurrentVal) {
9695 IncomingVal = CurrentVal;
9707ScalarEvolution::getConstantEvolutionLoopExitValue(
PHINode *PN,
9710 auto [
I,
Inserted] = ConstantEvolutionLoopExitValue.try_emplace(PN);
9721 assert(PN->
getParent() == Header &&
"Can't evaluate PHI not in loop header!");
9729 CurrentIterVals[&
PHI] = StartCST;
9731 if (!CurrentIterVals.
count(PN))
9732 return RetVal =
nullptr;
9738 "BEs is <= MaxBruteForceIterations which is an 'unsigned'!");
9741 unsigned IterationNum = 0;
9743 for (; ; ++IterationNum) {
9744 if (IterationNum == NumIterations)
9745 return RetVal = CurrentIterVals[PN];
9754 NextIterVals[PN] = NextPHI;
9756 bool StoppedEvolving = NextPHI == CurrentIterVals[PN];
9762 for (
const auto &
I : CurrentIterVals) {
9764 if (!
PHI ||
PHI == PN ||
PHI->getParent() != Header)
continue;
9769 for (
const auto &
I : PHIsToCompute) {
9773 Value *BEValue =
PHI->getIncomingValueForBlock(Latch);
9776 if (NextPHI !=
I.second)
9777 StoppedEvolving =
false;
9782 if (StoppedEvolving)
9783 return RetVal = CurrentIterVals[PN];
9785 CurrentIterVals.swap(NextIterVals);
9789const SCEV *ScalarEvolution::computeExitCountExhaustively(
const Loop *L,
9801 assert(PN->
getParent() == Header &&
"Can't evaluate PHI not in loop header!");
9804 assert(Latch &&
"Should follow from NumIncomingValues == 2!");
9808 CurrentIterVals[&
PHI] = StartCST;
9810 if (!CurrentIterVals.
count(PN))
9818 for (
unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){
9819 auto *CondVal = dyn_cast_or_null<ConstantInt>(
9825 if (CondVal->getValue() ==
uint64_t(ExitWhen)) {
9826 ++NumBruteForceTripCountsComputed;
9837 for (
const auto &
I : CurrentIterVals) {
9839 if (!
PHI ||
PHI->getParent() != Header)
continue;
9844 if (NextPHI)
continue;
9846 Value *BEValue =
PHI->getIncomingValueForBlock(Latch);
9849 CurrentIterVals.swap(NextIterVals);
9860 for (
auto &LS : Values)
9862 return LS.second ? LS.second : V;
9867 const SCEV *
C = computeSCEVAtScope(V, L);
9868 for (
auto &LS :
reverse(ValuesAtScopes[V]))
9869 if (LS.first == L) {
9871 if (!isa<SCEVConstant>(
C))
9872 ValuesAtScopesUsers[
C].push_back({L, V});
9883 switch (V->getSCEVType()) {
9889 return cast<SCEVConstant>(V)->getValue();
9891 return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue());
9916 assert(!
C->getType()->isPointerTy() &&
9917 "Can only have one pointer, and it must be last");
9944ScalarEvolution::getWithOperands(
const SCEV *S,
9953 auto *AddRec = cast<SCEVAddRecExpr>(S);
9957 return getAddExpr(NewOps, cast<SCEVAddExpr>(S)->getNoWrapFlags());
9959 return getMulExpr(NewOps, cast<SCEVMulExpr>(S)->getNoWrapFlags());
9979const SCEV *ScalarEvolution::computeSCEVAtScope(
const SCEV *V,
const Loop *L) {
9980 switch (
V->getSCEVType()) {
9991 for (
unsigned i = 0, e = AddRec->
getNumOperands(); i != e; ++i) {
10002 for (++i; i !=
e; ++i)
10007 AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec);
10046 for (
unsigned i = 0, e = Ops.
size(); i != e; ++i) {
10048 if (OpAtScope != Ops[i]) {
10056 for (++i; i !=
e; ++i) {
10061 return getWithOperands(V, NewOps);
10075 if (
PHINode *PN = dyn_cast<PHINode>(
I)) {
10076 const Loop *CurrLoop = this->LI[
I->getParent()];
10087 if (BackedgeTakenCount->
isZero()) {
10088 Value *InitValue =
nullptr;
10089 bool MultipleInitValues =
false;
10095 MultipleInitValues =
true;
10100 if (!MultipleInitValues && InitValue)
10105 if (!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
10109 unsigned InLoopPred =
10115 if (
auto *BTCC = dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
10120 getConstantEvolutionLoopExitValue(PN, BTCC->getAPInt(), CurrLoop);
10136 bool MadeImprovement =
false;
10151 MadeImprovement |= OrigV != OpV;
10156 assert(
C->getType() ==
Op->getType() &&
"Type mismatch");
10161 if (!MadeImprovement)
10182const SCEV *ScalarEvolution::stripInjectiveFunctions(
const SCEV *S)
const {
10184 return stripInjectiveFunctions(ZExt->getOperand());
10186 return stripInjectiveFunctions(SExt->getOperand());
10205 assert(
A != 0 &&
"A must be non-zero.");
10241 APInt AD =
A.lshr(Mult2).trunc(BW - Mult2);
10261static std::optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>>
10267 LLVM_DEBUG(
dbgs() << __func__ <<
": analyzing quadratic addrec: "
10268 << *AddRec <<
'\n');
10271 if (!LC || !MC || !
NC) {
10272 LLVM_DEBUG(
dbgs() << __func__ <<
": coefficients are not constant\n");
10273 return std::nullopt;
10279 assert(!
N.isZero() &&
"This is not a quadratic addrec");
10287 N =
N.sext(NewWidth);
10288 M = M.sext(NewWidth);
10289 L = L.sext(NewWidth);
10306 <<
"x + " <<
C <<
", coeff bw: " << NewWidth
10307 <<
", multiplied by " <<
T <<
'\n');
10316 std::optional<APInt>
Y) {
10318 unsigned W = std::max(
X->getBitWidth(),
Y->getBitWidth());
10321 return XW.
slt(YW) ? *
X : *
Y;
10324 return std::nullopt;
10325 return X ? *
X : *
Y;
10342 return std::nullopt;
10343 unsigned W =
X->getBitWidth();
10363static std::optional<APInt>
10369 return std::nullopt;
10372 LLVM_DEBUG(
dbgs() << __func__ <<
": solving for unsigned overflow\n");
10373 std::optional<APInt>
X =
10376 return std::nullopt;
10381 return std::nullopt;
10396static std::optional<APInt>
10400 "Starting value of addrec should be 0");
10401 LLVM_DEBUG(
dbgs() << __func__ <<
": solving boundary crossing for range "
10402 <<
Range <<
", addrec " << *AddRec <<
'\n');
10406 "Addrec's initial value should be in range");
10412 return std::nullopt;
10422 auto SolveForBoundary =
10423 [&](
APInt Bound) -> std::pair<std::optional<APInt>,
bool> {
10426 LLVM_DEBUG(
dbgs() <<
"SolveQuadraticAddRecRange: checking boundary "
10427 << Bound <<
" (before multiplying by " << M <<
")\n");
10430 std::optional<APInt> SO;
10433 "signed overflow\n");
10437 "unsigned overflow\n");
10438 std::optional<APInt> UO =
10441 auto LeavesRange = [&] (
const APInt &
X) {
10458 return {std::nullopt,
false};
10463 if (LeavesRange(*Min))
10464 return { Min,
true };
10465 std::optional<APInt> Max = Min == SO ? UO : SO;
10466 if (LeavesRange(*Max))
10467 return { Max,
true };
10470 return {std::nullopt,
true};
10477 auto SL = SolveForBoundary(
Lower);
10478 auto SU = SolveForBoundary(
Upper);
10481 if (!SL.second || !SU.second)
10482 return std::nullopt;
10527 bool ControlsOnlyExit,
10528 bool AllowPredicates) {
10539 if (
C->getValue()->isZero())
return C;
10544 dyn_cast<SCEVAddRecExpr>(stripInjectiveFunctions(V));
10546 if (!AddRec && AllowPredicates)
10552 if (!AddRec || AddRec->
getLoop() != L)
10563 return ExitLimit(R, R, R,
false, Predicates);
10628 return ExitLimit(Distance,
getConstant(MaxBECount), Distance,
false,
10654 const SCEV *SymbolicMax =
10655 isa<SCEVCouldNotCompute>(
Exact) ? ConstantMax :
Exact;
10656 return ExitLimit(
Exact, ConstantMax, SymbolicMax,
false, Predicates);
10660 const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
10665 AllowPredicates ? &Predicates :
nullptr, *
this);
10672 auto *S = isa<SCEVCouldNotCompute>(E) ?
M : E;
10673 return ExitLimit(E, M, S,
false, Predicates);
10677ScalarEvolution::howFarToNonZero(
const SCEV *V,
const Loop *L) {
10685 if (!
C->getValue()->isZero())
10695std::pair<const BasicBlock *, const BasicBlock *>
10696ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(
const BasicBlock *BB)
10708 return {
L->getLoopPredecessor(),
L->getHeader()};
10710 return {
nullptr, BB};
10719 if (
A ==
B)
return true;
10725 return A->isIdenticalTo(
B) && (isa<BinaryOperator>(
A) || isa<GetElementPtrInst>(
A));
10732 if (
const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
10733 if (
const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
10734 if (ComputesEqualValues(AI, BI))
10743 if (!
Add ||
Add->getNumOperands() != 2)
10745 if (
auto *ME = dyn_cast<SCEVMulExpr>(
Add->getOperand(0));
10746 ME && ME->getNumOperands() == 2 && ME->getOperand(0)->isAllOnesValue()) {
10747 LHS =
Add->getOperand(1);
10748 RHS = ME->getOperand(1);
10751 if (
auto *ME = dyn_cast<SCEVMulExpr>(
Add->getOperand(1));
10752 ME && ME->getNumOperands() == 2 && ME->getOperand(0)->isAllOnesValue()) {
10753 LHS =
Add->getOperand(0);
10754 RHS = ME->getOperand(1);
10762 bool Changed =
false;
10765 auto TrivialCase = [&](
bool TriviallyTrue) {
10779 return TrivialCase(
false);
10780 return TrivialCase(
true);
10803 const APInt &
RA = RC->getAPInt();
10805 bool SimplifiedByConstantRange =
false;
10810 return TrivialCase(
true);
10812 return TrivialCase(
false);
10821 Changed = SimplifiedByConstantRange =
true;
10825 if (!SimplifiedByConstantRange) {
10842 assert(!
RA.isMinValue() &&
"Should have been caught earlier!");
10848 assert(!
RA.isMaxValue() &&
"Should have been caught earlier!");
10854 assert(!
RA.isMinSignedValue() &&
"Should have been caught earlier!");
10860 assert(!
RA.isMaxSignedValue() &&
"Should have been caught earlier!");
10872 return TrivialCase(
true);
10874 return TrivialCase(
false);
10963 if (
const auto *SExt = dyn_cast<SCEVSignExtendExpr>(S))
10970 auto NonRecursive = [
this, OrNegative](
const SCEV *S) {
10971 if (
auto *
C = dyn_cast<SCEVConstant>(S))
10972 return C->getAPInt().isPowerOf2() ||
10973 (OrNegative &&
C->getAPInt().isNegatedPowerOf2());
10976 return isa<SCEVVScale>(S) && F.
hasFnAttribute(Attribute::VScaleRange);
10979 if (NonRecursive(S))
10982 auto *
Mul = dyn_cast<SCEVMulExpr>(S);
10988std::pair<const SCEV *, const SCEV *>
10991 const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *
this);
10993 return { Start, Start };
10995 const SCEV *
PostInc = SCEVPostIncRewriter::rewrite(S, L, *
this);
11004 getUsedLoops(
LHS, LoopsUsed);
11005 getUsedLoops(
RHS, LoopsUsed);
11007 if (LoopsUsed.
empty())
11012 for (
const auto *L1 : LoopsUsed)
11013 for (
const auto *L2 : LoopsUsed)
11015 DT.
dominates(L2->getHeader(), L1->getHeader())) &&
11016 "Domination relationship is not a linear order");
11046 SplitRHS.second) &&
11058 if (isKnownPredicateViaSplitting(Pred,
LHS,
RHS))
11062 return isKnownViaNonRecursiveReasoning(Pred,
LHS,
RHS);
11072 return std::nullopt;
11087 if (KnownWithoutContext)
11088 return KnownWithoutContext;
11095 return std::nullopt;
11101 const Loop *L =
LHS->getLoop();
11106std::optional<ScalarEvolution::MonotonicPredicateType>
11109 auto Result = getMonotonicPredicateTypeImpl(
LHS, Pred);
11115 auto ResultSwapped =
11118 assert(*ResultSwapped != *Result &&
11119 "monotonicity should flip as we flip the predicate");
11126std::optional<ScalarEvolution::MonotonicPredicateType>
11127ScalarEvolution::getMonotonicPredicateTypeImpl(
const SCEVAddRecExpr *LHS,
11141 return std::nullopt;
11145 "Should be greater or less!");
11149 if (!
LHS->hasNoUnsignedWrap())
11150 return std::nullopt;
11154 "Relational predicate is either signed or unsigned!");
11155 if (!
LHS->hasNoSignedWrap())
11156 return std::nullopt;
11158 const SCEV *Step =
LHS->getStepRecurrence(*
this);
11166 return std::nullopt;
11169std::optional<ScalarEvolution::LoopInvariantPredicate>
11177 return std::nullopt;
11184 if (!ArLHS || ArLHS->
getLoop() != L)
11185 return std::nullopt;
11189 return std::nullopt;
11215 return std::nullopt;
11252 return std::nullopt;
11255std::optional<ScalarEvolution::LoopInvariantPredicate>
11260 Pred,
LHS,
RHS, L, CtxI, MaxIter))
11262 if (
auto *
UMin = dyn_cast<SCEVUMinExpr>(MaxIter))
11268 for (
auto *
Op :
UMin->operands())
11272 return std::nullopt;
11275std::optional<ScalarEvolution::LoopInvariantPredicate>
11290 return std::nullopt;
11296 auto *AR = dyn_cast<SCEVAddRecExpr>(
LHS);
11297 if (!AR || AR->
getLoop() != L)
11298 return std::nullopt;
11302 return std::nullopt;
11308 if (Step != One && Step != MinusOne)
11309 return std::nullopt;
11315 return std::nullopt;
11321 return std::nullopt;
11329 if (Step == MinusOne)
11333 return std::nullopt;
11339bool ScalarEvolution::isKnownPredicateViaConstantRanges(
CmpPredicate Pred,
11350 return RangeLHS.
icmp(Pred, RangeRHS);
11361 if (CheckRanges(SL, SR))
11365 if (CheckRanges(UL, UR))
11374 return CheckRanges(SL, SR);
11379 return CheckRanges(UL, UR);
11382bool ScalarEvolution::isKnownPredicateViaNoOverflow(
CmpPredicate Pred,
11389 auto MatchBinaryAddToConst = [
this](
const SCEV *
X,
const SCEV *
Y,
11392 const SCEV *XNonConstOp, *XConstOp;
11393 const SCEV *YNonConstOp, *YConstOp;
11397 if (!splitBinaryAdd(
X, XConstOp, XNonConstOp, XFlagsPresent)) {
11400 XFlagsPresent = ExpectedFlags;
11402 if (!isa<SCEVConstant>(XConstOp) ||
11403 (XFlagsPresent & ExpectedFlags) != ExpectedFlags)
11406 if (!splitBinaryAdd(
Y, YConstOp, YNonConstOp, YFlagsPresent)) {
11409 YFlagsPresent = ExpectedFlags;
11412 if (!isa<SCEVConstant>(YConstOp) ||
11413 (YFlagsPresent & ExpectedFlags) != ExpectedFlags)
11416 if (YNonConstOp != XNonConstOp)
11419 OutC1 = cast<SCEVConstant>(XConstOp)->getAPInt();
11420 OutC2 = cast<SCEVConstant>(YConstOp)->getAPInt();
11475bool ScalarEvolution::isKnownPredicateViaSplitting(
CmpPredicate Pred,
11498 const SCEV *LHS,
const SCEV *RHS) {
11507 return match(&
I, m_Intrinsic<Intrinsic::experimental_guard>(
11509 isImpliedCond(Pred, LHS, RHS, Condition,
false);
11528 "This cannot be done on broken IR!");
11531 if (isKnownViaNonRecursiveReasoning(Pred,
LHS,
RHS))
11540 if (LoopContinuePredicate && LoopContinuePredicate->
isConditional() &&
11541 isImpliedCond(Pred,
LHS,
RHS,
11543 LoopContinuePredicate->
getSuccessor(0) != L->getHeader()))
11548 if (WalkingBEDominatingConds)
11554 const auto &BETakenInfo = getBackedgeTakenInfo(L);
11555 const SCEV *LatchBECount = BETakenInfo.getExact(Latch,
this);
11562 const SCEV *LoopCounter =
11573 auto *CI = cast<CallInst>(AssumeVH);
11577 if (isImpliedCond(Pred,
LHS,
RHS, CI->getArgOperand(0),
false))
11581 if (isImpliedViaGuard(Latch, Pred,
LHS,
RHS))
11584 for (
DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()];
11585 DTN != HeaderDTN; DTN = DTN->getIDom()) {
11586 assert(DTN &&
"should reach the loop header before reaching the root!");
11589 if (isImpliedViaGuard(BB, Pred,
LHS,
RHS))
11597 if (!ContinuePredicate || !ContinuePredicate->
isConditional())
11613 if (isImpliedCond(Pred,
LHS,
RHS, Condition,
11631 "This cannot be done on broken IR!");
11639 const bool ProvingStrictComparison = (Pred != NonStrictPredicate);
11640 bool ProvedNonStrictComparison =
false;
11641 bool ProvedNonEquality =
false;
11644 if (!ProvedNonStrictComparison)
11645 ProvedNonStrictComparison = Fn(NonStrictPredicate);
11646 if (!ProvedNonEquality)
11648 if (ProvedNonStrictComparison && ProvedNonEquality)
11653 if (ProvingStrictComparison) {
11655 return isKnownViaNonRecursiveReasoning(
P,
LHS,
RHS);
11657 if (SplitAndProve(ProofFn))
11662 auto ProveViaCond = [&](
const Value *Condition,
bool Inverse) {
11664 if (isImpliedCond(Pred,
LHS,
RHS, Condition,
Inverse, CtxI))
11666 if (ProvingStrictComparison) {
11670 if (SplitAndProve(ProofFn))
11681 if (ContainingLoop && ContainingLoop->
getHeader() == BB)
11685 for (std::pair<const BasicBlock *, const BasicBlock *> Pair(PredBB, BB);
11686 Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
11688 dyn_cast<BranchInst>(Pair.first->getTerminator());
11701 auto *CI = cast<CallInst>(AssumeVH);
11705 if (ProveViaCond(CI->getArgOperand(0),
false))
11711 F.
getParent(), Intrinsic::experimental_guard);
11713 for (
const auto *GU : GuardDecl->users())
11714 if (
const auto *Guard = dyn_cast<IntrinsicInst>(GU))
11716 if (ProveViaCond(Guard->getArgOperand(0),
false))
11731 "LHS is not available at Loop Entry");
11733 "RHS is not available at Loop Entry");
11735 if (isKnownViaNonRecursiveReasoning(Pred,
LHS,
RHS))
11746 if (FoundCondValue ==
11750 if (!PendingLoopPredicates.insert(FoundCondValue).second)
11754 make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); });
11757 const Value *Op0, *Op1;
11760 return isImpliedCond(Pred, LHS, RHS, Op0,
Inverse, CtxI) ||
11761 isImpliedCond(Pred, LHS, RHS, Op1,
Inverse, CtxI);
11764 return isImpliedCond(Pred, LHS, RHS, Op0,
Inverse, CtxI) ||
11765 isImpliedCond(Pred, LHS, RHS, Op1,
Inverse, CtxI);
11768 const ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
11769 if (!ICI)
return false;
11782 return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS, CtxI);
11787 const SCEV *FoundLHS,
const SCEV *FoundRHS,
11798 auto *WideType = FoundLHS->
getType();
11808 if (isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, TruncFoundLHS,
11809 TruncFoundRHS, CtxI))
11835 return isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, FoundLHS,
11839bool ScalarEvolution::isImpliedCondBalancedTypes(
11844 "Types should be balanced!");
11851 if (FoundLHS == FoundRHS)
11855 if (LHS == FoundRHS || RHS == FoundLHS) {
11856 if (isa<SCEVConstant>(RHS)) {
11868 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, CtxI);
11884 if (!isa<SCEVConstant>(RHS) && !isa<SCEVAddRecExpr>(LHS))
11885 return isImpliedCondOperands(FoundPred, RHS, LHS, FoundLHS, FoundRHS,
11887 if (!isa<SCEVConstant>(FoundRHS) && !isa<SCEVAddRecExpr>(FoundLHS))
11888 return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS, CtxI);
11895 FoundLHS, FoundRHS, CtxI))
11900 isImpliedCondOperands(Pred, LHS, RHS,
getNotSCEV(FoundLHS),
11909 assert(P1 != P2 &&
"Handled earlier!");
11913 if (IsSignFlippedPredicate(Pred, FoundPred)) {
11918 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, CtxI);
11921 CmpPredicate CanonicalPred = Pred, CanonicalFoundPred = FoundPred;
11922 const SCEV *CanonicalLHS =
LHS, *CanonicalRHS =
RHS,
11923 *CanonicalFoundLHS = FoundLHS, *CanonicalFoundRHS = FoundRHS;
11928 std::swap(CanonicalFoundLHS, CanonicalFoundRHS);
11939 return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS,
11940 CanonicalRHS, CanonicalFoundLHS,
11941 CanonicalFoundRHS);
11946 return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS,
11947 CanonicalRHS, CanonicalFoundLHS,
11948 CanonicalFoundRHS);
11953 (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) {
11956 const SCEV *
V =
nullptr;
11958 if (isa<SCEVConstant>(FoundLHS)) {
11959 C = cast<SCEVConstant>(FoundLHS);
11962 C = cast<SCEVConstant>(FoundRHS);
11974 if (Min ==
C->getAPInt()) {
11979 APInt SharperMin = Min + 1;
11986 if (isImpliedCondOperands(Pred, LHS, RHS, V,
getConstant(SharperMin),
12002 if (isImpliedCondOperands(Pred, LHS, RHS, V,
getConstant(Min), CtxI))
12031 if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, CtxI))
12035 if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS, CtxI))
12038 if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS))
12045bool ScalarEvolution::splitBinaryAdd(
const SCEV *Expr,
12048 const auto *AE = dyn_cast<SCEVAddExpr>(Expr);
12049 if (!AE || AE->getNumOperands() != 2)
12052 L = AE->getOperand(0);
12053 R = AE->getOperand(1);
12054 Flags = AE->getNoWrapFlags();
12058std::optional<APInt>
12065 APInt DiffMul(BW, 1);
12068 for (
unsigned I = 0;
I < 8; ++
I) {
12073 if (isa<SCEVAddRecExpr>(
Less) && isa<SCEVAddRecExpr>(More)) {
12074 const auto *LAR = cast<SCEVAddRecExpr>(
Less);
12075 const auto *MAR = cast<SCEVAddRecExpr>(More);
12077 if (LAR->getLoop() != MAR->getLoop())
12078 return std::nullopt;
12082 if (!LAR->isAffine() || !MAR->isAffine())
12083 return std::nullopt;
12085 if (LAR->getStepRecurrence(*
this) != MAR->getStepRecurrence(*
this))
12086 return std::nullopt;
12088 Less = LAR->getStart();
12089 More = MAR->getStart();
12094 auto MatchConstMul =
12095 [](
const SCEV *S) -> std::optional<std::pair<const SCEV *, APInt>> {
12096 auto *M = dyn_cast<SCEVMulExpr>(S);
12097 if (!M || M->getNumOperands() != 2 ||
12098 !isa<SCEVConstant>(M->getOperand(0)))
12099 return std::nullopt;
12101 {M->getOperand(1), cast<SCEVConstant>(M->getOperand(0))->getAPInt()}};
12103 if (
auto MatchedMore = MatchConstMul(More)) {
12104 if (
auto MatchedLess = MatchConstMul(
Less)) {
12105 if (MatchedMore->second == MatchedLess->second) {
12106 More = MatchedMore->first;
12107 Less = MatchedLess->first;
12108 DiffMul *= MatchedMore->second;
12117 if (
auto *
C = dyn_cast<SCEVConstant>(S)) {
12119 Diff +=
C->getAPInt() * DiffMul;
12122 Diff -=
C->getAPInt() * DiffMul;
12125 Multiplicity[S] +=
Mul;
12127 auto Decompose = [&](
const SCEV *S,
int Mul) {
12128 if (isa<SCEVAddExpr>(S)) {
12134 Decompose(More, 1);
12135 Decompose(
Less, -1);
12139 const SCEV *NewMore =
nullptr, *NewLess =
nullptr;
12140 for (
const auto &[S,
Mul] : Multiplicity) {
12145 return std::nullopt;
12147 }
else if (
Mul == -1) {
12149 return std::nullopt;
12152 return std::nullopt;
12156 if (NewMore == More || NewLess ==
Less)
12157 return std::nullopt;
12163 if (!More && !
Less)
12167 if (!More || !
Less)
12168 return std::nullopt;
12172 return std::nullopt;
12175bool ScalarEvolution::isImpliedCondOperandsViaAddRecStart(
12195 if (
auto *AR = dyn_cast<SCEVAddRecExpr>(FoundLHS)) {
12199 if (!L->contains(ContextBB) || !DT.
dominates(ContextBB, L->getLoopLatch()))
12203 return isImpliedCondOperands(Pred, LHS, RHS, AR->
getStart(), FoundRHS);
12206 if (
auto *AR = dyn_cast<SCEVAddRecExpr>(FoundRHS)) {
12210 if (!L->contains(ContextBB) || !DT.
dominates(ContextBB, L->getLoopLatch()))
12214 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, AR->
getStart());
12220bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow(
CmpPredicate Pred,
12223 const SCEV *FoundLHS,
12224 const SCEV *FoundRHS) {
12228 const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS);
12232 const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS);
12233 if (!AddRecFoundLHS)
12240 const Loop *
L = AddRecFoundLHS->getLoop();
12241 if (L != AddRecLHS->getLoop())
12280 if (!RDiff || *LDiff != *RDiff)
12283 if (LDiff->isMinValue())
12286 APInt FoundRHSLimit;
12289 FoundRHSLimit = -(*RDiff);
12301bool ScalarEvolution::isImpliedViaMerge(
CmpPredicate Pred,
const SCEV *LHS,
12302 const SCEV *RHS,
const SCEV *FoundLHS,
12303 const SCEV *FoundRHS,
unsigned Depth) {
12304 const PHINode *LPhi =
nullptr, *RPhi =
nullptr;
12308 bool Erased = PendingMerges.erase(LPhi);
12309 assert(Erased &&
"Failed to erase LPhi!");
12313 bool Erased = PendingMerges.erase(RPhi);
12314 assert(Erased &&
"Failed to erase RPhi!");
12320 if (
const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS))
12321 if (
auto *Phi = dyn_cast<PHINode>(LU->getValue())) {
12322 if (!PendingMerges.insert(Phi).second)
12326 if (
const SCEVUnknown *RU = dyn_cast<SCEVUnknown>(RHS))
12327 if (
auto *Phi = dyn_cast<PHINode>(RU->getValue())) {
12336 if (!PendingMerges.insert(Phi).second)
12342 if (!LPhi && !RPhi)
12353 assert(LPhi &&
"LPhi should definitely be a SCEVUnknown Phi!");
12357 auto ProvedEasily = [&](
const SCEV *
S1,
const SCEV *S2) {
12358 return isKnownViaNonRecursiveReasoning(Pred,
S1, S2) ||
12359 isImpliedCondOperandsViaRanges(Pred,
S1, S2, Pred, FoundLHS, FoundRHS) ||
12360 isImpliedViaOperations(Pred,
S1, S2, FoundLHS, FoundRHS,
Depth);
12363 if (RPhi && RPhi->getParent() == LBB) {
12370 const SCEV *
R =
getSCEV(RPhi->getIncomingValueForBlock(IncBB));
12371 if (!ProvedEasily(L, R))
12382 auto *RLoop = RAR->
getLoop();
12383 auto *Predecessor = RLoop->getLoopPredecessor();
12384 assert(Predecessor &&
"Loop with AddRec with no predecessor?");
12386 if (!ProvedEasily(L1, RAR->
getStart()))
12388 auto *Latch = RLoop->getLoopLatch();
12389 assert(Latch &&
"Loop with AddRec with no latch?");
12407 if (!ProvedEasily(L, RHS))
12414bool ScalarEvolution::isImpliedCondOperandsViaShift(
CmpPredicate Pred,
12417 const SCEV *FoundLHS,
12418 const SCEV *FoundRHS) {
12421 if (RHS == FoundRHS) {
12426 if (LHS != FoundLHS)
12429 auto *SUFoundRHS = dyn_cast<SCEVUnknown>(FoundRHS);
12433 Value *Shiftee, *ShiftValue;
12435 using namespace PatternMatch;
12436 if (
match(SUFoundRHS->getValue(),
12438 auto *ShifteeS =
getSCEV(Shiftee);
12456bool ScalarEvolution::isImpliedCondOperands(
CmpPredicate Pred,
const SCEV *LHS,
12458 const SCEV *FoundLHS,
12459 const SCEV *FoundRHS,
12461 if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, Pred, FoundLHS, FoundRHS))
12464 if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS))
12467 if (isImpliedCondOperandsViaShift(Pred, LHS, RHS, FoundLHS, FoundRHS))
12470 if (isImpliedCondOperandsViaAddRecStart(Pred, LHS, RHS, FoundLHS, FoundRHS,
12474 return isImpliedCondOperandsHelper(Pred, LHS, RHS,
12475 FoundLHS, FoundRHS);
12479template <
typename MinMaxExprType>
12481 const SCEV *Candidate) {
12482 const MinMaxExprType *MinMaxExpr = dyn_cast<MinMaxExprType>(MaybeMinMaxExpr);
12486 return is_contained(MinMaxExpr->operands(), Candidate);
12524 const SCEV *LHS,
const SCEV *RHS) {
12535 IsMinMaxConsistingOf<SCEVSMinExpr>(
LHS,
RHS) ||
12537 IsMinMaxConsistingOf<SCEVSMaxExpr>(
RHS,
LHS);
12546 IsMinMaxConsistingOf<SCEVUMinExpr>(
LHS,
RHS) ||
12548 IsMinMaxConsistingOf<SCEVUMaxExpr>(
RHS,
LHS);
12554bool ScalarEvolution::isImpliedViaOperations(
CmpPredicate Pred,
const SCEV *LHS,
12556 const SCEV *FoundLHS,
12557 const SCEV *FoundRHS,
12561 "LHS and RHS have different sizes?");
12564 "FoundLHS and FoundRHS have different sizes?");
12596 auto GetOpFromSExt = [&](
const SCEV *S) {
12597 if (
auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
12598 return Ext->getOperand();
12605 auto *OrigLHS =
LHS;
12606 auto *OrigFoundLHS = FoundLHS;
12607 LHS = GetOpFromSExt(LHS);
12608 FoundLHS = GetOpFromSExt(FoundLHS);
12611 auto IsSGTViaContext = [&](
const SCEV *
S1,
const SCEV *S2) {
12614 FoundRHS,
Depth + 1);
12617 if (
auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) {
12627 if (!LHSAddExpr->hasNoSignedWrap())
12630 auto *LL = LHSAddExpr->getOperand(0);
12631 auto *LR = LHSAddExpr->getOperand(1);
12635 auto IsSumGreaterThanRHS = [&](
const SCEV *
S1,
const SCEV *S2) {
12636 return IsSGTViaContext(
S1, MinusOne) && IsSGTViaContext(S2, RHS);
12641 if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
12643 }
else if (
auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) {
12658 if (!isa<ConstantInt>(LR))
12661 auto *Denominator = cast<SCEVConstant>(
getSCEV(LR));
12666 if (!Numerator || Numerator->getType() != FoundLHS->
getType())
12674 auto *DTy = Denominator->getType();
12675 auto *FRHSTy = FoundRHS->
getType();
12676 if (DTy->isPointerTy() != FRHSTy->isPointerTy())
12695 IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
12706 auto *NegDenomMinusOne =
getMinusSCEV(MinusOne, DenominatorExt);
12708 IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
12716 if (isImpliedViaMerge(Pred, OrigLHS, RHS, OrigFoundLHS, FoundRHS,
Depth + 1))
12749bool ScalarEvolution::isKnownViaNonRecursiveReasoning(
CmpPredicate Pred,
12753 isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
12756 isKnownPredicateViaNoOverflow(Pred, LHS, RHS);
12759bool ScalarEvolution::isImpliedCondOperandsHelper(
CmpPredicate Pred,
12762 const SCEV *FoundLHS,
12763 const SCEV *FoundRHS) {
12799 if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS))
12805bool ScalarEvolution::isImpliedCondOperandsViaRanges(
12807 const SCEV *FoundLHS,
const SCEV *FoundRHS) {
12808 if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
12817 const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();
12829 const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt();
12832 return LHSRange.
icmp(Pred, ConstRHS);
12835bool ScalarEvolution::canIVOverflowOnLT(
const SCEV *RHS,
const SCEV *Stride,
12848 return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS);
12856 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS);
12859bool ScalarEvolution::canIVOverflowOnGT(
const SCEV *RHS,
const SCEV *Stride,
12871 return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS);
12879 return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS);
12891const SCEV *ScalarEvolution::computeMaxBECountForLT(
const SCEV *Start,
12892 const SCEV *Stride,
12919 : APIntOps::umax(One, MinStride);
12923 APInt Limit = MaxValue - (StrideForMaxBECount - 1);
12934 : APIntOps::umax(MaxEnd, MinStart);
12941ScalarEvolution::howManyLessThans(
const SCEV *LHS,
const SCEV *RHS,
12942 const Loop *L,
bool IsSigned,
12943 bool ControlsOnlyExit,
bool AllowPredicates) {
12947 bool PredicatedIV =
false;
12949 if (
auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS)) {
12950 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(ZExt->getOperand());
12952 auto canProveNUW = [&]() {
12955 if (!ControlsOnlyExit)
12976 Limit = Limit.
zext(OuterBitWidth);
12988 Type *Ty = ZExt->getType();
12990 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty,
this, 0),
12992 IV = dyn_cast<SCEVAddRecExpr>(S);
12999 if (!
IV && AllowPredicates) {
13004 PredicatedIV =
true;
13008 if (!
IV ||
IV->getLoop() != L || !
IV->isAffine())
13022 bool NoWrap = ControlsOnlyExit &&
IV->getNoWrapFlags(WrapType);
13025 const SCEV *Stride =
IV->getStepRecurrence(*
this);
13030 if (!PositiveStride) {
13082 auto wouldZeroStrideBeUB = [&]() {
13094 if (!wouldZeroStrideBeUB()) {
13098 }
else if (!NoWrap) {
13101 if (canIVOverflowOnLT(RHS, Stride, IsSigned))
13114 const SCEV *Start =
IV->getStart();
13120 const SCEV *OrigStart = Start;
13122 if (Start->getType()->isPointerTy()) {
13124 if (isa<SCEVCouldNotCompute>(Start))
13129 if (isa<SCEVCouldNotCompute>(RHS))
13133 const SCEV *
End =
nullptr, *BECount =
nullptr,
13134 *BECountIfBackedgeTaken =
nullptr;
13136 const auto *RHSAddRec = dyn_cast<SCEVAddRecExpr>(RHS);
13137 if (PositiveStride && RHSAddRec !=
nullptr && RHSAddRec->getLoop() == L &&
13138 RHSAddRec->getNoWrapFlags()) {
13151 const SCEV *RHSStart = RHSAddRec->getStart();
13152 const SCEV *RHSStride = RHSAddRec->getStepRecurrence(*
this);
13173 BECountIfBackedgeTaken =
13178 if (BECount ==
nullptr) {
13183 const SCEV *MaxBECount = computeMaxBECountForLT(
13186 MaxBECount,
false , Predicates);
13193 auto *OrigStartMinusStride =
getMinusSCEV(OrigStart, Stride);
13220 const SCEV *Numerator =
13226 auto canProveRHSGreaterThanEqualStart = [&]() {
13245 auto *StartMinusOne =
13252 if (canProveRHSGreaterThanEqualStart()) {
13267 BECountIfBackedgeTaken =
13283 bool MayAddOverflow = [&] {
13329 if (Start == Stride || Start ==
getMinusSCEV(Stride, One)) {
13343 if (!MayAddOverflow) {
13355 const SCEV *ConstantMaxBECount;
13356 bool MaxOrZero =
false;
13357 if (isa<SCEVConstant>(BECount)) {
13358 ConstantMaxBECount = BECount;
13359 }
else if (BECountIfBackedgeTaken &&
13360 isa<SCEVConstant>(BECountIfBackedgeTaken)) {
13364 ConstantMaxBECount = BECountIfBackedgeTaken;
13367 ConstantMaxBECount = computeMaxBECountForLT(
13371 if (isa<SCEVCouldNotCompute>(ConstantMaxBECount) &&
13372 !isa<SCEVCouldNotCompute>(BECount))
13375 const SCEV *SymbolicMaxBECount =
13376 isa<SCEVCouldNotCompute>(BECount) ? ConstantMaxBECount : BECount;
13377 return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount, MaxOrZero,
13382 const SCEV *LHS,
const SCEV *RHS,
const Loop *L,
bool IsSigned,
13383 bool ControlsOnlyExit,
bool AllowPredicates) {
13390 if (!
IV && AllowPredicates)
13397 if (!
IV ||
IV->getLoop() != L || !
IV->isAffine())
13401 bool NoWrap = ControlsOnlyExit &&
IV->getNoWrapFlags(WrapType);
13414 if (!Stride->
isOne() && !NoWrap)
13415 if (canIVOverflowOnGT(RHS, Stride, IsSigned))
13418 const SCEV *Start =
IV->getStart();
13430 if (Start->getType()->isPointerTy()) {
13432 if (isa<SCEVCouldNotCompute>(Start))
13435 if (
End->getType()->isPointerTy()) {
13437 if (isa<SCEVCouldNotCompute>(
End))
13465 const SCEV *ConstantMaxBECount =
13466 isa<SCEVConstant>(BECount)
13471 if (isa<SCEVCouldNotCompute>(ConstantMaxBECount))
13472 ConstantMaxBECount = BECount;
13473 const SCEV *SymbolicMaxBECount =
13474 isa<SCEVCouldNotCompute>(BECount) ? ConstantMaxBECount : BECount;
13476 return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount,
false,
13486 if (
const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
13487 if (!SC->getValue()->isZero()) {
13491 getNoWrapFlags(FlagNW));
13492 if (
const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted))
13493 return ShiftedAddRec->getNumIterationsInRange(
13501 if (
any_of(operands(), [](
const SCEV *
Op) {
return !isa<SCEVConstant>(
Op); }))
13521 APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt();
13539 "Linear scev computation is off in a bad way!");
13543 if (isQuadratic()) {
13553 assert(getNumOperands() > 1 &&
"AddRec with zero step?");
13563 for (
unsigned i = 0, e = getNumOperands() - 1; i < e; ++i)
13569 const SCEV *
Last = getOperand(getNumOperands() - 1);
13570 assert(!
Last->isZero() &&
"Recurrency with zero step?");
13572 return cast<SCEVAddRecExpr>(SE.
getAddRecExpr(Ops, getLoop(),
13579 if (
const auto *SU = dyn_cast<SCEVUnknown>(S))
13580 return isa<UndefValue>(SU->
getValue());
13588 if (
const auto *SU = dyn_cast<SCEVUnknown>(S))
13597 if (
StoreInst *Store = dyn_cast<StoreInst>(Inst))
13598 Ty = Store->getValueOperand()->getType();
13599 else if (
LoadInst *Load = dyn_cast<LoadInst>(Inst))
13600 Ty = Load->getType();
13612void ScalarEvolution::SCEVCallbackVH::deleted() {
13613 assert(SE &&
"SCEVCallbackVH called with a null ScalarEvolution!");
13614 if (
PHINode *PN = dyn_cast<PHINode>(getValPtr()))
13615 SE->ConstantEvolutionLoopExitValue.erase(PN);
13616 SE->eraseValueFromMap(getValPtr());
13620void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(
Value *V) {
13621 assert(SE &&
"SCEVCallbackVH called with a null ScalarEvolution!");
13640 :
F(
F),
DL(
F.getDataLayout()), TLI(TLI), AC(AC), DT(DT), LI(LI),
13642 LoopDispositions(64), BlockDispositions(64) {
13654 F.getParent(), Intrinsic::experimental_guard);
13655 HasGuards = GuardDecl && !GuardDecl->use_empty();
13659 :
F(Arg.
F),
DL(Arg.
DL), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC),
13660 DT(Arg.DT), LI(Arg.LI), CouldNotCompute(
std::
move(Arg.CouldNotCompute)),
13661 ValueExprMap(
std::
move(Arg.ValueExprMap)),
13662 PendingLoopPredicates(
std::
move(Arg.PendingLoopPredicates)),
13663 PendingPhiRanges(
std::
move(Arg.PendingPhiRanges)),
13664 PendingMerges(
std::
move(Arg.PendingMerges)),
13665 ConstantMultipleCache(
std::
move(Arg.ConstantMultipleCache)),
13666 BackedgeTakenCounts(
std::
move(Arg.BackedgeTakenCounts)),
13667 PredicatedBackedgeTakenCounts(
13668 std::
move(Arg.PredicatedBackedgeTakenCounts)),
13669 BECountUsers(
std::
move(Arg.BECountUsers)),
13670 ConstantEvolutionLoopExitValue(
13671 std::
move(Arg.ConstantEvolutionLoopExitValue)),
13672 ValuesAtScopes(
std::
move(Arg.ValuesAtScopes)),
13673 ValuesAtScopesUsers(
std::
move(Arg.ValuesAtScopesUsers)),
13674 LoopDispositions(
std::
move(Arg.LoopDispositions)),
13675 LoopPropertiesCache(
std::
move(Arg.LoopPropertiesCache)),
13676 BlockDispositions(
std::
move(Arg.BlockDispositions)),
13677 SCEVUsers(
std::
move(Arg.SCEVUsers)),
13678 UnsignedRanges(
std::
move(Arg.UnsignedRanges)),
13679 SignedRanges(
std::
move(Arg.SignedRanges)),
13680 UniqueSCEVs(
std::
move(Arg.UniqueSCEVs)),
13681 UniquePreds(
std::
move(Arg.UniquePreds)),
13682 SCEVAllocator(
std::
move(Arg.SCEVAllocator)),
13683 LoopUsers(
std::
move(Arg.LoopUsers)),
13684 PredicatedSCEVRewrites(
std::
move(Arg.PredicatedSCEVRewrites)),
13685 FirstUnknown(Arg.FirstUnknown) {
13686 Arg.FirstUnknown =
nullptr;
13695 Tmp->~SCEVUnknown();
13697 FirstUnknown =
nullptr;
13699 ExprValueMap.
clear();
13700 ValueExprMap.
clear();
13702 BackedgeTakenCounts.clear();
13703 PredicatedBackedgeTakenCounts.clear();
13705 assert(PendingLoopPredicates.empty() &&
"isImpliedCond garbage");
13706 assert(PendingPhiRanges.empty() &&
"getRangeRef garbage");
13707 assert(PendingMerges.empty() &&
"isImpliedViaMerge garbage");
13708 assert(!WalkingBEDominatingConds &&
"isLoopBackedgeGuardedByCond garbage!");
13709 assert(!ProvingSplitPredicate &&
"ProvingSplitPredicate garbage!");
13719 if (isa<SCEVConstant>(S))
13731 L->getHeader()->printAsOperand(
OS,
false);
13735 L->getExitingBlocks(ExitingBlocks);
13736 if (ExitingBlocks.
size() != 1)
13737 OS <<
"<multiple exits> ";
13740 if (!isa<SCEVCouldNotCompute>(BTC)) {
13741 OS <<
"backedge-taken count is ";
13744 OS <<
"Unpredictable backedge-taken count.";
13747 if (ExitingBlocks.
size() > 1)
13748 for (
BasicBlock *ExitingBlock : ExitingBlocks) {
13749 OS <<
" exit count for " << ExitingBlock->
getName() <<
": ";
13752 if (isa<SCEVCouldNotCompute>(EC)) {
13756 if (!isa<SCEVCouldNotCompute>(EC)) {
13757 OS <<
"\n predicated exit count for " << ExitingBlock->
getName()
13760 OS <<
"\n Predicates:\n";
13761 for (
const auto *
P : Predicates)
13769 L->getHeader()->printAsOperand(
OS,
false);
13773 if (!isa<SCEVCouldNotCompute>(ConstantBTC)) {
13774 OS <<
"constant max backedge-taken count is ";
13777 OS <<
", actual taken count either this or zero.";
13779 OS <<
"Unpredictable constant max backedge-taken count. ";
13784 L->getHeader()->printAsOperand(
OS,
false);
13788 if (!isa<SCEVCouldNotCompute>(SymbolicBTC)) {
13789 OS <<
"symbolic max backedge-taken count is ";
13792 OS <<
", actual taken count either this or zero.";
13794 OS <<
"Unpredictable symbolic max backedge-taken count. ";
13798 if (ExitingBlocks.
size() > 1)
13799 for (
BasicBlock *ExitingBlock : ExitingBlocks) {
13800 OS <<
" symbolic max exit count for " << ExitingBlock->
getName() <<
": ";
13804 if (isa<SCEVCouldNotCompute>(ExitBTC)) {
13809 if (!isa<SCEVCouldNotCompute>(ExitBTC)) {
13810 OS <<
"\n predicated symbolic max exit count for "
13811 << ExitingBlock->
getName() <<
": ";
13813 OS <<
"\n Predicates:\n";
13814 for (
const auto *
P : Predicates)
13824 assert(!Preds.
empty() &&
"Different predicated BTC, but no predicates");
13826 L->getHeader()->printAsOperand(
OS,
false);
13828 if (!isa<SCEVCouldNotCompute>(PBT)) {
13829 OS <<
"Predicated backedge-taken count is ";
13832 OS <<
"Unpredictable predicated backedge-taken count.";
13834 OS <<
" Predicates:\n";
13835 for (
const auto *
P : Preds)
13840 auto *PredConstantMax =
13842 if (PredConstantMax != ConstantBTC) {
13844 "different predicated constant max BTC but no predicates");
13846 L->getHeader()->printAsOperand(
OS,
false);
13848 if (!isa<SCEVCouldNotCompute>(PredConstantMax)) {
13849 OS <<
"Predicated constant max backedge-taken count is ";
13852 OS <<
"Unpredictable predicated constant max backedge-taken count.";
13854 OS <<
" Predicates:\n";
13855 for (
const auto *
P : Preds)
13860 auto *PredSymbolicMax =
13862 if (SymbolicBTC != PredSymbolicMax) {
13864 "Different predicated symbolic max BTC, but no predicates");
13866 L->getHeader()->printAsOperand(
OS,
false);
13868 if (!isa<SCEVCouldNotCompute>(PredSymbolicMax)) {
13869 OS <<
"Predicated symbolic max backedge-taken count is ";
13872 OS <<
"Unpredictable predicated symbolic max backedge-taken count.";
13874 OS <<
" Predicates:\n";
13875 for (
const auto *
P : Preds)
13881 L->getHeader()->printAsOperand(
OS,
false);
13897 OS <<
"Computable";
13906 OS <<
"DoesNotDominate";
13912 OS <<
"ProperlyDominates";
13929 OS <<
"Classifying expressions for: ";
13938 if (!isa<SCEVCouldNotCompute>(SV)) {
13951 if (!isa<SCEVCouldNotCompute>(AtUse)) {
13960 OS <<
"\t\t" "Exits: ";
13963 OS <<
"<<Unknown>>";
13969 for (
const auto *Iter = L; Iter; Iter = Iter->getParentLoop()) {
13971 OS <<
"\t\t" "LoopDispositions: { ";
13977 Iter->getHeader()->printAsOperand(
OS,
false);
13985 OS <<
"\t\t" "LoopDispositions: { ";
13991 InnerL->getHeader()->printAsOperand(
OS,
false);
14002 OS <<
"Determining loop execution counts for: ";
14011 auto &Values = LoopDispositions[S];
14012 for (
auto &V : Values) {
14013 if (V.getPointer() == L)
14018 auto &Values2 = LoopDispositions[S];
14020 if (V.getPointer() == L) {
14029ScalarEvolution::computeLoopDisposition(
const SCEV *S,
const Loop *L) {
14048 assert(!L->contains(AR->
getLoop()) &&
"Containing loop's header does not"
14049 " dominate the contained loop's header?");
14076 bool HasVarying =
false;
14091 if (
auto *
I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue()))
14110 auto &Values = BlockDispositions[S];
14111 for (
auto &V : Values) {
14112 if (V.getPointer() == BB)
14117 auto &Values2 = BlockDispositions[S];
14119 if (V.getPointer() == BB) {
14128ScalarEvolution::computeBlockDisposition(
const SCEV *S,
const BasicBlock *BB) {
14157 bool Proper =
true;
14169 dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
14170 if (
I->getParent() == BB)
14195void ScalarEvolution::forgetBackedgeTakenCounts(
const Loop *L,
14198 Predicated ? PredicatedBackedgeTakenCounts : BackedgeTakenCounts;
14199 auto It = BECounts.find(L);
14200 if (It != BECounts.end()) {
14201 for (
const ExitNotTakenInfo &ENT : It->second.ExitNotTaken) {
14202 for (
const SCEV *S : {ENT.ExactNotTaken, ENT.SymbolicMaxNotTaken}) {
14203 if (!isa<SCEVConstant>(S)) {
14204 auto UserIt = BECountUsers.find(S);
14205 assert(UserIt != BECountUsers.end());
14210 BECounts.erase(It);
14218 while (!Worklist.
empty()) {
14220 auto Users = SCEVUsers.find(Curr);
14221 if (
Users != SCEVUsers.end())
14227 for (
const auto *S : ToForget)
14228 forgetMemoizedResultsImpl(S);
14230 for (
auto I = PredicatedSCEVRewrites.begin();
14231 I != PredicatedSCEVRewrites.end();) {
14232 std::pair<const SCEV *, const Loop *>
Entry =
I->first;
14233 if (ToForget.count(
Entry.first))
14234 PredicatedSCEVRewrites.erase(
I++);
14240void ScalarEvolution::forgetMemoizedResultsImpl(
const SCEV *S) {
14241 LoopDispositions.erase(S);
14242 BlockDispositions.erase(S);
14243 UnsignedRanges.erase(S);
14244 SignedRanges.erase(S);
14245 HasRecMap.
erase(S);
14246 ConstantMultipleCache.erase(S);
14248 if (
auto *AR = dyn_cast<SCEVAddRecExpr>(S)) {
14249 UnsignedWrapViaInductionTried.erase(AR);
14250 SignedWrapViaInductionTried.erase(AR);
14253 auto ExprIt = ExprValueMap.
find(S);
14254 if (ExprIt != ExprValueMap.
end()) {
14255 for (
Value *V : ExprIt->second) {
14256 auto ValueIt = ValueExprMap.
find_as(V);
14257 if (ValueIt != ValueExprMap.
end())
14258 ValueExprMap.
erase(ValueIt);
14260 ExprValueMap.
erase(ExprIt);
14263 auto ScopeIt = ValuesAtScopes.find(S);
14264 if (ScopeIt != ValuesAtScopes.end()) {
14265 for (
const auto &Pair : ScopeIt->second)
14266 if (!isa_and_nonnull<SCEVConstant>(Pair.second))
14268 std::make_pair(Pair.first, S));
14269 ValuesAtScopes.erase(ScopeIt);
14272 auto ScopeUserIt = ValuesAtScopesUsers.find(S);
14273 if (ScopeUserIt != ValuesAtScopesUsers.end()) {
14274 for (
const auto &Pair : ScopeUserIt->second)
14275 llvm::erase(ValuesAtScopes[Pair.second], std::make_pair(Pair.first, S));
14276 ValuesAtScopesUsers.erase(ScopeUserIt);
14279 auto BEUsersIt = BECountUsers.find(S);
14280 if (BEUsersIt != BECountUsers.end()) {
14282 auto Copy = BEUsersIt->second;
14283 for (
const auto &Pair : Copy)
14284 forgetBackedgeTakenCounts(Pair.getPointer(), Pair.getInt());
14285 BECountUsers.erase(BEUsersIt);
14288 auto FoldUser = FoldCacheUser.find(S);
14289 if (FoldUser != FoldCacheUser.end())
14290 for (
auto &KV : FoldUser->second)
14291 FoldCache.erase(KV);
14292 FoldCacheUser.erase(S);
14296ScalarEvolution::getUsedLoops(
const SCEV *S,
14298 struct FindUsedLoops {
14300 : LoopsUsed(LoopsUsed) {}
14302 bool follow(
const SCEV *S) {
14303 if (
auto *AR = dyn_cast<SCEVAddRecExpr>(S))
14308 bool isDone()
const {
return false; }
14311 FindUsedLoops F(LoopsUsed);
14315void ScalarEvolution::getReachableBlocks(
14319 while (!Worklist.
empty()) {
14321 if (!Reachable.
insert(BB).second)
14328 if (
auto *
C = dyn_cast<ConstantInt>(
Cond)) {
14329 Worklist.
push_back(
C->isOne() ? TrueBB : FalseBB);
14333 if (
auto *Cmp = dyn_cast<ICmpInst>(
Cond)) {
14336 if (isKnownPredicateViaConstantRanges(
Cmp->getCmpPredicate(), L, R)) {
14340 if (isKnownPredicateViaConstantRanges(
Cmp->getInverseCmpPredicate(), L,
14375 SCEVMapper SCM(SE2);
14377 SE2.getReachableBlocks(ReachableBlocks,
F);
14379 auto GetDelta = [&](
const SCEV *Old,
const SCEV *New) ->
const SCEV * {
14397 while (!LoopStack.
empty()) {
14403 if (!ReachableBlocks.
contains(L->getHeader()))
14408 auto It = BackedgeTakenCounts.find(L);
14409 if (It == BackedgeTakenCounts.end())
14413 SCM.visit(It->second.getExact(L,
const_cast<ScalarEvolution *
>(
this)));
14433 const SCEV *Delta = GetDelta(CurBECount, NewBECount);
14434 if (Delta && !Delta->
isZero()) {
14435 dbgs() <<
"Trip Count for " << *L <<
" Changed!\n";
14436 dbgs() <<
"Old: " << *CurBECount <<
"\n";
14437 dbgs() <<
"New: " << *NewBECount <<
"\n";
14438 dbgs() <<
"Delta: " << *Delta <<
"\n";
14446 while (!Worklist.
empty()) {
14448 if (ValidLoops.
insert(L).second)
14449 Worklist.
append(L->begin(), L->end());
14451 for (
const auto &KV : ValueExprMap) {
14454 if (
auto *AR = dyn_cast<SCEVAddRecExpr>(KV.second)) {
14456 "AddRec references invalid loop");
14461 auto It = ExprValueMap.
find(KV.second);
14462 if (It == ExprValueMap.
end() || !It->second.contains(KV.first)) {
14463 dbgs() <<
"Value " << *KV.first
14464 <<
" is in ValueExprMap but not in ExprValueMap\n";
14468 if (
auto *
I = dyn_cast<Instruction>(&*KV.first)) {
14469 if (!ReachableBlocks.
contains(
I->getParent()))
14471 const SCEV *OldSCEV = SCM.visit(KV.second);
14473 const SCEV *Delta = GetDelta(OldSCEV, NewSCEV);
14474 if (Delta && !Delta->
isZero()) {
14475 dbgs() <<
"SCEV for value " << *
I <<
" changed!\n"
14476 <<
"Old: " << *OldSCEV <<
"\n"
14477 <<
"New: " << *NewSCEV <<
"\n"
14478 <<
"Delta: " << *Delta <<
"\n";
14484 for (
const auto &KV : ExprValueMap) {
14485 for (
Value *V : KV.second) {
14486 auto It = ValueExprMap.find_as(V);
14487 if (It == ValueExprMap.end()) {
14488 dbgs() <<
"Value " << *V
14489 <<
" is in ExprValueMap but not in ValueExprMap\n";
14492 if (It->second != KV.first) {
14493 dbgs() <<
"Value " << *V <<
" mapped to " << *It->second
14494 <<
" rather than " << *KV.first <<
"\n";
14501 for (
const auto &S : UniqueSCEVs) {
14504 if (isa<SCEVConstant>(
Op))
14506 auto It = SCEVUsers.find(
Op);
14507 if (It != SCEVUsers.end() && It->second.count(&S))
14509 dbgs() <<
"Use of operand " << *
Op <<
" by user " << S
14510 <<
" is not being tracked!\n";
14516 for (
const auto &ValueAndVec : ValuesAtScopes) {
14518 for (
const auto &LoopAndValueAtScope : ValueAndVec.second) {
14519 const Loop *L = LoopAndValueAtScope.first;
14520 const SCEV *ValueAtScope = LoopAndValueAtScope.second;
14521 if (!isa<SCEVConstant>(ValueAtScope)) {
14522 auto It = ValuesAtScopesUsers.find(ValueAtScope);
14523 if (It != ValuesAtScopesUsers.end() &&
14526 dbgs() <<
"Value: " << *
Value <<
", Loop: " << *L <<
", ValueAtScope: "
14527 << *ValueAtScope <<
" missing in ValuesAtScopesUsers\n";
14533 for (
const auto &ValueAtScopeAndVec : ValuesAtScopesUsers) {
14534 const SCEV *ValueAtScope = ValueAtScopeAndVec.first;
14535 for (
const auto &LoopAndValue : ValueAtScopeAndVec.second) {
14536 const Loop *L = LoopAndValue.first;
14537 const SCEV *
Value = LoopAndValue.second;
14539 auto It = ValuesAtScopes.find(
Value);
14540 if (It != ValuesAtScopes.end() &&
14541 is_contained(It->second, std::make_pair(L, ValueAtScope)))
14543 dbgs() <<
"Value: " << *
Value <<
", Loop: " << *L <<
", ValueAtScope: "
14544 << *ValueAtScope <<
" missing in ValuesAtScopes\n";
14550 auto VerifyBECountUsers = [&](
bool Predicated) {
14552 Predicated ? PredicatedBackedgeTakenCounts : BackedgeTakenCounts;
14553 for (
const auto &LoopAndBEInfo : BECounts) {
14554 for (
const ExitNotTakenInfo &ENT : LoopAndBEInfo.second.ExitNotTaken) {
14555 for (
const SCEV *S : {ENT.ExactNotTaken, ENT.SymbolicMaxNotTaken}) {
14556 if (!isa<SCEVConstant>(S)) {
14557 auto UserIt = BECountUsers.find(S);
14558 if (UserIt != BECountUsers.end() &&
14559 UserIt->second.contains({ LoopAndBEInfo.first,
Predicated }))
14561 dbgs() <<
"Value " << *S <<
" for loop " << *LoopAndBEInfo.first
14562 <<
" missing from BECountUsers\n";
14569 VerifyBECountUsers(
false);
14570 VerifyBECountUsers(
true);
14573 for (
auto &[S, Values] : LoopDispositions) {
14574 for (
auto [
Loop, CachedDisposition] : Values) {
14576 if (CachedDisposition != RecomputedDisposition) {
14577 dbgs() <<
"Cached disposition of " << *S <<
" for loop " << *
Loop
14578 <<
" is incorrect: cached " << CachedDisposition <<
", actual "
14579 << RecomputedDisposition <<
"\n";
14586 for (
auto &[S, Values] : BlockDispositions) {
14587 for (
auto [BB, CachedDisposition] : Values) {
14589 if (CachedDisposition != RecomputedDisposition) {
14590 dbgs() <<
"Cached disposition of " << *S <<
" for block %"
14591 << BB->
getName() <<
" is incorrect: cached " << CachedDisposition
14592 <<
", actual " << RecomputedDisposition <<
"\n";
14599 for (
auto [
FoldID, Expr] : FoldCache) {
14600 auto I = FoldCacheUser.find(Expr);
14601 if (
I == FoldCacheUser.end()) {
14602 dbgs() <<
"Missing entry in FoldCacheUser for cached expression " << *Expr
14607 dbgs() <<
"Missing FoldID in cached users of " << *Expr <<
"!\n";
14611 for (
auto [Expr, IDs] : FoldCacheUser) {
14612 for (
auto &
FoldID : IDs) {
14613 auto I = FoldCache.find(
FoldID);
14614 if (
I == FoldCache.end()) {
14615 dbgs() <<
"Missing entry in FoldCache for expression " << *Expr
14619 if (
I->second != Expr) {
14620 dbgs() <<
"Entry in FoldCache doesn't match FoldCacheUser: "
14621 << *
I->second <<
" != " << *Expr <<
"!\n";
14632 for (
auto [S, Multiple] : ConstantMultipleCache) {
14634 if ((Multiple != 0 && RecomputedMultiple != 0 &&
14635 Multiple.
urem(RecomputedMultiple) != 0 &&
14636 RecomputedMultiple.
urem(Multiple) != 0)) {
14637 dbgs() <<
"Incorrect cached computation in ConstantMultipleCache for "
14638 << *S <<
" : Computed " << RecomputedMultiple
14639 <<
" but cache contains " << Multiple <<
"!\n";
14679 OS <<
"Printing analysis 'Scalar Evolution Analysis' for function '"
14680 <<
F.getName() <<
"':\n";
14686 "Scalar Evolution Analysis",
false,
true)
14702 F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
F),
14703 getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
F),
14704 getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
14705 getAnalysis<LoopInfoWrapperPass>().getLoopInfo()));
14737 const SCEV *LHS,
const SCEV *RHS) {
14740 "Type mismatch between LHS and RHS");
14743 ID.AddInteger(Pred);
14744 ID.AddPointer(
LHS);
14745 ID.AddPointer(
RHS);
14746 void *IP =
nullptr;
14747 if (
const auto *S = UniquePreds.FindNodeOrInsertPos(
ID, IP))
14751 UniquePreds.InsertNode(Eq, IP);
14762 ID.AddInteger(AddedFlags);
14763 void *IP =
nullptr;
14764 if (
const auto *S = UniquePreds.FindNodeOrInsertPos(
ID, IP))
14766 auto *OF =
new (SCEVAllocator)
14768 UniquePreds.InsertNode(OF, IP);
14788 SCEVPredicateRewriter
Rewriter(L, SE, NewPreds, Pred);
14794 if (
auto *U = dyn_cast<SCEVUnionPredicate>(Pred)) {
14795 for (
const auto *Pred : U->getPredicates())
14796 if (
const auto *IPred = dyn_cast<SCEVComparePredicate>(Pred))
14797 if (IPred->getLHS() == Expr &&
14798 IPred->getPredicate() == ICmpInst::ICMP_EQ)
14799 return IPred->getRHS();
14800 }
else if (
const auto *IPred = dyn_cast<SCEVComparePredicate>(Pred)) {
14801 if (IPred->getLHS() == Expr &&
14802 IPred->getPredicate() == ICmpInst::ICMP_EQ)
14803 return IPred->getRHS();
14806 return convertToAddRecWithPreds(Expr);
14842 explicit SCEVPredicateRewriter(
14851 return Pred && Pred->
implies(
P, SE);
14860 return addOverflowAssumption(
A);
14870 if (!isa<PHINode>(Expr->
getValue()))
14873 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
14875 if (!PredicatedRewrite)
14877 for (
const auto *
P : PredicatedRewrite->second){
14879 if (
auto *WP = dyn_cast<const SCEVWrapPredicate>(
P)) {
14880 if (L != WP->getExpr()->getLoop())
14883 if (!addOverflowAssumption(
P))
14886 return PredicatedRewrite->first;
14899 return SCEVPredicateRewriter::rewrite(S, L, *
this,
nullptr, &Preds);
14906 S = SCEVPredicateRewriter::rewrite(S, L, *
this, &TransformPreds,
nullptr);
14907 auto *AddRec = dyn_cast<SCEVAddRecExpr>(S);
14922 : FastID(
ID), Kind(Kind) {}
14929 assert(
LHS !=
RHS &&
"LHS and RHS are the same SCEV");
14934 const auto *
Op = dyn_cast<SCEVComparePredicate>(
N);
14965 const auto *
Op = dyn_cast<SCEVWrapPredicate>(
N);
14977 const SCEV *OpStart =
Op->AR->getStart();
14982 const SCEV *OpStep =
Op->AR->getStepRecurrence(SE);
15035 if (Step->getValue()->getValue().isNonNegative())
15039 return ImpliedFlags;
15046 for (
const auto *
P : Preds)
15057 if (
const auto *Set = dyn_cast<SCEVUnionPredicate>(
N))
15059 return this->implies(I, SE);
15067 for (
const auto *Pred : Preds)
15072 if (
const auto *Set = dyn_cast<SCEVUnionPredicate>(
N)) {
15073 for (
const auto *Pred : Set->Preds)
15085 for (
auto *
P : Preds) {
15086 if (
N->implies(
P, SE))
15090 Preds = std::move(PrunedPreds);
15091 Preds.push_back(
N);
15098 Preds = std::make_unique<SCEVUnionPredicate>(Empty, SE);
15103 for (
const auto *
Op : Ops)
15107 if (!isa<SCEVConstant>(
Op))
15108 SCEVUsers[
Op].insert(
User);
15113 RewriteEntry &Entry = RewriteMap[Expr];
15116 if (Entry.second && Generation == Entry.first)
15117 return Entry.second;
15122 Expr = Entry.second;
15125 Entry = {Generation, NewSCEV};
15131 if (!BackedgeCount) {
15134 for (
const auto *
P : Preds)
15137 return BackedgeCount;
15141 if (!SymbolicMaxBackedgeCount) {
15143 SymbolicMaxBackedgeCount =
15145 for (
const auto *
P : Preds)
15148 return SymbolicMaxBackedgeCount;
15152 if (!SmallConstantMaxTripCount) {
15155 for (
const auto *
P : Preds)
15158 return *SmallConstantMaxTripCount;
15162 if (Preds->implies(&Pred, SE))
15167 Preds = std::make_unique<SCEVUnionPredicate>(NewPreds, SE);
15168 updateGeneration();
15175void PredicatedScalarEvolution::updateGeneration() {
15177 if (++Generation == 0) {
15178 for (
auto &
II : RewriteMap) {
15179 const SCEV *Rewritten =
II.second.second;
15188 const auto *AR = cast<SCEVAddRecExpr>(Expr);
15196 auto II = FlagsMap.insert({V, Flags});
15204 const auto *AR = cast<SCEVAddRecExpr>(Expr);
15209 auto II = FlagsMap.find(V);
15211 if (
II != FlagsMap.end())
15225 for (
const auto *
P : NewPreds)
15228 RewriteMap[SE.
getSCEV(V)] = {Generation, New};
15234 : RewriteMap(
Init.RewriteMap), SE(
Init.SE), L(
Init.L),
15237 Generation(
Init.Generation), BackedgeCount(
Init.BackedgeCount) {
15238 for (
auto I :
Init.FlagsMap)
15239 FlagsMap.insert(
I);
15244 for (
auto *BB : L.getBlocks())
15245 for (
auto &
I : *BB) {
15250 auto II = RewriteMap.find(Expr);
15252 if (
II == RewriteMap.end())
15256 if (
II->second.second == Expr)
15270bool ScalarEvolution::matchURem(
const SCEV *Expr,
const SCEV *&LHS,
15271 const SCEV *&RHS) {
15278 if (
const auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(Expr))
15279 if (
const auto *Trunc = dyn_cast<SCEVTruncateExpr>(ZExt->getOperand(0))) {
15280 LHS = Trunc->getOperand();
15292 const auto *
Add = dyn_cast<SCEVAddExpr>(Expr);
15293 if (
Add ==
nullptr ||
Add->getNumOperands() != 2)
15296 const SCEV *
A =
Add->getOperand(1);
15297 const auto *
Mul = dyn_cast<SCEVMulExpr>(
Add->getOperand(0));
15299 if (
Mul ==
nullptr)
15302 const auto MatchURemWithDivisor = [&](
const SCEV *
B) {
15313 if (
Mul->getNumOperands() == 3 && isa<SCEVConstant>(
Mul->getOperand(0)))
15314 return MatchURemWithDivisor(
Mul->getOperand(1)) ||
15315 MatchURemWithDivisor(
Mul->getOperand(2));
15318 if (
Mul->getNumOperands() == 2)
15319 return MatchURemWithDivisor(
Mul->getOperand(1)) ||
15320 MatchURemWithDivisor(
Mul->getOperand(0)) ||
15332 collectFromBlock(SE, Guards, Header, Pred, VisitedBlocks);
15336void ScalarEvolution::LoopGuards::collectFromPHI(
15344 using MinMaxPattern = std::pair<const SCEVConstant *, SCEVTypes>;
15345 auto GetMinMaxConst = [&](
unsigned IncomingIdx) -> MinMaxPattern {
15351 collectFromBlock(SE,
G->second, Phi.getParent(),
InBlock, VisitedBlocks,
15353 auto &RewriteMap =
G->second.RewriteMap;
15354 if (RewriteMap.empty())
15356 auto S = RewriteMap.find(SE.
getSCEV(Phi.getIncomingValue(IncomingIdx)));
15357 if (S == RewriteMap.end())
15359 auto *SM = dyn_cast_if_present<SCEVMinMaxExpr>(S->second);
15362 if (
const SCEVConstant *C0 = dyn_cast<SCEVConstant>(SM->getOperand(0)))
15363 return {C0, SM->getSCEVType()};
15366 auto MergeMinMaxConst = [](MinMaxPattern P1,
15367 MinMaxPattern P2) -> MinMaxPattern {
15368 auto [C1,
T1] = P1;
15369 auto [C2, T2] = P2;
15370 if (!C1 || !C2 || T1 != T2)
15374 return {C1->getAPInt().
ult(C2->getAPInt()) ? C1 : C2, T1};
15376 return {C1->getAPInt().
slt(C2->getAPInt()) ? C1 : C2, T1};
15378 return {C1->getAPInt().
ugt(C2->getAPInt()) ? C1 : C2, T1};
15380 return {C1->getAPInt().
sgt(C2->getAPInt()) ? C1 : C2, T1};
15385 auto P = GetMinMaxConst(0);
15386 for (
unsigned int In = 1;
In <
Phi.getNumIncomingValues();
In++) {
15389 P = MergeMinMaxConst(
P, GetMinMaxConst(In));
15395 Guards.RewriteMap.insert({
LHS,
RHS});
15399void ScalarEvolution::LoopGuards::collectFromBlock(
15414 if (isa<SCEVConstant>(LHS)) {
15423 &ExprsToRewrite]() {
15426 auto *C2 = dyn_cast<SCEVConstant>(RHS);
15437 if (ExactRegion.isWrappedSet() || ExactRegion.isFullSet())
15439 auto I = RewriteMap.find(LHSUnknown);
15440 const SCEV *RewrittenLHS =
I != RewriteMap.end() ?
I->second : LHSUnknown;
15448 if (MatchRangeCheckIdiom())
15454 auto IsMinMaxSCEVWithNonNegativeConstant =
15457 if (
auto *
MinMax = dyn_cast<SCEVMinMaxExpr>(Expr)) {
15458 if (
MinMax->getNumOperands() != 2)
15460 if (
auto *
C = dyn_cast<SCEVConstant>(
MinMax->getOperand(0))) {
15461 if (
C->getAPInt().isNegative())
15463 SCTy =
MinMax->getSCEVType();
15474 auto GetNonNegExprAndPosDivisor = [&](
const SCEV *Expr,
const SCEV *Divisor,
15476 auto *ConstExpr = dyn_cast<SCEVConstant>(Expr);
15477 auto *ConstDivisor = dyn_cast<SCEVConstant>(Divisor);
15478 if (!ConstExpr || !ConstDivisor)
15480 ExprVal = ConstExpr->getAPInt();
15481 DivisorVal = ConstDivisor->getAPInt();
15482 return ExprVal.isNonNegative() && !DivisorVal.isNonPositive();
15488 auto GetNextSCEVDividesByDivisor = [&](
const SCEV *Expr,
15489 const SCEV *Divisor) {
15492 if (!GetNonNegExprAndPosDivisor(Expr, Divisor, ExprVal, DivisorVal))
15497 return SE.
getConstant(ExprVal + DivisorVal - Rem);
15504 auto GetPreviousSCEVDividesByDivisor = [&](
const SCEV *Expr,
15505 const SCEV *Divisor) {
15508 if (!GetNonNegExprAndPosDivisor(Expr, Divisor, ExprVal, DivisorVal))
15518 std::function<
const SCEV *(
const SCEV *,
const SCEV *)>
15519 ApplyDivisibiltyOnMinMaxExpr = [&](
const SCEV *MinMaxExpr,
15520 const SCEV *Divisor) {
15521 const SCEV *MinMaxLHS =
nullptr, *MinMaxRHS =
nullptr;
15523 if (!IsMinMaxSCEVWithNonNegativeConstant(MinMaxExpr, SCTy, MinMaxLHS,
15527 isa<SCEVSMinExpr>(MinMaxExpr) || isa<SCEVUMinExpr>(MinMaxExpr);
15529 "Expected non-negative operand!");
15530 auto *DivisibleExpr =
15531 IsMin ? GetPreviousSCEVDividesByDivisor(MinMaxLHS, Divisor)
15532 : GetNextSCEVDividesByDivisor(MinMaxLHS, Divisor);
15534 ApplyDivisibiltyOnMinMaxExpr(MinMaxRHS, Divisor), DivisibleExpr};
15543 const SCEV *URemLHS =
nullptr;
15544 const SCEV *URemRHS =
nullptr;
15545 if (SE.matchURem(LHS, URemLHS, URemRHS)) {
15546 if (
const SCEVUnknown *LHSUnknown = dyn_cast<SCEVUnknown>(URemLHS)) {
15547 auto I = RewriteMap.find(LHSUnknown);
15548 const SCEV *RewrittenLHS =
15549 I != RewriteMap.end() ?
I->second : LHSUnknown;
15550 RewrittenLHS = ApplyDivisibiltyOnMinMaxExpr(RewrittenLHS, URemRHS);
15551 const auto *Multiple =
15553 RewriteMap[LHSUnknown] = Multiple;
15565 if (!isa<SCEVUnknown>(LHS) && isa<SCEVUnknown>(RHS)) {
15574 auto AddRewrite = [&](
const SCEV *
From,
const SCEV *FromRewritten,
15576 if (
From == FromRewritten)
15578 RewriteMap[
From] = To;
15584 auto GetMaybeRewritten = [&](
const SCEV *S) {
15585 auto I = RewriteMap.find(S);
15586 return I != RewriteMap.end() ?
I->second : S;
15596 std::function<
bool(
const SCEV *,
const SCEV *&)> HasDivisibiltyInfo =
15597 [&](
const SCEV *Expr,
const SCEV *&DividesBy) {
15598 if (
auto *
Mul = dyn_cast<SCEVMulExpr>(Expr)) {
15599 if (
Mul->getNumOperands() != 2)
15601 auto *MulLHS =
Mul->getOperand(0);
15602 auto *MulRHS =
Mul->getOperand(1);
15603 if (isa<SCEVConstant>(MulLHS))
15605 if (
auto *Div = dyn_cast<SCEVUDivExpr>(MulLHS))
15606 if (Div->getOperand(1) == MulRHS) {
15607 DividesBy = MulRHS;
15611 if (
auto *
MinMax = dyn_cast<SCEVMinMaxExpr>(Expr))
15612 return HasDivisibiltyInfo(
MinMax->getOperand(0), DividesBy) ||
15613 HasDivisibiltyInfo(
MinMax->getOperand(1), DividesBy);
15618 std::function<
bool(
const SCEV *,
const SCEV *&)> IsKnownToDivideBy =
15619 [&](
const SCEV *Expr,
const SCEV *DividesBy) {
15622 if (
auto *
MinMax = dyn_cast<SCEVMinMaxExpr>(Expr))
15623 return IsKnownToDivideBy(
MinMax->getOperand(0), DividesBy) &&
15624 IsKnownToDivideBy(
MinMax->getOperand(1), DividesBy);
15628 const SCEV *RewrittenLHS = GetMaybeRewritten(LHS);
15629 const SCEV *DividesBy =
nullptr;
15630 if (HasDivisibiltyInfo(RewrittenLHS, DividesBy))
15633 IsKnownToDivideBy(RewrittenLHS, DividesBy) ? DividesBy :
nullptr;
15647 switch (Predicate) {
15655 RHS = DividesBy ? GetPreviousSCEVDividesByDivisor(RHS, DividesBy) :
RHS;
15661 RHS = DividesBy ? GetNextSCEVDividesByDivisor(RHS, DividesBy) :
RHS;
15665 RHS = DividesBy ? GetPreviousSCEVDividesByDivisor(RHS, DividesBy) :
RHS;
15669 RHS = DividesBy ? GetNextSCEVDividesByDivisor(RHS, DividesBy) :
RHS;
15678 auto EnqueueOperands = [&Worklist](
const SCEVNAryExpr *S) {
15682 while (!Worklist.
empty()) {
15684 if (isa<SCEVConstant>(
From))
15688 const SCEV *FromRewritten = GetMaybeRewritten(
From);
15689 const SCEV *To =
nullptr;
15691 switch (Predicate) {
15695 if (
auto *
UMax = dyn_cast<SCEVUMaxExpr>(FromRewritten))
15696 EnqueueOperands(
UMax);
15701 if (
auto *
SMax = dyn_cast<SCEVSMaxExpr>(FromRewritten))
15702 EnqueueOperands(
SMax);
15707 if (
auto *
UMin = dyn_cast<SCEVUMinExpr>(FromRewritten))
15708 EnqueueOperands(
UMin);
15713 if (
auto *
SMin = dyn_cast<SCEVSMinExpr>(FromRewritten))
15714 EnqueueOperands(
SMin);
15717 if (isa<SCEVConstant>(RHS))
15722 const SCEV *OneAlignedUp =
15723 DividesBy ? GetNextSCEVDividesByDivisor(One, DividesBy) : One;
15724 To = SE.
getUMaxExpr(FromRewritten, OneAlignedUp);
15732 AddRewrite(
From, FromRewritten, To);
15741 auto *AssumeI = cast<CallInst>(AssumeVH);
15749 SE.F.
getParent(), Intrinsic::experimental_guard);
15751 for (
const auto *GU : GuardDecl->users())
15752 if (
const auto *Guard = dyn_cast<IntrinsicInst>(GU))
15753 if (Guard->getFunction() ==
Block->getParent() &&
15762 unsigned NumCollectedConditions = 0;
15764 std::pair<const BasicBlock *, const BasicBlock *> Pair(Pred,
Block);
15766 Pair = SE.getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
15767 VisitedBlocks.
insert(Pair.second);
15769 dyn_cast<BranchInst>(Pair.first->getTerminator());
15775 NumCollectedConditions++;
15779 if (
Depth > 0 && NumCollectedConditions == 2)
15787 if (Pair.second->hasNPredecessorsOrMore(2) &&
15790 for (
auto &Phi : Pair.second->phis())
15791 collectFromPHI(SE, Guards, Phi, VisitedBlocks, IncomingGuards,
Depth);
15798 for (
auto [Term, EnterIfTrue] :
reverse(Terms)) {
15802 while (!Worklist.
empty()) {
15807 if (
auto *Cmp = dyn_cast<ICmpInst>(
Cond)) {
15809 EnterIfTrue ?
Cmp->getPredicate() :
Cmp->getInversePredicate();
15812 CollectCondition(Predicate, LHS, RHS, Guards.RewriteMap);
15828 Guards.PreserveNUW =
true;
15829 Guards.PreserveNSW =
true;
15830 for (
const SCEV *Expr : ExprsToRewrite) {
15831 const SCEV *RewriteTo = Guards.RewriteMap[Expr];
15832 Guards.PreserveNUW &=
15834 Guards.PreserveNSW &=
15841 if (ExprsToRewrite.size() > 1) {
15842 for (
const SCEV *Expr : ExprsToRewrite) {
15843 const SCEV *RewriteTo = Guards.RewriteMap[Expr];
15844 Guards.RewriteMap.erase(Expr);
15845 Guards.RewriteMap.insert({Expr, Guards.
rewrite(RewriteTo)});
15854 class SCEVLoopGuardRewriter
15864 if (Guards.PreserveNUW)
15866 if (Guards.PreserveNSW)
15873 auto I = Map.find(Expr);
15874 if (
I == Map.end())
15880 auto I = Map.find(Expr);
15881 if (
I == Map.end()) {
15887 while (Bitwidth % 8 == 0 && Bitwidth >= 8 &&
15888 Bitwidth >
Op->getType()->getScalarSizeInBits()) {
15891 auto I = Map.find(NarrowExt);
15892 if (
I != Map.end())
15894 Bitwidth = Bitwidth / 2;
15904 auto I = Map.find(Expr);
15905 if (
I == Map.end())
15912 auto I = Map.find(Expr);
15913 if (
I == Map.end())
15919 auto I = Map.find(Expr);
15920 if (
I == Map.end())
15927 bool Changed =
false;
15935 return !Changed ? Expr
15943 bool Changed =
false;
15951 return !Changed ? Expr
15958 if (RewriteMap.empty())
15961 SCEVLoopGuardRewriter
Rewriter(SE, *
this);
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
block Block Frequency Analysis
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
Generic implementation of equivalence classes through the use Tarjan's efficient union-find algorithm...
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static bool isSigned(unsigned int Opcode)
This file defines a hash set that can be used to remove duplication of nodes in a graph.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This defines the Use class.
iv Induction Variable Users
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
mir Rename Register Operands
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
PowerPC Reduce CR logical Operation
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
const SmallVectorImpl< MachineOperand > & Cond
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
SI optimize exec mask operations pre RA
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
This file provides utility classes that use RAII to save and restore values.
bool SCEVMinMaxExprContains(const SCEV *Root, const SCEV *OperandToFind, SCEVTypes RootKind)
static cl::opt< unsigned > MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden, cl::desc("Max coefficients in AddRec during evolving"), cl::init(8))
static cl::opt< unsigned > RangeIterThreshold("scev-range-iter-threshold", cl::Hidden, cl::desc("Threshold for switching to iteratively computing SCEV ranges"), cl::init(32))
static const Loop * isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI)
static unsigned getConstantTripCount(const SCEVConstant *ExitCount)
static int CompareValueComplexity(const LoopInfo *const LI, Value *LV, Value *RV, unsigned Depth)
Compare the two values LV and RV in terms of their "complexity" where "complexity" is a partial (and ...
static void PushLoopPHIs(const Loop *L, SmallVectorImpl< Instruction * > &Worklist, SmallPtrSetImpl< Instruction * > &Visited)
Push PHI nodes in the header of the given loop onto the given Worklist.
static void insertFoldCacheEntry(const ScalarEvolution::FoldID &ID, const SCEV *S, DenseMap< ScalarEvolution::FoldID, const SCEV * > &FoldCache, DenseMap< const SCEV *, SmallVector< ScalarEvolution::FoldID, 2 > > &FoldCacheUser)
static cl::opt< bool > ClassifyExpressions("scalar-evolution-classify-expressions", cl::Hidden, cl::init(true), cl::desc("When printing analysis, include information on every instruction"))
static bool CanConstantFold(const Instruction *I)
Return true if we can constant fold an instruction of the specified type, assuming that all operands ...
static cl::opt< unsigned > AddOpsInlineThreshold("scev-addops-inline-threshold", cl::Hidden, cl::desc("Threshold for inlining addition operands into a SCEV"), cl::init(500))
static cl::opt< unsigned > MaxLoopGuardCollectionDepth("scalar-evolution-max-loop-guard-collection-depth", cl::Hidden, cl::desc("Maximum depth for recursive loop guard collection"), cl::init(1))
static cl::opt< bool > VerifyIR("scev-verify-ir", cl::Hidden, cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"), cl::init(false))
static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge, Value *&C, Value *&LHS, Value *&RHS)
static std::optional< int > CompareSCEVComplexity(EquivalenceClasses< const SCEV * > &EqCacheSCEV, const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS, DominatorTree &DT, unsigned Depth=0)
static const SCEV * getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty, ScalarEvolution *SE, unsigned Depth)
static std::optional< APInt > MinOptional(std::optional< APInt > X, std::optional< APInt > Y)
Helper function to compare optional APInts: (a) if X and Y both exist, return min(X,...
static cl::opt< unsigned > MulOpsInlineThreshold("scev-mulops-inline-threshold", cl::Hidden, cl::desc("Threshold for inlining multiplication operands into a SCEV"), cl::init(32))
static void GroupByComplexity(SmallVectorImpl< const SCEV * > &Ops, LoopInfo *LI, DominatorTree &DT)
Given a list of SCEV objects, order them by their complexity, and group objects of the same complexit...
static const SCEV * constantFoldAndGroupOps(ScalarEvolution &SE, LoopInfo &LI, DominatorTree &DT, SmallVectorImpl< const SCEV * > &Ops, FoldT Fold, IsIdentityT IsIdentity, IsAbsorberT IsAbsorber)
Performs a number of common optimizations on the passed Ops.
static std::optional< const SCEV * > createNodeForSelectViaUMinSeq(ScalarEvolution *SE, const SCEV *CondExpr, const SCEV *TrueExpr, const SCEV *FalseExpr)
static Constant * BuildConstantFromSCEV(const SCEV *V)
This builds up a Constant using the ConstantExpr interface.
static ConstantInt * EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, ScalarEvolution &SE)
static const SCEV * BinomialCoefficient(const SCEV *It, unsigned K, ScalarEvolution &SE, Type *ResultTy)
Compute BC(It, K). The result has width W. Assume, K > 0.
static cl::opt< unsigned > MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden, cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"), cl::init(8))
static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr, const SCEV *Candidate)
Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values?
static PHINode * getConstantEvolvingPHI(Value *V, const Loop *L)
getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node in the loop that V is deri...
static cl::opt< unsigned > MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden, cl::desc("Maximum number of iterations SCEV will " "symbolically execute a constant " "derived loop"), cl::init(100))
static bool MatchBinarySub(const SCEV *S, const SCEV *&LHS, const SCEV *&RHS)
static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow)
static void PrintSCEVWithTypeHint(raw_ostream &OS, const SCEV *S)
When printing a top-level SCEV for trip counts, it's helpful to include a type for constants which ar...
static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, const Loop *L)
static bool containsConstantInAddMulChain(const SCEV *StartExpr)
Determine if any of the operands in this SCEV are a constant or if any of the add or multiply express...
static const SCEV * getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty, ScalarEvolution *SE, unsigned Depth)
static bool hasHugeExpression(ArrayRef< const SCEV * > Ops)
Returns true if Ops contains a huge SCEV (the subtree of S contains at least HugeExprThreshold nodes)...
static cl::opt< unsigned > MaxPhiSCCAnalysisSize("scalar-evolution-max-scc-analysis-depth", cl::Hidden, cl::desc("Maximum amount of nodes to process while searching SCEVUnknown " "Phi strongly connected components"), cl::init(8))
static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
static cl::opt< unsigned > MaxSCEVOperationsImplicationDepth("scalar-evolution-max-scev-operations-implication-depth", cl::Hidden, cl::desc("Maximum depth of recursive SCEV operations implication analysis"), cl::init(2))
static void PushDefUseChildren(Instruction *I, SmallVectorImpl< Instruction * > &Worklist, SmallPtrSetImpl< Instruction * > &Visited)
Push users of the given Instruction onto the given Worklist.
static std::optional< APInt > SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec, const ConstantRange &Range, ScalarEvolution &SE)
Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n iterations.
static cl::opt< bool > UseContextForNoWrapFlagInference("scalar-evolution-use-context-for-no-wrap-flag-strenghening", cl::Hidden, cl::desc("Infer nuw/nsw flags using context where suitable"), cl::init(true))
static cl::opt< bool > EnableFiniteLoopControl("scalar-evolution-finite-loop", cl::Hidden, cl::desc("Handle <= and >= in finite loops"), cl::init(true))
static std::optional< std::tuple< APInt, APInt, APInt, APInt, unsigned > > GetQuadraticEquation(const SCEVAddRecExpr *AddRec)
For a given quadratic addrec, generate coefficients of the corresponding quadratic equation,...
static bool isKnownPredicateExtendIdiom(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
static std::optional< BinaryOp > MatchBinaryOp(Value *V, const DataLayout &DL, AssumptionCache &AC, const DominatorTree &DT, const Instruction *CxtI)
Try to map V into a BinaryOp, and return std::nullopt on failure.
static std::optional< APInt > SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE)
Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n iterations.
static std::optional< APInt > TruncIfPossible(std::optional< APInt > X, unsigned BitWidth)
Helper function to truncate an optional APInt to a given BitWidth.
static cl::opt< unsigned > MaxSCEVCompareDepth("scalar-evolution-max-scev-compare-depth", cl::Hidden, cl::desc("Maximum depth of recursive SCEV complexity comparisons"), cl::init(32))
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE, const SCEVConstant *ConstantTerm, const SCEVAddExpr *WholeAddExpr)
static cl::opt< unsigned > MaxConstantEvolvingDepth("scalar-evolution-max-constant-evolving-depth", cl::Hidden, cl::desc("Maximum depth of recursive constant evolving"), cl::init(32))
static ConstantRange getRangeForAffineARHelper(APInt Step, const ConstantRange &StartRange, const APInt &MaxBECount, bool Signed)
static std::optional< ConstantRange > GetRangeFromMetadata(Value *V)
Helper method to assign a range to V from metadata present in the IR.
static const SCEV * SolveLinEquationWithOverflow(const APInt &A, const SCEV *B, SmallVectorImpl< const SCEVPredicate * > *Predicates, ScalarEvolution &SE)
Finds the minimum unsigned root of the following equation:
static cl::opt< unsigned > HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden, cl::desc("Size of the expression which is considered huge"), cl::init(4096))
static Type * isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI, bool &Signed, ScalarEvolution &SE)
Helper function to createAddRecFromPHIWithCasts.
static Constant * EvaluateExpression(Value *V, const Loop *L, DenseMap< Instruction *, Constant * > &Vals, const DataLayout &DL, const TargetLibraryInfo *TLI)
EvaluateExpression - Given an expression that passes the getConstantEvolvingPHI predicate,...
static const SCEV * MatchNotExpr(const SCEV *Expr)
If Expr computes ~A, return A else return nullptr.
static cl::opt< unsigned > MaxValueCompareDepth("scalar-evolution-max-value-compare-depth", cl::Hidden, cl::desc("Maximum depth of recursive value complexity comparisons"), cl::init(2))
static cl::opt< bool, true > VerifySCEVOpt("verify-scev", cl::Hidden, cl::location(VerifySCEV), cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"))
static const SCEV * getSignedOverflowLimitForStep(const SCEV *Step, ICmpInst::Predicate *Pred, ScalarEvolution *SE)
static SCEV::NoWrapFlags StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type, const ArrayRef< const SCEV * > Ops, SCEV::NoWrapFlags Flags)
static cl::opt< unsigned > MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden, cl::desc("Maximum depth of recursive arithmetics"), cl::init(32))
static bool HasSameValue(const SCEV *A, const SCEV *B)
SCEV structural equivalence is usually sufficient for testing whether two expressions are equal,...
static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow)
Compute the result of "n choose k", the binomial coefficient.
static bool CollectAddOperandsWithScales(SmallDenseMap< const SCEV *, APInt, 16 > &M, SmallVectorImpl< const SCEV * > &NewOps, APInt &AccumulatedConstant, ArrayRef< const SCEV * > Ops, const APInt &Scale, ScalarEvolution &SE)
Process the given Ops list, which is a list of operands to be added under the given scale,...
static bool canConstantEvolve(Instruction *I, const Loop *L)
Determine whether this instruction can constant evolve within this loop assuming its operands can all...
static PHINode * getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L, DenseMap< Instruction *, PHINode * > &PHIMap, unsigned Depth)
getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by recursing through each instructi...
static bool scevUnconditionallyPropagatesPoisonFromOperands(SCEVTypes Kind)
static cl::opt< bool > VerifySCEVStrict("verify-scev-strict", cl::Hidden, cl::desc("Enable stricter verification with -verify-scev is passed"))
static Constant * getOtherIncomingValue(PHINode *PN, BasicBlock *BB)
static cl::opt< bool > UseExpensiveRangeSharpening("scalar-evolution-use-expensive-range-sharpening", cl::Hidden, cl::init(false), cl::desc("Use more powerful methods of sharpening expression ranges. May " "be costly in terms of compile time"))
static const SCEV * getUnsignedOverflowLimitForStep(const SCEV *Step, ICmpInst::Predicate *Pred, ScalarEvolution *SE)
static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Is LHS Pred RHS true on the virtue of LHS or RHS being a Min or Max expression?
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
static bool InBlock(const Value *V, const BasicBlock *BB)
Provides some synthesis utilities to produce sequences of values.
This file defines the SmallPtrSet class.
This file defines the SmallSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static SymbolRef::Type getType(const Symbol *Sym)
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Virtual Register Rewriter
static const uint32_t IV[8]
Class for arbitrary precision integers.
APInt umul_ov(const APInt &RHS, bool &Overflow) const
APInt udiv(const APInt &RHS) const
Unsigned division operation.
APInt zext(unsigned width) const
Zero extend to a new width.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
APInt getHiBits(unsigned numBits) const
Compute an APInt containing numBits highbits from this APInt.
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
unsigned getActiveBits() const
Compute the number of active bits in the value.
APInt trunc(unsigned width) const
Truncate to new width.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
APInt abs() const
Get the absolute value.
bool sgt(const APInt &RHS) const
Signed greater than comparison.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
bool isSignMask() const
Check if the APInt's value is returned by getSignMask.
APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
static APInt getMinValue(unsigned numBits)
Gets minimum unsigned value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
bool sle(const APInt &RHS) const
Signed less or equal comparison.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
unsigned countTrailingZeros() const
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
APInt multiplicativeInverse() const
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
APInt sext(unsigned width) const
Sign extend to a new width.
APInt shl(unsigned shiftAmt) const
Left-shift function.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
bool isSignBitSet() const
Determine if sign bit of this APInt is set.
bool slt(const APInt &RHS) const
Signed less than comparison.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
This templated class represents "all analyses that operate over <a particular IR unit>" (e....
API to communicate dependencies between analyses during invalidation.
bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA)
Trigger the invalidation of some other analysis pass if not already handled and return whether it was...
A container for analyses that lazily runs them and caches their results.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
AnalysisUsage & addRequiredTransitive()
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
ArrayRef< T > take_front(size_t N=1) const
Return a copy of *this with only the first N elements.
size_t size() const
size - Get the array size.
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
MutableArrayRef< ResultElem > assumptions()
Access the list of assumption handles currently tracked for this function.
bool isSingleEdge() const
Check if this is the only edge between Start and End.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
const Instruction & front() const
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Function * getParent() const
Return the enclosing method, or null if none.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
unsigned getNoWrapKind() const
Returns one of OBO::NoSignedWrap or OBO::NoUnsignedWrap.
Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
BinaryOps getOpcode() const
Conditional or Unconditional Branch instruction.
bool isConditional() const
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
LLVM_ATTRIBUTE_RETURNS_NONNULL void * Allocate(size_t Size, Align Alignment)
Allocate space at the specified alignment.
This class represents a function call, abstracting a target machine's calling convention.
Value handle with callbacks on RAUW and destruction.
bool isFalseWhenEqual() const
This is just a convenience.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ ICMP_ULT
unsigned less than
@ ICMP_SGE
signed greater or equal
@ ICMP_ULE
unsigned less or equal
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
bool isTrueWhenEqual() const
This is just a convenience.
Predicate getNonStrictPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static Constant * getNot(Constant *C)
static Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
static Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static Constant * getNeg(Constant *C, bool HasNSW=false)
static Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
This is the shared class of boolean and integer constants.
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
static ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
static ConstantInt * getBool(LLVMContext &Context, bool V)
This class represents a range of values.
ConstantRange add(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an addition of a value in this ran...
ConstantRange zextOrTrunc(uint32_t BitWidth) const
Make this range have the bit width given by BitWidth.
PreferredRangeType
If represented precisely, the result of some range operations may consist of multiple disjoint ranges...
bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
ConstantRange subtract(const APInt &CI) const
Subtract the specified constant from the endpoints of this constant range.
const APInt & getLower() const
Return the lower value for this range.
ConstantRange truncate(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly smaller than the current typ...
bool isFullSet() const
Return true if this set contains all of the elements possible for this data-type.
bool icmp(CmpInst::Predicate Pred, const ConstantRange &Other) const
Does the predicate Pred hold between ranges this and Other? NOTE: false does not mean that inverse pr...
bool isEmptySet() const
Return true if this set contains no members.
ConstantRange zeroExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
bool isSignWrappedSet() const
Return true if this set wraps around the signed domain.
APInt getSignedMin() const
Return the smallest signed value contained in the ConstantRange.
bool isWrappedSet() const
Return true if this set wraps around the unsigned domain.
void print(raw_ostream &OS) const
Print out the bounds to a stream.
ConstantRange signExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
const APInt & getUpper() const
Return the upper value for this range.
ConstantRange unionWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the union of this range with another range.
static ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
bool contains(const APInt &Val) const
Return true if the specified value is in the set.
APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
APInt getSignedMax() const
Return the largest signed value contained in the ConstantRange.
static ConstantRange getNonEmpty(APInt Lower, APInt Upper)
Create non-empty constant range with the given bounds.
static ConstantRange makeGuaranteedNoWrapRegion(Instruction::BinaryOps BinOp, const ConstantRange &Other, unsigned NoWrapKind)
Produce the largest range containing all X such that "X BinOp Y" is guaranteed not to wrap (overflow)...
unsigned getMinSignedBits() const
Compute the maximal number of bits needed to represent every value in this signed range.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
ConstantRange sub(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a subtraction of a value in this r...
ConstantRange sextOrTrunc(uint32_t BitWidth) const
Make this range have the bit width given by BitWidth.
static ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
This is an important base class in LLVM.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
unsigned getIndexTypeSizeInBits(Type *Ty) const
Layout size of the index used in GEP calculation.
IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of a GEP index in AddressSpace.
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
bool erase(const KeyT &Val)
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
iterator find_as(const LookupKeyT &Val)
Alternate version of find() which allows a different, and possibly less expensive,...
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Analysis pass which computes a DominatorTree.
bool properlyDominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
properlyDominates - Returns true iff A dominates B and A != B.
Legacy analysis pass which computes a DominatorTree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
EquivalenceClasses - This represents a collection of equivalence classes and supports three efficient...
member_iterator unionSets(const ElemTy &V1, const ElemTy &V2)
union - Merge the two equivalence sets for the specified values, inserting them if they do not alread...
bool isEquivalent(const ElemTy &V1, const ElemTy &V2) const
FoldingSetNodeIDRef - This class describes a reference to an interned FoldingSetNodeID,...
FoldingSetNodeID - This class is used to gather all the unique data bits of a node.
FunctionPass class - This class is used to implement most global optimizations.
const BasicBlock & getEntryBlock() const
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Represents flags for the getelementptr instruction/expression.
bool hasNoUnsignedSignedWrap() const
bool hasNoUnsignedWrap() const
static GEPNoWrapFlags none()
static Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
Module * getParent()
Get the module that this global value is contained inside of...
static bool isPrivateLinkage(LinkageTypes Linkage)
static bool isInternalLinkage(LinkageTypes Linkage)
This instruction compares its operands according to the predicate given to the constructor.
CmpPredicate getCmpPredicate() const
static bool isGE(Predicate P)
Return true if the predicate is SGE or UGE.
CmpPredicate getSwappedCmpPredicate() const
static bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
static bool isLT(Predicate P)
Return true if the predicate is SLT or ULT.
CmpPredicate getInverseCmpPredicate() const
static bool isGT(Predicate P)
Return true if the predicate is SGT or UGT.
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
static CmpPredicate getInverseCmpPredicate(CmpPredicate Pred)
bool isEquality() const
Return true if this predicate is either EQ or NE.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
static bool isLE(Predicate P)
Return true if the predicate is SLE or ULE.
bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
bool isIdenticalToWhenDefined(const Instruction *I, bool IntersectAttrs=false) const LLVM_READONLY
This is like isIdenticalTo, except that it ignores the SubclassOptionalData flags,...
Class to represent integer types.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
An instruction for reading from memory.
Analysis pass that exposes the LoopInfo for a function.
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
BlockT * getHeader() const
unsigned getLoopDepth() const
Return the nesting level of this loop.
BlockT * getLoopPredecessor() const
If the given loop's header has exactly one unique predecessor outside the loop, return it.
LoopT * getParentLoop() const
Return the parent loop if it exists or nullptr for top level loops.
unsigned getLoopDepth(const BlockT *BB) const
Return the loop nesting level of the specified block.
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
The legacy pass manager's analysis pass to compute loop information.
Represents a single loop in the control flow graph.
bool isLoopInvariant(const Value *V) const
Return true if the specified value is loop invariant.
A Module instance is used to store all the information related to an LLVM module.
This is a utility class that provides an abstraction for the common functionality between Instruction...
unsigned getOpcode() const
Return the opcode for this Instruction or ConstantExpr.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
iterator_range< const_block_iterator > blocks() const
op_range incoming_values()
Value * getIncomingValueForBlock(const BasicBlock *BB) const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
PointerIntPair - This class implements a pair of a pointer and small integer.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
static PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...
void addPredicate(const SCEVPredicate &Pred)
Adds a new predicate.
const SCEVPredicate & getPredicate() const
bool hasNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Returns true if we've proved that V doesn't wrap by means of a SCEV predicate.
void setNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Proves that V doesn't overflow by adding SCEV predicate.
void print(raw_ostream &OS, unsigned Depth) const
Print the SCEV mappings done by the Predicated Scalar Evolution.
bool areAddRecsEqualWithPreds(const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const
Check if AR1 and AR2 are equal, while taking into account Equal predicates in Preds.
PredicatedScalarEvolution(ScalarEvolution &SE, Loop &L)
const SCEVAddRecExpr * getAsAddRec(Value *V)
Attempts to produce an AddRecExpr for V by adding additional SCEV predicates.
unsigned getSmallConstantMaxTripCount()
Returns the upper bound of the loop trip count as a normal unsigned value, or 0 if the trip count is ...
const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
const SCEV * getSymbolicMaxBackedgeTakenCount()
Get the (predicated) symbolic max backedge count for the analyzed loop.
const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
PreservedAnalysisChecker getChecker() const
Build a checker for this PreservedAnalyses and the specified analysis type.
constexpr bool isValid() const
This node represents an addition of some number of SCEVs.
This node represents a polynomial recurrence on the trip count of the specified loop.
const SCEV * getStart() const
const SCEV * evaluateAtIteration(const SCEV *It, ScalarEvolution &SE) const
Return the value of this chain of recurrences at the specified iteration number.
const SCEV * getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
void setNoWrapFlags(NoWrapFlags Flags)
Set flags for a recurrence without clearing any previously set flags.
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values.
bool isQuadratic() const
Return true if this represents an expression A + B*x + C*x^2 where A, B and C are loop invariant valu...
const SCEV * getNumIterationsInRange(const ConstantRange &Range, ScalarEvolution &SE) const
Return the number of iterations of this loop that produce values in the specified constant range.
const SCEVAddRecExpr * getPostIncExpr(ScalarEvolution &SE) const
Return an expression representing the value of this expression one iteration of the loop ahead.
const Loop * getLoop() const
This is the base class for unary cast operator classes.
const SCEV * getOperand() const
SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, const SCEV *op, Type *ty)
void setNoWrapFlags(NoWrapFlags Flags)
Set flags for a non-recurrence without clearing previously set flags.
This class represents an assumption that the expression LHS Pred RHS evaluates to true,...
SCEVComparePredicate(const FoldingSetNodeIDRef ID, const ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
bool isAlwaysTrue() const override
Returns true if the predicate is always true.
void print(raw_ostream &OS, unsigned Depth=0) const override
Prints a textual representation of this predicate with an indentation of Depth.
bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const override
Implementation of the SCEVPredicate interface.
This class represents a constant integer value.
ConstantInt * getValue() const
const APInt & getAPInt() const
This is the base class for unary integral cast operator classes.
SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, const SCEV *op, Type *ty)
This node is the base class min/max selections.
static enum SCEVTypes negate(enum SCEVTypes T)
This node represents multiplication of some number of SCEVs.
This node is a base class providing common functionality for n'ary operators.
bool hasNoUnsignedWrap() const
bool hasNoSelfWrap() const
size_t getNumOperands() const
bool hasNoSignedWrap() const
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask=NoWrapMask) const
const SCEV * getOperand(unsigned i) const
const SCEV *const * Operands
ArrayRef< const SCEV * > operands() const
This class represents an assumption made using SCEV expressions which can be checked at run-time.
SCEVPredicate(const SCEVPredicate &)=default
virtual bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const =0
Returns true if this predicate implies N.
virtual void print(raw_ostream &OS, unsigned Depth=0) const =0
Prints a textual representation of this predicate with an indentation of Depth.
This class represents a cast from a pointer to a pointer-sized integer value.
This visitor recursively visits a SCEV expression and re-writes it.
const SCEV * visitSignExtendExpr(const SCEVSignExtendExpr *Expr)
const SCEV * visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr)
const SCEV * visitSMinExpr(const SCEVSMinExpr *Expr)
const SCEV * visitUMinExpr(const SCEVUMinExpr *Expr)
This class represents a signed maximum selection.
This class represents a signed minimum selection.
This node is the base class for sequential/in-order min/max selections.
SCEVTypes getEquivalentNonSequentialSCEVType() const
This class represents a sequential/in-order unsigned minimum selection.
This class represents a sign extension of a small integer value to a larger integer value.
Visit all nodes in the expression tree using worklist traversal.
void visitAll(const SCEV *Root)
This class represents a truncation of an integer value to a smaller integer value.
This class represents a binary unsigned division operation.
const SCEV * getLHS() const
const SCEV * getRHS() const
This class represents an unsigned maximum selection.
This class represents an unsigned minimum selection.
This class represents a composition of other SCEV predicates, and is the class that most clients will...
void print(raw_ostream &OS, unsigned Depth) const override
Prints a textual representation of this predicate with an indentation of Depth.
bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const override
Returns true if this predicate implies N.
SCEVUnionPredicate(ArrayRef< const SCEVPredicate * > Preds, ScalarEvolution &SE)
Union predicates don't get cached so create a dummy set ID for it.
bool isAlwaysTrue() const override
Implementation of the SCEVPredicate interface.
This means that we are dealing with an entirely unknown SCEV value, and only represent it as its LLVM...
This class represents the value of vscale, as used when defining the length of a scalable vector or r...
This class represents an assumption made on an AddRec expression.
IncrementWrapFlags
Similar to SCEV::NoWrapFlags, but with slightly different semantics for FlagNUSW.
SCEVWrapPredicate(const FoldingSetNodeIDRef ID, const SCEVAddRecExpr *AR, IncrementWrapFlags Flags)
bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const override
Returns true if this predicate implies N.
static SCEVWrapPredicate::IncrementWrapFlags setFlags(SCEVWrapPredicate::IncrementWrapFlags Flags, SCEVWrapPredicate::IncrementWrapFlags OnFlags)
void print(raw_ostream &OS, unsigned Depth=0) const override
Prints a textual representation of this predicate with an indentation of Depth.
bool isAlwaysTrue() const override
Returns true if the predicate is always true.
const SCEVAddRecExpr * getExpr() const
Implementation of the SCEVPredicate interface.
static SCEVWrapPredicate::IncrementWrapFlags clearFlags(SCEVWrapPredicate::IncrementWrapFlags Flags, SCEVWrapPredicate::IncrementWrapFlags OffFlags)
Convenient IncrementWrapFlags manipulation methods.
static SCEVWrapPredicate::IncrementWrapFlags getImpliedFlags(const SCEVAddRecExpr *AR, ScalarEvolution &SE)
Returns the set of SCEVWrapPredicate no wrap flags implied by a SCEVAddRecExpr.
IncrementWrapFlags getFlags() const
Returns the set assumed no overflow flags.
This class represents a zero extension of a small integer value to a larger integer value.
This class represents an analyzed expression in the program.
ArrayRef< const SCEV * > operands() const
Return operands of this SCEV expression.
unsigned short getExpressionSize() const
bool isOne() const
Return true if the expression is a constant one.
bool isZero() const
Return true if the expression is a constant zero.
void dump() const
This method is used for debugging.
bool isAllOnesValue() const
Return true if the expression is a constant all-ones value.
bool isNonConstantNegative() const
Return true if the specified scev is negated, but not a constant.
void print(raw_ostream &OS) const
Print out the internal representation of this scalar to the specified stream.
SCEVTypes getSCEVType() const
Type * getType() const
Return the LLVM type of this SCEV expression.
NoWrapFlags
NoWrapFlags are bitfield indices into SubclassData.
Analysis pass that exposes the ScalarEvolution for a function.
ScalarEvolution run(Function &F, FunctionAnalysisManager &AM)
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overridden by passes that need analysis information to do t...
void print(raw_ostream &OS, const Module *=nullptr) const override
print - Print out the internal state of the pass.
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overridden by subclasses to do the per-function processing of the pass.
void releaseMemory() override
releaseMemory() - This member can be implemented by a pass if it wants to be able to release its memo...
void verifyAnalysis() const override
verifyAnalysis() - This member can be implemented by an analysis pass to check state of analysis infor...
static LoopGuards collect(const Loop *L, ScalarEvolution &SE)
Collect rewrite map for loop guards for loop L, together with flags indicating if NUW and NSW can be ...
const SCEV * rewrite(const SCEV *Expr) const
Try to apply the collected loop guards to Expr.
The main scalar evolution driver.
const SCEV * getConstantMaxBackedgeTakenCount(const Loop *L)
When successful, this returns a SCEVConstant that is greater than or equal to (i.e.
static bool hasFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags TestFlags)
const DataLayout & getDataLayout() const
Return the DataLayout associated with the module this SCEV instance is operating on.
bool isKnownNonNegative(const SCEV *S)
Test if the given expression is known to be non-negative.
bool isKnownOnEveryIteration(CmpPredicate Pred, const SCEVAddRecExpr *LHS, const SCEV *RHS)
Test if the condition described by Pred, LHS, RHS is known to be true on every iteration of the loop ...
const SCEV * getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
Return the SCEV object corresponding to -V.
std::optional< LoopInvariantPredicate > getLoopInvariantExitCondDuringFirstIterationsImpl(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI, const SCEV *MaxIter)
const SCEV * getSMaxExpr(const SCEV *LHS, const SCEV *RHS)
const SCEV * getUDivCeilSCEV(const SCEV *N, const SCEV *D)
Compute ceil(N / D).
const SCEV * getGEPExpr(GEPOperator *GEP, const SmallVectorImpl< const SCEV * > &IndexExprs)
Returns an expression for a GEP.
std::optional< LoopInvariantPredicate > getLoopInvariantExitCondDuringFirstIterations(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI, const SCEV *MaxIter)
If the result of the predicate LHS Pred RHS is loop invariant with respect to L at given Context duri...
Type * getWiderType(Type *Ty1, Type *Ty2) const
const SCEV * getAbsExpr(const SCEV *Op, bool IsNSW)
bool isKnownNonPositive(const SCEV *S)
Test if the given expression is known to be non-positive.
const SCEV * getURemExpr(const SCEV *LHS, const SCEV *RHS)
Represents an unsigned remainder expression based on unsigned division.
APInt getConstantMultiple(const SCEV *S)
Returns the max constant multiple of S.
bool isKnownNegative(const SCEV *S)
Test if the given expression is known to be negative.
const SCEV * getPredicatedConstantMaxBackedgeTakenCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Predicates)
Similar to getConstantMaxBackedgeTakenCount, except it will add a set of SCEV predicates to Predicate...
const SCEV * removePointerBase(const SCEV *S)
Compute an expression equivalent to S - getPointerBase(S).
bool isLoopEntryGuardedByCond(const Loop *L, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether entry to the loop is protected by a conditional between LHS and RHS.
bool isKnownNonZero(const SCEV *S)
Test if the given expression is known to be non-zero.
const SCEV * getSCEVAtScope(const SCEV *S, const Loop *L)
Return a SCEV expression for the specified value at the specified scope in the program.
const SCEV * getSMinExpr(const SCEV *LHS, const SCEV *RHS)
const SCEV * getBackedgeTakenCount(const Loop *L, ExitCountKind Kind=Exact)
If the specified loop has a predictable backedge-taken count, return it, otherwise return a SCEVCould...
const SCEV * getUMaxExpr(const SCEV *LHS, const SCEV *RHS)
void setNoWrapFlags(SCEVAddRecExpr *AddRec, SCEV::NoWrapFlags Flags)
Update no-wrap flags of an AddRec.
const SCEV * getUMaxFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS)
Promote the operands to the wider of the types using zero-extension, and then perform a umax operatio...
const SCEV * getZero(Type *Ty)
Return a SCEV for the constant 0 of a specific type.
bool willNotOverflow(Instruction::BinaryOps BinOp, bool Signed, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI=nullptr)
Returns true if the operation BinOp between LHS and RHS provably has no signed/unsigned overflow (as selected by Signed)?...
ExitLimit computeExitLimitFromCond(const Loop *L, Value *ExitCond, bool ExitIfTrue, bool ControlsOnlyExit, bool AllowPredicates=false)
Compute the number of times the backedge of the specified loop will execute if its exit condition wer...
const SCEV * getZeroExtendExprImpl(const SCEV *Op, Type *Ty, unsigned Depth=0)
const SCEVPredicate * getEqualPredicate(const SCEV *LHS, const SCEV *RHS)
unsigned getSmallConstantTripMultiple(const Loop *L, const SCEV *ExitCount)
Returns the largest constant divisor of the trip count as a normal unsigned value,...
uint64_t getTypeSizeInBits(Type *Ty) const
Return the size in bits of the specified type, for which isSCEVable must return true.
const SCEV * getConstant(ConstantInt *V)
const SCEV * getPredicatedBackedgeTakenCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Predicates)
Similar to getBackedgeTakenCount, except it will add a set of SCEV predicates to Predicates that are ...
const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
ConstantRange getSignedRange(const SCEV *S)
Determine the signed range for a particular SCEV.
const SCEV * getNoopOrSignExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
bool loopHasNoAbnormalExits(const Loop *L)
Return true if the loop has no abnormal exits.
const SCEV * getTripCountFromExitCount(const SCEV *ExitCount)
A version of getTripCountFromExitCount below which always picks an evaluation type which can not resu...
ScalarEvolution(Function &F, TargetLibraryInfo &TLI, AssumptionCache &AC, DominatorTree &DT, LoopInfo &LI)
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
const SCEV * getTruncateOrNoop(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
const SCEV * getCastExpr(SCEVTypes Kind, const SCEV *Op, Type *Ty)
const SCEV * getSequentialMinMaxExpr(SCEVTypes Kind, SmallVectorImpl< const SCEV * > &Operands)
const SCEV * getLosslessPtrToIntExpr(const SCEV *Op, unsigned Depth=0)
std::optional< bool > evaluatePredicateAt(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI)
Check whether the condition described by Pred, LHS, and RHS is true or false in the given Context.
unsigned getSmallConstantMaxTripCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > *Predicates=nullptr)
Returns the upper bound of the loop trip count as a normal unsigned value.
const SCEV * getPtrToIntExpr(const SCEV *Op, Type *Ty)
bool isBackedgeTakenCountMaxOrZero(const Loop *L)
Return true if the backedge taken count is either the value returned by getConstantMaxBackedgeTakenCo...
void forgetLoop(const Loop *L)
This method should be called by the client when it has changed a loop in a way that may effect Scalar...
bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
bool isKnownPositive(const SCEV *S)
Test if the given expression is known to be positive.
APInt getUnsignedRangeMin(const SCEV *S)
Determine the min of the unsigned range for a particular SCEV.
bool SimplifyICmpOperands(CmpPredicate &Pred, const SCEV *&LHS, const SCEV *&RHS, unsigned Depth=0)
Simplify LHS and RHS in a comparison with predicate Pred.
const SCEV * getOffsetOfExpr(Type *IntTy, StructType *STy, unsigned FieldNo)
Return an expression for offsetof on the given field with type IntTy.
LoopDisposition getLoopDisposition(const SCEV *S, const Loop *L)
Return the "disposition" of the given SCEV with respect to the given loop.
bool containsAddRecurrence(const SCEV *S)
Return true if the SCEV is a scAddRecExpr or it contains scAddRecExpr.
const SCEV * getSignExtendExprImpl(const SCEV *Op, Type *Ty, unsigned Depth=0)
const SCEV * getAddRecExpr(const SCEV *Start, const SCEV *Step, const Loop *L, SCEV::NoWrapFlags Flags)
Get an add recurrence expression for the specified loop.
bool hasOperand(const SCEV *S, const SCEV *Op) const
Test whether the given SCEV has Op as a direct or indirect operand.
const SCEV * getUDivExpr(const SCEV *LHS, const SCEV *RHS)
Get a canonical unsigned division expression, or something simpler if possible.
const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
bool isSCEVable(Type *Ty) const
Test if values of the given type are analyzable within the SCEV framework.
Type * getEffectiveSCEVType(Type *Ty) const
Return a type with the same bitwidth as the given type and which represents how SCEV will treat the g...
const SCEVPredicate * getComparePredicate(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
const SCEV * getNotSCEV(const SCEV *V)
Return the SCEV object corresponding to ~V.
std::optional< LoopInvariantPredicate > getLoopInvariantPredicate(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI=nullptr)
If the result of the predicate LHS Pred RHS is loop invariant with respect to L, return a LoopInvaria...
bool instructionCouldExistWithOperands(const SCEV *A, const SCEV *B)
Return true if there exists a point in the program at which both A and B could be operands to the sam...
ConstantRange getUnsignedRange(const SCEV *S)
Determine the unsigned range for a particular SCEV.
uint32_t getMinTrailingZeros(const SCEV *S)
Determine the minimum number of zero bits that S is guaranteed to end in (at every loop iteration).
void print(raw_ostream &OS) const
const SCEV * getUMinExpr(const SCEV *LHS, const SCEV *RHS, bool Sequential=false)
const SCEV * getPredicatedExitCount(const Loop *L, const BasicBlock *ExitingBlock, SmallVectorImpl< const SCEVPredicate * > *Predicates, ExitCountKind Kind=Exact)
Same as above except this uses the predicated backedge taken info and may require predicates.
static SCEV::NoWrapFlags clearFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OffFlags)
void forgetTopmostLoop(const Loop *L)
void forgetValue(Value *V)
This method should be called by the client when it has changed a value in a way that may effect its v...
APInt getSignedRangeMin(const SCEV *S)
Determine the min of the signed range for a particular SCEV.
const SCEV * getNoopOrAnyExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
void forgetBlockAndLoopDispositions(Value *V=nullptr)
Called when the client has changed the disposition of values in a loop or block.
const SCEV * getTruncateExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
@ MonotonicallyDecreasing
@ MonotonicallyIncreasing
const SCEV * getStoreSizeOfExpr(Type *IntTy, Type *StoreTy)
Return an expression for the store size of StoreTy that is type IntTy.
const SCEVPredicate * getWrapPredicate(const SCEVAddRecExpr *AR, SCEVWrapPredicate::IncrementWrapFlags AddedFlags)
bool isLoopBackedgeGuardedByCond(const Loop *L, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether the backedge of the loop is protected by a conditional between LHS and RHS.
const SCEV * getMinusSCEV(const SCEV *LHS, const SCEV *RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS.
APInt getNonZeroConstantMultiple(const SCEV *S)
const SCEV * getMinusOne(Type *Ty)
Return a SCEV for the constant -1 of a specific type.
static SCEV::NoWrapFlags setFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OnFlags)
bool hasLoopInvariantBackedgeTakenCount(const Loop *L)
Return true if the specified loop has an analyzable loop-invariant backedge-taken count.
BlockDisposition getBlockDisposition(const SCEV *S, const BasicBlock *BB)
Return the "disposition" of the given SCEV with respect to the given block.
const SCEV * getNoopOrZeroExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
bool invalidate(Function &F, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)
const SCEV * getUMinFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS, bool Sequential=false)
Promote the operands to the wider of the types using zero-extension, and then perform a umin operatio...
bool loopIsFiniteByAssumption(const Loop *L)
Return true if this loop is finite by assumption.
const SCEV * getExistingSCEV(Value *V)
Return an existing SCEV for V if there is one, otherwise return nullptr.
LoopDisposition
An enum describing the relationship between a SCEV and a loop.
@ LoopComputable
The SCEV varies predictably with the loop.
@ LoopVariant
The SCEV is loop-variant (unknown).
@ LoopInvariant
The SCEV is loop-invariant.
friend class SCEVCallbackVH
const SCEV * getAnyExtendExpr(const SCEV *Op, Type *Ty)
getAnyExtendExpr - Return a SCEV for the given operand extended with unspecified bits out to the give...
bool isKnownToBeAPowerOfTwo(const SCEV *S, bool OrZero=false, bool OrNegative=false)
Test if the given expression is known to be a power of 2.
std::optional< SCEV::NoWrapFlags > getStrengthenedNoWrapFlagsFromBinOp(const OverflowingBinaryOperator *OBO)
Parse NSW/NUW flags from add/sub/mul IR binary operation Op into SCEV no-wrap flags,...
void forgetLcssaPhiWithNewPredecessor(Loop *L, PHINode *V)
Forget LCSSA phi node V of loop L to which a new predecessor was added, such that it may no longer be...
bool containsUndefs(const SCEV *S) const
Return true if the SCEV expression contains an undef value.
std::optional< MonotonicPredicateType > getMonotonicPredicateType(const SCEVAddRecExpr *LHS, ICmpInst::Predicate Pred)
If, for all loop invariant X, the predicate "LHS `Pred` X" is monotonically increasing or decreasing,...
const SCEV * getCouldNotCompute()
bool isAvailableAtLoopEntry(const SCEV *S, const Loop *L)
Determine if the SCEV can be evaluated at loop's entry.
BlockDisposition
An enum describing the relationship between a SCEV and a basic block.
@ DominatesBlock
The SCEV dominates the block.
@ ProperlyDominatesBlock
The SCEV properly dominates the block.
@ DoesNotDominateBlock
The SCEV does not dominate the block.
const SCEV * getExitCount(const Loop *L, const BasicBlock *ExitingBlock, ExitCountKind Kind=Exact)
Return the number of times the backedge executes before the given exit would be taken; if not exactly...
const SCEV * getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
void getPoisonGeneratingValues(SmallPtrSetImpl< const Value * > &Result, const SCEV *S)
Return the set of Values that, if poison, will definitively result in S being poison as well.
void forgetLoopDispositions()
Called when the client has changed the disposition of values in this loop.
const SCEV * getVScale(Type *Ty)
unsigned getSmallConstantTripCount(const Loop *L)
Returns the exact trip count of the loop if we can compute it, and the result is a small constant.
bool hasComputableLoopEvolution(const SCEV *S, const Loop *L)
Return true if the given SCEV changes value in a known way in the specified loop.
const SCEV * getPointerBase(const SCEV *V)
Transitively follow the chain of pointer-type operands until reaching a SCEV that does not have a sin...
const SCEV * getMinMaxExpr(SCEVTypes Kind, SmallVectorImpl< const SCEV * > &Operands)
bool dominates(const SCEV *S, const BasicBlock *BB)
Return true if elements that makes up the given SCEV dominate the specified basic block.
APInt getUnsignedRangeMax(const SCEV *S)
Determine the max of the unsigned range for a particular SCEV.
ExitCountKind
The terms "backedge taken count" and "exit count" are used interchangeably to refer to the number of ...
@ SymbolicMaximum
An expression which provides an upper bound on the exact trip count.
@ ConstantMaximum
A constant which provides an upper bound on the exact trip count.
@ Exact
An expression exactly describing the number of times the backedge has executed when a loop is exited.
const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
const SCEV * getMulExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
const SCEVAddRecExpr * convertSCEVToAddRecWithPredicates(const SCEV *S, const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Preds)
Tries to convert the S expression to an AddRec expression, adding additional predicates to Preds as r...
const SCEV * getElementSize(Instruction *Inst)
Return the size of an element read or written by Inst.
const SCEV * getSizeOfExpr(Type *IntTy, TypeSize Size)
Return an expression for a TypeSize.
std::optional< bool > evaluatePredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Check whether the condition described by Pred, LHS, and RHS is true or false.
const SCEV * getUnknown(Value *V)
std::optional< std::pair< const SCEV *, SmallVector< const SCEVPredicate *, 3 > > > createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI)
Checks if SymbolicPHI can be rewritten as an AddRecExpr under some Predicates.
const SCEV * getTruncateOrZeroExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
const SCEV * getElementCount(Type *Ty, ElementCount EC)
static SCEV::NoWrapFlags maskFlags(SCEV::NoWrapFlags Flags, int Mask)
Convenient NoWrapFlags manipulation that hides enum casts and is visible in the ScalarEvolution name ...
std::optional< APInt > computeConstantDifference(const SCEV *LHS, const SCEV *RHS)
Compute LHS - RHS and returns the result as an APInt if it is a constant, and std::nullopt if it isn'...
bool properlyDominates(const SCEV *S, const BasicBlock *BB)
Return true if elements that makes up the given SCEV properly dominate the specified basic block.
const SCEV * rewriteUsingPredicate(const SCEV *S, const Loop *L, const SCEVPredicate &A)
Re-writes the SCEV according to the Predicates in A.
std::pair< const SCEV *, const SCEV * > SplitIntoInitAndPostInc(const Loop *L, const SCEV *S)
Splits SCEV expression S into two SCEVs.
bool canReuseInstruction(const SCEV *S, Instruction *I, SmallVectorImpl< Instruction * > &DropPoisonGeneratingInsts)
Check whether it is poison-safe to represent the expression S using the instruction I.
bool isKnownPredicateAt(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
const SCEV * getPredicatedSymbolicMaxBackedgeTakenCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Predicates)
Similar to getSymbolicMaxBackedgeTakenCount, except it will add a set of SCEV predicates to Predicate...
const SCEV * getUDivExactExpr(const SCEV *LHS, const SCEV *RHS)
Get a canonical unsigned division expression, or something simpler if possible.
void registerUser(const SCEV *User, ArrayRef< const SCEV * > Ops)
Notify this ScalarEvolution that User directly uses SCEVs in Ops.
const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
bool isBasicBlockEntryGuardedByCond(const BasicBlock *BB, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether entry to the basic block is protected by a conditional between LHS and RHS.
const SCEV * getTruncateOrSignExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
bool containsErasedValue(const SCEV *S) const
Return true if the SCEV expression contains a Value that has been optimised out and is now a nullptr.
bool isKnownPredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
bool isKnownViaInduction(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
We'd like to check the predicate on every iteration of the most dominated loop between loops used in ...
const SCEV * getSymbolicMaxBackedgeTakenCount(const Loop *L)
When successful, this returns a SCEV that is greater than or equal to (i.e.
APInt getSignedRangeMax(const SCEV *S)
Determine the max of the signed range for a particular SCEV.
LLVMContext & getContext() const
This class represents the LLVM 'select' instruction.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
iterator insert(iterator I, T &&Elt)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
Used to lazily calculate structure layout information for a target machine, based on the DataLayout s...
TypeSize getElementOffset(unsigned Idx) const
TypeSize getSizeInBits() const
Class to represent struct types.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
The instances of the Type class are immutable: once they are created, they are never changed.
bool isPointerTy() const
True if this is an instance of PointerType.
static IntegerType * getInt1Ty(LLVMContext &C)
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
static IntegerType * getInt8Ty(LLVMContext &C)
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
static IntegerType * getInt32Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
unsigned getValueID() const
Return an ID for the concrete type of this object.
void printAsOperand(raw_ostream &O, bool PrintType=true, const Module *M=nullptr) const
Print the name of this Value out to the specified raw_ostream.
LLVMContext & getContext() const
All values hold a context through their type.
StringRef getName() const
Return a constant reference to the value's name.
Represents an op.with.overflow intrinsic.
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
const ParentTy * getParent() const
This class implements an extremely fast bulk output stream that can only output to a stream.
raw_ostream & indent(unsigned NumSpaces)
indent - Insert 'NumSpaces' spaces.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
const APInt & smin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be signed.
const APInt & smax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be signed.
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
std::optional< APInt > SolveQuadraticEquationWrap(APInt A, APInt B, APInt C, unsigned RangeWidth)
Let q(n) = An^2 + Bn + C, and BW = bit width of the value range (e.g.
const APInt & umax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be unsigned.
APInt GreatestCommonDivisor(APInt A, APInt B)
Compute GCD of two unsigned APInt values.
@ C
The default llvm calling convention, compatible with C.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Function * getDeclarationIfExists(Module *M, ID id, ArrayRef< Type * > Tys, FunctionType *FT=nullptr)
This version supports overloaded intrinsics.
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
bool match(Val *V, const Pattern &P)
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
bind_ty< WithOverflowInst > m_WithOverflowInst(WithOverflowInst *&I)
Match a with overflow intrinsic, capturing it if we match.
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
class_match< BasicBlock > m_BasicBlock()
Match an arbitrary basic block value and ignore it.
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
cst_pred_ty< is_all_ones > m_scev_AllOnes()
Match an integer with all bits set.
SCEVUnaryExpr_match< SCEVZeroExtendExpr, Op0_t > m_scev_ZExt(const Op0_t &Op0)
cst_pred_ty< is_one > m_scev_One()
Match an integer 1.
SCEVUnaryExpr_match< SCEVSignExtendExpr, Op0_t > m_scev_SExt(const Op0_t &Op0)
cst_pred_ty< is_zero > m_scev_Zero()
Match an integer 0.
bind_ty< const SCEVConstant > m_SCEVConstant(const SCEVConstant *&V)
bind_ty< const SCEV > m_SCEV(const SCEV *&V)
Match a SCEV, capturing it if we match.
SCEVBinaryExpr_match< SCEVAddExpr, Op0_t, Op1_t > m_scev_Add(const Op0_t &Op0, const Op1_t &Op1)
bool match(const SCEV *S, const Pattern &P)
bind_ty< const SCEVUnknown > m_SCEVUnknown(const SCEVUnknown *&V)
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
@ Switch
The "resume-switch" lowering, where there are separate resume and destroy functions that are shared b...
NodeAddr< PhiNode * > Phi
This is an optimization pass for GlobalISel generic memory operations.
void visitAll(const SCEV *Root, SV &Visitor)
Use SCEVTraversal to visit all nodes in the given expression tree.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
LLVM_ATTRIBUTE_ALWAYS_INLINE DynamicAPInt gcd(const DynamicAPInt &A, const DynamicAPInt &B)
void stable_sort(R &&Range)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
bool mustTriggerUB(const Instruction *I, const SmallPtrSetImpl< const Value * > &KnownPoison)
Return true if the given instruction must trigger undefined behavior when I is executed with any oper...
bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
bool canConstantFoldCallTo(const CallBase *Call, const Function *F)
canConstantFoldCallTo - Return true if it's even possible to fold a call to the specified function.
bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
auto successors(const MachineBasicBlock *BB)
bool set_is_subset(const S1Ty &S1, const S2Ty &S2)
set_is_subset(A, B) - Return true iff A in B
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
unsigned short computeExpressionSize(ArrayRef< const SCEV * > Args)
Printable print(const GCNRegPressure &RP, const GCNSubtarget *ST=nullptr)
ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO, const DominatorTree &DT)
Returns true if the arithmetic part of the WO 's result is used only along the paths control dependen...
bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
void erase(Container &C, ValueType V)
Wrapper function to remove a value from a container:
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
void initializeScalarEvolutionWrapperPassPass(PassRegistry &)
auto reverse(ContainerTy &&C)
bool isMustProgress(const Loop *L)
Return true if this loop can be assumed to make progress.
bool impliesPoison(const Value *ValAssumedPoison, const Value *V)
Return true if V is poison given that ValAssumedPoison is already poison.
bool isFinite(const Loop *L)
Return true if this loop can be assumed to run for a finite number of iterations.
bool programUndefinedIfPoison(const Instruction *Inst)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool isPointerTy(const Type *T)
ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
Constant * ConstantFoldInstOperands(Instruction *I, ArrayRef< Constant * > Ops, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldInstOperands - Attempt to constant fold an instruction with the specified operands.
bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
bool propagatesPoison(const Use &PoisonOp)
Return true if PoisonOp's user yields poison or raises UB if its operand PoisonOp is poison.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Mul
Product of integers.
@ SMax
Signed integer max implemented in terms of select(cmp()).
@ SMin
Signed integer min implemented in terms of select(cmp()).
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
bool isIntN(unsigned N, int64_t x)
Checks if a signed integer fits into the given (dynamic) bit width.
auto count(R &&Range, const E &Element)
Wrapper function around std::count to count the number of times an element Element occurs in the give...
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
DWARFExpression::Operation Op
auto max_element(R &&Range)
Provide wrappers to std::max_element which take ranges instead of having to pass begin/end explicitly...
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
constexpr unsigned BitWidth
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return the number of times the sign bit of the register is replicated into the other bits.
iterator_range< df_iterator< T > > depth_first(const T &G)
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
bool SCEVExprContains(const SCEV *Root, PredTy Pred)
Return true if any node in Root satisfies the predicate Pred.
Implement std::hash so that hash_code can be used in STL containers.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
A special type used by analysis passes to provide an address that identifies that particular analysis...
Incoming for lane mask phi as machine instruction, incoming register Reg and incoming block Block are...
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
bool isNonNegative() const
Returns true if this value is known to be non-negative.
static KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
unsigned getBitWidth() const
Get the bit width of this value.
static KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for lshr(LHS, RHS).
KnownBits zextOrTrunc(unsigned BitWidth) const
Return known bits for a zero extension or truncation of the value we're tracking.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
bool isNegative() const
Returns true if this value is known to be negative.
static KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
An object of this class is returned by queries that could not be answered.
static bool classof(const SCEV *S)
Methods for support type inquiry through isa, cast, and dyn_cast:
This class defines a simple visitor class that may be used for various SCEV analysis purposes.
A utility class that uses RAII to save and restore the value of a variable.
Information about the number of loop iterations for which a loop exit's branch condition evaluates to...
ExitLimit(const SCEV *E)
Construct either an exact exit limit from a constant, or an unknown one from a SCEVCouldNotCompute.
const SCEV * ExactNotTaken
const SCEV * SymbolicMaxNotTaken
SmallVector< const SCEVPredicate *, 4 > Predicates
A vector of predicate guards for this ExitLimit.
const SCEV * ConstantMaxNotTaken