83#include "llvm/Config/llvm-config.h"
138#define DEBUG_TYPE "scalar-evolution"
141 "Number of loop exits with predictable exit counts");
143 "Number of loop exits without predictable exit counts");
145 "Number of loops with trip counts computed by force");
147#ifdef EXPENSIVE_CHECKS
155 cl::desc(
"Maximum number of iterations SCEV will "
156 "symbolically execute a constant "
162 cl::desc(
"Verify ScalarEvolution's backedge taken counts (slow)"));
165 cl::desc(
"Enable stricter verification with -verify-scev is passed"));
169 cl::desc(
"Verify IR correctness when making sensitive SCEV queries (slow)"),
174 cl::desc(
"Threshold for inlining multiplication operands into a SCEV"),
179 cl::desc(
"Threshold for inlining addition operands into a SCEV"),
183 "scalar-evolution-max-scev-compare-depth",
cl::Hidden,
184 cl::desc(
"Maximum depth of recursive SCEV complexity comparisons"),
188 "scalar-evolution-max-scev-operations-implication-depth",
cl::Hidden,
189 cl::desc(
"Maximum depth of recursive SCEV operations implication analysis"),
193 "scalar-evolution-max-value-compare-depth",
cl::Hidden,
194 cl::desc(
"Maximum depth of recursive value complexity comparisons"),
199 cl::desc(
"Maximum depth of recursive arithmetics"),
203 "scalar-evolution-max-constant-evolving-depth",
cl::Hidden,
208 cl::desc(
"Maximum depth of recursive SExt/ZExt/Trunc"),
213 cl::desc(
"Max coefficients in AddRec during evolving"),
218 cl::desc(
"Size of the expression which is considered huge"),
223 cl::desc(
"Threshold for switching to iteratively computing SCEV ranges"),
227 "scalar-evolution-max-loop-guard-collection-depth",
cl::Hidden,
228 cl::desc(
"Maximum depth for recursive loop guard collection"),
cl::init(1));
233 cl::desc(
"When printing analysis, include information on every instruction"));
236 "scalar-evolution-use-expensive-range-sharpening",
cl::Hidden,
238 cl::desc(
"Use more powerful methods of sharpening expression ranges. May "
239 "be costly in terms of compile time"));
242 "scalar-evolution-max-scc-analysis-depth",
cl::Hidden,
243 cl::desc(
"Maximum amount of nodes to process while searching SCEVUnknown "
244 "Phi strongly connected components"),
249 cl::desc(
"Handle <= and >= in finite loops"),
253 "scalar-evolution-use-context-for-no-wrap-flag-strenghening",
cl::Hidden,
254 cl::desc(
"Infer nuw/nsw flags using context where suitable"),
261#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
281#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
301 OS <<
"(ptrto" << OpS <<
" " << *
Op->getType() <<
" " << *
Op <<
" to "
308 OS <<
"(trunc " << *
Op->getType() <<
" " << *
Op <<
" to "
315 OS <<
"(zext " << *
Op->getType() <<
" " << *
Op <<
" to "
322 OS <<
"(sext " << *
Op->getType() <<
" " << *
Op <<
" to "
351 const char *OpStr =
nullptr;
364 OpStr =
" umin_seq ";
388 OS <<
"(" << *UDiv->
getLHS() <<
" /u " << *UDiv->
getRHS() <<
")";
395 OS <<
"***COULDNOTCOMPUTE***";
473 if (!
Mul)
return false;
477 if (!SC)
return false;
495 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
497 UniqueSCEVs.InsertNode(S, IP);
511 ConstantInt::get(ITy, V, isSigned,
true));
519 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
522 UniqueSCEVs.InsertNode(S, IP);
542 "Must be a non-bit-width-changing pointer-to-integer cast!");
549 "Must be a non-bit-width-changing pointer-to-integer cast!");
561 "Cannot truncate non-integer value!");
568 "Cannot zero extend non-integer value!");
575 "Cannot sign extend non-integer value!");
580 SE->forgetMemoizedResults({
this});
583 SE->UniqueSCEVs.RemoveNode(
this);
589void SCEVUnknown::allUsesReplacedWith(
Value *New) {
591 SE->forgetMemoizedResults({
this});
594 SE->UniqueSCEVs.RemoveNode(
this);
616 if (LIsPointer != RIsPointer)
617 return (
int)LIsPointer - (int)RIsPointer;
622 return (
int)LID - (int)RID;
627 unsigned LArgNo = LA->getArgNo(), RArgNo =
RA->getArgNo();
628 return (
int)LArgNo - (int)RArgNo;
634 if (
auto L = LGV->getLinkage() - RGV->getLinkage())
637 const auto IsGVNameSemantic = [&](
const GlobalValue *GV) {
638 auto LT = GV->getLinkage();
645 if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
646 return LGV->getName().compare(RGV->getName());
657 if (LParent != RParent) {
660 if (LDepth != RDepth)
661 return (
int)LDepth - (int)RDepth;
665 unsigned LNumOps = LInst->getNumOperands(),
666 RNumOps = RInst->getNumOperands();
667 if (LNumOps != RNumOps)
668 return (
int)LNumOps - (int)RNumOps;
670 for (
unsigned Idx :
seq(LNumOps)) {
672 RInst->getOperand(Idx),
Depth + 1);
686static std::optional<int>
696 return (
int)LType - (int)RType;
721 unsigned LBitWidth = LA.
getBitWidth(), RBitWidth =
RA.getBitWidth();
722 if (LBitWidth != RBitWidth)
723 return (
int)LBitWidth - (int)RBitWidth;
724 return LA.
ult(
RA) ? -1 : 1;
730 return LTy->getBitWidth() - RTy->getBitWidth();
741 if (LLoop != RLoop) {
743 assert(LHead != RHead &&
"Two loops share the same header?");
747 "No dominance between recurrences used by one SCEV?");
771 unsigned LNumOps = LOps.
size(), RNumOps = ROps.
size();
772 if (LNumOps != RNumOps)
773 return (
int)LNumOps - (int)RNumOps;
775 for (
unsigned i = 0; i != LNumOps; ++i) {
801 if (
Ops.size() < 2)
return;
806 return Complexity && *Complexity < 0;
808 if (
Ops.size() == 2) {
812 if (IsLessComplex(
RHS,
LHS))
825 for (
unsigned i = 0, e =
Ops.size(); i != e-2; ++i) {
831 for (
unsigned j = i+1; j != e &&
Ops[j]->getSCEVType() == Complexity; ++j) {
836 if (i == e-2)
return;
858template <
typename FoldT,
typename IsIdentityT,
typename IsAbsorberT>
862 IsIdentityT IsIdentity, IsAbsorberT IsAbsorber) {
864 for (
unsigned Idx = 0; Idx <
Ops.size();) {
872 Ops.erase(
Ops.begin() + Idx);
879 assert(Folded &&
"Must have folded value");
883 if (Folded && IsAbsorber(Folded->
getAPInt()))
887 if (Folded && !IsIdentity(Folded->
getAPInt()))
888 Ops.insert(
Ops.begin(), Folded);
890 return Ops.size() == 1 ?
Ops[0] :
nullptr;
965 APInt OddFactorial(W, 1);
967 for (
unsigned i = 3; i <= K; ++i) {
970 OddFactorial *= (i >> TwoFactors);
974 unsigned CalculationBits = W +
T;
988 for (
unsigned i = 1; i != K; ++i) {
1021 for (
unsigned i = 1, e =
Operands.size(); i != e; ++i) {
1050 ConversionFn CreatePtrCast;
1054 ConversionFn CreatePtrCast)
1055 : Base(
SE), TargetTy(TargetTy), CreatePtrCast(
std::
move(CreatePtrCast)) {}
1058 Type *TargetTy, ConversionFn CreatePtrCast) {
1060 return Rewriter.visit(Scev);
1096 "Should only reach pointer-typed SCEVUnknown's.");
1101 return SE.getZero(TargetTy);
1102 return CreatePtrCast(Expr);
1107 assert(
Op->getType()->isPointerTy() &&
"Op must be a pointer");
1126 Op, *
this, IntPtrTy, [
this, IntPtrTy](
const SCEVUnknown *U) {
1131 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
1133 SCEV *S =
new (SCEVAllocator)
1135 UniqueSCEVs.InsertNode(S, IP);
1137 return static_cast<const SCEV *
>(S);
1140 "We must have succeeded in sinking the cast, "
1141 "and ending up with an integer-typed expression!");
1146 assert(
Op->getType()->isPointerTy() &&
"Op must be a pointer");
1150 if (DL.hasUnstableRepresentation(
Op->getType()))
1153 Type *Ty = DL.getAddressType(
Op->getType());
1164 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
1166 SCEV *S =
new (SCEVAllocator)
1168 UniqueSCEVs.InsertNode(S, IP);
1170 return static_cast<const SCEV *
>(S);
1173 "We must have succeeded in sinking the cast, "
1174 "and ending up with an integer-typed expression!");
1179 assert(Ty->isIntegerTy() &&
"Target type must be an integer type!");
1191 "This is not a truncating conversion!");
1193 "This is not a conversion to a SCEVable type!");
1194 assert(!
Op->getType()->isPointerTy() &&
"Can't truncate pointer!");
1202 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1224 UniqueSCEVs.InsertNode(S, IP);
1236 unsigned numTruncs = 0;
1237 for (
unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
1245 if (numTruncs < 2) {
1255 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
1262 for (
const SCEV *
Op : AddRec->operands())
1277 UniqueSCEVs.InsertNode(S, IP);
1317struct ExtendOpTraitsBase {
1318 typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(
const SCEV *,
Type *,
1323template <
typename ExtendOp>
struct ExtendOpTraits {
1339 static const GetExtendExprTy GetExtendExpr;
1341 static const SCEV *getOverflowLimitForStep(
const SCEV *Step,
1342 ICmpInst::Predicate *Pred,
1343 ScalarEvolution *SE) {
1348const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1355 static const GetExtendExprTy GetExtendExpr;
1357 static const SCEV *getOverflowLimitForStep(
const SCEV *Step,
1358 ICmpInst::Predicate *Pred,
1359 ScalarEvolution *SE) {
1364const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1376template <
typename ExtendOpTy>
1379 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1380 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1396 for (
auto It = DiffOps.
begin(); It != DiffOps.
end(); ++It)
1409 auto PreStartFlags =
1427 const SCEV *OperandExtendedStart =
1429 (SE->*GetExtendExpr)(Step, WideTy,
Depth));
1430 if ((SE->*GetExtendExpr)(Start, WideTy,
Depth) == OperandExtendedStart) {
1442 const SCEV *OverflowLimit =
1443 ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);
1445 if (OverflowLimit &&
1453template <
typename ExtendOpTy>
1457 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1465 (SE->*GetExtendExpr)(PreStart, Ty,
Depth));
1500template <
typename ExtendOpTy>
1501bool ScalarEvolution::proveNoWrapByVaryingStart(
const SCEV *Start,
1504 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1514 APInt StartAI = StartC->
getAPInt();
1516 for (
unsigned Delta : {-2, -1, 1, 2}) {
1517 const SCEV *PreStart =
getConstant(StartAI - Delta);
1519 FoldingSetNodeID
ID;
1521 ID.AddPointer(PreStart);
1522 ID.AddPointer(Step);
1526 static_cast<SCEVAddRecExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
1530 if (PreAR && PreAR->getNoWrapFlags(WrapType)) {
1533 const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
1534 DeltaS, &Pred,
this);
1552 const unsigned BitWidth =
C.getBitWidth();
1570 const APInt &ConstantStart,
1589 auto &UserIDs = FoldCacheUser[
I.first->second];
1590 assert(
count(UserIDs,
ID) == 1 &&
"unexpected duplicates in UserIDs");
1591 for (
unsigned I = 0;
I != UserIDs.size(); ++
I)
1592 if (UserIDs[
I] ==
ID) {
1597 I.first->second = S;
1599 FoldCacheUser[S].push_back(
ID);
1605 "This is not an extending conversion!");
1607 "This is not a conversion to a SCEVable type!");
1608 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1612 if (
const SCEV *S = FoldCache.lookup(
ID))
1624 "This is not an extending conversion!");
1626 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1643 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1647 UniqueSCEVs.InsertNode(S, IP);
1656 const SCEV *
X = ST->getOperand();
1670 if (AR->isAffine()) {
1671 const SCEV *Start = AR->getStart();
1672 const SCEV *Step = AR->getStepRecurrence(*
this);
1674 const Loop *L = AR->getLoop();
1678 if (AR->hasNoUnsignedWrap()) {
1699 const SCEV *CastedMaxBECount =
1703 if (MaxBECount == RecastedMaxBECount) {
1713 const SCEV *WideMaxBECount =
1715 const SCEV *OperandExtendedAdd =
1721 if (ZAdd == OperandExtendedAdd) {
1732 OperandExtendedAdd =
1738 if (ZAdd == OperandExtendedAdd) {
1759 !AC.assumptions().empty()) {
1761 auto NewFlags = proveNoUnsignedWrapViaInduction(AR);
1763 if (AR->hasNoUnsignedWrap()) {
1798 const APInt &
C = SC->getAPInt();
1802 const SCEV *SResidual =
1811 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
1836 if (SA->hasNoUnsignedWrap()) {
1857 const SCEV *SResidual =
1869 if (
SM->hasNoUnsignedWrap()) {
1891 const SCEV *TruncRHS;
1928 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1931 UniqueSCEVs.InsertNode(S, IP);
1939 "This is not an extending conversion!");
1941 "This is not a conversion to a SCEVable type!");
1942 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1946 if (
const SCEV *S = FoldCache.lookup(
ID))
1958 "This is not an extending conversion!");
1960 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1982 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1987 UniqueSCEVs.InsertNode(S, IP);
1996 const SCEV *
X = ST->getOperand();
2007 if (SA->hasNoSignedWrap()) {
2029 const SCEV *SResidual =
2043 if (AR->isAffine()) {
2044 const SCEV *Start = AR->getStart();
2045 const SCEV *Step = AR->getStepRecurrence(*
this);
2047 const Loop *L = AR->getLoop();
2051 if (AR->hasNoSignedWrap()) {
2073 const SCEV *CastedMaxBECount =
2077 if (MaxBECount == RecastedMaxBECount) {
2087 const SCEV *WideMaxBECount =
2089 const SCEV *OperandExtendedAdd =
2095 if (SAdd == OperandExtendedAdd) {
2106 OperandExtendedAdd =
2112 if (SAdd == OperandExtendedAdd) {
2132 auto NewFlags = proveNoSignedWrapViaInduction(AR);
2134 if (AR->hasNoSignedWrap()) {
2149 const APInt &
C = SC->getAPInt();
2153 const SCEV *SResidual =
2162 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
2190 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
2193 UniqueSCEVs.InsertNode(S, IP);
2219 "This is not an extending conversion!");
2221 "This is not a conversion to a SCEVable type!");
2226 if (SC->getAPInt().isNegative())
2231 const SCEV *NewOp =
T->getOperand();
2250 for (
const SCEV *
Op : AR->operands())
2288 APInt &AccumulatedConstant,
2292 bool Interesting =
false;
2299 if (Scale != 1 || AccumulatedConstant != 0 ||
C->getValue()->isZero())
2301 AccumulatedConstant += Scale *
C->getAPInt();
2306 for (; i !=
Ops.size(); ++i) {
2315 M, NewOps, AccumulatedConstant,
Add->operands(), NewScale, SE);
2321 auto Pair = M.insert({
Key, NewScale});
2325 Pair.first->second += NewScale;
2333 auto Pair = M.insert({
Ops[i], Scale});
2337 Pair.first->second += Scale;
2356 case Instruction::Add:
2359 case Instruction::Sub:
2362 case Instruction::Mul:
2376 const SCEV *
A = (this->*Extension)(
2378 const SCEV *LHSB = (this->*Extension)(LHS, WideTy, 0);
2379 const SCEV *RHSB = (this->*Extension)(RHS, WideTy, 0);
2387 if (BinOp == Instruction::Mul)
2393 APInt C = RHSC->getAPInt();
2394 unsigned NumBits =
C.getBitWidth();
2395 bool IsSub = (BinOp == Instruction::Sub);
2396 bool IsNegativeConst = (
Signed &&
C.isNegative());
2398 bool OverflowDown = IsSub ^ IsNegativeConst;
2400 if (IsNegativeConst) {
2413 APInt Limit = Min + Magnitude;
2419 APInt Limit = Max - Magnitude;
2424std::optional<SCEV::NoWrapFlags>
2429 return std::nullopt;
2438 bool Deduced =
false;
2440 if (OBO->
getOpcode() != Instruction::Add &&
2443 return std::nullopt;
2452 false, LHS, RHS, CtxI)) {
2459 true, LHS, RHS, CtxI)) {
2466 return std::nullopt;
2476 using namespace std::placeholders;
2483 assert(CanAnalyze &&
"don't call from other places!");
2490 auto IsKnownNonNegative = [&](
SCEVUse U) {
2500 if (SignOrUnsignWrap != SignOrUnsignMask &&
2507 return Instruction::Add;
2509 return Instruction::Mul;
2520 Opcode,
C, OBO::NoSignedWrap);
2528 Opcode,
C, OBO::NoUnsignedWrap);
2538 Ops[0]->isZero() && IsKnownNonNegative(
Ops[1]))
2545 if (UDiv->getOperand(1) ==
Ops[1])
2548 if (UDiv->getOperand(1) ==
Ops[0])
2564 "only nuw or nsw allowed");
2565 assert(!
Ops.empty() &&
"Cannot get empty add!");
2566 if (
Ops.size() == 1)
return Ops[0];
2569 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i)
2571 "SCEVAddExpr operand types don't match!");
2573 Ops, [](
const SCEV *
Op) {
return Op->getType()->isPointerTy(); });
2574 assert(NumPtrs <= 1 &&
"add has at most one pointer operand");
2579 [](
const APInt &C1,
const APInt &C2) {
return C1 + C2; },
2580 [](
const APInt &
C) {
return C.isZero(); },
2581 [](
const APInt &
C) {
return false; });
2594 return getOrCreateAddExpr(
Ops, ComputeFlags(
Ops));
2599 if (
Add->getNoWrapFlags(OrigFlags) != OrigFlags)
2600 Add->setNoWrapFlags(ComputeFlags(
Ops));
2608 bool FoundMatch =
false;
2609 for (
unsigned i = 0, e =
Ops.size(); i != e-1; ++i)
2610 if (
Ops[i] ==
Ops[i+1]) {
2622 --i; e -=
Count - 1;
2632 auto FindTruncSrcType = [&]() ->
Type * {
2638 return T->getOperand()->getType();
2640 SCEVUse LastOp =
Mul->getOperand(
Mul->getNumOperands() - 1);
2642 return T->getOperand()->getType();
2646 if (
auto *SrcType = FindTruncSrcType()) {
2653 if (
T->getOperand()->getType() != SrcType) {
2662 for (
unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
2665 if (
T->getOperand()->getType() != SrcType) {
2693 if (
Ops.size() == 2) {
2703 auto C2 =
C->getAPInt();
2706 APInt ConstAdd = C1 + C2;
2707 auto AddFlags = AddExpr->getNoWrapFlags();
2748 if (
Ops.size() == 2 &&
2759 if (Idx <
Ops.size()) {
2760 bool DeletedAdd =
false;
2771 Ops.erase(
Ops.begin()+Idx);
2774 CommonFlags =
maskFlags(CommonFlags,
Add->getNoWrapFlags());
2797 struct APIntCompare {
2798 bool operator()(
const APInt &LHS,
const APInt &RHS)
const {
2799 return LHS.ult(RHS);
2806 std::map<APInt, SmallVector<SCEVUse, 4>, APIntCompare> MulOpLists;
2807 for (
const SCEV *NewOp : NewOps)
2808 MulOpLists[M.find(NewOp)->second].push_back(NewOp);
2811 if (AccumulatedConstant != 0)
2813 for (
auto &MulOp : MulOpLists) {
2814 if (MulOp.first == 1) {
2816 }
else if (MulOp.first != 0) {
2825 if (
Ops.size() == 1)
2836 for (
unsigned MulOp = 0, e =
Mul->getNumOperands(); MulOp != e; ++MulOp) {
2837 const SCEV *MulOpSCEV =
Mul->getOperand(MulOp);
2840 for (
unsigned AddOp = 0, e =
Ops.size(); AddOp != e; ++AddOp)
2841 if (MulOpSCEV ==
Ops[AddOp]) {
2843 const SCEV *InnerMul =
Mul->getOperand(MulOp == 0);
2844 if (
Mul->getNumOperands() != 2) {
2851 const SCEV *AddOne =
2855 if (
Ops.size() == 2)
return OuterMul;
2857 Ops.erase(
Ops.begin()+AddOp);
2858 Ops.erase(
Ops.begin()+Idx-1);
2860 Ops.erase(
Ops.begin()+Idx);
2861 Ops.erase(
Ops.begin()+AddOp-1);
2863 Ops.push_back(OuterMul);
2868 for (
unsigned OtherMulIdx = Idx+1;
2875 OMulOp != e; ++OMulOp)
2876 if (OtherMul->
getOperand(OMulOp) == MulOpSCEV) {
2878 const SCEV *InnerMul1 =
Mul->getOperand(MulOp == 0);
2879 if (
Mul->getNumOperands() != 2) {
2887 OtherMul->
operands().take_front(OMulOp));
2891 const SCEV *InnerMulSum =
2895 if (
Ops.size() == 2)
return OuterMul;
2896 Ops.erase(
Ops.begin()+Idx);
2897 Ops.erase(
Ops.begin()+OtherMulIdx-1);
2898 Ops.push_back(OuterMul);
2918 for (
unsigned i = 0, e =
Ops.size(); i != e; ++i)
2921 Ops.erase(
Ops.begin()+i);
2926 if (!LIOps.
empty()) {
2951 auto *DefI = getDefiningScopeBound(LIOps);
2953 if (!isGuaranteedToTransferExecutionTo(DefI, ReachI))
2965 if (
Ops.size() == 1)
return NewRec;
2968 for (
unsigned i = 0;; ++i)
2969 if (
Ops[i] == AddRec) {
2979 for (
unsigned OtherIdx = Idx+1;
2987 "AddRecExprs are not sorted in reverse dominance order?");
2994 if (OtherAddRec->getLoop() == AddRecLoop) {
2995 for (
unsigned i = 0, e = OtherAddRec->getNumOperands();
2997 if (i >= AddRecOps.
size()) {
2998 append_range(AddRecOps, OtherAddRec->operands().drop_front(i));
3002 getAddExpr(AddRecOps[i], OtherAddRec->getOperand(i),
3005 Ops.erase(
Ops.begin() + OtherIdx); --OtherIdx;
3020 return getOrCreateAddExpr(
Ops, ComputeFlags(
Ops));
3031 static_cast<SCEVAddExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
3035 S =
new (SCEVAllocator)
3037 UniqueSCEVs.InsertNode(S, IP);
3047 FoldingSetNodeID
ID;
3049 for (
const SCEV *
Op :
Ops)
3054 static_cast<SCEVAddRecExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
3056 SCEVUse *
O = SCEVAllocator.Allocate<SCEVUse>(
Ops.size());
3058 S =
new (SCEVAllocator)
3059 SCEVAddRecExpr(
ID.Intern(SCEVAllocator), O,
Ops.size(), L);
3060 UniqueSCEVs.InsertNode(S, IP);
3061 LoopUsers[
L].push_back(S);
3070 FoldingSetNodeID
ID;
3072 for (
const SCEV *
Op :
Ops)
3076 static_cast<SCEVMulExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
3078 SCEVUse *
O = SCEVAllocator.Allocate<SCEVUse>(
Ops.size());
3080 S =
new (SCEVAllocator) SCEVMulExpr(
ID.Intern(SCEVAllocator),
3082 UniqueSCEVs.InsertNode(S, IP);
3091 if (j > 1 && k / j != i) Overflow =
true;
3107 if (n == 0 || n == k)
return 1;
3108 if (k > n)
return 0;
3114 for (
uint64_t i = 1; i <= k; ++i) {
3115 r =
umul_ov(r, n-(i-1), Overflow);
3124 struct FindConstantInAddMulChain {
3125 bool FoundConstant =
false;
3127 bool follow(
const SCEV *S) {
3132 bool isDone()
const {
3133 return FoundConstant;
3137 FindConstantInAddMulChain
F;
3139 ST.visitAll(StartExpr);
3140 return F.FoundConstant;
3148 "only nuw or nsw allowed");
3149 assert(!
Ops.empty() &&
"Cannot get empty mul!");
3150 if (
Ops.size() == 1)
return Ops[0];
3152 Type *ETy =
Ops[0]->getType();
3154 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i)
3156 "SCEVMulExpr operand types don't match!");
3161 [](
const APInt &C1,
const APInt &C2) {
return C1 * C2; },
3162 [](
const APInt &
C) {
return C.isOne(); },
3163 [](
const APInt &
C) {
return C.isZero(); });
3174 return getOrCreateMulExpr(
Ops, ComputeFlags(
Ops));
3179 if (
Mul->getNoWrapFlags(OrigFlags) != OrigFlags)
3180 Mul->setNoWrapFlags(ComputeFlags(
Ops));
3185 if (
Ops.size() == 2) {
3193 const SCEV *Op0, *Op1;
3201 if (
Ops[0]->isAllOnesValue()) {
3206 bool AnyFolded =
false;
3207 for (
const SCEV *AddOp :
Add->operands()) {
3234 AddRec->getNoWrapFlags(FlagsMask));
3257 APInt C1V = LHSC->getAPInt();
3267 const SCEV *NewMul =
nullptr;
3271 assert(C1V.
ugt(1) &&
"C1 <= 1 should have been folded earlier");
3286 if (Idx <
Ops.size()) {
3287 bool DeletedMul =
false;
3293 Ops.erase(
Ops.begin()+Idx);
3317 for (
unsigned i = 0, e =
Ops.size(); i != e; ++i)
3320 Ops.erase(
Ops.begin()+i);
3325 if (!LIOps.
empty()) {
3338 for (
unsigned i = 0, e = AddRec->
getNumOperands(); i != e; ++i) {
3354 if (
Ops.size() == 1)
return NewRec;
3357 for (
unsigned i = 0;; ++i)
3358 if (
Ops[i] == AddRec) {
3379 bool OpsModified =
false;
3380 for (
unsigned OtherIdx = Idx+1;
3394 bool Overflow =
false;
3401 for (
int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
3405 z < ze && !Overflow; ++z) {
3408 if (LargerThan64Bits)
3409 Coeff =
umul_ov(Coeff1, Coeff2, Overflow);
3411 Coeff = Coeff1*Coeff2;
3426 if (
Ops.size() == 2)
return NewAddRec;
3427 Ops[Idx] = NewAddRec;
3428 Ops.erase(
Ops.begin() + OtherIdx); --OtherIdx;
3444 return getOrCreateMulExpr(
Ops, ComputeFlags(
Ops));
3451 "SCEVURemExpr operand types don't match!");
3456 if (RHSC->getValue()->isOne())
3457 return getZero(LHS->getType());
3460 if (RHSC->getAPInt().isPowerOf2()) {
3461 Type *FullTy = LHS->getType();
3477 assert(!LHS->getType()->isPointerTy() &&
3478 "SCEVUDivExpr operand can't be pointer!");
3479 assert(LHS->getType() == RHS->getType() &&
3480 "SCEVUDivExpr operand types don't match!");
3487 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
3495 if (RHSC->getValue()->isOne())
3500 if (!RHSC->getValue()->isZero()) {
3504 Type *Ty = LHS->getType();
3505 unsigned LZ = RHSC->getAPInt().countl_zero();
3509 if (!RHSC->getAPInt().isPowerOf2())
3517 const APInt &StepInt = Step->getAPInt();
3518 const APInt &DivInt = RHSC->getAPInt();
3519 if (!StepInt.
urem(DivInt) &&
3525 for (
const SCEV *
Op : AR->operands())
3531 const APInt *StartRem;
3544 bool CanFoldWithWrap = StepInt.
ule(DivInt) &&
3548 const SCEV *NewStart =
3550 if (*StartRem != 0 && (NoWrap || CanFoldWithWrap) &&
3552 const SCEV *NewLHS =
3555 if (LHS != NewLHS) {
3565 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
3574 for (
const SCEV *
Op : M->operands())
3578 for (
unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
3579 const SCEV *
Op = M->getOperand(i);
3591 if (
auto *DivisorConstant =
3593 bool Overflow =
false;
3595 DivisorConstant->getAPInt().
umul_ov(RHSC->getAPInt(), Overflow);
3606 for (
const SCEV *
Op :
A->operands())
3610 for (
unsigned i = 0, e =
A->getNumOperands(); i != e; ++i) {
3617 if (Operands.
size() ==
A->getNumOperands())
3624 return getConstant(LHSC->getAPInt().udiv(RHSC->getAPInt()));
3634 return getZero(LHS->getType());
3638 const SCEV *NewLHS, *NewRHS;
3646 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
3649 UniqueSCEVs.InsertNode(S, IP);
3678 if (!
Mul || !
Mul->hasNoUnsignedWrap())
3685 if (LHSCst == RHSCst) {
3693 APInt Factor =
gcd(LHSCst, RHSCst);
3711 for (
int i = 0, e =
Mul->getNumOperands(); i != e; ++i) {
3712 if (
Mul->getOperand(i) == RHS) {
3731 if (StepChrec->getLoop() == L) {
3745 if (Operands.
size() == 1)
return Operands[0];
3750 "SCEVAddRecExpr operand types don't match!");
3751 assert(!
Op->getType()->isPointerTy() &&
"Step must be integer");
3753 for (
const SCEV *
Op : Operands)
3755 "SCEVAddRecExpr operand is not available at loop entry!");
3758 if (Operands.
back()->isZero()) {
3773 const Loop *NestedLoop = NestedAR->getLoop();
3774 if (L->contains(NestedLoop)
3777 DT.dominates(L->getHeader(), NestedLoop->
getHeader()))) {
3779 Operands[0] = NestedAR->getStart();
3783 bool AllInvariant =
all_of(
3795 AllInvariant =
all_of(NestedOperands, [&](
const SCEV *
Op) {
3806 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
3810 Operands[0] = NestedAR;
3816 return getOrCreateAddRecExpr(Operands, L, Flags);
3832 if (!GEPI || !isSCEVExprNeverPoison(GEPI))
3836 return getGEPExpr(BaseExpr, IndexExprs,
GEP->getSourceElementType(), NW);
3850 bool FirstIter =
true;
3852 for (
SCEVUse IndexExpr : IndexExprs) {
3859 Offsets.push_back(FieldOffset);
3862 CurTy = STy->getTypeAtIndex(Index);
3867 "The first index of a GEP indexes a pointer");
3868 CurTy = SrcElementTy;
3879 const SCEV *LocalOffset =
getMulExpr(IndexExpr, ElementSize, OffsetWrap);
3880 Offsets.push_back(LocalOffset);
3885 if (Offsets.empty())
3898 "GEP should not change type mid-flight.");
3902SCEV *ScalarEvolution::findExistingSCEVInCache(
SCEVTypes SCEVType,
3905 ID.AddInteger(SCEVType);
3909 return UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
3912SCEV *ScalarEvolution::findExistingSCEVInCache(
SCEVTypes SCEVType,
3915 ID.AddInteger(SCEVType);
3919 return UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
3929 assert(SCEVMinMaxExpr::isMinMaxType(Kind) &&
"Not a SCEVMinMaxExpr!");
3930 assert(!
Ops.empty() &&
"Cannot get empty (u|s)(min|max)!");
3931 if (
Ops.size() == 1)
return Ops[0];
3934 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i) {
3936 "Operand types don't match!");
3939 "min/max should be consistently pointerish");
3965 return IsSigned ?
C.isMinSignedValue() :
C.isMinValue();
3967 return IsSigned ?
C.isMaxSignedValue() :
C.isMaxValue();
3972 return IsSigned ?
C.isMaxSignedValue() :
C.isMaxValue();
3974 return IsSigned ?
C.isMinSignedValue() :
C.isMinValue();
3980 if (
const SCEV *S = findExistingSCEVInCache(Kind,
Ops)) {
3986 while (Idx <
Ops.size() &&
Ops[Idx]->getSCEVType() < Kind)
3991 if (Idx <
Ops.size()) {
3992 bool DeletedAny =
false;
3993 while (
Ops[Idx]->getSCEVType() == Kind) {
3995 Ops.erase(
Ops.begin()+Idx);
4013 for (
unsigned i = 0, e =
Ops.size() - 1; i != e; ++i) {
4014 if (
Ops[i] ==
Ops[i + 1] ||
4015 isKnownViaNonRecursiveReasoning(FirstPred,
Ops[i],
Ops[i + 1])) {
4018 Ops.erase(
Ops.begin() + i + 1,
Ops.begin() + i + 2);
4021 }
else if (isKnownViaNonRecursiveReasoning(SecondPred,
Ops[i],
4024 Ops.erase(
Ops.begin() + i,
Ops.begin() + i + 1);
4030 if (
Ops.size() == 1)
return Ops[0];
4032 assert(!
Ops.empty() &&
"Reduced smax down to nothing!");
4037 ID.AddInteger(Kind);
4041 const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
4043 return ExistingSCEV;
4046 SCEV *S =
new (SCEVAllocator)
4049 UniqueSCEVs.InsertNode(S, IP);
4056class SCEVSequentialMinMaxDeduplicatingVisitor final
4057 :
public SCEVVisitor<SCEVSequentialMinMaxDeduplicatingVisitor,
4058 std::optional<const SCEV *>> {
4059 using RetVal = std::optional<const SCEV *>;
4067 bool canRecurseInto(
SCEVTypes Kind)
const {
4070 return RootKind == Kind || NonSequentialRootKind == Kind;
4073 RetVal visitAnyMinMaxExpr(
const SCEV *S) {
4075 "Only for min/max expressions.");
4078 if (!canRecurseInto(Kind))
4088 return std::nullopt;
4095 RetVal
visit(
const SCEV *S) {
4097 if (!SeenOps.
insert(S).second)
4098 return std::nullopt;
4099 return Base::visit(S);
4103 SCEVSequentialMinMaxDeduplicatingVisitor(ScalarEvolution &SE,
4105 : SE(SE), RootKind(RootKind),
4106 NonSequentialRootKind(
4107 SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType(
4111 SmallVectorImpl<SCEVUse> &NewOps) {
4116 for (
const SCEV *
Op : OrigOps) {
4121 Ops.emplace_back(*NewOp);
4125 NewOps = std::move(
Ops);
4129 RetVal visitConstant(
const SCEVConstant *Constant) {
return Constant; }
4131 RetVal visitVScale(
const SCEVVScale *VScale) {
return VScale; }
4133 RetVal visitPtrToAddrExpr(
const SCEVPtrToAddrExpr *Expr) {
return Expr; }
4135 RetVal visitPtrToIntExpr(
const SCEVPtrToIntExpr *Expr) {
return Expr; }
4137 RetVal visitTruncateExpr(
const SCEVTruncateExpr *Expr) {
return Expr; }
4139 RetVal visitZeroExtendExpr(
const SCEVZeroExtendExpr *Expr) {
return Expr; }
4141 RetVal visitSignExtendExpr(
const SCEVSignExtendExpr *Expr) {
return Expr; }
4143 RetVal visitAddExpr(
const SCEVAddExpr *Expr) {
return Expr; }
4145 RetVal visitMulExpr(
const SCEVMulExpr *Expr) {
return Expr; }
4147 RetVal visitUDivExpr(
const SCEVUDivExpr *Expr) {
return Expr; }
4149 RetVal visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
return Expr; }
4151 RetVal visitSMaxExpr(
const SCEVSMaxExpr *Expr) {
4152 return visitAnyMinMaxExpr(Expr);
4155 RetVal visitUMaxExpr(
const SCEVUMaxExpr *Expr) {
4156 return visitAnyMinMaxExpr(Expr);
4159 RetVal visitSMinExpr(
const SCEVSMinExpr *Expr) {
4160 return visitAnyMinMaxExpr(Expr);
4163 RetVal visitUMinExpr(
const SCEVUMinExpr *Expr) {
4164 return visitAnyMinMaxExpr(Expr);
4167 RetVal visitSequentialUMinExpr(
const SCEVSequentialUMinExpr *Expr) {
4168 return visitAnyMinMaxExpr(Expr);
4171 RetVal visitUnknown(
const SCEVUnknown *Expr) {
return Expr; }
4173 RetVal visitCouldNotCompute(
const SCEVCouldNotCompute *Expr) {
return Expr; }
4216struct SCEVPoisonCollector {
4217 bool LookThroughMaybePoisonBlocking;
4218 SmallPtrSet<const SCEVUnknown *, 4> MaybePoison;
4219 SCEVPoisonCollector(
bool LookThroughMaybePoisonBlocking)
4220 : LookThroughMaybePoisonBlocking(LookThroughMaybePoisonBlocking) {}
4222 bool follow(
const SCEV *S) {
4223 if (!LookThroughMaybePoisonBlocking &&
4233 bool isDone()
const {
return false; }
4243 SCEVPoisonCollector PC1(
true);
4248 if (PC1.MaybePoison.empty())
4254 SCEVPoisonCollector PC2(
false);
4264 SCEVPoisonCollector PC(
false);
4287 while (!Worklist.
empty()) {
4289 if (!Visited.
insert(V).second)
4293 if (Visited.
size() > 16)
4298 if (PoisonVals.
contains(V) || ::isGuaranteedNotToBePoison(V))
4309 if (PDI->isDisjoint())
4316 II &&
II->getIntrinsicID() == Intrinsic::vscale)
4323 if (
I->hasPoisonGeneratingAnnotations())
4334 assert(SCEVSequentialMinMaxExpr::isSequentialMinMaxType(Kind) &&
4335 "Not a SCEVSequentialMinMaxExpr!");
4336 assert(!
Ops.empty() &&
"Cannot get empty (u|s)(min|max)!");
4337 if (
Ops.size() == 1)
4341 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i) {
4343 "Operand types don't match!");
4346 "min/max should be consistently pointerish");
4354 if (
const SCEV *S = findExistingSCEVInCache(Kind,
Ops))
4361 SCEVSequentialMinMaxDeduplicatingVisitor Deduplicator(*
this, Kind);
4371 bool DeletedAny =
false;
4372 while (Idx <
Ops.size()) {
4373 if (
Ops[Idx]->getSCEVType() != Kind) {
4378 Ops.erase(
Ops.begin() + Idx);
4379 Ops.insert(
Ops.begin() + Idx, SMME->operands().begin(),
4380 SMME->operands().end());
4388 const SCEV *SaturationPoint;
4399 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i) {
4400 if (!isGuaranteedNotToCauseUB(
Ops[i]))
4412 Ops.erase(
Ops.begin() + i);
4417 if (isKnownViaNonRecursiveReasoning(Pred,
Ops[i - 1],
Ops[i])) {
4418 Ops.erase(
Ops.begin() + i);
4426 ID.AddInteger(Kind);
4430 const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
4432 return ExistingSCEV;
4436 SCEV *S =
new (SCEVAllocator)
4439 UniqueSCEVs.InsertNode(S, IP);
4486 if (
Size.isScalable())
4507 "Cannot get offset for structure containing scalable vector types");
4521 if (
SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP)) {
4523 "Stale SCEVUnknown in uniquing map!");
4529 UniqueSCEVs.InsertNode(S, IP);
4543 return Ty->isIntOrPtrTy();
4550 if (Ty->isPointerTy())
4561 if (Ty->isIntegerTy())
4565 assert(Ty->isPointerTy() &&
"Unexpected non-pointer non-integer type!");
4577 bool PreciseA, PreciseB;
4578 auto *ScopeA = getDefiningScopeBound({
A}, PreciseA);
4579 auto *ScopeB = getDefiningScopeBound({
B}, PreciseB);
4580 if (!PreciseA || !PreciseB)
4583 return (ScopeA == ScopeB) || DT.dominates(ScopeA, ScopeB) ||
4584 DT.dominates(ScopeB, ScopeA);
4588 return CouldNotCompute.get();
4591bool ScalarEvolution::checkValidity(
const SCEV *S)
const {
4594 return SU && SU->getValue() ==
nullptr;
4597 return !ContainsNulls;
4602 if (
I != HasRecMap.end())
4607 HasRecMap.insert({S, FoundAddRec});
4615 if (
SI == ExprValueMap.
end())
4617 return SI->second.getArrayRef();
4623void ScalarEvolution::eraseValueFromMap(
Value *V) {
4625 if (
I != ValueExprMap.end()) {
4626 auto EVIt = ExprValueMap.find(
I->second);
4627 bool Removed = EVIt->second.remove(V);
4629 assert(Removed &&
"Value not in ExprValueMap?");
4630 ValueExprMap.erase(
I);
4634void ScalarEvolution::insertValueToMap(
Value *V,
const SCEV *S) {
4638 auto It = ValueExprMap.find_as(V);
4639 if (It == ValueExprMap.end()) {
4641 ExprValueMap[S].insert(V);
4652 return createSCEVIter(V);
4659 if (
I != ValueExprMap.end()) {
4660 const SCEV *S =
I->second;
4661 assert(checkValidity(S) &&
4662 "existing SCEV has not been properly invalidated");
4675 Type *Ty = V->getType();
4691 assert(!V->getType()->isPointerTy() &&
"Can't negate pointer");
4704 return (
const SCEV *)
nullptr;
4710 if (
const SCEV *Replaced = MatchMinMaxNegation(MME))
4714 Type *Ty = V->getType();
4720 assert(
P->getType()->isPointerTy());
4735 if (AddOp->getType()->isPointerTy()) {
4736 assert(!PtrOp &&
"Cannot have multiple pointer ops");
4754 return getZero(LHS->getType());
4759 if (RHS->getType()->isPointerTy()) {
4760 if (!LHS->getType()->isPointerTy() ||
4770 const bool RHSIsNotMinSigned =
4801 Type *SrcTy = V->getType();
4802 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4803 "Cannot truncate or zero extend with non-integer arguments!");
4813 Type *SrcTy = V->getType();
4814 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4815 "Cannot truncate or zero extend with non-integer arguments!");
4825 Type *SrcTy = V->getType();
4826 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4827 "Cannot noop or zero extend with non-integer arguments!");
4829 "getNoopOrZeroExtend cannot truncate!");
4837 Type *SrcTy = V->getType();
4838 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4839 "Cannot noop or sign extend with non-integer arguments!");
4841 "getNoopOrSignExtend cannot truncate!");
4849 Type *SrcTy = V->getType();
4850 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4851 "Cannot noop or any extend with non-integer arguments!");
4853 "getNoopOrAnyExtend cannot truncate!");
4861 Type *SrcTy = V->getType();
4862 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4863 "Cannot truncate or noop with non-integer arguments!");
4865 "getTruncateOrNoop cannot extend!");
4873 const SCEV *PromotedLHS = LHS;
4874 const SCEV *PromotedRHS = RHS;
4894 assert(!
Ops.empty() &&
"At least one operand must be!");
4896 if (
Ops.size() == 1)
4900 Type *MaxType =
nullptr;
4906 assert(MaxType &&
"Failed to find maximum type!");
4919 if (!V->getType()->isPointerTy())
4924 V = AddRec->getStart();
4926 const SCEV *PtrOp =
nullptr;
4927 for (
const SCEV *AddOp :
Add->operands()) {
4928 if (AddOp->getType()->isPointerTy()) {
4929 assert(!PtrOp &&
"Cannot have multiple pointer ops");
4933 assert(PtrOp &&
"Must have pointer op");
4945 for (
User *U :
I->users()) {
4947 if (Visited.
insert(UserInsn).second)
4961 static const SCEV *rewrite(
const SCEV *S,
const Loop *L, ScalarEvolution &SE,
4962 bool IgnoreOtherLoops =
true) {
4965 if (
Rewriter.hasSeenLoopVariantSCEVUnknown())
4967 return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
4972 const SCEV *visitUnknown(
const SCEVUnknown *Expr) {
4974 SeenLoopVariantSCEVUnknown =
true;
4978 const SCEV *visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
4982 SeenOtherLoops =
true;
4986 bool hasSeenLoopVariantSCEVUnknown() {
return SeenLoopVariantSCEVUnknown; }
4988 bool hasSeenOtherLoops() {
return SeenOtherLoops; }
4991 explicit SCEVInitRewriter(
const Loop *L, ScalarEvolution &SE)
4992 : SCEVRewriteVisitor(SE),
L(
L) {}
4995 bool SeenLoopVariantSCEVUnknown =
false;
4996 bool SeenOtherLoops =
false;
5005 static const SCEV *rewrite(
const SCEV *S,
const Loop *L, ScalarEvolution &SE) {
5006 SCEVPostIncRewriter
Rewriter(L, SE);
5008 return Rewriter.hasSeenLoopVariantSCEVUnknown()
5013 const SCEV *visitUnknown(
const SCEVUnknown *Expr) {
5015 SeenLoopVariantSCEVUnknown =
true;
5019 const SCEV *visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
5023 SeenOtherLoops =
true;
5027 bool hasSeenLoopVariantSCEVUnknown() {
return SeenLoopVariantSCEVUnknown; }
5029 bool hasSeenOtherLoops() {
return SeenOtherLoops; }
5032 explicit SCEVPostIncRewriter(
const Loop *L, ScalarEvolution &SE)
5033 : SCEVRewriteVisitor(SE),
L(
L) {}
5036 bool SeenLoopVariantSCEVUnknown =
false;
5037 bool SeenOtherLoops =
false;
5043class SCEVBackedgeConditionFolder
5046 static const SCEV *rewrite(
const SCEV *S,
const Loop *L,
5047 ScalarEvolution &SE) {
5048 bool IsPosBECond =
false;
5049 Value *BECond =
nullptr;
5050 if (BasicBlock *Latch =
L->getLoopLatch()) {
5052 assert(BI->getSuccessor(0) != BI->getSuccessor(1) &&
5053 "Both outgoing branches should not target same header!");
5054 BECond = BI->getCondition();
5055 IsPosBECond = BI->getSuccessor(0) ==
L->getHeader();
5060 SCEVBackedgeConditionFolder
Rewriter(L, BECond, IsPosBECond, SE);
5064 const SCEV *visitUnknown(
const SCEVUnknown *Expr) {
5065 const SCEV *
Result = Expr;
5070 switch (
I->getOpcode()) {
5071 case Instruction::Select: {
5073 std::optional<const SCEV *> Res =
5074 compareWithBackedgeCondition(
SI->getCondition());
5082 std::optional<const SCEV *> Res = compareWithBackedgeCondition(
I);
5093 explicit SCEVBackedgeConditionFolder(
const Loop *L,
Value *BECond,
5094 bool IsPosBECond, ScalarEvolution &SE)
5095 : SCEVRewriteVisitor(SE),
L(
L), BackedgeCond(BECond),
5096 IsPositiveBECond(IsPosBECond) {}
5098 std::optional<const SCEV *> compareWithBackedgeCondition(
Value *IC);
5102 Value *BackedgeCond =
nullptr;
5104 bool IsPositiveBECond;
5107std::optional<const SCEV *>
5108SCEVBackedgeConditionFolder::compareWithBackedgeCondition(
Value *IC) {
5113 if (BackedgeCond == IC)
5116 return std::nullopt;
5121 static const SCEV *rewrite(
const SCEV *S,
const Loop *L,
5122 ScalarEvolution &SE) {
5128 const SCEV *visitUnknown(
const SCEVUnknown *Expr) {
5135 const SCEV *visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
5145 explicit SCEVShiftRewriter(
const Loop *L, ScalarEvolution &SE)
5146 : SCEVRewriteVisitor(SE),
L(
L) {}
5155ScalarEvolution::proveNoWrapViaConstantRanges(
const SCEVAddRecExpr *AR) {
5159 using OBO = OverflowingBinaryOperator;
5167 const APInt &BECountAP = BECountMax->getAPInt();
5168 unsigned NoOverflowBitWidth =
5180 Instruction::Add, IncRange, OBO::NoSignedWrap);
5181 if (NSWRegion.contains(AddRecRange))
5190 Instruction::Add, IncRange, OBO::NoUnsignedWrap);
5191 if (NUWRegion.contains(AddRecRange))
5199ScalarEvolution::proveNoSignedWrapViaInduction(
const SCEVAddRecExpr *AR) {
5209 if (!SignedWrapViaInductionTried.insert(AR).second)
5234 AC.assumptions().empty())
5242 const SCEV *OverflowLimit =
5244 if (OverflowLimit &&
5252ScalarEvolution::proveNoUnsignedWrapViaInduction(
const SCEVAddRecExpr *AR) {
5262 if (!UnsignedWrapViaInductionTried.insert(AR).second)
5288 AC.assumptions().empty())
5323 explicit BinaryOp(Operator *
Op)
5327 IsNSW = OBO->hasNoSignedWrap();
5328 IsNUW = OBO->hasNoUnsignedWrap();
5332 explicit BinaryOp(
unsigned Opcode,
Value *
LHS,
Value *
RHS,
bool IsNSW =
false,
5334 : Opcode(Opcode),
LHS(
LHS),
RHS(
RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
5346 return std::nullopt;
5352 switch (
Op->getOpcode()) {
5353 case Instruction::Add:
5354 case Instruction::Sub:
5355 case Instruction::Mul:
5356 case Instruction::UDiv:
5357 case Instruction::URem:
5358 case Instruction::And:
5359 case Instruction::AShr:
5360 case Instruction::Shl:
5361 return BinaryOp(
Op);
5363 case Instruction::Or: {
5366 return BinaryOp(Instruction::Add,
Op->getOperand(0),
Op->getOperand(1),
5368 return BinaryOp(
Op);
5371 case Instruction::Xor:
5375 if (RHSC->getValue().isSignMask())
5376 return BinaryOp(Instruction::Add,
Op->getOperand(0),
Op->getOperand(1));
5378 if (V->getType()->isIntegerTy(1))
5379 return BinaryOp(Instruction::Add,
Op->getOperand(0),
Op->getOperand(1));
5380 return BinaryOp(
Op);
5382 case Instruction::LShr:
5391 if (SA->getValue().ult(
BitWidth)) {
5393 ConstantInt::get(SA->getContext(),
5395 return BinaryOp(Instruction::UDiv,
Op->getOperand(0),
X);
5398 return BinaryOp(
Op);
5400 case Instruction::ExtractValue: {
5402 if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
5410 bool Signed = WO->isSigned();
5413 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS());
5418 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(),
5429 if (
II->getIntrinsicID() == Intrinsic::loop_decrement_reg)
5430 return BinaryOp(Instruction::Sub,
II->getOperand(0),
II->getOperand(1));
5432 return std::nullopt;
5458 if (
Op == SymbolicPHI)
5463 if (SourceBits != NewBits)
5481 if (!L || L->getHeader() != PN->
getParent())
5539std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5540ScalarEvolution::createAddRecFromPHIWithCastsImpl(
const SCEVUnknown *SymbolicPHI) {
5548 assert(L &&
"Expecting an integer loop header phi");
5553 Value *BEValueV =
nullptr, *StartValueV =
nullptr;
5554 for (
unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
5555 Value *
V = PN->getIncomingValue(i);
5556 if (
L->contains(PN->getIncomingBlock(i))) {
5559 }
else if (BEValueV != V) {
5563 }
else if (!StartValueV) {
5565 }
else if (StartValueV != V) {
5566 StartValueV =
nullptr;
5570 if (!BEValueV || !StartValueV)
5571 return std::nullopt;
5573 const SCEV *BEValue =
getSCEV(BEValueV);
5580 return std::nullopt;
5584 unsigned FoundIndex =
Add->getNumOperands();
5585 Type *TruncTy =
nullptr;
5587 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5590 if (FoundIndex == e) {
5595 if (FoundIndex ==
Add->getNumOperands())
5596 return std::nullopt;
5600 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5601 if (i != FoundIndex)
5602 Ops.push_back(
Add->getOperand(i));
5608 return std::nullopt;
5661 const SCEV *StartVal =
getSCEV(StartValueV);
5662 const SCEV *PHISCEV =
5689 auto getExtendedExpr = [&](
const SCEV *Expr,
5690 bool CreateSignExtend) ->
const SCEV * {
5693 const SCEV *ExtendedExpr =
5696 return ExtendedExpr;
5704 auto PredIsKnownFalse = [&](
const SCEV *Expr,
5705 const SCEV *ExtendedExpr) ->
bool {
5706 return Expr != ExtendedExpr &&
5710 const SCEV *StartExtended = getExtendedExpr(StartVal,
Signed);
5711 if (PredIsKnownFalse(StartVal, StartExtended)) {
5713 return std::nullopt;
5718 const SCEV *AccumExtended = getExtendedExpr(Accum,
true);
5719 if (PredIsKnownFalse(Accum, AccumExtended)) {
5721 return std::nullopt;
5724 auto AppendPredicate = [&](
const SCEV *Expr,
5725 const SCEV *ExtendedExpr) ->
void {
5726 if (Expr != ExtendedExpr &&
5734 AppendPredicate(StartVal, StartExtended);
5735 AppendPredicate(Accum, AccumExtended);
5743 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
5744 std::make_pair(NewAR, Predicates);
5746 PredicatedSCEVRewrites[{SymbolicPHI,
L}] = PredRewrite;
5750std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5755 return std::nullopt;
5758 auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
5759 if (
I != PredicatedSCEVRewrites.end()) {
5760 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
5763 if (Rewrite.first == SymbolicPHI)
5764 return std::nullopt;
5768 assert(!(Rewrite.second).empty() &&
"Expected to find Predicates");
5772 std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5773 Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);
5778 PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
5779 return std::nullopt;
5796 auto areExprsEqual = [&](
const SCEV *Expr1,
const SCEV *Expr2) ->
bool {
5797 if (Expr1 != Expr2 &&
5798 !Preds->implies(SE.getEqualPredicate(Expr1, Expr2), SE) &&
5799 !Preds->implies(SE.getEqualPredicate(Expr2, Expr1), SE))
5816const SCEV *ScalarEvolution::createSimpleAffineAddRec(
PHINode *PN,
5818 Value *StartValueV) {
5821 assert(BEValueV && StartValueV);
5827 if (BO->Opcode != Instruction::Add)
5830 const SCEV *Accum =
nullptr;
5831 if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
5833 else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
5847 insertValueToMap(PN, PHISCEV);
5852 proveNoWrapViaConstantRanges(AR)));
5860 "Accum is defined outside L, but is not invariant?");
5861 if (isAddRecNeverPoison(BEInst, L))
5868const SCEV *ScalarEvolution::createAddRecFromPHI(
PHINode *PN) {
5869 const Loop *
L = LI.getLoopFor(PN->
getParent());
5876 Value *BEValueV =
nullptr, *StartValueV =
nullptr;
5882 }
else if (BEValueV != V) {
5886 }
else if (!StartValueV) {
5888 }
else if (StartValueV != V) {
5889 StartValueV =
nullptr;
5893 if (!BEValueV || !StartValueV)
5896 assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
5897 "PHI node already processed?");
5901 if (
auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
5906 insertValueToMap(PN, SymbolicName);
5910 const SCEV *BEValue =
getSCEV(BEValueV);
5920 unsigned FoundIndex =
Add->getNumOperands();
5921 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5922 if (
Add->getOperand(i) == SymbolicName)
5923 if (FoundIndex == e) {
5928 if (FoundIndex !=
Add->getNumOperands()) {
5931 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5932 if (i != FoundIndex)
5933 Ops.push_back(SCEVBackedgeConditionFolder::rewrite(
Add->getOperand(i),
5945 if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
5952 if (
GEP->getOperand(0) == PN) {
5953 GEPNoWrapFlags NW =
GEP->getNoWrapFlags();
5971 const SCEV *StartVal =
getSCEV(StartValueV);
5972 const SCEV *PHISCEV =
getAddRecExpr(StartVal, Accum, L, Flags);
5977 forgetMemoizedResults({SymbolicName});
5978 insertValueToMap(PN, PHISCEV);
5983 proveNoWrapViaConstantRanges(AR)));
6008 const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *
this);
6009 const SCEV *
Start = SCEVInitRewriter::rewrite(Shifted, L, *
this,
false);
6011 isGuaranteedNotToCauseUB(Shifted) &&
::impliesPoison(Shifted, Start)) {
6012 const SCEV *StartVal =
getSCEV(StartValueV);
6013 if (Start == StartVal) {
6017 forgetMemoizedResults({SymbolicName});
6018 insertValueToMap(PN, Shifted);
6028 eraseValueFromMap(PN);
6043 Use &LeftUse =
Merge->getOperandUse(0);
6044 Use &RightUse =
Merge->getOperandUse(1);
6080 assert(IDom &&
"At least the entry block should dominate PN");
6088const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(
PHINode *PN) {
6093 return createNodeForSelectOrPHI(PN,
Cond,
LHS,
RHS);
6110 CommonInst = IncomingInst;
6126ScalarEvolution::createNodeForPHIWithIdenticalOperands(
PHINode *PN) {
6132 const SCEV *CommonSCEV =
getSCEV(CommonInst);
6133 bool SCEVExprsIdentical =
6135 [
this, CommonSCEV](
Value *V) { return CommonSCEV == getSCEV(V); });
6136 return SCEVExprsIdentical ? CommonSCEV :
nullptr;
6139const SCEV *ScalarEvolution::createNodeForPHI(
PHINode *PN) {
6140 if (
const SCEV *S = createAddRecFromPHI(PN))
6150 if (
const SCEV *S = createNodeForPHIWithIdenticalOperands(PN))
6153 if (
const SCEV *S = createNodeFromSelectLikePHI(PN))
6162 struct FindClosure {
6163 const SCEV *OperandToFind;
6169 bool canRecurseInto(
SCEVTypes Kind)
const {
6172 return RootKind == Kind || NonSequentialRootKind == Kind ||
6177 : OperandToFind(OperandToFind), RootKind(RootKind),
6178 NonSequentialRootKind(
6182 bool follow(
const SCEV *S) {
6183 Found = S == OperandToFind;
6185 return !isDone() && canRecurseInto(S->
getSCEVType());
6188 bool isDone()
const {
return Found; }
6191 FindClosure FC(OperandToFind, RootKind);
6196std::optional<const SCEV *>
6197ScalarEvolution::createNodeForSelectOrPHIInstWithICmpInstCond(
Type *Ty,
6207 switch (ICI->getPredicate()) {
6221 bool Signed = ICI->isSigned();
6222 const SCEV *LA =
getSCEV(TrueVal);
6230 if (LA == LS &&
RA == RS)
6232 if (LA == RS &&
RA == LS)
6235 auto CoerceOperand = [&](
const SCEV *
Op) ->
const SCEV * {
6236 if (
Op->getType()->isPointerTy()) {
6247 LS = CoerceOperand(LS);
6248 RS = CoerceOperand(RS);
6272 const SCEV *TrueValExpr =
getSCEV(TrueVal);
6273 const SCEV *FalseValExpr =
getSCEV(FalseVal);
6287 X = ZExt->getOperand();
6289 const SCEV *FalseValExpr =
getSCEV(FalseVal);
6300 return std::nullopt;
6303static std::optional<const SCEV *>
6305 const SCEV *TrueExpr,
const SCEV *FalseExpr) {
6309 "Unexpected operands of a select.");
6321 return std::nullopt;
6336static std::optional<const SCEV *>
6340 return std::nullopt;
6343 const auto *SETrue = SE->
getSCEV(TrueVal);
6344 const auto *SEFalse = SE->
getSCEV(FalseVal);
6348const SCEV *ScalarEvolution::createNodeForSelectOrPHIViaUMinSeq(
6350 assert(
Cond->getType()->isIntegerTy(1) &&
"Select condition is not an i1?");
6352 V->getType() ==
TrueVal->getType() &&
6353 "Types of select hands and of the result must match.");
6356 if (!
V->getType()->isIntegerTy(1))
6359 if (std::optional<const SCEV *> S =
6372 return getSCEV(CI->isOne() ? TrueVal : FalseVal);
6376 if (std::optional<const SCEV *> S =
6377 createNodeForSelectOrPHIInstWithICmpInstCond(
I->getType(), ICI,
6383 return createNodeForSelectOrPHIViaUMinSeq(V,
Cond, TrueVal, FalseVal);
6389 assert(
GEP->getSourceElementType()->isSized() &&
6390 "GEP source element type must be sized");
6393 for (
Value *Index :
GEP->indices())
6398APInt ScalarEvolution::getConstantMultipleImpl(
const SCEV *S,
6401 auto GetShiftedByZeros = [
BitWidth](uint32_t TrailingZeros) {
6404 : APInt::getOneBitSet(
BitWidth, TrailingZeros);
6406 auto GetGCDMultiple = [
this, CtxI](
const SCEVNAryExpr *
N) {
6409 for (
unsigned I = 1,
E =
N->getNumOperands();
I <
E && Res != 1; ++
I)
6428 return GetShiftedByZeros(TZ);
6438 return GetShiftedByZeros(TZ);
6442 if (
M->hasNoUnsignedWrap()) {
6445 for (
const SCEV *Operand :
M->operands().drop_front())
6453 for (
const SCEV *Operand :
M->operands())
6455 return GetShiftedByZeros(TZ);
6460 if (
N->hasNoUnsignedWrap())
6461 return GetGCDMultiple(
N);
6464 for (
const SCEV *Operand :
N->operands().drop_front())
6466 return GetShiftedByZeros(TZ);
6483 CtxI = &*F.getEntryBlock().begin();
6489 .countMinTrailingZeros();
6490 return GetShiftedByZeros(Known);
6503 return getConstantMultipleImpl(S, CtxI);
6505 auto I = ConstantMultipleCache.find(S);
6506 if (
I != ConstantMultipleCache.end())
6509 APInt Result = getConstantMultipleImpl(S, CtxI);
6510 auto InsertPair = ConstantMultipleCache.insert({S, Result});
6511 assert(InsertPair.second &&
"Should insert a new key");
6512 return InsertPair.first->second;
6529 if (
MDNode *MD =
I->getMetadata(LLVMContext::MD_range))
6532 if (std::optional<ConstantRange>
Range = CB->getRange())
6536 if (std::optional<ConstantRange>
Range =
A->getRange())
6539 return std::nullopt;
6546 UnsignedRanges.erase(AddRec);
6547 SignedRanges.erase(AddRec);
6548 ConstantMultipleCache.erase(AddRec);
6553getRangeForUnknownRecurrence(
const SCEVUnknown *U) {
6579 Value *Start, *Step;
6586 assert(L && L->getHeader() ==
P->getParent());
6599 case Instruction::AShr:
6600 case Instruction::LShr:
6601 case Instruction::Shl:
6616 KnownStep.getBitWidth() ==
BitWidth);
6619 auto MaxShiftAmt = KnownStep.getMaxValue();
6621 bool Overflow =
false;
6622 auto TotalShift = MaxShiftAmt.umul_ov(TCAP, Overflow);
6629 case Instruction::AShr: {
6637 if (KnownStart.isNonNegative())
6640 KnownStart.getMaxValue() + 1);
6641 if (KnownStart.isNegative())
6644 KnownEnd.getMaxValue() + 1);
6647 case Instruction::LShr: {
6656 KnownStart.getMaxValue() + 1);
6658 case Instruction::Shl: {
6662 if (TotalShift.ult(KnownStart.countMinLeadingZeros()))
6663 return ConstantRange(KnownStart.getMinValue(),
6664 KnownEnd.getMaxValue() + 1);
6689 [&](
Value *Operand) { return DT.dominates(Operand, PHI); }))
6696ScalarEvolution::getRangeRefIter(
const SCEV *S,
6697 ScalarEvolution::RangeSignHint SignHint) {
6698 DenseMap<const SCEV *, ConstantRange> &Cache =
6699 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
6702 SmallPtrSet<const SCEV *, 8> Seen;
6706 auto AddToWorklist = [&WorkList, &Seen, &Cache](
const SCEV *Expr) {
6707 if (!Seen.
insert(Expr).second)
6741 for (
unsigned I = 0;
I != WorkList.
size(); ++
I) {
6742 const SCEV *
P = WorkList[
I];
6746 for (
const SCEV *
Op :
P->operands())
6759 if (!WorkList.
empty()) {
6764 getRangeRef(
P, SignHint);
6768 return getRangeRef(S, SignHint, 0);
6775 const SCEV *S, ScalarEvolution::RangeSignHint SignHint,
unsigned Depth) {
6776 DenseMap<const SCEV *, ConstantRange> &Cache =
6777 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
6785 if (
I != Cache.
end())
6789 return setRange(
C, SignHint, ConstantRange(
C->getAPInt()));
6794 return getRangeRefIter(S, SignHint);
6797 ConstantRange ConservativeResult(
BitWidth,
true);
6798 using OBO = OverflowingBinaryOperator;
6802 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
6806 ConservativeResult =
6813 ConservativeResult = ConstantRange(
6829 ConservativeResult.intersectWith(
X.truncate(
BitWidth), RangeType));
6836 ConservativeResult.intersectWith(
X.zeroExtend(
BitWidth), RangeType));
6843 ConservativeResult.intersectWith(
X.signExtend(
BitWidth), RangeType));
6849 return setRange(Cast, SignHint,
X);
6854 const SCEV *URemLHS =
nullptr, *URemRHS =
nullptr;
6855 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED &&
6857 ConstantRange LHSRange = getRangeRef(URemLHS, SignHint,
Depth + 1);
6858 ConstantRange RHSRange = getRangeRef(URemRHS, SignHint,
Depth + 1);
6859 ConservativeResult =
6860 ConservativeResult.intersectWith(LHSRange.
urem(RHSRange), RangeType);
6862 ConstantRange
X = getRangeRef(
Add->getOperand(0), SignHint,
Depth + 1);
6863 unsigned WrapType = OBO::AnyWrap;
6864 if (
Add->hasNoSignedWrap())
6865 WrapType |= OBO::NoSignedWrap;
6866 if (
Add->hasNoUnsignedWrap())
6867 WrapType |= OBO::NoUnsignedWrap;
6869 X =
X.addWithNoWrap(getRangeRef(
Op, SignHint,
Depth + 1), WrapType,
6871 return setRange(
Add, SignHint,
6872 ConservativeResult.intersectWith(
X, RangeType));
6876 ConstantRange
X = getRangeRef(
Mul->getOperand(0), SignHint,
Depth + 1);
6878 X =
X.multiply(getRangeRef(
Op, SignHint,
Depth + 1));
6879 return setRange(
Mul, SignHint,
6880 ConservativeResult.intersectWith(
X, RangeType));
6884 ConstantRange
X = getRangeRef(UDiv->
getLHS(), SignHint,
Depth + 1);
6885 ConstantRange
Y = getRangeRef(UDiv->
getRHS(), SignHint,
Depth + 1);
6886 return setRange(UDiv, SignHint,
6887 ConservativeResult.intersectWith(
X.udiv(
Y), RangeType));
6895 if (!UnsignedMinValue.
isZero())
6896 ConservativeResult = ConservativeResult.intersectWith(
6897 ConstantRange(UnsignedMinValue, APInt(
BitWidth, 0)), RangeType);
6906 bool AllNonNeg =
true;
6907 bool AllNonPos =
true;
6908 for (
unsigned i = 1, e = AddRec->
getNumOperands(); i != e; ++i) {
6915 ConservativeResult = ConservativeResult.intersectWith(
6920 ConservativeResult = ConservativeResult.intersectWith(
6929 const SCEV *MaxBEScev =
6943 auto RangeFromAffine = getRangeForAffineAR(
6945 ConservativeResult =
6946 ConservativeResult.intersectWith(RangeFromAffine, RangeType);
6948 auto RangeFromFactoring = getRangeViaFactoring(
6950 ConservativeResult =
6951 ConservativeResult.intersectWith(RangeFromFactoring, RangeType);
6957 const SCEV *SymbolicMaxBECount =
6962 auto RangeFromAffineNew = getRangeForAffineNoSelfWrappingAR(
6963 AddRec, SymbolicMaxBECount,
BitWidth, SignHint);
6964 ConservativeResult =
6965 ConservativeResult.intersectWith(RangeFromAffineNew, RangeType);
6970 return setRange(AddRec, SignHint, std::move(ConservativeResult));
6980 ID = Intrinsic::umax;
6983 ID = Intrinsic::smax;
6987 ID = Intrinsic::umin;
6990 ID = Intrinsic::smin;
6997 ConstantRange
X = getRangeRef(NAry->getOperand(0), SignHint,
Depth + 1);
6998 for (
unsigned i = 1, e = NAry->getNumOperands(); i != e; ++i)
7000 ID, {
X, getRangeRef(NAry->getOperand(i), SignHint,
Depth + 1)});
7001 return setRange(S, SignHint,
7002 ConservativeResult.intersectWith(
X, RangeType));
7011 ConservativeResult =
7012 ConservativeResult.intersectWith(*MDRange, RangeType);
7017 auto CR = getRangeForUnknownRecurrence(U);
7018 ConservativeResult = ConservativeResult.intersectWith(CR);
7029 if (
U->getType()->isPointerTy()) {
7032 unsigned ptrSize = DL.getPointerTypeSizeInBits(
U->getType());
7033 int ptrIdxDiff = ptrSize -
BitWidth;
7034 if (ptrIdxDiff > 0 && ptrSize >
BitWidth && NS > (
unsigned)ptrIdxDiff)
7047 ConservativeResult = ConservativeResult.intersectWith(
7051 ConservativeResult = ConservativeResult.intersectWith(
7056 if (
U->getType()->isPointerTy() && SignHint == HINT_RANGE_UNSIGNED) {
7059 bool CanBeNull, CanBeFreed;
7060 uint64_t DerefBytes =
7061 V->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
7071 uint64_t
Align =
U->getValue()->getPointerAlignment(DL).value();
7072 uint64_t Rem = MaxVal.
urem(Align);
7077 ConservativeResult = ConservativeResult.intersectWith(
7087 return getRangeRef(AR, SignHint,
Depth + 1);
7091 ConstantRange RangeFromOps(
BitWidth,
false);
7093 for (
const auto &
Op :
Phi->operands()) {
7095 RangeFromOps = RangeFromOps.unionWith(OpRange);
7097 if (RangeFromOps.isFullSet())
7100 ConservativeResult =
7101 ConservativeResult.intersectWith(RangeFromOps, RangeType);
7107 if (
II->getIntrinsicID() == Intrinsic::vscale) {
7109 ConservativeResult = ConservativeResult.difference(Disallowed);
7112 return setRange(U, SignHint, std::move(ConservativeResult));
7118 return setRange(S, SignHint, std::move(ConservativeResult));
7127 const APInt &MaxBECount,
7134 if (Step == 0 || MaxBECount == 0)
7141 return ConstantRange::getFull(
BitWidth);
7157 return ConstantRange::getFull(
BitWidth);
7169 APInt MovedBoundary = Descending ? (StartLower - std::move(
Offset))
7170 : (StartUpper + std::move(
Offset));
7175 if (StartRange.
contains(MovedBoundary))
7176 return ConstantRange::getFull(
BitWidth);
7179 Descending ? std::move(MovedBoundary) : std::move(StartLower);
7181 Descending ? std::move(StartUpper) : std::move(MovedBoundary);
7190 const APInt &MaxBECount) {
7194 "mismatched bit widths");
7203 StepSRange.
getSignedMin(), StartSRange, MaxBECount,
true);
7205 StartSRange, MaxBECount,
7217ConstantRange ScalarEvolution::getRangeForAffineNoSelfWrappingAR(
7219 ScalarEvolution::RangeSignHint SignHint) {
7220 assert(AddRec->
isAffine() &&
"Non-affine AddRecs are not suppored!\n");
7222 "This only works for non-self-wrapping AddRecs!");
7223 const bool IsSigned = SignHint == HINT_RANGE_SIGNED;
7227 return ConstantRange::getFull(
BitWidth);
7235 return ConstantRange::getFull(
BitWidth);
7239 const SCEV *MaxItersWithoutWrap =
getUDivExpr(RangeWidth, StepAbs);
7241 MaxItersWithoutWrap))
7242 return ConstantRange::getFull(
BitWidth);
7263 ConstantRange StartRange = getRangeRef(Start, SignHint);
7264 ConstantRange EndRange = getRangeRef(End, SignHint);
7265 ConstantRange RangeBetween = StartRange.
unionWith(EndRange);
7269 return RangeBetween;
7274 return ConstantRange::getFull(
BitWidth);
7277 isKnownPredicateViaConstantRanges(LEPred, Start, End))
7278 return RangeBetween;
7280 isKnownPredicateViaConstantRanges(GEPred, Start, End))
7281 return RangeBetween;
7282 return ConstantRange::getFull(
BitWidth);
7287 const APInt &MaxBECount) {
7294 "mismatched bit widths");
7296 struct SelectPattern {
7297 Value *Condition =
nullptr;
7301 explicit SelectPattern(ScalarEvolution &SE,
unsigned BitWidth,
7303 std::optional<unsigned> CastOp;
7317 CastOp = SCast->getSCEVType();
7318 S = SCast->getOperand();
7321 using namespace llvm::PatternMatch;
7328 Condition =
nullptr;
7360 bool isRecognized() {
return Condition !=
nullptr; }
7363 SelectPattern StartPattern(*
this,
BitWidth, Start);
7364 if (!StartPattern.isRecognized())
7365 return ConstantRange::getFull(
BitWidth);
7367 SelectPattern StepPattern(*
this,
BitWidth, Step);
7368 if (!StepPattern.isRecognized())
7369 return ConstantRange::getFull(
BitWidth);
7371 if (StartPattern.Condition != StepPattern.Condition) {
7375 return ConstantRange::getFull(
BitWidth);
7386 const SCEV *TrueStart = this->
getConstant(StartPattern.TrueValue);
7387 const SCEV *TrueStep = this->
getConstant(StepPattern.TrueValue);
7388 const SCEV *FalseStart = this->
getConstant(StartPattern.FalseValue);
7389 const SCEV *FalseStep = this->
getConstant(StepPattern.FalseValue);
7391 ConstantRange TrueRange =
7392 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount);
7393 ConstantRange FalseRange =
7394 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount);
7416ScalarEvolution::getNonTrivialDefiningScopeBound(
const SCEV *S) {
7429 SmallPtrSet<const SCEV *, 16> Visited;
7431 auto pushOp = [&](
const SCEV *S) {
7432 if (!Visited.
insert(S).second)
7435 if (Visited.
size() > 30) {
7442 for (SCEVUse S :
Ops)
7446 while (!Worklist.
empty()) {
7448 if (
auto *DefI = getNonTrivialDefiningScopeBound(S)) {
7449 if (!Bound || DT.dominates(Bound, DefI))
7456 return Bound ? Bound : &*F.getEntryBlock().begin();
7462 return getDefiningScopeBound(
Ops, Discard);
7465bool ScalarEvolution::isGuaranteedToTransferExecutionTo(
const Instruction *
A,
7467 if (
A->getParent() ==
B->getParent() &&
7472 auto *BLoop = LI.getLoopFor(
B->getParent());
7473 if (BLoop && BLoop->getHeader() ==
B->getParent() &&
7474 BLoop->getLoopPreheader() ==
A->getParent() &&
7476 A->getParent()->end()) &&
7483bool ScalarEvolution::isGuaranteedNotToBePoison(
const SCEV *
Op) {
7484 SCEVPoisonCollector PC(
true);
7486 return PC.MaybePoison.empty();
7489bool ScalarEvolution::isGuaranteedNotToCauseUB(
const SCEV *
Op) {
7495 return M && (!
isKnownNonZero(Op1) || !isGuaranteedNotToBePoison(Op1));
7499bool ScalarEvolution::isSCEVExprNeverPoison(
const Instruction *
I) {
7516 for (
const Use &
Op :
I->operands()) {
7522 auto *DefI = getDefiningScopeBound(SCEVOps);
7523 return isGuaranteedToTransferExecutionTo(DefI,
I);
7526bool ScalarEvolution::isAddRecNeverPoison(
const Instruction *
I,
const Loop *L) {
7528 if (isSCEVExprNeverPoison(
I))
7539 auto *ExitingBB =
L->getExitingBlock();
7543 SmallPtrSet<const Value *, 16> KnownPoison;
7552 while (!Worklist.
empty()) {
7555 for (
const Use &U :
Poison->uses()) {
7558 DT.dominates(PoisonUser->
getParent(), ExitingBB))
7562 if (KnownPoison.
insert(PoisonUser).second)
7570ScalarEvolution::LoopProperties
7571ScalarEvolution::getLoopProperties(
const Loop *L) {
7572 using LoopProperties = ScalarEvolution::LoopProperties;
7574 auto Itr = LoopPropertiesCache.find(L);
7575 if (Itr == LoopPropertiesCache.end()) {
7578 return !
SI->isSimple();
7588 return I->mayWriteToMemory();
7591 LoopProperties LP = {
true,
7594 for (
auto *BB :
L->getBlocks())
7595 for (
auto &
I : *BB) {
7597 LP.HasNoAbnormalExits =
false;
7598 if (HasSideEffects(&
I))
7599 LP.HasNoSideEffects =
false;
7600 if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
7604 auto InsertPair = LoopPropertiesCache.insert({
L, LP});
7605 assert(InsertPair.second &&
"We just checked!");
7606 Itr = InsertPair.first;
7619const SCEV *ScalarEvolution::createSCEVIter(
Value *V) {
7625 Stack.emplace_back(V,
true);
7626 Stack.emplace_back(V,
false);
7627 while (!Stack.empty()) {
7628 auto E = Stack.pop_back_val();
7629 Value *CurV = E.getPointer();
7635 const SCEV *CreatedSCEV =
nullptr;
7638 CreatedSCEV = createSCEV(CurV);
7643 CreatedSCEV = getOperandsToCreate(CurV,
Ops);
7647 insertValueToMap(CurV, CreatedSCEV);
7651 Stack.emplace_back(CurV,
true);
7653 Stack.emplace_back(
Op,
false);
7670 if (!DT.isReachableFromEntry(
I->getParent()))
7683 switch (BO->Opcode) {
7684 case Instruction::Add:
7685 case Instruction::Mul: {
7692 Ops.push_back(BO->
Op);
7696 Ops.push_back(BO->RHS);
7700 (BO->Opcode == Instruction::Add &&
7701 (NewBO->Opcode != Instruction::Add &&
7702 NewBO->Opcode != Instruction::Sub)) ||
7703 (BO->Opcode == Instruction::Mul &&
7704 NewBO->Opcode != Instruction::Mul)) {
7705 Ops.push_back(BO->LHS);
7710 if (BO->
Op && (BO->IsNSW || BO->IsNUW)) {
7713 Ops.push_back(BO->LHS);
7721 case Instruction::Sub:
7722 case Instruction::UDiv:
7723 case Instruction::URem:
7725 case Instruction::AShr:
7726 case Instruction::Shl:
7727 case Instruction::Xor:
7731 case Instruction::And:
7732 case Instruction::Or:
7736 case Instruction::LShr:
7743 Ops.push_back(BO->LHS);
7744 Ops.push_back(BO->RHS);
7748 switch (
U->getOpcode()) {
7749 case Instruction::Trunc:
7750 case Instruction::ZExt:
7751 case Instruction::SExt:
7752 case Instruction::PtrToAddr:
7753 case Instruction::PtrToInt:
7754 Ops.push_back(
U->getOperand(0));
7757 case Instruction::BitCast:
7759 Ops.push_back(
U->getOperand(0));
7764 case Instruction::SDiv:
7765 case Instruction::SRem:
7766 Ops.push_back(
U->getOperand(0));
7767 Ops.push_back(
U->getOperand(1));
7770 case Instruction::GetElementPtr:
7772 "GEP source element type must be sized");
7776 case Instruction::IntToPtr:
7779 case Instruction::PHI:
7810 Ops.push_back(CondICmp->getOperand(0));
7811 Ops.push_back(CondICmp->getOperand(1));
7831 case Instruction::Select: {
7833 auto CanSimplifyToUnknown = [
this,
U]() {
7851 if (CanSimplifyToUnknown())
7858 case Instruction::Call:
7859 case Instruction::Invoke:
7866 switch (
II->getIntrinsicID()) {
7867 case Intrinsic::abs:
7868 Ops.push_back(
II->getArgOperand(0));
7870 case Intrinsic::umax:
7871 case Intrinsic::umin:
7872 case Intrinsic::smax:
7873 case Intrinsic::smin:
7874 case Intrinsic::usub_sat:
7875 case Intrinsic::uadd_sat:
7876 Ops.push_back(
II->getArgOperand(0));
7877 Ops.push_back(
II->getArgOperand(1));
7879 case Intrinsic::start_loop_iterations:
7880 case Intrinsic::annotation:
7881 case Intrinsic::ptr_annotation:
7882 Ops.push_back(
II->getArgOperand(0));
7894const SCEV *ScalarEvolution::createSCEV(
Value *V) {
7903 if (!DT.isReachableFromEntry(
I->getParent()))
7918 switch (BO->Opcode) {
7919 case Instruction::Add: {
7945 if (BO->Opcode == Instruction::Sub)
7953 if (BO->Opcode == Instruction::Sub)
7960 if (!NewBO || (NewBO->Opcode != Instruction::Add &&
7961 NewBO->Opcode != Instruction::Sub)) {
7971 case Instruction::Mul: {
7992 if (!NewBO || NewBO->Opcode != Instruction::Mul) {
8001 case Instruction::UDiv:
8005 case Instruction::URem:
8009 case Instruction::Sub: {
8012 Flags = getNoWrapFlagsFromUB(BO->
Op);
8017 case Instruction::And:
8023 if (CI->isMinusOne())
8025 const APInt &
A = CI->getValue();
8031 unsigned LZ =
A.countl_zero();
8032 unsigned TZ =
A.countr_zero();
8037 APInt EffectiveMask =
8039 if ((LZ != 0 || TZ != 0) && !((~
A & ~Known.
Zero) & EffectiveMask)) {
8042 const SCEV *ShiftedLHS =
nullptr;
8046 unsigned MulZeros = OpC->getAPInt().countr_zero();
8047 unsigned GCD = std::min(MulZeros, TZ);
8052 auto *NewMul =
getMulExpr(MulOps, LHSMul->getNoWrapFlags());
8074 case Instruction::Or:
8083 case Instruction::Xor:
8086 if (CI->isMinusOne())
8095 if (LBO->getOpcode() == Instruction::And &&
8096 LCI->getValue() == CI->getValue())
8097 if (
const SCEVZeroExtendExpr *Z =
8100 const SCEV *Z0 =
Z->getOperand();
8107 if (CI->getValue().isMask(Z0TySize))
8113 APInt Trunc = CI->getValue().trunc(Z0TySize);
8122 case Instruction::Shl:
8140 auto MulFlags = getNoWrapFlagsFromUB(BO->
Op);
8148 ConstantInt *
X = ConstantInt::get(
8154 case Instruction::AShr:
8176 const SCEV *AddTruncateExpr =
nullptr;
8177 ConstantInt *ShlAmtCI =
nullptr;
8178 const SCEV *AddConstant =
nullptr;
8180 if (L &&
L->getOpcode() == Instruction::Add) {
8188 if (LShift && LShift->
getOpcode() == Instruction::Shl) {
8195 APInt AddOperand = AddOperandCI->
getValue().
ashr(AShrAmt);
8203 }
else if (L &&
L->getOpcode() == Instruction::Shl) {
8208 const SCEV *ShlOp0SCEV =
getSCEV(
L->getOperand(0));
8213 if (AddTruncateExpr && ShlAmtCI) {
8225 const APInt &ShlAmt = ShlAmtCI->
getValue();
8229 const SCEV *CompositeExpr =
8231 if (
L->getOpcode() != Instruction::Shl)
8232 CompositeExpr =
getAddExpr(CompositeExpr, AddConstant);
8241 switch (
U->getOpcode()) {
8242 case Instruction::Trunc:
8245 case Instruction::ZExt:
8248 case Instruction::SExt:
8258 if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
8259 Type *Ty =
U->getType();
8267 case Instruction::BitCast:
8273 case Instruction::PtrToAddr: {
8280 case Instruction::PtrToInt: {
8283 Type *DstIntTy =
U->getType();
8291 case Instruction::IntToPtr:
8295 case Instruction::SDiv:
8302 case Instruction::SRem:
8309 case Instruction::GetElementPtr:
8312 case Instruction::PHI:
8315 case Instruction::Select:
8316 return createNodeForSelectOrPHI(U,
U->getOperand(0),
U->getOperand(1),
8319 case Instruction::Call:
8320 case Instruction::Invoke:
8325 switch (
II->getIntrinsicID()) {
8326 case Intrinsic::abs:
8330 case Intrinsic::umax:
8334 case Intrinsic::umin:
8338 case Intrinsic::smax:
8342 case Intrinsic::smin:
8346 case Intrinsic::usub_sat: {
8347 const SCEV *
X =
getSCEV(
II->getArgOperand(0));
8348 const SCEV *
Y =
getSCEV(
II->getArgOperand(1));
8352 case Intrinsic::uadd_sat: {
8353 const SCEV *
X =
getSCEV(
II->getArgOperand(0));
8354 const SCEV *
Y =
getSCEV(
II->getArgOperand(1));
8358 case Intrinsic::start_loop_iterations:
8359 case Intrinsic::annotation:
8360 case Intrinsic::ptr_annotation:
8364 case Intrinsic::vscale:
8384 auto *ExitCountType = ExitCount->
getType();
8385 assert(ExitCountType->isIntegerTy());
8387 1 + ExitCountType->getScalarSizeInBits());
8400 auto CanAddOneWithoutOverflow = [&]() {
8402 getRangeRef(ExitCount, RangeSignHint::HINT_RANGE_UNSIGNED);
8413 if (EvalSize > ExitCountSize && CanAddOneWithoutOverflow())
8443 assert(ExitingBlock &&
"Must pass a non-null exiting block!");
8444 assert(L->isLoopExiting(ExitingBlock) &&
8445 "Exiting block must actually branch out of the loop!");
8454 const auto *MaxExitCount =
8462 L->getExitingBlocks(ExitingBlocks);
8464 std::optional<unsigned> Res;
8465 for (
auto *ExitingBB : ExitingBlocks) {
8469 Res = std::gcd(*Res, Multiple);
8471 return Res.value_or(1);
8475 const SCEV *ExitCount) {
8505 assert(ExitingBlock &&
"Must pass a non-null exiting block!");
8506 assert(L->isLoopExiting(ExitingBlock) &&
8507 "Exiting block must actually branch out of the loop!");
8517 return getBackedgeTakenInfo(L).getExact(ExitingBlock,
this);
8519 return getBackedgeTakenInfo(L).getSymbolicMax(ExitingBlock,
this);
8521 return getBackedgeTakenInfo(L).getConstantMax(ExitingBlock,
this);
8531 return getPredicatedBackedgeTakenInfo(L).getExact(ExitingBlock,
this,
8534 return getPredicatedBackedgeTakenInfo(L).getSymbolicMax(ExitingBlock,
this,
8537 return getPredicatedBackedgeTakenInfo(L).getConstantMax(ExitingBlock,
this,
8545 return getPredicatedBackedgeTakenInfo(L).getExact(L,
this, &Preds);
8552 return getBackedgeTakenInfo(L).getExact(L,
this);
8554 return getBackedgeTakenInfo(L).getConstantMax(
this);
8556 return getBackedgeTakenInfo(L).getSymbolicMax(L,
this);
8563 return getPredicatedBackedgeTakenInfo(L).getSymbolicMax(L,
this, &Preds);
8568 return getPredicatedBackedgeTakenInfo(L).getConstantMax(
this, &Preds);
8572 return getBackedgeTakenInfo(L).isConstantMaxOrZero(
this);
8582 for (
PHINode &PN : Header->phis())
8583 if (Visited.
insert(&PN).second)
8587ScalarEvolution::BackedgeTakenInfo &
8588ScalarEvolution::getPredicatedBackedgeTakenInfo(
const Loop *L) {
8589 auto &BTI = getBackedgeTakenInfo(L);
8590 if (BTI.hasFullInfo())
8593 auto Pair = PredicatedBackedgeTakenCounts.try_emplace(L);
8596 return Pair.first->second;
8598 BackedgeTakenInfo
Result =
8599 computeBackedgeTakenCount(L,
true);
8601 return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result);
8604ScalarEvolution::BackedgeTakenInfo &
8605ScalarEvolution::getBackedgeTakenInfo(
const Loop *L) {
8611 std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator,
bool> Pair =
8612 BackedgeTakenCounts.try_emplace(L);
8614 return Pair.first->second;
8619 BackedgeTakenInfo
Result = computeBackedgeTakenCount(L);
8626 if (
Result.hasAnyInfo()) {
8629 auto LoopUsersIt = LoopUsers.find(L);
8630 if (LoopUsersIt != LoopUsers.end())
8632 forgetMemoizedResults(ToForget);
8635 for (PHINode &PN :
L->getHeader()->phis())
8636 ConstantEvolutionLoopExitValue.erase(&PN);
8644 return BackedgeTakenCounts.find(L)->second = std::move(Result);
8653 BackedgeTakenCounts.clear();
8654 PredicatedBackedgeTakenCounts.clear();
8655 BECountUsers.clear();
8656 LoopPropertiesCache.clear();
8657 ConstantEvolutionLoopExitValue.clear();
8658 ValueExprMap.clear();
8659 ValuesAtScopes.clear();
8660 ValuesAtScopesUsers.clear();
8661 LoopDispositions.clear();
8662 BlockDispositions.clear();
8663 UnsignedRanges.clear();
8664 SignedRanges.clear();
8665 ExprValueMap.clear();
8667 ConstantMultipleCache.clear();
8668 PredicatedSCEVRewrites.clear();
8670 FoldCacheUser.clear();
8672void ScalarEvolution::visitAndClearUsers(
8676 while (!Worklist.
empty()) {
8683 if (It != ValueExprMap.
end()) {
8684 eraseValueFromMap(It->first);
8687 ConstantEvolutionLoopExitValue.erase(PN);
8701 while (!LoopWorklist.
empty()) {
8705 forgetBackedgeTakenCounts(CurrL,
false);
8706 forgetBackedgeTakenCounts(CurrL,
true);
8709 for (
auto I = PredicatedSCEVRewrites.begin();
8710 I != PredicatedSCEVRewrites.end();) {
8711 std::pair<const SCEV *, const Loop *> Entry =
I->first;
8712 if (Entry.second == CurrL)
8713 PredicatedSCEVRewrites.erase(
I++);
8718 auto LoopUsersItr = LoopUsers.find(CurrL);
8719 if (LoopUsersItr != LoopUsers.end())
8724 visitAndClearUsers(Worklist, Visited, ToForget);
8726 LoopPropertiesCache.erase(CurrL);
8729 LoopWorklist.
append(CurrL->begin(), CurrL->end());
8731 forgetMemoizedResults(ToForget);
8748 visitAndClearUsers(Worklist, Visited, ToForget);
8750 forgetMemoizedResults(ToForget);
8762 struct InvalidationRootCollector {
8766 InvalidationRootCollector(
Loop *L) : L(L) {}
8768 bool follow(
const SCEV *S) {
8774 if (L->contains(AddRec->
getLoop()))
8779 bool isDone()
const {
return false; }
8782 InvalidationRootCollector
C(L);
8784 forgetMemoizedResults(
C.Roots);
8797 BlockDispositions.clear();
8798 LoopDispositions.clear();
8815 while (!Worklist.
empty()) {
8817 bool LoopDispoRemoved = LoopDispositions.erase(Curr);
8818 bool BlockDispoRemoved = BlockDispositions.erase(Curr);
8819 if (!LoopDispoRemoved && !BlockDispoRemoved)
8821 auto Users = SCEVUsers.find(Curr);
8822 if (
Users != SCEVUsers.end())
8835const SCEV *ScalarEvolution::BackedgeTakenInfo::getExact(
8839 if (!isComplete() || ExitNotTaken.
empty())
8850 for (
const auto &ENT : ExitNotTaken) {
8851 const SCEV *BECount = ENT.ExactNotTaken;
8854 "We should only have known counts for exiting blocks that dominate "
8857 Ops.push_back(BECount);
8862 assert((Preds || ENT.hasAlwaysTruePredicate()) &&
8863 "Predicate should be always true!");
8872const ScalarEvolution::ExitNotTakenInfo *
8873ScalarEvolution::BackedgeTakenInfo::getExitNotTaken(
8874 const BasicBlock *ExitingBlock,
8875 SmallVectorImpl<const SCEVPredicate *> *Predicates)
const {
8876 for (
const auto &ENT : ExitNotTaken)
8877 if (ENT.ExitingBlock == ExitingBlock) {
8878 if (ENT.hasAlwaysTruePredicate())
8880 else if (Predicates) {
8890const SCEV *ScalarEvolution::BackedgeTakenInfo::getConstantMax(
8892 SmallVectorImpl<const SCEVPredicate *> *Predicates)
const {
8893 if (!getConstantMax())
8896 for (
const auto &ENT : ExitNotTaken)
8897 if (!ENT.hasAlwaysTruePredicate()) {
8905 "No point in having a non-constant max backedge taken count!");
8906 return getConstantMax();
8909const SCEV *ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(
8911 SmallVectorImpl<const SCEVPredicate *> *Predicates) {
8919 for (
const auto &ENT : ExitNotTaken) {
8920 const SCEV *ExitCount = ENT.SymbolicMaxNotTaken;
8923 "We should only have known counts for exiting blocks that "
8929 assert((Predicates || ENT.hasAlwaysTruePredicate()) &&
8930 "Predicate should be always true!");
8933 if (ExitCounts.
empty())
8942bool ScalarEvolution::BackedgeTakenInfo::isConstantMaxOrZero(
8944 auto PredicateNotAlwaysTrue = [](
const ExitNotTakenInfo &ENT) {
8945 return !ENT.hasAlwaysTruePredicate();
8947 return MaxOrZero && !
any_of(ExitNotTaken, PredicateNotAlwaysTrue);
8963 this->ExactNotTaken = E = ConstantMaxNotTaken;
8964 this->SymbolicMaxNotTaken = SymbolicMaxNotTaken = ConstantMaxNotTaken;
8969 "Exact is not allowed to be less precise than Constant Max");
8972 "Exact is not allowed to be less precise than Symbolic Max");
8975 "Symbolic Max is not allowed to be less precise than Constant Max");
8978 "No point in having a non-constant max backedge taken count!");
8980 for (
const auto PredList : PredLists)
8981 for (
const auto *
P : PredList) {
8989 "Backedge count should be int");
8992 "Max backedge count should be int");
9005ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
9007 bool IsComplete,
const SCEV *ConstantMax,
bool MaxOrZero)
9008 : ConstantMax(ConstantMax), IsComplete(IsComplete), MaxOrZero(MaxOrZero) {
9009 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
9011 ExitNotTaken.reserve(ExitCounts.
size());
9012 std::transform(ExitCounts.
begin(), ExitCounts.
end(),
9013 std::back_inserter(ExitNotTaken),
9014 [&](
const EdgeExitInfo &EEI) {
9015 BasicBlock *ExitBB = EEI.first;
9016 const ExitLimit &EL = EEI.second;
9017 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken,
9018 EL.ConstantMaxNotTaken, EL.SymbolicMaxNotTaken,
9023 "No point in having a non-constant max backedge taken count!");
9027ScalarEvolution::BackedgeTakenInfo
9028ScalarEvolution::computeBackedgeTakenCount(
const Loop *L,
9029 bool AllowPredicates) {
9031 L->getExitingBlocks(ExitingBlocks);
9033 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
9036 bool CouldComputeBECount =
true;
9038 const SCEV *MustExitMaxBECount =
nullptr;
9039 const SCEV *MayExitMaxBECount =
nullptr;
9040 bool MustExitMaxOrZero =
false;
9041 bool IsOnlyExit = ExitingBlocks.
size() == 1;
9052 bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
9053 if (ExitIfTrue == CI->
isZero())
9057 ExitLimit EL = computeExitLimit(L, ExitBB, IsOnlyExit, AllowPredicates);
9059 assert((AllowPredicates || EL.Predicates.empty()) &&
9060 "Predicated exit limit when predicates are not allowed!");
9065 ++NumExitCountsComputed;
9069 CouldComputeBECount =
false;
9076 "Exact is known but symbolic isn't?");
9077 ++NumExitCountsNotComputed;
9092 DT.dominates(ExitBB, Latch)) {
9093 if (!MustExitMaxBECount) {
9094 MustExitMaxBECount = EL.ConstantMaxNotTaken;
9095 MustExitMaxOrZero = EL.MaxOrZero;
9098 EL.ConstantMaxNotTaken);
9102 MayExitMaxBECount = EL.ConstantMaxNotTaken;
9105 EL.ConstantMaxNotTaken);
9109 const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
9113 bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1);
9119 for (
const auto &Pair : ExitCounts) {
9121 BECountUsers[Pair.second.ExactNotTaken].insert({
L, AllowPredicates});
9123 BECountUsers[Pair.second.SymbolicMaxNotTaken].insert(
9124 {
L, AllowPredicates});
9126 return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount,
9127 MaxBECount, MaxOrZero);
9130ScalarEvolution::ExitLimit
9131ScalarEvolution::computeExitLimit(
const Loop *L, BasicBlock *ExitingBlock,
9132 bool IsOnlyExit,
bool AllowPredicates) {
9133 assert(
L->contains(ExitingBlock) &&
"Exit count for non-loop block?");
9137 if (!Latch || !DT.dominates(ExitingBlock, Latch))
9142 bool ExitIfTrue = !
L->contains(BI->getSuccessor(0));
9143 assert(ExitIfTrue ==
L->contains(BI->getSuccessor(1)) &&
9144 "It should have one successor in loop and one exit block!");
9155 if (!
L->contains(SBB)) {
9160 assert(Exit &&
"Exiting block must have at least one exit");
9161 return computeExitLimitFromSingleExitSwitch(
9162 L, SI, Exit, IsOnlyExit);
9169 const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
bool ControlsOnlyExit,
9170 bool AllowPredicates) {
9171 ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates);
9172 return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue,
9173 ControlsOnlyExit, AllowPredicates);
9176std::optional<ScalarEvolution::ExitLimit>
9177ScalarEvolution::ExitLimitCache::find(
const Loop *L,
Value *ExitCond,
9178 bool ExitIfTrue,
bool ControlsOnlyExit,
9179 bool AllowPredicates) {
9181 (void)this->ExitIfTrue;
9182 (void)this->AllowPredicates;
9184 assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
9185 this->AllowPredicates == AllowPredicates &&
9186 "Variance in assumed invariant key components!");
9187 auto Itr = TripCountMap.find({ExitCond, ControlsOnlyExit});
9188 if (Itr == TripCountMap.end())
9189 return std::nullopt;
9193void ScalarEvolution::ExitLimitCache::insert(
const Loop *L,
Value *ExitCond,
9195 bool ControlsOnlyExit,
9196 bool AllowPredicates,
9198 assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
9199 this->AllowPredicates == AllowPredicates &&
9200 "Variance in assumed invariant key components!");
9202 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsOnlyExit}, EL});
9203 assert(InsertResult.second &&
"Expected successful insertion!");
9208ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached(
9209 ExitLimitCacheTy &Cache,
const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
9210 bool ControlsOnlyExit,
bool AllowPredicates) {
9212 if (
auto MaybeEL = Cache.find(L, ExitCond, ExitIfTrue, ControlsOnlyExit,
9216 ExitLimit EL = computeExitLimitFromCondImpl(
9217 Cache, L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates);
9218 Cache.insert(L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates, EL);
9222ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl(
9223 ExitLimitCacheTy &Cache,
const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
9224 bool ControlsOnlyExit,
bool AllowPredicates) {
9226 if (
auto LimitFromBinOp = computeExitLimitFromCondFromBinOp(
9227 Cache, L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates))
9228 return *LimitFromBinOp;
9234 computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsOnlyExit);
9235 if (EL.hasFullInfo() || !AllowPredicates)
9239 return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue,
9259 const WithOverflowInst *WO;
9274 auto EL = computeExitLimitFromICmp(L, Pred,
LHS,
getConstant(NewRHSC),
9275 ControlsOnlyExit, AllowPredicates);
9276 if (EL.hasAnyInfo())
9281 return computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
9284std::optional<ScalarEvolution::ExitLimit>
9285ScalarEvolution::computeExitLimitFromCondFromBinOp(
9286 ExitLimitCacheTy &Cache,
const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
9287 bool ControlsOnlyExit,
bool AllowPredicates) {
9296 return std::nullopt;
9301 bool EitherMayExit = IsAnd ^ ExitIfTrue;
9302 ExitLimit EL0 = computeExitLimitFromCondCached(
9303 Cache, L, Op0, ExitIfTrue, ControlsOnlyExit && !EitherMayExit,
9305 ExitLimit EL1 = computeExitLimitFromCondCached(
9306 Cache, L, Op1, ExitIfTrue, ControlsOnlyExit && !EitherMayExit,
9310 const Constant *NeutralElement = ConstantInt::get(ExitCond->
getType(), IsAnd);
9312 return Op1 == NeutralElement ? EL0 : EL1;
9314 return Op0 == NeutralElement ? EL1 : EL0;
9319 if (EitherMayExit) {
9329 ConstantMaxBECount = EL1.ConstantMaxNotTaken;
9331 ConstantMaxBECount = EL0.ConstantMaxNotTaken;
9334 EL1.ConstantMaxNotTaken);
9336 SymbolicMaxBECount = EL1.SymbolicMaxNotTaken;
9338 SymbolicMaxBECount = EL0.SymbolicMaxNotTaken;
9341 EL0.SymbolicMaxNotTaken, EL1.SymbolicMaxNotTaken, UseSequentialUMin);
9345 if (EL0.ExactNotTaken == EL1.ExactNotTaken)
9346 BECount = EL0.ExactNotTaken;
9359 SymbolicMaxBECount =
9361 return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount,
false,
9365ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromICmp(
9366 const Loop *L, ICmpInst *ExitCond,
bool ExitIfTrue,
bool ControlsOnlyExit,
9367 bool AllowPredicates) {
9379 ExitLimit EL = computeExitLimitFromICmp(L, Pred,
LHS,
RHS, ControlsOnlyExit,
9381 if (EL.hasAnyInfo())
9384 auto *ExhaustiveCount =
9385 computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
9388 return ExhaustiveCount;
9390 return computeShiftCompareExitLimit(ExitCond->
getOperand(0),
9393ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromICmp(
9394 const Loop *L, CmpPredicate Pred, SCEVUse
LHS, SCEVUse
RHS,
9395 bool ControlsOnlyExit,
bool AllowPredicates) {
9420 ConstantRange CompRange =
9436 SCEVUse InnerLHS =
LHS;
9438 InnerLHS = ZExt->getOperand();
9485 if (EL.hasAnyInfo())
9502 if (EL.hasAnyInfo())
return EL;
9534 ExitLimit EL = howManyLessThans(
LHS,
RHS, L, IsSigned, ControlsOnlyExit,
9536 if (EL.hasAnyInfo())
9552 ExitLimit EL = howManyGreaterThans(
LHS,
RHS, L, IsSigned, ControlsOnlyExit,
9554 if (EL.hasAnyInfo())
9565ScalarEvolution::ExitLimit
9566ScalarEvolution::computeExitLimitFromSingleExitSwitch(
const Loop *L,
9568 BasicBlock *ExitingBlock,
9569 bool ControlsOnlyExit) {
9570 assert(!
L->contains(ExitingBlock) &&
"Not an exiting block!");
9573 if (
Switch->getDefaultDest() == ExitingBlock)
9577 "Default case must not exit the loop!");
9583 if (EL.hasAnyInfo())
9595 "Evaluation of SCEV at constant didn't fold correctly?");
9599ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
9609 const BasicBlock *Predecessor =
L->getLoopPredecessor();
9615 auto MatchPositiveShift =
9618 using namespace PatternMatch;
9620 ConstantInt *ShiftAmt;
9622 OutOpCode = Instruction::LShr;
9624 OutOpCode = Instruction::AShr;
9626 OutOpCode = Instruction::Shl;
9641 auto MatchShiftRecurrence =
9643 std::optional<Instruction::BinaryOps> PostShiftOpCode;
9658 if (MatchPositiveShift(
LHS, V, OpC)) {
9659 PostShiftOpCode = OpC;
9665 if (!PNOut || PNOut->getParent() !=
L->getHeader())
9668 Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
9674 MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&
9681 (!PostShiftOpCode || *PostShiftOpCode == OpCodeOut);
9686 if (!MatchShiftRecurrence(
LHS, PN, OpCode))
9698 ConstantInt *StableValue =
nullptr;
9703 case Instruction::AShr: {
9711 StableValue = ConstantInt::get(Ty, 0);
9713 StableValue = ConstantInt::get(Ty, -1,
true);
9719 case Instruction::LShr:
9720 case Instruction::Shl:
9730 "Otherwise cannot be an operand to a branch instruction");
9732 if (
Result->isNullValue()) {
9734 const SCEV *UpperBound =
9751 if (
const Function *
F = CI->getCalledFunction())
9760 if (!L->contains(
I))
return false;
9765 return L->getHeader() ==
I->getParent();
9841 if (!
I)
return nullptr;
9854 std::vector<Constant*> Operands(
I->getNumOperands());
9856 for (
unsigned i = 0, e =
I->getNumOperands(); i != e; ++i) {
9860 if (!Operands[i])
return nullptr;
9865 if (!
C)
return nullptr;
9887 if (IncomingVal != CurrentVal) {
9890 IncomingVal = CurrentVal;
9902ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
9905 auto [
I,
Inserted] = ConstantEvolutionLoopExitValue.try_emplace(PN);
9914 DenseMap<Instruction *, Constant *> CurrentIterVals;
9916 assert(PN->
getParent() == Header &&
"Can't evaluate PHI not in loop header!");
9922 for (PHINode &
PHI : Header->phis()) {
9924 CurrentIterVals[&
PHI] = StartCST;
9926 if (!CurrentIterVals.
count(PN))
9927 return RetVal =
nullptr;
9933 "BEs is <= MaxBruteForceIterations which is an 'unsigned'!");
9936 unsigned IterationNum = 0;
9938 for (; ; ++IterationNum) {
9939 if (IterationNum == NumIterations)
9940 return RetVal = CurrentIterVals[PN];
9944 DenseMap<Instruction *, Constant *> NextIterVals;
9949 NextIterVals[PN] = NextPHI;
9951 bool StoppedEvolving = NextPHI == CurrentIterVals[PN];
9957 for (
const auto &
I : CurrentIterVals) {
9959 if (!
PHI ||
PHI == PN ||
PHI->getParent() != Header)
continue;
9964 for (
const auto &
I : PHIsToCompute) {
9965 PHINode *
PHI =
I.first;
9968 Value *BEValue =
PHI->getIncomingValueForBlock(Latch);
9971 if (NextPHI !=
I.second)
9972 StoppedEvolving =
false;
9977 if (StoppedEvolving)
9978 return RetVal = CurrentIterVals[PN];
9980 CurrentIterVals.swap(NextIterVals);
9984const SCEV *ScalarEvolution::computeExitCountExhaustively(
const Loop *L,
9994 DenseMap<Instruction *, Constant *> CurrentIterVals;
9996 assert(PN->
getParent() == Header &&
"Can't evaluate PHI not in loop header!");
9999 assert(Latch &&
"Should follow from NumIncomingValues == 2!");
10001 for (PHINode &
PHI : Header->phis()) {
10003 CurrentIterVals[&
PHI] = StartCST;
10005 if (!CurrentIterVals.
count(PN))
10013 for (
unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){
10020 if (CondVal->getValue() == uint64_t(ExitWhen)) {
10021 ++NumBruteForceTripCountsComputed;
10026 DenseMap<Instruction *, Constant *> NextIterVals;
10032 for (
const auto &
I : CurrentIterVals) {
10034 if (!
PHI ||
PHI->getParent() != Header)
continue;
10037 for (PHINode *
PHI : PHIsToCompute) {
10039 if (NextPHI)
continue;
10041 Value *BEValue =
PHI->getIncomingValueForBlock(Latch);
10044 CurrentIterVals.
swap(NextIterVals);
10055 for (
auto &LS : Values)
10057 return LS.second ? LS.second : V;
10062 const SCEV *
C = computeSCEVAtScope(V, L);
10063 for (
auto &LS :
reverse(ValuesAtScopes[V]))
10064 if (LS.first == L) {
10067 ValuesAtScopesUsers[
C].push_back({L, V});
10078 switch (V->getSCEVType()) {
10118 assert(!
C->getType()->isPointerTy() &&
10119 "Can only have one pointer, and it must be last");
10144const SCEV *ScalarEvolution::getWithOperands(
const SCEV *S,
10145 SmallVectorImpl<SCEVUse> &NewOps) {
10180const SCEV *ScalarEvolution::computeSCEVAtScope(
const SCEV *V,
const Loop *L) {
10181 switch (
V->getSCEVType()) {
10192 for (
unsigned i = 0, e = AddRec->
getNumOperands(); i != e; ++i) {
10203 for (++i; i !=
e; ++i)
10248 for (
unsigned i = 0, e =
Ops.size(); i != e; ++i) {
10258 for (++i; i !=
e; ++i) {
10263 return getWithOperands(V, NewOps);
10278 const Loop *CurrLoop = this->LI[
I->getParent()];
10289 if (BackedgeTakenCount->
isZero()) {
10290 Value *InitValue =
nullptr;
10291 bool MultipleInitValues =
false;
10297 MultipleInitValues =
true;
10302 if (!MultipleInitValues && InitValue)
10311 unsigned InLoopPred =
10322 getConstantEvolutionLoopExitValue(PN, BTCC->getAPInt(), CurrLoop);
10336 SmallVector<Constant *, 4> Operands;
10337 Operands.
reserve(
I->getNumOperands());
10338 bool MadeImprovement =
false;
10353 MadeImprovement |= OrigV != OpV;
10358 assert(
C->getType() ==
Op->getType() &&
"Type mismatch");
10363 if (!MadeImprovement)
10384const SCEV *ScalarEvolution::stripInjectiveFunctions(
const SCEV *S)
const {
10386 return stripInjectiveFunctions(ZExt->getOperand());
10388 return stripInjectiveFunctions(SExt->getOperand());
10406 assert(
A != 0 &&
"A must be non-zero.");
10422 if (MinTZ < Mult2 && L->getLoopPredecessor())
10424 if (MinTZ < Mult2) {
10447 APInt AD =
A.lshr(Mult2).trunc(BW - Mult2);
10467static std::optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>>
10473 LLVM_DEBUG(
dbgs() << __func__ <<
": analyzing quadratic addrec: "
10474 << *AddRec <<
'\n');
10477 if (!LC || !MC || !
NC) {
10478 LLVM_DEBUG(
dbgs() << __func__ <<
": coefficients are not constant\n");
10479 return std::nullopt;
10485 assert(!
N.isZero() &&
"This is not a quadratic addrec");
10493 N =
N.sext(NewWidth);
10494 M = M.sext(NewWidth);
10495 L = L.sext(NewWidth);
10512 <<
"x + " <<
C <<
", coeff bw: " << NewWidth
10513 <<
", multiplied by " <<
T <<
'\n');
10522 std::optional<APInt>
Y) {
10524 unsigned W = std::max(
X->getBitWidth(),
Y->getBitWidth());
10527 return XW.
slt(YW) ? *
X : *
Y;
10530 return std::nullopt;
10531 return X ? *
X : *
Y;
10548 return std::nullopt;
10549 unsigned W =
X->getBitWidth();
10569static std::optional<APInt>
10575 return std::nullopt;
10578 LLVM_DEBUG(
dbgs() << __func__ <<
": solving for unsigned overflow\n");
10579 std::optional<APInt>
X =
10582 return std::nullopt;
10587 return std::nullopt;
10602static std::optional<APInt>
10606 "Starting value of addrec should be 0");
10607 LLVM_DEBUG(
dbgs() << __func__ <<
": solving boundary crossing for range "
10608 <<
Range <<
", addrec " << *AddRec <<
'\n');
10612 "Addrec's initial value should be in range");
10618 return std::nullopt;
10628 auto SolveForBoundary =
10629 [&](
APInt Bound) -> std::pair<std::optional<APInt>,
bool> {
10632 LLVM_DEBUG(
dbgs() <<
"SolveQuadraticAddRecRange: checking boundary "
10633 << Bound <<
" (before multiplying by " << M <<
")\n");
10636 std::optional<APInt> SO;
10639 "signed overflow\n");
10643 "unsigned overflow\n");
10644 std::optional<APInt> UO =
10647 auto LeavesRange = [&] (
const APInt &
X) {
10664 return {std::nullopt,
false};
10669 if (LeavesRange(*Min))
10670 return { Min,
true };
10671 std::optional<APInt> Max = Min == SO ? UO : SO;
10672 if (LeavesRange(*Max))
10673 return { Max,
true };
10676 return {std::nullopt,
true};
10683 auto SL = SolveForBoundary(
Lower);
10684 auto SU = SolveForBoundary(
Upper);
10687 if (!SL.second || !SU.second)
10688 return std::nullopt;
10731ScalarEvolution::ExitLimit ScalarEvolution::howFarToZero(
const SCEV *V,
10733 bool ControlsOnlyExit,
10734 bool AllowPredicates) {
10745 if (
C->getValue()->isZero())
return C;
10749 const SCEVAddRecExpr *AddRec =
10752 if (!AddRec && AllowPredicates)
10758 if (!AddRec || AddRec->
getLoop() != L)
10769 return ExitLimit(R, R, R,
false, Predicates);
10827 const SCEV *DistancePlusOne =
getAddExpr(Distance, One);
10853 const SCEV *
Exact =
10861 const SCEV *SymbolicMax =
10863 return ExitLimit(
Exact, ConstantMax, SymbolicMax,
false, Predicates);
10872 AllowPredicates ? &Predicates :
nullptr, *
this, L);
10880 return ExitLimit(
E, M, S,
false, Predicates);
10883ScalarEvolution::ExitLimit
10884ScalarEvolution::howFarToNonZero(
const SCEV *V,
const Loop *L) {
10892 if (!
C->getValue()->isZero())
10902std::pair<const BasicBlock *, const BasicBlock *>
10903ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(
const BasicBlock *BB)
10914 if (
const Loop *L = LI.getLoopFor(BB))
10915 return {
L->getLoopPredecessor(),
L->getHeader()};
10917 return {
nullptr, BB};
10926 if (
A ==
B)
return true;
10941 if (ComputesEqualValues(AI, BI))
10949 const SCEV *Op0, *Op1;
10968 auto TrivialCase = [&](
bool TriviallyTrue) {
10977 const SCEV *NewLHS, *NewRHS;
11001 return TrivialCase(
false);
11002 return TrivialCase(
true);
11025 const APInt &
RA = RC->getAPInt();
11027 bool SimplifiedByConstantRange =
false;
11032 return TrivialCase(
true);
11034 return TrivialCase(
false);
11043 Changed = SimplifiedByConstantRange =
true;
11047 if (!SimplifiedByConstantRange) {
11064 assert(!
RA.isMinValue() &&
"Should have been caught earlier!");
11070 assert(!
RA.isMaxValue() &&
"Should have been caught earlier!");
11076 assert(!
RA.isMinSignedValue() &&
"Should have been caught earlier!");
11082 assert(!
RA.isMaxSignedValue() &&
"Should have been caught earlier!");
11094 return TrivialCase(
true);
11096 return TrivialCase(
false);
11201 auto NonRecursive = [OrNegative](
const SCEV *S) {
11203 return C->getAPInt().isPowerOf2() ||
11204 (OrNegative &&
C->getAPInt().isNegatedPowerOf2());
11210 if (NonRecursive(S))
11236 APInt C = Cst->getAPInt();
11237 return C.urem(M) == 0;
11245 const SCEV *SmodM =
11260 for (
auto *
A : Assumptions)
11261 if (
A->implies(
P, *
this))
11274std::pair<const SCEV *, const SCEV *>
11277 const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *
this);
11279 return { Start, Start };
11281 const SCEV *
PostInc = SCEVPostIncRewriter::rewrite(S, L, *
this);
11290 getUsedLoops(LHS, LoopsUsed);
11291 getUsedLoops(RHS, LoopsUsed);
11293 if (LoopsUsed.
empty())
11298 for (
const auto *L1 : LoopsUsed)
11299 for (
const auto *L2 : LoopsUsed)
11300 assert((DT.dominates(L1->getHeader(), L2->getHeader()) ||
11301 DT.dominates(L2->getHeader(), L1->getHeader())) &&
11302 "Domination relationship is not a linear order");
11332 SplitRHS.second) &&
11344 if (isKnownPredicateViaSplitting(Pred, LHS, RHS))
11348 return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS);
11358 return std::nullopt;
11373 if (KnownWithoutContext)
11374 return KnownWithoutContext;
11381 return std::nullopt;
11387 const Loop *L = LHS->getLoop();
11392std::optional<ScalarEvolution::MonotonicPredicateType>
11395 auto Result = getMonotonicPredicateTypeImpl(LHS, Pred);
11401 auto ResultSwapped =
11404 assert(*ResultSwapped != *Result &&
11405 "monotonicity should flip as we flip the predicate");
11412std::optional<ScalarEvolution::MonotonicPredicateType>
11413ScalarEvolution::getMonotonicPredicateTypeImpl(
const SCEVAddRecExpr *LHS,
11427 return std::nullopt;
11431 "Should be greater or less!");
11435 if (!LHS->hasNoUnsignedWrap())
11436 return std::nullopt;
11440 "Relational predicate is either signed or unsigned!");
11441 if (!
LHS->hasNoSignedWrap())
11442 return std::nullopt;
11444 const SCEV *Step =
LHS->getStepRecurrence(*
this);
11452 return std::nullopt;
11455std::optional<ScalarEvolution::LoopInvariantPredicate>
11462 return std::nullopt;
11469 if (!ArLHS || ArLHS->
getLoop() != L)
11470 return std::nullopt;
11474 return std::nullopt;
11500 return std::nullopt;
11537 return std::nullopt;
11540std::optional<ScalarEvolution::LoopInvariantPredicate>
11545 Pred, LHS, RHS, L, CtxI, MaxIter))
11555 Pred, LHS, RHS, L, CtxI,
Op))
11557 return std::nullopt;
11560std::optional<ScalarEvolution::LoopInvariantPredicate>
11575 return std::nullopt;
11582 if (!AR || AR->
getLoop() != L)
11583 return std::nullopt;
11588 Pred = Pred.dropSameSign();
11592 return std::nullopt;
11598 if (Step != One && Step != MinusOne)
11599 return std::nullopt;
11605 return std::nullopt;
11611 return std::nullopt;
11619 if (Step == MinusOne)
11623 return std::nullopt;
11629bool ScalarEvolution::isKnownPredicateViaConstantRanges(
CmpPredicate Pred,
11635 auto CheckRange = [&](
bool IsSigned) {
11638 return RangeLHS.
icmp(Pred, RangeRHS);
11647 if (CheckRange(
true) || CheckRange(
false))
11656bool ScalarEvolution::isKnownPredicateViaNoOverflow(CmpPredicate Pred,
11657 SCEVUse
LHS, SCEVUse
RHS) {
11662 auto MatchBinaryAddToConst = [
this](SCEVUse
X, SCEVUse
Y, APInt &OutC1,
11665 SCEVUse XNonConstOp, XConstOp;
11666 SCEVUse YNonConstOp, YConstOp;
11670 if (!splitBinaryAdd(
X, XConstOp, XNonConstOp, XFlagsPresent)) {
11673 XFlagsPresent = ExpectedFlags;
11678 if (!splitBinaryAdd(
Y, YConstOp, YNonConstOp, YFlagsPresent)) {
11681 YFlagsPresent = ExpectedFlags;
11684 if (YNonConstOp != XNonConstOp)
11692 if ((YFlagsPresent & ExpectedFlags) != ExpectedFlags)
11695 (XFlagsPresent & ExpectedFlags) != ExpectedFlags) {
11755bool ScalarEvolution::isKnownPredicateViaSplitting(CmpPredicate Pred,
11756 SCEVUse
LHS, SCEVUse
RHS) {
11776bool ScalarEvolution::isImpliedViaGuard(
const BasicBlock *BB, CmpPredicate Pred,
11777 const SCEV *
LHS,
const SCEV *
RHS) {
11782 return any_of(*BB, [&](
const Instruction &
I) {
11783 using namespace llvm::PatternMatch;
11788 isImpliedCond(Pred,
LHS,
RHS, Condition,
false);
11802 if (!L || !DT.isReachableFromEntry(L->getHeader()))
11807 "This cannot be done on broken IR!");
11810 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
11819 if (LoopContinuePredicate &&
11820 isImpliedCond(Pred, LHS, RHS, LoopContinuePredicate->
getCondition(),
11821 LoopContinuePredicate->
getSuccessor(0) != L->getHeader()))
11826 if (WalkingBEDominatingConds)
11832 const auto &BETakenInfo = getBackedgeTakenInfo(L);
11833 const SCEV *LatchBECount = BETakenInfo.getExact(Latch,
this);
11840 const SCEV *LoopCounter =
11848 for (
auto &AssumeVH : AC.assumptions()) {
11855 if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0),
false))
11859 if (isImpliedViaGuard(Latch, Pred, LHS, RHS))
11862 for (
DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()];
11863 DTN != HeaderDTN; DTN = DTN->getIDom()) {
11864 assert(DTN &&
"should reach the loop header before reaching the root!");
11867 if (isImpliedViaGuard(BB, Pred, LHS, RHS))
11885 if (isImpliedCond(Pred, LHS, RHS, ContBr->
getCondition(),
11898 if (!DT.isReachableFromEntry(BB))
11902 "This cannot be done on broken IR!");
11910 const bool ProvingStrictComparison =
11912 bool ProvedNonStrictComparison =
false;
11913 bool ProvedNonEquality =
false;
11916 if (!ProvedNonStrictComparison)
11917 ProvedNonStrictComparison = Fn(NonStrictPredicate);
11918 if (!ProvedNonEquality)
11920 if (ProvedNonStrictComparison && ProvedNonEquality)
11925 if (ProvingStrictComparison) {
11927 return isKnownViaNonRecursiveReasoning(
P, LHS, RHS);
11929 if (SplitAndProve(ProofFn))
11934 auto ProveViaCond = [&](
const Value *Condition,
bool Inverse) {
11936 if (isImpliedCond(Pred, LHS, RHS, Condition,
Inverse, CtxI))
11938 if (ProvingStrictComparison) {
11940 return isImpliedCond(
P, LHS, RHS, Condition,
Inverse, CtxI);
11942 if (SplitAndProve(ProofFn))
11951 const Loop *ContainingLoop = LI.getLoopFor(BB);
11953 if (ContainingLoop && ContainingLoop->
getHeader() == BB)
11957 for (std::pair<const BasicBlock *, const BasicBlock *> Pair(PredBB, BB);
11958 Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
11961 if (!BlockEntryPredicate)
11970 for (
auto &AssumeVH : AC.assumptions()) {
11974 if (!DT.dominates(CI, BB))
11977 if (ProveViaCond(CI->getArgOperand(0),
false))
11983 F.getParent(), Intrinsic::experimental_guard);
11985 for (
const auto *GU : GuardDecl->users())
11987 if (Guard->getFunction() == BB->
getParent() && DT.dominates(Guard, BB))
11988 if (ProveViaCond(Guard->getArgOperand(0),
false))
12003 "LHS is not available at Loop Entry");
12005 "RHS is not available at Loop Entry");
12007 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
12018 if (FoundCondValue ==
12022 if (!PendingLoopPredicates.insert(FoundCondValue).second)
12026 [&]() { PendingLoopPredicates.erase(FoundCondValue); });
12029 const Value *Op0, *Op1;
12032 return isImpliedCond(Pred,
LHS,
RHS, Op0,
Inverse, CtxI) ||
12036 return isImpliedCond(Pred,
LHS,
RHS, Op0, Inverse, CtxI) ||
12037 isImpliedCond(Pred,
LHS,
RHS, Op1, Inverse, CtxI);
12041 if (!ICI)
return false;
12045 CmpPredicate FoundPred;
12054 return isImpliedCond(Pred,
LHS,
RHS, FoundPred, FoundLHS, FoundRHS, CtxI);
12057bool ScalarEvolution::isImpliedCond(CmpPredicate Pred,
const SCEV *
LHS,
12058 const SCEV *
RHS, CmpPredicate FoundPred,
12059 const SCEV *FoundLHS,
const SCEV *FoundRHS,
12060 const Instruction *CtxI) {
12070 auto *WideType = FoundLHS->
getType();
12082 TruncFoundLHS, TruncFoundRHS, CtxI))
12108 return isImpliedCondBalancedTypes(Pred,
LHS,
RHS, FoundPred, FoundLHS,
12112bool ScalarEvolution::isImpliedCondBalancedTypes(
12113 CmpPredicate Pred, SCEVUse
LHS, SCEVUse
RHS, CmpPredicate FoundPred,
12114 SCEVUse FoundLHS, SCEVUse FoundRHS,
const Instruction *CtxI) {
12117 "Types should be balanced!");
12124 if (FoundLHS == FoundRHS)
12128 if (
LHS == FoundRHS ||
RHS == FoundLHS) {
12140 return isImpliedCondOperands(*
P,
LHS,
RHS, FoundLHS, FoundRHS, CtxI);
12157 LHS, FoundLHS, FoundRHS, CtxI);
12159 return isImpliedCondOperands(*
P,
LHS,
RHS, FoundRHS, FoundLHS, CtxI);
12181 assert(P1 != P2 &&
"Handled earlier!");
12185 if (IsSignFlippedPredicate(Pred, FoundPred)) {
12189 return isImpliedCondOperands(Pred,
LHS,
RHS, FoundLHS, FoundRHS, CtxI);
12192 CmpPredicate CanonicalPred = Pred, CanonicalFoundPred = FoundPred;
12193 const SCEV *CanonicalLHS =
LHS, *CanonicalRHS =
RHS,
12194 *CanonicalFoundLHS = FoundLHS, *CanonicalFoundRHS = FoundRHS;
12199 std::swap(CanonicalFoundLHS, CanonicalFoundRHS);
12210 return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS,
12211 CanonicalRHS, CanonicalFoundLHS,
12212 CanonicalFoundRHS);
12217 return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS,
12218 CanonicalRHS, CanonicalFoundLHS,
12219 CanonicalFoundRHS);
12226 const SCEVConstant *
C =
nullptr;
12227 const SCEV *
V =
nullptr;
12245 if (Min ==
C->getAPInt()) {
12250 APInt SharperMin = Min + 1;
12253 case ICmpInst::ICMP_SGE:
12254 case ICmpInst::ICMP_UGE:
12257 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(SharperMin),
12262 case ICmpInst::ICMP_SGT:
12263 case ICmpInst::ICMP_UGT:
12273 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min), CtxI))
12278 case ICmpInst::ICMP_SLE:
12279 case ICmpInst::ICMP_ULE:
12280 if (isImpliedCondOperands(ICmpInst::getSwappedCmpPredicate(Pred), RHS,
12281 LHS, V, getConstant(SharperMin), CtxI))
12285 case ICmpInst::ICMP_SLT:
12286 case ICmpInst::ICMP_ULT:
12287 if (isImpliedCondOperands(ICmpInst::getSwappedCmpPredicate(Pred), RHS,
12288 LHS, V, getConstant(Min), CtxI))
12302 if (isImpliedCondOperands(Pred,
LHS,
RHS, FoundLHS, FoundRHS, CtxI))
12306 if (isImpliedCondOperands(FoundPred,
LHS,
RHS, FoundLHS, FoundRHS, CtxI))
12309 if (isImpliedCondOperandsViaRanges(Pred,
LHS,
RHS, FoundPred, FoundLHS, FoundRHS))
12316bool ScalarEvolution::splitBinaryAdd(SCEVUse Expr, SCEVUse &L, SCEVUse &R,
12325std::optional<APInt>
12332 APInt DiffMul(BW, 1);
12335 for (
unsigned I = 0;
I < 8; ++
I) {
12344 if (LAR->getLoop() != MAR->getLoop())
12345 return std::nullopt;
12349 if (!LAR->isAffine() || !MAR->isAffine())
12350 return std::nullopt;
12352 if (LAR->getStepRecurrence(*
this) != MAR->getStepRecurrence(*
this))
12353 return std::nullopt;
12355 Less = LAR->getStart();
12356 More = MAR->getStart();
12361 auto MatchConstMul =
12362 [](
const SCEV *S) -> std::optional<std::pair<const SCEV *, APInt>> {
12367 return std::nullopt;
12369 if (
auto MatchedMore = MatchConstMul(More)) {
12370 if (
auto MatchedLess = MatchConstMul(
Less)) {
12371 if (MatchedMore->second == MatchedLess->second) {
12372 More = MatchedMore->first;
12373 Less = MatchedLess->first;
12374 DiffMul *= MatchedMore->second;
12385 Diff +=
C->getAPInt() * DiffMul;
12388 Diff -=
C->getAPInt() * DiffMul;
12391 Multiplicity[S] +=
Mul;
12393 auto Decompose = [&](
const SCEV *S,
int Mul) {
12400 Decompose(More, 1);
12401 Decompose(
Less, -1);
12405 const SCEV *NewMore =
nullptr, *NewLess =
nullptr;
12406 for (
const auto &[S,
Mul] : Multiplicity) {
12411 return std::nullopt;
12413 }
else if (
Mul == -1) {
12415 return std::nullopt;
12418 return std::nullopt;
12422 if (NewMore == More || NewLess ==
Less)
12423 return std::nullopt;
12429 if (!More && !
Less)
12433 if (!More || !
Less)
12434 return std::nullopt;
12438 return std::nullopt;
12441bool ScalarEvolution::isImpliedCondOperandsViaAddRecStart(
12463 const auto *Latch = L->getLoopLatch();
12466 if (!L->contains(ContextBB) || !Latch || !DT.
dominates(ContextBB, Latch))
12475 const auto *Latch = L->getLoopLatch();
12478 if (!L->contains(ContextBB) || !Latch || !DT.
dominates(ContextBB, Latch))
12488bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow(CmpPredicate Pred,
12491 const SCEV *FoundLHS,
12492 const SCEV *FoundRHS) {
12501 if (!AddRecFoundLHS)
12508 const Loop *
L = AddRecFoundLHS->getLoop();
12509 if (L != AddRecLHS->getLoop())
12548 if (!RDiff || *LDiff != *RDiff)
12551 if (LDiff->isMinValue())
12554 APInt FoundRHSLimit;
12557 FoundRHSLimit = -(*RDiff);
12569bool ScalarEvolution::isImpliedViaMerge(CmpPredicate Pred,
const SCEV *
LHS,
12570 const SCEV *
RHS,
const SCEV *FoundLHS,
12571 const SCEV *FoundRHS,
unsigned Depth) {
12572 const PHINode *LPhi =
nullptr, *RPhi =
nullptr;
12576 bool Erased = PendingMerges.erase(LPhi);
12577 assert(Erased &&
"Failed to erase LPhi!");
12581 bool Erased = PendingMerges.erase(RPhi);
12582 assert(Erased &&
"Failed to erase RPhi!");
12590 if (!PendingMerges.insert(Phi).second)
12604 if (!PendingMerges.insert(Phi).second)
12610 if (!LPhi && !RPhi)
12621 assert(LPhi &&
"LPhi should definitely be a SCEVUnknown Phi!");
12625 auto ProvedEasily = [&](
const SCEV *
S1,
const SCEV *S2) {
12626 return isKnownViaNonRecursiveReasoning(Pred,
S1, S2) ||
12627 isImpliedCondOperandsViaRanges(Pred,
S1, S2, Pred, FoundLHS, FoundRHS) ||
12628 isImpliedViaOperations(Pred,
S1, S2, FoundLHS, FoundRHS,
Depth);
12631 if (RPhi && RPhi->getParent() == LBB) {
12638 const SCEV *
R =
getSCEV(RPhi->getIncomingValueForBlock(IncBB));
12639 if (!ProvedEasily(L, R))
12650 auto *RLoop = RAR->
getLoop();
12651 auto *Predecessor = RLoop->getLoopPredecessor();
12652 assert(Predecessor &&
"Loop with AddRec with no predecessor?");
12654 if (!ProvedEasily(L1, RAR->
getStart()))
12656 auto *Latch = RLoop->getLoopLatch();
12657 assert(Latch &&
"Loop with AddRec with no latch?");
12678 if (
auto *Loop = LI.getLoopFor(LBB))
12681 if (!ProvedEasily(L,
RHS))
12688bool ScalarEvolution::isImpliedCondOperandsViaShift(CmpPredicate Pred,
12691 const SCEV *FoundLHS,
12692 const SCEV *FoundRHS) {
12695 if (
RHS == FoundRHS) {
12700 if (
LHS != FoundLHS)
12707 Value *Shiftee, *ShiftValue;
12709 using namespace PatternMatch;
12710 if (
match(SUFoundRHS->getValue(),
12712 auto *ShifteeS =
getSCEV(Shiftee);
12730bool ScalarEvolution::isImpliedCondOperands(CmpPredicate Pred,
const SCEV *
LHS,
12732 const SCEV *FoundLHS,
12733 const SCEV *FoundRHS,
12734 const Instruction *CtxI) {
12735 return isImpliedCondOperandsViaRanges(Pred,
LHS,
RHS, Pred, FoundLHS,
12737 isImpliedCondOperandsViaNoOverflow(Pred,
LHS,
RHS, FoundLHS,
12739 isImpliedCondOperandsViaShift(Pred,
LHS,
RHS, FoundLHS, FoundRHS) ||
12740 isImpliedCondOperandsViaAddRecStart(Pred,
LHS,
RHS, FoundLHS, FoundRHS,
12742 isImpliedCondOperandsHelper(Pred,
LHS,
RHS, FoundLHS, FoundRHS);
12746template <
typename MinMaxExprType>
12748 const SCEV *Candidate) {
12753 return is_contained(MinMaxExpr->operands(), Candidate);
12766 const SCEV *LStart, *RStart, *Step;
12816bool ScalarEvolution::isImpliedViaOperations(CmpPredicate Pred,
const SCEV *
LHS,
12818 const SCEV *FoundLHS,
12819 const SCEV *FoundRHS,
12823 "LHS and RHS have different sizes?");
12826 "FoundLHS and FoundRHS have different sizes?");
12860 auto GetOpFromSExt = [&](
const SCEV *S) ->
const SCEV * {
12862 return Ext->getOperand();
12869 auto *OrigLHS =
LHS;
12870 auto *OrigFoundLHS = FoundLHS;
12871 LHS = GetOpFromSExt(
LHS);
12872 FoundLHS = GetOpFromSExt(FoundLHS);
12875 auto IsSGTViaContext = [&](
const SCEV *
S1,
const SCEV *S2) {
12878 FoundRHS,
Depth + 1);
12891 if (!LHSAddExpr->hasNoSignedWrap())
12894 SCEVUse LL = LHSAddExpr->getOperand(0);
12895 SCEVUse LR = LHSAddExpr->getOperand(1);
12899 auto IsSumGreaterThanRHS = [&](
const SCEV *
S1,
const SCEV *S2) {
12900 return IsSGTViaContext(
S1, MinusOne) && IsSGTViaContext(S2,
RHS);
12905 if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
12911 using namespace llvm::PatternMatch;
12930 if (!Numerator || Numerator->getType() != FoundLHS->
getType())
12938 auto *DTy = Denominator->getType();
12939 auto *FRHSTy = FoundRHS->
getType();
12940 if (DTy->isPointerTy() != FRHSTy->isPointerTy())
12959 IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
12970 auto *NegDenomMinusOne =
getMinusSCEV(MinusOne, DenominatorExt);
12972 IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
12980 if (isImpliedViaMerge(Pred, OrigLHS,
RHS, OrigFoundLHS, FoundRHS,
Depth + 1))
13013bool ScalarEvolution::isKnownViaNonRecursiveReasoning(CmpPredicate Pred,
13017 isKnownPredicateViaConstantRanges(Pred,
LHS,
RHS) ||
13020 isKnownPredicateViaNoOverflow(Pred,
LHS,
RHS);
13023bool ScalarEvolution::isImpliedCondOperandsHelper(CmpPredicate Pred,
13026 const SCEV *FoundLHS,
13027 const SCEV *FoundRHS) {
13063 if (isImpliedViaOperations(Pred,
LHS,
RHS, FoundLHS, FoundRHS))
13069bool ScalarEvolution::isImpliedCondOperandsViaRanges(
13070 CmpPredicate Pred,
const SCEV *
LHS,
const SCEV *
RHS, CmpPredicate FoundPred,
13071 const SCEV *FoundLHS,
const SCEV *FoundRHS) {
13085 ConstantRange FoundLHSRange =
13089 ConstantRange LHSRange = FoundLHSRange.
add(ConstantRange(*Addend));
13096 return LHSRange.
icmp(Pred, ConstRHS);
13099bool ScalarEvolution::canIVOverflowOnLT(
const SCEV *
RHS,
const SCEV *Stride,
13112 return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS);
13120 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS);
13123bool ScalarEvolution::canIVOverflowOnGT(
const SCEV *
RHS,
const SCEV *Stride,
13135 return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS);
13143 return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS);
13155const SCEV *ScalarEvolution::computeMaxBECountForLT(
const SCEV *Start,
13156 const SCEV *Stride,
13187 APInt Limit = MaxValue - (StrideForMaxBECount - 1);
13198 :
APIntOps::umax(MaxEnd, MinStart);
13205ScalarEvolution::howManyLessThans(
const SCEV *
LHS,
const SCEV *
RHS,
13206 const Loop *L,
bool IsSigned,
13207 bool ControlsOnlyExit,
bool AllowPredicates) {
13211 bool PredicatedIV =
false;
13216 auto canProveNUW = [&]() {
13219 if (!ControlsOnlyExit)
13240 Limit = Limit.
zext(OuterBitWidth);
13252 Type *Ty = ZExt->getType();
13263 if (!
IV && AllowPredicates) {
13268 PredicatedIV =
true;
13272 if (!
IV ||
IV->getLoop() != L || !
IV->isAffine())
13286 bool NoWrap = ControlsOnlyExit &&
IV->getNoWrapFlags(WrapType);
13289 const SCEV *Stride =
IV->getStepRecurrence(*
this);
13294 if (!PositiveStride) {
13346 auto wouldZeroStrideBeUB = [&]() {
13358 if (!wouldZeroStrideBeUB()) {
13362 }
else if (!NoWrap) {
13365 if (canIVOverflowOnLT(
RHS, Stride, IsSigned))
13378 const SCEV *
Start =
IV->getStart();
13384 const SCEV *OrigStart =
Start;
13385 const SCEV *OrigRHS =
RHS;
13386 if (
Start->getType()->isPointerTy()) {
13397 const SCEV *End =
nullptr, *BECount =
nullptr,
13398 *BECountIfBackedgeTaken =
nullptr;
13401 if (PositiveStride && RHSAddRec !=
nullptr && RHSAddRec->getLoop() == L &&
13402 RHSAddRec->getNoWrapFlags()) {
13415 const SCEV *RHSStart = RHSAddRec->getStart();
13416 const SCEV *RHSStride = RHSAddRec->getStepRecurrence(*
this);
13428 const SCEV *Denominator =
getMinusSCEV(Stride, RHSStride);
13437 BECountIfBackedgeTaken =
13442 if (BECount ==
nullptr) {
13447 const SCEV *MaxBECount = computeMaxBECountForLT(
13450 MaxBECount,
false , Predicates);
13457 auto *OrigStartMinusStride =
getMinusSCEV(OrigStart, Stride);
13484 const SCEV *Numerator =
13490 auto canProveRHSGreaterThanEqualStart = [&]() {
13509 auto *StartMinusOne =
13516 if (canProveRHSGreaterThanEqualStart()) {
13531 BECountIfBackedgeTaken =
13547 bool MayAddOverflow = [&] {
13593 if (Start == Stride || Start ==
getMinusSCEV(Stride, One)) {
13607 if (!MayAddOverflow) {
13619 const SCEV *ConstantMaxBECount;
13620 bool MaxOrZero =
false;
13622 ConstantMaxBECount = BECount;
13623 }
else if (BECountIfBackedgeTaken &&
13628 ConstantMaxBECount = BECountIfBackedgeTaken;
13631 ConstantMaxBECount = computeMaxBECountForLT(
13639 const SCEV *SymbolicMaxBECount =
13641 return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount, MaxOrZero,
13645ScalarEvolution::ExitLimit ScalarEvolution::howManyGreaterThans(
13646 const SCEV *
LHS,
const SCEV *
RHS,
const Loop *L,
bool IsSigned,
13647 bool ControlsOnlyExit,
bool AllowPredicates) {
13654 if (!
IV && AllowPredicates)
13661 if (!
IV ||
IV->getLoop() != L || !
IV->isAffine())
13665 bool NoWrap = ControlsOnlyExit &&
IV->getNoWrapFlags(WrapType);
13678 if (!Stride->
isOne() && !NoWrap)
13679 if (canIVOverflowOnGT(
RHS, Stride, IsSigned))
13682 const SCEV *
Start =
IV->getStart();
13683 const SCEV *End =
RHS;
13694 if (
Start->getType()->isPointerTy()) {
13729 const SCEV *ConstantMaxBECount =
13736 ConstantMaxBECount = BECount;
13737 const SCEV *SymbolicMaxBECount =
13740 return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount,
false,
13746 if (
Range.isFullSet())
13751 if (!SC->getValue()->isZero()) {
13757 return ShiftedAddRec->getNumIterationsInRange(
13758 Range.subtract(SC->getAPInt()), SE);
13789 APInt ExitVal = (End +
A).udiv(
A);
13802 ConstantInt::get(SE.
getContext(), ExitVal - 1), SE)->getValue()) &&
13803 "Linear scev computation is off in a bad way!");
13834 assert(!
Last->isZero() &&
"Recurrency with zero step?");
13859 Ty = Store->getValueOperand()->getType();
13861 Ty = Load->getType();
13874 assert(SE &&
"SCEVCallbackVH called with a null ScalarEvolution!");
13876 SE->ConstantEvolutionLoopExitValue.erase(PN);
13877 SE->eraseValueFromMap(getValPtr());
13881void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(
Value *V) {
13882 assert(SE &&
"SCEVCallbackVH called with a null ScalarEvolution!");
13892 : CallbackVH(
V), SE(se) {}
13901 : F(F), DL(F.
getDataLayout()), TLI(TLI), AC(AC), DT(DT), LI(LI),
13903 LoopDispositions(64), BlockDispositions(64) {
13915 F.getParent(), Intrinsic::experimental_guard);
13916 HasGuards = GuardDecl && !GuardDecl->use_empty();
13920 : F(Arg.F), DL(Arg.DL), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC),
13921 DT(Arg.DT), LI(Arg.LI), CouldNotCompute(
std::
move(Arg.CouldNotCompute)),
13922 ValueExprMap(
std::
move(Arg.ValueExprMap)),
13923 PendingLoopPredicates(
std::
move(Arg.PendingLoopPredicates)),
13924 PendingMerges(
std::
move(Arg.PendingMerges)),
13925 ConstantMultipleCache(
std::
move(Arg.ConstantMultipleCache)),
13926 BackedgeTakenCounts(
std::
move(Arg.BackedgeTakenCounts)),
13927 PredicatedBackedgeTakenCounts(
13928 std::
move(Arg.PredicatedBackedgeTakenCounts)),
13929 BECountUsers(
std::
move(Arg.BECountUsers)),
13930 ConstantEvolutionLoopExitValue(
13931 std::
move(Arg.ConstantEvolutionLoopExitValue)),
13932 ValuesAtScopes(
std::
move(Arg.ValuesAtScopes)),
13933 ValuesAtScopesUsers(
std::
move(Arg.ValuesAtScopesUsers)),
13934 LoopDispositions(
std::
move(Arg.LoopDispositions)),
13935 LoopPropertiesCache(
std::
move(Arg.LoopPropertiesCache)),
13936 BlockDispositions(
std::
move(Arg.BlockDispositions)),
13937 SCEVUsers(
std::
move(Arg.SCEVUsers)),
13938 UnsignedRanges(
std::
move(Arg.UnsignedRanges)),
13939 SignedRanges(
std::
move(Arg.SignedRanges)),
13940 UniqueSCEVs(
std::
move(Arg.UniqueSCEVs)),
13941 UniquePreds(
std::
move(Arg.UniquePreds)),
13942 SCEVAllocator(
std::
move(Arg.SCEVAllocator)),
13943 LoopUsers(
std::
move(Arg.LoopUsers)),
13944 PredicatedSCEVRewrites(
std::
move(Arg.PredicatedSCEVRewrites)),
13945 FirstUnknown(Arg.FirstUnknown) {
13946 Arg.FirstUnknown =
nullptr;
13955 Tmp->~SCEVUnknown();
13957 FirstUnknown =
nullptr;
13959 ExprValueMap.clear();
13960 ValueExprMap.clear();
13962 BackedgeTakenCounts.clear();
13963 PredicatedBackedgeTakenCounts.clear();
13965 assert(PendingLoopPredicates.empty() &&
"isImpliedCond garbage");
13966 assert(PendingMerges.empty() &&
"isImpliedViaMerge garbage");
13967 assert(!WalkingBEDominatingConds &&
"isLoopBackedgeGuardedByCond garbage!");
13968 assert(!ProvingSplitPredicate &&
"ProvingSplitPredicate garbage!");
13990 L->getHeader()->printAsOperand(OS,
false);
13994 L->getExitingBlocks(ExitingBlocks);
13995 if (ExitingBlocks.
size() != 1)
13996 OS <<
"<multiple exits> ";
14000 OS <<
"backedge-taken count is ";
14003 OS <<
"Unpredictable backedge-taken count.";
14006 if (ExitingBlocks.
size() > 1)
14007 for (
BasicBlock *ExitingBlock : ExitingBlocks) {
14008 OS <<
" exit count for " << ExitingBlock->
getName() <<
": ";
14016 OS <<
"\n predicated exit count for " << ExitingBlock->
getName()
14019 OS <<
"\n Predicates:\n";
14020 for (
const auto *
P : Predicates)
14028 L->getHeader()->printAsOperand(OS,
false);
14033 OS <<
"constant max backedge-taken count is ";
14036 OS <<
", actual taken count either this or zero.";
14038 OS <<
"Unpredictable constant max backedge-taken count. ";
14043 L->getHeader()->printAsOperand(OS,
false);
14048 OS <<
"symbolic max backedge-taken count is ";
14051 OS <<
", actual taken count either this or zero.";
14053 OS <<
"Unpredictable symbolic max backedge-taken count. ";
14057 if (ExitingBlocks.
size() > 1)
14058 for (
BasicBlock *ExitingBlock : ExitingBlocks) {
14059 OS <<
" symbolic max exit count for " << ExitingBlock->
getName() <<
": ";
14069 OS <<
"\n predicated symbolic max exit count for "
14070 << ExitingBlock->
getName() <<
": ";
14072 OS <<
"\n Predicates:\n";
14073 for (
const auto *
P : Predicates)
14083 assert(!Preds.
empty() &&
"Different predicated BTC, but no predicates");
14085 L->getHeader()->printAsOperand(OS,
false);
14088 OS <<
"Predicated backedge-taken count is ";
14091 OS <<
"Unpredictable predicated backedge-taken count.";
14093 OS <<
" Predicates:\n";
14094 for (
const auto *
P : Preds)
14099 auto *PredConstantMax =
14101 if (PredConstantMax != ConstantBTC) {
14103 "different predicated constant max BTC but no predicates");
14105 L->getHeader()->printAsOperand(OS,
false);
14108 OS <<
"Predicated constant max backedge-taken count is ";
14111 OS <<
"Unpredictable predicated constant max backedge-taken count.";
14113 OS <<
" Predicates:\n";
14114 for (
const auto *
P : Preds)
14119 auto *PredSymbolicMax =
14121 if (SymbolicBTC != PredSymbolicMax) {
14123 "Different predicated symbolic max BTC, but no predicates");
14125 L->getHeader()->printAsOperand(OS,
false);
14128 OS <<
"Predicated symbolic max backedge-taken count is ";
14131 OS <<
"Unpredictable predicated symbolic max backedge-taken count.";
14133 OS <<
" Predicates:\n";
14134 for (
const auto *
P : Preds)
14140 L->getHeader()->printAsOperand(OS,
false);
14164 OS <<
"Computable";
14174 OS <<
"DoesNotDominate";
14180 OS <<
"ProperlyDominates";
14197 OS <<
"Classifying expressions for: ";
14198 F.printAsOperand(OS,
false);
14213 const Loop *L = LI.getLoopFor(
I.getParent());
14228 OS <<
"\t\t" "Exits: ";
14231 OS <<
"<<Unknown>>";
14237 for (
const auto *Iter = L; Iter; Iter = Iter->getParentLoop()) {
14239 Iter->getHeader()->printAsOperand(OS,
false);
14247 InnerL->getHeader()->printAsOperand(OS,
false);
14258 OS <<
"Determining loop execution counts for: ";
14259 F.printAsOperand(OS,
false);
14267 auto &Values = LoopDispositions[S];
14268 for (
auto &V : Values) {
14269 if (V.getPointer() == L)
14274 auto &Values2 = LoopDispositions[S];
14276 if (V.getPointer() == L) {
14285ScalarEvolution::computeLoopDisposition(
const SCEV *S,
const Loop *L) {
14304 assert(!L->contains(AR->
getLoop()) &&
"Containing loop's header does not"
14305 " dominate the contained loop's header?");
14333 bool HasVarying =
false;
14367 auto &Values = BlockDispositions[S];
14368 for (
auto &V : Values) {
14369 if (V.getPointer() == BB)
14374 auto &Values2 = BlockDispositions[S];
14376 if (V.getPointer() == BB) {
14385ScalarEvolution::computeBlockDisposition(
const SCEV *S,
const BasicBlock *BB) {
14415 bool Proper =
true;
14426 if (Instruction *
I =
14428 if (
I->getParent() == BB)
14430 if (DT.properlyDominates(
I->getParent(), BB))
14453void ScalarEvolution::forgetBackedgeTakenCounts(
const Loop *L,
14456 Predicated ? PredicatedBackedgeTakenCounts : BackedgeTakenCounts;
14457 auto It = BECounts.find(L);
14458 if (It != BECounts.end()) {
14459 for (
const ExitNotTakenInfo &ENT : It->second.ExitNotTaken) {
14460 for (
const SCEV *S : {ENT.ExactNotTaken, ENT.SymbolicMaxNotTaken}) {
14462 auto UserIt = BECountUsers.find(S);
14463 assert(UserIt != BECountUsers.end());
14468 BECounts.erase(It);
14476 while (!Worklist.
empty()) {
14478 auto Users = SCEVUsers.find(Curr);
14479 if (
Users != SCEVUsers.end())
14480 for (
const auto *User :
Users->second)
14481 if (ToForget.
insert(User).second)
14485 for (
const auto *S : ToForget)
14486 forgetMemoizedResultsImpl(S);
14488 for (
auto I = PredicatedSCEVRewrites.begin();
14489 I != PredicatedSCEVRewrites.end();) {
14490 std::pair<const SCEV *, const Loop *>
Entry =
I->first;
14491 if (ToForget.count(
Entry.first))
14492 PredicatedSCEVRewrites.erase(
I++);
14498void ScalarEvolution::forgetMemoizedResultsImpl(
const SCEV *S) {
14499 LoopDispositions.erase(S);
14500 BlockDispositions.erase(S);
14501 UnsignedRanges.erase(S);
14502 SignedRanges.erase(S);
14503 HasRecMap.erase(S);
14504 ConstantMultipleCache.erase(S);
14507 UnsignedWrapViaInductionTried.erase(AR);
14508 SignedWrapViaInductionTried.erase(AR);
14511 auto ExprIt = ExprValueMap.find(S);
14512 if (ExprIt != ExprValueMap.end()) {
14513 for (
Value *V : ExprIt->second) {
14514 auto ValueIt = ValueExprMap.find_as(V);
14515 if (ValueIt != ValueExprMap.end())
14516 ValueExprMap.erase(ValueIt);
14518 ExprValueMap.erase(ExprIt);
14521 auto ScopeIt = ValuesAtScopes.find(S);
14522 if (ScopeIt != ValuesAtScopes.end()) {
14523 for (
const auto &Pair : ScopeIt->second)
14526 std::make_pair(Pair.first, S));
14527 ValuesAtScopes.erase(ScopeIt);
14530 auto ScopeUserIt = ValuesAtScopesUsers.find(S);
14531 if (ScopeUserIt != ValuesAtScopesUsers.end()) {
14532 for (
const auto &Pair : ScopeUserIt->second)
14533 llvm::erase(ValuesAtScopes[Pair.second], std::make_pair(Pair.first, S));
14534 ValuesAtScopesUsers.erase(ScopeUserIt);
14537 auto BEUsersIt = BECountUsers.find(S);
14538 if (BEUsersIt != BECountUsers.end()) {
14540 auto Copy = BEUsersIt->second;
14541 for (
const auto &Pair : Copy)
14542 forgetBackedgeTakenCounts(Pair.getPointer(), Pair.getInt());
14543 BECountUsers.erase(BEUsersIt);
14546 auto FoldUser = FoldCacheUser.find(S);
14547 if (FoldUser != FoldCacheUser.end())
14548 for (
auto &KV : FoldUser->second)
14549 FoldCache.erase(KV);
14550 FoldCacheUser.erase(S);
14554ScalarEvolution::getUsedLoops(
const SCEV *S,
14556 struct FindUsedLoops {
14557 FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed)
14558 : LoopsUsed(LoopsUsed) {}
14559 SmallPtrSetImpl<const Loop *> &LoopsUsed;
14560 bool follow(
const SCEV *S) {
14566 bool isDone()
const {
return false; }
14569 FindUsedLoops
F(LoopsUsed);
14570 SCEVTraversal<FindUsedLoops>(F).visitAll(S);
14573void ScalarEvolution::getReachableBlocks(
14576 Worklist.
push_back(&F.getEntryBlock());
14577 while (!Worklist.
empty()) {
14579 if (!Reachable.
insert(BB).second)
14587 Worklist.
push_back(
C->isOne() ? TrueBB : FalseBB);
14594 if (isKnownPredicateViaConstantRanges(
Cmp->getCmpPredicate(), L, R)) {
14598 if (isKnownPredicateViaConstantRanges(
Cmp->getInverseCmpPredicate(), L,
14633 SCEVMapper SCM(SE2);
14635 SE2.getReachableBlocks(ReachableBlocks, F);
14637 auto GetDelta = [&](
const SCEV *Old,
const SCEV *New) ->
const SCEV * {
14655 while (!LoopStack.
empty()) {
14661 if (!ReachableBlocks.
contains(L->getHeader()))
14666 auto It = BackedgeTakenCounts.find(L);
14667 if (It == BackedgeTakenCounts.end())
14671 SCM.visit(It->second.getExact(L,
const_cast<ScalarEvolution *
>(
this)));
14691 const SCEV *Delta = GetDelta(CurBECount, NewBECount);
14692 if (Delta && !Delta->
isZero()) {
14693 dbgs() <<
"Trip Count for " << *L <<
" Changed!\n";
14694 dbgs() <<
"Old: " << *CurBECount <<
"\n";
14695 dbgs() <<
"New: " << *NewBECount <<
"\n";
14696 dbgs() <<
"Delta: " << *Delta <<
"\n";
14704 while (!Worklist.
empty()) {
14706 if (ValidLoops.
insert(L).second)
14707 Worklist.
append(L->begin(), L->end());
14709 for (
const auto &KV : ValueExprMap) {
14714 "AddRec references invalid loop");
14719 auto It = ExprValueMap.find(KV.second);
14720 if (It == ExprValueMap.end() || !It->second.contains(KV.first)) {
14721 dbgs() <<
"Value " << *KV.first
14722 <<
" is in ValueExprMap but not in ExprValueMap\n";
14727 if (!ReachableBlocks.
contains(
I->getParent()))
14729 const SCEV *OldSCEV = SCM.visit(KV.second);
14731 const SCEV *Delta = GetDelta(OldSCEV, NewSCEV);
14732 if (Delta && !Delta->
isZero()) {
14733 dbgs() <<
"SCEV for value " << *
I <<
" changed!\n"
14734 <<
"Old: " << *OldSCEV <<
"\n"
14735 <<
"New: " << *NewSCEV <<
"\n"
14736 <<
"Delta: " << *Delta <<
"\n";
14742 for (
const auto &KV : ExprValueMap) {
14743 for (
Value *V : KV.second) {
14744 const SCEV *S = ValueExprMap.lookup(V);
14746 dbgs() <<
"Value " << *V
14747 <<
" is in ExprValueMap but not in ValueExprMap\n";
14750 if (S != KV.first) {
14751 dbgs() <<
"Value " << *V <<
" mapped to " << *S <<
" rather than "
14752 << *KV.first <<
"\n";
14759 for (
const auto &S : UniqueSCEVs) {
14764 auto It = SCEVUsers.find(
Op);
14765 if (It != SCEVUsers.end() && It->second.count(&S))
14767 dbgs() <<
"Use of operand " << *
Op <<
" by user " << S
14768 <<
" is not being tracked!\n";
14774 for (
const auto &ValueAndVec : ValuesAtScopes) {
14776 for (
const auto &LoopAndValueAtScope : ValueAndVec.second) {
14777 const Loop *L = LoopAndValueAtScope.first;
14778 const SCEV *ValueAtScope = LoopAndValueAtScope.second;
14780 auto It = ValuesAtScopesUsers.find(ValueAtScope);
14781 if (It != ValuesAtScopesUsers.end() &&
14784 dbgs() <<
"Value: " << *
Value <<
", Loop: " << *L <<
", ValueAtScope: "
14785 << *ValueAtScope <<
" missing in ValuesAtScopesUsers\n";
14791 for (
const auto &ValueAtScopeAndVec : ValuesAtScopesUsers) {
14792 const SCEV *ValueAtScope = ValueAtScopeAndVec.first;
14793 for (
const auto &LoopAndValue : ValueAtScopeAndVec.second) {
14794 const Loop *L = LoopAndValue.first;
14795 const SCEV *
Value = LoopAndValue.second;
14797 auto It = ValuesAtScopes.find(
Value);
14798 if (It != ValuesAtScopes.end() &&
14799 is_contained(It->second, std::make_pair(L, ValueAtScope)))
14801 dbgs() <<
"Value: " << *
Value <<
", Loop: " << *L <<
", ValueAtScope: "
14802 << *ValueAtScope <<
" missing in ValuesAtScopes\n";
14808 auto VerifyBECountUsers = [&](
bool Predicated) {
14810 Predicated ? PredicatedBackedgeTakenCounts : BackedgeTakenCounts;
14811 for (
const auto &LoopAndBEInfo : BECounts) {
14812 for (
const ExitNotTakenInfo &ENT : LoopAndBEInfo.second.ExitNotTaken) {
14813 for (
const SCEV *S : {ENT.ExactNotTaken, ENT.SymbolicMaxNotTaken}) {
14815 auto UserIt = BECountUsers.find(S);
14816 if (UserIt != BECountUsers.end() &&
14817 UserIt->second.contains({ LoopAndBEInfo.first, Predicated }))
14819 dbgs() <<
"Value " << *S <<
" for loop " << *LoopAndBEInfo.first
14820 <<
" missing from BECountUsers\n";
14827 VerifyBECountUsers(
false);
14828 VerifyBECountUsers(
true);
14831 for (
auto &[S, Values] : LoopDispositions) {
14832 for (
auto [
Loop, CachedDisposition] : Values) {
14834 if (CachedDisposition != RecomputedDisposition) {
14835 dbgs() <<
"Cached disposition of " << *S <<
" for loop " << *
Loop
14836 <<
" is incorrect: cached " << CachedDisposition <<
", actual "
14837 << RecomputedDisposition <<
"\n";
14844 for (
auto &[S, Values] : BlockDispositions) {
14845 for (
auto [BB, CachedDisposition] : Values) {
14847 if (CachedDisposition != RecomputedDisposition) {
14848 dbgs() <<
"Cached disposition of " << *S <<
" for block %"
14849 << BB->
getName() <<
" is incorrect: cached " << CachedDisposition
14850 <<
", actual " << RecomputedDisposition <<
"\n";
14857 for (
auto [
FoldID, Expr] : FoldCache) {
14858 auto I = FoldCacheUser.find(Expr);
14859 if (
I == FoldCacheUser.end()) {
14860 dbgs() <<
"Missing entry in FoldCacheUser for cached expression " << *Expr
14865 dbgs() <<
"Missing FoldID in cached users of " << *Expr <<
"!\n";
14869 for (
auto [Expr, IDs] : FoldCacheUser) {
14870 for (
auto &
FoldID : IDs) {
14873 dbgs() <<
"Missing entry in FoldCache for expression " << *Expr
14878 dbgs() <<
"Entry in FoldCache doesn't match FoldCacheUser: " << *S
14879 <<
" != " << *Expr <<
"!\n";
14890 for (
auto [S, Multiple] : ConstantMultipleCache) {
14892 if ((Multiple != 0 && RecomputedMultiple != 0 &&
14893 Multiple.
urem(RecomputedMultiple) != 0 &&
14894 RecomputedMultiple.
urem(Multiple) != 0)) {
14895 dbgs() <<
"Incorrect cached computation in ConstantMultipleCache for "
14896 << *S <<
" : Computed " << RecomputedMultiple
14897 <<
" but cache contains " << Multiple <<
"!\n";
14905 FunctionAnalysisManager::Invalidator &Inv) {
14937 OS <<
"Printing analysis 'Scalar Evolution Analysis' for function '"
14938 <<
F.getName() <<
"':\n";
14944 "Scalar Evolution Analysis",
false,
true)
14993 const SCEV *LHS,
const SCEV *RHS) {
14995 assert(LHS->getType() == RHS->getType() &&
14996 "Type mismatch between LHS and RHS");
14999 ID.AddInteger(Pred);
15000 ID.AddPointer(LHS);
15001 ID.AddPointer(RHS);
15002 void *IP =
nullptr;
15003 if (
const auto *S = UniquePreds.FindNodeOrInsertPos(
ID, IP))
15007 UniquePreds.InsertNode(Eq, IP);
15018 ID.AddInteger(AddedFlags);
15019 void *IP =
nullptr;
15020 if (
const auto *S = UniquePreds.FindNodeOrInsertPos(
ID, IP))
15022 auto *OF =
new (SCEVAllocator)
15024 UniquePreds.InsertNode(OF, IP);
15044 SCEVPredicateRewriter
Rewriter(L, SE, NewPreds, Pred);
15045 return Rewriter.visit(S);
15051 for (
const auto *Pred : U->getPredicates())
15053 if (IPred->getLHS() == Expr &&
15055 return IPred->getRHS();
15057 if (IPred->getLHS() == Expr &&
15058 IPred->getPredicate() == ICmpInst::ICMP_EQ)
15059 return IPred->getRHS();
15062 return convertToAddRecWithPreds(Expr);
15065 const SCEV *visitZeroExtendExpr(
const SCEVZeroExtendExpr *Expr) {
15081 const SCEV *visitSignExtendExpr(
const SCEVSignExtendExpr *Expr) {
15098 explicit SCEVPredicateRewriter(
15099 const Loop *L, ScalarEvolution &SE,
15100 SmallVectorImpl<const SCEVPredicate *> *NewPreds,
15101 const SCEVPredicate *Pred)
15102 : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred),
L(
L) {}
15104 bool addOverflowAssumption(
const SCEVPredicate *
P) {
15107 return Pred && Pred->
implies(
P, SE);
15113 bool addOverflowAssumption(
const SCEVAddRecExpr *AR,
15116 return addOverflowAssumption(
A);
15125 const SCEV *convertToAddRecWithPreds(
const SCEVUnknown *Expr) {
15129 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
15131 if (!PredicatedRewrite)
15133 for (
const auto *
P : PredicatedRewrite->second){
15136 if (L != WP->getExpr()->getLoop())
15139 if (!addOverflowAssumption(
P))
15142 return PredicatedRewrite->first;
15145 SmallVectorImpl<const SCEVPredicate *> *NewPreds;
15146 const SCEVPredicate *Pred;
15155 return SCEVPredicateRewriter::rewrite(S, L, *
this,
nullptr, &Preds);
15162 S = SCEVPredicateRewriter::rewrite(S, L, *
this, &TransformPreds,
nullptr);
15182 if (!Step->
isOne())
15207 assert(LHS->getType() == RHS->getType() &&
"LHS and RHS types don't match");
15208 assert(LHS != RHS &&
"LHS and RHS are the same SCEV");
15221 return Op->LHS == LHS &&
Op->RHS == RHS;
15228 OS.
indent(
Depth) <<
"Equal predicate: " << *LHS <<
" == " << *RHS <<
"\n";
15230 OS.
indent(
Depth) <<
"Compare predicate: " << *LHS <<
" " << Pred <<
") "
15255 const SCEV *Start = AR->getStart();
15256 const SCEV *OpStart =
Op->AR->getStart();
15261 if (Start->getType()->isPointerTy() && Start->getType() != OpStart->
getType())
15264 const SCEV *Step = AR->getStepRecurrence(SE);
15265 const SCEV *OpStep =
Op->AR->getStepRecurrence(SE);
15318 if (Step->getValue()->getValue().isNonNegative())
15322 return ImpliedFlags;
15329 for (
const auto *
P : Preds)
15342 return this->implies(I, SE);
15350 for (
const auto *Pred : Preds)
15351 Pred->print(OS,
Depth);
15356 for (
const auto *Pred : Set->Preds)
15364 bool CheckImplies = Preds.
size() < 16;
15367 if (CheckImplies &&
implies(
N, SE))
15373 for (
auto *
P : Preds) {
15374 if (CheckImplies &&
N->implies(
P, SE))
15378 Preds = std::move(PrunedPreds);
15379 Preds.push_back(
N);
15386 Preds = std::make_unique<SCEVUnionPredicate>(
Empty, SE);
15391 for (
const auto *
Op :
Ops)
15396 SCEVUsers[
Op].insert(
User);
15405 SCEVUsers[
Op].insert(
User);
15409 const SCEV *Expr = SE.getSCEV(V);
15414 RewriteEntry &Entry = RewriteMap[Expr];
15417 if (Entry.second && Generation == Entry.first)
15418 return Entry.second;
15423 Expr = Entry.second;
15425 const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, *Preds);
15426 Entry = {Generation, NewSCEV};
15432 if (!BackedgeCount) {
15434 BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, Preds);
15435 for (
const auto *
P : Preds)
15438 return BackedgeCount;
15442 if (!SymbolicMaxBackedgeCount) {
15444 SymbolicMaxBackedgeCount =
15445 SE.getPredicatedSymbolicMaxBackedgeTakenCount(&L, Preds);
15446 for (
const auto *
P : Preds)
15449 return SymbolicMaxBackedgeCount;
15453 if (!SmallConstantMaxTripCount) {
15455 SmallConstantMaxTripCount = SE.getSmallConstantMaxTripCount(&L, &Preds);
15456 for (
const auto *
P : Preds)
15459 return *SmallConstantMaxTripCount;
15463 if (Preds->implies(&Pred, SE))
15468 Preds = std::make_unique<SCEVUnionPredicate>(NewPreds, SE);
15469 updateGeneration();
15476void PredicatedScalarEvolution::updateGeneration() {
15478 if (++Generation == 0) {
15479 for (
auto &
II : RewriteMap) {
15480 const SCEV *Rewritten =
II.second.second;
15497 auto II = FlagsMap.insert({V, Flags});
15510 auto II = FlagsMap.find(V);
15512 if (
II != FlagsMap.end())
15521 auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds);
15526 for (
const auto *
P : NewPreds)
15529 RewriteMap[SE.getSCEV(V)] = {Generation, New};
15535 : RewriteMap(
Init.RewriteMap), SE(
Init.SE), L(
Init.L),
15538 Generation(
Init.Generation), BackedgeCount(
Init.BackedgeCount) {
15539 for (
auto I :
Init.FlagsMap)
15540 FlagsMap.insert(
I);
15545 for (
auto *BB : L.getBlocks())
15546 for (
auto &
I : *BB) {
15547 if (!SE.isSCEVable(
I.getType()))
15550 auto *Expr = SE.getSCEV(&
I);
15551 auto II = RewriteMap.find(Expr);
15553 if (
II == RewriteMap.end())
15557 if (
II->second.second == Expr)
15562 OS.
indent(
Depth + 2) <<
"--> " << *
II->second.second <<
"\n";
15570 LoopGuards Guards(SE);
15578void ScalarEvolution::LoopGuards::collectFromPHI(
15586 using MinMaxPattern = std::pair<const SCEVConstant *, SCEVTypes>;
15587 auto GetMinMaxConst = [&](
unsigned IncomingIdx) -> MinMaxPattern {
15601 auto &RewriteMap =
G->second.RewriteMap;
15602 if (RewriteMap.empty())
15604 auto S = RewriteMap.find(SE.
getSCEV(
Phi.getIncomingValue(IncomingIdx)));
15605 if (S == RewriteMap.end())
15611 return {C0,
SM->getSCEVType()};
15614 auto MergeMinMaxConst = [](MinMaxPattern
P1,
15615 MinMaxPattern
P2) -> MinMaxPattern {
15616 auto [C1,
T1] =
P1;
15617 auto [C2, T2] =
P2;
15618 if (!C1 || !C2 ||
T1 != T2)
15622 return {C1->getAPInt().
ult(C2->getAPInt()) ? C1 : C2,
T1};
15624 return {C1->getAPInt().
slt(C2->getAPInt()) ? C1 : C2,
T1};
15626 return {C1->getAPInt().
ugt(C2->getAPInt()) ? C1 : C2,
T1};
15628 return {C1->getAPInt().
sgt(C2->getAPInt()) ? C1 : C2,
T1};
15633 auto P = GetMinMaxConst(0);
15634 for (
unsigned int In = 1;
In <
Phi.getNumIncomingValues();
In++) {
15637 P = MergeMinMaxConst(
P, GetMinMaxConst(In));
15640 const SCEV *
LHS = SE.
getSCEV(
const_cast<PHINode *
>(&Phi));
15643 Guards.RewriteMap.insert({
LHS,
RHS});
15651 const APInt &DivisorVal,
15653 const APInt *ExprVal;
15666 const APInt &DivisorVal,
15668 const APInt *ExprVal;
15676 return SE.
getConstant(*ExprVal + DivisorVal - Rem);
15690 const SCEV *URemRHS =
nullptr;
15694 const SCEV *Multiple =
15696 DivInfo[URemLHS] = Multiple;
15698 Multiples[URemLHS] =
C->getAPInt();
15718 auto IsMinMaxSCEVWithNonNegativeConstant =
15722 if (
MinMax->getNumOperands() != 2)
15725 if (
C->getAPInt().isNegative())
15727 SCTy =
MinMax->getSCEVType();
15736 const SCEV *MinMaxLHS =
nullptr, *MinMaxRHS =
nullptr;
15738 if (!IsMinMaxSCEVWithNonNegativeConstant(MinMaxExpr, SCTy, MinMaxLHS,
15743 auto *DivisibleExpr =
15751void ScalarEvolution::LoopGuards::collectFromBlock(
15753 const BasicBlock *
Block,
const BasicBlock *Pred,
15761 DenseMap<const SCEV *, const SCEV *> &RewriteMap,
15772 &ExprsToRewrite]() {
15773 const SCEVConstant *C1;
15786 if (ExactRegion.isWrappedSet() || ExactRegion.isFullSet())
15788 auto [
I,
Inserted] = RewriteMap.try_emplace(LHSUnknown);
15789 const SCEV *RewrittenLHS =
Inserted ? LHSUnknown :
I->second;
15797 if (MatchRangeCheckIdiom())
15814 auto AddRewrite = [&](
const SCEV *From,
const SCEV *FromRewritten,
15816 if (From == FromRewritten)
15818 RewriteMap[From] = To;
15824 auto GetMaybeRewritten = [&](
const SCEV *S) {
15825 return RewriteMap.lookup_or(S, S);
15828 const SCEV *RewrittenLHS = GetMaybeRewritten(
LHS);
15830 const APInt &DividesBy =
15845 switch (Predicate) {
15874 SmallPtrSet<const SCEV *, 16> Visited;
15876 auto EnqueueOperands = [&Worklist](
const SCEVNAryExpr *S) {
15880 while (!Worklist.
empty()) {
15884 if (!Visited.
insert(From).second)
15886 const SCEV *FromRewritten = GetMaybeRewritten(From);
15887 const SCEV *To =
nullptr;
15889 switch (Predicate) {
15894 EnqueueOperands(
UMax);
15900 EnqueueOperands(
SMax);
15906 EnqueueOperands(
UMin);
15912 EnqueueOperands(
SMin);
15920 const SCEV *OneAlignedUp =
15922 To = SE.
getUMaxExpr(FromRewritten, OneAlignedUp);
15934 const SCEVConstant *
C;
15943 Guards.NotEqual.insert({
LHS,
RHS});
15952 AddRewrite(From, FromRewritten, To);
15969 SE.F.
getParent(), Intrinsic::experimental_guard);
15971 for (
const auto *GU : GuardDecl->users())
15973 if (Guard->getFunction() ==
Block->getParent() &&
15982 unsigned NumCollectedConditions = 0;
15984 std::pair<const BasicBlock *, const BasicBlock *> Pair(Pred,
Block);
15986 Pair = SE.getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
15988 const CondBrInst *LoopEntryPredicate =
15990 if (!LoopEntryPredicate)
15995 NumCollectedConditions++;
15999 if (
Depth > 0 && NumCollectedConditions == 2)
16007 if (Pair.second->hasNPredecessorsOrMore(2) &&
16009 SmallDenseMap<const BasicBlock *, LoopGuards> IncomingGuards;
16010 for (
auto &Phi : Pair.second->phis())
16021 for (
auto [Term, EnterIfTrue] :
reverse(Terms)) {
16022 SmallVector<Value *, 8> Worklist;
16023 SmallPtrSet<Value *, 8> Visited;
16025 while (!Worklist.
empty()) {
16032 EnterIfTrue ?
Cmp->getPredicate() :
Cmp->getInversePredicate();
16056 DenseMap<const SCEV *, APInt> Multiples;
16058 for (
const auto &[Predicate,
LHS,
RHS] : GuardsToProcess) {
16065 for (
const auto &[Predicate,
LHS,
RHS] : GuardsToProcess)
16066 CollectCondition(Predicate,
LHS,
RHS, Guards.RewriteMap, DivGuards);
16070 for (
const auto &[K, Divisor] : Multiples) {
16071 const SCEV *DivisorSCEV = SE.
getConstant(Divisor);
16072 Guards.RewriteMap[
K] =
16074 Guards.
rewrite(K), Divisor, SE),
16083 Guards.PreserveNUW =
true;
16084 Guards.PreserveNSW =
true;
16085 for (
const SCEV *Expr : ExprsToRewrite) {
16086 const SCEV *RewriteTo = Guards.RewriteMap[Expr];
16087 Guards.PreserveNUW &=
16089 Guards.PreserveNSW &=
16096 if (ExprsToRewrite.size() > 1) {
16097 for (
const SCEV *Expr : ExprsToRewrite) {
16098 const SCEV *RewriteTo = Guards.RewriteMap[Expr];
16099 Guards.RewriteMap.erase(Expr);
16100 Guards.RewriteMap.insert({Expr, Guards.
rewrite(RewriteTo)});
16109 class SCEVLoopGuardRewriter
16120 NotEqual(Guards.NotEqual) {
16121 if (Guards.PreserveNUW)
16123 if (Guards.PreserveNSW)
16130 return Map.lookup_or(Expr, Expr);
16134 if (
const SCEV *S = Map.lookup(Expr))
16141 unsigned Bitwidth = Ty->getScalarSizeInBits() / 2;
16142 while (Bitwidth % 8 == 0 && Bitwidth >= 8 &&
16143 Bitwidth >
Op->getType()->getScalarSizeInBits()) {
16145 auto *NarrowExt = SE.getZeroExtendExpr(
Op, NarrowTy);
16146 if (
const SCEV *S = Map.lookup(NarrowExt))
16147 return SE.getZeroExtendExpr(S, Ty);
16148 Bitwidth = Bitwidth / 2;
16156 if (
const SCEV *S = Map.lookup(Expr))
16163 if (
const SCEV *S = Map.lookup(Expr))
16169 if (
const SCEV *S = Map.lookup(Expr))
16177 auto RewriteSubtraction = [&](
const SCEV *S) ->
const SCEV * {
16182 if (NotEqual.contains({LHS, RHS})) {
16184 SE.getOne(S->
getType()), SE.getConstantMultiple(S), SE);
16185 return SE.getUMaxExpr(OneAlignedUp, S);
16192 if (
const SCEV *Rewritten = RewriteSubtraction(Expr))
16203 if (
const SCEV *Rewritten = RewriteSubtraction(
Add))
16204 return SE.getAddExpr(
16207 if (
const SCEV *S = Map.lookup(
Add))
16208 return SE.getAddExpr(Expr->
getOperand(0), S);
16220 : SE.getAddExpr(Operands,
16236 : SE.getMulExpr(Operands,
16242 if (RewriteMap.empty() && NotEqual.empty())
16245 SCEVLoopGuardRewriter
Rewriter(SE, *
this);
16246 return Rewriter.visit(Expr);
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
SmallPtrSet< const BasicBlock *, 8 > VisitedBlocks
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
This file defines a hash set that can be used to remove duplication of nodes in a graph.
Value * getPointer(Value *Ptr)
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This defines the Use class.
iv Induction Variable Users
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
static constexpr unsigned SM(unsigned Version)
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
PowerPC Reduce CR logical Operation
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
const SmallVectorImpl< MachineOperand > & Cond
static DominatorTree getDomTree(Function &F)
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
SI optimize exec mask operations pre RA
static void visit(BasicBlock &Start, std::function< bool(BasicBlock *)> op)
This file provides utility classes that use RAII to save and restore values.
bool SCEVMinMaxExprContains(const SCEV *Root, const SCEV *OperandToFind, SCEVTypes RootKind)
static cl::opt< unsigned > MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden, cl::desc("Max coefficients in AddRec during evolving"), cl::init(8))
static cl::opt< unsigned > RangeIterThreshold("scev-range-iter-threshold", cl::Hidden, cl::desc("Threshold for switching to iteratively computing SCEV ranges"), cl::init(32))
static const Loop * isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI)
static unsigned getConstantTripCount(const SCEVConstant *ExitCount)
static int CompareValueComplexity(const LoopInfo *const LI, Value *LV, Value *RV, unsigned Depth)
Compare the two values LV and RV in terms of their "complexity" where "complexity" is a partial (and ...
static const SCEV * getNextSCEVDivisibleByDivisor(const SCEV *Expr, const APInt &DivisorVal, ScalarEvolution &SE)
static void PushLoopPHIs(const Loop *L, SmallVectorImpl< Instruction * > &Worklist, SmallPtrSetImpl< Instruction * > &Visited)
Push PHI nodes in the header of the given loop onto the given Worklist.
static void insertFoldCacheEntry(const ScalarEvolution::FoldID &ID, const SCEV *S, DenseMap< ScalarEvolution::FoldID, const SCEV * > &FoldCache, DenseMap< const SCEV *, SmallVector< ScalarEvolution::FoldID, 2 > > &FoldCacheUser)
static cl::opt< bool > ClassifyExpressions("scalar-evolution-classify-expressions", cl::Hidden, cl::init(true), cl::desc("When printing analysis, include information on every instruction"))
static bool hasHugeExpression(ArrayRef< SCEVUse > Ops)
Returns true if Ops contains a huge SCEV (the subtree of S contains at least HugeExprThreshold nodes)...
static bool CanConstantFold(const Instruction *I)
Return true if we can constant fold an instruction of the specified type, assuming that all operands ...
static cl::opt< unsigned > AddOpsInlineThreshold("scev-addops-inline-threshold", cl::Hidden, cl::desc("Threshold for inlining addition operands into a SCEV"), cl::init(500))
static cl::opt< unsigned > MaxLoopGuardCollectionDepth("scalar-evolution-max-loop-guard-collection-depth", cl::Hidden, cl::desc("Maximum depth for recursive loop guard collection"), cl::init(1))
static cl::opt< bool > VerifyIR("scev-verify-ir", cl::Hidden, cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"), cl::init(false))
static bool RangeRefPHIAllowedOperands(DominatorTree &DT, PHINode *PHI)
static const SCEV * getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty, ScalarEvolution *SE, unsigned Depth)
static std::optional< APInt > MinOptional(std::optional< APInt > X, std::optional< APInt > Y)
Helper function to compare optional APInts: (a) if X and Y both exist, return min(X,...
static cl::opt< unsigned > MulOpsInlineThreshold("scev-mulops-inline-threshold", cl::Hidden, cl::desc("Threshold for inlining multiplication operands into a SCEV"), cl::init(32))
static BinaryOperator * getCommonInstForPHI(PHINode *PN)
static bool isDivisibilityGuard(const SCEV *LHS, const SCEV *RHS, ScalarEvolution &SE)
static std::optional< const SCEV * > createNodeForSelectViaUMinSeq(ScalarEvolution *SE, const SCEV *CondExpr, const SCEV *TrueExpr, const SCEV *FalseExpr)
static Constant * BuildConstantFromSCEV(const SCEV *V)
This builds up a Constant using the ConstantExpr interface.
static ConstantInt * EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, ScalarEvolution &SE)
static const SCEV * BinomialCoefficient(const SCEV *It, unsigned K, ScalarEvolution &SE, Type *ResultTy)
Compute BC(It, K). The result has width W. Assume, K > 0.
static cl::opt< unsigned > MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden, cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"), cl::init(8))
static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr, const SCEV *Candidate)
Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values?
static PHINode * getConstantEvolvingPHI(Value *V, const Loop *L)
getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node in the loop that V is deri...
static const SCEV * SolveLinEquationWithOverflow(const APInt &A, const SCEV *B, SmallVectorImpl< const SCEVPredicate * > *Predicates, ScalarEvolution &SE, const Loop *L)
Finds the minimum unsigned root of the following equation:
static cl::opt< unsigned > MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden, cl::desc("Maximum number of iterations SCEV will " "symbolically execute a constant " "derived loop"), cl::init(100))
static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow)
static void PrintSCEVWithTypeHint(raw_ostream &OS, const SCEV *S)
When printing a top-level SCEV for trip counts, it's helpful to include a type for constants which ar...
static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, const Loop *L)
static SCEV::NoWrapFlags StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type, ArrayRef< SCEVUse > Ops, SCEV::NoWrapFlags Flags)
static bool containsConstantInAddMulChain(const SCEV *StartExpr)
Determine if any of the operands in this SCEV are a constant or if any of the add or multiply express...
static const SCEV * getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty, ScalarEvolution *SE, unsigned Depth)
static bool CollectAddOperandsWithScales(SmallDenseMap< SCEVUse, APInt, 16 > &M, SmallVectorImpl< SCEVUse > &NewOps, APInt &AccumulatedConstant, ArrayRef< SCEVUse > Ops, const APInt &Scale, ScalarEvolution &SE)
Process the given Ops list, which is a list of operands to be added under the given scale,...
static const SCEV * constantFoldAndGroupOps(ScalarEvolution &SE, LoopInfo &LI, DominatorTree &DT, SmallVectorImpl< SCEVUse > &Ops, FoldT Fold, IsIdentityT IsIdentity, IsAbsorberT IsAbsorber)
Performs a number of common optimizations on the passed Ops.
static cl::opt< unsigned > MaxPhiSCCAnalysisSize("scalar-evolution-max-scc-analysis-depth", cl::Hidden, cl::desc("Maximum amount of nodes to process while searching SCEVUnknown " "Phi strongly connected components"), cl::init(8))
static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
static void GroupByComplexity(SmallVectorImpl< SCEVUse > &Ops, LoopInfo *LI, DominatorTree &DT)
Given a list of SCEV objects, order them by their complexity, and group objects of the same complexit...
static bool collectDivisibilityInformation(ICmpInst::Predicate Predicate, const SCEV *LHS, const SCEV *RHS, DenseMap< const SCEV *, const SCEV * > &DivInfo, DenseMap< const SCEV *, APInt > &Multiples, ScalarEvolution &SE)
static cl::opt< unsigned > MaxSCEVOperationsImplicationDepth("scalar-evolution-max-scev-operations-implication-depth", cl::Hidden, cl::desc("Maximum depth of recursive SCEV operations implication analysis"), cl::init(2))
static void PushDefUseChildren(Instruction *I, SmallVectorImpl< Instruction * > &Worklist, SmallPtrSetImpl< Instruction * > &Visited)
Push users of the given Instruction onto the given Worklist.
static std::optional< APInt > SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec, const ConstantRange &Range, ScalarEvolution &SE)
Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n iterations.
static cl::opt< bool > UseContextForNoWrapFlagInference("scalar-evolution-use-context-for-no-wrap-flag-strenghening", cl::Hidden, cl::desc("Infer nuw/nsw flags using context where suitable"), cl::init(true))
static cl::opt< bool > EnableFiniteLoopControl("scalar-evolution-finite-loop", cl::Hidden, cl::desc("Handle <= and >= in finite loops"), cl::init(true))
static bool getOperandsForSelectLikePHI(DominatorTree &DT, PHINode *PN, Value *&Cond, Value *&LHS, Value *&RHS)
static std::optional< std::tuple< APInt, APInt, APInt, APInt, unsigned > > GetQuadraticEquation(const SCEVAddRecExpr *AddRec)
For a given quadratic addrec, generate coefficients of the corresponding quadratic equation,...
static bool isKnownPredicateExtendIdiom(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
static std::optional< BinaryOp > MatchBinaryOp(Value *V, const DataLayout &DL, AssumptionCache &AC, const DominatorTree &DT, const Instruction *CxtI)
Try to map V into a BinaryOp, and return std::nullopt on failure.
static std::optional< APInt > SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE)
Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n iterations.
static std::optional< APInt > TruncIfPossible(std::optional< APInt > X, unsigned BitWidth)
Helper function to truncate an optional APInt to a given BitWidth.
static cl::opt< unsigned > MaxSCEVCompareDepth("scalar-evolution-max-scev-compare-depth", cl::Hidden, cl::desc("Maximum depth of recursive SCEV complexity comparisons"), cl::init(32))
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE, const SCEVConstant *ConstantTerm, const SCEVAddExpr *WholeAddExpr)
static cl::opt< unsigned > MaxConstantEvolvingDepth("scalar-evolution-max-constant-evolving-depth", cl::Hidden, cl::desc("Maximum depth of recursive constant evolving"), cl::init(32))
static ConstantRange getRangeForAffineARHelper(APInt Step, const ConstantRange &StartRange, const APInt &MaxBECount, bool Signed)
static bool MatchBinarySub(const SCEV *S, SCEVUse &LHS, SCEVUse &RHS)
static std::optional< ConstantRange > GetRangeFromMetadata(Value *V)
Helper method to assign a range to V from metadata present in the IR.
static cl::opt< unsigned > HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden, cl::desc("Size of the expression which is considered huge"), cl::init(4096))
static Type * isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI, bool &Signed, ScalarEvolution &SE)
Helper function to createAddRecFromPHIWithCasts.
static Constant * EvaluateExpression(Value *V, const Loop *L, DenseMap< Instruction *, Constant * > &Vals, const DataLayout &DL, const TargetLibraryInfo *TLI)
EvaluateExpression - Given an expression that passes the getConstantEvolvingPHI predicate,...
static const SCEV * getPreviousSCEVDivisibleByDivisor(const SCEV *Expr, const APInt &DivisorVal, ScalarEvolution &SE)
static const SCEV * MatchNotExpr(const SCEV *Expr)
If Expr computes ~A, return A else return nullptr.
static cl::opt< unsigned > MaxValueCompareDepth("scalar-evolution-max-value-compare-depth", cl::Hidden, cl::desc("Maximum depth of recursive value complexity comparisons"), cl::init(2))
static const SCEV * applyDivisibilityOnMinMaxExpr(const SCEV *MinMaxExpr, APInt Divisor, ScalarEvolution &SE)
static cl::opt< bool, true > VerifySCEVOpt("verify-scev", cl::Hidden, cl::location(VerifySCEV), cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"))
static const SCEV * getSignedOverflowLimitForStep(const SCEV *Step, ICmpInst::Predicate *Pred, ScalarEvolution *SE)
static cl::opt< unsigned > MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden, cl::desc("Maximum depth of recursive arithmetics"), cl::init(32))
static bool HasSameValue(const SCEV *A, const SCEV *B)
SCEV structural equivalence is usually sufficient for testing whether two expressions are equal,...
static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow)
Compute the result of "n choose k", the binomial coefficient.
static std::optional< int > CompareSCEVComplexity(const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS, DominatorTree &DT, unsigned Depth=0)
static bool canConstantEvolve(Instruction *I, const Loop *L)
Determine whether this instruction can constant evolve within this loop assuming its operands can all...
static PHINode * getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L, DenseMap< Instruction *, PHINode * > &PHIMap, unsigned Depth)
getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by recursing through each instructi...
static bool scevUnconditionallyPropagatesPoisonFromOperands(SCEVTypes Kind)
static cl::opt< bool > VerifySCEVStrict("verify-scev-strict", cl::Hidden, cl::desc("Enable stricter verification with -verify-scev is passed"))
static Constant * getOtherIncomingValue(PHINode *PN, BasicBlock *BB)
static cl::opt< bool > UseExpensiveRangeSharpening("scalar-evolution-use-expensive-range-sharpening", cl::Hidden, cl::init(false), cl::desc("Use more powerful methods of sharpening expression ranges. May " "be costly in terms of compile time"))
static const SCEV * getUnsignedOverflowLimitForStep(const SCEV *Step, ICmpInst::Predicate *Pred, ScalarEvolution *SE)
static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Is LHS Pred RHS true on the virtue of LHS or RHS being a Min or Max expression?
static bool BrPHIToSelect(DominatorTree &DT, CondBrInst *BI, PHINode *Merge, Value *&C, Value *&LHS, Value *&RHS)
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
static bool InBlock(const Value *V, const BasicBlock *BB)
Provides some synthesis utilities to produce sequences of values.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static SymbolRef::Type getType(const Symbol *Sym)
LocallyHashedType DenseMapInfo< LocallyHashedType >::Empty
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
static std::optional< bool > isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS, const Value *ARHS, const Value *BLHS, const Value *BRHS)
Return true if "icmp Pred BLHS BRHS" is true whenever "icmp PredALHS ARHS" is true.
Virtual Register Rewriter
static const uint32_t IV[8]
SCEVCastSinkingRewriter(ScalarEvolution &SE, Type *TargetTy, ConversionFn CreatePtrCast)
static const SCEV * rewrite(const SCEV *Scev, ScalarEvolution &SE, Type *TargetTy, ConversionFn CreatePtrCast)
const SCEV * visitUnknown(const SCEVUnknown *Expr)
const SCEV * visitMulExpr(const SCEVMulExpr *Expr)
const SCEV * visitAddExpr(const SCEVAddExpr *Expr)
const SCEV * visit(const SCEV *S)
Class for arbitrary precision integers.
LLVM_ABI APInt umul_ov(const APInt &RHS, bool &Overflow) const
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
LLVM_ABI APInt getHiBits(unsigned numBits) const
Compute an APInt containing numBits highbits from this APInt.
unsigned getActiveBits() const
Compute the number of active bits in the value.
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
APInt abs() const
Get the absolute value.
bool sgt(const APInt &RHS) const
Signed greater than comparison.
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
bool isSignMask() const
Check if the APInt's value is returned by getSignMask.
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
static APInt getMinValue(unsigned numBits)
Gets minimum unsigned value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
bool sle(const APInt &RHS) const
Signed less or equal comparison.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
bool isNonPositive() const
Determine if this APInt Value is non-positive (<= 0).
unsigned countTrailingZeros() const
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
unsigned logBase2() const
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
LLVM_ABI APInt multiplicativeInverse() const
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
LLVM_ABI APInt sext(unsigned width) const
Sign extend to a new width.
APInt shl(unsigned shiftAmt) const
Left-shift function.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
bool isSignBitSet() const
Determine if sign bit of this APInt is set.
bool slt(const APInt &RHS) const
Signed less than comparison.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
This templated class represents "all analyses that operate over <a particular IR unit>" (e....
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
AnalysisUsage & addRequiredTransitive()
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
MutableArrayRef< WeakVH > assumptions()
Access the list of assumption handles currently tracked for this function.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
const Function * getParent() const
Return the enclosing method, or null if none.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Instruction & front() const
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
LLVM_ABI unsigned getNoWrapKind() const
Returns one of OBO::NoSignedWrap or OBO::NoUnsignedWrap.
LLVM_ABI Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
BinaryOps getOpcode() const
This class represents a function call, abstracting a target machine's calling convention.
virtual void deleted()
Callback for Value destruction.
bool isFalseWhenEqual() const
This is just a convenience.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ ICMP_ULT
unsigned less than
@ ICMP_SGE
signed greater or equal
@ ICMP_ULE
unsigned less or equal
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
bool isTrueWhenEqual() const
This is just a convenience.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static LLVM_ABI std::optional< CmpPredicate > getMatching(CmpPredicate A, CmpPredicate B)
Compares two CmpPredicates taking samesign into account and returns the canonicalized CmpPredicate if...
LLVM_ABI CmpInst::Predicate getPreferredSignedPredicate() const
Attempts to return a signed CmpInst::Predicate from the CmpPredicate.
CmpInst::Predicate dropSameSign() const
Drops samesign information.
Conditional Branch instruction.
Value * getCondition() const
BasicBlock * getSuccessor(unsigned i) const
static LLVM_ABI Constant * getNot(Constant *C)
static Constant * getPtrAdd(Constant *Ptr, Constant *Offset, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReduced=nullptr)
Create a getelementptr i8, ptr, offset constant expression.
static LLVM_ABI Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getPtrToAddr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)
static LLVM_ABI Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
This is the shared class of boolean and integer constants.
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
This class represents a range of values.
LLVM_ABI ConstantRange add(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an addition of a value in this ran...
LLVM_ABI ConstantRange zextOrTrunc(uint32_t BitWidth) const
Make this range have the bit width given by BitWidth.
PreferredRangeType
If represented precisely, the result of some range operations may consist of multiple disjoint ranges...
LLVM_ABI bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
const APInt & getLower() const
Return the lower value for this range.
LLVM_ABI ConstantRange urem(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an unsigned remainder operation of...
LLVM_ABI bool isFullSet() const
Return true if this set contains all of the elements possible for this data-type.
LLVM_ABI bool icmp(CmpInst::Predicate Pred, const ConstantRange &Other) const
Does the predicate Pred hold between ranges this and Other?
LLVM_ABI bool isEmptySet() const
Return true if this set contains no members.
LLVM_ABI ConstantRange zeroExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
LLVM_ABI bool isSignWrappedSet() const
Return true if this set wraps around the signed domain.
LLVM_ABI APInt getSignedMin() const
Return the smallest signed value contained in the ConstantRange.
LLVM_ABI bool isWrappedSet() const
Return true if this set wraps around the unsigned domain.
LLVM_ABI void print(raw_ostream &OS) const
Print out the bounds to a stream.
LLVM_ABI ConstantRange truncate(uint32_t BitWidth, unsigned NoWrapKind=0) const
Return a new range in the specified integer type, which must be strictly smaller than the current typ...
LLVM_ABI ConstantRange signExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
const APInt & getUpper() const
Return the upper value for this range.
LLVM_ABI ConstantRange unionWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the union of this range with another range.
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
LLVM_ABI APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
LLVM_ABI APInt getSignedMax() const
Return the largest signed value contained in the ConstantRange.
static ConstantRange getNonEmpty(APInt Lower, APInt Upper)
Create non-empty constant range with the given bounds.
static LLVM_ABI ConstantRange makeGuaranteedNoWrapRegion(Instruction::BinaryOps BinOp, const ConstantRange &Other, unsigned NoWrapKind)
Produce the largest range containing all X such that "X BinOp Y" is guaranteed not to wrap (overflow)...
LLVM_ABI unsigned getMinSignedBits() const
Compute the maximal number of bits needed to represent every value in this signed range.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
LLVM_ABI ConstantRange sub(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a subtraction of a value in this r...
LLVM_ABI ConstantRange sextOrTrunc(uint32_t BitWidth) const
Make this range have the bit width given by BitWidth.
static LLVM_ABI ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
This is an important base class in LLVM.
A parsed version of the target data layout string in and methods for querying it.
LLVM_ABI const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
LLVM_ABI IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
LLVM_ABI unsigned getIndexTypeSizeInBits(Type *Ty) const
The size in bits of the index used in GEP calculation for this type.
LLVM_ABI IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of a GEP index in AddressSpace.
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
iterator find_as(const LookupKeyT &Val)
Alternate version of find() which allows a different, and possibly less expensive,...
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Analysis pass which computes a DominatorTree.
Legacy analysis pass which computes a DominatorTree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
LLVM_ABI bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
FoldingSetNodeIDRef - This class describes a reference to an interned FoldingSetNodeID,...
FoldingSetNodeID - This class is used to gather all the unique data bits of a node.
Represents flags for the getelementptr instruction/expression.
bool hasNoUnsignedSignedWrap() const
bool hasNoUnsignedWrap() const
static GEPNoWrapFlags none()
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
Module * getParent()
Get the module that this global value is contained inside of...
static bool isPrivateLinkage(LinkageTypes Linkage)
static bool isInternalLinkage(LinkageTypes Linkage)
This instruction compares its operands according to the predicate given to the constructor.
CmpPredicate getCmpPredicate() const
static bool isGE(Predicate P)
Return true if the predicate is SGE or UGE.
CmpPredicate getSwappedCmpPredicate() const
static LLVM_ABI bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
static bool isLT(Predicate P)
Return true if the predicate is SLT or ULT.
CmpPredicate getInverseCmpPredicate() const
Predicate getNonStrictCmpPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
static bool isGT(Predicate P)
Return true if the predicate is SGT or UGT.
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
static CmpPredicate getInverseCmpPredicate(CmpPredicate Pred)
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
static bool isLE(Predicate P)
Return true if the predicate is SLE or ULE.
LLVM_ABI bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
LLVM_ABI bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
LLVM_ABI bool isIdenticalToWhenDefined(const Instruction *I, bool IntersectAttrs=false) const LLVM_READONLY
This is like isIdenticalTo, except that it ignores the SubclassOptionalData flags,...
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A helper class to return the specified delimiter string after the first invocation of operator String...
An instruction for reading from memory.
Analysis pass that exposes the LoopInfo for a function.
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
BlockT * getHeader() const
unsigned getLoopDepth() const
Return the nesting level of this loop.
BlockT * getLoopPredecessor() const
If the given loop's header has exactly one unique predecessor outside the loop, return it.
LoopT * getParentLoop() const
Return the parent loop if it exists or nullptr for top level loops.
unsigned getLoopDepth(const BlockT *BB) const
Return the loop nesting level of the specified block.
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
The legacy pass manager's analysis pass to compute loop information.
Represents a single loop in the control flow graph.
bool isLoopInvariant(const Value *V) const
Return true if the specified value is loop invariant.
A Module instance is used to store all the information related to an LLVM module.
unsigned getOpcode() const
Return the opcode for this Instruction or ConstantExpr.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
iterator_range< const_block_iterator > blocks() const
op_range incoming_values()
Value * getIncomingValueForBlock(const BasicBlock *BB) const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
AnalysisType & getAnalysis() const
getAnalysis<AnalysisType>() - This function is used by subclasses to get to the analysis information ...
PointerIntPair - This class implements a pair of a pointer and small integer.
const SCEV * getPointer() const
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
LLVM_ABI void addPredicate(const SCEVPredicate &Pred)
Adds a new predicate.
LLVM_ABI const SCEVPredicate & getPredicate() const
LLVM_ABI const SCEV * getPredicatedSCEV(const SCEV *Expr)
Returns the rewritten SCEV for Expr in the context of the current SCEV predicate.
LLVM_ABI bool hasNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Returns true if we've proved that V doesn't wrap by means of a SCEV predicate.
LLVM_ABI void setNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Proves that V doesn't overflow by adding SCEV predicate.
LLVM_ABI void print(raw_ostream &OS, unsigned Depth) const
Print the SCEV mappings done by the Predicated Scalar Evolution.
LLVM_ABI bool areAddRecsEqualWithPreds(const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const
Check if AR1 and AR2 are equal, while taking into account Equal predicates in Preds.
LLVM_ABI PredicatedScalarEvolution(ScalarEvolution &SE, Loop &L)
LLVM_ABI const SCEVAddRecExpr * getAsAddRec(Value *V)
Attempts to produce an AddRecExpr for V by adding additional SCEV predicates.
LLVM_ABI unsigned getSmallConstantMaxTripCount()
Returns the upper bound of the loop trip count as a normal unsigned value, or 0 if the trip count is ...
LLVM_ABI const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
LLVM_ABI const SCEV * getSymbolicMaxBackedgeTakenCount()
Get the (predicated) symbolic max backedge count for the analyzed loop.
LLVM_ABI const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
PreservedAnalysisChecker getChecker() const
Build a checker for this PreservedAnalyses and the specified analysis type.
constexpr bool isValid() const
This node represents an addition of some number of SCEVs.
This node represents a polynomial recurrence on the trip count of the specified loop.
friend class ScalarEvolution
LLVM_ABI const SCEV * evaluateAtIteration(const SCEV *It, ScalarEvolution &SE) const
Return the value of this chain of recurrences at the specified iteration number.
void setNoWrapFlags(NoWrapFlags Flags)
Set flags for a recurrence without clearing any previously set flags.
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values.
bool isQuadratic() const
Return true if this represents an expression A + B*x + C*x^2 where A, B and C are loop invariant valu...
LLVM_ABI const SCEV * getNumIterationsInRange(const ConstantRange &Range, ScalarEvolution &SE) const
Return the number of iterations of this loop that produce values in the specified constant range.
LLVM_ABI const SCEVAddRecExpr * getPostIncExpr(ScalarEvolution &SE) const
Return an expression representing the value of this expression one iteration of the loop ahead.
const Loop * getLoop() const
SCEVUse getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
This is the base class for unary cast operator classes.
SCEVUse getOperand() const
LLVM_ABI SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, SCEVUse op, Type *ty)
void setNoWrapFlags(NoWrapFlags Flags)
Set flags for a non-recurrence without clearing previously set flags.
This class represents an assumption that the expression LHS Pred RHS evaluates to true,...
SCEVComparePredicate(const FoldingSetNodeIDRef ID, const ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
bool isAlwaysTrue() const override
Returns true if the predicate is always true.
void print(raw_ostream &OS, unsigned Depth=0) const override
Prints a textual representation of this predicate with an indentation of Depth.
bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const override
Implementation of the SCEVPredicate interface.
This class represents a constant integer value.
ConstantInt * getValue() const
const APInt & getAPInt() const
This is the base class for unary integral cast operator classes.
LLVM_ABI SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, SCEVUse op, Type *ty)
This node is the base class min/max selections.
static enum SCEVTypes negate(enum SCEVTypes T)
This node represents multiplication of some number of SCEVs.
This node is a base class providing common functionality for n'ary operators.
bool hasNoUnsignedWrap() const
ArrayRef< SCEVUse > operands() const
bool hasNoSelfWrap() const
size_t getNumOperands() const
bool hasNoSignedWrap() const
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask=NoWrapMask) const
SCEVUse getOperand(unsigned i) const
This class represents an assumption made using SCEV expressions which can be checked at run-time.
SCEVPredicate(const SCEVPredicate &)=default
virtual bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const =0
Returns true if this predicate implies N.
This class represents a cast from a pointer to a pointer-sized integer value, without capturing the p...
This class represents a cast from a pointer to a pointer-sized integer value.
This visitor recursively visits a SCEV expression and re-writes it.
const SCEV * visitSignExtendExpr(const SCEVSignExtendExpr *Expr)
const SCEV * visit(const SCEV *S)
const SCEV * visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr)
const SCEV * visitSMinExpr(const SCEVSMinExpr *Expr)
SCEVRewriteVisitor(ScalarEvolution &SE)
const SCEV * visitUMinExpr(const SCEVUMinExpr *Expr)
This class represents a signed minimum selection.
This node is the base class for sequential/in-order min/max selections.
static SCEVTypes getEquivalentNonSequentialSCEVType(SCEVTypes Ty)
This class represents a sign extension of a small integer value to a larger integer value.
Visit all nodes in the expression tree using worklist traversal.
This class represents a truncation of an integer value to a smaller integer value.
This class represents a binary unsigned division operation.
This class represents an unsigned minimum selection.
This class represents a composition of other SCEV predicates, and is the class that most clients will...
void print(raw_ostream &OS, unsigned Depth) const override
Prints a textual representation of this predicate with an indentation of Depth.
bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const override
Returns true if this predicate implies N.
SCEVUnionPredicate(ArrayRef< const SCEVPredicate * > Preds, ScalarEvolution &SE)
Union predicates don't get cached so create a dummy set ID for it.
bool isAlwaysTrue() const override
Implementation of the SCEVPredicate interface.
This means that we are dealing with an entirely unknown SCEV value, and only represent it as its LLVM...
This class represents the value of vscale, as used when defining the length of a scalable vector or r...
This class represents an assumption made on an AddRec expression.
IncrementWrapFlags
Similar to SCEV::NoWrapFlags, but with slightly different semantics for FlagNUSW.
SCEVWrapPredicate(const FoldingSetNodeIDRef ID, const SCEVAddRecExpr *AR, IncrementWrapFlags Flags)
bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const override
Returns true if this predicate implies N.
static SCEVWrapPredicate::IncrementWrapFlags setFlags(SCEVWrapPredicate::IncrementWrapFlags Flags, SCEVWrapPredicate::IncrementWrapFlags OnFlags)
void print(raw_ostream &OS, unsigned Depth=0) const override
Prints a textual representation of this predicate with an indentation of Depth.
bool isAlwaysTrue() const override
Returns true if the predicate is always true.
const SCEVAddRecExpr * getExpr() const
Implementation of the SCEVPredicate interface.
static SCEVWrapPredicate::IncrementWrapFlags clearFlags(SCEVWrapPredicate::IncrementWrapFlags Flags, SCEVWrapPredicate::IncrementWrapFlags OffFlags)
Convenient IncrementWrapFlags manipulation methods.
static SCEVWrapPredicate::IncrementWrapFlags getImpliedFlags(const SCEVAddRecExpr *AR, ScalarEvolution &SE)
Returns the set of SCEVWrapPredicate no wrap flags implied by a SCEVAddRecExpr.
IncrementWrapFlags getFlags() const
Returns the set assumed no overflow flags.
This class represents a zero extension of a small integer value to a larger integer value.
This class represents an analyzed expression in the program.
unsigned short getExpressionSize() const
LLVM_ABI bool isOne() const
Return true if the expression is a constant one.
LLVM_ABI bool isZero() const
Return true if the expression is a constant zero.
LLVM_ABI void dump() const
This method is used for debugging.
LLVM_ABI bool isAllOnesValue() const
Return true if the expression is a constant all-ones value.
LLVM_ABI bool isNonConstantNegative() const
Return true if the specified scev is negated, but not a constant.
LLVM_ABI ArrayRef< SCEVUse > operands() const
Return operands of this SCEV expression.
LLVM_ABI void print(raw_ostream &OS) const
Print out the internal representation of this scalar to the specified stream.
SCEV(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, unsigned short ExpressionSize)
SCEVTypes getSCEVType() const
LLVM_ABI Type * getType() const
Return the LLVM type of this SCEV expression.
NoWrapFlags
NoWrapFlags are bitfield indices into SubclassData.
Analysis pass that exposes the ScalarEvolution for a function.
LLVM_ABI ScalarEvolution run(Function &F, FunctionAnalysisManager &AM)
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overridden by passes that need analysis information to do t...
void print(raw_ostream &OS, const Module *=nullptr) const override
print - Print out the internal state of the pass.
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overridden by subclasses to do the per-function processing of the pass.
void releaseMemory() override
releaseMemory() - This member can be implemented by a pass if it wants to be able to release its memo...
void verifyAnalysis() const override
verifyAnalysis() - This member can be implemented by an analysis pass to check state of analysis infor...
ScalarEvolutionWrapperPass()
static LLVM_ABI LoopGuards collect(const Loop *L, ScalarEvolution &SE)
Collect rewrite map for loop guards for loop L, together with flags indicating if NUW and NSW can be ...
LLVM_ABI const SCEV * rewrite(const SCEV *Expr) const
Try to apply the collected loop guards to Expr.
The main scalar evolution driver.
LLVM_ABI const SCEV * getUDivExpr(SCEVUse LHS, SCEVUse RHS)
Get a canonical unsigned division expression, or something simpler if possible.
const SCEV * getConstantMaxBackedgeTakenCount(const Loop *L)
When successful, this returns a SCEVConstant that is greater than or equal to (i.e.
static bool hasFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags TestFlags)
const DataLayout & getDataLayout() const
Return the DataLayout associated with the module this SCEV instance is operating on.
LLVM_ABI bool isKnownNonNegative(const SCEV *S)
Test if the given expression is known to be non-negative.
LLVM_ABI bool isKnownOnEveryIteration(CmpPredicate Pred, const SCEVAddRecExpr *LHS, const SCEV *RHS)
Test if the condition described by Pred, LHS, RHS is known to be true on every iteration of the loop ...
LLVM_ABI const SCEV * getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
Return the SCEV object corresponding to -V.
LLVM_ABI std::optional< LoopInvariantPredicate > getLoopInvariantExitCondDuringFirstIterationsImpl(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI, const SCEV *MaxIter)
LLVM_ABI const SCEV * getUDivCeilSCEV(const SCEV *N, const SCEV *D)
Compute ceil(N / D).
LLVM_ABI std::optional< LoopInvariantPredicate > getLoopInvariantExitCondDuringFirstIterations(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI, const SCEV *MaxIter)
If the result of the predicate LHS Pred RHS is loop invariant with respect to L at given Context duri...
LLVM_ABI Type * getWiderType(Type *Ty1, Type *Ty2) const
LLVM_ABI const SCEV * getAbsExpr(const SCEV *Op, bool IsNSW)
LLVM_ABI bool isKnownNonPositive(const SCEV *S)
Test if the given expression is known to be non-positive.
LLVM_ABI bool isKnownNegative(const SCEV *S)
Test if the given expression is known to be negative.
LLVM_ABI const SCEV * getPredicatedConstantMaxBackedgeTakenCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Predicates)
Similar to getConstantMaxBackedgeTakenCount, except it will add a set of SCEV predicates to Predicate...
LLVM_ABI const SCEV * removePointerBase(const SCEV *S)
Compute an expression equivalent to S - getPointerBase(S).
LLVM_ABI bool isLoopEntryGuardedByCond(const Loop *L, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether entry to the loop is protected by a conditional between LHS and RHS.
LLVM_ABI bool isKnownNonZero(const SCEV *S)
Test if the given expression is known to be non-zero.
LLVM_ABI const SCEV * getURemExpr(SCEVUse LHS, SCEVUse RHS)
Represents an unsigned remainder expression based on unsigned division.
LLVM_ABI const SCEV * getSCEVAtScope(const SCEV *S, const Loop *L)
Return a SCEV expression for the specified value at the specified scope in the program.
LLVM_ABI const SCEV * getBackedgeTakenCount(const Loop *L, ExitCountKind Kind=Exact)
If the specified loop has a predictable backedge-taken count, return it, otherwise return a SCEVCould...
LLVM_ABI const SCEV * getSMinExpr(SCEVUse LHS, SCEVUse RHS)
LLVM_ABI void setNoWrapFlags(SCEVAddRecExpr *AddRec, SCEV::NoWrapFlags Flags)
Update no-wrap flags of an AddRec.
LLVM_ABI const SCEV * getUMaxFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS)
Promote the operands to the wider of the types using zero-extension, and then perform a umax operatio...
const SCEV * getZero(Type *Ty)
Return a SCEV for the constant 0 of a specific type.
LLVM_ABI bool willNotOverflow(Instruction::BinaryOps BinOp, bool Signed, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI=nullptr)
Is operation BinOp between LHS and RHS provably does not have a signed/unsigned overflow (Signed)?
LLVM_ABI ExitLimit computeExitLimitFromCond(const Loop *L, Value *ExitCond, bool ExitIfTrue, bool ControlsOnlyExit, bool AllowPredicates=false)
Compute the number of times the backedge of the specified loop will execute if its exit condition wer...
LLVM_ABI const SCEV * getZeroExtendExprImpl(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI const SCEV * getMinMaxExpr(SCEVTypes Kind, SmallVectorImpl< SCEVUse > &Operands)
LLVM_ABI const SCEVPredicate * getEqualPredicate(const SCEV *LHS, const SCEV *RHS)
LLVM_ABI unsigned getSmallConstantTripMultiple(const Loop *L, const SCEV *ExitCount)
Returns the largest constant divisor of the trip count as a normal unsigned value,...
LLVM_ABI uint64_t getTypeSizeInBits(Type *Ty) const
Return the size in bits of the specified type, for which isSCEVable must return true.
LLVM_ABI const SCEV * getConstant(ConstantInt *V)
LLVM_ABI const SCEV * getPredicatedBackedgeTakenCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Predicates)
Similar to getBackedgeTakenCount, except it will add a set of SCEV predicates to Predicates that are ...
LLVM_ABI const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
LLVM_ABI const SCEV * getMinusSCEV(SCEVUse LHS, SCEVUse RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS.
ConstantRange getSignedRange(const SCEV *S)
Determine the signed range for a particular SCEV.
LLVM_ABI const SCEV * getAddRecExpr(SCEVUse Start, SCEVUse Step, const Loop *L, SCEV::NoWrapFlags Flags)
Get an add recurrence expression for the specified loop.
LLVM_ABI const SCEV * getNoopOrSignExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
bool loopHasNoAbnormalExits(const Loop *L)
Return true if the loop has no abnormal exits.
LLVM_ABI const SCEV * getTripCountFromExitCount(const SCEV *ExitCount)
A version of getTripCountFromExitCount below which always picks an evaluation type which can not resu...
LLVM_ABI ScalarEvolution(Function &F, TargetLibraryInfo &TLI, AssumptionCache &AC, DominatorTree &DT, LoopInfo &LI)
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
LLVM_ABI const SCEV * getTruncateOrNoop(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI const SCEV * getLosslessPtrToIntExpr(const SCEV *Op)
LLVM_ABI const SCEV * getCastExpr(SCEVTypes Kind, const SCEV *Op, Type *Ty)
LLVM_ABI const SCEV * getSequentialMinMaxExpr(SCEVTypes Kind, SmallVectorImpl< SCEVUse > &Operands)
LLVM_ABI std::optional< bool > evaluatePredicateAt(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI)
Check whether the condition described by Pred, LHS, and RHS is true or false in the given Context.
LLVM_ABI unsigned getSmallConstantMaxTripCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > *Predicates=nullptr)
Returns the upper bound of the loop trip count as a normal unsigned value.
LLVM_ABI const SCEV * getPtrToIntExpr(const SCEV *Op, Type *Ty)
LLVM_ABI bool isBackedgeTakenCountMaxOrZero(const Loop *L)
Return true if the backedge taken count is either the value returned by getConstantMaxBackedgeTakenCo...
LLVM_ABI void forgetLoop(const Loop *L)
This method should be called by the client when it has changed a loop in a way that may effect Scalar...
LLVM_ABI bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
LLVM_ABI bool isKnownPositive(const SCEV *S)
Test if the given expression is known to be positive.
LLVM_ABI bool SimplifyICmpOperands(CmpPredicate &Pred, SCEVUse &LHS, SCEVUse &RHS, unsigned Depth=0)
Simplify LHS and RHS in a comparison with predicate Pred.
APInt getUnsignedRangeMin(const SCEV *S)
Determine the min of the unsigned range for a particular SCEV.
LLVM_ABI const SCEV * getOffsetOfExpr(Type *IntTy, StructType *STy, unsigned FieldNo)
Return an expression for offsetof on the given field with type IntTy.
LLVM_ABI LoopDisposition getLoopDisposition(const SCEV *S, const Loop *L)
Return the "disposition" of the given SCEV with respect to the given loop.
LLVM_ABI bool containsAddRecurrence(const SCEV *S)
Return true if the SCEV is a scAddRecExpr or it contains scAddRecExpr.
LLVM_ABI const SCEV * getSignExtendExprImpl(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI bool hasOperand(const SCEV *S, const SCEV *Op) const
Test whether the given SCEV has Op as a direct or indirect operand.
LLVM_ABI const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI bool isSCEVable(Type *Ty) const
Test if values of the given type are analyzable within the SCEV framework.
LLVM_ABI Type * getEffectiveSCEVType(Type *Ty) const
Return a type with the same bitwidth as the given type and which represents how SCEV will treat the g...
LLVM_ABI const SCEVPredicate * getComparePredicate(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
LLVM_ABI bool haveSameSign(const SCEV *S1, const SCEV *S2)
Return true if we know that S1 and S2 must have the same sign.
LLVM_ABI const SCEV * getNotSCEV(const SCEV *V)
Return the SCEV object corresponding to ~V.
LLVM_ABI const SCEV * getElementCount(Type *Ty, ElementCount EC, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
LLVM_ABI bool instructionCouldExistWithOperands(const SCEV *A, const SCEV *B)
Return true if there exists a point in the program at which both A and B could be operands to the sam...
ConstantRange getUnsignedRange(const SCEV *S)
Determine the unsigned range for a particular SCEV.
LLVM_ABI void print(raw_ostream &OS) const
LLVM_ABI const SCEV * getPredicatedExitCount(const Loop *L, const BasicBlock *ExitingBlock, SmallVectorImpl< const SCEVPredicate * > *Predicates, ExitCountKind Kind=Exact)
Same as above except this uses the predicated backedge taken info and may require predicates.
static SCEV::NoWrapFlags clearFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OffFlags)
LLVM_ABI void forgetTopmostLoop(const Loop *L)
LLVM_ABI void forgetValue(Value *V)
This method should be called by the client when it has changed a value in a way that may effect its v...
APInt getSignedRangeMin(const SCEV *S)
Determine the min of the signed range for a particular SCEV.
LLVM_ABI const SCEV * getNoopOrAnyExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI void forgetBlockAndLoopDispositions(Value *V=nullptr)
Called when the client has changed the disposition of values in a loop or block.
LLVM_ABI const SCEV * getTruncateExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI const SCEV * getUMaxExpr(SCEVUse LHS, SCEVUse RHS)
@ MonotonicallyDecreasing
@ MonotonicallyIncreasing
LLVM_ABI std::optional< LoopInvariantPredicate > getLoopInvariantPredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI=nullptr)
If the result of the predicate LHS Pred RHS is loop invariant with respect to L, return a LoopInvaria...
LLVM_ABI const SCEV * getStoreSizeOfExpr(Type *IntTy, Type *StoreTy)
Return an expression for the store size of StoreTy that is type IntTy.
LLVM_ABI const SCEVPredicate * getWrapPredicate(const SCEVAddRecExpr *AR, SCEVWrapPredicate::IncrementWrapFlags AddedFlags)
LLVM_ABI bool isLoopBackedgeGuardedByCond(const Loop *L, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether the backedge of the loop is protected by a conditional between LHS and RHS.
LLVM_ABI APInt getNonZeroConstantMultiple(const SCEV *S)
const SCEV * getMinusOne(Type *Ty)
Return a SCEV for the constant -1 of a specific type.
static SCEV::NoWrapFlags setFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OnFlags)
LLVM_ABI bool hasLoopInvariantBackedgeTakenCount(const Loop *L)
Return true if the specified loop has an analyzable loop-invariant backedge-taken count.
LLVM_ABI BlockDisposition getBlockDisposition(const SCEV *S, const BasicBlock *BB)
Return the "disposition" of the given SCEV with respect to the given block.
LLVM_ABI const SCEV * getNoopOrZeroExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)
LLVM_ABI const SCEV * getUMinFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS, bool Sequential=false)
Promote the operands to the wider of the types using zero-extension, and then perform a umin operatio...
LLVM_ABI bool loopIsFiniteByAssumption(const Loop *L)
Return true if this loop is finite by assumption.
LLVM_ABI const SCEV * getExistingSCEV(Value *V)
Return an existing SCEV for V if there is one, otherwise return nullptr.
LLVM_ABI APInt getConstantMultiple(const SCEV *S, const Instruction *CtxI=nullptr)
Returns the max constant multiple of S.
LoopDisposition
An enum describing the relationship between a SCEV and a loop.
@ LoopComputable
The SCEV varies predictably with the loop.
@ LoopVariant
The SCEV is loop-variant (unknown).
@ LoopInvariant
The SCEV is loop-invariant.
friend class SCEVCallbackVH
LLVM_ABI bool isKnownMultipleOf(const SCEV *S, uint64_t M, SmallVectorImpl< const SCEVPredicate * > &Assumptions)
Check that S is a multiple of M.
LLVM_ABI const SCEV * getAnyExtendExpr(const SCEV *Op, Type *Ty)
getAnyExtendExpr - Return a SCEV for the given operand extended with unspecified bits out to the give...
LLVM_ABI bool isKnownToBeAPowerOfTwo(const SCEV *S, bool OrZero=false, bool OrNegative=false)
Test if the given expression is known to be a power of 2.
LLVM_ABI std::optional< SCEV::NoWrapFlags > getStrengthenedNoWrapFlagsFromBinOp(const OverflowingBinaryOperator *OBO)
Parse NSW/NUW flags from add/sub/mul IR binary operation Op into SCEV no-wrap flags,...
LLVM_ABI void forgetLcssaPhiWithNewPredecessor(Loop *L, PHINode *V)
Forget LCSSA phi node V of loop L to which a new predecessor was added, such that it may no longer be...
LLVM_ABI bool containsUndefs(const SCEV *S) const
Return true if the SCEV expression contains an undef value.
LLVM_ABI std::optional< MonotonicPredicateType > getMonotonicPredicateType(const SCEVAddRecExpr *LHS, ICmpInst::Predicate Pred)
If, for all loop invariant X, the predicate "LHS `Pred` X" is monotonically increasing or decreasing,...
LLVM_ABI const SCEV * getCouldNotCompute()
LLVM_ABI const SCEV * getMulExpr(SmallVectorImpl< SCEVUse > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
LLVM_ABI bool isAvailableAtLoopEntry(const SCEV *S, const Loop *L)
Determine if the SCEV can be evaluated at loop's entry.
LLVM_ABI uint32_t getMinTrailingZeros(const SCEV *S, const Instruction *CtxI=nullptr)
Determine the minimum number of zero bits that S is guaranteed to end in (at every loop iteration).
BlockDisposition
An enum describing the relationship between a SCEV and a basic block.
@ DominatesBlock
The SCEV dominates the block.
@ ProperlyDominatesBlock
The SCEV properly dominates the block.
@ DoesNotDominateBlock
The SCEV does not dominate the block.
LLVM_ABI const SCEV * getExitCount(const Loop *L, const BasicBlock *ExitingBlock, ExitCountKind Kind=Exact)
Return the number of times the backedge executes before the given exit would be taken; if not exactly...
LLVM_ABI const SCEV * getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI void getPoisonGeneratingValues(SmallPtrSetImpl< const Value * > &Result, const SCEV *S)
Return the set of Values that, if poison, will definitively result in S being poison as well.
LLVM_ABI void forgetLoopDispositions()
Called when the client has changed the disposition of values in this loop.
LLVM_ABI const SCEV * getVScale(Type *Ty)
LLVM_ABI unsigned getSmallConstantTripCount(const Loop *L)
Returns the exact trip count of the loop if we can compute it, and the result is a small constant.
LLVM_ABI bool hasComputableLoopEvolution(const SCEV *S, const Loop *L)
Return true if the given SCEV changes value in a known way in the specified loop.
LLVM_ABI const SCEV * getPointerBase(const SCEV *V)
Transitively follow the chain of pointer-type operands until reaching a SCEV that does not have a sin...
LLVM_ABI void forgetAllLoops()
LLVM_ABI bool dominates(const SCEV *S, const BasicBlock *BB)
Return true if elements that makes up the given SCEV dominate the specified basic block.
APInt getUnsignedRangeMax(const SCEV *S)
Determine the max of the unsigned range for a particular SCEV.
LLVM_ABI const SCEV * getAddExpr(SmallVectorImpl< SCEVUse > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
ExitCountKind
The terms "backedge taken count" and "exit count" are used interchangeably to refer to the number of ...
@ SymbolicMaximum
An expression which provides an upper bound on the exact trip count.
@ ConstantMaximum
A constant which provides an upper bound on the exact trip count.
@ Exact
An expression exactly describing the number of times the backedge has executed when a loop is exited.
LLVM_ABI bool isKnownPredicate(CmpPredicate Pred, SCEVUse LHS, SCEVUse RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
LLVM_ABI const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
LLVM_ABI const SCEV * getPtrToAddrExpr(const SCEV *Op)
LLVM_ABI const SCEVAddRecExpr * convertSCEVToAddRecWithPredicates(const SCEV *S, const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Preds)
Tries to convert the S expression to an AddRec expression, adding additional predicates to Preds as r...
LLVM_ABI const SCEV * getSMaxExpr(SCEVUse LHS, SCEVUse RHS)
LLVM_ABI const SCEV * getElementSize(Instruction *Inst)
Return the size of an element read or written by Inst.
LLVM_ABI const SCEV * getSizeOfExpr(Type *IntTy, TypeSize Size)
Return an expression for a TypeSize.
LLVM_ABI std::optional< bool > evaluatePredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Check whether the condition described by Pred, LHS, and RHS is true or false.
LLVM_ABI const SCEV * getUnknown(Value *V)
LLVM_ABI std::optional< std::pair< const SCEV *, SmallVector< const SCEVPredicate *, 3 > > > createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI)
Checks if SymbolicPHI can be rewritten as an AddRecExpr under some Predicates.
LLVM_ABI const SCEV * getTruncateOrZeroExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI bool isKnownViaInduction(CmpPredicate Pred, SCEVUse LHS, SCEVUse RHS)
We'd like to check the predicate on every iteration of the most dominated loop between loops used in ...
static SCEV::NoWrapFlags maskFlags(SCEV::NoWrapFlags Flags, int Mask)
Convenient NoWrapFlags manipulation that hides enum casts and is visible in the ScalarEvolution name ...
LLVM_ABI std::optional< APInt > computeConstantDifference(const SCEV *LHS, const SCEV *RHS)
Compute LHS - RHS and returns the result as an APInt if it is a constant, and std::nullopt if it isn'...
LLVM_ABI bool properlyDominates(const SCEV *S, const BasicBlock *BB)
Return true if elements that makes up the given SCEV properly dominate the specified basic block.
LLVM_ABI const SCEV * getUDivExactExpr(SCEVUse LHS, SCEVUse RHS)
Get a canonical unsigned division expression, or something simpler if possible.
LLVM_ABI const SCEV * rewriteUsingPredicate(const SCEV *S, const Loop *L, const SCEVPredicate &A)
Re-writes the SCEV according to the Predicates in A.
LLVM_ABI std::pair< const SCEV *, const SCEV * > SplitIntoInitAndPostInc(const Loop *L, const SCEV *S)
Splits SCEV expression S into two SCEVs.
LLVM_ABI bool canReuseInstruction(const SCEV *S, Instruction *I, SmallVectorImpl< Instruction * > &DropPoisonGeneratingInsts)
Check whether it is poison-safe to represent the expression S using the instruction I.
LLVM_ABI bool isKnownPredicateAt(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
LLVM_ABI const SCEV * getPredicatedSymbolicMaxBackedgeTakenCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Predicates)
Similar to getSymbolicMaxBackedgeTakenCount, except it will add a set of SCEV predicates to Predicate...
LLVM_ABI ~ScalarEvolution()
LLVM_ABI const SCEV * getGEPExpr(GEPOperator *GEP, ArrayRef< SCEVUse > IndexExprs)
Returns an expression for a GEP.
LLVM_ABI const SCEV * getUMinExpr(SCEVUse LHS, SCEVUse RHS, bool Sequential=false)
LLVM_ABI void registerUser(const SCEV *User, ArrayRef< const SCEV * > Ops)
Notify this ScalarEvolution that User directly uses SCEVs in Ops.
LLVM_ABI bool isBasicBlockEntryGuardedByCond(const BasicBlock *BB, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether entry to the basic block is protected by a conditional between LHS and RHS.
LLVM_ABI const SCEV * getTruncateOrSignExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI bool containsErasedValue(const SCEV *S) const
Return true if the SCEV expression contains a Value that has been optimised out and is now a nullptr.
const SCEV * getSymbolicMaxBackedgeTakenCount(const Loop *L)
When successful, this returns a SCEV that is greater than or equal to (i.e.
APInt getSignedRangeMax(const SCEV *S)
Determine the max of the signed range for a particular SCEV.
LLVM_ABI void verify() const
LLVMContext & getContext() const
Implements a dense probed hash-table based set with some number of buckets stored inline.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
iterator insert(iterator I, T &&Elt)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Used to lazily calculate structure layout information for a target machine, based on the DataLayout s...
TypeSize getElementOffset(unsigned Idx) const
TypeSize getSizeInBits() const
Class to represent struct types.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
The instances of the Type class are immutable: once they are created, they are never changed.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
bool isPointerTy() const
True if this is an instance of PointerType.
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
LLVMContext & getContext() const
All values hold a context through their type.
unsigned getValueID() const
Return an ID for the concrete type of this object.
LLVM_ABI void printAsOperand(raw_ostream &O, bool PrintType=true, const Module *M=nullptr) const
Print the name of this Value out to the specified raw_ostream.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
This class implements an extremely fast bulk output stream that can only output to a stream.
raw_ostream & indent(unsigned NumSpaces)
indent - Insert 'NumSpaces' spaces.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
const APInt & smin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be signed.
const APInt & smax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be signed.
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
LLVM_ABI std::optional< APInt > SolveQuadraticEquationWrap(APInt A, APInt B, APInt C, unsigned RangeWidth)
Let q(n) = An^2 + Bn + C, and BW = bit width of the value range (e.g.
const APInt & umax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be unsigned.
LLVM_ABI APInt GreatestCommonDivisor(APInt A, APInt B)
Compute GCD of two unsigned APInt values.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
int getMinValue(MCInstrInfo const &MCII, MCInst const &MCI)
Return the minimum value of an extendable operand.
@ BasicBlock
Various leaf nodes.
LLVM_ABI Function * getDeclarationIfExists(const Module *M, ID id)
Look up the Function declaration of the intrinsic id in the Module M and return it if it exists.
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
bool match(Val *V, const Pattern &P)
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
bind_ty< WithOverflowInst > m_WithOverflowInst(WithOverflowInst *&I)
Match a with overflow intrinsic, capturing it if we match.
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
class_match< BasicBlock > m_BasicBlock()
Match an arbitrary basic block value and ignore it.
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
class_match< const SCEVVScale > m_SCEVVScale()
bind_cst_ty m_scev_APInt(const APInt *&C)
Match an SCEV constant and bind it to an APInt.
cst_pred_ty< is_all_ones > m_scev_AllOnes()
Match an integer with all bits set.
SCEVUnaryExpr_match< SCEVZeroExtendExpr, Op0_t > m_scev_ZExt(const Op0_t &Op0)
is_undef_or_poison m_scev_UndefOrPoison()
Match an SCEVUnknown wrapping undef or poison.
class_match< const SCEVConstant > m_SCEVConstant()
cst_pred_ty< is_one > m_scev_One()
Match an integer 1.
specificloop_ty m_SpecificLoop(const Loop *L)
SCEVAffineAddRec_match< Op0_t, Op1_t, class_match< const Loop > > m_scev_AffineAddRec(const Op0_t &Op0, const Op1_t &Op1)
bind_ty< const SCEVMulExpr > m_scev_Mul(const SCEVMulExpr *&V)
SCEVUnaryExpr_match< SCEVSignExtendExpr, Op0_t > m_scev_SExt(const Op0_t &Op0)
cst_pred_ty< is_zero > m_scev_Zero()
Match an integer 0.
SCEVUnaryExpr_match< SCEVTruncateExpr, Op0_t > m_scev_Trunc(const Op0_t &Op0)
bool match(const SCEV *S, const Pattern &P)
SCEVBinaryExpr_match< SCEVUDivExpr, Op0_t, Op1_t > m_scev_UDiv(const Op0_t &Op0, const Op1_t &Op1)
specificscev_ty m_scev_Specific(const SCEV *S)
Match if we have a specific specified SCEV.
SCEVBinaryExpr_match< SCEVMulExpr, Op0_t, Op1_t, SCEV::FlagNUW, true > m_scev_c_NUWMul(const Op0_t &Op0, const Op1_t &Op1)
class_match< const Loop > m_Loop()
bind_ty< const SCEVAddExpr > m_scev_Add(const SCEVAddExpr *&V)
bind_ty< const SCEVUnknown > m_SCEVUnknown(const SCEVUnknown *&V)
SCEVBinaryExpr_match< SCEVMulExpr, Op0_t, Op1_t, SCEV::FlagAnyWrap, true > m_scev_c_Mul(const Op0_t &Op0, const Op1_t &Op1)
SCEVBinaryExpr_match< SCEVSMaxExpr, Op0_t, Op1_t > m_scev_SMax(const Op0_t &Op0, const Op1_t &Op1)
SCEVURem_match< Op0_t, Op1_t > m_scev_URem(Op0_t LHS, Op1_t RHS, ScalarEvolution &SE)
Match the mathematical pattern A - (A / B) * B, where A and B can be arbitrary expressions.
class_match< const SCEV > m_SCEV()
@ Valid
The data is already valid.
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
@ Switch
The "resume-switch" lowering, where there are separate resume and destroy functions that are shared b...
NodeAddr< PhiNode * > Phi
friend class Instruction
Iterator for Instructions in a `BasicBlock.
This is an optimization pass for GlobalISel generic memory operations.
void visitAll(const SCEV *Root, SV &Visitor)
Use SCEVTraversal to visit all nodes in the given expression tree.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
FunctionAddr VTableAddr Value
LLVM_ATTRIBUTE_ALWAYS_INLINE DynamicAPInt gcd(const DynamicAPInt &A, const DynamicAPInt &B)
void stable_sort(R &&Range)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
SaveAndRestore(T &) -> SaveAndRestore< T >
Printable print(const GCNRegPressure &RP, const GCNSubtarget *ST=nullptr, unsigned DynamicVGPRBlockSize=0)
LLVM_ABI bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
LLVM_ABI bool mustTriggerUB(const Instruction *I, const SmallPtrSetImpl< const Value * > &KnownPoison)
Return true if the given instruction must trigger undefined behavior when I is executed with any oper...
LLVM_ABI bool canConstantFoldCallTo(const CallBase *Call, const Function *F)
canConstantFoldCallTo - Return true if its even possible to fold a call to the specified function.
InterleavedRange< Range > interleaved(const Range &R, StringRef Separator=", ", StringRef Prefix="", StringRef Suffix="")
Output range R as a sequence of interleaved elements.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
auto successors(const MachineBasicBlock *BB)
scope_exit(Callable) -> scope_exit< Callable >
constexpr from_range_t from_range
auto dyn_cast_if_present(const Y &Val)
dyn_cast_if_present<X> - Functionally identical to dyn_cast, except that a null (or none in the case ...
bool set_is_subset(const S1Ty &S1, const S2Ty &S2)
set_is_subset(A, B) - Return true iff A in B
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
auto uninitialized_copy(R &&Src, IterTy Dst)
bool isa_and_nonnull(const Y &Val)
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
LLVM_ABI bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO, const DominatorTree &DT)
Returns true if the arithmetic part of the WO 's result is used only along the paths control dependen...
DomTreeNodeBase< BasicBlock > DomTreeNode
LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
auto dyn_cast_or_null(const Y &Val)
void erase(Container &C, ValueType V)
Wrapper function to remove a value from a container:
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
iterator_range< pointee_iterator< WrappedIteratorT > > make_pointee_range(RangeT &&Range)
auto reverse(ContainerTy &&C)
LLVM_ABI bool isMustProgress(const Loop *L)
Return true if this loop can be assumed to make progress.
LLVM_ABI bool impliesPoison(const Value *ValAssumedPoison, const Value *V)
Return true if V is poison given that ValAssumedPoison is already poison.
LLVM_ABI bool isFinite(const Loop *L)
Return true if this loop can be assumed to run for a finite number of iterations.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
unsigned short computeExpressionSize(ArrayRef< SCEVUse > Args)
LLVM_ABI bool programUndefinedIfPoison(const Instruction *Inst)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool isPointerTy(const Type *T)
FunctionAddr VTableAddr Count
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
LLVM_ABI bool propagatesPoison(const Use &PoisonOp)
Return true if PoisonOp's user yields poison or raises UB if its operand PoisonOp is poison.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Mul
Product of integers.
@ SMax
Signed integer max implemented in terms of select(cmp()).
@ SMin
Signed integer min implemented in terms of select(cmp()).
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
auto count(R &&Range, const E &Element)
Wrapper function around std::count to count the number of times an element Element occurs in the given range.
DWARFExpression::Operation Op
auto max_element(R &&Range)
Provide wrappers to std::max_element which take ranges instead of having to pass begin/end explicitly.
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return the number of times the sign bit of the register is replicated into the other bits.
constexpr unsigned BitWidth
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one of its successor blocks.
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given predicate occurs in a range.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
constexpr bool isIntN(unsigned N, int64_t x)
Checks if a signed integer fits into the given (dynamic) bit width.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
iterator_range< df_iterator< T > > depth_first(const T &G)
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
LLVM_ABI bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
LLVM_ABI Constant * ConstantFoldInstOperands(const Instruction *I, ArrayRef< Constant * > Ops, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldInstOperands - Attempt to constant fold an instruction with the specified operands.
bool SCEVExprContains(const SCEV *Root, PredTy Pred)
Return true if any node in Root satisfies the predicate Pred.
Implement std::hash so that hash_code can be used in STL containers.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
A special type used by analysis passes to provide an address that identifies that particular analysis pass type.
Incoming for lane mask phi as machine instruction, incoming register Reg and incoming block Block are taken from the machine instruction.
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
bool isNonNegative() const
Returns true if this value is known to be non-negative.
static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
unsigned getBitWidth() const
Get the bit width of this value.
static LLVM_ABI KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for lshr(LHS, RHS).
KnownBits zextOrTrunc(unsigned BitWidth) const
Return known bits for a zero extension or truncation of the value we're tracking.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
bool isNegative() const
Returns true if this value is known to be negative.
static LLVM_ABI KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
An object of this class is returned by queries that could not be answered.
LLVM_ABI SCEVCouldNotCompute()
static LLVM_ABI bool classof(const SCEV *S)
Methods for support type inquiry through isa, cast, and dyn_cast:
void dump() const
This method is used for debugging.
void print(raw_ostream &OS) const
Print out the internal representation of this scalar to the specified stream.
This class defines a simple visitor class that may be used for various SCEV analysis purposes.
A utility class that uses RAII to save and restore the value of a variable.
Information about the number of loop iterations for which a loop exit's branch condition evaluates to the not-taken path.
LLVM_ABI ExitLimit(const SCEV *E)
Construct either an exact exit limit from a constant, or an unknown one from a SCEVCouldNotCompute.
const SCEV * ExactNotTaken
const SCEV * SymbolicMaxNotTaken
SmallVector< const SCEVPredicate *, 4 > Predicates
A vector of predicate guards for this ExitLimit.
const SCEV * ConstantMaxNotTaken