83#include "llvm/Config/llvm-config.h"
138#define DEBUG_TYPE "scalar-evolution"
141 "Number of loop exits with predictable exit counts");
143 "Number of loop exits without predictable exit counts");
145 "Number of loops with trip counts computed by force");
147#ifdef EXPENSIVE_CHECKS
155 cl::desc(
"Maximum number of iterations SCEV will "
156 "symbolically execute a constant "
162 cl::desc(
"Verify ScalarEvolution's backedge taken counts (slow)"));
165 cl::desc(
"Enable stricter verification with -verify-scev is passed"));
169 cl::desc(
"Verify IR correctness when making sensitive SCEV queries (slow)"),
174 cl::desc(
"Threshold for inlining multiplication operands into a SCEV"),
179 cl::desc(
"Threshold for inlining addition operands into a SCEV"),
183 "scalar-evolution-max-scev-compare-depth",
cl::Hidden,
184 cl::desc(
"Maximum depth of recursive SCEV complexity comparisons"),
188 "scalar-evolution-max-scev-operations-implication-depth",
cl::Hidden,
189 cl::desc(
"Maximum depth of recursive SCEV operations implication analysis"),
193 "scalar-evolution-max-value-compare-depth",
cl::Hidden,
194 cl::desc(
"Maximum depth of recursive value complexity comparisons"),
199 cl::desc(
"Maximum depth of recursive arithmetics"),
203 "scalar-evolution-max-constant-evolving-depth",
cl::Hidden,
208 cl::desc(
"Maximum depth of recursive SExt/ZExt/Trunc"),
213 cl::desc(
"Max coefficients in AddRec during evolving"),
218 cl::desc(
"Size of the expression which is considered huge"),
223 cl::desc(
"Threshold for switching to iteratively computing SCEV ranges"),
227 "scalar-evolution-max-loop-guard-collection-depth",
cl::Hidden,
228 cl::desc(
"Maximum depth for recursive loop guard collection"),
cl::init(1));
233 cl::desc(
"When printing analysis, include information on every instruction"));
236 "scalar-evolution-use-expensive-range-sharpening",
cl::Hidden,
238 cl::desc(
"Use more powerful methods of sharpening expression ranges. May "
239 "be costly in terms of compile time"));
242 "scalar-evolution-max-scc-analysis-depth",
cl::Hidden,
243 cl::desc(
"Maximum amount of nodes to process while searching SCEVUnknown "
244 "Phi strongly connected components"),
249 cl::desc(
"Handle <= and >= in finite loops"),
253 "scalar-evolution-use-context-for-no-wrap-flag-strenghening",
cl::Hidden,
254 cl::desc(
"Infer nuw/nsw flags using context where suitable"),
339#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
359#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
379 OS <<
"(ptrto" << OpS <<
" " << *
Op->getType() <<
" " << *
Op <<
" to "
386 OS <<
"(trunc " << *
Op->getType() <<
" " << *
Op <<
" to "
393 OS <<
"(zext " << *
Op->getType() <<
" " << *
Op <<
" to "
400 OS <<
"(sext " << *
Op->getType() <<
" " << *
Op <<
" to "
429 const char *OpStr =
nullptr;
442 OpStr =
" umin_seq ";
466 OS <<
"(" << *UDiv->
getLHS() <<
" /u " << *UDiv->
getRHS() <<
")";
473 OS <<
"***COULDNOTCOMPUTE***";
551 if (!
Mul)
return false;
555 if (!SC)
return false;
573 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
575 UniqueSCEVs.InsertNode(S, IP);
590 ConstantInt::get(ITy, V, isSigned,
true));
598 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
601 UniqueSCEVs.InsertNode(S, IP);
622 "Must be a non-bit-width-changing pointer-to-integer cast!");
629 "Must be a non-bit-width-changing pointer-to-integer cast!");
641 "Cannot truncate non-integer value!");
648 "Cannot zero extend non-integer value!");
655 "Cannot sign extend non-integer value!");
660 SE->forgetMemoizedResults({
this});
663 SE->UniqueSCEVs.RemoveNode(
this);
669void SCEVUnknown::allUsesReplacedWith(
Value *New) {
671 SE->forgetMemoizedResults({
this});
674 SE->UniqueSCEVs.RemoveNode(
this);
696 if (LIsPointer != RIsPointer)
697 return (
int)LIsPointer - (int)RIsPointer;
702 return (
int)LID - (int)RID;
707 unsigned LArgNo = LA->getArgNo(), RArgNo =
RA->getArgNo();
708 return (
int)LArgNo - (int)RArgNo;
714 if (
auto L = LGV->getLinkage() - RGV->getLinkage())
717 const auto IsGVNameSemantic = [&](
const GlobalValue *GV) {
718 auto LT = GV->getLinkage();
725 if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
726 return LGV->getName().compare(RGV->getName());
737 if (LParent != RParent) {
740 if (LDepth != RDepth)
741 return (
int)LDepth - (int)RDepth;
745 unsigned LNumOps = LInst->getNumOperands(),
746 RNumOps = RInst->getNumOperands();
747 if (LNumOps != RNumOps)
748 return (
int)LNumOps - (int)RNumOps;
750 for (
unsigned Idx :
seq(LNumOps)) {
752 RInst->getOperand(Idx),
Depth + 1);
766static std::optional<int>
776 return (
int)LType - (int)RType;
801 unsigned LBitWidth = LA.
getBitWidth(), RBitWidth =
RA.getBitWidth();
802 if (LBitWidth != RBitWidth)
803 return (
int)LBitWidth - (int)RBitWidth;
804 return LA.
ult(
RA) ? -1 : 1;
810 return LTy->getBitWidth() - RTy->getBitWidth();
821 if (LLoop != RLoop) {
823 assert(LHead != RHead &&
"Two loops share the same header?");
827 "No dominance between recurrences used by one SCEV?");
851 unsigned LNumOps = LOps.
size(), RNumOps = ROps.
size();
852 if (LNumOps != RNumOps)
853 return (
int)LNumOps - (int)RNumOps;
855 for (
unsigned i = 0; i != LNumOps; ++i) {
881 if (
Ops.size() < 2)
return;
886 return Complexity && *Complexity < 0;
888 if (
Ops.size() == 2) {
892 if (IsLessComplex(
RHS,
LHS))
905 for (
unsigned i = 0, e =
Ops.size(); i != e-2; ++i) {
911 for (
unsigned j = i+1; j != e &&
Ops[j]->getSCEVType() == Complexity; ++j) {
916 if (i == e-2)
return;
938template <
typename FoldT,
typename IsIdentityT,
typename IsAbsorberT>
942 IsIdentityT IsIdentity, IsAbsorberT IsAbsorber) {
944 for (
unsigned Idx = 0; Idx <
Ops.size();) {
952 Ops.erase(
Ops.begin() + Idx);
959 assert(Folded &&
"Must have folded value");
963 if (Folded && IsAbsorber(Folded->
getAPInt()))
967 if (Folded && !IsIdentity(Folded->
getAPInt()))
968 Ops.insert(
Ops.begin(), Folded);
970 return Ops.size() == 1 ?
Ops[0] :
nullptr;
1045 APInt OddFactorial(W, 1);
1047 for (
unsigned i = 3; i <= K; ++i) {
1050 OddFactorial *= (i >> TwoFactors);
1054 unsigned CalculationBits = W +
T;
1068 for (
unsigned i = 1; i != K; ++i) {
1101 for (
unsigned i = 1, e =
Operands.size(); i != e; ++i) {
1130 ConversionFn CreatePtrCast;
1134 ConversionFn CreatePtrCast)
1135 : Base(
SE), TargetTy(TargetTy), CreatePtrCast(
std::
move(CreatePtrCast)) {}
1138 Type *TargetTy, ConversionFn CreatePtrCast) {
1140 return Rewriter.visit(Scev);
1176 "Should only reach pointer-typed SCEVUnknown's.");
1181 return SE.getZero(TargetTy);
1182 return CreatePtrCast(Expr);
1187 assert(
Op->getType()->isPointerTy() &&
"Op must be a pointer");
1206 Op, *
this, IntPtrTy, [
this, IntPtrTy](
const SCEVUnknown *U) {
1211 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
1213 SCEV *S =
new (SCEVAllocator)
1215 UniqueSCEVs.InsertNode(S, IP);
1218 return static_cast<const SCEV *
>(S);
1221 "We must have succeeded in sinking the cast, "
1222 "and ending up with an integer-typed expression!");
1227 assert(
Op->getType()->isPointerTy() &&
"Op must be a pointer");
1231 if (DL.hasUnstableRepresentation(
Op->getType()))
1234 Type *Ty = DL.getAddressType(
Op->getType());
1245 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
1247 SCEV *S =
new (SCEVAllocator)
1249 UniqueSCEVs.InsertNode(S, IP);
1252 return static_cast<const SCEV *
>(S);
1255 "We must have succeeded in sinking the cast, "
1256 "and ending up with an integer-typed expression!");
1261 assert(Ty->isIntegerTy() &&
"Target type must be an integer type!");
1273 "This is not a truncating conversion!");
1275 "This is not a conversion to a SCEVable type!");
1276 assert(!
Op->getType()->isPointerTy() &&
"Can't truncate pointer!");
1284 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1306 UniqueSCEVs.InsertNode(S, IP);
1319 unsigned numTruncs = 0;
1320 for (
unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
1328 if (numTruncs < 2) {
1338 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
1345 for (
const SCEV *
Op : AddRec->operands())
1360 UniqueSCEVs.InsertNode(S, IP);
1401struct ExtendOpTraitsBase {
1402 typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(
const SCEV *,
Type *,
1407template <
typename ExtendOp>
struct ExtendOpTraits {
1423 static const GetExtendExprTy GetExtendExpr;
1425 static const SCEV *getOverflowLimitForStep(
const SCEV *Step,
1426 ICmpInst::Predicate *Pred,
1427 ScalarEvolution *SE) {
1432const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1439 static const GetExtendExprTy GetExtendExpr;
1441 static const SCEV *getOverflowLimitForStep(
const SCEV *Step,
1442 ICmpInst::Predicate *Pred,
1443 ScalarEvolution *SE) {
1448const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1460template <
typename ExtendOpTy>
1463 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1464 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1480 for (
auto It = DiffOps.
begin(); It != DiffOps.
end(); ++It)
1493 auto PreStartFlags =
1511 const SCEV *OperandExtendedStart =
1513 (SE->*GetExtendExpr)(Step, WideTy,
Depth));
1514 if ((SE->*GetExtendExpr)(Start, WideTy,
Depth) == OperandExtendedStart) {
1526 const SCEV *OverflowLimit =
1527 ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);
1529 if (OverflowLimit &&
1537template <
typename ExtendOpTy>
1541 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1549 (SE->*GetExtendExpr)(PreStart, Ty,
Depth));
1584template <
typename ExtendOpTy>
1585bool ScalarEvolution::proveNoWrapByVaryingStart(
const SCEV *Start,
1588 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1598 APInt StartAI = StartC->
getAPInt();
1600 for (
unsigned Delta : {-2, -1, 1, 2}) {
1601 const SCEV *PreStart =
getConstant(StartAI - Delta);
1603 FoldingSetNodeID
ID;
1605 ID.AddPointer(PreStart);
1606 ID.AddPointer(Step);
1610 static_cast<SCEVAddRecExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
1614 if (PreAR && PreAR->getNoWrapFlags(WrapType)) {
1617 const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
1618 DeltaS, &Pred,
this);
1636 const unsigned BitWidth =
C.getBitWidth();
1654 const APInt &ConstantStart,
1673 auto &UserIDs = FoldCacheUser[
I.first->second];
1674 assert(
count(UserIDs,
ID) == 1 &&
"unexpected duplicates in UserIDs");
1675 for (
unsigned I = 0;
I != UserIDs.size(); ++
I)
1676 if (UserIDs[
I] ==
ID) {
1681 I.first->second = S;
1683 FoldCacheUser[S].push_back(
ID);
1689 "This is not an extending conversion!");
1691 "This is not a conversion to a SCEVable type!");
1692 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1696 if (
const SCEV *S = FoldCache.lookup(
ID))
1708 "This is not an extending conversion!");
1710 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1727 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1731 UniqueSCEVs.InsertNode(S, IP);
1741 const SCEV *
X = ST->getOperand();
1755 if (AR->isAffine()) {
1756 const SCEV *Start = AR->getStart();
1757 const SCEV *Step = AR->getStepRecurrence(*
this);
1759 const Loop *L = AR->getLoop();
1763 if (AR->hasNoUnsignedWrap()) {
1784 const SCEV *CastedMaxBECount =
1788 if (MaxBECount == RecastedMaxBECount) {
1798 const SCEV *WideMaxBECount =
1800 const SCEV *OperandExtendedAdd =
1806 if (ZAdd == OperandExtendedAdd) {
1817 OperandExtendedAdd =
1823 if (ZAdd == OperandExtendedAdd) {
1844 !AC.assumptions().empty()) {
1846 auto NewFlags = proveNoUnsignedWrapViaInduction(AR);
1848 if (AR->hasNoUnsignedWrap()) {
1883 const APInt &
C = SC->getAPInt();
1887 const SCEV *SResidual =
1896 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
1921 if (SA->hasNoUnsignedWrap()) {
1942 const SCEV *SResidual =
1954 if (
SM->hasNoUnsignedWrap()) {
1976 const SCEV *TruncRHS;
2013 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
2016 UniqueSCEVs.InsertNode(S, IP);
2025 "This is not an extending conversion!");
2027 "This is not a conversion to a SCEVable type!");
2028 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
2032 if (
const SCEV *S = FoldCache.lookup(
ID))
2044 "This is not an extending conversion!");
2046 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
2068 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
2073 UniqueSCEVs.InsertNode(S, IP);
2083 const SCEV *
X = ST->getOperand();
2094 if (SA->hasNoSignedWrap()) {
2116 const SCEV *SResidual =
2130 if (AR->isAffine()) {
2131 const SCEV *Start = AR->getStart();
2132 const SCEV *Step = AR->getStepRecurrence(*
this);
2134 const Loop *L = AR->getLoop();
2138 if (AR->hasNoSignedWrap()) {
2160 const SCEV *CastedMaxBECount =
2164 if (MaxBECount == RecastedMaxBECount) {
2174 const SCEV *WideMaxBECount =
2176 const SCEV *OperandExtendedAdd =
2182 if (SAdd == OperandExtendedAdd) {
2193 OperandExtendedAdd =
2199 if (SAdd == OperandExtendedAdd) {
2219 auto NewFlags = proveNoSignedWrapViaInduction(AR);
2221 if (AR->hasNoSignedWrap()) {
2236 const APInt &
C = SC->getAPInt();
2240 const SCEV *SResidual =
2249 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
2277 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
2280 UniqueSCEVs.InsertNode(S, IP);
2307 "This is not an extending conversion!");
2309 "This is not a conversion to a SCEVable type!");
2314 if (SC->getAPInt().isNegative())
2319 const SCEV *NewOp =
T->getOperand();
2338 for (
const SCEV *
Op : AR->operands())
2376 APInt &AccumulatedConstant,
2380 bool Interesting =
false;
2387 if (Scale != 1 || AccumulatedConstant != 0 ||
C->getValue()->isZero())
2389 AccumulatedConstant += Scale *
C->getAPInt();
2394 for (; i !=
Ops.size(); ++i) {
2403 M, NewOps, AccumulatedConstant,
Add->operands(), NewScale, SE);
2409 auto Pair = M.insert({
Key, NewScale});
2413 Pair.first->second += NewScale;
2421 auto Pair = M.insert({
Ops[i], Scale});
2425 Pair.first->second += Scale;
2444 case Instruction::Add:
2447 case Instruction::Sub:
2450 case Instruction::Mul:
2464 const SCEV *
A = (this->*Extension)(
2466 const SCEV *LHSB = (this->*Extension)(LHS, WideTy, 0);
2467 const SCEV *RHSB = (this->*Extension)(RHS, WideTy, 0);
2475 if (BinOp == Instruction::Mul)
2481 APInt C = RHSC->getAPInt();
2482 unsigned NumBits =
C.getBitWidth();
2483 bool IsSub = (BinOp == Instruction::Sub);
2484 bool IsNegativeConst = (
Signed &&
C.isNegative());
2486 bool OverflowDown = IsSub ^ IsNegativeConst;
2488 if (IsNegativeConst) {
2501 APInt Limit = Min + Magnitude;
2507 APInt Limit = Max - Magnitude;
2512std::optional<SCEV::NoWrapFlags>
2517 return std::nullopt;
2526 bool Deduced =
false;
2528 if (OBO->
getOpcode() != Instruction::Add &&
2531 return std::nullopt;
2540 false, LHS, RHS, CtxI)) {
2547 true, LHS, RHS, CtxI)) {
2554 return std::nullopt;
2564 using namespace std::placeholders;
2571 assert(CanAnalyze &&
"don't call from other places!");
2578 auto IsKnownNonNegative = [&](
SCEVUse U) {
2588 if (SignOrUnsignWrap != SignOrUnsignMask &&
2595 return Instruction::Add;
2597 return Instruction::Mul;
2608 Opcode,
C, OBO::NoSignedWrap);
2616 Opcode,
C, OBO::NoUnsignedWrap);
2626 Ops[0]->isZero() && IsKnownNonNegative(
Ops[1]))
2633 if (UDiv->getOperand(1) ==
Ops[1])
2636 if (UDiv->getOperand(1) ==
Ops[0])
2652 "only nuw or nsw allowed");
2653 assert(!
Ops.empty() &&
"Cannot get empty add!");
2654 if (
Ops.size() == 1)
return Ops[0];
2657 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i)
2659 "SCEVAddExpr operand types don't match!");
2661 Ops, [](
const SCEV *
Op) {
return Op->getType()->isPointerTy(); });
2662 assert(NumPtrs <= 1 &&
"add has at most one pointer operand");
2667 [](
const APInt &C1,
const APInt &C2) {
return C1 + C2; },
2668 [](
const APInt &
C) {
return C.isZero(); },
2669 [](
const APInt &
C) {
return false; });
2682 return getOrCreateAddExpr(
Ops, ComputeFlags(
Ops));
2687 if (
Add->getNoWrapFlags(OrigFlags) != OrigFlags)
2688 Add->setNoWrapFlags(ComputeFlags(
Ops));
2696 bool FoundMatch =
false;
2697 for (
unsigned i = 0, e =
Ops.size(); i != e-1; ++i)
2698 if (
Ops[i] ==
Ops[i+1]) {
2710 --i; e -=
Count - 1;
2720 auto FindTruncSrcType = [&]() ->
Type * {
2726 return T->getOperand()->getType();
2728 SCEVUse LastOp =
Mul->getOperand(
Mul->getNumOperands() - 1);
2730 return T->getOperand()->getType();
2734 if (
auto *SrcType = FindTruncSrcType()) {
2741 if (
T->getOperand()->getType() != SrcType) {
2750 for (
unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
2753 if (
T->getOperand()->getType() != SrcType) {
2781 if (
Ops.size() == 2) {
2791 auto C2 =
C->getAPInt();
2794 APInt ConstAdd = C1 + C2;
2795 auto AddFlags = AddExpr->getNoWrapFlags();
2836 if (
Ops.size() == 2 &&
2847 if (Idx <
Ops.size()) {
2848 bool DeletedAdd =
false;
2859 Ops.erase(
Ops.begin()+Idx);
2862 CommonFlags =
maskFlags(CommonFlags,
Add->getNoWrapFlags());
2885 struct APIntCompare {
2886 bool operator()(
const APInt &LHS,
const APInt &RHS)
const {
2887 return LHS.ult(RHS);
2894 std::map<APInt, SmallVector<SCEVUse, 4>, APIntCompare> MulOpLists;
2895 for (
const SCEV *NewOp : NewOps)
2896 MulOpLists[M.find(NewOp)->second].push_back(NewOp);
2899 if (AccumulatedConstant != 0)
2901 for (
auto &MulOp : MulOpLists) {
2902 if (MulOp.first == 1) {
2904 }
else if (MulOp.first != 0) {
2913 if (
Ops.size() == 1)
2924 for (
unsigned MulOp = 0, e =
Mul->getNumOperands(); MulOp != e; ++MulOp) {
2925 const SCEV *MulOpSCEV =
Mul->getOperand(MulOp);
2928 for (
unsigned AddOp = 0, e =
Ops.size(); AddOp != e; ++AddOp)
2929 if (MulOpSCEV ==
Ops[AddOp]) {
2931 const SCEV *InnerMul =
Mul->getOperand(MulOp == 0);
2932 if (
Mul->getNumOperands() != 2) {
2939 const SCEV *AddOne =
2943 if (
Ops.size() == 2)
return OuterMul;
2945 Ops.erase(
Ops.begin()+AddOp);
2946 Ops.erase(
Ops.begin()+Idx-1);
2948 Ops.erase(
Ops.begin()+Idx);
2949 Ops.erase(
Ops.begin()+AddOp-1);
2951 Ops.push_back(OuterMul);
2956 for (
unsigned OtherMulIdx = Idx+1;
2963 OMulOp != e; ++OMulOp)
2964 if (OtherMul->
getOperand(OMulOp) == MulOpSCEV) {
2966 const SCEV *InnerMul1 =
Mul->getOperand(MulOp == 0);
2967 if (
Mul->getNumOperands() != 2) {
2975 OtherMul->
operands().take_front(OMulOp));
2979 const SCEV *InnerMulSum =
2983 if (
Ops.size() == 2)
return OuterMul;
2984 Ops.erase(
Ops.begin()+Idx);
2985 Ops.erase(
Ops.begin()+OtherMulIdx-1);
2986 Ops.push_back(OuterMul);
3006 for (
unsigned i = 0, e =
Ops.size(); i != e; ++i)
3009 Ops.erase(
Ops.begin()+i);
3014 if (!LIOps.
empty()) {
3039 auto *DefI = getDefiningScopeBound(LIOps);
3041 if (!isGuaranteedToTransferExecutionTo(DefI, ReachI))
3053 if (
Ops.size() == 1)
return NewRec;
3056 for (
unsigned i = 0;; ++i)
3057 if (
Ops[i] == AddRec) {
3067 for (
unsigned OtherIdx = Idx+1;
3075 "AddRecExprs are not sorted in reverse dominance order?");
3082 if (OtherAddRec->getLoop() == AddRecLoop) {
3083 for (
unsigned i = 0, e = OtherAddRec->getNumOperands();
3085 if (i >= AddRecOps.
size()) {
3086 append_range(AddRecOps, OtherAddRec->operands().drop_front(i));
3090 getAddExpr(AddRecOps[i], OtherAddRec->getOperand(i),
3093 Ops.erase(
Ops.begin() + OtherIdx); --OtherIdx;
3108 return getOrCreateAddExpr(
Ops, ComputeFlags(
Ops));
3119 static_cast<SCEVAddExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
3123 S =
new (SCEVAllocator)
3125 UniqueSCEVs.InsertNode(S, IP);
3136 FoldingSetNodeID
ID;
3138 for (
const SCEV *
Op :
Ops)
3143 static_cast<SCEVAddRecExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
3145 SCEVUse *
O = SCEVAllocator.Allocate<SCEVUse>(
Ops.size());
3147 S =
new (SCEVAllocator)
3148 SCEVAddRecExpr(
ID.Intern(SCEVAllocator), O,
Ops.size(), L);
3149 UniqueSCEVs.InsertNode(S, IP);
3151 LoopUsers[
L].push_back(S);
3160 FoldingSetNodeID
ID;
3162 for (
const SCEV *
Op :
Ops)
3166 static_cast<SCEVMulExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
3168 SCEVUse *
O = SCEVAllocator.Allocate<SCEVUse>(
Ops.size());
3170 S =
new (SCEVAllocator) SCEVMulExpr(
ID.Intern(SCEVAllocator),
3172 UniqueSCEVs.InsertNode(S, IP);
3182 if (j > 1 && k / j != i) Overflow =
true;
3198 if (n == 0 || n == k)
return 1;
3199 if (k > n)
return 0;
3205 for (
uint64_t i = 1; i <= k; ++i) {
3206 r =
umul_ov(r, n-(i-1), Overflow);
3215 struct FindConstantInAddMulChain {
3216 bool FoundConstant =
false;
3218 bool follow(
const SCEV *S) {
3223 bool isDone()
const {
3224 return FoundConstant;
3228 FindConstantInAddMulChain
F;
3230 ST.visitAll(StartExpr);
3231 return F.FoundConstant;
3239 "only nuw or nsw allowed");
3240 assert(!
Ops.empty() &&
"Cannot get empty mul!");
3241 if (
Ops.size() == 1)
return Ops[0];
3243 Type *ETy =
Ops[0]->getType();
3245 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i)
3247 "SCEVMulExpr operand types don't match!");
3252 [](
const APInt &C1,
const APInt &C2) {
return C1 * C2; },
3253 [](
const APInt &
C) {
return C.isOne(); },
3254 [](
const APInt &
C) {
return C.isZero(); });
3265 return getOrCreateMulExpr(
Ops, ComputeFlags(
Ops));
3270 if (
Mul->getNoWrapFlags(OrigFlags) != OrigFlags)
3271 Mul->setNoWrapFlags(ComputeFlags(
Ops));
3276 if (
Ops.size() == 2) {
3284 const SCEV *Op0, *Op1;
3292 if (
Ops[0]->isAllOnesValue()) {
3297 bool AnyFolded =
false;
3298 for (
const SCEV *AddOp :
Add->operands()) {
3325 AddRec->getNoWrapFlags(FlagsMask));
3348 APInt C1V = LHSC->getAPInt();
3358 const SCEV *NewMul =
nullptr;
3362 assert(C1V.
ugt(1) &&
"C1 <= 1 should have been folded earlier");
3377 if (Idx <
Ops.size()) {
3378 bool DeletedMul =
false;
3384 Ops.erase(
Ops.begin()+Idx);
3408 for (
unsigned i = 0, e =
Ops.size(); i != e; ++i)
3411 Ops.erase(
Ops.begin()+i);
3416 if (!LIOps.
empty()) {
3429 for (
unsigned i = 0, e = AddRec->
getNumOperands(); i != e; ++i) {
3445 if (
Ops.size() == 1)
return NewRec;
3448 for (
unsigned i = 0;; ++i)
3449 if (
Ops[i] == AddRec) {
3470 bool OpsModified =
false;
3471 for (
unsigned OtherIdx = Idx+1;
3485 bool Overflow =
false;
3492 for (
int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
3496 z < ze && !Overflow; ++z) {
3499 if (LargerThan64Bits)
3500 Coeff =
umul_ov(Coeff1, Coeff2, Overflow);
3502 Coeff = Coeff1*Coeff2;
3517 if (
Ops.size() == 2)
return NewAddRec;
3518 Ops[Idx] = NewAddRec;
3519 Ops.erase(
Ops.begin() + OtherIdx); --OtherIdx;
3535 return getOrCreateMulExpr(
Ops, ComputeFlags(
Ops));
3542 "SCEVURemExpr operand types don't match!");
3547 if (RHSC->getValue()->isOne())
3548 return getZero(LHS->getType());
3551 if (RHSC->getAPInt().isPowerOf2()) {
3552 Type *FullTy = LHS->getType();
3568 assert(!LHS->getType()->isPointerTy() &&
3569 "SCEVUDivExpr operand can't be pointer!");
3570 assert(LHS->getType() == RHS->getType() &&
3571 "SCEVUDivExpr operand types don't match!");
3578 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
3586 if (RHSC->getValue()->isOne())
3591 if (!RHSC->getValue()->isZero()) {
3595 Type *Ty = LHS->getType();
3596 unsigned LZ = RHSC->getAPInt().countl_zero();
3600 if (!RHSC->getAPInt().isPowerOf2())
3608 const APInt &StepInt = Step->getAPInt();
3609 const APInt &DivInt = RHSC->getAPInt();
3610 if (!StepInt.
urem(DivInt) &&
3616 for (
const SCEV *
Op : AR->operands())
3622 const APInt *StartRem;
3635 bool CanFoldWithWrap = StepInt.
ule(DivInt) &&
3639 const SCEV *NewStart =
3641 if (*StartRem != 0 && (NoWrap || CanFoldWithWrap) &&
3643 const SCEV *NewLHS =
3646 if (LHS != NewLHS) {
3656 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
3665 for (
const SCEV *
Op : M->operands())
3669 for (
unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
3670 const SCEV *
Op = M->getOperand(i);
3682 if (
auto *DivisorConstant =
3684 bool Overflow =
false;
3686 DivisorConstant->getAPInt().
umul_ov(RHSC->getAPInt(), Overflow);
3697 for (
const SCEV *
Op :
A->operands())
3701 for (
unsigned i = 0, e =
A->getNumOperands(); i != e; ++i) {
3708 if (Operands.
size() ==
A->getNumOperands())
3715 return getConstant(LHSC->getAPInt().udiv(RHSC->getAPInt()));
3725 return getZero(LHS->getType());
3729 const SCEV *NewLHS, *NewRHS;
3737 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
3740 UniqueSCEVs.InsertNode(S, IP);
3770 if (!
Mul || !
Mul->hasNoUnsignedWrap())
3777 if (LHSCst == RHSCst) {
3785 APInt Factor =
gcd(LHSCst, RHSCst);
3803 for (
int i = 0, e =
Mul->getNumOperands(); i != e; ++i) {
3804 if (
Mul->getOperand(i) == RHS) {
3823 if (StepChrec->getLoop() == L) {
3837 if (Operands.
size() == 1)
return Operands[0];
3842 "SCEVAddRecExpr operand types don't match!");
3843 assert(!
Op->getType()->isPointerTy() &&
"Step must be integer");
3845 for (
const SCEV *
Op : Operands)
3847 "SCEVAddRecExpr operand is not available at loop entry!");
3850 if (Operands.
back()->isZero()) {
3865 const Loop *NestedLoop = NestedAR->getLoop();
3866 if (L->contains(NestedLoop)
3869 DT.dominates(L->getHeader(), NestedLoop->
getHeader()))) {
3871 Operands[0] = NestedAR->getStart();
3875 bool AllInvariant =
all_of(
3887 AllInvariant =
all_of(NestedOperands, [&](
const SCEV *
Op) {
3898 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
3902 Operands[0] = NestedAR;
3908 return getOrCreateAddRecExpr(Operands, L, Flags);
3924 if (!GEPI || !isSCEVExprNeverPoison(GEPI))
3928 return getGEPExpr(BaseExpr, IndexExprs,
GEP->getSourceElementType(), NW);
3942 bool FirstIter =
true;
3944 for (
SCEVUse IndexExpr : IndexExprs) {
3951 Offsets.push_back(FieldOffset);
3954 CurTy = STy->getTypeAtIndex(Index);
3959 "The first index of a GEP indexes a pointer");
3960 CurTy = SrcElementTy;
3971 const SCEV *LocalOffset =
getMulExpr(IndexExpr, ElementSize, OffsetWrap);
3972 Offsets.push_back(LocalOffset);
3977 if (Offsets.empty())
3990 "GEP should not change type mid-flight.");
3994SCEV *ScalarEvolution::findExistingSCEVInCache(
SCEVTypes SCEVType,
3997 ID.AddInteger(SCEVType);
4001 return UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
4004SCEV *ScalarEvolution::findExistingSCEVInCache(
SCEVTypes SCEVType,
4007 ID.AddInteger(SCEVType);
4011 return UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
4021 assert(SCEVMinMaxExpr::isMinMaxType(Kind) &&
"Not a SCEVMinMaxExpr!");
4022 assert(!
Ops.empty() &&
"Cannot get empty (u|s)(min|max)!");
4023 if (
Ops.size() == 1)
return Ops[0];
4026 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i) {
4028 "Operand types don't match!");
4031 "min/max should be consistently pointerish");
4057 return IsSigned ?
C.isMinSignedValue() :
C.isMinValue();
4059 return IsSigned ?
C.isMaxSignedValue() :
C.isMaxValue();
4064 return IsSigned ?
C.isMaxSignedValue() :
C.isMaxValue();
4066 return IsSigned ?
C.isMinSignedValue() :
C.isMinValue();
4072 if (
const SCEV *S = findExistingSCEVInCache(Kind,
Ops)) {
4078 while (Idx <
Ops.size() &&
Ops[Idx]->getSCEVType() < Kind)
4083 if (Idx <
Ops.size()) {
4084 bool DeletedAny =
false;
4085 while (
Ops[Idx]->getSCEVType() == Kind) {
4087 Ops.erase(
Ops.begin()+Idx);
4105 for (
unsigned i = 0, e =
Ops.size() - 1; i != e; ++i) {
4106 if (
Ops[i] ==
Ops[i + 1] ||
4107 isKnownViaNonRecursiveReasoning(FirstPred,
Ops[i],
Ops[i + 1])) {
4110 Ops.erase(
Ops.begin() + i + 1,
Ops.begin() + i + 2);
4113 }
else if (isKnownViaNonRecursiveReasoning(SecondPred,
Ops[i],
4116 Ops.erase(
Ops.begin() + i,
Ops.begin() + i + 1);
4122 if (
Ops.size() == 1)
return Ops[0];
4124 assert(!
Ops.empty() &&
"Reduced smax down to nothing!");
4129 ID.AddInteger(Kind);
4133 const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
4135 return ExistingSCEV;
4138 SCEV *S =
new (SCEVAllocator)
4141 UniqueSCEVs.InsertNode(S, IP);
4149class SCEVSequentialMinMaxDeduplicatingVisitor final
4150 :
public SCEVVisitor<SCEVSequentialMinMaxDeduplicatingVisitor,
4151 std::optional<const SCEV *>> {
4152 using RetVal = std::optional<const SCEV *>;
4160 bool canRecurseInto(
SCEVTypes Kind)
const {
4163 return RootKind == Kind || NonSequentialRootKind == Kind;
4166 RetVal visitAnyMinMaxExpr(
const SCEV *S) {
4168 "Only for min/max expressions.");
4171 if (!canRecurseInto(Kind))
4181 return std::nullopt;
4188 RetVal
visit(
const SCEV *S) {
4190 if (!SeenOps.
insert(S).second)
4191 return std::nullopt;
4192 return Base::visit(S);
4196 SCEVSequentialMinMaxDeduplicatingVisitor(ScalarEvolution &SE,
4198 : SE(SE), RootKind(RootKind),
4199 NonSequentialRootKind(
4200 SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType(
4204 SmallVectorImpl<SCEVUse> &NewOps) {
4209 for (
const SCEV *
Op : OrigOps) {
4214 Ops.emplace_back(*NewOp);
4218 NewOps = std::move(
Ops);
4222 RetVal visitConstant(
const SCEVConstant *Constant) {
return Constant; }
4224 RetVal visitVScale(
const SCEVVScale *VScale) {
return VScale; }
4226 RetVal visitPtrToAddrExpr(
const SCEVPtrToAddrExpr *Expr) {
return Expr; }
4228 RetVal visitPtrToIntExpr(
const SCEVPtrToIntExpr *Expr) {
return Expr; }
4230 RetVal visitTruncateExpr(
const SCEVTruncateExpr *Expr) {
return Expr; }
4232 RetVal visitZeroExtendExpr(
const SCEVZeroExtendExpr *Expr) {
return Expr; }
4234 RetVal visitSignExtendExpr(
const SCEVSignExtendExpr *Expr) {
return Expr; }
4236 RetVal visitAddExpr(
const SCEVAddExpr *Expr) {
return Expr; }
4238 RetVal visitMulExpr(
const SCEVMulExpr *Expr) {
return Expr; }
4240 RetVal visitUDivExpr(
const SCEVUDivExpr *Expr) {
return Expr; }
4242 RetVal visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
return Expr; }
4244 RetVal visitSMaxExpr(
const SCEVSMaxExpr *Expr) {
4245 return visitAnyMinMaxExpr(Expr);
4248 RetVal visitUMaxExpr(
const SCEVUMaxExpr *Expr) {
4249 return visitAnyMinMaxExpr(Expr);
4252 RetVal visitSMinExpr(
const SCEVSMinExpr *Expr) {
4253 return visitAnyMinMaxExpr(Expr);
4256 RetVal visitUMinExpr(
const SCEVUMinExpr *Expr) {
4257 return visitAnyMinMaxExpr(Expr);
4260 RetVal visitSequentialUMinExpr(
const SCEVSequentialUMinExpr *Expr) {
4261 return visitAnyMinMaxExpr(Expr);
4264 RetVal visitUnknown(
const SCEVUnknown *Expr) {
return Expr; }
4266 RetVal visitCouldNotCompute(
const SCEVCouldNotCompute *Expr) {
return Expr; }
4309struct SCEVPoisonCollector {
4310 bool LookThroughMaybePoisonBlocking;
4311 SmallPtrSet<const SCEVUnknown *, 4> MaybePoison;
4312 SCEVPoisonCollector(
bool LookThroughMaybePoisonBlocking)
4313 : LookThroughMaybePoisonBlocking(LookThroughMaybePoisonBlocking) {}
4315 bool follow(
const SCEV *S) {
4316 if (!LookThroughMaybePoisonBlocking &&
4326 bool isDone()
const {
return false; }
4336 SCEVPoisonCollector PC1(
true);
4341 if (PC1.MaybePoison.empty())
4347 SCEVPoisonCollector PC2(
false);
4357 SCEVPoisonCollector PC(
false);
4380 while (!Worklist.
empty()) {
4382 if (!Visited.
insert(V).second)
4386 if (Visited.
size() > 16)
4391 if (PoisonVals.
contains(V) || ::isGuaranteedNotToBePoison(V))
4402 if (PDI->isDisjoint())
4409 II &&
II->getIntrinsicID() == Intrinsic::vscale)
4416 if (
I->hasPoisonGeneratingAnnotations())
4427 assert(SCEVSequentialMinMaxExpr::isSequentialMinMaxType(Kind) &&
4428 "Not a SCEVSequentialMinMaxExpr!");
4429 assert(!
Ops.empty() &&
"Cannot get empty (u|s)(min|max)!");
4430 if (
Ops.size() == 1)
4434 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i) {
4436 "Operand types don't match!");
4439 "min/max should be consistently pointerish");
4447 if (
const SCEV *S = findExistingSCEVInCache(Kind,
Ops))
4454 SCEVSequentialMinMaxDeduplicatingVisitor Deduplicator(*
this, Kind);
4464 bool DeletedAny =
false;
4465 while (Idx <
Ops.size()) {
4466 if (
Ops[Idx]->getSCEVType() != Kind) {
4471 Ops.erase(
Ops.begin() + Idx);
4472 Ops.insert(
Ops.begin() + Idx, SMME->operands().begin(),
4473 SMME->operands().end());
4481 const SCEV *SaturationPoint;
4492 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i) {
4493 if (!isGuaranteedNotToCauseUB(
Ops[i]))
4505 Ops.erase(
Ops.begin() + i);
4510 if (isKnownViaNonRecursiveReasoning(Pred,
Ops[i - 1],
Ops[i])) {
4511 Ops.erase(
Ops.begin() + i);
4519 ID.AddInteger(Kind);
4523 const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
4525 return ExistingSCEV;
4529 SCEV *S =
new (SCEVAllocator)
4532 UniqueSCEVs.InsertNode(S, IP);
4580 if (
Size.isScalable())
4601 "Cannot get offset for structure containing scalable vector types");
4615 if (
SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP)) {
4617 "Stale SCEVUnknown in uniquing map!");
4623 UniqueSCEVs.InsertNode(S, IP);
4638 return Ty->isIntOrPtrTy();
4645 if (Ty->isPointerTy())
4656 if (Ty->isIntegerTy())
4660 assert(Ty->isPointerTy() &&
"Unexpected non-pointer non-integer type!");
4672 bool PreciseA, PreciseB;
4673 auto *ScopeA = getDefiningScopeBound({
A}, PreciseA);
4674 auto *ScopeB = getDefiningScopeBound({
B}, PreciseB);
4675 if (!PreciseA || !PreciseB)
4678 return (ScopeA == ScopeB) || DT.dominates(ScopeA, ScopeB) ||
4679 DT.dominates(ScopeB, ScopeA);
4683 return CouldNotCompute.get();
4686bool ScalarEvolution::checkValidity(
const SCEV *S)
const {
4689 return SU && SU->getValue() ==
nullptr;
4692 return !ContainsNulls;
4697 if (
I != HasRecMap.end())
4702 HasRecMap.insert({S, FoundAddRec});
4710 if (
SI == ExprValueMap.
end())
4712 return SI->second.getArrayRef();
4718void ScalarEvolution::eraseValueFromMap(
Value *V) {
4720 if (
I != ValueExprMap.end()) {
4721 auto EVIt = ExprValueMap.find(
I->second);
4722 bool Removed = EVIt->second.remove(V);
4724 assert(Removed &&
"Value not in ExprValueMap?");
4725 ValueExprMap.erase(
I);
4729void ScalarEvolution::insertValueToMap(
Value *V,
const SCEV *S) {
4733 auto It = ValueExprMap.find_as(V);
4734 if (It == ValueExprMap.end()) {
4736 ExprValueMap[S].insert(V);
4747 return createSCEVIter(V);
4754 if (
I != ValueExprMap.end()) {
4755 const SCEV *S =
I->second;
4756 assert(checkValidity(S) &&
4757 "existing SCEV has not been properly invalidated");
4770 Type *Ty = V->getType();
4786 assert(!V->getType()->isPointerTy() &&
"Can't negate pointer");
4799 return (
const SCEV *)
nullptr;
4805 if (
const SCEV *Replaced = MatchMinMaxNegation(MME))
4809 Type *Ty = V->getType();
4815 assert(
P->getType()->isPointerTy());
4830 if (AddOp->getType()->isPointerTy()) {
4831 assert(!PtrOp &&
"Cannot have multiple pointer ops");
4849 return getZero(LHS->getType());
4854 if (RHS->getType()->isPointerTy()) {
4855 if (!LHS->getType()->isPointerTy() ||
4865 const bool RHSIsNotMinSigned =
4896 Type *SrcTy = V->getType();
4897 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4898 "Cannot truncate or zero extend with non-integer arguments!");
4908 Type *SrcTy = V->getType();
4909 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4910 "Cannot truncate or zero extend with non-integer arguments!");
4920 Type *SrcTy = V->getType();
4921 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4922 "Cannot noop or zero extend with non-integer arguments!");
4924 "getNoopOrZeroExtend cannot truncate!");
4932 Type *SrcTy = V->getType();
4933 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4934 "Cannot noop or sign extend with non-integer arguments!");
4936 "getNoopOrSignExtend cannot truncate!");
4944 Type *SrcTy = V->getType();
4945 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4946 "Cannot noop or any extend with non-integer arguments!");
4948 "getNoopOrAnyExtend cannot truncate!");
4956 Type *SrcTy = V->getType();
4957 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4958 "Cannot truncate or noop with non-integer arguments!");
4960 "getTruncateOrNoop cannot extend!");
4968 const SCEV *PromotedLHS = LHS;
4969 const SCEV *PromotedRHS = RHS;
4989 assert(!
Ops.empty() &&
"At least one operand must be!");
4991 if (
Ops.size() == 1)
4995 Type *MaxType =
nullptr;
5001 assert(MaxType &&
"Failed to find maximum type!");
5014 if (!V->getType()->isPointerTy())
5019 V = AddRec->getStart();
5021 const SCEV *PtrOp =
nullptr;
5022 for (
const SCEV *AddOp :
Add->operands()) {
5023 if (AddOp->getType()->isPointerTy()) {
5024 assert(!PtrOp &&
"Cannot have multiple pointer ops");
5028 assert(PtrOp &&
"Must have pointer op");
5040 for (
User *U :
I->users()) {
5042 if (Visited.
insert(UserInsn).second)
5056 static const SCEV *rewrite(
const SCEV *S,
const Loop *L, ScalarEvolution &SE,
5057 bool IgnoreOtherLoops =
true) {
5060 if (
Rewriter.hasSeenLoopVariantSCEVUnknown())
5062 return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
5067 const SCEV *visitUnknown(
const SCEVUnknown *Expr) {
5069 SeenLoopVariantSCEVUnknown =
true;
5073 const SCEV *visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
5077 SeenOtherLoops =
true;
5081 bool hasSeenLoopVariantSCEVUnknown() {
return SeenLoopVariantSCEVUnknown; }
5083 bool hasSeenOtherLoops() {
return SeenOtherLoops; }
5086 explicit SCEVInitRewriter(
const Loop *L, ScalarEvolution &SE)
5087 : SCEVRewriteVisitor(SE),
L(
L) {}
5090 bool SeenLoopVariantSCEVUnknown =
false;
5091 bool SeenOtherLoops =
false;
5100 static const SCEV *rewrite(
const SCEV *S,
const Loop *L, ScalarEvolution &SE) {
5101 SCEVPostIncRewriter
Rewriter(L, SE);
5103 return Rewriter.hasSeenLoopVariantSCEVUnknown()
5108 const SCEV *visitUnknown(
const SCEVUnknown *Expr) {
5110 SeenLoopVariantSCEVUnknown =
true;
5114 const SCEV *visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
5118 SeenOtherLoops =
true;
5122 bool hasSeenLoopVariantSCEVUnknown() {
return SeenLoopVariantSCEVUnknown; }
5124 bool hasSeenOtherLoops() {
return SeenOtherLoops; }
5127 explicit SCEVPostIncRewriter(
const Loop *L, ScalarEvolution &SE)
5128 : SCEVRewriteVisitor(SE),
L(
L) {}
5131 bool SeenLoopVariantSCEVUnknown =
false;
5132 bool SeenOtherLoops =
false;
5138class SCEVBackedgeConditionFolder
5141 static const SCEV *rewrite(
const SCEV *S,
const Loop *L,
5142 ScalarEvolution &SE) {
5143 bool IsPosBECond =
false;
5144 Value *BECond =
nullptr;
5145 if (BasicBlock *Latch =
L->getLoopLatch()) {
5147 assert(BI->getSuccessor(0) != BI->getSuccessor(1) &&
5148 "Both outgoing branches should not target same header!");
5149 BECond = BI->getCondition();
5150 IsPosBECond = BI->getSuccessor(0) ==
L->getHeader();
5155 SCEVBackedgeConditionFolder
Rewriter(L, BECond, IsPosBECond, SE);
5159 const SCEV *visitUnknown(
const SCEVUnknown *Expr) {
5160 const SCEV *
Result = Expr;
5165 switch (
I->getOpcode()) {
5166 case Instruction::Select: {
5168 std::optional<const SCEV *> Res =
5169 compareWithBackedgeCondition(
SI->getCondition());
5177 std::optional<const SCEV *> Res = compareWithBackedgeCondition(
I);
5188 explicit SCEVBackedgeConditionFolder(
const Loop *L,
Value *BECond,
5189 bool IsPosBECond, ScalarEvolution &SE)
5190 : SCEVRewriteVisitor(SE),
L(
L), BackedgeCond(BECond),
5191 IsPositiveBECond(IsPosBECond) {}
5193 std::optional<const SCEV *> compareWithBackedgeCondition(
Value *IC);
5197 Value *BackedgeCond =
nullptr;
5199 bool IsPositiveBECond;
5202std::optional<const SCEV *>
5203SCEVBackedgeConditionFolder::compareWithBackedgeCondition(
Value *IC) {
5208 if (BackedgeCond == IC)
5211 return std::nullopt;
5216 static const SCEV *rewrite(
const SCEV *S,
const Loop *L,
5217 ScalarEvolution &SE) {
5223 const SCEV *visitUnknown(
const SCEVUnknown *Expr) {
5230 const SCEV *visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
5240 explicit SCEVShiftRewriter(
const Loop *L, ScalarEvolution &SE)
5241 : SCEVRewriteVisitor(SE),
L(
L) {}
5250ScalarEvolution::proveNoWrapViaConstantRanges(
const SCEVAddRecExpr *AR) {
5254 using OBO = OverflowingBinaryOperator;
5262 const APInt &BECountAP = BECountMax->getAPInt();
5263 unsigned NoOverflowBitWidth =
5275 Instruction::Add, IncRange, OBO::NoSignedWrap);
5276 if (NSWRegion.contains(AddRecRange))
5285 Instruction::Add, IncRange, OBO::NoUnsignedWrap);
5286 if (NUWRegion.contains(AddRecRange))
5294ScalarEvolution::proveNoSignedWrapViaInduction(
const SCEVAddRecExpr *AR) {
5304 if (!SignedWrapViaInductionTried.insert(AR).second)
5329 AC.assumptions().empty())
5337 const SCEV *OverflowLimit =
5339 if (OverflowLimit &&
5347ScalarEvolution::proveNoUnsignedWrapViaInduction(
const SCEVAddRecExpr *AR) {
5357 if (!UnsignedWrapViaInductionTried.insert(AR).second)
5383 AC.assumptions().empty())
5418 explicit BinaryOp(Operator *
Op)
5422 IsNSW = OBO->hasNoSignedWrap();
5423 IsNUW = OBO->hasNoUnsignedWrap();
5427 explicit BinaryOp(
unsigned Opcode,
Value *
LHS,
Value *
RHS,
bool IsNSW =
false,
5429 : Opcode(Opcode),
LHS(
LHS),
RHS(
RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
5441 return std::nullopt;
5447 switch (
Op->getOpcode()) {
5448 case Instruction::Add:
5449 case Instruction::Sub:
5450 case Instruction::Mul:
5451 case Instruction::UDiv:
5452 case Instruction::URem:
5453 case Instruction::And:
5454 case Instruction::AShr:
5455 case Instruction::Shl:
5456 return BinaryOp(
Op);
5458 case Instruction::Or: {
5461 return BinaryOp(Instruction::Add,
Op->getOperand(0),
Op->getOperand(1),
5463 return BinaryOp(
Op);
5466 case Instruction::Xor:
5470 if (RHSC->getValue().isSignMask())
5471 return BinaryOp(Instruction::Add,
Op->getOperand(0),
Op->getOperand(1));
5473 if (V->getType()->isIntegerTy(1))
5474 return BinaryOp(Instruction::Add,
Op->getOperand(0),
Op->getOperand(1));
5475 return BinaryOp(
Op);
5477 case Instruction::LShr:
5486 if (SA->getValue().ult(
BitWidth)) {
5488 ConstantInt::get(SA->getContext(),
5490 return BinaryOp(Instruction::UDiv,
Op->getOperand(0),
X);
5493 return BinaryOp(
Op);
5495 case Instruction::ExtractValue: {
5497 if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
5505 bool Signed = WO->isSigned();
5508 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS());
5513 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(),
5524 if (
II->getIntrinsicID() == Intrinsic::loop_decrement_reg)
5525 return BinaryOp(Instruction::Sub,
II->getOperand(0),
II->getOperand(1));
5527 return std::nullopt;
5553 if (
Op == SymbolicPHI)
5558 if (SourceBits != NewBits)
5576 if (!L || L->getHeader() != PN->
getParent())
5634std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5635ScalarEvolution::createAddRecFromPHIWithCastsImpl(
const SCEVUnknown *SymbolicPHI) {
5643 assert(L &&
"Expecting an integer loop header phi");
5648 Value *BEValueV =
nullptr, *StartValueV =
nullptr;
5649 for (
unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
5650 Value *
V = PN->getIncomingValue(i);
5651 if (
L->contains(PN->getIncomingBlock(i))) {
5654 }
else if (BEValueV != V) {
5658 }
else if (!StartValueV) {
5660 }
else if (StartValueV != V) {
5661 StartValueV =
nullptr;
5665 if (!BEValueV || !StartValueV)
5666 return std::nullopt;
5668 const SCEV *BEValue =
getSCEV(BEValueV);
5675 return std::nullopt;
5679 unsigned FoundIndex =
Add->getNumOperands();
5680 Type *TruncTy =
nullptr;
5682 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5685 if (FoundIndex == e) {
5690 if (FoundIndex ==
Add->getNumOperands())
5691 return std::nullopt;
5695 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5696 if (i != FoundIndex)
5697 Ops.push_back(
Add->getOperand(i));
5703 return std::nullopt;
5756 const SCEV *StartVal =
getSCEV(StartValueV);
5757 const SCEV *PHISCEV =
5784 auto getExtendedExpr = [&](
const SCEV *Expr,
5785 bool CreateSignExtend) ->
const SCEV * {
5788 const SCEV *ExtendedExpr =
5791 return ExtendedExpr;
5799 auto PredIsKnownFalse = [&](
const SCEV *Expr,
5800 const SCEV *ExtendedExpr) ->
bool {
5801 return Expr != ExtendedExpr &&
5805 const SCEV *StartExtended = getExtendedExpr(StartVal,
Signed);
5806 if (PredIsKnownFalse(StartVal, StartExtended)) {
5808 return std::nullopt;
5813 const SCEV *AccumExtended = getExtendedExpr(Accum,
true);
5814 if (PredIsKnownFalse(Accum, AccumExtended)) {
5816 return std::nullopt;
5819 auto AppendPredicate = [&](
const SCEV *Expr,
5820 const SCEV *ExtendedExpr) ->
void {
5821 if (Expr != ExtendedExpr &&
5829 AppendPredicate(StartVal, StartExtended);
5830 AppendPredicate(Accum, AccumExtended);
5838 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
5839 std::make_pair(NewAR, Predicates);
5841 PredicatedSCEVRewrites[{SymbolicPHI,
L}] = PredRewrite;
5845std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5850 return std::nullopt;
5853 auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
5854 if (
I != PredicatedSCEVRewrites.end()) {
5855 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
5858 if (Rewrite.first == SymbolicPHI)
5859 return std::nullopt;
5863 assert(!(Rewrite.second).empty() &&
"Expected to find Predicates");
5867 std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5868 Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);
5873 PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
5874 return std::nullopt;
5891 auto areExprsEqual = [&](
const SCEV *Expr1,
const SCEV *Expr2) ->
bool {
5892 if (Expr1 != Expr2 &&
5893 !Preds->implies(SE.getEqualPredicate(Expr1, Expr2), SE) &&
5894 !Preds->implies(SE.getEqualPredicate(Expr2, Expr1), SE))
5911const SCEV *ScalarEvolution::createSimpleAffineAddRec(
PHINode *PN,
5913 Value *StartValueV) {
5916 assert(BEValueV && StartValueV);
5922 if (BO->Opcode != Instruction::Add)
5925 const SCEV *Accum =
nullptr;
5926 if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
5928 else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
5942 insertValueToMap(PN, PHISCEV);
5947 proveNoWrapViaConstantRanges(AR)));
5955 "Accum is defined outside L, but is not invariant?");
5956 if (isAddRecNeverPoison(BEInst, L))
5963const SCEV *ScalarEvolution::createAddRecFromPHI(
PHINode *PN) {
5964 const Loop *
L = LI.getLoopFor(PN->
getParent());
5971 Value *BEValueV =
nullptr, *StartValueV =
nullptr;
5977 }
else if (BEValueV != V) {
5981 }
else if (!StartValueV) {
5983 }
else if (StartValueV != V) {
5984 StartValueV =
nullptr;
5988 if (!BEValueV || !StartValueV)
5991 assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
5992 "PHI node already processed?");
5996 if (
auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
6001 insertValueToMap(PN, SymbolicName);
6005 const SCEV *BEValue =
getSCEV(BEValueV);
6015 unsigned FoundIndex =
Add->getNumOperands();
6016 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
6017 if (
Add->getOperand(i) == SymbolicName)
6018 if (FoundIndex == e) {
6023 if (FoundIndex !=
Add->getNumOperands()) {
6026 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
6027 if (i != FoundIndex)
6028 Ops.push_back(SCEVBackedgeConditionFolder::rewrite(
Add->getOperand(i),
6040 if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
6047 if (
GEP->getOperand(0) == PN) {
6048 GEPNoWrapFlags NW =
GEP->getNoWrapFlags();
6066 const SCEV *StartVal =
getSCEV(StartValueV);
6067 const SCEV *PHISCEV =
getAddRecExpr(StartVal, Accum, L, Flags);
6072 forgetMemoizedResults({SymbolicName});
6073 insertValueToMap(PN, PHISCEV);
6078 proveNoWrapViaConstantRanges(AR)));
6103 const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *
this);
6104 const SCEV *
Start = SCEVInitRewriter::rewrite(Shifted, L, *
this,
false);
6106 isGuaranteedNotToCauseUB(Shifted) &&
::impliesPoison(Shifted, Start)) {
6107 const SCEV *StartVal =
getSCEV(StartValueV);
6108 if (Start == StartVal) {
6112 forgetMemoizedResults({SymbolicName});
6113 insertValueToMap(PN, Shifted);
6123 eraseValueFromMap(PN);
6138 Use &LeftUse =
Merge->getOperandUse(0);
6139 Use &RightUse =
Merge->getOperandUse(1);
6175 assert(IDom &&
"At least the entry block should dominate PN");
6183const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(
PHINode *PN) {
6188 return createNodeForSelectOrPHI(PN,
Cond,
LHS,
RHS);
6205 CommonInst = IncomingInst;
6221ScalarEvolution::createNodeForPHIWithIdenticalOperands(
PHINode *PN) {
6227 const SCEV *CommonSCEV =
getSCEV(CommonInst);
6228 bool SCEVExprsIdentical =
6230 [
this, CommonSCEV](
Value *V) { return CommonSCEV == getSCEV(V); });
6231 return SCEVExprsIdentical ? CommonSCEV :
nullptr;
6234const SCEV *ScalarEvolution::createNodeForPHI(
PHINode *PN) {
6235 if (
const SCEV *S = createAddRecFromPHI(PN))
6245 if (
const SCEV *S = createNodeForPHIWithIdenticalOperands(PN))
6248 if (
const SCEV *S = createNodeFromSelectLikePHI(PN))
6257 struct FindClosure {
6258 const SCEV *OperandToFind;
6264 bool canRecurseInto(
SCEVTypes Kind)
const {
6267 return RootKind == Kind || NonSequentialRootKind == Kind ||
6272 : OperandToFind(OperandToFind), RootKind(RootKind),
6273 NonSequentialRootKind(
6277 bool follow(
const SCEV *S) {
6278 Found = S == OperandToFind;
6280 return !isDone() && canRecurseInto(S->
getSCEVType());
6283 bool isDone()
const {
return Found; }
6286 FindClosure FC(OperandToFind, RootKind);
6291std::optional<const SCEV *>
6292ScalarEvolution::createNodeForSelectOrPHIInstWithICmpInstCond(
Type *Ty,
6302 switch (ICI->getPredicate()) {
6316 bool Signed = ICI->isSigned();
6317 const SCEV *LA =
getSCEV(TrueVal);
6325 if (LA == LS &&
RA == RS)
6327 if (LA == RS &&
RA == LS)
6330 auto CoerceOperand = [&](
const SCEV *
Op) ->
const SCEV * {
6331 if (
Op->getType()->isPointerTy()) {
6342 LS = CoerceOperand(LS);
6343 RS = CoerceOperand(RS);
6367 const SCEV *TrueValExpr =
getSCEV(TrueVal);
6368 const SCEV *FalseValExpr =
getSCEV(FalseVal);
6382 X = ZExt->getOperand();
6384 const SCEV *FalseValExpr =
getSCEV(FalseVal);
6395 return std::nullopt;
6398static std::optional<const SCEV *>
6400 const SCEV *TrueExpr,
const SCEV *FalseExpr) {
6404 "Unexpected operands of a select.");
6416 return std::nullopt;
6431static std::optional<const SCEV *>
6435 return std::nullopt;
6438 const auto *SETrue = SE->
getSCEV(TrueVal);
6439 const auto *SEFalse = SE->
getSCEV(FalseVal);
6443const SCEV *ScalarEvolution::createNodeForSelectOrPHIViaUMinSeq(
6445 assert(
Cond->getType()->isIntegerTy(1) &&
"Select condition is not an i1?");
6447 V->getType() ==
TrueVal->getType() &&
6448 "Types of select hands and of the result must match.");
6451 if (!
V->getType()->isIntegerTy(1))
6454 if (std::optional<const SCEV *> S =
6467 return getSCEV(CI->isOne() ? TrueVal : FalseVal);
6471 if (std::optional<const SCEV *> S =
6472 createNodeForSelectOrPHIInstWithICmpInstCond(
I->getType(), ICI,
6478 return createNodeForSelectOrPHIViaUMinSeq(V,
Cond, TrueVal, FalseVal);
6484 assert(
GEP->getSourceElementType()->isSized() &&
6485 "GEP source element type must be sized");
6488 for (
Value *Index :
GEP->indices())
6493APInt ScalarEvolution::getConstantMultipleImpl(
const SCEV *S,
6496 auto GetShiftedByZeros = [
BitWidth](uint32_t TrailingZeros) {
6499 : APInt::getOneBitSet(
BitWidth, TrailingZeros);
6501 auto GetGCDMultiple = [
this, CtxI](
const SCEVNAryExpr *
N) {
6504 for (
unsigned I = 1,
E =
N->getNumOperands();
I <
E && Res != 1; ++
I)
6523 return GetShiftedByZeros(TZ);
6533 return GetShiftedByZeros(TZ);
6537 if (
M->hasNoUnsignedWrap()) {
6540 for (
const SCEV *Operand :
M->operands().drop_front())
6548 for (
const SCEV *Operand :
M->operands())
6550 return GetShiftedByZeros(TZ);
6555 if (
N->hasNoUnsignedWrap())
6556 return GetGCDMultiple(
N);
6559 for (
const SCEV *Operand :
N->operands().drop_front())
6561 return GetShiftedByZeros(TZ);
6578 CtxI = &*F.getEntryBlock().begin();
6584 .countMinTrailingZeros();
6585 return GetShiftedByZeros(Known);
6598 return getConstantMultipleImpl(S, CtxI);
6600 auto I = ConstantMultipleCache.find(S);
6601 if (
I != ConstantMultipleCache.end())
6604 APInt Result = getConstantMultipleImpl(S, CtxI);
6605 auto InsertPair = ConstantMultipleCache.insert({S, Result});
6606 assert(InsertPair.second &&
"Should insert a new key");
6607 return InsertPair.first->second;
6624 if (
MDNode *MD =
I->getMetadata(LLVMContext::MD_range))
6627 if (std::optional<ConstantRange>
Range = CB->getRange())
6631 if (std::optional<ConstantRange>
Range =
A->getRange())
6634 return std::nullopt;
6641 UnsignedRanges.erase(AddRec);
6642 SignedRanges.erase(AddRec);
6643 ConstantMultipleCache.erase(AddRec);
6648getRangeForUnknownRecurrence(
const SCEVUnknown *U) {
6674 Value *Start, *Step;
6681 assert(L && L->getHeader() ==
P->getParent());
6694 case Instruction::AShr:
6695 case Instruction::LShr:
6696 case Instruction::Shl:
6711 KnownStep.getBitWidth() ==
BitWidth);
6714 auto MaxShiftAmt = KnownStep.getMaxValue();
6716 bool Overflow =
false;
6717 auto TotalShift = MaxShiftAmt.umul_ov(TCAP, Overflow);
6724 case Instruction::AShr: {
6732 if (KnownStart.isNonNegative())
6735 KnownStart.getMaxValue() + 1);
6736 if (KnownStart.isNegative())
6739 KnownEnd.getMaxValue() + 1);
6742 case Instruction::LShr: {
6751 KnownStart.getMaxValue() + 1);
6753 case Instruction::Shl: {
6757 if (TotalShift.ult(KnownStart.countMinLeadingZeros()))
6758 return ConstantRange(KnownStart.getMinValue(),
6759 KnownEnd.getMaxValue() + 1);
6784 [&](
Value *Operand) { return DT.dominates(Operand, PHI); }))
6791ScalarEvolution::getRangeRefIter(
const SCEV *S,
6792 ScalarEvolution::RangeSignHint SignHint) {
6793 DenseMap<const SCEV *, ConstantRange> &Cache =
6794 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
6797 SmallPtrSet<const SCEV *, 8> Seen;
6801 auto AddToWorklist = [&WorkList, &Seen, &Cache](
const SCEV *Expr) {
6802 if (!Seen.
insert(Expr).second)
6836 for (
unsigned I = 0;
I != WorkList.
size(); ++
I) {
6837 const SCEV *
P = WorkList[
I];
6841 for (
const SCEV *
Op :
P->operands())
6854 if (!WorkList.
empty()) {
6859 getRangeRef(
P, SignHint);
6863 return getRangeRef(S, SignHint, 0);
6870 const SCEV *S, ScalarEvolution::RangeSignHint SignHint,
unsigned Depth) {
6871 DenseMap<const SCEV *, ConstantRange> &Cache =
6872 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
6880 if (
I != Cache.
end())
6884 return setRange(
C, SignHint, ConstantRange(
C->getAPInt()));
6889 return getRangeRefIter(S, SignHint);
6892 ConstantRange ConservativeResult(
BitWidth,
true);
6893 using OBO = OverflowingBinaryOperator;
6897 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
6901 ConservativeResult =
6908 ConservativeResult = ConstantRange(
6924 ConservativeResult.intersectWith(
X.truncate(
BitWidth), RangeType));
6931 ConservativeResult.intersectWith(
X.zeroExtend(
BitWidth), RangeType));
6938 ConservativeResult.intersectWith(
X.signExtend(
BitWidth), RangeType));
6944 return setRange(Cast, SignHint,
X);
6949 const SCEV *URemLHS =
nullptr, *URemRHS =
nullptr;
6950 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED &&
6952 ConstantRange LHSRange = getRangeRef(URemLHS, SignHint,
Depth + 1);
6953 ConstantRange RHSRange = getRangeRef(URemRHS, SignHint,
Depth + 1);
6954 ConservativeResult =
6955 ConservativeResult.intersectWith(LHSRange.
urem(RHSRange), RangeType);
6957 ConstantRange
X = getRangeRef(
Add->getOperand(0), SignHint,
Depth + 1);
6958 unsigned WrapType = OBO::AnyWrap;
6959 if (
Add->hasNoSignedWrap())
6960 WrapType |= OBO::NoSignedWrap;
6961 if (
Add->hasNoUnsignedWrap())
6962 WrapType |= OBO::NoUnsignedWrap;
6964 X =
X.addWithNoWrap(getRangeRef(
Op, SignHint,
Depth + 1), WrapType,
6966 return setRange(
Add, SignHint,
6967 ConservativeResult.intersectWith(
X, RangeType));
6971 ConstantRange
X = getRangeRef(
Mul->getOperand(0), SignHint,
Depth + 1);
6973 X =
X.multiply(getRangeRef(
Op, SignHint,
Depth + 1));
6974 return setRange(
Mul, SignHint,
6975 ConservativeResult.intersectWith(
X, RangeType));
6979 ConstantRange
X = getRangeRef(UDiv->
getLHS(), SignHint,
Depth + 1);
6980 ConstantRange
Y = getRangeRef(UDiv->
getRHS(), SignHint,
Depth + 1);
6981 return setRange(UDiv, SignHint,
6982 ConservativeResult.intersectWith(
X.udiv(
Y), RangeType));
6990 if (!UnsignedMinValue.
isZero())
6991 ConservativeResult = ConservativeResult.intersectWith(
6992 ConstantRange(UnsignedMinValue, APInt(
BitWidth, 0)), RangeType);
7001 bool AllNonNeg =
true;
7002 bool AllNonPos =
true;
7003 for (
unsigned i = 1, e = AddRec->
getNumOperands(); i != e; ++i) {
7010 ConservativeResult = ConservativeResult.intersectWith(
7015 ConservativeResult = ConservativeResult.intersectWith(
7024 const SCEV *MaxBEScev =
7038 auto RangeFromAffine = getRangeForAffineAR(
7040 ConservativeResult =
7041 ConservativeResult.intersectWith(RangeFromAffine, RangeType);
7043 auto RangeFromFactoring = getRangeViaFactoring(
7045 ConservativeResult =
7046 ConservativeResult.intersectWith(RangeFromFactoring, RangeType);
7052 const SCEV *SymbolicMaxBECount =
7057 auto RangeFromAffineNew = getRangeForAffineNoSelfWrappingAR(
7058 AddRec, SymbolicMaxBECount,
BitWidth, SignHint);
7059 ConservativeResult =
7060 ConservativeResult.intersectWith(RangeFromAffineNew, RangeType);
7065 return setRange(AddRec, SignHint, std::move(ConservativeResult));
7075 ID = Intrinsic::umax;
7078 ID = Intrinsic::smax;
7082 ID = Intrinsic::umin;
7085 ID = Intrinsic::smin;
7092 ConstantRange
X = getRangeRef(NAry->getOperand(0), SignHint,
Depth + 1);
7093 for (
unsigned i = 1, e = NAry->getNumOperands(); i != e; ++i)
7095 ID, {
X, getRangeRef(NAry->getOperand(i), SignHint,
Depth + 1)});
7096 return setRange(S, SignHint,
7097 ConservativeResult.intersectWith(
X, RangeType));
7106 ConservativeResult =
7107 ConservativeResult.intersectWith(*MDRange, RangeType);
7112 auto CR = getRangeForUnknownRecurrence(U);
7113 ConservativeResult = ConservativeResult.intersectWith(CR);
7124 if (
U->getType()->isPointerTy()) {
7127 unsigned ptrSize = DL.getPointerTypeSizeInBits(
U->getType());
7128 int ptrIdxDiff = ptrSize -
BitWidth;
7129 if (ptrIdxDiff > 0 && ptrSize >
BitWidth && NS > (
unsigned)ptrIdxDiff)
7142 ConservativeResult = ConservativeResult.intersectWith(
7146 ConservativeResult = ConservativeResult.intersectWith(
7151 if (
U->getType()->isPointerTy() && SignHint == HINT_RANGE_UNSIGNED) {
7154 bool CanBeNull, CanBeFreed;
7155 uint64_t DerefBytes =
7156 V->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
7166 uint64_t
Align =
U->getValue()->getPointerAlignment(DL).value();
7167 uint64_t Rem = MaxVal.
urem(Align);
7172 ConservativeResult = ConservativeResult.intersectWith(
7182 return getRangeRef(AR, SignHint,
Depth + 1);
7186 ConstantRange RangeFromOps(
BitWidth,
false);
7188 for (
const auto &
Op :
Phi->operands()) {
7190 RangeFromOps = RangeFromOps.unionWith(OpRange);
7192 if (RangeFromOps.isFullSet())
7195 ConservativeResult =
7196 ConservativeResult.intersectWith(RangeFromOps, RangeType);
7202 if (
II->getIntrinsicID() == Intrinsic::vscale) {
7204 ConservativeResult = ConservativeResult.difference(Disallowed);
7207 return setRange(U, SignHint, std::move(ConservativeResult));
7213 return setRange(S, SignHint, std::move(ConservativeResult));
7222 const APInt &MaxBECount,
7229 if (Step == 0 || MaxBECount == 0)
7236 return ConstantRange::getFull(
BitWidth);
7252 return ConstantRange::getFull(
BitWidth);
7264 APInt MovedBoundary = Descending ? (StartLower - std::move(
Offset))
7265 : (StartUpper + std::move(
Offset));
7270 if (StartRange.
contains(MovedBoundary))
7271 return ConstantRange::getFull(
BitWidth);
7274 Descending ? std::move(MovedBoundary) : std::move(StartLower);
7276 Descending ? std::move(StartUpper) : std::move(MovedBoundary);
7285 const APInt &MaxBECount) {
7289 "mismatched bit widths");
7298 StepSRange.
getSignedMin(), StartSRange, MaxBECount,
true);
7300 StartSRange, MaxBECount,
7312ConstantRange ScalarEvolution::getRangeForAffineNoSelfWrappingAR(
7314 ScalarEvolution::RangeSignHint SignHint) {
7315 assert(AddRec->
isAffine() &&
"Non-affine AddRecs are not suppored!\n");
7317 "This only works for non-self-wrapping AddRecs!");
7318 const bool IsSigned = SignHint == HINT_RANGE_SIGNED;
7322 return ConstantRange::getFull(
BitWidth);
7330 return ConstantRange::getFull(
BitWidth);
7334 const SCEV *MaxItersWithoutWrap =
getUDivExpr(RangeWidth, StepAbs);
7336 MaxItersWithoutWrap))
7337 return ConstantRange::getFull(
BitWidth);
7358 ConstantRange StartRange = getRangeRef(Start, SignHint);
7359 ConstantRange EndRange = getRangeRef(End, SignHint);
7360 ConstantRange RangeBetween = StartRange.
unionWith(EndRange);
7364 return RangeBetween;
7369 return ConstantRange::getFull(
BitWidth);
7372 isKnownPredicateViaConstantRanges(LEPred, Start, End))
7373 return RangeBetween;
7375 isKnownPredicateViaConstantRanges(GEPred, Start, End))
7376 return RangeBetween;
7377 return ConstantRange::getFull(
BitWidth);
7382 const APInt &MaxBECount) {
7389 "mismatched bit widths");
7391 struct SelectPattern {
7392 Value *Condition =
nullptr;
7396 explicit SelectPattern(ScalarEvolution &SE,
unsigned BitWidth,
7398 std::optional<unsigned> CastOp;
7412 CastOp = SCast->getSCEVType();
7413 S = SCast->getOperand();
7416 using namespace llvm::PatternMatch;
7423 Condition =
nullptr;
7455 bool isRecognized() {
return Condition !=
nullptr; }
7458 SelectPattern StartPattern(*
this,
BitWidth, Start);
7459 if (!StartPattern.isRecognized())
7460 return ConstantRange::getFull(
BitWidth);
7462 SelectPattern StepPattern(*
this,
BitWidth, Step);
7463 if (!StepPattern.isRecognized())
7464 return ConstantRange::getFull(
BitWidth);
7466 if (StartPattern.Condition != StepPattern.Condition) {
7470 return ConstantRange::getFull(
BitWidth);
7481 const SCEV *TrueStart = this->
getConstant(StartPattern.TrueValue);
7482 const SCEV *TrueStep = this->
getConstant(StepPattern.TrueValue);
7483 const SCEV *FalseStart = this->
getConstant(StartPattern.FalseValue);
7484 const SCEV *FalseStep = this->
getConstant(StepPattern.FalseValue);
7486 ConstantRange TrueRange =
7487 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount);
7488 ConstantRange FalseRange =
7489 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount);
7511ScalarEvolution::getNonTrivialDefiningScopeBound(
const SCEV *S) {
7524 SmallPtrSet<const SCEV *, 16> Visited;
7526 auto pushOp = [&](
const SCEV *S) {
7527 if (!Visited.
insert(S).second)
7530 if (Visited.
size() > 30) {
7537 for (SCEVUse S :
Ops)
7541 while (!Worklist.
empty()) {
7543 if (
auto *DefI = getNonTrivialDefiningScopeBound(S)) {
7544 if (!Bound || DT.dominates(Bound, DefI))
7551 return Bound ? Bound : &*F.getEntryBlock().begin();
7557 return getDefiningScopeBound(
Ops, Discard);
7560bool ScalarEvolution::isGuaranteedToTransferExecutionTo(
const Instruction *
A,
7562 if (
A->getParent() ==
B->getParent() &&
7567 auto *BLoop = LI.getLoopFor(
B->getParent());
7568 if (BLoop && BLoop->getHeader() ==
B->getParent() &&
7569 BLoop->getLoopPreheader() ==
A->getParent() &&
7571 A->getParent()->end()) &&
7578bool ScalarEvolution::isGuaranteedNotToBePoison(
const SCEV *
Op) {
7579 SCEVPoisonCollector PC(
true);
7581 return PC.MaybePoison.empty();
7584bool ScalarEvolution::isGuaranteedNotToCauseUB(
const SCEV *
Op) {
7590 return M && (!
isKnownNonZero(Op1) || !isGuaranteedNotToBePoison(Op1));
7594bool ScalarEvolution::isSCEVExprNeverPoison(
const Instruction *
I) {
7611 for (
const Use &
Op :
I->operands()) {
7617 auto *DefI = getDefiningScopeBound(SCEVOps);
7618 return isGuaranteedToTransferExecutionTo(DefI,
I);
7621bool ScalarEvolution::isAddRecNeverPoison(
const Instruction *
I,
const Loop *L) {
7623 if (isSCEVExprNeverPoison(
I))
7634 auto *ExitingBB =
L->getExitingBlock();
7638 SmallPtrSet<const Value *, 16> KnownPoison;
7647 while (!Worklist.
empty()) {
7650 for (
const Use &U :
Poison->uses()) {
7653 DT.dominates(PoisonUser->
getParent(), ExitingBB))
7657 if (KnownPoison.
insert(PoisonUser).second)
7665ScalarEvolution::LoopProperties
7666ScalarEvolution::getLoopProperties(
const Loop *L) {
7667 using LoopProperties = ScalarEvolution::LoopProperties;
7669 auto Itr = LoopPropertiesCache.find(L);
7670 if (Itr == LoopPropertiesCache.end()) {
7673 return !
SI->isSimple();
7683 return I->mayWriteToMemory();
7686 LoopProperties LP = {
true,
7689 for (
auto *BB :
L->getBlocks())
7690 for (
auto &
I : *BB) {
7692 LP.HasNoAbnormalExits =
false;
7693 if (HasSideEffects(&
I))
7694 LP.HasNoSideEffects =
false;
7695 if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
7699 auto InsertPair = LoopPropertiesCache.insert({
L, LP});
7700 assert(InsertPair.second &&
"We just checked!");
7701 Itr = InsertPair.first;
7714const SCEV *ScalarEvolution::createSCEVIter(
Value *V) {
7720 Stack.emplace_back(V,
true);
7721 Stack.emplace_back(V,
false);
7722 while (!Stack.empty()) {
7723 auto E = Stack.pop_back_val();
7724 Value *CurV = E.getPointer();
7730 const SCEV *CreatedSCEV =
nullptr;
7733 CreatedSCEV = createSCEV(CurV);
7738 CreatedSCEV = getOperandsToCreate(CurV,
Ops);
7742 insertValueToMap(CurV, CreatedSCEV);
7746 Stack.emplace_back(CurV,
true);
7748 Stack.emplace_back(
Op,
false);
7765 if (!DT.isReachableFromEntry(
I->getParent()))
7778 switch (BO->Opcode) {
7779 case Instruction::Add:
7780 case Instruction::Mul: {
7787 Ops.push_back(BO->
Op);
7791 Ops.push_back(BO->RHS);
7795 (BO->Opcode == Instruction::Add &&
7796 (NewBO->Opcode != Instruction::Add &&
7797 NewBO->Opcode != Instruction::Sub)) ||
7798 (BO->Opcode == Instruction::Mul &&
7799 NewBO->Opcode != Instruction::Mul)) {
7800 Ops.push_back(BO->LHS);
7805 if (BO->
Op && (BO->IsNSW || BO->IsNUW)) {
7808 Ops.push_back(BO->LHS);
7816 case Instruction::Sub:
7817 case Instruction::UDiv:
7818 case Instruction::URem:
7820 case Instruction::AShr:
7821 case Instruction::Shl:
7822 case Instruction::Xor:
7826 case Instruction::And:
7827 case Instruction::Or:
7831 case Instruction::LShr:
7838 Ops.push_back(BO->LHS);
7839 Ops.push_back(BO->RHS);
7843 switch (
U->getOpcode()) {
7844 case Instruction::Trunc:
7845 case Instruction::ZExt:
7846 case Instruction::SExt:
7847 case Instruction::PtrToAddr:
7848 case Instruction::PtrToInt:
7849 Ops.push_back(
U->getOperand(0));
7852 case Instruction::BitCast:
7854 Ops.push_back(
U->getOperand(0));
7859 case Instruction::SDiv:
7860 case Instruction::SRem:
7861 Ops.push_back(
U->getOperand(0));
7862 Ops.push_back(
U->getOperand(1));
7865 case Instruction::GetElementPtr:
7867 "GEP source element type must be sized");
7871 case Instruction::IntToPtr:
7874 case Instruction::PHI:
7905 Ops.push_back(CondICmp->getOperand(0));
7906 Ops.push_back(CondICmp->getOperand(1));
7926 case Instruction::Select: {
7928 auto CanSimplifyToUnknown = [
this,
U]() {
7946 if (CanSimplifyToUnknown())
7953 case Instruction::Call:
7954 case Instruction::Invoke:
7961 switch (
II->getIntrinsicID()) {
7962 case Intrinsic::abs:
7963 Ops.push_back(
II->getArgOperand(0));
7965 case Intrinsic::umax:
7966 case Intrinsic::umin:
7967 case Intrinsic::smax:
7968 case Intrinsic::smin:
7969 case Intrinsic::usub_sat:
7970 case Intrinsic::uadd_sat:
7971 Ops.push_back(
II->getArgOperand(0));
7972 Ops.push_back(
II->getArgOperand(1));
7974 case Intrinsic::start_loop_iterations:
7975 case Intrinsic::annotation:
7976 case Intrinsic::ptr_annotation:
7977 Ops.push_back(
II->getArgOperand(0));
7989const SCEV *ScalarEvolution::createSCEV(
Value *V) {
7998 if (!DT.isReachableFromEntry(
I->getParent()))
8013 switch (BO->Opcode) {
8014 case Instruction::Add: {
8040 if (BO->Opcode == Instruction::Sub)
8048 if (BO->Opcode == Instruction::Sub)
8055 if (!NewBO || (NewBO->Opcode != Instruction::Add &&
8056 NewBO->Opcode != Instruction::Sub)) {
8066 case Instruction::Mul: {
8087 if (!NewBO || NewBO->Opcode != Instruction::Mul) {
8096 case Instruction::UDiv:
8100 case Instruction::URem:
8104 case Instruction::Sub: {
8107 Flags = getNoWrapFlagsFromUB(BO->
Op);
8112 case Instruction::And:
8118 if (CI->isMinusOne())
8120 const APInt &
A = CI->getValue();
8126 unsigned LZ =
A.countl_zero();
8127 unsigned TZ =
A.countr_zero();
8132 APInt EffectiveMask =
8134 if ((LZ != 0 || TZ != 0) && !((~
A & ~Known.
Zero) & EffectiveMask)) {
8137 const SCEV *ShiftedLHS =
nullptr;
8141 unsigned MulZeros = OpC->getAPInt().countr_zero();
8142 unsigned GCD = std::min(MulZeros, TZ);
8147 auto *NewMul =
getMulExpr(MulOps, LHSMul->getNoWrapFlags());
8169 case Instruction::Or:
8178 case Instruction::Xor:
8181 if (CI->isMinusOne())
8190 if (LBO->getOpcode() == Instruction::And &&
8191 LCI->getValue() == CI->getValue())
8192 if (
const SCEVZeroExtendExpr *Z =
8195 const SCEV *Z0 =
Z->getOperand();
8202 if (CI->getValue().isMask(Z0TySize))
8208 APInt Trunc = CI->getValue().trunc(Z0TySize);
8217 case Instruction::Shl:
8235 auto MulFlags = getNoWrapFlagsFromUB(BO->
Op);
8243 ConstantInt *
X = ConstantInt::get(
8249 case Instruction::AShr:
8271 const SCEV *AddTruncateExpr =
nullptr;
8272 ConstantInt *ShlAmtCI =
nullptr;
8273 const SCEV *AddConstant =
nullptr;
8275 if (L &&
L->getOpcode() == Instruction::Add) {
8283 if (LShift && LShift->
getOpcode() == Instruction::Shl) {
8290 APInt AddOperand = AddOperandCI->
getValue().
ashr(AShrAmt);
8298 }
else if (L &&
L->getOpcode() == Instruction::Shl) {
8303 const SCEV *ShlOp0SCEV =
getSCEV(
L->getOperand(0));
8308 if (AddTruncateExpr && ShlAmtCI) {
8320 const APInt &ShlAmt = ShlAmtCI->
getValue();
8324 const SCEV *CompositeExpr =
8326 if (
L->getOpcode() != Instruction::Shl)
8327 CompositeExpr =
getAddExpr(CompositeExpr, AddConstant);
8336 switch (
U->getOpcode()) {
8337 case Instruction::Trunc:
8340 case Instruction::ZExt:
8343 case Instruction::SExt:
8353 if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
8354 Type *Ty =
U->getType();
8362 case Instruction::BitCast:
8368 case Instruction::PtrToAddr: {
8375 case Instruction::PtrToInt: {
8378 Type *DstIntTy =
U->getType();
8386 case Instruction::IntToPtr:
8390 case Instruction::SDiv:
8397 case Instruction::SRem:
8404 case Instruction::GetElementPtr:
8407 case Instruction::PHI:
8410 case Instruction::Select:
8411 return createNodeForSelectOrPHI(U,
U->getOperand(0),
U->getOperand(1),
8414 case Instruction::Call:
8415 case Instruction::Invoke:
8420 switch (
II->getIntrinsicID()) {
8421 case Intrinsic::abs:
8425 case Intrinsic::umax:
8429 case Intrinsic::umin:
8433 case Intrinsic::smax:
8437 case Intrinsic::smin:
8441 case Intrinsic::usub_sat: {
8442 const SCEV *
X =
getSCEV(
II->getArgOperand(0));
8443 const SCEV *
Y =
getSCEV(
II->getArgOperand(1));
8447 case Intrinsic::uadd_sat: {
8448 const SCEV *
X =
getSCEV(
II->getArgOperand(0));
8449 const SCEV *
Y =
getSCEV(
II->getArgOperand(1));
8453 case Intrinsic::start_loop_iterations:
8454 case Intrinsic::annotation:
8455 case Intrinsic::ptr_annotation:
8459 case Intrinsic::vscale:
8479 auto *ExitCountType = ExitCount->
getType();
8480 assert(ExitCountType->isIntegerTy());
8482 1 + ExitCountType->getScalarSizeInBits());
8495 auto CanAddOneWithoutOverflow = [&]() {
8497 getRangeRef(ExitCount, RangeSignHint::HINT_RANGE_UNSIGNED);
8508 if (EvalSize > ExitCountSize && CanAddOneWithoutOverflow())
8538 assert(ExitingBlock &&
"Must pass a non-null exiting block!");
8539 assert(L->isLoopExiting(ExitingBlock) &&
8540 "Exiting block must actually branch out of the loop!");
8549 const auto *MaxExitCount =
8557 L->getExitingBlocks(ExitingBlocks);
8559 std::optional<unsigned> Res;
8560 for (
auto *ExitingBB : ExitingBlocks) {
8564 Res = std::gcd(*Res, Multiple);
8566 return Res.value_or(1);
8570 const SCEV *ExitCount) {
8600 assert(ExitingBlock &&
"Must pass a non-null exiting block!");
8601 assert(L->isLoopExiting(ExitingBlock) &&
8602 "Exiting block must actually branch out of the loop!");
8612 return getBackedgeTakenInfo(L).getExact(ExitingBlock,
this);
8614 return getBackedgeTakenInfo(L).getSymbolicMax(ExitingBlock,
this);
8616 return getBackedgeTakenInfo(L).getConstantMax(ExitingBlock,
this);
8626 return getPredicatedBackedgeTakenInfo(L).getExact(ExitingBlock,
this,
8629 return getPredicatedBackedgeTakenInfo(L).getSymbolicMax(ExitingBlock,
this,
8632 return getPredicatedBackedgeTakenInfo(L).getConstantMax(ExitingBlock,
this,
8640 return getPredicatedBackedgeTakenInfo(L).getExact(L,
this, &Preds);
8647 return getBackedgeTakenInfo(L).getExact(L,
this);
8649 return getBackedgeTakenInfo(L).getConstantMax(
this);
8651 return getBackedgeTakenInfo(L).getSymbolicMax(L,
this);
8658 return getPredicatedBackedgeTakenInfo(L).getSymbolicMax(L,
this, &Preds);
8663 return getPredicatedBackedgeTakenInfo(L).getConstantMax(
this, &Preds);
8667 return getBackedgeTakenInfo(L).isConstantMaxOrZero(
this);
8677 for (
PHINode &PN : Header->phis())
8678 if (Visited.
insert(&PN).second)
8682ScalarEvolution::BackedgeTakenInfo &
8683ScalarEvolution::getPredicatedBackedgeTakenInfo(
const Loop *L) {
8684 auto &BTI = getBackedgeTakenInfo(L);
8685 if (BTI.hasFullInfo())
8688 auto Pair = PredicatedBackedgeTakenCounts.try_emplace(L);
8691 return Pair.first->second;
8693 BackedgeTakenInfo
Result =
8694 computeBackedgeTakenCount(L,
true);
8696 return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result);
8699ScalarEvolution::BackedgeTakenInfo &
8700ScalarEvolution::getBackedgeTakenInfo(
const Loop *L) {
8706 std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator,
bool> Pair =
8707 BackedgeTakenCounts.try_emplace(L);
8709 return Pair.first->second;
8714 BackedgeTakenInfo
Result = computeBackedgeTakenCount(L);
8721 if (
Result.hasAnyInfo()) {
8724 auto LoopUsersIt = LoopUsers.find(L);
8725 if (LoopUsersIt != LoopUsers.end())
8727 forgetMemoizedResults(ToForget);
8730 for (PHINode &PN :
L->getHeader()->phis())
8731 ConstantEvolutionLoopExitValue.erase(&PN);
8739 return BackedgeTakenCounts.find(L)->second = std::move(Result);
8748 BackedgeTakenCounts.clear();
8749 PredicatedBackedgeTakenCounts.clear();
8750 BECountUsers.clear();
8751 LoopPropertiesCache.clear();
8752 ConstantEvolutionLoopExitValue.clear();
8753 ValueExprMap.clear();
8754 ValuesAtScopes.clear();
8755 ValuesAtScopesUsers.clear();
8756 LoopDispositions.clear();
8757 BlockDispositions.clear();
8758 UnsignedRanges.clear();
8759 SignedRanges.clear();
8760 ExprValueMap.clear();
8762 ConstantMultipleCache.clear();
8763 PredicatedSCEVRewrites.clear();
8765 FoldCacheUser.clear();
8767void ScalarEvolution::visitAndClearUsers(
8771 while (!Worklist.
empty()) {
8778 if (It != ValueExprMap.
end()) {
8779 eraseValueFromMap(It->first);
8782 ConstantEvolutionLoopExitValue.erase(PN);
8796 while (!LoopWorklist.
empty()) {
8800 forgetBackedgeTakenCounts(CurrL,
false);
8801 forgetBackedgeTakenCounts(CurrL,
true);
8804 for (
auto I = PredicatedSCEVRewrites.begin();
8805 I != PredicatedSCEVRewrites.end();) {
8806 std::pair<const SCEV *, const Loop *> Entry =
I->first;
8807 if (Entry.second == CurrL)
8808 PredicatedSCEVRewrites.erase(
I++);
8813 auto LoopUsersItr = LoopUsers.find(CurrL);
8814 if (LoopUsersItr != LoopUsers.end())
8819 visitAndClearUsers(Worklist, Visited, ToForget);
8821 LoopPropertiesCache.erase(CurrL);
8824 LoopWorklist.
append(CurrL->begin(), CurrL->end());
8826 forgetMemoizedResults(ToForget);
8843 visitAndClearUsers(Worklist, Visited, ToForget);
8845 forgetMemoizedResults(ToForget);
8857 struct InvalidationRootCollector {
8861 InvalidationRootCollector(
Loop *L) : L(L) {}
8863 bool follow(
const SCEV *S) {
8869 if (L->contains(AddRec->
getLoop()))
8874 bool isDone()
const {
return false; }
8877 InvalidationRootCollector
C(L);
8879 forgetMemoizedResults(
C.Roots);
8892 BlockDispositions.clear();
8893 LoopDispositions.clear();
8910 while (!Worklist.
empty()) {
8912 bool LoopDispoRemoved = LoopDispositions.erase(Curr);
8913 bool BlockDispoRemoved = BlockDispositions.erase(Curr);
8914 if (!LoopDispoRemoved && !BlockDispoRemoved)
8916 auto Users = SCEVUsers.find(Curr);
8917 if (
Users != SCEVUsers.end())
8930const SCEV *ScalarEvolution::BackedgeTakenInfo::getExact(
8934 if (!isComplete() || ExitNotTaken.
empty())
8945 for (
const auto &ENT : ExitNotTaken) {
8946 const SCEV *BECount = ENT.ExactNotTaken;
8949 "We should only have known counts for exiting blocks that dominate "
8952 Ops.push_back(BECount);
8957 assert((Preds || ENT.hasAlwaysTruePredicate()) &&
8958 "Predicate should be always true!");
8967const ScalarEvolution::ExitNotTakenInfo *
8968ScalarEvolution::BackedgeTakenInfo::getExitNotTaken(
8969 const BasicBlock *ExitingBlock,
8970 SmallVectorImpl<const SCEVPredicate *> *Predicates)
const {
8971 for (
const auto &ENT : ExitNotTaken)
8972 if (ENT.ExitingBlock == ExitingBlock) {
8973 if (ENT.hasAlwaysTruePredicate())
8975 else if (Predicates) {
8985const SCEV *ScalarEvolution::BackedgeTakenInfo::getConstantMax(
8987 SmallVectorImpl<const SCEVPredicate *> *Predicates)
const {
8988 if (!getConstantMax())
8991 for (
const auto &ENT : ExitNotTaken)
8992 if (!ENT.hasAlwaysTruePredicate()) {
9000 "No point in having a non-constant max backedge taken count!");
9001 return getConstantMax();
9004const SCEV *ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(
9006 SmallVectorImpl<const SCEVPredicate *> *Predicates) {
9014 for (
const auto &ENT : ExitNotTaken) {
9015 const SCEV *ExitCount = ENT.SymbolicMaxNotTaken;
9018 "We should only have known counts for exiting blocks that "
9024 assert((Predicates || ENT.hasAlwaysTruePredicate()) &&
9025 "Predicate should be always true!");
9028 if (ExitCounts.
empty())
9037bool ScalarEvolution::BackedgeTakenInfo::isConstantMaxOrZero(
9039 auto PredicateNotAlwaysTrue = [](
const ExitNotTakenInfo &ENT) {
9040 return !ENT.hasAlwaysTruePredicate();
9042 return MaxOrZero && !
any_of(ExitNotTaken, PredicateNotAlwaysTrue);
9058 this->ExactNotTaken = E = ConstantMaxNotTaken;
9059 this->SymbolicMaxNotTaken = SymbolicMaxNotTaken = ConstantMaxNotTaken;
9064 "Exact is not allowed to be less precise than Constant Max");
9067 "Exact is not allowed to be less precise than Symbolic Max");
9070 "Symbolic Max is not allowed to be less precise than Constant Max");
9073 "No point in having a non-constant max backedge taken count!");
9075 for (
const auto PredList : PredLists)
9076 for (
const auto *
P : PredList) {
9084 "Backedge count should be int");
9087 "Max backedge count should be int");
9100ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
9102 bool IsComplete,
const SCEV *ConstantMax,
bool MaxOrZero)
9103 : ConstantMax(ConstantMax), IsComplete(IsComplete), MaxOrZero(MaxOrZero) {
9104 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
9106 ExitNotTaken.reserve(ExitCounts.
size());
9107 std::transform(ExitCounts.
begin(), ExitCounts.
end(),
9108 std::back_inserter(ExitNotTaken),
9109 [&](
const EdgeExitInfo &EEI) {
9110 BasicBlock *ExitBB = EEI.first;
9111 const ExitLimit &EL = EEI.second;
9112 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken,
9113 EL.ConstantMaxNotTaken, EL.SymbolicMaxNotTaken,
9118 "No point in having a non-constant max backedge taken count!");
9122ScalarEvolution::BackedgeTakenInfo
9123ScalarEvolution::computeBackedgeTakenCount(
const Loop *L,
9124 bool AllowPredicates) {
9126 L->getExitingBlocks(ExitingBlocks);
9128 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
9131 bool CouldComputeBECount =
true;
9133 const SCEV *MustExitMaxBECount =
nullptr;
9134 const SCEV *MayExitMaxBECount =
nullptr;
9135 bool MustExitMaxOrZero =
false;
9136 bool IsOnlyExit = ExitingBlocks.
size() == 1;
9147 bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
9148 if (ExitIfTrue == CI->
isZero())
9152 ExitLimit EL = computeExitLimit(L, ExitBB, IsOnlyExit, AllowPredicates);
9154 assert((AllowPredicates || EL.Predicates.empty()) &&
9155 "Predicated exit limit when predicates are not allowed!");
9160 ++NumExitCountsComputed;
9164 CouldComputeBECount =
false;
9171 "Exact is known but symbolic isn't?");
9172 ++NumExitCountsNotComputed;
9187 DT.dominates(ExitBB, Latch)) {
9188 if (!MustExitMaxBECount) {
9189 MustExitMaxBECount = EL.ConstantMaxNotTaken;
9190 MustExitMaxOrZero = EL.MaxOrZero;
9193 EL.ConstantMaxNotTaken);
9197 MayExitMaxBECount = EL.ConstantMaxNotTaken;
9200 EL.ConstantMaxNotTaken);
9204 const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
9208 bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1);
9214 for (
const auto &Pair : ExitCounts) {
9216 BECountUsers[Pair.second.ExactNotTaken].insert({
L, AllowPredicates});
9218 BECountUsers[Pair.second.SymbolicMaxNotTaken].insert(
9219 {
L, AllowPredicates});
9221 return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount,
9222 MaxBECount, MaxOrZero);
9225ScalarEvolution::ExitLimit
9226ScalarEvolution::computeExitLimit(
const Loop *L, BasicBlock *ExitingBlock,
9227 bool IsOnlyExit,
bool AllowPredicates) {
9228 assert(
L->contains(ExitingBlock) &&
"Exit count for non-loop block?");
9232 if (!Latch || !DT.dominates(ExitingBlock, Latch))
9237 bool ExitIfTrue = !
L->contains(BI->getSuccessor(0));
9238 assert(ExitIfTrue ==
L->contains(BI->getSuccessor(1)) &&
9239 "It should have one successor in loop and one exit block!");
9250 if (!
L->contains(SBB)) {
9255 assert(Exit &&
"Exiting block must have at least one exit");
9256 return computeExitLimitFromSingleExitSwitch(
9257 L, SI, Exit, IsOnlyExit);
9264 const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
bool ControlsOnlyExit,
9265 bool AllowPredicates) {
9266 ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates);
9267 return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue,
9268 ControlsOnlyExit, AllowPredicates);
9271std::optional<ScalarEvolution::ExitLimit>
9272ScalarEvolution::ExitLimitCache::find(
const Loop *L,
Value *ExitCond,
9273 bool ExitIfTrue,
bool ControlsOnlyExit,
9274 bool AllowPredicates) {
9276 (void)this->ExitIfTrue;
9277 (void)this->AllowPredicates;
9279 assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
9280 this->AllowPredicates == AllowPredicates &&
9281 "Variance in assumed invariant key components!");
9282 auto Itr = TripCountMap.find({ExitCond, ControlsOnlyExit});
9283 if (Itr == TripCountMap.end())
9284 return std::nullopt;
9288void ScalarEvolution::ExitLimitCache::insert(
const Loop *L,
Value *ExitCond,
9290 bool ControlsOnlyExit,
9291 bool AllowPredicates,
9293 assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
9294 this->AllowPredicates == AllowPredicates &&
9295 "Variance in assumed invariant key components!");
9297 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsOnlyExit}, EL});
9298 assert(InsertResult.second &&
"Expected successful insertion!");
9303ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached(
9304 ExitLimitCacheTy &Cache,
const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
9305 bool ControlsOnlyExit,
bool AllowPredicates) {
9307 if (
auto MaybeEL = Cache.find(L, ExitCond, ExitIfTrue, ControlsOnlyExit,
9311 ExitLimit EL = computeExitLimitFromCondImpl(
9312 Cache, L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates);
9313 Cache.insert(L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates, EL);
9317ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl(
9318 ExitLimitCacheTy &Cache,
const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
9319 bool ControlsOnlyExit,
bool AllowPredicates) {
9321 if (
auto LimitFromBinOp = computeExitLimitFromCondFromBinOp(
9322 Cache, L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates))
9323 return *LimitFromBinOp;
9329 computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsOnlyExit);
9330 if (EL.hasFullInfo() || !AllowPredicates)
9334 return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue,
9354 const WithOverflowInst *WO;
9369 auto EL = computeExitLimitFromICmp(L, Pred,
LHS,
getConstant(NewRHSC),
9370 ControlsOnlyExit, AllowPredicates);
9371 if (EL.hasAnyInfo())
9376 return computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
9379std::optional<ScalarEvolution::ExitLimit>
9380ScalarEvolution::computeExitLimitFromCondFromBinOp(
9381 ExitLimitCacheTy &Cache,
const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
9382 bool ControlsOnlyExit,
bool AllowPredicates) {
9391 return std::nullopt;
9396 bool EitherMayExit = IsAnd ^ ExitIfTrue;
9397 ExitLimit EL0 = computeExitLimitFromCondCached(
9398 Cache, L, Op0, ExitIfTrue, ControlsOnlyExit && !EitherMayExit,
9400 ExitLimit EL1 = computeExitLimitFromCondCached(
9401 Cache, L, Op1, ExitIfTrue, ControlsOnlyExit && !EitherMayExit,
9405 const Constant *NeutralElement = ConstantInt::get(ExitCond->
getType(), IsAnd);
9407 return Op1 == NeutralElement ? EL0 : EL1;
9409 return Op0 == NeutralElement ? EL1 : EL0;
9414 if (EitherMayExit) {
9424 ConstantMaxBECount = EL1.ConstantMaxNotTaken;
9426 ConstantMaxBECount = EL0.ConstantMaxNotTaken;
9429 EL1.ConstantMaxNotTaken);
9431 SymbolicMaxBECount = EL1.SymbolicMaxNotTaken;
9433 SymbolicMaxBECount = EL0.SymbolicMaxNotTaken;
9436 EL0.SymbolicMaxNotTaken, EL1.SymbolicMaxNotTaken, UseSequentialUMin);
9440 if (EL0.ExactNotTaken == EL1.ExactNotTaken)
9441 BECount = EL0.ExactNotTaken;
9454 SymbolicMaxBECount =
9456 return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount,
false,
9460ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromICmp(
9461 const Loop *L, ICmpInst *ExitCond,
bool ExitIfTrue,
bool ControlsOnlyExit,
9462 bool AllowPredicates) {
9474 ExitLimit EL = computeExitLimitFromICmp(L, Pred,
LHS,
RHS, ControlsOnlyExit,
9476 if (EL.hasAnyInfo())
9479 auto *ExhaustiveCount =
9480 computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
9483 return ExhaustiveCount;
9485 return computeShiftCompareExitLimit(ExitCond->
getOperand(0),
9488ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromICmp(
9489 const Loop *L, CmpPredicate Pred, SCEVUse
LHS, SCEVUse
RHS,
9490 bool ControlsOnlyExit,
bool AllowPredicates) {
9515 ConstantRange CompRange =
9531 SCEVUse InnerLHS =
LHS;
9533 InnerLHS = ZExt->getOperand();
9580 if (EL.hasAnyInfo())
9597 if (EL.hasAnyInfo())
return EL;
9629 ExitLimit EL = howManyLessThans(
LHS,
RHS, L, IsSigned, ControlsOnlyExit,
9631 if (EL.hasAnyInfo())
9647 ExitLimit EL = howManyGreaterThans(
LHS,
RHS, L, IsSigned, ControlsOnlyExit,
9649 if (EL.hasAnyInfo())
9660ScalarEvolution::ExitLimit
9661ScalarEvolution::computeExitLimitFromSingleExitSwitch(
const Loop *L,
9663 BasicBlock *ExitingBlock,
9664 bool ControlsOnlyExit) {
9665 assert(!
L->contains(ExitingBlock) &&
"Not an exiting block!");
9668 if (
Switch->getDefaultDest() == ExitingBlock)
9672 "Default case must not exit the loop!");
9678 if (EL.hasAnyInfo())
9690 "Evaluation of SCEV at constant didn't fold correctly?");
9694ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
9704 const BasicBlock *Predecessor =
L->getLoopPredecessor();
9710 auto MatchPositiveShift =
9713 using namespace PatternMatch;
9715 ConstantInt *ShiftAmt;
9717 OutOpCode = Instruction::LShr;
9719 OutOpCode = Instruction::AShr;
9721 OutOpCode = Instruction::Shl;
9736 auto MatchShiftRecurrence =
9738 std::optional<Instruction::BinaryOps> PostShiftOpCode;
9753 if (MatchPositiveShift(
LHS, V, OpC)) {
9754 PostShiftOpCode = OpC;
9760 if (!PNOut || PNOut->getParent() !=
L->getHeader())
9763 Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
9769 MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&
9776 (!PostShiftOpCode || *PostShiftOpCode == OpCodeOut);
9781 if (!MatchShiftRecurrence(
LHS, PN, OpCode))
9793 ConstantInt *StableValue =
nullptr;
9798 case Instruction::AShr: {
9806 StableValue = ConstantInt::get(Ty, 0);
9808 StableValue = ConstantInt::get(Ty, -1,
true);
9814 case Instruction::LShr:
9815 case Instruction::Shl:
9825 "Otherwise cannot be an operand to a branch instruction");
9827 if (
Result->isNullValue()) {
9829 const SCEV *UpperBound =
9846 if (
const Function *
F = CI->getCalledFunction())
9855 if (!L->contains(
I))
return false;
9860 return L->getHeader() ==
I->getParent();
9936 if (!
I)
return nullptr;
9949 std::vector<Constant*> Operands(
I->getNumOperands());
9951 for (
unsigned i = 0, e =
I->getNumOperands(); i != e; ++i) {
9955 if (!Operands[i])
return nullptr;
9960 if (!
C)
return nullptr;
9982 if (IncomingVal != CurrentVal) {
9985 IncomingVal = CurrentVal;
9997ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
10000 auto [
I,
Inserted] = ConstantEvolutionLoopExitValue.try_emplace(PN);
10009 DenseMap<Instruction *, Constant *> CurrentIterVals;
10011 assert(PN->
getParent() == Header &&
"Can't evaluate PHI not in loop header!");
10017 for (PHINode &
PHI : Header->phis()) {
10019 CurrentIterVals[&
PHI] = StartCST;
10021 if (!CurrentIterVals.
count(PN))
10022 return RetVal =
nullptr;
10028 "BEs is <= MaxBruteForceIterations which is an 'unsigned'!");
10031 unsigned IterationNum = 0;
10033 for (; ; ++IterationNum) {
10034 if (IterationNum == NumIterations)
10035 return RetVal = CurrentIterVals[PN];
10039 DenseMap<Instruction *, Constant *> NextIterVals;
10044 NextIterVals[PN] = NextPHI;
10046 bool StoppedEvolving = NextPHI == CurrentIterVals[PN];
10052 for (
const auto &
I : CurrentIterVals) {
10054 if (!
PHI ||
PHI == PN ||
PHI->getParent() != Header)
continue;
10059 for (
const auto &
I : PHIsToCompute) {
10060 PHINode *
PHI =
I.first;
10063 Value *BEValue =
PHI->getIncomingValueForBlock(Latch);
10066 if (NextPHI !=
I.second)
10067 StoppedEvolving =
false;
10072 if (StoppedEvolving)
10073 return RetVal = CurrentIterVals[PN];
10075 CurrentIterVals.swap(NextIterVals);
10079const SCEV *ScalarEvolution::computeExitCountExhaustively(
const Loop *L,
10089 DenseMap<Instruction *, Constant *> CurrentIterVals;
10091 assert(PN->
getParent() == Header &&
"Can't evaluate PHI not in loop header!");
10094 assert(Latch &&
"Should follow from NumIncomingValues == 2!");
10096 for (PHINode &
PHI : Header->phis()) {
10098 CurrentIterVals[&
PHI] = StartCST;
10100 if (!CurrentIterVals.
count(PN))
10108 for (
unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){
10115 if (CondVal->getValue() == uint64_t(ExitWhen)) {
10116 ++NumBruteForceTripCountsComputed;
10121 DenseMap<Instruction *, Constant *> NextIterVals;
10127 for (
const auto &
I : CurrentIterVals) {
10129 if (!
PHI ||
PHI->getParent() != Header)
continue;
10132 for (PHINode *
PHI : PHIsToCompute) {
10134 if (NextPHI)
continue;
10136 Value *BEValue =
PHI->getIncomingValueForBlock(Latch);
10139 CurrentIterVals.
swap(NextIterVals);
10150 for (
auto &LS : Values)
10152 return LS.second ? LS.second : V;
10157 const SCEV *
C = computeSCEVAtScope(V, L);
10158 for (
auto &LS :
reverse(ValuesAtScopes[V]))
10159 if (LS.first == L) {
10162 ValuesAtScopesUsers[
C].push_back({L, V});
10173 switch (V->getSCEVType()) {
10213 assert(!
C->getType()->isPointerTy() &&
10214 "Can only have one pointer, and it must be last");
10239const SCEV *ScalarEvolution::getWithOperands(
const SCEV *S,
10240 SmallVectorImpl<SCEVUse> &NewOps) {
10275const SCEV *ScalarEvolution::computeSCEVAtScope(
const SCEV *V,
const Loop *L) {
10276 switch (
V->getSCEVType()) {
10287 for (
unsigned i = 0, e = AddRec->
getNumOperands(); i != e; ++i) {
10298 for (++i; i !=
e; ++i)
10343 for (
unsigned i = 0, e =
Ops.size(); i != e; ++i) {
10353 for (++i; i !=
e; ++i) {
10358 return getWithOperands(V, NewOps);
10373 const Loop *CurrLoop = this->LI[
I->getParent()];
10384 if (BackedgeTakenCount->
isZero()) {
10385 Value *InitValue =
nullptr;
10386 bool MultipleInitValues =
false;
10392 MultipleInitValues =
true;
10397 if (!MultipleInitValues && InitValue)
10406 unsigned InLoopPred =
10417 getConstantEvolutionLoopExitValue(PN, BTCC->getAPInt(), CurrLoop);
10431 SmallVector<Constant *, 4> Operands;
10432 Operands.
reserve(
I->getNumOperands());
10433 bool MadeImprovement =
false;
10448 MadeImprovement |= OrigV != OpV;
10453 assert(
C->getType() ==
Op->getType() &&
"Type mismatch");
10458 if (!MadeImprovement)
10479const SCEV *ScalarEvolution::stripInjectiveFunctions(
const SCEV *S)
const {
10481 return stripInjectiveFunctions(ZExt->getOperand());
10483 return stripInjectiveFunctions(SExt->getOperand());
10501 assert(
A != 0 &&
"A must be non-zero.");
10517 if (MinTZ < Mult2 && L->getLoopPredecessor())
10519 if (MinTZ < Mult2) {
10542 APInt AD =
A.lshr(Mult2).trunc(BW - Mult2);
10562static std::optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>>
10568 LLVM_DEBUG(
dbgs() << __func__ <<
": analyzing quadratic addrec: "
10569 << *AddRec <<
'\n');
10572 if (!LC || !MC || !
NC) {
10573 LLVM_DEBUG(
dbgs() << __func__ <<
": coefficients are not constant\n");
10574 return std::nullopt;
10580 assert(!
N.isZero() &&
"This is not a quadratic addrec");
10588 N =
N.sext(NewWidth);
10589 M = M.sext(NewWidth);
10590 L = L.sext(NewWidth);
10607 <<
"x + " <<
C <<
", coeff bw: " << NewWidth
10608 <<
", multiplied by " <<
T <<
'\n');
10617 std::optional<APInt>
Y) {
10619 unsigned W = std::max(
X->getBitWidth(),
Y->getBitWidth());
10622 return XW.
slt(YW) ? *
X : *
Y;
10625 return std::nullopt;
10626 return X ? *
X : *
Y;
10643 return std::nullopt;
10644 unsigned W =
X->getBitWidth();
10664static std::optional<APInt>
10670 return std::nullopt;
10673 LLVM_DEBUG(
dbgs() << __func__ <<
": solving for unsigned overflow\n");
10674 std::optional<APInt>
X =
10677 return std::nullopt;
10682 return std::nullopt;
10697static std::optional<APInt>
10701 "Starting value of addrec should be 0");
10702 LLVM_DEBUG(
dbgs() << __func__ <<
": solving boundary crossing for range "
10703 <<
Range <<
", addrec " << *AddRec <<
'\n');
10707 "Addrec's initial value should be in range");
10713 return std::nullopt;
10723 auto SolveForBoundary =
10724 [&](
APInt Bound) -> std::pair<std::optional<APInt>,
bool> {
10727 LLVM_DEBUG(
dbgs() <<
"SolveQuadraticAddRecRange: checking boundary "
10728 << Bound <<
" (before multiplying by " << M <<
")\n");
10731 std::optional<APInt> SO;
10734 "signed overflow\n");
10738 "unsigned overflow\n");
10739 std::optional<APInt> UO =
10742 auto LeavesRange = [&] (
const APInt &
X) {
10759 return {std::nullopt,
false};
10764 if (LeavesRange(*Min))
10765 return { Min,
true };
10766 std::optional<APInt> Max = Min == SO ? UO : SO;
10767 if (LeavesRange(*Max))
10768 return { Max,
true };
10771 return {std::nullopt,
true};
10778 auto SL = SolveForBoundary(
Lower);
10779 auto SU = SolveForBoundary(
Upper);
10782 if (!SL.second || !SU.second)
10783 return std::nullopt;
10826ScalarEvolution::ExitLimit ScalarEvolution::howFarToZero(
const SCEV *V,
10828 bool ControlsOnlyExit,
10829 bool AllowPredicates) {
10840 if (
C->getValue()->isZero())
return C;
10844 const SCEVAddRecExpr *AddRec =
10847 if (!AddRec && AllowPredicates)
10853 if (!AddRec || AddRec->
getLoop() != L)
10864 return ExitLimit(R, R, R,
false, Predicates);
10922 const SCEV *DistancePlusOne =
getAddExpr(Distance, One);
10948 const SCEV *
Exact =
10956 const SCEV *SymbolicMax =
10958 return ExitLimit(
Exact, ConstantMax, SymbolicMax,
false, Predicates);
10967 AllowPredicates ? &Predicates :
nullptr, *
this, L);
10975 return ExitLimit(
E, M, S,
false, Predicates);
10978ScalarEvolution::ExitLimit
10979ScalarEvolution::howFarToNonZero(
const SCEV *V,
const Loop *L) {
10987 if (!
C->getValue()->isZero())
10997std::pair<const BasicBlock *, const BasicBlock *>
10998ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(
const BasicBlock *BB)
11009 if (
const Loop *L = LI.getLoopFor(BB))
11010 return {
L->getLoopPredecessor(),
L->getHeader()};
11012 return {
nullptr, BB};
11021 if (
A ==
B)
return true;
11036 if (ComputesEqualValues(AI, BI))
11044 const SCEV *Op0, *Op1;
11063 auto TrivialCase = [&](
bool TriviallyTrue) {
11072 const SCEV *NewLHS, *NewRHS;
11096 return TrivialCase(
false);
11097 return TrivialCase(
true);
11120 const APInt &
RA = RC->getAPInt();
11122 bool SimplifiedByConstantRange =
false;
11127 return TrivialCase(
true);
11129 return TrivialCase(
false);
11138 Changed = SimplifiedByConstantRange =
true;
11142 if (!SimplifiedByConstantRange) {
11159 assert(!
RA.isMinValue() &&
"Should have been caught earlier!");
11165 assert(!
RA.isMaxValue() &&
"Should have been caught earlier!");
11171 assert(!
RA.isMinSignedValue() &&
"Should have been caught earlier!");
11177 assert(!
RA.isMaxSignedValue() &&
"Should have been caught earlier!");
11189 return TrivialCase(
true);
11191 return TrivialCase(
false);
11296 auto NonRecursive = [OrNegative](
const SCEV *S) {
11298 return C->getAPInt().isPowerOf2() ||
11299 (OrNegative &&
C->getAPInt().isNegatedPowerOf2());
11305 if (NonRecursive(S))
11331 APInt C = Cst->getAPInt();
11332 return C.urem(M) == 0;
11340 const SCEV *SmodM =
11355 for (
auto *
A : Assumptions)
11356 if (
A->implies(
P, *
this))
11369std::pair<const SCEV *, const SCEV *>
11372 const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *
this);
11374 return { Start, Start };
11376 const SCEV *
PostInc = SCEVPostIncRewriter::rewrite(S, L, *
this);
11385 getUsedLoops(LHS, LoopsUsed);
11386 getUsedLoops(RHS, LoopsUsed);
11388 if (LoopsUsed.
empty())
11393 for (
const auto *L1 : LoopsUsed)
11394 for (
const auto *L2 : LoopsUsed)
11395 assert((DT.dominates(L1->getHeader(), L2->getHeader()) ||
11396 DT.dominates(L2->getHeader(), L1->getHeader())) &&
11397 "Domination relationship is not a linear order");
11427 SplitRHS.second) &&
11439 if (isKnownPredicateViaSplitting(Pred, LHS, RHS))
11443 return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS);
11453 return std::nullopt;
11468 if (KnownWithoutContext)
11469 return KnownWithoutContext;
11476 return std::nullopt;
11482 const Loop *L = LHS->getLoop();
11487std::optional<ScalarEvolution::MonotonicPredicateType>
11490 auto Result = getMonotonicPredicateTypeImpl(LHS, Pred);
11496 auto ResultSwapped =
11499 assert(*ResultSwapped != *Result &&
11500 "monotonicity should flip as we flip the predicate");
11507std::optional<ScalarEvolution::MonotonicPredicateType>
11508ScalarEvolution::getMonotonicPredicateTypeImpl(
const SCEVAddRecExpr *LHS,
11522 return std::nullopt;
11526 "Should be greater or less!");
11530 if (!LHS->hasNoUnsignedWrap())
11531 return std::nullopt;
11535 "Relational predicate is either signed or unsigned!");
11536 if (!
LHS->hasNoSignedWrap())
11537 return std::nullopt;
11539 const SCEV *Step =
LHS->getStepRecurrence(*
this);
11547 return std::nullopt;
11550std::optional<ScalarEvolution::LoopInvariantPredicate>
11557 return std::nullopt;
11564 if (!ArLHS || ArLHS->
getLoop() != L)
11565 return std::nullopt;
11569 return std::nullopt;
11595 return std::nullopt;
11632 return std::nullopt;
11635std::optional<ScalarEvolution::LoopInvariantPredicate>
11640 Pred, LHS, RHS, L, CtxI, MaxIter))
11650 Pred, LHS, RHS, L, CtxI,
Op))
11652 return std::nullopt;
11655std::optional<ScalarEvolution::LoopInvariantPredicate>
11670 return std::nullopt;
11677 if (!AR || AR->
getLoop() != L)
11678 return std::nullopt;
11683 Pred = Pred.dropSameSign();
11687 return std::nullopt;
11693 if (Step != One && Step != MinusOne)
11694 return std::nullopt;
11700 return std::nullopt;
11706 return std::nullopt;
11714 if (Step == MinusOne)
11718 return std::nullopt;
11724bool ScalarEvolution::isKnownPredicateViaConstantRanges(
CmpPredicate Pred,
11730 auto CheckRange = [&](
bool IsSigned) {
11733 return RangeLHS.
icmp(Pred, RangeRHS);
11742 if (CheckRange(
true) || CheckRange(
false))
11751bool ScalarEvolution::isKnownPredicateViaNoOverflow(CmpPredicate Pred,
11752 SCEVUse
LHS, SCEVUse
RHS) {
11757 auto MatchBinaryAddToConst = [
this](SCEVUse
X, SCEVUse
Y, APInt &OutC1,
11760 SCEVUse XNonConstOp, XConstOp;
11761 SCEVUse YNonConstOp, YConstOp;
11765 if (!splitBinaryAdd(
X, XConstOp, XNonConstOp, XFlagsPresent)) {
11768 XFlagsPresent = ExpectedFlags;
11773 if (!splitBinaryAdd(
Y, YConstOp, YNonConstOp, YFlagsPresent)) {
11776 YFlagsPresent = ExpectedFlags;
11779 if (YNonConstOp != XNonConstOp)
11787 if ((YFlagsPresent & ExpectedFlags) != ExpectedFlags)
11790 (XFlagsPresent & ExpectedFlags) != ExpectedFlags) {
11850bool ScalarEvolution::isKnownPredicateViaSplitting(CmpPredicate Pred,
11851 SCEVUse
LHS, SCEVUse
RHS) {
11871bool ScalarEvolution::isImpliedViaGuard(
const BasicBlock *BB, CmpPredicate Pred,
11872 const SCEV *
LHS,
const SCEV *
RHS) {
11877 return any_of(*BB, [&](
const Instruction &
I) {
11878 using namespace llvm::PatternMatch;
11883 isImpliedCond(Pred,
LHS,
RHS, Condition,
false);
11897 if (!L || !DT.isReachableFromEntry(L->getHeader()))
11902 "This cannot be done on broken IR!");
11905 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
11914 if (LoopContinuePredicate &&
11915 isImpliedCond(Pred, LHS, RHS, LoopContinuePredicate->
getCondition(),
11916 LoopContinuePredicate->
getSuccessor(0) != L->getHeader()))
11921 if (WalkingBEDominatingConds)
11927 const auto &BETakenInfo = getBackedgeTakenInfo(L);
11928 const SCEV *LatchBECount = BETakenInfo.getExact(Latch,
this);
11935 const SCEV *LoopCounter =
11943 for (
auto &AssumeVH : AC.assumptions()) {
11950 if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0),
false))
11954 if (isImpliedViaGuard(Latch, Pred, LHS, RHS))
11957 for (
DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()];
11958 DTN != HeaderDTN; DTN = DTN->getIDom()) {
11959 assert(DTN &&
"should reach the loop header before reaching the root!");
11962 if (isImpliedViaGuard(BB, Pred, LHS, RHS))
11980 if (isImpliedCond(Pred, LHS, RHS, ContBr->
getCondition(),
11993 if (!DT.isReachableFromEntry(BB))
11997 "This cannot be done on broken IR!");
12005 const bool ProvingStrictComparison =
12007 bool ProvedNonStrictComparison =
false;
12008 bool ProvedNonEquality =
false;
12011 if (!ProvedNonStrictComparison)
12012 ProvedNonStrictComparison = Fn(NonStrictPredicate);
12013 if (!ProvedNonEquality)
12015 if (ProvedNonStrictComparison && ProvedNonEquality)
12020 if (ProvingStrictComparison) {
12022 return isKnownViaNonRecursiveReasoning(
P, LHS, RHS);
12024 if (SplitAndProve(ProofFn))
12029 auto ProveViaCond = [&](
const Value *Condition,
bool Inverse) {
12031 if (isImpliedCond(Pred, LHS, RHS, Condition,
Inverse, CtxI))
12033 if (ProvingStrictComparison) {
12035 return isImpliedCond(
P, LHS, RHS, Condition,
Inverse, CtxI);
12037 if (SplitAndProve(ProofFn))
12046 const Loop *ContainingLoop = LI.getLoopFor(BB);
12048 if (ContainingLoop && ContainingLoop->
getHeader() == BB)
12052 for (std::pair<const BasicBlock *, const BasicBlock *> Pair(PredBB, BB);
12053 Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
12056 if (!BlockEntryPredicate)
12065 for (
auto &AssumeVH : AC.assumptions()) {
12069 if (!DT.dominates(CI, BB))
12072 if (ProveViaCond(CI->getArgOperand(0),
false))
12078 F.getParent(), Intrinsic::experimental_guard);
12080 for (
const auto *GU : GuardDecl->users())
12082 if (Guard->getFunction() == BB->
getParent() && DT.dominates(Guard, BB))
12083 if (ProveViaCond(Guard->getArgOperand(0),
false))
12098 "LHS is not available at Loop Entry");
12100 "RHS is not available at Loop Entry");
12102 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
12113 if (FoundCondValue ==
12117 if (!PendingLoopPredicates.insert(FoundCondValue).second)
12121 [&]() { PendingLoopPredicates.erase(FoundCondValue); });
12124 const Value *Op0, *Op1;
12127 return isImpliedCond(Pred,
LHS,
RHS, Op0,
Inverse, CtxI) ||
12131 return isImpliedCond(Pred,
LHS,
RHS, Op0, Inverse, CtxI) ||
12132 isImpliedCond(Pred,
LHS,
RHS, Op1, Inverse, CtxI);
12136 if (!ICI)
return false;
12140 CmpPredicate FoundPred;
12149 return isImpliedCond(Pred,
LHS,
RHS, FoundPred, FoundLHS, FoundRHS, CtxI);
12152bool ScalarEvolution::isImpliedCond(CmpPredicate Pred,
const SCEV *
LHS,
12153 const SCEV *
RHS, CmpPredicate FoundPred,
12154 const SCEV *FoundLHS,
const SCEV *FoundRHS,
12155 const Instruction *CtxI) {
12165 auto *WideType = FoundLHS->
getType();
12177 TruncFoundLHS, TruncFoundRHS, CtxI))
12203 return isImpliedCondBalancedTypes(Pred,
LHS,
RHS, FoundPred, FoundLHS,
12207bool ScalarEvolution::isImpliedCondBalancedTypes(
12208 CmpPredicate Pred, SCEVUse
LHS, SCEVUse
RHS, CmpPredicate FoundPred,
12209 SCEVUse FoundLHS, SCEVUse FoundRHS,
const Instruction *CtxI) {
12212 "Types should be balanced!");
12219 if (FoundLHS == FoundRHS)
12223 if (
LHS == FoundRHS ||
RHS == FoundLHS) {
12235 return isImpliedCondOperands(*
P,
LHS,
RHS, FoundLHS, FoundRHS, CtxI);
12252 LHS, FoundLHS, FoundRHS, CtxI);
12254 return isImpliedCondOperands(*
P,
LHS,
RHS, FoundRHS, FoundLHS, CtxI);
12276 assert(P1 != P2 &&
"Handled earlier!");
12280 if (IsSignFlippedPredicate(Pred, FoundPred)) {
12284 return isImpliedCondOperands(Pred,
LHS,
RHS, FoundLHS, FoundRHS, CtxI);
12287 CmpPredicate CanonicalPred = Pred, CanonicalFoundPred = FoundPred;
12288 const SCEV *CanonicalLHS =
LHS, *CanonicalRHS =
RHS,
12289 *CanonicalFoundLHS = FoundLHS, *CanonicalFoundRHS = FoundRHS;
12294 std::swap(CanonicalFoundLHS, CanonicalFoundRHS);
12305 return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS,
12306 CanonicalRHS, CanonicalFoundLHS,
12307 CanonicalFoundRHS);
12312 return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS,
12313 CanonicalRHS, CanonicalFoundLHS,
12314 CanonicalFoundRHS);
12321 const SCEVConstant *
C =
nullptr;
12322 const SCEV *
V =
nullptr;
12340 if (Min ==
C->getAPInt()) {
12345 APInt SharperMin = Min + 1;
12348 case ICmpInst::ICMP_SGE:
12349 case ICmpInst::ICMP_UGE:
12352 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(SharperMin),
12357 case ICmpInst::ICMP_SGT:
12358 case ICmpInst::ICMP_UGT:
12368 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min), CtxI))
12373 case ICmpInst::ICMP_SLE:
12374 case ICmpInst::ICMP_ULE:
12375 if (isImpliedCondOperands(ICmpInst::getSwappedCmpPredicate(Pred), RHS,
12376 LHS, V, getConstant(SharperMin), CtxI))
12380 case ICmpInst::ICMP_SLT:
12381 case ICmpInst::ICMP_ULT:
12382 if (isImpliedCondOperands(ICmpInst::getSwappedCmpPredicate(Pred), RHS,
12383 LHS, V, getConstant(Min), CtxI))
12397 if (isImpliedCondOperands(Pred,
LHS,
RHS, FoundLHS, FoundRHS, CtxI))
12401 if (isImpliedCondOperands(FoundPred,
LHS,
RHS, FoundLHS, FoundRHS, CtxI))
12404 if (isImpliedCondOperandsViaRanges(Pred,
LHS,
RHS, FoundPred, FoundLHS, FoundRHS))
12411bool ScalarEvolution::splitBinaryAdd(SCEVUse Expr, SCEVUse &L, SCEVUse &R,
12420std::optional<APInt>
12427 APInt DiffMul(BW, 1);
12430 for (
unsigned I = 0;
I < 8; ++
I) {
12439 if (LAR->getLoop() != MAR->getLoop())
12440 return std::nullopt;
12444 if (!LAR->isAffine() || !MAR->isAffine())
12445 return std::nullopt;
12447 if (LAR->getStepRecurrence(*
this) != MAR->getStepRecurrence(*
this))
12448 return std::nullopt;
12450 Less = LAR->getStart();
12451 More = MAR->getStart();
12456 auto MatchConstMul =
12457 [](
const SCEV *S) -> std::optional<std::pair<const SCEV *, APInt>> {
12462 return std::nullopt;
12464 if (
auto MatchedMore = MatchConstMul(More)) {
12465 if (
auto MatchedLess = MatchConstMul(
Less)) {
12466 if (MatchedMore->second == MatchedLess->second) {
12467 More = MatchedMore->first;
12468 Less = MatchedLess->first;
12469 DiffMul *= MatchedMore->second;
12480 Diff +=
C->getAPInt() * DiffMul;
12483 Diff -=
C->getAPInt() * DiffMul;
12486 Multiplicity[S] +=
Mul;
12488 auto Decompose = [&](
const SCEV *S,
int Mul) {
12495 Decompose(More, 1);
12496 Decompose(
Less, -1);
12500 const SCEV *NewMore =
nullptr, *NewLess =
nullptr;
12501 for (
const auto &[S,
Mul] : Multiplicity) {
12506 return std::nullopt;
12508 }
else if (
Mul == -1) {
12510 return std::nullopt;
12513 return std::nullopt;
12517 if (NewMore == More || NewLess ==
Less)
12518 return std::nullopt;
12524 if (!More && !
Less)
12528 if (!More || !
Less)
12529 return std::nullopt;
12533 return std::nullopt;
12536bool ScalarEvolution::isImpliedCondOperandsViaAddRecStart(
12558 const auto *Latch = L->getLoopLatch();
12561 if (!L->contains(ContextBB) || !Latch || !DT.
dominates(ContextBB, Latch))
12570 const auto *Latch = L->getLoopLatch();
12573 if (!L->contains(ContextBB) || !Latch || !DT.
dominates(ContextBB, Latch))
12583bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow(CmpPredicate Pred,
12586 const SCEV *FoundLHS,
12587 const SCEV *FoundRHS) {
12596 if (!AddRecFoundLHS)
12603 const Loop *
L = AddRecFoundLHS->getLoop();
12604 if (L != AddRecLHS->getLoop())
12643 if (!RDiff || *LDiff != *RDiff)
12646 if (LDiff->isMinValue())
12649 APInt FoundRHSLimit;
12652 FoundRHSLimit = -(*RDiff);
12664bool ScalarEvolution::isImpliedViaMerge(CmpPredicate Pred,
const SCEV *
LHS,
12665 const SCEV *
RHS,
const SCEV *FoundLHS,
12666 const SCEV *FoundRHS,
unsigned Depth) {
12667 const PHINode *LPhi =
nullptr, *RPhi =
nullptr;
12671 bool Erased = PendingMerges.erase(LPhi);
12672 assert(Erased &&
"Failed to erase LPhi!");
12676 bool Erased = PendingMerges.erase(RPhi);
12677 assert(Erased &&
"Failed to erase RPhi!");
12685 if (!PendingMerges.insert(Phi).second)
12699 if (!PendingMerges.insert(Phi).second)
12705 if (!LPhi && !RPhi)
12716 assert(LPhi &&
"LPhi should definitely be a SCEVUnknown Phi!");
12720 auto ProvedEasily = [&](
const SCEV *
S1,
const SCEV *S2) {
12721 return isKnownViaNonRecursiveReasoning(Pred,
S1, S2) ||
12722 isImpliedCondOperandsViaRanges(Pred,
S1, S2, Pred, FoundLHS, FoundRHS) ||
12723 isImpliedViaOperations(Pred,
S1, S2, FoundLHS, FoundRHS,
Depth);
12726 if (RPhi && RPhi->getParent() == LBB) {
12733 const SCEV *
R =
getSCEV(RPhi->getIncomingValueForBlock(IncBB));
12734 if (!ProvedEasily(L, R))
12745 auto *RLoop = RAR->
getLoop();
12746 auto *Predecessor = RLoop->getLoopPredecessor();
12747 assert(Predecessor &&
"Loop with AddRec with no predecessor?");
12749 if (!ProvedEasily(L1, RAR->
getStart()))
12751 auto *Latch = RLoop->getLoopLatch();
12752 assert(Latch &&
"Loop with AddRec with no latch?");
12773 if (
auto *Loop = LI.getLoopFor(LBB))
12776 if (!ProvedEasily(L,
RHS))
12783bool ScalarEvolution::isImpliedCondOperandsViaShift(CmpPredicate Pred,
12786 const SCEV *FoundLHS,
12787 const SCEV *FoundRHS) {
12790 if (
RHS == FoundRHS) {
12795 if (
LHS != FoundLHS)
12802 Value *Shiftee, *ShiftValue;
12804 using namespace PatternMatch;
12805 if (
match(SUFoundRHS->getValue(),
12807 auto *ShifteeS =
getSCEV(Shiftee);
12825bool ScalarEvolution::isImpliedCondOperands(CmpPredicate Pred,
const SCEV *
LHS,
12827 const SCEV *FoundLHS,
12828 const SCEV *FoundRHS,
12829 const Instruction *CtxI) {
12830 return isImpliedCondOperandsViaRanges(Pred,
LHS,
RHS, Pred, FoundLHS,
12832 isImpliedCondOperandsViaNoOverflow(Pred,
LHS,
RHS, FoundLHS,
12834 isImpliedCondOperandsViaShift(Pred,
LHS,
RHS, FoundLHS, FoundRHS) ||
12835 isImpliedCondOperandsViaAddRecStart(Pred,
LHS,
RHS, FoundLHS, FoundRHS,
12837 isImpliedCondOperandsHelper(Pred,
LHS,
RHS, FoundLHS, FoundRHS);
12841template <
typename MinMaxExprType>
12843 const SCEV *Candidate) {
12848 return is_contained(MinMaxExpr->operands(), Candidate);
12861 const SCEV *LStart, *RStart, *Step;
12911bool ScalarEvolution::isImpliedViaOperations(CmpPredicate Pred,
const SCEV *
LHS,
12913 const SCEV *FoundLHS,
12914 const SCEV *FoundRHS,
12918 "LHS and RHS have different sizes?");
12921 "FoundLHS and FoundRHS have different sizes?");
12955 auto GetOpFromSExt = [&](
const SCEV *S) ->
const SCEV * {
12957 return Ext->getOperand();
12964 auto *OrigLHS =
LHS;
12965 auto *OrigFoundLHS = FoundLHS;
12966 LHS = GetOpFromSExt(
LHS);
12967 FoundLHS = GetOpFromSExt(FoundLHS);
12970 auto IsSGTViaContext = [&](
const SCEV *
S1,
const SCEV *S2) {
12973 FoundRHS,
Depth + 1);
12986 if (!LHSAddExpr->hasNoSignedWrap())
12989 SCEVUse LL = LHSAddExpr->getOperand(0);
12990 SCEVUse LR = LHSAddExpr->getOperand(1);
12994 auto IsSumGreaterThanRHS = [&](
const SCEV *
S1,
const SCEV *S2) {
12995 return IsSGTViaContext(
S1, MinusOne) && IsSGTViaContext(S2,
RHS);
13000 if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
13006 using namespace llvm::PatternMatch;
13025 if (!Numerator || Numerator->getType() != FoundLHS->
getType())
13033 auto *DTy = Denominator->getType();
13034 auto *FRHSTy = FoundRHS->
getType();
13035 if (DTy->isPointerTy() != FRHSTy->isPointerTy())
13054 IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
13065 auto *NegDenomMinusOne =
getMinusSCEV(MinusOne, DenominatorExt);
13067 IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
13075 if (isImpliedViaMerge(Pred, OrigLHS,
RHS, OrigFoundLHS, FoundRHS,
Depth + 1))
13108bool ScalarEvolution::isKnownViaNonRecursiveReasoning(CmpPredicate Pred,
13112 isKnownPredicateViaConstantRanges(Pred,
LHS,
RHS) ||
13115 isKnownPredicateViaNoOverflow(Pred,
LHS,
RHS);
13118bool ScalarEvolution::isImpliedCondOperandsHelper(CmpPredicate Pred,
13121 const SCEV *FoundLHS,
13122 const SCEV *FoundRHS) {
13158 if (isImpliedViaOperations(Pred,
LHS,
RHS, FoundLHS, FoundRHS))
13164bool ScalarEvolution::isImpliedCondOperandsViaRanges(
13165 CmpPredicate Pred,
const SCEV *
LHS,
const SCEV *
RHS, CmpPredicate FoundPred,
13166 const SCEV *FoundLHS,
const SCEV *FoundRHS) {
13180 ConstantRange FoundLHSRange =
13184 ConstantRange LHSRange = FoundLHSRange.
add(ConstantRange(*Addend));
13191 return LHSRange.
icmp(Pred, ConstRHS);
13194bool ScalarEvolution::canIVOverflowOnLT(
const SCEV *
RHS,
const SCEV *Stride,
13207 return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS);
13215 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS);
13218bool ScalarEvolution::canIVOverflowOnGT(
const SCEV *
RHS,
const SCEV *Stride,
13230 return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS);
13238 return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS);
13250const SCEV *ScalarEvolution::computeMaxBECountForLT(
const SCEV *Start,
13251 const SCEV *Stride,
13282 APInt Limit = MaxValue - (StrideForMaxBECount - 1);
13293 :
APIntOps::umax(MaxEnd, MinStart);
13300ScalarEvolution::howManyLessThans(
const SCEV *
LHS,
const SCEV *
RHS,
13301 const Loop *L,
bool IsSigned,
13302 bool ControlsOnlyExit,
bool AllowPredicates) {
13306 bool PredicatedIV =
false;
13311 auto canProveNUW = [&]() {
13314 if (!ControlsOnlyExit)
13335 Limit = Limit.
zext(OuterBitWidth);
13347 Type *Ty = ZExt->getType();
13358 if (!
IV && AllowPredicates) {
13363 PredicatedIV =
true;
13367 if (!
IV ||
IV->getLoop() != L || !
IV->isAffine())
13381 bool NoWrap = ControlsOnlyExit &&
IV->getNoWrapFlags(WrapType);
13384 const SCEV *Stride =
IV->getStepRecurrence(*
this);
13389 if (!PositiveStride) {
13441 auto wouldZeroStrideBeUB = [&]() {
13453 if (!wouldZeroStrideBeUB()) {
13457 }
else if (!NoWrap) {
13460 if (canIVOverflowOnLT(
RHS, Stride, IsSigned))
13473 const SCEV *
Start =
IV->getStart();
13479 const SCEV *OrigStart =
Start;
13480 const SCEV *OrigRHS =
RHS;
13481 if (
Start->getType()->isPointerTy()) {
13492 const SCEV *End =
nullptr, *BECount =
nullptr,
13493 *BECountIfBackedgeTaken =
nullptr;
13496 if (PositiveStride && RHSAddRec !=
nullptr && RHSAddRec->getLoop() == L &&
13497 RHSAddRec->getNoWrapFlags()) {
13510 const SCEV *RHSStart = RHSAddRec->getStart();
13511 const SCEV *RHSStride = RHSAddRec->getStepRecurrence(*
this);
13523 const SCEV *Denominator =
getMinusSCEV(Stride, RHSStride);
13532 BECountIfBackedgeTaken =
13537 if (BECount ==
nullptr) {
13542 const SCEV *MaxBECount = computeMaxBECountForLT(
13545 MaxBECount,
false , Predicates);
13552 auto *OrigStartMinusStride =
getMinusSCEV(OrigStart, Stride);
13579 const SCEV *Numerator =
13585 auto canProveRHSGreaterThanEqualStart = [&]() {
13604 auto *StartMinusOne =
13611 if (canProveRHSGreaterThanEqualStart()) {
13626 BECountIfBackedgeTaken =
13642 bool MayAddOverflow = [&] {
13688 if (Start == Stride || Start ==
getMinusSCEV(Stride, One)) {
13702 if (!MayAddOverflow) {
13714 const SCEV *ConstantMaxBECount;
13715 bool MaxOrZero =
false;
13717 ConstantMaxBECount = BECount;
13718 }
else if (BECountIfBackedgeTaken &&
13723 ConstantMaxBECount = BECountIfBackedgeTaken;
13726 ConstantMaxBECount = computeMaxBECountForLT(
13734 const SCEV *SymbolicMaxBECount =
13736 return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount, MaxOrZero,
13740ScalarEvolution::ExitLimit ScalarEvolution::howManyGreaterThans(
13741 const SCEV *
LHS,
const SCEV *
RHS,
const Loop *L,
bool IsSigned,
13742 bool ControlsOnlyExit,
bool AllowPredicates) {
13749 if (!
IV && AllowPredicates)
13756 if (!
IV ||
IV->getLoop() != L || !
IV->isAffine())
13760 bool NoWrap = ControlsOnlyExit &&
IV->getNoWrapFlags(WrapType);
13773 if (!Stride->
isOne() && !NoWrap)
13774 if (canIVOverflowOnGT(
RHS, Stride, IsSigned))
13777 const SCEV *
Start =
IV->getStart();
13778 const SCEV *End =
RHS;
13789 if (
Start->getType()->isPointerTy()) {
13824 const SCEV *ConstantMaxBECount =
13831 ConstantMaxBECount = BECount;
13832 const SCEV *SymbolicMaxBECount =
13835 return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount,
false,
13841 if (
Range.isFullSet())
13846 if (!SC->getValue()->isZero()) {
13852 return ShiftedAddRec->getNumIterationsInRange(
13853 Range.subtract(SC->getAPInt()), SE);
13884 APInt ExitVal = (End +
A).udiv(
A);
13897 ConstantInt::get(SE.
getContext(), ExitVal - 1), SE)->getValue()) &&
13898 "Linear scev computation is off in a bad way!");
13929 assert(!
Last->isZero() &&
"Recurrency with zero step?");
13954 Ty = Store->getValueOperand()->getType();
13956 Ty = Load->getType();
13969 assert(SE &&
"SCEVCallbackVH called with a null ScalarEvolution!");
13971 SE->ConstantEvolutionLoopExitValue.erase(PN);
13972 SE->eraseValueFromMap(getValPtr());
13976void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(
Value *V) {
13977 assert(SE &&
"SCEVCallbackVH called with a null ScalarEvolution!");
13987 : CallbackVH(
V), SE(se) {}
13996 : F(F), DL(F.
getDataLayout()), TLI(TLI), AC(AC), DT(DT), LI(LI),
13998 LoopDispositions(64), BlockDispositions(64) {
14010 F.getParent(), Intrinsic::experimental_guard);
14011 HasGuards = GuardDecl && !GuardDecl->use_empty();
14015 : F(Arg.F), DL(Arg.DL), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC),
14016 DT(Arg.DT), LI(Arg.LI), CouldNotCompute(
std::
move(Arg.CouldNotCompute)),
14017 ValueExprMap(
std::
move(Arg.ValueExprMap)),
14018 PendingLoopPredicates(
std::
move(Arg.PendingLoopPredicates)),
14019 PendingMerges(
std::
move(Arg.PendingMerges)),
14020 ConstantMultipleCache(
std::
move(Arg.ConstantMultipleCache)),
14021 BackedgeTakenCounts(
std::
move(Arg.BackedgeTakenCounts)),
14022 PredicatedBackedgeTakenCounts(
14023 std::
move(Arg.PredicatedBackedgeTakenCounts)),
14024 BECountUsers(
std::
move(Arg.BECountUsers)),
14025 ConstantEvolutionLoopExitValue(
14026 std::
move(Arg.ConstantEvolutionLoopExitValue)),
14027 ValuesAtScopes(
std::
move(Arg.ValuesAtScopes)),
14028 ValuesAtScopesUsers(
std::
move(Arg.ValuesAtScopesUsers)),
14029 LoopDispositions(
std::
move(Arg.LoopDispositions)),
14030 LoopPropertiesCache(
std::
move(Arg.LoopPropertiesCache)),
14031 BlockDispositions(
std::
move(Arg.BlockDispositions)),
14032 SCEVUsers(
std::
move(Arg.SCEVUsers)),
14033 UnsignedRanges(
std::
move(Arg.UnsignedRanges)),
14034 SignedRanges(
std::
move(Arg.SignedRanges)),
14035 UniqueSCEVs(
std::
move(Arg.UniqueSCEVs)),
14036 UniquePreds(
std::
move(Arg.UniquePreds)),
14037 SCEVAllocator(
std::
move(Arg.SCEVAllocator)),
14038 LoopUsers(
std::
move(Arg.LoopUsers)),
14039 PredicatedSCEVRewrites(
std::
move(Arg.PredicatedSCEVRewrites)),
14040 FirstUnknown(Arg.FirstUnknown) {
14041 Arg.FirstUnknown =
nullptr;
14050 Tmp->~SCEVUnknown();
14052 FirstUnknown =
nullptr;
14054 ExprValueMap.clear();
14055 ValueExprMap.clear();
14057 BackedgeTakenCounts.clear();
14058 PredicatedBackedgeTakenCounts.clear();
14060 assert(PendingLoopPredicates.empty() &&
"isImpliedCond garbage");
14061 assert(PendingMerges.empty() &&
"isImpliedViaMerge garbage");
14062 assert(!WalkingBEDominatingConds &&
"isLoopBackedgeGuardedByCond garbage!");
14063 assert(!ProvingSplitPredicate &&
"ProvingSplitPredicate garbage!");
14085 L->getHeader()->printAsOperand(OS,
false);
14089 L->getExitingBlocks(ExitingBlocks);
14090 if (ExitingBlocks.
size() != 1)
14091 OS <<
"<multiple exits> ";
14095 OS <<
"backedge-taken count is ";
14098 OS <<
"Unpredictable backedge-taken count.";
14101 if (ExitingBlocks.
size() > 1)
14102 for (
BasicBlock *ExitingBlock : ExitingBlocks) {
14103 OS <<
" exit count for " << ExitingBlock->
getName() <<
": ";
14111 OS <<
"\n predicated exit count for " << ExitingBlock->
getName()
14114 OS <<
"\n Predicates:\n";
14115 for (
const auto *
P : Predicates)
14123 L->getHeader()->printAsOperand(OS,
false);
14128 OS <<
"constant max backedge-taken count is ";
14131 OS <<
", actual taken count either this or zero.";
14133 OS <<
"Unpredictable constant max backedge-taken count. ";
14138 L->getHeader()->printAsOperand(OS,
false);
14143 OS <<
"symbolic max backedge-taken count is ";
14146 OS <<
", actual taken count either this or zero.";
14148 OS <<
"Unpredictable symbolic max backedge-taken count. ";
14152 if (ExitingBlocks.
size() > 1)
14153 for (
BasicBlock *ExitingBlock : ExitingBlocks) {
14154 OS <<
" symbolic max exit count for " << ExitingBlock->
getName() <<
": ";
14164 OS <<
"\n predicated symbolic max exit count for "
14165 << ExitingBlock->
getName() <<
": ";
14167 OS <<
"\n Predicates:\n";
14168 for (
const auto *
P : Predicates)
14178 assert(!Preds.
empty() &&
"Different predicated BTC, but no predicates");
14180 L->getHeader()->printAsOperand(OS,
false);
14183 OS <<
"Predicated backedge-taken count is ";
14186 OS <<
"Unpredictable predicated backedge-taken count.";
14188 OS <<
" Predicates:\n";
14189 for (
const auto *
P : Preds)
14194 auto *PredConstantMax =
14196 if (PredConstantMax != ConstantBTC) {
14198 "different predicated constant max BTC but no predicates");
14200 L->getHeader()->printAsOperand(OS,
false);
14203 OS <<
"Predicated constant max backedge-taken count is ";
14206 OS <<
"Unpredictable predicated constant max backedge-taken count.";
14208 OS <<
" Predicates:\n";
14209 for (
const auto *
P : Preds)
14214 auto *PredSymbolicMax =
14216 if (SymbolicBTC != PredSymbolicMax) {
14218 "Different predicated symbolic max BTC, but no predicates");
14220 L->getHeader()->printAsOperand(OS,
false);
14223 OS <<
"Predicated symbolic max backedge-taken count is ";
14226 OS <<
"Unpredictable predicated symbolic max backedge-taken count.";
14228 OS <<
" Predicates:\n";
14229 for (
const auto *
P : Preds)
14235 L->getHeader()->printAsOperand(OS,
false);
14259 OS <<
"Computable";
14269 OS <<
"DoesNotDominate";
14275 OS <<
"ProperlyDominates";
14292 OS <<
"Classifying expressions for: ";
14293 F.printAsOperand(OS,
false);
14308 const Loop *L = LI.getLoopFor(
I.getParent());
14323 OS <<
"\t\t" "Exits: ";
14326 OS <<
"<<Unknown>>";
14332 for (
const auto *Iter = L; Iter; Iter = Iter->getParentLoop()) {
14334 Iter->getHeader()->printAsOperand(OS,
false);
14342 InnerL->getHeader()->printAsOperand(OS,
false);
14353 OS <<
"Determining loop execution counts for: ";
14354 F.printAsOperand(OS,
false);
14362 auto &Values = LoopDispositions[S];
14363 for (
auto &V : Values) {
14364 if (V.getPointer() == L)
14369 auto &Values2 = LoopDispositions[S];
14371 if (V.getPointer() == L) {
14380ScalarEvolution::computeLoopDisposition(
const SCEV *S,
const Loop *L) {
14399 assert(!L->contains(AR->
getLoop()) &&
"Containing loop's header does not"
14400 " dominate the contained loop's header?");
14428 bool HasVarying =
false;
14462 auto &Values = BlockDispositions[S];
14463 for (
auto &V : Values) {
14464 if (V.getPointer() == BB)
14469 auto &Values2 = BlockDispositions[S];
14471 if (V.getPointer() == BB) {
14480ScalarEvolution::computeBlockDisposition(
const SCEV *S,
const BasicBlock *BB) {
14510 bool Proper =
true;
14521 if (Instruction *
I =
14523 if (
I->getParent() == BB)
14525 if (DT.properlyDominates(
I->getParent(), BB))
14548void ScalarEvolution::forgetBackedgeTakenCounts(
const Loop *L,
14551 Predicated ? PredicatedBackedgeTakenCounts : BackedgeTakenCounts;
14552 auto It = BECounts.find(L);
14553 if (It != BECounts.end()) {
14554 for (
const ExitNotTakenInfo &ENT : It->second.ExitNotTaken) {
14555 for (
const SCEV *S : {ENT.ExactNotTaken, ENT.SymbolicMaxNotTaken}) {
14557 auto UserIt = BECountUsers.find(S);
14558 assert(UserIt != BECountUsers.end());
14563 BECounts.erase(It);
14571 while (!Worklist.
empty()) {
14573 auto Users = SCEVUsers.find(Curr);
14574 if (
Users != SCEVUsers.end())
14575 for (
const auto *User :
Users->second)
14576 if (ToForget.
insert(User).second)
14580 for (
const auto *S : ToForget)
14581 forgetMemoizedResultsImpl(S);
14583 for (
auto I = PredicatedSCEVRewrites.begin();
14584 I != PredicatedSCEVRewrites.end();) {
14585 std::pair<const SCEV *, const Loop *>
Entry =
I->first;
14586 if (ToForget.count(
Entry.first))
14587 PredicatedSCEVRewrites.erase(
I++);
14593void ScalarEvolution::forgetMemoizedResultsImpl(
const SCEV *S) {
14594 LoopDispositions.erase(S);
14595 BlockDispositions.erase(S);
14596 UnsignedRanges.erase(S);
14597 SignedRanges.erase(S);
14598 HasRecMap.erase(S);
14599 ConstantMultipleCache.erase(S);
14602 UnsignedWrapViaInductionTried.erase(AR);
14603 SignedWrapViaInductionTried.erase(AR);
14606 auto ExprIt = ExprValueMap.find(S);
14607 if (ExprIt != ExprValueMap.end()) {
14608 for (
Value *V : ExprIt->second) {
14609 auto ValueIt = ValueExprMap.find_as(V);
14610 if (ValueIt != ValueExprMap.end())
14611 ValueExprMap.erase(ValueIt);
14613 ExprValueMap.erase(ExprIt);
14616 auto ScopeIt = ValuesAtScopes.find(S);
14617 if (ScopeIt != ValuesAtScopes.end()) {
14618 for (
const auto &Pair : ScopeIt->second)
14621 std::make_pair(Pair.first, S));
14622 ValuesAtScopes.erase(ScopeIt);
14625 auto ScopeUserIt = ValuesAtScopesUsers.find(S);
14626 if (ScopeUserIt != ValuesAtScopesUsers.end()) {
14627 for (
const auto &Pair : ScopeUserIt->second)
14628 llvm::erase(ValuesAtScopes[Pair.second], std::make_pair(Pair.first, S));
14629 ValuesAtScopesUsers.erase(ScopeUserIt);
14632 auto BEUsersIt = BECountUsers.find(S);
14633 if (BEUsersIt != BECountUsers.end()) {
14635 auto Copy = BEUsersIt->second;
14636 for (
const auto &Pair : Copy)
14637 forgetBackedgeTakenCounts(Pair.getPointer(), Pair.getInt());
14638 BECountUsers.erase(BEUsersIt);
14641 auto FoldUser = FoldCacheUser.find(S);
14642 if (FoldUser != FoldCacheUser.end())
14643 for (
auto &KV : FoldUser->second)
14644 FoldCache.erase(KV);
14645 FoldCacheUser.erase(S);
14649ScalarEvolution::getUsedLoops(
const SCEV *S,
14651 struct FindUsedLoops {
14652 FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed)
14653 : LoopsUsed(LoopsUsed) {}
14654 SmallPtrSetImpl<const Loop *> &LoopsUsed;
14655 bool follow(
const SCEV *S) {
14661 bool isDone()
const {
return false; }
14664 FindUsedLoops
F(LoopsUsed);
14665 SCEVTraversal<FindUsedLoops>(F).visitAll(S);
14668void ScalarEvolution::getReachableBlocks(
14671 Worklist.
push_back(&F.getEntryBlock());
14672 while (!Worklist.
empty()) {
14674 if (!Reachable.
insert(BB).second)
14682 Worklist.
push_back(
C->isOne() ? TrueBB : FalseBB);
14689 if (isKnownPredicateViaConstantRanges(
Cmp->getCmpPredicate(), L, R)) {
14693 if (isKnownPredicateViaConstantRanges(
Cmp->getInverseCmpPredicate(), L,
14728 SCEVMapper SCM(SE2);
14730 SE2.getReachableBlocks(ReachableBlocks, F);
14732 auto GetDelta = [&](
const SCEV *Old,
const SCEV *New) ->
const SCEV * {
14750 while (!LoopStack.
empty()) {
14756 if (!ReachableBlocks.
contains(L->getHeader()))
14761 auto It = BackedgeTakenCounts.find(L);
14762 if (It == BackedgeTakenCounts.end())
14766 SCM.visit(It->second.getExact(L,
const_cast<ScalarEvolution *
>(
this)));
14786 const SCEV *Delta = GetDelta(CurBECount, NewBECount);
14787 if (Delta && !Delta->
isZero()) {
14788 dbgs() <<
"Trip Count for " << *L <<
" Changed!\n";
14789 dbgs() <<
"Old: " << *CurBECount <<
"\n";
14790 dbgs() <<
"New: " << *NewBECount <<
"\n";
14791 dbgs() <<
"Delta: " << *Delta <<
"\n";
14799 while (!Worklist.
empty()) {
14801 if (ValidLoops.
insert(L).second)
14802 Worklist.
append(L->begin(), L->end());
14804 for (
const auto &KV : ValueExprMap) {
14809 "AddRec references invalid loop");
14814 auto It = ExprValueMap.find(KV.second);
14815 if (It == ExprValueMap.end() || !It->second.contains(KV.first)) {
14816 dbgs() <<
"Value " << *KV.first
14817 <<
" is in ValueExprMap but not in ExprValueMap\n";
14822 if (!ReachableBlocks.
contains(
I->getParent()))
14824 const SCEV *OldSCEV = SCM.visit(KV.second);
14826 const SCEV *Delta = GetDelta(OldSCEV, NewSCEV);
14827 if (Delta && !Delta->
isZero()) {
14828 dbgs() <<
"SCEV for value " << *
I <<
" changed!\n"
14829 <<
"Old: " << *OldSCEV <<
"\n"
14830 <<
"New: " << *NewSCEV <<
"\n"
14831 <<
"Delta: " << *Delta <<
"\n";
14837 for (
const auto &KV : ExprValueMap) {
14838 for (
Value *V : KV.second) {
14839 const SCEV *S = ValueExprMap.lookup(V);
14841 dbgs() <<
"Value " << *V
14842 <<
" is in ExprValueMap but not in ValueExprMap\n";
14845 if (S != KV.first) {
14846 dbgs() <<
"Value " << *V <<
" mapped to " << *S <<
" rather than "
14847 << *KV.first <<
"\n";
14854 for (
const auto &S : UniqueSCEVs) {
14859 auto It = SCEVUsers.find(
Op);
14860 if (It != SCEVUsers.end() && It->second.count(&S))
14862 dbgs() <<
"Use of operand " << *
Op <<
" by user " << S
14863 <<
" is not being tracked!\n";
14869 for (
const auto &ValueAndVec : ValuesAtScopes) {
14871 for (
const auto &LoopAndValueAtScope : ValueAndVec.second) {
14872 const Loop *L = LoopAndValueAtScope.first;
14873 const SCEV *ValueAtScope = LoopAndValueAtScope.second;
14875 auto It = ValuesAtScopesUsers.find(ValueAtScope);
14876 if (It != ValuesAtScopesUsers.end() &&
14879 dbgs() <<
"Value: " << *
Value <<
", Loop: " << *L <<
", ValueAtScope: "
14880 << *ValueAtScope <<
" missing in ValuesAtScopesUsers\n";
14886 for (
const auto &ValueAtScopeAndVec : ValuesAtScopesUsers) {
14887 const SCEV *ValueAtScope = ValueAtScopeAndVec.first;
14888 for (
const auto &LoopAndValue : ValueAtScopeAndVec.second) {
14889 const Loop *L = LoopAndValue.first;
14890 const SCEV *
Value = LoopAndValue.second;
14892 auto It = ValuesAtScopes.find(
Value);
14893 if (It != ValuesAtScopes.end() &&
14894 is_contained(It->second, std::make_pair(L, ValueAtScope)))
14896 dbgs() <<
"Value: " << *
Value <<
", Loop: " << *L <<
", ValueAtScope: "
14897 << *ValueAtScope <<
" missing in ValuesAtScopes\n";
14903 auto VerifyBECountUsers = [&](
bool Predicated) {
14905 Predicated ? PredicatedBackedgeTakenCounts : BackedgeTakenCounts;
14906 for (
const auto &LoopAndBEInfo : BECounts) {
14907 for (
const ExitNotTakenInfo &ENT : LoopAndBEInfo.second.ExitNotTaken) {
14908 for (
const SCEV *S : {ENT.ExactNotTaken, ENT.SymbolicMaxNotTaken}) {
14910 auto UserIt = BECountUsers.find(S);
14911 if (UserIt != BECountUsers.end() &&
14912 UserIt->second.contains({ LoopAndBEInfo.first, Predicated }))
14914 dbgs() <<
"Value " << *S <<
" for loop " << *LoopAndBEInfo.first
14915 <<
" missing from BECountUsers\n";
14922 VerifyBECountUsers(
false);
14923 VerifyBECountUsers(
true);
14926 for (
auto &[S, Values] : LoopDispositions) {
14927 for (
auto [
Loop, CachedDisposition] : Values) {
14929 if (CachedDisposition != RecomputedDisposition) {
14930 dbgs() <<
"Cached disposition of " << *S <<
" for loop " << *
Loop
14931 <<
" is incorrect: cached " << CachedDisposition <<
", actual "
14932 << RecomputedDisposition <<
"\n";
14939 for (
auto &[S, Values] : BlockDispositions) {
14940 for (
auto [BB, CachedDisposition] : Values) {
14942 if (CachedDisposition != RecomputedDisposition) {
14943 dbgs() <<
"Cached disposition of " << *S <<
" for block %"
14944 << BB->
getName() <<
" is incorrect: cached " << CachedDisposition
14945 <<
", actual " << RecomputedDisposition <<
"\n";
14952 for (
auto [
FoldID, Expr] : FoldCache) {
14953 auto I = FoldCacheUser.find(Expr);
14954 if (
I == FoldCacheUser.end()) {
14955 dbgs() <<
"Missing entry in FoldCacheUser for cached expression " << *Expr
14960 dbgs() <<
"Missing FoldID in cached users of " << *Expr <<
"!\n";
14964 for (
auto [Expr, IDs] : FoldCacheUser) {
14965 for (
auto &
FoldID : IDs) {
14968 dbgs() <<
"Missing entry in FoldCache for expression " << *Expr
14973 dbgs() <<
"Entry in FoldCache doesn't match FoldCacheUser: " << *S
14974 <<
" != " << *Expr <<
"!\n";
14985 for (
auto [S, Multiple] : ConstantMultipleCache) {
14987 if ((Multiple != 0 && RecomputedMultiple != 0 &&
14988 Multiple.
urem(RecomputedMultiple) != 0 &&
14989 RecomputedMultiple.
urem(Multiple) != 0)) {
14990 dbgs() <<
"Incorrect cached computation in ConstantMultipleCache for "
14991 << *S <<
" : Computed " << RecomputedMultiple
14992 <<
" but cache contains " << Multiple <<
"!\n";
15000 FunctionAnalysisManager::Invalidator &Inv) {
15032 OS <<
"Printing analysis 'Scalar Evolution Analysis' for function '"
15033 <<
F.getName() <<
"':\n";
15039 "Scalar Evolution Analysis",
false,
true)
15088 const SCEV *LHS,
const SCEV *RHS) {
15090 assert(LHS->getType() == RHS->getType() &&
15091 "Type mismatch between LHS and RHS");
15094 ID.AddInteger(Pred);
15095 ID.AddPointer(LHS);
15096 ID.AddPointer(RHS);
15097 void *IP =
nullptr;
15098 if (
const auto *S = UniquePreds.FindNodeOrInsertPos(
ID, IP))
15102 UniquePreds.InsertNode(Eq, IP);
15113 ID.AddInteger(AddedFlags);
15114 void *IP =
nullptr;
15115 if (
const auto *S = UniquePreds.FindNodeOrInsertPos(
ID, IP))
15117 auto *OF =
new (SCEVAllocator)
15119 UniquePreds.InsertNode(OF, IP);
15139 SCEVPredicateRewriter
Rewriter(L, SE, NewPreds, Pred);
15140 return Rewriter.visit(S);
15146 for (
const auto *Pred : U->getPredicates())
15148 if (IPred->getLHS() == Expr &&
15150 return IPred->getRHS();
15152 if (IPred->getLHS() == Expr &&
15153 IPred->getPredicate() == ICmpInst::ICMP_EQ)
15154 return IPred->getRHS();
15157 return convertToAddRecWithPreds(Expr);
15160 const SCEV *visitZeroExtendExpr(
const SCEVZeroExtendExpr *Expr) {
15176 const SCEV *visitSignExtendExpr(
const SCEVSignExtendExpr *Expr) {
15193 explicit SCEVPredicateRewriter(
15194 const Loop *L, ScalarEvolution &SE,
15195 SmallVectorImpl<const SCEVPredicate *> *NewPreds,
15196 const SCEVPredicate *Pred)
15197 : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred),
L(
L) {}
15199 bool addOverflowAssumption(
const SCEVPredicate *
P) {
15202 return Pred && Pred->
implies(
P, SE);
15208 bool addOverflowAssumption(
const SCEVAddRecExpr *AR,
15211 return addOverflowAssumption(
A);
15220 const SCEV *convertToAddRecWithPreds(
const SCEVUnknown *Expr) {
15224 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
15226 if (!PredicatedRewrite)
15228 for (
const auto *
P : PredicatedRewrite->second){
15231 if (L != WP->getExpr()->getLoop())
15234 if (!addOverflowAssumption(
P))
15237 return PredicatedRewrite->first;
15240 SmallVectorImpl<const SCEVPredicate *> *NewPreds;
15241 const SCEVPredicate *Pred;
15250 return SCEVPredicateRewriter::rewrite(S, L, *
this,
nullptr, &Preds);
15257 S = SCEVPredicateRewriter::rewrite(S, L, *
this, &TransformPreds,
nullptr);
15277 if (!Step->
isOne())
15302 assert(LHS->getType() == RHS->getType() &&
"LHS and RHS types don't match");
15303 assert(LHS != RHS &&
"LHS and RHS are the same SCEV");
15316 return Op->LHS == LHS &&
Op->RHS == RHS;
15323 OS.
indent(
Depth) <<
"Equal predicate: " << *LHS <<
" == " << *RHS <<
"\n";
15325 OS.
indent(
Depth) <<
"Compare predicate: " << *LHS <<
" " << Pred <<
") "
15350 const SCEV *Start = AR->getStart();
15351 const SCEV *OpStart =
Op->AR->getStart();
15356 if (Start->getType()->isPointerTy() && Start->getType() != OpStart->
getType())
15359 const SCEV *Step = AR->getStepRecurrence(SE);
15360 const SCEV *OpStep =
Op->AR->getStepRecurrence(SE);
15413 if (Step->getValue()->getValue().isNonNegative())
15417 return ImpliedFlags;
15424 for (
const auto *
P : Preds)
15437 return this->implies(I, SE);
15445 for (
const auto *Pred : Preds)
15446 Pred->print(OS,
Depth);
15451 for (
const auto *Pred : Set->Preds)
15459 bool CheckImplies = Preds.
size() < 16;
15462 if (CheckImplies &&
implies(
N, SE))
15468 for (
auto *
P : Preds) {
15469 if (CheckImplies &&
N->implies(
P, SE))
15473 Preds = std::move(PrunedPreds);
15474 Preds.push_back(
N);
15481 Preds = std::make_unique<SCEVUnionPredicate>(
Empty, SE);
15486 for (
const auto *
Op :
Ops)
15491 SCEVUsers[
Op].insert(
User);
15500 SCEVUsers[
Op].insert(
User);
15504 const SCEV *Expr = SE.getSCEV(V);
15509 RewriteEntry &Entry = RewriteMap[Expr];
15512 if (Entry.second && Generation == Entry.first)
15513 return Entry.second;
15518 Expr = Entry.second;
15520 const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, *Preds);
15521 Entry = {Generation, NewSCEV};
15527 if (!BackedgeCount) {
15529 BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, Preds);
15530 for (
const auto *
P : Preds)
15533 return BackedgeCount;
15537 if (!SymbolicMaxBackedgeCount) {
15539 SymbolicMaxBackedgeCount =
15540 SE.getPredicatedSymbolicMaxBackedgeTakenCount(&L, Preds);
15541 for (
const auto *
P : Preds)
15544 return SymbolicMaxBackedgeCount;
15548 if (!SmallConstantMaxTripCount) {
15550 SmallConstantMaxTripCount = SE.getSmallConstantMaxTripCount(&L, &Preds);
15551 for (
const auto *
P : Preds)
15554 return *SmallConstantMaxTripCount;
15558 if (Preds->implies(&Pred, SE))
15563 Preds = std::make_unique<SCEVUnionPredicate>(NewPreds, SE);
15564 updateGeneration();
15571void PredicatedScalarEvolution::updateGeneration() {
15573 if (++Generation == 0) {
15574 for (
auto &
II : RewriteMap) {
15575 const SCEV *Rewritten =
II.second.second;
15592 auto II = FlagsMap.insert({V, Flags});
15605 auto II = FlagsMap.find(V);
15607 if (
II != FlagsMap.end())
15616 auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds);
15621 for (
const auto *
P : NewPreds)
15624 RewriteMap[SE.getSCEV(V)] = {Generation, New};
15630 : RewriteMap(
Init.RewriteMap), SE(
Init.SE), L(
Init.L),
15633 Generation(
Init.Generation), BackedgeCount(
Init.BackedgeCount) {
15634 for (
auto I :
Init.FlagsMap)
15635 FlagsMap.insert(
I);
15640 for (
auto *BB : L.getBlocks())
15641 for (
auto &
I : *BB) {
15642 if (!SE.isSCEVable(
I.getType()))
15645 auto *Expr = SE.getSCEV(&
I);
15646 auto II = RewriteMap.find(Expr);
15648 if (
II == RewriteMap.end())
15652 if (
II->second.second == Expr)
15657 OS.
indent(
Depth + 2) <<
"--> " << *
II->second.second <<
"\n";
15665 LoopGuards Guards(SE);
15673void ScalarEvolution::LoopGuards::collectFromPHI(
15681 using MinMaxPattern = std::pair<const SCEVConstant *, SCEVTypes>;
15682 auto GetMinMaxConst = [&](
unsigned IncomingIdx) -> MinMaxPattern {
15696 auto &RewriteMap =
G->second.RewriteMap;
15697 if (RewriteMap.empty())
15699 auto S = RewriteMap.find(SE.
getSCEV(
Phi.getIncomingValue(IncomingIdx)));
15700 if (S == RewriteMap.end())
15706 return {C0,
SM->getSCEVType()};
15709 auto MergeMinMaxConst = [](MinMaxPattern
P1,
15710 MinMaxPattern
P2) -> MinMaxPattern {
15711 auto [C1,
T1] =
P1;
15712 auto [C2, T2] =
P2;
15713 if (!C1 || !C2 ||
T1 != T2)
15717 return {C1->getAPInt().
ult(C2->getAPInt()) ? C1 : C2,
T1};
15719 return {C1->getAPInt().
slt(C2->getAPInt()) ? C1 : C2,
T1};
15721 return {C1->getAPInt().
ugt(C2->getAPInt()) ? C1 : C2,
T1};
15723 return {C1->getAPInt().
sgt(C2->getAPInt()) ? C1 : C2,
T1};
15728 auto P = GetMinMaxConst(0);
15729 for (
unsigned int In = 1;
In <
Phi.getNumIncomingValues();
In++) {
15732 P = MergeMinMaxConst(
P, GetMinMaxConst(In));
15735 const SCEV *
LHS = SE.
getSCEV(
const_cast<PHINode *
>(&Phi));
15738 Guards.RewriteMap.insert({
LHS,
RHS});
15746 const APInt &DivisorVal,
15748 const APInt *ExprVal;
15761 const APInt &DivisorVal,
15763 const APInt *ExprVal;
15771 return SE.
getConstant(*ExprVal + DivisorVal - Rem);
15785 const SCEV *URemRHS =
nullptr;
15789 const SCEV *Multiple =
15791 DivInfo[URemLHS] = Multiple;
15793 Multiples[URemLHS] =
C->getAPInt();
15813 auto IsMinMaxSCEVWithNonNegativeConstant =
15817 if (
MinMax->getNumOperands() != 2)
15820 if (
C->getAPInt().isNegative())
15822 SCTy =
MinMax->getSCEVType();
15831 const SCEV *MinMaxLHS =
nullptr, *MinMaxRHS =
nullptr;
15833 if (!IsMinMaxSCEVWithNonNegativeConstant(MinMaxExpr, SCTy, MinMaxLHS,
15838 auto *DivisibleExpr =
15846void ScalarEvolution::LoopGuards::collectFromBlock(
15848 const BasicBlock *
Block,
const BasicBlock *Pred,
15856 DenseMap<const SCEV *, const SCEV *> &RewriteMap,
15867 &ExprsToRewrite]() {
15868 const SCEVConstant *C1;
15881 if (ExactRegion.isWrappedSet() || ExactRegion.isFullSet())
15883 auto [
I,
Inserted] = RewriteMap.try_emplace(LHSUnknown);
15884 const SCEV *RewrittenLHS =
Inserted ? LHSUnknown :
I->second;
15892 if (MatchRangeCheckIdiom())
15909 auto AddRewrite = [&](
const SCEV *From,
const SCEV *FromRewritten,
15911 if (From == FromRewritten)
15913 RewriteMap[From] = To;
15919 auto GetMaybeRewritten = [&](
const SCEV *S) {
15920 return RewriteMap.lookup_or(S, S);
15923 const SCEV *RewrittenLHS = GetMaybeRewritten(
LHS);
15925 const APInt &DividesBy =
15940 switch (Predicate) {
15969 SmallPtrSet<const SCEV *, 16> Visited;
15971 auto EnqueueOperands = [&Worklist](
const SCEVNAryExpr *S) {
15975 while (!Worklist.
empty()) {
15979 if (!Visited.
insert(From).second)
15981 const SCEV *FromRewritten = GetMaybeRewritten(From);
15982 const SCEV *To =
nullptr;
15984 switch (Predicate) {
15989 EnqueueOperands(
UMax);
15995 EnqueueOperands(
SMax);
16001 EnqueueOperands(
UMin);
16007 EnqueueOperands(
SMin);
16015 const SCEV *OneAlignedUp =
16017 To = SE.
getUMaxExpr(FromRewritten, OneAlignedUp);
16029 const SCEVConstant *
C;
16038 Guards.NotEqual.insert({
LHS,
RHS});
16047 AddRewrite(From, FromRewritten, To);
16064 SE.F.
getParent(), Intrinsic::experimental_guard);
16066 for (
const auto *GU : GuardDecl->users())
16068 if (Guard->getFunction() ==
Block->getParent() &&
16077 unsigned NumCollectedConditions = 0;
16079 std::pair<const BasicBlock *, const BasicBlock *> Pair(Pred,
Block);
16081 Pair = SE.getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
16083 const CondBrInst *LoopEntryPredicate =
16085 if (!LoopEntryPredicate)
16090 NumCollectedConditions++;
16094 if (
Depth > 0 && NumCollectedConditions == 2)
16102 if (Pair.second->hasNPredecessorsOrMore(2) &&
16104 SmallDenseMap<const BasicBlock *, LoopGuards> IncomingGuards;
16105 for (
auto &Phi : Pair.second->phis())
16116 for (
auto [Term, EnterIfTrue] :
reverse(Terms)) {
16117 SmallVector<Value *, 8> Worklist;
16118 SmallPtrSet<Value *, 8> Visited;
16120 while (!Worklist.
empty()) {
16127 EnterIfTrue ?
Cmp->getPredicate() :
Cmp->getInversePredicate();
16151 DenseMap<const SCEV *, APInt> Multiples;
16153 for (
const auto &[Predicate,
LHS,
RHS] : GuardsToProcess) {
16160 for (
const auto &[Predicate,
LHS,
RHS] : GuardsToProcess)
16161 CollectCondition(Predicate,
LHS,
RHS, Guards.RewriteMap, DivGuards);
16165 for (
const auto &[K, Divisor] : Multiples) {
16166 const SCEV *DivisorSCEV = SE.
getConstant(Divisor);
16167 Guards.RewriteMap[
K] =
16169 Guards.
rewrite(K), Divisor, SE),
16178 Guards.PreserveNUW =
true;
16179 Guards.PreserveNSW =
true;
16180 for (
const SCEV *Expr : ExprsToRewrite) {
16181 const SCEV *RewriteTo = Guards.RewriteMap[Expr];
16182 Guards.PreserveNUW &=
16184 Guards.PreserveNSW &=
16191 if (ExprsToRewrite.size() > 1) {
16192 for (
const SCEV *Expr : ExprsToRewrite) {
16193 const SCEV *RewriteTo = Guards.RewriteMap[Expr];
16194 Guards.RewriteMap.erase(Expr);
16195 Guards.RewriteMap.insert({Expr, Guards.
rewrite(RewriteTo)});
16204 class SCEVLoopGuardRewriter
16215 NotEqual(Guards.NotEqual) {
16216 if (Guards.PreserveNUW)
16218 if (Guards.PreserveNSW)
16225 return Map.lookup_or(Expr, Expr);
16229 if (
const SCEV *S = Map.lookup(Expr))
16236 unsigned Bitwidth = Ty->getScalarSizeInBits() / 2;
16237 while (Bitwidth % 8 == 0 && Bitwidth >= 8 &&
16238 Bitwidth >
Op->getType()->getScalarSizeInBits()) {
16240 auto *NarrowExt = SE.getZeroExtendExpr(
Op, NarrowTy);
16241 if (
const SCEV *S = Map.lookup(NarrowExt))
16242 return SE.getZeroExtendExpr(S, Ty);
16243 Bitwidth = Bitwidth / 2;
16251 if (
const SCEV *S = Map.lookup(Expr))
16258 if (
const SCEV *S = Map.lookup(Expr))
16264 if (
const SCEV *S = Map.lookup(Expr))
16272 auto RewriteSubtraction = [&](
const SCEV *S) ->
const SCEV * {
16277 if (NotEqual.contains({LHS, RHS})) {
16279 SE.getOne(S->
getType()), SE.getConstantMultiple(S), SE);
16280 return SE.getUMaxExpr(OneAlignedUp, S);
16287 if (
const SCEV *Rewritten = RewriteSubtraction(Expr))
16298 if (
const SCEV *Rewritten = RewriteSubtraction(
Add))
16299 return SE.getAddExpr(
16302 if (
const SCEV *S = Map.lookup(
Add))
16303 return SE.getAddExpr(Expr->
getOperand(0), S);
16315 : SE.getAddExpr(Operands,
16331 : SE.getMulExpr(Operands,
16337 if (RewriteMap.empty() && NotEqual.empty())
16340 SCEVLoopGuardRewriter
Rewriter(SE, *
this);
16341 return Rewriter.visit(Expr);
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
SmallPtrSet< const BasicBlock *, 8 > VisitedBlocks
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
This file defines a hash set that can be used to remove duplication of nodes in a graph.
Value * getPointer(Value *Ptr)
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This defines the Use class.
iv Induction Variable Users
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
static constexpr unsigned SM(unsigned Version)
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
PowerPC Reduce CR logical Operation
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
const SmallVectorImpl< MachineOperand > & Cond
static DominatorTree getDomTree(Function &F)
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
SI optimize exec mask operations pre RA
static void visit(BasicBlock &Start, std::function< bool(BasicBlock *)> op)
This file provides utility classes that use RAII to save and restore values.
bool SCEVMinMaxExprContains(const SCEV *Root, const SCEV *OperandToFind, SCEVTypes RootKind)
static cl::opt< unsigned > MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden, cl::desc("Max coefficients in AddRec during evolving"), cl::init(8))
static cl::opt< unsigned > RangeIterThreshold("scev-range-iter-threshold", cl::Hidden, cl::desc("Threshold for switching to iteratively computing SCEV ranges"), cl::init(32))
static const Loop * isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI)
static unsigned getConstantTripCount(const SCEVConstant *ExitCount)
static int CompareValueComplexity(const LoopInfo *const LI, Value *LV, Value *RV, unsigned Depth)
Compare the two values LV and RV in terms of their "complexity" where "complexity" is a partial (and ...
static const SCEV * getNextSCEVDivisibleByDivisor(const SCEV *Expr, const APInt &DivisorVal, ScalarEvolution &SE)
static void PushLoopPHIs(const Loop *L, SmallVectorImpl< Instruction * > &Worklist, SmallPtrSetImpl< Instruction * > &Visited)
Push PHI nodes in the header of the given loop onto the given Worklist.
static void insertFoldCacheEntry(const ScalarEvolution::FoldID &ID, const SCEV *S, DenseMap< ScalarEvolution::FoldID, const SCEV * > &FoldCache, DenseMap< const SCEV *, SmallVector< ScalarEvolution::FoldID, 2 > > &FoldCacheUser)
static cl::opt< bool > ClassifyExpressions("scalar-evolution-classify-expressions", cl::Hidden, cl::init(true), cl::desc("When printing analysis, include information on every instruction"))
static bool hasHugeExpression(ArrayRef< SCEVUse > Ops)
Returns true if Ops contains a huge SCEV (the subtree of S contains at least HugeExprThreshold nodes)...
static bool CanConstantFold(const Instruction *I)
Return true if we can constant fold an instruction of the specified type, assuming that all operands ...
static cl::opt< unsigned > AddOpsInlineThreshold("scev-addops-inline-threshold", cl::Hidden, cl::desc("Threshold for inlining addition operands into a SCEV"), cl::init(500))
static cl::opt< unsigned > MaxLoopGuardCollectionDepth("scalar-evolution-max-loop-guard-collection-depth", cl::Hidden, cl::desc("Maximum depth for recursive loop guard collection"), cl::init(1))
static cl::opt< bool > VerifyIR("scev-verify-ir", cl::Hidden, cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"), cl::init(false))
static bool RangeRefPHIAllowedOperands(DominatorTree &DT, PHINode *PHI)
static const SCEV * getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty, ScalarEvolution *SE, unsigned Depth)
static std::optional< APInt > MinOptional(std::optional< APInt > X, std::optional< APInt > Y)
Helper function to compare optional APInts: (a) if X and Y both exist, return min(X,...
static cl::opt< unsigned > MulOpsInlineThreshold("scev-mulops-inline-threshold", cl::Hidden, cl::desc("Threshold for inlining multiplication operands into a SCEV"), cl::init(32))
static BinaryOperator * getCommonInstForPHI(PHINode *PN)
static bool isDivisibilityGuard(const SCEV *LHS, const SCEV *RHS, ScalarEvolution &SE)
static std::optional< const SCEV * > createNodeForSelectViaUMinSeq(ScalarEvolution *SE, const SCEV *CondExpr, const SCEV *TrueExpr, const SCEV *FalseExpr)
static Constant * BuildConstantFromSCEV(const SCEV *V)
This builds up a Constant using the ConstantExpr interface.
static ConstantInt * EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, ScalarEvolution &SE)
static const SCEV * BinomialCoefficient(const SCEV *It, unsigned K, ScalarEvolution &SE, Type *ResultTy)
Compute BC(It, K). The result has width W. Assume, K > 0.
static cl::opt< unsigned > MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden, cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"), cl::init(8))
static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr, const SCEV *Candidate)
Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values?
static PHINode * getConstantEvolvingPHI(Value *V, const Loop *L)
getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node in the loop that V is deri...
static const SCEV * SolveLinEquationWithOverflow(const APInt &A, const SCEV *B, SmallVectorImpl< const SCEVPredicate * > *Predicates, ScalarEvolution &SE, const Loop *L)
Finds the minimum unsigned root of the following equation:
static cl::opt< unsigned > MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden, cl::desc("Maximum number of iterations SCEV will " "symbolically execute a constant " "derived loop"), cl::init(100))
static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow)
static void PrintSCEVWithTypeHint(raw_ostream &OS, const SCEV *S)
When printing a top-level SCEV for trip counts, it's helpful to include a type for constants which ar...
static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, const Loop *L)
static SCEV::NoWrapFlags StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type, ArrayRef< SCEVUse > Ops, SCEV::NoWrapFlags Flags)
static bool containsConstantInAddMulChain(const SCEV *StartExpr)
Determine if any of the operands in this SCEV are a constant or if any of the add or multiply express...
static const SCEV * getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty, ScalarEvolution *SE, unsigned Depth)
static bool CollectAddOperandsWithScales(SmallDenseMap< SCEVUse, APInt, 16 > &M, SmallVectorImpl< SCEVUse > &NewOps, APInt &AccumulatedConstant, ArrayRef< SCEVUse > Ops, const APInt &Scale, ScalarEvolution &SE)
Process the given Ops list, which is a list of operands to be added under the given scale,...
static const SCEV * constantFoldAndGroupOps(ScalarEvolution &SE, LoopInfo &LI, DominatorTree &DT, SmallVectorImpl< SCEVUse > &Ops, FoldT Fold, IsIdentityT IsIdentity, IsAbsorberT IsAbsorber)
Performs a number of common optimizations on the passed Ops.
static cl::opt< unsigned > MaxPhiSCCAnalysisSize("scalar-evolution-max-scc-analysis-depth", cl::Hidden, cl::desc("Maximum amount of nodes to process while searching SCEVUnknown " "Phi strongly connected components"), cl::init(8))
static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
static void GroupByComplexity(SmallVectorImpl< SCEVUse > &Ops, LoopInfo *LI, DominatorTree &DT)
Given a list of SCEV objects, order them by their complexity, and group objects of the same complexit...
static bool collectDivisibilityInformation(ICmpInst::Predicate Predicate, const SCEV *LHS, const SCEV *RHS, DenseMap< const SCEV *, const SCEV * > &DivInfo, DenseMap< const SCEV *, APInt > &Multiples, ScalarEvolution &SE)
static cl::opt< unsigned > MaxSCEVOperationsImplicationDepth("scalar-evolution-max-scev-operations-implication-depth", cl::Hidden, cl::desc("Maximum depth of recursive SCEV operations implication analysis"), cl::init(2))
static void PushDefUseChildren(Instruction *I, SmallVectorImpl< Instruction * > &Worklist, SmallPtrSetImpl< Instruction * > &Visited)
Push users of the given Instruction onto the given Worklist.
static std::optional< APInt > SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec, const ConstantRange &Range, ScalarEvolution &SE)
Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n iterations.
static cl::opt< bool > UseContextForNoWrapFlagInference("scalar-evolution-use-context-for-no-wrap-flag-strenghening", cl::Hidden, cl::desc("Infer nuw/nsw flags using context where suitable"), cl::init(true))
static cl::opt< bool > EnableFiniteLoopControl("scalar-evolution-finite-loop", cl::Hidden, cl::desc("Handle <= and >= in finite loops"), cl::init(true))
static bool getOperandsForSelectLikePHI(DominatorTree &DT, PHINode *PN, Value *&Cond, Value *&LHS, Value *&RHS)
static std::optional< std::tuple< APInt, APInt, APInt, APInt, unsigned > > GetQuadraticEquation(const SCEVAddRecExpr *AddRec)
For a given quadratic addrec, generate coefficients of the corresponding quadratic equation,...
static bool isKnownPredicateExtendIdiom(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
static std::optional< BinaryOp > MatchBinaryOp(Value *V, const DataLayout &DL, AssumptionCache &AC, const DominatorTree &DT, const Instruction *CxtI)
Try to map V into a BinaryOp, and return std::nullopt on failure.
static std::optional< APInt > SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE)
Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n iterations.
static std::optional< APInt > TruncIfPossible(std::optional< APInt > X, unsigned BitWidth)
Helper function to truncate an optional APInt to a given BitWidth.
static cl::opt< unsigned > MaxSCEVCompareDepth("scalar-evolution-max-scev-compare-depth", cl::Hidden, cl::desc("Maximum depth of recursive SCEV complexity comparisons"), cl::init(32))
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE, const SCEVConstant *ConstantTerm, const SCEVAddExpr *WholeAddExpr)
static cl::opt< unsigned > MaxConstantEvolvingDepth("scalar-evolution-max-constant-evolving-depth", cl::Hidden, cl::desc("Maximum depth of recursive constant evolving"), cl::init(32))
static ConstantRange getRangeForAffineARHelper(APInt Step, const ConstantRange &StartRange, const APInt &MaxBECount, bool Signed)
static bool MatchBinarySub(const SCEV *S, SCEVUse &LHS, SCEVUse &RHS)
static std::optional< ConstantRange > GetRangeFromMetadata(Value *V)
Helper method to assign a range to V from metadata present in the IR.
static cl::opt< unsigned > HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden, cl::desc("Size of the expression which is considered huge"), cl::init(4096))
static Type * isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI, bool &Signed, ScalarEvolution &SE)
Helper function to createAddRecFromPHIWithCasts.
static Constant * EvaluateExpression(Value *V, const Loop *L, DenseMap< Instruction *, Constant * > &Vals, const DataLayout &DL, const TargetLibraryInfo *TLI)
EvaluateExpression - Given an expression that passes the getConstantEvolvingPHI predicate,...
static const SCEV * getPreviousSCEVDivisibleByDivisor(const SCEV *Expr, const APInt &DivisorVal, ScalarEvolution &SE)
static const SCEV * MatchNotExpr(const SCEV *Expr)
If Expr computes ~A, return A else return nullptr.
static cl::opt< unsigned > MaxValueCompareDepth("scalar-evolution-max-value-compare-depth", cl::Hidden, cl::desc("Maximum depth of recursive value complexity comparisons"), cl::init(2))
static const SCEV * applyDivisibilityOnMinMaxExpr(const SCEV *MinMaxExpr, APInt Divisor, ScalarEvolution &SE)
static cl::opt< bool, true > VerifySCEVOpt("verify-scev", cl::Hidden, cl::location(VerifySCEV), cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"))
static const SCEV * getSignedOverflowLimitForStep(const SCEV *Step, ICmpInst::Predicate *Pred, ScalarEvolution *SE)
static cl::opt< unsigned > MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden, cl::desc("Maximum depth of recursive arithmetics"), cl::init(32))
static bool HasSameValue(const SCEV *A, const SCEV *B)
SCEV structural equivalence is usually sufficient for testing whether two expressions are equal,...
static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow)
Compute the result of "n choose k", the binomial coefficient.
static std::optional< int > CompareSCEVComplexity(const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS, DominatorTree &DT, unsigned Depth=0)
static bool canConstantEvolve(Instruction *I, const Loop *L)
Determine whether this instruction can constant evolve within this loop assuming its operands can all...
static PHINode * getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L, DenseMap< Instruction *, PHINode * > &PHIMap, unsigned Depth)
getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by recursing through each instructi...
static bool scevUnconditionallyPropagatesPoisonFromOperands(SCEVTypes Kind)
static cl::opt< bool > VerifySCEVStrict("verify-scev-strict", cl::Hidden, cl::desc("Enable stricter verification with -verify-scev is passed"))
static Constant * getOtherIncomingValue(PHINode *PN, BasicBlock *BB)
static cl::opt< bool > UseExpensiveRangeSharpening("scalar-evolution-use-expensive-range-sharpening", cl::Hidden, cl::init(false), cl::desc("Use more powerful methods of sharpening expression ranges. May " "be costly in terms of compile time"))
static const SCEV * getUnsignedOverflowLimitForStep(const SCEV *Step, ICmpInst::Predicate *Pred, ScalarEvolution *SE)
static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Is LHS Pred RHS true on the virtue of LHS or RHS being a Min or Max expression?
static bool BrPHIToSelect(DominatorTree &DT, CondBrInst *BI, PHINode *Merge, Value *&C, Value *&LHS, Value *&RHS)
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
static bool InBlock(const Value *V, const BasicBlock *BB)
Provides some synthesis utilities to produce sequences of values.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static SymbolRef::Type getType(const Symbol *Sym)
LocallyHashedType DenseMapInfo< LocallyHashedType >::Empty
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
static std::optional< bool > isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS, const Value *ARHS, const Value *BLHS, const Value *BRHS)
Return true if "icmp Pred BLHS BRHS" is true whenever "icmp PredALHS ARHS" is true.
Virtual Register Rewriter
static const uint32_t IV[8]
SCEVCastSinkingRewriter(ScalarEvolution &SE, Type *TargetTy, ConversionFn CreatePtrCast)
static const SCEV * rewrite(const SCEV *Scev, ScalarEvolution &SE, Type *TargetTy, ConversionFn CreatePtrCast)
const SCEV * visitUnknown(const SCEVUnknown *Expr)
const SCEV * visitMulExpr(const SCEVMulExpr *Expr)
const SCEV * visitAddExpr(const SCEVAddExpr *Expr)
const SCEV * visit(const SCEV *S)
Class for arbitrary precision integers.
LLVM_ABI APInt umul_ov(const APInt &RHS, bool &Overflow) const
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
LLVM_ABI APInt getHiBits(unsigned numBits) const
Compute an APInt containing numBits highbits from this APInt.
unsigned getActiveBits() const
Compute the number of active bits in the value.
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
APInt abs() const
Get the absolute value.
bool sgt(const APInt &RHS) const
Signed greater than comparison.
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
bool isSignMask() const
Check if the APInt's value is returned by getSignMask.
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
static APInt getMinValue(unsigned numBits)
Gets minimum unsigned value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
bool sle(const APInt &RHS) const
Signed less or equal comparison.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
bool isNonPositive() const
Determine if this APInt Value is non-positive (<= 0).
unsigned countTrailingZeros() const
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
unsigned logBase2() const
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
LLVM_ABI APInt multiplicativeInverse() const
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
LLVM_ABI APInt sext(unsigned width) const
Sign extend to a new width.
APInt shl(unsigned shiftAmt) const
Left-shift function.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
bool isSignBitSet() const
Determine if sign bit of this APInt is set.
bool slt(const APInt &RHS) const
Signed less than comparison.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
This templated class represents "all analyses that operate over <a particular IR unit>" (e.g. a Function or a Module) in instances of either a verified or a general PassManager.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
AnalysisUsage & addRequiredTransitive()
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
MutableArrayRef< WeakVH > assumptions()
Access the list of assumption handles currently tracked for this function.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
const Function * getParent() const
Return the enclosing method, or null if none.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Instruction & front() const
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
LLVM_ABI unsigned getNoWrapKind() const
Returns one of OBO::NoSignedWrap or OBO::NoUnsignedWrap.
LLVM_ABI Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
BinaryOps getOpcode() const
This class represents a function call, abstracting a target machine's calling convention.
virtual void deleted()
Callback for Value destruction.
bool isFalseWhenEqual() const
This is just a convenience.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ ICMP_ULT
unsigned less than
@ ICMP_SGE
signed greater or equal
@ ICMP_ULE
unsigned less or equal
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
bool isTrueWhenEqual() const
This is just a convenience.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign information.
static LLVM_ABI std::optional< CmpPredicate > getMatching(CmpPredicate A, CmpPredicate B)
Compares two CmpPredicates taking samesign into account and returns the canonicalized CmpPredicate if...
LLVM_ABI CmpInst::Predicate getPreferredSignedPredicate() const
Attempts to return a signed CmpInst::Predicate from the CmpPredicate.
CmpInst::Predicate dropSameSign() const
Drops samesign information.
Conditional Branch instruction.
Value * getCondition() const
BasicBlock * getSuccessor(unsigned i) const
static LLVM_ABI Constant * getNot(Constant *C)
static Constant * getPtrAdd(Constant *Ptr, Constant *Offset, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReduced=nullptr)
Create a getelementptr i8, ptr, offset constant expression.
static LLVM_ABI Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getPtrToAddr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)
static LLVM_ABI Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
This is the shared class of boolean and integer constants.
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
This class represents a range of values.
LLVM_ABI ConstantRange add(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an addition of a value in this ran...
LLVM_ABI ConstantRange zextOrTrunc(uint32_t BitWidth) const
Make this range have the bit width given by BitWidth.
PreferredRangeType
If represented precisely, the result of some range operations may consist of multiple disjoint ranges...
LLVM_ABI bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
const APInt & getLower() const
Return the lower value for this range.
LLVM_ABI ConstantRange urem(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an unsigned remainder operation of...
LLVM_ABI bool isFullSet() const
Return true if this set contains all of the elements possible for this data-type.
LLVM_ABI bool icmp(CmpInst::Predicate Pred, const ConstantRange &Other) const
Does the predicate Pred hold between ranges this and Other?
LLVM_ABI bool isEmptySet() const
Return true if this set contains no members.
LLVM_ABI ConstantRange zeroExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
LLVM_ABI bool isSignWrappedSet() const
Return true if this set wraps around the signed domain.
LLVM_ABI APInt getSignedMin() const
Return the smallest signed value contained in the ConstantRange.
LLVM_ABI bool isWrappedSet() const
Return true if this set wraps around the unsigned domain.
LLVM_ABI void print(raw_ostream &OS) const
Print out the bounds to a stream.
LLVM_ABI ConstantRange truncate(uint32_t BitWidth, unsigned NoWrapKind=0) const
Return a new range in the specified integer type, which must be strictly smaller than the current typ...
LLVM_ABI ConstantRange signExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
const APInt & getUpper() const
Return the upper value for this range.
LLVM_ABI ConstantRange unionWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the union of this range with another range.
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
LLVM_ABI APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
LLVM_ABI APInt getSignedMax() const
Return the largest signed value contained in the ConstantRange.
static ConstantRange getNonEmpty(APInt Lower, APInt Upper)
Create non-empty constant range with the given bounds.
static LLVM_ABI ConstantRange makeGuaranteedNoWrapRegion(Instruction::BinaryOps BinOp, const ConstantRange &Other, unsigned NoWrapKind)
Produce the largest range containing all X such that "X BinOp Y" is guaranteed not to wrap (overflow)...
LLVM_ABI unsigned getMinSignedBits() const
Compute the maximal number of bits needed to represent every value in this signed range.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
LLVM_ABI ConstantRange sub(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a subtraction of a value in this r...
LLVM_ABI ConstantRange sextOrTrunc(uint32_t BitWidth) const
Make this range have the bit width given by BitWidth.
static LLVM_ABI ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
This is an important base class in LLVM.
A parsed version of the target data layout string in and methods for querying it.
LLVM_ABI const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
LLVM_ABI IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
LLVM_ABI unsigned getIndexTypeSizeInBits(Type *Ty) const
The size in bits of the index used in GEP calculation for this type.
LLVM_ABI IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of a GEP index in AddressSpace.
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
iterator find_as(const LookupKeyT &Val)
Alternate version of find() which allows a different, and possibly less expensive,...
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Analysis pass which computes a DominatorTree.
Legacy analysis pass which computes a DominatorTree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
LLVM_ABI bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
FoldingSetNodeIDRef - This class describes a reference to an interned FoldingSetNodeID,...
FoldingSetNodeID - This class is used to gather all the unique data bits of a node.
Represents flags for the getelementptr instruction/expression.
bool hasNoUnsignedSignedWrap() const
bool hasNoUnsignedWrap() const
static GEPNoWrapFlags none()
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
Module * getParent()
Get the module that this global value is contained inside of...
static bool isPrivateLinkage(LinkageTypes Linkage)
static bool isInternalLinkage(LinkageTypes Linkage)
This instruction compares its operands according to the predicate given to the constructor.
CmpPredicate getCmpPredicate() const
static bool isGE(Predicate P)
Return true if the predicate is SGE or UGE.
CmpPredicate getSwappedCmpPredicate() const
static LLVM_ABI bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
static bool isLT(Predicate P)
Return true if the predicate is SLT or ULT.
CmpPredicate getInverseCmpPredicate() const
Predicate getNonStrictCmpPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
static bool isGT(Predicate P)
Return true if the predicate is SGT or UGT.
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
static CmpPredicate getInverseCmpPredicate(CmpPredicate Pred)
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
static bool isLE(Predicate P)
Return true if the predicate is SLE or ULE.
LLVM_ABI bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
LLVM_ABI bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
LLVM_ABI bool isIdenticalToWhenDefined(const Instruction *I, bool IntersectAttrs=false) const LLVM_READONLY
This is like isIdenticalTo, except that it ignores the SubclassOptionalData flags,...
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A helper class to return the specified delimiter string after the first invocation of operator String...
An instruction for reading from memory.
Analysis pass that exposes the LoopInfo for a function.
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
BlockT * getHeader() const
unsigned getLoopDepth() const
Return the nesting level of this loop.
BlockT * getLoopPredecessor() const
If the given loop's header has exactly one unique predecessor outside the loop, return it.
LoopT * getParentLoop() const
Return the parent loop if it exists or nullptr for top level loops.
unsigned getLoopDepth(const BlockT *BB) const
Return the loop nesting level of the specified block.
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
The legacy pass manager's analysis pass to compute loop information.
Represents a single loop in the control flow graph.
bool isLoopInvariant(const Value *V) const
Return true if the specified value is loop invariant.
A Module instance is used to store all the information related to an LLVM module.
unsigned getOpcode() const
Return the opcode for this Instruction or ConstantExpr.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
iterator_range< const_block_iterator > blocks() const
op_range incoming_values()
Value * getIncomingValueForBlock(const BasicBlock *BB) const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
AnalysisType & getAnalysis() const
getAnalysis<AnalysisType>() - This function is used by subclasses to get to the analysis information ...
PointerIntPair - This class implements a pair of a pointer and small integer.
const SCEV * getPointer() const
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
LLVM_ABI void addPredicate(const SCEVPredicate &Pred)
Adds a new predicate.
LLVM_ABI const SCEVPredicate & getPredicate() const
LLVM_ABI const SCEV * getPredicatedSCEV(const SCEV *Expr)
Returns the rewritten SCEV for Expr in the context of the current SCEV predicate.
LLVM_ABI bool hasNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Returns true if we've proved that V doesn't wrap by means of a SCEV predicate.
LLVM_ABI void setNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Proves that V doesn't overflow by adding SCEV predicate.
LLVM_ABI void print(raw_ostream &OS, unsigned Depth) const
Print the SCEV mappings done by the Predicated Scalar Evolution.
LLVM_ABI bool areAddRecsEqualWithPreds(const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const
Check if AR1 and AR2 are equal, while taking into account Equal predicates in Preds.
LLVM_ABI PredicatedScalarEvolution(ScalarEvolution &SE, Loop &L)
LLVM_ABI const SCEVAddRecExpr * getAsAddRec(Value *V)
Attempts to produce an AddRecExpr for V by adding additional SCEV predicates.
LLVM_ABI unsigned getSmallConstantMaxTripCount()
Returns the upper bound of the loop trip count as a normal unsigned value, or 0 if the trip count is ...
LLVM_ABI const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
LLVM_ABI const SCEV * getSymbolicMaxBackedgeTakenCount()
Get the (predicated) symbolic max backedge count for the analyzed loop.
LLVM_ABI const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
PreservedAnalysisChecker getChecker() const
Build a checker for this PreservedAnalyses and the specified analysis type.
constexpr bool isValid() const
This node represents an addition of some number of SCEVs.
This node represents a polynomial recurrence on the trip count of the specified loop.
friend class ScalarEvolution
LLVM_ABI const SCEV * evaluateAtIteration(const SCEV *It, ScalarEvolution &SE) const
Return the value of this chain of recurrences at the specified iteration number.
void setNoWrapFlags(NoWrapFlags Flags)
Set flags for a recurrence without clearing any previously set flags.
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values.
bool isQuadratic() const
Return true if this represents an expression A + B*x + C*x^2 where A, B and C are loop invariant valu...
LLVM_ABI const SCEV * getNumIterationsInRange(const ConstantRange &Range, ScalarEvolution &SE) const
Return the number of iterations of this loop that produce values in the specified constant range.
LLVM_ABI const SCEVAddRecExpr * getPostIncExpr(ScalarEvolution &SE) const
Return an expression representing the value of this expression one iteration of the loop ahead.
const Loop * getLoop() const
SCEVUse getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
This is the base class for unary cast operator classes.
SCEVUse getOperand() const
LLVM_ABI SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, SCEVUse op, Type *ty)
void setNoWrapFlags(NoWrapFlags Flags)
Set flags for a non-recurrence without clearing previously set flags.
This class represents an assumption that the expression LHS Pred RHS evaluates to true,...
SCEVComparePredicate(const FoldingSetNodeIDRef ID, const ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
bool isAlwaysTrue() const override
Returns true if the predicate is always true.
void print(raw_ostream &OS, unsigned Depth=0) const override
Prints a textual representation of this predicate with an indentation of Depth.
bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const override
Implementation of the SCEVPredicate interface.
This class represents a constant integer value.
ConstantInt * getValue() const
const APInt & getAPInt() const
This is the base class for unary integral cast operator classes.
LLVM_ABI SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, SCEVUse op, Type *ty)
This node is the base class min/max selections.
static enum SCEVTypes negate(enum SCEVTypes T)
This node represents multiplication of some number of SCEVs.
This node is a base class providing common functionality for n'ary operators.
bool hasNoUnsignedWrap() const
ArrayRef< SCEVUse > operands() const
bool hasNoSelfWrap() const
size_t getNumOperands() const
bool hasNoSignedWrap() const
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask=NoWrapMask) const
SCEVUse getOperand(unsigned i) const
This class represents an assumption made using SCEV expressions which can be checked at run-time.
SCEVPredicate(const SCEVPredicate &)=default
virtual bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const =0
Returns true if this predicate implies N.
This class represents a cast from a pointer to a pointer-sized integer value, without capturing the p...
This class represents a cast from a pointer to a pointer-sized integer value.
This visitor recursively visits a SCEV expression and re-writes it.
const SCEV * visitSignExtendExpr(const SCEVSignExtendExpr *Expr)
const SCEV * visit(const SCEV *S)
const SCEV * visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr)
const SCEV * visitSMinExpr(const SCEVSMinExpr *Expr)
SCEVRewriteVisitor(ScalarEvolution &SE)
const SCEV * visitUMinExpr(const SCEVUMinExpr *Expr)
This class represents a signed minimum selection.
This node is the base class for sequential/in-order min/max selections.
static SCEVTypes getEquivalentNonSequentialSCEVType(SCEVTypes Ty)
This class represents a sign extension of a small integer value to a larger integer value.
Visit all nodes in the expression tree using worklist traversal.
This class represents a truncation of an integer value to a smaller integer value.
This class represents a binary unsigned division operation.
This class represents an unsigned minimum selection.
This class represents a composition of other SCEV predicates, and is the class that most clients will...
void print(raw_ostream &OS, unsigned Depth) const override
Prints a textual representation of this predicate with an indentation of Depth.
bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const override
Returns true if this predicate implies N.
SCEVUnionPredicate(ArrayRef< const SCEVPredicate * > Preds, ScalarEvolution &SE)
Union predicates don't get cached so create a dummy set ID for it.
bool isAlwaysTrue() const override
Implementation of the SCEVPredicate interface.
This means that we are dealing with an entirely unknown SCEV value, and only represent it as its LLVM...
This class represents the value of vscale, as used when defining the length of a scalable vector or r...
This class represents an assumption made on an AddRec expression.
IncrementWrapFlags
Similar to SCEV::NoWrapFlags, but with slightly different semantics for FlagNUSW.
SCEVWrapPredicate(const FoldingSetNodeIDRef ID, const SCEVAddRecExpr *AR, IncrementWrapFlags Flags)
bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const override
Returns true if this predicate implies N.
static SCEVWrapPredicate::IncrementWrapFlags setFlags(SCEVWrapPredicate::IncrementWrapFlags Flags, SCEVWrapPredicate::IncrementWrapFlags OnFlags)
void print(raw_ostream &OS, unsigned Depth=0) const override
Prints a textual representation of this predicate with an indentation of Depth.
bool isAlwaysTrue() const override
Returns true if the predicate is always true.
const SCEVAddRecExpr * getExpr() const
Implementation of the SCEVPredicate interface.
static SCEVWrapPredicate::IncrementWrapFlags clearFlags(SCEVWrapPredicate::IncrementWrapFlags Flags, SCEVWrapPredicate::IncrementWrapFlags OffFlags)
Convenient IncrementWrapFlags manipulation methods.
static SCEVWrapPredicate::IncrementWrapFlags getImpliedFlags(const SCEVAddRecExpr *AR, ScalarEvolution &SE)
Returns the set of SCEVWrapPredicate no wrap flags implied by a SCEVAddRecExpr.
IncrementWrapFlags getFlags() const
Returns the set assumed no overflow flags.
This class represents a zero extension of a small integer value to a larger integer value.
This class represents an analyzed expression in the program.
unsigned short getExpressionSize() const
LLVM_ABI bool isOne() const
Return true if the expression is a constant one.
LLVM_ABI void computeAndSetCanonical(ScalarEvolution &SE)
Compute and set the canonical SCEV, by constructing a SCEV with the same operands,...
LLVM_ABI bool isZero() const
Return true if the expression is a constant zero.
const SCEV * CanonicalSCEV
Pointer to the canonical version of the SCEV, i.e.
LLVM_ABI void dump() const
This method is used for debugging.
LLVM_ABI bool isAllOnesValue() const
Return true if the expression is a constant all-ones value.
LLVM_ABI bool isNonConstantNegative() const
Return true if the specified scev is negated, but not a constant.
LLVM_ABI ArrayRef< SCEVUse > operands() const
Return operands of this SCEV expression.
LLVM_ABI void print(raw_ostream &OS) const
Print out the internal representation of this scalar to the specified stream.
SCEV(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, unsigned short ExpressionSize)
SCEVTypes getSCEVType() const
LLVM_ABI Type * getType() const
Return the LLVM type of this SCEV expression.
NoWrapFlags
NoWrapFlags are bitfield indices into SubclassData.
Analysis pass that exposes the ScalarEvolution for a function.
LLVM_ABI ScalarEvolution run(Function &F, FunctionAnalysisManager &AM)
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
void print(raw_ostream &OS, const Module *=nullptr) const override
print - Print out the internal state of the pass.
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass.
void releaseMemory() override
releaseMemory() - This member can be implemented by a pass if it wants to be able to release its memo...
void verifyAnalysis() const override
verifyAnalysis() - This member can be implemented by a analysis pass to check state of analysis infor...
ScalarEvolutionWrapperPass()
static LLVM_ABI LoopGuards collect(const Loop *L, ScalarEvolution &SE)
Collect rewrite map for loop guards for loop L, together with flags indicating if NUW and NSW can be ...
LLVM_ABI const SCEV * rewrite(const SCEV *Expr) const
Try to apply the collected loop guards to Expr.
The main scalar evolution driver.
LLVM_ABI const SCEV * getUDivExpr(SCEVUse LHS, SCEVUse RHS)
Get a canonical unsigned division expression, or something simpler if possible.
const SCEV * getConstantMaxBackedgeTakenCount(const Loop *L)
When successful, this returns a SCEVConstant that is greater than or equal to (i.e.
static bool hasFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags TestFlags)
const DataLayout & getDataLayout() const
Return the DataLayout associated with the module this SCEV instance is operating on.
LLVM_ABI bool isKnownNonNegative(const SCEV *S)
Test if the given expression is known to be non-negative.
LLVM_ABI bool isKnownOnEveryIteration(CmpPredicate Pred, const SCEVAddRecExpr *LHS, const SCEV *RHS)
Test if the condition described by Pred, LHS, RHS is known to be true on every iteration of the loop ...
LLVM_ABI const SCEV * getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
Return the SCEV object corresponding to -V.
LLVM_ABI std::optional< LoopInvariantPredicate > getLoopInvariantExitCondDuringFirstIterationsImpl(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI, const SCEV *MaxIter)
LLVM_ABI const SCEV * getUDivCeilSCEV(const SCEV *N, const SCEV *D)
Compute ceil(N / D).
LLVM_ABI std::optional< LoopInvariantPredicate > getLoopInvariantExitCondDuringFirstIterations(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI, const SCEV *MaxIter)
If the result of the predicate LHS Pred RHS is loop invariant with respect to L at given Context duri...
LLVM_ABI Type * getWiderType(Type *Ty1, Type *Ty2) const
LLVM_ABI const SCEV * getAbsExpr(const SCEV *Op, bool IsNSW)
LLVM_ABI bool isKnownNonPositive(const SCEV *S)
Test if the given expression is known to be non-positive.
LLVM_ABI bool isKnownNegative(const SCEV *S)
Test if the given expression is known to be negative.
LLVM_ABI const SCEV * getPredicatedConstantMaxBackedgeTakenCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Predicates)
Similar to getConstantMaxBackedgeTakenCount, except it will add a set of SCEV predicates to Predicate...
LLVM_ABI const SCEV * removePointerBase(const SCEV *S)
Compute an expression equivalent to S - getPointerBase(S).
LLVM_ABI bool isLoopEntryGuardedByCond(const Loop *L, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether entry to the loop is protected by a conditional between LHS and RHS.
LLVM_ABI bool isKnownNonZero(const SCEV *S)
Test if the given expression is known to be non-zero.
LLVM_ABI const SCEV * getURemExpr(SCEVUse LHS, SCEVUse RHS)
Represents an unsigned remainder expression based on unsigned division.
LLVM_ABI const SCEV * getSCEVAtScope(const SCEV *S, const Loop *L)
Return a SCEV expression for the specified value at the specified scope in the program.
LLVM_ABI const SCEV * getBackedgeTakenCount(const Loop *L, ExitCountKind Kind=Exact)
If the specified loop has a predictable backedge-taken count, return it, otherwise return a SCEVCould...
LLVM_ABI const SCEV * getSMinExpr(SCEVUse LHS, SCEVUse RHS)
LLVM_ABI void setNoWrapFlags(SCEVAddRecExpr *AddRec, SCEV::NoWrapFlags Flags)
Update no-wrap flags of an AddRec.
LLVM_ABI const SCEV * getUMaxFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS)
Promote the operands to the wider of the types using zero-extension, and then perform a umax operatio...
const SCEV * getZero(Type *Ty)
Return a SCEV for the constant 0 of a specific type.
LLVM_ABI bool willNotOverflow(Instruction::BinaryOps BinOp, bool Signed, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI=nullptr)
Determine whether the operation BinOp between LHS and RHS provably does not have a signed/unsigned overflow (Signed)?
LLVM_ABI ExitLimit computeExitLimitFromCond(const Loop *L, Value *ExitCond, bool ExitIfTrue, bool ControlsOnlyExit, bool AllowPredicates=false)
Compute the number of times the backedge of the specified loop will execute if its exit condition wer...
LLVM_ABI const SCEV * getZeroExtendExprImpl(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI const SCEV * getMinMaxExpr(SCEVTypes Kind, SmallVectorImpl< SCEVUse > &Operands)
LLVM_ABI const SCEVPredicate * getEqualPredicate(const SCEV *LHS, const SCEV *RHS)
LLVM_ABI unsigned getSmallConstantTripMultiple(const Loop *L, const SCEV *ExitCount)
Returns the largest constant divisor of the trip count as a normal unsigned value,...
LLVM_ABI uint64_t getTypeSizeInBits(Type *Ty) const
Return the size in bits of the specified type, for which isSCEVable must return true.
LLVM_ABI const SCEV * getConstant(ConstantInt *V)
LLVM_ABI const SCEV * getPredicatedBackedgeTakenCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Predicates)
Similar to getBackedgeTakenCount, except it will add a set of SCEV predicates to Predicates that are ...
LLVM_ABI const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
LLVM_ABI const SCEV * getMinusSCEV(SCEVUse LHS, SCEVUse RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS.
ConstantRange getSignedRange(const SCEV *S)
Determine the signed range for a particular SCEV.
LLVM_ABI const SCEV * getAddRecExpr(SCEVUse Start, SCEVUse Step, const Loop *L, SCEV::NoWrapFlags Flags)
Get an add recurrence expression for the specified loop.
LLVM_ABI const SCEV * getNoopOrSignExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
bool loopHasNoAbnormalExits(const Loop *L)
Return true if the loop has no abnormal exits.
LLVM_ABI const SCEV * getTripCountFromExitCount(const SCEV *ExitCount)
A version of getTripCountFromExitCount below which always picks an evaluation type which can not resu...
LLVM_ABI ScalarEvolution(Function &F, TargetLibraryInfo &TLI, AssumptionCache &AC, DominatorTree &DT, LoopInfo &LI)
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
LLVM_ABI const SCEV * getTruncateOrNoop(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI const SCEV * getLosslessPtrToIntExpr(const SCEV *Op)
LLVM_ABI const SCEV * getCastExpr(SCEVTypes Kind, const SCEV *Op, Type *Ty)
LLVM_ABI const SCEV * getSequentialMinMaxExpr(SCEVTypes Kind, SmallVectorImpl< SCEVUse > &Operands)
LLVM_ABI std::optional< bool > evaluatePredicateAt(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI)
Check whether the condition described by Pred, LHS, and RHS is true or false in the given Context.
LLVM_ABI unsigned getSmallConstantMaxTripCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > *Predicates=nullptr)
Returns the upper bound of the loop trip count as a normal unsigned value.
LLVM_ABI const SCEV * getPtrToIntExpr(const SCEV *Op, Type *Ty)
LLVM_ABI bool isBackedgeTakenCountMaxOrZero(const Loop *L)
Return true if the backedge taken count is either the value returned by getConstantMaxBackedgeTakenCo...
LLVM_ABI void forgetLoop(const Loop *L)
This method should be called by the client when it has changed a loop in a way that may affect Scalar...
LLVM_ABI bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
LLVM_ABI bool isKnownPositive(const SCEV *S)
Test if the given expression is known to be positive.
LLVM_ABI bool SimplifyICmpOperands(CmpPredicate &Pred, SCEVUse &LHS, SCEVUse &RHS, unsigned Depth=0)
Simplify LHS and RHS in a comparison with predicate Pred.
APInt getUnsignedRangeMin(const SCEV *S)
Determine the min of the unsigned range for a particular SCEV.
LLVM_ABI const SCEV * getOffsetOfExpr(Type *IntTy, StructType *STy, unsigned FieldNo)
Return an expression for offsetof on the given field with type IntTy.
LLVM_ABI LoopDisposition getLoopDisposition(const SCEV *S, const Loop *L)
Return the "disposition" of the given SCEV with respect to the given loop.
LLVM_ABI bool containsAddRecurrence(const SCEV *S)
Return true if the SCEV is a scAddRecExpr or it contains scAddRecExpr.
LLVM_ABI const SCEV * getSignExtendExprImpl(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI bool hasOperand(const SCEV *S, const SCEV *Op) const
Test whether the given SCEV has Op as a direct or indirect operand.
LLVM_ABI const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI bool isSCEVable(Type *Ty) const
Test if values of the given type are analyzable within the SCEV framework.
LLVM_ABI Type * getEffectiveSCEVType(Type *Ty) const
Return a type with the same bitwidth as the given type and which represents how SCEV will treat the g...
LLVM_ABI const SCEVPredicate * getComparePredicate(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
LLVM_ABI bool haveSameSign(const SCEV *S1, const SCEV *S2)
Return true if we know that S1 and S2 must have the same sign.
LLVM_ABI const SCEV * getNotSCEV(const SCEV *V)
Return the SCEV object corresponding to ~V.
LLVM_ABI const SCEV * getElementCount(Type *Ty, ElementCount EC, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
LLVM_ABI bool instructionCouldExistWithOperands(const SCEV *A, const SCEV *B)
Return true if there exists a point in the program at which both A and B could be operands to the sam...
ConstantRange getUnsignedRange(const SCEV *S)
Determine the unsigned range for a particular SCEV.
LLVM_ABI void print(raw_ostream &OS) const
LLVM_ABI const SCEV * getPredicatedExitCount(const Loop *L, const BasicBlock *ExitingBlock, SmallVectorImpl< const SCEVPredicate * > *Predicates, ExitCountKind Kind=Exact)
Same as above except this uses the predicated backedge taken info and may require predicates.
static SCEV::NoWrapFlags clearFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OffFlags)
LLVM_ABI void forgetTopmostLoop(const Loop *L)
LLVM_ABI void forgetValue(Value *V)
This method should be called by the client when it has changed a value in a way that may affect its v...
APInt getSignedRangeMin(const SCEV *S)
Determine the min of the signed range for a particular SCEV.
LLVM_ABI const SCEV * getNoopOrAnyExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI void forgetBlockAndLoopDispositions(Value *V=nullptr)
Called when the client has changed the disposition of values in a loop or block.
LLVM_ABI const SCEV * getTruncateExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI const SCEV * getUMaxExpr(SCEVUse LHS, SCEVUse RHS)
@ MonotonicallyDecreasing
@ MonotonicallyIncreasing
LLVM_ABI std::optional< LoopInvariantPredicate > getLoopInvariantPredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI=nullptr)
If the result of the predicate LHS Pred RHS is loop invariant with respect to L, return a LoopInvaria...
LLVM_ABI const SCEV * getStoreSizeOfExpr(Type *IntTy, Type *StoreTy)
Return an expression for the store size of StoreTy that is type IntTy.
LLVM_ABI const SCEVPredicate * getWrapPredicate(const SCEVAddRecExpr *AR, SCEVWrapPredicate::IncrementWrapFlags AddedFlags)
LLVM_ABI bool isLoopBackedgeGuardedByCond(const Loop *L, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether the backedge of the loop is protected by a conditional between LHS and RHS.
LLVM_ABI APInt getNonZeroConstantMultiple(const SCEV *S)
const SCEV * getMinusOne(Type *Ty)
Return a SCEV for the constant -1 of a specific type.
static SCEV::NoWrapFlags setFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OnFlags)
LLVM_ABI bool hasLoopInvariantBackedgeTakenCount(const Loop *L)
Return true if the specified loop has an analyzable loop-invariant backedge-taken count.
LLVM_ABI BlockDisposition getBlockDisposition(const SCEV *S, const BasicBlock *BB)
Return the "disposition" of the given SCEV with respect to the given block.
LLVM_ABI const SCEV * getNoopOrZeroExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)
LLVM_ABI const SCEV * getUMinFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS, bool Sequential=false)
Promote the operands to the wider of the types using zero-extension, and then perform a umin operatio...
LLVM_ABI bool loopIsFiniteByAssumption(const Loop *L)
Return true if this loop is finite by assumption.
LLVM_ABI const SCEV * getExistingSCEV(Value *V)
Return an existing SCEV for V if there is one, otherwise return nullptr.
LLVM_ABI APInt getConstantMultiple(const SCEV *S, const Instruction *CtxI=nullptr)
Returns the max constant multiple of S.
LoopDisposition
An enum describing the relationship between a SCEV and a loop.
@ LoopComputable
The SCEV varies predictably with the loop.
@ LoopVariant
The SCEV is loop-variant (unknown).
@ LoopInvariant
The SCEV is loop-invariant.
friend class SCEVCallbackVH
LLVM_ABI bool isKnownMultipleOf(const SCEV *S, uint64_t M, SmallVectorImpl< const SCEVPredicate * > &Assumptions)
Check that S is a multiple of M.
LLVM_ABI const SCEV * getAnyExtendExpr(const SCEV *Op, Type *Ty)
getAnyExtendExpr - Return a SCEV for the given operand extended with unspecified bits out to the give...
LLVM_ABI bool isKnownToBeAPowerOfTwo(const SCEV *S, bool OrZero=false, bool OrNegative=false)
Test if the given expression is known to be a power of 2.
LLVM_ABI std::optional< SCEV::NoWrapFlags > getStrengthenedNoWrapFlagsFromBinOp(const OverflowingBinaryOperator *OBO)
Parse NSW/NUW flags from add/sub/mul IR binary operation Op into SCEV no-wrap flags,...
LLVM_ABI void forgetLcssaPhiWithNewPredecessor(Loop *L, PHINode *V)
Forget LCSSA phi node V of loop L to which a new predecessor was added, such that it may no longer be...
LLVM_ABI bool containsUndefs(const SCEV *S) const
Return true if the SCEV expression contains an undef value.
LLVM_ABI std::optional< MonotonicPredicateType > getMonotonicPredicateType(const SCEVAddRecExpr *LHS, ICmpInst::Predicate Pred)
If, for all loop invariant X, the predicate "LHS `Pred` X" is monotonically increasing or decreasing,...
LLVM_ABI const SCEV * getCouldNotCompute()
LLVM_ABI const SCEV * getMulExpr(SmallVectorImpl< SCEVUse > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
LLVM_ABI bool isAvailableAtLoopEntry(const SCEV *S, const Loop *L)
Determine if the SCEV can be evaluated at loop's entry.
LLVM_ABI uint32_t getMinTrailingZeros(const SCEV *S, const Instruction *CtxI=nullptr)
Determine the minimum number of zero bits that S is guaranteed to end in (at every loop iteration).
BlockDisposition
An enum describing the relationship between a SCEV and a basic block.
@ DominatesBlock
The SCEV dominates the block.
@ ProperlyDominatesBlock
The SCEV properly dominates the block.
@ DoesNotDominateBlock
The SCEV does not dominate the block.
LLVM_ABI const SCEV * getExitCount(const Loop *L, const BasicBlock *ExitingBlock, ExitCountKind Kind=Exact)
Return the number of times the backedge executes before the given exit would be taken; if not exactly...
LLVM_ABI const SCEV * getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI void getPoisonGeneratingValues(SmallPtrSetImpl< const Value * > &Result, const SCEV *S)
Return the set of Values that, if poison, will definitively result in S being poison as well.
LLVM_ABI void forgetLoopDispositions()
Called when the client has changed the disposition of values in this loop.
LLVM_ABI const SCEV * getVScale(Type *Ty)
LLVM_ABI unsigned getSmallConstantTripCount(const Loop *L)
Returns the exact trip count of the loop if we can compute it, and the result is a small constant.
LLVM_ABI bool hasComputableLoopEvolution(const SCEV *S, const Loop *L)
Return true if the given SCEV changes value in a known way in the specified loop.
LLVM_ABI const SCEV * getPointerBase(const SCEV *V)
Transitively follow the chain of pointer-type operands until reaching a SCEV that does not have a sin...
LLVM_ABI void forgetAllLoops()
LLVM_ABI bool dominates(const SCEV *S, const BasicBlock *BB)
Return true if the elements that make up the given SCEV dominate the specified basic block.
APInt getUnsignedRangeMax(const SCEV *S)
Determine the max of the unsigned range for a particular SCEV.
LLVM_ABI const SCEV * getAddExpr(SmallVectorImpl< SCEVUse > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
ExitCountKind
The terms "backedge taken count" and "exit count" are used interchangeably to refer to the number of ...
@ SymbolicMaximum
An expression which provides an upper bound on the exact trip count.
@ ConstantMaximum
A constant which provides an upper bound on the exact trip count.
@ Exact
An expression exactly describing the number of times the backedge has executed when a loop is exited.
LLVM_ABI bool isKnownPredicate(CmpPredicate Pred, SCEVUse LHS, SCEVUse RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
LLVM_ABI const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
LLVM_ABI const SCEV * getPtrToAddrExpr(const SCEV *Op)
LLVM_ABI const SCEVAddRecExpr * convertSCEVToAddRecWithPredicates(const SCEV *S, const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Preds)
Tries to convert the S expression to an AddRec expression, adding additional predicates to Preds as r...
LLVM_ABI const SCEV * getSMaxExpr(SCEVUse LHS, SCEVUse RHS)
LLVM_ABI const SCEV * getElementSize(Instruction *Inst)
Return the size of an element read or written by Inst.
LLVM_ABI const SCEV * getSizeOfExpr(Type *IntTy, TypeSize Size)
Return an expression for a TypeSize.
LLVM_ABI std::optional< bool > evaluatePredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Check whether the condition described by Pred, LHS, and RHS is true or false.
LLVM_ABI const SCEV * getUnknown(Value *V)
LLVM_ABI std::optional< std::pair< const SCEV *, SmallVector< const SCEVPredicate *, 3 > > > createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI)
Checks if SymbolicPHI can be rewritten as an AddRecExpr under some Predicates.
LLVM_ABI const SCEV * getTruncateOrZeroExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI bool isKnownViaInduction(CmpPredicate Pred, SCEVUse LHS, SCEVUse RHS)
We'd like to check the predicate on every iteration of the most dominated loop between loops used in ...
static SCEV::NoWrapFlags maskFlags(SCEV::NoWrapFlags Flags, int Mask)
Convenient NoWrapFlags manipulation that hides enum casts and is visible in the ScalarEvolution name ...
LLVM_ABI std::optional< APInt > computeConstantDifference(const SCEV *LHS, const SCEV *RHS)
Compute LHS - RHS and returns the result as an APInt if it is a constant, and std::nullopt if it isn'...
LLVM_ABI bool properlyDominates(const SCEV *S, const BasicBlock *BB)
Return true if the elements that make up the given SCEV properly dominate the specified basic block.
LLVM_ABI const SCEV * getUDivExactExpr(SCEVUse LHS, SCEVUse RHS)
Get a canonical unsigned division expression, or something simpler if possible.
LLVM_ABI const SCEV * rewriteUsingPredicate(const SCEV *S, const Loop *L, const SCEVPredicate &A)
Re-writes the SCEV according to the Predicates in A.
LLVM_ABI std::pair< const SCEV *, const SCEV * > SplitIntoInitAndPostInc(const Loop *L, const SCEV *S)
Splits SCEV expression S into two SCEVs.
LLVM_ABI bool canReuseInstruction(const SCEV *S, Instruction *I, SmallVectorImpl< Instruction * > &DropPoisonGeneratingInsts)
Check whether it is poison-safe to represent the expression S using the instruction I.
LLVM_ABI bool isKnownPredicateAt(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
LLVM_ABI const SCEV * getPredicatedSymbolicMaxBackedgeTakenCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Predicates)
Similar to getSymbolicMaxBackedgeTakenCount, except it will add a set of SCEV predicates to Predicate...
LLVM_ABI ~ScalarEvolution()
LLVM_ABI const SCEV * getGEPExpr(GEPOperator *GEP, ArrayRef< SCEVUse > IndexExprs)
Returns an expression for a GEP.
LLVM_ABI const SCEV * getUMinExpr(SCEVUse LHS, SCEVUse RHS, bool Sequential=false)
LLVM_ABI void registerUser(const SCEV *User, ArrayRef< const SCEV * > Ops)
Notify this ScalarEvolution that User directly uses SCEVs in Ops.
LLVM_ABI bool isBasicBlockEntryGuardedByCond(const BasicBlock *BB, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether entry to the basic block is protected by a conditional between LHS and RHS.
LLVM_ABI const SCEV * getTruncateOrSignExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI bool containsErasedValue(const SCEV *S) const
Return true if the SCEV expression contains a Value that has been optimised out and is now a nullptr.
const SCEV * getSymbolicMaxBackedgeTakenCount(const Loop *L)
When successful, this returns a SCEV that is greater than or equal to (i.e.
APInt getSignedRangeMax(const SCEV *S)
Determine the max of the signed range for a particular SCEV.
LLVM_ABI void verify() const
LLVMContext & getContext() const
Implements a dense probed hash-table based set with some number of buckets stored inline.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
iterator insert(iterator I, T &&Elt)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Used to lazily calculate structure layout information for a target machine, based on the DataLayout s...
TypeSize getElementOffset(unsigned Idx) const
TypeSize getSizeInBits() const
Class to represent struct types.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
The instances of the Type class are immutable: once they are created, they are never changed.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
bool isPointerTy() const
True if this is an instance of PointerType.
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
LLVMContext & getContext() const
All values hold a context through their type.
unsigned getValueID() const
Return an ID for the concrete type of this object.
LLVM_ABI void printAsOperand(raw_ostream &O, bool PrintType=true, const Module *M=nullptr) const
Print the name of this Value out to the specified raw_ostream.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
This class implements an extremely fast bulk output stream that can only output to a stream.
raw_ostream & indent(unsigned NumSpaces)
indent - Insert 'NumSpaces' spaces.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
const APInt & smin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be signed.
const APInt & smax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be signed.
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
LLVM_ABI std::optional< APInt > SolveQuadraticEquationWrap(APInt A, APInt B, APInt C, unsigned RangeWidth)
Let q(n) = An^2 + Bn + C, and BW = bit width of the value range (e.g.
const APInt & umax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be unsigned.
LLVM_ABI APInt GreatestCommonDivisor(APInt A, APInt B)
Compute GCD of two unsigned APInt values.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
int getMinValue(MCInstrInfo const &MCII, MCInst const &MCI)
Return the minimum value of an extendable operand.
@ BasicBlock
Various leaf nodes.
LLVM_ABI Function * getDeclarationIfExists(const Module *M, ID id)
Look up the Function declaration of the intrinsic id in the Module M and return it if it exists.
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
bool match(Val *V, const Pattern &P)
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
bind_ty< WithOverflowInst > m_WithOverflowInst(WithOverflowInst *&I)
Match a with overflow intrinsic, capturing it if we match.
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
class_match< BasicBlock > m_BasicBlock()
Match an arbitrary basic block value and ignore it.
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
class_match< const SCEVVScale > m_SCEVVScale()
bind_cst_ty m_scev_APInt(const APInt *&C)
Match an SCEV constant and bind it to an APInt.
cst_pred_ty< is_all_ones > m_scev_AllOnes()
Match an integer with all bits set.
SCEVUnaryExpr_match< SCEVZeroExtendExpr, Op0_t > m_scev_ZExt(const Op0_t &Op0)
is_undef_or_poison m_scev_UndefOrPoison()
Match an SCEVUnknown wrapping undef or poison.
class_match< const SCEVConstant > m_SCEVConstant()
cst_pred_ty< is_one > m_scev_One()
Match an integer 1.
specificloop_ty m_SpecificLoop(const Loop *L)
SCEVAffineAddRec_match< Op0_t, Op1_t, class_match< const Loop > > m_scev_AffineAddRec(const Op0_t &Op0, const Op1_t &Op1)
bind_ty< const SCEVMulExpr > m_scev_Mul(const SCEVMulExpr *&V)
SCEVUnaryExpr_match< SCEVSignExtendExpr, Op0_t > m_scev_SExt(const Op0_t &Op0)
cst_pred_ty< is_zero > m_scev_Zero()
Match an integer 0.
SCEVUnaryExpr_match< SCEVTruncateExpr, Op0_t > m_scev_Trunc(const Op0_t &Op0)
bool match(const SCEV *S, const Pattern &P)
SCEVBinaryExpr_match< SCEVUDivExpr, Op0_t, Op1_t > m_scev_UDiv(const Op0_t &Op0, const Op1_t &Op1)
specificscev_ty m_scev_Specific(const SCEV *S)
Match if we have a specific specified SCEV.
SCEVBinaryExpr_match< SCEVMulExpr, Op0_t, Op1_t, SCEV::FlagNUW, true > m_scev_c_NUWMul(const Op0_t &Op0, const Op1_t &Op1)
class_match< const Loop > m_Loop()
bind_ty< const SCEVAddExpr > m_scev_Add(const SCEVAddExpr *&V)
bind_ty< const SCEVUnknown > m_SCEVUnknown(const SCEVUnknown *&V)
SCEVBinaryExpr_match< SCEVMulExpr, Op0_t, Op1_t, SCEV::FlagAnyWrap, true > m_scev_c_Mul(const Op0_t &Op0, const Op1_t &Op1)
SCEVBinaryExpr_match< SCEVSMaxExpr, Op0_t, Op1_t > m_scev_SMax(const Op0_t &Op0, const Op1_t &Op1)
SCEVURem_match< Op0_t, Op1_t > m_scev_URem(Op0_t LHS, Op1_t RHS, ScalarEvolution &SE)
Match the mathematical pattern A - (A / B) * B, where A and B can be arbitrary expressions.
class_match< const SCEV > m_SCEV()
@ Valid
The data is already valid.
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
@ Switch
The "resume-switch" lowering, where there are separate resume and destroy functions that are shared b...
NodeAddr< PhiNode * > Phi
friend class Instruction
Iterator for Instructions in a `BasicBlock.
This is an optimization pass for GlobalISel generic memory operations.
void visitAll(const SCEV *Root, SV &Visitor)
Use SCEVTraversal to visit all nodes in the given expression tree.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
FunctionAddr VTableAddr Value
LLVM_ATTRIBUTE_ALWAYS_INLINE DynamicAPInt gcd(const DynamicAPInt &A, const DynamicAPInt &B)
void stable_sort(R &&Range)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
SaveAndRestore(T &) -> SaveAndRestore< T >
Printable print(const GCNRegPressure &RP, const GCNSubtarget *ST=nullptr, unsigned DynamicVGPRBlockSize=0)
LLVM_ABI bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
LLVM_ABI bool mustTriggerUB(const Instruction *I, const SmallPtrSetImpl< const Value * > &KnownPoison)
Return true if the given instruction must trigger undefined behavior when I is executed with any oper...
LLVM_ABI bool canConstantFoldCallTo(const CallBase *Call, const Function *F)
canConstantFoldCallTo - Return true if it's even possible to fold a call to the specified function.
InterleavedRange< Range > interleaved(const Range &R, StringRef Separator=", ", StringRef Prefix="", StringRef Suffix="")
Output range R as a sequence of interleaved elements.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
auto successors(const MachineBasicBlock *BB)
scope_exit(Callable) -> scope_exit< Callable >
constexpr from_range_t from_range
auto dyn_cast_if_present(const Y &Val)
dyn_cast_if_present<X> - Functionally identical to dyn_cast, except that a null (or none in the case ...
bool set_is_subset(const S1Ty &S1, const S2Ty &S2)
set_is_subset(A, B) - Return true iff A in B
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
auto uninitialized_copy(R &&Src, IterTy Dst)
bool isa_and_nonnull(const Y &Val)
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
LLVM_ABI bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO, const DominatorTree &DT)
Returns true if the arithmetic part of the WO 's result is used only along the paths control dependen...
DomTreeNodeBase< BasicBlock > DomTreeNode
LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
auto dyn_cast_or_null(const Y &Val)
void erase(Container &C, ValueType V)
Wrapper function to remove a value from a container:
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
iterator_range< pointee_iterator< WrappedIteratorT > > make_pointee_range(RangeT &&Range)
auto reverse(ContainerTy &&C)
LLVM_ABI bool isMustProgress(const Loop *L)
Return true if this loop can be assumed to make progress.
LLVM_ABI bool impliesPoison(const Value *ValAssumedPoison, const Value *V)
Return true if V is poison given that ValAssumedPoison is already poison.
LLVM_ABI bool isFinite(const Loop *L)
Return true if this loop can be assumed to run for a finite number of iterations.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
unsigned short computeExpressionSize(ArrayRef< SCEVUse > Args)
LLVM_ABI bool programUndefinedIfPoison(const Instruction *Inst)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool isPointerTy(const Type *T)
FunctionAddr VTableAddr Count
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
LLVM_ABI bool propagatesPoison(const Use &PoisonOp)
Return true if PoisonOp's user yields poison or raises UB if its operand PoisonOp is poison.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Mul
Product of integers.
@ SMax
Signed integer max implemented in terms of select(cmp()).
@ SMin
Signed integer min implemented in terms of select(cmp()).
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
auto count(R &&Range, const E &Element)
Wrapper function around std::count to count the number of times an element Element occurs in the give...
DWARFExpression::Operation Op
auto max_element(R &&Range)
Provide wrappers to std::max_element which take ranges instead of having to pass begin/end explicitly...
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return the number of times the sign bit of the register is replicated into the other bits.
constexpr unsigned BitWidth
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
constexpr bool isIntN(unsigned N, int64_t x)
Checks if an signed integer fits into the given (dynamic) bit width.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
iterator_range< df_iterator< T > > depth_first(const T &G)
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
LLVM_ABI bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
LLVM_ABI Constant * ConstantFoldInstOperands(const Instruction *I, ArrayRef< Constant * > Ops, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldInstOperands - Attempt to constant fold an instruction with the specified operands.
bool SCEVExprContains(const SCEV *Root, PredTy Pred)
Return true if any node in Root satisfies the predicate Pred.
Implement std::hash so that hash_code can be used in STL containers.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
A special type used by analysis passes to provide an address that identifies that particular analysis...
Incoming for lane mask phi as machine instruction, incoming register Reg and incoming block Block are...
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
bool isNonNegative() const
Returns true if this value is known to be non-negative.
static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
unsigned getBitWidth() const
Get the bit width of this value.
static LLVM_ABI KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for lshr(LHS, RHS).
KnownBits zextOrTrunc(unsigned BitWidth) const
Return known bits for a zero extension or truncation of the value we're tracking.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
bool isNegative() const
Returns true if this value is known to be negative.
static LLVM_ABI KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
An object of this class is returned by queries that could not be answered.
LLVM_ABI SCEVCouldNotCompute()
static LLVM_ABI bool classof(const SCEV *S)
Methods for support type inquiry through isa, cast, and dyn_cast:
void dump() const
This method is used for debugging.
void print(raw_ostream &OS) const
Print out the internal representation of this scalar to the specified stream.
This class defines a simple visitor class that may be used for various SCEV analysis purposes.
A utility class that uses RAII to save and restore the value of a variable.
Information about the number of loop iterations for which a loop exit's branch condition evaluates to...
LLVM_ABI ExitLimit(const SCEV *E)
Construct either an exact exit limit from a constant, or an unknown one from a SCEVCouldNotCompute.
const SCEV * ExactNotTaken
const SCEV * SymbolicMaxNotTaken
SmallVector< const SCEVPredicate *, 4 > Predicates
A vector of predicate guards for this ExitLimit.
const SCEV * ConstantMaxNotTaken