83#include "llvm/Config/llvm-config.h"
138#define DEBUG_TYPE "scalar-evolution"
141 "Number of loop exits with predictable exit counts");
143 "Number of loop exits without predictable exit counts");
145 "Number of loops with trip counts computed by force");
147#ifdef EXPENSIVE_CHECKS
155 cl::desc(
"Maximum number of iterations SCEV will "
156 "symbolically execute a constant "
162 cl::desc(
"Verify ScalarEvolution's backedge taken counts (slow)"));
165 cl::desc(
"Enable stricter verification with -verify-scev is passed"));
169 cl::desc(
"Verify IR correctness when making sensitive SCEV queries (slow)"),
174 cl::desc(
"Threshold for inlining multiplication operands into a SCEV"),
179 cl::desc(
"Threshold for inlining addition operands into a SCEV"),
183 "scalar-evolution-max-scev-compare-depth",
cl::Hidden,
184 cl::desc(
"Maximum depth of recursive SCEV complexity comparisons"),
188 "scalar-evolution-max-scev-operations-implication-depth",
cl::Hidden,
189 cl::desc(
"Maximum depth of recursive SCEV operations implication analysis"),
193 "scalar-evolution-max-value-compare-depth",
cl::Hidden,
194 cl::desc(
"Maximum depth of recursive value complexity comparisons"),
199 cl::desc(
"Maximum depth of recursive arithmetics"),
203 "scalar-evolution-max-constant-evolving-depth",
cl::Hidden,
208 cl::desc(
"Maximum depth of recursive SExt/ZExt/Trunc"),
213 cl::desc(
"Max coefficients in AddRec during evolving"),
218 cl::desc(
"Size of the expression which is considered huge"),
223 cl::desc(
"Threshold for switching to iteratively computing SCEV ranges"),
227 "scalar-evolution-max-loop-guard-collection-depth",
cl::Hidden,
228 cl::desc(
"Maximum depth for recursive loop guard collection"),
cl::init(1));
233 cl::desc(
"When printing analysis, include information on every instruction"));
236 "scalar-evolution-use-expensive-range-sharpening",
cl::Hidden,
238 cl::desc(
"Use more powerful methods of sharpening expression ranges. May "
239 "be costly in terms of compile time"));
242 "scalar-evolution-max-scc-analysis-depth",
cl::Hidden,
243 cl::desc(
"Maximum amount of nodes to process while searching SCEVUnknown "
244 "Phi strongly connected components"),
249 cl::desc(
"Handle <= and >= in finite loops"),
253 "scalar-evolution-use-context-for-no-wrap-flag-strenghening",
cl::Hidden,
254 cl::desc(
"Infer nuw/nsw flags using context where suitable"),
265#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
285 OS <<
"(ptrto" << OpS <<
" " << *
Op->getType() <<
" " << *
Op <<
" to "
292 OS <<
"(trunc " << *
Op->getType() <<
" " << *
Op <<
" to "
299 OS <<
"(zext " << *
Op->getType() <<
" " << *
Op <<
" to "
306 OS <<
"(sext " << *
Op->getType() <<
" " << *
Op <<
" to "
335 const char *OpStr =
nullptr;
348 OpStr =
" umin_seq ";
372 OS <<
"(" << *UDiv->
getLHS() <<
" /u " << *UDiv->
getRHS() <<
")";
379 OS <<
"***COULDNOTCOMPUTE***";
457 if (!
Mul)
return false;
461 if (!SC)
return false;
479 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
481 UniqueSCEVs.InsertNode(S, IP);
495 ConstantInt::get(ITy, V, isSigned,
true));
503 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
506 UniqueSCEVs.InsertNode(S, IP);
526 "Must be a non-bit-width-changing pointer-to-integer cast!");
533 "Must be a non-bit-width-changing pointer-to-integer cast!");
545 "Cannot truncate non-integer value!");
552 "Cannot zero extend non-integer value!");
559 "Cannot sign extend non-integer value!");
564 SE->forgetMemoizedResults(
this);
567 SE->UniqueSCEVs.RemoveNode(
this);
573void SCEVUnknown::allUsesReplacedWith(
Value *New) {
575 SE->forgetMemoizedResults(
this);
578 SE->UniqueSCEVs.RemoveNode(
this);
600 if (LIsPointer != RIsPointer)
601 return (
int)LIsPointer - (int)RIsPointer;
606 return (
int)LID - (int)RID;
611 unsigned LArgNo = LA->getArgNo(), RArgNo =
RA->getArgNo();
612 return (
int)LArgNo - (int)RArgNo;
618 if (
auto L = LGV->getLinkage() - RGV->getLinkage())
621 const auto IsGVNameSemantic = [&](
const GlobalValue *GV) {
622 auto LT = GV->getLinkage();
629 if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
630 return LGV->getName().compare(RGV->getName());
641 if (LParent != RParent) {
644 if (LDepth != RDepth)
645 return (
int)LDepth - (int)RDepth;
649 unsigned LNumOps = LInst->getNumOperands(),
650 RNumOps = RInst->getNumOperands();
651 if (LNumOps != RNumOps)
652 return (
int)LNumOps - (int)RNumOps;
654 for (
unsigned Idx :
seq(LNumOps)) {
656 RInst->getOperand(Idx),
Depth + 1);
670static std::optional<int>
680 return (
int)LType - (int)RType;
705 unsigned LBitWidth = LA.
getBitWidth(), RBitWidth =
RA.getBitWidth();
706 if (LBitWidth != RBitWidth)
707 return (
int)LBitWidth - (int)RBitWidth;
708 return LA.
ult(
RA) ? -1 : 1;
714 return LTy->getBitWidth() - RTy->getBitWidth();
725 if (LLoop != RLoop) {
727 assert(LHead != RHead &&
"Two loops share the same header?");
731 "No dominance between recurrences used by one SCEV?");
755 unsigned LNumOps = LOps.
size(), RNumOps = ROps.
size();
756 if (LNumOps != RNumOps)
757 return (
int)LNumOps - (int)RNumOps;
759 for (
unsigned i = 0; i != LNumOps; ++i) {
784 if (
Ops.size() < 2)
return;
789 return Complexity && *Complexity < 0;
791 if (
Ops.size() == 2) {
795 if (IsLessComplex(
RHS,
LHS))
802 return IsLessComplex(
LHS,
RHS);
809 for (
unsigned i = 0, e =
Ops.size(); i != e-2; ++i) {
815 for (
unsigned j = i+1; j != e &&
Ops[j]->getSCEVType() == Complexity; ++j) {
820 if (i == e-2)
return;
842template <
typename FoldT,
typename IsIdentityT,
typename IsAbsorberT>
846 IsIdentityT IsIdentity, IsAbsorberT IsAbsorber) {
848 for (
unsigned Idx = 0; Idx <
Ops.size();) {
856 Ops.erase(
Ops.begin() + Idx);
863 assert(Folded &&
"Must have folded value");
867 if (Folded && IsAbsorber(Folded->
getAPInt()))
871 if (Folded && !IsIdentity(Folded->
getAPInt()))
872 Ops.insert(
Ops.begin(), Folded);
874 return Ops.size() == 1 ?
Ops[0] :
nullptr;
949 APInt OddFactorial(W, 1);
951 for (
unsigned i = 3; i <= K; ++i) {
954 OddFactorial *= (i >> TwoFactors);
958 unsigned CalculationBits = W +
T;
972 for (
unsigned i = 1; i != K; ++i) {
1005 for (
unsigned i = 1, e =
Operands.size(); i != e; ++i) {
1033 ConversionFn CreatePtrCast;
1037 ConversionFn CreatePtrCast)
1038 : Base(
SE), TargetTy(TargetTy), CreatePtrCast(
std::
move(CreatePtrCast)) {}
1041 Type *TargetTy, ConversionFn CreatePtrCast) {
1043 return Rewriter.visit(Scev);
1079 "Should only reach pointer-typed SCEVUnknown's.");
1084 return SE.getZero(TargetTy);
1085 return CreatePtrCast(Expr);
1090 assert(
Op->getType()->isPointerTy() &&
"Op must be a pointer");
1109 Op, *
this, IntPtrTy, [
this, IntPtrTy](
const SCEVUnknown *U) {
1114 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
1116 SCEV *S =
new (SCEVAllocator)
1118 UniqueSCEVs.InsertNode(S, IP);
1120 return static_cast<const SCEV *
>(S);
1123 "We must have succeeded in sinking the cast, "
1124 "and ending up with an integer-typed expression!");
1129 assert(
Op->getType()->isPointerTy() &&
"Op must be a pointer");
1130 Type *Ty = DL.getAddressType(
Op->getType());
1139 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
1157 UniqueSCEVs.InsertNode(S, IP);
1163 assert(Ty->isIntegerTy() &&
"Target type must be an integer type!");
1175 "This is not a truncating conversion!");
1177 "This is not a conversion to a SCEVable type!");
1178 assert(!
Op->getType()->isPointerTy() &&
"Can't truncate pointer!");
1186 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1208 UniqueSCEVs.InsertNode(S, IP);
1220 unsigned numTruncs = 0;
1221 for (
unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
1229 if (numTruncs < 2) {
1239 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
1246 for (
const SCEV *
Op : AddRec->operands())
1261 UniqueSCEVs.InsertNode(S, IP);
1301struct ExtendOpTraitsBase {
1302 typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(
const SCEV *,
Type *,
1307template <
typename ExtendOp>
struct ExtendOpTraits {
1323 static const GetExtendExprTy GetExtendExpr;
1325 static const SCEV *getOverflowLimitForStep(
const SCEV *Step,
1326 ICmpInst::Predicate *Pred,
1327 ScalarEvolution *SE) {
1332const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1339 static const GetExtendExprTy GetExtendExpr;
1341 static const SCEV *getOverflowLimitForStep(
const SCEV *Step,
1342 ICmpInst::Predicate *Pred,
1343 ScalarEvolution *SE) {
1348const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1360template <
typename ExtendOpTy>
1363 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1364 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1380 for (
auto It = DiffOps.
begin(); It != DiffOps.
end(); ++It)
1393 auto PreStartFlags =
1411 const SCEV *OperandExtendedStart =
1413 (SE->*GetExtendExpr)(Step, WideTy,
Depth));
1414 if ((SE->*GetExtendExpr)(Start, WideTy,
Depth) == OperandExtendedStart) {
1426 const SCEV *OverflowLimit =
1427 ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);
1429 if (OverflowLimit &&
1437template <
typename ExtendOpTy>
1441 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1449 (SE->*GetExtendExpr)(PreStart, Ty,
Depth));
1484template <
typename ExtendOpTy>
1485bool ScalarEvolution::proveNoWrapByVaryingStart(
const SCEV *Start,
1488 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1498 APInt StartAI = StartC->
getAPInt();
1500 for (
unsigned Delta : {-2, -1, 1, 2}) {
1501 const SCEV *PreStart =
getConstant(StartAI - Delta);
1503 FoldingSetNodeID
ID;
1505 ID.AddPointer(PreStart);
1506 ID.AddPointer(Step);
1510 static_cast<SCEVAddRecExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
1514 if (PreAR && PreAR->getNoWrapFlags(WrapType)) {
1517 const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
1518 DeltaS, &Pred,
this);
1536 const unsigned BitWidth =
C.getBitWidth();
1554 const APInt &ConstantStart,
1573 auto &UserIDs = FoldCacheUser[
I.first->second];
1574 assert(
count(UserIDs,
ID) == 1 &&
"unexpected duplicates in UserIDs");
1575 for (
unsigned I = 0;
I != UserIDs.size(); ++
I)
1576 if (UserIDs[
I] ==
ID) {
1581 I.first->second = S;
1583 FoldCacheUser[S].push_back(
ID);
1589 "This is not an extending conversion!");
1591 "This is not a conversion to a SCEVable type!");
1592 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1596 if (
const SCEV *S = FoldCache.lookup(
ID))
1608 "This is not an extending conversion!");
1610 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1627 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1631 UniqueSCEVs.InsertNode(S, IP);
1640 const SCEV *
X = ST->getOperand();
1654 if (AR->isAffine()) {
1655 const SCEV *Start = AR->getStart();
1656 const SCEV *Step = AR->getStepRecurrence(*
this);
1658 const Loop *L = AR->getLoop();
1662 if (AR->hasNoUnsignedWrap()) {
1683 const SCEV *CastedMaxBECount =
1687 if (MaxBECount == RecastedMaxBECount) {
1697 const SCEV *WideMaxBECount =
1699 const SCEV *OperandExtendedAdd =
1705 if (ZAdd == OperandExtendedAdd) {
1716 OperandExtendedAdd =
1722 if (ZAdd == OperandExtendedAdd) {
1743 !AC.assumptions().empty()) {
1745 auto NewFlags = proveNoUnsignedWrapViaInduction(AR);
1747 if (AR->hasNoUnsignedWrap()) {
1782 const APInt &
C = SC->getAPInt();
1786 const SCEV *SResidual =
1795 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
1820 if (SA->hasNoUnsignedWrap()) {
1824 for (
const auto *
Op : SA->operands())
1841 const SCEV *SResidual =
1853 if (SM->hasNoUnsignedWrap()) {
1857 for (
const auto *
Op : SM->operands())
1875 const SCEV *TruncRHS;
1894 for (
auto *Operand :
MinMax->operands())
1905 for (
auto *Operand :
MinMax->operands())
1912 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1915 UniqueSCEVs.InsertNode(S, IP);
1923 "This is not an extending conversion!");
1925 "This is not a conversion to a SCEVable type!");
1926 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1930 if (
const SCEV *S = FoldCache.lookup(
ID))
1942 "This is not an extending conversion!");
1944 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1966 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1971 UniqueSCEVs.InsertNode(S, IP);
1980 const SCEV *
X = ST->getOperand();
1991 if (SA->hasNoSignedWrap()) {
1995 for (
const auto *
Op : SA->operands())
2013 const SCEV *SResidual =
2027 if (AR->isAffine()) {
2028 const SCEV *Start = AR->getStart();
2029 const SCEV *Step = AR->getStepRecurrence(*
this);
2031 const Loop *L = AR->getLoop();
2035 if (AR->hasNoSignedWrap()) {
2057 const SCEV *CastedMaxBECount =
2061 if (MaxBECount == RecastedMaxBECount) {
2071 const SCEV *WideMaxBECount =
2073 const SCEV *OperandExtendedAdd =
2079 if (SAdd == OperandExtendedAdd) {
2090 OperandExtendedAdd =
2096 if (SAdd == OperandExtendedAdd) {
2116 auto NewFlags = proveNoSignedWrapViaInduction(AR);
2118 if (AR->hasNoSignedWrap()) {
2133 const APInt &
C = SC->getAPInt();
2137 const SCEV *SResidual =
2146 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
2165 for (
auto *Operand :
MinMax->operands())
2174 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
2177 UniqueSCEVs.InsertNode(S, IP);
2203 "This is not an extending conversion!");
2205 "This is not a conversion to a SCEVable type!");
2210 if (SC->getAPInt().isNegative())
2215 const SCEV *NewOp =
T->getOperand();
2234 for (
const SCEV *
Op : AR->operands())
2273 APInt &AccumulatedConstant,
2276 bool Interesting =
false;
2283 if (Scale != 1 || AccumulatedConstant != 0 ||
C->getValue()->isZero())
2285 AccumulatedConstant += Scale *
C->getAPInt();
2290 for (; i !=
Ops.size(); ++i) {
2300 Add->operands(), NewScale, SE);
2306 auto Pair = M.insert({
Key, NewScale});
2310 Pair.first->second += NewScale;
2318 std::pair<DenseMap<const SCEV *, APInt>::iterator,
bool> Pair =
2319 M.insert({
Ops[i], Scale});
2323 Pair.first->second += Scale;
2342 case Instruction::Add:
2345 case Instruction::Sub:
2348 case Instruction::Mul:
2362 const SCEV *
A = (this->*Extension)(
2364 const SCEV *LHSB = (this->*Extension)(LHS, WideTy, 0);
2365 const SCEV *RHSB = (this->*Extension)(RHS, WideTy, 0);
2373 if (BinOp == Instruction::Mul)
2379 APInt C = RHSC->getAPInt();
2380 unsigned NumBits =
C.getBitWidth();
2381 bool IsSub = (BinOp == Instruction::Sub);
2382 bool IsNegativeConst = (
Signed &&
C.isNegative());
2384 bool OverflowDown = IsSub ^ IsNegativeConst;
2386 if (IsNegativeConst) {
2399 APInt Limit = Min + Magnitude;
2405 APInt Limit = Max - Magnitude;
2410std::optional<SCEV::NoWrapFlags>
2415 return std::nullopt;
2424 bool Deduced =
false;
2426 if (OBO->
getOpcode() != Instruction::Add &&
2429 return std::nullopt;
2438 false, LHS, RHS, CtxI)) {
2445 true, LHS, RHS, CtxI)) {
2452 return std::nullopt;
2462 using namespace std::placeholders;
2469 assert(CanAnalyze &&
"don't call from other places!");
2476 auto IsKnownNonNegative = [&](
const SCEV *S) {
2486 if (SignOrUnsignWrap != SignOrUnsignMask &&
2493 return Instruction::Add;
2495 return Instruction::Mul;
2506 Opcode,
C, OBO::NoSignedWrap);
2514 Opcode,
C, OBO::NoUnsignedWrap);
2524 Ops[0]->isZero() && IsKnownNonNegative(
Ops[1]))
2531 if (UDiv->getOperand(1) ==
Ops[1])
2534 if (UDiv->getOperand(1) ==
Ops[0])
2550 "only nuw or nsw allowed");
2551 assert(!
Ops.empty() &&
"Cannot get empty add!");
2552 if (
Ops.size() == 1)
return Ops[0];
2555 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i)
2557 "SCEVAddExpr operand types don't match!");
2559 Ops, [](
const SCEV *
Op) {
return Op->getType()->isPointerTy(); });
2560 assert(NumPtrs <= 1 &&
"add has at most one pointer operand");
2565 [](
const APInt &C1,
const APInt &C2) {
return C1 + C2; },
2566 [](
const APInt &
C) {
return C.isZero(); },
2567 [](
const APInt &
C) {
return false; });
2580 return getOrCreateAddExpr(
Ops, ComputeFlags(
Ops));
2585 if (
Add->getNoWrapFlags(OrigFlags) != OrigFlags)
2586 Add->setNoWrapFlags(ComputeFlags(
Ops));
2594 bool FoundMatch =
false;
2595 for (
unsigned i = 0, e =
Ops.size(); i != e-1; ++i)
2596 if (
Ops[i] ==
Ops[i+1]) {
2608 --i; e -=
Count - 1;
2618 auto FindTruncSrcType = [&]() ->
Type * {
2624 return T->getOperand()->getType();
2626 const auto *LastOp =
Mul->getOperand(
Mul->getNumOperands() - 1);
2628 return T->getOperand()->getType();
2632 if (
auto *SrcType = FindTruncSrcType()) {
2639 if (
T->getOperand()->getType() != SrcType) {
2648 for (
unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
2651 if (
T->getOperand()->getType() != SrcType) {
2679 if (
Ops.size() == 2) {
2689 auto C2 =
C->getAPInt();
2692 APInt ConstAdd = C1 + C2;
2693 auto AddFlags = AddExpr->getNoWrapFlags();
2734 if (
Ops.size() == 2 &&
2745 if (Idx <
Ops.size()) {
2746 bool DeletedAdd =
false;
2757 Ops.erase(
Ops.begin()+Idx);
2760 CommonFlags =
maskFlags(CommonFlags,
Add->getNoWrapFlags());
2783 struct APIntCompare {
2784 bool operator()(
const APInt &LHS,
const APInt &RHS)
const {
2785 return LHS.ult(RHS);
2792 std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
2793 for (
const SCEV *NewOp : NewOps)
2794 MulOpLists[M.find(NewOp)->second].push_back(NewOp);
2797 if (AccumulatedConstant != 0)
2799 for (
auto &MulOp : MulOpLists) {
2800 if (MulOp.first == 1) {
2802 }
else if (MulOp.first != 0) {
2811 if (
Ops.size() == 1)
2822 for (
unsigned MulOp = 0, e =
Mul->getNumOperands(); MulOp != e; ++MulOp) {
2823 const SCEV *MulOpSCEV =
Mul->getOperand(MulOp);
2826 for (
unsigned AddOp = 0, e =
Ops.size(); AddOp != e; ++AddOp)
2827 if (MulOpSCEV ==
Ops[AddOp]) {
2829 const SCEV *InnerMul =
Mul->getOperand(MulOp == 0);
2830 if (
Mul->getNumOperands() != 2) {
2834 Mul->operands().take_front(MulOp));
2842 if (
Ops.size() == 2)
return OuterMul;
2844 Ops.erase(
Ops.begin()+AddOp);
2845 Ops.erase(
Ops.begin()+Idx-1);
2847 Ops.erase(
Ops.begin()+Idx);
2848 Ops.erase(
Ops.begin()+AddOp-1);
2850 Ops.push_back(OuterMul);
2855 for (
unsigned OtherMulIdx = Idx+1;
2862 OMulOp != e; ++OMulOp)
2863 if (OtherMul->
getOperand(OMulOp) == MulOpSCEV) {
2865 const SCEV *InnerMul1 =
Mul->getOperand(MulOp == 0);
2866 if (
Mul->getNumOperands() != 2) {
2868 Mul->operands().take_front(MulOp));
2875 OtherMul->
operands().take_front(OMulOp));
2880 const SCEV *InnerMulSum =
2884 if (
Ops.size() == 2)
return OuterMul;
2885 Ops.erase(
Ops.begin()+Idx);
2886 Ops.erase(
Ops.begin()+OtherMulIdx-1);
2887 Ops.push_back(OuterMul);
2907 for (
unsigned i = 0, e =
Ops.size(); i != e; ++i)
2910 Ops.erase(
Ops.begin()+i);
2915 if (!LIOps.
empty()) {
2940 auto *DefI = getDefiningScopeBound(LIOps);
2942 if (!isGuaranteedToTransferExecutionTo(DefI, ReachI))
2954 if (
Ops.size() == 1)
return NewRec;
2957 for (
unsigned i = 0;; ++i)
2958 if (
Ops[i] == AddRec) {
2968 for (
unsigned OtherIdx = Idx+1;
2976 "AddRecExprs are not sorted in reverse dominance order?");
2983 if (OtherAddRec->getLoop() == AddRecLoop) {
2984 for (
unsigned i = 0, e = OtherAddRec->getNumOperands();
2986 if (i >= AddRecOps.
size()) {
2987 append_range(AddRecOps, OtherAddRec->operands().drop_front(i));
2991 AddRecOps[i], OtherAddRec->getOperand(i)};
2994 Ops.erase(
Ops.begin() + OtherIdx); --OtherIdx;
3009 return getOrCreateAddExpr(
Ops, ComputeFlags(
Ops));
3021 static_cast<SCEVAddExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
3023 const SCEV **O = SCEVAllocator.Allocate<
const SCEV *>(
Ops.size());
3025 S =
new (SCEVAllocator)
3027 UniqueSCEVs.InsertNode(S, IP);
3037 FoldingSetNodeID
ID;
3039 for (
const SCEV *
Op :
Ops)
3044 static_cast<SCEVAddRecExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
3046 const SCEV **
O = SCEVAllocator.Allocate<
const SCEV *>(
Ops.size());
3048 S =
new (SCEVAllocator)
3049 SCEVAddRecExpr(
ID.Intern(SCEVAllocator), O,
Ops.size(), L);
3050 UniqueSCEVs.InsertNode(S, IP);
3051 LoopUsers[
L].push_back(S);
3061 FoldingSetNodeID
ID;
3063 for (
const SCEV *
Op :
Ops)
3067 static_cast<SCEVMulExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
3069 const SCEV **
O = SCEVAllocator.Allocate<
const SCEV *>(
Ops.size());
3071 S =
new (SCEVAllocator) SCEVMulExpr(
ID.Intern(SCEVAllocator),
3073 UniqueSCEVs.InsertNode(S, IP);
3082 if (j > 1 && k / j != i) Overflow =
true;
3098 if (n == 0 || n == k)
return 1;
3099 if (k > n)
return 0;
3105 for (
uint64_t i = 1; i <= k; ++i) {
3106 r =
umul_ov(r, n-(i-1), Overflow);
3115 struct FindConstantInAddMulChain {
3116 bool FoundConstant =
false;
3118 bool follow(
const SCEV *S) {
3123 bool isDone()
const {
3124 return FoundConstant;
3128 FindConstantInAddMulChain
F;
3130 ST.visitAll(StartExpr);
3131 return F.FoundConstant;
3139 "only nuw or nsw allowed");
3140 assert(!
Ops.empty() &&
"Cannot get empty mul!");
3141 if (
Ops.size() == 1)
return Ops[0];
3143 Type *ETy =
Ops[0]->getType();
3145 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i)
3147 "SCEVMulExpr operand types don't match!");
3152 [](
const APInt &C1,
const APInt &C2) {
return C1 * C2; },
3153 [](
const APInt &
C) {
return C.isOne(); },
3154 [](
const APInt &
C) {
return C.isZero(); });
3165 return getOrCreateMulExpr(
Ops, ComputeFlags(
Ops));
3170 if (
Mul->getNoWrapFlags(OrigFlags) != OrigFlags)
3171 Mul->setNoWrapFlags(ComputeFlags(
Ops));
3176 if (
Ops.size() == 2) {
3184 const SCEV *Op0, *Op1;
3192 if (
Ops[0]->isAllOnesValue()) {
3197 bool AnyFolded =
false;
3198 for (
const SCEV *AddOp :
Add->operands()) {
3225 AddRec->getNoWrapFlags(FlagsMask));
3248 APInt C1V = LHSC->getAPInt();
3258 const SCEV *NewMul =
nullptr;
3262 assert(C1V.
ugt(1) &&
"C1 <= 1 should have been folded earlier");
3277 if (Idx <
Ops.size()) {
3278 bool DeletedMul =
false;
3284 Ops.erase(
Ops.begin()+Idx);
3308 for (
unsigned i = 0, e =
Ops.size(); i != e; ++i)
3311 Ops.erase(
Ops.begin()+i);
3316 if (!LIOps.
empty()) {
3329 for (
unsigned i = 0, e = AddRec->
getNumOperands(); i != e; ++i) {
3345 if (
Ops.size() == 1)
return NewRec;
3348 for (
unsigned i = 0;; ++i)
3349 if (
Ops[i] == AddRec) {
3370 bool OpsModified =
false;
3371 for (
unsigned OtherIdx = Idx+1;
3385 bool Overflow =
false;
3392 for (
int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
3396 z < ze && !Overflow; ++z) {
3399 if (LargerThan64Bits)
3400 Coeff =
umul_ov(Coeff1, Coeff2, Overflow);
3402 Coeff = Coeff1*Coeff2;
3417 if (
Ops.size() == 2)
return NewAddRec;
3418 Ops[Idx] = NewAddRec;
3419 Ops.erase(
Ops.begin() + OtherIdx); --OtherIdx;
3435 return getOrCreateMulExpr(
Ops, ComputeFlags(
Ops));
3443 "SCEVURemExpr operand types don't match!");
3448 if (RHSC->getValue()->isOne())
3449 return getZero(LHS->getType());
3452 if (RHSC->getAPInt().isPowerOf2()) {
3453 Type *FullTy = LHS->getType();
3470 assert(!LHS->getType()->isPointerTy() &&
3471 "SCEVUDivExpr operand can't be pointer!");
3472 assert(LHS->getType() == RHS->getType() &&
3473 "SCEVUDivExpr operand types don't match!");
3480 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
3488 if (RHSC->getValue()->isOne())
3493 if (!RHSC->getValue()->isZero()) {
3497 Type *Ty = LHS->getType();
3498 unsigned LZ = RHSC->getAPInt().countl_zero();
3502 if (!RHSC->getAPInt().isPowerOf2())
3510 const APInt &StepInt = Step->getAPInt();
3511 const APInt &DivInt = RHSC->getAPInt();
3512 if (!StepInt.
urem(DivInt) &&
3518 for (
const SCEV *
Op : AR->operands())
3524 const APInt *StartRem;
3537 bool CanFoldWithWrap = StepInt.
ule(DivInt) &&
3541 const SCEV *NewStart =
3543 if (*StartRem != 0 && (NoWrap || CanFoldWithWrap) &&
3545 const SCEV *NewLHS =
3548 if (LHS != NewLHS) {
3558 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
3567 for (
const SCEV *
Op : M->operands())
3571 for (
unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
3572 const SCEV *
Op = M->getOperand(i);
3584 if (
auto *DivisorConstant =
3586 bool Overflow =
false;
3588 DivisorConstant->getAPInt().
umul_ov(RHSC->getAPInt(), Overflow);
3599 for (
const SCEV *
Op :
A->operands())
3603 for (
unsigned i = 0, e =
A->getNumOperands(); i != e; ++i) {
3610 if (Operands.
size() ==
A->getNumOperands())
3617 return getConstant(LHSC->getAPInt().udiv(RHSC->getAPInt()));
3627 return getZero(LHS->getType());
3631 const SCEV *NewLHS, *NewRHS;
3639 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
3642 UniqueSCEVs.InsertNode(S, IP);
3672 if (!
Mul || !
Mul->hasNoUnsignedWrap())
3679 if (LHSCst == RHSCst) {
3687 APInt Factor =
gcd(LHSCst, RHSCst);
3705 for (
int i = 0, e =
Mul->getNumOperands(); i != e; ++i) {
3706 if (
Mul->getOperand(i) == RHS) {
3725 if (StepChrec->getLoop() == L) {
3739 if (Operands.
size() == 1)
return Operands[0];
3744 "SCEVAddRecExpr operand types don't match!");
3745 assert(!
Op->getType()->isPointerTy() &&
"Step must be integer");
3747 for (
const SCEV *
Op : Operands)
3749 "SCEVAddRecExpr operand is not available at loop entry!");
3752 if (Operands.
back()->isZero()) {
3767 const Loop *NestedLoop = NestedAR->getLoop();
3768 if (L->contains(NestedLoop)
3771 DT.dominates(L->getHeader(), NestedLoop->
getHeader()))) {
3773 Operands[0] = NestedAR->getStart();
3777 bool AllInvariant =
all_of(
3789 AllInvariant =
all_of(NestedOperands, [&](
const SCEV *
Op) {
3800 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
3804 Operands[0] = NestedAR;
3810 return getOrCreateAddRecExpr(Operands, L, Flags);
3826 if (!GEPI || !isSCEVExprNeverPoison(GEPI))
3830 return getGEPExpr(BaseExpr, IndexExprs,
GEP->getSourceElementType(), NW);
3844 bool FirstIter =
true;
3846 for (
const SCEV *IndexExpr : IndexExprs) {
3853 Offsets.push_back(FieldOffset);
3856 CurTy = STy->getTypeAtIndex(Index);
3861 "The first index of a GEP indexes a pointer");
3862 CurTy = SrcElementTy;
3873 const SCEV *LocalOffset =
getMulExpr(IndexExpr, ElementSize, OffsetWrap);
3874 Offsets.push_back(LocalOffset);
3879 if (Offsets.empty())
3892 "GEP should not change type mid-flight.");
3896SCEV *ScalarEvolution::findExistingSCEVInCache(
SCEVTypes SCEVType,
3899 ID.AddInteger(SCEVType);
3903 return UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
3913 assert(SCEVMinMaxExpr::isMinMaxType(Kind) &&
"Not a SCEVMinMaxExpr!");
3914 assert(!
Ops.empty() &&
"Cannot get empty (u|s)(min|max)!");
3915 if (
Ops.size() == 1)
return Ops[0];
3918 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i) {
3920 "Operand types don't match!");
3923 "min/max should be consistently pointerish");
3949 return IsSigned ?
C.isMinSignedValue() :
C.isMinValue();
3951 return IsSigned ?
C.isMaxSignedValue() :
C.isMaxValue();
3956 return IsSigned ?
C.isMaxSignedValue() :
C.isMaxValue();
3958 return IsSigned ?
C.isMinSignedValue() :
C.isMinValue();
3964 if (
const SCEV *S = findExistingSCEVInCache(Kind,
Ops)) {
3970 while (Idx <
Ops.size() &&
Ops[Idx]->getSCEVType() < Kind)
3975 if (Idx <
Ops.size()) {
3976 bool DeletedAny =
false;
3977 while (
Ops[Idx]->getSCEVType() == Kind) {
3979 Ops.erase(
Ops.begin()+Idx);
3997 for (
unsigned i = 0, e =
Ops.size() - 1; i != e; ++i) {
3998 if (
Ops[i] ==
Ops[i + 1] ||
3999 isKnownViaNonRecursiveReasoning(FirstPred,
Ops[i],
Ops[i + 1])) {
4002 Ops.erase(
Ops.begin() + i + 1,
Ops.begin() + i + 2);
4005 }
else if (isKnownViaNonRecursiveReasoning(SecondPred,
Ops[i],
4008 Ops.erase(
Ops.begin() + i,
Ops.begin() + i + 1);
4014 if (
Ops.size() == 1)
return Ops[0];
4016 assert(!
Ops.empty() &&
"Reduced smax down to nothing!");
4021 ID.AddInteger(Kind);
4025 const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
4027 return ExistingSCEV;
4028 const SCEV **O = SCEVAllocator.Allocate<
const SCEV *>(
Ops.size());
4030 SCEV *S =
new (SCEVAllocator)
4033 UniqueSCEVs.InsertNode(S, IP);
4040class SCEVSequentialMinMaxDeduplicatingVisitor final
4041 :
public SCEVVisitor<SCEVSequentialMinMaxDeduplicatingVisitor,
4042 std::optional<const SCEV *>> {
4043 using RetVal = std::optional<const SCEV *>;
4051 bool canRecurseInto(
SCEVTypes Kind)
const {
4054 return RootKind == Kind || NonSequentialRootKind == Kind;
4057 RetVal visitAnyMinMaxExpr(
const SCEV *S) {
4059 "Only for min/max expressions.");
4062 if (!canRecurseInto(Kind))
4072 return std::nullopt;
4079 RetVal
visit(
const SCEV *S) {
4081 if (!SeenOps.
insert(S).second)
4082 return std::nullopt;
4083 return Base::visit(S);
4087 SCEVSequentialMinMaxDeduplicatingVisitor(ScalarEvolution &SE,
4089 : SE(SE), RootKind(RootKind),
4090 NonSequentialRootKind(
4091 SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType(
4095 SmallVectorImpl<const SCEV *> &NewOps) {
4100 for (
const SCEV *
Op : OrigOps) {
4105 Ops.emplace_back(*NewOp);
4109 NewOps = std::move(
Ops);
4113 RetVal visitConstant(
const SCEVConstant *Constant) {
return Constant; }
4115 RetVal visitVScale(
const SCEVVScale *VScale) {
return VScale; }
4117 RetVal visitPtrToAddrExpr(
const SCEVPtrToAddrExpr *Expr) {
return Expr; }
4119 RetVal visitPtrToIntExpr(
const SCEVPtrToIntExpr *Expr) {
return Expr; }
4121 RetVal visitTruncateExpr(
const SCEVTruncateExpr *Expr) {
return Expr; }
4123 RetVal visitZeroExtendExpr(
const SCEVZeroExtendExpr *Expr) {
return Expr; }
4125 RetVal visitSignExtendExpr(
const SCEVSignExtendExpr *Expr) {
return Expr; }
4127 RetVal visitAddExpr(
const SCEVAddExpr *Expr) {
return Expr; }
4129 RetVal visitMulExpr(
const SCEVMulExpr *Expr) {
return Expr; }
4131 RetVal visitUDivExpr(
const SCEVUDivExpr *Expr) {
return Expr; }
4133 RetVal visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
return Expr; }
4135 RetVal visitSMaxExpr(
const SCEVSMaxExpr *Expr) {
4136 return visitAnyMinMaxExpr(Expr);
4139 RetVal visitUMaxExpr(
const SCEVUMaxExpr *Expr) {
4140 return visitAnyMinMaxExpr(Expr);
4143 RetVal visitSMinExpr(
const SCEVSMinExpr *Expr) {
4144 return visitAnyMinMaxExpr(Expr);
4147 RetVal visitUMinExpr(
const SCEVUMinExpr *Expr) {
4148 return visitAnyMinMaxExpr(Expr);
4151 RetVal visitSequentialUMinExpr(
const SCEVSequentialUMinExpr *Expr) {
4152 return visitAnyMinMaxExpr(Expr);
4155 RetVal visitUnknown(
const SCEVUnknown *Expr) {
return Expr; }
4157 RetVal visitCouldNotCompute(
const SCEVCouldNotCompute *Expr) {
return Expr; }
4200struct SCEVPoisonCollector {
4201 bool LookThroughMaybePoisonBlocking;
4202 SmallPtrSet<const SCEVUnknown *, 4> MaybePoison;
4203 SCEVPoisonCollector(
bool LookThroughMaybePoisonBlocking)
4204 : LookThroughMaybePoisonBlocking(LookThroughMaybePoisonBlocking) {}
4206 bool follow(
const SCEV *S) {
4207 if (!LookThroughMaybePoisonBlocking &&
4217 bool isDone()
const {
return false; }
4227 SCEVPoisonCollector PC1(
true);
4232 if (PC1.MaybePoison.empty())
4238 SCEVPoisonCollector PC2(
false);
4248 SCEVPoisonCollector PC(
false);
4271 while (!Worklist.
empty()) {
4273 if (!Visited.
insert(V).second)
4277 if (Visited.
size() > 16)
4282 if (PoisonVals.
contains(V) || ::isGuaranteedNotToBePoison(V))
4293 if (PDI->isDisjoint())
4300 II &&
II->getIntrinsicID() == Intrinsic::vscale)
4307 if (
I->hasPoisonGeneratingAnnotations())
4318 assert(SCEVSequentialMinMaxExpr::isSequentialMinMaxType(Kind) &&
4319 "Not a SCEVSequentialMinMaxExpr!");
4320 assert(!
Ops.empty() &&
"Cannot get empty (u|s)(min|max)!");
4321 if (
Ops.size() == 1)
4325 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i) {
4327 "Operand types don't match!");
4330 "min/max should be consistently pointerish");
4338 if (
const SCEV *S = findExistingSCEVInCache(Kind,
Ops))
4345 SCEVSequentialMinMaxDeduplicatingVisitor Deduplicator(*
this, Kind);
4355 bool DeletedAny =
false;
4356 while (Idx <
Ops.size()) {
4357 if (
Ops[Idx]->getSCEVType() != Kind) {
4362 Ops.erase(
Ops.begin() + Idx);
4363 Ops.insert(
Ops.begin() + Idx, SMME->operands().begin(),
4364 SMME->operands().end());
4372 const SCEV *SaturationPoint;
4383 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i) {
4384 if (!isGuaranteedNotToCauseUB(
Ops[i]))
4396 Ops.erase(
Ops.begin() + i);
4401 if (isKnownViaNonRecursiveReasoning(Pred,
Ops[i - 1],
Ops[i])) {
4402 Ops.erase(
Ops.begin() + i);
4410 ID.AddInteger(Kind);
4414 const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
4416 return ExistingSCEV;
4418 const SCEV **O = SCEVAllocator.Allocate<
const SCEV *>(
Ops.size());
4420 SCEV *S =
new (SCEVAllocator)
4423 UniqueSCEVs.InsertNode(S, IP);
4471 if (
Size.isScalable())
4492 "Cannot get offset for structure containing scalable vector types");
4506 if (
SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP)) {
4508 "Stale SCEVUnknown in uniquing map!");
4514 UniqueSCEVs.InsertNode(S, IP);
4528 return Ty->isIntOrPtrTy();
4535 if (Ty->isPointerTy())
4546 if (Ty->isIntegerTy())
4550 assert(Ty->isPointerTy() &&
"Unexpected non-pointer non-integer type!");
4562 bool PreciseA, PreciseB;
4563 auto *ScopeA = getDefiningScopeBound({
A}, PreciseA);
4564 auto *ScopeB = getDefiningScopeBound({
B}, PreciseB);
4565 if (!PreciseA || !PreciseB)
4568 return (ScopeA == ScopeB) || DT.dominates(ScopeA, ScopeB) ||
4569 DT.dominates(ScopeB, ScopeA);
4573 return CouldNotCompute.get();
4576bool ScalarEvolution::checkValidity(
const SCEV *S)
const {
4579 return SU && SU->getValue() ==
nullptr;
4582 return !ContainsNulls;
4587 if (
I != HasRecMap.end())
4592 HasRecMap.insert({S, FoundAddRec});
4600 if (
SI == ExprValueMap.
end())
4602 return SI->second.getArrayRef();
4608void ScalarEvolution::eraseValueFromMap(
Value *V) {
4610 if (
I != ValueExprMap.end()) {
4611 auto EVIt = ExprValueMap.find(
I->second);
4612 bool Removed = EVIt->second.remove(V);
4614 assert(Removed &&
"Value not in ExprValueMap?");
4615 ValueExprMap.erase(
I);
4619void ScalarEvolution::insertValueToMap(
Value *V,
const SCEV *S) {
4623 auto It = ValueExprMap.find_as(V);
4624 if (It == ValueExprMap.end()) {
4626 ExprValueMap[S].insert(V);
4637 return createSCEVIter(V);
4644 if (
I != ValueExprMap.end()) {
4645 const SCEV *S =
I->second;
4646 assert(checkValidity(S) &&
4647 "existing SCEV has not been properly invalidated");
4660 Type *Ty = V->getType();
4676 assert(!V->getType()->isPointerTy() &&
"Can't negate pointer");
4689 return (
const SCEV *)
nullptr;
4695 if (
const SCEV *Replaced = MatchMinMaxNegation(MME))
4699 Type *Ty = V->getType();
4705 assert(
P->getType()->isPointerTy());
4718 const SCEV **PtrOp =
nullptr;
4719 for (
const SCEV *&AddOp :
Ops) {
4720 if (AddOp->getType()->isPointerTy()) {
4721 assert(!PtrOp &&
"Cannot have multiple pointer ops");
4739 return getZero(LHS->getType());
4744 if (RHS->getType()->isPointerTy()) {
4745 if (!LHS->getType()->isPointerTy() ||
4755 const bool RHSIsNotMinSigned =
4786 Type *SrcTy = V->getType();
4787 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4788 "Cannot truncate or zero extend with non-integer arguments!");
4798 Type *SrcTy = V->getType();
4799 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4800 "Cannot truncate or zero extend with non-integer arguments!");
4810 Type *SrcTy = V->getType();
4811 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4812 "Cannot noop or zero extend with non-integer arguments!");
4814 "getNoopOrZeroExtend cannot truncate!");
4822 Type *SrcTy = V->getType();
4823 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4824 "Cannot noop or sign extend with non-integer arguments!");
4826 "getNoopOrSignExtend cannot truncate!");
4834 Type *SrcTy = V->getType();
4835 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4836 "Cannot noop or any extend with non-integer arguments!");
4838 "getNoopOrAnyExtend cannot truncate!");
4846 Type *SrcTy = V->getType();
4847 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4848 "Cannot truncate or noop with non-integer arguments!");
4850 "getTruncateOrNoop cannot extend!");
4858 const SCEV *PromotedLHS = LHS;
4859 const SCEV *PromotedRHS = RHS;
4879 assert(!
Ops.empty() &&
"At least one operand must be!");
4881 if (
Ops.size() == 1)
4885 Type *MaxType =
nullptr;
4886 for (
const auto *S :
Ops)
4891 assert(MaxType &&
"Failed to find maximum type!");
4895 for (
const auto *S :
Ops)
4904 if (!V->getType()->isPointerTy())
4909 V = AddRec->getStart();
4911 const SCEV *PtrOp =
nullptr;
4912 for (
const SCEV *AddOp :
Add->operands()) {
4913 if (AddOp->getType()->isPointerTy()) {
4914 assert(!PtrOp &&
"Cannot have multiple pointer ops");
4918 assert(PtrOp &&
"Must have pointer op");
4930 for (
User *U :
I->users()) {
4932 if (Visited.
insert(UserInsn).second)
4946 static const SCEV *rewrite(
const SCEV *S,
const Loop *L, ScalarEvolution &SE,
4947 bool IgnoreOtherLoops =
true) {
4950 if (
Rewriter.hasSeenLoopVariantSCEVUnknown())
4952 return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
4957 const SCEV *visitUnknown(
const SCEVUnknown *Expr) {
4959 SeenLoopVariantSCEVUnknown =
true;
4963 const SCEV *visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
4967 SeenOtherLoops =
true;
4971 bool hasSeenLoopVariantSCEVUnknown() {
return SeenLoopVariantSCEVUnknown; }
4973 bool hasSeenOtherLoops() {
return SeenOtherLoops; }
4976 explicit SCEVInitRewriter(
const Loop *L, ScalarEvolution &SE)
4977 : SCEVRewriteVisitor(SE),
L(
L) {}
4980 bool SeenLoopVariantSCEVUnknown =
false;
4981 bool SeenOtherLoops =
false;
4990 static const SCEV *rewrite(
const SCEV *S,
const Loop *L, ScalarEvolution &SE) {
4991 SCEVPostIncRewriter
Rewriter(L, SE);
4993 return Rewriter.hasSeenLoopVariantSCEVUnknown()
4998 const SCEV *visitUnknown(
const SCEVUnknown *Expr) {
5000 SeenLoopVariantSCEVUnknown =
true;
5004 const SCEV *visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
5008 SeenOtherLoops =
true;
5012 bool hasSeenLoopVariantSCEVUnknown() {
return SeenLoopVariantSCEVUnknown; }
5014 bool hasSeenOtherLoops() {
return SeenOtherLoops; }
5017 explicit SCEVPostIncRewriter(
const Loop *L, ScalarEvolution &SE)
5018 : SCEVRewriteVisitor(SE),
L(
L) {}
5021 bool SeenLoopVariantSCEVUnknown =
false;
5022 bool SeenOtherLoops =
false;
5028class SCEVBackedgeConditionFolder
5031 static const SCEV *rewrite(
const SCEV *S,
const Loop *L,
5032 ScalarEvolution &SE) {
5033 bool IsPosBECond =
false;
5034 Value *BECond =
nullptr;
5035 if (BasicBlock *Latch =
L->getLoopLatch()) {
5039 "Both outgoing branches should not target same header!");
5046 SCEVBackedgeConditionFolder
Rewriter(L, BECond, IsPosBECond, SE);
5050 const SCEV *visitUnknown(
const SCEVUnknown *Expr) {
5051 const SCEV *
Result = Expr;
5056 switch (
I->getOpcode()) {
5057 case Instruction::Select: {
5059 std::optional<const SCEV *> Res =
5060 compareWithBackedgeCondition(
SI->getCondition());
5068 std::optional<const SCEV *> Res = compareWithBackedgeCondition(
I);
5079 explicit SCEVBackedgeConditionFolder(
const Loop *L,
Value *BECond,
5080 bool IsPosBECond, ScalarEvolution &SE)
5081 : SCEVRewriteVisitor(SE),
L(
L), BackedgeCond(BECond),
5082 IsPositiveBECond(IsPosBECond) {}
5084 std::optional<const SCEV *> compareWithBackedgeCondition(
Value *IC);
5088 Value *BackedgeCond =
nullptr;
5090 bool IsPositiveBECond;
5093std::optional<const SCEV *>
5094SCEVBackedgeConditionFolder::compareWithBackedgeCondition(
Value *IC) {
5099 if (BackedgeCond == IC)
5102 return std::nullopt;
5107 static const SCEV *rewrite(
const SCEV *S,
const Loop *L,
5108 ScalarEvolution &SE) {
5114 const SCEV *visitUnknown(
const SCEVUnknown *Expr) {
5121 const SCEV *visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
5131 explicit SCEVShiftRewriter(
const Loop *L, ScalarEvolution &SE)
5132 : SCEVRewriteVisitor(SE),
L(
L) {}
5141ScalarEvolution::proveNoWrapViaConstantRanges(
const SCEVAddRecExpr *AR) {
5145 using OBO = OverflowingBinaryOperator;
5153 const APInt &BECountAP = BECountMax->getAPInt();
5154 unsigned NoOverflowBitWidth =
5166 Instruction::Add, IncRange, OBO::NoSignedWrap);
5167 if (NSWRegion.contains(AddRecRange))
5176 Instruction::Add, IncRange, OBO::NoUnsignedWrap);
5177 if (NUWRegion.contains(AddRecRange))
5185ScalarEvolution::proveNoSignedWrapViaInduction(
const SCEVAddRecExpr *AR) {
5195 if (!SignedWrapViaInductionTried.insert(AR).second)
5220 AC.assumptions().empty())
5228 const SCEV *OverflowLimit =
5230 if (OverflowLimit &&
5238ScalarEvolution::proveNoUnsignedWrapViaInduction(
const SCEVAddRecExpr *AR) {
5248 if (!UnsignedWrapViaInductionTried.insert(AR).second)
5274 AC.assumptions().empty())
5309 explicit BinaryOp(Operator *
Op)
5313 IsNSW = OBO->hasNoSignedWrap();
5314 IsNUW = OBO->hasNoUnsignedWrap();
5318 explicit BinaryOp(
unsigned Opcode,
Value *
LHS,
Value *
RHS,
bool IsNSW =
false,
5320 : Opcode(Opcode),
LHS(
LHS),
RHS(
RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
5332 return std::nullopt;
5338 switch (
Op->getOpcode()) {
5339 case Instruction::Add:
5340 case Instruction::Sub:
5341 case Instruction::Mul:
5342 case Instruction::UDiv:
5343 case Instruction::URem:
5344 case Instruction::And:
5345 case Instruction::AShr:
5346 case Instruction::Shl:
5347 return BinaryOp(
Op);
5349 case Instruction::Or: {
5352 return BinaryOp(Instruction::Add,
Op->getOperand(0),
Op->getOperand(1),
5354 return BinaryOp(
Op);
5357 case Instruction::Xor:
5361 if (RHSC->getValue().isSignMask())
5362 return BinaryOp(Instruction::Add,
Op->getOperand(0),
Op->getOperand(1));
5364 if (V->getType()->isIntegerTy(1))
5365 return BinaryOp(Instruction::Add,
Op->getOperand(0),
Op->getOperand(1));
5366 return BinaryOp(
Op);
5368 case Instruction::LShr:
5377 if (SA->getValue().ult(
BitWidth)) {
5379 ConstantInt::get(SA->getContext(),
5381 return BinaryOp(Instruction::UDiv,
Op->getOperand(0),
X);
5384 return BinaryOp(
Op);
5386 case Instruction::ExtractValue: {
5388 if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
5396 bool Signed = WO->isSigned();
5399 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS());
5404 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(),
5415 if (
II->getIntrinsicID() == Intrinsic::loop_decrement_reg)
5416 return BinaryOp(Instruction::Sub,
II->getOperand(0),
II->getOperand(1));
5418 return std::nullopt;
5444 if (
Op == SymbolicPHI)
5449 if (SourceBits != NewBits)
5467 if (!L || L->getHeader() != PN->
getParent())
5525std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5526ScalarEvolution::createAddRecFromPHIWithCastsImpl(
const SCEVUnknown *SymbolicPHI) {
5534 assert(L &&
"Expecting an integer loop header phi");
5539 Value *BEValueV =
nullptr, *StartValueV =
nullptr;
5540 for (
unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
5541 Value *
V = PN->getIncomingValue(i);
5542 if (
L->contains(PN->getIncomingBlock(i))) {
5545 }
else if (BEValueV != V) {
5549 }
else if (!StartValueV) {
5551 }
else if (StartValueV != V) {
5552 StartValueV =
nullptr;
5556 if (!BEValueV || !StartValueV)
5557 return std::nullopt;
5559 const SCEV *BEValue =
getSCEV(BEValueV);
5566 return std::nullopt;
5570 unsigned FoundIndex =
Add->getNumOperands();
5571 Type *TruncTy =
nullptr;
5573 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5576 if (FoundIndex == e) {
5581 if (FoundIndex ==
Add->getNumOperands())
5582 return std::nullopt;
5586 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5587 if (i != FoundIndex)
5588 Ops.push_back(
Add->getOperand(i));
5594 return std::nullopt;
5647 const SCEV *StartVal =
getSCEV(StartValueV);
5648 const SCEV *PHISCEV =
5675 auto getExtendedExpr = [&](
const SCEV *Expr,
5676 bool CreateSignExtend) ->
const SCEV * {
5679 const SCEV *ExtendedExpr =
5682 return ExtendedExpr;
5690 auto PredIsKnownFalse = [&](
const SCEV *Expr,
5691 const SCEV *ExtendedExpr) ->
bool {
5692 return Expr != ExtendedExpr &&
5696 const SCEV *StartExtended = getExtendedExpr(StartVal,
Signed);
5697 if (PredIsKnownFalse(StartVal, StartExtended)) {
5699 return std::nullopt;
5704 const SCEV *AccumExtended = getExtendedExpr(Accum,
true);
5705 if (PredIsKnownFalse(Accum, AccumExtended)) {
5707 return std::nullopt;
5710 auto AppendPredicate = [&](
const SCEV *Expr,
5711 const SCEV *ExtendedExpr) ->
void {
5712 if (Expr != ExtendedExpr &&
5720 AppendPredicate(StartVal, StartExtended);
5721 AppendPredicate(Accum, AccumExtended);
5729 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
5730 std::make_pair(NewAR, Predicates);
5732 PredicatedSCEVRewrites[{SymbolicPHI,
L}] = PredRewrite;
5736std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5741 return std::nullopt;
5744 auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
5745 if (
I != PredicatedSCEVRewrites.end()) {
5746 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
5749 if (Rewrite.first == SymbolicPHI)
5750 return std::nullopt;
5754 assert(!(Rewrite.second).empty() &&
"Expected to find Predicates");
5758 std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5759 Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);
5764 PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
5765 return std::nullopt;
5782 auto areExprsEqual = [&](
const SCEV *Expr1,
const SCEV *Expr2) ->
bool {
5783 if (Expr1 != Expr2 &&
5784 !Preds->implies(SE.getEqualPredicate(Expr1, Expr2), SE) &&
5785 !Preds->implies(SE.getEqualPredicate(Expr2, Expr1), SE))
5802const SCEV *ScalarEvolution::createSimpleAffineAddRec(
PHINode *PN,
5804 Value *StartValueV) {
5807 assert(BEValueV && StartValueV);
5813 if (BO->Opcode != Instruction::Add)
5816 const SCEV *Accum =
nullptr;
5817 if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
5819 else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
5833 insertValueToMap(PN, PHISCEV);
5838 proveNoWrapViaConstantRanges(AR)));
5846 "Accum is defined outside L, but is not invariant?");
5847 if (isAddRecNeverPoison(BEInst, L))
5854const SCEV *ScalarEvolution::createAddRecFromPHI(
PHINode *PN) {
5855 const Loop *
L = LI.getLoopFor(PN->
getParent());
5862 Value *BEValueV =
nullptr, *StartValueV =
nullptr;
5868 }
else if (BEValueV != V) {
5872 }
else if (!StartValueV) {
5874 }
else if (StartValueV != V) {
5875 StartValueV =
nullptr;
5879 if (!BEValueV || !StartValueV)
5882 assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
5883 "PHI node already processed?");
5887 if (
auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
5892 insertValueToMap(PN, SymbolicName);
5896 const SCEV *BEValue =
getSCEV(BEValueV);
5906 unsigned FoundIndex =
Add->getNumOperands();
5907 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5908 if (
Add->getOperand(i) == SymbolicName)
5909 if (FoundIndex == e) {
5914 if (FoundIndex !=
Add->getNumOperands()) {
5917 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5918 if (i != FoundIndex)
5919 Ops.push_back(SCEVBackedgeConditionFolder::rewrite(
Add->getOperand(i),
5931 if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
5938 if (
GEP->getOperand(0) == PN) {
5939 GEPNoWrapFlags NW =
GEP->getNoWrapFlags();
5957 const SCEV *StartVal =
getSCEV(StartValueV);
5958 const SCEV *PHISCEV =
getAddRecExpr(StartVal, Accum, L, Flags);
5963 forgetMemoizedResults(SymbolicName);
5964 insertValueToMap(PN, PHISCEV);
5969 proveNoWrapViaConstantRanges(AR)));
5994 const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *
this);
5995 const SCEV *
Start = SCEVInitRewriter::rewrite(Shifted, L, *
this,
false);
5997 isGuaranteedNotToCauseUB(Shifted) &&
::impliesPoison(Shifted, Start)) {
5998 const SCEV *StartVal =
getSCEV(StartValueV);
5999 if (Start == StartVal) {
6003 forgetMemoizedResults(SymbolicName);
6004 insertValueToMap(PN, Shifted);
6014 eraseValueFromMap(PN);
6034 Use &LeftUse =
Merge->getOperandUse(0);
6035 Use &RightUse =
Merge->getOperandUse(1);
6052const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(
PHINode *PN) {
6054 [&](
BasicBlock *BB) {
return DT.isReachableFromEntry(BB); };
6069 assert(IDom &&
"At least the entry block should dominate PN");
6078 return createNodeForSelectOrPHI(PN,
Cond,
LHS,
RHS);
6094ScalarEvolution::createNodeForPHIWithIdenticalOperands(
PHINode *PN) {
6095 BinaryOperator *CommonInst =
nullptr;
6106 CommonInst = IncomingInst;
6113 const SCEV *CommonSCEV =
getSCEV(CommonInst);
6114 bool SCEVExprsIdentical =
6116 [
this, CommonSCEV](
Value *V) { return CommonSCEV == getSCEV(V); });
6117 return SCEVExprsIdentical ? CommonSCEV :
nullptr;
6120const SCEV *ScalarEvolution::createNodeForPHI(
PHINode *PN) {
6121 if (
const SCEV *S = createAddRecFromPHI(PN))
6131 if (
const SCEV *S = createNodeForPHIWithIdenticalOperands(PN))
6134 if (
const SCEV *S = createNodeFromSelectLikePHI(PN))
6143 struct FindClosure {
6144 const SCEV *OperandToFind;
6150 bool canRecurseInto(
SCEVTypes Kind)
const {
6153 return RootKind == Kind || NonSequentialRootKind == Kind ||
6158 : OperandToFind(OperandToFind), RootKind(RootKind),
6159 NonSequentialRootKind(
6163 bool follow(
const SCEV *S) {
6164 Found = S == OperandToFind;
6166 return !isDone() && canRecurseInto(S->
getSCEVType());
6169 bool isDone()
const {
return Found; }
6172 FindClosure FC(OperandToFind, RootKind);
6177std::optional<const SCEV *>
6178ScalarEvolution::createNodeForSelectOrPHIInstWithICmpInstCond(
Type *Ty,
6188 switch (ICI->getPredicate()) {
6202 bool Signed = ICI->isSigned();
6203 const SCEV *LA =
getSCEV(TrueVal);
6211 if (LA == LS &&
RA == RS)
6213 if (LA == RS &&
RA == LS)
6216 auto CoerceOperand = [&](
const SCEV *
Op) ->
const SCEV * {
6217 if (
Op->getType()->isPointerTy()) {
6228 LS = CoerceOperand(LS);
6229 RS = CoerceOperand(RS);
6253 const SCEV *TrueValExpr =
getSCEV(TrueVal);
6254 const SCEV *FalseValExpr =
getSCEV(FalseVal);
6268 X = ZExt->getOperand();
6270 const SCEV *FalseValExpr =
getSCEV(FalseVal);
6281 return std::nullopt;
6284static std::optional<const SCEV *>
6286 const SCEV *TrueExpr,
const SCEV *FalseExpr) {
6290 "Unexpected operands of a select.");
6302 return std::nullopt;
6317static std::optional<const SCEV *>
6321 return std::nullopt;
6324 const auto *SETrue = SE->
getSCEV(TrueVal);
6325 const auto *SEFalse = SE->
getSCEV(FalseVal);
6329const SCEV *ScalarEvolution::createNodeForSelectOrPHIViaUMinSeq(
6331 assert(
Cond->getType()->isIntegerTy(1) &&
"Select condition is not an i1?");
6333 V->getType() ==
TrueVal->getType() &&
6334 "Types of select hands and of the result must match.");
6337 if (!
V->getType()->isIntegerTy(1))
6340 if (std::optional<const SCEV *> S =
6353 return getSCEV(CI->isOne() ? TrueVal : FalseVal);
6357 if (std::optional<const SCEV *> S =
6358 createNodeForSelectOrPHIInstWithICmpInstCond(
I->getType(), ICI,
6364 return createNodeForSelectOrPHIViaUMinSeq(V,
Cond, TrueVal, FalseVal);
6370 assert(
GEP->getSourceElementType()->isSized() &&
6371 "GEP source element type must be sized");
6374 for (
Value *Index :
GEP->indices())
6379APInt ScalarEvolution::getConstantMultipleImpl(
const SCEV *S,
6382 auto GetShiftedByZeros = [
BitWidth](uint32_t TrailingZeros) {
6385 : APInt::getOneBitSet(
BitWidth, TrailingZeros);
6387 auto GetGCDMultiple = [
this, CtxI](
const SCEVNAryExpr *
N) {
6390 for (
unsigned I = 1,
E =
N->getNumOperands();
I <
E && Res != 1; ++
I)
6409 return GetShiftedByZeros(TZ);
6419 return GetShiftedByZeros(TZ);
6423 if (
M->hasNoUnsignedWrap()) {
6426 for (
const SCEV *Operand :
M->operands().drop_front())
6434 for (
const SCEV *Operand :
M->operands())
6436 return GetShiftedByZeros(TZ);
6441 if (
N->hasNoUnsignedWrap())
6442 return GetGCDMultiple(
N);
6445 for (
const SCEV *Operand :
N->operands().drop_front())
6447 return GetShiftedByZeros(TZ);
6464 CtxI = &*F.getEntryBlock().begin();
6470 .countMinTrailingZeros();
6471 return GetShiftedByZeros(Known);
6484 return getConstantMultipleImpl(S, CtxI);
6486 auto I = ConstantMultipleCache.find(S);
6487 if (
I != ConstantMultipleCache.end())
6490 APInt Result = getConstantMultipleImpl(S, CtxI);
6491 auto InsertPair = ConstantMultipleCache.insert({S, Result});
6492 assert(InsertPair.second &&
"Should insert a new key");
6493 return InsertPair.first->second;
6510 if (
MDNode *MD =
I->getMetadata(LLVMContext::MD_range))
6513 if (std::optional<ConstantRange>
Range = CB->getRange())
6517 if (std::optional<ConstantRange>
Range =
A->getRange())
6520 return std::nullopt;
6527 UnsignedRanges.erase(AddRec);
6528 SignedRanges.erase(AddRec);
6529 ConstantMultipleCache.erase(AddRec);
6534getRangeForUnknownRecurrence(
const SCEVUnknown *U) {
6560 Value *Start, *Step;
6567 assert(L && L->getHeader() ==
P->getParent());
6580 case Instruction::AShr:
6581 case Instruction::LShr:
6582 case Instruction::Shl:
6597 KnownStep.getBitWidth() ==
BitWidth);
6600 auto MaxShiftAmt = KnownStep.getMaxValue();
6602 bool Overflow =
false;
6603 auto TotalShift = MaxShiftAmt.umul_ov(TCAP, Overflow);
6610 case Instruction::AShr: {
6618 if (KnownStart.isNonNegative())
6621 KnownStart.getMaxValue() + 1);
6622 if (KnownStart.isNegative())
6625 KnownEnd.getMaxValue() + 1);
6628 case Instruction::LShr: {
6637 KnownStart.getMaxValue() + 1);
6639 case Instruction::Shl: {
6643 if (TotalShift.ult(KnownStart.countMinLeadingZeros()))
6644 return ConstantRange(KnownStart.getMinValue(),
6645 KnownEnd.getMaxValue() + 1);
6653ScalarEvolution::getRangeRefIter(
const SCEV *S,
6654 ScalarEvolution::RangeSignHint SignHint) {
6655 DenseMap<const SCEV *, ConstantRange> &Cache =
6656 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
6659 SmallPtrSet<const SCEV *, 8> Seen;
6663 auto AddToWorklist = [&WorkList, &Seen, &Cache](
const SCEV *Expr) {
6664 if (!Seen.
insert(Expr).second)
6698 for (
unsigned I = 0;
I != WorkList.
size(); ++
I) {
6699 const SCEV *
P = WorkList[
I];
6703 for (
const SCEV *
Op :
P->operands())
6709 if (!PendingPhiRangesIter.insert(
P).second)
6716 if (!WorkList.
empty()) {
6721 getRangeRef(
P, SignHint);
6725 PendingPhiRangesIter.erase(
P);
6729 return getRangeRef(S, SignHint, 0);
6736 const SCEV *S, ScalarEvolution::RangeSignHint SignHint,
unsigned Depth) {
6737 DenseMap<const SCEV *, ConstantRange> &Cache =
6738 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
6746 if (
I != Cache.
end())
6750 return setRange(
C, SignHint, ConstantRange(
C->getAPInt()));
6755 return getRangeRefIter(S, SignHint);
6758 ConstantRange ConservativeResult(
BitWidth,
true);
6759 using OBO = OverflowingBinaryOperator;
6763 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
6767 ConservativeResult =
6774 ConservativeResult = ConstantRange(
6790 ConservativeResult.intersectWith(
X.truncate(
BitWidth), RangeType));
6797 ConservativeResult.intersectWith(
X.zeroExtend(
BitWidth), RangeType));
6804 ConservativeResult.intersectWith(
X.signExtend(
BitWidth), RangeType));
6810 return setRange(Cast, SignHint,
X);
6815 const SCEV *URemLHS =
nullptr, *URemRHS =
nullptr;
6816 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED &&
6818 ConstantRange LHSRange = getRangeRef(URemLHS, SignHint,
Depth + 1);
6819 ConstantRange RHSRange = getRangeRef(URemRHS, SignHint,
Depth + 1);
6820 ConservativeResult =
6821 ConservativeResult.intersectWith(LHSRange.
urem(RHSRange), RangeType);
6823 ConstantRange
X = getRangeRef(
Add->getOperand(0), SignHint,
Depth + 1);
6824 unsigned WrapType = OBO::AnyWrap;
6825 if (
Add->hasNoSignedWrap())
6826 WrapType |= OBO::NoSignedWrap;
6827 if (
Add->hasNoUnsignedWrap())
6828 WrapType |= OBO::NoUnsignedWrap;
6830 X =
X.addWithNoWrap(getRangeRef(
Op, SignHint,
Depth + 1), WrapType,
6832 return setRange(
Add, SignHint,
6833 ConservativeResult.intersectWith(
X, RangeType));
6837 ConstantRange
X = getRangeRef(
Mul->getOperand(0), SignHint,
Depth + 1);
6839 X =
X.multiply(getRangeRef(
Op, SignHint,
Depth + 1));
6840 return setRange(
Mul, SignHint,
6841 ConservativeResult.intersectWith(
X, RangeType));
6845 ConstantRange
X = getRangeRef(UDiv->
getLHS(), SignHint,
Depth + 1);
6846 ConstantRange
Y = getRangeRef(UDiv->
getRHS(), SignHint,
Depth + 1);
6847 return setRange(UDiv, SignHint,
6848 ConservativeResult.intersectWith(
X.udiv(
Y), RangeType));
6856 if (!UnsignedMinValue.
isZero())
6857 ConservativeResult = ConservativeResult.intersectWith(
6858 ConstantRange(UnsignedMinValue, APInt(
BitWidth, 0)), RangeType);
6867 bool AllNonNeg =
true;
6868 bool AllNonPos =
true;
6869 for (
unsigned i = 1, e = AddRec->
getNumOperands(); i != e; ++i) {
6876 ConservativeResult = ConservativeResult.intersectWith(
6881 ConservativeResult = ConservativeResult.intersectWith(
6890 const SCEV *MaxBEScev =
6904 auto RangeFromAffine = getRangeForAffineAR(
6906 ConservativeResult =
6907 ConservativeResult.intersectWith(RangeFromAffine, RangeType);
6909 auto RangeFromFactoring = getRangeViaFactoring(
6911 ConservativeResult =
6912 ConservativeResult.intersectWith(RangeFromFactoring, RangeType);
6918 const SCEV *SymbolicMaxBECount =
6923 auto RangeFromAffineNew = getRangeForAffineNoSelfWrappingAR(
6924 AddRec, SymbolicMaxBECount,
BitWidth, SignHint);
6925 ConservativeResult =
6926 ConservativeResult.intersectWith(RangeFromAffineNew, RangeType);
6931 return setRange(AddRec, SignHint, std::move(ConservativeResult));
6941 ID = Intrinsic::umax;
6944 ID = Intrinsic::smax;
6948 ID = Intrinsic::umin;
6951 ID = Intrinsic::smin;
6958 ConstantRange
X = getRangeRef(NAry->getOperand(0), SignHint,
Depth + 1);
6959 for (
unsigned i = 1, e = NAry->getNumOperands(); i != e; ++i)
6961 ID, {
X, getRangeRef(NAry->getOperand(i), SignHint,
Depth + 1)});
6962 return setRange(S, SignHint,
6963 ConservativeResult.intersectWith(
X, RangeType));
6972 ConservativeResult =
6973 ConservativeResult.intersectWith(*MDRange, RangeType);
6978 auto CR = getRangeForUnknownRecurrence(U);
6979 ConservativeResult = ConservativeResult.intersectWith(CR);
6990 if (
U->getType()->isPointerTy()) {
6993 unsigned ptrSize = DL.getPointerTypeSizeInBits(
U->getType());
6994 int ptrIdxDiff = ptrSize -
BitWidth;
6995 if (ptrIdxDiff > 0 && ptrSize >
BitWidth && NS > (
unsigned)ptrIdxDiff)
7008 ConservativeResult = ConservativeResult.intersectWith(
7012 ConservativeResult = ConservativeResult.intersectWith(
7017 if (
U->getType()->isPointerTy() && SignHint == HINT_RANGE_UNSIGNED) {
7020 bool CanBeNull, CanBeFreed;
7021 uint64_t DerefBytes =
7022 V->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
7032 uint64_t
Align =
U->getValue()->getPointerAlignment(DL).value();
7033 uint64_t Rem = MaxVal.
urem(Align);
7038 ConservativeResult = ConservativeResult.intersectWith(
7046 if (PendingPhiRanges.insert(Phi).second) {
7047 ConstantRange RangeFromOps(
BitWidth,
false);
7049 for (
const auto &
Op :
Phi->operands()) {
7051 RangeFromOps = RangeFromOps.unionWith(OpRange);
7053 if (RangeFromOps.isFullSet())
7056 ConservativeResult =
7057 ConservativeResult.intersectWith(RangeFromOps, RangeType);
7058 bool Erased = PendingPhiRanges.erase(Phi);
7059 assert(Erased &&
"Failed to erase Phi properly?");
7066 if (
II->getIntrinsicID() == Intrinsic::vscale) {
7068 ConservativeResult = ConservativeResult.difference(Disallowed);
7071 return setRange(U, SignHint, std::move(ConservativeResult));
7077 return setRange(S, SignHint, std::move(ConservativeResult));
7086 const APInt &MaxBECount,
7093 if (Step == 0 || MaxBECount == 0)
7100 return ConstantRange::getFull(
BitWidth);
7116 return ConstantRange::getFull(
BitWidth);
7128 APInt MovedBoundary = Descending ? (StartLower - std::move(
Offset))
7129 : (StartUpper + std::move(
Offset));
7134 if (StartRange.
contains(MovedBoundary))
7135 return ConstantRange::getFull(
BitWidth);
7138 Descending ? std::move(MovedBoundary) : std::move(StartLower);
7140 Descending ? std::move(StartUpper) : std::move(MovedBoundary);
7149 const APInt &MaxBECount) {
7153 "mismatched bit widths");
7162 StepSRange.
getSignedMin(), StartSRange, MaxBECount,
true);
7164 StartSRange, MaxBECount,
7176ConstantRange ScalarEvolution::getRangeForAffineNoSelfWrappingAR(
7178 ScalarEvolution::RangeSignHint SignHint) {
7179 assert(AddRec->
isAffine() &&
"Non-affine AddRecs are not suppored!\n");
7181 "This only works for non-self-wrapping AddRecs!");
7182 const bool IsSigned = SignHint == HINT_RANGE_SIGNED;
7186 return ConstantRange::getFull(
BitWidth);
7194 return ConstantRange::getFull(
BitWidth);
7198 const SCEV *MaxItersWithoutWrap =
getUDivExpr(RangeWidth, StepAbs);
7200 MaxItersWithoutWrap))
7201 return ConstantRange::getFull(
BitWidth);
7222 ConstantRange StartRange = getRangeRef(Start, SignHint);
7223 ConstantRange EndRange = getRangeRef(End, SignHint);
7224 ConstantRange RangeBetween = StartRange.
unionWith(EndRange);
7228 return RangeBetween;
7233 return ConstantRange::getFull(
BitWidth);
7236 isKnownPredicateViaConstantRanges(LEPred, Start, End))
7237 return RangeBetween;
7239 isKnownPredicateViaConstantRanges(GEPred, Start, End))
7240 return RangeBetween;
7241 return ConstantRange::getFull(
BitWidth);
7246 const APInt &MaxBECount) {
7253 "mismatched bit widths");
7255 struct SelectPattern {
7256 Value *Condition =
nullptr;
7260 explicit SelectPattern(ScalarEvolution &SE,
unsigned BitWidth,
7262 std::optional<unsigned> CastOp;
7276 CastOp = SCast->getSCEVType();
7277 S = SCast->getOperand();
7280 using namespace llvm::PatternMatch;
7287 Condition =
nullptr;
7319 bool isRecognized() {
return Condition !=
nullptr; }
7322 SelectPattern StartPattern(*
this,
BitWidth, Start);
7323 if (!StartPattern.isRecognized())
7324 return ConstantRange::getFull(
BitWidth);
7326 SelectPattern StepPattern(*
this,
BitWidth, Step);
7327 if (!StepPattern.isRecognized())
7328 return ConstantRange::getFull(
BitWidth);
7330 if (StartPattern.Condition != StepPattern.Condition) {
7334 return ConstantRange::getFull(
BitWidth);
7345 const SCEV *TrueStart = this->
getConstant(StartPattern.TrueValue);
7346 const SCEV *TrueStep = this->
getConstant(StepPattern.TrueValue);
7347 const SCEV *FalseStart = this->
getConstant(StartPattern.FalseValue);
7348 const SCEV *FalseStep = this->
getConstant(StepPattern.FalseValue);
7350 ConstantRange TrueRange =
7351 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount);
7352 ConstantRange FalseRange =
7353 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount);
7375ScalarEvolution::getNonTrivialDefiningScopeBound(
const SCEV *S) {
7389 SmallPtrSet<const SCEV *, 16> Visited;
7391 auto pushOp = [&](
const SCEV *S) {
7392 if (!Visited.
insert(S).second)
7395 if (Visited.
size() > 30) {
7402 for (
const auto *S :
Ops)
7406 while (!Worklist.
empty()) {
7408 if (
auto *DefI = getNonTrivialDefiningScopeBound(S)) {
7409 if (!Bound || DT.dominates(Bound, DefI))
7416 return Bound ? Bound : &*F.getEntryBlock().begin();
7422 return getDefiningScopeBound(
Ops, Discard);
7425bool ScalarEvolution::isGuaranteedToTransferExecutionTo(
const Instruction *
A,
7427 if (
A->getParent() ==
B->getParent() &&
7432 auto *BLoop = LI.getLoopFor(
B->getParent());
7433 if (BLoop && BLoop->getHeader() ==
B->getParent() &&
7434 BLoop->getLoopPreheader() ==
A->getParent() &&
7436 A->getParent()->end()) &&
7443bool ScalarEvolution::isGuaranteedNotToBePoison(
const SCEV *
Op) {
7444 SCEVPoisonCollector PC(
true);
7446 return PC.MaybePoison.empty();
7449bool ScalarEvolution::isGuaranteedNotToCauseUB(
const SCEV *
Op) {
7455 return M && (!
isKnownNonZero(Op1) || !isGuaranteedNotToBePoison(Op1));
7459bool ScalarEvolution::isSCEVExprNeverPoison(
const Instruction *
I) {
7476 for (
const Use &
Op :
I->operands()) {
7482 auto *DefI = getDefiningScopeBound(SCEVOps);
7483 return isGuaranteedToTransferExecutionTo(DefI,
I);
7486bool ScalarEvolution::isAddRecNeverPoison(
const Instruction *
I,
const Loop *L) {
7488 if (isSCEVExprNeverPoison(
I))
7499 auto *ExitingBB =
L->getExitingBlock();
7503 SmallPtrSet<const Value *, 16> KnownPoison;
7512 while (!Worklist.
empty()) {
7515 for (
const Use &U :
Poison->uses()) {
7518 DT.dominates(PoisonUser->
getParent(), ExitingBB))
7522 if (KnownPoison.
insert(PoisonUser).second)
7530ScalarEvolution::LoopProperties
7531ScalarEvolution::getLoopProperties(
const Loop *L) {
7532 using LoopProperties = ScalarEvolution::LoopProperties;
7534 auto Itr = LoopPropertiesCache.find(L);
7535 if (Itr == LoopPropertiesCache.end()) {
7538 return !
SI->isSimple();
7548 return I->mayWriteToMemory();
7551 LoopProperties LP = {
true,
7554 for (
auto *BB :
L->getBlocks())
7555 for (
auto &
I : *BB) {
7557 LP.HasNoAbnormalExits =
false;
7558 if (HasSideEffects(&
I))
7559 LP.HasNoSideEffects =
false;
7560 if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
7564 auto InsertPair = LoopPropertiesCache.insert({
L, LP});
7565 assert(InsertPair.second &&
"We just checked!");
7566 Itr = InsertPair.first;
7579const SCEV *ScalarEvolution::createSCEVIter(
Value *V) {
7585 Stack.emplace_back(V,
true);
7586 Stack.emplace_back(V,
false);
7587 while (!Stack.empty()) {
7588 auto E = Stack.pop_back_val();
7589 Value *CurV = E.getPointer();
7595 const SCEV *CreatedSCEV =
nullptr;
7598 CreatedSCEV = createSCEV(CurV);
7603 CreatedSCEV = getOperandsToCreate(CurV,
Ops);
7607 insertValueToMap(CurV, CreatedSCEV);
7611 Stack.emplace_back(CurV,
true);
7613 Stack.emplace_back(
Op,
false);
7630 if (!DT.isReachableFromEntry(
I->getParent()))
7643 switch (BO->Opcode) {
7644 case Instruction::Add:
7645 case Instruction::Mul: {
7652 Ops.push_back(BO->
Op);
7656 Ops.push_back(BO->RHS);
7660 (BO->Opcode == Instruction::Add &&
7661 (NewBO->Opcode != Instruction::Add &&
7662 NewBO->Opcode != Instruction::Sub)) ||
7663 (BO->Opcode == Instruction::Mul &&
7664 NewBO->Opcode != Instruction::Mul)) {
7665 Ops.push_back(BO->LHS);
7670 if (BO->
Op && (BO->IsNSW || BO->IsNUW)) {
7673 Ops.push_back(BO->LHS);
7681 case Instruction::Sub:
7682 case Instruction::UDiv:
7683 case Instruction::URem:
7685 case Instruction::AShr:
7686 case Instruction::Shl:
7687 case Instruction::Xor:
7691 case Instruction::And:
7692 case Instruction::Or:
7696 case Instruction::LShr:
7703 Ops.push_back(BO->LHS);
7704 Ops.push_back(BO->RHS);
7708 switch (
U->getOpcode()) {
7709 case Instruction::Trunc:
7710 case Instruction::ZExt:
7711 case Instruction::SExt:
7712 case Instruction::PtrToAddr:
7713 case Instruction::PtrToInt:
7714 Ops.push_back(
U->getOperand(0));
7717 case Instruction::BitCast:
7719 Ops.push_back(
U->getOperand(0));
7724 case Instruction::SDiv:
7725 case Instruction::SRem:
7726 Ops.push_back(
U->getOperand(0));
7727 Ops.push_back(
U->getOperand(1));
7730 case Instruction::GetElementPtr:
7732 "GEP source element type must be sized");
7736 case Instruction::IntToPtr:
7739 case Instruction::PHI:
7743 case Instruction::Select: {
7745 auto CanSimplifyToUnknown = [
this,
U]() {
7763 if (CanSimplifyToUnknown())
7770 case Instruction::Call:
7771 case Instruction::Invoke:
7778 switch (
II->getIntrinsicID()) {
7779 case Intrinsic::abs:
7780 Ops.push_back(
II->getArgOperand(0));
7782 case Intrinsic::umax:
7783 case Intrinsic::umin:
7784 case Intrinsic::smax:
7785 case Intrinsic::smin:
7786 case Intrinsic::usub_sat:
7787 case Intrinsic::uadd_sat:
7788 Ops.push_back(
II->getArgOperand(0));
7789 Ops.push_back(
II->getArgOperand(1));
7791 case Intrinsic::start_loop_iterations:
7792 case Intrinsic::annotation:
7793 case Intrinsic::ptr_annotation:
7794 Ops.push_back(
II->getArgOperand(0));
7806const SCEV *ScalarEvolution::createSCEV(
Value *V) {
7815 if (!DT.isReachableFromEntry(
I->getParent()))
7830 switch (BO->Opcode) {
7831 case Instruction::Add: {
7857 if (BO->Opcode == Instruction::Sub)
7865 if (BO->Opcode == Instruction::Sub)
7872 if (!NewBO || (NewBO->Opcode != Instruction::Add &&
7873 NewBO->Opcode != Instruction::Sub)) {
7883 case Instruction::Mul: {
7904 if (!NewBO || NewBO->Opcode != Instruction::Mul) {
7913 case Instruction::UDiv:
7917 case Instruction::URem:
7921 case Instruction::Sub: {
7924 Flags = getNoWrapFlagsFromUB(BO->
Op);
7929 case Instruction::And:
7935 if (CI->isMinusOne())
7937 const APInt &
A = CI->getValue();
7943 unsigned LZ =
A.countl_zero();
7944 unsigned TZ =
A.countr_zero();
7949 APInt EffectiveMask =
7951 if ((LZ != 0 || TZ != 0) && !((~
A & ~Known.
Zero) & EffectiveMask)) {
7954 const SCEV *ShiftedLHS =
nullptr;
7958 unsigned MulZeros = OpC->getAPInt().countr_zero();
7959 unsigned GCD = std::min(MulZeros, TZ);
7964 auto *NewMul =
getMulExpr(MulOps, LHSMul->getNoWrapFlags());
7986 case Instruction::Or:
7995 case Instruction::Xor:
7998 if (CI->isMinusOne())
8007 if (LBO->getOpcode() == Instruction::And &&
8008 LCI->getValue() == CI->getValue())
8009 if (
const SCEVZeroExtendExpr *Z =
8012 const SCEV *Z0 =
Z->getOperand();
8019 if (CI->getValue().isMask(Z0TySize))
8025 APInt Trunc = CI->getValue().trunc(Z0TySize);
8034 case Instruction::Shl:
8052 auto MulFlags = getNoWrapFlagsFromUB(BO->
Op);
8060 ConstantInt *
X = ConstantInt::get(
8066 case Instruction::AShr:
8088 const SCEV *AddTruncateExpr =
nullptr;
8089 ConstantInt *ShlAmtCI =
nullptr;
8090 const SCEV *AddConstant =
nullptr;
8092 if (L &&
L->getOpcode() == Instruction::Add) {
8100 if (LShift && LShift->
getOpcode() == Instruction::Shl) {
8107 APInt AddOperand = AddOperandCI->
getValue().
ashr(AShrAmt);
8115 }
else if (L &&
L->getOpcode() == Instruction::Shl) {
8120 const SCEV *ShlOp0SCEV =
getSCEV(
L->getOperand(0));
8125 if (AddTruncateExpr && ShlAmtCI) {
8137 const APInt &ShlAmt = ShlAmtCI->
getValue();
8141 const SCEV *CompositeExpr =
8143 if (
L->getOpcode() != Instruction::Shl)
8144 CompositeExpr =
getAddExpr(CompositeExpr, AddConstant);
8153 switch (
U->getOpcode()) {
8154 case Instruction::Trunc:
8157 case Instruction::ZExt:
8160 case Instruction::SExt:
8170 if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
8171 Type *Ty =
U->getType();
8179 case Instruction::BitCast:
8185 case Instruction::PtrToAddr:
8188 case Instruction::PtrToInt: {
8191 Type *DstIntTy =
U->getType();
8199 case Instruction::IntToPtr:
8203 case Instruction::SDiv:
8210 case Instruction::SRem:
8217 case Instruction::GetElementPtr:
8220 case Instruction::PHI:
8223 case Instruction::Select:
8224 return createNodeForSelectOrPHI(U,
U->getOperand(0),
U->getOperand(1),
8227 case Instruction::Call:
8228 case Instruction::Invoke:
8233 switch (
II->getIntrinsicID()) {
8234 case Intrinsic::abs:
8238 case Intrinsic::umax:
8242 case Intrinsic::umin:
8246 case Intrinsic::smax:
8250 case Intrinsic::smin:
8254 case Intrinsic::usub_sat: {
8255 const SCEV *
X =
getSCEV(
II->getArgOperand(0));
8256 const SCEV *
Y =
getSCEV(
II->getArgOperand(1));
8260 case Intrinsic::uadd_sat: {
8261 const SCEV *
X =
getSCEV(
II->getArgOperand(0));
8262 const SCEV *
Y =
getSCEV(
II->getArgOperand(1));
8266 case Intrinsic::start_loop_iterations:
8267 case Intrinsic::annotation:
8268 case Intrinsic::ptr_annotation:
8272 case Intrinsic::vscale:
8292 auto *ExitCountType = ExitCount->
getType();
8293 assert(ExitCountType->isIntegerTy());
8295 1 + ExitCountType->getScalarSizeInBits());
8308 auto CanAddOneWithoutOverflow = [&]() {
8310 getRangeRef(ExitCount, RangeSignHint::HINT_RANGE_UNSIGNED);
8321 if (EvalSize > ExitCountSize && CanAddOneWithoutOverflow())
8351 assert(ExitingBlock &&
"Must pass a non-null exiting block!");
8352 assert(L->isLoopExiting(ExitingBlock) &&
8353 "Exiting block must actually branch out of the loop!");
8362 const auto *MaxExitCount =
8370 L->getExitingBlocks(ExitingBlocks);
8372 std::optional<unsigned> Res;
8373 for (
auto *ExitingBB : ExitingBlocks) {
8377 Res = std::gcd(*Res, Multiple);
8379 return Res.value_or(1);
8383 const SCEV *ExitCount) {
8413 assert(ExitingBlock &&
"Must pass a non-null exiting block!");
8414 assert(L->isLoopExiting(ExitingBlock) &&
8415 "Exiting block must actually branch out of the loop!");
8425 return getBackedgeTakenInfo(L).getExact(ExitingBlock,
this);
8427 return getBackedgeTakenInfo(L).getSymbolicMax(ExitingBlock,
this);
8429 return getBackedgeTakenInfo(L).getConstantMax(ExitingBlock,
this);
8439 return getPredicatedBackedgeTakenInfo(L).getExact(ExitingBlock,
this,
8442 return getPredicatedBackedgeTakenInfo(L).getSymbolicMax(ExitingBlock,
this,
8445 return getPredicatedBackedgeTakenInfo(L).getConstantMax(ExitingBlock,
this,
8453 return getPredicatedBackedgeTakenInfo(L).getExact(L,
this, &Preds);
8460 return getBackedgeTakenInfo(L).getExact(L,
this);
8462 return getBackedgeTakenInfo(L).getConstantMax(
this);
8464 return getBackedgeTakenInfo(L).getSymbolicMax(L,
this);
8471 return getPredicatedBackedgeTakenInfo(L).getSymbolicMax(L,
this, &Preds);
8476 return getPredicatedBackedgeTakenInfo(L).getConstantMax(
this, &Preds);
8480 return getBackedgeTakenInfo(L).isConstantMaxOrZero(
this);
8490 for (
PHINode &PN : Header->phis())
8491 if (Visited.
insert(&PN).second)
8495ScalarEvolution::BackedgeTakenInfo &
8496ScalarEvolution::getPredicatedBackedgeTakenInfo(
const Loop *L) {
8497 auto &BTI = getBackedgeTakenInfo(L);
8498 if (BTI.hasFullInfo())
8501 auto Pair = PredicatedBackedgeTakenCounts.try_emplace(L);
8504 return Pair.first->second;
8506 BackedgeTakenInfo
Result =
8507 computeBackedgeTakenCount(L,
true);
8509 return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result);
8512ScalarEvolution::BackedgeTakenInfo &
8513ScalarEvolution::getBackedgeTakenInfo(
const Loop *L) {
8519 std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator,
bool> Pair =
8520 BackedgeTakenCounts.try_emplace(L);
8522 return Pair.first->second;
8527 BackedgeTakenInfo
Result = computeBackedgeTakenCount(L);
8534 if (
Result.hasAnyInfo()) {
8537 auto LoopUsersIt = LoopUsers.find(L);
8538 if (LoopUsersIt != LoopUsers.end())
8540 forgetMemoizedResults(ToForget);
8543 for (PHINode &PN :
L->getHeader()->phis())
8544 ConstantEvolutionLoopExitValue.erase(&PN);
8552 return BackedgeTakenCounts.find(L)->second = std::move(Result);
8561 BackedgeTakenCounts.clear();
8562 PredicatedBackedgeTakenCounts.clear();
8563 BECountUsers.clear();
8564 LoopPropertiesCache.clear();
8565 ConstantEvolutionLoopExitValue.clear();
8566 ValueExprMap.clear();
8567 ValuesAtScopes.clear();
8568 ValuesAtScopesUsers.clear();
8569 LoopDispositions.clear();
8570 BlockDispositions.clear();
8571 UnsignedRanges.clear();
8572 SignedRanges.clear();
8573 ExprValueMap.clear();
8575 ConstantMultipleCache.clear();
8576 PredicatedSCEVRewrites.clear();
8578 FoldCacheUser.clear();
8580void ScalarEvolution::visitAndClearUsers(
8584 while (!Worklist.
empty()) {
8591 if (It != ValueExprMap.
end()) {
8592 eraseValueFromMap(It->first);
8595 ConstantEvolutionLoopExitValue.erase(PN);
8609 while (!LoopWorklist.
empty()) {
8613 forgetBackedgeTakenCounts(CurrL,
false);
8614 forgetBackedgeTakenCounts(CurrL,
true);
8617 for (
auto I = PredicatedSCEVRewrites.begin();
8618 I != PredicatedSCEVRewrites.end();) {
8619 std::pair<const SCEV *, const Loop *> Entry =
I->first;
8620 if (Entry.second == CurrL)
8621 PredicatedSCEVRewrites.erase(
I++);
8626 auto LoopUsersItr = LoopUsers.find(CurrL);
8627 if (LoopUsersItr != LoopUsers.end())
8632 visitAndClearUsers(Worklist, Visited, ToForget);
8634 LoopPropertiesCache.erase(CurrL);
8637 LoopWorklist.
append(CurrL->begin(), CurrL->end());
8639 forgetMemoizedResults(ToForget);
8656 visitAndClearUsers(Worklist, Visited, ToForget);
8658 forgetMemoizedResults(ToForget);
8670 struct InvalidationRootCollector {
8674 InvalidationRootCollector(
Loop *L) : L(L) {}
8676 bool follow(
const SCEV *S) {
8682 if (L->contains(AddRec->
getLoop()))
8687 bool isDone()
const {
return false; }
8690 InvalidationRootCollector
C(L);
8692 forgetMemoizedResults(
C.Roots);
8705 BlockDispositions.clear();
8706 LoopDispositions.clear();
8723 while (!Worklist.
empty()) {
8725 bool LoopDispoRemoved = LoopDispositions.erase(Curr);
8726 bool BlockDispoRemoved = BlockDispositions.erase(Curr);
8727 if (!LoopDispoRemoved && !BlockDispoRemoved)
8729 auto Users = SCEVUsers.find(Curr);
8730 if (
Users != SCEVUsers.end())
8743const SCEV *ScalarEvolution::BackedgeTakenInfo::getExact(
8747 if (!isComplete() || ExitNotTaken.
empty())
8758 for (
const auto &ENT : ExitNotTaken) {
8759 const SCEV *BECount = ENT.ExactNotTaken;
8762 "We should only have known counts for exiting blocks that dominate "
8765 Ops.push_back(BECount);
8770 assert((Preds || ENT.hasAlwaysTruePredicate()) &&
8771 "Predicate should be always true!");
8780const ScalarEvolution::ExitNotTakenInfo *
8781ScalarEvolution::BackedgeTakenInfo::getExitNotTaken(
8782 const BasicBlock *ExitingBlock,
8783 SmallVectorImpl<const SCEVPredicate *> *Predicates)
const {
8784 for (
const auto &ENT : ExitNotTaken)
8785 if (ENT.ExitingBlock == ExitingBlock) {
8786 if (ENT.hasAlwaysTruePredicate())
8788 else if (Predicates) {
8798const SCEV *ScalarEvolution::BackedgeTakenInfo::getConstantMax(
8800 SmallVectorImpl<const SCEVPredicate *> *Predicates)
const {
8801 if (!getConstantMax())
8804 for (
const auto &ENT : ExitNotTaken)
8805 if (!ENT.hasAlwaysTruePredicate()) {
8813 "No point in having a non-constant max backedge taken count!");
8814 return getConstantMax();
8817const SCEV *ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(
8819 SmallVectorImpl<const SCEVPredicate *> *Predicates) {
8827 for (
const auto &ENT : ExitNotTaken) {
8828 const SCEV *ExitCount = ENT.SymbolicMaxNotTaken;
8831 "We should only have known counts for exiting blocks that "
8837 assert((Predicates || ENT.hasAlwaysTruePredicate()) &&
8838 "Predicate should be always true!");
8841 if (ExitCounts.
empty())
8850bool ScalarEvolution::BackedgeTakenInfo::isConstantMaxOrZero(
8852 auto PredicateNotAlwaysTrue = [](
const ExitNotTakenInfo &ENT) {
8853 return !ENT.hasAlwaysTruePredicate();
8855 return MaxOrZero && !
any_of(ExitNotTaken, PredicateNotAlwaysTrue);
8871 this->ExactNotTaken = E = ConstantMaxNotTaken;
8872 this->SymbolicMaxNotTaken = SymbolicMaxNotTaken = ConstantMaxNotTaken;
8877 "Exact is not allowed to be less precise than Constant Max");
8880 "Exact is not allowed to be less precise than Symbolic Max");
8883 "Symbolic Max is not allowed to be less precise than Constant Max");
8886 "No point in having a non-constant max backedge taken count!");
8888 for (
const auto PredList : PredLists)
8889 for (
const auto *
P : PredList) {
8897 "Backedge count should be int");
8900 "Max backedge count should be int");
8913ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
8915 bool IsComplete,
const SCEV *ConstantMax,
bool MaxOrZero)
8916 : ConstantMax(ConstantMax), IsComplete(IsComplete), MaxOrZero(MaxOrZero) {
8917 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
8919 ExitNotTaken.reserve(ExitCounts.
size());
8920 std::transform(ExitCounts.
begin(), ExitCounts.
end(),
8921 std::back_inserter(ExitNotTaken),
8922 [&](
const EdgeExitInfo &EEI) {
8923 BasicBlock *ExitBB = EEI.first;
8924 const ExitLimit &EL = EEI.second;
8925 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken,
8926 EL.ConstantMaxNotTaken, EL.SymbolicMaxNotTaken,
8931 "No point in having a non-constant max backedge taken count!");
8935ScalarEvolution::BackedgeTakenInfo
8936ScalarEvolution::computeBackedgeTakenCount(
const Loop *L,
8937 bool AllowPredicates) {
8939 L->getExitingBlocks(ExitingBlocks);
8941 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
8944 bool CouldComputeBECount =
true;
8946 const SCEV *MustExitMaxBECount =
nullptr;
8947 const SCEV *MayExitMaxBECount =
nullptr;
8948 bool MustExitMaxOrZero =
false;
8949 bool IsOnlyExit = ExitingBlocks.
size() == 1;
8961 if (ExitIfTrue == CI->
isZero())
8965 ExitLimit EL = computeExitLimit(L, ExitBB, IsOnlyExit, AllowPredicates);
8967 assert((AllowPredicates || EL.Predicates.empty()) &&
8968 "Predicated exit limit when predicates are not allowed!");
8973 ++NumExitCountsComputed;
8977 CouldComputeBECount =
false;
8984 "Exact is known but symbolic isn't?");
8985 ++NumExitCountsNotComputed;
9000 DT.dominates(ExitBB, Latch)) {
9001 if (!MustExitMaxBECount) {
9002 MustExitMaxBECount = EL.ConstantMaxNotTaken;
9003 MustExitMaxOrZero = EL.MaxOrZero;
9006 EL.ConstantMaxNotTaken);
9010 MayExitMaxBECount = EL.ConstantMaxNotTaken;
9013 EL.ConstantMaxNotTaken);
9017 const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
9021 bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1);
9027 for (
const auto &Pair : ExitCounts) {
9029 BECountUsers[Pair.second.ExactNotTaken].insert({
L, AllowPredicates});
9031 BECountUsers[Pair.second.SymbolicMaxNotTaken].insert(
9032 {
L, AllowPredicates});
9034 return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount,
9035 MaxBECount, MaxOrZero);
9038ScalarEvolution::ExitLimit
9039ScalarEvolution::computeExitLimit(
const Loop *L, BasicBlock *ExitingBlock,
9040 bool IsOnlyExit,
bool AllowPredicates) {
9041 assert(
L->contains(ExitingBlock) &&
"Exit count for non-loop block?");
9045 if (!Latch || !DT.dominates(ExitingBlock, Latch))
9053 "It should have one successor in loop and one exit block!");
9064 if (!
L->contains(SBB)) {
9069 assert(Exit &&
"Exiting block must have at least one exit");
9070 return computeExitLimitFromSingleExitSwitch(
9071 L, SI, Exit, IsOnlyExit);
9078 const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
bool ControlsOnlyExit,
9079 bool AllowPredicates) {
9080 ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates);
9081 return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue,
9082 ControlsOnlyExit, AllowPredicates);
9085std::optional<ScalarEvolution::ExitLimit>
9086ScalarEvolution::ExitLimitCache::find(
const Loop *L,
Value *ExitCond,
9087 bool ExitIfTrue,
bool ControlsOnlyExit,
9088 bool AllowPredicates) {
9090 (void)this->ExitIfTrue;
9091 (void)this->AllowPredicates;
9093 assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
9094 this->AllowPredicates == AllowPredicates &&
9095 "Variance in assumed invariant key components!");
9096 auto Itr = TripCountMap.find({ExitCond, ControlsOnlyExit});
9097 if (Itr == TripCountMap.end())
9098 return std::nullopt;
9102void ScalarEvolution::ExitLimitCache::insert(
const Loop *L,
Value *ExitCond,
9104 bool ControlsOnlyExit,
9105 bool AllowPredicates,
9107 assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
9108 this->AllowPredicates == AllowPredicates &&
9109 "Variance in assumed invariant key components!");
9111 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsOnlyExit}, EL});
9112 assert(InsertResult.second &&
"Expected successful insertion!");
9117ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached(
9118 ExitLimitCacheTy &Cache,
const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
9119 bool ControlsOnlyExit,
bool AllowPredicates) {
9121 if (
auto MaybeEL = Cache.find(L, ExitCond, ExitIfTrue, ControlsOnlyExit,
9125 ExitLimit EL = computeExitLimitFromCondImpl(
9126 Cache, L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates);
9127 Cache.insert(L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates, EL);
9131ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl(
9132 ExitLimitCacheTy &Cache,
const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
9133 bool ControlsOnlyExit,
bool AllowPredicates) {
9135 if (
auto LimitFromBinOp = computeExitLimitFromCondFromBinOp(
9136 Cache, L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates))
9137 return *LimitFromBinOp;
9143 computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsOnlyExit);
9144 if (EL.hasFullInfo() || !AllowPredicates)
9148 return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue,
9168 const WithOverflowInst *WO;
9183 auto EL = computeExitLimitFromICmp(L, Pred,
LHS,
getConstant(NewRHSC),
9184 ControlsOnlyExit, AllowPredicates);
9185 if (EL.hasAnyInfo())
9190 return computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
9193std::optional<ScalarEvolution::ExitLimit>
9194ScalarEvolution::computeExitLimitFromCondFromBinOp(
9195 ExitLimitCacheTy &Cache,
const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
9196 bool ControlsOnlyExit,
bool AllowPredicates) {
9205 return std::nullopt;
9210 bool EitherMayExit = IsAnd ^ ExitIfTrue;
9211 ExitLimit EL0 = computeExitLimitFromCondCached(
9212 Cache, L, Op0, ExitIfTrue, ControlsOnlyExit && !EitherMayExit,
9214 ExitLimit EL1 = computeExitLimitFromCondCached(
9215 Cache, L, Op1, ExitIfTrue, ControlsOnlyExit && !EitherMayExit,
9219 const Constant *NeutralElement = ConstantInt::get(ExitCond->
getType(), IsAnd);
9221 return Op1 == NeutralElement ? EL0 : EL1;
9223 return Op0 == NeutralElement ? EL1 : EL0;
9228 if (EitherMayExit) {
9238 ConstantMaxBECount = EL1.ConstantMaxNotTaken;
9240 ConstantMaxBECount = EL0.ConstantMaxNotTaken;
9243 EL1.ConstantMaxNotTaken);
9245 SymbolicMaxBECount = EL1.SymbolicMaxNotTaken;
9247 SymbolicMaxBECount = EL0.SymbolicMaxNotTaken;
9250 EL0.SymbolicMaxNotTaken, EL1.SymbolicMaxNotTaken, UseSequentialUMin);
9254 if (EL0.ExactNotTaken == EL1.ExactNotTaken)
9255 BECount = EL0.ExactNotTaken;
9268 SymbolicMaxBECount =
9270 return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount,
false,
9274ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromICmp(
9275 const Loop *L, ICmpInst *ExitCond,
bool ExitIfTrue,
bool ControlsOnlyExit,
9276 bool AllowPredicates) {
9288 ExitLimit EL = computeExitLimitFromICmp(L, Pred,
LHS,
RHS, ControlsOnlyExit,
9290 if (EL.hasAnyInfo())
9293 auto *ExhaustiveCount =
9294 computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
9297 return ExhaustiveCount;
9299 return computeShiftCompareExitLimit(ExitCond->
getOperand(0),
9302ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromICmp(
9303 const Loop *L, CmpPredicate Pred,
const SCEV *
LHS,
const SCEV *
RHS,
9304 bool ControlsOnlyExit,
bool AllowPredicates) {
9329 ConstantRange CompRange =
9345 auto *InnerLHS =
LHS;
9347 InnerLHS = ZExt->getOperand();
9394 if (EL.hasAnyInfo())
9411 if (EL.hasAnyInfo())
return EL;
9443 ExitLimit EL = howManyLessThans(
LHS,
RHS, L, IsSigned, ControlsOnlyExit,
9445 if (EL.hasAnyInfo())
9461 ExitLimit EL = howManyGreaterThans(
LHS,
RHS, L, IsSigned, ControlsOnlyExit,
9463 if (EL.hasAnyInfo())
9474ScalarEvolution::ExitLimit
9475ScalarEvolution::computeExitLimitFromSingleExitSwitch(
const Loop *L,
9477 BasicBlock *ExitingBlock,
9478 bool ControlsOnlyExit) {
9479 assert(!
L->contains(ExitingBlock) &&
"Not an exiting block!");
9482 if (
Switch->getDefaultDest() == ExitingBlock)
9486 "Default case must not exit the loop!");
9492 if (EL.hasAnyInfo())
9504 "Evaluation of SCEV at constant didn't fold correctly?");
9508ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
9518 const BasicBlock *Predecessor =
L->getLoopPredecessor();
9524 auto MatchPositiveShift =
9527 using namespace PatternMatch;
9529 ConstantInt *ShiftAmt;
9531 OutOpCode = Instruction::LShr;
9533 OutOpCode = Instruction::AShr;
9535 OutOpCode = Instruction::Shl;
9550 auto MatchShiftRecurrence =
9552 std::optional<Instruction::BinaryOps> PostShiftOpCode;
9567 if (MatchPositiveShift(
LHS, V, OpC)) {
9568 PostShiftOpCode = OpC;
9574 if (!PNOut || PNOut->getParent() !=
L->getHeader())
9577 Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
9583 MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&
9590 (!PostShiftOpCode || *PostShiftOpCode == OpCodeOut);
9595 if (!MatchShiftRecurrence(
LHS, PN, OpCode))
9607 ConstantInt *StableValue =
nullptr;
9612 case Instruction::AShr: {
9620 StableValue = ConstantInt::get(Ty, 0);
9622 StableValue = ConstantInt::get(Ty, -1,
true);
9628 case Instruction::LShr:
9629 case Instruction::Shl:
9639 "Otherwise cannot be an operand to a branch instruction");
9641 if (
Result->isZeroValue()) {
9643 const SCEV *UpperBound =
9660 if (
const Function *
F = CI->getCalledFunction())
9669 if (!L->contains(
I))
return false;
9674 return L->getHeader() ==
I->getParent();
9750 if (!
I)
return nullptr;
9763 std::vector<Constant*> Operands(
I->getNumOperands());
9765 for (
unsigned i = 0, e =
I->getNumOperands(); i != e; ++i) {
9769 if (!Operands[i])
return nullptr;
9774 if (!
C)
return nullptr;
9796 if (IncomingVal != CurrentVal) {
9799 IncomingVal = CurrentVal;
9811ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
9814 auto [
I,
Inserted] = ConstantEvolutionLoopExitValue.try_emplace(PN);
9823 DenseMap<Instruction *, Constant *> CurrentIterVals;
9825 assert(PN->
getParent() == Header &&
"Can't evaluate PHI not in loop header!");
9831 for (PHINode &
PHI : Header->phis()) {
9833 CurrentIterVals[&
PHI] = StartCST;
9835 if (!CurrentIterVals.
count(PN))
9836 return RetVal =
nullptr;
9842 "BEs is <= MaxBruteForceIterations which is an 'unsigned'!");
9845 unsigned IterationNum = 0;
9847 for (; ; ++IterationNum) {
9848 if (IterationNum == NumIterations)
9849 return RetVal = CurrentIterVals[PN];
9853 DenseMap<Instruction *, Constant *> NextIterVals;
9858 NextIterVals[PN] = NextPHI;
9860 bool StoppedEvolving = NextPHI == CurrentIterVals[PN];
9866 for (
const auto &
I : CurrentIterVals) {
9868 if (!
PHI ||
PHI == PN ||
PHI->getParent() != Header)
continue;
9873 for (
const auto &
I : PHIsToCompute) {
9874 PHINode *
PHI =
I.first;
9877 Value *BEValue =
PHI->getIncomingValueForBlock(Latch);
9880 if (NextPHI !=
I.second)
9881 StoppedEvolving =
false;
9886 if (StoppedEvolving)
9887 return RetVal = CurrentIterVals[PN];
9889 CurrentIterVals.swap(NextIterVals);
9893const SCEV *ScalarEvolution::computeExitCountExhaustively(
const Loop *L,
9903 DenseMap<Instruction *, Constant *> CurrentIterVals;
9905 assert(PN->
getParent() == Header &&
"Can't evaluate PHI not in loop header!");
9908 assert(Latch &&
"Should follow from NumIncomingValues == 2!");
9910 for (PHINode &
PHI : Header->phis()) {
9912 CurrentIterVals[&
PHI] = StartCST;
9914 if (!CurrentIterVals.
count(PN))
9922 for (
unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){
9929 if (CondVal->getValue() == uint64_t(ExitWhen)) {
9930 ++NumBruteForceTripCountsComputed;
9935 DenseMap<Instruction *, Constant *> NextIterVals;
9941 for (
const auto &
I : CurrentIterVals) {
9943 if (!
PHI ||
PHI->getParent() != Header)
continue;
9946 for (PHINode *
PHI : PHIsToCompute) {
9948 if (NextPHI)
continue;
9950 Value *BEValue =
PHI->getIncomingValueForBlock(Latch);
9953 CurrentIterVals.
swap(NextIterVals);
9964 for (
auto &LS : Values)
9966 return LS.second ? LS.second : V;
9971 const SCEV *
C = computeSCEVAtScope(V, L);
9972 for (
auto &LS :
reverse(ValuesAtScopes[V]))
9973 if (LS.first == L) {
9976 ValuesAtScopesUsers[
C].push_back({L, V});
9987 switch (V->getSCEVType()) {
10027 assert(!
C->getType()->isPointerTy() &&
10028 "Can only have one pointer, and it must be last");
10055ScalarEvolution::getWithOperands(
const SCEV *S,
10056 SmallVectorImpl<const SCEV *> &NewOps) {
10091const SCEV *ScalarEvolution::computeSCEVAtScope(
const SCEV *V,
const Loop *L) {
10092 switch (
V->getSCEVType()) {
10103 for (
unsigned i = 0, e = AddRec->
getNumOperands(); i != e; ++i) {
10114 for (++i; i !=
e; ++i)
10159 for (
unsigned i = 0, e =
Ops.size(); i != e; ++i) {
10161 if (OpAtScope !=
Ops[i]) {
10169 for (++i; i !=
e; ++i) {
10174 return getWithOperands(V, NewOps);
10189 const Loop *CurrLoop = this->LI[
I->getParent()];
10200 if (BackedgeTakenCount->
isZero()) {
10201 Value *InitValue =
nullptr;
10202 bool MultipleInitValues =
false;
10208 MultipleInitValues =
true;
10213 if (!MultipleInitValues && InitValue)
10222 unsigned InLoopPred =
10233 getConstantEvolutionLoopExitValue(PN, BTCC->getAPInt(), CurrLoop);
10247 SmallVector<Constant *, 4> Operands;
10248 Operands.
reserve(
I->getNumOperands());
10249 bool MadeImprovement =
false;
10264 MadeImprovement |= OrigV != OpV;
10269 assert(
C->getType() ==
Op->getType() &&
"Type mismatch");
10274 if (!MadeImprovement)
10295const SCEV *ScalarEvolution::stripInjectiveFunctions(
const SCEV *S)
const {
10297 return stripInjectiveFunctions(ZExt->getOperand());
10299 return stripInjectiveFunctions(SExt->getOperand());
10317 assert(
A != 0 &&
"A must be non-zero.");
10333 if (MinTZ < Mult2 && L->getLoopPredecessor())
10335 if (MinTZ < Mult2) {
10358 APInt AD =
A.lshr(Mult2).trunc(BW - Mult2);
10378static std::optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>>
10384 LLVM_DEBUG(
dbgs() << __func__ <<
": analyzing quadratic addrec: "
10385 << *AddRec <<
'\n');
10388 if (!LC || !MC || !
NC) {
10389 LLVM_DEBUG(
dbgs() << __func__ <<
": coefficients are not constant\n");
10390 return std::nullopt;
10396 assert(!
N.isZero() &&
"This is not a quadratic addrec");
10404 N =
N.sext(NewWidth);
10405 M = M.sext(NewWidth);
10406 L = L.sext(NewWidth);
10423 <<
"x + " <<
C <<
", coeff bw: " << NewWidth
10424 <<
", multiplied by " <<
T <<
'\n');
10433 std::optional<APInt>
Y) {
10435 unsigned W = std::max(
X->getBitWidth(),
Y->getBitWidth());
10438 return XW.
slt(YW) ? *
X : *
Y;
10441 return std::nullopt;
10442 return X ? *
X : *
Y;
10459 return std::nullopt;
10460 unsigned W =
X->getBitWidth();
10480static std::optional<APInt>
10486 return std::nullopt;
10489 LLVM_DEBUG(
dbgs() << __func__ <<
": solving for unsigned overflow\n");
10490 std::optional<APInt>
X =
10493 return std::nullopt;
10498 return std::nullopt;
10513static std::optional<APInt>
10517 "Starting value of addrec should be 0");
10518 LLVM_DEBUG(
dbgs() << __func__ <<
": solving boundary crossing for range "
10519 <<
Range <<
", addrec " << *AddRec <<
'\n');
10523 "Addrec's initial value should be in range");
10529 return std::nullopt;
10539 auto SolveForBoundary =
10540 [&](
APInt Bound) -> std::pair<std::optional<APInt>,
bool> {
10543 LLVM_DEBUG(
dbgs() <<
"SolveQuadraticAddRecRange: checking boundary "
10544 << Bound <<
" (before multiplying by " << M <<
")\n");
10547 std::optional<APInt> SO;
10550 "signed overflow\n");
10554 "unsigned overflow\n");
10555 std::optional<APInt> UO =
10558 auto LeavesRange = [&] (
const APInt &
X) {
10575 return {std::nullopt,
false};
10580 if (LeavesRange(*Min))
10581 return { Min,
true };
10582 std::optional<APInt> Max = Min == SO ? UO : SO;
10583 if (LeavesRange(*Max))
10584 return { Max,
true };
10587 return {std::nullopt,
true};
10594 auto SL = SolveForBoundary(
Lower);
10595 auto SU = SolveForBoundary(
Upper);
10598 if (!SL.second || !SU.second)
10599 return std::nullopt;
10642ScalarEvolution::ExitLimit ScalarEvolution::howFarToZero(
const SCEV *V,
10644 bool ControlsOnlyExit,
10645 bool AllowPredicates) {
10656 if (
C->getValue()->isZero())
return C;
10660 const SCEVAddRecExpr *AddRec =
10663 if (!AddRec && AllowPredicates)
10669 if (!AddRec || AddRec->
getLoop() != L)
10680 return ExitLimit(R, R, R,
false, Predicates);
10738 const SCEV *DistancePlusOne =
getAddExpr(Distance, One);
10764 const SCEV *
Exact =
10772 const SCEV *SymbolicMax =
10774 return ExitLimit(
Exact, ConstantMax, SymbolicMax,
false, Predicates);
10783 AllowPredicates ? &Predicates :
nullptr, *
this, L);
10791 return ExitLimit(
E, M, S,
false, Predicates);
10794ScalarEvolution::ExitLimit
10795ScalarEvolution::howFarToNonZero(
const SCEV *V,
const Loop *L) {
10803 if (!
C->getValue()->isZero())
10813std::pair<const BasicBlock *, const BasicBlock *>
10814ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(
const BasicBlock *BB)
10825 if (
const Loop *L = LI.getLoopFor(BB))
10826 return {
L->getLoopPredecessor(),
L->getHeader()};
10828 return {
nullptr, BB};
10837 if (
A ==
B)
return true;
10852 if (ComputesEqualValues(AI, BI))
10860 const SCEV *Op0, *Op1;
10879 auto TrivialCase = [&](
bool TriviallyTrue) {
10888 const SCEV *NewLHS, *NewRHS;
10912 return TrivialCase(
false);
10913 return TrivialCase(
true);
10936 const APInt &
RA = RC->getAPInt();
10938 bool SimplifiedByConstantRange =
false;
10943 return TrivialCase(
true);
10945 return TrivialCase(
false);
10954 Changed = SimplifiedByConstantRange =
true;
10958 if (!SimplifiedByConstantRange) {
10975 assert(!
RA.isMinValue() &&
"Should have been caught earlier!");
10981 assert(!
RA.isMaxValue() &&
"Should have been caught earlier!");
10987 assert(!
RA.isMinSignedValue() &&
"Should have been caught earlier!");
10993 assert(!
RA.isMaxSignedValue() &&
"Should have been caught earlier!");
11005 return TrivialCase(
true);
11007 return TrivialCase(
false);
11112 auto NonRecursive = [
this, OrNegative](
const SCEV *S) {
11114 return C->getAPInt().isPowerOf2() ||
11115 (OrNegative &&
C->getAPInt().isNegatedPowerOf2());
11118 return isa<SCEVVScale>(S) && F.hasFnAttribute(Attribute::VScaleRange);
11121 if (NonRecursive(S))
11147 APInt C = Cst->getAPInt();
11148 return C.urem(M) == 0;
11156 const SCEV *SmodM =
11171 for (
auto *
A : Assumptions)
11172 if (
A->implies(
P, *
this))
11185std::pair<const SCEV *, const SCEV *>
11188 const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *
this);
11190 return { Start, Start };
11192 const SCEV *
PostInc = SCEVPostIncRewriter::rewrite(S, L, *
this);
11201 getUsedLoops(LHS, LoopsUsed);
11202 getUsedLoops(RHS, LoopsUsed);
11204 if (LoopsUsed.
empty())
11209 for (
const auto *L1 : LoopsUsed)
11210 for (
const auto *L2 : LoopsUsed)
11211 assert((DT.dominates(L1->getHeader(), L2->getHeader()) ||
11212 DT.dominates(L2->getHeader(), L1->getHeader())) &&
11213 "Domination relationship is not a linear order");
11243 SplitRHS.second) &&
11255 if (isKnownPredicateViaSplitting(Pred, LHS, RHS))
11259 return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS);
11269 return std::nullopt;
11284 if (KnownWithoutContext)
11285 return KnownWithoutContext;
11292 return std::nullopt;
11298 const Loop *L = LHS->getLoop();
11303std::optional<ScalarEvolution::MonotonicPredicateType>
11306 auto Result = getMonotonicPredicateTypeImpl(LHS, Pred);
11312 auto ResultSwapped =
11315 assert(*ResultSwapped != *Result &&
11316 "monotonicity should flip as we flip the predicate");
11323std::optional<ScalarEvolution::MonotonicPredicateType>
11324ScalarEvolution::getMonotonicPredicateTypeImpl(
const SCEVAddRecExpr *LHS,
11338 return std::nullopt;
11342 "Should be greater or less!");
11346 if (!LHS->hasNoUnsignedWrap())
11347 return std::nullopt;
11351 "Relational predicate is either signed or unsigned!");
11352 if (!
LHS->hasNoSignedWrap())
11353 return std::nullopt;
11355 const SCEV *Step =
LHS->getStepRecurrence(*
this);
11363 return std::nullopt;
11366std::optional<ScalarEvolution::LoopInvariantPredicate>
11373 return std::nullopt;
11380 if (!ArLHS || ArLHS->
getLoop() != L)
11381 return std::nullopt;
11385 return std::nullopt;
11411 return std::nullopt;
11448 return std::nullopt;
11451std::optional<ScalarEvolution::LoopInvariantPredicate>
11456 Pred, LHS, RHS, L, CtxI, MaxIter))
11464 for (
auto *
Op :
UMin->operands())
11466 Pred, LHS, RHS, L, CtxI,
Op))
11468 return std::nullopt;
11471std::optional<ScalarEvolution::LoopInvariantPredicate>
11486 return std::nullopt;
11493 if (!AR || AR->
getLoop() != L)
11494 return std::nullopt;
11498 return std::nullopt;
11504 if (Step != One && Step != MinusOne)
11505 return std::nullopt;
11511 return std::nullopt;
11517 return std::nullopt;
11525 if (Step == MinusOne)
11529 return std::nullopt;
11535bool ScalarEvolution::isKnownPredicateViaConstantRanges(
CmpPredicate Pred,
11541 auto CheckRange = [&](
bool IsSigned) {
11544 return RangeLHS.
icmp(Pred, RangeRHS);
11553 if (CheckRange(
true) || CheckRange(
false))
11562bool ScalarEvolution::isKnownPredicateViaNoOverflow(CmpPredicate Pred,
11569 auto MatchBinaryAddToConst = [
this](
const SCEV *
X,
const SCEV *
Y,
11570 APInt &OutC1, APInt &OutC2,
11572 const SCEV *XNonConstOp, *XConstOp;
11573 const SCEV *YNonConstOp, *YConstOp;
11577 if (!splitBinaryAdd(
X, XConstOp, XNonConstOp, XFlagsPresent)) {
11580 XFlagsPresent = ExpectedFlags;
11585 if (!splitBinaryAdd(
Y, YConstOp, YNonConstOp, YFlagsPresent)) {
11588 YFlagsPresent = ExpectedFlags;
11591 if (YNonConstOp != XNonConstOp)
11599 if ((YFlagsPresent & ExpectedFlags) != ExpectedFlags)
11602 (XFlagsPresent & ExpectedFlags) != ExpectedFlags) {
11662bool ScalarEvolution::isKnownPredicateViaSplitting(CmpPredicate Pred,
11684bool ScalarEvolution::isImpliedViaGuard(
const BasicBlock *BB, CmpPredicate Pred,
11685 const SCEV *
LHS,
const SCEV *
RHS) {
11690 return any_of(*BB, [&](
const Instruction &
I) {
11691 using namespace llvm::PatternMatch;
11696 isImpliedCond(Pred,
LHS,
RHS, Condition,
false);
11710 if (!L || !DT.isReachableFromEntry(L->getHeader()))
11715 "This cannot be done on broken IR!");
11718 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
11727 if (LoopContinuePredicate && LoopContinuePredicate->
isConditional() &&
11728 isImpliedCond(Pred, LHS, RHS,
11730 LoopContinuePredicate->
getSuccessor(0) != L->getHeader()))
11735 if (WalkingBEDominatingConds)
11741 const auto &BETakenInfo = getBackedgeTakenInfo(L);
11742 const SCEV *LatchBECount = BETakenInfo.getExact(Latch,
this);
11749 const SCEV *LoopCounter =
11757 for (
auto &AssumeVH : AC.assumptions()) {
11764 if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0),
false))
11768 if (isImpliedViaGuard(Latch, Pred, LHS, RHS))
11771 for (
DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()];
11772 DTN != HeaderDTN; DTN = DTN->getIDom()) {
11773 assert(DTN &&
"should reach the loop header before reaching the root!");
11776 if (isImpliedViaGuard(BB, Pred, LHS, RHS))
11784 if (!ContinuePredicate || !ContinuePredicate->
isConditional())
11798 assert(DT.dominates(DominatingEdge, Latch) &&
"should be!");
11800 if (isImpliedCond(Pred, LHS, RHS, Condition,
11814 if (!DT.isReachableFromEntry(BB))
11818 "This cannot be done on broken IR!");
11826 const bool ProvingStrictComparison =
11828 bool ProvedNonStrictComparison =
false;
11829 bool ProvedNonEquality =
false;
11832 if (!ProvedNonStrictComparison)
11833 ProvedNonStrictComparison = Fn(NonStrictPredicate);
11834 if (!ProvedNonEquality)
11836 if (ProvedNonStrictComparison && ProvedNonEquality)
11841 if (ProvingStrictComparison) {
11843 return isKnownViaNonRecursiveReasoning(
P, LHS, RHS);
11845 if (SplitAndProve(ProofFn))
11850 auto ProveViaCond = [&](
const Value *Condition,
bool Inverse) {
11852 if (isImpliedCond(Pred, LHS, RHS, Condition,
Inverse, CtxI))
11854 if (ProvingStrictComparison) {
11856 return isImpliedCond(
P, LHS, RHS, Condition,
Inverse, CtxI);
11858 if (SplitAndProve(ProofFn))
11867 const Loop *ContainingLoop = LI.getLoopFor(BB);
11869 if (ContainingLoop && ContainingLoop->
getHeader() == BB)
11873 for (std::pair<const BasicBlock *, const BasicBlock *> Pair(PredBB, BB);
11874 Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
11886 for (
auto &AssumeVH : AC.assumptions()) {
11890 if (!DT.dominates(CI, BB))
11893 if (ProveViaCond(CI->getArgOperand(0),
false))
11899 F.getParent(), Intrinsic::experimental_guard);
11901 for (
const auto *GU : GuardDecl->users())
11903 if (Guard->getFunction() == BB->
getParent() && DT.dominates(Guard, BB))
11904 if (ProveViaCond(Guard->getArgOperand(0),
false))
11919 "LHS is not available at Loop Entry");
11921 "RHS is not available at Loop Entry");
11923 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
11934 if (FoundCondValue ==
11938 if (!PendingLoopPredicates.insert(FoundCondValue).second)
11942 [&]() { PendingLoopPredicates.erase(FoundCondValue); });
11945 const Value *Op0, *Op1;
11948 return isImpliedCond(Pred,
LHS,
RHS, Op0,
Inverse, CtxI) ||
11952 return isImpliedCond(Pred,
LHS,
RHS, Op0, Inverse, CtxI) ||
11953 isImpliedCond(Pred,
LHS,
RHS, Op1, Inverse, CtxI);
11957 if (!ICI)
return false;
11961 CmpPredicate FoundPred;
11970 return isImpliedCond(Pred,
LHS,
RHS, FoundPred, FoundLHS, FoundRHS, CtxI);
11973bool ScalarEvolution::isImpliedCond(CmpPredicate Pred,
const SCEV *
LHS,
11974 const SCEV *
RHS, CmpPredicate FoundPred,
11975 const SCEV *FoundLHS,
const SCEV *FoundRHS,
11976 const Instruction *CtxI) {
11986 auto *WideType = FoundLHS->
getType();
11998 TruncFoundLHS, TruncFoundRHS, CtxI))
12024 return isImpliedCondBalancedTypes(Pred,
LHS,
RHS, FoundPred, FoundLHS,
12028bool ScalarEvolution::isImpliedCondBalancedTypes(
12029 CmpPredicate Pred,
const SCEV *
LHS,
const SCEV *
RHS, CmpPredicate FoundPred,
12030 const SCEV *FoundLHS,
const SCEV *FoundRHS,
const Instruction *CtxI) {
12033 "Types should be balanced!");
12040 if (FoundLHS == FoundRHS)
12044 if (
LHS == FoundRHS ||
RHS == FoundLHS) {
12056 return isImpliedCondOperands(*
P,
LHS,
RHS, FoundLHS, FoundRHS, CtxI);
12073 LHS, FoundLHS, FoundRHS, CtxI);
12075 return isImpliedCondOperands(*
P,
LHS,
RHS, FoundRHS, FoundLHS, CtxI);
12097 assert(P1 != P2 &&
"Handled earlier!");
12101 if (IsSignFlippedPredicate(Pred, FoundPred)) {
12105 return isImpliedCondOperands(Pred,
LHS,
RHS, FoundLHS, FoundRHS, CtxI);
12108 CmpPredicate CanonicalPred = Pred, CanonicalFoundPred = FoundPred;
12109 const SCEV *CanonicalLHS =
LHS, *CanonicalRHS =
RHS,
12110 *CanonicalFoundLHS = FoundLHS, *CanonicalFoundRHS = FoundRHS;
12115 std::swap(CanonicalFoundLHS, CanonicalFoundRHS);
12126 return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS,
12127 CanonicalRHS, CanonicalFoundLHS,
12128 CanonicalFoundRHS);
12133 return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS,
12134 CanonicalRHS, CanonicalFoundLHS,
12135 CanonicalFoundRHS);
12142 const SCEVConstant *
C =
nullptr;
12143 const SCEV *
V =
nullptr;
12161 if (Min ==
C->getAPInt()) {
12166 APInt SharperMin = Min + 1;
12169 case ICmpInst::ICMP_SGE:
12170 case ICmpInst::ICMP_UGE:
12173 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(SharperMin),
12178 case ICmpInst::ICMP_SGT:
12179 case ICmpInst::ICMP_UGT:
12189 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min), CtxI))
12194 case ICmpInst::ICMP_SLE:
12195 case ICmpInst::ICMP_ULE:
12196 if (isImpliedCondOperands(ICmpInst::getSwappedCmpPredicate(Pred), RHS,
12197 LHS, V, getConstant(SharperMin), CtxI))
12201 case ICmpInst::ICMP_SLT:
12202 case ICmpInst::ICMP_ULT:
12203 if (isImpliedCondOperands(ICmpInst::getSwappedCmpPredicate(Pred), RHS,
12204 LHS, V, getConstant(Min), CtxI))
12218 if (isImpliedCondOperands(Pred,
LHS,
RHS, FoundLHS, FoundRHS, CtxI))
12222 if (isImpliedCondOperands(FoundPred,
LHS,
RHS, FoundLHS, FoundRHS, CtxI))
12225 if (isImpliedCondOperandsViaRanges(Pred,
LHS,
RHS, FoundPred, FoundLHS, FoundRHS))
12232bool ScalarEvolution::splitBinaryAdd(
const SCEV *Expr,
12233 const SCEV *&L,
const SCEV *&R,
12242std::optional<APInt>
12249 APInt DiffMul(BW, 1);
12252 for (
unsigned I = 0;
I < 8; ++
I) {
12261 if (LAR->getLoop() != MAR->getLoop())
12262 return std::nullopt;
12266 if (!LAR->isAffine() || !MAR->isAffine())
12267 return std::nullopt;
12269 if (LAR->getStepRecurrence(*
this) != MAR->getStepRecurrence(*
this))
12270 return std::nullopt;
12272 Less = LAR->getStart();
12273 More = MAR->getStart();
12278 auto MatchConstMul =
12279 [](
const SCEV *S) -> std::optional<std::pair<const SCEV *, APInt>> {
12284 return std::nullopt;
12286 if (
auto MatchedMore = MatchConstMul(More)) {
12287 if (
auto MatchedLess = MatchConstMul(
Less)) {
12288 if (MatchedMore->second == MatchedLess->second) {
12289 More = MatchedMore->first;
12290 Less = MatchedLess->first;
12291 DiffMul *= MatchedMore->second;
12302 Diff +=
C->getAPInt() * DiffMul;
12305 Diff -=
C->getAPInt() * DiffMul;
12308 Multiplicity[S] +=
Mul;
12310 auto Decompose = [&](
const SCEV *S,
int Mul) {
12317 Decompose(More, 1);
12318 Decompose(
Less, -1);
12322 const SCEV *NewMore =
nullptr, *NewLess =
nullptr;
12323 for (
const auto &[S,
Mul] : Multiplicity) {
12328 return std::nullopt;
12330 }
else if (
Mul == -1) {
12332 return std::nullopt;
12335 return std::nullopt;
12339 if (NewMore == More || NewLess ==
Less)
12340 return std::nullopt;
12346 if (!More && !
Less)
12350 if (!More || !
Less)
12351 return std::nullopt;
12355 return std::nullopt;
12358bool ScalarEvolution::isImpliedCondOperandsViaAddRecStart(
12382 if (!L->contains(ContextBB) || !DT.
dominates(ContextBB, L->getLoopLatch()))
12393 if (!L->contains(ContextBB) || !DT.
dominates(ContextBB, L->getLoopLatch()))
12403bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow(CmpPredicate Pred,
12406 const SCEV *FoundLHS,
12407 const SCEV *FoundRHS) {
12416 if (!AddRecFoundLHS)
12423 const Loop *
L = AddRecFoundLHS->getLoop();
12424 if (L != AddRecLHS->getLoop())
12463 if (!RDiff || *LDiff != *RDiff)
12466 if (LDiff->isMinValue())
12469 APInt FoundRHSLimit;
12472 FoundRHSLimit = -(*RDiff);
12484bool ScalarEvolution::isImpliedViaMerge(CmpPredicate Pred,
const SCEV *
LHS,
12485 const SCEV *
RHS,
const SCEV *FoundLHS,
12486 const SCEV *FoundRHS,
unsigned Depth) {
12487 const PHINode *LPhi =
nullptr, *RPhi =
nullptr;
12491 bool Erased = PendingMerges.erase(LPhi);
12492 assert(Erased &&
"Failed to erase LPhi!");
12496 bool Erased = PendingMerges.erase(RPhi);
12497 assert(Erased &&
"Failed to erase RPhi!");
12505 if (!PendingMerges.insert(Phi).second)
12519 if (!PendingMerges.insert(Phi).second)
12525 if (!LPhi && !RPhi)
12536 assert(LPhi &&
"LPhi should definitely be a SCEVUnknown Phi!");
12540 auto ProvedEasily = [&](
const SCEV *
S1,
const SCEV *S2) {
12541 return isKnownViaNonRecursiveReasoning(Pred,
S1, S2) ||
12542 isImpliedCondOperandsViaRanges(Pred,
S1, S2, Pred, FoundLHS, FoundRHS) ||
12543 isImpliedViaOperations(Pred,
S1, S2, FoundLHS, FoundRHS,
Depth);
12546 if (RPhi && RPhi->getParent() == LBB) {
12553 const SCEV *
R =
getSCEV(RPhi->getIncomingValueForBlock(IncBB));
12554 if (!ProvedEasily(L, R))
12565 auto *RLoop = RAR->
getLoop();
12566 auto *Predecessor = RLoop->getLoopPredecessor();
12567 assert(Predecessor &&
"Loop with AddRec with no predecessor?");
12569 if (!ProvedEasily(L1, RAR->
getStart()))
12571 auto *Latch = RLoop->getLoopLatch();
12572 assert(Latch &&
"Loop with AddRec with no latch?");
12593 if (
auto *Loop = LI.getLoopFor(LBB))
12596 if (!ProvedEasily(L,
RHS))
12603bool ScalarEvolution::isImpliedCondOperandsViaShift(CmpPredicate Pred,
12606 const SCEV *FoundLHS,
12607 const SCEV *FoundRHS) {
12610 if (
RHS == FoundRHS) {
12615 if (
LHS != FoundLHS)
12622 Value *Shiftee, *ShiftValue;
12624 using namespace PatternMatch;
12625 if (
match(SUFoundRHS->getValue(),
12627 auto *ShifteeS =
getSCEV(Shiftee);
12645bool ScalarEvolution::isImpliedCondOperands(CmpPredicate Pred,
const SCEV *
LHS,
12647 const SCEV *FoundLHS,
12648 const SCEV *FoundRHS,
12649 const Instruction *CtxI) {
12650 return isImpliedCondOperandsViaRanges(Pred,
LHS,
RHS, Pred, FoundLHS,
12652 isImpliedCondOperandsViaNoOverflow(Pred,
LHS,
RHS, FoundLHS,
12654 isImpliedCondOperandsViaShift(Pred,
LHS,
RHS, FoundLHS, FoundRHS) ||
12655 isImpliedCondOperandsViaAddRecStart(Pred,
LHS,
RHS, FoundLHS, FoundRHS,
12657 isImpliedCondOperandsHelper(Pred,
LHS,
RHS, FoundLHS, FoundRHS);
12661template <
typename MinMaxExprType>
12663 const SCEV *Candidate) {
12668 return is_contained(MinMaxExpr->operands(), Candidate);
12681 const SCEV *LStart, *RStart, *Step;
12731bool ScalarEvolution::isImpliedViaOperations(CmpPredicate Pred,
const SCEV *
LHS,
12733 const SCEV *FoundLHS,
12734 const SCEV *FoundRHS,
12738 "LHS and RHS have different sizes?");
12741 "FoundLHS and FoundRHS have different sizes?");
12775 auto GetOpFromSExt = [&](
const SCEV *S) {
12777 return Ext->getOperand();
12784 auto *OrigLHS =
LHS;
12785 auto *OrigFoundLHS = FoundLHS;
12786 LHS = GetOpFromSExt(
LHS);
12787 FoundLHS = GetOpFromSExt(FoundLHS);
12790 auto IsSGTViaContext = [&](
const SCEV *
S1,
const SCEV *S2) {
12793 FoundRHS,
Depth + 1);
12806 if (!LHSAddExpr->hasNoSignedWrap())
12809 auto *LL = LHSAddExpr->getOperand(0);
12810 auto *LR = LHSAddExpr->getOperand(1);
12814 auto IsSumGreaterThanRHS = [&](
const SCEV *
S1,
const SCEV *S2) {
12815 return IsSGTViaContext(
S1, MinusOne) && IsSGTViaContext(S2,
RHS);
12820 if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
12826 using namespace llvm::PatternMatch;
12845 if (!Numerator || Numerator->getType() != FoundLHS->
getType())
12853 auto *DTy = Denominator->getType();
12854 auto *FRHSTy = FoundRHS->
getType();
12855 if (DTy->isPointerTy() != FRHSTy->isPointerTy())
12874 IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
12885 auto *NegDenomMinusOne =
getMinusSCEV(MinusOne, DenominatorExt);
12887 IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
12895 if (isImpliedViaMerge(Pred, OrigLHS,
RHS, OrigFoundLHS, FoundRHS,
Depth + 1))
12928bool ScalarEvolution::isKnownViaNonRecursiveReasoning(CmpPredicate Pred,
12932 isKnownPredicateViaConstantRanges(Pred,
LHS,
RHS) ||
12935 isKnownPredicateViaNoOverflow(Pred,
LHS,
RHS);
12938bool ScalarEvolution::isImpliedCondOperandsHelper(CmpPredicate Pred,
12941 const SCEV *FoundLHS,
12942 const SCEV *FoundRHS) {
12978 if (isImpliedViaOperations(Pred,
LHS,
RHS, FoundLHS, FoundRHS))
12984bool ScalarEvolution::isImpliedCondOperandsViaRanges(
12985 CmpPredicate Pred,
const SCEV *
LHS,
const SCEV *
RHS, CmpPredicate FoundPred,
12986 const SCEV *FoundLHS,
const SCEV *FoundRHS) {
13000 ConstantRange FoundLHSRange =
13004 ConstantRange LHSRange = FoundLHSRange.
add(ConstantRange(*Addend));
13011 return LHSRange.
icmp(Pred, ConstRHS);
13014bool ScalarEvolution::canIVOverflowOnLT(
const SCEV *
RHS,
const SCEV *Stride,
13027 return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS);
13035 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS);
13038bool ScalarEvolution::canIVOverflowOnGT(
const SCEV *
RHS,
const SCEV *Stride,
13050 return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS);
13058 return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS);
13070const SCEV *ScalarEvolution::computeMaxBECountForLT(
const SCEV *Start,
13071 const SCEV *Stride,
13102 APInt Limit = MaxValue - (StrideForMaxBECount - 1);
13113 :
APIntOps::umax(MaxEnd, MinStart);
13120ScalarEvolution::howManyLessThans(
const SCEV *
LHS,
const SCEV *
RHS,
13121 const Loop *L,
bool IsSigned,
13122 bool ControlsOnlyExit,
bool AllowPredicates) {
13126 bool PredicatedIV =
false;
13131 auto canProveNUW = [&]() {
13134 if (!ControlsOnlyExit)
13155 Limit = Limit.
zext(OuterBitWidth);
13167 Type *Ty = ZExt->getType();
13178 if (!
IV && AllowPredicates) {
13183 PredicatedIV =
true;
13187 if (!
IV ||
IV->getLoop() != L || !
IV->isAffine())
13201 bool NoWrap = ControlsOnlyExit &&
IV->getNoWrapFlags(WrapType);
13204 const SCEV *Stride =
IV->getStepRecurrence(*
this);
13209 if (!PositiveStride) {
13261 auto wouldZeroStrideBeUB = [&]() {
13273 if (!wouldZeroStrideBeUB()) {
13277 }
else if (!NoWrap) {
13280 if (canIVOverflowOnLT(
RHS, Stride, IsSigned))
13293 const SCEV *
Start =
IV->getStart();
13299 const SCEV *OrigStart =
Start;
13300 const SCEV *OrigRHS =
RHS;
13301 if (
Start->getType()->isPointerTy()) {
13312 const SCEV *End =
nullptr, *BECount =
nullptr,
13313 *BECountIfBackedgeTaken =
nullptr;
13316 if (PositiveStride && RHSAddRec !=
nullptr && RHSAddRec->getLoop() == L &&
13317 RHSAddRec->getNoWrapFlags()) {
13330 const SCEV *RHSStart = RHSAddRec->getStart();
13331 const SCEV *RHSStride = RHSAddRec->getStepRecurrence(*
this);
13343 const SCEV *Denominator =
getMinusSCEV(Stride, RHSStride);
13352 BECountIfBackedgeTaken =
13357 if (BECount ==
nullptr) {
13362 const SCEV *MaxBECount = computeMaxBECountForLT(
13365 MaxBECount,
false , Predicates);
13372 auto *OrigStartMinusStride =
getMinusSCEV(OrigStart, Stride);
13399 const SCEV *Numerator =
13405 auto canProveRHSGreaterThanEqualStart = [&]() {
13424 auto *StartMinusOne =
13431 if (canProveRHSGreaterThanEqualStart()) {
13446 BECountIfBackedgeTaken =
13462 bool MayAddOverflow = [&] {
13508 if (Start == Stride || Start ==
getMinusSCEV(Stride, One)) {
13522 if (!MayAddOverflow) {
13534 const SCEV *ConstantMaxBECount;
13535 bool MaxOrZero =
false;
13537 ConstantMaxBECount = BECount;
13538 }
else if (BECountIfBackedgeTaken &&
13543 ConstantMaxBECount = BECountIfBackedgeTaken;
13546 ConstantMaxBECount = computeMaxBECountForLT(
13554 const SCEV *SymbolicMaxBECount =
13556 return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount, MaxOrZero,
13560ScalarEvolution::ExitLimit ScalarEvolution::howManyGreaterThans(
13561 const SCEV *
LHS,
const SCEV *
RHS,
const Loop *L,
bool IsSigned,
13562 bool ControlsOnlyExit,
bool AllowPredicates) {
13569 if (!
IV && AllowPredicates)
13576 if (!
IV ||
IV->getLoop() != L || !
IV->isAffine())
13580 bool NoWrap = ControlsOnlyExit &&
IV->getNoWrapFlags(WrapType);
13593 if (!Stride->
isOne() && !NoWrap)
13594 if (canIVOverflowOnGT(
RHS, Stride, IsSigned))
13597 const SCEV *
Start =
IV->getStart();
13598 const SCEV *End =
RHS;
13609 if (
Start->getType()->isPointerTy()) {
13644 const SCEV *ConstantMaxBECount =
13651 ConstantMaxBECount = BECount;
13652 const SCEV *SymbolicMaxBECount =
13655 return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount,
false,
13661 if (
Range.isFullSet())
13666 if (!SC->getValue()->isZero()) {
13672 return ShiftedAddRec->getNumIterationsInRange(
13673 Range.subtract(SC->getAPInt()), SE);
13704 APInt ExitVal = (End +
A).udiv(
A);
13717 ConstantInt::get(SE.
getContext(), ExitVal - 1), SE)->getValue()) &&
13718 "Linear scev computation is off in a bad way!");
13749 assert(!
Last->isZero() &&
"Recurrency with zero step?");
13774 Ty = Store->getValueOperand()->getType();
13776 Ty = Load->getType();
13789 assert(SE &&
"SCEVCallbackVH called with a null ScalarEvolution!");
13791 SE->ConstantEvolutionLoopExitValue.erase(PN);
13792 SE->eraseValueFromMap(getValPtr());
13796void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(
Value *V) {
13797 assert(SE &&
"SCEVCallbackVH called with a null ScalarEvolution!");
13807 : CallbackVH(
V), SE(se) {}
13816 : F(F), DL(F.
getDataLayout()), TLI(TLI), AC(AC), DT(DT), LI(LI),
13818 LoopDispositions(64), BlockDispositions(64) {
13830 F.getParent(), Intrinsic::experimental_guard);
13831 HasGuards = GuardDecl && !GuardDecl->use_empty();
13835 : F(Arg.F), DL(Arg.DL), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC),
13836 DT(Arg.DT), LI(Arg.LI), CouldNotCompute(
std::
move(Arg.CouldNotCompute)),
13837 ValueExprMap(
std::
move(Arg.ValueExprMap)),
13838 PendingLoopPredicates(
std::
move(Arg.PendingLoopPredicates)),
13839 PendingPhiRanges(
std::
move(Arg.PendingPhiRanges)),
13840 PendingMerges(
std::
move(Arg.PendingMerges)),
13841 ConstantMultipleCache(
std::
move(Arg.ConstantMultipleCache)),
13842 BackedgeTakenCounts(
std::
move(Arg.BackedgeTakenCounts)),
13843 PredicatedBackedgeTakenCounts(
13844 std::
move(Arg.PredicatedBackedgeTakenCounts)),
13845 BECountUsers(
std::
move(Arg.BECountUsers)),
13846 ConstantEvolutionLoopExitValue(
13847 std::
move(Arg.ConstantEvolutionLoopExitValue)),
13848 ValuesAtScopes(
std::
move(Arg.ValuesAtScopes)),
13849 ValuesAtScopesUsers(
std::
move(Arg.ValuesAtScopesUsers)),
13850 LoopDispositions(
std::
move(Arg.LoopDispositions)),
13851 LoopPropertiesCache(
std::
move(Arg.LoopPropertiesCache)),
13852 BlockDispositions(
std::
move(Arg.BlockDispositions)),
13853 SCEVUsers(
std::
move(Arg.SCEVUsers)),
13854 UnsignedRanges(
std::
move(Arg.UnsignedRanges)),
13855 SignedRanges(
std::
move(Arg.SignedRanges)),
13856 UniqueSCEVs(
std::
move(Arg.UniqueSCEVs)),
13857 UniquePreds(
std::
move(Arg.UniquePreds)),
13858 SCEVAllocator(
std::
move(Arg.SCEVAllocator)),
13859 LoopUsers(
std::
move(Arg.LoopUsers)),
13860 PredicatedSCEVRewrites(
std::
move(Arg.PredicatedSCEVRewrites)),
13861 FirstUnknown(Arg.FirstUnknown) {
13862 Arg.FirstUnknown =
nullptr;
13871 Tmp->~SCEVUnknown();
13873 FirstUnknown =
nullptr;
13875 ExprValueMap.clear();
13876 ValueExprMap.clear();
13878 BackedgeTakenCounts.clear();
13879 PredicatedBackedgeTakenCounts.clear();
13881 assert(PendingLoopPredicates.empty() &&
"isImpliedCond garbage");
13882 assert(PendingPhiRanges.empty() &&
"getRangeRef garbage");
13883 assert(PendingMerges.empty() &&
"isImpliedViaMerge garbage");
13884 assert(!WalkingBEDominatingConds &&
"isLoopBackedgeGuardedByCond garbage!");
13885 assert(!ProvingSplitPredicate &&
"ProvingSplitPredicate garbage!");
13907 L->getHeader()->printAsOperand(OS,
false);
13911 L->getExitingBlocks(ExitingBlocks);
13912 if (ExitingBlocks.
size() != 1)
13913 OS <<
"<multiple exits> ";
13917 OS <<
"backedge-taken count is ";
13920 OS <<
"Unpredictable backedge-taken count.";
13923 if (ExitingBlocks.
size() > 1)
13924 for (
BasicBlock *ExitingBlock : ExitingBlocks) {
13925 OS <<
" exit count for " << ExitingBlock->
getName() <<
": ";
13933 OS <<
"\n predicated exit count for " << ExitingBlock->
getName()
13936 OS <<
"\n Predicates:\n";
13937 for (
const auto *
P : Predicates)
13945 L->getHeader()->printAsOperand(OS,
false);
13950 OS <<
"constant max backedge-taken count is ";
13953 OS <<
", actual taken count either this or zero.";
13955 OS <<
"Unpredictable constant max backedge-taken count. ";
13960 L->getHeader()->printAsOperand(OS,
false);
13965 OS <<
"symbolic max backedge-taken count is ";
13968 OS <<
", actual taken count either this or zero.";
13970 OS <<
"Unpredictable symbolic max backedge-taken count. ";
13974 if (ExitingBlocks.
size() > 1)
13975 for (
BasicBlock *ExitingBlock : ExitingBlocks) {
13976 OS <<
" symbolic max exit count for " << ExitingBlock->
getName() <<
": ";
13986 OS <<
"\n predicated symbolic max exit count for "
13987 << ExitingBlock->
getName() <<
": ";
13989 OS <<
"\n Predicates:\n";
13990 for (
const auto *
P : Predicates)
14000 assert(!Preds.
empty() &&
"Different predicated BTC, but no predicates");
14002 L->getHeader()->printAsOperand(OS,
false);
14005 OS <<
"Predicated backedge-taken count is ";
14008 OS <<
"Unpredictable predicated backedge-taken count.";
14010 OS <<
" Predicates:\n";
14011 for (
const auto *
P : Preds)
14016 auto *PredConstantMax =
14018 if (PredConstantMax != ConstantBTC) {
14020 "different predicated constant max BTC but no predicates");
14022 L->getHeader()->printAsOperand(OS,
false);
14025 OS <<
"Predicated constant max backedge-taken count is ";
14028 OS <<
"Unpredictable predicated constant max backedge-taken count.";
14030 OS <<
" Predicates:\n";
14031 for (
const auto *
P : Preds)
14036 auto *PredSymbolicMax =
14038 if (SymbolicBTC != PredSymbolicMax) {
14040 "Different predicated symbolic max BTC, but no predicates");
14042 L->getHeader()->printAsOperand(OS,
false);
14045 OS <<
"Predicated symbolic max backedge-taken count is ";
14048 OS <<
"Unpredictable predicated symbolic max backedge-taken count.";
14050 OS <<
" Predicates:\n";
14051 for (
const auto *
P : Preds)
14057 L->getHeader()->printAsOperand(OS,
false);
14081 OS <<
"Computable";
14091 OS <<
"DoesNotDominate";
14097 OS <<
"ProperlyDominates";
14114 OS <<
"Classifying expressions for: ";
14115 F.printAsOperand(OS,
false);
14130 const Loop *L = LI.getLoopFor(
I.getParent());
14145 OS <<
"\t\t" "Exits: ";
14148 OS <<
"<<Unknown>>";
14154 for (
const auto *Iter = L; Iter; Iter = Iter->getParentLoop()) {
14156 Iter->getHeader()->printAsOperand(OS,
false);
14164 InnerL->getHeader()->printAsOperand(OS,
false);
14175 OS <<
"Determining loop execution counts for: ";
14176 F.printAsOperand(OS,
false);
14184 auto &Values = LoopDispositions[S];
14185 for (
auto &V : Values) {
14186 if (V.getPointer() == L)
14191 auto &Values2 = LoopDispositions[S];
14193 if (V.getPointer() == L) {
14202ScalarEvolution::computeLoopDisposition(
const SCEV *S,
const Loop *L) {
14221 assert(!L->contains(AR->
getLoop()) &&
"Containing loop's header does not"
14222 " dominate the contained loop's header?");
14250 bool HasVarying =
false;
14284 auto &Values = BlockDispositions[S];
14285 for (
auto &V : Values) {
14286 if (V.getPointer() == BB)
14291 auto &Values2 = BlockDispositions[S];
14293 if (V.getPointer() == BB) {
14302ScalarEvolution::computeBlockDisposition(
const SCEV *S,
const BasicBlock *BB) {
14332 bool Proper =
true;
14343 if (Instruction *
I =
14345 if (
I->getParent() == BB)
14347 if (DT.properlyDominates(
I->getParent(), BB))
14370void ScalarEvolution::forgetBackedgeTakenCounts(
const Loop *L,
14373 Predicated ? PredicatedBackedgeTakenCounts : BackedgeTakenCounts;
14374 auto It = BECounts.find(L);
14375 if (It != BECounts.end()) {
14376 for (
const ExitNotTakenInfo &ENT : It->second.ExitNotTaken) {
14377 for (
const SCEV *S : {ENT.ExactNotTaken, ENT.SymbolicMaxNotTaken}) {
14379 auto UserIt = BECountUsers.find(S);
14380 assert(UserIt != BECountUsers.end());
14385 BECounts.erase(It);
14393 while (!Worklist.
empty()) {
14395 auto Users = SCEVUsers.find(Curr);
14396 if (
Users != SCEVUsers.end())
14397 for (
const auto *User :
Users->second)
14398 if (ToForget.
insert(User).second)
14402 for (
const auto *S : ToForget)
14403 forgetMemoizedResultsImpl(S);
14405 for (
auto I = PredicatedSCEVRewrites.begin();
14406 I != PredicatedSCEVRewrites.end();) {
14407 std::pair<const SCEV *, const Loop *>
Entry =
I->first;
14408 if (ToForget.count(
Entry.first))
14409 PredicatedSCEVRewrites.erase(
I++);
14415void ScalarEvolution::forgetMemoizedResultsImpl(
const SCEV *S) {
14416 LoopDispositions.erase(S);
14417 BlockDispositions.erase(S);
14418 UnsignedRanges.erase(S);
14419 SignedRanges.erase(S);
14420 HasRecMap.erase(S);
14421 ConstantMultipleCache.erase(S);
14424 UnsignedWrapViaInductionTried.erase(AR);
14425 SignedWrapViaInductionTried.erase(AR);
14428 auto ExprIt = ExprValueMap.find(S);
14429 if (ExprIt != ExprValueMap.end()) {
14430 for (
Value *V : ExprIt->second) {
14431 auto ValueIt = ValueExprMap.find_as(V);
14432 if (ValueIt != ValueExprMap.end())
14433 ValueExprMap.erase(ValueIt);
14435 ExprValueMap.erase(ExprIt);
14438 auto ScopeIt = ValuesAtScopes.find(S);
14439 if (ScopeIt != ValuesAtScopes.end()) {
14440 for (
const auto &Pair : ScopeIt->second)
14443 std::make_pair(Pair.first, S));
14444 ValuesAtScopes.erase(ScopeIt);
14447 auto ScopeUserIt = ValuesAtScopesUsers.find(S);
14448 if (ScopeUserIt != ValuesAtScopesUsers.end()) {
14449 for (
const auto &Pair : ScopeUserIt->second)
14450 llvm::erase(ValuesAtScopes[Pair.second], std::make_pair(Pair.first, S));
14451 ValuesAtScopesUsers.erase(ScopeUserIt);
14454 auto BEUsersIt = BECountUsers.find(S);
14455 if (BEUsersIt != BECountUsers.end()) {
14457 auto Copy = BEUsersIt->second;
14458 for (
const auto &Pair : Copy)
14459 forgetBackedgeTakenCounts(Pair.getPointer(), Pair.getInt());
14460 BECountUsers.erase(BEUsersIt);
14463 auto FoldUser = FoldCacheUser.find(S);
14464 if (FoldUser != FoldCacheUser.end())
14465 for (
auto &KV : FoldUser->second)
14466 FoldCache.erase(KV);
14467 FoldCacheUser.erase(S);
14471ScalarEvolution::getUsedLoops(
const SCEV *S,
14473 struct FindUsedLoops {
14474 FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed)
14475 : LoopsUsed(LoopsUsed) {}
14476 SmallPtrSetImpl<const Loop *> &LoopsUsed;
14477 bool follow(
const SCEV *S) {
14483 bool isDone()
const {
return false; }
14486 FindUsedLoops
F(LoopsUsed);
14487 SCEVTraversal<FindUsedLoops>(F).visitAll(S);
14490void ScalarEvolution::getReachableBlocks(
14493 Worklist.
push_back(&F.getEntryBlock());
14494 while (!Worklist.
empty()) {
14496 if (!Reachable.
insert(BB).second)
14504 Worklist.
push_back(
C->isOne() ? TrueBB : FalseBB);
14511 if (isKnownPredicateViaConstantRanges(
Cmp->getCmpPredicate(), L, R)) {
14515 if (isKnownPredicateViaConstantRanges(
Cmp->getInverseCmpPredicate(), L,
14550 SCEVMapper SCM(SE2);
14552 SE2.getReachableBlocks(ReachableBlocks, F);
14554 auto GetDelta = [&](
const SCEV *Old,
const SCEV *New) ->
const SCEV * {
14572 while (!LoopStack.
empty()) {
14578 if (!ReachableBlocks.
contains(L->getHeader()))
14583 auto It = BackedgeTakenCounts.find(L);
14584 if (It == BackedgeTakenCounts.end())
14588 SCM.visit(It->second.getExact(L,
const_cast<ScalarEvolution *
>(
this)));
14608 const SCEV *Delta = GetDelta(CurBECount, NewBECount);
14609 if (Delta && !Delta->
isZero()) {
14610 dbgs() <<
"Trip Count for " << *L <<
" Changed!\n";
14611 dbgs() <<
"Old: " << *CurBECount <<
"\n";
14612 dbgs() <<
"New: " << *NewBECount <<
"\n";
14613 dbgs() <<
"Delta: " << *Delta <<
"\n";
14621 while (!Worklist.
empty()) {
14623 if (ValidLoops.
insert(L).second)
14624 Worklist.
append(L->begin(), L->end());
14626 for (
const auto &KV : ValueExprMap) {
14631 "AddRec references invalid loop");
14636 auto It = ExprValueMap.find(KV.second);
14637 if (It == ExprValueMap.end() || !It->second.contains(KV.first)) {
14638 dbgs() <<
"Value " << *KV.first
14639 <<
" is in ValueExprMap but not in ExprValueMap\n";
14644 if (!ReachableBlocks.
contains(
I->getParent()))
14646 const SCEV *OldSCEV = SCM.visit(KV.second);
14648 const SCEV *Delta = GetDelta(OldSCEV, NewSCEV);
14649 if (Delta && !Delta->
isZero()) {
14650 dbgs() <<
"SCEV for value " << *
I <<
" changed!\n"
14651 <<
"Old: " << *OldSCEV <<
"\n"
14652 <<
"New: " << *NewSCEV <<
"\n"
14653 <<
"Delta: " << *Delta <<
"\n";
14659 for (
const auto &KV : ExprValueMap) {
14660 for (
Value *V : KV.second) {
14661 const SCEV *S = ValueExprMap.lookup(V);
14663 dbgs() <<
"Value " << *V
14664 <<
" is in ExprValueMap but not in ValueExprMap\n";
14667 if (S != KV.first) {
14668 dbgs() <<
"Value " << *V <<
" mapped to " << *S <<
" rather than "
14669 << *KV.first <<
"\n";
14676 for (
const auto &S : UniqueSCEVs) {
14681 auto It = SCEVUsers.find(
Op);
14682 if (It != SCEVUsers.end() && It->second.count(&S))
14684 dbgs() <<
"Use of operand " << *
Op <<
" by user " << S
14685 <<
" is not being tracked!\n";
14691 for (
const auto &ValueAndVec : ValuesAtScopes) {
14693 for (
const auto &LoopAndValueAtScope : ValueAndVec.second) {
14694 const Loop *L = LoopAndValueAtScope.first;
14695 const SCEV *ValueAtScope = LoopAndValueAtScope.second;
14697 auto It = ValuesAtScopesUsers.find(ValueAtScope);
14698 if (It != ValuesAtScopesUsers.end() &&
14701 dbgs() <<
"Value: " << *
Value <<
", Loop: " << *L <<
", ValueAtScope: "
14702 << *ValueAtScope <<
" missing in ValuesAtScopesUsers\n";
14708 for (
const auto &ValueAtScopeAndVec : ValuesAtScopesUsers) {
14709 const SCEV *ValueAtScope = ValueAtScopeAndVec.first;
14710 for (
const auto &LoopAndValue : ValueAtScopeAndVec.second) {
14711 const Loop *L = LoopAndValue.first;
14712 const SCEV *
Value = LoopAndValue.second;
14714 auto It = ValuesAtScopes.find(
Value);
14715 if (It != ValuesAtScopes.end() &&
14716 is_contained(It->second, std::make_pair(L, ValueAtScope)))
14718 dbgs() <<
"Value: " << *
Value <<
", Loop: " << *L <<
", ValueAtScope: "
14719 << *ValueAtScope <<
" missing in ValuesAtScopes\n";
14725 auto VerifyBECountUsers = [&](
bool Predicated) {
14727 Predicated ? PredicatedBackedgeTakenCounts : BackedgeTakenCounts;
14728 for (
const auto &LoopAndBEInfo : BECounts) {
14729 for (
const ExitNotTakenInfo &ENT : LoopAndBEInfo.second.ExitNotTaken) {
14730 for (
const SCEV *S : {ENT.ExactNotTaken, ENT.SymbolicMaxNotTaken}) {
14732 auto UserIt = BECountUsers.find(S);
14733 if (UserIt != BECountUsers.end() &&
14734 UserIt->second.contains({ LoopAndBEInfo.first, Predicated }))
14736 dbgs() <<
"Value " << *S <<
" for loop " << *LoopAndBEInfo.first
14737 <<
" missing from BECountUsers\n";
14744 VerifyBECountUsers(
false);
14745 VerifyBECountUsers(
true);
14748 for (
auto &[S, Values] : LoopDispositions) {
14749 for (
auto [
Loop, CachedDisposition] : Values) {
14751 if (CachedDisposition != RecomputedDisposition) {
14752 dbgs() <<
"Cached disposition of " << *S <<
" for loop " << *
Loop
14753 <<
" is incorrect: cached " << CachedDisposition <<
", actual "
14754 << RecomputedDisposition <<
"\n";
14761 for (
auto &[S, Values] : BlockDispositions) {
14762 for (
auto [BB, CachedDisposition] : Values) {
14764 if (CachedDisposition != RecomputedDisposition) {
14765 dbgs() <<
"Cached disposition of " << *S <<
" for block %"
14766 << BB->
getName() <<
" is incorrect: cached " << CachedDisposition
14767 <<
", actual " << RecomputedDisposition <<
"\n";
14774 for (
auto [
FoldID, Expr] : FoldCache) {
14775 auto I = FoldCacheUser.find(Expr);
14776 if (
I == FoldCacheUser.end()) {
14777 dbgs() <<
"Missing entry in FoldCacheUser for cached expression " << *Expr
14782 dbgs() <<
"Missing FoldID in cached users of " << *Expr <<
"!\n";
14786 for (
auto [Expr, IDs] : FoldCacheUser) {
14787 for (
auto &
FoldID : IDs) {
14790 dbgs() <<
"Missing entry in FoldCache for expression " << *Expr
14795 dbgs() <<
"Entry in FoldCache doesn't match FoldCacheUser: " << *S
14796 <<
" != " << *Expr <<
"!\n";
14807 for (
auto [S, Multiple] : ConstantMultipleCache) {
14809 if ((Multiple != 0 && RecomputedMultiple != 0 &&
14810 Multiple.
urem(RecomputedMultiple) != 0 &&
14811 RecomputedMultiple.
urem(Multiple) != 0)) {
14812 dbgs() <<
"Incorrect cached computation in ConstantMultipleCache for "
14813 << *S <<
" : Computed " << RecomputedMultiple
14814 <<
" but cache contains " << Multiple <<
"!\n";
14822 FunctionAnalysisManager::Invalidator &Inv) {
14854 OS <<
"Printing analysis 'Scalar Evolution Analysis' for function '"
14855 <<
F.getName() <<
"':\n";
14861 "Scalar Evolution Analysis",
false,
true)
14910 const SCEV *LHS,
const SCEV *RHS) {
14912 assert(LHS->getType() == RHS->getType() &&
14913 "Type mismatch between LHS and RHS");
14916 ID.AddInteger(Pred);
14917 ID.AddPointer(LHS);
14918 ID.AddPointer(RHS);
14919 void *IP =
nullptr;
14920 if (
const auto *S = UniquePreds.FindNodeOrInsertPos(
ID, IP))
14924 UniquePreds.InsertNode(Eq, IP);
14935 ID.AddInteger(AddedFlags);
14936 void *IP =
nullptr;
14937 if (
const auto *S = UniquePreds.FindNodeOrInsertPos(
ID, IP))
14939 auto *OF =
new (SCEVAllocator)
14941 UniquePreds.InsertNode(OF, IP);
14961 SCEVPredicateRewriter
Rewriter(L, SE, NewPreds, Pred);
14962 return Rewriter.visit(S);
14968 for (
const auto *Pred : U->getPredicates())
14970 if (IPred->getLHS() == Expr &&
14972 return IPred->getRHS();
14974 if (IPred->getLHS() == Expr &&
14975 IPred->getPredicate() == ICmpInst::ICMP_EQ)
14976 return IPred->getRHS();
14979 return convertToAddRecWithPreds(Expr);
14982 const SCEV *visitZeroExtendExpr(
const SCEVZeroExtendExpr *Expr) {
14998 const SCEV *visitSignExtendExpr(
const SCEVSignExtendExpr *Expr) {
15015 explicit SCEVPredicateRewriter(
15016 const Loop *L, ScalarEvolution &SE,
15017 SmallVectorImpl<const SCEVPredicate *> *NewPreds,
15018 const SCEVPredicate *Pred)
15019 : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred),
L(
L) {}
15021 bool addOverflowAssumption(
const SCEVPredicate *
P) {
15024 return Pred && Pred->
implies(
P, SE);
15030 bool addOverflowAssumption(
const SCEVAddRecExpr *AR,
15033 return addOverflowAssumption(
A);
15042 const SCEV *convertToAddRecWithPreds(
const SCEVUnknown *Expr) {
15046 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
15048 if (!PredicatedRewrite)
15050 for (
const auto *
P : PredicatedRewrite->second){
15053 if (L != WP->getExpr()->getLoop())
15056 if (!addOverflowAssumption(
P))
15059 return PredicatedRewrite->first;
15062 SmallVectorImpl<const SCEVPredicate *> *NewPreds;
15063 const SCEVPredicate *Pred;
15072 return SCEVPredicateRewriter::rewrite(S, L, *
this,
nullptr, &Preds);
15079 S = SCEVPredicateRewriter::rewrite(S, L, *
this, &TransformPreds,
nullptr);
15099 if (!Step->
isOne())
15124 assert(LHS->getType() == RHS->getType() &&
"LHS and RHS types don't match");
15125 assert(LHS != RHS &&
"LHS and RHS are the same SCEV");
15138 return Op->LHS == LHS &&
Op->RHS == RHS;
15145 OS.
indent(
Depth) <<
"Equal predicate: " << *LHS <<
" == " << *RHS <<
"\n";
15147 OS.
indent(
Depth) <<
"Compare predicate: " << *LHS <<
" " << Pred <<
") "
15172 const SCEV *Start = AR->getStart();
15173 const SCEV *OpStart =
Op->AR->getStart();
15178 if (Start->getType()->isPointerTy() && Start->getType() != OpStart->
getType())
15181 const SCEV *Step = AR->getStepRecurrence(SE);
15182 const SCEV *OpStep =
Op->AR->getStepRecurrence(SE);
15235 if (Step->getValue()->getValue().isNonNegative())
15239 return ImpliedFlags;
15246 for (
const auto *
P : Preds)
15259 return this->implies(I, SE);
15267 for (
const auto *Pred : Preds)
15268 Pred->print(OS,
Depth);
15273 for (
const auto *Pred : Set->Preds)
15281 bool CheckImplies = Preds.
size() < 16;
15284 if (CheckImplies &&
implies(
N, SE))
15290 for (
auto *
P : Preds) {
15291 if (CheckImplies &&
N->implies(
P, SE))
15295 Preds = std::move(PrunedPreds);
15296 Preds.push_back(
N);
15303 Preds = std::make_unique<SCEVUnionPredicate>(
Empty, SE);
15308 for (
const auto *
Op :
Ops)
15313 SCEVUsers[
Op].insert(
User);
15317 const SCEV *Expr = SE.getSCEV(V);
15322 RewriteEntry &Entry = RewriteMap[Expr];
15325 if (Entry.second && Generation == Entry.first)
15326 return Entry.second;
15331 Expr = Entry.second;
15333 const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, *Preds);
15334 Entry = {Generation, NewSCEV};
15340 if (!BackedgeCount) {
15342 BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, Preds);
15343 for (
const auto *
P : Preds)
15346 return BackedgeCount;
15350 if (!SymbolicMaxBackedgeCount) {
15352 SymbolicMaxBackedgeCount =
15353 SE.getPredicatedSymbolicMaxBackedgeTakenCount(&L, Preds);
15354 for (
const auto *
P : Preds)
15357 return SymbolicMaxBackedgeCount;
15361 if (!SmallConstantMaxTripCount) {
15363 SmallConstantMaxTripCount = SE.getSmallConstantMaxTripCount(&L, &Preds);
15364 for (
const auto *
P : Preds)
15367 return *SmallConstantMaxTripCount;
15371 if (Preds->implies(&Pred, SE))
15376 Preds = std::make_unique<SCEVUnionPredicate>(NewPreds, SE);
15377 updateGeneration();
15384void PredicatedScalarEvolution::updateGeneration() {
15386 if (++Generation == 0) {
15387 for (
auto &
II : RewriteMap) {
15388 const SCEV *Rewritten =
II.second.second;
15405 auto II = FlagsMap.insert({V, Flags});
15418 auto II = FlagsMap.find(V);
15420 if (
II != FlagsMap.end())
15429 auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds);
15434 for (
const auto *
P : NewPreds)
15437 RewriteMap[SE.getSCEV(V)] = {Generation, New};
15443 : RewriteMap(
Init.RewriteMap), SE(
Init.SE), L(
Init.L),
15446 Generation(
Init.Generation), BackedgeCount(
Init.BackedgeCount) {
15447 for (
auto I :
Init.FlagsMap)
15448 FlagsMap.insert(
I);
15453 for (
auto *BB : L.getBlocks())
15454 for (
auto &
I : *BB) {
15455 if (!SE.isSCEVable(
I.getType()))
15458 auto *Expr = SE.getSCEV(&
I);
15459 auto II = RewriteMap.find(Expr);
15461 if (
II == RewriteMap.end())
15465 if (
II->second.second == Expr)
15470 OS.
indent(
Depth + 2) <<
"--> " << *
II->second.second <<
"\n";
15478 LoopGuards Guards(SE);
15486void ScalarEvolution::LoopGuards::collectFromPHI(
15494 using MinMaxPattern = std::pair<const SCEVConstant *, SCEVTypes>;
15495 auto GetMinMaxConst = [&](
unsigned IncomingIdx) -> MinMaxPattern {
15509 auto &RewriteMap =
G->second.RewriteMap;
15510 if (RewriteMap.empty())
15512 auto S = RewriteMap.find(SE.
getSCEV(
Phi.getIncomingValue(IncomingIdx)));
15513 if (S == RewriteMap.end())
15519 return {C0, SM->getSCEVType()};
15522 auto MergeMinMaxConst = [](MinMaxPattern
P1,
15523 MinMaxPattern P2) -> MinMaxPattern {
15524 auto [C1,
T1] =
P1;
15525 auto [C2, T2] = P2;
15526 if (!C1 || !C2 ||
T1 != T2)
15530 return {C1->getAPInt().
ult(C2->getAPInt()) ? C1 : C2,
T1};
15532 return {C1->getAPInt().
slt(C2->getAPInt()) ? C1 : C2,
T1};
15534 return {C1->getAPInt().
ugt(C2->getAPInt()) ? C1 : C2,
T1};
15536 return {C1->getAPInt().
sgt(C2->getAPInt()) ? C1 : C2,
T1};
15541 auto P = GetMinMaxConst(0);
15542 for (
unsigned int In = 1;
In <
Phi.getNumIncomingValues();
In++) {
15545 P = MergeMinMaxConst(
P, GetMinMaxConst(In));
15548 const SCEV *
LHS = SE.
getSCEV(
const_cast<PHINode *
>(&Phi));
15551 Guards.RewriteMap.insert({
LHS,
RHS});
15559 const APInt &DivisorVal,
15561 const APInt *ExprVal;
15574 const APInt &DivisorVal,
15576 const APInt *ExprVal;
15584 return SE.
getConstant(*ExprVal + DivisorVal - Rem);
15598 const SCEV *URemRHS =
nullptr;
15602 const SCEV *Multiple =
15604 DivInfo[URemLHS] = Multiple;
15606 Multiples[URemLHS] =
C->getAPInt();
15626 auto IsMinMaxSCEVWithNonNegativeConstant =
15630 if (
MinMax->getNumOperands() != 2)
15633 if (
C->getAPInt().isNegative())
15635 SCTy =
MinMax->getSCEVType();
15644 const SCEV *MinMaxLHS =
nullptr, *MinMaxRHS =
nullptr;
15646 if (!IsMinMaxSCEVWithNonNegativeConstant(MinMaxExpr, SCTy, MinMaxLHS,
15651 auto *DivisibleExpr =
15659void ScalarEvolution::LoopGuards::collectFromBlock(
15661 const BasicBlock *
Block,
const BasicBlock *Pred,
15669 DenseMap<const SCEV *, const SCEV *> &RewriteMap,
15680 &ExprsToRewrite]() {
15681 const SCEVConstant *C1;
15694 if (ExactRegion.isWrappedSet() || ExactRegion.isFullSet())
15696 auto [
I,
Inserted] = RewriteMap.try_emplace(LHSUnknown);
15697 const SCEV *RewrittenLHS =
Inserted ? LHSUnknown :
I->second;
15705 if (MatchRangeCheckIdiom())
15722 auto AddRewrite = [&](
const SCEV *From,
const SCEV *FromRewritten,
15724 if (From == FromRewritten)
15726 RewriteMap[From] = To;
15732 auto GetMaybeRewritten = [&](
const SCEV *S) {
15733 return RewriteMap.lookup_or(S, S);
15736 const SCEV *RewrittenLHS = GetMaybeRewritten(
LHS);
15738 const APInt &DividesBy =
15753 switch (Predicate) {
15782 SmallPtrSet<const SCEV *, 16> Visited;
15784 auto EnqueueOperands = [&Worklist](
const SCEVNAryExpr *S) {
15788 while (!Worklist.
empty()) {
15792 if (!Visited.
insert(From).second)
15794 const SCEV *FromRewritten = GetMaybeRewritten(From);
15795 const SCEV *To =
nullptr;
15797 switch (Predicate) {
15802 EnqueueOperands(
UMax);
15808 EnqueueOperands(
SMax);
15814 EnqueueOperands(
UMin);
15820 EnqueueOperands(
SMin);
15828 const SCEV *OneAlignedUp =
15830 To = SE.
getUMaxExpr(FromRewritten, OneAlignedUp);
15842 const SCEVConstant *
C;
15851 Guards.NotEqual.insert({
LHS,
RHS});
15860 AddRewrite(From, FromRewritten, To);
15877 SE.F.
getParent(), Intrinsic::experimental_guard);
15879 for (
const auto *GU : GuardDecl->users())
15881 if (Guard->getFunction() ==
Block->getParent() &&
15890 unsigned NumCollectedConditions = 0;
15892 std::pair<const BasicBlock *, const BasicBlock *> Pair(Pred,
Block);
15894 Pair = SE.getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
15896 const BranchInst *LoopEntryPredicate =
15903 NumCollectedConditions++;
15907 if (
Depth > 0 && NumCollectedConditions == 2)
15915 if (Pair.second->hasNPredecessorsOrMore(2) &&
15917 SmallDenseMap<const BasicBlock *, LoopGuards> IncomingGuards;
15918 for (
auto &Phi : Pair.second->phis())
15929 for (
auto [Term, EnterIfTrue] :
reverse(Terms)) {
15930 SmallVector<Value *, 8> Worklist;
15931 SmallPtrSet<Value *, 8> Visited;
15933 while (!Worklist.
empty()) {
15940 EnterIfTrue ?
Cmp->getPredicate() :
Cmp->getInversePredicate();
15964 DenseMap<const SCEV *, APInt> Multiples;
15966 for (
const auto &[Predicate,
LHS,
RHS] : GuardsToProcess) {
15973 for (
const auto &[Predicate,
LHS,
RHS] : GuardsToProcess)
15974 CollectCondition(Predicate,
LHS,
RHS, Guards.RewriteMap, DivGuards);
15978 for (
const auto &[K, Divisor] : Multiples) {
15979 const SCEV *DivisorSCEV = SE.
getConstant(Divisor);
15980 Guards.RewriteMap[
K] =
15982 Guards.
rewrite(K), Divisor, SE),
15991 Guards.PreserveNUW =
true;
15992 Guards.PreserveNSW =
true;
15993 for (
const SCEV *Expr : ExprsToRewrite) {
15994 const SCEV *RewriteTo = Guards.RewriteMap[Expr];
15995 Guards.PreserveNUW &=
15997 Guards.PreserveNSW &=
16004 if (ExprsToRewrite.size() > 1) {
16005 for (
const SCEV *Expr : ExprsToRewrite) {
16006 const SCEV *RewriteTo = Guards.RewriteMap[Expr];
16007 Guards.RewriteMap.erase(Expr);
16008 Guards.RewriteMap.insert({Expr, Guards.
rewrite(RewriteTo)});
16017 class SCEVLoopGuardRewriter
16028 NotEqual(Guards.NotEqual) {
16029 if (Guards.PreserveNUW)
16031 if (Guards.PreserveNSW)
16038 return Map.lookup_or(Expr, Expr);
16042 if (
const SCEV *S = Map.lookup(Expr))
16049 unsigned Bitwidth = Ty->getScalarSizeInBits() / 2;
16050 while (Bitwidth % 8 == 0 && Bitwidth >= 8 &&
16051 Bitwidth >
Op->getType()->getScalarSizeInBits()) {
16053 auto *NarrowExt = SE.getZeroExtendExpr(
Op, NarrowTy);
16054 if (
const SCEV *S = Map.lookup(NarrowExt))
16055 return SE.getZeroExtendExpr(S, Ty);
16056 Bitwidth = Bitwidth / 2;
16064 if (
const SCEV *S = Map.lookup(Expr))
16071 if (
const SCEV *S = Map.lookup(Expr))
16077 if (
const SCEV *S = Map.lookup(Expr))
16085 auto RewriteSubtraction = [&](
const SCEV *S) ->
const SCEV * {
16086 const SCEV *LHS, *RHS;
16090 if (NotEqual.contains({LHS, RHS})) {
16092 SE.getOne(S->
getType()), SE.getConstantMultiple(S), SE);
16093 return SE.getUMaxExpr(OneAlignedUp, S);
16100 if (
const SCEV *Rewritten = RewriteSubtraction(Expr))
16111 if (
const SCEV *Rewritten = RewriteSubtraction(
Add))
16112 return SE.getAddExpr(
16115 if (
const SCEV *S = Map.lookup(
Add))
16116 return SE.getAddExpr(Expr->
getOperand(0), S);
16128 : SE.getAddExpr(Operands,
16144 : SE.getMulExpr(Operands,
16150 if (RewriteMap.empty() && NotEqual.empty())
16153 SCEVLoopGuardRewriter
Rewriter(SE, *
this);
16154 return Rewriter.visit(Expr);
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
SmallPtrSet< const BasicBlock *, 8 > VisitedBlocks
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
This file defines a hash set that can be used to remove duplication of nodes in a graph.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This defines the Use class.
iv Induction Variable Users
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
PowerPC Reduce CR logical Operation
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
const SmallVectorImpl< MachineOperand > & Cond
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
SI optimize exec mask operations pre RA
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
This file provides utility classes that use RAII to save and restore values.
bool SCEVMinMaxExprContains(const SCEV *Root, const SCEV *OperandToFind, SCEVTypes RootKind)
static cl::opt< unsigned > MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden, cl::desc("Max coefficients in AddRec during evolving"), cl::init(8))
static cl::opt< unsigned > RangeIterThreshold("scev-range-iter-threshold", cl::Hidden, cl::desc("Threshold for switching to iteratively computing SCEV ranges"), cl::init(32))
static const Loop * isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI)
static unsigned getConstantTripCount(const SCEVConstant *ExitCount)
static int CompareValueComplexity(const LoopInfo *const LI, Value *LV, Value *RV, unsigned Depth)
Compare the two values LV and RV in terms of their "complexity" where "complexity" is a partial (and ...
static const SCEV * getNextSCEVDivisibleByDivisor(const SCEV *Expr, const APInt &DivisorVal, ScalarEvolution &SE)
static void PushLoopPHIs(const Loop *L, SmallVectorImpl< Instruction * > &Worklist, SmallPtrSetImpl< Instruction * > &Visited)
Push PHI nodes in the header of the given loop onto the given Worklist.
static void insertFoldCacheEntry(const ScalarEvolution::FoldID &ID, const SCEV *S, DenseMap< ScalarEvolution::FoldID, const SCEV * > &FoldCache, DenseMap< const SCEV *, SmallVector< ScalarEvolution::FoldID, 2 > > &FoldCacheUser)
static cl::opt< bool > ClassifyExpressions("scalar-evolution-classify-expressions", cl::Hidden, cl::init(true), cl::desc("When printing analysis, include information on every instruction"))
static bool CanConstantFold(const Instruction *I)
Return true if we can constant fold an instruction of the specified type, assuming that all operands ...
static cl::opt< unsigned > AddOpsInlineThreshold("scev-addops-inline-threshold", cl::Hidden, cl::desc("Threshold for inlining addition operands into a SCEV"), cl::init(500))
static cl::opt< unsigned > MaxLoopGuardCollectionDepth("scalar-evolution-max-loop-guard-collection-depth", cl::Hidden, cl::desc("Maximum depth for recursive loop guard collection"), cl::init(1))
static cl::opt< bool > VerifyIR("scev-verify-ir", cl::Hidden, cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"), cl::init(false))
static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge, Value *&C, Value *&LHS, Value *&RHS)
static const SCEV * getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty, ScalarEvolution *SE, unsigned Depth)
static std::optional< APInt > MinOptional(std::optional< APInt > X, std::optional< APInt > Y)
Helper function to compare optional APInts: (a) if X and Y both exist, return min(X,...
static cl::opt< unsigned > MulOpsInlineThreshold("scev-mulops-inline-threshold", cl::Hidden, cl::desc("Threshold for inlining multiplication operands into a SCEV"), cl::init(32))
static void GroupByComplexity(SmallVectorImpl< const SCEV * > &Ops, LoopInfo *LI, DominatorTree &DT)
Given a list of SCEV objects, order them by their complexity, and group objects of the same complexit...
static const SCEV * constantFoldAndGroupOps(ScalarEvolution &SE, LoopInfo &LI, DominatorTree &DT, SmallVectorImpl< const SCEV * > &Ops, FoldT Fold, IsIdentityT IsIdentity, IsAbsorberT IsAbsorber)
Performs a number of common optimizations on the passed Ops.
static bool isDivisibilityGuard(const SCEV *LHS, const SCEV *RHS, ScalarEvolution &SE)
static std::optional< const SCEV * > createNodeForSelectViaUMinSeq(ScalarEvolution *SE, const SCEV *CondExpr, const SCEV *TrueExpr, const SCEV *FalseExpr)
static Constant * BuildConstantFromSCEV(const SCEV *V)
This builds up a Constant using the ConstantExpr interface.
static ConstantInt * EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, ScalarEvolution &SE)
static const SCEV * BinomialCoefficient(const SCEV *It, unsigned K, ScalarEvolution &SE, Type *ResultTy)
Compute BC(It, K). The result has width W. Assume, K > 0.
static cl::opt< unsigned > MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden, cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"), cl::init(8))
static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr, const SCEV *Candidate)
Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values?
static PHINode * getConstantEvolvingPHI(Value *V, const Loop *L)
getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node in the loop that V is deri...
static const SCEV * SolveLinEquationWithOverflow(const APInt &A, const SCEV *B, SmallVectorImpl< const SCEVPredicate * > *Predicates, ScalarEvolution &SE, const Loop *L)
Finds the minimum unsigned root of the following equation:
static cl::opt< unsigned > MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden, cl::desc("Maximum number of iterations SCEV will " "symbolically execute a constant " "derived loop"), cl::init(100))
static bool MatchBinarySub(const SCEV *S, const SCEV *&LHS, const SCEV *&RHS)
static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow)
static void PrintSCEVWithTypeHint(raw_ostream &OS, const SCEV *S)
When printing a top-level SCEV for trip counts, it's helpful to include a type for constants which ar...
static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, const Loop *L)
static bool containsConstantInAddMulChain(const SCEV *StartExpr)
Determine if any of the operands in this SCEV are a constant or if any of the add or multiply express...
static const SCEV * getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty, ScalarEvolution *SE, unsigned Depth)
static bool hasHugeExpression(ArrayRef< const SCEV * > Ops)
Returns true if Ops contains a huge SCEV (the subtree of S contains at least HugeExprThreshold nodes)...
static cl::opt< unsigned > MaxPhiSCCAnalysisSize("scalar-evolution-max-scc-analysis-depth", cl::Hidden, cl::desc("Maximum amount of nodes to process while searching SCEVUnknown " "Phi strongly connected components"), cl::init(8))
static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
static bool collectDivisibilityInformation(ICmpInst::Predicate Predicate, const SCEV *LHS, const SCEV *RHS, DenseMap< const SCEV *, const SCEV * > &DivInfo, DenseMap< const SCEV *, APInt > &Multiples, ScalarEvolution &SE)
static cl::opt< unsigned > MaxSCEVOperationsImplicationDepth("scalar-evolution-max-scev-operations-implication-depth", cl::Hidden, cl::desc("Maximum depth of recursive SCEV operations implication analysis"), cl::init(2))
static void PushDefUseChildren(Instruction *I, SmallVectorImpl< Instruction * > &Worklist, SmallPtrSetImpl< Instruction * > &Visited)
Push users of the given Instruction onto the given Worklist.
static std::optional< APInt > SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec, const ConstantRange &Range, ScalarEvolution &SE)
Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n iterations.
static cl::opt< bool > UseContextForNoWrapFlagInference("scalar-evolution-use-context-for-no-wrap-flag-strenghening", cl::Hidden, cl::desc("Infer nuw/nsw flags using context where suitable"), cl::init(true))
static cl::opt< bool > EnableFiniteLoopControl("scalar-evolution-finite-loop", cl::Hidden, cl::desc("Handle <= and >= in finite loops"), cl::init(true))
static std::optional< std::tuple< APInt, APInt, APInt, APInt, unsigned > > GetQuadraticEquation(const SCEVAddRecExpr *AddRec)
For a given quadratic addrec, generate coefficients of the corresponding quadratic equation,...
static bool isKnownPredicateExtendIdiom(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
static std::optional< BinaryOp > MatchBinaryOp(Value *V, const DataLayout &DL, AssumptionCache &AC, const DominatorTree &DT, const Instruction *CxtI)
Try to map V into a BinaryOp, and return std::nullopt on failure.
static std::optional< APInt > SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE)
Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n iterations.
static std::optional< APInt > TruncIfPossible(std::optional< APInt > X, unsigned BitWidth)
Helper function to truncate an optional APInt to a given BitWidth.
static cl::opt< unsigned > MaxSCEVCompareDepth("scalar-evolution-max-scev-compare-depth", cl::Hidden, cl::desc("Maximum depth of recursive SCEV complexity comparisons"), cl::init(32))
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE, const SCEVConstant *ConstantTerm, const SCEVAddExpr *WholeAddExpr)
static cl::opt< unsigned > MaxConstantEvolvingDepth("scalar-evolution-max-constant-evolving-depth", cl::Hidden, cl::desc("Maximum depth of recursive constant evolving"), cl::init(32))
static ConstantRange getRangeForAffineARHelper(APInt Step, const ConstantRange &StartRange, const APInt &MaxBECount, bool Signed)
static std::optional< ConstantRange > GetRangeFromMetadata(Value *V)
Helper method to assign a range to V from metadata present in the IR.
static cl::opt< unsigned > HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden, cl::desc("Size of the expression which is considered huge"), cl::init(4096))
static SCEV::NoWrapFlags StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type, ArrayRef< const SCEV * > Ops, SCEV::NoWrapFlags Flags)
static Type * isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI, bool &Signed, ScalarEvolution &SE)
Helper function to createAddRecFromPHIWithCasts.
static Constant * EvaluateExpression(Value *V, const Loop *L, DenseMap< Instruction *, Constant * > &Vals, const DataLayout &DL, const TargetLibraryInfo *TLI)
EvaluateExpression - Given an expression that passes the getConstantEvolvingPHI predicate,...
static const SCEV * getPreviousSCEVDivisibleByDivisor(const SCEV *Expr, const APInt &DivisorVal, ScalarEvolution &SE)
static const SCEV * MatchNotExpr(const SCEV *Expr)
If Expr computes ~A, return A else return nullptr.
static cl::opt< unsigned > MaxValueCompareDepth("scalar-evolution-max-value-compare-depth", cl::Hidden, cl::desc("Maximum depth of recursive value complexity comparisons"), cl::init(2))
static const SCEV * applyDivisibilityOnMinMaxExpr(const SCEV *MinMaxExpr, APInt Divisor, ScalarEvolution &SE)
static cl::opt< bool, true > VerifySCEVOpt("verify-scev", cl::Hidden, cl::location(VerifySCEV), cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"))
static const SCEV * getSignedOverflowLimitForStep(const SCEV *Step, ICmpInst::Predicate *Pred, ScalarEvolution *SE)
static cl::opt< unsigned > MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden, cl::desc("Maximum depth of recursive arithmetics"), cl::init(32))
static bool HasSameValue(const SCEV *A, const SCEV *B)
SCEV structural equivalence is usually sufficient for testing whether two expressions are equal,...
static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow)
Compute the result of "n choose k", the binomial coefficient.
static std::optional< int > CompareSCEVComplexity(const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS, DominatorTree &DT, unsigned Depth=0)
static bool CollectAddOperandsWithScales(SmallDenseMap< const SCEV *, APInt, 16 > &M, SmallVectorImpl< const SCEV * > &NewOps, APInt &AccumulatedConstant, ArrayRef< const SCEV * > Ops, const APInt &Scale, ScalarEvolution &SE)
Process the given Ops list, which is a list of operands to be added under the given scale,...
static bool canConstantEvolve(Instruction *I, const Loop *L)
Determine whether this instruction can constant evolve within this loop assuming its operands can all...
static PHINode * getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L, DenseMap< Instruction *, PHINode * > &PHIMap, unsigned Depth)
getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by recursing through each instructi...
static bool scevUnconditionallyPropagatesPoisonFromOperands(SCEVTypes Kind)
static cl::opt< bool > VerifySCEVStrict("verify-scev-strict", cl::Hidden, cl::desc("Enable stricter verification with -verify-scev is passed"))
static Constant * getOtherIncomingValue(PHINode *PN, BasicBlock *BB)
static cl::opt< bool > UseExpensiveRangeSharpening("scalar-evolution-use-expensive-range-sharpening", cl::Hidden, cl::init(false), cl::desc("Use more powerful methods of sharpening expression ranges. May " "be costly in terms of compile time"))
static const SCEV * getUnsignedOverflowLimitForStep(const SCEV *Step, ICmpInst::Predicate *Pred, ScalarEvolution *SE)
static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Is LHS Pred RHS true on the virtue of LHS or RHS being a Min or Max expression?
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
static bool InBlock(const Value *V, const BasicBlock *BB)
Provides some synthesis utilities to produce sequences of values.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static SymbolRef::Type getType(const Symbol *Sym)
LocallyHashedType DenseMapInfo< LocallyHashedType >::Empty
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
static std::optional< bool > isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS, const Value *ARHS, const Value *BLHS, const Value *BRHS)
Return true if "icmp Pred BLHS BRHS" is true whenever "icmp PredALHS ARHS" is true.
Virtual Register Rewriter
static const uint32_t IV[8]
SCEVCastSinkingRewriter(ScalarEvolution &SE, Type *TargetTy, ConversionFn CreatePtrCast)
static const SCEV * rewrite(const SCEV *Scev, ScalarEvolution &SE, Type *TargetTy, ConversionFn CreatePtrCast)
const SCEV * visitUnknown(const SCEVUnknown *Expr)
const SCEV * visitMulExpr(const SCEVMulExpr *Expr)
const SCEV * visitAddExpr(const SCEVAddExpr *Expr)
const SCEV * visit(const SCEV *S)
Class for arbitrary precision integers.
LLVM_ABI APInt umul_ov(const APInt &RHS, bool &Overflow) const
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
LLVM_ABI APInt getHiBits(unsigned numBits) const
Compute an APInt containing numBits highbits from this APInt.
unsigned getActiveBits() const
Compute the number of active bits in the value.
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
APInt abs() const
Get the absolute value.
bool sgt(const APInt &RHS) const
Signed greater than comparison.
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
bool isSignMask() const
Check if the APInt's value is returned by getSignMask.
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
static APInt getMinValue(unsigned numBits)
Gets minimum unsigned value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
bool sle(const APInt &RHS) const
Signed less or equal comparison.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
bool isNonPositive() const
Determine if this APInt Value is non-positive (<= 0).
unsigned countTrailingZeros() const
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
unsigned logBase2() const
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
LLVM_ABI APInt multiplicativeInverse() const
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
LLVM_ABI APInt sext(unsigned width) const
Sign extend to a new width.
APInt shl(unsigned shiftAmt) const
Left-shift function.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
bool isSignBitSet() const
Determine if sign bit of this APInt is set.
bool slt(const APInt &RHS) const
Signed less than comparison.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
This templated class represents "all analyses that operate over <a particular IR unit>" (e.g. a Function or a Module) in its first template argument.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
AnalysisUsage & addRequiredTransitive()
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
MutableArrayRef< WeakVH > assumptions()
Access the list of assumption handles currently tracked for this function.
LLVM_ABI bool isSingleEdge() const
Check if this is the only edge between Start and End.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
const Function * getParent() const
Return the enclosing method, or null if none.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Instruction & front() const
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well formed.
LLVM_ABI unsigned getNoWrapKind() const
Returns one of OBO::NoSignedWrap or OBO::NoUnsignedWrap.
LLVM_ABI Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
BinaryOps getOpcode() const
Conditional or Unconditional Branch instruction.
bool isConditional() const
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
This class represents a function call, abstracting a target machine's calling convention.
virtual void deleted()
Callback for Value destruction.
bool isFalseWhenEqual() const
This is just a convenience.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ ICMP_ULT
unsigned less than
@ ICMP_SGE
signed greater or equal
@ ICMP_ULE
unsigned less or equal
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
bool isTrueWhenEqual() const
This is just a convenience.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static LLVM_ABI std::optional< CmpPredicate > getMatching(CmpPredicate A, CmpPredicate B)
Compares two CmpPredicates taking samesign into account and returns the canonicalized CmpPredicate if...
LLVM_ABI CmpInst::Predicate getPreferredSignedPredicate() const
Attempts to return a signed CmpInst::Predicate from the CmpPredicate.
CmpInst::Predicate dropSameSign() const
Drops samesign information.
static LLVM_ABI Constant * getNot(Constant *C)
static LLVM_ABI Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getPtrToAddr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
static LLVM_ABI Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)
static LLVM_ABI Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
This is the shared class of boolean and integer constants.
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
This class represents a range of values.
LLVM_ABI ConstantRange add(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an addition of a value in this ran...
LLVM_ABI ConstantRange zextOrTrunc(uint32_t BitWidth) const
Make this range have the bit width given by BitWidth.
PreferredRangeType
If represented precisely, the result of some range operations may consist of multiple disjoint ranges...
LLVM_ABI bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
const APInt & getLower() const
Return the lower value for this range.
LLVM_ABI ConstantRange urem(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an unsigned remainder operation of...
LLVM_ABI bool isFullSet() const
Return true if this set contains all of the elements possible for this data-type.
LLVM_ABI bool icmp(CmpInst::Predicate Pred, const ConstantRange &Other) const
Does the predicate Pred hold between ranges this and Other?
LLVM_ABI bool isEmptySet() const
Return true if this set contains no members.
LLVM_ABI ConstantRange zeroExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
LLVM_ABI bool isSignWrappedSet() const
Return true if this set wraps around the signed domain.
LLVM_ABI APInt getSignedMin() const
Return the smallest signed value contained in the ConstantRange.
LLVM_ABI bool isWrappedSet() const
Return true if this set wraps around the unsigned domain.
LLVM_ABI void print(raw_ostream &OS) const
Print out the bounds to a stream.
LLVM_ABI ConstantRange truncate(uint32_t BitWidth, unsigned NoWrapKind=0) const
Return a new range in the specified integer type, which must be strictly smaller than the current typ...
LLVM_ABI ConstantRange signExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
const APInt & getUpper() const
Return the upper value for this range.
LLVM_ABI ConstantRange unionWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the union of this range with another range.
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
LLVM_ABI APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
LLVM_ABI APInt getSignedMax() const
Return the largest signed value contained in the ConstantRange.
static ConstantRange getNonEmpty(APInt Lower, APInt Upper)
Create non-empty constant range with the given bounds.
static LLVM_ABI ConstantRange makeGuaranteedNoWrapRegion(Instruction::BinaryOps BinOp, const ConstantRange &Other, unsigned NoWrapKind)
Produce the largest range containing all X such that "X BinOp Y" is guaranteed not to wrap (overflow)...
LLVM_ABI unsigned getMinSignedBits() const
Compute the maximal number of bits needed to represent every value in this signed range.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
LLVM_ABI ConstantRange sub(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a subtraction of a value in this r...
LLVM_ABI ConstantRange sextOrTrunc(uint32_t BitWidth) const
Make this range have the bit width given by BitWidth.
static LLVM_ABI ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
This is an important base class in LLVM.
A parsed version of the target data layout string in and methods for querying it.
LLVM_ABI const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
LLVM_ABI IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
LLVM_ABI unsigned getIndexTypeSizeInBits(Type *Ty) const
The size in bits of the index used in GEP calculation for this type.
LLVM_ABI IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of a GEP index in AddressSpace.
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
iterator find_as(const LookupKeyT &Val)
Alternate version of find() which allows a different, and possibly less expensive,...
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Analysis pass which computes a DominatorTree.
Legacy analysis pass which computes a DominatorTree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
LLVM_ABI bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
FoldingSetNodeIDRef - This class describes a reference to an interned FoldingSetNodeID,...
FoldingSetNodeID - This class is used to gather all the unique data bits of a node.
Represents flags for the getelementptr instruction/expression.
bool hasNoUnsignedSignedWrap() const
bool hasNoUnsignedWrap() const
static GEPNoWrapFlags none()
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
Module * getParent()
Get the module that this global value is contained inside of...
static bool isPrivateLinkage(LinkageTypes Linkage)
static bool isInternalLinkage(LinkageTypes Linkage)
This instruction compares its operands according to the predicate given to the constructor.
CmpPredicate getCmpPredicate() const
static bool isGE(Predicate P)
Return true if the predicate is SGE or UGE.
CmpPredicate getSwappedCmpPredicate() const
static LLVM_ABI bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
static bool isLT(Predicate P)
Return true if the predicate is SLT or ULT.
CmpPredicate getInverseCmpPredicate() const
Predicate getNonStrictCmpPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
static bool isGT(Predicate P)
Return true if the predicate is SGT or UGT.
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
static CmpPredicate getInverseCmpPredicate(CmpPredicate Pred)
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
static bool isLE(Predicate P)
Return true if the predicate is SLE or ULE.
LLVM_ABI bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
LLVM_ABI bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
LLVM_ABI bool isIdenticalToWhenDefined(const Instruction *I, bool IntersectAttrs=false) const LLVM_READONLY
This is like isIdenticalTo, except that it ignores the SubclassOptionalData flags,...
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A helper class to return the specified delimiter string after the first invocation of operator String...
An instruction for reading from memory.
Analysis pass that exposes the LoopInfo for a function.
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
BlockT * getHeader() const
unsigned getLoopDepth() const
Return the nesting level of this loop.
BlockT * getLoopPredecessor() const
If the given loop's header has exactly one unique predecessor outside the loop, return it.
LoopT * getParentLoop() const
Return the parent loop if it exists or nullptr for top level loops.
unsigned getLoopDepth(const BlockT *BB) const
Return the loop nesting level of the specified block.
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
The legacy pass manager's analysis pass to compute loop information.
Represents a single loop in the control flow graph.
bool isLoopInvariant(const Value *V) const
Return true if the specified value is loop invariant.
A Module instance is used to store all the information related to an LLVM module.
unsigned getOpcode() const
Return the opcode for this Instruction or ConstantExpr.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
iterator_range< const_block_iterator > blocks() const
op_range incoming_values()
Value * getIncomingValueForBlock(const BasicBlock *BB) const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
AnalysisType & getAnalysis() const
getAnalysis<AnalysisType>() - This function is used by subclasses to get to the analysis information ...
PointerIntPair - This class implements a pair of a pointer and small integer.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
LLVM_ABI void addPredicate(const SCEVPredicate &Pred)
Adds a new predicate.
LLVM_ABI const SCEVPredicate & getPredicate() const
LLVM_ABI const SCEV * getPredicatedSCEV(const SCEV *Expr)
Returns the rewritten SCEV for Expr in the context of the current SCEV predicate.
LLVM_ABI bool hasNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Returns true if we've proved that V doesn't wrap by means of a SCEV predicate.
LLVM_ABI void setNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Proves that V doesn't overflow by adding SCEV predicate.
LLVM_ABI void print(raw_ostream &OS, unsigned Depth) const
Print the SCEV mappings done by the Predicated Scalar Evolution.
LLVM_ABI bool areAddRecsEqualWithPreds(const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const
Check if AR1 and AR2 are equal, while taking into account Equal predicates in Preds.
LLVM_ABI PredicatedScalarEvolution(ScalarEvolution &SE, Loop &L)
LLVM_ABI const SCEVAddRecExpr * getAsAddRec(Value *V)
Attempts to produce an AddRecExpr for V by adding additional SCEV predicates.
LLVM_ABI unsigned getSmallConstantMaxTripCount()
Returns the upper bound of the loop trip count as a normal unsigned value, or 0 if the trip count is unknown.
LLVM_ABI const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
LLVM_ABI const SCEV * getSymbolicMaxBackedgeTakenCount()
Get the (predicated) symbolic max backedge count for the analyzed loop.
LLVM_ABI const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
PreservedAnalysisChecker getChecker() const
Build a checker for this PreservedAnalyses and the specified analysis type.
constexpr bool isValid() const
This node represents an addition of some number of SCEVs.
This node represents a polynomial recurrence on the trip count of the specified loop.
friend class ScalarEvolution
const SCEV * getStart() const
LLVM_ABI const SCEV * evaluateAtIteration(const SCEV *It, ScalarEvolution &SE) const
Return the value of this chain of recurrences at the specified iteration number.
const SCEV * getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
void setNoWrapFlags(NoWrapFlags Flags)
Set flags for a recurrence without clearing any previously set flags.
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values.
bool isQuadratic() const
Return true if this represents an expression A + B*x + C*x^2 where A, B and C are loop invariant valu...
LLVM_ABI const SCEV * getNumIterationsInRange(const ConstantRange &Range, ScalarEvolution &SE) const
Return the number of iterations of this loop that produce values in the specified constant range.
LLVM_ABI const SCEVAddRecExpr * getPostIncExpr(ScalarEvolution &SE) const
Return an expression representing the value of this expression one iteration of the loop ahead.
const Loop * getLoop() const
This is the base class for unary cast operator classes.
const SCEV * getOperand() const
LLVM_ABI SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, const SCEV *op, Type *ty)
void setNoWrapFlags(NoWrapFlags Flags)
Set flags for a non-recurrence without clearing previously set flags.
This class represents an assumption that the expression LHS Pred RHS evaluates to true,...
SCEVComparePredicate(const FoldingSetNodeIDRef ID, const ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
bool isAlwaysTrue() const override
Returns true if the predicate is always true.
void print(raw_ostream &OS, unsigned Depth=0) const override
Prints a textual representation of this predicate with an indentation of Depth.
bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const override
Implementation of the SCEVPredicate interface.
This class represents a constant integer value.
ConstantInt * getValue() const
const APInt & getAPInt() const
This is the base class for unary integral cast operator classes.
LLVM_ABI SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, const SCEV *op, Type *ty)
This node is the base class min/max selections.
static enum SCEVTypes negate(enum SCEVTypes T)
This node represents multiplication of some number of SCEVs.
This node is a base class providing common functionality for n'ary operators.
bool hasNoUnsignedWrap() const
bool hasNoSelfWrap() const
size_t getNumOperands() const
bool hasNoSignedWrap() const
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask=NoWrapMask) const
const SCEV * getOperand(unsigned i) const
const SCEV *const * Operands
ArrayRef< const SCEV * > operands() const
This class represents an assumption made using SCEV expressions which can be checked at run-time.
SCEVPredicate(const SCEVPredicate &)=default
virtual bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const =0
Returns true if this predicate implies N.
This class represents a cast from a pointer to a pointer-sized integer value, without capturing the p...
This class represents a cast from a pointer to a pointer-sized integer value.
This visitor recursively visits a SCEV expression and re-writes it.
const SCEV * visitSignExtendExpr(const SCEVSignExtendExpr *Expr)
const SCEV * visit(const SCEV *S)
const SCEV * visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr)
const SCEV * visitSMinExpr(const SCEVSMinExpr *Expr)
SCEVRewriteVisitor(ScalarEvolution &SE)
const SCEV * visitUMinExpr(const SCEVUMinExpr *Expr)
This class represents a signed minimum selection.
This node is the base class for sequential/in-order min/max selections.
static SCEVTypes getEquivalentNonSequentialSCEVType(SCEVTypes Ty)
This class represents a sign extension of a small integer value to a larger integer value.
Visit all nodes in the expression tree using worklist traversal.
This class represents a truncation of an integer value to a smaller integer value.
This class represents a binary unsigned division operation.
const SCEV * getLHS() const
const SCEV * getRHS() const
This class represents an unsigned minimum selection.
This class represents a composition of other SCEV predicates, and is the class that most clients will...
void print(raw_ostream &OS, unsigned Depth) const override
Prints a textual representation of this predicate with an indentation of Depth.
bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const override
Returns true if this predicate implies N.
SCEVUnionPredicate(ArrayRef< const SCEVPredicate * > Preds, ScalarEvolution &SE)
Union predicates don't get cached so create a dummy set ID for it.
bool isAlwaysTrue() const override
Implementation of the SCEVPredicate interface.
This means that we are dealing with an entirely unknown SCEV value, and only represent it as its LLVM...
This class represents the value of vscale, as used when defining the length of a scalable vector or r...
This class represents an assumption made on an AddRec expression.
IncrementWrapFlags
Similar to SCEV::NoWrapFlags, but with slightly different semantics for FlagNUSW.
SCEVWrapPredicate(const FoldingSetNodeIDRef ID, const SCEVAddRecExpr *AR, IncrementWrapFlags Flags)
bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const override
Returns true if this predicate implies N.
static SCEVWrapPredicate::IncrementWrapFlags setFlags(SCEVWrapPredicate::IncrementWrapFlags Flags, SCEVWrapPredicate::IncrementWrapFlags OnFlags)
void print(raw_ostream &OS, unsigned Depth=0) const override
Prints a textual representation of this predicate with an indentation of Depth.
bool isAlwaysTrue() const override
Returns true if the predicate is always true.
const SCEVAddRecExpr * getExpr() const
Implementation of the SCEVPredicate interface.
static SCEVWrapPredicate::IncrementWrapFlags clearFlags(SCEVWrapPredicate::IncrementWrapFlags Flags, SCEVWrapPredicate::IncrementWrapFlags OffFlags)
Convenient IncrementWrapFlags manipulation methods.
static SCEVWrapPredicate::IncrementWrapFlags getImpliedFlags(const SCEVAddRecExpr *AR, ScalarEvolution &SE)
Returns the set of SCEVWrapPredicate no wrap flags implied by a SCEVAddRecExpr.
IncrementWrapFlags getFlags() const
Returns the set assumed no overflow flags.
This class represents a zero extension of a small integer value to a larger integer value.
This class represents an analyzed expression in the program.
LLVM_ABI ArrayRef< const SCEV * > operands() const
Return operands of this SCEV expression.
unsigned short getExpressionSize() const
LLVM_ABI bool isOne() const
Return true if the expression is a constant one.
LLVM_ABI bool isZero() const
Return true if the expression is a constant zero.
LLVM_ABI void dump() const
This method is used for debugging.
LLVM_ABI bool isAllOnesValue() const
Return true if the expression is a constant all-ones value.
LLVM_ABI bool isNonConstantNegative() const
Return true if the specified scev is negated, but not a constant.
LLVM_ABI void print(raw_ostream &OS) const
Print out the internal representation of this scalar to the specified stream.
SCEV(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, unsigned short ExpressionSize)
SCEVTypes getSCEVType() const
LLVM_ABI Type * getType() const
Return the LLVM type of this SCEV expression.
NoWrapFlags
NoWrapFlags are bitfield indices into SubclassData.
Analysis pass that exposes the ScalarEvolution for a function.
LLVM_ABI ScalarEvolution run(Function &F, FunctionAnalysisManager &AM)
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
void print(raw_ostream &OS, const Module *=nullptr) const override
print - Print out the internal state of the pass.
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass.
void releaseMemory() override
releaseMemory() - This member can be implemented by a pass if it wants to be able to release its memo...
void verifyAnalysis() const override
verifyAnalysis() - This member can be implemented by a analysis pass to check state of analysis infor...
ScalarEvolutionWrapperPass()
static LLVM_ABI LoopGuards collect(const Loop *L, ScalarEvolution &SE)
Collect rewrite map for loop guards for loop L, together with flags indicating if NUW and NSW can be ...
LLVM_ABI const SCEV * rewrite(const SCEV *Expr) const
Try to apply the collected loop guards to Expr.
The main scalar evolution driver.
const SCEV * getConstantMaxBackedgeTakenCount(const Loop *L)
When successful, this returns a SCEVConstant that is greater than or equal to (i.e.
static bool hasFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags TestFlags)
const DataLayout & getDataLayout() const
Return the DataLayout associated with the module this SCEV instance is operating on.
LLVM_ABI bool isKnownNonNegative(const SCEV *S)
Test if the given expression is known to be non-negative.
LLVM_ABI bool isKnownOnEveryIteration(CmpPredicate Pred, const SCEVAddRecExpr *LHS, const SCEV *RHS)
Test if the condition described by Pred, LHS, RHS is known to be true on every iteration of the loop ...
LLVM_ABI const SCEV * getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
Return the SCEV object corresponding to -V.
LLVM_ABI std::optional< LoopInvariantPredicate > getLoopInvariantExitCondDuringFirstIterationsImpl(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI, const SCEV *MaxIter)
LLVM_ABI const SCEV * getSMaxExpr(const SCEV *LHS, const SCEV *RHS)
LLVM_ABI const SCEV * getUDivCeilSCEV(const SCEV *N, const SCEV *D)
Compute ceil(N / D).
LLVM_ABI std::optional< LoopInvariantPredicate > getLoopInvariantExitCondDuringFirstIterations(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI, const SCEV *MaxIter)
If the result of the predicate LHS Pred RHS is loop invariant with respect to L at given Context duri...
LLVM_ABI Type * getWiderType(Type *Ty1, Type *Ty2) const
LLVM_ABI const SCEV * getAbsExpr(const SCEV *Op, bool IsNSW)
LLVM_ABI bool isKnownNonPositive(const SCEV *S)
Test if the given expression is known to be non-positive.
LLVM_ABI const SCEV * getURemExpr(const SCEV *LHS, const SCEV *RHS)
Represents an unsigned remainder expression based on unsigned division.
LLVM_ABI bool isKnownNegative(const SCEV *S)
Test if the given expression is known to be negative.
LLVM_ABI const SCEV * getPredicatedConstantMaxBackedgeTakenCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Predicates)
Similar to getConstantMaxBackedgeTakenCount, except it will add a set of SCEV predicates to Predicate...
LLVM_ABI const SCEV * removePointerBase(const SCEV *S)
Compute an expression equivalent to S - getPointerBase(S).
LLVM_ABI bool isLoopEntryGuardedByCond(const Loop *L, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether entry to the loop is protected by a conditional between LHS and RHS.
LLVM_ABI bool isKnownNonZero(const SCEV *S)
Test if the given expression is known to be non-zero.
LLVM_ABI const SCEV * getSCEVAtScope(const SCEV *S, const Loop *L)
Return a SCEV expression for the specified value at the specified scope in the program.
LLVM_ABI const SCEV * getSMinExpr(const SCEV *LHS, const SCEV *RHS)
LLVM_ABI const SCEV * getBackedgeTakenCount(const Loop *L, ExitCountKind Kind=Exact)
If the specified loop has a predictable backedge-taken count, return it, otherwise return a SCEVCould...
LLVM_ABI const SCEV * getUMaxExpr(const SCEV *LHS, const SCEV *RHS)
LLVM_ABI void setNoWrapFlags(SCEVAddRecExpr *AddRec, SCEV::NoWrapFlags Flags)
Update no-wrap flags of an AddRec.
LLVM_ABI const SCEV * getUMaxFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS)
Promote the operands to the wider of the types using zero-extension, and then perform a umax operatio...
const SCEV * getZero(Type *Ty)
Return a SCEV for the constant 0 of a specific type.
LLVM_ABI bool willNotOverflow(Instruction::BinaryOps BinOp, bool Signed, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI=nullptr)
Is operation BinOp between LHS and RHS provably does not have a signed/unsigned overflow (Signed)?
LLVM_ABI ExitLimit computeExitLimitFromCond(const Loop *L, Value *ExitCond, bool ExitIfTrue, bool ControlsOnlyExit, bool AllowPredicates=false)
Compute the number of times the backedge of the specified loop will execute if its exit condition wer...
LLVM_ABI const SCEV * getZeroExtendExprImpl(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI const SCEVPredicate * getEqualPredicate(const SCEV *LHS, const SCEV *RHS)
LLVM_ABI unsigned getSmallConstantTripMultiple(const Loop *L, const SCEV *ExitCount)
Returns the largest constant divisor of the trip count as a normal unsigned value,...
LLVM_ABI uint64_t getTypeSizeInBits(Type *Ty) const
Return the size in bits of the specified type, for which isSCEVable must return true.
LLVM_ABI const SCEV * getConstant(ConstantInt *V)
LLVM_ABI const SCEV * getPredicatedBackedgeTakenCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Predicates)
Similar to getBackedgeTakenCount, except it will add a set of SCEV predicates to Predicates that are ...
LLVM_ABI const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
ConstantRange getSignedRange(const SCEV *S)
Determine the signed range for a particular SCEV.
LLVM_ABI const SCEV * getNoopOrSignExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
bool loopHasNoAbnormalExits(const Loop *L)
Return true if the loop has no abnormal exits.
LLVM_ABI const SCEV * getTripCountFromExitCount(const SCEV *ExitCount)
A version of getTripCountFromExitCount below which always picks an evaluation type which can not resu...
LLVM_ABI ScalarEvolution(Function &F, TargetLibraryInfo &TLI, AssumptionCache &AC, DominatorTree &DT, LoopInfo &LI)
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
LLVM_ABI const SCEV * getTruncateOrNoop(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI const SCEV * getLosslessPtrToIntExpr(const SCEV *Op)
LLVM_ABI const SCEV * getCastExpr(SCEVTypes Kind, const SCEV *Op, Type *Ty)
LLVM_ABI const SCEV * getSequentialMinMaxExpr(SCEVTypes Kind, SmallVectorImpl< const SCEV * > &Operands)
LLVM_ABI std::optional< bool > evaluatePredicateAt(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI)
Check whether the condition described by Pred, LHS, and RHS is true or false in the given Context.
LLVM_ABI unsigned getSmallConstantMaxTripCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > *Predicates=nullptr)
Returns the upper bound of the loop trip count as a normal unsigned value.
LLVM_ABI const SCEV * getPtrToIntExpr(const SCEV *Op, Type *Ty)
LLVM_ABI bool isBackedgeTakenCountMaxOrZero(const Loop *L)
Return true if the backedge taken count is either the value returned by getConstantMaxBackedgeTakenCo...
LLVM_ABI void forgetLoop(const Loop *L)
This method should be called by the client when it has changed a loop in a way that may effect Scalar...
LLVM_ABI bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
LLVM_ABI bool isKnownPositive(const SCEV *S)
Test if the given expression is known to be positive.
APInt getUnsignedRangeMin(const SCEV *S)
Determine the min of the unsigned range for a particular SCEV.
LLVM_ABI bool SimplifyICmpOperands(CmpPredicate &Pred, const SCEV *&LHS, const SCEV *&RHS, unsigned Depth=0)
Simplify LHS and RHS in a comparison with predicate Pred.
LLVM_ABI const SCEV * getOffsetOfExpr(Type *IntTy, StructType *STy, unsigned FieldNo)
Return an expression for offsetof on the given field with type IntTy.
LLVM_ABI LoopDisposition getLoopDisposition(const SCEV *S, const Loop *L)
Return the "disposition" of the given SCEV with respect to the given loop.
LLVM_ABI bool containsAddRecurrence(const SCEV *S)
Return true if the SCEV is a scAddRecExpr or it contains scAddRecExpr.
LLVM_ABI const SCEV * getSignExtendExprImpl(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI const SCEV * getAddRecExpr(const SCEV *Start, const SCEV *Step, const Loop *L, SCEV::NoWrapFlags Flags)
Get an add recurrence expression for the specified loop.
LLVM_ABI bool hasOperand(const SCEV *S, const SCEV *Op) const
Test whether the given SCEV has Op as a direct or indirect operand.
LLVM_ABI const SCEV * getUDivExpr(const SCEV *LHS, const SCEV *RHS)
Get a canonical unsigned division expression, or something simpler if possible.
LLVM_ABI const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI bool isSCEVable(Type *Ty) const
Test if values of the given type are analyzable within the SCEV framework.
LLVM_ABI Type * getEffectiveSCEVType(Type *Ty) const
Return a type with the same bitwidth as the given type and which represents how SCEV will treat the g...
LLVM_ABI const SCEVPredicate * getComparePredicate(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
LLVM_ABI bool haveSameSign(const SCEV *S1, const SCEV *S2)
Return true if we know that S1 and S2 must have the same sign.
LLVM_ABI const SCEV * getNotSCEV(const SCEV *V)
Return the SCEV object corresponding to ~V.
LLVM_ABI const SCEV * getElementCount(Type *Ty, ElementCount EC, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
LLVM_ABI bool instructionCouldExistWithOperands(const SCEV *A, const SCEV *B)
Return true if there exists a point in the program at which both A and B could be operands to the sam...
ConstantRange getUnsignedRange(const SCEV *S)
Determine the unsigned range for a particular SCEV.
LLVM_ABI void print(raw_ostream &OS) const
LLVM_ABI const SCEV * getUMinExpr(const SCEV *LHS, const SCEV *RHS, bool Sequential=false)
LLVM_ABI const SCEV * getPredicatedExitCount(const Loop *L, const BasicBlock *ExitingBlock, SmallVectorImpl< const SCEVPredicate * > *Predicates, ExitCountKind Kind=Exact)
Same as above except this uses the predicated backedge taken info and may require predicates.
static SCEV::NoWrapFlags clearFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OffFlags)
LLVM_ABI void forgetTopmostLoop(const Loop *L)
LLVM_ABI void forgetValue(Value *V)
This method should be called by the client when it has changed a value in a way that may effect its v...
APInt getSignedRangeMin(const SCEV *S)
Determine the min of the signed range for a particular SCEV.
LLVM_ABI const SCEV * getNoopOrAnyExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI void forgetBlockAndLoopDispositions(Value *V=nullptr)
Called when the client has changed the disposition of values in a loop or block.
LLVM_ABI const SCEV * getTruncateExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
@ MonotonicallyDecreasing
@ MonotonicallyIncreasing
LLVM_ABI std::optional< LoopInvariantPredicate > getLoopInvariantPredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI=nullptr)
If the result of the predicate LHS Pred RHS is loop invariant with respect to L, return a LoopInvaria...
LLVM_ABI const SCEV * getStoreSizeOfExpr(Type *IntTy, Type *StoreTy)
Return an expression for the store size of StoreTy that is type IntTy.
LLVM_ABI const SCEVPredicate * getWrapPredicate(const SCEVAddRecExpr *AR, SCEVWrapPredicate::IncrementWrapFlags AddedFlags)
LLVM_ABI bool isLoopBackedgeGuardedByCond(const Loop *L, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether the backedge of the loop is protected by a conditional between LHS and RHS.
LLVM_ABI const SCEV * getMinusSCEV(const SCEV *LHS, const SCEV *RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS.
LLVM_ABI APInt getNonZeroConstantMultiple(const SCEV *S)
const SCEV * getMinusOne(Type *Ty)
Return a SCEV for the constant -1 of a specific type.
static SCEV::NoWrapFlags setFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OnFlags)
LLVM_ABI bool hasLoopInvariantBackedgeTakenCount(const Loop *L)
Return true if the specified loop has an analyzable loop-invariant backedge-taken count.
LLVM_ABI BlockDisposition getBlockDisposition(const SCEV *S, const BasicBlock *BB)
Return the "disposition" of the given SCEV with respect to the given block.
LLVM_ABI const SCEV * getNoopOrZeroExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)
LLVM_ABI const SCEV * getUMinFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS, bool Sequential=false)
Promote the operands to the wider of the types using zero-extension, and then perform a umin operatio...
LLVM_ABI bool loopIsFiniteByAssumption(const Loop *L)
Return true if this loop is finite by assumption.
LLVM_ABI const SCEV * getExistingSCEV(Value *V)
Return an existing SCEV for V if there is one, otherwise return nullptr.
LLVM_ABI APInt getConstantMultiple(const SCEV *S, const Instruction *CtxI=nullptr)
Returns the max constant multiple of S.
LoopDisposition
An enum describing the relationship between a SCEV and a loop.
@ LoopComputable
The SCEV varies predictably with the loop.
@ LoopVariant
The SCEV is loop-variant (unknown).
@ LoopInvariant
The SCEV is loop-invariant.
friend class SCEVCallbackVH
LLVM_ABI bool isKnownMultipleOf(const SCEV *S, uint64_t M, SmallVectorImpl< const SCEVPredicate * > &Assumptions)
Check that S is a multiple of M.
LLVM_ABI const SCEV * getAnyExtendExpr(const SCEV *Op, Type *Ty)
getAnyExtendExpr - Return a SCEV for the given operand extended with unspecified bits out to the give...
LLVM_ABI bool isKnownToBeAPowerOfTwo(const SCEV *S, bool OrZero=false, bool OrNegative=false)
Test if the given expression is known to be a power of 2.
LLVM_ABI std::optional< SCEV::NoWrapFlags > getStrengthenedNoWrapFlagsFromBinOp(const OverflowingBinaryOperator *OBO)
Parse NSW/NUW flags from add/sub/mul IR binary operation Op into SCEV no-wrap flags,...
LLVM_ABI void forgetLcssaPhiWithNewPredecessor(Loop *L, PHINode *V)
Forget LCSSA phi node V of loop L to which a new predecessor was added, such that it may no longer be...
LLVM_ABI bool containsUndefs(const SCEV *S) const
Return true if the SCEV expression contains an undef value.
LLVM_ABI std::optional< MonotonicPredicateType > getMonotonicPredicateType(const SCEVAddRecExpr *LHS, ICmpInst::Predicate Pred)
If, for all loop invariant X, the predicate "LHS `Pred` X" is monotonically increasing or decreasing,...
LLVM_ABI const SCEV * getCouldNotCompute()
LLVM_ABI bool isAvailableAtLoopEntry(const SCEV *S, const Loop *L)
Determine if the SCEV can be evaluated at loop's entry.
LLVM_ABI uint32_t getMinTrailingZeros(const SCEV *S, const Instruction *CtxI=nullptr)
Determine the minimum number of zero bits that S is guaranteed to end in (at every loop iteration).
BlockDisposition
An enum describing the relationship between a SCEV and a basic block.
@ DominatesBlock
The SCEV dominates the block.
@ ProperlyDominatesBlock
The SCEV properly dominates the block.
@ DoesNotDominateBlock
The SCEV does not dominate the block.
LLVM_ABI const SCEV * getGEPExpr(GEPOperator *GEP, ArrayRef< const SCEV * > IndexExprs)
Returns an expression for a GEP.
LLVM_ABI const SCEV * getExitCount(const Loop *L, const BasicBlock *ExitingBlock, ExitCountKind Kind=Exact)
Return the number of times the backedge executes before the given exit would be taken; if not exactly...
LLVM_ABI const SCEV * getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI void getPoisonGeneratingValues(SmallPtrSetImpl< const Value * > &Result, const SCEV *S)
Return the set of Values that, if poison, will definitively result in S being poison as well.
LLVM_ABI void forgetLoopDispositions()
Called when the client has changed the disposition of values in this loop.
LLVM_ABI const SCEV * getVScale(Type *Ty)
LLVM_ABI unsigned getSmallConstantTripCount(const Loop *L)
Returns the exact trip count of the loop if we can compute it, and the result is a small constant.
LLVM_ABI bool hasComputableLoopEvolution(const SCEV *S, const Loop *L)
Return true if the given SCEV changes value in a known way in the specified loop.
LLVM_ABI const SCEV * getPointerBase(const SCEV *V)
Transitively follow the chain of pointer-type operands until reaching a SCEV that does not have a sin...
LLVM_ABI const SCEV * getMinMaxExpr(SCEVTypes Kind, SmallVectorImpl< const SCEV * > &Operands)
LLVM_ABI void forgetAllLoops()
LLVM_ABI bool dominates(const SCEV *S, const BasicBlock *BB)
Return true if elements that makes up the given SCEV dominate the specified basic block.
APInt getUnsignedRangeMax(const SCEV *S)
Determine the max of the unsigned range for a particular SCEV.
ExitCountKind
The terms "backedge taken count" and "exit count" are used interchangeably to refer to the number of ...
@ SymbolicMaximum
An expression which provides an upper bound on the exact trip count.
@ ConstantMaximum
A constant which provides an upper bound on the exact trip count.
@ Exact
An expression exactly describing the number of times the backedge has executed when a loop is exited.
LLVM_ABI const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
LLVM_ABI const SCEV * getMulExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
LLVM_ABI const SCEV * getPtrToAddrExpr(const SCEV *Op)
LLVM_ABI const SCEVAddRecExpr * convertSCEVToAddRecWithPredicates(const SCEV *S, const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Preds)
Tries to convert the S expression to an AddRec expression, adding additional predicates to Preds as r...
LLVM_ABI const SCEV * getElementSize(Instruction *Inst)
Return the size of an element read or written by Inst.
LLVM_ABI const SCEV * getSizeOfExpr(Type *IntTy, TypeSize Size)
Return an expression for a TypeSize.
LLVM_ABI std::optional< bool > evaluatePredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Check whether the condition described by Pred, LHS, and RHS is true or false.
LLVM_ABI const SCEV * getUnknown(Value *V)
LLVM_ABI std::optional< std::pair< const SCEV *, SmallVector< const SCEVPredicate *, 3 > > > createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI)
Checks if SymbolicPHI can be rewritten as an AddRecExpr under some Predicates.
LLVM_ABI const SCEV * getTruncateOrZeroExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
static SCEV::NoWrapFlags maskFlags(SCEV::NoWrapFlags Flags, int Mask)
Convenient NoWrapFlags manipulation that hides enum casts and is visible in the ScalarEvolution name ...
LLVM_ABI std::optional< APInt > computeConstantDifference(const SCEV *LHS, const SCEV *RHS)
Compute LHS - RHS and returns the result as an APInt if it is a constant, and std::nullopt if it isn'...
LLVM_ABI bool properlyDominates(const SCEV *S, const BasicBlock *BB)
Return true if elements that makes up the given SCEV properly dominate the specified basic block.
LLVM_ABI const SCEV * rewriteUsingPredicate(const SCEV *S, const Loop *L, const SCEVPredicate &A)
Re-writes the SCEV according to the Predicates in A.
LLVM_ABI std::pair< const SCEV *, const SCEV * > SplitIntoInitAndPostInc(const Loop *L, const SCEV *S)
Splits SCEV expression S into two SCEVs.
LLVM_ABI bool canReuseInstruction(const SCEV *S, Instruction *I, SmallVectorImpl< Instruction * > &DropPoisonGeneratingInsts)
Check whether it is poison-safe to represent the expression S using the instruction I.
LLVM_ABI bool isKnownPredicateAt(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
LLVM_ABI const SCEV * getPredicatedSymbolicMaxBackedgeTakenCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Predicates)
Similar to getSymbolicMaxBackedgeTakenCount, except it will add a set of SCEV predicates to Predicate...
LLVM_ABI ~ScalarEvolution()
LLVM_ABI const SCEV * getUDivExactExpr(const SCEV *LHS, const SCEV *RHS)
Get a canonical unsigned division expression, or something simpler if possible.
LLVM_ABI void registerUser(const SCEV *User, ArrayRef< const SCEV * > Ops)
Notify this ScalarEvolution that User directly uses SCEVs in Ops.
LLVM_ABI const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
LLVM_ABI bool isBasicBlockEntryGuardedByCond(const BasicBlock *BB, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether entry to the basic block is protected by a conditional between LHS and RHS.
LLVM_ABI const SCEV * getTruncateOrSignExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI bool containsErasedValue(const SCEV *S) const
Return true if the SCEV expression contains a Value that has been optimised out and is now a nullptr.
LLVM_ABI bool isKnownPredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
LLVM_ABI bool isKnownViaInduction(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
We'd like to check the predicate on every iteration of the most dominated loop between loops used in ...
const SCEV * getSymbolicMaxBackedgeTakenCount(const Loop *L)
When successful, this returns a SCEV that is greater than or equal to (i.e.
APInt getSignedRangeMax(const SCEV *S)
Determine the max of the signed range for a particular SCEV.
LLVM_ABI void verify() const
LLVMContext & getContext() const
Implements a dense probed hash-table based set with some number of buckets stored inline.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
iterator insert(iterator I, T &&Elt)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
Used to lazily calculate structure layout information for a target machine, based on the DataLayout s...
TypeSize getElementOffset(unsigned Idx) const
TypeSize getSizeInBits() const
Class to represent struct types.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
The instances of the Type class are immutable: once they are created, they are never changed.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
bool isPointerTy() const
True if this is an instance of PointerType.
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
unsigned getValueID() const
Return an ID for the concrete type of this object.
LLVM_ABI void printAsOperand(raw_ostream &O, bool PrintType=true, const Module *M=nullptr) const
Print the name of this Value out to the specified raw_ostream.
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
This class implements an extremely fast bulk output stream that can only output to a stream.
raw_ostream & indent(unsigned NumSpaces)
indent - Insert 'NumSpaces' spaces.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
const APInt & smin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be signed.
const APInt & smax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be signed.
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
LLVM_ABI std::optional< APInt > SolveQuadraticEquationWrap(APInt A, APInt B, APInt C, unsigned RangeWidth)
Let q(n) = An^2 + Bn + C, and BW = bit width of the value range (e.g.
const APInt & umax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be unsigned.
LLVM_ABI APInt GreatestCommonDivisor(APInt A, APInt B)
Compute GCD of two unsigned APInt values.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
int getMinValue(MCInstrInfo const &MCII, MCInst const &MCI)
Return the minimum value of an extendable operand.
@ BasicBlock
Various leaf nodes.
LLVM_ABI Function * getDeclarationIfExists(const Module *M, ID id)
Look up the Function declaration of the intrinsic id in the Module M and return it if it exists.
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
bool match(Val *V, const Pattern &P)
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
bind_ty< WithOverflowInst > m_WithOverflowInst(WithOverflowInst *&I)
Match a with overflow intrinsic, capturing it if we match.
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
class_match< BasicBlock > m_BasicBlock()
Match an arbitrary basic block value and ignore it.
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
class_match< const SCEVVScale > m_SCEVVScale()
bind_cst_ty m_scev_APInt(const APInt *&C)
Match an SCEV constant and bind it to an APInt.
cst_pred_ty< is_all_ones > m_scev_AllOnes()
Match an integer with all bits set.
SCEVUnaryExpr_match< SCEVZeroExtendExpr, Op0_t > m_scev_ZExt(const Op0_t &Op0)
is_undef_or_poison m_scev_UndefOrPoison()
Match an SCEVUnknown wrapping undef or poison.
class_match< const SCEVConstant > m_SCEVConstant()
cst_pred_ty< is_one > m_scev_One()
Match an integer 1.
specificloop_ty m_SpecificLoop(const Loop *L)
SCEVAffineAddRec_match< Op0_t, Op1_t, class_match< const Loop > > m_scev_AffineAddRec(const Op0_t &Op0, const Op1_t &Op1)
bind_ty< const SCEVMulExpr > m_scev_Mul(const SCEVMulExpr *&V)
SCEVUnaryExpr_match< SCEVSignExtendExpr, Op0_t > m_scev_SExt(const Op0_t &Op0)
cst_pred_ty< is_zero > m_scev_Zero()
Match an integer 0.
SCEVUnaryExpr_match< SCEVTruncateExpr, Op0_t > m_scev_Trunc(const Op0_t &Op0)
bool match(const SCEV *S, const Pattern &P)
SCEVBinaryExpr_match< SCEVUDivExpr, Op0_t, Op1_t > m_scev_UDiv(const Op0_t &Op0, const Op1_t &Op1)
specificscev_ty m_scev_Specific(const SCEV *S)
Match if we have a specific specified SCEV.
SCEVBinaryExpr_match< SCEVMulExpr, Op0_t, Op1_t, SCEV::FlagNUW, true > m_scev_c_NUWMul(const Op0_t &Op0, const Op1_t &Op1)
class_match< const Loop > m_Loop()
bind_ty< const SCEVAddExpr > m_scev_Add(const SCEVAddExpr *&V)
bind_ty< const SCEVUnknown > m_SCEVUnknown(const SCEVUnknown *&V)
SCEVBinaryExpr_match< SCEVMulExpr, Op0_t, Op1_t, SCEV::FlagAnyWrap, true > m_scev_c_Mul(const Op0_t &Op0, const Op1_t &Op1)
SCEVBinaryExpr_match< SCEVSMaxExpr, Op0_t, Op1_t > m_scev_SMax(const Op0_t &Op0, const Op1_t &Op1)
SCEVURem_match< Op0_t, Op1_t > m_scev_URem(Op0_t LHS, Op1_t RHS, ScalarEvolution &SE)
Match the mathematical pattern A - (A / B) * B, where A and B can be arbitrary expressions.
class_match< const SCEV > m_SCEV()
@ Valid
The data is already valid.
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
@ Switch
The "resume-switch" lowering, where there are separate resume and destroy functions that are shared b...
NodeAddr< PhiNode * > Phi
friend class Instruction
Iterator for Instructions in a `BasicBlock.
This is an optimization pass for GlobalISel generic memory operations.
void visitAll(const SCEV *Root, SV &Visitor)
Use SCEVTraversal to visit all nodes in the given expression tree.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
FunctionAddr VTableAddr Value
LLVM_ATTRIBUTE_ALWAYS_INLINE DynamicAPInt gcd(const DynamicAPInt &A, const DynamicAPInt &B)
void stable_sort(R &&Range)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
SaveAndRestore(T &) -> SaveAndRestore< T >
Printable print(const GCNRegPressure &RP, const GCNSubtarget *ST=nullptr, unsigned DynamicVGPRBlockSize=0)
LLVM_ABI bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
LLVM_ABI bool mustTriggerUB(const Instruction *I, const SmallPtrSetImpl< const Value * > &KnownPoison)
Return true if the given instruction must trigger undefined behavior when I is executed with any oper...
LLVM_ABI bool canConstantFoldCallTo(const CallBase *Call, const Function *F)
canConstantFoldCallTo - Return true if its even possible to fold a call to the specified function.
InterleavedRange< Range > interleaved(const Range &R, StringRef Separator=", ", StringRef Prefix="", StringRef Suffix="")
Output range R as a sequence of interleaved elements.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
auto successors(const MachineBasicBlock *BB)
scope_exit(Callable) -> scope_exit< Callable >
constexpr from_range_t from_range
auto dyn_cast_if_present(const Y &Val)
dyn_cast_if_present<X> - Functionally identical to dyn_cast, except that a null (or none in the case ...
bool set_is_subset(const S1Ty &S1, const S2Ty &S2)
set_is_subset(A, B) - Return true iff A in B
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
unsigned short computeExpressionSize(ArrayRef< const SCEV * > Args)
auto uninitialized_copy(R &&Src, IterTy Dst)
bool isa_and_nonnull(const Y &Val)
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
LLVM_ABI bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO, const DominatorTree &DT)
Returns true if the arithmetic part of the WO 's result is used only along the paths control dependen...
DomTreeNodeBase< BasicBlock > DomTreeNode
LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
auto dyn_cast_or_null(const Y &Val)
void erase(Container &C, ValueType V)
Wrapper function to remove a value from a container:
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
iterator_range< pointee_iterator< WrappedIteratorT > > make_pointee_range(RangeT &&Range)
auto reverse(ContainerTy &&C)
LLVM_ABI bool isMustProgress(const Loop *L)
Return true if this loop can be assumed to make progress.
LLVM_ABI bool impliesPoison(const Value *ValAssumedPoison, const Value *V)
Return true if V is poison given that ValAssumedPoison is already poison.
LLVM_ABI bool isFinite(const Loop *L)
Return true if this loop can be assumed to run for a finite number of iterations.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
LLVM_ABI bool programUndefinedIfPoison(const Instruction *Inst)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool isPointerTy(const Type *T)
FunctionAddr VTableAddr Count
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
LLVM_ABI bool propagatesPoison(const Use &PoisonOp)
Return true if PoisonOp's user yields poison or raises UB if its operand PoisonOp is poison.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Mul
Product of integers.
@ SMax
Signed integer max implemented in terms of select(cmp()).
@ SMin
Signed integer min implemented in terms of select(cmp()).
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
auto count(R &&Range, const E &Element)
Wrapper function around std::count to count the number of times an element Element occurs in the give...
DWARFExpression::Operation Op
auto max_element(R &&Range)
Provide wrappers to std::max_element which take ranges instead of having to pass begin/end explicitly...
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return the number of times the sign bit of the register is replicated into the other bits.
constexpr unsigned BitWidth
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
constexpr bool isIntN(unsigned N, int64_t x)
Checks if a signed integer fits into the given (dynamic) bit width.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
iterator_range< df_iterator< T > > depth_first(const T &G)
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
LLVM_ABI bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
LLVM_ABI Constant * ConstantFoldInstOperands(const Instruction *I, ArrayRef< Constant * > Ops, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldInstOperands - Attempt to constant fold an instruction with the specified operands.
bool SCEVExprContains(const SCEV *Root, PredTy Pred)
Return true if any node in Root satisfies the predicate Pred.
Implement std::hash so that hash_code can be used in STL containers.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
A special type used by analysis passes to provide an address that identifies that particular analysis...
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
bool isNonNegative() const
Returns true if this value is known to be non-negative.
static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
unsigned getBitWidth() const
Get the bit width of this value.
static LLVM_ABI KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for lshr(LHS, RHS).
KnownBits zextOrTrunc(unsigned BitWidth) const
Return known bits for a zero extension or truncation of the value we're tracking.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
bool isNegative() const
Returns true if this value is known to be negative.
static LLVM_ABI KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
An object of this class is returned by queries that could not be answered.
LLVM_ABI SCEVCouldNotCompute()
static LLVM_ABI bool classof(const SCEV *S)
Methods for support type inquiry through isa, cast, and dyn_cast:
This class defines a simple visitor class that may be used for various SCEV analysis purposes.
A utility class that uses RAII to save and restore the value of a variable.
Information about the number of loop iterations for which a loop exit's branch condition evaluates to...
LLVM_ABI ExitLimit(const SCEV *E)
Construct either an exact exit limit from a constant, or an unknown one from a SCEVCouldNotCompute.
const SCEV * ExactNotTaken
const SCEV * SymbolicMaxNotTaken
SmallVector< const SCEVPredicate *, 4 > Predicates
A vector of predicate guards for this ExitLimit.
const SCEV * ConstantMaxNotTaken