83#include "llvm/Config/llvm-config.h"
138#define DEBUG_TYPE "scalar-evolution"
141 "Number of loop exits with predictable exit counts");
143 "Number of loop exits without predictable exit counts");
145 "Number of loops with trip counts computed by force");
147#ifdef EXPENSIVE_CHECKS
155 cl::desc(
"Maximum number of iterations SCEV will "
156 "symbolically execute a constant "
162 cl::desc(
"Verify ScalarEvolution's backedge taken counts (slow)"));
165 cl::desc(
"Enable stricter verification with -verify-scev is passed"));
169 cl::desc(
"Verify IR correctness when making sensitive SCEV queries (slow)"),
174 cl::desc(
"Threshold for inlining multiplication operands into a SCEV"),
179 cl::desc(
"Threshold for inlining addition operands into a SCEV"),
183 "scalar-evolution-max-scev-compare-depth",
cl::Hidden,
184 cl::desc(
"Maximum depth of recursive SCEV complexity comparisons"),
188 "scalar-evolution-max-scev-operations-implication-depth",
cl::Hidden,
189 cl::desc(
"Maximum depth of recursive SCEV operations implication analysis"),
193 "scalar-evolution-max-value-compare-depth",
cl::Hidden,
194 cl::desc(
"Maximum depth of recursive value complexity comparisons"),
199 cl::desc(
"Maximum depth of recursive arithmetics"),
203 "scalar-evolution-max-constant-evolving-depth",
cl::Hidden,
208 cl::desc(
"Maximum depth of recursive SExt/ZExt/Trunc"),
213 cl::desc(
"Max coefficients in AddRec during evolving"),
218 cl::desc(
"Size of the expression which is considered huge"),
223 cl::desc(
"Threshold for switching to iteratively computing SCEV ranges"),
227 "scalar-evolution-max-loop-guard-collection-depth",
cl::Hidden,
228 cl::desc(
"Maximum depth for recursive loop guard collection"),
cl::init(1));
233 cl::desc(
"When printing analysis, include information on every instruction"));
236 "scalar-evolution-use-expensive-range-sharpening",
cl::Hidden,
238 cl::desc(
"Use more powerful methods of sharpening expression ranges. May "
239 "be costly in terms of compile time"));
242 "scalar-evolution-max-scc-analysis-depth",
cl::Hidden,
243 cl::desc(
"Maximum amount of nodes to process while searching SCEVUnknown "
244 "Phi strongly connected components"),
249 cl::desc(
"Handle <= and >= in finite loops"),
253 "scalar-evolution-use-context-for-no-wrap-flag-strenghening",
cl::Hidden,
254 cl::desc(
"Infer nuw/nsw flags using context where suitable"),
343#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
363 OS <<
"(ptrto" << OpS <<
" " << *
Op->getType() <<
" " << *
Op <<
" to "
370 OS <<
"(trunc " << *
Op->getType() <<
" " << *
Op <<
" to "
377 OS <<
"(zext " << *
Op->getType() <<
" " << *
Op <<
" to "
384 OS <<
"(sext " << *
Op->getType() <<
" " << *
Op <<
" to "
413 const char *OpStr =
nullptr;
426 OpStr =
" umin_seq ";
450 OS <<
"(" << *UDiv->
getLHS() <<
" /u " << *UDiv->
getRHS() <<
")";
457 OS <<
"***COULDNOTCOMPUTE***";
535 if (!
Mul)
return false;
539 if (!SC)
return false;
557 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
559 UniqueSCEVs.InsertNode(S, IP);
574 ConstantInt::get(ITy, V,
isSigned,
true));
582 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
585 UniqueSCEVs.InsertNode(S, IP);
606 "Must be a non-bit-width-changing pointer-to-integer cast!");
613 "Must be a non-bit-width-changing pointer-to-integer cast!");
625 "Cannot truncate non-integer value!");
632 "Cannot zero extend non-integer value!");
639 "Cannot sign extend non-integer value!");
644 SE->forgetMemoizedResults({
this});
647 SE->UniqueSCEVs.RemoveNode(
this);
653void SCEVUnknown::allUsesReplacedWith(
Value *New) {
655 SE->forgetMemoizedResults({
this});
658 SE->UniqueSCEVs.RemoveNode(
this);
680 if (LIsPointer != RIsPointer)
681 return (
int)LIsPointer - (int)RIsPointer;
686 return (
int)LID - (int)RID;
691 unsigned LArgNo = LA->getArgNo(), RArgNo =
RA->getArgNo();
692 return (
int)LArgNo - (int)RArgNo;
698 if (
auto L = LGV->getLinkage() - RGV->getLinkage())
701 const auto IsGVNameSemantic = [&](
const GlobalValue *GV) {
702 auto LT = GV->getLinkage();
709 if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
710 return LGV->getName().compare(RGV->getName());
721 if (LParent != RParent) {
724 if (LDepth != RDepth)
725 return (
int)LDepth - (int)RDepth;
729 unsigned LNumOps = LInst->getNumOperands(),
730 RNumOps = RInst->getNumOperands();
731 if (LNumOps != RNumOps)
732 return (
int)LNumOps - (int)RNumOps;
734 for (
unsigned Idx :
seq(LNumOps)) {
736 RInst->getOperand(Idx),
Depth + 1);
750static std::optional<int>
760 return (
int)LType - (int)RType;
785 unsigned LBitWidth = LA.
getBitWidth(), RBitWidth =
RA.getBitWidth();
786 if (LBitWidth != RBitWidth)
787 return (
int)LBitWidth - (int)RBitWidth;
788 return LA.
ult(
RA) ? -1 : 1;
794 return LTy->getBitWidth() - RTy->getBitWidth();
805 if (LLoop != RLoop) {
807 assert(LHead != RHead &&
"Two loops share the same header?");
811 "No dominance between recurrences used by one SCEV?");
835 unsigned LNumOps = LOps.
size(), RNumOps = ROps.
size();
836 if (LNumOps != RNumOps)
837 return (
int)LNumOps - (int)RNumOps;
839 for (
unsigned i = 0; i != LNumOps; ++i) {
865 if (
Ops.size() < 2)
return;
870 return Complexity && *Complexity < 0;
872 if (
Ops.size() == 2) {
876 if (IsLessComplex(
RHS,
LHS))
889 for (
unsigned i = 0, e =
Ops.size(); i != e-2; ++i) {
895 for (
unsigned j = i+1; j != e &&
Ops[j]->getSCEVType() == Complexity; ++j) {
900 if (i == e-2)
return;
922template <
typename FoldT,
typename IsIdentityT,
typename IsAbsorberT>
926 IsIdentityT IsIdentity, IsAbsorberT IsAbsorber) {
928 for (
unsigned Idx = 0; Idx <
Ops.size();) {
936 Ops.erase(
Ops.begin() + Idx);
943 assert(Folded &&
"Must have folded value");
947 if (Folded && IsAbsorber(Folded->
getAPInt()))
951 if (Folded && !IsIdentity(Folded->
getAPInt()))
952 Ops.insert(
Ops.begin(), Folded);
954 return Ops.size() == 1 ?
Ops[0] :
nullptr;
1029 APInt OddFactorial(W, 1);
1031 for (
unsigned i = 3; i <= K; ++i) {
1034 OddFactorial *= (i >> TwoFactors);
1038 unsigned CalculationBits = W +
T;
1052 for (
unsigned i = 1; i != K; ++i) {
1085 for (
unsigned i = 1, e =
Operands.size(); i != e; ++i) {
1114 ConversionFn CreatePtrCast;
1118 ConversionFn CreatePtrCast)
1119 : Base(
SE), TargetTy(TargetTy), CreatePtrCast(
std::
move(CreatePtrCast)) {}
1122 Type *TargetTy, ConversionFn CreatePtrCast) {
1124 return Rewriter.visit(Scev);
1160 "Should only reach pointer-typed SCEVUnknown's.");
1165 return SE.getZero(TargetTy);
1166 return CreatePtrCast(Expr);
1171 assert(
Op->getType()->isPointerTy() &&
"Op must be a pointer");
1190 Op, *
this, IntPtrTy, [
this, IntPtrTy](
const SCEVUnknown *U) {
1195 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
1197 SCEV *S =
new (SCEVAllocator)
1199 UniqueSCEVs.InsertNode(S, IP);
1202 return static_cast<const SCEV *
>(S);
1205 "We must have succeeded in sinking the cast, "
1206 "and ending up with an integer-typed expression!");
1211 assert(
Op->getType()->isPointerTy() &&
"Op must be a pointer");
1215 if (DL.hasUnstableRepresentation(
Op->getType()))
1218 Type *Ty = DL.getAddressType(
Op->getType());
1229 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
1231 SCEV *S =
new (SCEVAllocator)
1233 UniqueSCEVs.InsertNode(S, IP);
1236 return static_cast<const SCEV *
>(S);
1239 "We must have succeeded in sinking the cast, "
1240 "and ending up with an integer-typed expression!");
1245 assert(Ty->isIntegerTy() &&
"Target type must be an integer type!");
1257 "This is not a truncating conversion!");
1259 "This is not a conversion to a SCEVable type!");
1260 assert(!
Op->getType()->isPointerTy() &&
"Can't truncate pointer!");
1268 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1290 UniqueSCEVs.InsertNode(S, IP);
1303 unsigned numTruncs = 0;
1304 for (
unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
1312 if (numTruncs < 2) {
1322 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
1329 for (
const SCEV *
Op : AddRec->operands())
1344 UniqueSCEVs.InsertNode(S, IP);
1385struct ExtendOpTraitsBase {
1386 typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(
const SCEV *,
Type *,
1391template <
typename ExtendOp>
struct ExtendOpTraits {
1407 static const GetExtendExprTy GetExtendExpr;
1409 static const SCEV *getOverflowLimitForStep(
const SCEV *Step,
1410 ICmpInst::Predicate *Pred,
1411 ScalarEvolution *SE) {
1416const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1423 static const GetExtendExprTy GetExtendExpr;
1425 static const SCEV *getOverflowLimitForStep(
const SCEV *Step,
1426 ICmpInst::Predicate *Pred,
1427 ScalarEvolution *SE) {
1432const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1444template <
typename ExtendOpTy>
1447 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1448 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1464 for (
auto It = DiffOps.
begin(); It != DiffOps.
end(); ++It)
1477 auto PreStartFlags =
1495 const SCEV *OperandExtendedStart =
1497 (SE->*GetExtendExpr)(Step, WideTy,
Depth));
1498 if ((SE->*GetExtendExpr)(Start, WideTy,
Depth) == OperandExtendedStart) {
1510 const SCEV *OverflowLimit =
1511 ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);
1513 if (OverflowLimit &&
1521template <
typename ExtendOpTy>
1525 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1533 (SE->*GetExtendExpr)(PreStart, Ty,
Depth));
1568template <
typename ExtendOpTy>
1569bool ScalarEvolution::proveNoWrapByVaryingStart(
const SCEV *Start,
1572 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1582 APInt StartAI = StartC->
getAPInt();
1584 for (
unsigned Delta : {-2, -1, 1, 2}) {
1585 const SCEV *PreStart =
getConstant(StartAI - Delta);
1587 FoldingSetNodeID
ID;
1589 ID.AddPointer(PreStart);
1590 ID.AddPointer(Step);
1594 static_cast<SCEVAddRecExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
1598 if (PreAR &&
any(PreAR->getNoWrapFlags(WrapType))) {
1601 const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
1602 DeltaS, &Pred,
this);
1620 const unsigned BitWidth =
C.getBitWidth();
1638 const APInt &ConstantStart,
1657 auto &UserIDs = FoldCacheUser[
I.first->second];
1658 assert(
count(UserIDs,
ID) == 1 &&
"unexpected duplicates in UserIDs");
1659 for (
unsigned I = 0;
I != UserIDs.size(); ++
I)
1660 if (UserIDs[
I] ==
ID) {
1665 I.first->second = S;
1667 FoldCacheUser[S].push_back(
ID);
1673 "This is not an extending conversion!");
1675 "This is not a conversion to a SCEVable type!");
1676 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1680 if (
const SCEV *S = FoldCache.lookup(
ID))
1692 "This is not an extending conversion!");
1694 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1711 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1715 UniqueSCEVs.InsertNode(S, IP);
1725 const SCEV *
X = ST->getOperand();
1739 if (AR->isAffine()) {
1740 const SCEV *Start = AR->getStart();
1741 const SCEV *Step = AR->getStepRecurrence(*
this);
1743 const Loop *L = AR->getLoop();
1747 if (AR->hasNoUnsignedWrap()) {
1768 const SCEV *CastedMaxBECount =
1772 if (MaxBECount == RecastedMaxBECount) {
1782 const SCEV *WideMaxBECount =
1784 const SCEV *OperandExtendedAdd =
1790 if (ZAdd == OperandExtendedAdd) {
1801 OperandExtendedAdd =
1807 if (ZAdd == OperandExtendedAdd) {
1828 !AC.assumptions().empty()) {
1830 auto NewFlags = proveNoUnsignedWrapViaInduction(AR);
1832 if (AR->hasNoUnsignedWrap()) {
1867 const APInt &
C = SC->getAPInt();
1871 const SCEV *SResidual =
1879 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
1904 if (SA->hasNoUnsignedWrap()) {
1925 const SCEV *SResidual =
1936 if (
SM->hasNoUnsignedWrap()) {
1958 const SCEV *TruncRHS;
1995 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1998 UniqueSCEVs.InsertNode(S, IP);
2007 "This is not an extending conversion!");
2009 "This is not a conversion to a SCEVable type!");
2010 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
2014 if (
const SCEV *S = FoldCache.lookup(
ID))
2026 "This is not an extending conversion!");
2028 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
2050 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
2055 UniqueSCEVs.InsertNode(S, IP);
2065 const SCEV *
X = ST->getOperand();
2076 if (SA->hasNoSignedWrap()) {
2098 const SCEV *SResidual =
2111 if (AR->isAffine()) {
2112 const SCEV *Start = AR->getStart();
2113 const SCEV *Step = AR->getStepRecurrence(*
this);
2115 const Loop *L = AR->getLoop();
2119 if (AR->hasNoSignedWrap()) {
2141 const SCEV *CastedMaxBECount =
2145 if (MaxBECount == RecastedMaxBECount) {
2155 const SCEV *WideMaxBECount =
2157 const SCEV *OperandExtendedAdd =
2163 if (SAdd == OperandExtendedAdd) {
2174 OperandExtendedAdd =
2180 if (SAdd == OperandExtendedAdd) {
2200 auto NewFlags = proveNoSignedWrapViaInduction(AR);
2202 if (AR->hasNoSignedWrap()) {
2217 const APInt &
C = SC->getAPInt();
2221 const SCEV *SResidual =
2229 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
2257 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
2260 UniqueSCEVs.InsertNode(S, IP);
2287 "This is not an extending conversion!");
2289 "This is not a conversion to a SCEVable type!");
2294 if (SC->getAPInt().isNegative())
2299 const SCEV *NewOp =
T->getOperand();
2318 for (
const SCEV *
Op : AR->operands())
2356 APInt &AccumulatedConstant,
2360 bool Interesting =
false;
2367 if (Scale != 1 || AccumulatedConstant != 0 ||
C->getValue()->isZero())
2369 AccumulatedConstant += Scale *
C->getAPInt();
2374 for (; i !=
Ops.size(); ++i) {
2383 M, NewOps, AccumulatedConstant,
Add->operands(), NewScale, SE);
2389 auto Pair = M.insert({
Key, NewScale});
2393 Pair.first->second += NewScale;
2401 auto Pair = M.insert({
Ops[i], Scale});
2405 Pair.first->second += Scale;
2424 case Instruction::Add:
2427 case Instruction::Sub:
2430 case Instruction::Mul:
2444 const SCEV *
A = (this->*Extension)(
2446 const SCEV *LHSB = (this->*Extension)(LHS, WideTy, 0);
2447 const SCEV *RHSB = (this->*Extension)(RHS, WideTy, 0);
2455 if (BinOp == Instruction::Mul)
2461 APInt C = RHSC->getAPInt();
2462 unsigned NumBits =
C.getBitWidth();
2463 bool IsSub = (BinOp == Instruction::Sub);
2464 bool IsNegativeConst = (
Signed &&
C.isNegative());
2466 bool OverflowDown = IsSub ^ IsNegativeConst;
2468 if (IsNegativeConst) {
2481 APInt Limit = Min + Magnitude;
2487 APInt Limit = Max - Magnitude;
2492std::optional<SCEV::NoWrapFlags>
2497 return std::nullopt;
2506 bool Deduced =
false;
2508 if (OBO->
getOpcode() != Instruction::Add &&
2511 return std::nullopt;
2520 false, LHS, RHS, CtxI)) {
2527 true, LHS, RHS, CtxI)) {
2534 return std::nullopt;
2544 using namespace std::placeholders;
2551 assert(CanAnalyze &&
"don't call from other places!");
2558 auto IsKnownNonNegative = [&](
SCEVUse U) {
2567 if (SignOrUnsignWrap != SignOrUnsignMask &&
2574 return Instruction::Add;
2576 return Instruction::Mul;
2587 Opcode,
C, OBO::NoSignedWrap);
2595 Opcode,
C, OBO::NoUnsignedWrap);
2605 Ops[0]->isZero() && IsKnownNonNegative(
Ops[1]))
2612 if (UDiv->getOperand(1) ==
Ops[1])
2615 if (UDiv->getOperand(1) ==
Ops[0])
2631 "only nuw or nsw allowed");
2632 assert(!
Ops.empty() &&
"Cannot get empty add!");
2633 if (
Ops.size() == 1)
return Ops[0];
2636 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i)
2638 "SCEVAddExpr operand types don't match!");
2640 Ops, [](
const SCEV *
Op) {
return Op->getType()->isPointerTy(); });
2641 assert(NumPtrs <= 1 &&
"add has at most one pointer operand");
2646 [](
const APInt &C1,
const APInt &C2) {
return C1 + C2; },
2647 [](
const APInt &
C) {
return C.isZero(); },
2648 [](
const APInt &
C) {
return false; });
2661 return getOrCreateAddExpr(
Ops, ComputeFlags(
Ops));
2666 if (
Add->getNoWrapFlags(OrigFlags) != OrigFlags)
2667 Add->setNoWrapFlags(ComputeFlags(
Ops));
2675 bool FoundMatch =
false;
2676 for (
unsigned i = 0, e =
Ops.size(); i != e-1; ++i)
2677 if (
Ops[i] ==
Ops[i+1]) {
2689 --i; e -=
Count - 1;
2699 auto FindTruncSrcType = [&]() ->
Type * {
2705 return T->getOperand()->getType();
2707 SCEVUse LastOp =
Mul->getOperand(
Mul->getNumOperands() - 1);
2709 return T->getOperand()->getType();
2713 if (
auto *SrcType = FindTruncSrcType()) {
2720 if (
T->getOperand()->getType() != SrcType) {
2729 for (
unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
2732 if (
T->getOperand()->getType() != SrcType) {
2760 if (
Ops.size() == 2) {
2770 auto C2 =
C->getAPInt();
2773 APInt ConstAdd = C1 + C2;
2774 auto AddFlags = AddExpr->getNoWrapFlags();
2815 if (
Ops.size() == 2 &&
2826 if (Idx <
Ops.size()) {
2827 bool DeletedAdd =
false;
2838 Ops.erase(
Ops.begin()+Idx);
2841 CommonFlags =
maskFlags(CommonFlags,
Add->getNoWrapFlags());
2864 struct APIntCompare {
2865 bool operator()(
const APInt &LHS,
const APInt &RHS)
const {
2866 return LHS.ult(RHS);
2873 std::map<APInt, SmallVector<SCEVUse, 4>, APIntCompare> MulOpLists;
2874 for (
const SCEV *NewOp : NewOps)
2875 MulOpLists[M.find(NewOp)->second].push_back(NewOp);
2878 if (AccumulatedConstant != 0)
2880 for (
auto &MulOp : MulOpLists) {
2881 if (MulOp.first == 1) {
2883 }
else if (MulOp.first != 0) {
2892 if (
Ops.size() == 1)
2903 for (
unsigned MulOp = 0, e =
Mul->getNumOperands(); MulOp != e; ++MulOp) {
2904 const SCEV *MulOpSCEV =
Mul->getOperand(MulOp);
2907 for (
unsigned AddOp = 0, e =
Ops.size(); AddOp != e; ++AddOp)
2908 if (MulOpSCEV ==
Ops[AddOp]) {
2910 const SCEV *InnerMul =
Mul->getOperand(MulOp == 0);
2911 if (
Mul->getNumOperands() != 2) {
2918 const SCEV *AddOne =
2922 if (
Ops.size() == 2)
return OuterMul;
2924 Ops.erase(
Ops.begin()+AddOp);
2925 Ops.erase(
Ops.begin()+Idx-1);
2927 Ops.erase(
Ops.begin()+Idx);
2928 Ops.erase(
Ops.begin()+AddOp-1);
2930 Ops.push_back(OuterMul);
2935 for (
unsigned OtherMulIdx = Idx+1;
2942 OMulOp != e; ++OMulOp)
2943 if (OtherMul->
getOperand(OMulOp) == MulOpSCEV) {
2945 const SCEV *InnerMul1 =
Mul->getOperand(MulOp == 0);
2946 if (
Mul->getNumOperands() != 2) {
2954 OtherMul->
operands().take_front(OMulOp));
2958 const SCEV *InnerMulSum =
2962 if (
Ops.size() == 2)
return OuterMul;
2963 Ops.erase(
Ops.begin()+Idx);
2964 Ops.erase(
Ops.begin()+OtherMulIdx-1);
2965 Ops.push_back(OuterMul);
2985 for (
unsigned i = 0, e =
Ops.size(); i != e; ++i)
2988 Ops.erase(
Ops.begin()+i);
2993 if (!LIOps.
empty()) {
3018 auto *DefI = getDefiningScopeBound(LIOps);
3020 if (!isGuaranteedToTransferExecutionTo(DefI, ReachI))
3032 if (
Ops.size() == 1)
return NewRec;
3035 for (
unsigned i = 0;; ++i)
3036 if (
Ops[i] == AddRec) {
3046 for (
unsigned OtherIdx = Idx+1;
3054 "AddRecExprs are not sorted in reverse dominance order?");
3061 if (OtherAddRec->getLoop() == AddRecLoop) {
3062 for (
unsigned i = 0, e = OtherAddRec->getNumOperands();
3064 if (i >= AddRecOps.
size()) {
3065 append_range(AddRecOps, OtherAddRec->operands().drop_front(i));
3069 getAddExpr(AddRecOps[i], OtherAddRec->getOperand(i),
3072 Ops.erase(
Ops.begin() + OtherIdx); --OtherIdx;
3087 return getOrCreateAddExpr(
Ops, ComputeFlags(
Ops));
3098 static_cast<SCEVAddExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
3102 S =
new (SCEVAllocator)
3104 UniqueSCEVs.InsertNode(S, IP);
3115 FoldingSetNodeID
ID;
3117 for (
const SCEV *
Op :
Ops)
3122 static_cast<SCEVAddRecExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
3126 S =
new (SCEVAllocator)
3127 SCEVAddRecExpr(
ID.Intern(SCEVAllocator), O,
Ops.size(), L);
3128 UniqueSCEVs.InsertNode(S, IP);
3130 LoopUsers[
L].push_back(S);
3139 FoldingSetNodeID
ID;
3141 for (
const SCEV *
Op :
Ops)
3145 static_cast<SCEVMulExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
3149 S =
new (SCEVAllocator) SCEVMulExpr(
ID.Intern(SCEVAllocator),
3151 UniqueSCEVs.InsertNode(S, IP);
3161 if (j > 1 && k / j != i) Overflow =
true;
3177 if (n == 0 || n == k)
return 1;
3178 if (k > n)
return 0;
3184 for (
uint64_t i = 1; i <= k; ++i) {
3185 r =
umul_ov(r, n-(i-1), Overflow);
3194 struct FindConstantInAddMulChain {
3195 bool FoundConstant =
false;
3197 bool follow(
const SCEV *S) {
3202 bool isDone()
const {
3203 return FoundConstant;
3207 FindConstantInAddMulChain
F;
3209 ST.visitAll(StartExpr);
3210 return F.FoundConstant;
3218 "only nuw or nsw allowed");
3219 assert(!
Ops.empty() &&
"Cannot get empty mul!");
3220 if (
Ops.size() == 1)
return Ops[0];
3222 Type *ETy =
Ops[0]->getType();
3224 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i)
3226 "SCEVMulExpr operand types don't match!");
3231 [](
const APInt &C1,
const APInt &C2) {
return C1 * C2; },
3232 [](
const APInt &
C) {
return C.isOne(); },
3233 [](
const APInt &
C) {
return C.isZero(); });
3244 return getOrCreateMulExpr(
Ops, ComputeFlags(
Ops));
3249 if (
Mul->getNoWrapFlags(OrigFlags) != OrigFlags)
3250 Mul->setNoWrapFlags(ComputeFlags(
Ops));
3255 if (
Ops.size() == 2) {
3263 const SCEV *Op0, *Op1;
3271 if (
Ops[0]->isAllOnesValue()) {
3276 bool AnyFolded =
false;
3277 for (
const SCEV *AddOp :
Add->operands()) {
3297 if (AddRec->hasNoSignedWrap()) {
3304 AddRec->getNoWrapFlags(FlagsMask));
3327 APInt C1V = LHSC->getAPInt();
3337 const SCEV *NewMul =
nullptr;
3341 assert(C1V.
ugt(1) &&
"C1 <= 1 should have been folded earlier");
3356 if (Idx <
Ops.size()) {
3357 bool DeletedMul =
false;
3363 Ops.erase(
Ops.begin()+Idx);
3387 for (
unsigned i = 0, e =
Ops.size(); i != e; ++i)
3390 Ops.erase(
Ops.begin()+i);
3395 if (!LIOps.
empty()) {
3408 for (
unsigned i = 0, e = AddRec->
getNumOperands(); i != e; ++i) {
3424 if (
Ops.size() == 1)
return NewRec;
3427 for (
unsigned i = 0;; ++i)
3428 if (
Ops[i] == AddRec) {
3449 bool OpsModified =
false;
3450 for (
unsigned OtherIdx = Idx+1;
3464 bool Overflow =
false;
3471 for (
int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
3475 z < ze && !Overflow; ++z) {
3478 if (LargerThan64Bits)
3479 Coeff =
umul_ov(Coeff1, Coeff2, Overflow);
3481 Coeff = Coeff1*Coeff2;
3496 if (
Ops.size() == 2)
return NewAddRec;
3497 Ops[Idx] = NewAddRec;
3498 Ops.erase(
Ops.begin() + OtherIdx); --OtherIdx;
3514 return getOrCreateMulExpr(
Ops, ComputeFlags(
Ops));
3521 "SCEVURemExpr operand types don't match!");
3526 if (RHSC->getValue()->isOne())
3527 return getZero(LHS->getType());
3530 if (RHSC->getAPInt().isPowerOf2()) {
3531 Type *FullTy = LHS->getType();
3547 assert(!LHS->getType()->isPointerTy() &&
3548 "SCEVUDivExpr operand can't be pointer!");
3549 assert(LHS->getType() == RHS->getType() &&
3550 "SCEVUDivExpr operand types don't match!");
3557 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
3565 if (RHSC->getValue()->isOne())
3570 if (!RHSC->getValue()->isZero()) {
3574 Type *Ty = LHS->getType();
3575 unsigned LZ = RHSC->getAPInt().countl_zero();
3579 if (!RHSC->getAPInt().isPowerOf2())
3587 const APInt &StepInt = Step->getAPInt();
3588 const APInt &DivInt = RHSC->getAPInt();
3589 if (!StepInt.
urem(DivInt) &&
3595 for (
const SCEV *
Op : AR->operands())
3601 const APInt *StartRem;
3614 bool CanFoldWithWrap = StepInt.
ule(DivInt) &&
3618 const SCEV *NewStart =
3620 if (*StartRem != 0 && (NoWrap || CanFoldWithWrap) &&
3622 const SCEV *NewLHS =
3625 if (LHS != NewLHS) {
3635 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
3644 for (
const SCEV *
Op : M->operands())
3648 for (
unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
3649 const SCEV *
Op = M->getOperand(i);
3676 if (
auto *DivisorConstant =
3678 bool Overflow =
false;
3680 DivisorConstant->getAPInt().
umul_ov(RHSC->getAPInt(), Overflow);
3691 for (
const SCEV *
Op :
A->operands())
3695 for (
unsigned i = 0, e =
A->getNumOperands(); i != e; ++i) {
3702 if (Operands.
size() ==
A->getNumOperands())
3709 return getConstant(LHSC->getAPInt().udiv(RHSC->getAPInt()));
3719 return getZero(LHS->getType());
3723 if (
Mul &&
Mul->hasNoUnsignedWrap()) {
3724 for (
int i = 0, e =
Mul->getNumOperands(); i != e; ++i) {
3725 if (
Mul->getOperand(i) == RHS) {
3736 const SCEV *NewLHS, *NewRHS;
3744 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
3747 UniqueSCEVs.InsertNode(S, IP);
3784 if (StepChrec->getLoop() == L) {
3798 if (Operands.
size() == 1)
return Operands[0];
3803 "SCEVAddRecExpr operand types don't match!");
3804 assert(!
Op->getType()->isPointerTy() &&
"Step must be integer");
3806 for (
const SCEV *
Op : Operands)
3808 "SCEVAddRecExpr operand is not available at loop entry!");
3811 if (Operands.
back()->isZero()) {
3826 const Loop *NestedLoop = NestedAR->getLoop();
3827 if (L->contains(NestedLoop)
3830 DT.dominates(L->getHeader(), NestedLoop->
getHeader()))) {
3832 Operands[0] = NestedAR->getStart();
3836 bool AllInvariant =
all_of(
3848 AllInvariant =
all_of(NestedOperands, [&](
const SCEV *
Op) {
3859 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
3863 Operands[0] = NestedAR;
3869 return getOrCreateAddRecExpr(Operands, L, Flags);
3885 if (!GEPI || !isSCEVExprNeverPoison(GEPI))
3889 return getGEPExpr(BaseExpr, IndexExprs,
GEP->getSourceElementType(), NW);
3903 bool FirstIter =
true;
3905 for (
SCEVUse IndexExpr : IndexExprs) {
3912 Offsets.push_back(FieldOffset);
3915 CurTy = STy->getTypeAtIndex(Index);
3920 "The first index of a GEP indexes a pointer");
3921 CurTy = SrcElementTy;
3932 const SCEV *LocalOffset =
getMulExpr(IndexExpr, ElementSize, OffsetWrap);
3933 Offsets.push_back(LocalOffset);
3938 if (Offsets.empty())
3951 "GEP should not change type mid-flight.");
3955SCEV *ScalarEvolution::findExistingSCEVInCache(
SCEVTypes SCEVType,
3958 ID.AddInteger(SCEVType);
3962 return UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
3965SCEV *ScalarEvolution::findExistingSCEVInCache(
SCEVTypes SCEVType,
3968 ID.AddInteger(SCEVType);
3972 return UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
3982 assert(SCEVMinMaxExpr::isMinMaxType(Kind) &&
"Not a SCEVMinMaxExpr!");
3983 assert(!
Ops.empty() &&
"Cannot get empty (u|s)(min|max)!");
3984 if (
Ops.size() == 1)
return Ops[0];
3987 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i) {
3989 "Operand types don't match!");
3992 "min/max should be consistently pointerish");
4018 return IsSigned ?
C.isMinSignedValue() :
C.isMinValue();
4020 return IsSigned ?
C.isMaxSignedValue() :
C.isMaxValue();
4025 return IsSigned ?
C.isMaxSignedValue() :
C.isMaxValue();
4027 return IsSigned ?
C.isMinSignedValue() :
C.isMinValue();
4033 if (
const SCEV *S = findExistingSCEVInCache(Kind,
Ops)) {
4039 while (Idx <
Ops.size() &&
Ops[Idx]->getSCEVType() < Kind)
4044 if (Idx <
Ops.size()) {
4045 bool DeletedAny =
false;
4046 while (
Ops[Idx]->getSCEVType() == Kind) {
4048 Ops.erase(
Ops.begin()+Idx);
4066 for (
unsigned i = 0, e =
Ops.size() - 1; i != e; ++i) {
4067 if (
Ops[i] ==
Ops[i + 1] ||
4068 isKnownViaNonRecursiveReasoning(FirstPred,
Ops[i],
Ops[i + 1])) {
4071 Ops.erase(
Ops.begin() + i + 1,
Ops.begin() + i + 2);
4074 }
else if (isKnownViaNonRecursiveReasoning(SecondPred,
Ops[i],
4077 Ops.erase(
Ops.begin() + i,
Ops.begin() + i + 1);
4083 if (
Ops.size() == 1)
return Ops[0];
4085 assert(!
Ops.empty() &&
"Reduced smax down to nothing!");
4090 ID.AddInteger(Kind);
4094 const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
4096 return ExistingSCEV;
4099 SCEV *S =
new (SCEVAllocator)
4102 UniqueSCEVs.InsertNode(S, IP);
4110class SCEVSequentialMinMaxDeduplicatingVisitor final
4111 :
public SCEVVisitor<SCEVSequentialMinMaxDeduplicatingVisitor,
4112 std::optional<const SCEV *>> {
4113 using RetVal = std::optional<const SCEV *>;
4121 bool canRecurseInto(
SCEVTypes Kind)
const {
4124 return RootKind == Kind || NonSequentialRootKind == Kind;
4127 RetVal visitAnyMinMaxExpr(
const SCEV *S) {
4129 "Only for min/max expressions.");
4132 if (!canRecurseInto(Kind))
4142 return std::nullopt;
4149 RetVal
visit(
const SCEV *S) {
4151 if (!SeenOps.
insert(S).second)
4152 return std::nullopt;
4153 return Base::visit(S);
4157 SCEVSequentialMinMaxDeduplicatingVisitor(ScalarEvolution &SE,
4159 : SE(SE), RootKind(RootKind),
4160 NonSequentialRootKind(
4161 SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType(
4165 SmallVectorImpl<SCEVUse> &NewOps) {
4170 for (
const SCEV *
Op : OrigOps) {
4175 Ops.emplace_back(*NewOp);
4179 NewOps = std::move(
Ops);
4183 RetVal visitConstant(
const SCEVConstant *Constant) {
return Constant; }
4185 RetVal visitVScale(
const SCEVVScale *VScale) {
return VScale; }
4187 RetVal visitPtrToAddrExpr(
const SCEVPtrToAddrExpr *Expr) {
return Expr; }
4189 RetVal visitPtrToIntExpr(
const SCEVPtrToIntExpr *Expr) {
return Expr; }
4191 RetVal visitTruncateExpr(
const SCEVTruncateExpr *Expr) {
return Expr; }
4193 RetVal visitZeroExtendExpr(
const SCEVZeroExtendExpr *Expr) {
return Expr; }
4195 RetVal visitSignExtendExpr(
const SCEVSignExtendExpr *Expr) {
return Expr; }
4197 RetVal visitAddExpr(
const SCEVAddExpr *Expr) {
return Expr; }
4199 RetVal visitMulExpr(
const SCEVMulExpr *Expr) {
return Expr; }
4201 RetVal visitUDivExpr(
const SCEVUDivExpr *Expr) {
return Expr; }
4203 RetVal visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
return Expr; }
4205 RetVal visitSMaxExpr(
const SCEVSMaxExpr *Expr) {
4206 return visitAnyMinMaxExpr(Expr);
4209 RetVal visitUMaxExpr(
const SCEVUMaxExpr *Expr) {
4210 return visitAnyMinMaxExpr(Expr);
4213 RetVal visitSMinExpr(
const SCEVSMinExpr *Expr) {
4214 return visitAnyMinMaxExpr(Expr);
4217 RetVal visitUMinExpr(
const SCEVUMinExpr *Expr) {
4218 return visitAnyMinMaxExpr(Expr);
4221 RetVal visitSequentialUMinExpr(
const SCEVSequentialUMinExpr *Expr) {
4222 return visitAnyMinMaxExpr(Expr);
4225 RetVal visitUnknown(
const SCEVUnknown *Expr) {
return Expr; }
4227 RetVal visitCouldNotCompute(
const SCEVCouldNotCompute *Expr) {
return Expr; }
4270struct SCEVPoisonCollector {
4271 bool LookThroughMaybePoisonBlocking;
4272 SmallPtrSet<const SCEVUnknown *, 4> MaybePoison;
4273 SCEVPoisonCollector(
bool LookThroughMaybePoisonBlocking)
4274 : LookThroughMaybePoisonBlocking(LookThroughMaybePoisonBlocking) {}
4276 bool follow(
const SCEV *S) {
4277 if (!LookThroughMaybePoisonBlocking &&
4287 bool isDone()
const {
return false; }
4297 SCEVPoisonCollector PC1(
true);
4302 if (PC1.MaybePoison.empty())
4308 SCEVPoisonCollector PC2(
false);
4318 SCEVPoisonCollector PC(
false);
4341 while (!Worklist.
empty()) {
4343 if (!Visited.
insert(V).second)
4347 if (Visited.
size() > 16)
4352 if (PoisonVals.
contains(V) || ::isGuaranteedNotToBePoison(V))
4363 if (PDI->isDisjoint())
4370 II &&
II->getIntrinsicID() == Intrinsic::vscale)
4377 if (
I->hasPoisonGeneratingAnnotations())
4388 assert(SCEVSequentialMinMaxExpr::isSequentialMinMaxType(Kind) &&
4389 "Not a SCEVSequentialMinMaxExpr!");
4390 assert(!
Ops.empty() &&
"Cannot get empty (u|s)(min|max)!");
4391 if (
Ops.size() == 1)
4395 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i) {
4397 "Operand types don't match!");
4400 "min/max should be consistently pointerish");
4408 if (
const SCEV *S = findExistingSCEVInCache(Kind,
Ops))
4415 SCEVSequentialMinMaxDeduplicatingVisitor Deduplicator(*
this, Kind);
4425 bool DeletedAny =
false;
4426 while (Idx <
Ops.size()) {
4427 if (
Ops[Idx]->getSCEVType() != Kind) {
4432 Ops.erase(
Ops.begin() + Idx);
4433 Ops.insert(
Ops.begin() + Idx, SMME->operands().begin(),
4434 SMME->operands().end());
4442 const SCEV *SaturationPoint;
4453 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i) {
4454 if (!isGuaranteedNotToCauseUB(
Ops[i]))
4466 Ops.erase(
Ops.begin() + i);
4471 if (isKnownViaNonRecursiveReasoning(Pred,
Ops[i - 1],
Ops[i])) {
4472 Ops.erase(
Ops.begin() + i);
4480 ID.AddInteger(Kind);
4484 const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
4486 return ExistingSCEV;
4490 SCEV *S =
new (SCEVAllocator)
4493 UniqueSCEVs.InsertNode(S, IP);
4541 if (
Size.isScalable())
4562 "Cannot get offset for structure containing scalable vector types");
4576 if (
SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP)) {
4578 "Stale SCEVUnknown in uniquing map!");
4584 UniqueSCEVs.InsertNode(S, IP);
4599 return Ty->isIntOrPtrTy();
4606 if (Ty->isPointerTy())
4617 if (Ty->isIntegerTy())
4621 assert(Ty->isPointerTy() &&
"Unexpected non-pointer non-integer type!");
4633 bool PreciseA, PreciseB;
4634 auto *ScopeA = getDefiningScopeBound({
A}, PreciseA);
4635 auto *ScopeB = getDefiningScopeBound({
B}, PreciseB);
4636 if (!PreciseA || !PreciseB)
4639 return (ScopeA == ScopeB) || DT.dominates(ScopeA, ScopeB) ||
4640 DT.dominates(ScopeB, ScopeA);
4644 return CouldNotCompute.get();
4647bool ScalarEvolution::checkValidity(
const SCEV *S)
const {
4650 return SU && SU->getValue() ==
nullptr;
4653 return !ContainsNulls;
4658 if (
I != HasRecMap.end())
4663 HasRecMap.insert({S, FoundAddRec});
4671 if (
SI == ExprValueMap.
end())
4673 return SI->second.getArrayRef();
4679void ScalarEvolution::eraseValueFromMap(
Value *V) {
4681 if (
I != ValueExprMap.end()) {
4682 auto EVIt = ExprValueMap.find(
I->second);
4683 bool Removed = EVIt->second.remove(V);
4685 assert(Removed &&
"Value not in ExprValueMap?");
4686 ValueExprMap.erase(
I);
4690void ScalarEvolution::insertValueToMap(
Value *V,
const SCEV *S) {
4694 auto It = ValueExprMap.find_as(V);
4695 if (It == ValueExprMap.end()) {
4697 ExprValueMap[S].insert(V);
4708 return createSCEVIter(V);
4715 if (
I != ValueExprMap.end()) {
4716 const SCEV *S =
I->second;
4717 assert(checkValidity(S) &&
4718 "existing SCEV has not been properly invalidated");
4731 Type *Ty = V->getType();
4747 assert(!V->getType()->isPointerTy() &&
"Can't negate pointer");
4760 return (
const SCEV *)
nullptr;
4766 if (
const SCEV *Replaced = MatchMinMaxNegation(MME))
4770 Type *Ty = V->getType();
4776 assert(
P->getType()->isPointerTy());
4791 if (AddOp->getType()->isPointerTy()) {
4792 assert(!PtrOp &&
"Cannot have multiple pointer ops");
4810 return getZero(LHS->getType());
4815 if (RHS->getType()->isPointerTy()) {
4816 if (!LHS->getType()->isPointerTy() ||
4826 const bool RHSIsNotMinSigned =
4857 Type *SrcTy = V->getType();
4858 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4859 "Cannot truncate or zero extend with non-integer arguments!");
4869 Type *SrcTy = V->getType();
4870 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4871 "Cannot truncate or zero extend with non-integer arguments!");
4881 Type *SrcTy = V->getType();
4882 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4883 "Cannot noop or zero extend with non-integer arguments!");
4885 "getNoopOrZeroExtend cannot truncate!");
4893 Type *SrcTy = V->getType();
4894 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4895 "Cannot noop or sign extend with non-integer arguments!");
4897 "getNoopOrSignExtend cannot truncate!");
4905 Type *SrcTy = V->getType();
4906 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4907 "Cannot noop or any extend with non-integer arguments!");
4909 "getNoopOrAnyExtend cannot truncate!");
4917 Type *SrcTy = V->getType();
4918 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4919 "Cannot truncate or noop with non-integer arguments!");
4921 "getTruncateOrNoop cannot extend!");
4929 const SCEV *PromotedLHS = LHS;
4930 const SCEV *PromotedRHS = RHS;
4950 assert(!
Ops.empty() &&
"At least one operand must be!");
4952 if (
Ops.size() == 1)
4956 Type *MaxType =
nullptr;
4962 assert(MaxType &&
"Failed to find maximum type!");
4975 if (!V->getType()->isPointerTy())
4980 V = AddRec->getStart();
4982 const SCEV *PtrOp =
nullptr;
4983 for (
const SCEV *AddOp :
Add->operands()) {
4984 if (AddOp->getType()->isPointerTy()) {
4985 assert(!PtrOp &&
"Cannot have multiple pointer ops");
4989 assert(PtrOp &&
"Must have pointer op");
5001 for (
User *U :
I->users()) {
5003 if (Visited.
insert(UserInsn).second)
5017 static const SCEV *rewrite(
const SCEV *S,
const Loop *L, ScalarEvolution &SE,
5018 bool IgnoreOtherLoops =
true) {
5021 if (
Rewriter.hasSeenLoopVariantSCEVUnknown())
5023 return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
5028 const SCEV *visitUnknown(
const SCEVUnknown *Expr) {
5030 SeenLoopVariantSCEVUnknown =
true;
5034 const SCEV *visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
5038 SeenOtherLoops =
true;
5042 bool hasSeenLoopVariantSCEVUnknown() {
return SeenLoopVariantSCEVUnknown; }
5044 bool hasSeenOtherLoops() {
return SeenOtherLoops; }
5047 explicit SCEVInitRewriter(
const Loop *L, ScalarEvolution &SE)
5048 : SCEVRewriteVisitor(SE),
L(
L) {}
5051 bool SeenLoopVariantSCEVUnknown =
false;
5052 bool SeenOtherLoops =
false;
5061 static const SCEV *rewrite(
const SCEV *S,
const Loop *L, ScalarEvolution &SE) {
5062 SCEVPostIncRewriter
Rewriter(L, SE);
5064 return Rewriter.hasSeenLoopVariantSCEVUnknown()
5069 const SCEV *visitUnknown(
const SCEVUnknown *Expr) {
5071 SeenLoopVariantSCEVUnknown =
true;
5075 const SCEV *visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
5079 SeenOtherLoops =
true;
5083 bool hasSeenLoopVariantSCEVUnknown() {
return SeenLoopVariantSCEVUnknown; }
5085 bool hasSeenOtherLoops() {
return SeenOtherLoops; }
5088 explicit SCEVPostIncRewriter(
const Loop *L, ScalarEvolution &SE)
5089 : SCEVRewriteVisitor(SE),
L(
L) {}
5092 bool SeenLoopVariantSCEVUnknown =
false;
5093 bool SeenOtherLoops =
false;
5099class SCEVBackedgeConditionFolder
5102 static const SCEV *rewrite(
const SCEV *S,
const Loop *L,
5103 ScalarEvolution &SE) {
5104 bool IsPosBECond =
false;
5105 Value *BECond =
nullptr;
5106 if (BasicBlock *Latch =
L->getLoopLatch()) {
5108 assert(BI->getSuccessor(0) != BI->getSuccessor(1) &&
5109 "Both outgoing branches should not target same header!");
5110 BECond = BI->getCondition();
5111 IsPosBECond = BI->getSuccessor(0) ==
L->getHeader();
5116 SCEVBackedgeConditionFolder
Rewriter(L, BECond, IsPosBECond, SE);
5120 const SCEV *visitUnknown(
const SCEVUnknown *Expr) {
5121 const SCEV *
Result = Expr;
5126 switch (
I->getOpcode()) {
5127 case Instruction::Select: {
5129 std::optional<const SCEV *> Res =
5130 compareWithBackedgeCondition(
SI->getCondition());
5138 std::optional<const SCEV *> Res = compareWithBackedgeCondition(
I);
5149 explicit SCEVBackedgeConditionFolder(
const Loop *L,
Value *BECond,
5150 bool IsPosBECond, ScalarEvolution &SE)
5151 : SCEVRewriteVisitor(SE),
L(
L), BackedgeCond(BECond),
5152 IsPositiveBECond(IsPosBECond) {}
5154 std::optional<const SCEV *> compareWithBackedgeCondition(
Value *IC);
5158 Value *BackedgeCond =
nullptr;
5160 bool IsPositiveBECond;
5163std::optional<const SCEV *>
5164SCEVBackedgeConditionFolder::compareWithBackedgeCondition(
Value *IC) {
5169 if (BackedgeCond == IC)
5172 return std::nullopt;
5177 static const SCEV *rewrite(
const SCEV *S,
const Loop *L,
5178 ScalarEvolution &SE) {
5184 const SCEV *visitUnknown(
const SCEVUnknown *Expr) {
5191 const SCEV *visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
5201 explicit SCEVShiftRewriter(
const Loop *L, ScalarEvolution &SE)
5202 : SCEVRewriteVisitor(SE),
L(
L) {}
5211ScalarEvolution::proveNoWrapViaConstantRanges(
const SCEVAddRecExpr *AR) {
5215 using OBO = OverflowingBinaryOperator;
5223 const APInt &BECountAP = BECountMax->getAPInt();
5224 unsigned NoOverflowBitWidth =
5236 Instruction::Add, IncRange, OBO::NoSignedWrap);
5237 if (NSWRegion.contains(AddRecRange))
5246 Instruction::Add, IncRange, OBO::NoUnsignedWrap);
5247 if (NUWRegion.contains(AddRecRange))
5255ScalarEvolution::proveNoSignedWrapViaInduction(
const SCEVAddRecExpr *AR) {
5265 if (!SignedWrapViaInductionTried.insert(AR).second)
5290 AC.assumptions().empty())
5298 const SCEV *OverflowLimit =
5300 if (OverflowLimit &&
5308ScalarEvolution::proveNoUnsignedWrapViaInduction(
const SCEVAddRecExpr *AR) {
5318 if (!UnsignedWrapViaInductionTried.insert(AR).second)
5344 AC.assumptions().empty())
5379 explicit BinaryOp(Operator *
Op)
5380 : Opcode(
Op->getOpcode()),
LHS(
Op->getOperand(0)),
RHS(
Op->getOperand(1)),
5383 IsNSW = OBO->hasNoSignedWrap();
5384 IsNUW = OBO->hasNoUnsignedWrap();
5388 explicit BinaryOp(
unsigned Opcode,
Value *
LHS,
Value *
RHS,
bool IsNSW =
false,
5390 : Opcode(Opcode),
LHS(
LHS),
RHS(
RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
5402 return std::nullopt;
5408 switch (
Op->getOpcode()) {
5409 case Instruction::Add:
5410 case Instruction::Sub:
5411 case Instruction::Mul:
5412 case Instruction::UDiv:
5413 case Instruction::URem:
5414 case Instruction::And:
5415 case Instruction::AShr:
5416 case Instruction::Shl:
5417 return BinaryOp(
Op);
5419 case Instruction::Or: {
5422 BinaryOp BinOp(Instruction::Add,
Op->getOperand(0),
Op->getOperand(1),
5429 return BinaryOp(
Op);
5432 case Instruction::Xor:
5436 if (RHSC->getValue().isSignMask())
5437 return BinaryOp(Instruction::Add,
Op->getOperand(0),
Op->getOperand(1));
5439 if (V->getType()->isIntegerTy(1))
5440 return BinaryOp(Instruction::Add,
Op->getOperand(0),
Op->getOperand(1));
5441 return BinaryOp(
Op);
5443 case Instruction::LShr:
5452 if (SA->getValue().ult(
BitWidth)) {
5454 ConstantInt::get(SA->getContext(),
5456 return BinaryOp(Instruction::UDiv,
Op->getOperand(0),
X);
5459 return BinaryOp(
Op);
5461 case Instruction::ExtractValue: {
5463 if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
5471 bool Signed = WO->isSigned();
5474 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS());
5479 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(),
5490 if (
II->getIntrinsicID() == Intrinsic::loop_decrement_reg)
5491 return BinaryOp(Instruction::Sub,
II->getOperand(0),
II->getOperand(1));
5493 return std::nullopt;
5519 if (
Op == SymbolicPHI)
5524 if (SourceBits != NewBits)
5542 if (!L || L->getHeader() != PN->
getParent())
5600std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5601ScalarEvolution::createAddRecFromPHIWithCastsImpl(
const SCEVUnknown *SymbolicPHI) {
5609 assert(L &&
"Expecting an integer loop header phi");
5614 Value *BEValueV =
nullptr, *StartValueV =
nullptr;
5615 for (
unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
5616 Value *
V = PN->getIncomingValue(i);
5617 if (
L->contains(PN->getIncomingBlock(i))) {
5620 }
else if (BEValueV != V) {
5624 }
else if (!StartValueV) {
5626 }
else if (StartValueV != V) {
5627 StartValueV =
nullptr;
5631 if (!BEValueV || !StartValueV)
5632 return std::nullopt;
5634 const SCEV *BEValue =
getSCEV(BEValueV);
5641 return std::nullopt;
5645 unsigned FoundIndex =
Add->getNumOperands();
5646 Type *TruncTy =
nullptr;
5648 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5651 if (FoundIndex == e) {
5656 if (FoundIndex ==
Add->getNumOperands())
5657 return std::nullopt;
5661 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5662 if (i != FoundIndex)
5663 Ops.push_back(
Add->getOperand(i));
5669 return std::nullopt;
5722 const SCEV *StartVal =
getSCEV(StartValueV);
5723 const SCEV *PHISCEV =
5750 auto getExtendedExpr = [&](
const SCEV *Expr,
5751 bool CreateSignExtend) ->
const SCEV * {
5754 const SCEV *ExtendedExpr =
5757 return ExtendedExpr;
5765 auto PredIsKnownFalse = [&](
const SCEV *Expr,
5766 const SCEV *ExtendedExpr) ->
bool {
5767 return Expr != ExtendedExpr &&
5771 const SCEV *StartExtended = getExtendedExpr(StartVal,
Signed);
5772 if (PredIsKnownFalse(StartVal, StartExtended)) {
5774 return std::nullopt;
5779 const SCEV *AccumExtended = getExtendedExpr(Accum,
true);
5780 if (PredIsKnownFalse(Accum, AccumExtended)) {
5782 return std::nullopt;
5785 auto AppendPredicate = [&](
const SCEV *Expr,
5786 const SCEV *ExtendedExpr) ->
void {
5787 if (Expr != ExtendedExpr &&
5795 AppendPredicate(StartVal, StartExtended);
5796 AppendPredicate(Accum, AccumExtended);
5804 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
5805 std::make_pair(NewAR, Predicates);
5807 PredicatedSCEVRewrites[{SymbolicPHI,
L}] = PredRewrite;
5811std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5816 return std::nullopt;
5819 auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
5820 if (
I != PredicatedSCEVRewrites.end()) {
5821 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
5824 if (Rewrite.first == SymbolicPHI)
5825 return std::nullopt;
5829 assert(!(Rewrite.second).empty() &&
"Expected to find Predicates");
5833 std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5834 Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);
5839 PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
5840 return std::nullopt;
5857 auto areExprsEqual = [&](
const SCEV *Expr1,
const SCEV *Expr2) ->
bool {
5858 if (Expr1 != Expr2 &&
5859 !Preds->implies(SE.getEqualPredicate(Expr1, Expr2), SE) &&
5860 !Preds->implies(SE.getEqualPredicate(Expr2, Expr1), SE))
5877const SCEV *ScalarEvolution::createSimpleAffineAddRec(
PHINode *PN,
5879 Value *StartValueV) {
5882 assert(BEValueV && StartValueV);
5888 if (BO->Opcode != Instruction::Add)
5891 const SCEV *Accum =
nullptr;
5892 if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
5894 else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
5908 insertValueToMap(PN, PHISCEV);
5920 "Accum is defined outside L, but is not invariant?");
5921 if (isAddRecNeverPoison(BEInst, L))
5928const SCEV *ScalarEvolution::createAddRecFromPHI(
PHINode *PN) {
5929 const Loop *
L = LI.getLoopFor(PN->
getParent());
5936 Value *BEValueV =
nullptr, *StartValueV =
nullptr;
5942 }
else if (BEValueV != V) {
5946 }
else if (!StartValueV) {
5948 }
else if (StartValueV != V) {
5949 StartValueV =
nullptr;
5953 if (!BEValueV || !StartValueV)
5956 assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
5957 "PHI node already processed?");
5961 if (
auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
5966 insertValueToMap(PN, SymbolicName);
5970 const SCEV *BEValue =
getSCEV(BEValueV);
5980 unsigned FoundIndex =
Add->getNumOperands();
5981 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5982 if (
Add->getOperand(i) == SymbolicName)
5983 if (FoundIndex == e) {
5988 if (FoundIndex !=
Add->getNumOperands()) {
5991 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5992 if (i != FoundIndex)
5993 Ops.push_back(SCEVBackedgeConditionFolder::rewrite(
Add->getOperand(i),
6005 if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
6012 if (
GEP->getOperand(0) == PN) {
6013 GEPNoWrapFlags NW =
GEP->getNoWrapFlags();
6031 const SCEV *StartVal =
getSCEV(StartValueV);
6032 const SCEV *PHISCEV =
getAddRecExpr(StartVal, Accum, L, Flags);
6037 forgetMemoizedResults({SymbolicName});
6038 insertValueToMap(PN, PHISCEV);
6042 const_cast<SCEVAddRecExpr *
>(AR),
6068 const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *
this);
6069 const SCEV *
Start = SCEVInitRewriter::rewrite(Shifted, L, *
this,
false);
6071 isGuaranteedNotToCauseUB(Shifted) &&
::impliesPoison(Shifted, Start)) {
6072 const SCEV *StartVal =
getSCEV(StartValueV);
6073 if (Start == StartVal) {
6077 forgetMemoizedResults({SymbolicName});
6078 insertValueToMap(PN, Shifted);
6088 eraseValueFromMap(PN);
6103 Use &LeftUse =
Merge->getOperandUse(0);
6104 Use &RightUse =
Merge->getOperandUse(1);
6140 assert(IDom &&
"At least the entry block should dominate PN");
6148const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(
PHINode *PN) {
6153 return createNodeForSelectOrPHI(PN,
Cond,
LHS,
RHS);
6170 CommonInst = IncomingInst;
6186ScalarEvolution::createNodeForPHIWithIdenticalOperands(
PHINode *PN) {
6192 const SCEV *CommonSCEV =
getSCEV(CommonInst);
6193 bool SCEVExprsIdentical =
6195 [
this, CommonSCEV](
Value *V) { return CommonSCEV == getSCEV(V); });
6196 return SCEVExprsIdentical ? CommonSCEV :
nullptr;
6199const SCEV *ScalarEvolution::createNodeForPHI(
PHINode *PN) {
6200 if (
const SCEV *S = createAddRecFromPHI(PN))
6210 if (
const SCEV *S = createNodeForPHIWithIdenticalOperands(PN))
6213 if (
const SCEV *S = createNodeFromSelectLikePHI(PN))
6222 struct FindClosure {
6223 const SCEV *OperandToFind;
6229 bool canRecurseInto(
SCEVTypes Kind)
const {
6232 return RootKind == Kind || NonSequentialRootKind == Kind ||
6237 : OperandToFind(OperandToFind), RootKind(RootKind),
6238 NonSequentialRootKind(
6242 bool follow(
const SCEV *S) {
6243 Found = S == OperandToFind;
6245 return !isDone() && canRecurseInto(S->
getSCEVType());
6248 bool isDone()
const {
return Found; }
6251 FindClosure FC(OperandToFind, RootKind);
6256std::optional<const SCEV *>
6257ScalarEvolution::createNodeForSelectOrPHIInstWithICmpInstCond(
Type *Ty,
6267 switch (ICI->getPredicate()) {
6281 bool Signed = ICI->isSigned();
6282 const SCEV *LA =
getSCEV(TrueVal);
6290 if (LA == LS &&
RA == RS)
6292 if (LA == RS &&
RA == LS)
6295 auto CoerceOperand = [&](
const SCEV *
Op) ->
const SCEV * {
6296 if (
Op->getType()->isPointerTy()) {
6307 LS = CoerceOperand(LS);
6308 RS = CoerceOperand(RS);
6332 const SCEV *TrueValExpr =
getSCEV(TrueVal);
6333 const SCEV *FalseValExpr =
getSCEV(FalseVal);
6347 X = ZExt->getOperand();
6349 const SCEV *FalseValExpr =
getSCEV(FalseVal);
6360 return std::nullopt;
6363static std::optional<const SCEV *>
6365 const SCEV *TrueExpr,
const SCEV *FalseExpr) {
6369 "Unexpected operands of a select.");
6381 return std::nullopt;
6396static std::optional<const SCEV *>
6400 return std::nullopt;
6403 const auto *SETrue = SE->
getSCEV(TrueVal);
6404 const auto *SEFalse = SE->
getSCEV(FalseVal);
6408const SCEV *ScalarEvolution::createNodeForSelectOrPHIViaUMinSeq(
6410 assert(
Cond->getType()->isIntegerTy(1) &&
"Select condition is not an i1?");
6412 V->getType() ==
TrueVal->getType() &&
6413 "Types of select hands and of the result must match.");
6416 if (!
V->getType()->isIntegerTy(1))
6419 if (std::optional<const SCEV *> S =
6432 return getSCEV(CI->isOne() ? TrueVal : FalseVal);
6436 if (std::optional<const SCEV *> S =
6437 createNodeForSelectOrPHIInstWithICmpInstCond(
I->getType(), ICI,
6443 return createNodeForSelectOrPHIViaUMinSeq(V,
Cond, TrueVal, FalseVal);
6449 assert(
GEP->getSourceElementType()->isSized() &&
6450 "GEP source element type must be sized");
6453 for (
Value *Index :
GEP->indices())
6458APInt ScalarEvolution::getConstantMultipleImpl(
const SCEV *S,
6461 auto GetShiftedByZeros = [
BitWidth](uint32_t TrailingZeros) {
6464 : APInt::getOneBitSet(
BitWidth, TrailingZeros);
6466 auto GetGCDMultiple = [
this, CtxI](
const SCEVNAryExpr *
N) {
6469 for (
unsigned I = 1,
E =
N->getNumOperands();
I <
E && Res != 1; ++
I)
6488 return GetShiftedByZeros(TZ);
6498 return GetShiftedByZeros(TZ);
6502 if (
M->hasNoUnsignedWrap()) {
6505 for (
const SCEV *Operand :
M->operands().drop_front())
6513 for (
const SCEV *Operand :
M->operands())
6515 return GetShiftedByZeros(TZ);
6520 if (
N->hasNoUnsignedWrap())
6521 return GetGCDMultiple(
N);
6524 for (
const SCEV *Operand :
N->operands().drop_front())
6526 return GetShiftedByZeros(TZ);
6543 CtxI = &*F.getEntryBlock().begin();
6550 .allowEphemerals(
true))
6551 .countMinTrailingZeros();
6552 return GetShiftedByZeros(Known);
6565 return getConstantMultipleImpl(S, CtxI);
6567 auto I = ConstantMultipleCache.find(S);
6568 if (
I != ConstantMultipleCache.end())
6571 APInt Result = getConstantMultipleImpl(S, CtxI);
6572 auto InsertPair = ConstantMultipleCache.insert({S, Result});
6573 assert(InsertPair.second &&
"Should insert a new key");
6574 return InsertPair.first->second;
6591 if (
MDNode *MD =
I->getMetadata(LLVMContext::MD_range))
6594 if (std::optional<ConstantRange>
Range = CB->getRange())
6598 if (std::optional<ConstantRange>
Range =
A->getRange())
6601 return std::nullopt;
6608 UnsignedRanges.erase(AddRec);
6609 SignedRanges.erase(AddRec);
6610 ConstantMultipleCache.erase(AddRec);
6615getRangeForUnknownRecurrence(
const SCEVUnknown *U) {
6641 Value *Start, *Step;
6648 assert(L && L->getHeader() ==
P->getParent());
6661 case Instruction::AShr:
6662 case Instruction::LShr:
6663 case Instruction::Shl:
6678 KnownStep.getBitWidth() ==
BitWidth);
6681 auto MaxShiftAmt = KnownStep.getMaxValue();
6683 bool Overflow =
false;
6684 auto TotalShift = MaxShiftAmt.umul_ov(TCAP, Overflow);
6691 case Instruction::AShr: {
6699 if (KnownStart.isNonNegative())
6702 KnownStart.getMaxValue() + 1);
6703 if (KnownStart.isNegative())
6706 KnownEnd.getMaxValue() + 1);
6709 case Instruction::LShr: {
6718 KnownStart.getMaxValue() + 1);
6720 case Instruction::Shl: {
6724 if (TotalShift.ult(KnownStart.countMinLeadingZeros()))
6725 return ConstantRange(KnownStart.getMinValue(),
6726 KnownEnd.getMaxValue() + 1);
6751 [&](
Value *Operand) { return DT.dominates(Operand, PHI); }))
6758ScalarEvolution::getRangeRefIter(
const SCEV *S,
6759 ScalarEvolution::RangeSignHint SignHint) {
6760 DenseMap<const SCEV *, ConstantRange> &Cache =
6761 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
6764 SmallPtrSet<const SCEV *, 8> Seen;
6768 auto AddToWorklist = [&WorkList, &Seen, &Cache](
const SCEV *Expr) {
6769 if (!Seen.
insert(Expr).second)
6803 for (
unsigned I = 0;
I != WorkList.
size(); ++
I) {
6804 const SCEV *
P = WorkList[
I];
6808 for (
const SCEV *
Op :
P->operands())
6821 if (!WorkList.
empty()) {
6826 getRangeRef(
P, SignHint);
6830 return getRangeRef(S, SignHint, 0);
6837 const SCEV *S, ScalarEvolution::RangeSignHint SignHint,
unsigned Depth) {
6838 DenseMap<const SCEV *, ConstantRange> &Cache =
6839 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
6846 auto I = Cache.
find(S);
6847 if (
I != Cache.
end())
6851 return setRange(
C, SignHint, ConstantRange(
C->getAPInt()));
6856 return getRangeRefIter(S, SignHint);
6859 ConstantRange ConservativeResult(
BitWidth,
true);
6860 using OBO = OverflowingBinaryOperator;
6864 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
6868 ConservativeResult =
6875 ConservativeResult = ConstantRange(
6891 ConservativeResult.intersectWith(
X.truncate(
BitWidth), RangeType));
6898 ConservativeResult.intersectWith(
X.zeroExtend(
BitWidth), RangeType));
6905 ConservativeResult.intersectWith(
X.signExtend(
BitWidth), RangeType));
6911 return setRange(Cast, SignHint,
X);
6916 const SCEV *URemLHS =
nullptr, *URemRHS =
nullptr;
6917 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED &&
6919 ConstantRange LHSRange = getRangeRef(URemLHS, SignHint,
Depth + 1);
6920 ConstantRange RHSRange = getRangeRef(URemRHS, SignHint,
Depth + 1);
6921 ConservativeResult =
6922 ConservativeResult.intersectWith(LHSRange.
urem(RHSRange), RangeType);
6924 ConstantRange
X = getRangeRef(
Add->getOperand(0), SignHint,
Depth + 1);
6925 unsigned WrapType = OBO::AnyWrap;
6926 if (
Add->hasNoSignedWrap())
6927 WrapType |= OBO::NoSignedWrap;
6928 if (
Add->hasNoUnsignedWrap())
6929 WrapType |= OBO::NoUnsignedWrap;
6931 X =
X.addWithNoWrap(getRangeRef(
Op, SignHint,
Depth + 1), WrapType,
6933 return setRange(
Add, SignHint,
6934 ConservativeResult.intersectWith(
X, RangeType));
6938 ConstantRange
X = getRangeRef(
Mul->getOperand(0), SignHint,
Depth + 1);
6940 X =
X.multiply(getRangeRef(
Op, SignHint,
Depth + 1));
6941 return setRange(
Mul, SignHint,
6942 ConservativeResult.intersectWith(
X, RangeType));
6946 ConstantRange
X = getRangeRef(UDiv->
getLHS(), SignHint,
Depth + 1);
6947 ConstantRange
Y = getRangeRef(UDiv->
getRHS(), SignHint,
Depth + 1);
6948 return setRange(UDiv, SignHint,
6949 ConservativeResult.intersectWith(
X.udiv(
Y), RangeType));
6957 if (!UnsignedMinValue.
isZero())
6958 ConservativeResult = ConservativeResult.intersectWith(
6959 ConstantRange(UnsignedMinValue, APInt(
BitWidth, 0)), RangeType);
6968 bool AllNonNeg =
true;
6969 bool AllNonPos =
true;
6970 for (
unsigned i = 1, e = AddRec->
getNumOperands(); i != e; ++i) {
6977 ConservativeResult = ConservativeResult.intersectWith(
6982 ConservativeResult = ConservativeResult.intersectWith(
6991 const SCEV *MaxBEScev =
7005 auto RangeFromAffine = getRangeForAffineAR(
7007 ConservativeResult =
7008 ConservativeResult.intersectWith(RangeFromAffine, RangeType);
7010 auto RangeFromFactoring = getRangeViaFactoring(
7012 ConservativeResult =
7013 ConservativeResult.intersectWith(RangeFromFactoring, RangeType);
7019 const SCEV *SymbolicMaxBECount =
7024 auto RangeFromAffineNew = getRangeForAffineNoSelfWrappingAR(
7025 AddRec, SymbolicMaxBECount,
BitWidth, SignHint);
7026 ConservativeResult =
7027 ConservativeResult.intersectWith(RangeFromAffineNew, RangeType);
7032 return setRange(AddRec, SignHint, std::move(ConservativeResult));
7042 ID = Intrinsic::umax;
7045 ID = Intrinsic::smax;
7049 ID = Intrinsic::umin;
7052 ID = Intrinsic::smin;
7059 ConstantRange
X = getRangeRef(NAry->getOperand(0), SignHint,
Depth + 1);
7060 for (
unsigned i = 1, e = NAry->getNumOperands(); i != e; ++i)
7062 ID, {
X, getRangeRef(NAry->getOperand(i), SignHint,
Depth + 1)});
7063 return setRange(S, SignHint,
7064 ConservativeResult.intersectWith(
X, RangeType));
7073 ConservativeResult =
7074 ConservativeResult.intersectWith(*MDRange, RangeType);
7079 auto CR = getRangeForUnknownRecurrence(U);
7080 ConservativeResult = ConservativeResult.intersectWith(CR);
7091 if (
U->getType()->isPointerTy()) {
7094 unsigned ptrSize = DL.getPointerTypeSizeInBits(
U->getType());
7095 int ptrIdxDiff = ptrSize -
BitWidth;
7096 if (ptrIdxDiff > 0 && ptrSize >
BitWidth && NS > (
unsigned)ptrIdxDiff)
7109 ConservativeResult = ConservativeResult.intersectWith(
7113 ConservativeResult = ConservativeResult.intersectWith(
7118 if (
U->getType()->isPointerTy() && SignHint == HINT_RANGE_UNSIGNED) {
7121 bool CanBeNull, CanBeFreed;
7122 uint64_t DerefBytes =
7123 V->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
7133 uint64_t
Align =
U->getValue()->getPointerAlignment(DL).value();
7134 uint64_t Rem = MaxVal.
urem(Align);
7139 ConservativeResult = ConservativeResult.intersectWith(
7149 return getRangeRef(AR, SignHint,
Depth + 1);
7153 ConstantRange RangeFromOps(
BitWidth,
false);
7155 for (
const auto &
Op :
Phi->operands()) {
7157 RangeFromOps = RangeFromOps.unionWith(OpRange);
7159 if (RangeFromOps.isFullSet())
7162 ConservativeResult =
7163 ConservativeResult.intersectWith(RangeFromOps, RangeType);
7169 if (
II->getIntrinsicID() == Intrinsic::vscale) {
7171 ConservativeResult = ConservativeResult.difference(Disallowed);
7174 return setRange(U, SignHint, std::move(ConservativeResult));
7180 return setRange(S, SignHint, std::move(ConservativeResult));
7189 const APInt &MaxBECount,
7196 if (Step == 0 || MaxBECount == 0)
7203 return ConstantRange::getFull(
BitWidth);
7219 return ConstantRange::getFull(
BitWidth);
7231 APInt MovedBoundary = Descending ? (StartLower - std::move(
Offset))
7232 : (StartUpper + std::move(
Offset));
7237 if (StartRange.
contains(MovedBoundary))
7238 return ConstantRange::getFull(
BitWidth);
7241 Descending ? std::move(MovedBoundary) : std::move(StartLower);
7243 Descending ? std::move(StartUpper) : std::move(MovedBoundary);
7252 const APInt &MaxBECount) {
7256 "mismatched bit widths");
7265 StepSRange.
getSignedMin(), StartSRange, MaxBECount,
true);
7267 StartSRange, MaxBECount,
7279ConstantRange ScalarEvolution::getRangeForAffineNoSelfWrappingAR(
7281 ScalarEvolution::RangeSignHint SignHint) {
7282 assert(AddRec->
isAffine() &&
"Non-affine AddRecs are not suppored!\n");
7284 "This only works for non-self-wrapping AddRecs!");
7285 const bool IsSigned = SignHint == HINT_RANGE_SIGNED;
7289 return ConstantRange::getFull(
BitWidth);
7297 return ConstantRange::getFull(
BitWidth);
7301 const SCEV *MaxItersWithoutWrap =
getUDivExpr(RangeWidth, StepAbs);
7303 MaxItersWithoutWrap))
7304 return ConstantRange::getFull(
BitWidth);
7325 ConstantRange StartRange = getRangeRef(Start, SignHint);
7326 ConstantRange EndRange = getRangeRef(End, SignHint);
7327 ConstantRange RangeBetween = StartRange.
unionWith(EndRange);
7331 return RangeBetween;
7336 return ConstantRange::getFull(
BitWidth);
7339 isKnownPredicateViaConstantRanges(LEPred, Start, End))
7340 return RangeBetween;
7342 isKnownPredicateViaConstantRanges(GEPred, Start, End))
7343 return RangeBetween;
7344 return ConstantRange::getFull(
BitWidth);
7349 const APInt &MaxBECount) {
7356 "mismatched bit widths");
7358 struct SelectPattern {
7359 Value *Condition =
nullptr;
7363 explicit SelectPattern(ScalarEvolution &SE,
unsigned BitWidth,
7365 std::optional<unsigned> CastOp;
7379 CastOp = SCast->getSCEVType();
7380 S = SCast->getOperand();
7383 using namespace llvm::PatternMatch;
7390 Condition =
nullptr;
7422 bool isRecognized() {
return Condition !=
nullptr; }
7425 SelectPattern StartPattern(*
this,
BitWidth, Start);
7426 if (!StartPattern.isRecognized())
7427 return ConstantRange::getFull(
BitWidth);
7429 SelectPattern StepPattern(*
this,
BitWidth, Step);
7430 if (!StepPattern.isRecognized())
7431 return ConstantRange::getFull(
BitWidth);
7433 if (StartPattern.Condition != StepPattern.Condition) {
7437 return ConstantRange::getFull(
BitWidth);
7448 const SCEV *TrueStart = this->
getConstant(StartPattern.TrueValue);
7449 const SCEV *TrueStep = this->
getConstant(StepPattern.TrueValue);
7450 const SCEV *FalseStart = this->
getConstant(StartPattern.FalseValue);
7451 const SCEV *FalseStep = this->
getConstant(StepPattern.FalseValue);
7453 ConstantRange TrueRange =
7454 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount);
7455 ConstantRange FalseRange =
7456 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount);
7468 PDI && PDI->isDisjoint()) {
7483ScalarEvolution::getNonTrivialDefiningScopeBound(
const SCEV *S) {
7496 SmallPtrSet<const SCEV *, 16> Visited;
7498 auto pushOp = [&](
const SCEV *S) {
7499 if (!Visited.
insert(S).second)
7502 if (Visited.
size() > 30) {
7513 while (!Worklist.
empty()) {
7515 if (
auto *DefI = getNonTrivialDefiningScopeBound(S)) {
7516 if (!Bound || DT.dominates(Bound, DefI))
7523 return Bound ? Bound : &*F.getEntryBlock().begin();
7529 return getDefiningScopeBound(
Ops, Discard);
7532bool ScalarEvolution::isGuaranteedToTransferExecutionTo(
const Instruction *
A,
7534 if (
A->getParent() ==
B->getParent() &&
7539 auto *BLoop = LI.getLoopFor(
B->getParent());
7540 if (BLoop && BLoop->getHeader() ==
B->getParent() &&
7541 BLoop->getLoopPreheader() ==
A->getParent() &&
7543 A->getParent()->end()) &&
7550bool ScalarEvolution::isGuaranteedNotToBePoison(
const SCEV *
Op) {
7551 SCEVPoisonCollector PC(
true);
7553 return PC.MaybePoison.empty();
7556bool ScalarEvolution::isGuaranteedNotToCauseUB(
const SCEV *
Op) {
7562 return M && (!
isKnownNonZero(Op1) || !isGuaranteedNotToBePoison(Op1));
7566bool ScalarEvolution::isSCEVExprNeverPoison(
const Instruction *
I) {
7583 for (
const Use &
Op :
I->operands()) {
7589 auto *DefI = getDefiningScopeBound(SCEVOps);
7590 return isGuaranteedToTransferExecutionTo(DefI,
I);
7593bool ScalarEvolution::isAddRecNeverPoison(
const Instruction *
I,
const Loop *L) {
7595 if (isSCEVExprNeverPoison(
I))
7606 auto *ExitingBB =
L->getExitingBlock();
7610 SmallPtrSet<const Value *, 16> KnownPoison;
7619 while (!Worklist.
empty()) {
7622 for (
const Use &U :
Poison->uses()) {
7625 DT.dominates(PoisonUser->
getParent(), ExitingBB))
7629 if (KnownPoison.
insert(PoisonUser).second)
7637ScalarEvolution::LoopProperties
7638ScalarEvolution::getLoopProperties(
const Loop *L) {
7639 using LoopProperties = ScalarEvolution::LoopProperties;
7641 auto Itr = LoopPropertiesCache.find(L);
7642 if (Itr == LoopPropertiesCache.end()) {
7645 return !
SI->isSimple();
7655 return I->mayWriteToMemory();
7658 LoopProperties LP = {
true,
7661 for (
auto *BB :
L->getBlocks())
7662 for (
auto &
I : *BB) {
7664 LP.HasNoAbnormalExits =
false;
7665 if (HasSideEffects(&
I))
7666 LP.HasNoSideEffects =
false;
7667 if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
7671 auto InsertPair = LoopPropertiesCache.insert({
L, LP});
7672 assert(InsertPair.second &&
"We just checked!");
7673 Itr = InsertPair.first;
7686const SCEV *ScalarEvolution::createSCEVIter(
Value *V) {
7692 Stack.emplace_back(V,
true);
7693 Stack.emplace_back(V,
false);
7694 while (!Stack.empty()) {
7695 auto E = Stack.pop_back_val();
7696 Value *CurV = E.getPointer();
7702 const SCEV *CreatedSCEV =
nullptr;
7705 CreatedSCEV = createSCEV(CurV);
7710 CreatedSCEV = getOperandsToCreate(CurV,
Ops);
7714 insertValueToMap(CurV, CreatedSCEV);
7718 Stack.emplace_back(CurV,
true);
7720 Stack.emplace_back(
Op,
false);
7737 if (!DT.isReachableFromEntry(
I->getParent()))
7750 switch (BO->Opcode) {
7751 case Instruction::Add:
7752 case Instruction::Mul: {
7759 Ops.push_back(BO->
Op);
7763 Ops.push_back(BO->RHS);
7767 (BO->Opcode == Instruction::Add &&
7768 (NewBO->Opcode != Instruction::Add &&
7769 NewBO->Opcode != Instruction::Sub)) ||
7770 (BO->Opcode == Instruction::Mul &&
7771 NewBO->Opcode != Instruction::Mul)) {
7772 Ops.push_back(BO->LHS);
7777 if (BO->
Op && (BO->IsNSW || BO->IsNUW)) {
7780 Ops.push_back(BO->LHS);
7788 case Instruction::Sub:
7789 case Instruction::UDiv:
7790 case Instruction::URem:
7792 case Instruction::AShr:
7793 case Instruction::Shl:
7794 case Instruction::Xor:
7798 case Instruction::And:
7799 case Instruction::Or:
7803 case Instruction::LShr:
7810 Ops.push_back(BO->LHS);
7811 Ops.push_back(BO->RHS);
7815 switch (
U->getOpcode()) {
7816 case Instruction::Trunc:
7817 case Instruction::ZExt:
7818 case Instruction::SExt:
7819 case Instruction::PtrToAddr:
7820 case Instruction::PtrToInt:
7821 Ops.push_back(
U->getOperand(0));
7824 case Instruction::BitCast:
7826 Ops.push_back(
U->getOperand(0));
7831 case Instruction::SDiv:
7832 case Instruction::SRem:
7833 Ops.push_back(
U->getOperand(0));
7834 Ops.push_back(
U->getOperand(1));
7837 case Instruction::GetElementPtr:
7839 "GEP source element type must be sized");
7843 case Instruction::IntToPtr:
7846 case Instruction::PHI:
7877 Ops.push_back(CondICmp->getOperand(0));
7878 Ops.push_back(CondICmp->getOperand(1));
7898 case Instruction::Select: {
7900 auto CanSimplifyToUnknown = [
this,
U]() {
7918 if (CanSimplifyToUnknown())
7925 case Instruction::Call:
7926 case Instruction::Invoke:
7933 switch (
II->getIntrinsicID()) {
7934 case Intrinsic::abs:
7935 Ops.push_back(
II->getArgOperand(0));
7937 case Intrinsic::umax:
7938 case Intrinsic::umin:
7939 case Intrinsic::smax:
7940 case Intrinsic::smin:
7941 case Intrinsic::usub_sat:
7942 case Intrinsic::uadd_sat:
7943 Ops.push_back(
II->getArgOperand(0));
7944 Ops.push_back(
II->getArgOperand(1));
7946 case Intrinsic::start_loop_iterations:
7947 case Intrinsic::annotation:
7948 case Intrinsic::ptr_annotation:
7949 Ops.push_back(
II->getArgOperand(0));
7961const SCEV *ScalarEvolution::createSCEV(
Value *V) {
7970 if (!DT.isReachableFromEntry(
I->getParent()))
7985 switch (BO->Opcode) {
7986 case Instruction::Add: {
8012 if (BO->Opcode == Instruction::Sub)
8020 if (BO->Opcode == Instruction::Sub)
8027 if (!NewBO || (NewBO->Opcode != Instruction::Add &&
8028 NewBO->Opcode != Instruction::Sub)) {
8038 case Instruction::Mul: {
8059 if (!NewBO || NewBO->Opcode != Instruction::Mul) {
8068 case Instruction::UDiv:
8072 case Instruction::URem:
8076 case Instruction::Sub: {
8079 Flags = getNoWrapFlagsFromUB(BO->
Op);
8084 case Instruction::And:
8090 if (CI->isMinusOne())
8092 const APInt &
A = CI->getValue();
8098 unsigned LZ =
A.countl_zero();
8099 unsigned TZ =
A.countr_zero();
8104 APInt EffectiveMask =
8106 if ((LZ != 0 || TZ != 0) && !((~
A & ~Known.
Zero) & EffectiveMask)) {
8109 const SCEV *ShiftedLHS =
nullptr;
8113 unsigned MulZeros = OpC->getAPInt().countr_zero();
8114 unsigned GCD = std::min(MulZeros, TZ);
8119 auto *NewMul =
getMulExpr(MulOps, LHSMul->getNoWrapFlags());
8141 case Instruction::Or:
8150 case Instruction::Xor:
8153 if (CI->isMinusOne())
8162 if (LBO->getOpcode() == Instruction::And &&
8163 LCI->getValue() == CI->getValue())
8164 if (
const SCEVZeroExtendExpr *Z =
8167 const SCEV *Z0 =
Z->getOperand();
8174 if (CI->getValue().isMask(Z0TySize))
8180 APInt Trunc = CI->getValue().trunc(Z0TySize);
8189 case Instruction::Shl:
8207 auto MulFlags = getNoWrapFlagsFromUB(BO->
Op);
8216 ConstantInt *
X = ConstantInt::get(
8222 case Instruction::AShr:
8244 const SCEV *AddTruncateExpr =
nullptr;
8245 ConstantInt *ShlAmtCI =
nullptr;
8246 const SCEV *AddConstant =
nullptr;
8248 if (L &&
L->getOpcode() == Instruction::Add) {
8256 if (LShift && LShift->
getOpcode() == Instruction::Shl) {
8263 APInt AddOperand = AddOperandCI->
getValue().
ashr(AShrAmt);
8271 }
else if (L &&
L->getOpcode() == Instruction::Shl) {
8276 const SCEV *ShlOp0SCEV =
getSCEV(
L->getOperand(0));
8281 if (AddTruncateExpr && ShlAmtCI) {
8293 const APInt &ShlAmt = ShlAmtCI->
getValue();
8297 const SCEV *CompositeExpr =
8299 if (
L->getOpcode() != Instruction::Shl)
8300 CompositeExpr =
getAddExpr(CompositeExpr, AddConstant);
8309 switch (
U->getOpcode()) {
8310 case Instruction::Trunc:
8313 case Instruction::ZExt:
8316 case Instruction::SExt:
8326 if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
8327 Type *Ty =
U->getType();
8335 case Instruction::BitCast:
8341 case Instruction::PtrToAddr: {
8348 case Instruction::PtrToInt: {
8351 Type *DstIntTy =
U->getType();
8359 case Instruction::IntToPtr:
8363 case Instruction::SDiv:
8370 case Instruction::SRem:
8377 case Instruction::GetElementPtr:
8380 case Instruction::PHI:
8383 case Instruction::Select:
8384 return createNodeForSelectOrPHI(U,
U->getOperand(0),
U->getOperand(1),
8387 case Instruction::Call:
8388 case Instruction::Invoke:
8393 switch (
II->getIntrinsicID()) {
8394 case Intrinsic::abs:
8398 case Intrinsic::umax:
8402 case Intrinsic::umin:
8406 case Intrinsic::smax:
8410 case Intrinsic::smin:
8414 case Intrinsic::usub_sat: {
8415 const SCEV *
X =
getSCEV(
II->getArgOperand(0));
8416 const SCEV *
Y =
getSCEV(
II->getArgOperand(1));
8420 case Intrinsic::uadd_sat: {
8421 const SCEV *
X =
getSCEV(
II->getArgOperand(0));
8422 const SCEV *
Y =
getSCEV(
II->getArgOperand(1));
8426 case Intrinsic::start_loop_iterations:
8427 case Intrinsic::annotation:
8428 case Intrinsic::ptr_annotation:
8432 case Intrinsic::vscale:
8452 auto *ExitCountType = ExitCount->
getType();
8453 assert(ExitCountType->isIntegerTy());
8455 1 + ExitCountType->getScalarSizeInBits());
8468 auto CanAddOneWithoutOverflow = [&]() {
8470 getRangeRef(ExitCount, RangeSignHint::HINT_RANGE_UNSIGNED);
8481 if (EvalSize > ExitCountSize && CanAddOneWithoutOverflow())
8511 assert(ExitingBlock &&
"Must pass a non-null exiting block!");
8512 assert(L->isLoopExiting(ExitingBlock) &&
8513 "Exiting block must actually branch out of the loop!");
8522 const auto *MaxExitCount =
8530 L->getExitingBlocks(ExitingBlocks);
8532 std::optional<unsigned> Res;
8533 for (
auto *ExitingBB : ExitingBlocks) {
8537 Res = std::gcd(*Res, Multiple);
8539 return Res.value_or(1);
8543 const SCEV *ExitCount) {
8573 assert(ExitingBlock &&
"Must pass a non-null exiting block!");
8574 assert(L->isLoopExiting(ExitingBlock) &&
8575 "Exiting block must actually branch out of the loop!");
8585 return getBackedgeTakenInfo(L).getExact(ExitingBlock,
this);
8587 return getBackedgeTakenInfo(L).getSymbolicMax(ExitingBlock,
this);
8589 return getBackedgeTakenInfo(L).getConstantMax(ExitingBlock,
this);
8599 return getPredicatedBackedgeTakenInfo(L).getExact(ExitingBlock,
this,
8602 return getPredicatedBackedgeTakenInfo(L).getSymbolicMax(ExitingBlock,
this,
8605 return getPredicatedBackedgeTakenInfo(L).getConstantMax(ExitingBlock,
this,
8613 return getPredicatedBackedgeTakenInfo(L).getExact(L,
this, &Preds);
8620 return getBackedgeTakenInfo(L).getExact(L,
this);
8622 return getBackedgeTakenInfo(L).getConstantMax(
this);
8624 return getBackedgeTakenInfo(L).getSymbolicMax(L,
this);
8631 return getPredicatedBackedgeTakenInfo(L).getSymbolicMax(L,
this, &Preds);
8636 return getPredicatedBackedgeTakenInfo(L).getConstantMax(
this, &Preds);
8640 return getBackedgeTakenInfo(L).isConstantMaxOrZero(
this);
8650 for (
PHINode &PN : Header->phis())
8651 if (Visited.
insert(&PN).second)
8655ScalarEvolution::BackedgeTakenInfo &
8656ScalarEvolution::getPredicatedBackedgeTakenInfo(
const Loop *L) {
8657 auto &BTI = getBackedgeTakenInfo(L);
8658 if (BTI.hasFullInfo())
8661 auto Pair = PredicatedBackedgeTakenCounts.try_emplace(L);
8664 return Pair.first->second;
8666 BackedgeTakenInfo
Result =
8667 computeBackedgeTakenCount(L,
true);
8669 return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result);
8672ScalarEvolution::BackedgeTakenInfo &
8673ScalarEvolution::getBackedgeTakenInfo(
const Loop *L) {
8679 std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator,
bool> Pair =
8680 BackedgeTakenCounts.try_emplace(L);
8682 return Pair.first->second;
8687 BackedgeTakenInfo
Result = computeBackedgeTakenCount(L);
8694 if (
Result.hasAnyInfo()) {
8697 auto LoopUsersIt = LoopUsers.find(L);
8698 if (LoopUsersIt != LoopUsers.end())
8700 forgetMemoizedResults(ToForget);
8703 for (PHINode &PN :
L->getHeader()->phis())
8704 ConstantEvolutionLoopExitValue.erase(&PN);
8712 return BackedgeTakenCounts.find(L)->second = std::move(Result);
8721 BackedgeTakenCounts.clear();
8722 PredicatedBackedgeTakenCounts.clear();
8723 BECountUsers.clear();
8724 LoopPropertiesCache.clear();
8725 ConstantEvolutionLoopExitValue.clear();
8726 ValueExprMap.clear();
8727 ValuesAtScopes.clear();
8728 ValuesAtScopesUsers.clear();
8729 LoopDispositions.clear();
8730 BlockDispositions.clear();
8731 UnsignedRanges.clear();
8732 SignedRanges.clear();
8733 ExprValueMap.clear();
8735 ConstantMultipleCache.clear();
8736 PredicatedSCEVRewrites.clear();
8738 FoldCacheUser.clear();
8740void ScalarEvolution::visitAndClearUsers(
8744 while (!Worklist.
empty()) {
8751 if (It != ValueExprMap.
end()) {
8752 eraseValueFromMap(It->first);
8755 ConstantEvolutionLoopExitValue.erase(PN);
8769 while (!LoopWorklist.
empty()) {
8773 forgetBackedgeTakenCounts(CurrL,
false);
8774 forgetBackedgeTakenCounts(CurrL,
true);
8777 for (
auto I = PredicatedSCEVRewrites.begin();
8778 I != PredicatedSCEVRewrites.end();) {
8779 std::pair<const SCEV *, const Loop *> Entry =
I->first;
8780 if (Entry.second == CurrL)
8781 PredicatedSCEVRewrites.erase(
I++);
8786 auto LoopUsersItr = LoopUsers.find(CurrL);
8787 if (LoopUsersItr != LoopUsers.end())
8792 visitAndClearUsers(Worklist, Visited, ToForget);
8794 LoopPropertiesCache.erase(CurrL);
8797 LoopWorklist.
append(CurrL->begin(), CurrL->end());
8799 forgetMemoizedResults(ToForget);
8816 visitAndClearUsers(Worklist, Visited, ToForget);
8818 forgetMemoizedResults(ToForget);
8830 struct InvalidationRootCollector {
8834 InvalidationRootCollector(
Loop *L) : L(L) {}
8836 bool follow(
const SCEV *S) {
8842 if (L->contains(AddRec->
getLoop()))
8847 bool isDone()
const {
return false; }
8850 InvalidationRootCollector
C(L);
8852 forgetMemoizedResults(
C.Roots);
8865 BlockDispositions.clear();
8866 LoopDispositions.clear();
8883 while (!Worklist.
empty()) {
8885 bool LoopDispoRemoved = LoopDispositions.erase(Curr);
8886 bool BlockDispoRemoved = BlockDispositions.erase(Curr);
8887 if (!LoopDispoRemoved && !BlockDispoRemoved)
8889 auto Users = SCEVUsers.find(Curr);
8890 if (
Users != SCEVUsers.end())
8903const SCEV *ScalarEvolution::BackedgeTakenInfo::getExact(
8907 if (!isComplete() || ExitNotTaken.
empty())
8918 for (
const auto &ENT : ExitNotTaken) {
8919 const SCEV *BECount = ENT.ExactNotTaken;
8922 "We should only have known counts for exiting blocks that dominate "
8925 Ops.push_back(BECount);
8930 assert((Preds || ENT.hasAlwaysTruePredicate()) &&
8931 "Predicate should be always true!");
8940const ScalarEvolution::ExitNotTakenInfo *
8941ScalarEvolution::BackedgeTakenInfo::getExitNotTaken(
8942 const BasicBlock *ExitingBlock,
8943 SmallVectorImpl<const SCEVPredicate *> *Predicates)
const {
8944 for (
const auto &ENT : ExitNotTaken)
8945 if (ENT.ExitingBlock == ExitingBlock) {
8946 if (ENT.hasAlwaysTruePredicate())
8948 else if (Predicates) {
8958const SCEV *ScalarEvolution::BackedgeTakenInfo::getConstantMax(
8960 SmallVectorImpl<const SCEVPredicate *> *Predicates)
const {
8961 if (!getConstantMax())
8964 for (
const auto &ENT : ExitNotTaken)
8965 if (!ENT.hasAlwaysTruePredicate()) {
8973 "No point in having a non-constant max backedge taken count!");
8974 return getConstantMax();
8977const SCEV *ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(
8979 SmallVectorImpl<const SCEVPredicate *> *Predicates) {
8987 for (
const auto &ENT : ExitNotTaken) {
8988 const SCEV *ExitCount = ENT.SymbolicMaxNotTaken;
8991 "We should only have known counts for exiting blocks that "
8997 assert((Predicates || ENT.hasAlwaysTruePredicate()) &&
8998 "Predicate should be always true!");
9001 if (ExitCounts.
empty())
9010bool ScalarEvolution::BackedgeTakenInfo::isConstantMaxOrZero(
9012 auto PredicateNotAlwaysTrue = [](
const ExitNotTakenInfo &ENT) {
9013 return !ENT.hasAlwaysTruePredicate();
9015 return MaxOrZero && !
any_of(ExitNotTaken, PredicateNotAlwaysTrue);
9031 this->ExactNotTaken = E = ConstantMaxNotTaken;
9032 this->SymbolicMaxNotTaken = SymbolicMaxNotTaken = ConstantMaxNotTaken;
9037 "Exact is not allowed to be less precise than Constant Max");
9040 "Exact is not allowed to be less precise than Symbolic Max");
9043 "Symbolic Max is not allowed to be less precise than Constant Max");
9046 "No point in having a non-constant max backedge taken count!");
9048 for (
const auto PredList : PredLists)
9049 for (
const auto *
P : PredList) {
9057 "Backedge count should be int");
9060 "Max backedge count should be int");
9073ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
9075 bool IsComplete,
const SCEV *ConstantMax,
bool MaxOrZero)
9076 : ConstantMax(ConstantMax), IsComplete(IsComplete), MaxOrZero(MaxOrZero) {
9077 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
9079 ExitNotTaken.reserve(ExitCounts.
size());
9080 std::transform(ExitCounts.
begin(), ExitCounts.
end(),
9081 std::back_inserter(ExitNotTaken),
9082 [&](
const EdgeExitInfo &EEI) {
9083 BasicBlock *ExitBB = EEI.first;
9084 const ExitLimit &EL = EEI.second;
9085 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken,
9086 EL.ConstantMaxNotTaken, EL.SymbolicMaxNotTaken,
9091 "No point in having a non-constant max backedge taken count!");
9095ScalarEvolution::BackedgeTakenInfo
9096ScalarEvolution::computeBackedgeTakenCount(
const Loop *L,
9097 bool AllowPredicates) {
9099 L->getExitingBlocks(ExitingBlocks);
9101 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
9104 bool CouldComputeBECount =
true;
9106 const SCEV *MustExitMaxBECount =
nullptr;
9107 const SCEV *MayExitMaxBECount =
nullptr;
9108 bool MustExitMaxOrZero =
false;
9109 bool IsOnlyExit = ExitingBlocks.
size() == 1;
9120 bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
9121 if (ExitIfTrue == CI->
isZero())
9125 ExitLimit EL = computeExitLimit(L, ExitBB, IsOnlyExit, AllowPredicates);
9127 assert((AllowPredicates || EL.Predicates.empty()) &&
9128 "Predicated exit limit when predicates are not allowed!");
9133 ++NumExitCountsComputed;
9137 CouldComputeBECount =
false;
9144 "Exact is known but symbolic isn't?");
9145 ++NumExitCountsNotComputed;
9160 DT.dominates(ExitBB, Latch)) {
9161 if (!MustExitMaxBECount) {
9162 MustExitMaxBECount = EL.ConstantMaxNotTaken;
9163 MustExitMaxOrZero = EL.MaxOrZero;
9166 EL.ConstantMaxNotTaken);
9170 MayExitMaxBECount = EL.ConstantMaxNotTaken;
9173 EL.ConstantMaxNotTaken);
9177 const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
9181 bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1);
9187 for (
const auto &Pair : ExitCounts) {
9189 BECountUsers[Pair.second.ExactNotTaken].insert({
L, AllowPredicates});
9191 BECountUsers[Pair.second.SymbolicMaxNotTaken].insert(
9192 {
L, AllowPredicates});
9194 return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount,
9195 MaxBECount, MaxOrZero);
9198ScalarEvolution::ExitLimit
9199ScalarEvolution::computeExitLimit(
const Loop *L, BasicBlock *ExitingBlock,
9200 bool IsOnlyExit,
bool AllowPredicates) {
9201 assert(
L->contains(ExitingBlock) &&
"Exit count for non-loop block?");
9205 if (!Latch || !DT.dominates(ExitingBlock, Latch))
9210 bool ExitIfTrue = !
L->contains(BI->getSuccessor(0));
9211 assert(ExitIfTrue ==
L->contains(BI->getSuccessor(1)) &&
9212 "It should have one successor in loop and one exit block!");
9223 if (!
L->contains(SBB)) {
9228 assert(Exit &&
"Exiting block must have at least one exit");
9229 return computeExitLimitFromSingleExitSwitch(
9230 L, SI, Exit, IsOnlyExit);
9237 const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
bool ControlsOnlyExit,
9238 bool AllowPredicates) {
9239 ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates);
9240 return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue,
9241 ControlsOnlyExit, AllowPredicates);
9244std::optional<ScalarEvolution::ExitLimit>
9245ScalarEvolution::ExitLimitCache::find(
const Loop *L,
Value *ExitCond,
9246 bool ExitIfTrue,
bool ControlsOnlyExit,
9247 bool AllowPredicates) {
9249 (void)this->ExitIfTrue;
9250 (void)this->AllowPredicates;
9252 assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
9253 this->AllowPredicates == AllowPredicates &&
9254 "Variance in assumed invariant key components!");
9255 auto Itr = TripCountMap.find({ExitCond, ControlsOnlyExit});
9256 if (Itr == TripCountMap.end())
9257 return std::nullopt;
9261void ScalarEvolution::ExitLimitCache::insert(
const Loop *L,
Value *ExitCond,
9263 bool ControlsOnlyExit,
9264 bool AllowPredicates,
9266 assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
9267 this->AllowPredicates == AllowPredicates &&
9268 "Variance in assumed invariant key components!");
9270 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsOnlyExit}, EL});
9271 assert(InsertResult.second &&
"Expected successful insertion!");
9276ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached(
9277 ExitLimitCacheTy &Cache,
const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
9278 bool ControlsOnlyExit,
bool AllowPredicates) {
9280 if (
auto MaybeEL = Cache.find(L, ExitCond, ExitIfTrue, ControlsOnlyExit,
9284 ExitLimit EL = computeExitLimitFromCondImpl(
9285 Cache, L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates);
9286 Cache.insert(L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates, EL);
9290ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl(
9291 ExitLimitCacheTy &Cache,
const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
9292 bool ControlsOnlyExit,
bool AllowPredicates) {
9294 if (
auto LimitFromBinOp = computeExitLimitFromCondFromBinOp(
9295 Cache, L, ExitCond, ExitIfTrue, AllowPredicates))
9296 return *LimitFromBinOp;
9302 computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsOnlyExit);
9303 if (EL.hasFullInfo() || !AllowPredicates)
9307 return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue,
9327 const WithOverflowInst *WO;
9342 auto EL = computeExitLimitFromICmp(L, Pred,
LHS,
getConstant(NewRHSC),
9343 ControlsOnlyExit, AllowPredicates);
9344 if (EL.hasAnyInfo())
9349 return computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
9352std::optional<ScalarEvolution::ExitLimit>
9353ScalarEvolution::computeExitLimitFromCondFromBinOp(ExitLimitCacheTy &Cache,
9357 bool AllowPredicates) {
9366 return std::nullopt;
9370 ExitLimit EL0 = computeExitLimitFromCondCached(
9371 Cache, L, Op0, ExitIfTrue,
false, AllowPredicates);
9372 ExitLimit EL1 = computeExitLimitFromCondCached(
9373 Cache, L, Op1, ExitIfTrue,
false, AllowPredicates);
9378 bool EitherMayExit = IsAnd ^ ExitIfTrue;
9383 if (EitherMayExit) {
9393 ConstantMaxBECount = EL1.ConstantMaxNotTaken;
9395 ConstantMaxBECount = EL0.ConstantMaxNotTaken;
9398 EL1.ConstantMaxNotTaken);
9400 SymbolicMaxBECount = EL1.SymbolicMaxNotTaken;
9402 SymbolicMaxBECount = EL0.SymbolicMaxNotTaken;
9405 EL0.SymbolicMaxNotTaken, EL1.SymbolicMaxNotTaken, UseSequentialUMin);
9409 if (EL0.ExactNotTaken == EL1.ExactNotTaken)
9410 BECount = EL0.ExactNotTaken;
9423 SymbolicMaxBECount =
9425 return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount,
false,
9429ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromICmp(
9430 const Loop *L, ICmpInst *ExitCond,
bool ExitIfTrue,
bool ControlsOnlyExit,
9431 bool AllowPredicates) {
9443 ExitLimit EL = computeExitLimitFromICmp(L, Pred,
LHS,
RHS, ControlsOnlyExit,
9445 if (EL.hasAnyInfo())
9448 auto *ExhaustiveCount =
9449 computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
9452 return ExhaustiveCount;
9454 return computeShiftCompareExitLimit(ExitCond->
getOperand(0),
9457ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromICmp(
9459 bool ControlsOnlyExit,
bool AllowPredicates) {
9484 ConstantRange CompRange =
9502 InnerLHS = ZExt->getOperand();
9549 if (EL.hasAnyInfo())
9566 if (EL.hasAnyInfo())
return EL;
9598 ExitLimit EL = howManyLessThans(
LHS,
RHS, L, IsSigned, ControlsOnlyExit,
9600 if (EL.hasAnyInfo())
9616 ExitLimit EL = howManyGreaterThans(
LHS,
RHS, L, IsSigned, ControlsOnlyExit,
9618 if (EL.hasAnyInfo())
9629ScalarEvolution::ExitLimit
9630ScalarEvolution::computeExitLimitFromSingleExitSwitch(
const Loop *L,
9632 BasicBlock *ExitingBlock,
9633 bool ControlsOnlyExit) {
9634 assert(!
L->contains(ExitingBlock) &&
"Not an exiting block!");
9637 if (
Switch->getDefaultDest() == ExitingBlock)
9641 "Default case must not exit the loop!");
9647 if (EL.hasAnyInfo())
9659 "Evaluation of SCEV at constant didn't fold correctly?");
9663ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
9673 const BasicBlock *Predecessor =
L->getLoopPredecessor();
9679 auto MatchPositiveShift =
9682 using namespace PatternMatch;
9684 ConstantInt *ShiftAmt;
9686 OutOpCode = Instruction::LShr;
9688 OutOpCode = Instruction::AShr;
9690 OutOpCode = Instruction::Shl;
9705 auto MatchShiftRecurrence =
9707 std::optional<Instruction::BinaryOps> PostShiftOpCode;
9722 if (MatchPositiveShift(
LHS, V, OpC)) {
9723 PostShiftOpCode = OpC;
9729 if (!PNOut || PNOut->getParent() !=
L->getHeader())
9732 Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
9738 MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&
9745 (!PostShiftOpCode || *PostShiftOpCode == OpCodeOut);
9750 if (!MatchShiftRecurrence(
LHS, PN, OpCode))
9762 ConstantInt *StableValue =
nullptr;
9767 case Instruction::AShr: {
9775 StableValue = ConstantInt::get(Ty, 0);
9777 StableValue = ConstantInt::get(Ty, -1,
true);
9783 case Instruction::LShr:
9784 case Instruction::Shl:
9794 "Otherwise cannot be an operand to a branch instruction");
9796 if (
Result->isNullValue()) {
9798 const SCEV *UpperBound =
9815 if (
const Function *
F = CI->getCalledFunction())
9824 if (!L->contains(
I))
return false;
9829 return L->getHeader() ==
I->getParent();
9905 if (!
I)
return nullptr;
9918 std::vector<Constant*> Operands(
I->getNumOperands());
9920 for (
unsigned i = 0, e =
I->getNumOperands(); i != e; ++i) {
9924 if (!Operands[i])
return nullptr;
9929 if (!
C)
return nullptr;
9951 if (IncomingVal != CurrentVal) {
9954 IncomingVal = CurrentVal;
9966ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
9969 auto [
I,
Inserted] = ConstantEvolutionLoopExitValue.try_emplace(PN);
9978 DenseMap<Instruction *, Constant *> CurrentIterVals;
9980 assert(PN->
getParent() == Header &&
"Can't evaluate PHI not in loop header!");
9986 for (PHINode &
PHI : Header->phis()) {
9988 CurrentIterVals[&
PHI] = StartCST;
9990 if (!CurrentIterVals.
count(PN))
9991 return RetVal =
nullptr;
9997 "BEs is <= MaxBruteForceIterations which is an 'unsigned'!");
10000 unsigned IterationNum = 0;
10002 for (; ; ++IterationNum) {
10003 if (IterationNum == NumIterations)
10004 return RetVal = CurrentIterVals[PN];
10008 DenseMap<Instruction *, Constant *> NextIterVals;
10013 NextIterVals[PN] = NextPHI;
10015 bool StoppedEvolving = NextPHI == CurrentIterVals[PN];
10021 for (
const auto &
I : CurrentIterVals) {
10023 if (!
PHI ||
PHI == PN ||
PHI->getParent() != Header)
continue;
10028 for (
const auto &
I : PHIsToCompute) {
10029 PHINode *
PHI =
I.first;
10032 Value *BEValue =
PHI->getIncomingValueForBlock(Latch);
10035 if (NextPHI !=
I.second)
10036 StoppedEvolving =
false;
10041 if (StoppedEvolving)
10042 return RetVal = CurrentIterVals[PN];
10044 CurrentIterVals.swap(NextIterVals);
10048const SCEV *ScalarEvolution::computeExitCountExhaustively(
const Loop *L,
10058 DenseMap<Instruction *, Constant *> CurrentIterVals;
10060 assert(PN->
getParent() == Header &&
"Can't evaluate PHI not in loop header!");
10063 assert(Latch &&
"Should follow from NumIncomingValues == 2!");
10065 for (PHINode &
PHI : Header->phis()) {
10067 CurrentIterVals[&
PHI] = StartCST;
10069 if (!CurrentIterVals.
count(PN))
10077 for (
unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){
10084 if (CondVal->getValue() == uint64_t(ExitWhen)) {
10085 ++NumBruteForceTripCountsComputed;
10090 DenseMap<Instruction *, Constant *> NextIterVals;
10096 for (
const auto &
I : CurrentIterVals) {
10098 if (!
PHI ||
PHI->getParent() != Header)
continue;
10101 for (PHINode *
PHI : PHIsToCompute) {
10103 if (NextPHI)
continue;
10105 Value *BEValue =
PHI->getIncomingValueForBlock(Latch);
10108 CurrentIterVals.
swap(NextIterVals);
10119 for (
auto &LS : Values)
10121 return LS.second ? LS.second : V;
10126 const SCEV *
C = computeSCEVAtScope(V, L);
10127 for (
auto &LS :
reverse(ValuesAtScopes[V]))
10128 if (LS.first == L) {
10131 ValuesAtScopesUsers[
C].push_back({L, V});
10142 switch (V->getSCEVType()) {
10182 assert(!
C->getType()->isPointerTy() &&
10183 "Can only have one pointer, and it must be last");
10208const SCEV *ScalarEvolution::getWithOperands(
const SCEV *S,
10209 SmallVectorImpl<SCEVUse> &NewOps) {
10244const SCEV *ScalarEvolution::computeSCEVAtScope(
const SCEV *V,
const Loop *L) {
10245 switch (
V->getSCEVType()) {
10256 for (
unsigned i = 0, e = AddRec->
getNumOperands(); i != e; ++i) {
10267 for (++i; i !=
e; ++i)
10312 for (
unsigned i = 0, e =
Ops.size(); i != e; ++i) {
10322 for (++i; i !=
e; ++i) {
10327 return getWithOperands(V, NewOps);
10342 const Loop *CurrLoop = this->LI[
I->getParent()];
10353 if (BackedgeTakenCount->
isZero()) {
10354 Value *InitValue =
nullptr;
10355 bool MultipleInitValues =
false;
10361 MultipleInitValues =
true;
10366 if (!MultipleInitValues && InitValue)
10375 unsigned InLoopPred =
10386 getConstantEvolutionLoopExitValue(PN, BTCC->getAPInt(), CurrLoop);
10400 SmallVector<Constant *, 4> Operands;
10401 Operands.
reserve(
I->getNumOperands());
10402 bool MadeImprovement =
false;
10417 MadeImprovement |= OrigV != OpV;
10422 assert(
C->getType() ==
Op->getType() &&
"Type mismatch");
10427 if (!MadeImprovement)
10448const SCEV *ScalarEvolution::stripInjectiveFunctions(
const SCEV *S)
const {
10450 return stripInjectiveFunctions(ZExt->getOperand());
10452 return stripInjectiveFunctions(SExt->getOperand());
10470 assert(
A != 0 &&
"A must be non-zero.");
10486 if (MinTZ < Mult2 && L->getLoopPredecessor())
10488 if (MinTZ < Mult2) {
10511 APInt AD =
A.lshr(Mult2).trunc(BW - Mult2);
10531static std::optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>>
10537 LLVM_DEBUG(
dbgs() << __func__ <<
": analyzing quadratic addrec: "
10538 << *AddRec <<
'\n');
10541 if (!LC || !MC || !
NC) {
10542 LLVM_DEBUG(
dbgs() << __func__ <<
": coefficients are not constant\n");
10543 return std::nullopt;
10549 assert(!
N.isZero() &&
"This is not a quadratic addrec");
10557 N =
N.sext(NewWidth);
10558 M = M.sext(NewWidth);
10559 L = L.sext(NewWidth);
10576 <<
"x + " <<
C <<
", coeff bw: " << NewWidth
10577 <<
", multiplied by " <<
T <<
'\n');
10586 std::optional<APInt>
Y) {
10588 unsigned W = std::max(
X->getBitWidth(),
Y->getBitWidth());
10591 return XW.
slt(YW) ? *
X : *
Y;
10594 return std::nullopt;
10595 return X ? *
X : *
Y;
10612 return std::nullopt;
10613 unsigned W =
X->getBitWidth();
10633static std::optional<APInt>
10639 return std::nullopt;
10642 LLVM_DEBUG(
dbgs() << __func__ <<
": solving for unsigned overflow\n");
10643 std::optional<APInt>
X =
10646 return std::nullopt;
10651 return std::nullopt;
10666static std::optional<APInt>
10670 "Starting value of addrec should be 0");
10671 LLVM_DEBUG(
dbgs() << __func__ <<
": solving boundary crossing for range "
10672 <<
Range <<
", addrec " << *AddRec <<
'\n');
10676 "Addrec's initial value should be in range");
10682 return std::nullopt;
10692 auto SolveForBoundary =
10693 [&](
APInt Bound) -> std::pair<std::optional<APInt>,
bool> {
10696 LLVM_DEBUG(
dbgs() <<
"SolveQuadraticAddRecRange: checking boundary "
10697 << Bound <<
" (before multiplying by " << M <<
")\n");
10700 std::optional<APInt> SO;
10703 "signed overflow\n");
10707 "unsigned overflow\n");
10708 std::optional<APInt> UO =
10711 auto LeavesRange = [&] (
const APInt &
X) {
10728 return {std::nullopt,
false};
10733 if (LeavesRange(*Min))
10734 return { Min,
true };
10735 std::optional<APInt> Max = Min == SO ? UO : SO;
10736 if (LeavesRange(*Max))
10737 return { Max,
true };
10740 return {std::nullopt,
true};
10747 auto SL = SolveForBoundary(
Lower);
10748 auto SU = SolveForBoundary(
Upper);
10751 if (!SL.second || !SU.second)
10752 return std::nullopt;
10795ScalarEvolution::ExitLimit ScalarEvolution::howFarToZero(
const SCEV *V,
10797 bool ControlsOnlyExit,
10798 bool AllowPredicates) {
10809 if (
C->getValue()->isZero())
return C;
10813 const SCEVAddRecExpr *AddRec =
10816 if (!AddRec && AllowPredicates)
10822 if (!AddRec || AddRec->
getLoop() != L)
10833 return ExitLimit(R, R, R,
false, Predicates);
10891 const SCEV *DistancePlusOne =
getAddExpr(Distance, One);
10917 const SCEV *
Exact =
10925 const SCEV *SymbolicMax =
10927 return ExitLimit(
Exact, ConstantMax, SymbolicMax,
false, Predicates);
10936 AllowPredicates ? &Predicates :
nullptr, *
this, L);
10944 return ExitLimit(
E, M, S,
false, Predicates);
10947ScalarEvolution::ExitLimit
10948ScalarEvolution::howFarToNonZero(
const SCEV *V,
const Loop *L) {
10956 if (!
C->getValue()->isZero())
10966std::pair<const BasicBlock *, const BasicBlock *>
10967ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(
const BasicBlock *BB)
10978 if (
const Loop *L = LI.getLoopFor(BB))
10979 return {
L->getLoopPredecessor(),
L->getHeader()};
10981 return {
nullptr, BB};
10990 if (
A ==
B)
return true;
11005 if (ComputesEqualValues(AI, BI))
11013 const SCEV *Op0, *Op1;
11032 auto TrivialCase = [&](
bool TriviallyTrue) {
11041 const SCEV *NewLHS, *NewRHS;
11065 return TrivialCase(
false);
11066 return TrivialCase(
true);
11089 const APInt &
RA = RC->getAPInt();
11091 bool SimplifiedByConstantRange =
false;
11096 return TrivialCase(
true);
11098 return TrivialCase(
false);
11107 Changed = SimplifiedByConstantRange =
true;
11111 if (!SimplifiedByConstantRange) {
11128 assert(!
RA.isMinValue() &&
"Should have been caught earlier!");
11134 assert(!
RA.isMaxValue() &&
"Should have been caught earlier!");
11140 assert(!
RA.isMinSignedValue() &&
"Should have been caught earlier!");
11146 assert(!
RA.isMaxSignedValue() &&
"Should have been caught earlier!");
11158 return TrivialCase(
true);
11160 return TrivialCase(
false);
11265 auto NonRecursive = [OrNegative](
const SCEV *S) {
11267 return C->getAPInt().isPowerOf2() ||
11268 (OrNegative &&
C->getAPInt().isNegatedPowerOf2());
11274 if (NonRecursive(S))
11300 APInt C = Cst->getAPInt();
11301 return C.urem(M) == 0;
11309 const SCEV *SmodM =
11324 for (
auto *
A : Assumptions)
11325 if (
A->implies(
P, *
this))
11338std::pair<const SCEV *, const SCEV *>
11341 const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *
this);
11343 return { Start, Start };
11345 const SCEV *
PostInc = SCEVPostIncRewriter::rewrite(S, L, *
this);
11354 getUsedLoops(LHS, LoopsUsed);
11355 getUsedLoops(RHS, LoopsUsed);
11357 if (LoopsUsed.
empty())
11362 for (
const auto *L1 : LoopsUsed)
11363 for (
const auto *L2 : LoopsUsed)
11364 assert((DT.dominates(L1->getHeader(), L2->getHeader()) ||
11365 DT.dominates(L2->getHeader(), L1->getHeader())) &&
11366 "Domination relationship is not a linear order");
11396 SplitRHS.second) &&
11408 if (isKnownPredicateViaSplitting(Pred, LHS, RHS))
11412 return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS);
11422 return std::nullopt;
11437 if (KnownWithoutContext)
11438 return KnownWithoutContext;
11445 return std::nullopt;
11451 const Loop *L = LHS->getLoop();
11456std::optional<ScalarEvolution::MonotonicPredicateType>
11459 auto Result = getMonotonicPredicateTypeImpl(LHS, Pred);
11465 auto ResultSwapped =
11468 assert(*ResultSwapped != *Result &&
11469 "monotonicity should flip as we flip the predicate");
11476std::optional<ScalarEvolution::MonotonicPredicateType>
11477ScalarEvolution::getMonotonicPredicateTypeImpl(
const SCEVAddRecExpr *LHS,
11491 return std::nullopt;
11495 "Should be greater or less!");
11499 if (!LHS->hasNoUnsignedWrap())
11500 return std::nullopt;
11504 "Relational predicate is either signed or unsigned!");
11505 if (!
LHS->hasNoSignedWrap())
11506 return std::nullopt;
11508 const SCEV *Step =
LHS->getStepRecurrence(*
this);
11516 return std::nullopt;
11519std::optional<ScalarEvolution::LoopInvariantPredicate>
11526 return std::nullopt;
11533 if (!ArLHS || ArLHS->
getLoop() != L)
11534 return std::nullopt;
11538 return std::nullopt;
11564 return std::nullopt;
11601 return std::nullopt;
11604std::optional<ScalarEvolution::LoopInvariantPredicate>
11609 Pred, LHS, RHS, L, CtxI, MaxIter))
11619 Pred, LHS, RHS, L, CtxI,
Op))
11621 return std::nullopt;
11624std::optional<ScalarEvolution::LoopInvariantPredicate>
11639 return std::nullopt;
11646 if (!AR || AR->
getLoop() != L)
11647 return std::nullopt;
11652 Pred = Pred.dropSameSign();
11656 return std::nullopt;
11662 if (Step != One && Step != MinusOne)
11663 return std::nullopt;
11669 return std::nullopt;
11675 return std::nullopt;
11683 if (Step == MinusOne)
11687 return std::nullopt;
11693bool ScalarEvolution::isKnownPredicateViaConstantRanges(
CmpPredicate Pred,
11699 auto CheckRange = [&](
bool IsSigned) {
11702 return RangeLHS.
icmp(Pred, RangeRHS);
11711 if (CheckRange(
true) || CheckRange(
false))
11720bool ScalarEvolution::isKnownPredicateViaNoOverflow(CmpPredicate Pred,
11729 SCEVUse XNonConstOp, XConstOp;
11730 SCEVUse YNonConstOp, YConstOp;
11734 if (!splitBinaryAdd(
X, XConstOp, XNonConstOp, XFlagsPresent)) {
11737 XFlagsPresent = ExpectedFlags;
11742 if (!splitBinaryAdd(
Y, YConstOp, YNonConstOp, YFlagsPresent)) {
11745 YFlagsPresent = ExpectedFlags;
11748 if (YNonConstOp != XNonConstOp)
11756 if ((YFlagsPresent & ExpectedFlags) != ExpectedFlags)
11759 (XFlagsPresent & ExpectedFlags) != ExpectedFlags) {
11819bool ScalarEvolution::isKnownPredicateViaSplitting(CmpPredicate Pred,
11840bool ScalarEvolution::isImpliedViaGuard(
const BasicBlock *BB, CmpPredicate Pred,
11841 const SCEV *
LHS,
const SCEV *
RHS) {
11846 return any_of(*BB, [&](
const Instruction &
I) {
11847 using namespace llvm::PatternMatch;
11852 isImpliedCond(Pred,
LHS,
RHS, Condition,
false);
11866 if (!L || !DT.isReachableFromEntry(L->getHeader()))
11871 "This cannot be done on broken IR!");
11874 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
11883 if (LoopContinuePredicate &&
11884 isImpliedCond(Pred, LHS, RHS, LoopContinuePredicate->
getCondition(),
11885 LoopContinuePredicate->
getSuccessor(0) != L->getHeader()))
11890 if (WalkingBEDominatingConds)
11896 const auto &BETakenInfo = getBackedgeTakenInfo(L);
11897 const SCEV *LatchBECount = BETakenInfo.getExact(Latch,
this);
11904 const SCEV *LoopCounter =
11912 for (
auto &AssumeVH : AC.assumptions()) {
11919 if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0),
false))
11923 if (isImpliedViaGuard(Latch, Pred, LHS, RHS))
11926 for (
DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()];
11927 DTN != HeaderDTN; DTN = DTN->getIDom()) {
11928 assert(DTN &&
"should reach the loop header before reaching the root!");
11931 if (isImpliedViaGuard(BB, Pred, LHS, RHS))
11949 if (isImpliedCond(Pred, LHS, RHS, ContBr->
getCondition(),
11962 if (!DT.isReachableFromEntry(BB))
11966 "This cannot be done on broken IR!");
11974 const bool ProvingStrictComparison =
11976 bool ProvedNonStrictComparison =
false;
11977 bool ProvedNonEquality =
false;
11980 if (!ProvedNonStrictComparison)
11981 ProvedNonStrictComparison = Fn(NonStrictPredicate);
11982 if (!ProvedNonEquality)
11984 if (ProvedNonStrictComparison && ProvedNonEquality)
11989 if (ProvingStrictComparison) {
11991 return isKnownViaNonRecursiveReasoning(
P, LHS, RHS);
11993 if (SplitAndProve(ProofFn))
11998 auto ProveViaCond = [&](
const Value *Condition,
bool Inverse) {
12000 if (isImpliedCond(Pred, LHS, RHS, Condition,
Inverse, CtxI))
12002 if (ProvingStrictComparison) {
12004 return isImpliedCond(
P, LHS, RHS, Condition,
Inverse, CtxI);
12006 if (SplitAndProve(ProofFn))
12015 const Loop *ContainingLoop = LI.getLoopFor(BB);
12017 if (ContainingLoop && ContainingLoop->
getHeader() == BB)
12021 for (std::pair<const BasicBlock *, const BasicBlock *> Pair(PredBB, BB);
12022 Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
12025 if (!BlockEntryPredicate)
12034 for (
auto &AssumeVH : AC.assumptions()) {
12038 if (!DT.dominates(CI, BB))
12041 if (ProveViaCond(CI->getArgOperand(0),
false))
12047 F.getParent(), Intrinsic::experimental_guard);
12049 for (
const auto *GU : GuardDecl->users())
12051 if (Guard->getFunction() == BB->
getParent() && DT.dominates(Guard, BB))
12052 if (ProveViaCond(Guard->getArgOperand(0),
false))
12067 "LHS is not available at Loop Entry");
12069 "RHS is not available at Loop Entry");
12071 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
12082 if (FoundCondValue ==
12086 if (!PendingLoopPredicates.insert(FoundCondValue).second)
12090 [&]() { PendingLoopPredicates.erase(FoundCondValue); });
12093 const Value *Op0, *Op1;
12096 return isImpliedCond(Pred,
LHS,
RHS, Op0,
Inverse, CtxI) ||
12100 return isImpliedCond(Pred,
LHS,
RHS, Op0, Inverse, CtxI) ||
12101 isImpliedCond(Pred,
LHS,
RHS, Op1, Inverse, CtxI);
12105 if (!ICI)
return false;
12109 CmpPredicate FoundPred;
12118 return isImpliedCond(Pred,
LHS,
RHS, FoundPred, FoundLHS, FoundRHS, CtxI);
12121bool ScalarEvolution::isImpliedCond(CmpPredicate Pred,
const SCEV *
LHS,
12122 const SCEV *
RHS, CmpPredicate FoundPred,
12123 const SCEV *FoundLHS,
const SCEV *FoundRHS,
12124 const Instruction *CtxI) {
12134 auto *WideType = FoundLHS->
getType();
12146 TruncFoundLHS, TruncFoundRHS, CtxI))
12172 return isImpliedCondBalancedTypes(Pred,
LHS,
RHS, FoundPred, FoundLHS,
12176bool ScalarEvolution::isImpliedCondBalancedTypes(
12181 "Types should be balanced!");
12188 if (FoundLHS == FoundRHS)
12192 if (
LHS == FoundRHS ||
RHS == FoundLHS) {
12204 return isImpliedCondOperands(*
P,
LHS,
RHS, FoundLHS, FoundRHS, CtxI);
12221 LHS, FoundLHS, FoundRHS, CtxI);
12223 return isImpliedCondOperands(*
P,
LHS,
RHS, FoundRHS, FoundLHS, CtxI);
12245 assert(P1 != P2 &&
"Handled earlier!");
12249 if (IsSignFlippedPredicate(Pred, FoundPred)) {
12253 return isImpliedCondOperands(Pred,
LHS,
RHS, FoundLHS, FoundRHS, CtxI);
12256 CmpPredicate CanonicalPred = Pred, CanonicalFoundPred = FoundPred;
12257 const SCEV *CanonicalLHS =
LHS, *CanonicalRHS =
RHS,
12258 *CanonicalFoundLHS = FoundLHS, *CanonicalFoundRHS = FoundRHS;
12263 std::swap(CanonicalFoundLHS, CanonicalFoundRHS);
12274 return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS,
12275 CanonicalRHS, CanonicalFoundLHS,
12276 CanonicalFoundRHS);
12281 return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS,
12282 CanonicalRHS, CanonicalFoundLHS,
12283 CanonicalFoundRHS);
12290 const SCEVConstant *
C =
nullptr;
12291 const SCEV *
V =
nullptr;
12309 if (Min ==
C->getAPInt()) {
12314 APInt SharperMin = Min + 1;
12317 case ICmpInst::ICMP_SGE:
12318 case ICmpInst::ICMP_UGE:
12321 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(SharperMin),
12326 case ICmpInst::ICMP_SGT:
12327 case ICmpInst::ICMP_UGT:
12337 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min), CtxI))
12342 case ICmpInst::ICMP_SLE:
12343 case ICmpInst::ICMP_ULE:
12344 if (isImpliedCondOperands(ICmpInst::getSwappedCmpPredicate(Pred), RHS,
12345 LHS, V, getConstant(SharperMin), CtxI))
12349 case ICmpInst::ICMP_SLT:
12350 case ICmpInst::ICMP_ULT:
12351 if (isImpliedCondOperands(ICmpInst::getSwappedCmpPredicate(Pred), RHS,
12352 LHS, V, getConstant(Min), CtxI))
12366 if (isImpliedCondOperands(Pred,
LHS,
RHS, FoundLHS, FoundRHS, CtxI))
12370 if (isImpliedCondOperands(FoundPred,
LHS,
RHS, FoundLHS, FoundRHS, CtxI))
12373 if (isImpliedCondOperandsViaRanges(Pred,
LHS,
RHS, FoundPred, FoundLHS, FoundRHS))
12389std::optional<APInt>
12396 APInt DiffMul(BW, 1);
12399 for (
unsigned I = 0;
I < 8; ++
I) {
12408 if (LAR->getLoop() != MAR->getLoop())
12409 return std::nullopt;
12413 if (!LAR->isAffine() || !MAR->isAffine())
12414 return std::nullopt;
12416 if (LAR->getStepRecurrence(*
this) != MAR->getStepRecurrence(*
this))
12417 return std::nullopt;
12419 Less = LAR->getStart();
12420 More = MAR->getStart();
12425 auto MatchConstMul =
12426 [](
const SCEV *S) -> std::optional<std::pair<const SCEV *, APInt>> {
12431 return std::nullopt;
12433 if (
auto MatchedMore = MatchConstMul(More)) {
12434 if (
auto MatchedLess = MatchConstMul(
Less)) {
12435 if (MatchedMore->second == MatchedLess->second) {
12436 More = MatchedMore->first;
12437 Less = MatchedLess->first;
12438 DiffMul *= MatchedMore->second;
12449 Diff +=
C->getAPInt() * DiffMul;
12452 Diff -=
C->getAPInt() * DiffMul;
12455 Multiplicity[S] +=
Mul;
12457 auto Decompose = [&](
const SCEV *S,
int Mul) {
12464 Decompose(More, 1);
12465 Decompose(
Less, -1);
12469 const SCEV *NewMore =
nullptr, *NewLess =
nullptr;
12470 for (
const auto &[S,
Mul] : Multiplicity) {
12475 return std::nullopt;
12477 }
else if (
Mul == -1) {
12479 return std::nullopt;
12482 return std::nullopt;
12486 if (NewMore == More || NewLess ==
Less)
12487 return std::nullopt;
12493 if (!More && !
Less)
12497 if (!More || !
Less)
12498 return std::nullopt;
12502 return std::nullopt;
12505bool ScalarEvolution::isImpliedCondOperandsViaAddRecStart(
12527 const auto *Latch = L->getLoopLatch();
12530 if (!L->contains(ContextBB) || !Latch || !DT.
dominates(ContextBB, Latch))
12539 const auto *Latch = L->getLoopLatch();
12542 if (!L->contains(ContextBB) || !Latch || !DT.
dominates(ContextBB, Latch))
12552bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow(CmpPredicate Pred,
12555 const SCEV *FoundLHS,
12556 const SCEV *FoundRHS) {
12565 if (!AddRecFoundLHS)
12572 const Loop *
L = AddRecFoundLHS->getLoop();
12573 if (L != AddRecLHS->getLoop())
12612 if (!RDiff || *LDiff != *RDiff)
12615 if (LDiff->isMinValue())
12618 APInt FoundRHSLimit;
12621 FoundRHSLimit = -(*RDiff);
12633bool ScalarEvolution::isImpliedViaMerge(CmpPredicate Pred,
const SCEV *
LHS,
12634 const SCEV *
RHS,
const SCEV *FoundLHS,
12635 const SCEV *FoundRHS,
unsigned Depth) {
12636 const PHINode *LPhi =
nullptr, *RPhi =
nullptr;
12640 bool Erased = PendingMerges.erase(LPhi);
12641 assert(Erased &&
"Failed to erase LPhi!");
12645 bool Erased = PendingMerges.erase(RPhi);
12646 assert(Erased &&
"Failed to erase RPhi!");
12654 if (!PendingMerges.insert(Phi).second)
12668 if (!PendingMerges.insert(Phi).second)
12674 if (!LPhi && !RPhi)
12685 assert(LPhi &&
"LPhi should definitely be a SCEVUnknown Phi!");
12689 auto ProvedEasily = [&](
const SCEV *
S1,
const SCEV *S2) {
12690 return isKnownViaNonRecursiveReasoning(Pred,
S1, S2) ||
12691 isImpliedCondOperandsViaRanges(Pred,
S1, S2, Pred, FoundLHS, FoundRHS) ||
12692 isImpliedViaOperations(Pred,
S1, S2, FoundLHS, FoundRHS,
Depth);
12695 if (RPhi && RPhi->getParent() == LBB) {
12702 const SCEV *
R =
getSCEV(RPhi->getIncomingValueForBlock(IncBB));
12703 if (!ProvedEasily(L, R))
12714 auto *RLoop = RAR->
getLoop();
12715 auto *Predecessor = RLoop->getLoopPredecessor();
12716 assert(Predecessor &&
"Loop with AddRec with no predecessor?");
12718 if (!ProvedEasily(L1, RAR->
getStart()))
12720 auto *Latch = RLoop->getLoopLatch();
12721 assert(Latch &&
"Loop with AddRec with no latch?");
12742 if (
auto *Loop = LI.getLoopFor(LBB))
12745 if (!ProvedEasily(L,
RHS))
12752bool ScalarEvolution::isImpliedCondOperandsViaShift(CmpPredicate Pred,
12755 const SCEV *FoundLHS,
12756 const SCEV *FoundRHS) {
12759 if (
RHS == FoundRHS) {
12764 if (
LHS != FoundLHS)
12771 Value *Shiftee, *ShiftValue;
12773 using namespace PatternMatch;
12774 if (
match(SUFoundRHS->getValue(),
12776 auto *ShifteeS =
getSCEV(Shiftee);
12794bool ScalarEvolution::isImpliedCondOperands(CmpPredicate Pred,
const SCEV *
LHS,
12796 const SCEV *FoundLHS,
12797 const SCEV *FoundRHS,
12798 const Instruction *CtxI) {
12799 return isImpliedCondOperandsViaRanges(Pred,
LHS,
RHS, Pred, FoundLHS,
12801 isImpliedCondOperandsViaNoOverflow(Pred,
LHS,
RHS, FoundLHS,
12803 isImpliedCondOperandsViaShift(Pred,
LHS,
RHS, FoundLHS, FoundRHS) ||
12804 isImpliedCondOperandsViaAddRecStart(Pred,
LHS,
RHS, FoundLHS, FoundRHS,
12806 isImpliedCondOperandsHelper(Pred,
LHS,
RHS, FoundLHS, FoundRHS);
12810template <
typename MinMaxExprType>
12812 const SCEV *Candidate) {
12817 return is_contained(MinMaxExpr->operands(), Candidate);
12830 const SCEV *LStart, *RStart, *Step;
12880bool ScalarEvolution::isImpliedViaOperations(CmpPredicate Pred,
const SCEV *
LHS,
12882 const SCEV *FoundLHS,
12883 const SCEV *FoundRHS,
12887 "LHS and RHS have different sizes?");
12890 "FoundLHS and FoundRHS have different sizes?");
12924 auto GetOpFromSExt = [&](
const SCEV *S) ->
const SCEV * {
12926 return Ext->getOperand();
12933 auto *OrigLHS =
LHS;
12934 auto *OrigFoundLHS = FoundLHS;
12935 LHS = GetOpFromSExt(
LHS);
12936 FoundLHS = GetOpFromSExt(FoundLHS);
12939 auto IsSGTViaContext = [&](
const SCEV *
S1,
const SCEV *S2) {
12942 FoundRHS,
Depth + 1);
12955 if (!LHSAddExpr->hasNoSignedWrap())
12958 SCEVUse LL = LHSAddExpr->getOperand(0);
12959 SCEVUse LR = LHSAddExpr->getOperand(1);
12963 auto IsSumGreaterThanRHS = [&](
const SCEV *
S1,
const SCEV *S2) {
12964 return IsSGTViaContext(
S1, MinusOne) && IsSGTViaContext(S2,
RHS);
12969 if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
12975 using namespace llvm::PatternMatch;
12994 if (!Numerator || Numerator->getType() != FoundLHS->
getType())
13002 auto *DTy = Denominator->getType();
13003 auto *FRHSTy = FoundRHS->
getType();
13004 if (DTy->isPointerTy() != FRHSTy->isPointerTy())
13023 IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
13034 auto *NegDenomMinusOne =
getMinusSCEV(MinusOne, DenominatorExt);
13036 IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
13044 if (isImpliedViaMerge(Pred, OrigLHS,
RHS, OrigFoundLHS, FoundRHS,
Depth + 1))
13077bool ScalarEvolution::isKnownViaNonRecursiveReasoning(CmpPredicate Pred,
13081 isKnownPredicateViaConstantRanges(Pred,
LHS,
RHS) ||
13084 isKnownPredicateViaNoOverflow(Pred,
LHS,
RHS);
13087bool ScalarEvolution::isImpliedCondOperandsHelper(CmpPredicate Pred,
13090 const SCEV *FoundLHS,
13091 const SCEV *FoundRHS) {
13127 if (isImpliedViaOperations(Pred,
LHS,
RHS, FoundLHS, FoundRHS))
13133bool ScalarEvolution::isImpliedCondOperandsViaRanges(
13134 CmpPredicate Pred,
const SCEV *
LHS,
const SCEV *
RHS, CmpPredicate FoundPred,
13135 const SCEV *FoundLHS,
const SCEV *FoundRHS) {
13149 ConstantRange FoundLHSRange =
13153 ConstantRange LHSRange = FoundLHSRange.
add(ConstantRange(*Addend));
13160 return LHSRange.
icmp(Pred, ConstRHS);
13163bool ScalarEvolution::canIVOverflowOnLT(
const SCEV *
RHS,
const SCEV *Stride,
13176 return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS);
13184 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS);
13187bool ScalarEvolution::canIVOverflowOnGT(
const SCEV *
RHS,
const SCEV *Stride,
13199 return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS);
13207 return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS);
13219const SCEV *ScalarEvolution::computeMaxBECountForLT(
const SCEV *Start,
13220 const SCEV *Stride,
13251 APInt Limit = MaxValue - (StrideForMaxBECount - 1);
13262 :
APIntOps::umax(MaxEnd, MinStart);
13269ScalarEvolution::howManyLessThans(
const SCEV *
LHS,
const SCEV *
RHS,
13270 const Loop *L,
bool IsSigned,
13271 bool ControlsOnlyExit,
bool AllowPredicates) {
13275 bool PredicatedIV =
false;
13280 auto canProveNUW = [&]() {
13283 if (!ControlsOnlyExit)
13304 Limit = Limit.
zext(OuterBitWidth);
13316 Type *Ty = ZExt->getType();
13327 if (!
IV && AllowPredicates) {
13332 PredicatedIV =
true;
13336 if (!
IV ||
IV->getLoop() != L || !
IV->isAffine())
13350 bool NoWrap = ControlsOnlyExit &&
any(
IV->getNoWrapFlags(WrapType));
13353 const SCEV *Stride =
IV->getStepRecurrence(*
this);
13358 if (!PositiveStride) {
13410 auto wouldZeroStrideBeUB = [&]() {
13422 if (!wouldZeroStrideBeUB()) {
13426 }
else if (!NoWrap) {
13429 if (canIVOverflowOnLT(
RHS, Stride, IsSigned))
13442 const SCEV *
Start =
IV->getStart();
13448 const SCEV *OrigStart =
Start;
13449 const SCEV *OrigRHS =
RHS;
13450 if (
Start->getType()->isPointerTy()) {
13461 const SCEV *End =
nullptr, *BECount =
nullptr,
13462 *BECountIfBackedgeTaken =
nullptr;
13465 if (PositiveStride && RHSAddRec !=
nullptr && RHSAddRec->getLoop() == L &&
13466 any(RHSAddRec->getNoWrapFlags())) {
13479 const SCEV *RHSStart = RHSAddRec->getStart();
13480 const SCEV *RHSStride = RHSAddRec->getStepRecurrence(*
this);
13492 const SCEV *Denominator =
getMinusSCEV(Stride, RHSStride);
13501 BECountIfBackedgeTaken =
13506 if (BECount ==
nullptr) {
13511 const SCEV *MaxBECount = computeMaxBECountForLT(
13514 MaxBECount,
false , Predicates);
13521 auto *OrigStartMinusStride =
getMinusSCEV(OrigStart, Stride);
13548 const SCEV *Numerator =
13554 auto canProveRHSGreaterThanEqualStart = [&]() {
13573 auto *StartMinusOne =
13580 if (canProveRHSGreaterThanEqualStart()) {
13595 BECountIfBackedgeTaken =
13611 bool MayAddOverflow = [&] {
13657 if (Start == Stride || Start ==
getMinusSCEV(Stride, One)) {
13671 if (!MayAddOverflow) {
13683 const SCEV *ConstantMaxBECount;
13684 bool MaxOrZero =
false;
13686 ConstantMaxBECount = BECount;
13687 }
else if (BECountIfBackedgeTaken &&
13692 ConstantMaxBECount = BECountIfBackedgeTaken;
13695 ConstantMaxBECount = computeMaxBECountForLT(
13703 const SCEV *SymbolicMaxBECount =
13705 return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount, MaxOrZero,
13709ScalarEvolution::ExitLimit ScalarEvolution::howManyGreaterThans(
13710 const SCEV *
LHS,
const SCEV *
RHS,
const Loop *L,
bool IsSigned,
13711 bool ControlsOnlyExit,
bool AllowPredicates) {
13718 if (!
IV && AllowPredicates)
13725 if (!
IV ||
IV->getLoop() != L || !
IV->isAffine())
13729 bool NoWrap = ControlsOnlyExit &&
any(
IV->getNoWrapFlags(WrapType));
13742 if (!Stride->
isOne() && !NoWrap)
13743 if (canIVOverflowOnGT(
RHS, Stride, IsSigned))
13746 const SCEV *
Start =
IV->getStart();
13747 const SCEV *End =
RHS;
13758 if (
Start->getType()->isPointerTy()) {
13793 const SCEV *ConstantMaxBECount =
13800 ConstantMaxBECount = BECount;
13801 const SCEV *SymbolicMaxBECount =
13804 return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount,
false,
13810 if (
Range.isFullSet())
13815 if (!SC->getValue()->isZero()) {
13821 return ShiftedAddRec->getNumIterationsInRange(
13822 Range.subtract(SC->getAPInt()), SE);
13853 APInt ExitVal = (End +
A).udiv(
A);
13866 ConstantInt::get(SE.
getContext(), ExitVal - 1), SE)->getValue()) &&
13867 "Linear scev computation is off in a bad way!");
13898 assert(!
Last->isZero() &&
"Recurrency with zero step?");
13923 Ty = Store->getValueOperand()->getType();
13925 Ty = Load->getType();
13938 assert(SE &&
"SCEVCallbackVH called with a null ScalarEvolution!");
13940 SE->ConstantEvolutionLoopExitValue.erase(PN);
13941 SE->eraseValueFromMap(getValPtr());
13945void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(
Value *V) {
13946 assert(SE &&
"SCEVCallbackVH called with a null ScalarEvolution!");
13956 : CallbackVH(
V), SE(se) {}
13965 : F(F), DL(F.
getDataLayout()), TLI(TLI), AC(AC), DT(DT), LI(LI),
13967 LoopDispositions(64), BlockDispositions(64) {
13979 F.getParent(), Intrinsic::experimental_guard);
13980 HasGuards = GuardDecl && !GuardDecl->use_empty();
13984 : F(Arg.F), DL(Arg.DL), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC),
13985 DT(Arg.DT), LI(Arg.LI), CouldNotCompute(
std::
move(Arg.CouldNotCompute)),
13986 ValueExprMap(
std::
move(Arg.ValueExprMap)),
13987 PendingLoopPredicates(
std::
move(Arg.PendingLoopPredicates)),
13988 PendingMerges(
std::
move(Arg.PendingMerges)),
13989 ConstantMultipleCache(
std::
move(Arg.ConstantMultipleCache)),
13990 BackedgeTakenCounts(
std::
move(Arg.BackedgeTakenCounts)),
13991 PredicatedBackedgeTakenCounts(
13992 std::
move(Arg.PredicatedBackedgeTakenCounts)),
13993 BECountUsers(
std::
move(Arg.BECountUsers)),
13994 ConstantEvolutionLoopExitValue(
13995 std::
move(Arg.ConstantEvolutionLoopExitValue)),
13996 ValuesAtScopes(
std::
move(Arg.ValuesAtScopes)),
13997 ValuesAtScopesUsers(
std::
move(Arg.ValuesAtScopesUsers)),
13998 LoopDispositions(
std::
move(Arg.LoopDispositions)),
13999 LoopPropertiesCache(
std::
move(Arg.LoopPropertiesCache)),
14000 BlockDispositions(
std::
move(Arg.BlockDispositions)),
14001 SCEVUsers(
std::
move(Arg.SCEVUsers)),
14002 UnsignedRanges(
std::
move(Arg.UnsignedRanges)),
14003 SignedRanges(
std::
move(Arg.SignedRanges)),
14004 UniqueSCEVs(
std::
move(Arg.UniqueSCEVs)),
14005 UniquePreds(
std::
move(Arg.UniquePreds)),
14006 SCEVAllocator(
std::
move(Arg.SCEVAllocator)),
14007 LoopUsers(
std::
move(Arg.LoopUsers)),
14008 PredicatedSCEVRewrites(
std::
move(Arg.PredicatedSCEVRewrites)),
14009 FirstUnknown(Arg.FirstUnknown) {
14010 Arg.FirstUnknown =
nullptr;
14019 Tmp->~SCEVUnknown();
14021 FirstUnknown =
nullptr;
14023 ExprValueMap.clear();
14024 ValueExprMap.clear();
14026 BackedgeTakenCounts.clear();
14027 PredicatedBackedgeTakenCounts.clear();
14029 assert(PendingLoopPredicates.empty() &&
"isImpliedCond garbage");
14030 assert(PendingMerges.empty() &&
"isImpliedViaMerge garbage");
14031 assert(!WalkingBEDominatingConds &&
"isLoopBackedgeGuardedByCond garbage!");
14032 assert(!ProvingSplitPredicate &&
"ProvingSplitPredicate garbage!");
14054 L->getHeader()->printAsOperand(OS,
false);
14058 L->getExitingBlocks(ExitingBlocks);
14059 if (ExitingBlocks.
size() != 1)
14060 OS <<
"<multiple exits> ";
14064 OS <<
"backedge-taken count is ";
14067 OS <<
"Unpredictable backedge-taken count.";
14070 if (ExitingBlocks.
size() > 1)
14071 for (
BasicBlock *ExitingBlock : ExitingBlocks) {
14072 OS <<
" exit count for " << ExitingBlock->
getName() <<
": ";
14080 OS <<
"\n predicated exit count for " << ExitingBlock->
getName()
14083 OS <<
"\n Predicates:\n";
14084 for (
const auto *
P : Predicates)
14092 L->getHeader()->printAsOperand(OS,
false);
14097 OS <<
"constant max backedge-taken count is ";
14100 OS <<
", actual taken count either this or zero.";
14102 OS <<
"Unpredictable constant max backedge-taken count. ";
14107 L->getHeader()->printAsOperand(OS,
false);
14112 OS <<
"symbolic max backedge-taken count is ";
14115 OS <<
", actual taken count either this or zero.";
14117 OS <<
"Unpredictable symbolic max backedge-taken count. ";
14121 if (ExitingBlocks.
size() > 1)
14122 for (
BasicBlock *ExitingBlock : ExitingBlocks) {
14123 OS <<
" symbolic max exit count for " << ExitingBlock->
getName() <<
": ";
14133 OS <<
"\n predicated symbolic max exit count for "
14134 << ExitingBlock->
getName() <<
": ";
14136 OS <<
"\n Predicates:\n";
14137 for (
const auto *
P : Predicates)
14147 assert(!Preds.
empty() &&
"Different predicated BTC, but no predicates");
14149 L->getHeader()->printAsOperand(OS,
false);
14152 OS <<
"Predicated backedge-taken count is ";
14155 OS <<
"Unpredictable predicated backedge-taken count.";
14157 OS <<
" Predicates:\n";
14158 for (
const auto *
P : Preds)
14163 auto *PredConstantMax =
14165 if (PredConstantMax != ConstantBTC) {
14167 "different predicated constant max BTC but no predicates");
14169 L->getHeader()->printAsOperand(OS,
false);
14172 OS <<
"Predicated constant max backedge-taken count is ";
14175 OS <<
"Unpredictable predicated constant max backedge-taken count.";
14177 OS <<
" Predicates:\n";
14178 for (
const auto *
P : Preds)
14183 auto *PredSymbolicMax =
14185 if (SymbolicBTC != PredSymbolicMax) {
14187 "Different predicated symbolic max BTC, but no predicates");
14189 L->getHeader()->printAsOperand(OS,
false);
14192 OS <<
"Predicated symbolic max backedge-taken count is ";
14195 OS <<
"Unpredictable predicated symbolic max backedge-taken count.";
14197 OS <<
" Predicates:\n";
14198 for (
const auto *
P : Preds)
14204 L->getHeader()->printAsOperand(OS,
false);
14228 OS <<
"Computable";
14238 OS <<
"DoesNotDominate";
14244 OS <<
"ProperlyDominates";
14261 OS <<
"Classifying expressions for: ";
14262 F.printAsOperand(OS,
false);
14277 const Loop *L = LI.getLoopFor(
I.getParent());
14292 OS <<
"\t\t" "Exits: ";
14295 OS <<
"<<Unknown>>";
14301 for (
const auto *Iter = L; Iter; Iter = Iter->getParentLoop()) {
14303 Iter->getHeader()->printAsOperand(OS,
false);
14311 InnerL->getHeader()->printAsOperand(OS,
false);
14322 OS <<
"Determining loop execution counts for: ";
14323 F.printAsOperand(OS,
false);
14331 auto &Values = LoopDispositions[S];
14332 for (
auto &V : Values) {
14333 if (V.getPointer() == L)
14338 auto &Values2 = LoopDispositions[S];
14340 if (V.getPointer() == L) {
14349ScalarEvolution::computeLoopDisposition(
const SCEV *S,
const Loop *L) {
14368 assert(!L->contains(AR->
getLoop()) &&
"Containing loop's header does not"
14369 " dominate the contained loop's header?");
14397 bool HasVarying =
false;
14431 auto &Values = BlockDispositions[S];
14432 for (
auto &V : Values) {
14433 if (V.getPointer() == BB)
14438 auto &Values2 = BlockDispositions[S];
14440 if (V.getPointer() == BB) {
14449ScalarEvolution::computeBlockDisposition(
const SCEV *S,
const BasicBlock *BB) {
14479 bool Proper =
true;
14490 if (Instruction *
I =
14492 if (
I->getParent() == BB)
14494 if (DT.properlyDominates(
I->getParent(), BB))
14517void ScalarEvolution::forgetBackedgeTakenCounts(
const Loop *L,
14520 Predicated ? PredicatedBackedgeTakenCounts : BackedgeTakenCounts;
14521 auto It = BECounts.find(L);
14522 if (It != BECounts.end()) {
14523 for (
const ExitNotTakenInfo &ENT : It->second.ExitNotTaken) {
14524 for (
const SCEV *S : {ENT.ExactNotTaken, ENT.SymbolicMaxNotTaken}) {
14526 auto UserIt = BECountUsers.find(S);
14527 assert(UserIt != BECountUsers.end());
14532 BECounts.erase(It);
14540 while (!Worklist.
empty()) {
14542 auto Users = SCEVUsers.find(Curr);
14543 if (
Users != SCEVUsers.end())
14544 for (
const auto *User :
Users->second)
14545 if (ToForget.
insert(User).second)
14549 for (
const auto *S : ToForget)
14550 forgetMemoizedResultsImpl(S);
14552 for (
auto I = PredicatedSCEVRewrites.begin();
14553 I != PredicatedSCEVRewrites.end();) {
14554 std::pair<const SCEV *, const Loop *>
Entry =
I->first;
14555 if (ToForget.count(
Entry.first))
14556 PredicatedSCEVRewrites.erase(
I++);
14562void ScalarEvolution::forgetMemoizedResultsImpl(
const SCEV *S) {
14563 LoopDispositions.erase(S);
14564 BlockDispositions.erase(S);
14565 UnsignedRanges.erase(S);
14566 SignedRanges.erase(S);
14567 HasRecMap.erase(S);
14568 ConstantMultipleCache.erase(S);
14571 UnsignedWrapViaInductionTried.erase(AR);
14572 SignedWrapViaInductionTried.erase(AR);
14575 auto ExprIt = ExprValueMap.find(S);
14576 if (ExprIt != ExprValueMap.end()) {
14577 for (
Value *V : ExprIt->second) {
14578 auto ValueIt = ValueExprMap.find_as(V);
14579 if (ValueIt != ValueExprMap.end())
14580 ValueExprMap.erase(ValueIt);
14582 ExprValueMap.erase(ExprIt);
14585 auto ScopeIt = ValuesAtScopes.find(S);
14586 if (ScopeIt != ValuesAtScopes.end()) {
14587 for (
const auto &Pair : ScopeIt->second)
14590 std::make_pair(Pair.first, S));
14591 ValuesAtScopes.erase(ScopeIt);
14594 auto ScopeUserIt = ValuesAtScopesUsers.find(S);
14595 if (ScopeUserIt != ValuesAtScopesUsers.end()) {
14596 for (
const auto &Pair : ScopeUserIt->second)
14597 llvm::erase(ValuesAtScopes[Pair.second], std::make_pair(Pair.first, S));
14598 ValuesAtScopesUsers.erase(ScopeUserIt);
14601 auto BEUsersIt = BECountUsers.find(S);
14602 if (BEUsersIt != BECountUsers.end()) {
14604 auto Copy = BEUsersIt->second;
14605 for (
const auto &Pair : Copy)
14606 forgetBackedgeTakenCounts(Pair.getPointer(), Pair.getInt());
14607 BECountUsers.erase(BEUsersIt);
14610 auto FoldUser = FoldCacheUser.find(S);
14611 if (FoldUser != FoldCacheUser.end())
14612 for (
auto &KV : FoldUser->second)
14613 FoldCache.erase(KV);
14614 FoldCacheUser.erase(S);
14618ScalarEvolution::getUsedLoops(
const SCEV *S,
14620 struct FindUsedLoops {
14621 FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed)
14622 : LoopsUsed(LoopsUsed) {}
14623 SmallPtrSetImpl<const Loop *> &LoopsUsed;
14624 bool follow(
const SCEV *S) {
14630 bool isDone()
const {
return false; }
14633 FindUsedLoops
F(LoopsUsed);
14634 SCEVTraversal<FindUsedLoops>(F).visitAll(S);
14637void ScalarEvolution::getReachableBlocks(
14640 Worklist.
push_back(&F.getEntryBlock());
14641 while (!Worklist.
empty()) {
14643 if (!Reachable.
insert(BB).second)
14651 Worklist.
push_back(
C->isOne() ? TrueBB : FalseBB);
14658 if (isKnownPredicateViaConstantRanges(
Cmp->getCmpPredicate(), L, R)) {
14662 if (isKnownPredicateViaConstantRanges(
Cmp->getInverseCmpPredicate(), L,
14697 SCEVMapper SCM(SE2);
14699 SE2.getReachableBlocks(ReachableBlocks, F);
14701 auto GetDelta = [&](
const SCEV *Old,
const SCEV *New) ->
const SCEV * {
14719 while (!LoopStack.
empty()) {
14725 if (!ReachableBlocks.
contains(L->getHeader()))
14730 auto It = BackedgeTakenCounts.find(L);
14731 if (It == BackedgeTakenCounts.end())
14735 SCM.visit(It->second.getExact(L,
const_cast<ScalarEvolution *
>(
this)));
14755 const SCEV *Delta = GetDelta(CurBECount, NewBECount);
14756 if (Delta && !Delta->
isZero()) {
14757 dbgs() <<
"Trip Count for " << *L <<
" Changed!\n";
14758 dbgs() <<
"Old: " << *CurBECount <<
"\n";
14759 dbgs() <<
"New: " << *NewBECount <<
"\n";
14760 dbgs() <<
"Delta: " << *Delta <<
"\n";
14768 while (!Worklist.
empty()) {
14770 if (ValidLoops.
insert(L).second)
14771 Worklist.
append(L->begin(), L->end());
14773 for (
const auto &KV : ValueExprMap) {
14778 "AddRec references invalid loop");
14783 auto It = ExprValueMap.find(KV.second);
14784 if (It == ExprValueMap.end() || !It->second.contains(KV.first)) {
14785 dbgs() <<
"Value " << *KV.first
14786 <<
" is in ValueExprMap but not in ExprValueMap\n";
14791 if (!ReachableBlocks.
contains(
I->getParent()))
14793 const SCEV *OldSCEV = SCM.visit(KV.second);
14795 const SCEV *Delta = GetDelta(OldSCEV, NewSCEV);
14796 if (Delta && !Delta->
isZero()) {
14797 dbgs() <<
"SCEV for value " << *
I <<
" changed!\n"
14798 <<
"Old: " << *OldSCEV <<
"\n"
14799 <<
"New: " << *NewSCEV <<
"\n"
14800 <<
"Delta: " << *Delta <<
"\n";
14806 for (
const auto &KV : ExprValueMap) {
14807 for (
Value *V : KV.second) {
14808 const SCEV *S = ValueExprMap.lookup(V);
14810 dbgs() <<
"Value " << *V
14811 <<
" is in ExprValueMap but not in ValueExprMap\n";
14814 if (S != KV.first) {
14815 dbgs() <<
"Value " << *V <<
" mapped to " << *S <<
" rather than "
14816 << *KV.first <<
"\n";
14823 for (
const auto &S : UniqueSCEVs) {
14828 auto It = SCEVUsers.find(
Op);
14829 if (It != SCEVUsers.end() && It->second.count(&S))
14831 dbgs() <<
"Use of operand " << *
Op <<
" by user " << S
14832 <<
" is not being tracked!\n";
14838 for (
const auto &ValueAndVec : ValuesAtScopes) {
14840 for (
const auto &LoopAndValueAtScope : ValueAndVec.second) {
14841 const Loop *L = LoopAndValueAtScope.first;
14842 const SCEV *ValueAtScope = LoopAndValueAtScope.second;
14844 auto It = ValuesAtScopesUsers.find(ValueAtScope);
14845 if (It != ValuesAtScopesUsers.end() &&
14848 dbgs() <<
"Value: " << *
Value <<
", Loop: " << *L <<
", ValueAtScope: "
14849 << *ValueAtScope <<
" missing in ValuesAtScopesUsers\n";
14855 for (
const auto &ValueAtScopeAndVec : ValuesAtScopesUsers) {
14856 const SCEV *ValueAtScope = ValueAtScopeAndVec.first;
14857 for (
const auto &LoopAndValue : ValueAtScopeAndVec.second) {
14858 const Loop *L = LoopAndValue.first;
14859 const SCEV *
Value = LoopAndValue.second;
14861 auto It = ValuesAtScopes.find(
Value);
14862 if (It != ValuesAtScopes.end() &&
14863 is_contained(It->second, std::make_pair(L, ValueAtScope)))
14865 dbgs() <<
"Value: " << *
Value <<
", Loop: " << *L <<
", ValueAtScope: "
14866 << *ValueAtScope <<
" missing in ValuesAtScopes\n";
14872 auto VerifyBECountUsers = [&](
bool Predicated) {
14874 Predicated ? PredicatedBackedgeTakenCounts : BackedgeTakenCounts;
14875 for (
const auto &LoopAndBEInfo : BECounts) {
14876 for (
const ExitNotTakenInfo &ENT : LoopAndBEInfo.second.ExitNotTaken) {
14877 for (
const SCEV *S : {ENT.ExactNotTaken, ENT.SymbolicMaxNotTaken}) {
14879 auto UserIt = BECountUsers.find(S);
14880 if (UserIt != BECountUsers.end() &&
14881 UserIt->second.contains({ LoopAndBEInfo.first, Predicated }))
14883 dbgs() <<
"Value " << *S <<
" for loop " << *LoopAndBEInfo.first
14884 <<
" missing from BECountUsers\n";
14891 VerifyBECountUsers(
false);
14892 VerifyBECountUsers(
true);
14895 for (
auto &[S, Values] : LoopDispositions) {
14896 for (
auto [
Loop, CachedDisposition] : Values) {
14898 if (CachedDisposition != RecomputedDisposition) {
14899 dbgs() <<
"Cached disposition of " << *S <<
" for loop " << *
Loop
14900 <<
" is incorrect: cached " << CachedDisposition <<
", actual "
14901 << RecomputedDisposition <<
"\n";
14908 for (
auto &[S, Values] : BlockDispositions) {
14909 for (
auto [BB, CachedDisposition] : Values) {
14911 if (CachedDisposition != RecomputedDisposition) {
14912 dbgs() <<
"Cached disposition of " << *S <<
" for block %"
14913 << BB->
getName() <<
" is incorrect: cached " << CachedDisposition
14914 <<
", actual " << RecomputedDisposition <<
"\n";
14921 for (
auto [
FoldID, Expr] : FoldCache) {
14922 auto I = FoldCacheUser.find(Expr);
14923 if (
I == FoldCacheUser.end()) {
14924 dbgs() <<
"Missing entry in FoldCacheUser for cached expression " << *Expr
14929 dbgs() <<
"Missing FoldID in cached users of " << *Expr <<
"!\n";
14933 for (
auto [Expr, IDs] : FoldCacheUser) {
14934 for (
auto &
FoldID : IDs) {
14937 dbgs() <<
"Missing entry in FoldCache for expression " << *Expr
14942 dbgs() <<
"Entry in FoldCache doesn't match FoldCacheUser: " << *S
14943 <<
" != " << *Expr <<
"!\n";
14954 for (
auto [S, Multiple] : ConstantMultipleCache) {
14956 if ((Multiple != 0 && RecomputedMultiple != 0 &&
14957 Multiple.
urem(RecomputedMultiple) != 0 &&
14958 RecomputedMultiple.
urem(Multiple) != 0)) {
14959 dbgs() <<
"Incorrect cached computation in ConstantMultipleCache for "
14960 << *S <<
" : Computed " << RecomputedMultiple
14961 <<
" but cache contains " << Multiple <<
"!\n";
14969 FunctionAnalysisManager::Invalidator &Inv) {
15001 OS <<
"Printing analysis 'Scalar Evolution Analysis' for function '"
15002 <<
F.getName() <<
"':\n";
15008 "Scalar Evolution Analysis",
false,
true)
15057 const SCEV *LHS,
const SCEV *RHS) {
15059 assert(LHS->getType() == RHS->getType() &&
15060 "Type mismatch between LHS and RHS");
15063 ID.AddInteger(Pred);
15064 ID.AddPointer(LHS);
15065 ID.AddPointer(RHS);
15066 void *IP =
nullptr;
15067 if (
const auto *S = UniquePreds.FindNodeOrInsertPos(
ID, IP))
15071 UniquePreds.InsertNode(Eq, IP);
15082 ID.AddInteger(AddedFlags);
15083 void *IP =
nullptr;
15084 if (
const auto *S = UniquePreds.FindNodeOrInsertPos(
ID, IP))
15086 auto *OF =
new (SCEVAllocator)
15088 UniquePreds.InsertNode(OF, IP);
15108 SCEVPredicateRewriter
Rewriter(L, SE, NewPreds, Pred);
15109 return Rewriter.visit(S);
15115 for (
const auto *Pred : U->getPredicates())
15117 if (IPred->getLHS() == Expr &&
15119 return IPred->getRHS();
15121 if (IPred->getLHS() == Expr &&
15122 IPred->getPredicate() == ICmpInst::ICMP_EQ)
15123 return IPred->getRHS();
15126 return convertToAddRecWithPreds(Expr);
15129 const SCEV *visitZeroExtendExpr(
const SCEVZeroExtendExpr *Expr) {
15145 const SCEV *visitSignExtendExpr(
const SCEVSignExtendExpr *Expr) {
15162 explicit SCEVPredicateRewriter(
15163 const Loop *L, ScalarEvolution &SE,
15164 SmallVectorImpl<const SCEVPredicate *> *NewPreds,
15165 const SCEVPredicate *Pred)
15166 : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred),
L(
L) {}
15168 bool addOverflowAssumption(
const SCEVPredicate *
P) {
15171 return Pred && Pred->
implies(
P, SE);
15177 bool addOverflowAssumption(
const SCEVAddRecExpr *AR,
15180 return addOverflowAssumption(
A);
15189 const SCEV *convertToAddRecWithPreds(
const SCEVUnknown *Expr) {
15193 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
15195 if (!PredicatedRewrite)
15197 for (
const auto *
P : PredicatedRewrite->second){
15200 if (L != WP->getExpr()->getLoop())
15203 if (!addOverflowAssumption(
P))
15206 return PredicatedRewrite->first;
15209 SmallVectorImpl<const SCEVPredicate *> *NewPreds;
15210 const SCEVPredicate *Pred;
15219 return SCEVPredicateRewriter::rewrite(S, L, *
this,
nullptr, &Preds);
15226 S = SCEVPredicateRewriter::rewrite(S, L, *
this, &TransformPreds,
nullptr);
15246 if (!Step->
isOne())
15271 assert(LHS->getType() == RHS->getType() &&
"LHS and RHS types don't match");
15272 assert(LHS != RHS &&
"LHS and RHS are the same SCEV");
15285 return Op->LHS == LHS &&
Op->RHS == RHS;
15292 OS.
indent(
Depth) <<
"Equal predicate: " << *LHS <<
" == " << *RHS <<
"\n";
15294 OS.
indent(
Depth) <<
"Compare predicate: " << *LHS <<
" " << Pred <<
") "
15319 const SCEV *Start = AR->getStart();
15320 const SCEV *OpStart =
Op->AR->getStart();
15325 if (Start->getType()->isPointerTy() && Start->getType() != OpStart->
getType())
15334 const SCEV *Step = AR->getStepRecurrence(SE);
15335 const SCEV *OpStep =
Op->AR->getStepRecurrence(SE);
15388 if (Step->getValue()->getValue().isNonNegative())
15392 return ImpliedFlags;
15399 for (
const auto *
P : Preds)
15412 return this->implies(I, SE);
15420 for (
const auto *Pred : Preds)
15421 Pred->print(OS,
Depth);
15426 for (
const auto *Pred : Set->Preds)
15434 bool CheckImplies = Preds.
size() < 16;
15437 if (CheckImplies &&
implies(
N, SE))
15443 for (
auto *
P : Preds) {
15444 if (CheckImplies &&
N->implies(
P, SE))
15448 Preds = std::move(PrunedPreds);
15449 Preds.push_back(
N);
15456 Preds = std::make_unique<SCEVUnionPredicate>(
Empty, SE);
15461 for (
const auto *
Op :
Ops)
15466 SCEVUsers[
Op].insert(
User);
15475 SCEVUsers[
Op].insert(
User);
15479 const SCEV *Expr = SE.getSCEV(V);
15484 RewriteEntry &Entry = RewriteMap[Expr];
15487 if (Entry.second && Generation == Entry.first)
15488 return Entry.second;
15493 Expr = Entry.second;
15495 const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, *Preds);
15496 Entry = {Generation, NewSCEV};
15502 if (!BackedgeCount) {
15504 BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, Preds);
15505 for (
const auto *
P : Preds)
15508 return BackedgeCount;
15512 if (!SymbolicMaxBackedgeCount) {
15514 SymbolicMaxBackedgeCount =
15515 SE.getPredicatedSymbolicMaxBackedgeTakenCount(&L, Preds);
15516 for (
const auto *
P : Preds)
15519 return SymbolicMaxBackedgeCount;
15523 if (!SmallConstantMaxTripCount) {
15525 SmallConstantMaxTripCount = SE.getSmallConstantMaxTripCount(&L, &Preds);
15526 for (
const auto *
P : Preds)
15529 return *SmallConstantMaxTripCount;
15533 if (Preds->implies(&Pred, SE))
15538 Preds = std::make_unique<SCEVUnionPredicate>(NewPreds, SE);
15539 updateGeneration();
15546void PredicatedScalarEvolution::updateGeneration() {
15548 if (++Generation == 0) {
15549 for (
auto &
II : RewriteMap) {
15550 const SCEV *Rewritten =
II.second.second;
15567 auto II = FlagsMap.insert({V, Flags});
15580 auto II = FlagsMap.find(V);
15582 if (
II != FlagsMap.end())
15591 auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds);
15596 for (
const auto *
P : NewPreds)
15599 RewriteMap[SE.getSCEV(V)] = {Generation, New};
15605 : RewriteMap(
Init.RewriteMap), SE(
Init.SE), L(
Init.L),
15608 Generation(
Init.Generation), BackedgeCount(
Init.BackedgeCount) {
15609 for (
auto I :
Init.FlagsMap)
15610 FlagsMap.insert(
I);
15615 for (
auto *BB : L.getBlocks())
15616 for (
auto &
I : *BB) {
15617 if (!SE.isSCEVable(
I.getType()))
15620 auto *Expr = SE.getSCEV(&
I);
15621 auto II = RewriteMap.find(Expr);
15623 if (
II == RewriteMap.end())
15627 if (
II->second.second == Expr)
15632 OS.
indent(
Depth + 2) <<
"--> " << *
II->second.second <<
"\n";
15640 LoopGuards Guards(SE);
15648void ScalarEvolution::LoopGuards::collectFromPHI(
15656 using MinMaxPattern = std::pair<const SCEVConstant *, SCEVTypes>;
15657 auto GetMinMaxConst = [&](
unsigned IncomingIdx) -> MinMaxPattern {
15671 auto &RewriteMap =
G->second.RewriteMap;
15672 if (RewriteMap.empty())
15674 auto S = RewriteMap.find(SE.
getSCEV(
Phi.getIncomingValue(IncomingIdx)));
15675 if (S == RewriteMap.end())
15681 return {C0,
SM->getSCEVType()};
15684 auto MergeMinMaxConst = [](MinMaxPattern
P1,
15685 MinMaxPattern
P2) -> MinMaxPattern {
15686 auto [C1,
T1] =
P1;
15687 auto [C2, T2] =
P2;
15688 if (!C1 || !C2 ||
T1 != T2)
15692 return {C1->getAPInt().
ult(C2->getAPInt()) ? C1 : C2,
T1};
15694 return {C1->getAPInt().
slt(C2->getAPInt()) ? C1 : C2,
T1};
15696 return {C1->getAPInt().
ugt(C2->getAPInt()) ? C1 : C2,
T1};
15698 return {C1->getAPInt().
sgt(C2->getAPInt()) ? C1 : C2,
T1};
15703 auto P = GetMinMaxConst(0);
15704 for (
unsigned int In = 1;
In <
Phi.getNumIncomingValues();
In++) {
15707 P = MergeMinMaxConst(
P, GetMinMaxConst(In));
15710 const SCEV *
LHS = SE.
getSCEV(
const_cast<PHINode *
>(&Phi));
15713 Guards.RewriteMap.insert({
LHS,
RHS});
15721 const APInt &DivisorVal,
15723 const APInt *ExprVal;
15736 const APInt &DivisorVal,
15738 const APInt *ExprVal;
15746 return SE.
getConstant(*ExprVal + DivisorVal - Rem);
15760 const SCEV *URemRHS =
nullptr;
15764 const SCEV *Multiple =
15766 DivInfo[URemLHS] = Multiple;
15768 Multiples[URemLHS] =
C->getAPInt();
15788 auto IsMinMaxSCEVWithNonNegativeConstant =
15792 if (
MinMax->getNumOperands() != 2)
15795 if (
C->getAPInt().isNegative())
15797 SCTy =
MinMax->getSCEVType();
15806 const SCEV *MinMaxLHS =
nullptr, *MinMaxRHS =
nullptr;
15808 if (!IsMinMaxSCEVWithNonNegativeConstant(MinMaxExpr, SCTy, MinMaxLHS,
15813 auto *DivisibleExpr =
15821void ScalarEvolution::LoopGuards::collectFromBlock(
15823 const BasicBlock *
Block,
const BasicBlock *Pred,
15831 DenseMap<const SCEV *, const SCEV *> &RewriteMap,
15842 &ExprsToRewrite]() {
15843 const SCEVConstant *C1;
15856 if (ExactRegion.isWrappedSet() || ExactRegion.isFullSet())
15858 auto [
I,
Inserted] = RewriteMap.try_emplace(LHSUnknown);
15859 const SCEV *RewrittenLHS =
Inserted ? LHSUnknown :
I->second;
15867 if (MatchRangeCheckIdiom())
15884 auto AddRewrite = [&](
const SCEV *From,
const SCEV *FromRewritten,
15886 if (From == FromRewritten)
15888 RewriteMap[From] = To;
15894 auto GetMaybeRewritten = [&](
const SCEV *S) {
15895 return RewriteMap.lookup_or(S, S);
15898 const SCEV *RewrittenLHS = GetMaybeRewritten(
LHS);
15900 const APInt &DividesBy =
15915 switch (Predicate) {
15944 SmallPtrSet<const SCEV *, 16> Visited;
15946 auto EnqueueOperands = [&Worklist](
const SCEVNAryExpr *S) {
15950 while (!Worklist.
empty()) {
15954 if (!Visited.
insert(From).second)
15956 const SCEV *FromRewritten = GetMaybeRewritten(From);
15957 const SCEV *To =
nullptr;
15959 switch (Predicate) {
15964 EnqueueOperands(
UMax);
15970 EnqueueOperands(
SMax);
15976 EnqueueOperands(
UMin);
15982 EnqueueOperands(
SMin);
15990 const SCEV *OneAlignedUp =
15992 To = SE.
getUMaxExpr(FromRewritten, OneAlignedUp);
16004 const SCEVConstant *
C;
16013 Guards.NotEqual.insert({
LHS,
RHS});
16022 AddRewrite(From, FromRewritten, To);
16039 SE.F.
getParent(), Intrinsic::experimental_guard);
16041 for (
const auto *GU : GuardDecl->users())
16043 if (Guard->getFunction() ==
Block->getParent() &&
16052 unsigned NumCollectedConditions = 0;
16054 std::pair<const BasicBlock *, const BasicBlock *> Pair(Pred,
Block);
16056 Pair = SE.getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
16058 const CondBrInst *LoopEntryPredicate =
16060 if (!LoopEntryPredicate)
16065 NumCollectedConditions++;
16069 if (
Depth > 0 && NumCollectedConditions == 2)
16077 if (Pair.second->hasNPredecessorsOrMore(2) &&
16079 SmallDenseMap<const BasicBlock *, LoopGuards> IncomingGuards;
16080 for (
auto &Phi : Pair.second->phis())
16091 for (
auto [Term, EnterIfTrue] :
reverse(Terms)) {
16092 SmallVector<Value *, 8> Worklist;
16093 SmallPtrSet<Value *, 8> Visited;
16095 while (!Worklist.
empty()) {
16102 EnterIfTrue ?
Cmp->getPredicate() :
Cmp->getInversePredicate();
16126 DenseMap<const SCEV *, APInt> Multiples;
16128 for (
const auto &[Predicate,
LHS,
RHS] : GuardsToProcess) {
16135 for (
const auto &[Predicate,
LHS,
RHS] : GuardsToProcess)
16136 CollectCondition(Predicate,
LHS,
RHS, Guards.RewriteMap, DivGuards);
16140 for (
const auto &[K, Divisor] : Multiples) {
16141 const SCEV *DivisorSCEV = SE.
getConstant(Divisor);
16142 Guards.RewriteMap[
K] =
16144 Guards.
rewrite(K), Divisor, SE),
16153 Guards.PreserveNUW =
true;
16154 Guards.PreserveNSW =
true;
16155 for (
const SCEV *Expr : ExprsToRewrite) {
16156 const SCEV *RewriteTo = Guards.RewriteMap[Expr];
16157 Guards.PreserveNUW &=
16159 Guards.PreserveNSW &=
16166 if (ExprsToRewrite.size() > 1) {
16167 for (
const SCEV *Expr : ExprsToRewrite) {
16168 const SCEV *RewriteTo = Guards.RewriteMap[Expr];
16169 Guards.RewriteMap.erase(Expr);
16170 Guards.RewriteMap.insert({Expr, Guards.
rewrite(RewriteTo)});
16179 class SCEVLoopGuardRewriter
16190 NotEqual(Guards.NotEqual) {
16191 if (Guards.PreserveNUW)
16193 if (Guards.PreserveNSW)
16200 return Map.lookup_or(Expr, Expr);
16204 if (
const SCEV *S = Map.lookup(Expr))
16211 unsigned Bitwidth = Ty->getScalarSizeInBits() / 2;
16212 while (Bitwidth % 8 == 0 && Bitwidth >= 8 &&
16213 Bitwidth >
Op->getType()->getScalarSizeInBits()) {
16215 auto *NarrowExt = SE.getZeroExtendExpr(
Op, NarrowTy);
16216 if (
const SCEV *S = Map.lookup(NarrowExt))
16217 return SE.getZeroExtendExpr(S, Ty);
16218 Bitwidth = Bitwidth / 2;
16226 if (
const SCEV *S = Map.lookup(Expr))
16233 if (
const SCEV *S = Map.lookup(Expr))
16239 if (
const SCEV *S = Map.lookup(Expr))
16247 auto RewriteSubtraction = [&](
const SCEV *S) ->
const SCEV * {
16252 if (NotEqual.contains({LHS, RHS})) {
16254 SE.getOne(S->
getType()), SE.getConstantMultiple(S), SE);
16255 return SE.getUMaxExpr(OneAlignedUp, S);
16262 if (
const SCEV *Rewritten = RewriteSubtraction(Expr))
16273 if (
const SCEV *Rewritten = RewriteSubtraction(
Add))
16274 return SE.getAddExpr(
16277 if (
const SCEV *S = Map.lookup(
Add))
16278 return SE.getAddExpr(Expr->
getOperand(0), S);
16290 : SE.getAddExpr(Operands,
16306 : SE.getMulExpr(Operands,
16312 if (RewriteMap.empty() && NotEqual.empty())
16315 SCEVLoopGuardRewriter
Rewriter(SE, *
this);
16316 return Rewriter.visit(Expr);
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
SmallPtrSet< const BasicBlock *, 8 > VisitedBlocks
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
static bool isSigned(unsigned Opcode)
This file defines a hash set that can be used to remove duplication of nodes in a graph.
Value * getPointer(Value *Ptr)
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This defines the Use class.
iv Induction Variable Users
static constexpr Value * getValue(Ty &ValueOrUse)
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
static constexpr unsigned SM(unsigned Version)
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
PowerPC Reduce CR logical Operation
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
const SmallVectorImpl< MachineOperand > & Cond
static DominatorTree getDomTree(Function &F)
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
SI optimize exec mask operations pre RA
static void visit(BasicBlock &Start, std::function< bool(BasicBlock *)> op)
This file provides utility classes that use RAII to save and restore values.
bool SCEVMinMaxExprContains(const SCEV *Root, const SCEV *OperandToFind, SCEVTypes RootKind)
static cl::opt< unsigned > MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden, cl::desc("Max coefficients in AddRec during evolving"), cl::init(8))
static cl::opt< unsigned > RangeIterThreshold("scev-range-iter-threshold", cl::Hidden, cl::desc("Threshold for switching to iteratively computing SCEV ranges"), cl::init(32))
static const Loop * isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI)
static unsigned getConstantTripCount(const SCEVConstant *ExitCount)
static int CompareValueComplexity(const LoopInfo *const LI, Value *LV, Value *RV, unsigned Depth)
Compare the two values LV and RV in terms of their "complexity" where "complexity" is a partial (and ...
static const SCEV * getNextSCEVDivisibleByDivisor(const SCEV *Expr, const APInt &DivisorVal, ScalarEvolution &SE)
static void PushLoopPHIs(const Loop *L, SmallVectorImpl< Instruction * > &Worklist, SmallPtrSetImpl< Instruction * > &Visited)
Push PHI nodes in the header of the given loop onto the given Worklist.
static void insertFoldCacheEntry(const ScalarEvolution::FoldID &ID, const SCEV *S, DenseMap< ScalarEvolution::FoldID, const SCEV * > &FoldCache, DenseMap< const SCEV *, SmallVector< ScalarEvolution::FoldID, 2 > > &FoldCacheUser)
static cl::opt< bool > ClassifyExpressions("scalar-evolution-classify-expressions", cl::Hidden, cl::init(true), cl::desc("When printing analysis, include information on every instruction"))
static bool hasHugeExpression(ArrayRef< SCEVUse > Ops)
Returns true if Ops contains a huge SCEV (the subtree of S contains at least HugeExprThreshold nodes)...
static bool CanConstantFold(const Instruction *I)
Return true if we can constant fold an instruction of the specified type, assuming that all operands ...
static cl::opt< unsigned > AddOpsInlineThreshold("scev-addops-inline-threshold", cl::Hidden, cl::desc("Threshold for inlining addition operands into a SCEV"), cl::init(500))
static cl::opt< unsigned > MaxLoopGuardCollectionDepth("scalar-evolution-max-loop-guard-collection-depth", cl::Hidden, cl::desc("Maximum depth for recursive loop guard collection"), cl::init(1))
static cl::opt< bool > VerifyIR("scev-verify-ir", cl::Hidden, cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"), cl::init(false))
static bool RangeRefPHIAllowedOperands(DominatorTree &DT, PHINode *PHI)
static const SCEV * getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty, ScalarEvolution *SE, unsigned Depth)
static std::optional< APInt > MinOptional(std::optional< APInt > X, std::optional< APInt > Y)
Helper function to compare optional APInts: (a) if X and Y both exist, return min(X,...
static cl::opt< unsigned > MulOpsInlineThreshold("scev-mulops-inline-threshold", cl::Hidden, cl::desc("Threshold for inlining multiplication operands into a SCEV"), cl::init(32))
static BinaryOperator * getCommonInstForPHI(PHINode *PN)
static bool isDivisibilityGuard(const SCEV *LHS, const SCEV *RHS, ScalarEvolution &SE)
static std::optional< const SCEV * > createNodeForSelectViaUMinSeq(ScalarEvolution *SE, const SCEV *CondExpr, const SCEV *TrueExpr, const SCEV *FalseExpr)
static Constant * BuildConstantFromSCEV(const SCEV *V)
This builds up a Constant using the ConstantExpr interface.
static ConstantInt * EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, ScalarEvolution &SE)
static const SCEV * BinomialCoefficient(const SCEV *It, unsigned K, ScalarEvolution &SE, Type *ResultTy)
Compute BC(It, K). The result has width W. Assume, K > 0.
static cl::opt< unsigned > MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden, cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"), cl::init(8))
static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr, const SCEV *Candidate)
Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values?
static PHINode * getConstantEvolvingPHI(Value *V, const Loop *L)
getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node in the loop that V is deri...
static const SCEV * SolveLinEquationWithOverflow(const APInt &A, const SCEV *B, SmallVectorImpl< const SCEVPredicate * > *Predicates, ScalarEvolution &SE, const Loop *L)
Finds the minimum unsigned root of the following equation:
static cl::opt< unsigned > MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden, cl::desc("Maximum number of iterations SCEV will " "symbolically execute a constant " "derived loop"), cl::init(100))
static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow)
static void PrintSCEVWithTypeHint(raw_ostream &OS, const SCEV *S)
When printing a top-level SCEV for trip counts, it's helpful to include a type for constants which ar...
static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, const Loop *L)
static SCEV::NoWrapFlags StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type, ArrayRef< SCEVUse > Ops, SCEV::NoWrapFlags Flags)
static bool containsConstantInAddMulChain(const SCEV *StartExpr)
Determine if any of the operands in this SCEV are a constant or if any of the add or multiply express...
static const SCEV * getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty, ScalarEvolution *SE, unsigned Depth)
static bool CollectAddOperandsWithScales(SmallDenseMap< SCEVUse, APInt, 16 > &M, SmallVectorImpl< SCEVUse > &NewOps, APInt &AccumulatedConstant, ArrayRef< SCEVUse > Ops, const APInt &Scale, ScalarEvolution &SE)
Process the given Ops list, which is a list of operands to be added under the given scale,...
static const SCEV * constantFoldAndGroupOps(ScalarEvolution &SE, LoopInfo &LI, DominatorTree &DT, SmallVectorImpl< SCEVUse > &Ops, FoldT Fold, IsIdentityT IsIdentity, IsAbsorberT IsAbsorber)
Performs a number of common optimizations on the passed Ops.
static cl::opt< unsigned > MaxPhiSCCAnalysisSize("scalar-evolution-max-scc-analysis-depth", cl::Hidden, cl::desc("Maximum amount of nodes to process while searching SCEVUnknown " "Phi strongly connected components"), cl::init(8))
static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
static void GroupByComplexity(SmallVectorImpl< SCEVUse > &Ops, LoopInfo *LI, DominatorTree &DT)
Given a list of SCEV objects, order them by their complexity, and group objects of the same complexit...
static bool collectDivisibilityInformation(ICmpInst::Predicate Predicate, const SCEV *LHS, const SCEV *RHS, DenseMap< const SCEV *, const SCEV * > &DivInfo, DenseMap< const SCEV *, APInt > &Multiples, ScalarEvolution &SE)
static cl::opt< unsigned > MaxSCEVOperationsImplicationDepth("scalar-evolution-max-scev-operations-implication-depth", cl::Hidden, cl::desc("Maximum depth of recursive SCEV operations implication analysis"), cl::init(2))
static void PushDefUseChildren(Instruction *I, SmallVectorImpl< Instruction * > &Worklist, SmallPtrSetImpl< Instruction * > &Visited)
Push users of the given Instruction onto the given Worklist.
static std::optional< APInt > SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec, const ConstantRange &Range, ScalarEvolution &SE)
Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n iterations.
static cl::opt< bool > UseContextForNoWrapFlagInference("scalar-evolution-use-context-for-no-wrap-flag-strenghening", cl::Hidden, cl::desc("Infer nuw/nsw flags using context where suitable"), cl::init(true))
static cl::opt< bool > EnableFiniteLoopControl("scalar-evolution-finite-loop", cl::Hidden, cl::desc("Handle <= and >= in finite loops"), cl::init(true))
static bool getOperandsForSelectLikePHI(DominatorTree &DT, PHINode *PN, Value *&Cond, Value *&LHS, Value *&RHS)
static std::optional< std::tuple< APInt, APInt, APInt, APInt, unsigned > > GetQuadraticEquation(const SCEVAddRecExpr *AddRec)
For a given quadratic addrec, generate coefficients of the corresponding quadratic equation,...
static bool isKnownPredicateExtendIdiom(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
static std::optional< BinaryOp > MatchBinaryOp(Value *V, const DataLayout &DL, AssumptionCache &AC, const DominatorTree &DT, const Instruction *CxtI)
Try to map V into a BinaryOp, and return std::nullopt on failure.
static std::optional< APInt > SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE)
Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n iterations.
static std::optional< APInt > TruncIfPossible(std::optional< APInt > X, unsigned BitWidth)
Helper function to truncate an optional APInt to a given BitWidth.
static cl::opt< unsigned > MaxSCEVCompareDepth("scalar-evolution-max-scev-compare-depth", cl::Hidden, cl::desc("Maximum depth of recursive SCEV complexity comparisons"), cl::init(32))
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE, const SCEVConstant *ConstantTerm, const SCEVAddExpr *WholeAddExpr)
static cl::opt< unsigned > MaxConstantEvolvingDepth("scalar-evolution-max-constant-evolving-depth", cl::Hidden, cl::desc("Maximum depth of recursive constant evolving"), cl::init(32))
static ConstantRange getRangeForAffineARHelper(APInt Step, const ConstantRange &StartRange, const APInt &MaxBECount, bool Signed)
static bool MatchBinarySub(const SCEV *S, SCEVUse &LHS, SCEVUse &RHS)
static std::optional< ConstantRange > GetRangeFromMetadata(Value *V)
Helper method to assign a range to V from metadata present in the IR.
static cl::opt< unsigned > HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden, cl::desc("Size of the expression which is considered huge"), cl::init(4096))
static Type * isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI, bool &Signed, ScalarEvolution &SE)
Helper function to createAddRecFromPHIWithCasts.
static Constant * EvaluateExpression(Value *V, const Loop *L, DenseMap< Instruction *, Constant * > &Vals, const DataLayout &DL, const TargetLibraryInfo *TLI)
EvaluateExpression - Given an expression that passes the getConstantEvolvingPHI predicate,...
static const SCEV * getPreviousSCEVDivisibleByDivisor(const SCEV *Expr, const APInt &DivisorVal, ScalarEvolution &SE)
static const SCEV * MatchNotExpr(const SCEV *Expr)
If Expr computes ~A, return A else return nullptr.
static cl::opt< unsigned > MaxValueCompareDepth("scalar-evolution-max-value-compare-depth", cl::Hidden, cl::desc("Maximum depth of recursive value complexity comparisons"), cl::init(2))
static const SCEV * applyDivisibilityOnMinMaxExpr(const SCEV *MinMaxExpr, APInt Divisor, ScalarEvolution &SE)
static cl::opt< bool, true > VerifySCEVOpt("verify-scev", cl::Hidden, cl::location(VerifySCEV), cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"))
static const SCEV * getSignedOverflowLimitForStep(const SCEV *Step, ICmpInst::Predicate *Pred, ScalarEvolution *SE)
static cl::opt< unsigned > MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden, cl::desc("Maximum depth of recursive arithmetics"), cl::init(32))
static bool HasSameValue(const SCEV *A, const SCEV *B)
SCEV structural equivalence is usually sufficient for testing whether two expressions are equal,...
static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow)
Compute the result of "n choose k", the binomial coefficient.
static std::optional< int > CompareSCEVComplexity(const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS, DominatorTree &DT, unsigned Depth=0)
static bool canConstantEvolve(Instruction *I, const Loop *L)
Determine whether this instruction can constant evolve within this loop assuming its operands can all...
static PHINode * getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L, DenseMap< Instruction *, PHINode * > &PHIMap, unsigned Depth)
getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by recursing through each instructi...
static bool scevUnconditionallyPropagatesPoisonFromOperands(SCEVTypes Kind)
static cl::opt< bool > VerifySCEVStrict("verify-scev-strict", cl::Hidden, cl::desc("Enable stricter verification with -verify-scev is passed"))
static Constant * getOtherIncomingValue(PHINode *PN, BasicBlock *BB)
static cl::opt< bool > UseExpensiveRangeSharpening("scalar-evolution-use-expensive-range-sharpening", cl::Hidden, cl::init(false), cl::desc("Use more powerful methods of sharpening expression ranges. May " "be costly in terms of compile time"))
static const SCEV * getUnsignedOverflowLimitForStep(const SCEV *Step, ICmpInst::Predicate *Pred, ScalarEvolution *SE)
static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Is LHS Pred RHS true on the virtue of LHS or RHS being a Min or Max expression?
static bool BrPHIToSelect(DominatorTree &DT, CondBrInst *BI, PHINode *Merge, Value *&C, Value *&LHS, Value *&RHS)
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
static bool InBlock(const Value *V, const BasicBlock *BB)
Provides some synthesis utilities to produce sequences of values.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static SymbolRef::Type getType(const Symbol *Sym)
LocallyHashedType DenseMapInfo< LocallyHashedType >::Empty
static std::optional< bool > isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS, const Value *ARHS, const Value *BLHS, const Value *BRHS)
Return true if "icmp Pred BLHS BRHS" is true whenever "icmp PredALHS ARHS" is true.
Virtual Register Rewriter
static const uint32_t IV[8]
SCEVCastSinkingRewriter(ScalarEvolution &SE, Type *TargetTy, ConversionFn CreatePtrCast)
static const SCEV * rewrite(const SCEV *Scev, ScalarEvolution &SE, Type *TargetTy, ConversionFn CreatePtrCast)
const SCEV * visitUnknown(const SCEVUnknown *Expr)
const SCEV * visitMulExpr(const SCEVMulExpr *Expr)
const SCEV * visitAddExpr(const SCEVAddExpr *Expr)
const SCEV * visit(const SCEV *S)
Class for arbitrary precision integers.
LLVM_ABI APInt umul_ov(const APInt &RHS, bool &Overflow) const
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
LLVM_ABI APInt getHiBits(unsigned numBits) const
Compute an APInt containing numBits highbits from this APInt.
unsigned getActiveBits() const
Compute the number of active bits in the value.
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
APInt abs() const
Get the absolute value.
bool sgt(const APInt &RHS) const
Signed greater than comparison.
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
bool isSignMask() const
Check if the APInt's value is returned by getSignMask.
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
static APInt getMinValue(unsigned numBits)
Gets minimum unsigned value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
bool sle(const APInt &RHS) const
Signed less or equal comparison.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
bool isNonPositive() const
Determine if this APInt Value is non-positive (<= 0).
unsigned countTrailingZeros() const
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
unsigned logBase2() const
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
LLVM_ABI APInt multiplicativeInverse() const
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
LLVM_ABI APInt sext(unsigned width) const
Sign extend to a new width.
APInt shl(unsigned shiftAmt) const
Left-shift function.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
bool isSignBitSet() const
Determine if sign bit of this APInt is set.
bool slt(const APInt &RHS) const
Signed less than comparison.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
This templated class represents "all analyses that operate over <aparticular IR unit>" (e....
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
AnalysisUsage & addRequiredTransitive()
Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
Get the array size.
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
MutableArrayRef< WeakVH > assumptions()
Access the list of assumption handles currently tracked for this function.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
const Function * getParent() const
Return the enclosing method, or null if none.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Instruction & front() const
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
LLVM_ABI unsigned getNoWrapKind() const
Returns one of OBO::NoSignedWrap or OBO::NoUnsignedWrap.
LLVM_ABI Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
BinaryOps getOpcode() const
This class represents a function call, abstracting a target machine's calling convention.
virtual void deleted()
Callback for Value destruction.
bool isFalseWhenEqual() const
This is just a convenience.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ ICMP_ULT
unsigned less than
@ ICMP_SGE
signed greater or equal
@ ICMP_ULE
unsigned less or equal
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
bool isTrueWhenEqual() const
This is just a convenience.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static LLVM_ABI std::optional< CmpPredicate > getMatching(CmpPredicate A, CmpPredicate B)
Compares two CmpPredicates taking samesign into account and returns the canonicalized CmpPredicate if...
LLVM_ABI CmpInst::Predicate getPreferredSignedPredicate() const
Attempts to return a signed CmpInst::Predicate from the CmpPredicate.
CmpInst::Predicate dropSameSign() const
Drops samesign information.
Conditional Branch instruction.
Value * getCondition() const
BasicBlock * getSuccessor(unsigned i) const
static LLVM_ABI Constant * getNot(Constant *C)
static Constant * getPtrAdd(Constant *Ptr, Constant *Offset, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReduced=nullptr)
Create a getelementptr i8, ptr, offset constant expression.
static LLVM_ABI Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getPtrToAddr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)
static LLVM_ABI Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
This is the shared class of boolean and integer constants.
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
This class represents a range of values.
LLVM_ABI ConstantRange add(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an addition of a value in this ran...
LLVM_ABI ConstantRange zextOrTrunc(uint32_t BitWidth) const
Make this range have the bit width given by BitWidth.
PreferredRangeType
If represented precisely, the result of some range operations may consist of multiple disjoint ranges...
LLVM_ABI bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
const APInt & getLower() const
Return the lower value for this range.
LLVM_ABI ConstantRange urem(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an unsigned remainder operation of...
LLVM_ABI bool isFullSet() const
Return true if this set contains all of the elements possible for this data-type.
LLVM_ABI bool icmp(CmpInst::Predicate Pred, const ConstantRange &Other) const
Does the predicate Pred hold between ranges this and Other?
LLVM_ABI bool isEmptySet() const
Return true if this set contains no members.
LLVM_ABI ConstantRange zeroExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
LLVM_ABI bool isSignWrappedSet() const
Return true if this set wraps around the signed domain.
LLVM_ABI APInt getSignedMin() const
Return the smallest signed value contained in the ConstantRange.
LLVM_ABI bool isWrappedSet() const
Return true if this set wraps around the unsigned domain.
LLVM_ABI void print(raw_ostream &OS) const
Print out the bounds to a stream.
LLVM_ABI ConstantRange truncate(uint32_t BitWidth, unsigned NoWrapKind=0) const
Return a new range in the specified integer type, which must be strictly smaller than the current typ...
LLVM_ABI ConstantRange signExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
const APInt & getUpper() const
Return the upper value for this range.
LLVM_ABI ConstantRange unionWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the union of this range with another range.
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
LLVM_ABI APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
LLVM_ABI APInt getSignedMax() const
Return the largest signed value contained in the ConstantRange.
static ConstantRange getNonEmpty(APInt Lower, APInt Upper)
Create non-empty constant range with the given bounds.
static LLVM_ABI ConstantRange makeGuaranteedNoWrapRegion(Instruction::BinaryOps BinOp, const ConstantRange &Other, unsigned NoWrapKind)
Produce the largest range containing all X such that "X BinOp Y" is guaranteed not to wrap (overflow)...
LLVM_ABI unsigned getMinSignedBits() const
Compute the maximal number of bits needed to represent every value in this signed range.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
LLVM_ABI ConstantRange sub(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a subtraction of a value in this r...
LLVM_ABI ConstantRange sextOrTrunc(uint32_t BitWidth) const
Make this range have the bit width given by BitWidth.
static LLVM_ABI ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
This is an important base class in LLVM.
A parsed version of the target data layout string in and methods for querying it.
LLVM_ABI const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
LLVM_ABI IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
LLVM_ABI unsigned getIndexTypeSizeInBits(Type *Ty) const
The size in bits of the index used in GEP calculation for this type.
LLVM_ABI IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of a GEP index in AddressSpace.
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
ValueT lookup(const_arg_type_t< KeyT > Val) const
Return the entry for the specified key, or a default constructed value if no such entry exists.
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
iterator find_as(const LookupKeyT &Val)
Alternate version of find() which allows a different, and possibly less expensive,...
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Analysis pass which computes a DominatorTree.
Legacy analysis pass which computes a DominatorTree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
LLVM_ABI bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
This class describes a reference to an interned FoldingSetNodeID, which can be a useful to store node...
This class is used to gather all the unique data bits of a node.
Represents flags for the getelementptr instruction/expression.
bool hasNoUnsignedSignedWrap() const
bool hasNoUnsignedWrap() const
static GEPNoWrapFlags none()
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
Module * getParent()
Get the module that this global value is contained inside of...
static bool isPrivateLinkage(LinkageTypes Linkage)
static bool isInternalLinkage(LinkageTypes Linkage)
This instruction compares its operands according to the predicate given to the constructor.
CmpPredicate getCmpPredicate() const
static bool isGE(Predicate P)
Return true if the predicate is SGE or UGE.
CmpPredicate getSwappedCmpPredicate() const
static LLVM_ABI bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
static bool isLT(Predicate P)
Return true if the predicate is SLT or ULT.
CmpPredicate getInverseCmpPredicate() const
Predicate getNonStrictCmpPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
static bool isGT(Predicate P)
Return true if the predicate is SGT or UGT.
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
static CmpPredicate getInverseCmpPredicate(CmpPredicate Pred)
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
static bool isLE(Predicate P)
Return true if the predicate is SLE or ULE.
LLVM_ABI bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
LLVM_ABI bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
LLVM_ABI bool isIdenticalToWhenDefined(const Instruction *I, bool IntersectAttrs=false) const LLVM_READONLY
This is like isIdenticalTo, except that it ignores the SubclassOptionalData flags,...
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A helper class to return the specified delimiter string after the first invocation of operator String...
An instruction for reading from memory.
Analysis pass that exposes the LoopInfo for a function.
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
BlockT * getHeader() const
unsigned getLoopDepth() const
Return the nesting level of this loop.
BlockT * getLoopPredecessor() const
If the given loop's header has exactly one unique predecessor outside the loop, return it.
LoopT * getParentLoop() const
Return the parent loop if it exists or nullptr for top level loops.
unsigned getLoopDepth(const BlockT *BB) const
Return the loop nesting level of the specified block.
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
The legacy pass manager's analysis pass to compute loop information.
Represents a single loop in the control flow graph.
bool isLoopInvariant(const Value *V) const
Return true if the specified value is loop invariant.
A Module instance is used to store all the information related to an LLVM module.
unsigned getOpcode() const
Return the opcode for this Instruction or ConstantExpr.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
iterator_range< const_block_iterator > blocks() const
op_range incoming_values()
Value * getIncomingValueForBlock(const BasicBlock *BB) const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
AnalysisType & getAnalysis() const
getAnalysis<AnalysisType>() - This function is used by subclasses to get to the analysis information ...
PointerIntPair - This class implements a pair of a pointer and small integer.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
LLVM_ABI void addPredicate(const SCEVPredicate &Pred)
Adds a new predicate.
LLVM_ABI const SCEVPredicate & getPredicate() const
LLVM_ABI const SCEV * getPredicatedSCEV(const SCEV *Expr)
Returns the rewritten SCEV for Expr in the context of the current SCEV predicate.
LLVM_ABI bool hasNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Returns true if we've proved that V doesn't wrap by means of a SCEV predicate.
LLVM_ABI void setNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Proves that V doesn't overflow by adding SCEV predicate.
LLVM_ABI void print(raw_ostream &OS, unsigned Depth) const
Print the SCEV mappings done by the Predicated Scalar Evolution.
LLVM_ABI bool areAddRecsEqualWithPreds(const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const
Check if AR1 and AR2 are equal, while taking into account Equal predicates in Preds.
LLVM_ABI PredicatedScalarEvolution(ScalarEvolution &SE, Loop &L)
LLVM_ABI const SCEVAddRecExpr * getAsAddRec(Value *V)
Attempts to produce an AddRecExpr for V by adding additional SCEV predicates.
LLVM_ABI unsigned getSmallConstantMaxTripCount()
Returns the upper bound of the loop trip count as a normal unsigned value, or 0 if the trip count is ...
LLVM_ABI const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
LLVM_ABI const SCEV * getSymbolicMaxBackedgeTakenCount()
Get the (predicated) symbolic max backedge count for the analyzed loop.
LLVM_ABI const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
PreservedAnalysisChecker getChecker() const
Build a checker for this PreservedAnalyses and the specified analysis type.
constexpr bool isValid() const
This node represents an addition of some number of SCEVs.
This node represents a polynomial recurrence on the trip count of the specified loop.
friend class ScalarEvolution
LLVM_ABI const SCEV * evaluateAtIteration(const SCEV *It, ScalarEvolution &SE) const
Return the value of this chain of recurrences at the specified iteration number.
void setNoWrapFlags(NoWrapFlags Flags)
Set flags for a recurrence without clearing any previously set flags.
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values.
bool isQuadratic() const
Return true if this represents an expression A + B*x + C*x^2 where A, B and C are loop invariant valu...
LLVM_ABI const SCEV * getNumIterationsInRange(const ConstantRange &Range, ScalarEvolution &SE) const
Return the number of iterations of this loop that produce values in the specified constant range.
LLVM_ABI const SCEVAddRecExpr * getPostIncExpr(ScalarEvolution &SE) const
Return an expression representing the value of this expression one iteration of the loop ahead.
const Loop * getLoop() const
SCEVUse getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
This is the base class for unary cast operator classes.
SCEVUse getOperand() const
LLVM_ABI SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, SCEVUse op, Type *ty)
void setNoWrapFlags(NoWrapFlags Flags)
Set flags for a non-recurrence without clearing previously set flags.
This class represents an assumption that the expression LHS Pred RHS evaluates to true,...
SCEVComparePredicate(const FoldingSetNodeIDRef ID, const ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
bool isAlwaysTrue() const override
Returns true if the predicate is always true.
void print(raw_ostream &OS, unsigned Depth=0) const override
Prints a textual representation of this predicate with an indentation of Depth.
bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const override
Implementation of the SCEVPredicate interface.
This class represents a constant integer value.
ConstantInt * getValue() const
const APInt & getAPInt() const
This is the base class for unary integral cast operator classes.
LLVM_ABI SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, SCEVUse op, Type *ty)
This node is the base class min/max selections.
static enum SCEVTypes negate(enum SCEVTypes T)
This node represents multiplication of some number of SCEVs.
This node is a base class providing common functionality for n'ary operators.
bool hasNoUnsignedWrap() const
ArrayRef< SCEVUse > operands() const
bool hasNoSelfWrap() const
size_t getNumOperands() const
bool hasNoSignedWrap() const
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask=NoWrapMask) const
SCEVUse getOperand(unsigned i) const
This class represents an assumption made using SCEV expressions which can be checked at run-time.
SCEVPredicate(const SCEVPredicate &)=default
virtual bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const =0
Returns true if this predicate implies N.
This class represents a cast from a pointer to a pointer-sized integer value, without capturing the p...
This class represents a cast from a pointer to a pointer-sized integer value.
This visitor recursively visits a SCEV expression and re-writes it.
const SCEV * visitSignExtendExpr(const SCEVSignExtendExpr *Expr)
const SCEV * visit(const SCEV *S)
const SCEV * visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr)
const SCEV * visitSMinExpr(const SCEVSMinExpr *Expr)
SCEVRewriteVisitor(ScalarEvolution &SE)
const SCEV * visitUMinExpr(const SCEVUMinExpr *Expr)
This class represents a signed minimum selection.
This node is the base class for sequential/in-order min/max selections.
static SCEVTypes getEquivalentNonSequentialSCEVType(SCEVTypes Ty)
This class represents a sign extension of a small integer value to a larger integer value.
Visit all nodes in the expression tree using worklist traversal.
This class represents a truncation of an integer value to a smaller integer value.
This class represents a binary unsigned division operation.
This class represents an unsigned minimum selection.
This class represents a composition of other SCEV predicates, and is the class that most clients will...
void print(raw_ostream &OS, unsigned Depth) const override
Prints a textual representation of this predicate with an indentation of Depth.
bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const override
Returns true if this predicate implies N.
SCEVUnionPredicate(ArrayRef< const SCEVPredicate * > Preds, ScalarEvolution &SE)
Union predicates don't get cached so create a dummy set ID for it.
bool isAlwaysTrue() const override
Implementation of the SCEVPredicate interface.
This means that we are dealing with an entirely unknown SCEV value, and only represent it as its LLVM...
This class represents the value of vscale, as used when defining the length of a scalable vector or r...
This class represents an assumption made on an AddRec expression.
IncrementWrapFlags
Similar to SCEV::NoWrapFlags, but with slightly different semantics for FlagNUSW.
SCEVWrapPredicate(const FoldingSetNodeIDRef ID, const SCEVAddRecExpr *AR, IncrementWrapFlags Flags)
bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const override
Returns true if this predicate implies N.
static SCEVWrapPredicate::IncrementWrapFlags setFlags(SCEVWrapPredicate::IncrementWrapFlags Flags, SCEVWrapPredicate::IncrementWrapFlags OnFlags)
void print(raw_ostream &OS, unsigned Depth=0) const override
Prints a textual representation of this predicate with an indentation of Depth.
bool isAlwaysTrue() const override
Returns true if the predicate is always true.
const SCEVAddRecExpr * getExpr() const
Implementation of the SCEVPredicate interface.
static SCEVWrapPredicate::IncrementWrapFlags clearFlags(SCEVWrapPredicate::IncrementWrapFlags Flags, SCEVWrapPredicate::IncrementWrapFlags OffFlags)
Convenient IncrementWrapFlags manipulation methods.
static SCEVWrapPredicate::IncrementWrapFlags getImpliedFlags(const SCEVAddRecExpr *AR, ScalarEvolution &SE)
Returns the set of SCEVWrapPredicate no wrap flags implied by a SCEVAddRecExpr.
IncrementWrapFlags getFlags() const
Returns the set assumed no overflow flags.
This class represents a zero extension of a small integer value to a larger integer value.
This class represents an analyzed expression in the program.
unsigned short getExpressionSize() const
SCEVNoWrapFlags NoWrapFlags
LLVM_ABI bool isOne() const
Return true if the expression is a constant one.
static constexpr auto FlagNUW
LLVM_ABI void computeAndSetCanonical(ScalarEvolution &SE)
Compute and set the canonical SCEV, by constructing a SCEV with the same operands,...
LLVM_ABI bool isZero() const
Return true if the expression is a constant zero.
const SCEV * CanonicalSCEV
Pointer to the canonical version of the SCEV, i.e.
static constexpr auto FlagAnyWrap
LLVM_ABI void dump() const
This method is used for debugging.
LLVM_ABI bool isAllOnesValue() const
Return true if the expression is a constant all-ones value.
LLVM_ABI bool isNonConstantNegative() const
Return true if the specified scev is negated, but not a constant.
static constexpr auto FlagNSW
LLVM_ABI ArrayRef< SCEVUse > operands() const
Return operands of this SCEV expression.
LLVM_ABI void print(raw_ostream &OS) const
Print out the internal representation of this scalar to the specified stream.
SCEV(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, unsigned short ExpressionSize)
SCEVTypes getSCEVType() const
static constexpr auto FlagNW
LLVM_ABI Type * getType() const
Return the LLVM type of this SCEV expression.
Analysis pass that exposes the ScalarEvolution for a function.
LLVM_ABI ScalarEvolution run(Function &F, FunctionAnalysisManager &AM)
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
void print(raw_ostream &OS, const Module *=nullptr) const override
print - Print out the internal state of the pass.
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass.
void releaseMemory() override
releaseMemory() - This member can be implemented by a pass if it wants to be able to release its memo...
void verifyAnalysis() const override
verifyAnalysis() - This member can be implemented by a analysis pass to check state of analysis infor...
ScalarEvolutionWrapperPass()
static LLVM_ABI LoopGuards collect(const Loop *L, ScalarEvolution &SE)
Collect rewrite map for loop guards for loop L, together with flags indicating if NUW and NSW can be ...
LLVM_ABI const SCEV * rewrite(const SCEV *Expr) const
Try to apply the collected loop guards to Expr.
The main scalar evolution driver.
LLVM_ABI const SCEV * getUDivExpr(SCEVUse LHS, SCEVUse RHS)
Get a canonical unsigned division expression, or something simpler if possible.
const SCEV * getConstantMaxBackedgeTakenCount(const Loop *L)
When successful, this returns a SCEVConstant that is greater than or equal to (i.e.
static bool hasFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags TestFlags)
const DataLayout & getDataLayout() const
Return the DataLayout associated with the module this SCEV instance is operating on.
LLVM_ABI bool isKnownNonNegative(const SCEV *S)
Test if the given expression is known to be non-negative.
LLVM_ABI bool isKnownOnEveryIteration(CmpPredicate Pred, const SCEVAddRecExpr *LHS, const SCEV *RHS)
Test if the condition described by Pred, LHS, RHS is known to be true on every iteration of the loop ...
LLVM_ABI const SCEV * getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
Return the SCEV object corresponding to -V.
LLVM_ABI std::optional< LoopInvariantPredicate > getLoopInvariantExitCondDuringFirstIterationsImpl(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI, const SCEV *MaxIter)
LLVM_ABI const SCEV * getUDivCeilSCEV(const SCEV *N, const SCEV *D)
Compute ceil(N / D).
LLVM_ABI std::optional< LoopInvariantPredicate > getLoopInvariantExitCondDuringFirstIterations(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI, const SCEV *MaxIter)
If the result of the predicate LHS Pred RHS is loop invariant with respect to L at given Context duri...
LLVM_ABI Type * getWiderType(Type *Ty1, Type *Ty2) const
LLVM_ABI const SCEV * getAbsExpr(const SCEV *Op, bool IsNSW)
LLVM_ABI bool isKnownNonPositive(const SCEV *S)
Test if the given expression is known to be non-positive.
LLVM_ABI bool isKnownNegative(const SCEV *S)
Test if the given expression is known to be negative.
LLVM_ABI const SCEV * getPredicatedConstantMaxBackedgeTakenCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Predicates)
Similar to getConstantMaxBackedgeTakenCount, except it will add a set of SCEV predicates to Predicate...
LLVM_ABI const SCEV * removePointerBase(const SCEV *S)
Compute an expression equivalent to S - getPointerBase(S).
LLVM_ABI bool isLoopEntryGuardedByCond(const Loop *L, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether entry to the loop is protected by a conditional between LHS and RHS.
LLVM_ABI bool isKnownNonZero(const SCEV *S)
Test if the given expression is known to be non-zero.
LLVM_ABI const SCEV * getURemExpr(SCEVUse LHS, SCEVUse RHS)
Represents an unsigned remainder expression based on unsigned division.
LLVM_ABI const SCEV * getSCEVAtScope(const SCEV *S, const Loop *L)
Return a SCEV expression for the specified value at the specified scope in the program.
LLVM_ABI const SCEV * getBackedgeTakenCount(const Loop *L, ExitCountKind Kind=Exact)
If the specified loop has a predictable backedge-taken count, return it, otherwise return a SCEVCould...
LLVM_ABI const SCEV * getSMinExpr(SCEVUse LHS, SCEVUse RHS)
LLVM_ABI void setNoWrapFlags(SCEVAddRecExpr *AddRec, SCEV::NoWrapFlags Flags)
Update no-wrap flags of an AddRec.
LLVM_ABI const SCEV * getUMaxFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS)
Promote the operands to the wider of the types using zero-extension, and then perform a umax operatio...
const SCEV * getZero(Type *Ty)
Return a SCEV for the constant 0 of a specific type.
LLVM_ABI bool willNotOverflow(Instruction::BinaryOps BinOp, bool Signed, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI=nullptr)
Does the operation BinOp between LHS and RHS provably not have a signed/unsigned overflow (Signed)?
LLVM_ABI ExitLimit computeExitLimitFromCond(const Loop *L, Value *ExitCond, bool ExitIfTrue, bool ControlsOnlyExit, bool AllowPredicates=false)
Compute the number of times the backedge of the specified loop will execute if its exit condition wer...
LLVM_ABI const SCEV * getZeroExtendExprImpl(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI const SCEV * getMinMaxExpr(SCEVTypes Kind, SmallVectorImpl< SCEVUse > &Operands)
LLVM_ABI const SCEVPredicate * getEqualPredicate(const SCEV *LHS, const SCEV *RHS)
LLVM_ABI unsigned getSmallConstantTripMultiple(const Loop *L, const SCEV *ExitCount)
Returns the largest constant divisor of the trip count as a normal unsigned value,...
LLVM_ABI uint64_t getTypeSizeInBits(Type *Ty) const
Return the size in bits of the specified type, for which isSCEVable must return true.
LLVM_ABI const SCEV * getConstant(ConstantInt *V)
LLVM_ABI const SCEV * getPredicatedBackedgeTakenCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Predicates)
Similar to getBackedgeTakenCount, except it will add a set of SCEV predicates to Predicates that are ...
LLVM_ABI const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
LLVM_ABI const SCEV * getMinusSCEV(SCEVUse LHS, SCEVUse RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS.
ConstantRange getSignedRange(const SCEV *S)
Determine the signed range for a particular SCEV.
LLVM_ABI const SCEV * getAddRecExpr(SCEVUse Start, SCEVUse Step, const Loop *L, SCEV::NoWrapFlags Flags)
Get an add recurrence expression for the specified loop.
LLVM_ABI const SCEV * getNoopOrSignExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
bool loopHasNoAbnormalExits(const Loop *L)
Return true if the loop has no abnormal exits.
LLVM_ABI const SCEV * getTripCountFromExitCount(const SCEV *ExitCount)
A version of getTripCountFromExitCount below which always picks an evaluation type which can not resu...
LLVM_ABI ScalarEvolution(Function &F, TargetLibraryInfo &TLI, AssumptionCache &AC, DominatorTree &DT, LoopInfo &LI)
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
LLVM_ABI const SCEV * getTruncateOrNoop(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI const SCEV * getLosslessPtrToIntExpr(const SCEV *Op)
LLVM_ABI const SCEV * getCastExpr(SCEVTypes Kind, const SCEV *Op, Type *Ty)
LLVM_ABI const SCEV * getSequentialMinMaxExpr(SCEVTypes Kind, SmallVectorImpl< SCEVUse > &Operands)
LLVM_ABI std::optional< bool > evaluatePredicateAt(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI)
Check whether the condition described by Pred, LHS, and RHS is true or false in the given Context.
LLVM_ABI unsigned getSmallConstantMaxTripCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > *Predicates=nullptr)
Returns the upper bound of the loop trip count as a normal unsigned value.
LLVM_ABI const SCEV * getPtrToIntExpr(const SCEV *Op, Type *Ty)
LLVM_ABI bool isBackedgeTakenCountMaxOrZero(const Loop *L)
Return true if the backedge taken count is either the value returned by getConstantMaxBackedgeTakenCo...
LLVM_ABI void forgetLoop(const Loop *L)
This method should be called by the client when it has changed a loop in a way that may effect Scalar...
LLVM_ABI bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
LLVM_ABI bool isKnownPositive(const SCEV *S)
Test if the given expression is known to be positive.
LLVM_ABI bool SimplifyICmpOperands(CmpPredicate &Pred, SCEVUse &LHS, SCEVUse &RHS, unsigned Depth=0)
Simplify LHS and RHS in a comparison with predicate Pred.
APInt getUnsignedRangeMin(const SCEV *S)
Determine the min of the unsigned range for a particular SCEV.
LLVM_ABI const SCEV * getOffsetOfExpr(Type *IntTy, StructType *STy, unsigned FieldNo)
Return an expression for offsetof on the given field with type IntTy.
LLVM_ABI LoopDisposition getLoopDisposition(const SCEV *S, const Loop *L)
Return the "disposition" of the given SCEV with respect to the given loop.
LLVM_ABI bool containsAddRecurrence(const SCEV *S)
Return true if the SCEV is a scAddRecExpr or it contains scAddRecExpr.
LLVM_ABI const SCEV * getSignExtendExprImpl(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI bool hasOperand(const SCEV *S, const SCEV *Op) const
Test whether the given SCEV has Op as a direct or indirect operand.
LLVM_ABI const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI bool isSCEVable(Type *Ty) const
Test if values of the given type are analyzable within the SCEV framework.
LLVM_ABI Type * getEffectiveSCEVType(Type *Ty) const
Return a type with the same bitwidth as the given type and which represents how SCEV will treat the g...
LLVM_ABI const SCEVPredicate * getComparePredicate(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
LLVM_ABI bool haveSameSign(const SCEV *S1, const SCEV *S2)
Return true if we know that S1 and S2 must have the same sign.
LLVM_ABI const SCEV * getNotSCEV(const SCEV *V)
Return the SCEV object corresponding to ~V.
LLVM_ABI const SCEV * getElementCount(Type *Ty, ElementCount EC, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
LLVM_ABI bool instructionCouldExistWithOperands(const SCEV *A, const SCEV *B)
Return true if there exists a point in the program at which both A and B could be operands to the sam...
ConstantRange getUnsignedRange(const SCEV *S)
Determine the unsigned range for a particular SCEV.
LLVM_ABI void print(raw_ostream &OS) const
LLVM_ABI const SCEV * getPredicatedExitCount(const Loop *L, const BasicBlock *ExitingBlock, SmallVectorImpl< const SCEVPredicate * > *Predicates, ExitCountKind Kind=Exact)
Same as above except this uses the predicated backedge taken info and may require predicates.
static SCEV::NoWrapFlags clearFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OffFlags)
LLVM_ABI void forgetTopmostLoop(const Loop *L)
LLVM_ABI void forgetValue(Value *V)
This method should be called by the client when it has changed a value in a way that may effect its v...
APInt getSignedRangeMin(const SCEV *S)
Determine the min of the signed range for a particular SCEV.
LLVM_ABI const SCEV * getNoopOrAnyExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI void forgetBlockAndLoopDispositions(Value *V=nullptr)
Called when the client has changed the disposition of values in a loop or block.
LLVM_ABI const SCEV * getTruncateExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI const SCEV * getUMaxExpr(SCEVUse LHS, SCEVUse RHS)
static SCEV::NoWrapFlags maskFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags Mask)
Convenient NoWrapFlags manipulation.
@ MonotonicallyDecreasing
@ MonotonicallyIncreasing
LLVM_ABI std::optional< LoopInvariantPredicate > getLoopInvariantPredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI=nullptr)
If the result of the predicate LHS Pred RHS is loop invariant with respect to L, return a LoopInvaria...
LLVM_ABI const SCEV * getStoreSizeOfExpr(Type *IntTy, Type *StoreTy)
Return an expression for the store size of StoreTy that is type IntTy.
LLVM_ABI const SCEVPredicate * getWrapPredicate(const SCEVAddRecExpr *AR, SCEVWrapPredicate::IncrementWrapFlags AddedFlags)
LLVM_ABI bool isLoopBackedgeGuardedByCond(const Loop *L, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether the backedge of the loop is protected by a conditional between LHS and RHS.
LLVM_ABI APInt getNonZeroConstantMultiple(const SCEV *S)
const SCEV * getMinusOne(Type *Ty)
Return a SCEV for the constant -1 of a specific type.
static SCEV::NoWrapFlags setFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OnFlags)
LLVM_ABI bool hasLoopInvariantBackedgeTakenCount(const Loop *L)
Return true if the specified loop has an analyzable loop-invariant backedge-taken count.
LLVM_ABI BlockDisposition getBlockDisposition(const SCEV *S, const BasicBlock *BB)
Return the "disposition" of the given SCEV with respect to the given block.
LLVM_ABI const SCEV * getNoopOrZeroExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)
LLVM_ABI const SCEV * getUMinFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS, bool Sequential=false)
Promote the operands to the wider of the types using zero-extension, and then perform a umin operatio...
LLVM_ABI bool loopIsFiniteByAssumption(const Loop *L)
Return true if this loop is finite by assumption.
LLVM_ABI const SCEV * getExistingSCEV(Value *V)
Return an existing SCEV for V if there is one, otherwise return nullptr.
LLVM_ABI APInt getConstantMultiple(const SCEV *S, const Instruction *CtxI=nullptr)
Returns the max constant multiple of S.
LoopDisposition
An enum describing the relationship between a SCEV and a loop.
@ LoopComputable
The SCEV varies predictably with the loop.
@ LoopVariant
The SCEV is loop-variant (unknown).
@ LoopInvariant
The SCEV is loop-invariant.
friend class SCEVCallbackVH
LLVM_ABI bool isKnownMultipleOf(const SCEV *S, uint64_t M, SmallVectorImpl< const SCEVPredicate * > &Assumptions)
Check that S is a multiple of M.
LLVM_ABI const SCEV * getAnyExtendExpr(const SCEV *Op, Type *Ty)
getAnyExtendExpr - Return a SCEV for the given operand extended with unspecified bits out to the give...
LLVM_ABI bool isKnownToBeAPowerOfTwo(const SCEV *S, bool OrZero=false, bool OrNegative=false)
Test if the given expression is known to be a power of 2.
LLVM_ABI std::optional< SCEV::NoWrapFlags > getStrengthenedNoWrapFlagsFromBinOp(const OverflowingBinaryOperator *OBO)
Parse NSW/NUW flags from add/sub/mul IR binary operation Op into SCEV no-wrap flags,...
LLVM_ABI void forgetLcssaPhiWithNewPredecessor(Loop *L, PHINode *V)
Forget LCSSA phi node V of loop L to which a new predecessor was added, such that it may no longer be...
LLVM_ABI bool containsUndefs(const SCEV *S) const
Return true if the SCEV expression contains an undef value.
LLVM_ABI std::optional< MonotonicPredicateType > getMonotonicPredicateType(const SCEVAddRecExpr *LHS, ICmpInst::Predicate Pred)
If, for all loop invariant X, the predicate "LHS `Pred` X" is monotonically increasing or decreasing,...
LLVM_ABI const SCEV * getCouldNotCompute()
LLVM_ABI const SCEV * getMulExpr(SmallVectorImpl< SCEVUse > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
LLVM_ABI bool isAvailableAtLoopEntry(const SCEV *S, const Loop *L)
Determine if the SCEV can be evaluated at loop's entry.
LLVM_ABI uint32_t getMinTrailingZeros(const SCEV *S, const Instruction *CtxI=nullptr)
Determine the minimum number of zero bits that S is guaranteed to end in (at every loop iteration).
BlockDisposition
An enum describing the relationship between a SCEV and a basic block.
@ DominatesBlock
The SCEV dominates the block.
@ ProperlyDominatesBlock
The SCEV properly dominates the block.
@ DoesNotDominateBlock
The SCEV does not dominate the block.
LLVM_ABI const SCEV * getExitCount(const Loop *L, const BasicBlock *ExitingBlock, ExitCountKind Kind=Exact)
Return the number of times the backedge executes before the given exit would be taken; if not exactly...
LLVM_ABI const SCEV * getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI void getPoisonGeneratingValues(SmallPtrSetImpl< const Value * > &Result, const SCEV *S)
Return the set of Values that, if poison, will definitively result in S being poison as well.
LLVM_ABI void forgetLoopDispositions()
Called when the client has changed the disposition of values in this loop.
LLVM_ABI const SCEV * getVScale(Type *Ty)
LLVM_ABI unsigned getSmallConstantTripCount(const Loop *L)
Returns the exact trip count of the loop if we can compute it, and the result is a small constant.
LLVM_ABI bool hasComputableLoopEvolution(const SCEV *S, const Loop *L)
Return true if the given SCEV changes value in a known way in the specified loop.
LLVM_ABI const SCEV * getPointerBase(const SCEV *V)
Transitively follow the chain of pointer-type operands until reaching a SCEV that does not have a sin...
LLVM_ABI void forgetAllLoops()
LLVM_ABI bool dominates(const SCEV *S, const BasicBlock *BB)
Return true if the elements that make up the given SCEV dominate the specified basic block.
APInt getUnsignedRangeMax(const SCEV *S)
Determine the max of the unsigned range for a particular SCEV.
LLVM_ABI const SCEV * getAddExpr(SmallVectorImpl< SCEVUse > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
ExitCountKind
The terms "backedge taken count" and "exit count" are used interchangeably to refer to the number of ...
@ SymbolicMaximum
An expression which provides an upper bound on the exact trip count.
@ ConstantMaximum
A constant which provides an upper bound on the exact trip count.
@ Exact
An expression exactly describing the number of times the backedge has executed when a loop is exited.
LLVM_ABI bool isKnownPredicate(CmpPredicate Pred, SCEVUse LHS, SCEVUse RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
LLVM_ABI const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
LLVM_ABI const SCEV * getPtrToAddrExpr(const SCEV *Op)
LLVM_ABI const SCEVAddRecExpr * convertSCEVToAddRecWithPredicates(const SCEV *S, const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Preds)
Tries to convert the S expression to an AddRec expression, adding additional predicates to Preds as r...
LLVM_ABI const SCEV * getSMaxExpr(SCEVUse LHS, SCEVUse RHS)
LLVM_ABI const SCEV * getElementSize(Instruction *Inst)
Return the size of an element read or written by Inst.
LLVM_ABI const SCEV * getSizeOfExpr(Type *IntTy, TypeSize Size)
Return an expression for a TypeSize.
LLVM_ABI std::optional< bool > evaluatePredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Check whether the condition described by Pred, LHS, and RHS is true or false.
LLVM_ABI const SCEV * getUnknown(Value *V)
LLVM_ABI std::optional< std::pair< const SCEV *, SmallVector< const SCEVPredicate *, 3 > > > createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI)
Checks if SymbolicPHI can be rewritten as an AddRecExpr under some Predicates.
LLVM_ABI const SCEV * getTruncateOrZeroExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI bool isKnownViaInduction(CmpPredicate Pred, SCEVUse LHS, SCEVUse RHS)
We'd like to check the predicate on every iteration of the most dominated loop between loops used in ...
LLVM_ABI std::optional< APInt > computeConstantDifference(const SCEV *LHS, const SCEV *RHS)
Compute LHS - RHS and returns the result as an APInt if it is a constant, and std::nullopt if it isn'...
LLVM_ABI bool properlyDominates(const SCEV *S, const BasicBlock *BB)
Return true if the elements that make up the given SCEV properly dominate the specified basic block.
LLVM_ABI const SCEV * getUDivExactExpr(SCEVUse LHS, SCEVUse RHS)
Get a canonical unsigned division expression, or something simpler if possible.
LLVM_ABI const SCEV * rewriteUsingPredicate(const SCEV *S, const Loop *L, const SCEVPredicate &A)
Re-writes the SCEV according to the Predicates in A.
LLVM_ABI std::pair< const SCEV *, const SCEV * > SplitIntoInitAndPostInc(const Loop *L, const SCEV *S)
Splits SCEV expression S into two SCEVs.
LLVM_ABI bool canReuseInstruction(const SCEV *S, Instruction *I, SmallVectorImpl< Instruction * > &DropPoisonGeneratingInsts)
Check whether it is poison-safe to represent the expression S using the instruction I.
LLVM_ABI bool isKnownPredicateAt(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
LLVM_ABI const SCEV * getPredicatedSymbolicMaxBackedgeTakenCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Predicates)
Similar to getSymbolicMaxBackedgeTakenCount, except it will add a set of SCEV predicates to Predicate...
LLVM_ABI ~ScalarEvolution()
LLVM_ABI const SCEV * getGEPExpr(GEPOperator *GEP, ArrayRef< SCEVUse > IndexExprs)
Returns an expression for a GEP.
LLVM_ABI const SCEV * getUMinExpr(SCEVUse LHS, SCEVUse RHS, bool Sequential=false)
LLVM_ABI void registerUser(const SCEV *User, ArrayRef< const SCEV * > Ops)
Notify this ScalarEvolution that User directly uses SCEVs in Ops.
LLVM_ABI bool isBasicBlockEntryGuardedByCond(const BasicBlock *BB, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether entry to the basic block is protected by a conditional between LHS and RHS.
LLVM_ABI const SCEV * getTruncateOrSignExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI bool containsErasedValue(const SCEV *S) const
Return true if the SCEV expression contains a Value that has been optimised out and is now a nullptr.
const SCEV * getSymbolicMaxBackedgeTakenCount(const Loop *L)
When successful, this returns a SCEV that is greater than or equal to (i.e.
APInt getSignedRangeMax(const SCEV *S)
Determine the max of the signed range for a particular SCEV.
LLVM_ABI void verify() const
LLVMContext & getContext() const
Implements a dense probed hash-table based set with some number of buckets stored inline.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
iterator insert(iterator I, T &&Elt)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
Represent a constant reference to a string, i.e.
Used to lazily calculate structure layout information for a target machine, based on the DataLayout s...
TypeSize getElementOffset(unsigned Idx) const
TypeSize getSizeInBits() const
Class to represent struct types.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
The instances of the Type class are immutable: once they are created, they are never changed.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
bool isPointerTy() const
True if this is an instance of PointerType.
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
LLVMContext & getContext() const
All values hold a context through their type.
unsigned getValueID() const
Return an ID for the concrete type of this object.
LLVM_ABI void printAsOperand(raw_ostream &O, bool PrintType=true, const Module *M=nullptr) const
Print the name of this Value out to the specified raw_ostream.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
This class implements an extremely fast bulk output stream that can only output to a stream.
raw_ostream & indent(unsigned NumSpaces)
indent - Insert 'NumSpaces' spaces.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
const APInt & smin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be signed.
const APInt & smax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be signed.
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
LLVM_ABI std::optional< APInt > SolveQuadraticEquationWrap(APInt A, APInt B, APInt C, unsigned RangeWidth)
Let q(n) = An^2 + Bn + C, and BW = bit width of the value range (e.g.
const APInt & umax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be unsigned.
LLVM_ABI APInt GreatestCommonDivisor(APInt A, APInt B)
Compute GCD of two unsigned APInt values.
constexpr bool any(E Val)
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
int getMinValue(MCInstrInfo const &MCII, MCInst const &MCI)
Return the minimum value of an extendable operand.
@ BasicBlock
Various leaf nodes.
LLVM_ABI Function * getDeclarationIfExists(const Module *M, ID id)
Look up the Function declaration of the intrinsic id in the Module M and return it if it exists.
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
match_combine_or< Ty... > m_CombineOr(const Ty &...Ps)
Combine pattern matchers matching any of Ps patterns.
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
bool match(Val *V, const Pattern &P)
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
auto m_BasicBlock()
Match an arbitrary basic block value and ignore it.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
auto m_Value()
Match an arbitrary value and ignore it.
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
match_bind< WithOverflowInst > m_WithOverflowInst(WithOverflowInst *&I)
Match a with overflow intrinsic, capturing it if we match.
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
brc_match< Cond_t, match_bind< BasicBlock >, match_bind< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
auto m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
bind_cst_ty m_scev_APInt(const APInt *&C)
Match an SCEV constant and bind it to an APInt.
cst_pred_ty< is_all_ones > m_scev_AllOnes()
Match an integer with all bits set.
SCEVUnaryExpr_match< SCEVZeroExtendExpr, Op0_t > m_scev_ZExt(const Op0_t &Op0)
is_undef_or_poison m_scev_UndefOrPoison()
Match an SCEVUnknown wrapping undef or poison.
cst_pred_ty< is_one > m_scev_One()
Match an integer 1.
specificloop_ty m_SpecificLoop(const Loop *L)
SCEVUnaryExpr_match< SCEVSignExtendExpr, Op0_t > m_scev_SExt(const Op0_t &Op0)
match_bind< const SCEVMulExpr > m_scev_Mul(const SCEVMulExpr *&V)
cst_pred_ty< is_zero > m_scev_Zero()
Match an integer 0.
SCEVUnaryExpr_match< SCEVTruncateExpr, Op0_t > m_scev_Trunc(const Op0_t &Op0)
bool match(const SCEV *S, const Pattern &P)
SCEVBinaryExpr_match< SCEVUDivExpr, Op0_t, Op1_t > m_scev_UDiv(const Op0_t &Op0, const Op1_t &Op1)
specificscev_ty m_scev_Specific(const SCEV *S)
Match if we have a specific specified SCEV.
SCEVAffineAddRec_match< Op0_t, Op1_t, match_isa< const Loop > > m_scev_AffineAddRec(const Op0_t &Op0, const Op1_t &Op1)
match_bind< const SCEVUnknown > m_SCEVUnknown(const SCEVUnknown *&V)
SCEVBinaryExpr_match< SCEVMulExpr, Op0_t, Op1_t, SCEV::FlagNUW, true > m_scev_c_NUWMul(const Op0_t &Op0, const Op1_t &Op1)
match_bind< const SCEVAddExpr > m_scev_Add(const SCEVAddExpr *&V)
SCEVBinaryExpr_match< SCEVMulExpr, Op0_t, Op1_t, SCEV::FlagAnyWrap, true > m_scev_c_Mul(const Op0_t &Op0, const Op1_t &Op1)
SCEVBinaryExpr_match< SCEVSMaxExpr, Op0_t, Op1_t > m_scev_SMax(const Op0_t &Op0, const Op1_t &Op1)
SCEVURem_match< Op0_t, Op1_t > m_scev_URem(Op0_t LHS, Op1_t RHS, ScalarEvolution &SE)
Match the mathematical pattern A - (A / B) * B, where A and B can be arbitrary expressions.
@ Valid
The data is already valid.
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
@ Switch
The "resume-switch" lowering, where there are separate resume and destroy functions that are shared b...
NodeAddr< PhiNode * > Phi
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
This is an optimization pass for GlobalISel generic memory operations.
void visitAll(const SCEV *Root, SV &Visitor)
Use SCEVTraversal to visit all nodes in the given expression tree.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
FunctionAddr VTableAddr Value
LLVM_ATTRIBUTE_ALWAYS_INLINE DynamicAPInt gcd(const DynamicAPInt &A, const DynamicAPInt &B)
void stable_sort(R &&Range)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
SaveAndRestore(T &) -> SaveAndRestore< T >
Printable print(const GCNRegPressure &RP, const GCNSubtarget *ST=nullptr, unsigned DynamicVGPRBlockSize=0)
LLVM_ABI bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
LLVM_ABI bool mustTriggerUB(const Instruction *I, const SmallPtrSetImpl< const Value * > &KnownPoison)
Return true if the given instruction must trigger undefined behavior when I is executed with any oper...
LLVM_ABI bool canConstantFoldCallTo(const CallBase *Call, const Function *F)
canConstantFoldCallTo - Return true if it is even possible to fold a call to the specified function.
InterleavedRange< Range > interleaved(const Range &R, StringRef Separator=", ", StringRef Prefix="", StringRef Suffix="")
Output range R as a sequence of interleaved elements.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
auto successors(const MachineBasicBlock *BB)
scope_exit(Callable) -> scope_exit< Callable >
constexpr from_range_t from_range
auto dyn_cast_if_present(const Y &Val)
dyn_cast_if_present<X> - Functionally identical to dyn_cast, except that a null (or none in the case ...
bool set_is_subset(const S1Ty &S1, const S2Ty &S2)
set_is_subset(A, B) - Return true iff A in B
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
auto uninitialized_copy(R &&Src, IterTy Dst)
bool isa_and_nonnull(const Y &Val)
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
LLVM_ABI bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO, const DominatorTree &DT)
Returns true if the arithmetic part of the WO 's result is used only along the paths control dependen...
DomTreeNodeBase< BasicBlock > DomTreeNode
LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
auto dyn_cast_or_null(const Y &Val)
void erase(Container &C, ValueType V)
Wrapper function to remove a value from a container:
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
iterator_range< pointee_iterator< WrappedIteratorT > > make_pointee_range(RangeT &&Range)
auto reverse(ContainerTy &&C)
LLVM_ABI bool isMustProgress(const Loop *L)
Return true if this loop can be assumed to make progress.
LLVM_ABI bool impliesPoison(const Value *ValAssumedPoison, const Value *V)
Return true if V is poison given that ValAssumedPoison is already poison.
LLVM_ABI bool isFinite(const Loop *L)
Return true if this loop can be assumed to run for a finite number of iterations.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
unsigned short computeExpressionSize(ArrayRef< SCEVUse > Args)
LLVM_ABI bool programUndefinedIfPoison(const Instruction *Inst)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool isPointerTy(const Type *T)
FunctionAddr VTableAddr Count
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
LLVM_ABI bool propagatesPoison(const Use &PoisonOp)
Return true if PoisonOp's user yields poison or raises UB if its operand PoisonOp is poison.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Mul
Product of integers.
@ SMax
Signed integer max implemented in terms of select(cmp()).
@ SMin
Signed integer min implemented in terms of select(cmp()).
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
auto count(R &&Range, const E &Element)
Wrapper function around std::count to count the number of times an element Element occurs in the give...
DWARFExpression::Operation Op
auto max_element(R &&Range)
Provide wrappers to std::max_element which take ranges instead of having to pass begin/end explicitly...
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return the number of times the sign bit of the register is replicated into the other bits.
constexpr unsigned BitWidth
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one of its successors (including the next instruction that follows within a basic block).
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given predicate occurs in a given range.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
constexpr bool isIntN(unsigned N, int64_t x)
Checks if a signed integer fits into the given (dynamic) bit width.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
iterator_range< df_iterator< T > > depth_first(const T &G)
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
LLVM_ABI bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
LLVM_ABI Constant * ConstantFoldInstOperands(const Instruction *I, ArrayRef< Constant * > Ops, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldInstOperands - Attempt to constant fold an instruction with the specified operands.
SCEVUseT< const SCEV * > SCEVUse
bool SCEVExprContains(const SCEV *Root, PredTy Pred)
Return true if any node in Root satisfies the predicate Pred.
Implement std::hash so that hash_code can be used in STL containers.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
A special type used by analysis passes to provide an address that identifies that particular analysis pass type.
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
bool isNonNegative() const
Returns true if this value is known to be non-negative.
static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
unsigned getBitWidth() const
Get the bit width of this value.
static LLVM_ABI KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for lshr(LHS, RHS).
KnownBits zextOrTrunc(unsigned BitWidth) const
Return known bits for a zero extension or truncation of the value we're tracking.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
bool isNegative() const
Returns true if this value is known to be negative.
static LLVM_ABI KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
An object of this class is returned by queries that could not be answered.
LLVM_ABI SCEVCouldNotCompute()
static LLVM_ABI bool classof(const SCEV *S)
Methods for support type inquiry through isa, cast, and dyn_cast:
This class defines a simple visitor class that may be used for various SCEV analysis purposes.
A utility class that uses RAII to save and restore the value of a variable.
Information about the number of loop iterations for which a loop exit's branch condition evaluates to the not-taken path.
LLVM_ABI ExitLimit(const SCEV *E)
Construct either an exact exit limit from a constant, or an unknown one from a SCEVCouldNotCompute.
const SCEV * ExactNotTaken
const SCEV * SymbolicMaxNotTaken
SmallVector< const SCEVPredicate *, 4 > Predicates
A vector of predicate guards for this ExitLimit.
const SCEV * ConstantMaxNotTaken