83#include "llvm/Config/llvm-config.h"
138#define DEBUG_TYPE "scalar-evolution"
141 "Number of loop exits with predictable exit counts");
143 "Number of loop exits without predictable exit counts");
145 "Number of loops with trip counts computed by force");
147#ifdef EXPENSIVE_CHECKS
155 cl::desc(
"Maximum number of iterations SCEV will "
156 "symbolically execute a constant "
162 cl::desc(
"Verify ScalarEvolution's backedge taken counts (slow)"));
165 cl::desc(
"Enable stricter verification with -verify-scev is passed"));
169 cl::desc(
"Verify IR correctness when making sensitive SCEV queries (slow)"),
174 cl::desc(
"Threshold for inlining multiplication operands into a SCEV"),
179 cl::desc(
"Threshold for inlining addition operands into a SCEV"),
183 "scalar-evolution-max-scev-compare-depth",
cl::Hidden,
184 cl::desc(
"Maximum depth of recursive SCEV complexity comparisons"),
188 "scalar-evolution-max-scev-operations-implication-depth",
cl::Hidden,
189 cl::desc(
"Maximum depth of recursive SCEV operations implication analysis"),
193 "scalar-evolution-max-value-compare-depth",
cl::Hidden,
194 cl::desc(
"Maximum depth of recursive value complexity comparisons"),
199 cl::desc(
"Maximum depth of recursive arithmetics"),
203 "scalar-evolution-max-constant-evolving-depth",
cl::Hidden,
208 cl::desc(
"Maximum depth of recursive SExt/ZExt/Trunc"),
213 cl::desc(
"Max coefficients in AddRec during evolving"),
218 cl::desc(
"Size of the expression which is considered huge"),
223 cl::desc(
"Threshold for switching to iteratively computing SCEV ranges"),
227 "scalar-evolution-max-loop-guard-collection-depth",
cl::Hidden,
228 cl::desc(
"Maximum depth for recursive loop guard collection"),
cl::init(1));
233 cl::desc(
"When printing analysis, include information on every instruction"));
236 "scalar-evolution-use-expensive-range-sharpening",
cl::Hidden,
238 cl::desc(
"Use more powerful methods of sharpening expression ranges. May "
239 "be costly in terms of compile time"));
242 "scalar-evolution-max-scc-analysis-depth",
cl::Hidden,
243 cl::desc(
"Maximum amount of nodes to process while searching SCEVUnknown "
244 "Phi strongly connected components"),
249 cl::desc(
"Handle <= and >= in finite loops"),
253 "scalar-evolution-use-context-for-no-wrap-flag-strenghening",
cl::Hidden,
254 cl::desc(
"Infer nuw/nsw flags using context where suitable"),
265#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
283 OS <<
"(ptrtoint " << *
Op->getType() <<
" " << *
Op <<
" to "
284 << *PtrToInt->
getType() <<
")";
290 OS <<
"(trunc " << *
Op->getType() <<
" " << *
Op <<
" to "
297 OS <<
"(zext " << *
Op->getType() <<
" " << *
Op <<
" to "
304 OS <<
"(sext " << *
Op->getType() <<
" " << *
Op <<
" to "
333 const char *OpStr =
nullptr;
346 OpStr =
" umin_seq ";
370 OS <<
"(" << *UDiv->
getLHS() <<
" /u " << *UDiv->
getRHS() <<
")";
377 OS <<
"***COULDNOTCOMPUTE***";
453 if (!
Mul)
return false;
457 if (!SC)
return false;
475 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
477 UniqueSCEVs.InsertNode(S, IP);
491 ConstantInt::get(ITy, V, isSigned,
true));
499 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
502 UniqueSCEVs.InsertNode(S, IP);
522 "Must be a non-bit-width-changing pointer-to-integer cast!");
534 "Cannot truncate non-integer value!");
541 "Cannot zero extend non-integer value!");
548 "Cannot sign extend non-integer value!");
553 SE->forgetMemoizedResults(
this);
556 SE->UniqueSCEVs.RemoveNode(
this);
562void SCEVUnknown::allUsesReplacedWith(
Value *New) {
564 SE->forgetMemoizedResults(
this);
567 SE->UniqueSCEVs.RemoveNode(
this);
589 if (LIsPointer != RIsPointer)
590 return (
int)LIsPointer - (int)RIsPointer;
595 return (
int)LID - (int)RID;
600 unsigned LArgNo = LA->getArgNo(), RArgNo =
RA->getArgNo();
601 return (
int)LArgNo - (int)RArgNo;
607 if (
auto L = LGV->getLinkage() - RGV->getLinkage())
610 const auto IsGVNameSemantic = [&](
const GlobalValue *GV) {
611 auto LT = GV->getLinkage();
618 if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
619 return LGV->getName().compare(RGV->getName());
630 if (LParent != RParent) {
633 if (LDepth != RDepth)
634 return (
int)LDepth - (int)RDepth;
638 unsigned LNumOps = LInst->getNumOperands(),
639 RNumOps = RInst->getNumOperands();
640 if (LNumOps != RNumOps)
641 return (
int)LNumOps - (int)RNumOps;
643 for (
unsigned Idx :
seq(LNumOps)) {
645 RInst->getOperand(Idx),
Depth + 1);
659static std::optional<int>
669 return (
int)LType - (int)RType;
694 unsigned LBitWidth = LA.
getBitWidth(), RBitWidth =
RA.getBitWidth();
695 if (LBitWidth != RBitWidth)
696 return (
int)LBitWidth - (int)RBitWidth;
697 return LA.
ult(
RA) ? -1 : 1;
703 return LTy->getBitWidth() - RTy->getBitWidth();
714 if (LLoop != RLoop) {
716 assert(LHead != RHead &&
"Two loops share the same header?");
720 "No dominance between recurrences used by one SCEV?");
743 unsigned LNumOps = LOps.
size(), RNumOps = ROps.
size();
744 if (LNumOps != RNumOps)
745 return (
int)LNumOps - (int)RNumOps;
747 for (
unsigned i = 0; i != LNumOps; ++i) {
772 if (
Ops.size() < 2)
return;
777 return Complexity && *Complexity < 0;
779 if (
Ops.size() == 2) {
783 if (IsLessComplex(
RHS,
LHS))
790 return IsLessComplex(
LHS,
RHS);
797 for (
unsigned i = 0, e =
Ops.size(); i != e-2; ++i) {
803 for (
unsigned j = i+1; j != e &&
Ops[j]->getSCEVType() == Complexity; ++j) {
808 if (i == e-2)
return;
830template <
typename FoldT,
typename IsIdentityT,
typename IsAbsorberT>
834 IsIdentityT IsIdentity, IsAbsorberT IsAbsorber) {
836 for (
unsigned Idx = 0; Idx <
Ops.size();) {
844 Ops.erase(
Ops.begin() + Idx);
851 assert(Folded &&
"Must have folded value");
855 if (Folded && IsAbsorber(Folded->
getAPInt()))
859 if (Folded && !IsIdentity(Folded->
getAPInt()))
860 Ops.insert(
Ops.begin(), Folded);
862 return Ops.size() == 1 ?
Ops[0] :
nullptr;
937 APInt OddFactorial(W, 1);
939 for (
unsigned i = 3; i <= K; ++i) {
942 OddFactorial *= (i >> TwoFactors);
946 unsigned CalculationBits = W +
T;
960 for (
unsigned i = 1; i != K; ++i) {
993 for (
unsigned i = 1, e =
Operands.size(); i != e; ++i) {
1021 ConversionFn CreatePtrCast;
1025 ConversionFn CreatePtrCast)
1026 : Base(
SE), TargetTy(TargetTy), CreatePtrCast(
std::
move(CreatePtrCast)) {}
1029 Type *TargetTy, ConversionFn CreatePtrCast) {
1031 return Rewriter.visit(Scev);
1067 "Should only reach pointer-typed SCEVUnknown's.");
1072 return SE.getZero(TargetTy);
1073 return CreatePtrCast(Expr);
1078 assert(
Op->getType()->isPointerTy() &&
"Op must be a pointer");
1097 Op, *
this, IntPtrTy, [
this, IntPtrTy](
const SCEVUnknown *U) {
1102 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
1104 SCEV *S =
new (SCEVAllocator)
1106 UniqueSCEVs.InsertNode(S, IP);
1108 return static_cast<const SCEV *
>(S);
1111 "We must have succeeded in sinking the cast, "
1112 "and ending up with an integer-typed expression!");
1117 assert(Ty->isIntegerTy() &&
"Target type must be an integer type!");
1129 "This is not a truncating conversion!");
1131 "This is not a conversion to a SCEVable type!");
1132 assert(!
Op->getType()->isPointerTy() &&
"Can't truncate pointer!");
1140 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1162 UniqueSCEVs.InsertNode(S, IP);
1174 unsigned numTruncs = 0;
1175 for (
unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
1183 if (numTruncs < 2) {
1193 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
1200 for (
const SCEV *
Op : AddRec->operands())
1215 UniqueSCEVs.InsertNode(S, IP);
1255struct ExtendOpTraitsBase {
1256 typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(
const SCEV *,
Type *,
1261template <
typename ExtendOp>
struct ExtendOpTraits {
1277 static const GetExtendExprTy GetExtendExpr;
1279 static const SCEV *getOverflowLimitForStep(
const SCEV *Step,
1280 ICmpInst::Predicate *Pred,
1281 ScalarEvolution *SE) {
1286const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1293 static const GetExtendExprTy GetExtendExpr;
1295 static const SCEV *getOverflowLimitForStep(
const SCEV *Step,
1296 ICmpInst::Predicate *Pred,
1297 ScalarEvolution *SE) {
1302const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1314template <
typename ExtendOpTy>
1317 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1318 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1334 for (
auto It = DiffOps.
begin(); It != DiffOps.
end(); ++It)
1347 auto PreStartFlags =
1365 const SCEV *OperandExtendedStart =
1367 (SE->*GetExtendExpr)(Step, WideTy,
Depth));
1368 if ((SE->*GetExtendExpr)(Start, WideTy,
Depth) == OperandExtendedStart) {
1380 const SCEV *OverflowLimit =
1381 ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);
1383 if (OverflowLimit &&
1391template <
typename ExtendOpTy>
1395 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1403 (SE->*GetExtendExpr)(PreStart, Ty,
Depth));
1438template <
typename ExtendOpTy>
1439bool ScalarEvolution::proveNoWrapByVaryingStart(
const SCEV *Start,
1442 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1452 APInt StartAI = StartC->
getAPInt();
1454 for (
unsigned Delta : {-2, -1, 1, 2}) {
1455 const SCEV *PreStart =
getConstant(StartAI - Delta);
1457 FoldingSetNodeID
ID;
1459 ID.AddPointer(PreStart);
1460 ID.AddPointer(Step);
1464 static_cast<SCEVAddRecExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
1468 if (PreAR && PreAR->getNoWrapFlags(WrapType)) {
1471 const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
1472 DeltaS, &Pred,
this);
1490 const unsigned BitWidth =
C.getBitWidth();
1508 const APInt &ConstantStart,
1527 auto &UserIDs = FoldCacheUser[
I.first->second];
1528 assert(
count(UserIDs,
ID) == 1 &&
"unexpected duplicates in UserIDs");
1529 for (
unsigned I = 0;
I != UserIDs.size(); ++
I)
1530 if (UserIDs[
I] ==
ID) {
1535 I.first->second = S;
1537 FoldCacheUser[S].push_back(
ID);
1543 "This is not an extending conversion!");
1545 "This is not a conversion to a SCEVable type!");
1546 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1550 if (
const SCEV *S = FoldCache.lookup(
ID))
1562 "This is not an extending conversion!");
1564 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1581 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1585 UniqueSCEVs.InsertNode(S, IP);
1594 const SCEV *
X = ST->getOperand();
1608 if (AR->isAffine()) {
1609 const SCEV *Start = AR->getStart();
1610 const SCEV *Step = AR->getStepRecurrence(*
this);
1612 const Loop *L = AR->getLoop();
1616 if (AR->hasNoUnsignedWrap()) {
1637 const SCEV *CastedMaxBECount =
1641 if (MaxBECount == RecastedMaxBECount) {
1651 const SCEV *WideMaxBECount =
1653 const SCEV *OperandExtendedAdd =
1659 if (ZAdd == OperandExtendedAdd) {
1670 OperandExtendedAdd =
1676 if (ZAdd == OperandExtendedAdd) {
1697 !AC.assumptions().empty()) {
1699 auto NewFlags = proveNoUnsignedWrapViaInduction(AR);
1701 if (AR->hasNoUnsignedWrap()) {
1736 const APInt &
C = SC->getAPInt();
1740 const SCEV *SResidual =
1749 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
1774 if (SA->hasNoUnsignedWrap()) {
1778 for (
const auto *
Op : SA->operands())
1795 const SCEV *SResidual =
1807 if (SM->hasNoUnsignedWrap()) {
1811 for (
const auto *
Op : SM->operands())
1829 const SCEV *TruncRHS;
1848 for (
auto *Operand :
MinMax->operands())
1859 for (
auto *Operand :
MinMax->operands())
1866 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1869 UniqueSCEVs.InsertNode(S, IP);
1877 "This is not an extending conversion!");
1879 "This is not a conversion to a SCEVable type!");
1880 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1884 if (
const SCEV *S = FoldCache.lookup(
ID))
1896 "This is not an extending conversion!");
1898 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1920 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1925 UniqueSCEVs.InsertNode(S, IP);
1934 const SCEV *
X = ST->getOperand();
1945 if (SA->hasNoSignedWrap()) {
1949 for (
const auto *
Op : SA->operands())
1967 const SCEV *SResidual =
1981 if (AR->isAffine()) {
1982 const SCEV *Start = AR->getStart();
1983 const SCEV *Step = AR->getStepRecurrence(*
this);
1985 const Loop *L = AR->getLoop();
1989 if (AR->hasNoSignedWrap()) {
2011 const SCEV *CastedMaxBECount =
2015 if (MaxBECount == RecastedMaxBECount) {
2025 const SCEV *WideMaxBECount =
2027 const SCEV *OperandExtendedAdd =
2033 if (SAdd == OperandExtendedAdd) {
2044 OperandExtendedAdd =
2050 if (SAdd == OperandExtendedAdd) {
2070 auto NewFlags = proveNoSignedWrapViaInduction(AR);
2072 if (AR->hasNoSignedWrap()) {
2087 const APInt &
C = SC->getAPInt();
2091 const SCEV *SResidual =
2100 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
2119 for (
auto *Operand :
MinMax->operands())
2128 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
2131 UniqueSCEVs.InsertNode(S, IP);
2157 "This is not an extending conversion!");
2159 "This is not a conversion to a SCEVable type!");
2164 if (SC->getAPInt().isNegative())
2169 const SCEV *NewOp =
T->getOperand();
2188 for (
const SCEV *
Op : AR->operands())
2227 APInt &AccumulatedConstant,
2230 bool Interesting =
false;
2237 if (Scale != 1 || AccumulatedConstant != 0 ||
C->getValue()->isZero())
2239 AccumulatedConstant += Scale *
C->getAPInt();
2244 for (; i !=
Ops.size(); ++i) {
2254 Add->operands(), NewScale, SE);
2260 auto Pair = M.insert({
Key, NewScale});
2264 Pair.first->second += NewScale;
2272 std::pair<DenseMap<const SCEV *, APInt>::iterator,
bool> Pair =
2273 M.insert({
Ops[i], Scale});
2277 Pair.first->second += Scale;
2296 case Instruction::Add:
2299 case Instruction::Sub:
2302 case Instruction::Mul:
2316 const SCEV *
A = (this->*Extension)(
2318 const SCEV *LHSB = (this->*Extension)(LHS, WideTy, 0);
2319 const SCEV *RHSB = (this->*Extension)(RHS, WideTy, 0);
2327 if (BinOp == Instruction::Mul)
2333 APInt C = RHSC->getAPInt();
2334 unsigned NumBits =
C.getBitWidth();
2335 bool IsSub = (BinOp == Instruction::Sub);
2336 bool IsNegativeConst = (
Signed &&
C.isNegative());
2338 bool OverflowDown = IsSub ^ IsNegativeConst;
2340 if (IsNegativeConst) {
2353 APInt Limit = Min + Magnitude;
2359 APInt Limit = Max - Magnitude;
2364std::optional<SCEV::NoWrapFlags>
2369 return std::nullopt;
2378 bool Deduced =
false;
2380 if (OBO->
getOpcode() != Instruction::Add &&
2383 return std::nullopt;
2392 false, LHS, RHS, CtxI)) {
2399 true, LHS, RHS, CtxI)) {
2406 return std::nullopt;
2416 using namespace std::placeholders;
2423 assert(CanAnalyze &&
"don't call from other places!");
2430 auto IsKnownNonNegative = [&](
const SCEV *S) {
2440 if (SignOrUnsignWrap != SignOrUnsignMask &&
2447 return Instruction::Add;
2449 return Instruction::Mul;
2460 Opcode,
C, OBO::NoSignedWrap);
2468 Opcode,
C, OBO::NoUnsignedWrap);
2478 Ops[0]->isZero() && IsKnownNonNegative(
Ops[1]))
2485 if (UDiv->getOperand(1) ==
Ops[1])
2488 if (UDiv->getOperand(1) ==
Ops[0])
2504 "only nuw or nsw allowed");
2505 assert(!
Ops.empty() &&
"Cannot get empty add!");
2506 if (
Ops.size() == 1)
return Ops[0];
2509 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i)
2511 "SCEVAddExpr operand types don't match!");
2513 Ops, [](
const SCEV *
Op) {
return Op->getType()->isPointerTy(); });
2514 assert(NumPtrs <= 1 &&
"add has at most one pointer operand");
2519 [](
const APInt &C1,
const APInt &C2) {
return C1 + C2; },
2520 [](
const APInt &
C) {
return C.isZero(); },
2521 [](
const APInt &
C) {
return false; });
2534 return getOrCreateAddExpr(
Ops, ComputeFlags(
Ops));
2539 if (
Add->getNoWrapFlags(OrigFlags) != OrigFlags)
2540 Add->setNoWrapFlags(ComputeFlags(
Ops));
2548 bool FoundMatch =
false;
2549 for (
unsigned i = 0, e =
Ops.size(); i != e-1; ++i)
2550 if (
Ops[i] ==
Ops[i+1]) {
2562 --i; e -=
Count - 1;
2572 auto FindTruncSrcType = [&]() ->
Type * {
2578 return T->getOperand()->getType();
2580 const auto *LastOp =
Mul->getOperand(
Mul->getNumOperands() - 1);
2582 return T->getOperand()->getType();
2586 if (
auto *SrcType = FindTruncSrcType()) {
2593 if (
T->getOperand()->getType() != SrcType) {
2602 for (
unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
2605 if (
T->getOperand()->getType() != SrcType) {
2633 if (
Ops.size() == 2) {
2643 auto C2 =
C->getAPInt();
2646 APInt ConstAdd = C1 + C2;
2647 auto AddFlags = AddExpr->getNoWrapFlags();
2688 if (
Ops.size() == 2 &&
2699 if (Idx <
Ops.size()) {
2700 bool DeletedAdd =
false;
2711 Ops.erase(
Ops.begin()+Idx);
2714 CommonFlags =
maskFlags(CommonFlags,
Add->getNoWrapFlags());
2737 struct APIntCompare {
2738 bool operator()(
const APInt &LHS,
const APInt &RHS)
const {
2739 return LHS.ult(RHS);
2746 std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
2747 for (
const SCEV *NewOp : NewOps)
2748 MulOpLists[M.find(NewOp)->second].push_back(NewOp);
2751 if (AccumulatedConstant != 0)
2753 for (
auto &MulOp : MulOpLists) {
2754 if (MulOp.first == 1) {
2756 }
else if (MulOp.first != 0) {
2765 if (
Ops.size() == 1)
2776 for (
unsigned MulOp = 0, e =
Mul->getNumOperands(); MulOp != e; ++MulOp) {
2777 const SCEV *MulOpSCEV =
Mul->getOperand(MulOp);
2780 for (
unsigned AddOp = 0, e =
Ops.size(); AddOp != e; ++AddOp)
2781 if (MulOpSCEV ==
Ops[AddOp]) {
2783 const SCEV *InnerMul =
Mul->getOperand(MulOp == 0);
2784 if (
Mul->getNumOperands() != 2) {
2788 Mul->operands().take_front(MulOp));
2796 if (
Ops.size() == 2)
return OuterMul;
2798 Ops.erase(
Ops.begin()+AddOp);
2799 Ops.erase(
Ops.begin()+Idx-1);
2801 Ops.erase(
Ops.begin()+Idx);
2802 Ops.erase(
Ops.begin()+AddOp-1);
2804 Ops.push_back(OuterMul);
2809 for (
unsigned OtherMulIdx = Idx+1;
2816 OMulOp != e; ++OMulOp)
2817 if (OtherMul->
getOperand(OMulOp) == MulOpSCEV) {
2819 const SCEV *InnerMul1 =
Mul->getOperand(MulOp == 0);
2820 if (
Mul->getNumOperands() != 2) {
2822 Mul->operands().take_front(MulOp));
2829 OtherMul->
operands().take_front(OMulOp));
2834 const SCEV *InnerMulSum =
2838 if (
Ops.size() == 2)
return OuterMul;
2839 Ops.erase(
Ops.begin()+Idx);
2840 Ops.erase(
Ops.begin()+OtherMulIdx-1);
2841 Ops.push_back(OuterMul);
2861 for (
unsigned i = 0, e =
Ops.size(); i != e; ++i)
2864 Ops.erase(
Ops.begin()+i);
2869 if (!LIOps.
empty()) {
2894 auto *DefI = getDefiningScopeBound(LIOps);
2896 if (!isGuaranteedToTransferExecutionTo(DefI, ReachI))
2908 if (
Ops.size() == 1)
return NewRec;
2911 for (
unsigned i = 0;; ++i)
2912 if (
Ops[i] == AddRec) {
2922 for (
unsigned OtherIdx = Idx+1;
2930 "AddRecExprs are not sorted in reverse dominance order?");
2937 if (OtherAddRec->getLoop() == AddRecLoop) {
2938 for (
unsigned i = 0, e = OtherAddRec->getNumOperands();
2940 if (i >= AddRecOps.
size()) {
2941 append_range(AddRecOps, OtherAddRec->operands().drop_front(i));
2945 AddRecOps[i], OtherAddRec->getOperand(i)};
2948 Ops.erase(
Ops.begin() + OtherIdx); --OtherIdx;
2963 return getOrCreateAddExpr(
Ops, ComputeFlags(
Ops));
2975 static_cast<SCEVAddExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
2977 const SCEV **O = SCEVAllocator.Allocate<
const SCEV *>(
Ops.size());
2979 S =
new (SCEVAllocator)
2981 UniqueSCEVs.InsertNode(S, IP);
2991 FoldingSetNodeID
ID;
2993 for (
const SCEV *
Op :
Ops)
2998 static_cast<SCEVAddRecExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
3000 const SCEV **
O = SCEVAllocator.Allocate<
const SCEV *>(
Ops.size());
3002 S =
new (SCEVAllocator)
3003 SCEVAddRecExpr(
ID.Intern(SCEVAllocator), O,
Ops.size(), L);
3004 UniqueSCEVs.InsertNode(S, IP);
3005 LoopUsers[
L].push_back(S);
3015 FoldingSetNodeID
ID;
3017 for (
const SCEV *
Op :
Ops)
3021 static_cast<SCEVMulExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
3023 const SCEV **
O = SCEVAllocator.Allocate<
const SCEV *>(
Ops.size());
3025 S =
new (SCEVAllocator) SCEVMulExpr(
ID.Intern(SCEVAllocator),
3027 UniqueSCEVs.InsertNode(S, IP);
3036 if (j > 1 && k / j != i) Overflow =
true;
3052 if (n == 0 || n == k)
return 1;
3053 if (k > n)
return 0;
3059 for (
uint64_t i = 1; i <= k; ++i) {
3060 r =
umul_ov(r, n-(i-1), Overflow);
3069 struct FindConstantInAddMulChain {
3070 bool FoundConstant =
false;
3072 bool follow(
const SCEV *S) {
3077 bool isDone()
const {
3078 return FoundConstant;
3082 FindConstantInAddMulChain
F;
3084 ST.visitAll(StartExpr);
3085 return F.FoundConstant;
3093 "only nuw or nsw allowed");
3094 assert(!
Ops.empty() &&
"Cannot get empty mul!");
3095 if (
Ops.size() == 1)
return Ops[0];
3097 Type *ETy =
Ops[0]->getType();
3099 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i)
3101 "SCEVMulExpr operand types don't match!");
3106 [](
const APInt &C1,
const APInt &C2) {
return C1 * C2; },
3107 [](
const APInt &
C) {
return C.isOne(); },
3108 [](
const APInt &
C) {
return C.isZero(); });
3119 return getOrCreateMulExpr(
Ops, ComputeFlags(
Ops));
3124 if (
Mul->getNoWrapFlags(OrigFlags) != OrigFlags)
3125 Mul->setNoWrapFlags(ComputeFlags(
Ops));
3130 if (
Ops.size() == 2) {
3138 const SCEV *Op0, *Op1;
3146 if (
Ops[0]->isAllOnesValue()) {
3151 bool AnyFolded =
false;
3152 for (
const SCEV *AddOp :
Add->operands()) {
3179 AddRec->getNoWrapFlags(FlagsMask));
3202 APInt C1V = LHSC->getAPInt();
3212 const SCEV *NewMul =
nullptr;
3216 assert(C1V.
ugt(1) &&
"C1 <= 1 should have been folded earlier");
3231 if (Idx <
Ops.size()) {
3232 bool DeletedMul =
false;
3238 Ops.erase(
Ops.begin()+Idx);
3262 for (
unsigned i = 0, e =
Ops.size(); i != e; ++i)
3265 Ops.erase(
Ops.begin()+i);
3270 if (!LIOps.
empty()) {
3283 for (
unsigned i = 0, e = AddRec->
getNumOperands(); i != e; ++i) {
3299 if (
Ops.size() == 1)
return NewRec;
3302 for (
unsigned i = 0;; ++i)
3303 if (
Ops[i] == AddRec) {
3324 bool OpsModified =
false;
3325 for (
unsigned OtherIdx = Idx+1;
3339 bool Overflow =
false;
3346 for (
int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
3350 z < ze && !Overflow; ++z) {
3353 if (LargerThan64Bits)
3354 Coeff =
umul_ov(Coeff1, Coeff2, Overflow);
3356 Coeff = Coeff1*Coeff2;
3371 if (
Ops.size() == 2)
return NewAddRec;
3372 Ops[Idx] = NewAddRec;
3373 Ops.erase(
Ops.begin() + OtherIdx); --OtherIdx;
3389 return getOrCreateMulExpr(
Ops, ComputeFlags(
Ops));
3397 "SCEVURemExpr operand types don't match!");
3402 if (RHSC->getValue()->isOne())
3403 return getZero(LHS->getType());
3406 if (RHSC->getAPInt().isPowerOf2()) {
3407 Type *FullTy = LHS->getType();
3424 assert(!LHS->getType()->isPointerTy() &&
3425 "SCEVUDivExpr operand can't be pointer!");
3426 assert(LHS->getType() == RHS->getType() &&
3427 "SCEVUDivExpr operand types don't match!");
3434 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
3442 if (RHSC->getValue()->isOne())
3447 if (!RHSC->getValue()->isZero()) {
3451 Type *Ty = LHS->getType();
3452 unsigned LZ = RHSC->getAPInt().countl_zero();
3456 if (!RHSC->getAPInt().isPowerOf2())
3464 const APInt &StepInt = Step->getAPInt();
3465 const APInt &DivInt = RHSC->getAPInt();
3466 if (!StepInt.
urem(DivInt) &&
3472 for (
const SCEV *
Op : AR->operands())
3478 const APInt *StartRem;
3491 bool CanFoldWithWrap = StepInt.
ule(DivInt) &&
3495 const SCEV *NewStart =
3497 if (*StartRem != 0 && (NoWrap || CanFoldWithWrap) &&
3499 const SCEV *NewLHS =
3502 if (LHS != NewLHS) {
3512 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
3521 for (
const SCEV *
Op : M->operands())
3525 for (
unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
3526 const SCEV *
Op = M->getOperand(i);
3538 if (
auto *DivisorConstant =
3540 bool Overflow =
false;
3542 DivisorConstant->getAPInt().
umul_ov(RHSC->getAPInt(), Overflow);
3553 for (
const SCEV *
Op :
A->operands())
3557 for (
unsigned i = 0, e =
A->getNumOperands(); i != e; ++i) {
3564 if (Operands.
size() ==
A->getNumOperands())
3571 return getConstant(LHSC->getAPInt().udiv(RHSC->getAPInt()));
3581 return getZero(LHS->getType());
3585 const SCEV *NewLHS, *NewRHS;
3593 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
3596 UniqueSCEVs.InsertNode(S, IP);
3626 if (!
Mul || !
Mul->hasNoUnsignedWrap())
3633 if (LHSCst == RHSCst) {
3641 APInt Factor =
gcd(LHSCst, RHSCst);
3659 for (
int i = 0, e =
Mul->getNumOperands(); i != e; ++i) {
3660 if (
Mul->getOperand(i) == RHS) {
3679 if (StepChrec->getLoop() == L) {
3693 if (Operands.
size() == 1)
return Operands[0];
3698 "SCEVAddRecExpr operand types don't match!");
3699 assert(!
Op->getType()->isPointerTy() &&
"Step must be integer");
3701 for (
const SCEV *
Op : Operands)
3703 "SCEVAddRecExpr operand is not available at loop entry!");
3706 if (Operands.
back()->isZero()) {
3721 const Loop *NestedLoop = NestedAR->getLoop();
3722 if (L->contains(NestedLoop)
3725 DT.dominates(L->getHeader(), NestedLoop->
getHeader()))) {
3727 Operands[0] = NestedAR->getStart();
3731 bool AllInvariant =
all_of(
3743 AllInvariant =
all_of(NestedOperands, [&](
const SCEV *
Op) {
3754 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
3758 Operands[0] = NestedAR;
3764 return getOrCreateAddRecExpr(Operands, L, Flags);
3780 if (!GEPI || !isSCEVExprNeverPoison(GEPI))
3784 return getGEPExpr(BaseExpr, IndexExprs,
GEP->getSourceElementType(), NW);
3798 bool FirstIter =
true;
3800 for (
const SCEV *IndexExpr : IndexExprs) {
3807 Offsets.push_back(FieldOffset);
3810 CurTy = STy->getTypeAtIndex(Index);
3815 "The first index of a GEP indexes a pointer");
3816 CurTy = SrcElementTy;
3827 const SCEV *LocalOffset =
getMulExpr(IndexExpr, ElementSize, OffsetWrap);
3828 Offsets.push_back(LocalOffset);
3833 if (Offsets.empty())
3846 "GEP should not change type mid-flight.");
3850SCEV *ScalarEvolution::findExistingSCEVInCache(
SCEVTypes SCEVType,
3853 ID.AddInteger(SCEVType);
3857 return UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
3867 assert(SCEVMinMaxExpr::isMinMaxType(Kind) &&
"Not a SCEVMinMaxExpr!");
3868 assert(!
Ops.empty() &&
"Cannot get empty (u|s)(min|max)!");
3869 if (
Ops.size() == 1)
return Ops[0];
3872 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i) {
3874 "Operand types don't match!");
3877 "min/max should be consistently pointerish");
3903 return IsSigned ?
C.isMinSignedValue() :
C.isMinValue();
3905 return IsSigned ?
C.isMaxSignedValue() :
C.isMaxValue();
3910 return IsSigned ?
C.isMaxSignedValue() :
C.isMaxValue();
3912 return IsSigned ?
C.isMinSignedValue() :
C.isMinValue();
3918 if (
const SCEV *S = findExistingSCEVInCache(Kind,
Ops)) {
3924 while (Idx <
Ops.size() &&
Ops[Idx]->getSCEVType() < Kind)
3929 if (Idx <
Ops.size()) {
3930 bool DeletedAny =
false;
3931 while (
Ops[Idx]->getSCEVType() == Kind) {
3933 Ops.erase(
Ops.begin()+Idx);
3951 for (
unsigned i = 0, e =
Ops.size() - 1; i != e; ++i) {
3952 if (
Ops[i] ==
Ops[i + 1] ||
3953 isKnownViaNonRecursiveReasoning(FirstPred,
Ops[i],
Ops[i + 1])) {
3956 Ops.erase(
Ops.begin() + i + 1,
Ops.begin() + i + 2);
3959 }
else if (isKnownViaNonRecursiveReasoning(SecondPred,
Ops[i],
3962 Ops.erase(
Ops.begin() + i,
Ops.begin() + i + 1);
3968 if (
Ops.size() == 1)
return Ops[0];
3970 assert(!
Ops.empty() &&
"Reduced smax down to nothing!");
3975 ID.AddInteger(Kind);
3979 const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
3981 return ExistingSCEV;
3982 const SCEV **O = SCEVAllocator.Allocate<
const SCEV *>(
Ops.size());
3984 SCEV *S =
new (SCEVAllocator)
3987 UniqueSCEVs.InsertNode(S, IP);
3994class SCEVSequentialMinMaxDeduplicatingVisitor final
3995 :
public SCEVVisitor<SCEVSequentialMinMaxDeduplicatingVisitor,
3996 std::optional<const SCEV *>> {
3997 using RetVal = std::optional<const SCEV *>;
4005 bool canRecurseInto(
SCEVTypes Kind)
const {
4008 return RootKind == Kind || NonSequentialRootKind == Kind;
4011 RetVal visitAnyMinMaxExpr(
const SCEV *S) {
4013 "Only for min/max expressions.");
4016 if (!canRecurseInto(Kind))
4026 return std::nullopt;
4033 RetVal
visit(
const SCEV *S) {
4035 if (!SeenOps.
insert(S).second)
4036 return std::nullopt;
4037 return Base::visit(S);
4041 SCEVSequentialMinMaxDeduplicatingVisitor(ScalarEvolution &SE,
4043 : SE(SE), RootKind(RootKind),
4044 NonSequentialRootKind(
4045 SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType(
4049 SmallVectorImpl<const SCEV *> &NewOps) {
4054 for (
const SCEV *
Op : OrigOps) {
4059 Ops.emplace_back(*NewOp);
4063 NewOps = std::move(
Ops);
  // Leaf cases of the deduplicating traversal: any SCEV kind that is not a
  // min/max expression is returned unchanged. Deduplication only ever looks
  // through min/max operand lists, so constants, casts, arithmetic, addrecs,
  // and divisions are treated as opaque leaves here.
  RetVal visitConstant(const SCEVConstant *Constant) { return Constant; }

  RetVal visitVScale(const SCEVVScale *VScale) { return VScale; }

  RetVal visitPtrToIntExpr(const SCEVPtrToIntExpr *Expr) { return Expr; }

  RetVal visitTruncateExpr(const SCEVTruncateExpr *Expr) { return Expr; }

  RetVal visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) { return Expr; }

  RetVal visitSignExtendExpr(const SCEVSignExtendExpr *Expr) { return Expr; }

  RetVal visitAddExpr(const SCEVAddExpr *Expr) { return Expr; }

  RetVal visitMulExpr(const SCEVMulExpr *Expr) { return Expr; }

  RetVal visitUDivExpr(const SCEVUDivExpr *Expr) { return Expr; }

  RetVal visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; }
4087 RetVal visitSMaxExpr(
const SCEVSMaxExpr *Expr) {
4088 return visitAnyMinMaxExpr(Expr);
4091 RetVal visitUMaxExpr(
const SCEVUMaxExpr *Expr) {
4092 return visitAnyMinMaxExpr(Expr);
4095 RetVal visitSMinExpr(
const SCEVSMinExpr *Expr) {
4096 return visitAnyMinMaxExpr(Expr);
4099 RetVal visitUMinExpr(
const SCEVUMinExpr *Expr) {
4100 return visitAnyMinMaxExpr(Expr);
4103 RetVal visitSequentialUMinExpr(
const SCEVSequentialUMinExpr *Expr) {
4104 return visitAnyMinMaxExpr(Expr);
  // SCEVUnknown wraps an opaque IR value; nothing to deduplicate inside it,
  // so it is returned as-is.
  RetVal visitUnknown(const SCEVUnknown *Expr) { return Expr; }

  // CouldNotCompute carries no operands; pass it through unchanged.
  RetVal visitCouldNotCompute(const SCEVCouldNotCompute *Expr) { return Expr; }
4151struct SCEVPoisonCollector {
4152 bool LookThroughMaybePoisonBlocking;
4153 SmallPtrSet<const SCEVUnknown *, 4> MaybePoison;
  // \p LookThroughMaybePoisonBlocking controls whether the traversal descends
  // into operations that may block poison propagation (stored in the member of
  // the same name and consulted by follow()).
  SCEVPoisonCollector(bool LookThroughMaybePoisonBlocking)
      : LookThroughMaybePoisonBlocking(LookThroughMaybePoisonBlocking) {}
4157 bool follow(
const SCEV *S) {
4158 if (!LookThroughMaybePoisonBlocking &&
  // Never terminate early: the collector must visit the entire expression
  // tree so that MaybePoison ends up containing every candidate SCEVUnknown.
  bool isDone() const { return false; }
4178 SCEVPoisonCollector PC1(
true);
4183 if (PC1.MaybePoison.empty())
4189 SCEVPoisonCollector PC2(
false);
4199 SCEVPoisonCollector PC(
false);
4222 while (!Worklist.
empty()) {
4224 if (!Visited.
insert(V).second)
4228 if (Visited.
size() > 16)
4233 if (PoisonVals.
contains(V) || ::isGuaranteedNotToBePoison(V))
4244 if (PDI->isDisjoint())
4251 II &&
II->getIntrinsicID() == Intrinsic::vscale)
4258 if (
I->hasPoisonGeneratingAnnotations())
4269 assert(SCEVSequentialMinMaxExpr::isSequentialMinMaxType(Kind) &&
4270 "Not a SCEVSequentialMinMaxExpr!");
4271 assert(!
Ops.empty() &&
"Cannot get empty (u|s)(min|max)!");
4272 if (
Ops.size() == 1)
4276 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i) {
4278 "Operand types don't match!");
4281 "min/max should be consistently pointerish");
4289 if (
const SCEV *S = findExistingSCEVInCache(Kind,
Ops))
4296 SCEVSequentialMinMaxDeduplicatingVisitor Deduplicator(*
this, Kind);
4306 bool DeletedAny =
false;
4307 while (Idx <
Ops.size()) {
4308 if (
Ops[Idx]->getSCEVType() != Kind) {
4313 Ops.erase(
Ops.begin() + Idx);
4314 Ops.insert(
Ops.begin() + Idx, SMME->operands().begin(),
4315 SMME->operands().end());
4323 const SCEV *SaturationPoint;
4334 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i) {
4335 if (!isGuaranteedNotToCauseUB(
Ops[i]))
4347 Ops.erase(
Ops.begin() + i);
4352 if (isKnownViaNonRecursiveReasoning(Pred,
Ops[i - 1],
Ops[i])) {
4353 Ops.erase(
Ops.begin() + i);
4361 ID.AddInteger(Kind);
4365 const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
4367 return ExistingSCEV;
4369 const SCEV **O = SCEVAllocator.Allocate<
const SCEV *>(
Ops.size());
4371 SCEV *S =
new (SCEVAllocator)
4374 UniqueSCEVs.InsertNode(S, IP);
4422 if (
Size.isScalable())
4443 "Cannot get offset for structure containing scalable vector types");
4457 if (
SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP)) {
4459 "Stale SCEVUnknown in uniquing map!");
4465 UniqueSCEVs.InsertNode(S, IP);
4479 return Ty->isIntOrPtrTy();
4486 if (Ty->isPointerTy())
4497 if (Ty->isIntegerTy())
4501 assert(Ty->isPointerTy() &&
"Unexpected non-pointer non-integer type!");
4513 bool PreciseA, PreciseB;
4514 auto *ScopeA = getDefiningScopeBound({
A}, PreciseA);
4515 auto *ScopeB = getDefiningScopeBound({
B}, PreciseB);
4516 if (!PreciseA || !PreciseB)
4519 return (ScopeA == ScopeB) || DT.dominates(ScopeA, ScopeB) ||
4520 DT.dominates(ScopeB, ScopeA);
4524 return CouldNotCompute.get();
4527bool ScalarEvolution::checkValidity(
const SCEV *S)
const {
4530 return SU && SU->getValue() ==
nullptr;
4533 return !ContainsNulls;
4538 if (
I != HasRecMap.end())
4543 HasRecMap.insert({S, FoundAddRec});
4551 if (
SI == ExprValueMap.
end())
4553 return SI->second.getArrayRef();
4559void ScalarEvolution::eraseValueFromMap(
Value *V) {
4561 if (
I != ValueExprMap.end()) {
4562 auto EVIt = ExprValueMap.find(
I->second);
4563 bool Removed = EVIt->second.remove(V);
4565 assert(Removed &&
"Value not in ExprValueMap?");
4566 ValueExprMap.erase(
I);
4570void ScalarEvolution::insertValueToMap(
Value *V,
const SCEV *S) {
4574 auto It = ValueExprMap.find_as(V);
4575 if (It == ValueExprMap.end()) {
4577 ExprValueMap[S].insert(V);
4588 return createSCEVIter(V);
4595 if (
I != ValueExprMap.end()) {
4596 const SCEV *S =
I->second;
4597 assert(checkValidity(S) &&
4598 "existing SCEV has not been properly invalidated");
4611 Type *Ty = V->getType();
4627 assert(!V->getType()->isPointerTy() &&
"Can't negate pointer");
4640 return (
const SCEV *)
nullptr;
4646 if (
const SCEV *Replaced = MatchMinMaxNegation(MME))
4650 Type *Ty = V->getType();
4656 assert(
P->getType()->isPointerTy());
4669 const SCEV **PtrOp =
nullptr;
4670 for (
const SCEV *&AddOp :
Ops) {
4671 if (AddOp->getType()->isPointerTy()) {
4672 assert(!PtrOp &&
"Cannot have multiple pointer ops");
4690 return getZero(LHS->getType());
4695 if (RHS->getType()->isPointerTy()) {
4696 if (!LHS->getType()->isPointerTy() ||
4706 const bool RHSIsNotMinSigned =
4737 Type *SrcTy = V->getType();
4738 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4739 "Cannot truncate or zero extend with non-integer arguments!");
4749 Type *SrcTy = V->getType();
4750 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4751 "Cannot truncate or zero extend with non-integer arguments!");
4761 Type *SrcTy = V->getType();
4762 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4763 "Cannot noop or zero extend with non-integer arguments!");
4765 "getNoopOrZeroExtend cannot truncate!");
4773 Type *SrcTy = V->getType();
4774 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4775 "Cannot noop or sign extend with non-integer arguments!");
4777 "getNoopOrSignExtend cannot truncate!");
4785 Type *SrcTy = V->getType();
4786 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4787 "Cannot noop or any extend with non-integer arguments!");
4789 "getNoopOrAnyExtend cannot truncate!");
4797 Type *SrcTy = V->getType();
4798 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4799 "Cannot truncate or noop with non-integer arguments!");
4801 "getTruncateOrNoop cannot extend!");
4809 const SCEV *PromotedLHS = LHS;
4810 const SCEV *PromotedRHS = RHS;
4830 assert(!
Ops.empty() &&
"At least one operand must be!");
4832 if (
Ops.size() == 1)
4836 Type *MaxType =
nullptr;
4837 for (
const auto *S :
Ops)
4842 assert(MaxType &&
"Failed to find maximum type!");
4846 for (
const auto *S :
Ops)
4855 if (!V->getType()->isPointerTy())
4860 V = AddRec->getStart();
4862 const SCEV *PtrOp =
nullptr;
4863 for (
const SCEV *AddOp :
Add->operands()) {
4864 if (AddOp->getType()->isPointerTy()) {
4865 assert(!PtrOp &&
"Cannot have multiple pointer ops");
4869 assert(PtrOp &&
"Must have pointer op");
4881 for (
User *U :
I->users()) {
4883 if (Visited.
insert(UserInsn).second)
4897 static const SCEV *rewrite(
const SCEV *S,
const Loop *L, ScalarEvolution &SE,
4898 bool IgnoreOtherLoops =
true) {
4901 if (
Rewriter.hasSeenLoopVariantSCEVUnknown())
4903 return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
4908 const SCEV *visitUnknown(
const SCEVUnknown *Expr) {
4910 SeenLoopVariantSCEVUnknown =
true;
4914 const SCEV *visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
4918 SeenOtherLoops =
true;
4922 bool hasSeenLoopVariantSCEVUnknown() {
return SeenLoopVariantSCEVUnknown; }
4924 bool hasSeenOtherLoops() {
return SeenOtherLoops; }
4927 explicit SCEVInitRewriter(
const Loop *L, ScalarEvolution &SE)
4928 : SCEVRewriteVisitor(SE),
L(
L) {}
4931 bool SeenLoopVariantSCEVUnknown =
false;
4932 bool SeenOtherLoops =
false;
4941 static const SCEV *rewrite(
const SCEV *S,
const Loop *L, ScalarEvolution &SE) {
4942 SCEVPostIncRewriter
Rewriter(L, SE);
4944 return Rewriter.hasSeenLoopVariantSCEVUnknown()
4949 const SCEV *visitUnknown(
const SCEVUnknown *Expr) {
4951 SeenLoopVariantSCEVUnknown =
true;
4955 const SCEV *visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
4959 SeenOtherLoops =
true;
4963 bool hasSeenLoopVariantSCEVUnknown() {
return SeenLoopVariantSCEVUnknown; }
4965 bool hasSeenOtherLoops() {
return SeenOtherLoops; }
4968 explicit SCEVPostIncRewriter(
const Loop *L, ScalarEvolution &SE)
4969 : SCEVRewriteVisitor(SE),
L(
L) {}
4972 bool SeenLoopVariantSCEVUnknown =
false;
4973 bool SeenOtherLoops =
false;
4979class SCEVBackedgeConditionFolder
4982 static const SCEV *rewrite(
const SCEV *S,
const Loop *L,
4983 ScalarEvolution &SE) {
4984 bool IsPosBECond =
false;
4985 Value *BECond =
nullptr;
4986 if (BasicBlock *Latch =
L->getLoopLatch()) {
4990 "Both outgoing branches should not target same header!");
4997 SCEVBackedgeConditionFolder
Rewriter(L, BECond, IsPosBECond, SE);
5001 const SCEV *visitUnknown(
const SCEVUnknown *Expr) {
5002 const SCEV *
Result = Expr;
5007 switch (
I->getOpcode()) {
5008 case Instruction::Select: {
5010 std::optional<const SCEV *> Res =
5011 compareWithBackedgeCondition(
SI->getCondition());
5019 std::optional<const SCEV *> Res = compareWithBackedgeCondition(
I);
5030 explicit SCEVBackedgeConditionFolder(
const Loop *L,
Value *BECond,
5031 bool IsPosBECond, ScalarEvolution &SE)
5032 : SCEVRewriteVisitor(SE),
L(
L), BackedgeCond(BECond),
5033 IsPositiveBECond(IsPosBECond) {}
5035 std::optional<const SCEV *> compareWithBackedgeCondition(
Value *IC);
5039 Value *BackedgeCond =
nullptr;
5041 bool IsPositiveBECond;
5044std::optional<const SCEV *>
5045SCEVBackedgeConditionFolder::compareWithBackedgeCondition(
Value *IC) {
5050 if (BackedgeCond == IC)
5053 return std::nullopt;
5058 static const SCEV *rewrite(
const SCEV *S,
const Loop *L,
5059 ScalarEvolution &SE) {
5065 const SCEV *visitUnknown(
const SCEVUnknown *Expr) {
5072 const SCEV *visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
5082 explicit SCEVShiftRewriter(
const Loop *L, ScalarEvolution &SE)
5083 : SCEVRewriteVisitor(SE),
L(
L) {}
5092ScalarEvolution::proveNoWrapViaConstantRanges(
const SCEVAddRecExpr *AR) {
5096 using OBO = OverflowingBinaryOperator;
5104 const APInt &BECountAP = BECountMax->getAPInt();
5105 unsigned NoOverflowBitWidth =
5117 Instruction::Add, IncRange, OBO::NoSignedWrap);
5118 if (NSWRegion.contains(AddRecRange))
5127 Instruction::Add, IncRange, OBO::NoUnsignedWrap);
5128 if (NUWRegion.contains(AddRecRange))
5136ScalarEvolution::proveNoSignedWrapViaInduction(
const SCEVAddRecExpr *AR) {
5146 if (!SignedWrapViaInductionTried.insert(AR).second)
5171 AC.assumptions().empty())
5179 const SCEV *OverflowLimit =
5181 if (OverflowLimit &&
5189ScalarEvolution::proveNoUnsignedWrapViaInduction(
const SCEVAddRecExpr *AR) {
5199 if (!UnsignedWrapViaInductionTried.insert(AR).second)
5225 AC.assumptions().empty())
5260 explicit BinaryOp(Operator *
Op)
5264 IsNSW = OBO->hasNoSignedWrap();
5265 IsNUW = OBO->hasNoUnsignedWrap();
5269 explicit BinaryOp(
unsigned Opcode,
Value *
LHS,
Value *
RHS,
bool IsNSW =
false,
5271 : Opcode(Opcode),
LHS(
LHS),
RHS(
RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
5283 return std::nullopt;
5289 switch (
Op->getOpcode()) {
5290 case Instruction::Add:
5291 case Instruction::Sub:
5292 case Instruction::Mul:
5293 case Instruction::UDiv:
5294 case Instruction::URem:
5295 case Instruction::And:
5296 case Instruction::AShr:
5297 case Instruction::Shl:
5298 return BinaryOp(
Op);
5300 case Instruction::Or: {
5303 return BinaryOp(Instruction::Add,
Op->getOperand(0),
Op->getOperand(1),
5305 return BinaryOp(
Op);
5308 case Instruction::Xor:
5312 if (RHSC->getValue().isSignMask())
5313 return BinaryOp(Instruction::Add,
Op->getOperand(0),
Op->getOperand(1));
5315 if (V->getType()->isIntegerTy(1))
5316 return BinaryOp(Instruction::Add,
Op->getOperand(0),
Op->getOperand(1));
5317 return BinaryOp(
Op);
5319 case Instruction::LShr:
5328 if (SA->getValue().ult(
BitWidth)) {
5330 ConstantInt::get(SA->getContext(),
5332 return BinaryOp(Instruction::UDiv,
Op->getOperand(0),
X);
5335 return BinaryOp(
Op);
5337 case Instruction::ExtractValue: {
5339 if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
5347 bool Signed = WO->isSigned();
5350 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS());
5355 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(),
5366 if (
II->getIntrinsicID() == Intrinsic::loop_decrement_reg)
5367 return BinaryOp(Instruction::Sub,
II->getOperand(0),
II->getOperand(1));
5369 return std::nullopt;
5395 if (
Op == SymbolicPHI)
5400 if (SourceBits != NewBits)
5418 if (!L || L->getHeader() != PN->
getParent())
5476std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5477ScalarEvolution::createAddRecFromPHIWithCastsImpl(
const SCEVUnknown *SymbolicPHI) {
5485 assert(L &&
"Expecting an integer loop header phi");
5490 Value *BEValueV =
nullptr, *StartValueV =
nullptr;
5491 for (
unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
5492 Value *
V = PN->getIncomingValue(i);
5493 if (
L->contains(PN->getIncomingBlock(i))) {
5496 }
else if (BEValueV != V) {
5500 }
else if (!StartValueV) {
5502 }
else if (StartValueV != V) {
5503 StartValueV =
nullptr;
5507 if (!BEValueV || !StartValueV)
5508 return std::nullopt;
5510 const SCEV *BEValue =
getSCEV(BEValueV);
5517 return std::nullopt;
5521 unsigned FoundIndex =
Add->getNumOperands();
5522 Type *TruncTy =
nullptr;
5524 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5527 if (FoundIndex == e) {
5532 if (FoundIndex ==
Add->getNumOperands())
5533 return std::nullopt;
5537 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5538 if (i != FoundIndex)
5539 Ops.push_back(
Add->getOperand(i));
5545 return std::nullopt;
5598 const SCEV *StartVal =
getSCEV(StartValueV);
5599 const SCEV *PHISCEV =
5626 auto getExtendedExpr = [&](
const SCEV *Expr,
5627 bool CreateSignExtend) ->
const SCEV * {
5630 const SCEV *ExtendedExpr =
5633 return ExtendedExpr;
5641 auto PredIsKnownFalse = [&](
const SCEV *Expr,
5642 const SCEV *ExtendedExpr) ->
bool {
5643 return Expr != ExtendedExpr &&
5647 const SCEV *StartExtended = getExtendedExpr(StartVal,
Signed);
5648 if (PredIsKnownFalse(StartVal, StartExtended)) {
5650 return std::nullopt;
5655 const SCEV *AccumExtended = getExtendedExpr(Accum,
true);
5656 if (PredIsKnownFalse(Accum, AccumExtended)) {
5658 return std::nullopt;
5661 auto AppendPredicate = [&](
const SCEV *Expr,
5662 const SCEV *ExtendedExpr) ->
void {
5663 if (Expr != ExtendedExpr &&
5671 AppendPredicate(StartVal, StartExtended);
5672 AppendPredicate(Accum, AccumExtended);
5680 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
5681 std::make_pair(NewAR, Predicates);
5683 PredicatedSCEVRewrites[{SymbolicPHI,
L}] = PredRewrite;
5687std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5692 return std::nullopt;
5695 auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
5696 if (
I != PredicatedSCEVRewrites.end()) {
5697 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
5700 if (Rewrite.first == SymbolicPHI)
5701 return std::nullopt;
5705 assert(!(Rewrite.second).empty() &&
"Expected to find Predicates");
5709 std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5710 Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);
5715 PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
5716 return std::nullopt;
5733 auto areExprsEqual = [&](
const SCEV *Expr1,
const SCEV *Expr2) ->
bool {
5734 if (Expr1 != Expr2 &&
5735 !Preds->implies(SE.getEqualPredicate(Expr1, Expr2), SE) &&
5736 !Preds->implies(SE.getEqualPredicate(Expr2, Expr1), SE))
5753const SCEV *ScalarEvolution::createSimpleAffineAddRec(
PHINode *PN,
5755 Value *StartValueV) {
5758 assert(BEValueV && StartValueV);
5764 if (BO->Opcode != Instruction::Add)
5767 const SCEV *Accum =
nullptr;
5768 if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
5770 else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
5784 insertValueToMap(PN, PHISCEV);
5789 proveNoWrapViaConstantRanges(AR)));
5797 "Accum is defined outside L, but is not invariant?");
5798 if (isAddRecNeverPoison(BEInst, L))
5805const SCEV *ScalarEvolution::createAddRecFromPHI(
PHINode *PN) {
5806 const Loop *
L = LI.getLoopFor(PN->
getParent());
5813 Value *BEValueV =
nullptr, *StartValueV =
nullptr;
5819 }
else if (BEValueV != V) {
5823 }
else if (!StartValueV) {
5825 }
else if (StartValueV != V) {
5826 StartValueV =
nullptr;
5830 if (!BEValueV || !StartValueV)
5833 assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
5834 "PHI node already processed?");
5838 if (
auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
5843 insertValueToMap(PN, SymbolicName);
5847 const SCEV *BEValue =
getSCEV(BEValueV);
5857 unsigned FoundIndex =
Add->getNumOperands();
5858 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5859 if (
Add->getOperand(i) == SymbolicName)
5860 if (FoundIndex == e) {
5865 if (FoundIndex !=
Add->getNumOperands()) {
5868 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5869 if (i != FoundIndex)
5870 Ops.push_back(SCEVBackedgeConditionFolder::rewrite(
Add->getOperand(i),
5882 if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
5889 if (
GEP->getOperand(0) == PN) {
5890 GEPNoWrapFlags NW =
GEP->getNoWrapFlags();
5908 const SCEV *StartVal =
getSCEV(StartValueV);
5909 const SCEV *PHISCEV =
getAddRecExpr(StartVal, Accum, L, Flags);
5914 forgetMemoizedResults(SymbolicName);
5915 insertValueToMap(PN, PHISCEV);
5920 proveNoWrapViaConstantRanges(AR)));
5945 const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *
this);
5946 const SCEV *
Start = SCEVInitRewriter::rewrite(Shifted, L, *
this,
false);
5948 isGuaranteedNotToCauseUB(Shifted) &&
::impliesPoison(Shifted, Start)) {
5949 const SCEV *StartVal =
getSCEV(StartValueV);
5950 if (Start == StartVal) {
5954 forgetMemoizedResults(SymbolicName);
5955 insertValueToMap(PN, Shifted);
5965 eraseValueFromMap(PN);
5985 Use &LeftUse =
Merge->getOperandUse(0);
5986 Use &RightUse =
Merge->getOperandUse(1);
6003const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(
PHINode *PN) {
6005 [&](
BasicBlock *BB) {
return DT.isReachableFromEntry(BB); };
6020 assert(IDom &&
"At least the entry block should dominate PN");
6029 return createNodeForSelectOrPHI(PN,
Cond,
LHS,
RHS);
6045ScalarEvolution::createNodeForPHIWithIdenticalOperands(
PHINode *PN) {
6046 BinaryOperator *CommonInst =
nullptr;
6057 CommonInst = IncomingInst;
6064 const SCEV *CommonSCEV =
getSCEV(CommonInst);
6065 bool SCEVExprsIdentical =
6067 [
this, CommonSCEV](
Value *V) { return CommonSCEV == getSCEV(V); });
6068 return SCEVExprsIdentical ? CommonSCEV :
nullptr;
6071const SCEV *ScalarEvolution::createNodeForPHI(
PHINode *PN) {
6072 if (
const SCEV *S = createAddRecFromPHI(PN))
6082 if (
const SCEV *S = createNodeForPHIWithIdenticalOperands(PN))
6085 if (
const SCEV *S = createNodeFromSelectLikePHI(PN))
6094 struct FindClosure {
6095 const SCEV *OperandToFind;
6101 bool canRecurseInto(
SCEVTypes Kind)
const {
6104 return RootKind == Kind || NonSequentialRootKind == Kind ||
6109 : OperandToFind(OperandToFind), RootKind(RootKind),
6110 NonSequentialRootKind(
6114 bool follow(
const SCEV *S) {
6115 Found = S == OperandToFind;
6117 return !isDone() && canRecurseInto(S->
getSCEVType());
6120 bool isDone()
const {
return Found; }
6123 FindClosure FC(OperandToFind, RootKind);
6128std::optional<const SCEV *>
6129ScalarEvolution::createNodeForSelectOrPHIInstWithICmpInstCond(
Type *Ty,
6139 switch (ICI->getPredicate()) {
6153 bool Signed = ICI->isSigned();
6154 const SCEV *LA =
getSCEV(TrueVal);
6162 if (LA == LS &&
RA == RS)
6164 if (LA == RS &&
RA == LS)
6167 auto CoerceOperand = [&](
const SCEV *
Op) ->
const SCEV * {
6168 if (
Op->getType()->isPointerTy()) {
6179 LS = CoerceOperand(LS);
6180 RS = CoerceOperand(RS);
6204 const SCEV *TrueValExpr =
getSCEV(TrueVal);
6205 const SCEV *FalseValExpr =
getSCEV(FalseVal);
6219 X = ZExt->getOperand();
6221 const SCEV *FalseValExpr =
getSCEV(FalseVal);
6232 return std::nullopt;
6235static std::optional<const SCEV *>
6237 const SCEV *TrueExpr,
const SCEV *FalseExpr) {
6241 "Unexpected operands of a select.");
6253 return std::nullopt;
6268static std::optional<const SCEV *>
6272 return std::nullopt;
6275 const auto *SETrue = SE->
getSCEV(TrueVal);
6276 const auto *SEFalse = SE->
getSCEV(FalseVal);
6280const SCEV *ScalarEvolution::createNodeForSelectOrPHIViaUMinSeq(
6282 assert(
Cond->getType()->isIntegerTy(1) &&
"Select condition is not an i1?");
6284 V->getType() ==
TrueVal->getType() &&
6285 "Types of select hands and of the result must match.");
6288 if (!
V->getType()->isIntegerTy(1))
6291 if (std::optional<const SCEV *> S =
6304 return getSCEV(CI->isOne() ? TrueVal : FalseVal);
6308 if (std::optional<const SCEV *> S =
6309 createNodeForSelectOrPHIInstWithICmpInstCond(
I->getType(), ICI,
6315 return createNodeForSelectOrPHIViaUMinSeq(V,
Cond, TrueVal, FalseVal);
6321 assert(
GEP->getSourceElementType()->isSized() &&
6322 "GEP source element type must be sized");
6325 for (
Value *Index :
GEP->indices())
6330APInt ScalarEvolution::getConstantMultipleImpl(
const SCEV *S,
6333 auto GetShiftedByZeros = [
BitWidth](uint32_t TrailingZeros) {
6336 : APInt::getOneBitSet(
BitWidth, TrailingZeros);
6338 auto GetGCDMultiple = [
this, CtxI](
const SCEVNAryExpr *
N) {
6341 for (
unsigned I = 1,
E =
N->getNumOperands();
I <
E && Res != 1; ++
I)
6359 return GetShiftedByZeros(TZ);
6369 return GetShiftedByZeros(TZ);
6373 if (
M->hasNoUnsignedWrap()) {
6376 for (
const SCEV *Operand :
M->operands().drop_front())
6384 for (
const SCEV *Operand :
M->operands())
6386 return GetShiftedByZeros(TZ);
6391 if (
N->hasNoUnsignedWrap())
6392 return GetGCDMultiple(
N);
6395 for (
const SCEV *Operand :
N->operands().drop_front())
6397 return GetShiftedByZeros(TZ);
6414 CtxI = &*F.getEntryBlock().begin();
6420 .countMinTrailingZeros();
6421 return GetShiftedByZeros(Known);
6434 return getConstantMultipleImpl(S, CtxI);
6436 auto I = ConstantMultipleCache.find(S);
6437 if (
I != ConstantMultipleCache.end())
6440 APInt Result = getConstantMultipleImpl(S, CtxI);
6441 auto InsertPair = ConstantMultipleCache.insert({S, Result});
6442 assert(InsertPair.second &&
"Should insert a new key");
6443 return InsertPair.first->second;
6460 if (
MDNode *MD =
I->getMetadata(LLVMContext::MD_range))
6463 if (std::optional<ConstantRange>
Range = CB->getRange())
6467 if (std::optional<ConstantRange>
Range =
A->getRange())
6470 return std::nullopt;
6477 UnsignedRanges.erase(AddRec);
6478 SignedRanges.erase(AddRec);
6479 ConstantMultipleCache.erase(AddRec);
6484getRangeForUnknownRecurrence(
const SCEVUnknown *U) {
6510 Value *Start, *Step;
6517 assert(L && L->getHeader() ==
P->getParent());
6530 case Instruction::AShr:
6531 case Instruction::LShr:
6532 case Instruction::Shl:
6547 KnownStep.getBitWidth() ==
BitWidth);
6550 auto MaxShiftAmt = KnownStep.getMaxValue();
6552 bool Overflow =
false;
6553 auto TotalShift = MaxShiftAmt.umul_ov(TCAP, Overflow);
6560 case Instruction::AShr: {
6568 if (KnownStart.isNonNegative())
6571 KnownStart.getMaxValue() + 1);
6572 if (KnownStart.isNegative())
6575 KnownEnd.getMaxValue() + 1);
6578 case Instruction::LShr: {
6587 KnownStart.getMaxValue() + 1);
6589 case Instruction::Shl: {
6593 if (TotalShift.ult(KnownStart.countMinLeadingZeros()))
6594 return ConstantRange(KnownStart.getMinValue(),
6595 KnownEnd.getMaxValue() + 1);
6603ScalarEvolution::getRangeRefIter(
const SCEV *S,
6604 ScalarEvolution::RangeSignHint SignHint) {
6605 DenseMap<const SCEV *, ConstantRange> &Cache =
6606 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
6609 SmallPtrSet<const SCEV *, 8> Seen;
6613 auto AddToWorklist = [&WorkList, &Seen, &Cache](
const SCEV *Expr) {
6614 if (!Seen.
insert(Expr).second)
6647 for (
unsigned I = 0;
I != WorkList.
size(); ++
I) {
6648 const SCEV *
P = WorkList[
I];
6652 for (
const SCEV *
Op :
P->operands())
6658 if (!PendingPhiRangesIter.insert(
P).second)
6665 if (!WorkList.
empty()) {
6670 getRangeRef(
P, SignHint);
6674 PendingPhiRangesIter.erase(
P);
6678 return getRangeRef(S, SignHint, 0);
6685 const SCEV *S, ScalarEvolution::RangeSignHint SignHint,
unsigned Depth) {
6686 DenseMap<const SCEV *, ConstantRange> &Cache =
6687 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
6695 if (
I != Cache.
end())
6699 return setRange(
C, SignHint, ConstantRange(
C->getAPInt()));
6704 return getRangeRefIter(S, SignHint);
6707 ConstantRange ConservativeResult(
BitWidth,
true);
6708 using OBO = OverflowingBinaryOperator;
6712 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
6716 ConservativeResult =
6723 ConservativeResult = ConstantRange(
6739 ConservativeResult.intersectWith(
X.truncate(
BitWidth), RangeType));
6746 ConservativeResult.intersectWith(
X.zeroExtend(
BitWidth), RangeType));
6753 ConservativeResult.intersectWith(
X.signExtend(
BitWidth), RangeType));
6757 ConstantRange
X = getRangeRef(PtrToInt->
getOperand(), SignHint,
Depth + 1);
6758 return setRange(PtrToInt, SignHint,
X);
6763 const SCEV *URemLHS =
nullptr, *URemRHS =
nullptr;
6764 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED &&
6766 ConstantRange LHSRange = getRangeRef(URemLHS, SignHint,
Depth + 1);
6767 ConstantRange RHSRange = getRangeRef(URemRHS, SignHint,
Depth + 1);
6768 ConservativeResult =
6769 ConservativeResult.intersectWith(LHSRange.
urem(RHSRange), RangeType);
6771 ConstantRange
X = getRangeRef(
Add->getOperand(0), SignHint,
Depth + 1);
6772 unsigned WrapType = OBO::AnyWrap;
6773 if (
Add->hasNoSignedWrap())
6774 WrapType |= OBO::NoSignedWrap;
6775 if (
Add->hasNoUnsignedWrap())
6776 WrapType |= OBO::NoUnsignedWrap;
6778 X =
X.addWithNoWrap(getRangeRef(
Op, SignHint,
Depth + 1), WrapType,
6780 return setRange(
Add, SignHint,
6781 ConservativeResult.intersectWith(
X, RangeType));
6785 ConstantRange
X = getRangeRef(
Mul->getOperand(0), SignHint,
Depth + 1);
6787 X =
X.multiply(getRangeRef(
Op, SignHint,
Depth + 1));
6788 return setRange(
Mul, SignHint,
6789 ConservativeResult.intersectWith(
X, RangeType));
6793 ConstantRange
X = getRangeRef(UDiv->
getLHS(), SignHint,
Depth + 1);
6794 ConstantRange
Y = getRangeRef(UDiv->
getRHS(), SignHint,
Depth + 1);
6795 return setRange(UDiv, SignHint,
6796 ConservativeResult.intersectWith(
X.udiv(
Y), RangeType));
6804 if (!UnsignedMinValue.
isZero())
6805 ConservativeResult = ConservativeResult.intersectWith(
6806 ConstantRange(UnsignedMinValue, APInt(
BitWidth, 0)), RangeType);
6815 bool AllNonNeg =
true;
6816 bool AllNonPos =
true;
6817 for (
unsigned i = 1, e = AddRec->
getNumOperands(); i != e; ++i) {
6824 ConservativeResult = ConservativeResult.intersectWith(
6829 ConservativeResult = ConservativeResult.intersectWith(
6838 const SCEV *MaxBEScev =
6852 auto RangeFromAffine = getRangeForAffineAR(
6854 ConservativeResult =
6855 ConservativeResult.intersectWith(RangeFromAffine, RangeType);
6857 auto RangeFromFactoring = getRangeViaFactoring(
6859 ConservativeResult =
6860 ConservativeResult.intersectWith(RangeFromFactoring, RangeType);
6866 const SCEV *SymbolicMaxBECount =
6871 auto RangeFromAffineNew = getRangeForAffineNoSelfWrappingAR(
6872 AddRec, SymbolicMaxBECount,
BitWidth, SignHint);
6873 ConservativeResult =
6874 ConservativeResult.intersectWith(RangeFromAffineNew, RangeType);
6879 return setRange(AddRec, SignHint, std::move(ConservativeResult));
6889 ID = Intrinsic::umax;
6892 ID = Intrinsic::smax;
6896 ID = Intrinsic::umin;
6899 ID = Intrinsic::smin;
6906 ConstantRange
X = getRangeRef(NAry->getOperand(0), SignHint,
Depth + 1);
6907 for (
unsigned i = 1, e = NAry->getNumOperands(); i != e; ++i)
6909 ID, {
X, getRangeRef(NAry->getOperand(i), SignHint,
Depth + 1)});
6910 return setRange(S, SignHint,
6911 ConservativeResult.intersectWith(
X, RangeType));
6920 ConservativeResult =
6921 ConservativeResult.intersectWith(*MDRange, RangeType);
6926 auto CR = getRangeForUnknownRecurrence(U);
6927 ConservativeResult = ConservativeResult.intersectWith(CR);
6938 if (
U->getType()->isPointerTy()) {
6941 unsigned ptrSize = DL.getPointerTypeSizeInBits(
U->getType());
6942 int ptrIdxDiff = ptrSize -
BitWidth;
6943 if (ptrIdxDiff > 0 && ptrSize >
BitWidth && NS > (
unsigned)ptrIdxDiff)
6956 ConservativeResult = ConservativeResult.intersectWith(
6960 ConservativeResult = ConservativeResult.intersectWith(
6965 if (
U->getType()->isPointerTy() && SignHint == HINT_RANGE_UNSIGNED) {
6968 bool CanBeNull, CanBeFreed;
6969 uint64_t DerefBytes =
6970 V->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
6980 uint64_t
Align =
U->getValue()->getPointerAlignment(DL).value();
6981 uint64_t Rem = MaxVal.
urem(Align);
6986 ConservativeResult = ConservativeResult.intersectWith(
6994 if (PendingPhiRanges.insert(Phi).second) {
6995 ConstantRange RangeFromOps(
BitWidth,
false);
6997 for (
const auto &
Op :
Phi->operands()) {
6999 RangeFromOps = RangeFromOps.unionWith(OpRange);
7001 if (RangeFromOps.isFullSet())
7004 ConservativeResult =
7005 ConservativeResult.intersectWith(RangeFromOps, RangeType);
7006 bool Erased = PendingPhiRanges.erase(Phi);
7007 assert(Erased &&
"Failed to erase Phi properly?");
7014 if (
II->getIntrinsicID() == Intrinsic::vscale) {
7016 ConservativeResult = ConservativeResult.difference(Disallowed);
7019 return setRange(U, SignHint, std::move(ConservativeResult));
7025 return setRange(S, SignHint, std::move(ConservativeResult));
7034 const APInt &MaxBECount,
7041 if (Step == 0 || MaxBECount == 0)
7048 return ConstantRange::getFull(
BitWidth);
7064 return ConstantRange::getFull(
BitWidth);
7076 APInt MovedBoundary = Descending ? (StartLower - std::move(
Offset))
7077 : (StartUpper + std::move(
Offset));
7082 if (StartRange.
contains(MovedBoundary))
7083 return ConstantRange::getFull(
BitWidth);
7086 Descending ? std::move(MovedBoundary) : std::move(StartLower);
7088 Descending ? std::move(StartUpper) : std::move(MovedBoundary);
7097 const APInt &MaxBECount) {
7101 "mismatched bit widths");
7110 StepSRange.
getSignedMin(), StartSRange, MaxBECount,
true);
7112 StartSRange, MaxBECount,
7124ConstantRange ScalarEvolution::getRangeForAffineNoSelfWrappingAR(
7126 ScalarEvolution::RangeSignHint SignHint) {
7127 assert(AddRec->
isAffine() &&
"Non-affine AddRecs are not suppored!\n");
7129 "This only works for non-self-wrapping AddRecs!");
7130 const bool IsSigned = SignHint == HINT_RANGE_SIGNED;
7134 return ConstantRange::getFull(
BitWidth);
7142 return ConstantRange::getFull(
BitWidth);
7146 const SCEV *MaxItersWithoutWrap =
getUDivExpr(RangeWidth, StepAbs);
7148 MaxItersWithoutWrap))
7149 return ConstantRange::getFull(
BitWidth);
7170 ConstantRange StartRange = getRangeRef(Start, SignHint);
7171 ConstantRange EndRange = getRangeRef(End, SignHint);
7172 ConstantRange RangeBetween = StartRange.
unionWith(EndRange);
7176 return RangeBetween;
7181 return ConstantRange::getFull(
BitWidth);
7184 isKnownPredicateViaConstantRanges(LEPred, Start, End))
7185 return RangeBetween;
7187 isKnownPredicateViaConstantRanges(GEPred, Start, End))
7188 return RangeBetween;
7189 return ConstantRange::getFull(
BitWidth);
7194 const APInt &MaxBECount) {
7201 "mismatched bit widths");
7203 struct SelectPattern {
7204 Value *Condition =
nullptr;
7208 explicit SelectPattern(ScalarEvolution &SE,
unsigned BitWidth,
7210 std::optional<unsigned> CastOp;
7224 CastOp = SCast->getSCEVType();
7225 S = SCast->getOperand();
7228 using namespace llvm::PatternMatch;
7235 Condition =
nullptr;
7267 bool isRecognized() {
return Condition !=
nullptr; }
7270 SelectPattern StartPattern(*
this,
BitWidth, Start);
7271 if (!StartPattern.isRecognized())
7272 return ConstantRange::getFull(
BitWidth);
7274 SelectPattern StepPattern(*
this,
BitWidth, Step);
7275 if (!StepPattern.isRecognized())
7276 return ConstantRange::getFull(
BitWidth);
7278 if (StartPattern.Condition != StepPattern.Condition) {
7282 return ConstantRange::getFull(
BitWidth);
7293 const SCEV *TrueStart = this->
getConstant(StartPattern.TrueValue);
7294 const SCEV *TrueStep = this->
getConstant(StepPattern.TrueValue);
7295 const SCEV *FalseStart = this->
getConstant(StartPattern.FalseValue);
7296 const SCEV *FalseStep = this->
getConstant(StepPattern.FalseValue);
7298 ConstantRange TrueRange =
7299 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount);
7300 ConstantRange FalseRange =
7301 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount);
7323ScalarEvolution::getNonTrivialDefiningScopeBound(
const SCEV *S) {
7337 SmallPtrSet<const SCEV *, 16> Visited;
7339 auto pushOp = [&](
const SCEV *S) {
7340 if (!Visited.
insert(S).second)
7343 if (Visited.
size() > 30) {
7350 for (
const auto *S :
Ops)
7354 while (!Worklist.
empty()) {
7356 if (
auto *DefI = getNonTrivialDefiningScopeBound(S)) {
7357 if (!Bound || DT.dominates(Bound, DefI))
7364 return Bound ? Bound : &*F.getEntryBlock().begin();
7370 return getDefiningScopeBound(
Ops, Discard);
7373bool ScalarEvolution::isGuaranteedToTransferExecutionTo(
const Instruction *
A,
7375 if (
A->getParent() ==
B->getParent() &&
7380 auto *BLoop = LI.getLoopFor(
B->getParent());
7381 if (BLoop && BLoop->getHeader() ==
B->getParent() &&
7382 BLoop->getLoopPreheader() ==
A->getParent() &&
7384 A->getParent()->end()) &&
7391bool ScalarEvolution::isGuaranteedNotToBePoison(
const SCEV *
Op) {
7392 SCEVPoisonCollector PC(
true);
7394 return PC.MaybePoison.empty();
7397bool ScalarEvolution::isGuaranteedNotToCauseUB(
const SCEV *
Op) {
7403 return M && (!
isKnownNonZero(Op1) || !isGuaranteedNotToBePoison(Op1));
7407bool ScalarEvolution::isSCEVExprNeverPoison(
const Instruction *
I) {
7424 for (
const Use &
Op :
I->operands()) {
7430 auto *DefI = getDefiningScopeBound(SCEVOps);
7431 return isGuaranteedToTransferExecutionTo(DefI,
I);
7434bool ScalarEvolution::isAddRecNeverPoison(
const Instruction *
I,
const Loop *L) {
7436 if (isSCEVExprNeverPoison(
I))
7447 auto *ExitingBB =
L->getExitingBlock();
7451 SmallPtrSet<const Value *, 16> KnownPoison;
7460 while (!Worklist.
empty()) {
7463 for (
const Use &U :
Poison->uses()) {
7466 DT.dominates(PoisonUser->
getParent(), ExitingBB))
7470 if (KnownPoison.
insert(PoisonUser).second)
7478ScalarEvolution::LoopProperties
7479ScalarEvolution::getLoopProperties(
const Loop *L) {
7480 using LoopProperties = ScalarEvolution::LoopProperties;
7482 auto Itr = LoopPropertiesCache.find(L);
7483 if (Itr == LoopPropertiesCache.end()) {
7486 return !
SI->isSimple();
7496 return I->mayWriteToMemory();
7499 LoopProperties LP = {
true,
7502 for (
auto *BB :
L->getBlocks())
7503 for (
auto &
I : *BB) {
7505 LP.HasNoAbnormalExits =
false;
7506 if (HasSideEffects(&
I))
7507 LP.HasNoSideEffects =
false;
7508 if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
7512 auto InsertPair = LoopPropertiesCache.insert({
L, LP});
7513 assert(InsertPair.second &&
"We just checked!");
7514 Itr = InsertPair.first;
7527const SCEV *ScalarEvolution::createSCEVIter(
Value *V) {
7533 Stack.emplace_back(V,
true);
7534 Stack.emplace_back(V,
false);
7535 while (!Stack.empty()) {
7536 auto E = Stack.pop_back_val();
7537 Value *CurV = E.getPointer();
7543 const SCEV *CreatedSCEV =
nullptr;
7546 CreatedSCEV = createSCEV(CurV);
7551 CreatedSCEV = getOperandsToCreate(CurV,
Ops);
7555 insertValueToMap(CurV, CreatedSCEV);
7559 Stack.emplace_back(CurV,
true);
7561 Stack.emplace_back(
Op,
false);
7578 if (!DT.isReachableFromEntry(
I->getParent()))
7591 switch (BO->Opcode) {
7592 case Instruction::Add:
7593 case Instruction::Mul: {
7600 Ops.push_back(BO->
Op);
7604 Ops.push_back(BO->RHS);
7608 (BO->Opcode == Instruction::Add &&
7609 (NewBO->Opcode != Instruction::Add &&
7610 NewBO->Opcode != Instruction::Sub)) ||
7611 (BO->Opcode == Instruction::Mul &&
7612 NewBO->Opcode != Instruction::Mul)) {
7613 Ops.push_back(BO->LHS);
7618 if (BO->
Op && (BO->IsNSW || BO->IsNUW)) {
7621 Ops.push_back(BO->LHS);
7629 case Instruction::Sub:
7630 case Instruction::UDiv:
7631 case Instruction::URem:
7633 case Instruction::AShr:
7634 case Instruction::Shl:
7635 case Instruction::Xor:
7639 case Instruction::And:
7640 case Instruction::Or:
7644 case Instruction::LShr:
7651 Ops.push_back(BO->LHS);
7652 Ops.push_back(BO->RHS);
7656 switch (
U->getOpcode()) {
7657 case Instruction::Trunc:
7658 case Instruction::ZExt:
7659 case Instruction::SExt:
7660 case Instruction::PtrToInt:
7661 Ops.push_back(
U->getOperand(0));
7664 case Instruction::BitCast:
7666 Ops.push_back(
U->getOperand(0));
7671 case Instruction::SDiv:
7672 case Instruction::SRem:
7673 Ops.push_back(
U->getOperand(0));
7674 Ops.push_back(
U->getOperand(1));
7677 case Instruction::GetElementPtr:
7679 "GEP source element type must be sized");
7683 case Instruction::IntToPtr:
7686 case Instruction::PHI:
7690 case Instruction::Select: {
7692 auto CanSimplifyToUnknown = [
this,
U]() {
7710 if (CanSimplifyToUnknown())
7717 case Instruction::Call:
7718 case Instruction::Invoke:
7725 switch (
II->getIntrinsicID()) {
7726 case Intrinsic::abs:
7727 Ops.push_back(
II->getArgOperand(0));
7729 case Intrinsic::umax:
7730 case Intrinsic::umin:
7731 case Intrinsic::smax:
7732 case Intrinsic::smin:
7733 case Intrinsic::usub_sat:
7734 case Intrinsic::uadd_sat:
7735 Ops.push_back(
II->getArgOperand(0));
7736 Ops.push_back(
II->getArgOperand(1));
7738 case Intrinsic::start_loop_iterations:
7739 case Intrinsic::annotation:
7740 case Intrinsic::ptr_annotation:
7741 Ops.push_back(
II->getArgOperand(0));
7753const SCEV *ScalarEvolution::createSCEV(
Value *V) {
7762 if (!DT.isReachableFromEntry(
I->getParent()))
7777 switch (BO->Opcode) {
7778 case Instruction::Add: {
7804 if (BO->Opcode == Instruction::Sub)
7812 if (BO->Opcode == Instruction::Sub)
7819 if (!NewBO || (NewBO->Opcode != Instruction::Add &&
7820 NewBO->Opcode != Instruction::Sub)) {
7830 case Instruction::Mul: {
7851 if (!NewBO || NewBO->Opcode != Instruction::Mul) {
7860 case Instruction::UDiv:
7864 case Instruction::URem:
7868 case Instruction::Sub: {
7871 Flags = getNoWrapFlagsFromUB(BO->
Op);
7876 case Instruction::And:
7882 if (CI->isMinusOne())
7884 const APInt &
A = CI->getValue();
7890 unsigned LZ =
A.countl_zero();
7891 unsigned TZ =
A.countr_zero();
7896 APInt EffectiveMask =
7898 if ((LZ != 0 || TZ != 0) && !((~
A & ~Known.
Zero) & EffectiveMask)) {
7901 const SCEV *ShiftedLHS =
nullptr;
7905 unsigned MulZeros = OpC->getAPInt().countr_zero();
7906 unsigned GCD = std::min(MulZeros, TZ);
7911 auto *NewMul =
getMulExpr(MulOps, LHSMul->getNoWrapFlags());
7933 case Instruction::Or:
7942 case Instruction::Xor:
7945 if (CI->isMinusOne())
7954 if (LBO->getOpcode() == Instruction::And &&
7955 LCI->getValue() == CI->getValue())
7956 if (
const SCEVZeroExtendExpr *Z =
7959 const SCEV *Z0 =
Z->getOperand();
7966 if (CI->getValue().isMask(Z0TySize))
7972 APInt Trunc = CI->getValue().trunc(Z0TySize);
7981 case Instruction::Shl:
7999 auto MulFlags = getNoWrapFlagsFromUB(BO->
Op);
8007 ConstantInt *
X = ConstantInt::get(
8013 case Instruction::AShr:
8035 const SCEV *AddTruncateExpr =
nullptr;
8036 ConstantInt *ShlAmtCI =
nullptr;
8037 const SCEV *AddConstant =
nullptr;
8039 if (L &&
L->getOpcode() == Instruction::Add) {
8047 if (LShift && LShift->
getOpcode() == Instruction::Shl) {
8054 APInt AddOperand = AddOperandCI->
getValue().
ashr(AShrAmt);
8062 }
else if (L &&
L->getOpcode() == Instruction::Shl) {
8067 const SCEV *ShlOp0SCEV =
getSCEV(
L->getOperand(0));
8072 if (AddTruncateExpr && ShlAmtCI) {
8084 const APInt &ShlAmt = ShlAmtCI->
getValue();
8088 const SCEV *CompositeExpr =
8090 if (
L->getOpcode() != Instruction::Shl)
8091 CompositeExpr =
getAddExpr(CompositeExpr, AddConstant);
8100 switch (
U->getOpcode()) {
8101 case Instruction::Trunc:
8104 case Instruction::ZExt:
8107 case Instruction::SExt:
8117 if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
8118 Type *Ty =
U->getType();
8126 case Instruction::BitCast:
8132 case Instruction::PtrToInt: {
8135 Type *DstIntTy =
U->getType();
8143 case Instruction::IntToPtr:
8147 case Instruction::SDiv:
8154 case Instruction::SRem:
8161 case Instruction::GetElementPtr:
8164 case Instruction::PHI:
8167 case Instruction::Select:
8168 return createNodeForSelectOrPHI(U,
U->getOperand(0),
U->getOperand(1),
8171 case Instruction::Call:
8172 case Instruction::Invoke:
8177 switch (
II->getIntrinsicID()) {
8178 case Intrinsic::abs:
8182 case Intrinsic::umax:
8186 case Intrinsic::umin:
8190 case Intrinsic::smax:
8194 case Intrinsic::smin:
8198 case Intrinsic::usub_sat: {
8199 const SCEV *
X =
getSCEV(
II->getArgOperand(0));
8200 const SCEV *
Y =
getSCEV(
II->getArgOperand(1));
8204 case Intrinsic::uadd_sat: {
8205 const SCEV *
X =
getSCEV(
II->getArgOperand(0));
8206 const SCEV *
Y =
getSCEV(
II->getArgOperand(1));
8210 case Intrinsic::start_loop_iterations:
8211 case Intrinsic::annotation:
8212 case Intrinsic::ptr_annotation:
8216 case Intrinsic::vscale:
8236 auto *ExitCountType = ExitCount->
getType();
8237 assert(ExitCountType->isIntegerTy());
8239 1 + ExitCountType->getScalarSizeInBits());
8252 auto CanAddOneWithoutOverflow = [&]() {
8254 getRangeRef(ExitCount, RangeSignHint::HINT_RANGE_UNSIGNED);
8265 if (EvalSize > ExitCountSize && CanAddOneWithoutOverflow())
8295 assert(ExitingBlock &&
"Must pass a non-null exiting block!");
8296 assert(L->isLoopExiting(ExitingBlock) &&
8297 "Exiting block must actually branch out of the loop!");
8306 const auto *MaxExitCount =
8314 L->getExitingBlocks(ExitingBlocks);
8316 std::optional<unsigned> Res;
8317 for (
auto *ExitingBB : ExitingBlocks) {
8321 Res = std::gcd(*Res, Multiple);
8323 return Res.value_or(1);
8327 const SCEV *ExitCount) {
8357 assert(ExitingBlock &&
"Must pass a non-null exiting block!");
8358 assert(L->isLoopExiting(ExitingBlock) &&
8359 "Exiting block must actually branch out of the loop!");
8369 return getBackedgeTakenInfo(L).getExact(ExitingBlock,
this);
8371 return getBackedgeTakenInfo(L).getSymbolicMax(ExitingBlock,
this);
8373 return getBackedgeTakenInfo(L).getConstantMax(ExitingBlock,
this);
8383 return getPredicatedBackedgeTakenInfo(L).getExact(ExitingBlock,
this,
8386 return getPredicatedBackedgeTakenInfo(L).getSymbolicMax(ExitingBlock,
this,
8389 return getPredicatedBackedgeTakenInfo(L).getConstantMax(ExitingBlock,
this,
8397 return getPredicatedBackedgeTakenInfo(L).getExact(L,
this, &Preds);
8404 return getBackedgeTakenInfo(L).getExact(L,
this);
8406 return getBackedgeTakenInfo(L).getConstantMax(
this);
8408 return getBackedgeTakenInfo(L).getSymbolicMax(L,
this);
8415 return getPredicatedBackedgeTakenInfo(L).getSymbolicMax(L,
this, &Preds);
8420 return getPredicatedBackedgeTakenInfo(L).getConstantMax(
this, &Preds);
8424 return getBackedgeTakenInfo(L).isConstantMaxOrZero(
this);
8434 for (
PHINode &PN : Header->phis())
8435 if (Visited.
insert(&PN).second)
8439ScalarEvolution::BackedgeTakenInfo &
8440ScalarEvolution::getPredicatedBackedgeTakenInfo(
const Loop *L) {
8441 auto &BTI = getBackedgeTakenInfo(L);
8442 if (BTI.hasFullInfo())
8445 auto Pair = PredicatedBackedgeTakenCounts.try_emplace(L);
8448 return Pair.first->second;
8450 BackedgeTakenInfo
Result =
8451 computeBackedgeTakenCount(L,
true);
8453 return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result);
8456ScalarEvolution::BackedgeTakenInfo &
8457ScalarEvolution::getBackedgeTakenInfo(
const Loop *L) {
8463 std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator,
bool> Pair =
8464 BackedgeTakenCounts.try_emplace(L);
8466 return Pair.first->second;
8471 BackedgeTakenInfo
Result = computeBackedgeTakenCount(L);
8478 if (
Result.hasAnyInfo()) {
8481 auto LoopUsersIt = LoopUsers.find(L);
8482 if (LoopUsersIt != LoopUsers.end())
8484 forgetMemoizedResults(ToForget);
8487 for (PHINode &PN :
L->getHeader()->phis())
8488 ConstantEvolutionLoopExitValue.erase(&PN);
8496 return BackedgeTakenCounts.find(L)->second = std::move(Result);
8505 BackedgeTakenCounts.clear();
8506 PredicatedBackedgeTakenCounts.clear();
8507 BECountUsers.clear();
8508 LoopPropertiesCache.clear();
8509 ConstantEvolutionLoopExitValue.clear();
8510 ValueExprMap.clear();
8511 ValuesAtScopes.clear();
8512 ValuesAtScopesUsers.clear();
8513 LoopDispositions.clear();
8514 BlockDispositions.clear();
8515 UnsignedRanges.clear();
8516 SignedRanges.clear();
8517 ExprValueMap.clear();
8519 ConstantMultipleCache.clear();
8520 PredicatedSCEVRewrites.clear();
8522 FoldCacheUser.clear();
8524void ScalarEvolution::visitAndClearUsers(
8528 while (!Worklist.
empty()) {
8535 if (It != ValueExprMap.
end()) {
8536 eraseValueFromMap(It->first);
8539 ConstantEvolutionLoopExitValue.erase(PN);
8553 while (!LoopWorklist.
empty()) {
8557 forgetBackedgeTakenCounts(CurrL,
false);
8558 forgetBackedgeTakenCounts(CurrL,
true);
8561 for (
auto I = PredicatedSCEVRewrites.begin();
8562 I != PredicatedSCEVRewrites.end();) {
8563 std::pair<const SCEV *, const Loop *> Entry =
I->first;
8564 if (Entry.second == CurrL)
8565 PredicatedSCEVRewrites.erase(
I++);
8570 auto LoopUsersItr = LoopUsers.find(CurrL);
8571 if (LoopUsersItr != LoopUsers.end())
8576 visitAndClearUsers(Worklist, Visited, ToForget);
8578 LoopPropertiesCache.erase(CurrL);
8581 LoopWorklist.
append(CurrL->begin(), CurrL->end());
8583 forgetMemoizedResults(ToForget);
8600 visitAndClearUsers(Worklist, Visited, ToForget);
8602 forgetMemoizedResults(ToForget);
8614 struct InvalidationRootCollector {
8618 InvalidationRootCollector(
Loop *L) : L(L) {}
8620 bool follow(
const SCEV *S) {
8626 if (L->contains(AddRec->
getLoop()))
8631 bool isDone()
const {
return false; }
8634 InvalidationRootCollector
C(L);
8636 forgetMemoizedResults(
C.Roots);
8649 BlockDispositions.clear();
8650 LoopDispositions.clear();
8667 while (!Worklist.
empty()) {
8669 bool LoopDispoRemoved = LoopDispositions.erase(Curr);
8670 bool BlockDispoRemoved = BlockDispositions.erase(Curr);
8671 if (!LoopDispoRemoved && !BlockDispoRemoved)
8673 auto Users = SCEVUsers.find(Curr);
8674 if (
Users != SCEVUsers.end())
8687const SCEV *ScalarEvolution::BackedgeTakenInfo::getExact(
8691 if (!isComplete() || ExitNotTaken.
empty())
8702 for (
const auto &ENT : ExitNotTaken) {
8703 const SCEV *BECount = ENT.ExactNotTaken;
8706 "We should only have known counts for exiting blocks that dominate "
8709 Ops.push_back(BECount);
8714 assert((Preds || ENT.hasAlwaysTruePredicate()) &&
8715 "Predicate should be always true!");
8724const ScalarEvolution::ExitNotTakenInfo *
8725ScalarEvolution::BackedgeTakenInfo::getExitNotTaken(
8726 const BasicBlock *ExitingBlock,
8727 SmallVectorImpl<const SCEVPredicate *> *Predicates)
const {
8728 for (
const auto &ENT : ExitNotTaken)
8729 if (ENT.ExitingBlock == ExitingBlock) {
8730 if (ENT.hasAlwaysTruePredicate())
8732 else if (Predicates) {
8742const SCEV *ScalarEvolution::BackedgeTakenInfo::getConstantMax(
8744 SmallVectorImpl<const SCEVPredicate *> *Predicates)
const {
8745 if (!getConstantMax())
8748 for (
const auto &ENT : ExitNotTaken)
8749 if (!ENT.hasAlwaysTruePredicate()) {
8757 "No point in having a non-constant max backedge taken count!");
8758 return getConstantMax();
8761const SCEV *ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(
8763 SmallVectorImpl<const SCEVPredicate *> *Predicates) {
8771 for (
const auto &ENT : ExitNotTaken) {
8772 const SCEV *ExitCount = ENT.SymbolicMaxNotTaken;
8775 "We should only have known counts for exiting blocks that "
8781 assert((Predicates || ENT.hasAlwaysTruePredicate()) &&
8782 "Predicate should be always true!");
8785 if (ExitCounts.
empty())
8794bool ScalarEvolution::BackedgeTakenInfo::isConstantMaxOrZero(
8796 auto PredicateNotAlwaysTrue = [](
const ExitNotTakenInfo &ENT) {
8797 return !ENT.hasAlwaysTruePredicate();
8799 return MaxOrZero && !
any_of(ExitNotTaken, PredicateNotAlwaysTrue);
8815 this->ExactNotTaken = E = ConstantMaxNotTaken;
8816 this->SymbolicMaxNotTaken = SymbolicMaxNotTaken = ConstantMaxNotTaken;
8821 "Exact is not allowed to be less precise than Constant Max");
8824 "Exact is not allowed to be less precise than Symbolic Max");
8827 "Symbolic Max is not allowed to be less precise than Constant Max");
8830 "No point in having a non-constant max backedge taken count!");
8832 for (
const auto PredList : PredLists)
8833 for (
const auto *
P : PredList) {
8841 "Backedge count should be int");
8844 "Max backedge count should be int");
8857ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
8859 bool IsComplete,
const SCEV *ConstantMax,
bool MaxOrZero)
8860 : ConstantMax(ConstantMax), IsComplete(IsComplete), MaxOrZero(MaxOrZero) {
8861 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
8863 ExitNotTaken.reserve(ExitCounts.
size());
8864 std::transform(ExitCounts.
begin(), ExitCounts.
end(),
8865 std::back_inserter(ExitNotTaken),
8866 [&](
const EdgeExitInfo &EEI) {
8867 BasicBlock *ExitBB = EEI.first;
8868 const ExitLimit &EL = EEI.second;
8869 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken,
8870 EL.ConstantMaxNotTaken, EL.SymbolicMaxNotTaken,
8875 "No point in having a non-constant max backedge taken count!");
8879ScalarEvolution::BackedgeTakenInfo
8880ScalarEvolution::computeBackedgeTakenCount(
const Loop *L,
8881 bool AllowPredicates) {
8883 L->getExitingBlocks(ExitingBlocks);
8885 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
8888 bool CouldComputeBECount =
true;
8890 const SCEV *MustExitMaxBECount =
nullptr;
8891 const SCEV *MayExitMaxBECount =
nullptr;
8892 bool MustExitMaxOrZero =
false;
8893 bool IsOnlyExit = ExitingBlocks.
size() == 1;
8905 if (ExitIfTrue == CI->
isZero())
8909 ExitLimit EL = computeExitLimit(L, ExitBB, IsOnlyExit, AllowPredicates);
8911 assert((AllowPredicates || EL.Predicates.empty()) &&
8912 "Predicated exit limit when predicates are not allowed!");
8917 ++NumExitCountsComputed;
8921 CouldComputeBECount =
false;
8928 "Exact is known but symbolic isn't?");
8929 ++NumExitCountsNotComputed;
8944 DT.dominates(ExitBB, Latch)) {
8945 if (!MustExitMaxBECount) {
8946 MustExitMaxBECount = EL.ConstantMaxNotTaken;
8947 MustExitMaxOrZero = EL.MaxOrZero;
8950 EL.ConstantMaxNotTaken);
8954 MayExitMaxBECount = EL.ConstantMaxNotTaken;
8957 EL.ConstantMaxNotTaken);
8961 const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
8965 bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1);
8971 for (
const auto &Pair : ExitCounts) {
8973 BECountUsers[Pair.second.ExactNotTaken].insert({
L, AllowPredicates});
8975 BECountUsers[Pair.second.SymbolicMaxNotTaken].insert(
8976 {
L, AllowPredicates});
8978 return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount,
8979 MaxBECount, MaxOrZero);
8982ScalarEvolution::ExitLimit
8983ScalarEvolution::computeExitLimit(
const Loop *L, BasicBlock *ExitingBlock,
8984 bool IsOnlyExit,
bool AllowPredicates) {
8985 assert(
L->contains(ExitingBlock) &&
"Exit count for non-loop block?");
8989 if (!Latch || !DT.dominates(ExitingBlock, Latch))
8997 "It should have one successor in loop and one exit block!");
9008 if (!
L->contains(SBB)) {
9013 assert(Exit &&
"Exiting block must have at least one exit");
9014 return computeExitLimitFromSingleExitSwitch(
9015 L, SI, Exit, IsOnlyExit);
9022 const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
bool ControlsOnlyExit,
9023 bool AllowPredicates) {
9024 ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates);
9025 return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue,
9026 ControlsOnlyExit, AllowPredicates);
9029std::optional<ScalarEvolution::ExitLimit>
9030ScalarEvolution::ExitLimitCache::find(
const Loop *L,
Value *ExitCond,
9031 bool ExitIfTrue,
bool ControlsOnlyExit,
9032 bool AllowPredicates) {
9034 (void)this->ExitIfTrue;
9035 (void)this->AllowPredicates;
9037 assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
9038 this->AllowPredicates == AllowPredicates &&
9039 "Variance in assumed invariant key components!");
9040 auto Itr = TripCountMap.find({ExitCond, ControlsOnlyExit});
9041 if (Itr == TripCountMap.end())
9042 return std::nullopt;
9046void ScalarEvolution::ExitLimitCache::insert(
const Loop *L,
Value *ExitCond,
9048 bool ControlsOnlyExit,
9049 bool AllowPredicates,
9051 assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
9052 this->AllowPredicates == AllowPredicates &&
9053 "Variance in assumed invariant key components!");
9055 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsOnlyExit}, EL});
9056 assert(InsertResult.second &&
"Expected successful insertion!");
9061ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached(
9062 ExitLimitCacheTy &Cache,
const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
9063 bool ControlsOnlyExit,
bool AllowPredicates) {
9065 if (
auto MaybeEL = Cache.find(L, ExitCond, ExitIfTrue, ControlsOnlyExit,
9069 ExitLimit EL = computeExitLimitFromCondImpl(
9070 Cache, L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates);
9071 Cache.insert(L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates, EL);
9075ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl(
9076 ExitLimitCacheTy &Cache,
const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
9077 bool ControlsOnlyExit,
bool AllowPredicates) {
9079 if (
auto LimitFromBinOp = computeExitLimitFromCondFromBinOp(
9080 Cache, L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates))
9081 return *LimitFromBinOp;
9087 computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsOnlyExit);
9088 if (EL.hasFullInfo() || !AllowPredicates)
9092 return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue,
9112 const WithOverflowInst *WO;
9127 auto EL = computeExitLimitFromICmp(L, Pred,
LHS,
getConstant(NewRHSC),
9128 ControlsOnlyExit, AllowPredicates);
9129 if (EL.hasAnyInfo())
9134 return computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
9137std::optional<ScalarEvolution::ExitLimit>
9138ScalarEvolution::computeExitLimitFromCondFromBinOp(
9139 ExitLimitCacheTy &Cache,
const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
9140 bool ControlsOnlyExit,
bool AllowPredicates) {
9149 return std::nullopt;
9154 bool EitherMayExit = IsAnd ^ ExitIfTrue;
9155 ExitLimit EL0 = computeExitLimitFromCondCached(
9156 Cache, L, Op0, ExitIfTrue, ControlsOnlyExit && !EitherMayExit,
9158 ExitLimit EL1 = computeExitLimitFromCondCached(
9159 Cache, L, Op1, ExitIfTrue, ControlsOnlyExit && !EitherMayExit,
9163 const Constant *NeutralElement = ConstantInt::get(ExitCond->
getType(), IsAnd);
9165 return Op1 == NeutralElement ? EL0 : EL1;
9167 return Op0 == NeutralElement ? EL1 : EL0;
9172 if (EitherMayExit) {
9182 ConstantMaxBECount = EL1.ConstantMaxNotTaken;
9184 ConstantMaxBECount = EL0.ConstantMaxNotTaken;
9187 EL1.ConstantMaxNotTaken);
9189 SymbolicMaxBECount = EL1.SymbolicMaxNotTaken;
9191 SymbolicMaxBECount = EL0.SymbolicMaxNotTaken;
9194 EL0.SymbolicMaxNotTaken, EL1.SymbolicMaxNotTaken, UseSequentialUMin);
9198 if (EL0.ExactNotTaken == EL1.ExactNotTaken)
9199 BECount = EL0.ExactNotTaken;
9212 SymbolicMaxBECount =
9214 return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount,
false,
9218ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromICmp(
9219 const Loop *L, ICmpInst *ExitCond,
bool ExitIfTrue,
bool ControlsOnlyExit,
9220 bool AllowPredicates) {
9232 ExitLimit EL = computeExitLimitFromICmp(L, Pred,
LHS,
RHS, ControlsOnlyExit,
9234 if (EL.hasAnyInfo())
9237 auto *ExhaustiveCount =
9238 computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
9241 return ExhaustiveCount;
9243 return computeShiftCompareExitLimit(ExitCond->
getOperand(0),
9246ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromICmp(
9247 const Loop *L, CmpPredicate Pred,
const SCEV *
LHS,
const SCEV *
RHS,
9248 bool ControlsOnlyExit,
bool AllowPredicates) {
9273 ConstantRange CompRange =
9289 auto *InnerLHS =
LHS;
9291 InnerLHS = ZExt->getOperand();
9338 if (EL.hasAnyInfo())
9355 if (EL.hasAnyInfo())
return EL;
9387 ExitLimit EL = howManyLessThans(
LHS,
RHS, L, IsSigned, ControlsOnlyExit,
9389 if (EL.hasAnyInfo())
9405 ExitLimit EL = howManyGreaterThans(
LHS,
RHS, L, IsSigned, ControlsOnlyExit,
9407 if (EL.hasAnyInfo())
9418ScalarEvolution::ExitLimit
9419ScalarEvolution::computeExitLimitFromSingleExitSwitch(
const Loop *L,
9421 BasicBlock *ExitingBlock,
9422 bool ControlsOnlyExit) {
9423 assert(!
L->contains(ExitingBlock) &&
"Not an exiting block!");
9426 if (
Switch->getDefaultDest() == ExitingBlock)
9430 "Default case must not exit the loop!");
9436 if (EL.hasAnyInfo())
9448 "Evaluation of SCEV at constant didn't fold correctly?");
9452ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
9462 const BasicBlock *Predecessor =
L->getLoopPredecessor();
9468 auto MatchPositiveShift =
9471 using namespace PatternMatch;
9473 ConstantInt *ShiftAmt;
9475 OutOpCode = Instruction::LShr;
9477 OutOpCode = Instruction::AShr;
9479 OutOpCode = Instruction::Shl;
9494 auto MatchShiftRecurrence =
9496 std::optional<Instruction::BinaryOps> PostShiftOpCode;
9511 if (MatchPositiveShift(
LHS, V, OpC)) {
9512 PostShiftOpCode = OpC;
9518 if (!PNOut || PNOut->getParent() !=
L->getHeader())
9521 Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
9527 MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&
9534 (!PostShiftOpCode || *PostShiftOpCode == OpCodeOut);
9539 if (!MatchShiftRecurrence(
LHS, PN, OpCode))
9551 ConstantInt *StableValue =
nullptr;
9556 case Instruction::AShr: {
9564 StableValue = ConstantInt::get(Ty, 0);
9566 StableValue = ConstantInt::get(Ty, -1,
true);
9572 case Instruction::LShr:
9573 case Instruction::Shl:
9583 "Otherwise cannot be an operand to a branch instruction");
9585 if (
Result->isZeroValue()) {
9587 const SCEV *UpperBound =
9604 if (
const Function *
F = CI->getCalledFunction())
9613 if (!L->contains(
I))
return false;
9618 return L->getHeader() ==
I->getParent();
9694 if (!
I)
return nullptr;
9707 std::vector<Constant*> Operands(
I->getNumOperands());
9709 for (
unsigned i = 0, e =
I->getNumOperands(); i != e; ++i) {
9713 if (!Operands[i])
return nullptr;
9718 if (!
C)
return nullptr;
9740 if (IncomingVal != CurrentVal) {
9743 IncomingVal = CurrentVal;
9755ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
9758 auto [
I,
Inserted] = ConstantEvolutionLoopExitValue.try_emplace(PN);
9767 DenseMap<Instruction *, Constant *> CurrentIterVals;
9769 assert(PN->
getParent() == Header &&
"Can't evaluate PHI not in loop header!");
9775 for (PHINode &
PHI : Header->phis()) {
9777 CurrentIterVals[&
PHI] = StartCST;
9779 if (!CurrentIterVals.
count(PN))
9780 return RetVal =
nullptr;
9786 "BEs is <= MaxBruteForceIterations which is an 'unsigned'!");
9789 unsigned IterationNum = 0;
9791 for (; ; ++IterationNum) {
9792 if (IterationNum == NumIterations)
9793 return RetVal = CurrentIterVals[PN];
9797 DenseMap<Instruction *, Constant *> NextIterVals;
9802 NextIterVals[PN] = NextPHI;
9804 bool StoppedEvolving = NextPHI == CurrentIterVals[PN];
9810 for (
const auto &
I : CurrentIterVals) {
9812 if (!
PHI ||
PHI == PN ||
PHI->getParent() != Header)
continue;
9817 for (
const auto &
I : PHIsToCompute) {
9818 PHINode *
PHI =
I.first;
9821 Value *BEValue =
PHI->getIncomingValueForBlock(Latch);
9824 if (NextPHI !=
I.second)
9825 StoppedEvolving =
false;
9830 if (StoppedEvolving)
9831 return RetVal = CurrentIterVals[PN];
9833 CurrentIterVals.swap(NextIterVals);
9837const SCEV *ScalarEvolution::computeExitCountExhaustively(
const Loop *L,
9847 DenseMap<Instruction *, Constant *> CurrentIterVals;
9849 assert(PN->
getParent() == Header &&
"Can't evaluate PHI not in loop header!");
9852 assert(Latch &&
"Should follow from NumIncomingValues == 2!");
9854 for (PHINode &
PHI : Header->phis()) {
9856 CurrentIterVals[&
PHI] = StartCST;
9858 if (!CurrentIterVals.
count(PN))
9866 for (
unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){
9873 if (CondVal->getValue() == uint64_t(ExitWhen)) {
9874 ++NumBruteForceTripCountsComputed;
9879 DenseMap<Instruction *, Constant *> NextIterVals;
9885 for (
const auto &
I : CurrentIterVals) {
9887 if (!
PHI ||
PHI->getParent() != Header)
continue;
9890 for (PHINode *
PHI : PHIsToCompute) {
9892 if (NextPHI)
continue;
9894 Value *BEValue =
PHI->getIncomingValueForBlock(Latch);
9897 CurrentIterVals.
swap(NextIterVals);
9908 for (
auto &LS : Values)
9910 return LS.second ? LS.second : V;
9915 const SCEV *
C = computeSCEVAtScope(V, L);
9916 for (
auto &LS :
reverse(ValuesAtScopes[V]))
9917 if (LS.first == L) {
9920 ValuesAtScopesUsers[
C].push_back({L, V});
9931 switch (V->getSCEVType()) {
9964 assert(!
C->getType()->isPointerTy() &&
9965 "Can only have one pointer, and it must be last");
9992ScalarEvolution::getWithOperands(
const SCEV *S,
9993 SmallVectorImpl<const SCEV *> &NewOps) {
10027const SCEV *ScalarEvolution::computeSCEVAtScope(
const SCEV *V,
const Loop *L) {
10028 switch (
V->getSCEVType()) {
10039 for (
unsigned i = 0, e = AddRec->
getNumOperands(); i != e; ++i) {
10050 for (++i; i !=
e; ++i)
10094 for (
unsigned i = 0, e =
Ops.size(); i != e; ++i) {
10096 if (OpAtScope !=
Ops[i]) {
10104 for (++i; i !=
e; ++i) {
10109 return getWithOperands(V, NewOps);
10124 const Loop *CurrLoop = this->LI[
I->getParent()];
10135 if (BackedgeTakenCount->
isZero()) {
10136 Value *InitValue =
nullptr;
10137 bool MultipleInitValues =
false;
10143 MultipleInitValues =
true;
10148 if (!MultipleInitValues && InitValue)
10157 unsigned InLoopPred =
10168 getConstantEvolutionLoopExitValue(PN, BTCC->getAPInt(), CurrLoop);
10182 SmallVector<Constant *, 4> Operands;
10183 Operands.
reserve(
I->getNumOperands());
10184 bool MadeImprovement =
false;
10199 MadeImprovement |= OrigV != OpV;
10204 assert(
C->getType() ==
Op->getType() &&
"Type mismatch");
10209 if (!MadeImprovement)
10230const SCEV *ScalarEvolution::stripInjectiveFunctions(
const SCEV *S)
const {
10232 return stripInjectiveFunctions(ZExt->getOperand());
10234 return stripInjectiveFunctions(SExt->getOperand());
10252 assert(
A != 0 &&
"A must be non-zero.");
10268 if (MinTZ < Mult2 && L->getLoopPredecessor())
10270 if (MinTZ < Mult2) {
10293 APInt AD =
A.lshr(Mult2).trunc(BW - Mult2);
10313static std::optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>>
10319 LLVM_DEBUG(
dbgs() << __func__ <<
": analyzing quadratic addrec: "
10320 << *AddRec <<
'\n');
10323 if (!LC || !MC || !
NC) {
10324 LLVM_DEBUG(
dbgs() << __func__ <<
": coefficients are not constant\n");
10325 return std::nullopt;
10331 assert(!
N.isZero() &&
"This is not a quadratic addrec");
10339 N =
N.sext(NewWidth);
10340 M = M.sext(NewWidth);
10341 L = L.sext(NewWidth);
10358 <<
"x + " <<
C <<
", coeff bw: " << NewWidth
10359 <<
", multiplied by " <<
T <<
'\n');
10368 std::optional<APInt>
Y) {
10370 unsigned W = std::max(
X->getBitWidth(),
Y->getBitWidth());
10373 return XW.
slt(YW) ? *
X : *
Y;
10376 return std::nullopt;
10377 return X ? *
X : *
Y;
10394 return std::nullopt;
10395 unsigned W =
X->getBitWidth();
10415static std::optional<APInt>
10421 return std::nullopt;
10424 LLVM_DEBUG(
dbgs() << __func__ <<
": solving for unsigned overflow\n");
10425 std::optional<APInt>
X =
10428 return std::nullopt;
10433 return std::nullopt;
10448static std::optional<APInt>
10452 "Starting value of addrec should be 0");
10453 LLVM_DEBUG(
dbgs() << __func__ <<
": solving boundary crossing for range "
10454 <<
Range <<
", addrec " << *AddRec <<
'\n');
10458 "Addrec's initial value should be in range");
10464 return std::nullopt;
10474 auto SolveForBoundary =
10475 [&](
APInt Bound) -> std::pair<std::optional<APInt>,
bool> {
10478 LLVM_DEBUG(
dbgs() <<
"SolveQuadraticAddRecRange: checking boundary "
10479 << Bound <<
" (before multiplying by " << M <<
")\n");
10482 std::optional<APInt> SO;
10485 "signed overflow\n");
10489 "unsigned overflow\n");
10490 std::optional<APInt> UO =
10493 auto LeavesRange = [&] (
const APInt &
X) {
10510 return {std::nullopt,
false};
10515 if (LeavesRange(*Min))
10516 return { Min,
true };
10517 std::optional<APInt> Max = Min == SO ? UO : SO;
10518 if (LeavesRange(*Max))
10519 return { Max,
true };
10522 return {std::nullopt,
true};
10529 auto SL = SolveForBoundary(
Lower);
10530 auto SU = SolveForBoundary(
Upper);
10533 if (!SL.second || !SU.second)
10534 return std::nullopt;
10577ScalarEvolution::ExitLimit ScalarEvolution::howFarToZero(
const SCEV *V,
10579 bool ControlsOnlyExit,
10580 bool AllowPredicates) {
10591 if (
C->getValue()->isZero())
return C;
10595 const SCEVAddRecExpr *AddRec =
10598 if (!AddRec && AllowPredicates)
10604 if (!AddRec || AddRec->
getLoop() != L)
10615 return ExitLimit(R, R, R,
false, Predicates);
10673 const SCEV *DistancePlusOne =
getAddExpr(Distance, One);
10699 const SCEV *
Exact =
10707 const SCEV *SymbolicMax =
10709 return ExitLimit(
Exact, ConstantMax, SymbolicMax,
false, Predicates);
10718 AllowPredicates ? &Predicates :
nullptr, *
this, L);
10726 return ExitLimit(
E, M, S,
false, Predicates);
10729ScalarEvolution::ExitLimit
10730ScalarEvolution::howFarToNonZero(
const SCEV *V,
const Loop *L) {
10738 if (!
C->getValue()->isZero())
10748std::pair<const BasicBlock *, const BasicBlock *>
10749ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(
const BasicBlock *BB)
10760 if (
const Loop *L = LI.getLoopFor(BB))
10761 return {
L->getLoopPredecessor(),
L->getHeader()};
10763 return {
nullptr, BB};
10772 if (
A ==
B)
return true;
10787 if (ComputesEqualValues(AI, BI))
10795 const SCEV *Op0, *Op1;
10814 auto TrivialCase = [&](
bool TriviallyTrue) {
10823 const SCEV *NewLHS, *NewRHS;
10847 return TrivialCase(
false);
10848 return TrivialCase(
true);
10871 const APInt &
RA = RC->getAPInt();
10873 bool SimplifiedByConstantRange =
false;
10878 return TrivialCase(
true);
10880 return TrivialCase(
false);
10889 Changed = SimplifiedByConstantRange =
true;
10893 if (!SimplifiedByConstantRange) {
10910 assert(!
RA.isMinValue() &&
"Should have been caught earlier!");
10916 assert(!
RA.isMaxValue() &&
"Should have been caught earlier!");
10922 assert(!
RA.isMinSignedValue() &&
"Should have been caught earlier!");
10928 assert(!
RA.isMaxSignedValue() &&
"Should have been caught earlier!");
10940 return TrivialCase(
true);
10942 return TrivialCase(
false);
11047 auto NonRecursive = [
this, OrNegative](
const SCEV *S) {
11049 return C->getAPInt().isPowerOf2() ||
11050 (OrNegative &&
C->getAPInt().isNegatedPowerOf2());
11053 return isa<SCEVVScale>(S) && F.hasFnAttribute(Attribute::VScaleRange);
11056 if (NonRecursive(S))
11082 APInt C = Cst->getAPInt();
11083 return C.urem(M) == 0;
11091 const SCEV *SmodM =
11106 for (
auto *
A : Assumptions)
11107 if (
A->implies(
P, *
this))
11120std::pair<const SCEV *, const SCEV *>
11123 const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *
this);
11125 return { Start, Start };
11127 const SCEV *
PostInc = SCEVPostIncRewriter::rewrite(S, L, *
this);
11136 getUsedLoops(LHS, LoopsUsed);
11137 getUsedLoops(RHS, LoopsUsed);
11139 if (LoopsUsed.
empty())
11144 for (
const auto *L1 : LoopsUsed)
11145 for (
const auto *L2 : LoopsUsed)
11146 assert((DT.dominates(L1->getHeader(), L2->getHeader()) ||
11147 DT.dominates(L2->getHeader(), L1->getHeader())) &&
11148 "Domination relationship is not a linear order");
11178 SplitRHS.second) &&
11190 if (isKnownPredicateViaSplitting(Pred, LHS, RHS))
11194 return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS);
11204 return std::nullopt;
11219 if (KnownWithoutContext)
11220 return KnownWithoutContext;
11227 return std::nullopt;
11233 const Loop *L = LHS->getLoop();
11238std::optional<ScalarEvolution::MonotonicPredicateType>
11241 auto Result = getMonotonicPredicateTypeImpl(LHS, Pred);
11247 auto ResultSwapped =
11250 assert(*ResultSwapped != *Result &&
11251 "monotonicity should flip as we flip the predicate");
11258std::optional<ScalarEvolution::MonotonicPredicateType>
11259ScalarEvolution::getMonotonicPredicateTypeImpl(
const SCEVAddRecExpr *LHS,
11273 return std::nullopt;
11277 "Should be greater or less!");
11281 if (!LHS->hasNoUnsignedWrap())
11282 return std::nullopt;
11286 "Relational predicate is either signed or unsigned!");
11287 if (!
LHS->hasNoSignedWrap())
11288 return std::nullopt;
11290 const SCEV *Step =
LHS->getStepRecurrence(*
this);
11298 return std::nullopt;
11301std::optional<ScalarEvolution::LoopInvariantPredicate>
11308 return std::nullopt;
11315 if (!ArLHS || ArLHS->
getLoop() != L)
11316 return std::nullopt;
11320 return std::nullopt;
11346 return std::nullopt;
11383 return std::nullopt;
11386std::optional<ScalarEvolution::LoopInvariantPredicate>
11391 Pred, LHS, RHS, L, CtxI, MaxIter))
11399 for (
auto *
Op :
UMin->operands())
11401 Pred, LHS, RHS, L, CtxI,
Op))
11403 return std::nullopt;
11406std::optional<ScalarEvolution::LoopInvariantPredicate>
11421 return std::nullopt;
11428 if (!AR || AR->
getLoop() != L)
11429 return std::nullopt;
11433 return std::nullopt;
11439 if (Step != One && Step != MinusOne)
11440 return std::nullopt;
11446 return std::nullopt;
11452 return std::nullopt;
11460 if (Step == MinusOne)
11464 return std::nullopt;
11470bool ScalarEvolution::isKnownPredicateViaConstantRanges(
CmpPredicate Pred,
11476 auto CheckRange = [&](
bool IsSigned) {
11479 return RangeLHS.
icmp(Pred, RangeRHS);
11488 if (CheckRange(
true) || CheckRange(
false))
11497bool ScalarEvolution::isKnownPredicateViaNoOverflow(CmpPredicate Pred,
11504 auto MatchBinaryAddToConst = [
this](
const SCEV *
X,
const SCEV *
Y,
11505 APInt &OutC1, APInt &OutC2,
11507 const SCEV *XNonConstOp, *XConstOp;
11508 const SCEV *YNonConstOp, *YConstOp;
11512 if (!splitBinaryAdd(
X, XConstOp, XNonConstOp, XFlagsPresent)) {
11515 XFlagsPresent = ExpectedFlags;
11520 if (!splitBinaryAdd(
Y, YConstOp, YNonConstOp, YFlagsPresent)) {
11523 YFlagsPresent = ExpectedFlags;
11526 if (YNonConstOp != XNonConstOp)
11534 if ((YFlagsPresent & ExpectedFlags) != ExpectedFlags)
11537 (XFlagsPresent & ExpectedFlags) != ExpectedFlags) {
11597bool ScalarEvolution::isKnownPredicateViaSplitting(CmpPredicate Pred,
11619bool ScalarEvolution::isImpliedViaGuard(
const BasicBlock *BB, CmpPredicate Pred,
11620 const SCEV *
LHS,
const SCEV *
RHS) {
11625 return any_of(*BB, [&](
const Instruction &
I) {
11626 using namespace llvm::PatternMatch;
11631 isImpliedCond(Pred,
LHS,
RHS, Condition,
false);
11645 if (!L || !DT.isReachableFromEntry(L->getHeader()))
11650 "This cannot be done on broken IR!");
11653 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
11662 if (LoopContinuePredicate && LoopContinuePredicate->
isConditional() &&
11663 isImpliedCond(Pred, LHS, RHS,
11665 LoopContinuePredicate->
getSuccessor(0) != L->getHeader()))
11670 if (WalkingBEDominatingConds)
11676 const auto &BETakenInfo = getBackedgeTakenInfo(L);
11677 const SCEV *LatchBECount = BETakenInfo.getExact(Latch,
this);
11684 const SCEV *LoopCounter =
11692 for (
auto &AssumeVH : AC.assumptions()) {
11699 if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0),
false))
11703 if (isImpliedViaGuard(Latch, Pred, LHS, RHS))
11706 for (
DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()];
11707 DTN != HeaderDTN; DTN = DTN->getIDom()) {
11708 assert(DTN &&
"should reach the loop header before reaching the root!");
11711 if (isImpliedViaGuard(BB, Pred, LHS, RHS))
11719 if (!ContinuePredicate || !ContinuePredicate->
isConditional())
11733 assert(DT.dominates(DominatingEdge, Latch) &&
"should be!");
11735 if (isImpliedCond(Pred, LHS, RHS, Condition,
11749 if (!DT.isReachableFromEntry(BB))
11753 "This cannot be done on broken IR!");
11761 const bool ProvingStrictComparison =
11763 bool ProvedNonStrictComparison =
false;
11764 bool ProvedNonEquality =
false;
11767 if (!ProvedNonStrictComparison)
11768 ProvedNonStrictComparison = Fn(NonStrictPredicate);
11769 if (!ProvedNonEquality)
11771 if (ProvedNonStrictComparison && ProvedNonEquality)
11776 if (ProvingStrictComparison) {
11778 return isKnownViaNonRecursiveReasoning(
P, LHS, RHS);
11780 if (SplitAndProve(ProofFn))
11785 auto ProveViaCond = [&](
const Value *Condition,
bool Inverse) {
11787 if (isImpliedCond(Pred, LHS, RHS, Condition,
Inverse, CtxI))
11789 if (ProvingStrictComparison) {
11791 return isImpliedCond(
P, LHS, RHS, Condition,
Inverse, CtxI);
11793 if (SplitAndProve(ProofFn))
11802 const Loop *ContainingLoop = LI.getLoopFor(BB);
11804 if (ContainingLoop && ContainingLoop->
getHeader() == BB)
11808 for (std::pair<const BasicBlock *, const BasicBlock *> Pair(PredBB, BB);
11809 Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
11821 for (
auto &AssumeVH : AC.assumptions()) {
11825 if (!DT.dominates(CI, BB))
11828 if (ProveViaCond(CI->getArgOperand(0),
false))
11834 F.getParent(), Intrinsic::experimental_guard);
11836 for (
const auto *GU : GuardDecl->users())
11838 if (Guard->getFunction() == BB->
getParent() && DT.dominates(Guard, BB))
11839 if (ProveViaCond(Guard->getArgOperand(0),
false))
11854 "LHS is not available at Loop Entry");
11856 "RHS is not available at Loop Entry");
11858 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
11869 if (FoundCondValue ==
11873 if (!PendingLoopPredicates.insert(FoundCondValue).second)
11877 [&]() { PendingLoopPredicates.erase(FoundCondValue); });
11880 const Value *Op0, *Op1;
11883 return isImpliedCond(Pred,
LHS,
RHS, Op0,
Inverse, CtxI) ||
11887 return isImpliedCond(Pred,
LHS,
RHS, Op0, Inverse, CtxI) ||
11888 isImpliedCond(Pred,
LHS,
RHS, Op1, Inverse, CtxI);
11892 if (!ICI)
return false;
11896 CmpPredicate FoundPred;
11905 return isImpliedCond(Pred,
LHS,
RHS, FoundPred, FoundLHS, FoundRHS, CtxI);
11908bool ScalarEvolution::isImpliedCond(CmpPredicate Pred,
const SCEV *
LHS,
11909 const SCEV *
RHS, CmpPredicate FoundPred,
11910 const SCEV *FoundLHS,
const SCEV *FoundRHS,
11911 const Instruction *CtxI) {
11921 auto *WideType = FoundLHS->
getType();
11933 TruncFoundLHS, TruncFoundRHS, CtxI))
11959 return isImpliedCondBalancedTypes(Pred,
LHS,
RHS, FoundPred, FoundLHS,
11963bool ScalarEvolution::isImpliedCondBalancedTypes(
11964 CmpPredicate Pred,
const SCEV *
LHS,
const SCEV *
RHS, CmpPredicate FoundPred,
11965 const SCEV *FoundLHS,
const SCEV *FoundRHS,
const Instruction *CtxI) {
11968 "Types should be balanced!");
11975 if (FoundLHS == FoundRHS)
11979 if (
LHS == FoundRHS ||
RHS == FoundLHS) {
11991 return isImpliedCondOperands(*
P,
LHS,
RHS, FoundLHS, FoundRHS, CtxI);
12008 LHS, FoundLHS, FoundRHS, CtxI);
12010 return isImpliedCondOperands(*
P,
LHS,
RHS, FoundRHS, FoundLHS, CtxI);
12032 assert(P1 != P2 &&
"Handled earlier!");
12036 if (IsSignFlippedPredicate(Pred, FoundPred)) {
12040 return isImpliedCondOperands(Pred,
LHS,
RHS, FoundLHS, FoundRHS, CtxI);
12043 CmpPredicate CanonicalPred = Pred, CanonicalFoundPred = FoundPred;
12044 const SCEV *CanonicalLHS =
LHS, *CanonicalRHS =
RHS,
12045 *CanonicalFoundLHS = FoundLHS, *CanonicalFoundRHS = FoundRHS;
12050 std::swap(CanonicalFoundLHS, CanonicalFoundRHS);
12061 return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS,
12062 CanonicalRHS, CanonicalFoundLHS,
12063 CanonicalFoundRHS);
12068 return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS,
12069 CanonicalRHS, CanonicalFoundLHS,
12070 CanonicalFoundRHS);
12077 const SCEVConstant *
C =
nullptr;
12078 const SCEV *
V =
nullptr;
12096 if (Min ==
C->getAPInt()) {
12101 APInt SharperMin = Min + 1;
12104 case ICmpInst::ICMP_SGE:
12105 case ICmpInst::ICMP_UGE:
12108 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(SharperMin),
12113 case ICmpInst::ICMP_SGT:
12114 case ICmpInst::ICMP_UGT:
12124 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min), CtxI))
12129 case ICmpInst::ICMP_SLE:
12130 case ICmpInst::ICMP_ULE:
12131 if (isImpliedCondOperands(ICmpInst::getSwappedCmpPredicate(Pred), RHS,
12132 LHS, V, getConstant(SharperMin), CtxI))
12136 case ICmpInst::ICMP_SLT:
12137 case ICmpInst::ICMP_ULT:
12138 if (isImpliedCondOperands(ICmpInst::getSwappedCmpPredicate(Pred), RHS,
12139 LHS, V, getConstant(Min), CtxI))
12153 if (isImpliedCondOperands(Pred,
LHS,
RHS, FoundLHS, FoundRHS, CtxI))
12157 if (isImpliedCondOperands(FoundPred,
LHS,
RHS, FoundLHS, FoundRHS, CtxI))
12160 if (isImpliedCondOperandsViaRanges(Pred,
LHS,
RHS, FoundPred, FoundLHS, FoundRHS))
12167bool ScalarEvolution::splitBinaryAdd(
const SCEV *Expr,
12168 const SCEV *&L,
const SCEV *&R,
12177std::optional<APInt>
12184 APInt DiffMul(BW, 1);
12187 for (
unsigned I = 0;
I < 8; ++
I) {
12196 if (LAR->getLoop() != MAR->getLoop())
12197 return std::nullopt;
12201 if (!LAR->isAffine() || !MAR->isAffine())
12202 return std::nullopt;
12204 if (LAR->getStepRecurrence(*
this) != MAR->getStepRecurrence(*
this))
12205 return std::nullopt;
12207 Less = LAR->getStart();
12208 More = MAR->getStart();
12213 auto MatchConstMul =
12214 [](
const SCEV *S) -> std::optional<std::pair<const SCEV *, APInt>> {
12219 return std::nullopt;
12221 if (
auto MatchedMore = MatchConstMul(More)) {
12222 if (
auto MatchedLess = MatchConstMul(
Less)) {
12223 if (MatchedMore->second == MatchedLess->second) {
12224 More = MatchedMore->first;
12225 Less = MatchedLess->first;
12226 DiffMul *= MatchedMore->second;
12237 Diff +=
C->getAPInt() * DiffMul;
12240 Diff -=
C->getAPInt() * DiffMul;
12243 Multiplicity[S] +=
Mul;
12245 auto Decompose = [&](
const SCEV *S,
int Mul) {
12252 Decompose(More, 1);
12253 Decompose(
Less, -1);
12257 const SCEV *NewMore =
nullptr, *NewLess =
nullptr;
12258 for (
const auto &[S,
Mul] : Multiplicity) {
12263 return std::nullopt;
12265 }
else if (
Mul == -1) {
12267 return std::nullopt;
12270 return std::nullopt;
12274 if (NewMore == More || NewLess ==
Less)
12275 return std::nullopt;
12281 if (!More && !
Less)
12285 if (!More || !
Less)
12286 return std::nullopt;
12290 return std::nullopt;
12293bool ScalarEvolution::isImpliedCondOperandsViaAddRecStart(
12317 if (!L->contains(ContextBB) || !DT.
dominates(ContextBB, L->getLoopLatch()))
12328 if (!L->contains(ContextBB) || !DT.
dominates(ContextBB, L->getLoopLatch()))
12338bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow(CmpPredicate Pred,
12341 const SCEV *FoundLHS,
12342 const SCEV *FoundRHS) {
12351 if (!AddRecFoundLHS)
12358 const Loop *
L = AddRecFoundLHS->getLoop();
12359 if (L != AddRecLHS->getLoop())
12398 if (!RDiff || *LDiff != *RDiff)
12401 if (LDiff->isMinValue())
12404 APInt FoundRHSLimit;
12407 FoundRHSLimit = -(*RDiff);
12419bool ScalarEvolution::isImpliedViaMerge(CmpPredicate Pred,
const SCEV *
LHS,
12420 const SCEV *
RHS,
const SCEV *FoundLHS,
12421 const SCEV *FoundRHS,
unsigned Depth) {
12422 const PHINode *LPhi =
nullptr, *RPhi =
nullptr;
12426 bool Erased = PendingMerges.erase(LPhi);
12427 assert(Erased &&
"Failed to erase LPhi!");
12431 bool Erased = PendingMerges.erase(RPhi);
12432 assert(Erased &&
"Failed to erase RPhi!");
12440 if (!PendingMerges.insert(Phi).second)
12454 if (!PendingMerges.insert(Phi).second)
12460 if (!LPhi && !RPhi)
12471 assert(LPhi &&
"LPhi should definitely be a SCEVUnknown Phi!");
12475 auto ProvedEasily = [&](
const SCEV *
S1,
const SCEV *S2) {
12476 return isKnownViaNonRecursiveReasoning(Pred,
S1, S2) ||
12477 isImpliedCondOperandsViaRanges(Pred,
S1, S2, Pred, FoundLHS, FoundRHS) ||
12478 isImpliedViaOperations(Pred,
S1, S2, FoundLHS, FoundRHS,
Depth);
12481 if (RPhi && RPhi->getParent() == LBB) {
12488 const SCEV *
R =
getSCEV(RPhi->getIncomingValueForBlock(IncBB));
12489 if (!ProvedEasily(L, R))
12500 auto *RLoop = RAR->
getLoop();
12501 auto *Predecessor = RLoop->getLoopPredecessor();
12502 assert(Predecessor &&
"Loop with AddRec with no predecessor?");
12504 if (!ProvedEasily(L1, RAR->
getStart()))
12506 auto *Latch = RLoop->getLoopLatch();
12507 assert(Latch &&
"Loop with AddRec with no latch?");
12528 if (
auto *Loop = LI.getLoopFor(LBB))
12531 if (!ProvedEasily(L,
RHS))
12538bool ScalarEvolution::isImpliedCondOperandsViaShift(CmpPredicate Pred,
12541 const SCEV *FoundLHS,
12542 const SCEV *FoundRHS) {
12545 if (
RHS == FoundRHS) {
12550 if (
LHS != FoundLHS)
12557 Value *Shiftee, *ShiftValue;
12559 using namespace PatternMatch;
12560 if (
match(SUFoundRHS->getValue(),
12562 auto *ShifteeS =
getSCEV(Shiftee);
12580bool ScalarEvolution::isImpliedCondOperands(CmpPredicate Pred,
const SCEV *
LHS,
12582 const SCEV *FoundLHS,
12583 const SCEV *FoundRHS,
12584 const Instruction *CtxI) {
12585 return isImpliedCondOperandsViaRanges(Pred,
LHS,
RHS, Pred, FoundLHS,
12587 isImpliedCondOperandsViaNoOverflow(Pred,
LHS,
RHS, FoundLHS,
12589 isImpliedCondOperandsViaShift(Pred,
LHS,
RHS, FoundLHS, FoundRHS) ||
12590 isImpliedCondOperandsViaAddRecStart(Pred,
LHS,
RHS, FoundLHS, FoundRHS,
12592 isImpliedCondOperandsHelper(Pred,
LHS,
RHS, FoundLHS, FoundRHS);
12596template <
typename MinMaxExprType>
12598 const SCEV *Candidate) {
12603 return is_contained(MinMaxExpr->operands(), Candidate);
12616 const SCEV *LStart, *RStart, *Step;
12666bool ScalarEvolution::isImpliedViaOperations(CmpPredicate Pred,
const SCEV *
LHS,
12668 const SCEV *FoundLHS,
12669 const SCEV *FoundRHS,
12673 "LHS and RHS have different sizes?");
12676 "FoundLHS and FoundRHS have different sizes?");
12710 auto GetOpFromSExt = [&](
const SCEV *S) {
12712 return Ext->getOperand();
12719 auto *OrigLHS =
LHS;
12720 auto *OrigFoundLHS = FoundLHS;
12721 LHS = GetOpFromSExt(
LHS);
12722 FoundLHS = GetOpFromSExt(FoundLHS);
12725 auto IsSGTViaContext = [&](
const SCEV *
S1,
const SCEV *S2) {
12728 FoundRHS,
Depth + 1);
12741 if (!LHSAddExpr->hasNoSignedWrap())
12744 auto *LL = LHSAddExpr->getOperand(0);
12745 auto *LR = LHSAddExpr->getOperand(1);
12749 auto IsSumGreaterThanRHS = [&](
const SCEV *
S1,
const SCEV *S2) {
12750 return IsSGTViaContext(
S1, MinusOne) && IsSGTViaContext(S2,
RHS);
12755 if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
12761 using namespace llvm::PatternMatch;
12780 if (!Numerator || Numerator->getType() != FoundLHS->
getType())
12788 auto *DTy = Denominator->getType();
12789 auto *FRHSTy = FoundRHS->
getType();
12790 if (DTy->isPointerTy() != FRHSTy->isPointerTy())
12809 IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
12820 auto *NegDenomMinusOne =
getMinusSCEV(MinusOne, DenominatorExt);
12822 IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
12830 if (isImpliedViaMerge(Pred, OrigLHS,
RHS, OrigFoundLHS, FoundRHS,
Depth + 1))
12863bool ScalarEvolution::isKnownViaNonRecursiveReasoning(CmpPredicate Pred,
12867 isKnownPredicateViaConstantRanges(Pred,
LHS,
RHS) ||
12870 isKnownPredicateViaNoOverflow(Pred,
LHS,
RHS);
12873bool ScalarEvolution::isImpliedCondOperandsHelper(CmpPredicate Pred,
12876 const SCEV *FoundLHS,
12877 const SCEV *FoundRHS) {
12913 if (isImpliedViaOperations(Pred,
LHS,
RHS, FoundLHS, FoundRHS))
12919bool ScalarEvolution::isImpliedCondOperandsViaRanges(
12920 CmpPredicate Pred,
const SCEV *
LHS,
const SCEV *
RHS, CmpPredicate FoundPred,
12921 const SCEV *FoundLHS,
const SCEV *FoundRHS) {
12935 ConstantRange FoundLHSRange =
12939 ConstantRange LHSRange = FoundLHSRange.
add(ConstantRange(*Addend));
12946 return LHSRange.
icmp(Pred, ConstRHS);
12949bool ScalarEvolution::canIVOverflowOnLT(
const SCEV *
RHS,
const SCEV *Stride,
12962 return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS);
12970 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS);
12973bool ScalarEvolution::canIVOverflowOnGT(
const SCEV *
RHS,
const SCEV *Stride,
12985 return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS);
12993 return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS);
13005const SCEV *ScalarEvolution::computeMaxBECountForLT(
const SCEV *Start,
13006 const SCEV *Stride,
13037 APInt Limit = MaxValue - (StrideForMaxBECount - 1);
13048 :
APIntOps::umax(MaxEnd, MinStart);
13055ScalarEvolution::howManyLessThans(
const SCEV *
LHS,
const SCEV *
RHS,
13056 const Loop *L,
bool IsSigned,
13057 bool ControlsOnlyExit,
bool AllowPredicates) {
13061 bool PredicatedIV =
false;
13066 auto canProveNUW = [&]() {
13069 if (!ControlsOnlyExit)
13090 Limit = Limit.
zext(OuterBitWidth);
13102 Type *Ty = ZExt->getType();
13113 if (!
IV && AllowPredicates) {
13118 PredicatedIV =
true;
13122 if (!
IV ||
IV->getLoop() != L || !
IV->isAffine())
13136 bool NoWrap = ControlsOnlyExit &&
IV->getNoWrapFlags(WrapType);
13139 const SCEV *Stride =
IV->getStepRecurrence(*
this);
13144 if (!PositiveStride) {
13196 auto wouldZeroStrideBeUB = [&]() {
13208 if (!wouldZeroStrideBeUB()) {
13212 }
else if (!NoWrap) {
13215 if (canIVOverflowOnLT(
RHS, Stride, IsSigned))
13228 const SCEV *
Start =
IV->getStart();
13234 const SCEV *OrigStart =
Start;
13235 const SCEV *OrigRHS =
RHS;
13236 if (
Start->getType()->isPointerTy()) {
13247 const SCEV *End =
nullptr, *BECount =
nullptr,
13248 *BECountIfBackedgeTaken =
nullptr;
13251 if (PositiveStride && RHSAddRec !=
nullptr && RHSAddRec->getLoop() == L &&
13252 RHSAddRec->getNoWrapFlags()) {
13265 const SCEV *RHSStart = RHSAddRec->getStart();
13266 const SCEV *RHSStride = RHSAddRec->getStepRecurrence(*
this);
13278 const SCEV *Denominator =
getMinusSCEV(Stride, RHSStride);
13287 BECountIfBackedgeTaken =
13292 if (BECount ==
nullptr) {
13297 const SCEV *MaxBECount = computeMaxBECountForLT(
13300 MaxBECount,
false , Predicates);
13307 auto *OrigStartMinusStride =
getMinusSCEV(OrigStart, Stride);
13334 const SCEV *Numerator =
13340 auto canProveRHSGreaterThanEqualStart = [&]() {
13359 auto *StartMinusOne =
13366 if (canProveRHSGreaterThanEqualStart()) {
13381 BECountIfBackedgeTaken =
13397 bool MayAddOverflow = [&] {
13443 if (Start == Stride || Start ==
getMinusSCEV(Stride, One)) {
13457 if (!MayAddOverflow) {
13469 const SCEV *ConstantMaxBECount;
13470 bool MaxOrZero =
false;
13472 ConstantMaxBECount = BECount;
13473 }
else if (BECountIfBackedgeTaken &&
13478 ConstantMaxBECount = BECountIfBackedgeTaken;
13481 ConstantMaxBECount = computeMaxBECountForLT(
13489 const SCEV *SymbolicMaxBECount =
13491 return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount, MaxOrZero,
13495ScalarEvolution::ExitLimit ScalarEvolution::howManyGreaterThans(
13496 const SCEV *
LHS,
const SCEV *
RHS,
const Loop *L,
bool IsSigned,
13497 bool ControlsOnlyExit,
bool AllowPredicates) {
13504 if (!
IV && AllowPredicates)
13511 if (!
IV ||
IV->getLoop() != L || !
IV->isAffine())
13515 bool NoWrap = ControlsOnlyExit &&
IV->getNoWrapFlags(WrapType);
13528 if (!Stride->
isOne() && !NoWrap)
13529 if (canIVOverflowOnGT(
RHS, Stride, IsSigned))
13532 const SCEV *
Start =
IV->getStart();
13533 const SCEV *End =
RHS;
13544 if (
Start->getType()->isPointerTy()) {
13579 const SCEV *ConstantMaxBECount =
13586 ConstantMaxBECount = BECount;
13587 const SCEV *SymbolicMaxBECount =
13590 return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount,
false,
13596 if (
Range.isFullSet())
13601 if (!SC->getValue()->isZero()) {
13607 return ShiftedAddRec->getNumIterationsInRange(
13608 Range.subtract(SC->getAPInt()), SE);
13639 APInt ExitVal = (End +
A).udiv(
A);
13652 ConstantInt::get(SE.
getContext(), ExitVal - 1), SE)->getValue()) &&
13653 "Linear scev computation is off in a bad way!");
13684 assert(!
Last->isZero() &&
"Recurrency with zero step?");
13709 Ty = Store->getValueOperand()->getType();
13711 Ty = Load->getType();
13724 assert(SE &&
"SCEVCallbackVH called with a null ScalarEvolution!");
13726 SE->ConstantEvolutionLoopExitValue.erase(PN);
13727 SE->eraseValueFromMap(getValPtr());
13731void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(
Value *V) {
13732 assert(SE &&
"SCEVCallbackVH called with a null ScalarEvolution!");
13742 : CallbackVH(
V), SE(se) {}
13751 : F(F), DL(F.
getDataLayout()), TLI(TLI), AC(AC), DT(DT), LI(LI),
13753 LoopDispositions(64), BlockDispositions(64) {
13765 F.getParent(), Intrinsic::experimental_guard);
13766 HasGuards = GuardDecl && !GuardDecl->use_empty();
13770 : F(Arg.F), DL(Arg.DL), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC),
13771 DT(Arg.DT), LI(Arg.LI), CouldNotCompute(
std::
move(Arg.CouldNotCompute)),
13772 ValueExprMap(
std::
move(Arg.ValueExprMap)),
13773 PendingLoopPredicates(
std::
move(Arg.PendingLoopPredicates)),
13774 PendingPhiRanges(
std::
move(Arg.PendingPhiRanges)),
13775 PendingMerges(
std::
move(Arg.PendingMerges)),
13776 ConstantMultipleCache(
std::
move(Arg.ConstantMultipleCache)),
13777 BackedgeTakenCounts(
std::
move(Arg.BackedgeTakenCounts)),
13778 PredicatedBackedgeTakenCounts(
13779 std::
move(Arg.PredicatedBackedgeTakenCounts)),
13780 BECountUsers(
std::
move(Arg.BECountUsers)),
13781 ConstantEvolutionLoopExitValue(
13782 std::
move(Arg.ConstantEvolutionLoopExitValue)),
13783 ValuesAtScopes(
std::
move(Arg.ValuesAtScopes)),
13784 ValuesAtScopesUsers(
std::
move(Arg.ValuesAtScopesUsers)),
13785 LoopDispositions(
std::
move(Arg.LoopDispositions)),
13786 LoopPropertiesCache(
std::
move(Arg.LoopPropertiesCache)),
13787 BlockDispositions(
std::
move(Arg.BlockDispositions)),
13788 SCEVUsers(
std::
move(Arg.SCEVUsers)),
13789 UnsignedRanges(
std::
move(Arg.UnsignedRanges)),
13790 SignedRanges(
std::
move(Arg.SignedRanges)),
13791 UniqueSCEVs(
std::
move(Arg.UniqueSCEVs)),
13792 UniquePreds(
std::
move(Arg.UniquePreds)),
13793 SCEVAllocator(
std::
move(Arg.SCEVAllocator)),
13794 LoopUsers(
std::
move(Arg.LoopUsers)),
13795 PredicatedSCEVRewrites(
std::
move(Arg.PredicatedSCEVRewrites)),
13796 FirstUnknown(Arg.FirstUnknown) {
13797 Arg.FirstUnknown =
nullptr;
13806 Tmp->~SCEVUnknown();
13808 FirstUnknown =
nullptr;
13810 ExprValueMap.clear();
13811 ValueExprMap.clear();
13813 BackedgeTakenCounts.clear();
13814 PredicatedBackedgeTakenCounts.clear();
13816 assert(PendingLoopPredicates.empty() &&
"isImpliedCond garbage");
13817 assert(PendingPhiRanges.empty() &&
"getRangeRef garbage");
13818 assert(PendingMerges.empty() &&
"isImpliedViaMerge garbage");
13819 assert(!WalkingBEDominatingConds &&
"isLoopBackedgeGuardedByCond garbage!");
13820 assert(!ProvingSplitPredicate &&
"ProvingSplitPredicate garbage!");
13842 L->getHeader()->printAsOperand(OS,
false);
13846 L->getExitingBlocks(ExitingBlocks);
13847 if (ExitingBlocks.
size() != 1)
13848 OS <<
"<multiple exits> ";
13852 OS <<
"backedge-taken count is ";
13855 OS <<
"Unpredictable backedge-taken count.";
13858 if (ExitingBlocks.
size() > 1)
13859 for (
BasicBlock *ExitingBlock : ExitingBlocks) {
13860 OS <<
" exit count for " << ExitingBlock->
getName() <<
": ";
13868 OS <<
"\n predicated exit count for " << ExitingBlock->
getName()
13871 OS <<
"\n Predicates:\n";
13872 for (
const auto *
P : Predicates)
13880 L->getHeader()->printAsOperand(OS,
false);
13885 OS <<
"constant max backedge-taken count is ";
13888 OS <<
", actual taken count either this or zero.";
13890 OS <<
"Unpredictable constant max backedge-taken count. ";
13895 L->getHeader()->printAsOperand(OS,
false);
13900 OS <<
"symbolic max backedge-taken count is ";
13903 OS <<
", actual taken count either this or zero.";
13905 OS <<
"Unpredictable symbolic max backedge-taken count. ";
13909 if (ExitingBlocks.
size() > 1)
13910 for (
BasicBlock *ExitingBlock : ExitingBlocks) {
13911 OS <<
" symbolic max exit count for " << ExitingBlock->
getName() <<
": ";
13921 OS <<
"\n predicated symbolic max exit count for "
13922 << ExitingBlock->
getName() <<
": ";
13924 OS <<
"\n Predicates:\n";
13925 for (
const auto *
P : Predicates)
13935 assert(!Preds.
empty() &&
"Different predicated BTC, but no predicates");
13937 L->getHeader()->printAsOperand(OS,
false);
13940 OS <<
"Predicated backedge-taken count is ";
13943 OS <<
"Unpredictable predicated backedge-taken count.";
13945 OS <<
" Predicates:\n";
13946 for (
const auto *
P : Preds)
13951 auto *PredConstantMax =
13953 if (PredConstantMax != ConstantBTC) {
13955 "different predicated constant max BTC but no predicates");
13957 L->getHeader()->printAsOperand(OS,
false);
13960 OS <<
"Predicated constant max backedge-taken count is ";
13963 OS <<
"Unpredictable predicated constant max backedge-taken count.";
13965 OS <<
" Predicates:\n";
13966 for (
const auto *
P : Preds)
13971 auto *PredSymbolicMax =
13973 if (SymbolicBTC != PredSymbolicMax) {
13975 "Different predicated symbolic max BTC, but no predicates");
13977 L->getHeader()->printAsOperand(OS,
false);
13980 OS <<
"Predicated symbolic max backedge-taken count is ";
13983 OS <<
"Unpredictable predicated symbolic max backedge-taken count.";
13985 OS <<
" Predicates:\n";
13986 for (
const auto *
P : Preds)
13992 L->getHeader()->printAsOperand(OS,
false);
14016 OS <<
"Computable";
14026 OS <<
"DoesNotDominate";
14032 OS <<
"ProperlyDominates";
14049 OS <<
"Classifying expressions for: ";
14050 F.printAsOperand(OS,
false);
14065 const Loop *L = LI.getLoopFor(
I.getParent());
14080 OS <<
"\t\t" "Exits: ";
14083 OS <<
"<<Unknown>>";
14089 for (
const auto *Iter = L; Iter; Iter = Iter->getParentLoop()) {
14091 Iter->getHeader()->printAsOperand(OS,
false);
14099 InnerL->getHeader()->printAsOperand(OS,
false);
14110 OS <<
"Determining loop execution counts for: ";
14111 F.printAsOperand(OS,
false);
14119 auto &Values = LoopDispositions[S];
14120 for (
auto &V : Values) {
14121 if (V.getPointer() == L)
14126 auto &Values2 = LoopDispositions[S];
14128 if (V.getPointer() == L) {
14137ScalarEvolution::computeLoopDisposition(
const SCEV *S,
const Loop *L) {
14156 assert(!L->contains(AR->
getLoop()) &&
"Containing loop's header does not"
14157 " dominate the contained loop's header?");
14184 bool HasVarying =
false;
14218 auto &Values = BlockDispositions[S];
14219 for (
auto &V : Values) {
14220 if (V.getPointer() == BB)
14225 auto &Values2 = BlockDispositions[S];
14227 if (V.getPointer() == BB) {
14236ScalarEvolution::computeBlockDisposition(
const SCEV *S,
const BasicBlock *BB) {
14265 bool Proper =
true;
14276 if (Instruction *
I =
14278 if (
I->getParent() == BB)
14280 if (DT.properlyDominates(
I->getParent(), BB))
14303void ScalarEvolution::forgetBackedgeTakenCounts(
const Loop *L,
14306 Predicated ? PredicatedBackedgeTakenCounts : BackedgeTakenCounts;
14307 auto It = BECounts.find(L);
14308 if (It != BECounts.end()) {
14309 for (
const ExitNotTakenInfo &ENT : It->second.ExitNotTaken) {
14310 for (
const SCEV *S : {ENT.ExactNotTaken, ENT.SymbolicMaxNotTaken}) {
14312 auto UserIt = BECountUsers.find(S);
14313 assert(UserIt != BECountUsers.end());
14318 BECounts.erase(It);
14326 while (!Worklist.
empty()) {
14328 auto Users = SCEVUsers.find(Curr);
14329 if (
Users != SCEVUsers.end())
14330 for (
const auto *User :
Users->second)
14331 if (ToForget.
insert(User).second)
14335 for (
const auto *S : ToForget)
14336 forgetMemoizedResultsImpl(S);
14338 for (
auto I = PredicatedSCEVRewrites.begin();
14339 I != PredicatedSCEVRewrites.end();) {
14340 std::pair<const SCEV *, const Loop *>
Entry =
I->first;
14341 if (ToForget.count(
Entry.first))
14342 PredicatedSCEVRewrites.erase(
I++);
14348void ScalarEvolution::forgetMemoizedResultsImpl(
const SCEV *S) {
14349 LoopDispositions.erase(S);
14350 BlockDispositions.erase(S);
14351 UnsignedRanges.erase(S);
14352 SignedRanges.erase(S);
14353 HasRecMap.erase(S);
14354 ConstantMultipleCache.erase(S);
14357 UnsignedWrapViaInductionTried.erase(AR);
14358 SignedWrapViaInductionTried.erase(AR);
14361 auto ExprIt = ExprValueMap.find(S);
14362 if (ExprIt != ExprValueMap.end()) {
14363 for (
Value *V : ExprIt->second) {
14364 auto ValueIt = ValueExprMap.find_as(V);
14365 if (ValueIt != ValueExprMap.end())
14366 ValueExprMap.erase(ValueIt);
14368 ExprValueMap.erase(ExprIt);
14371 auto ScopeIt = ValuesAtScopes.find(S);
14372 if (ScopeIt != ValuesAtScopes.end()) {
14373 for (
const auto &Pair : ScopeIt->second)
14376 std::make_pair(Pair.first, S));
14377 ValuesAtScopes.erase(ScopeIt);
14380 auto ScopeUserIt = ValuesAtScopesUsers.find(S);
14381 if (ScopeUserIt != ValuesAtScopesUsers.end()) {
14382 for (
const auto &Pair : ScopeUserIt->second)
14383 llvm::erase(ValuesAtScopes[Pair.second], std::make_pair(Pair.first, S));
14384 ValuesAtScopesUsers.erase(ScopeUserIt);
14387 auto BEUsersIt = BECountUsers.find(S);
14388 if (BEUsersIt != BECountUsers.end()) {
14390 auto Copy = BEUsersIt->second;
14391 for (
const auto &Pair : Copy)
14392 forgetBackedgeTakenCounts(Pair.getPointer(), Pair.getInt());
14393 BECountUsers.erase(BEUsersIt);
14396 auto FoldUser = FoldCacheUser.find(S);
14397 if (FoldUser != FoldCacheUser.end())
14398 for (
auto &KV : FoldUser->second)
14399 FoldCache.erase(KV);
14400 FoldCacheUser.erase(S);
14404ScalarEvolution::getUsedLoops(
const SCEV *S,
14406 struct FindUsedLoops {
14407 FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed)
14408 : LoopsUsed(LoopsUsed) {}
14409 SmallPtrSetImpl<const Loop *> &LoopsUsed;
14410 bool follow(
const SCEV *S) {
14416 bool isDone()
const {
return false; }
14419 FindUsedLoops
F(LoopsUsed);
14420 SCEVTraversal<FindUsedLoops>(F).visitAll(S);
14423void ScalarEvolution::getReachableBlocks(
14426 Worklist.
push_back(&F.getEntryBlock());
14427 while (!Worklist.
empty()) {
14429 if (!Reachable.
insert(BB).second)
14437 Worklist.
push_back(
C->isOne() ? TrueBB : FalseBB);
14444 if (isKnownPredicateViaConstantRanges(
Cmp->getCmpPredicate(), L, R)) {
14448 if (isKnownPredicateViaConstantRanges(
Cmp->getInverseCmpPredicate(), L,
14483 SCEVMapper SCM(SE2);
14485 SE2.getReachableBlocks(ReachableBlocks, F);
14487 auto GetDelta = [&](
const SCEV *Old,
const SCEV *New) ->
const SCEV * {
14505 while (!LoopStack.
empty()) {
14511 if (!ReachableBlocks.
contains(L->getHeader()))
14516 auto It = BackedgeTakenCounts.find(L);
14517 if (It == BackedgeTakenCounts.end())
14521 SCM.visit(It->second.getExact(L,
const_cast<ScalarEvolution *
>(
this)));
14541 const SCEV *Delta = GetDelta(CurBECount, NewBECount);
14542 if (Delta && !Delta->
isZero()) {
14543 dbgs() <<
"Trip Count for " << *L <<
" Changed!\n";
14544 dbgs() <<
"Old: " << *CurBECount <<
"\n";
14545 dbgs() <<
"New: " << *NewBECount <<
"\n";
14546 dbgs() <<
"Delta: " << *Delta <<
"\n";
14554 while (!Worklist.
empty()) {
14556 if (ValidLoops.
insert(L).second)
14557 Worklist.
append(L->begin(), L->end());
14559 for (
const auto &KV : ValueExprMap) {
14564 "AddRec references invalid loop");
14569 auto It = ExprValueMap.find(KV.second);
14570 if (It == ExprValueMap.end() || !It->second.contains(KV.first)) {
14571 dbgs() <<
"Value " << *KV.first
14572 <<
" is in ValueExprMap but not in ExprValueMap\n";
14577 if (!ReachableBlocks.
contains(
I->getParent()))
14579 const SCEV *OldSCEV = SCM.visit(KV.second);
14581 const SCEV *Delta = GetDelta(OldSCEV, NewSCEV);
14582 if (Delta && !Delta->
isZero()) {
14583 dbgs() <<
"SCEV for value " << *
I <<
" changed!\n"
14584 <<
"Old: " << *OldSCEV <<
"\n"
14585 <<
"New: " << *NewSCEV <<
"\n"
14586 <<
"Delta: " << *Delta <<
"\n";
14592 for (
const auto &KV : ExprValueMap) {
14593 for (
Value *V : KV.second) {
14594 const SCEV *S = ValueExprMap.lookup(V);
14596 dbgs() <<
"Value " << *V
14597 <<
" is in ExprValueMap but not in ValueExprMap\n";
14600 if (S != KV.first) {
14601 dbgs() <<
"Value " << *V <<
" mapped to " << *S <<
" rather than "
14602 << *KV.first <<
"\n";
14609 for (
const auto &S : UniqueSCEVs) {
14614 auto It = SCEVUsers.find(
Op);
14615 if (It != SCEVUsers.end() && It->second.count(&S))
14617 dbgs() <<
"Use of operand " << *
Op <<
" by user " << S
14618 <<
" is not being tracked!\n";
14624 for (
const auto &ValueAndVec : ValuesAtScopes) {
14626 for (
const auto &LoopAndValueAtScope : ValueAndVec.second) {
14627 const Loop *L = LoopAndValueAtScope.first;
14628 const SCEV *ValueAtScope = LoopAndValueAtScope.second;
14630 auto It = ValuesAtScopesUsers.find(ValueAtScope);
14631 if (It != ValuesAtScopesUsers.end() &&
14634 dbgs() <<
"Value: " << *
Value <<
", Loop: " << *L <<
", ValueAtScope: "
14635 << *ValueAtScope <<
" missing in ValuesAtScopesUsers\n";
14641 for (
const auto &ValueAtScopeAndVec : ValuesAtScopesUsers) {
14642 const SCEV *ValueAtScope = ValueAtScopeAndVec.first;
14643 for (
const auto &LoopAndValue : ValueAtScopeAndVec.second) {
14644 const Loop *L = LoopAndValue.first;
14645 const SCEV *
Value = LoopAndValue.second;
14647 auto It = ValuesAtScopes.find(
Value);
14648 if (It != ValuesAtScopes.end() &&
14649 is_contained(It->second, std::make_pair(L, ValueAtScope)))
14651 dbgs() <<
"Value: " << *
Value <<
", Loop: " << *L <<
", ValueAtScope: "
14652 << *ValueAtScope <<
" missing in ValuesAtScopes\n";
14658 auto VerifyBECountUsers = [&](
bool Predicated) {
14660 Predicated ? PredicatedBackedgeTakenCounts : BackedgeTakenCounts;
14661 for (
const auto &LoopAndBEInfo : BECounts) {
14662 for (
const ExitNotTakenInfo &ENT : LoopAndBEInfo.second.ExitNotTaken) {
14663 for (
const SCEV *S : {ENT.ExactNotTaken, ENT.SymbolicMaxNotTaken}) {
14665 auto UserIt = BECountUsers.find(S);
14666 if (UserIt != BECountUsers.end() &&
14667 UserIt->second.contains({ LoopAndBEInfo.first, Predicated }))
14669 dbgs() <<
"Value " << *S <<
" for loop " << *LoopAndBEInfo.first
14670 <<
" missing from BECountUsers\n";
14677 VerifyBECountUsers(
false);
14678 VerifyBECountUsers(
true);
14681 for (
auto &[S, Values] : LoopDispositions) {
14682 for (
auto [
Loop, CachedDisposition] : Values) {
14684 if (CachedDisposition != RecomputedDisposition) {
14685 dbgs() <<
"Cached disposition of " << *S <<
" for loop " << *
Loop
14686 <<
" is incorrect: cached " << CachedDisposition <<
", actual "
14687 << RecomputedDisposition <<
"\n";
14694 for (
auto &[S, Values] : BlockDispositions) {
14695 for (
auto [BB, CachedDisposition] : Values) {
14697 if (CachedDisposition != RecomputedDisposition) {
14698 dbgs() <<
"Cached disposition of " << *S <<
" for block %"
14699 << BB->
getName() <<
" is incorrect: cached " << CachedDisposition
14700 <<
", actual " << RecomputedDisposition <<
"\n";
14707 for (
auto [
FoldID, Expr] : FoldCache) {
14708 auto I = FoldCacheUser.find(Expr);
14709 if (
I == FoldCacheUser.end()) {
14710 dbgs() <<
"Missing entry in FoldCacheUser for cached expression " << *Expr
14715 dbgs() <<
"Missing FoldID in cached users of " << *Expr <<
"!\n";
14719 for (
auto [Expr, IDs] : FoldCacheUser) {
14720 for (
auto &
FoldID : IDs) {
14723 dbgs() <<
"Missing entry in FoldCache for expression " << *Expr
14728 dbgs() <<
"Entry in FoldCache doesn't match FoldCacheUser: " << *S
14729 <<
" != " << *Expr <<
"!\n";
14740 for (
auto [S, Multiple] : ConstantMultipleCache) {
14742 if ((Multiple != 0 && RecomputedMultiple != 0 &&
14743 Multiple.
urem(RecomputedMultiple) != 0 &&
14744 RecomputedMultiple.
urem(Multiple) != 0)) {
14745 dbgs() <<
"Incorrect cached computation in ConstantMultipleCache for "
14746 << *S <<
" : Computed " << RecomputedMultiple
14747 <<
" but cache contains " << Multiple <<
"!\n";
14755 FunctionAnalysisManager::Invalidator &Inv) {
14787 OS <<
"Printing analysis 'Scalar Evolution Analysis' for function '"
14788 <<
F.getName() <<
"':\n";
14794 "Scalar Evolution Analysis",
false,
true)
14843 const SCEV *LHS,
const SCEV *RHS) {
14845 assert(LHS->getType() == RHS->getType() &&
14846 "Type mismatch between LHS and RHS");
14849 ID.AddInteger(Pred);
14850 ID.AddPointer(LHS);
14851 ID.AddPointer(RHS);
14852 void *IP =
nullptr;
14853 if (
const auto *S = UniquePreds.FindNodeOrInsertPos(
ID, IP))
14857 UniquePreds.InsertNode(Eq, IP);
14868 ID.AddInteger(AddedFlags);
14869 void *IP =
nullptr;
14870 if (
const auto *S = UniquePreds.FindNodeOrInsertPos(
ID, IP))
14872 auto *OF =
new (SCEVAllocator)
14874 UniquePreds.InsertNode(OF, IP);
14894 SCEVPredicateRewriter
Rewriter(L, SE, NewPreds, Pred);
14895 return Rewriter.visit(S);
14901 for (
const auto *Pred : U->getPredicates())
14903 if (IPred->getLHS() == Expr &&
14905 return IPred->getRHS();
14907 if (IPred->getLHS() == Expr &&
14908 IPred->getPredicate() == ICmpInst::ICMP_EQ)
14909 return IPred->getRHS();
14912 return convertToAddRecWithPreds(Expr);
14915 const SCEV *visitZeroExtendExpr(
const SCEVZeroExtendExpr *Expr) {
14931 const SCEV *visitSignExtendExpr(
const SCEVSignExtendExpr *Expr) {
14948 explicit SCEVPredicateRewriter(
14949 const Loop *L, ScalarEvolution &SE,
14950 SmallVectorImpl<const SCEVPredicate *> *NewPreds,
14951 const SCEVPredicate *Pred)
14952 : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred),
L(
L) {}
14954 bool addOverflowAssumption(
const SCEVPredicate *
P) {
14957 return Pred && Pred->
implies(
P, SE);
14963 bool addOverflowAssumption(
const SCEVAddRecExpr *AR,
14966 return addOverflowAssumption(
A);
14975 const SCEV *convertToAddRecWithPreds(
const SCEVUnknown *Expr) {
14979 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
14981 if (!PredicatedRewrite)
14983 for (
const auto *
P : PredicatedRewrite->second){
14986 if (L != WP->getExpr()->getLoop())
14989 if (!addOverflowAssumption(
P))
14992 return PredicatedRewrite->first;
14995 SmallVectorImpl<const SCEVPredicate *> *NewPreds;
14996 const SCEVPredicate *Pred;
15005 return SCEVPredicateRewriter::rewrite(S, L, *
this,
nullptr, &Preds);
15012 S = SCEVPredicateRewriter::rewrite(S, L, *
this, &TransformPreds,
nullptr);
15032 if (!Step->
isOne())
15057 assert(LHS->getType() == RHS->getType() &&
"LHS and RHS types don't match");
15058 assert(LHS != RHS &&
"LHS and RHS are the same SCEV");
15071 return Op->LHS == LHS &&
Op->RHS == RHS;
15078 OS.
indent(
Depth) <<
"Equal predicate: " << *LHS <<
" == " << *RHS <<
"\n";
15080 OS.
indent(
Depth) <<
"Compare predicate: " << *LHS <<
" " << Pred <<
") "
15105 const SCEV *Start = AR->getStart();
15106 const SCEV *OpStart =
Op->AR->getStart();
15111 if (Start->getType()->isPointerTy() && Start->getType() != OpStart->
getType())
15114 const SCEV *Step = AR->getStepRecurrence(SE);
15115 const SCEV *OpStep =
Op->AR->getStepRecurrence(SE);
15168 if (Step->getValue()->getValue().isNonNegative())
15172 return ImpliedFlags;
15179 for (
const auto *
P : Preds)
15192 return this->implies(I, SE);
15200 for (
const auto *Pred : Preds)
15201 Pred->print(OS,
Depth);
15206 for (
const auto *Pred : Set->Preds)
15214 bool CheckImplies = Preds.
size() < 16;
15217 if (CheckImplies &&
implies(
N, SE))
15223 for (
auto *
P : Preds) {
15224 if (CheckImplies &&
N->implies(
P, SE))
15228 Preds = std::move(PrunedPreds);
15229 Preds.push_back(
N);
15236 Preds = std::make_unique<SCEVUnionPredicate>(
Empty, SE);
15241 for (
const auto *
Op :
Ops)
15246 SCEVUsers[
Op].insert(
User);
15250 const SCEV *Expr = SE.getSCEV(V);
15255 RewriteEntry &Entry = RewriteMap[Expr];
15258 if (Entry.second && Generation == Entry.first)
15259 return Entry.second;
15264 Expr = Entry.second;
15266 const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, *Preds);
15267 Entry = {Generation, NewSCEV};
15273 if (!BackedgeCount) {
15275 BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, Preds);
15276 for (
const auto *
P : Preds)
15279 return BackedgeCount;
15283 if (!SymbolicMaxBackedgeCount) {
15285 SymbolicMaxBackedgeCount =
15286 SE.getPredicatedSymbolicMaxBackedgeTakenCount(&L, Preds);
15287 for (
const auto *
P : Preds)
15290 return SymbolicMaxBackedgeCount;
15294 if (!SmallConstantMaxTripCount) {
15296 SmallConstantMaxTripCount = SE.getSmallConstantMaxTripCount(&L, &Preds);
15297 for (
const auto *
P : Preds)
15300 return *SmallConstantMaxTripCount;
15304 if (Preds->implies(&Pred, SE))
15309 Preds = std::make_unique<SCEVUnionPredicate>(NewPreds, SE);
15310 updateGeneration();
15317void PredicatedScalarEvolution::updateGeneration() {
15319 if (++Generation == 0) {
15320 for (
auto &
II : RewriteMap) {
15321 const SCEV *Rewritten =
II.second.second;
15338 auto II = FlagsMap.insert({V, Flags});
15351 auto II = FlagsMap.find(V);
15353 if (
II != FlagsMap.end())
15362 auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds);
15367 for (
const auto *
P : NewPreds)
15370 RewriteMap[SE.getSCEV(V)] = {Generation, New};
15376 : RewriteMap(
Init.RewriteMap), SE(
Init.SE), L(
Init.L),
15379 Generation(
Init.Generation), BackedgeCount(
Init.BackedgeCount) {
15380 for (
auto I :
Init.FlagsMap)
15381 FlagsMap.insert(
I);
15386 for (
auto *BB : L.getBlocks())
15387 for (
auto &
I : *BB) {
15388 if (!SE.isSCEVable(
I.getType()))
15391 auto *Expr = SE.getSCEV(&
I);
15392 auto II = RewriteMap.find(Expr);
15394 if (
II == RewriteMap.end())
15398 if (
II->second.second == Expr)
15403 OS.
indent(
Depth + 2) <<
"--> " << *
II->second.second <<
"\n";
15411 LoopGuards Guards(SE);
15419void ScalarEvolution::LoopGuards::collectFromPHI(
15427 using MinMaxPattern = std::pair<const SCEVConstant *, SCEVTypes>;
15428 auto GetMinMaxConst = [&](
unsigned IncomingIdx) -> MinMaxPattern {
15442 auto &RewriteMap =
G->second.RewriteMap;
15443 if (RewriteMap.empty())
15445 auto S = RewriteMap.find(SE.
getSCEV(
Phi.getIncomingValue(IncomingIdx)));
15446 if (S == RewriteMap.end())
15452 return {C0, SM->getSCEVType()};
15455 auto MergeMinMaxConst = [](MinMaxPattern
P1,
15456 MinMaxPattern P2) -> MinMaxPattern {
15457 auto [C1,
T1] =
P1;
15458 auto [C2, T2] = P2;
15459 if (!C1 || !C2 ||
T1 != T2)
15463 return {C1->getAPInt().
ult(C2->getAPInt()) ? C1 : C2,
T1};
15465 return {C1->getAPInt().
slt(C2->getAPInt()) ? C1 : C2,
T1};
15467 return {C1->getAPInt().
ugt(C2->getAPInt()) ? C1 : C2,
T1};
15469 return {C1->getAPInt().
sgt(C2->getAPInt()) ? C1 : C2,
T1};
15474 auto P = GetMinMaxConst(0);
15475 for (
unsigned int In = 1;
In <
Phi.getNumIncomingValues();
In++) {
15478 P = MergeMinMaxConst(
P, GetMinMaxConst(In));
15481 const SCEV *
LHS = SE.
getSCEV(
const_cast<PHINode *
>(&Phi));
15484 Guards.RewriteMap.insert({
LHS,
RHS});
15492 const APInt &DivisorVal,
15494 const APInt *ExprVal;
15507 const APInt &DivisorVal,
15509 const APInt *ExprVal;
15517 return SE.
getConstant(*ExprVal + DivisorVal - Rem);
15531 const SCEV *URemRHS =
nullptr;
15535 const SCEV *Multiple =
15537 DivInfo[URemLHS] = Multiple;
15539 Multiples[URemLHS] =
C->getAPInt();
15559 auto IsMinMaxSCEVWithNonNegativeConstant =
15563 if (
MinMax->getNumOperands() != 2)
15566 if (
C->getAPInt().isNegative())
15568 SCTy =
MinMax->getSCEVType();
15577 const SCEV *MinMaxLHS =
nullptr, *MinMaxRHS =
nullptr;
15579 if (!IsMinMaxSCEVWithNonNegativeConstant(MinMaxExpr, SCTy, MinMaxLHS,
15584 auto *DivisibleExpr =
15592void ScalarEvolution::LoopGuards::collectFromBlock(
15594 const BasicBlock *
Block,
const BasicBlock *Pred,
15602 DenseMap<const SCEV *, const SCEV *> &RewriteMap,
15613 &ExprsToRewrite]() {
15614 const SCEVConstant *C1;
15627 if (ExactRegion.isWrappedSet() || ExactRegion.isFullSet())
15629 auto [
I,
Inserted] = RewriteMap.try_emplace(LHSUnknown);
15630 const SCEV *RewrittenLHS =
Inserted ? LHSUnknown :
I->second;
15638 if (MatchRangeCheckIdiom())
15655 auto AddRewrite = [&](
const SCEV *From,
const SCEV *FromRewritten,
15657 if (From == FromRewritten)
15659 RewriteMap[From] = To;
15665 auto GetMaybeRewritten = [&](
const SCEV *S) {
15666 return RewriteMap.lookup_or(S, S);
15669 const SCEV *RewrittenLHS = GetMaybeRewritten(
LHS);
15671 const APInt &DividesBy =
15686 switch (Predicate) {
15715 SmallPtrSet<const SCEV *, 16> Visited;
15717 auto EnqueueOperands = [&Worklist](
const SCEVNAryExpr *S) {
15721 while (!Worklist.
empty()) {
15725 if (!Visited.
insert(From).second)
15727 const SCEV *FromRewritten = GetMaybeRewritten(From);
15728 const SCEV *To =
nullptr;
15730 switch (Predicate) {
15735 EnqueueOperands(
UMax);
15741 EnqueueOperands(
SMax);
15747 EnqueueOperands(
UMin);
15753 EnqueueOperands(
SMin);
15761 const SCEV *OneAlignedUp =
15763 To = SE.
getUMaxExpr(FromRewritten, OneAlignedUp);
15775 const SCEVConstant *
C;
15784 Guards.NotEqual.insert({
LHS,
RHS});
15793 AddRewrite(From, FromRewritten, To);
15810 SE.F.
getParent(), Intrinsic::experimental_guard);
15812 for (
const auto *GU : GuardDecl->users())
15814 if (Guard->getFunction() ==
Block->getParent() &&
15823 unsigned NumCollectedConditions = 0;
15825 std::pair<const BasicBlock *, const BasicBlock *> Pair(Pred,
Block);
15827 Pair = SE.getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
15829 const BranchInst *LoopEntryPredicate =
15836 NumCollectedConditions++;
15840 if (
Depth > 0 && NumCollectedConditions == 2)
15848 if (Pair.second->hasNPredecessorsOrMore(2) &&
15850 SmallDenseMap<const BasicBlock *, LoopGuards> IncomingGuards;
15851 for (
auto &Phi : Pair.second->phis())
15862 for (
auto [Term, EnterIfTrue] :
reverse(Terms)) {
15863 SmallVector<Value *, 8> Worklist;
15864 SmallPtrSet<Value *, 8> Visited;
15866 while (!Worklist.
empty()) {
15873 EnterIfTrue ?
Cmp->getPredicate() :
Cmp->getInversePredicate();
15897 DenseMap<const SCEV *, APInt> Multiples;
15899 for (
const auto &[Predicate,
LHS,
RHS] : GuardsToProcess) {
15906 for (
const auto &[Predicate,
LHS,
RHS] : GuardsToProcess)
15907 CollectCondition(Predicate,
LHS,
RHS, Guards.RewriteMap, DivGuards);
15911 for (
const auto &[K, Divisor] : Multiples) {
15912 const SCEV *DivisorSCEV = SE.
getConstant(Divisor);
15913 Guards.RewriteMap[
K] =
15915 Guards.
rewrite(K), Divisor, SE),
15924 Guards.PreserveNUW =
true;
15925 Guards.PreserveNSW =
true;
15926 for (
const SCEV *Expr : ExprsToRewrite) {
15927 const SCEV *RewriteTo = Guards.RewriteMap[Expr];
15928 Guards.PreserveNUW &=
15930 Guards.PreserveNSW &=
15937 if (ExprsToRewrite.size() > 1) {
15938 for (
const SCEV *Expr : ExprsToRewrite) {
15939 const SCEV *RewriteTo = Guards.RewriteMap[Expr];
15940 Guards.RewriteMap.erase(Expr);
15941 Guards.RewriteMap.insert({Expr, Guards.
rewrite(RewriteTo)});
15950 class SCEVLoopGuardRewriter
15961 NotEqual(Guards.NotEqual) {
15962 if (Guards.PreserveNUW)
15964 if (Guards.PreserveNSW)
15971 return Map.lookup_or(Expr, Expr);
15975 if (
const SCEV *S = Map.lookup(Expr))
15982 unsigned Bitwidth = Ty->getScalarSizeInBits() / 2;
15983 while (Bitwidth % 8 == 0 && Bitwidth >= 8 &&
15984 Bitwidth >
Op->getType()->getScalarSizeInBits()) {
15986 auto *NarrowExt = SE.getZeroExtendExpr(
Op, NarrowTy);
15987 if (
const SCEV *S = Map.lookup(NarrowExt))
15988 return SE.getZeroExtendExpr(S, Ty);
15989 Bitwidth = Bitwidth / 2;
15997 if (
const SCEV *S = Map.lookup(Expr))
16004 if (
const SCEV *S = Map.lookup(Expr))
16010 if (
const SCEV *S = Map.lookup(Expr))
16018 auto RewriteSubtraction = [&](
const SCEV *S) ->
const SCEV * {
16019 const SCEV *LHS, *RHS;
16023 if (NotEqual.contains({LHS, RHS})) {
16025 SE.getOne(S->
getType()), SE.getConstantMultiple(S), SE);
16026 return SE.getUMaxExpr(OneAlignedUp, S);
16033 if (
const SCEV *Rewritten = RewriteSubtraction(Expr))
16044 if (
const SCEV *Rewritten = RewriteSubtraction(
Add))
16045 return SE.getAddExpr(
16048 if (
const SCEV *S = Map.lookup(
Add))
16049 return SE.getAddExpr(Expr->
getOperand(0), S);
16061 : SE.getAddExpr(Operands,
16077 : SE.getMulExpr(Operands,
16083 if (RewriteMap.empty() && NotEqual.empty())
16086 SCEVLoopGuardRewriter
Rewriter(SE, *
this);
16087 return Rewriter.visit(Expr);
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
SmallPtrSet< const BasicBlock *, 8 > VisitedBlocks
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
This file defines a hash set that can be used to remove duplication of nodes in a graph.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This defines the Use class.
iv Induction Variable Users
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
PowerPC Reduce CR logical Operation
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
const SmallVectorImpl< MachineOperand > & Cond
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
SI optimize exec mask operations pre RA
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
This file provides utility classes that use RAII to save and restore values.
bool SCEVMinMaxExprContains(const SCEV *Root, const SCEV *OperandToFind, SCEVTypes RootKind)
static cl::opt< unsigned > MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden, cl::desc("Max coefficients in AddRec during evolving"), cl::init(8))
static cl::opt< unsigned > RangeIterThreshold("scev-range-iter-threshold", cl::Hidden, cl::desc("Threshold for switching to iteratively computing SCEV ranges"), cl::init(32))
static const Loop * isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI)
static unsigned getConstantTripCount(const SCEVConstant *ExitCount)
static int CompareValueComplexity(const LoopInfo *const LI, Value *LV, Value *RV, unsigned Depth)
Compare the two values LV and RV in terms of their "complexity" where "complexity" is a partial (and ...
static const SCEV * getNextSCEVDivisibleByDivisor(const SCEV *Expr, const APInt &DivisorVal, ScalarEvolution &SE)
static void PushLoopPHIs(const Loop *L, SmallVectorImpl< Instruction * > &Worklist, SmallPtrSetImpl< Instruction * > &Visited)
Push PHI nodes in the header of the given loop onto the given Worklist.
static void insertFoldCacheEntry(const ScalarEvolution::FoldID &ID, const SCEV *S, DenseMap< ScalarEvolution::FoldID, const SCEV * > &FoldCache, DenseMap< const SCEV *, SmallVector< ScalarEvolution::FoldID, 2 > > &FoldCacheUser)
static cl::opt< bool > ClassifyExpressions("scalar-evolution-classify-expressions", cl::Hidden, cl::init(true), cl::desc("When printing analysis, include information on every instruction"))
static bool CanConstantFold(const Instruction *I)
Return true if we can constant fold an instruction of the specified type, assuming that all operands ...
static cl::opt< unsigned > AddOpsInlineThreshold("scev-addops-inline-threshold", cl::Hidden, cl::desc("Threshold for inlining addition operands into a SCEV"), cl::init(500))
static cl::opt< unsigned > MaxLoopGuardCollectionDepth("scalar-evolution-max-loop-guard-collection-depth", cl::Hidden, cl::desc("Maximum depth for recursive loop guard collection"), cl::init(1))
static cl::opt< bool > VerifyIR("scev-verify-ir", cl::Hidden, cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"), cl::init(false))
static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge, Value *&C, Value *&LHS, Value *&RHS)
static const SCEV * getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty, ScalarEvolution *SE, unsigned Depth)
static std::optional< APInt > MinOptional(std::optional< APInt > X, std::optional< APInt > Y)
Helper function to compare optional APInts: (a) if X and Y both exist, return min(X,...
static cl::opt< unsigned > MulOpsInlineThreshold("scev-mulops-inline-threshold", cl::Hidden, cl::desc("Threshold for inlining multiplication operands into a SCEV"), cl::init(32))
static void GroupByComplexity(SmallVectorImpl< const SCEV * > &Ops, LoopInfo *LI, DominatorTree &DT)
Given a list of SCEV objects, order them by their complexity, and group objects of the same complexit...
static const SCEV * constantFoldAndGroupOps(ScalarEvolution &SE, LoopInfo &LI, DominatorTree &DT, SmallVectorImpl< const SCEV * > &Ops, FoldT Fold, IsIdentityT IsIdentity, IsAbsorberT IsAbsorber)
Performs a number of common optimizations on the passed Ops.
static bool isDivisibilityGuard(const SCEV *LHS, const SCEV *RHS, ScalarEvolution &SE)
static std::optional< const SCEV * > createNodeForSelectViaUMinSeq(ScalarEvolution *SE, const SCEV *CondExpr, const SCEV *TrueExpr, const SCEV *FalseExpr)
static Constant * BuildConstantFromSCEV(const SCEV *V)
This builds up a Constant using the ConstantExpr interface.
static ConstantInt * EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, ScalarEvolution &SE)
static const SCEV * BinomialCoefficient(const SCEV *It, unsigned K, ScalarEvolution &SE, Type *ResultTy)
Compute BC(It, K). The result has width W. Assume, K > 0.
static cl::opt< unsigned > MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden, cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"), cl::init(8))
static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr, const SCEV *Candidate)
Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values?
static PHINode * getConstantEvolvingPHI(Value *V, const Loop *L)
getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node in the loop that V is deri...
static const SCEV * SolveLinEquationWithOverflow(const APInt &A, const SCEV *B, SmallVectorImpl< const SCEVPredicate * > *Predicates, ScalarEvolution &SE, const Loop *L)
Finds the minimum unsigned root of the following equation:
static cl::opt< unsigned > MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden, cl::desc("Maximum number of iterations SCEV will " "symbolically execute a constant " "derived loop"), cl::init(100))
static bool MatchBinarySub(const SCEV *S, const SCEV *&LHS, const SCEV *&RHS)
static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow)
static void PrintSCEVWithTypeHint(raw_ostream &OS, const SCEV *S)
When printing a top-level SCEV for trip counts, it's helpful to include a type for constants which ar...
static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, const Loop *L)
static bool containsConstantInAddMulChain(const SCEV *StartExpr)
Determine if any of the operands in this SCEV are a constant or if any of the add or multiply express...
static const SCEV * getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty, ScalarEvolution *SE, unsigned Depth)
static bool hasHugeExpression(ArrayRef< const SCEV * > Ops)
Returns true if Ops contains a huge SCEV (the subtree of S contains at least HugeExprThreshold nodes)...
static cl::opt< unsigned > MaxPhiSCCAnalysisSize("scalar-evolution-max-scc-analysis-depth", cl::Hidden, cl::desc("Maximum amount of nodes to process while searching SCEVUnknown " "Phi strongly connected components"), cl::init(8))
static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
static bool collectDivisibilityInformation(ICmpInst::Predicate Predicate, const SCEV *LHS, const SCEV *RHS, DenseMap< const SCEV *, const SCEV * > &DivInfo, DenseMap< const SCEV *, APInt > &Multiples, ScalarEvolution &SE)
static cl::opt< unsigned > MaxSCEVOperationsImplicationDepth("scalar-evolution-max-scev-operations-implication-depth", cl::Hidden, cl::desc("Maximum depth of recursive SCEV operations implication analysis"), cl::init(2))
static void PushDefUseChildren(Instruction *I, SmallVectorImpl< Instruction * > &Worklist, SmallPtrSetImpl< Instruction * > &Visited)
Push users of the given Instruction onto the given Worklist.
static std::optional< APInt > SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec, const ConstantRange &Range, ScalarEvolution &SE)
Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n iterations.
static cl::opt< bool > UseContextForNoWrapFlagInference("scalar-evolution-use-context-for-no-wrap-flag-strenghening", cl::Hidden, cl::desc("Infer nuw/nsw flags using context where suitable"), cl::init(true))
static cl::opt< bool > EnableFiniteLoopControl("scalar-evolution-finite-loop", cl::Hidden, cl::desc("Handle <= and >= in finite loops"), cl::init(true))
static std::optional< std::tuple< APInt, APInt, APInt, APInt, unsigned > > GetQuadraticEquation(const SCEVAddRecExpr *AddRec)
For a given quadratic addrec, generate coefficients of the corresponding quadratic equation,...
static bool isKnownPredicateExtendIdiom(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
static std::optional< BinaryOp > MatchBinaryOp(Value *V, const DataLayout &DL, AssumptionCache &AC, const DominatorTree &DT, const Instruction *CxtI)
Try to map V into a BinaryOp, and return std::nullopt on failure.
static std::optional< APInt > SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE)
Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n iterations.
static std::optional< APInt > TruncIfPossible(std::optional< APInt > X, unsigned BitWidth)
Helper function to truncate an optional APInt to a given BitWidth.
static cl::opt< unsigned > MaxSCEVCompareDepth("scalar-evolution-max-scev-compare-depth", cl::Hidden, cl::desc("Maximum depth of recursive SCEV complexity comparisons"), cl::init(32))
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE, const SCEVConstant *ConstantTerm, const SCEVAddExpr *WholeAddExpr)
static cl::opt< unsigned > MaxConstantEvolvingDepth("scalar-evolution-max-constant-evolving-depth", cl::Hidden, cl::desc("Maximum depth of recursive constant evolving"), cl::init(32))
static ConstantRange getRangeForAffineARHelper(APInt Step, const ConstantRange &StartRange, const APInt &MaxBECount, bool Signed)
static std::optional< ConstantRange > GetRangeFromMetadata(Value *V)
Helper method to assign a range to V from metadata present in the IR.
static cl::opt< unsigned > HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden, cl::desc("Size of the expression which is considered huge"), cl::init(4096))
static SCEV::NoWrapFlags StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type, ArrayRef< const SCEV * > Ops, SCEV::NoWrapFlags Flags)
static Type * isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI, bool &Signed, ScalarEvolution &SE)
Helper function to createAddRecFromPHIWithCasts.
static Constant * EvaluateExpression(Value *V, const Loop *L, DenseMap< Instruction *, Constant * > &Vals, const DataLayout &DL, const TargetLibraryInfo *TLI)
EvaluateExpression - Given an expression that passes the getConstantEvolvingPHI predicate,...
static const SCEV * getPreviousSCEVDivisibleByDivisor(const SCEV *Expr, const APInt &DivisorVal, ScalarEvolution &SE)
static const SCEV * MatchNotExpr(const SCEV *Expr)
If Expr computes ~A, return A else return nullptr.
static cl::opt< unsigned > MaxValueCompareDepth("scalar-evolution-max-value-compare-depth", cl::Hidden, cl::desc("Maximum depth of recursive value complexity comparisons"), cl::init(2))
static const SCEV * applyDivisibilityOnMinMaxExpr(const SCEV *MinMaxExpr, APInt Divisor, ScalarEvolution &SE)
static cl::opt< bool, true > VerifySCEVOpt("verify-scev", cl::Hidden, cl::location(VerifySCEV), cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"))
static const SCEV * getSignedOverflowLimitForStep(const SCEV *Step, ICmpInst::Predicate *Pred, ScalarEvolution *SE)
static cl::opt< unsigned > MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden, cl::desc("Maximum depth of recursive arithmetics"), cl::init(32))
static bool HasSameValue(const SCEV *A, const SCEV *B)
SCEV structural equivalence is usually sufficient for testing whether two expressions are equal,...
static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow)
Compute the result of "n choose k", the binomial coefficient.
static std::optional< int > CompareSCEVComplexity(const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS, DominatorTree &DT, unsigned Depth=0)
static bool CollectAddOperandsWithScales(SmallDenseMap< const SCEV *, APInt, 16 > &M, SmallVectorImpl< const SCEV * > &NewOps, APInt &AccumulatedConstant, ArrayRef< const SCEV * > Ops, const APInt &Scale, ScalarEvolution &SE)
Process the given Ops list, which is a list of operands to be added under the given scale,...
static bool canConstantEvolve(Instruction *I, const Loop *L)
Determine whether this instruction can constant evolve within this loop assuming its operands can all...
static PHINode * getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L, DenseMap< Instruction *, PHINode * > &PHIMap, unsigned Depth)
getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by recursing through each instructi...
static bool scevUnconditionallyPropagatesPoisonFromOperands(SCEVTypes Kind)
static cl::opt< bool > VerifySCEVStrict("verify-scev-strict", cl::Hidden, cl::desc("Enable stricter verification with -verify-scev is passed"))
static Constant * getOtherIncomingValue(PHINode *PN, BasicBlock *BB)
static cl::opt< bool > UseExpensiveRangeSharpening("scalar-evolution-use-expensive-range-sharpening", cl::Hidden, cl::init(false), cl::desc("Use more powerful methods of sharpening expression ranges. May " "be costly in terms of compile time"))
static const SCEV * getUnsignedOverflowLimitForStep(const SCEV *Step, ICmpInst::Predicate *Pred, ScalarEvolution *SE)
static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Is LHS Pred RHS true on the virtue of LHS or RHS being a Min or Max expression?
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
static bool InBlock(const Value *V, const BasicBlock *BB)
Provides some synthesis utilities to produce sequences of values.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static SymbolRef::Type getType(const Symbol *Sym)
LocallyHashedType DenseMapInfo< LocallyHashedType >::Empty
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
static std::optional< bool > isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS, const Value *ARHS, const Value *BLHS, const Value *BRHS)
Return true if "icmp Pred BLHS BRHS" is true whenever "icmp PredALHS ARHS" is true.
Virtual Register Rewriter
static const uint32_t IV[8]
SCEVCastSinkingRewriter(ScalarEvolution &SE, Type *TargetTy, ConversionFn CreatePtrCast)
static const SCEV * rewrite(const SCEV *Scev, ScalarEvolution &SE, Type *TargetTy, ConversionFn CreatePtrCast)
const SCEV * visitUnknown(const SCEVUnknown *Expr)
const SCEV * visitMulExpr(const SCEVMulExpr *Expr)
const SCEV * visitAddExpr(const SCEVAddExpr *Expr)
const SCEV * visit(const SCEV *S)
Class for arbitrary precision integers.
LLVM_ABI APInt umul_ov(const APInt &RHS, bool &Overflow) const
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
LLVM_ABI APInt getHiBits(unsigned numBits) const
Compute an APInt containing the numBits high bits from this APInt.
unsigned getActiveBits() const
Compute the number of active bits in the value.
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
APInt abs() const
Get the absolute value.
bool sgt(const APInt &RHS) const
Signed greater than comparison.
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
bool isSignMask() const
Check if the APInt's value is returned by getSignMask.
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
static APInt getMinValue(unsigned numBits)
Gets minimum unsigned value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
bool sle(const APInt &RHS) const
Signed less or equal comparison.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
bool isNonPositive() const
Determine if this APInt Value is non-positive (<= 0).
unsigned countTrailingZeros() const
bool isStrictlyPositive() const
Determine if this APInt Value is strictly positive (> 0).
unsigned logBase2() const
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
LLVM_ABI APInt multiplicativeInverse() const
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
LLVM_ABI APInt sext(unsigned width) const
Sign extend to a new width.
APInt shl(unsigned shiftAmt) const
Left-shift function.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
bool isSignBitSet() const
Determine if sign bit of this APInt is set.
bool slt(const APInt &RHS) const
Signed less than comparison.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
This templated class represents "all analyses that operate over <a particular IR unit>" (e....
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
AnalysisUsage & addRequiredTransitive()
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
MutableArrayRef< WeakVH > assumptions()
Access the list of assumption handles currently tracked for this function.
LLVM_ABI bool isSingleEdge() const
Check if this is the only edge between Start and End.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
const Function * getParent() const
Return the enclosing method, or null if none.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Instruction & front() const
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
LLVM_ABI unsigned getNoWrapKind() const
Returns one of OBO::NoSignedWrap or OBO::NoUnsignedWrap.
LLVM_ABI Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
BinaryOps getOpcode() const
Conditional or Unconditional Branch instruction.
bool isConditional() const
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
This class represents a function call, abstracting a target machine's calling convention.
virtual void deleted()
Callback for Value destruction.
bool isFalseWhenEqual() const
This is just a convenience.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ ICMP_ULT
unsigned less than
@ ICMP_SGE
signed greater or equal
@ ICMP_ULE
unsigned less or equal
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
bool isTrueWhenEqual() const
This is just a convenience.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static LLVM_ABI std::optional< CmpPredicate > getMatching(CmpPredicate A, CmpPredicate B)
Compares two CmpPredicates taking samesign into account and returns the canonicalized CmpPredicate if...
LLVM_ABI CmpInst::Predicate getPreferredSignedPredicate() const
Attempts to return a signed CmpInst::Predicate from the CmpPredicate.
CmpInst::Predicate dropSameSign() const
Drops samesign information.
static LLVM_ABI Constant * getNot(Constant *C)
static LLVM_ABI Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
static LLVM_ABI Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)
static LLVM_ABI Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
This is the shared class of boolean and integer constants.
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
This class represents a range of values.
LLVM_ABI ConstantRange add(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an addition of a value in this ran...
LLVM_ABI ConstantRange zextOrTrunc(uint32_t BitWidth) const
Make this range have the bit width given by BitWidth.
PreferredRangeType
If represented precisely, the result of some range operations may consist of multiple disjoint ranges...
LLVM_ABI bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
const APInt & getLower() const
Return the lower value for this range.
LLVM_ABI ConstantRange urem(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an unsigned remainder operation of...
LLVM_ABI bool isFullSet() const
Return true if this set contains all of the elements possible for this data-type.
LLVM_ABI bool icmp(CmpInst::Predicate Pred, const ConstantRange &Other) const
Does the predicate Pred hold between ranges this and Other?
LLVM_ABI bool isEmptySet() const
Return true if this set contains no members.
LLVM_ABI ConstantRange zeroExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
LLVM_ABI bool isSignWrappedSet() const
Return true if this set wraps around the signed domain.
LLVM_ABI APInt getSignedMin() const
Return the smallest signed value contained in the ConstantRange.
LLVM_ABI bool isWrappedSet() const
Return true if this set wraps around the unsigned domain.
LLVM_ABI void print(raw_ostream &OS) const
Print out the bounds to a stream.
LLVM_ABI ConstantRange truncate(uint32_t BitWidth, unsigned NoWrapKind=0) const
Return a new range in the specified integer type, which must be strictly smaller than the current typ...
LLVM_ABI ConstantRange signExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
const APInt & getUpper() const
Return the upper value for this range.
LLVM_ABI ConstantRange unionWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the union of this range with another range.
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
LLVM_ABI APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
LLVM_ABI APInt getSignedMax() const
Return the largest signed value contained in the ConstantRange.
static ConstantRange getNonEmpty(APInt Lower, APInt Upper)
Create non-empty constant range with the given bounds.
static LLVM_ABI ConstantRange makeGuaranteedNoWrapRegion(Instruction::BinaryOps BinOp, const ConstantRange &Other, unsigned NoWrapKind)
Produce the largest range containing all X such that "X BinOp Y" is guaranteed not to wrap (overflow)...
LLVM_ABI unsigned getMinSignedBits() const
Compute the maximal number of bits needed to represent every value in this signed range.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
LLVM_ABI ConstantRange sub(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a subtraction of a value in this r...
LLVM_ABI ConstantRange sextOrTrunc(uint32_t BitWidth) const
Make this range have the bit width given by BitWidth.
static LLVM_ABI ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
This is an important base class in LLVM.
A parsed version of the target data layout string in and methods for querying it.
LLVM_ABI const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
LLVM_ABI IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
LLVM_ABI unsigned getIndexTypeSizeInBits(Type *Ty) const
The size in bits of the index used in GEP calculation for this type.
LLVM_ABI IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of a GEP index in AddressSpace.
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
iterator find_as(const LookupKeyT &Val)
Alternate version of find() which allows a different, and possibly less expensive,...
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Analysis pass which computes a DominatorTree.
Legacy analysis pass which computes a DominatorTree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
LLVM_ABI bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
FoldingSetNodeIDRef - This class describes a reference to an interned FoldingSetNodeID,...
FoldingSetNodeID - This class is used to gather all the unique data bits of a node.
Represents flags for the getelementptr instruction/expression.
bool hasNoUnsignedSignedWrap() const
bool hasNoUnsignedWrap() const
static GEPNoWrapFlags none()
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
Module * getParent()
Get the module that this global value is contained inside of...
static bool isPrivateLinkage(LinkageTypes Linkage)
static bool isInternalLinkage(LinkageTypes Linkage)
This instruction compares its operands according to the predicate given to the constructor.
CmpPredicate getCmpPredicate() const
static bool isGE(Predicate P)
Return true if the predicate is SGE or UGE.
CmpPredicate getSwappedCmpPredicate() const
static LLVM_ABI bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
static bool isLT(Predicate P)
Return true if the predicate is SLT or ULT.
CmpPredicate getInverseCmpPredicate() const
Predicate getNonStrictCmpPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
static bool isGT(Predicate P)
Return true if the predicate is SGT or UGT.
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
static CmpPredicate getInverseCmpPredicate(CmpPredicate Pred)
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
static bool isLE(Predicate P)
Return true if the predicate is SLE or ULE.
LLVM_ABI bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
LLVM_ABI bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
LLVM_ABI bool isIdenticalToWhenDefined(const Instruction *I, bool IntersectAttrs=false) const LLVM_READONLY
This is like isIdenticalTo, except that it ignores the SubclassOptionalData flags,...
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A helper class to return the specified delimiter string after the first invocation of operator String...
An instruction for reading from memory.
Analysis pass that exposes the LoopInfo for a function.
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
BlockT * getHeader() const
unsigned getLoopDepth() const
Return the nesting level of this loop.
BlockT * getLoopPredecessor() const
If the given loop's header has exactly one unique predecessor outside the loop, return it.
LoopT * getParentLoop() const
Return the parent loop if it exists or nullptr for top level loops.
unsigned getLoopDepth(const BlockT *BB) const
Return the loop nesting level of the specified block.
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
The legacy pass manager's analysis pass to compute loop information.
Represents a single loop in the control flow graph.
bool isLoopInvariant(const Value *V) const
Return true if the specified value is loop invariant.
A Module instance is used to store all the information related to an LLVM module.
unsigned getOpcode() const
Return the opcode for this Instruction or ConstantExpr.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
iterator_range< const_block_iterator > blocks() const
op_range incoming_values()
Value * getIncomingValueForBlock(const BasicBlock *BB) const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
AnalysisType & getAnalysis() const
getAnalysis<AnalysisType>() - This function is used by subclasses to get to the analysis information ...
PointerIntPair - This class implements a pair of a pointer and small integer.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
LLVM_ABI void addPredicate(const SCEVPredicate &Pred)
Adds a new predicate.
LLVM_ABI const SCEVPredicate & getPredicate() const
LLVM_ABI const SCEV * getPredicatedSCEV(const SCEV *Expr)
Returns the rewritten SCEV for Expr in the context of the current SCEV predicate.
LLVM_ABI bool hasNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Returns true if we've proved that V doesn't wrap by means of a SCEV predicate.
LLVM_ABI void setNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Proves that V doesn't overflow by adding SCEV predicate.
LLVM_ABI void print(raw_ostream &OS, unsigned Depth) const
Print the SCEV mappings done by the Predicated Scalar Evolution.
LLVM_ABI bool areAddRecsEqualWithPreds(const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const
Check if AR1 and AR2 are equal, while taking into account Equal predicates in Preds.
LLVM_ABI PredicatedScalarEvolution(ScalarEvolution &SE, Loop &L)
LLVM_ABI const SCEVAddRecExpr * getAsAddRec(Value *V)
Attempts to produce an AddRecExpr for V by adding additional SCEV predicates.
LLVM_ABI unsigned getSmallConstantMaxTripCount()
Returns the upper bound of the loop trip count as a normal unsigned value, or 0 if the trip count is ...
LLVM_ABI const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
LLVM_ABI const SCEV * getSymbolicMaxBackedgeTakenCount()
Get the (predicated) symbolic max backedge count for the analyzed loop.
LLVM_ABI const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
PreservedAnalysisChecker getChecker() const
Build a checker for this PreservedAnalyses and the specified analysis type.
constexpr bool isValid() const
This node represents an addition of some number of SCEVs.
This node represents a polynomial recurrence on the trip count of the specified loop.
friend class ScalarEvolution
const SCEV * getStart() const
LLVM_ABI const SCEV * evaluateAtIteration(const SCEV *It, ScalarEvolution &SE) const
Return the value of this chain of recurrences at the specified iteration number.
const SCEV * getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
void setNoWrapFlags(NoWrapFlags Flags)
Set flags for a recurrence without clearing any previously set flags.
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values.
bool isQuadratic() const
Return true if this represents an expression A + B*x + C*x^2 where A, B and C are loop invariant valu...
LLVM_ABI const SCEV * getNumIterationsInRange(const ConstantRange &Range, ScalarEvolution &SE) const
Return the number of iterations of this loop that produce values in the specified constant range.
LLVM_ABI const SCEVAddRecExpr * getPostIncExpr(ScalarEvolution &SE) const
Return an expression representing the value of this expression one iteration of the loop ahead.
const Loop * getLoop() const
This is the base class for unary cast operator classes.
const SCEV * getOperand() const
LLVM_ABI SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, const SCEV *op, Type *ty)
void setNoWrapFlags(NoWrapFlags Flags)
Set flags for a non-recurrence without clearing previously set flags.
This class represents an assumption that the expression LHS Pred RHS evaluates to true,...
SCEVComparePredicate(const FoldingSetNodeIDRef ID, const ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
bool isAlwaysTrue() const override
Returns true if the predicate is always true.
void print(raw_ostream &OS, unsigned Depth=0) const override
Prints a textual representation of this predicate with an indentation of Depth.
bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const override
Implementation of the SCEVPredicate interface.
This class represents a constant integer value.
ConstantInt * getValue() const
const APInt & getAPInt() const
This is the base class for unary integral cast operator classes.
LLVM_ABI SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, const SCEV *op, Type *ty)
This node is the base class min/max selections.
static enum SCEVTypes negate(enum SCEVTypes T)
This node represents multiplication of some number of SCEVs.
This node is a base class providing common functionality for n'ary operators.
bool hasNoUnsignedWrap() const
bool hasNoSelfWrap() const
size_t getNumOperands() const
bool hasNoSignedWrap() const
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask=NoWrapMask) const
const SCEV * getOperand(unsigned i) const
const SCEV *const * Operands
ArrayRef< const SCEV * > operands() const
This class represents an assumption made using SCEV expressions which can be checked at run-time.
SCEVPredicate(const SCEVPredicate &)=default
virtual bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const =0
Returns true if this predicate implies N.
This class represents a cast from a pointer to a pointer-sized integer value.
This visitor recursively visits a SCEV expression and re-writes it.
const SCEV * visitSignExtendExpr(const SCEVSignExtendExpr *Expr)
const SCEV * visit(const SCEV *S)
const SCEV * visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr)
const SCEV * visitSMinExpr(const SCEVSMinExpr *Expr)
SCEVRewriteVisitor(ScalarEvolution &SE)
const SCEV * visitUMinExpr(const SCEVUMinExpr *Expr)
This class represents a signed minimum selection.
This node is the base class for sequential/in-order min/max selections.
static SCEVTypes getEquivalentNonSequentialSCEVType(SCEVTypes Ty)
This class represents a sign extension of a small integer value to a larger integer value.
Visit all nodes in the expression tree using worklist traversal.
This class represents a truncation of an integer value to a smaller integer value.
This class represents a binary unsigned division operation.
const SCEV * getLHS() const
const SCEV * getRHS() const
This class represents an unsigned minimum selection.
This class represents a composition of other SCEV predicates, and is the class that most clients will...
void print(raw_ostream &OS, unsigned Depth) const override
Prints a textual representation of this predicate with an indentation of Depth.
bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const override
Returns true if this predicate implies N.
SCEVUnionPredicate(ArrayRef< const SCEVPredicate * > Preds, ScalarEvolution &SE)
Union predicates don't get cached so create a dummy set ID for it.
bool isAlwaysTrue() const override
Implementation of the SCEVPredicate interface.
This means that we are dealing with an entirely unknown SCEV value, and only represent it as its LLVM...
This class represents the value of vscale, as used when defining the length of a scalable vector or r...
This class represents an assumption made on an AddRec expression.
IncrementWrapFlags
Similar to SCEV::NoWrapFlags, but with slightly different semantics for FlagNUSW.
SCEVWrapPredicate(const FoldingSetNodeIDRef ID, const SCEVAddRecExpr *AR, IncrementWrapFlags Flags)
bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const override
Returns true if this predicate implies N.
static SCEVWrapPredicate::IncrementWrapFlags setFlags(SCEVWrapPredicate::IncrementWrapFlags Flags, SCEVWrapPredicate::IncrementWrapFlags OnFlags)
void print(raw_ostream &OS, unsigned Depth=0) const override
Prints a textual representation of this predicate with an indentation of Depth.
bool isAlwaysTrue() const override
Returns true if the predicate is always true.
const SCEVAddRecExpr * getExpr() const
Implementation of the SCEVPredicate interface.
static SCEVWrapPredicate::IncrementWrapFlags clearFlags(SCEVWrapPredicate::IncrementWrapFlags Flags, SCEVWrapPredicate::IncrementWrapFlags OffFlags)
Convenient IncrementWrapFlags manipulation methods.
static SCEVWrapPredicate::IncrementWrapFlags getImpliedFlags(const SCEVAddRecExpr *AR, ScalarEvolution &SE)
Returns the set of SCEVWrapPredicate no wrap flags implied by a SCEVAddRecExpr.
IncrementWrapFlags getFlags() const
Returns the set assumed no overflow flags.
This class represents a zero extension of a small integer value to a larger integer value.
This class represents an analyzed expression in the program.
LLVM_ABI ArrayRef< const SCEV * > operands() const
Return operands of this SCEV expression.
unsigned short getExpressionSize() const
LLVM_ABI bool isOne() const
Return true if the expression is a constant one.
LLVM_ABI bool isZero() const
Return true if the expression is a constant zero.
LLVM_ABI void dump() const
This method is used for debugging.
LLVM_ABI bool isAllOnesValue() const
Return true if the expression is a constant all-ones value.
LLVM_ABI bool isNonConstantNegative() const
Return true if the specified scev is negated, but not a constant.
LLVM_ABI void print(raw_ostream &OS) const
Print out the internal representation of this scalar to the specified stream.
SCEV(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, unsigned short ExpressionSize)
SCEVTypes getSCEVType() const
LLVM_ABI Type * getType() const
Return the LLVM type of this SCEV expression.
NoWrapFlags
NoWrapFlags are bitfield indices into SubclassData.
Analysis pass that exposes the ScalarEvolution for a function.
LLVM_ABI ScalarEvolution run(Function &F, FunctionAnalysisManager &AM)
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
void print(raw_ostream &OS, const Module *=nullptr) const override
print - Print out the internal state of the pass.
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass.
void releaseMemory() override
releaseMemory() - This member can be implemented by a pass if it wants to be able to release its memo...
void verifyAnalysis() const override
verifyAnalysis() - This member can be implemented by a analysis pass to check state of analysis infor...
ScalarEvolutionWrapperPass()
static LLVM_ABI LoopGuards collect(const Loop *L, ScalarEvolution &SE)
Collect rewrite map for loop guards for loop L, together with flags indicating if NUW and NSW can be ...
LLVM_ABI const SCEV * rewrite(const SCEV *Expr) const
Try to apply the collected loop guards to Expr.
The main scalar evolution driver.
const SCEV * getConstantMaxBackedgeTakenCount(const Loop *L)
When successful, this returns a SCEVConstant that is greater than or equal to (i.e.
static bool hasFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags TestFlags)
const DataLayout & getDataLayout() const
Return the DataLayout associated with the module this SCEV instance is operating on.
LLVM_ABI bool isKnownNonNegative(const SCEV *S)
Test if the given expression is known to be non-negative.
LLVM_ABI bool isKnownOnEveryIteration(CmpPredicate Pred, const SCEVAddRecExpr *LHS, const SCEV *RHS)
Test if the condition described by Pred, LHS, RHS is known to be true on every iteration of the loop ...
LLVM_ABI const SCEV * getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
Return the SCEV object corresponding to -V.
LLVM_ABI std::optional< LoopInvariantPredicate > getLoopInvariantExitCondDuringFirstIterationsImpl(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI, const SCEV *MaxIter)
LLVM_ABI const SCEV * getSMaxExpr(const SCEV *LHS, const SCEV *RHS)
LLVM_ABI const SCEV * getUDivCeilSCEV(const SCEV *N, const SCEV *D)
Compute ceil(N / D).
LLVM_ABI std::optional< LoopInvariantPredicate > getLoopInvariantExitCondDuringFirstIterations(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI, const SCEV *MaxIter)
If the result of the predicate LHS Pred RHS is loop invariant with respect to L at given Context duri...
LLVM_ABI Type * getWiderType(Type *Ty1, Type *Ty2) const
LLVM_ABI const SCEV * getAbsExpr(const SCEV *Op, bool IsNSW)
LLVM_ABI bool isKnownNonPositive(const SCEV *S)
Test if the given expression is known to be non-positive.
LLVM_ABI const SCEV * getURemExpr(const SCEV *LHS, const SCEV *RHS)
Represents an unsigned remainder expression based on unsigned division.
LLVM_ABI bool isKnownNegative(const SCEV *S)
Test if the given expression is known to be negative.
LLVM_ABI const SCEV * getPredicatedConstantMaxBackedgeTakenCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Predicates)
Similar to getConstantMaxBackedgeTakenCount, except it will add a set of SCEV predicates to Predicate...
LLVM_ABI const SCEV * removePointerBase(const SCEV *S)
Compute an expression equivalent to S - getPointerBase(S).
LLVM_ABI bool isLoopEntryGuardedByCond(const Loop *L, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether entry to the loop is protected by a conditional between LHS and RHS.
LLVM_ABI bool isKnownNonZero(const SCEV *S)
Test if the given expression is known to be non-zero.
LLVM_ABI const SCEV * getSCEVAtScope(const SCEV *S, const Loop *L)
Return a SCEV expression for the specified value at the specified scope in the program.
LLVM_ABI const SCEV * getSMinExpr(const SCEV *LHS, const SCEV *RHS)
LLVM_ABI const SCEV * getBackedgeTakenCount(const Loop *L, ExitCountKind Kind=Exact)
If the specified loop has a predictable backedge-taken count, return it, otherwise return a SCEVCould...
LLVM_ABI const SCEV * getUMaxExpr(const SCEV *LHS, const SCEV *RHS)
LLVM_ABI void setNoWrapFlags(SCEVAddRecExpr *AddRec, SCEV::NoWrapFlags Flags)
Update no-wrap flags of an AddRec.
LLVM_ABI const SCEV * getUMaxFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS)
Promote the operands to the wider of the types using zero-extension, and then perform a umax operatio...
const SCEV * getZero(Type *Ty)
Return a SCEV for the constant 0 of a specific type.
LLVM_ABI bool willNotOverflow(Instruction::BinaryOps BinOp, bool Signed, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI=nullptr)
Is operation BinOp between LHS and RHS provably does not have a signed/unsigned overflow (Signed)?
LLVM_ABI ExitLimit computeExitLimitFromCond(const Loop *L, Value *ExitCond, bool ExitIfTrue, bool ControlsOnlyExit, bool AllowPredicates=false)
Compute the number of times the backedge of the specified loop will execute if its exit condition wer...
LLVM_ABI const SCEV * getZeroExtendExprImpl(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI const SCEVPredicate * getEqualPredicate(const SCEV *LHS, const SCEV *RHS)
LLVM_ABI unsigned getSmallConstantTripMultiple(const Loop *L, const SCEV *ExitCount)
Returns the largest constant divisor of the trip count as a normal unsigned value,...
LLVM_ABI uint64_t getTypeSizeInBits(Type *Ty) const
Return the size in bits of the specified type, for which isSCEVable must return true.
LLVM_ABI const SCEV * getConstant(ConstantInt *V)
LLVM_ABI const SCEV * getPredicatedBackedgeTakenCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Predicates)
Similar to getBackedgeTakenCount, except it will add a set of SCEV predicates to Predicates that are ...
LLVM_ABI const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
ConstantRange getSignedRange(const SCEV *S)
Determine the signed range for a particular SCEV.
LLVM_ABI const SCEV * getNoopOrSignExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
bool loopHasNoAbnormalExits(const Loop *L)
Return true if the loop has no abnormal exits.
LLVM_ABI const SCEV * getTripCountFromExitCount(const SCEV *ExitCount)
A version of getTripCountFromExitCount below which always picks an evaluation type which can not resu...
LLVM_ABI ScalarEvolution(Function &F, TargetLibraryInfo &TLI, AssumptionCache &AC, DominatorTree &DT, LoopInfo &LI)
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
LLVM_ABI const SCEV * getTruncateOrNoop(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI const SCEV * getLosslessPtrToIntExpr(const SCEV *Op)
LLVM_ABI const SCEV * getCastExpr(SCEVTypes Kind, const SCEV *Op, Type *Ty)
LLVM_ABI const SCEV * getSequentialMinMaxExpr(SCEVTypes Kind, SmallVectorImpl< const SCEV * > &Operands)
LLVM_ABI std::optional< bool > evaluatePredicateAt(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI)
Check whether the condition described by Pred, LHS, and RHS is true or false in the given Context.
LLVM_ABI unsigned getSmallConstantMaxTripCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > *Predicates=nullptr)
Returns the upper bound of the loop trip count as a normal unsigned value.
LLVM_ABI const SCEV * getPtrToIntExpr(const SCEV *Op, Type *Ty)
LLVM_ABI bool isBackedgeTakenCountMaxOrZero(const Loop *L)
Return true if the backedge taken count is either the value returned by getConstantMaxBackedgeTakenCo...
LLVM_ABI void forgetLoop(const Loop *L)
This method should be called by the client when it has changed a loop in a way that may effect Scalar...
LLVM_ABI bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
LLVM_ABI bool isKnownPositive(const SCEV *S)
Test if the given expression is known to be positive.
APInt getUnsignedRangeMin(const SCEV *S)
Determine the min of the unsigned range for a particular SCEV.
LLVM_ABI bool SimplifyICmpOperands(CmpPredicate &Pred, const SCEV *&LHS, const SCEV *&RHS, unsigned Depth=0)
Simplify LHS and RHS in a comparison with predicate Pred.
LLVM_ABI const SCEV * getOffsetOfExpr(Type *IntTy, StructType *STy, unsigned FieldNo)
Return an expression for offsetof on the given field with type IntTy.
LLVM_ABI LoopDisposition getLoopDisposition(const SCEV *S, const Loop *L)
Return the "disposition" of the given SCEV with respect to the given loop.
LLVM_ABI bool containsAddRecurrence(const SCEV *S)
Return true if the SCEV is a scAddRecExpr or it contains scAddRecExpr.
LLVM_ABI const SCEV * getSignExtendExprImpl(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI const SCEV * getAddRecExpr(const SCEV *Start, const SCEV *Step, const Loop *L, SCEV::NoWrapFlags Flags)
Get an add recurrence expression for the specified loop.
LLVM_ABI bool hasOperand(const SCEV *S, const SCEV *Op) const
Test whether the given SCEV has Op as a direct or indirect operand.
LLVM_ABI const SCEV * getUDivExpr(const SCEV *LHS, const SCEV *RHS)
Get a canonical unsigned division expression, or something simpler if possible.
LLVM_ABI const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI bool isSCEVable(Type *Ty) const
Test if values of the given type are analyzable within the SCEV framework.
LLVM_ABI Type * getEffectiveSCEVType(Type *Ty) const
Return a type with the same bitwidth as the given type and which represents how SCEV will treat the g...
LLVM_ABI const SCEVPredicate * getComparePredicate(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
LLVM_ABI bool haveSameSign(const SCEV *S1, const SCEV *S2)
Return true if we know that S1 and S2 must have the same sign.
LLVM_ABI const SCEV * getNotSCEV(const SCEV *V)
Return the SCEV object corresponding to ~V.
LLVM_ABI const SCEV * getElementCount(Type *Ty, ElementCount EC, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
LLVM_ABI bool instructionCouldExistWithOperands(const SCEV *A, const SCEV *B)
Return true if there exists a point in the program at which both A and B could be operands to the sam...
ConstantRange getUnsignedRange(const SCEV *S)
Determine the unsigned range for a particular SCEV.
LLVM_ABI void print(raw_ostream &OS) const
LLVM_ABI const SCEV * getUMinExpr(const SCEV *LHS, const SCEV *RHS, bool Sequential=false)
LLVM_ABI const SCEV * getPredicatedExitCount(const Loop *L, const BasicBlock *ExitingBlock, SmallVectorImpl< const SCEVPredicate * > *Predicates, ExitCountKind Kind=Exact)
Same as above except this uses the predicated backedge taken info and may require predicates.
static SCEV::NoWrapFlags clearFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OffFlags)
LLVM_ABI void forgetTopmostLoop(const Loop *L)
LLVM_ABI void forgetValue(Value *V)
This method should be called by the client when it has changed a value in a way that may effect its v...
APInt getSignedRangeMin(const SCEV *S)
Determine the min of the signed range for a particular SCEV.
LLVM_ABI const SCEV * getNoopOrAnyExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI void forgetBlockAndLoopDispositions(Value *V=nullptr)
Called when the client has changed the disposition of values in a loop or block.
LLVM_ABI const SCEV * getTruncateExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
@ MonotonicallyDecreasing
@ MonotonicallyIncreasing
LLVM_ABI std::optional< LoopInvariantPredicate > getLoopInvariantPredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI=nullptr)
If the result of the predicate LHS Pred RHS is loop invariant with respect to L, return a LoopInvaria...
LLVM_ABI const SCEV * getStoreSizeOfExpr(Type *IntTy, Type *StoreTy)
Return an expression for the store size of StoreTy that is type IntTy.
LLVM_ABI const SCEVPredicate * getWrapPredicate(const SCEVAddRecExpr *AR, SCEVWrapPredicate::IncrementWrapFlags AddedFlags)
LLVM_ABI bool isLoopBackedgeGuardedByCond(const Loop *L, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether the backedge of the loop is protected by a conditional between LHS and RHS.
LLVM_ABI const SCEV * getMinusSCEV(const SCEV *LHS, const SCEV *RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS.
LLVM_ABI APInt getNonZeroConstantMultiple(const SCEV *S)
const SCEV * getMinusOne(Type *Ty)
Return a SCEV for the constant -1 of a specific type.
static SCEV::NoWrapFlags setFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OnFlags)
LLVM_ABI bool hasLoopInvariantBackedgeTakenCount(const Loop *L)
Return true if the specified loop has an analyzable loop-invariant backedge-taken count.
LLVM_ABI BlockDisposition getBlockDisposition(const SCEV *S, const BasicBlock *BB)
Return the "disposition" of the given SCEV with respect to the given block.
LLVM_ABI const SCEV * getNoopOrZeroExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)
LLVM_ABI const SCEV * getUMinFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS, bool Sequential=false)
Promote the operands to the wider of the types using zero-extension, and then perform a umin operatio...
LLVM_ABI bool loopIsFiniteByAssumption(const Loop *L)
Return true if this loop is finite by assumption.
LLVM_ABI const SCEV * getExistingSCEV(Value *V)
Return an existing SCEV for V if there is one, otherwise return nullptr.
LLVM_ABI APInt getConstantMultiple(const SCEV *S, const Instruction *CtxI=nullptr)
Returns the max constant multiple of S.
LoopDisposition
An enum describing the relationship between a SCEV and a loop.
@ LoopComputable
The SCEV varies predictably with the loop.
@ LoopVariant
The SCEV is loop-variant (unknown).
@ LoopInvariant
The SCEV is loop-invariant.
friend class SCEVCallbackVH
LLVM_ABI bool isKnownMultipleOf(const SCEV *S, uint64_t M, SmallVectorImpl< const SCEVPredicate * > &Assumptions)
Check that S is a multiple of M.
LLVM_ABI const SCEV * getAnyExtendExpr(const SCEV *Op, Type *Ty)
getAnyExtendExpr - Return a SCEV for the given operand extended with unspecified bits out to the give...
LLVM_ABI bool isKnownToBeAPowerOfTwo(const SCEV *S, bool OrZero=false, bool OrNegative=false)
Test if the given expression is known to be a power of 2.
LLVM_ABI std::optional< SCEV::NoWrapFlags > getStrengthenedNoWrapFlagsFromBinOp(const OverflowingBinaryOperator *OBO)
Parse NSW/NUW flags from add/sub/mul IR binary operation Op into SCEV no-wrap flags,...
LLVM_ABI void forgetLcssaPhiWithNewPredecessor(Loop *L, PHINode *V)
Forget LCSSA phi node V of loop L to which a new predecessor was added, such that it may no longer be...
LLVM_ABI bool containsUndefs(const SCEV *S) const
Return true if the SCEV expression contains an undef value.
LLVM_ABI std::optional< MonotonicPredicateType > getMonotonicPredicateType(const SCEVAddRecExpr *LHS, ICmpInst::Predicate Pred)
If, for all loop invariant X, the predicate "LHS `Pred` X" is monotonically increasing or decreasing,...
LLVM_ABI const SCEV * getCouldNotCompute()
LLVM_ABI bool isAvailableAtLoopEntry(const SCEV *S, const Loop *L)
Determine if the SCEV can be evaluated at loop's entry.
LLVM_ABI uint32_t getMinTrailingZeros(const SCEV *S, const Instruction *CtxI=nullptr)
Determine the minimum number of zero bits that S is guaranteed to end in (at every loop iteration).
BlockDisposition
An enum describing the relationship between a SCEV and a basic block.
@ DominatesBlock
The SCEV dominates the block.
@ ProperlyDominatesBlock
The SCEV properly dominates the block.
@ DoesNotDominateBlock
The SCEV does not dominate the block.
LLVM_ABI const SCEV * getGEPExpr(GEPOperator *GEP, ArrayRef< const SCEV * > IndexExprs)
Returns an expression for a GEP.
LLVM_ABI const SCEV * getExitCount(const Loop *L, const BasicBlock *ExitingBlock, ExitCountKind Kind=Exact)
Return the number of times the backedge executes before the given exit would be taken; if not exactly...
LLVM_ABI const SCEV * getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI void getPoisonGeneratingValues(SmallPtrSetImpl< const Value * > &Result, const SCEV *S)
Return the set of Values that, if poison, will definitively result in S being poison as well.
LLVM_ABI void forgetLoopDispositions()
Called when the client has changed the disposition of values in this loop.
LLVM_ABI const SCEV * getVScale(Type *Ty)
LLVM_ABI unsigned getSmallConstantTripCount(const Loop *L)
Returns the exact trip count of the loop if we can compute it, and the result is a small constant.
LLVM_ABI bool hasComputableLoopEvolution(const SCEV *S, const Loop *L)
Return true if the given SCEV changes value in a known way in the specified loop.
LLVM_ABI const SCEV * getPointerBase(const SCEV *V)
Transitively follow the chain of pointer-type operands until reaching a SCEV that does not have a sin...
LLVM_ABI const SCEV * getMinMaxExpr(SCEVTypes Kind, SmallVectorImpl< const SCEV * > &Operands)
LLVM_ABI void forgetAllLoops()
LLVM_ABI bool dominates(const SCEV *S, const BasicBlock *BB)
Return true if elements that makes up the given SCEV dominate the specified basic block.
APInt getUnsignedRangeMax(const SCEV *S)
Determine the max of the unsigned range for a particular SCEV.
ExitCountKind
The terms "backedge taken count" and "exit count" are used interchangeably to refer to the number of ...
@ SymbolicMaximum
An expression which provides an upper bound on the exact trip count.
@ ConstantMaximum
A constant which provides an upper bound on the exact trip count.
@ Exact
An expression exactly describing the number of times the backedge has executed when a loop is exited.
LLVM_ABI const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
LLVM_ABI const SCEV * getMulExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
LLVM_ABI const SCEVAddRecExpr * convertSCEVToAddRecWithPredicates(const SCEV *S, const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Preds)
Tries to convert the S expression to an AddRec expression, adding additional predicates to Preds as r...
LLVM_ABI const SCEV * getElementSize(Instruction *Inst)
Return the size of an element read or written by Inst.
LLVM_ABI const SCEV * getSizeOfExpr(Type *IntTy, TypeSize Size)
Return an expression for a TypeSize.
LLVM_ABI std::optional< bool > evaluatePredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Check whether the condition described by Pred, LHS, and RHS is true or false.
LLVM_ABI const SCEV * getUnknown(Value *V)
LLVM_ABI std::optional< std::pair< const SCEV *, SmallVector< const SCEVPredicate *, 3 > > > createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI)
Checks if SymbolicPHI can be rewritten as an AddRecExpr under some Predicates.
LLVM_ABI const SCEV * getTruncateOrZeroExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
static SCEV::NoWrapFlags maskFlags(SCEV::NoWrapFlags Flags, int Mask)
Convenient NoWrapFlags manipulation that hides enum casts and is visible in the ScalarEvolution name ...
LLVM_ABI std::optional< APInt > computeConstantDifference(const SCEV *LHS, const SCEV *RHS)
Compute LHS - RHS and returns the result as an APInt if it is a constant, and std::nullopt if it isn'...
LLVM_ABI bool properlyDominates(const SCEV *S, const BasicBlock *BB)
Return true if elements that makes up the given SCEV properly dominate the specified basic block.
LLVM_ABI const SCEV * rewriteUsingPredicate(const SCEV *S, const Loop *L, const SCEVPredicate &A)
Re-writes the SCEV according to the Predicates in A.
LLVM_ABI std::pair< const SCEV *, const SCEV * > SplitIntoInitAndPostInc(const Loop *L, const SCEV *S)
Splits SCEV expression S into two SCEVs.
LLVM_ABI bool canReuseInstruction(const SCEV *S, Instruction *I, SmallVectorImpl< Instruction * > &DropPoisonGeneratingInsts)
Check whether it is poison-safe to represent the expression S using the instruction I.
LLVM_ABI bool isKnownPredicateAt(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
LLVM_ABI const SCEV * getPredicatedSymbolicMaxBackedgeTakenCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Predicates)
Similar to getSymbolicMaxBackedgeTakenCount, except it will add a set of SCEV predicates to Predicate...
LLVM_ABI ~ScalarEvolution()
LLVM_ABI const SCEV * getUDivExactExpr(const SCEV *LHS, const SCEV *RHS)
Get a canonical unsigned division expression, or something simpler if possible.
LLVM_ABI void registerUser(const SCEV *User, ArrayRef< const SCEV * > Ops)
Notify this ScalarEvolution that User directly uses SCEVs in Ops.
LLVM_ABI const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
LLVM_ABI bool isBasicBlockEntryGuardedByCond(const BasicBlock *BB, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether entry to the basic block is protected by a conditional between LHS and RHS.
LLVM_ABI const SCEV * getTruncateOrSignExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI bool containsErasedValue(const SCEV *S) const
Return true if the SCEV expression contains a Value that has been optimised out and is now a nullptr.
LLVM_ABI bool isKnownPredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
LLVM_ABI bool isKnownViaInduction(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
We'd like to check the predicate on every iteration of the most dominated loop between loops used in ...
const SCEV * getSymbolicMaxBackedgeTakenCount(const Loop *L)
When successful, this returns a SCEV that is greater than or equal to (i.e.
APInt getSignedRangeMax(const SCEV *S)
Determine the max of the signed range for a particular SCEV.
LLVM_ABI void verify() const
LLVMContext & getContext() const
Implements a dense probed hash-table based set with some number of buckets stored inline.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
iterator insert(iterator I, T &&Elt)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
Used to lazily calculate structure layout information for a target machine, based on the DataLayout s...
TypeSize getElementOffset(unsigned Idx) const
TypeSize getSizeInBits() const
Class to represent struct types.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
The instances of the Type class are immutable: once they are created, they are never changed.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
bool isPointerTy() const
True if this is an instance of PointerType.
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
unsigned getValueID() const
Return an ID for the concrete type of this object.
LLVM_ABI void printAsOperand(raw_ostream &O, bool PrintType=true, const Module *M=nullptr) const
Print the name of this Value out to the specified raw_ostream.
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
This class implements an extremely fast bulk output stream that can only output to a stream.
raw_ostream & indent(unsigned NumSpaces)
indent - Insert 'NumSpaces' spaces.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
const APInt & smin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be signed.
const APInt & smax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be signed.
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
LLVM_ABI std::optional< APInt > SolveQuadraticEquationWrap(APInt A, APInt B, APInt C, unsigned RangeWidth)
Let q(n) = An^2 + Bn + C, and BW = bit width of the value range (e.g.
const APInt & umax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be unsigned.
LLVM_ABI APInt GreatestCommonDivisor(APInt A, APInt B)
Compute GCD of two unsigned APInt values.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
int getMinValue(MCInstrInfo const &MCII, MCInst const &MCI)
Return the minimum value of an extendable operand.
@ BasicBlock
Various leaf nodes.
LLVM_ABI Function * getDeclarationIfExists(const Module *M, ID id)
Look up the Function declaration of the intrinsic id in the Module M and return it if it exists.
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
bool match(Val *V, const Pattern &P)
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
bind_ty< WithOverflowInst > m_WithOverflowInst(WithOverflowInst *&I)
Match a with overflow intrinsic, capturing it if we match.
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
class_match< BasicBlock > m_BasicBlock()
Match an arbitrary basic block value and ignore it.
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
class_match< const SCEVVScale > m_SCEVVScale()
bind_cst_ty m_scev_APInt(const APInt *&C)
Match an SCEV constant and bind it to an APInt.
cst_pred_ty< is_all_ones > m_scev_AllOnes()
Match an integer with all bits set.
SCEVUnaryExpr_match< SCEVZeroExtendExpr, Op0_t > m_scev_ZExt(const Op0_t &Op0)
is_undef_or_poison m_scev_UndefOrPoison()
Match an SCEVUnknown wrapping undef or poison.
class_match< const SCEVConstant > m_SCEVConstant()
cst_pred_ty< is_one > m_scev_One()
Match an integer 1.
specificloop_ty m_SpecificLoop(const Loop *L)
SCEVAffineAddRec_match< Op0_t, Op1_t, class_match< const Loop > > m_scev_AffineAddRec(const Op0_t &Op0, const Op1_t &Op1)
bind_ty< const SCEVMulExpr > m_scev_Mul(const SCEVMulExpr *&V)
SCEVUnaryExpr_match< SCEVSignExtendExpr, Op0_t > m_scev_SExt(const Op0_t &Op0)
cst_pred_ty< is_zero > m_scev_Zero()
Match an integer 0.
SCEVUnaryExpr_match< SCEVTruncateExpr, Op0_t > m_scev_Trunc(const Op0_t &Op0)
bool match(const SCEV *S, const Pattern &P)
SCEVBinaryExpr_match< SCEVUDivExpr, Op0_t, Op1_t > m_scev_UDiv(const Op0_t &Op0, const Op1_t &Op1)
specificscev_ty m_scev_Specific(const SCEV *S)
Match if we have a specific specified SCEV.
SCEVBinaryExpr_match< SCEVMulExpr, Op0_t, Op1_t, SCEV::FlagNUW, true > m_scev_c_NUWMul(const Op0_t &Op0, const Op1_t &Op1)
class_match< const Loop > m_Loop()
bind_ty< const SCEVAddExpr > m_scev_Add(const SCEVAddExpr *&V)
bind_ty< const SCEVUnknown > m_SCEVUnknown(const SCEVUnknown *&V)
SCEVBinaryExpr_match< SCEVMulExpr, Op0_t, Op1_t, SCEV::FlagAnyWrap, true > m_scev_c_Mul(const Op0_t &Op0, const Op1_t &Op1)
SCEVBinaryExpr_match< SCEVSMaxExpr, Op0_t, Op1_t > m_scev_SMax(const Op0_t &Op0, const Op1_t &Op1)
SCEVURem_match< Op0_t, Op1_t > m_scev_URem(Op0_t LHS, Op1_t RHS, ScalarEvolution &SE)
Match the mathematical pattern A - (A / B) * B, where A and B can be arbitrary expressions.
class_match< const SCEV > m_SCEV()
@ Valid
The data is already valid.
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
@ Switch
The "resume-switch" lowering, where there are separate resume and destroy functions that are shared b...
NodeAddr< PhiNode * > Phi
friend class Instruction
Iterator for Instructions in a `BasicBlock.
This is an optimization pass for GlobalISel generic memory operations.
void visitAll(const SCEV *Root, SV &Visitor)
Use SCEVTraversal to visit all nodes in the given expression tree.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
FunctionAddr VTableAddr Value
LLVM_ATTRIBUTE_ALWAYS_INLINE DynamicAPInt gcd(const DynamicAPInt &A, const DynamicAPInt &B)
void stable_sort(R &&Range)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
SaveAndRestore(T &) -> SaveAndRestore< T >
Printable print(const GCNRegPressure &RP, const GCNSubtarget *ST=nullptr, unsigned DynamicVGPRBlockSize=0)
LLVM_ABI bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
LLVM_ABI bool mustTriggerUB(const Instruction *I, const SmallPtrSetImpl< const Value * > &KnownPoison)
Return true if the given instruction must trigger undefined behavior when I is executed with any oper...
LLVM_ABI bool canConstantFoldCallTo(const CallBase *Call, const Function *F)
canConstantFoldCallTo - Return true if its even possible to fold a call to the specified function.
InterleavedRange< Range > interleaved(const Range &R, StringRef Separator=", ", StringRef Prefix="", StringRef Suffix="")
Output range R as a sequence of interleaved elements.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
auto successors(const MachineBasicBlock *BB)
scope_exit(Callable) -> scope_exit< Callable >
constexpr from_range_t from_range
auto dyn_cast_if_present(const Y &Val)
dyn_cast_if_present<X> - Functionally identical to dyn_cast, except that a null (or none in the case ...
bool set_is_subset(const S1Ty &S1, const S2Ty &S2)
set_is_subset(A, B) - Return true iff A in B
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
unsigned short computeExpressionSize(ArrayRef< const SCEV * > Args)
auto uninitialized_copy(R &&Src, IterTy Dst)
bool isa_and_nonnull(const Y &Val)
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
LLVM_ABI bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO, const DominatorTree &DT)
Returns true if the arithmetic part of the WO 's result is used only along the paths control dependen...
DomTreeNodeBase< BasicBlock > DomTreeNode
LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
auto dyn_cast_or_null(const Y &Val)
void erase(Container &C, ValueType V)
Wrapper function to remove a value from a container:
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
iterator_range< pointee_iterator< WrappedIteratorT > > make_pointee_range(RangeT &&Range)
auto reverse(ContainerTy &&C)
LLVM_ABI bool isMustProgress(const Loop *L)
Return true if this loop can be assumed to make progress.
LLVM_ABI bool impliesPoison(const Value *ValAssumedPoison, const Value *V)
Return true if V is poison given that ValAssumedPoison is already poison.
LLVM_ABI bool isFinite(const Loop *L)
Return true if this loop can be assumed to run for a finite number of iterations.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
LLVM_ABI bool programUndefinedIfPoison(const Instruction *Inst)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool isPointerTy(const Type *T)
FunctionAddr VTableAddr Count
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
LLVM_ABI bool propagatesPoison(const Use &PoisonOp)
Return true if PoisonOp's user yields poison or raises UB if its operand PoisonOp is poison.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Mul
Product of integers.
@ SMax
Signed integer max implemented in terms of select(cmp()).
@ SMin
Signed integer min implemented in terms of select(cmp()).
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
auto count(R &&Range, const E &Element)
Wrapper function around std::count to count the number of times an element Element occurs in the give...
DWARFExpression::Operation Op
auto max_element(R &&Range)
Provide wrappers to std::max_element which take ranges instead of having to pass begin/end explicitly...
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return the number of times the sign bit of the register is replicated into the other bits.
constexpr unsigned BitWidth
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
constexpr bool isIntN(unsigned N, int64_t x)
Checks if a signed integer fits into the given (dynamic) bit width.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
iterator_range< df_iterator< T > > depth_first(const T &G)
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
LLVM_ABI bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
LLVM_ABI Constant * ConstantFoldInstOperands(const Instruction *I, ArrayRef< Constant * > Ops, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldInstOperands - Attempt to constant fold an instruction with the specified operands.
bool SCEVExprContains(const SCEV *Root, PredTy Pred)
Return true if any node in Root satisfies the predicate Pred.
Implement std::hash so that hash_code can be used in STL containers.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
A special type used by analysis passes to provide an address that identifies that particular analysis...
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
bool isNonNegative() const
Returns true if this value is known to be non-negative.
static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
unsigned getBitWidth() const
Get the bit width of this value.
static LLVM_ABI KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for lshr(LHS, RHS).
KnownBits zextOrTrunc(unsigned BitWidth) const
Return known bits for a zero extension or truncation of the value we're tracking.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
bool isNegative() const
Returns true if this value is known to be negative.
static LLVM_ABI KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
An object of this class is returned by queries that could not be answered.
LLVM_ABI SCEVCouldNotCompute()
static LLVM_ABI bool classof(const SCEV *S)
Methods for support type inquiry through isa, cast, and dyn_cast:
This class defines a simple visitor class that may be used for various SCEV analysis purposes.
A utility class that uses RAII to save and restore the value of a variable.
Information about the number of loop iterations for which a loop exit's branch condition evaluates to...
LLVM_ABI ExitLimit(const SCEV *E)
Construct either an exact exit limit from a constant, or an unknown one from a SCEVCouldNotCompute.
const SCEV * ExactNotTaken
const SCEV * SymbolicMaxNotTaken
SmallVector< const SCEVPredicate *, 4 > Predicates
A vector of predicate guards for this ExitLimit.
const SCEV * ConstantMaxNotTaken