83#include "llvm/Config/llvm-config.h"
138#define DEBUG_TYPE "scalar-evolution"
141 "Number of loop exits with predictable exit counts");
143 "Number of loop exits without predictable exit counts");
145 "Number of loops with trip counts computed by force");
147#ifdef EXPENSIVE_CHECKS
155 cl::desc(
"Maximum number of iterations SCEV will "
156 "symbolically execute a constant "
162 cl::desc(
"Verify ScalarEvolution's backedge taken counts (slow)"));
165 cl::desc(
"Enable stricter verification with -verify-scev is passed"));
169 cl::desc(
"Verify IR correctness when making sensitive SCEV queries (slow)"),
174 cl::desc(
"Threshold for inlining multiplication operands into a SCEV"),
179 cl::desc(
"Threshold for inlining addition operands into a SCEV"),
183 "scalar-evolution-max-scev-compare-depth",
cl::Hidden,
184 cl::desc(
"Maximum depth of recursive SCEV complexity comparisons"),
188 "scalar-evolution-max-scev-operations-implication-depth",
cl::Hidden,
189 cl::desc(
"Maximum depth of recursive SCEV operations implication analysis"),
193 "scalar-evolution-max-value-compare-depth",
cl::Hidden,
194 cl::desc(
"Maximum depth of recursive value complexity comparisons"),
199 cl::desc(
"Maximum depth of recursive arithmetics"),
203 "scalar-evolution-max-constant-evolving-depth",
cl::Hidden,
208 cl::desc(
"Maximum depth of recursive SExt/ZExt/Trunc"),
213 cl::desc(
"Max coefficients in AddRec during evolving"),
218 cl::desc(
"Size of the expression which is considered huge"),
223 cl::desc(
"Threshold for switching to iteratively computing SCEV ranges"),
227 "scalar-evolution-max-loop-guard-collection-depth",
cl::Hidden,
228 cl::desc(
"Maximum depth for recursive loop guard collection"),
cl::init(1));
233 cl::desc(
"When printing analysis, include information on every instruction"));
236 "scalar-evolution-use-expensive-range-sharpening",
cl::Hidden,
238 cl::desc(
"Use more powerful methods of sharpening expression ranges. May "
239 "be costly in terms of compile time"));
242 "scalar-evolution-max-scc-analysis-depth",
cl::Hidden,
243 cl::desc(
"Maximum amount of nodes to process while searching SCEVUnknown "
244 "Phi strongly connected components"),
249 cl::desc(
"Handle <= and >= in finite loops"),
253 "scalar-evolution-use-context-for-no-wrap-flag-strenghening",
cl::Hidden,
254 cl::desc(
"Infer nuw/nsw flags using context where suitable"),
265#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
283 OS <<
"(ptrtoint " << *
Op->getType() <<
" " << *
Op <<
" to "
284 << *PtrToInt->
getType() <<
")";
290 OS <<
"(trunc " << *
Op->getType() <<
" " << *
Op <<
" to "
297 OS <<
"(zext " << *
Op->getType() <<
" " << *
Op <<
" to "
304 OS <<
"(sext " << *
Op->getType() <<
" " << *
Op <<
" to "
333 const char *OpStr =
nullptr;
346 OpStr =
" umin_seq ";
370 OS <<
"(" << *UDiv->
getLHS() <<
" /u " << *UDiv->
getRHS() <<
")";
377 OS <<
"***COULDNOTCOMPUTE***";
453 if (!
Mul)
return false;
457 if (!SC)
return false;
475 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
477 UniqueSCEVs.InsertNode(S, IP);
496 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
499 UniqueSCEVs.InsertNode(S, IP);
519 "Must be a non-bit-width-changing pointer-to-integer cast!");
531 "Cannot truncate non-integer value!");
538 "Cannot zero extend non-integer value!");
545 "Cannot sign extend non-integer value!");
550 SE->forgetMemoizedResults(
this);
553 SE->UniqueSCEVs.RemoveNode(
this);
559void SCEVUnknown::allUsesReplacedWith(
Value *New) {
561 SE->forgetMemoizedResults(
this);
564 SE->UniqueSCEVs.RemoveNode(
this);
586 if (LIsPointer != RIsPointer)
587 return (
int)LIsPointer - (int)RIsPointer;
592 return (
int)LID - (int)RID;
597 unsigned LArgNo = LA->getArgNo(), RArgNo =
RA->getArgNo();
598 return (
int)LArgNo - (int)RArgNo;
604 if (
auto L = LGV->getLinkage() - RGV->getLinkage())
607 const auto IsGVNameSemantic = [&](
const GlobalValue *GV) {
608 auto LT = GV->getLinkage();
615 if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
616 return LGV->getName().compare(RGV->getName());
627 if (LParent != RParent) {
630 if (LDepth != RDepth)
631 return (
int)LDepth - (int)RDepth;
635 unsigned LNumOps = LInst->getNumOperands(),
636 RNumOps = RInst->getNumOperands();
637 if (LNumOps != RNumOps)
638 return (
int)LNumOps - (int)RNumOps;
640 for (
unsigned Idx :
seq(LNumOps)) {
642 RInst->getOperand(Idx),
Depth + 1);
656static std::optional<int>
666 return (
int)LType - (int)RType;
691 unsigned LBitWidth = LA.
getBitWidth(), RBitWidth =
RA.getBitWidth();
692 if (LBitWidth != RBitWidth)
693 return (
int)LBitWidth - (int)RBitWidth;
694 return LA.
ult(
RA) ? -1 : 1;
700 return LTy->getBitWidth() - RTy->getBitWidth();
711 if (LLoop != RLoop) {
713 assert(LHead != RHead &&
"Two loops share the same header?");
717 "No dominance between recurrences used by one SCEV?");
740 unsigned LNumOps = LOps.
size(), RNumOps = ROps.
size();
741 if (LNumOps != RNumOps)
742 return (
int)LNumOps - (int)RNumOps;
744 for (
unsigned i = 0; i != LNumOps; ++i) {
769 if (
Ops.size() < 2)
return;
774 return Complexity && *Complexity < 0;
776 if (
Ops.size() == 2) {
780 if (IsLessComplex(
RHS,
LHS))
787 return IsLessComplex(
LHS,
RHS);
794 for (
unsigned i = 0, e =
Ops.size(); i != e-2; ++i) {
800 for (
unsigned j = i+1; j != e &&
Ops[j]->getSCEVType() == Complexity; ++j) {
805 if (i == e-2)
return;
827template <
typename FoldT,
typename IsIdentityT,
typename IsAbsorberT>
831 IsIdentityT IsIdentity, IsAbsorberT IsAbsorber) {
833 for (
unsigned Idx = 0; Idx <
Ops.size();) {
841 Ops.erase(
Ops.begin() + Idx);
848 assert(Folded &&
"Must have folded value");
852 if (Folded && IsAbsorber(Folded->
getAPInt()))
856 if (Folded && !IsIdentity(Folded->
getAPInt()))
857 Ops.insert(
Ops.begin(), Folded);
859 return Ops.size() == 1 ?
Ops[0] :
nullptr;
934 APInt OddFactorial(W, 1);
936 for (
unsigned i = 3; i <= K; ++i) {
939 OddFactorial *= (i >> TwoFactors);
943 unsigned CalculationBits = W +
T;
957 for (
unsigned i = 1; i != K; ++i) {
990 for (
unsigned i = 1, e =
Operands.size(); i != e; ++i) {
1010 "getLosslessPtrToIntExpr() should self-recurse at most once.");
1014 if (!
Op->getType()->isPointerTy())
1025 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
1055 SCEV *S =
new (SCEVAllocator)
1057 UniqueSCEVs.InsertNode(S, IP);
1062 assert(
Depth == 0 &&
"getLosslessPtrToIntExpr() should not self-recurse for "
1063 "non-SCEVUnknown's.");
1075 class SCEVPtrToIntSinkingRewriter
1083 SCEVPtrToIntSinkingRewriter
Rewriter(SE);
1084 return Rewriter.visit(Scev);
1093 return Base::visit(S);
1118 "Should only reach pointer-typed SCEVUnknown's.");
1124 const SCEV *IntOp = SCEVPtrToIntSinkingRewriter::rewrite(
Op, *
this);
1126 "We must have succeeded in sinking the cast, "
1127 "and ending up with an integer-typed expression!");
1132 assert(Ty->isIntegerTy() &&
"Target type must be an integer type!");
1144 "This is not a truncating conversion!");
1146 "This is not a conversion to a SCEVable type!");
1147 assert(!
Op->getType()->isPointerTy() &&
"Can't truncate pointer!");
1155 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1177 UniqueSCEVs.InsertNode(S, IP);
1189 unsigned numTruncs = 0;
1190 for (
unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
1198 if (numTruncs < 2) {
1208 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
1215 for (
const SCEV *
Op : AddRec->operands())
1230 UniqueSCEVs.InsertNode(S, IP);
1270struct ExtendOpTraitsBase {
1271 typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(
const SCEV *,
Type *,
1276template <
typename ExtendOp>
struct ExtendOpTraits {
1292 static const GetExtendExprTy GetExtendExpr;
1294 static const SCEV *getOverflowLimitForStep(
const SCEV *Step,
1295 ICmpInst::Predicate *Pred,
1296 ScalarEvolution *SE) {
1301const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1308 static const GetExtendExprTy GetExtendExpr;
1310 static const SCEV *getOverflowLimitForStep(
const SCEV *Step,
1311 ICmpInst::Predicate *Pred,
1312 ScalarEvolution *SE) {
1317const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1329template <
typename ExtendOpTy>
1332 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1333 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1349 for (
auto It = DiffOps.
begin(); It != DiffOps.
end(); ++It)
1362 auto PreStartFlags =
1380 const SCEV *OperandExtendedStart =
1382 (SE->*GetExtendExpr)(Step, WideTy,
Depth));
1383 if ((SE->*GetExtendExpr)(Start, WideTy,
Depth) == OperandExtendedStart) {
1395 const SCEV *OverflowLimit =
1396 ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);
1398 if (OverflowLimit &&
1406template <
typename ExtendOpTy>
1410 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1418 (SE->*GetExtendExpr)(PreStart, Ty,
Depth));
1453template <
typename ExtendOpTy>
1454bool ScalarEvolution::proveNoWrapByVaryingStart(
const SCEV *Start,
1457 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1467 APInt StartAI = StartC->
getAPInt();
1469 for (
unsigned Delta : {-2, -1, 1, 2}) {
1470 const SCEV *PreStart =
getConstant(StartAI - Delta);
1472 FoldingSetNodeID
ID;
1474 ID.AddPointer(PreStart);
1475 ID.AddPointer(Step);
1479 static_cast<SCEVAddRecExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
1483 if (PreAR && PreAR->getNoWrapFlags(WrapType)) {
1486 const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
1487 DeltaS, &Pred,
this);
1505 const unsigned BitWidth =
C.getBitWidth();
1523 const APInt &ConstantStart,
1542 auto &UserIDs = FoldCacheUser[
I.first->second];
1543 assert(
count(UserIDs,
ID) == 1 &&
"unexpected duplicates in UserIDs");
1544 for (
unsigned I = 0;
I != UserIDs.size(); ++
I)
1545 if (UserIDs[
I] ==
ID) {
1550 I.first->second = S;
1552 FoldCacheUser[S].push_back(
ID);
1558 "This is not an extending conversion!");
1560 "This is not a conversion to a SCEVable type!");
1561 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1565 if (
const SCEV *S = FoldCache.lookup(
ID))
1577 "This is not an extending conversion!");
1579 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1596 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1600 UniqueSCEVs.InsertNode(S, IP);
1609 const SCEV *
X = ST->getOperand();
1623 if (AR->isAffine()) {
1624 const SCEV *Start = AR->getStart();
1625 const SCEV *Step = AR->getStepRecurrence(*
this);
1627 const Loop *L = AR->getLoop();
1631 if (AR->hasNoUnsignedWrap()) {
1652 const SCEV *CastedMaxBECount =
1656 if (MaxBECount == RecastedMaxBECount) {
1666 const SCEV *WideMaxBECount =
1668 const SCEV *OperandExtendedAdd =
1674 if (ZAdd == OperandExtendedAdd) {
1685 OperandExtendedAdd =
1691 if (ZAdd == OperandExtendedAdd) {
1712 !AC.assumptions().empty()) {
1714 auto NewFlags = proveNoUnsignedWrapViaInduction(AR);
1716 if (AR->hasNoUnsignedWrap()) {
1751 const APInt &
C = SC->getAPInt();
1755 const SCEV *SResidual =
1764 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
1789 if (SA->hasNoUnsignedWrap()) {
1793 for (
const auto *
Op : SA->operands())
1810 const SCEV *SResidual =
1822 if (SM->hasNoUnsignedWrap()) {
1826 for (
const auto *
Op : SM->operands())
1844 const SCEV *TruncRHS;
1863 for (
auto *Operand :
MinMax->operands())
1874 for (
auto *Operand :
MinMax->operands())
1881 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1884 UniqueSCEVs.InsertNode(S, IP);
1892 "This is not an extending conversion!");
1894 "This is not a conversion to a SCEVable type!");
1895 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1899 if (
const SCEV *S = FoldCache.lookup(
ID))
1911 "This is not an extending conversion!");
1913 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1935 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1940 UniqueSCEVs.InsertNode(S, IP);
1949 const SCEV *
X = ST->getOperand();
1960 if (SA->hasNoSignedWrap()) {
1964 for (
const auto *
Op : SA->operands())
1982 const SCEV *SResidual =
1996 if (AR->isAffine()) {
1997 const SCEV *Start = AR->getStart();
1998 const SCEV *Step = AR->getStepRecurrence(*
this);
2000 const Loop *L = AR->getLoop();
2004 if (AR->hasNoSignedWrap()) {
2026 const SCEV *CastedMaxBECount =
2030 if (MaxBECount == RecastedMaxBECount) {
2040 const SCEV *WideMaxBECount =
2042 const SCEV *OperandExtendedAdd =
2048 if (SAdd == OperandExtendedAdd) {
2059 OperandExtendedAdd =
2065 if (SAdd == OperandExtendedAdd) {
2085 auto NewFlags = proveNoSignedWrapViaInduction(AR);
2087 if (AR->hasNoSignedWrap()) {
2102 const APInt &
C = SC->getAPInt();
2106 const SCEV *SResidual =
2115 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
2134 for (
auto *Operand :
MinMax->operands())
2143 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
2146 UniqueSCEVs.InsertNode(S, IP);
2172 "This is not an extending conversion!");
2174 "This is not a conversion to a SCEVable type!");
2179 if (SC->getAPInt().isNegative())
2184 const SCEV *NewOp =
T->getOperand();
2203 for (
const SCEV *
Op : AR->operands())
2242 APInt &AccumulatedConstant,
2245 bool Interesting =
false;
2252 if (Scale != 1 || AccumulatedConstant != 0 ||
C->getValue()->isZero())
2254 AccumulatedConstant += Scale *
C->getAPInt();
2259 for (; i !=
Ops.size(); ++i) {
2269 Add->operands(), NewScale, SE);
2275 auto Pair = M.insert({
Key, NewScale});
2279 Pair.first->second += NewScale;
2287 std::pair<DenseMap<const SCEV *, APInt>::iterator,
bool> Pair =
2288 M.insert({
Ops[i], Scale});
2292 Pair.first->second += Scale;
2311 case Instruction::Add:
2314 case Instruction::Sub:
2317 case Instruction::Mul:
2331 const SCEV *
A = (this->*Extension)(
2333 const SCEV *LHSB = (this->*Extension)(LHS, WideTy, 0);
2334 const SCEV *RHSB = (this->*Extension)(RHS, WideTy, 0);
2342 if (BinOp == Instruction::Mul)
2348 APInt C = RHSC->getAPInt();
2349 unsigned NumBits =
C.getBitWidth();
2350 bool IsSub = (BinOp == Instruction::Sub);
2351 bool IsNegativeConst = (
Signed &&
C.isNegative());
2353 bool OverflowDown = IsSub ^ IsNegativeConst;
2355 if (IsNegativeConst) {
2368 APInt Limit = Min + Magnitude;
2374 APInt Limit = Max - Magnitude;
2379std::optional<SCEV::NoWrapFlags>
2384 return std::nullopt;
2393 bool Deduced =
false;
2395 if (OBO->
getOpcode() != Instruction::Add &&
2398 return std::nullopt;
2407 false, LHS, RHS, CtxI)) {
2414 true, LHS, RHS, CtxI)) {
2421 return std::nullopt;
2431 using namespace std::placeholders;
2438 assert(CanAnalyze &&
"don't call from other places!");
2445 auto IsKnownNonNegative = [&](
const SCEV *S) {
2455 if (SignOrUnsignWrap != SignOrUnsignMask &&
2462 return Instruction::Add;
2464 return Instruction::Mul;
2475 Opcode,
C, OBO::NoSignedWrap);
2483 Opcode,
C, OBO::NoUnsignedWrap);
2493 Ops[0]->isZero() && IsKnownNonNegative(
Ops[1]))
2500 if (UDiv->getOperand(1) ==
Ops[1])
2503 if (UDiv->getOperand(1) ==
Ops[0])
2519 "only nuw or nsw allowed");
2520 assert(!
Ops.empty() &&
"Cannot get empty add!");
2521 if (
Ops.size() == 1)
return Ops[0];
2524 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i)
2526 "SCEVAddExpr operand types don't match!");
2528 Ops, [](
const SCEV *
Op) {
return Op->getType()->isPointerTy(); });
2529 assert(NumPtrs <= 1 &&
"add has at most one pointer operand");
2534 [](
const APInt &C1,
const APInt &C2) {
return C1 + C2; },
2535 [](
const APInt &
C) {
return C.isZero(); },
2536 [](
const APInt &
C) {
return false; });
2549 return getOrCreateAddExpr(
Ops, ComputeFlags(
Ops));
2554 if (
Add->getNoWrapFlags(OrigFlags) != OrigFlags)
2555 Add->setNoWrapFlags(ComputeFlags(
Ops));
2563 bool FoundMatch =
false;
2564 for (
unsigned i = 0, e =
Ops.size(); i != e-1; ++i)
2565 if (
Ops[i] ==
Ops[i+1]) {
2577 --i; e -=
Count - 1;
2587 auto FindTruncSrcType = [&]() ->
Type * {
2593 return T->getOperand()->getType();
2595 const auto *LastOp =
Mul->getOperand(
Mul->getNumOperands() - 1);
2597 return T->getOperand()->getType();
2601 if (
auto *SrcType = FindTruncSrcType()) {
2608 if (
T->getOperand()->getType() != SrcType) {
2617 for (
unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
2620 if (
T->getOperand()->getType() != SrcType) {
2648 if (
Ops.size() == 2) {
2658 auto C2 =
C->getAPInt();
2661 APInt ConstAdd = C1 + C2;
2662 auto AddFlags = AddExpr->getNoWrapFlags();
2703 if (
Ops.size() == 2 &&
2714 if (Idx <
Ops.size()) {
2715 bool DeletedAdd =
false;
2726 Ops.erase(
Ops.begin()+Idx);
2729 CommonFlags =
maskFlags(CommonFlags,
Add->getNoWrapFlags());
2752 struct APIntCompare {
2753 bool operator()(
const APInt &LHS,
const APInt &RHS)
const {
2754 return LHS.ult(RHS);
2761 std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
2762 for (
const SCEV *NewOp : NewOps)
2763 MulOpLists[M.find(NewOp)->second].push_back(NewOp);
2766 if (AccumulatedConstant != 0)
2768 for (
auto &MulOp : MulOpLists) {
2769 if (MulOp.first == 1) {
2771 }
else if (MulOp.first != 0) {
2780 if (
Ops.size() == 1)
2791 for (
unsigned MulOp = 0, e =
Mul->getNumOperands(); MulOp != e; ++MulOp) {
2792 const SCEV *MulOpSCEV =
Mul->getOperand(MulOp);
2795 for (
unsigned AddOp = 0, e =
Ops.size(); AddOp != e; ++AddOp)
2796 if (MulOpSCEV ==
Ops[AddOp]) {
2798 const SCEV *InnerMul =
Mul->getOperand(MulOp == 0);
2799 if (
Mul->getNumOperands() != 2) {
2803 Mul->operands().take_front(MulOp));
2811 if (
Ops.size() == 2)
return OuterMul;
2813 Ops.erase(
Ops.begin()+AddOp);
2814 Ops.erase(
Ops.begin()+Idx-1);
2816 Ops.erase(
Ops.begin()+Idx);
2817 Ops.erase(
Ops.begin()+AddOp-1);
2819 Ops.push_back(OuterMul);
2824 for (
unsigned OtherMulIdx = Idx+1;
2831 OMulOp != e; ++OMulOp)
2832 if (OtherMul->
getOperand(OMulOp) == MulOpSCEV) {
2834 const SCEV *InnerMul1 =
Mul->getOperand(MulOp == 0);
2835 if (
Mul->getNumOperands() != 2) {
2837 Mul->operands().take_front(MulOp));
2844 OtherMul->
operands().take_front(OMulOp));
2849 const SCEV *InnerMulSum =
2853 if (
Ops.size() == 2)
return OuterMul;
2854 Ops.erase(
Ops.begin()+Idx);
2855 Ops.erase(
Ops.begin()+OtherMulIdx-1);
2856 Ops.push_back(OuterMul);
2876 for (
unsigned i = 0, e =
Ops.size(); i != e; ++i)
2879 Ops.erase(
Ops.begin()+i);
2884 if (!LIOps.
empty()) {
2909 auto *DefI = getDefiningScopeBound(LIOps);
2911 if (!isGuaranteedToTransferExecutionTo(DefI, ReachI))
2923 if (
Ops.size() == 1)
return NewRec;
2926 for (
unsigned i = 0;; ++i)
2927 if (
Ops[i] == AddRec) {
2937 for (
unsigned OtherIdx = Idx+1;
2945 "AddRecExprs are not sorted in reverse dominance order?");
2952 if (OtherAddRec->getLoop() == AddRecLoop) {
2953 for (
unsigned i = 0, e = OtherAddRec->getNumOperands();
2955 if (i >= AddRecOps.
size()) {
2956 append_range(AddRecOps, OtherAddRec->operands().drop_front(i));
2960 AddRecOps[i], OtherAddRec->getOperand(i)};
2963 Ops.erase(
Ops.begin() + OtherIdx); --OtherIdx;
2978 return getOrCreateAddExpr(
Ops, ComputeFlags(
Ops));
2990 static_cast<SCEVAddExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
2992 const SCEV **O = SCEVAllocator.Allocate<
const SCEV *>(
Ops.size());
2994 S =
new (SCEVAllocator)
2996 UniqueSCEVs.InsertNode(S, IP);
3006 FoldingSetNodeID
ID;
3008 for (
const SCEV *
Op :
Ops)
3013 static_cast<SCEVAddRecExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
3015 const SCEV **
O = SCEVAllocator.Allocate<
const SCEV *>(
Ops.size());
3017 S =
new (SCEVAllocator)
3018 SCEVAddRecExpr(
ID.Intern(SCEVAllocator), O,
Ops.size(), L);
3019 UniqueSCEVs.InsertNode(S, IP);
3020 LoopUsers[
L].push_back(S);
3030 FoldingSetNodeID
ID;
3032 for (
const SCEV *
Op :
Ops)
3036 static_cast<SCEVMulExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
3038 const SCEV **
O = SCEVAllocator.Allocate<
const SCEV *>(
Ops.size());
3040 S =
new (SCEVAllocator) SCEVMulExpr(
ID.Intern(SCEVAllocator),
3042 UniqueSCEVs.InsertNode(S, IP);
3051 if (j > 1 && k / j != i) Overflow =
true;
3067 if (n == 0 || n == k)
return 1;
3068 if (k > n)
return 0;
3074 for (
uint64_t i = 1; i <= k; ++i) {
3075 r =
umul_ov(r, n-(i-1), Overflow);
3084 struct FindConstantInAddMulChain {
3085 bool FoundConstant =
false;
3087 bool follow(
const SCEV *S) {
3092 bool isDone()
const {
3093 return FoundConstant;
3097 FindConstantInAddMulChain
F;
3099 ST.visitAll(StartExpr);
3100 return F.FoundConstant;
3108 "only nuw or nsw allowed");
3109 assert(!
Ops.empty() &&
"Cannot get empty mul!");
3110 if (
Ops.size() == 1)
return Ops[0];
3112 Type *ETy =
Ops[0]->getType();
3114 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i)
3116 "SCEVMulExpr operand types don't match!");
3121 [](
const APInt &C1,
const APInt &C2) {
return C1 * C2; },
3122 [](
const APInt &
C) {
return C.isOne(); },
3123 [](
const APInt &
C) {
return C.isZero(); });
3134 return getOrCreateMulExpr(
Ops, ComputeFlags(
Ops));
3139 if (
Mul->getNoWrapFlags(OrigFlags) != OrigFlags)
3140 Mul->setNoWrapFlags(ComputeFlags(
Ops));
3145 if (
Ops.size() == 2) {
3153 const SCEV *Op0, *Op1;
3161 if (
Ops[0]->isAllOnesValue()) {
3166 bool AnyFolded =
false;
3167 for (
const SCEV *AddOp :
Add->operands()) {
3194 AddRec->getNoWrapFlags(FlagsMask));
3217 APInt C1V = LHSC->getAPInt();
3227 const SCEV *NewMul =
nullptr;
3231 assert(C1V.
ugt(1) &&
"C1 <= 1 should have been folded earlier");
3246 if (Idx <
Ops.size()) {
3247 bool DeletedMul =
false;
3253 Ops.erase(
Ops.begin()+Idx);
3277 for (
unsigned i = 0, e =
Ops.size(); i != e; ++i)
3280 Ops.erase(
Ops.begin()+i);
3285 if (!LIOps.
empty()) {
3298 for (
unsigned i = 0, e = AddRec->
getNumOperands(); i != e; ++i) {
3314 if (
Ops.size() == 1)
return NewRec;
3317 for (
unsigned i = 0;; ++i)
3318 if (
Ops[i] == AddRec) {
3339 bool OpsModified =
false;
3340 for (
unsigned OtherIdx = Idx+1;
3354 bool Overflow =
false;
3361 for (
int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
3365 z < ze && !Overflow; ++z) {
3368 if (LargerThan64Bits)
3369 Coeff =
umul_ov(Coeff1, Coeff2, Overflow);
3371 Coeff = Coeff1*Coeff2;
3386 if (
Ops.size() == 2)
return NewAddRec;
3387 Ops[Idx] = NewAddRec;
3388 Ops.erase(
Ops.begin() + OtherIdx); --OtherIdx;
3404 return getOrCreateMulExpr(
Ops, ComputeFlags(
Ops));
3412 "SCEVURemExpr operand types don't match!");
3417 if (RHSC->getValue()->isOne())
3418 return getZero(LHS->getType());
3421 if (RHSC->getAPInt().isPowerOf2()) {
3422 Type *FullTy = LHS->getType();
3439 assert(!LHS->getType()->isPointerTy() &&
3440 "SCEVUDivExpr operand can't be pointer!");
3441 assert(LHS->getType() == RHS->getType() &&
3442 "SCEVUDivExpr operand types don't match!");
3449 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
3457 if (RHSC->getValue()->isOne())
3462 if (!RHSC->getValue()->isZero()) {
3466 Type *Ty = LHS->getType();
3467 unsigned LZ = RHSC->getAPInt().countl_zero();
3471 if (!RHSC->getAPInt().isPowerOf2())
3479 const APInt &StepInt = Step->getAPInt();
3480 const APInt &DivInt = RHSC->getAPInt();
3481 if (!StepInt.
urem(DivInt) &&
3487 for (
const SCEV *
Op : AR->operands())
3493 const APInt *StartRem;
3506 bool CanFoldWithWrap = StepInt.
ule(DivInt) &&
3510 const SCEV *NewStart =
3512 if (*StartRem != 0 && (NoWrap || CanFoldWithWrap) &&
3514 const SCEV *NewLHS =
3517 if (LHS != NewLHS) {
3527 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
3536 for (
const SCEV *
Op : M->operands())
3540 for (
unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
3541 const SCEV *
Op = M->getOperand(i);
3553 if (
auto *DivisorConstant =
3555 bool Overflow =
false;
3557 DivisorConstant->getAPInt().
umul_ov(RHSC->getAPInt(), Overflow);
3568 for (
const SCEV *
Op :
A->operands())
3572 for (
unsigned i = 0, e =
A->getNumOperands(); i != e; ++i) {
3579 if (Operands.
size() ==
A->getNumOperands())
3586 return getConstant(LHSC->getAPInt().udiv(RHSC->getAPInt()));
3596 return getZero(LHS->getType());
3600 const SCEV *NewLHS, *NewRHS;
3608 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
3611 UniqueSCEVs.InsertNode(S, IP);
3641 if (!
Mul || !
Mul->hasNoUnsignedWrap())
3648 if (LHSCst == RHSCst) {
3656 APInt Factor =
gcd(LHSCst, RHSCst);
3674 for (
int i = 0, e =
Mul->getNumOperands(); i != e; ++i) {
3675 if (
Mul->getOperand(i) == RHS) {
3694 if (StepChrec->getLoop() == L) {
3708 if (Operands.
size() == 1)
return Operands[0];
3713 "SCEVAddRecExpr operand types don't match!");
3714 assert(!
Op->getType()->isPointerTy() &&
"Step must be integer");
3716 for (
const SCEV *
Op : Operands)
3718 "SCEVAddRecExpr operand is not available at loop entry!");
3721 if (Operands.
back()->isZero()) {
3736 const Loop *NestedLoop = NestedAR->getLoop();
3737 if (L->contains(NestedLoop)
3740 DT.dominates(L->getHeader(), NestedLoop->
getHeader()))) {
3742 Operands[0] = NestedAR->getStart();
3746 bool AllInvariant =
all_of(
3758 AllInvariant =
all_of(NestedOperands, [&](
const SCEV *
Op) {
3769 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
3773 Operands[0] = NestedAR;
3779 return getOrCreateAddRecExpr(Operands, L, Flags);
3795 if (!GEPI || !isSCEVExprNeverPoison(GEPI))
3799 return getGEPExpr(BaseExpr, IndexExprs,
GEP->getSourceElementType(), NW);
3813 bool FirstIter =
true;
3815 for (
const SCEV *IndexExpr : IndexExprs) {
3822 Offsets.push_back(FieldOffset);
3825 CurTy = STy->getTypeAtIndex(Index);
3830 "The first index of a GEP indexes a pointer");
3831 CurTy = SrcElementTy;
3842 const SCEV *LocalOffset =
getMulExpr(IndexExpr, ElementSize, OffsetWrap);
3843 Offsets.push_back(LocalOffset);
3848 if (Offsets.empty())
3861 "GEP should not change type mid-flight.");
3865SCEV *ScalarEvolution::findExistingSCEVInCache(
SCEVTypes SCEVType,
3868 ID.AddInteger(SCEVType);
3872 return UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
3882 assert(SCEVMinMaxExpr::isMinMaxType(Kind) &&
"Not a SCEVMinMaxExpr!");
3883 assert(!
Ops.empty() &&
"Cannot get empty (u|s)(min|max)!");
3884 if (
Ops.size() == 1)
return Ops[0];
3887 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i) {
3889 "Operand types don't match!");
3892 "min/max should be consistently pointerish");
3918 return IsSigned ?
C.isMinSignedValue() :
C.isMinValue();
3920 return IsSigned ?
C.isMaxSignedValue() :
C.isMaxValue();
3925 return IsSigned ?
C.isMaxSignedValue() :
C.isMaxValue();
3927 return IsSigned ?
C.isMinSignedValue() :
C.isMinValue();
3933 if (
const SCEV *S = findExistingSCEVInCache(Kind,
Ops)) {
3939 while (Idx <
Ops.size() &&
Ops[Idx]->getSCEVType() < Kind)
3944 if (Idx <
Ops.size()) {
3945 bool DeletedAny =
false;
3946 while (
Ops[Idx]->getSCEVType() == Kind) {
3948 Ops.erase(
Ops.begin()+Idx);
3966 for (
unsigned i = 0, e =
Ops.size() - 1; i != e; ++i) {
3967 if (
Ops[i] ==
Ops[i + 1] ||
3968 isKnownViaNonRecursiveReasoning(FirstPred,
Ops[i],
Ops[i + 1])) {
3971 Ops.erase(
Ops.begin() + i + 1,
Ops.begin() + i + 2);
3974 }
else if (isKnownViaNonRecursiveReasoning(SecondPred,
Ops[i],
3977 Ops.erase(
Ops.begin() + i,
Ops.begin() + i + 1);
3983 if (
Ops.size() == 1)
return Ops[0];
3985 assert(!
Ops.empty() &&
"Reduced smax down to nothing!");
3990 ID.AddInteger(Kind);
3994 const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
3996 return ExistingSCEV;
3997 const SCEV **O = SCEVAllocator.Allocate<
const SCEV *>(
Ops.size());
3999 SCEV *S =
new (SCEVAllocator)
4002 UniqueSCEVs.InsertNode(S, IP);
4009class SCEVSequentialMinMaxDeduplicatingVisitor final
4010 :
public SCEVVisitor<SCEVSequentialMinMaxDeduplicatingVisitor,
4011 std::optional<const SCEV *>> {
4012 using RetVal = std::optional<const SCEV *>;
4020 bool canRecurseInto(
SCEVTypes Kind)
const {
4023 return RootKind == Kind || NonSequentialRootKind == Kind;
4026 RetVal visitAnyMinMaxExpr(
const SCEV *S) {
4028 "Only for min/max expressions.");
4031 if (!canRecurseInto(Kind))
4041 return std::nullopt;
4048 RetVal
visit(
const SCEV *S) {
4050 if (!SeenOps.
insert(S).second)
4051 return std::nullopt;
4052 return Base::visit(S);
4056 SCEVSequentialMinMaxDeduplicatingVisitor(ScalarEvolution &SE,
4058 : SE(SE), RootKind(RootKind),
4059 NonSequentialRootKind(
4060 SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType(
4064 SmallVectorImpl<const SCEV *> &NewOps) {
4069 for (
const SCEV *
Op : OrigOps) {
4074 Ops.emplace_back(*NewOp);
4078 NewOps = std::move(
Ops);
4082 RetVal visitConstant(
const SCEVConstant *Constant) {
return Constant; }
4084 RetVal visitVScale(
const SCEVVScale *VScale) {
return VScale; }
4086 RetVal visitPtrToIntExpr(
const SCEVPtrToIntExpr *Expr) {
return Expr; }
4088 RetVal visitTruncateExpr(
const SCEVTruncateExpr *Expr) {
return Expr; }
4090 RetVal visitZeroExtendExpr(
const SCEVZeroExtendExpr *Expr) {
return Expr; }
4092 RetVal visitSignExtendExpr(
const SCEVSignExtendExpr *Expr) {
return Expr; }
4094 RetVal visitAddExpr(
const SCEVAddExpr *Expr) {
return Expr; }
4096 RetVal visitMulExpr(
const SCEVMulExpr *Expr) {
return Expr; }
4098 RetVal visitUDivExpr(
const SCEVUDivExpr *Expr) {
return Expr; }
4100 RetVal visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
return Expr; }
4102 RetVal visitSMaxExpr(
const SCEVSMaxExpr *Expr) {
4103 return visitAnyMinMaxExpr(Expr);
4106 RetVal visitUMaxExpr(
const SCEVUMaxExpr *Expr) {
4107 return visitAnyMinMaxExpr(Expr);
4110 RetVal visitSMinExpr(
const SCEVSMinExpr *Expr) {
4111 return visitAnyMinMaxExpr(Expr);
4114 RetVal visitUMinExpr(
const SCEVUMinExpr *Expr) {
4115 return visitAnyMinMaxExpr(Expr);
4118 RetVal visitSequentialUMinExpr(
const SCEVSequentialUMinExpr *Expr) {
4119 return visitAnyMinMaxExpr(Expr);
4122 RetVal visitUnknown(
const SCEVUnknown *Expr) {
return Expr; }
4124 RetVal visitCouldNotCompute(
const SCEVCouldNotCompute *Expr) {
return Expr; }
4166struct SCEVPoisonCollector {
4167 bool LookThroughMaybePoisonBlocking;
4168 SmallPtrSet<const SCEVUnknown *, 4> MaybePoison;
4169 SCEVPoisonCollector(
bool LookThroughMaybePoisonBlocking)
4170 : LookThroughMaybePoisonBlocking(LookThroughMaybePoisonBlocking) {}
4172 bool follow(
const SCEV *S) {
4173 if (!LookThroughMaybePoisonBlocking &&
4183 bool isDone()
const {
return false; }
4193 SCEVPoisonCollector PC1(
true);
4198 if (PC1.MaybePoison.empty())
4204 SCEVPoisonCollector PC2(
false);
4214 SCEVPoisonCollector PC(
false);
4237 while (!Worklist.
empty()) {
4239 if (!Visited.
insert(V).second)
4243 if (Visited.
size() > 16)
4248 if (PoisonVals.
contains(V) || ::isGuaranteedNotToBePoison(V))
4259 if (PDI->isDisjoint())
4266 II &&
II->getIntrinsicID() == Intrinsic::vscale)
4273 if (
I->hasPoisonGeneratingAnnotations())
4284 assert(SCEVSequentialMinMaxExpr::isSequentialMinMaxType(Kind) &&
4285 "Not a SCEVSequentialMinMaxExpr!");
4286 assert(!
Ops.empty() &&
"Cannot get empty (u|s)(min|max)!");
4287 if (
Ops.size() == 1)
4291 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i) {
4293 "Operand types don't match!");
4296 "min/max should be consistently pointerish");
4304 if (
const SCEV *S = findExistingSCEVInCache(Kind,
Ops))
4311 SCEVSequentialMinMaxDeduplicatingVisitor Deduplicator(*
this, Kind);
4321 bool DeletedAny =
false;
4322 while (Idx <
Ops.size()) {
4323 if (
Ops[Idx]->getSCEVType() != Kind) {
4328 Ops.erase(
Ops.begin() + Idx);
4329 Ops.insert(
Ops.begin() + Idx, SMME->operands().begin(),
4330 SMME->operands().end());
4338 const SCEV *SaturationPoint;
4349 for (
unsigned i = 1, e =
Ops.size(); i != e; ++i) {
4350 if (!isGuaranteedNotToCauseUB(
Ops[i]))
4362 Ops.erase(
Ops.begin() + i);
4367 if (isKnownViaNonRecursiveReasoning(Pred,
Ops[i - 1],
Ops[i])) {
4368 Ops.erase(
Ops.begin() + i);
4376 ID.AddInteger(Kind);
4380 const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
4382 return ExistingSCEV;
4384 const SCEV **O = SCEVAllocator.Allocate<
const SCEV *>(
Ops.size());
4386 SCEV *S =
new (SCEVAllocator)
4389 UniqueSCEVs.InsertNode(S, IP);
4437 if (
Size.isScalable())
4458 "Cannot get offset for structure containing scalable vector types");
4472 if (
SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP)) {
4474 "Stale SCEVUnknown in uniquing map!");
4480 UniqueSCEVs.InsertNode(S, IP);
4494 return Ty->isIntOrPtrTy();
4501 if (Ty->isPointerTy())
4512 if (Ty->isIntegerTy())
4516 assert(Ty->isPointerTy() &&
"Unexpected non-pointer non-integer type!");
4528 bool PreciseA, PreciseB;
4529 auto *ScopeA = getDefiningScopeBound({
A}, PreciseA);
4530 auto *ScopeB = getDefiningScopeBound({
B}, PreciseB);
4531 if (!PreciseA || !PreciseB)
4534 return (ScopeA == ScopeB) || DT.dominates(ScopeA, ScopeB) ||
4535 DT.dominates(ScopeB, ScopeA);
4539 return CouldNotCompute.get();
4542bool ScalarEvolution::checkValidity(
const SCEV *S)
const {
4545 return SU && SU->getValue() ==
nullptr;
4548 return !ContainsNulls;
4553 if (
I != HasRecMap.end())
4558 HasRecMap.insert({S, FoundAddRec});
4566 if (
SI == ExprValueMap.
end())
4568 return SI->second.getArrayRef();
4574void ScalarEvolution::eraseValueFromMap(
Value *V) {
4576 if (
I != ValueExprMap.end()) {
4577 auto EVIt = ExprValueMap.find(
I->second);
4578 bool Removed = EVIt->second.remove(V);
4580 assert(Removed &&
"Value not in ExprValueMap?");
4581 ValueExprMap.erase(
I);
4585void ScalarEvolution::insertValueToMap(
Value *V,
const SCEV *S) {
4589 auto It = ValueExprMap.find_as(V);
4590 if (It == ValueExprMap.end()) {
4592 ExprValueMap[S].insert(V);
4603 return createSCEVIter(V);
4610 if (
I != ValueExprMap.end()) {
4611 const SCEV *S =
I->second;
4612 assert(checkValidity(S) &&
4613 "existing SCEV has not been properly invalidated");
4626 Type *Ty = V->getType();
4642 assert(!V->getType()->isPointerTy() &&
"Can't negate pointer");
4655 return (
const SCEV *)
nullptr;
4661 if (
const SCEV *Replaced = MatchMinMaxNegation(MME))
4665 Type *Ty = V->getType();
4671 assert(
P->getType()->isPointerTy());
4684 const SCEV **PtrOp =
nullptr;
4685 for (
const SCEV *&AddOp :
Ops) {
4686 if (AddOp->getType()->isPointerTy()) {
4687 assert(!PtrOp &&
"Cannot have multiple pointer ops");
4705 return getZero(LHS->getType());
4710 if (RHS->getType()->isPointerTy()) {
4711 if (!LHS->getType()->isPointerTy() ||
4721 const bool RHSIsNotMinSigned =
4752 Type *SrcTy = V->getType();
4753 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4754 "Cannot truncate or zero extend with non-integer arguments!");
4764 Type *SrcTy = V->getType();
4765 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4766 "Cannot truncate or zero extend with non-integer arguments!");
4776 Type *SrcTy = V->getType();
4777 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4778 "Cannot noop or zero extend with non-integer arguments!");
4780 "getNoopOrZeroExtend cannot truncate!");
4788 Type *SrcTy = V->getType();
4789 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4790 "Cannot noop or sign extend with non-integer arguments!");
4792 "getNoopOrSignExtend cannot truncate!");
4800 Type *SrcTy = V->getType();
4801 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4802 "Cannot noop or any extend with non-integer arguments!");
4804 "getNoopOrAnyExtend cannot truncate!");
4812 Type *SrcTy = V->getType();
4813 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4814 "Cannot truncate or noop with non-integer arguments!");
4816 "getTruncateOrNoop cannot extend!");
4824 const SCEV *PromotedLHS = LHS;
4825 const SCEV *PromotedRHS = RHS;
4845 assert(!
Ops.empty() &&
"At least one operand must be!");
4847 if (
Ops.size() == 1)
4851 Type *MaxType =
nullptr;
4852 for (
const auto *S :
Ops)
4857 assert(MaxType &&
"Failed to find maximum type!");
4861 for (
const auto *S :
Ops)
4870 if (!V->getType()->isPointerTy())
4875 V = AddRec->getStart();
4877 const SCEV *PtrOp =
nullptr;
4878 for (
const SCEV *AddOp :
Add->operands()) {
4879 if (AddOp->getType()->isPointerTy()) {
4880 assert(!PtrOp &&
"Cannot have multiple pointer ops");
4884 assert(PtrOp &&
"Must have pointer op");
4896 for (
User *U :
I->users()) {
4898 if (Visited.
insert(UserInsn).second)
4912 static const SCEV *rewrite(
const SCEV *S,
const Loop *L, ScalarEvolution &SE,
4913 bool IgnoreOtherLoops =
true) {
4916 if (
Rewriter.hasSeenLoopVariantSCEVUnknown())
4918 return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
4923 const SCEV *visitUnknown(
const SCEVUnknown *Expr) {
4925 SeenLoopVariantSCEVUnknown =
true;
4929 const SCEV *visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
4933 SeenOtherLoops =
true;
4937 bool hasSeenLoopVariantSCEVUnknown() {
return SeenLoopVariantSCEVUnknown; }
4939 bool hasSeenOtherLoops() {
return SeenOtherLoops; }
4942 explicit SCEVInitRewriter(
const Loop *L, ScalarEvolution &SE)
4943 : SCEVRewriteVisitor(SE),
L(
L) {}
4946 bool SeenLoopVariantSCEVUnknown =
false;
4947 bool SeenOtherLoops =
false;
4956 static const SCEV *rewrite(
const SCEV *S,
const Loop *L, ScalarEvolution &SE) {
4957 SCEVPostIncRewriter
Rewriter(L, SE);
4959 return Rewriter.hasSeenLoopVariantSCEVUnknown()
4964 const SCEV *visitUnknown(
const SCEVUnknown *Expr) {
4966 SeenLoopVariantSCEVUnknown =
true;
4970 const SCEV *visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
4974 SeenOtherLoops =
true;
4978 bool hasSeenLoopVariantSCEVUnknown() {
return SeenLoopVariantSCEVUnknown; }
4980 bool hasSeenOtherLoops() {
return SeenOtherLoops; }
4983 explicit SCEVPostIncRewriter(
const Loop *L, ScalarEvolution &SE)
4984 : SCEVRewriteVisitor(SE),
L(
L) {}
4987 bool SeenLoopVariantSCEVUnknown =
false;
4988 bool SeenOtherLoops =
false;
4994class SCEVBackedgeConditionFolder
4997 static const SCEV *rewrite(
const SCEV *S,
const Loop *L,
4998 ScalarEvolution &SE) {
4999 bool IsPosBECond =
false;
5000 Value *BECond =
nullptr;
5001 if (BasicBlock *Latch =
L->getLoopLatch()) {
5005 "Both outgoing branches should not target same header!");
5012 SCEVBackedgeConditionFolder
Rewriter(L, BECond, IsPosBECond, SE);
5016 const SCEV *visitUnknown(
const SCEVUnknown *Expr) {
5017 const SCEV *
Result = Expr;
5022 switch (
I->getOpcode()) {
5023 case Instruction::Select: {
5025 std::optional<const SCEV *> Res =
5026 compareWithBackedgeCondition(
SI->getCondition());
5034 std::optional<const SCEV *> Res = compareWithBackedgeCondition(
I);
5045 explicit SCEVBackedgeConditionFolder(
const Loop *L,
Value *BECond,
5046 bool IsPosBECond, ScalarEvolution &SE)
5047 : SCEVRewriteVisitor(SE),
L(
L), BackedgeCond(BECond),
5048 IsPositiveBECond(IsPosBECond) {}
5050 std::optional<const SCEV *> compareWithBackedgeCondition(
Value *IC);
5054 Value *BackedgeCond =
nullptr;
5056 bool IsPositiveBECond;
5059std::optional<const SCEV *>
5060SCEVBackedgeConditionFolder::compareWithBackedgeCondition(
Value *IC) {
5065 if (BackedgeCond == IC)
5068 return std::nullopt;
5073 static const SCEV *rewrite(
const SCEV *S,
const Loop *L,
5074 ScalarEvolution &SE) {
5080 const SCEV *visitUnknown(
const SCEVUnknown *Expr) {
5087 const SCEV *visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
5097 explicit SCEVShiftRewriter(
const Loop *L, ScalarEvolution &SE)
5098 : SCEVRewriteVisitor(SE),
L(
L) {}
5107ScalarEvolution::proveNoWrapViaConstantRanges(
const SCEVAddRecExpr *AR) {
5111 using OBO = OverflowingBinaryOperator;
5119 const APInt &BECountAP = BECountMax->getAPInt();
5120 unsigned NoOverflowBitWidth =
5132 Instruction::Add, IncRange, OBO::NoSignedWrap);
5133 if (NSWRegion.contains(AddRecRange))
5142 Instruction::Add, IncRange, OBO::NoUnsignedWrap);
5143 if (NUWRegion.contains(AddRecRange))
5151ScalarEvolution::proveNoSignedWrapViaInduction(
const SCEVAddRecExpr *AR) {
5161 if (!SignedWrapViaInductionTried.insert(AR).second)
5186 AC.assumptions().empty())
5194 const SCEV *OverflowLimit =
5196 if (OverflowLimit &&
5204ScalarEvolution::proveNoUnsignedWrapViaInduction(
const SCEVAddRecExpr *AR) {
5214 if (!UnsignedWrapViaInductionTried.insert(AR).second)
5240 AC.assumptions().empty())
5275 explicit BinaryOp(Operator *
Op)
5279 IsNSW = OBO->hasNoSignedWrap();
5280 IsNUW = OBO->hasNoUnsignedWrap();
5284 explicit BinaryOp(
unsigned Opcode,
Value *
LHS,
Value *
RHS,
bool IsNSW =
false,
5286 : Opcode(Opcode),
LHS(
LHS),
RHS(
RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
5298 return std::nullopt;
5304 switch (
Op->getOpcode()) {
5305 case Instruction::Add:
5306 case Instruction::Sub:
5307 case Instruction::Mul:
5308 case Instruction::UDiv:
5309 case Instruction::URem:
5310 case Instruction::And:
5311 case Instruction::AShr:
5312 case Instruction::Shl:
5313 return BinaryOp(
Op);
5315 case Instruction::Or: {
5318 return BinaryOp(Instruction::Add,
Op->getOperand(0),
Op->getOperand(1),
5320 return BinaryOp(
Op);
5323 case Instruction::Xor:
5327 if (RHSC->getValue().isSignMask())
5328 return BinaryOp(Instruction::Add,
Op->getOperand(0),
Op->getOperand(1));
5330 if (V->getType()->isIntegerTy(1))
5331 return BinaryOp(Instruction::Add,
Op->getOperand(0),
Op->getOperand(1));
5332 return BinaryOp(
Op);
5334 case Instruction::LShr:
5343 if (SA->getValue().ult(
BitWidth)) {
5345 ConstantInt::get(SA->getContext(),
5347 return BinaryOp(Instruction::UDiv,
Op->getOperand(0),
X);
5350 return BinaryOp(
Op);
5352 case Instruction::ExtractValue: {
5354 if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
5362 bool Signed = WO->isSigned();
5365 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS());
5370 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(),
5381 if (
II->getIntrinsicID() == Intrinsic::loop_decrement_reg)
5382 return BinaryOp(Instruction::Sub,
II->getOperand(0),
II->getOperand(1));
5384 return std::nullopt;
5410 if (
Op == SymbolicPHI)
5415 if (SourceBits != NewBits)
5433 if (!L || L->getHeader() != PN->
getParent())
5491std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5492ScalarEvolution::createAddRecFromPHIWithCastsImpl(
const SCEVUnknown *SymbolicPHI) {
5500 assert(L &&
"Expecting an integer loop header phi");
5505 Value *BEValueV =
nullptr, *StartValueV =
nullptr;
5506 for (
unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
5507 Value *
V = PN->getIncomingValue(i);
5508 if (
L->contains(PN->getIncomingBlock(i))) {
5511 }
else if (BEValueV != V) {
5515 }
else if (!StartValueV) {
5517 }
else if (StartValueV != V) {
5518 StartValueV =
nullptr;
5522 if (!BEValueV || !StartValueV)
5523 return std::nullopt;
5525 const SCEV *BEValue =
getSCEV(BEValueV);
5532 return std::nullopt;
5536 unsigned FoundIndex =
Add->getNumOperands();
5537 Type *TruncTy =
nullptr;
5539 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5542 if (FoundIndex == e) {
5547 if (FoundIndex ==
Add->getNumOperands())
5548 return std::nullopt;
5552 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5553 if (i != FoundIndex)
5554 Ops.push_back(
Add->getOperand(i));
5560 return std::nullopt;
5613 const SCEV *StartVal =
getSCEV(StartValueV);
5614 const SCEV *PHISCEV =
5641 auto getExtendedExpr = [&](
const SCEV *Expr,
5642 bool CreateSignExtend) ->
const SCEV * {
5645 const SCEV *ExtendedExpr =
5648 return ExtendedExpr;
5656 auto PredIsKnownFalse = [&](
const SCEV *Expr,
5657 const SCEV *ExtendedExpr) ->
bool {
5658 return Expr != ExtendedExpr &&
5662 const SCEV *StartExtended = getExtendedExpr(StartVal,
Signed);
5663 if (PredIsKnownFalse(StartVal, StartExtended)) {
5665 return std::nullopt;
5670 const SCEV *AccumExtended = getExtendedExpr(Accum,
true);
5671 if (PredIsKnownFalse(Accum, AccumExtended)) {
5673 return std::nullopt;
5676 auto AppendPredicate = [&](
const SCEV *Expr,
5677 const SCEV *ExtendedExpr) ->
void {
5678 if (Expr != ExtendedExpr &&
5686 AppendPredicate(StartVal, StartExtended);
5687 AppendPredicate(Accum, AccumExtended);
5695 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
5696 std::make_pair(NewAR, Predicates);
5698 PredicatedSCEVRewrites[{SymbolicPHI,
L}] = PredRewrite;
5702std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5707 return std::nullopt;
5710 auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
5711 if (
I != PredicatedSCEVRewrites.end()) {
5712 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
5715 if (Rewrite.first == SymbolicPHI)
5716 return std::nullopt;
5720 assert(!(Rewrite.second).empty() &&
"Expected to find Predicates");
5724 std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5725 Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);
5730 PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
5731 return std::nullopt;
5748 auto areExprsEqual = [&](
const SCEV *Expr1,
const SCEV *Expr2) ->
bool {
5749 if (Expr1 != Expr2 &&
5750 !Preds->implies(SE.getEqualPredicate(Expr1, Expr2), SE) &&
5751 !Preds->implies(SE.getEqualPredicate(Expr2, Expr1), SE))
5768const SCEV *ScalarEvolution::createSimpleAffineAddRec(
PHINode *PN,
5770 Value *StartValueV) {
5773 assert(BEValueV && StartValueV);
5779 if (BO->Opcode != Instruction::Add)
5782 const SCEV *Accum =
nullptr;
5783 if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
5785 else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
5799 insertValueToMap(PN, PHISCEV);
5804 proveNoWrapViaConstantRanges(AR)));
5812 "Accum is defined outside L, but is not invariant?");
5813 if (isAddRecNeverPoison(BEInst, L))
5820const SCEV *ScalarEvolution::createAddRecFromPHI(
PHINode *PN) {
5821 const Loop *
L = LI.getLoopFor(PN->
getParent());
5828 Value *BEValueV =
nullptr, *StartValueV =
nullptr;
5834 }
else if (BEValueV != V) {
5838 }
else if (!StartValueV) {
5840 }
else if (StartValueV != V) {
5841 StartValueV =
nullptr;
5845 if (!BEValueV || !StartValueV)
5848 assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
5849 "PHI node already processed?");
5853 if (
auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
5858 insertValueToMap(PN, SymbolicName);
5862 const SCEV *BEValue =
getSCEV(BEValueV);
5872 unsigned FoundIndex =
Add->getNumOperands();
5873 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5874 if (
Add->getOperand(i) == SymbolicName)
5875 if (FoundIndex == e) {
5880 if (FoundIndex !=
Add->getNumOperands()) {
5883 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5884 if (i != FoundIndex)
5885 Ops.push_back(SCEVBackedgeConditionFolder::rewrite(
Add->getOperand(i),
5897 if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
5904 if (
GEP->getOperand(0) == PN) {
5905 GEPNoWrapFlags NW =
GEP->getNoWrapFlags();
5923 const SCEV *StartVal =
getSCEV(StartValueV);
5924 const SCEV *PHISCEV =
getAddRecExpr(StartVal, Accum, L, Flags);
5929 forgetMemoizedResults(SymbolicName);
5930 insertValueToMap(PN, PHISCEV);
5935 proveNoWrapViaConstantRanges(AR)));
5960 const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *
this);
5961 const SCEV *
Start = SCEVInitRewriter::rewrite(Shifted, L, *
this,
false);
5963 isGuaranteedNotToCauseUB(Shifted) &&
::impliesPoison(Shifted, Start)) {
5964 const SCEV *StartVal =
getSCEV(StartValueV);
5965 if (Start == StartVal) {
5969 forgetMemoizedResults(SymbolicName);
5970 insertValueToMap(PN, Shifted);
5980 eraseValueFromMap(PN);
6000 Use &LeftUse =
Merge->getOperandUse(0);
6001 Use &RightUse =
Merge->getOperandUse(1);
6018const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(
PHINode *PN) {
6020 [&](
BasicBlock *BB) {
return DT.isReachableFromEntry(BB); };
6035 assert(IDom &&
"At least the entry block should dominate PN");
6044 return createNodeForSelectOrPHI(PN,
Cond,
LHS,
RHS);
6060ScalarEvolution::createNodeForPHIWithIdenticalOperands(
PHINode *PN) {
6061 BinaryOperator *CommonInst =
nullptr;
6072 CommonInst = IncomingInst;
6079 const SCEV *CommonSCEV =
getSCEV(CommonInst);
6080 bool SCEVExprsIdentical =
6082 [
this, CommonSCEV](
Value *V) { return CommonSCEV == getSCEV(V); });
6083 return SCEVExprsIdentical ? CommonSCEV :
nullptr;
6086const SCEV *ScalarEvolution::createNodeForPHI(
PHINode *PN) {
6087 if (
const SCEV *S = createAddRecFromPHI(PN))
6097 if (
const SCEV *S = createNodeForPHIWithIdenticalOperands(PN))
6100 if (
const SCEV *S = createNodeFromSelectLikePHI(PN))
6109 struct FindClosure {
6110 const SCEV *OperandToFind;
6116 bool canRecurseInto(
SCEVTypes Kind)
const {
6119 return RootKind == Kind || NonSequentialRootKind == Kind ||
6124 : OperandToFind(OperandToFind), RootKind(RootKind),
6125 NonSequentialRootKind(
6129 bool follow(
const SCEV *S) {
6130 Found = S == OperandToFind;
6132 return !isDone() && canRecurseInto(S->
getSCEVType());
6135 bool isDone()
const {
return Found; }
6138 FindClosure FC(OperandToFind, RootKind);
6143std::optional<const SCEV *>
6144ScalarEvolution::createNodeForSelectOrPHIInstWithICmpInstCond(
Type *Ty,
6154 switch (ICI->getPredicate()) {
6168 bool Signed = ICI->isSigned();
6169 const SCEV *LA =
getSCEV(TrueVal);
6177 if (LA == LS &&
RA == RS)
6179 if (LA == RS &&
RA == LS)
6182 auto CoerceOperand = [&](
const SCEV *
Op) ->
const SCEV * {
6183 if (
Op->getType()->isPointerTy()) {
6194 LS = CoerceOperand(LS);
6195 RS = CoerceOperand(RS);
6219 const SCEV *TrueValExpr =
getSCEV(TrueVal);
6220 const SCEV *FalseValExpr =
getSCEV(FalseVal);
6234 X = ZExt->getOperand();
6236 const SCEV *FalseValExpr =
getSCEV(FalseVal);
6247 return std::nullopt;
6250static std::optional<const SCEV *>
6252 const SCEV *TrueExpr,
const SCEV *FalseExpr) {
6256 "Unexpected operands of a select.");
6268 return std::nullopt;
6283static std::optional<const SCEV *>
6287 return std::nullopt;
6290 const auto *SETrue = SE->
getSCEV(TrueVal);
6291 const auto *SEFalse = SE->
getSCEV(FalseVal);
6295const SCEV *ScalarEvolution::createNodeForSelectOrPHIViaUMinSeq(
6297 assert(
Cond->getType()->isIntegerTy(1) &&
"Select condition is not an i1?");
6299 V->getType() ==
TrueVal->getType() &&
6300 "Types of select hands and of the result must match.");
6303 if (!
V->getType()->isIntegerTy(1))
6306 if (std::optional<const SCEV *> S =
6319 return getSCEV(CI->isOne() ? TrueVal : FalseVal);
6323 if (std::optional<const SCEV *> S =
6324 createNodeForSelectOrPHIInstWithICmpInstCond(
I->getType(), ICI,
6330 return createNodeForSelectOrPHIViaUMinSeq(V,
Cond, TrueVal, FalseVal);
6336 assert(
GEP->getSourceElementType()->isSized() &&
6337 "GEP source element type must be sized");
6340 for (
Value *Index :
GEP->indices())
6345APInt ScalarEvolution::getConstantMultipleImpl(
const SCEV *S,
6348 auto GetShiftedByZeros = [
BitWidth](uint32_t TrailingZeros) {
6351 : APInt::getOneBitSet(
BitWidth, TrailingZeros);
6353 auto GetGCDMultiple = [
this, CtxI](
const SCEVNAryExpr *
N) {
6356 for (
unsigned I = 1,
E =
N->getNumOperands();
I <
E && Res != 1; ++
I)
6374 return GetShiftedByZeros(TZ);
6384 return GetShiftedByZeros(TZ);
6388 if (
M->hasNoUnsignedWrap()) {
6391 for (
const SCEV *Operand :
M->operands().drop_front())
6399 for (
const SCEV *Operand :
M->operands())
6401 return GetShiftedByZeros(TZ);
6406 if (
N->hasNoUnsignedWrap())
6407 return GetGCDMultiple(
N);
6410 for (
const SCEV *Operand :
N->operands().drop_front())
6412 return GetShiftedByZeros(TZ);
6429 CtxI = &*F.getEntryBlock().begin();
6435 .countMinTrailingZeros();
6436 return GetShiftedByZeros(Known);
6449 return getConstantMultipleImpl(S, CtxI);
6451 auto I = ConstantMultipleCache.find(S);
6452 if (
I != ConstantMultipleCache.end())
6455 APInt Result = getConstantMultipleImpl(S, CtxI);
6456 auto InsertPair = ConstantMultipleCache.insert({S, Result});
6457 assert(InsertPair.second &&
"Should insert a new key");
6458 return InsertPair.first->second;
6475 if (
MDNode *MD =
I->getMetadata(LLVMContext::MD_range))
6478 if (std::optional<ConstantRange>
Range = CB->getRange())
6482 if (std::optional<ConstantRange>
Range =
A->getRange())
6485 return std::nullopt;
6492 UnsignedRanges.erase(AddRec);
6493 SignedRanges.erase(AddRec);
6494 ConstantMultipleCache.erase(AddRec);
6499getRangeForUnknownRecurrence(
const SCEVUnknown *U) {
6525 Value *Start, *Step;
6532 assert(L && L->getHeader() ==
P->getParent());
6545 case Instruction::AShr:
6546 case Instruction::LShr:
6547 case Instruction::Shl:
6562 KnownStep.getBitWidth() ==
BitWidth);
6565 auto MaxShiftAmt = KnownStep.getMaxValue();
6567 bool Overflow =
false;
6568 auto TotalShift = MaxShiftAmt.umul_ov(TCAP, Overflow);
6575 case Instruction::AShr: {
6583 if (KnownStart.isNonNegative())
6586 KnownStart.getMaxValue() + 1);
6587 if (KnownStart.isNegative())
6590 KnownEnd.getMaxValue() + 1);
6593 case Instruction::LShr: {
6602 KnownStart.getMaxValue() + 1);
6604 case Instruction::Shl: {
6608 if (TotalShift.ult(KnownStart.countMinLeadingZeros()))
6609 return ConstantRange(KnownStart.getMinValue(),
6610 KnownEnd.getMaxValue() + 1);
6618ScalarEvolution::getRangeRefIter(
const SCEV *S,
6619 ScalarEvolution::RangeSignHint SignHint) {
6620 DenseMap<const SCEV *, ConstantRange> &Cache =
6621 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
6624 SmallPtrSet<const SCEV *, 8> Seen;
6628 auto AddToWorklist = [&WorkList, &Seen, &Cache](
const SCEV *Expr) {
6629 if (!Seen.
insert(Expr).second)
6662 for (
unsigned I = 0;
I != WorkList.
size(); ++
I) {
6663 const SCEV *
P = WorkList[
I];
6667 for (
const SCEV *
Op :
P->operands())
6673 if (!PendingPhiRangesIter.insert(
P).second)
6680 if (!WorkList.
empty()) {
6685 getRangeRef(
P, SignHint);
6689 PendingPhiRangesIter.erase(
P);
6693 return getRangeRef(S, SignHint, 0);
6700 const SCEV *S, ScalarEvolution::RangeSignHint SignHint,
unsigned Depth) {
6701 DenseMap<const SCEV *, ConstantRange> &Cache =
6702 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
6710 if (
I != Cache.
end())
6714 return setRange(
C, SignHint, ConstantRange(
C->getAPInt()));
6719 return getRangeRefIter(S, SignHint);
6722 ConstantRange ConservativeResult(
BitWidth,
true);
6723 using OBO = OverflowingBinaryOperator;
6727 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
6731 ConservativeResult =
6738 ConservativeResult = ConstantRange(
6754 ConservativeResult.intersectWith(
X.truncate(
BitWidth), RangeType));
6761 ConservativeResult.intersectWith(
X.zeroExtend(
BitWidth), RangeType));
6768 ConservativeResult.intersectWith(
X.signExtend(
BitWidth), RangeType));
6772 ConstantRange
X = getRangeRef(PtrToInt->
getOperand(), SignHint,
Depth + 1);
6773 return setRange(PtrToInt, SignHint,
X);
6777 ConstantRange
X = getRangeRef(
Add->getOperand(0), SignHint,
Depth + 1);
6778 unsigned WrapType = OBO::AnyWrap;
6779 if (
Add->hasNoSignedWrap())
6780 WrapType |= OBO::NoSignedWrap;
6781 if (
Add->hasNoUnsignedWrap())
6782 WrapType |= OBO::NoUnsignedWrap;
6784 X =
X.addWithNoWrap(getRangeRef(
Op, SignHint,
Depth + 1), WrapType,
6786 return setRange(
Add, SignHint,
6787 ConservativeResult.intersectWith(
X, RangeType));
6791 ConstantRange
X = getRangeRef(
Mul->getOperand(0), SignHint,
Depth + 1);
6793 X =
X.multiply(getRangeRef(
Op, SignHint,
Depth + 1));
6794 return setRange(
Mul, SignHint,
6795 ConservativeResult.intersectWith(
X, RangeType));
6799 ConstantRange
X = getRangeRef(UDiv->
getLHS(), SignHint,
Depth + 1);
6800 ConstantRange
Y = getRangeRef(UDiv->
getRHS(), SignHint,
Depth + 1);
6801 return setRange(UDiv, SignHint,
6802 ConservativeResult.intersectWith(
X.udiv(
Y), RangeType));
6810 if (!UnsignedMinValue.
isZero())
6811 ConservativeResult = ConservativeResult.intersectWith(
6812 ConstantRange(UnsignedMinValue, APInt(
BitWidth, 0)), RangeType);
6821 bool AllNonNeg =
true;
6822 bool AllNonPos =
true;
6823 for (
unsigned i = 1, e = AddRec->
getNumOperands(); i != e; ++i) {
6830 ConservativeResult = ConservativeResult.intersectWith(
6835 ConservativeResult = ConservativeResult.intersectWith(
6844 const SCEV *MaxBEScev =
6858 auto RangeFromAffine = getRangeForAffineAR(
6860 ConservativeResult =
6861 ConservativeResult.intersectWith(RangeFromAffine, RangeType);
6863 auto RangeFromFactoring = getRangeViaFactoring(
6865 ConservativeResult =
6866 ConservativeResult.intersectWith(RangeFromFactoring, RangeType);
6872 const SCEV *SymbolicMaxBECount =
6877 auto RangeFromAffineNew = getRangeForAffineNoSelfWrappingAR(
6878 AddRec, SymbolicMaxBECount,
BitWidth, SignHint);
6879 ConservativeResult =
6880 ConservativeResult.intersectWith(RangeFromAffineNew, RangeType);
6885 return setRange(AddRec, SignHint, std::move(ConservativeResult));
6895 ID = Intrinsic::umax;
6898 ID = Intrinsic::smax;
6902 ID = Intrinsic::umin;
6905 ID = Intrinsic::smin;
6912 ConstantRange
X = getRangeRef(NAry->getOperand(0), SignHint,
Depth + 1);
6913 for (
unsigned i = 1, e = NAry->getNumOperands(); i != e; ++i)
6915 ID, {
X, getRangeRef(NAry->getOperand(i), SignHint,
Depth + 1)});
6916 return setRange(S, SignHint,
6917 ConservativeResult.intersectWith(
X, RangeType));
6926 ConservativeResult =
6927 ConservativeResult.intersectWith(*MDRange, RangeType);
6932 auto CR = getRangeForUnknownRecurrence(U);
6933 ConservativeResult = ConservativeResult.intersectWith(CR);
6944 if (
U->getType()->isPointerTy()) {
6947 unsigned ptrSize = DL.getPointerTypeSizeInBits(
U->getType());
6948 int ptrIdxDiff = ptrSize -
BitWidth;
6949 if (ptrIdxDiff > 0 && ptrSize >
BitWidth && NS > (
unsigned)ptrIdxDiff)
6962 ConservativeResult = ConservativeResult.intersectWith(
6966 ConservativeResult = ConservativeResult.intersectWith(
6971 if (
U->getType()->isPointerTy() && SignHint == HINT_RANGE_UNSIGNED) {
6974 bool CanBeNull, CanBeFreed;
6975 uint64_t DerefBytes =
6976 V->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
6986 uint64_t
Align =
U->getValue()->getPointerAlignment(DL).value();
6987 uint64_t Rem = MaxVal.
urem(Align);
6992 ConservativeResult = ConservativeResult.intersectWith(
7000 if (PendingPhiRanges.insert(Phi).second) {
7001 ConstantRange RangeFromOps(
BitWidth,
false);
7003 for (
const auto &
Op :
Phi->operands()) {
7005 RangeFromOps = RangeFromOps.unionWith(OpRange);
7007 if (RangeFromOps.isFullSet())
7010 ConservativeResult =
7011 ConservativeResult.intersectWith(RangeFromOps, RangeType);
7012 bool Erased = PendingPhiRanges.erase(Phi);
7013 assert(Erased &&
"Failed to erase Phi properly?");
7020 if (
II->getIntrinsicID() == Intrinsic::vscale) {
7022 ConservativeResult = ConservativeResult.difference(Disallowed);
7025 return setRange(U, SignHint, std::move(ConservativeResult));
7031 return setRange(S, SignHint, std::move(ConservativeResult));
7040 const APInt &MaxBECount,
7047 if (Step == 0 || MaxBECount == 0)
7054 return ConstantRange::getFull(
BitWidth);
7070 return ConstantRange::getFull(
BitWidth);
7082 APInt MovedBoundary = Descending ? (StartLower - std::move(
Offset))
7083 : (StartUpper + std::move(
Offset));
7088 if (StartRange.
contains(MovedBoundary))
7089 return ConstantRange::getFull(
BitWidth);
7092 Descending ? std::move(MovedBoundary) : std::move(StartLower);
7094 Descending ? std::move(StartUpper) : std::move(MovedBoundary);
7103 const APInt &MaxBECount) {
7107 "mismatched bit widths");
7116 StepSRange.
getSignedMin(), StartSRange, MaxBECount,
true);
7118 StartSRange, MaxBECount,
7130ConstantRange ScalarEvolution::getRangeForAffineNoSelfWrappingAR(
7132 ScalarEvolution::RangeSignHint SignHint) {
7133 assert(AddRec->
isAffine() &&
"Non-affine AddRecs are not suppored!\n");
7135 "This only works for non-self-wrapping AddRecs!");
7136 const bool IsSigned = SignHint == HINT_RANGE_SIGNED;
7140 return ConstantRange::getFull(
BitWidth);
7148 return ConstantRange::getFull(
BitWidth);
7152 const SCEV *MaxItersWithoutWrap =
getUDivExpr(RangeWidth, StepAbs);
7154 MaxItersWithoutWrap))
7155 return ConstantRange::getFull(
BitWidth);
7176 ConstantRange StartRange = getRangeRef(Start, SignHint);
7177 ConstantRange EndRange = getRangeRef(End, SignHint);
7178 ConstantRange RangeBetween = StartRange.
unionWith(EndRange);
7182 return RangeBetween;
7187 return ConstantRange::getFull(
BitWidth);
7190 isKnownPredicateViaConstantRanges(LEPred, Start, End))
7191 return RangeBetween;
7193 isKnownPredicateViaConstantRanges(GEPred, Start, End))
7194 return RangeBetween;
7195 return ConstantRange::getFull(
BitWidth);
7200 const APInt &MaxBECount) {
7207 "mismatched bit widths");
7209 struct SelectPattern {
7210 Value *Condition =
nullptr;
7214 explicit SelectPattern(ScalarEvolution &SE,
unsigned BitWidth,
7216 std::optional<unsigned> CastOp;
7230 CastOp = SCast->getSCEVType();
7231 S = SCast->getOperand();
7234 using namespace llvm::PatternMatch;
7241 Condition =
nullptr;
7273 bool isRecognized() {
return Condition !=
nullptr; }
7276 SelectPattern StartPattern(*
this,
BitWidth, Start);
7277 if (!StartPattern.isRecognized())
7278 return ConstantRange::getFull(
BitWidth);
7280 SelectPattern StepPattern(*
this,
BitWidth, Step);
7281 if (!StepPattern.isRecognized())
7282 return ConstantRange::getFull(
BitWidth);
7284 if (StartPattern.Condition != StepPattern.Condition) {
7288 return ConstantRange::getFull(
BitWidth);
7299 const SCEV *TrueStart = this->
getConstant(StartPattern.TrueValue);
7300 const SCEV *TrueStep = this->
getConstant(StepPattern.TrueValue);
7301 const SCEV *FalseStart = this->
getConstant(StartPattern.FalseValue);
7302 const SCEV *FalseStep = this->
getConstant(StepPattern.FalseValue);
7304 ConstantRange TrueRange =
7305 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount);
7306 ConstantRange FalseRange =
7307 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount);
7329ScalarEvolution::getNonTrivialDefiningScopeBound(
const SCEV *S) {
7343 SmallPtrSet<const SCEV *, 16> Visited;
7345 auto pushOp = [&](
const SCEV *S) {
7346 if (!Visited.
insert(S).second)
7349 if (Visited.
size() > 30) {
7356 for (
const auto *S :
Ops)
7360 while (!Worklist.
empty()) {
7362 if (
auto *DefI = getNonTrivialDefiningScopeBound(S)) {
7363 if (!Bound || DT.dominates(Bound, DefI))
7370 return Bound ? Bound : &*F.getEntryBlock().begin();
7376 return getDefiningScopeBound(
Ops, Discard);
7379bool ScalarEvolution::isGuaranteedToTransferExecutionTo(
const Instruction *
A,
7381 if (
A->getParent() ==
B->getParent() &&
7386 auto *BLoop = LI.getLoopFor(
B->getParent());
7387 if (BLoop && BLoop->getHeader() ==
B->getParent() &&
7388 BLoop->getLoopPreheader() ==
A->getParent() &&
7390 A->getParent()->end()) &&
7397bool ScalarEvolution::isGuaranteedNotToBePoison(
const SCEV *
Op) {
7398 SCEVPoisonCollector PC(
true);
7400 return PC.MaybePoison.empty();
7403bool ScalarEvolution::isGuaranteedNotToCauseUB(
const SCEV *
Op) {
7409 return M && (!
isKnownNonZero(Op1) || !isGuaranteedNotToBePoison(Op1));
7413bool ScalarEvolution::isSCEVExprNeverPoison(
const Instruction *
I) {
7430 for (
const Use &
Op :
I->operands()) {
7436 auto *DefI = getDefiningScopeBound(SCEVOps);
7437 return isGuaranteedToTransferExecutionTo(DefI,
I);
7440bool ScalarEvolution::isAddRecNeverPoison(
const Instruction *
I,
const Loop *L) {
7442 if (isSCEVExprNeverPoison(
I))
7453 auto *ExitingBB =
L->getExitingBlock();
7457 SmallPtrSet<const Value *, 16> KnownPoison;
7466 while (!Worklist.
empty()) {
7469 for (
const Use &U :
Poison->uses()) {
7472 DT.dominates(PoisonUser->
getParent(), ExitingBB))
7476 if (KnownPoison.
insert(PoisonUser).second)
7484ScalarEvolution::LoopProperties
7485ScalarEvolution::getLoopProperties(
const Loop *L) {
7486 using LoopProperties = ScalarEvolution::LoopProperties;
7488 auto Itr = LoopPropertiesCache.find(L);
7489 if (Itr == LoopPropertiesCache.end()) {
7492 return !
SI->isSimple();
7502 return I->mayWriteToMemory();
7505 LoopProperties LP = {
true,
7508 for (
auto *BB :
L->getBlocks())
7509 for (
auto &
I : *BB) {
7511 LP.HasNoAbnormalExits =
false;
7512 if (HasSideEffects(&
I))
7513 LP.HasNoSideEffects =
false;
7514 if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
7518 auto InsertPair = LoopPropertiesCache.insert({
L, LP});
7519 assert(InsertPair.second &&
"We just checked!");
7520 Itr = InsertPair.first;
7533const SCEV *ScalarEvolution::createSCEVIter(
Value *V) {
7539 Stack.emplace_back(V,
true);
7540 Stack.emplace_back(V,
false);
7541 while (!Stack.empty()) {
7542 auto E = Stack.pop_back_val();
7543 Value *CurV = E.getPointer();
7549 const SCEV *CreatedSCEV =
nullptr;
7552 CreatedSCEV = createSCEV(CurV);
7557 CreatedSCEV = getOperandsToCreate(CurV,
Ops);
7561 insertValueToMap(CurV, CreatedSCEV);
7565 Stack.emplace_back(CurV,
true);
7567 Stack.emplace_back(
Op,
false);
7584 if (!DT.isReachableFromEntry(
I->getParent()))
7597 switch (BO->Opcode) {
7598 case Instruction::Add:
7599 case Instruction::Mul: {
7606 Ops.push_back(BO->
Op);
7610 Ops.push_back(BO->RHS);
7614 (BO->Opcode == Instruction::Add &&
7615 (NewBO->Opcode != Instruction::Add &&
7616 NewBO->Opcode != Instruction::Sub)) ||
7617 (BO->Opcode == Instruction::Mul &&
7618 NewBO->Opcode != Instruction::Mul)) {
7619 Ops.push_back(BO->LHS);
7624 if (BO->
Op && (BO->IsNSW || BO->IsNUW)) {
7627 Ops.push_back(BO->LHS);
7635 case Instruction::Sub:
7636 case Instruction::UDiv:
7637 case Instruction::URem:
7639 case Instruction::AShr:
7640 case Instruction::Shl:
7641 case Instruction::Xor:
7645 case Instruction::And:
7646 case Instruction::Or:
7650 case Instruction::LShr:
7657 Ops.push_back(BO->LHS);
7658 Ops.push_back(BO->RHS);
7662 switch (
U->getOpcode()) {
7663 case Instruction::Trunc:
7664 case Instruction::ZExt:
7665 case Instruction::SExt:
7666 case Instruction::PtrToInt:
7667 Ops.push_back(
U->getOperand(0));
7670 case Instruction::BitCast:
7672 Ops.push_back(
U->getOperand(0));
7677 case Instruction::SDiv:
7678 case Instruction::SRem:
7679 Ops.push_back(
U->getOperand(0));
7680 Ops.push_back(
U->getOperand(1));
7683 case Instruction::GetElementPtr:
7685 "GEP source element type must be sized");
7689 case Instruction::IntToPtr:
7692 case Instruction::PHI:
7696 case Instruction::Select: {
7698 auto CanSimplifyToUnknown = [
this,
U]() {
7716 if (CanSimplifyToUnknown())
7723 case Instruction::Call:
7724 case Instruction::Invoke:
7731 switch (
II->getIntrinsicID()) {
7732 case Intrinsic::abs:
7733 Ops.push_back(
II->getArgOperand(0));
7735 case Intrinsic::umax:
7736 case Intrinsic::umin:
7737 case Intrinsic::smax:
7738 case Intrinsic::smin:
7739 case Intrinsic::usub_sat:
7740 case Intrinsic::uadd_sat:
7741 Ops.push_back(
II->getArgOperand(0));
7742 Ops.push_back(
II->getArgOperand(1));
7744 case Intrinsic::start_loop_iterations:
7745 case Intrinsic::annotation:
7746 case Intrinsic::ptr_annotation:
7747 Ops.push_back(
II->getArgOperand(0));
7759const SCEV *ScalarEvolution::createSCEV(
Value *V) {
7768 if (!DT.isReachableFromEntry(
I->getParent()))
7783 switch (BO->Opcode) {
7784 case Instruction::Add: {
7810 if (BO->Opcode == Instruction::Sub)
7818 if (BO->Opcode == Instruction::Sub)
7825 if (!NewBO || (NewBO->Opcode != Instruction::Add &&
7826 NewBO->Opcode != Instruction::Sub)) {
7836 case Instruction::Mul: {
7857 if (!NewBO || NewBO->Opcode != Instruction::Mul) {
7866 case Instruction::UDiv:
7870 case Instruction::URem:
7874 case Instruction::Sub: {
7877 Flags = getNoWrapFlagsFromUB(BO->
Op);
7882 case Instruction::And:
7888 if (CI->isMinusOne())
7890 const APInt &
A = CI->getValue();
7896 unsigned LZ =
A.countl_zero();
7897 unsigned TZ =
A.countr_zero();
7902 APInt EffectiveMask =
7904 if ((LZ != 0 || TZ != 0) && !((~
A & ~Known.
Zero) & EffectiveMask)) {
7907 const SCEV *ShiftedLHS =
nullptr;
7911 unsigned MulZeros = OpC->getAPInt().countr_zero();
7912 unsigned GCD = std::min(MulZeros, TZ);
7917 auto *NewMul =
getMulExpr(MulOps, LHSMul->getNoWrapFlags());
7939 case Instruction::Or:
7948 case Instruction::Xor:
7951 if (CI->isMinusOne())
7960 if (LBO->getOpcode() == Instruction::And &&
7961 LCI->getValue() == CI->getValue())
7962 if (
const SCEVZeroExtendExpr *Z =
7965 const SCEV *Z0 =
Z->getOperand();
7972 if (CI->getValue().isMask(Z0TySize))
7978 APInt Trunc = CI->getValue().trunc(Z0TySize);
7987 case Instruction::Shl:
8005 auto MulFlags = getNoWrapFlagsFromUB(BO->
Op);
8013 ConstantInt *
X = ConstantInt::get(
8019 case Instruction::AShr:
8041 const SCEV *AddTruncateExpr =
nullptr;
8042 ConstantInt *ShlAmtCI =
nullptr;
8043 const SCEV *AddConstant =
nullptr;
8045 if (L &&
L->getOpcode() == Instruction::Add) {
8053 if (LShift && LShift->
getOpcode() == Instruction::Shl) {
8060 APInt AddOperand = AddOperandCI->
getValue().
ashr(AShrAmt);
8068 }
else if (L &&
L->getOpcode() == Instruction::Shl) {
8073 const SCEV *ShlOp0SCEV =
getSCEV(
L->getOperand(0));
8078 if (AddTruncateExpr && ShlAmtCI) {
8090 const APInt &ShlAmt = ShlAmtCI->
getValue();
8094 const SCEV *CompositeExpr =
8096 if (
L->getOpcode() != Instruction::Shl)
8097 CompositeExpr =
getAddExpr(CompositeExpr, AddConstant);
8106 switch (
U->getOpcode()) {
8107 case Instruction::Trunc:
8110 case Instruction::ZExt:
8113 case Instruction::SExt:
8123 if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
8124 Type *Ty =
U->getType();
8132 case Instruction::BitCast:
8138 case Instruction::PtrToInt: {
8141 Type *DstIntTy =
U->getType();
8149 case Instruction::IntToPtr:
8153 case Instruction::SDiv:
8160 case Instruction::SRem:
8167 case Instruction::GetElementPtr:
8170 case Instruction::PHI:
8173 case Instruction::Select:
8174 return createNodeForSelectOrPHI(U,
U->getOperand(0),
U->getOperand(1),
8177 case Instruction::Call:
8178 case Instruction::Invoke:
8183 switch (
II->getIntrinsicID()) {
8184 case Intrinsic::abs:
8188 case Intrinsic::umax:
8192 case Intrinsic::umin:
8196 case Intrinsic::smax:
8200 case Intrinsic::smin:
8204 case Intrinsic::usub_sat: {
8205 const SCEV *
X =
getSCEV(
II->getArgOperand(0));
8206 const SCEV *
Y =
getSCEV(
II->getArgOperand(1));
8210 case Intrinsic::uadd_sat: {
8211 const SCEV *
X =
getSCEV(
II->getArgOperand(0));
8212 const SCEV *
Y =
getSCEV(
II->getArgOperand(1));
8216 case Intrinsic::start_loop_iterations:
8217 case Intrinsic::annotation:
8218 case Intrinsic::ptr_annotation:
8222 case Intrinsic::vscale:
8242 auto *ExitCountType = ExitCount->
getType();
8243 assert(ExitCountType->isIntegerTy());
8245 1 + ExitCountType->getScalarSizeInBits());
8258 auto CanAddOneWithoutOverflow = [&]() {
8260 getRangeRef(ExitCount, RangeSignHint::HINT_RANGE_UNSIGNED);
8271 if (EvalSize > ExitCountSize && CanAddOneWithoutOverflow())
8301 assert(ExitingBlock &&
"Must pass a non-null exiting block!");
8302 assert(L->isLoopExiting(ExitingBlock) &&
8303 "Exiting block must actually branch out of the loop!");
8312 const auto *MaxExitCount =
8320 L->getExitingBlocks(ExitingBlocks);
8322 std::optional<unsigned> Res;
8323 for (
auto *ExitingBB : ExitingBlocks) {
8327 Res = std::gcd(*Res, Multiple);
8329 return Res.value_or(1);
8333 const SCEV *ExitCount) {
8363 assert(ExitingBlock &&
"Must pass a non-null exiting block!");
8364 assert(L->isLoopExiting(ExitingBlock) &&
8365 "Exiting block must actually branch out of the loop!");
8375 return getBackedgeTakenInfo(L).getExact(ExitingBlock,
this);
8377 return getBackedgeTakenInfo(L).getSymbolicMax(ExitingBlock,
this);
8379 return getBackedgeTakenInfo(L).getConstantMax(ExitingBlock,
this);
8389 return getPredicatedBackedgeTakenInfo(L).getExact(ExitingBlock,
this,
8392 return getPredicatedBackedgeTakenInfo(L).getSymbolicMax(ExitingBlock,
this,
8395 return getPredicatedBackedgeTakenInfo(L).getConstantMax(ExitingBlock,
this,
8403 return getPredicatedBackedgeTakenInfo(L).getExact(L,
this, &Preds);
8410 return getBackedgeTakenInfo(L).getExact(L,
this);
8412 return getBackedgeTakenInfo(L).getConstantMax(
this);
8414 return getBackedgeTakenInfo(L).getSymbolicMax(L,
this);
8421 return getPredicatedBackedgeTakenInfo(L).getSymbolicMax(L,
this, &Preds);
8426 return getPredicatedBackedgeTakenInfo(L).getConstantMax(
this, &Preds);
8430 return getBackedgeTakenInfo(L).isConstantMaxOrZero(
this);
8440 for (
PHINode &PN : Header->phis())
8441 if (Visited.
insert(&PN).second)
8445ScalarEvolution::BackedgeTakenInfo &
8446ScalarEvolution::getPredicatedBackedgeTakenInfo(
const Loop *L) {
8447 auto &BTI = getBackedgeTakenInfo(L);
8448 if (BTI.hasFullInfo())
8451 auto Pair = PredicatedBackedgeTakenCounts.try_emplace(L);
8454 return Pair.first->second;
8456 BackedgeTakenInfo
Result =
8457 computeBackedgeTakenCount(L,
true);
8459 return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result);
8462ScalarEvolution::BackedgeTakenInfo &
8463ScalarEvolution::getBackedgeTakenInfo(
const Loop *L) {
8469 std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator,
bool> Pair =
8470 BackedgeTakenCounts.try_emplace(L);
8472 return Pair.first->second;
8477 BackedgeTakenInfo
Result = computeBackedgeTakenCount(L);
8484 if (
Result.hasAnyInfo()) {
8487 auto LoopUsersIt = LoopUsers.find(L);
8488 if (LoopUsersIt != LoopUsers.end())
8490 forgetMemoizedResults(ToForget);
8493 for (PHINode &PN :
L->getHeader()->phis())
8494 ConstantEvolutionLoopExitValue.erase(&PN);
8502 return BackedgeTakenCounts.find(L)->second = std::move(Result);
8511 BackedgeTakenCounts.clear();
8512 PredicatedBackedgeTakenCounts.clear();
8513 BECountUsers.clear();
8514 LoopPropertiesCache.clear();
8515 ConstantEvolutionLoopExitValue.clear();
8516 ValueExprMap.clear();
8517 ValuesAtScopes.clear();
8518 ValuesAtScopesUsers.clear();
8519 LoopDispositions.clear();
8520 BlockDispositions.clear();
8521 UnsignedRanges.clear();
8522 SignedRanges.clear();
8523 ExprValueMap.clear();
8525 ConstantMultipleCache.clear();
8526 PredicatedSCEVRewrites.clear();
8528 FoldCacheUser.clear();
8530void ScalarEvolution::visitAndClearUsers(
8534 while (!Worklist.
empty()) {
8541 if (It != ValueExprMap.
end()) {
8542 eraseValueFromMap(It->first);
8545 ConstantEvolutionLoopExitValue.erase(PN);
8559 while (!LoopWorklist.
empty()) {
8563 forgetBackedgeTakenCounts(CurrL,
false);
8564 forgetBackedgeTakenCounts(CurrL,
true);
8567 for (
auto I = PredicatedSCEVRewrites.begin();
8568 I != PredicatedSCEVRewrites.end();) {
8569 std::pair<const SCEV *, const Loop *> Entry =
I->first;
8570 if (Entry.second == CurrL)
8571 PredicatedSCEVRewrites.erase(
I++);
8576 auto LoopUsersItr = LoopUsers.find(CurrL);
8577 if (LoopUsersItr != LoopUsers.end())
8582 visitAndClearUsers(Worklist, Visited, ToForget);
8584 LoopPropertiesCache.erase(CurrL);
8587 LoopWorklist.
append(CurrL->begin(), CurrL->end());
8589 forgetMemoizedResults(ToForget);
8606 visitAndClearUsers(Worklist, Visited, ToForget);
8608 forgetMemoizedResults(ToForget);
8620 struct InvalidationRootCollector {
8624 InvalidationRootCollector(
Loop *L) : L(L) {}
8626 bool follow(
const SCEV *S) {
8632 if (L->contains(AddRec->
getLoop()))
8637 bool isDone()
const {
return false; }
8640 InvalidationRootCollector
C(L);
8642 forgetMemoizedResults(
C.Roots);
8655 BlockDispositions.clear();
8656 LoopDispositions.clear();
8673 while (!Worklist.
empty()) {
8675 bool LoopDispoRemoved = LoopDispositions.erase(Curr);
8676 bool BlockDispoRemoved = BlockDispositions.erase(Curr);
8677 if (!LoopDispoRemoved && !BlockDispoRemoved)
8679 auto Users = SCEVUsers.find(Curr);
8680 if (
Users != SCEVUsers.end())
8693const SCEV *ScalarEvolution::BackedgeTakenInfo::getExact(
8697 if (!isComplete() || ExitNotTaken.
empty())
8708 for (
const auto &ENT : ExitNotTaken) {
8709 const SCEV *BECount = ENT.ExactNotTaken;
8712 "We should only have known counts for exiting blocks that dominate "
8715 Ops.push_back(BECount);
8720 assert((Preds || ENT.hasAlwaysTruePredicate()) &&
8721 "Predicate should be always true!");
8730const ScalarEvolution::ExitNotTakenInfo *
8731ScalarEvolution::BackedgeTakenInfo::getExitNotTaken(
8732 const BasicBlock *ExitingBlock,
8733 SmallVectorImpl<const SCEVPredicate *> *Predicates)
const {
8734 for (
const auto &ENT : ExitNotTaken)
8735 if (ENT.ExitingBlock == ExitingBlock) {
8736 if (ENT.hasAlwaysTruePredicate())
8738 else if (Predicates) {
8748const SCEV *ScalarEvolution::BackedgeTakenInfo::getConstantMax(
8750 SmallVectorImpl<const SCEVPredicate *> *Predicates)
const {
8751 if (!getConstantMax())
8754 for (
const auto &ENT : ExitNotTaken)
8755 if (!ENT.hasAlwaysTruePredicate()) {
8763 "No point in having a non-constant max backedge taken count!");
8764 return getConstantMax();
8767const SCEV *ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(
8769 SmallVectorImpl<const SCEVPredicate *> *Predicates) {
8777 for (
const auto &ENT : ExitNotTaken) {
8778 const SCEV *ExitCount = ENT.SymbolicMaxNotTaken;
8781 "We should only have known counts for exiting blocks that "
8787 assert((Predicates || ENT.hasAlwaysTruePredicate()) &&
8788 "Predicate should be always true!");
8791 if (ExitCounts.
empty())
8800bool ScalarEvolution::BackedgeTakenInfo::isConstantMaxOrZero(
8802 auto PredicateNotAlwaysTrue = [](
const ExitNotTakenInfo &ENT) {
8803 return !ENT.hasAlwaysTruePredicate();
8805 return MaxOrZero && !
any_of(ExitNotTaken, PredicateNotAlwaysTrue);
8821 this->ExactNotTaken = E = ConstantMaxNotTaken;
8822 this->SymbolicMaxNotTaken = SymbolicMaxNotTaken = ConstantMaxNotTaken;
8827 "Exact is not allowed to be less precise than Constant Max");
8830 "Exact is not allowed to be less precise than Symbolic Max");
8833 "Symbolic Max is not allowed to be less precise than Constant Max");
8836 "No point in having a non-constant max backedge taken count!");
8838 for (
const auto PredList : PredLists)
8839 for (
const auto *
P : PredList) {
8847 "Backedge count should be int");
8850 "Max backedge count should be int");
8863ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
8865 bool IsComplete,
const SCEV *ConstantMax,
bool MaxOrZero)
8866 : ConstantMax(ConstantMax), IsComplete(IsComplete), MaxOrZero(MaxOrZero) {
8867 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
8869 ExitNotTaken.reserve(ExitCounts.
size());
8870 std::transform(ExitCounts.
begin(), ExitCounts.
end(),
8871 std::back_inserter(ExitNotTaken),
8872 [&](
const EdgeExitInfo &EEI) {
8873 BasicBlock *ExitBB = EEI.first;
8874 const ExitLimit &EL = EEI.second;
8875 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken,
8876 EL.ConstantMaxNotTaken, EL.SymbolicMaxNotTaken,
8881 "No point in having a non-constant max backedge taken count!");
8885ScalarEvolution::BackedgeTakenInfo
8886ScalarEvolution::computeBackedgeTakenCount(
const Loop *L,
8887 bool AllowPredicates) {
8889 L->getExitingBlocks(ExitingBlocks);
8891 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
8894 bool CouldComputeBECount =
true;
8896 const SCEV *MustExitMaxBECount =
nullptr;
8897 const SCEV *MayExitMaxBECount =
nullptr;
8898 bool MustExitMaxOrZero =
false;
8899 bool IsOnlyExit = ExitingBlocks.
size() == 1;
8911 if (ExitIfTrue == CI->
isZero())
8915 ExitLimit EL = computeExitLimit(L, ExitBB, IsOnlyExit, AllowPredicates);
8917 assert((AllowPredicates || EL.Predicates.empty()) &&
8918 "Predicated exit limit when predicates are not allowed!");
8923 ++NumExitCountsComputed;
8927 CouldComputeBECount =
false;
8934 "Exact is known but symbolic isn't?");
8935 ++NumExitCountsNotComputed;
8950 DT.dominates(ExitBB, Latch)) {
8951 if (!MustExitMaxBECount) {
8952 MustExitMaxBECount = EL.ConstantMaxNotTaken;
8953 MustExitMaxOrZero = EL.MaxOrZero;
8956 EL.ConstantMaxNotTaken);
8960 MayExitMaxBECount = EL.ConstantMaxNotTaken;
8963 EL.ConstantMaxNotTaken);
8967 const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
8971 bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1);
8977 for (
const auto &Pair : ExitCounts) {
8979 BECountUsers[Pair.second.ExactNotTaken].insert({
L, AllowPredicates});
8981 BECountUsers[Pair.second.SymbolicMaxNotTaken].insert(
8982 {
L, AllowPredicates});
8984 return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount,
8985 MaxBECount, MaxOrZero);
8988ScalarEvolution::ExitLimit
8989ScalarEvolution::computeExitLimit(
const Loop *L, BasicBlock *ExitingBlock,
8990 bool IsOnlyExit,
bool AllowPredicates) {
8991 assert(
L->contains(ExitingBlock) &&
"Exit count for non-loop block?");
8995 if (!Latch || !DT.dominates(ExitingBlock, Latch))
9003 "It should have one successor in loop and one exit block!");
9014 if (!
L->contains(SBB)) {
9019 assert(Exit &&
"Exiting block must have at least one exit");
9020 return computeExitLimitFromSingleExitSwitch(
9021 L, SI, Exit, IsOnlyExit);
9028 const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
bool ControlsOnlyExit,
9029 bool AllowPredicates) {
9030 ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates);
9031 return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue,
9032 ControlsOnlyExit, AllowPredicates);
9035std::optional<ScalarEvolution::ExitLimit>
9036ScalarEvolution::ExitLimitCache::find(
const Loop *L,
Value *ExitCond,
9037 bool ExitIfTrue,
bool ControlsOnlyExit,
9038 bool AllowPredicates) {
9040 (void)this->ExitIfTrue;
9041 (void)this->AllowPredicates;
9043 assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
9044 this->AllowPredicates == AllowPredicates &&
9045 "Variance in assumed invariant key components!");
9046 auto Itr = TripCountMap.find({ExitCond, ControlsOnlyExit});
9047 if (Itr == TripCountMap.end())
9048 return std::nullopt;
9052void ScalarEvolution::ExitLimitCache::insert(
const Loop *L,
Value *ExitCond,
9054 bool ControlsOnlyExit,
9055 bool AllowPredicates,
9057 assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
9058 this->AllowPredicates == AllowPredicates &&
9059 "Variance in assumed invariant key components!");
9061 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsOnlyExit}, EL});
9062 assert(InsertResult.second &&
"Expected successful insertion!");
9067ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached(
9068 ExitLimitCacheTy &Cache,
const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
9069 bool ControlsOnlyExit,
bool AllowPredicates) {
9071 if (
auto MaybeEL = Cache.find(L, ExitCond, ExitIfTrue, ControlsOnlyExit,
9075 ExitLimit EL = computeExitLimitFromCondImpl(
9076 Cache, L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates);
9077 Cache.insert(L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates, EL);
9081ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl(
9082 ExitLimitCacheTy &Cache,
const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
9083 bool ControlsOnlyExit,
bool AllowPredicates) {
9085 if (
auto LimitFromBinOp = computeExitLimitFromCondFromBinOp(
9086 Cache, L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates))
9087 return *LimitFromBinOp;
9093 computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsOnlyExit);
9094 if (EL.hasFullInfo() || !AllowPredicates)
9098 return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue,
9118 const WithOverflowInst *WO;
9133 auto EL = computeExitLimitFromICmp(L, Pred,
LHS,
getConstant(NewRHSC),
9134 ControlsOnlyExit, AllowPredicates);
9135 if (EL.hasAnyInfo())
9140 return computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
9143std::optional<ScalarEvolution::ExitLimit>
9144ScalarEvolution::computeExitLimitFromCondFromBinOp(
9145 ExitLimitCacheTy &Cache,
const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
9146 bool ControlsOnlyExit,
bool AllowPredicates) {
9155 return std::nullopt;
9160 bool EitherMayExit = IsAnd ^ ExitIfTrue;
9161 ExitLimit EL0 = computeExitLimitFromCondCached(
9162 Cache, L, Op0, ExitIfTrue, ControlsOnlyExit && !EitherMayExit,
9164 ExitLimit EL1 = computeExitLimitFromCondCached(
9165 Cache, L, Op1, ExitIfTrue, ControlsOnlyExit && !EitherMayExit,
9169 const Constant *NeutralElement = ConstantInt::get(ExitCond->
getType(), IsAnd);
9171 return Op1 == NeutralElement ? EL0 : EL1;
9173 return Op0 == NeutralElement ? EL1 : EL0;
9178 if (EitherMayExit) {
9188 ConstantMaxBECount = EL1.ConstantMaxNotTaken;
9190 ConstantMaxBECount = EL0.ConstantMaxNotTaken;
9193 EL1.ConstantMaxNotTaken);
9195 SymbolicMaxBECount = EL1.SymbolicMaxNotTaken;
9197 SymbolicMaxBECount = EL0.SymbolicMaxNotTaken;
9200 EL0.SymbolicMaxNotTaken, EL1.SymbolicMaxNotTaken, UseSequentialUMin);
9204 if (EL0.ExactNotTaken == EL1.ExactNotTaken)
9205 BECount = EL0.ExactNotTaken;
9218 SymbolicMaxBECount =
9220 return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount,
false,
9224ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromICmp(
9225 const Loop *L, ICmpInst *ExitCond,
bool ExitIfTrue,
bool ControlsOnlyExit,
9226 bool AllowPredicates) {
9238 ExitLimit EL = computeExitLimitFromICmp(L, Pred,
LHS,
RHS, ControlsOnlyExit,
9240 if (EL.hasAnyInfo())
9243 auto *ExhaustiveCount =
9244 computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
9247 return ExhaustiveCount;
9249 return computeShiftCompareExitLimit(ExitCond->
getOperand(0),
9252ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromICmp(
9253 const Loop *L, CmpPredicate Pred,
const SCEV *
LHS,
const SCEV *
RHS,
9254 bool ControlsOnlyExit,
bool AllowPredicates) {
9279 ConstantRange CompRange =
9295 auto *InnerLHS =
LHS;
9297 InnerLHS = ZExt->getOperand();
9344 if (EL.hasAnyInfo())
9361 if (EL.hasAnyInfo())
return EL;
9393 ExitLimit EL = howManyLessThans(
LHS,
RHS, L, IsSigned, ControlsOnlyExit,
9395 if (EL.hasAnyInfo())
9411 ExitLimit EL = howManyGreaterThans(
LHS,
RHS, L, IsSigned, ControlsOnlyExit,
9413 if (EL.hasAnyInfo())
9424ScalarEvolution::ExitLimit
9425ScalarEvolution::computeExitLimitFromSingleExitSwitch(
const Loop *L,
9427 BasicBlock *ExitingBlock,
9428 bool ControlsOnlyExit) {
9429 assert(!
L->contains(ExitingBlock) &&
"Not an exiting block!");
9432 if (
Switch->getDefaultDest() == ExitingBlock)
9436 "Default case must not exit the loop!");
9442 if (EL.hasAnyInfo())
9454 "Evaluation of SCEV at constant didn't fold correctly?");
9458ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
9468 const BasicBlock *Predecessor =
L->getLoopPredecessor();
9474 auto MatchPositiveShift =
9477 using namespace PatternMatch;
9479 ConstantInt *ShiftAmt;
9481 OutOpCode = Instruction::LShr;
9483 OutOpCode = Instruction::AShr;
9485 OutOpCode = Instruction::Shl;
9500 auto MatchShiftRecurrence =
9502 std::optional<Instruction::BinaryOps> PostShiftOpCode;
9517 if (MatchPositiveShift(
LHS, V, OpC)) {
9518 PostShiftOpCode = OpC;
9524 if (!PNOut || PNOut->getParent() !=
L->getHeader())
9527 Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
9533 MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&
9540 (!PostShiftOpCode || *PostShiftOpCode == OpCodeOut);
9545 if (!MatchShiftRecurrence(
LHS, PN, OpCode))
9557 ConstantInt *StableValue =
nullptr;
9562 case Instruction::AShr: {
9570 StableValue = ConstantInt::get(Ty, 0);
9572 StableValue = ConstantInt::get(Ty, -1,
true);
9578 case Instruction::LShr:
9579 case Instruction::Shl:
9589 "Otherwise cannot be an operand to a branch instruction");
9591 if (
Result->isZeroValue()) {
9593 const SCEV *UpperBound =
9610 if (
const Function *
F = CI->getCalledFunction())
9619 if (!L->contains(
I))
return false;
9624 return L->getHeader() ==
I->getParent();
9700 if (!
I)
return nullptr;
9713 std::vector<Constant*> Operands(
I->getNumOperands());
9715 for (
unsigned i = 0, e =
I->getNumOperands(); i != e; ++i) {
9719 if (!Operands[i])
return nullptr;
9724 if (!
C)
return nullptr;
9746 if (IncomingVal != CurrentVal) {
9749 IncomingVal = CurrentVal;
9761ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
9764 auto [
I,
Inserted] = ConstantEvolutionLoopExitValue.try_emplace(PN);
9773 DenseMap<Instruction *, Constant *> CurrentIterVals;
9775 assert(PN->
getParent() == Header &&
"Can't evaluate PHI not in loop header!");
9781 for (PHINode &
PHI : Header->phis()) {
9783 CurrentIterVals[&
PHI] = StartCST;
9785 if (!CurrentIterVals.
count(PN))
9786 return RetVal =
nullptr;
9792 "BEs is <= MaxBruteForceIterations which is an 'unsigned'!");
9795 unsigned IterationNum = 0;
9797 for (; ; ++IterationNum) {
9798 if (IterationNum == NumIterations)
9799 return RetVal = CurrentIterVals[PN];
9803 DenseMap<Instruction *, Constant *> NextIterVals;
9808 NextIterVals[PN] = NextPHI;
9810 bool StoppedEvolving = NextPHI == CurrentIterVals[PN];
9816 for (
const auto &
I : CurrentIterVals) {
9818 if (!
PHI ||
PHI == PN ||
PHI->getParent() != Header)
continue;
9823 for (
const auto &
I : PHIsToCompute) {
9824 PHINode *
PHI =
I.first;
9827 Value *BEValue =
PHI->getIncomingValueForBlock(Latch);
9830 if (NextPHI !=
I.second)
9831 StoppedEvolving =
false;
9836 if (StoppedEvolving)
9837 return RetVal = CurrentIterVals[PN];
9839 CurrentIterVals.swap(NextIterVals);
9843const SCEV *ScalarEvolution::computeExitCountExhaustively(
const Loop *L,
9853 DenseMap<Instruction *, Constant *> CurrentIterVals;
9855 assert(PN->
getParent() == Header &&
"Can't evaluate PHI not in loop header!");
9858 assert(Latch &&
"Should follow from NumIncomingValues == 2!");
9860 for (PHINode &
PHI : Header->phis()) {
9862 CurrentIterVals[&
PHI] = StartCST;
9864 if (!CurrentIterVals.
count(PN))
9872 for (
unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){
9879 if (CondVal->getValue() == uint64_t(ExitWhen)) {
9880 ++NumBruteForceTripCountsComputed;
9885 DenseMap<Instruction *, Constant *> NextIterVals;
9891 for (
const auto &
I : CurrentIterVals) {
9893 if (!
PHI ||
PHI->getParent() != Header)
continue;
9896 for (PHINode *
PHI : PHIsToCompute) {
9898 if (NextPHI)
continue;
9900 Value *BEValue =
PHI->getIncomingValueForBlock(Latch);
9903 CurrentIterVals.
swap(NextIterVals);
9914 for (
auto &LS : Values)
9916 return LS.second ? LS.second : V;
9921 const SCEV *
C = computeSCEVAtScope(V, L);
9922 for (
auto &LS :
reverse(ValuesAtScopes[V]))
9923 if (LS.first == L) {
9926 ValuesAtScopesUsers[
C].push_back({L, V});
9937 switch (V->getSCEVType()) {
9970 assert(!
C->getType()->isPointerTy() &&
9971 "Can only have one pointer, and it must be last");
9998ScalarEvolution::getWithOperands(
const SCEV *S,
9999 SmallVectorImpl<const SCEV *> &NewOps) {
10033const SCEV *ScalarEvolution::computeSCEVAtScope(
const SCEV *V,
const Loop *L) {
10034 switch (
V->getSCEVType()) {
10045 for (
unsigned i = 0, e = AddRec->
getNumOperands(); i != e; ++i) {
10056 for (++i; i !=
e; ++i)
10100 for (
unsigned i = 0, e =
Ops.size(); i != e; ++i) {
10102 if (OpAtScope !=
Ops[i]) {
10110 for (++i; i !=
e; ++i) {
10115 return getWithOperands(V, NewOps);
10130 const Loop *CurrLoop = this->LI[
I->getParent()];
10141 if (BackedgeTakenCount->
isZero()) {
10142 Value *InitValue =
nullptr;
10143 bool MultipleInitValues =
false;
10149 MultipleInitValues =
true;
10154 if (!MultipleInitValues && InitValue)
10163 unsigned InLoopPred =
10174 getConstantEvolutionLoopExitValue(PN, BTCC->getAPInt(), CurrLoop);
10189 Operands.
reserve(
I->getNumOperands());
10190 bool MadeImprovement =
false;
10205 MadeImprovement |= OrigV != OpV;
10210 assert(
C->getType() ==
Op->getType() &&
"Type mismatch");
10215 if (!MadeImprovement)
10236const SCEV *ScalarEvolution::stripInjectiveFunctions(
const SCEV *S)
const {
10238 return stripInjectiveFunctions(ZExt->getOperand());
10240 return stripInjectiveFunctions(SExt->getOperand());
10258 assert(
A != 0 &&
"A must be non-zero.");
10274 if (MinTZ < Mult2 && L->getLoopPredecessor())
10276 if (MinTZ < Mult2) {
10299 APInt AD =
A.lshr(Mult2).trunc(BW - Mult2);
10319static std::optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>>
10325 LLVM_DEBUG(
dbgs() << __func__ <<
": analyzing quadratic addrec: "
10326 << *AddRec <<
'\n');
10329 if (!LC || !MC || !
NC) {
10330 LLVM_DEBUG(
dbgs() << __func__ <<
": coefficients are not constant\n");
10331 return std::nullopt;
10337 assert(!
N.isZero() &&
"This is not a quadratic addrec");
10345 N =
N.sext(NewWidth);
10346 M = M.sext(NewWidth);
10347 L = L.sext(NewWidth);
10364 <<
"x + " <<
C <<
", coeff bw: " << NewWidth
10365 <<
", multiplied by " <<
T <<
'\n');
10374 std::optional<APInt>
Y) {
10376 unsigned W = std::max(
X->getBitWidth(),
Y->getBitWidth());
10379 return XW.
slt(YW) ? *
X : *
Y;
10382 return std::nullopt;
10383 return X ? *
X : *
Y;
10400 return std::nullopt;
10401 unsigned W =
X->getBitWidth();
10421static std::optional<APInt>
10427 return std::nullopt;
10430 LLVM_DEBUG(
dbgs() << __func__ <<
": solving for unsigned overflow\n");
10431 std::optional<APInt>
X =
10434 return std::nullopt;
10439 return std::nullopt;
10454static std::optional<APInt>
10458 "Starting value of addrec should be 0");
10459 LLVM_DEBUG(
dbgs() << __func__ <<
": solving boundary crossing for range "
10460 <<
Range <<
", addrec " << *AddRec <<
'\n');
10464 "Addrec's initial value should be in range");
10470 return std::nullopt;
10480 auto SolveForBoundary =
10481 [&](
APInt Bound) -> std::pair<std::optional<APInt>,
bool> {
10484 LLVM_DEBUG(
dbgs() <<
"SolveQuadraticAddRecRange: checking boundary "
10485 << Bound <<
" (before multiplying by " << M <<
")\n");
10488 std::optional<APInt> SO;
10491 "signed overflow\n");
10495 "unsigned overflow\n");
10496 std::optional<APInt> UO =
10499 auto LeavesRange = [&] (
const APInt &
X) {
10516 return {std::nullopt,
false};
10521 if (LeavesRange(*Min))
10522 return { Min,
true };
10523 std::optional<APInt> Max = Min == SO ? UO : SO;
10524 if (LeavesRange(*Max))
10525 return { Max,
true };
10528 return {std::nullopt,
true};
10535 auto SL = SolveForBoundary(
Lower);
10536 auto SU = SolveForBoundary(
Upper);
10539 if (!SL.second || !SU.second)
10540 return std::nullopt;
10583ScalarEvolution::ExitLimit ScalarEvolution::howFarToZero(
const SCEV *V,
10585 bool ControlsOnlyExit,
10586 bool AllowPredicates) {
10597 if (
C->getValue()->isZero())
return C;
10601 const SCEVAddRecExpr *AddRec =
10604 if (!AddRec && AllowPredicates)
10610 if (!AddRec || AddRec->
getLoop() != L)
10621 return ExitLimit(R, R, R,
false, Predicates);
10679 const SCEV *DistancePlusOne =
getAddExpr(Distance, One);
10705 const SCEV *
Exact =
10713 const SCEV *SymbolicMax =
10715 return ExitLimit(
Exact, ConstantMax, SymbolicMax,
false, Predicates);
10724 AllowPredicates ? &Predicates :
nullptr, *
this, L);
10732 return ExitLimit(
E, M, S,
false, Predicates);
10735ScalarEvolution::ExitLimit
10736ScalarEvolution::howFarToNonZero(
const SCEV *V,
const Loop *L) {
10744 if (!
C->getValue()->isZero())
10754std::pair<const BasicBlock *, const BasicBlock *>
10755ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(
const BasicBlock *BB)
10766 if (
const Loop *L = LI.getLoopFor(BB))
10767 return {
L->getLoopPredecessor(),
L->getHeader()};
10769 return {
nullptr, BB};
10778 if (
A ==
B)
return true;
10793 if (ComputesEqualValues(AI, BI))
10801 const SCEV *Op0, *Op1;
10820 auto TrivialCase = [&](
bool TriviallyTrue) {
10829 const SCEV *NewLHS, *NewRHS;
10853 return TrivialCase(
false);
10854 return TrivialCase(
true);
10877 const APInt &
RA = RC->getAPInt();
10879 bool SimplifiedByConstantRange =
false;
10884 return TrivialCase(
true);
10886 return TrivialCase(
false);
10895 Changed = SimplifiedByConstantRange =
true;
10899 if (!SimplifiedByConstantRange) {
10916 assert(!
RA.isMinValue() &&
"Should have been caught earlier!");
10922 assert(!
RA.isMaxValue() &&
"Should have been caught earlier!");
10928 assert(!
RA.isMinSignedValue() &&
"Should have been caught earlier!");
10934 assert(!
RA.isMaxSignedValue() &&
"Should have been caught earlier!");
10946 return TrivialCase(
true);
10948 return TrivialCase(
false);
11053 auto NonRecursive = [
this, OrNegative](
const SCEV *S) {
11055 return C->getAPInt().isPowerOf2() ||
11056 (OrNegative &&
C->getAPInt().isNegatedPowerOf2());
11059 return isa<SCEVVScale>(S) && F.hasFnAttribute(Attribute::VScaleRange);
11062 if (NonRecursive(S))
11088 APInt C = Cst->getAPInt();
11089 return C.urem(M) == 0;
11097 const SCEV *SmodM =
11112 for (
auto *
A : Assumptions)
11113 if (
A->implies(
P, *
this))
11126std::pair<const SCEV *, const SCEV *>
11129 const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *
this);
11131 return { Start, Start };
11133 const SCEV *
PostInc = SCEVPostIncRewriter::rewrite(S, L, *
this);
11142 getUsedLoops(LHS, LoopsUsed);
11143 getUsedLoops(RHS, LoopsUsed);
11145 if (LoopsUsed.
empty())
11150 for (
const auto *L1 : LoopsUsed)
11151 for (
const auto *L2 : LoopsUsed)
11152 assert((DT.dominates(L1->getHeader(), L2->getHeader()) ||
11153 DT.dominates(L2->getHeader(), L1->getHeader())) &&
11154 "Domination relationship is not a linear order");
11184 SplitRHS.second) &&
11196 if (isKnownPredicateViaSplitting(Pred, LHS, RHS))
11200 return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS);
11210 return std::nullopt;
11225 if (KnownWithoutContext)
11226 return KnownWithoutContext;
11233 return std::nullopt;
11239 const Loop *L = LHS->getLoop();
11244std::optional<ScalarEvolution::MonotonicPredicateType>
11247 auto Result = getMonotonicPredicateTypeImpl(LHS, Pred);
11253 auto ResultSwapped =
11256 assert(*ResultSwapped != *Result &&
11257 "monotonicity should flip as we flip the predicate");
11264std::optional<ScalarEvolution::MonotonicPredicateType>
11265ScalarEvolution::getMonotonicPredicateTypeImpl(
const SCEVAddRecExpr *LHS,
11279 return std::nullopt;
11283 "Should be greater or less!");
11287 if (!LHS->hasNoUnsignedWrap())
11288 return std::nullopt;
11292 "Relational predicate is either signed or unsigned!");
11293 if (!
LHS->hasNoSignedWrap())
11294 return std::nullopt;
11296 const SCEV *Step =
LHS->getStepRecurrence(*
this);
11304 return std::nullopt;
11307std::optional<ScalarEvolution::LoopInvariantPredicate>
11314 return std::nullopt;
11321 if (!ArLHS || ArLHS->
getLoop() != L)
11322 return std::nullopt;
11326 return std::nullopt;
11352 return std::nullopt;
11389 return std::nullopt;
11392std::optional<ScalarEvolution::LoopInvariantPredicate>
11397 Pred, LHS, RHS, L, CtxI, MaxIter))
11405 for (
auto *
Op :
UMin->operands())
11407 Pred, LHS, RHS, L, CtxI,
Op))
11409 return std::nullopt;
11412std::optional<ScalarEvolution::LoopInvariantPredicate>
11427 return std::nullopt;
11434 if (!AR || AR->
getLoop() != L)
11435 return std::nullopt;
11439 return std::nullopt;
11445 if (Step != One && Step != MinusOne)
11446 return std::nullopt;
11452 return std::nullopt;
11458 return std::nullopt;
11466 if (Step == MinusOne)
11470 return std::nullopt;
11476bool ScalarEvolution::isKnownPredicateViaConstantRanges(
CmpPredicate Pred,
11482 auto CheckRange = [&](
bool IsSigned) {
11485 return RangeLHS.
icmp(Pred, RangeRHS);
11494 if (CheckRange(
true) || CheckRange(
false))
11503bool ScalarEvolution::isKnownPredicateViaNoOverflow(CmpPredicate Pred,
11510 auto MatchBinaryAddToConst = [
this](
const SCEV *
X,
const SCEV *
Y,
11511 APInt &OutC1, APInt &OutC2,
11513 const SCEV *XNonConstOp, *XConstOp;
11514 const SCEV *YNonConstOp, *YConstOp;
11518 if (!splitBinaryAdd(
X, XConstOp, XNonConstOp, XFlagsPresent)) {
11521 XFlagsPresent = ExpectedFlags;
11526 if (!splitBinaryAdd(
Y, YConstOp, YNonConstOp, YFlagsPresent)) {
11529 YFlagsPresent = ExpectedFlags;
11532 if (YNonConstOp != XNonConstOp)
11540 if ((YFlagsPresent & ExpectedFlags) != ExpectedFlags)
11543 (XFlagsPresent & ExpectedFlags) != ExpectedFlags) {
11603bool ScalarEvolution::isKnownPredicateViaSplitting(CmpPredicate Pred,
11625bool ScalarEvolution::isImpliedViaGuard(
const BasicBlock *BB, CmpPredicate Pred,
11626 const SCEV *
LHS,
const SCEV *
RHS) {
11631 return any_of(*BB, [&](
const Instruction &
I) {
11632 using namespace llvm::PatternMatch;
11637 isImpliedCond(Pred,
LHS,
RHS, Condition,
false);
11651 if (!L || !DT.isReachableFromEntry(L->getHeader()))
11656 "This cannot be done on broken IR!");
11659 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
11668 if (LoopContinuePredicate && LoopContinuePredicate->
isConditional() &&
11669 isImpliedCond(Pred, LHS, RHS,
11671 LoopContinuePredicate->
getSuccessor(0) != L->getHeader()))
11676 if (WalkingBEDominatingConds)
11682 const auto &BETakenInfo = getBackedgeTakenInfo(L);
11683 const SCEV *LatchBECount = BETakenInfo.getExact(Latch,
this);
11690 const SCEV *LoopCounter =
11698 for (
auto &AssumeVH : AC.assumptions()) {
11705 if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0),
false))
11709 if (isImpliedViaGuard(Latch, Pred, LHS, RHS))
11712 for (
DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()];
11713 DTN != HeaderDTN; DTN = DTN->getIDom()) {
11714 assert(DTN &&
"should reach the loop header before reaching the root!");
11717 if (isImpliedViaGuard(BB, Pred, LHS, RHS))
11725 if (!ContinuePredicate || !ContinuePredicate->
isConditional())
11739 assert(DT.dominates(DominatingEdge, Latch) &&
"should be!");
11741 if (isImpliedCond(Pred, LHS, RHS, Condition,
11755 if (!DT.isReachableFromEntry(BB))
11759 "This cannot be done on broken IR!");
11767 const bool ProvingStrictComparison =
11769 bool ProvedNonStrictComparison =
false;
11770 bool ProvedNonEquality =
false;
11773 if (!ProvedNonStrictComparison)
11774 ProvedNonStrictComparison = Fn(NonStrictPredicate);
11775 if (!ProvedNonEquality)
11777 if (ProvedNonStrictComparison && ProvedNonEquality)
11782 if (ProvingStrictComparison) {
11784 return isKnownViaNonRecursiveReasoning(
P, LHS, RHS);
11786 if (SplitAndProve(ProofFn))
11791 auto ProveViaCond = [&](
const Value *Condition,
bool Inverse) {
11793 if (isImpliedCond(Pred, LHS, RHS, Condition,
Inverse, CtxI))
11795 if (ProvingStrictComparison) {
11797 return isImpliedCond(
P, LHS, RHS, Condition,
Inverse, CtxI);
11799 if (SplitAndProve(ProofFn))
11808 const Loop *ContainingLoop = LI.getLoopFor(BB);
11810 if (ContainingLoop && ContainingLoop->
getHeader() == BB)
11814 for (std::pair<const BasicBlock *, const BasicBlock *> Pair(PredBB, BB);
11815 Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
11827 for (
auto &AssumeVH : AC.assumptions()) {
11831 if (!DT.dominates(CI, BB))
11834 if (ProveViaCond(CI->getArgOperand(0),
false))
11840 F.getParent(), Intrinsic::experimental_guard);
11842 for (
const auto *GU : GuardDecl->users())
11844 if (Guard->getFunction() == BB->
getParent() && DT.dominates(Guard, BB))
11845 if (ProveViaCond(Guard->getArgOperand(0),
false))
11860 "LHS is not available at Loop Entry");
11862 "RHS is not available at Loop Entry");
11864 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
11875 if (FoundCondValue ==
11879 if (!PendingLoopPredicates.insert(FoundCondValue).second)
11883 make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); });
11886 const Value *Op0, *Op1;
11889 return isImpliedCond(Pred,
LHS,
RHS, Op0,
Inverse, CtxI) ||
11893 return isImpliedCond(Pred,
LHS,
RHS, Op0, Inverse, CtxI) ||
11894 isImpliedCond(Pred,
LHS,
RHS, Op1, Inverse, CtxI);
11898 if (!ICI)
return false;
11902 CmpPredicate FoundPred;
11911 return isImpliedCond(Pred,
LHS,
RHS, FoundPred, FoundLHS, FoundRHS, CtxI);
11914bool ScalarEvolution::isImpliedCond(CmpPredicate Pred,
const SCEV *
LHS,
11915 const SCEV *
RHS, CmpPredicate FoundPred,
11916 const SCEV *FoundLHS,
const SCEV *FoundRHS,
11917 const Instruction *CtxI) {
11927 auto *WideType = FoundLHS->
getType();
11939 TruncFoundLHS, TruncFoundRHS, CtxI))
11965 return isImpliedCondBalancedTypes(Pred,
LHS,
RHS, FoundPred, FoundLHS,
11969bool ScalarEvolution::isImpliedCondBalancedTypes(
11970 CmpPredicate Pred,
const SCEV *
LHS,
const SCEV *
RHS, CmpPredicate FoundPred,
11971 const SCEV *FoundLHS,
const SCEV *FoundRHS,
const Instruction *CtxI) {
11974 "Types should be balanced!");
11981 if (FoundLHS == FoundRHS)
11985 if (
LHS == FoundRHS ||
RHS == FoundLHS) {
11997 return isImpliedCondOperands(*
P,
LHS,
RHS, FoundLHS, FoundRHS, CtxI);
12014 LHS, FoundLHS, FoundRHS, CtxI);
12016 return isImpliedCondOperands(*
P,
LHS,
RHS, FoundRHS, FoundLHS, CtxI);
12038 assert(P1 != P2 &&
"Handled earlier!");
12042 if (IsSignFlippedPredicate(Pred, FoundPred)) {
12046 return isImpliedCondOperands(Pred,
LHS,
RHS, FoundLHS, FoundRHS, CtxI);
12049 CmpPredicate CanonicalPred = Pred, CanonicalFoundPred = FoundPred;
12050 const SCEV *CanonicalLHS =
LHS, *CanonicalRHS =
RHS,
12051 *CanonicalFoundLHS = FoundLHS, *CanonicalFoundRHS = FoundRHS;
12056 std::swap(CanonicalFoundLHS, CanonicalFoundRHS);
12067 return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS,
12068 CanonicalRHS, CanonicalFoundLHS,
12069 CanonicalFoundRHS);
12074 return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS,
12075 CanonicalRHS, CanonicalFoundLHS,
12076 CanonicalFoundRHS);
12083 const SCEVConstant *
C =
nullptr;
12084 const SCEV *
V =
nullptr;
12102 if (Min ==
C->getAPInt()) {
12107 APInt SharperMin = Min + 1;
12110 case ICmpInst::ICMP_SGE:
12111 case ICmpInst::ICMP_UGE:
12114 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(SharperMin),
12119 case ICmpInst::ICMP_SGT:
12120 case ICmpInst::ICMP_UGT:
12130 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min), CtxI))
12135 case ICmpInst::ICMP_SLE:
12136 case ICmpInst::ICMP_ULE:
12137 if (isImpliedCondOperands(ICmpInst::getSwappedCmpPredicate(Pred), RHS,
12138 LHS, V, getConstant(SharperMin), CtxI))
12142 case ICmpInst::ICMP_SLT:
12143 case ICmpInst::ICMP_ULT:
12144 if (isImpliedCondOperands(ICmpInst::getSwappedCmpPredicate(Pred), RHS,
12145 LHS, V, getConstant(Min), CtxI))
12159 if (isImpliedCondOperands(Pred,
LHS,
RHS, FoundLHS, FoundRHS, CtxI))
12163 if (isImpliedCondOperands(FoundPred,
LHS,
RHS, FoundLHS, FoundRHS, CtxI))
12166 if (isImpliedCondOperandsViaRanges(Pred,
LHS,
RHS, FoundPred, FoundLHS, FoundRHS))
12173bool ScalarEvolution::splitBinaryAdd(
const SCEV *Expr,
12174 const SCEV *&L,
const SCEV *&R,
12183std::optional<APInt>
12190 APInt DiffMul(BW, 1);
12193 for (
unsigned I = 0;
I < 8; ++
I) {
12202 if (LAR->getLoop() != MAR->getLoop())
12203 return std::nullopt;
12207 if (!LAR->isAffine() || !MAR->isAffine())
12208 return std::nullopt;
12210 if (LAR->getStepRecurrence(*
this) != MAR->getStepRecurrence(*
this))
12211 return std::nullopt;
12213 Less = LAR->getStart();
12214 More = MAR->getStart();
12219 auto MatchConstMul =
12220 [](
const SCEV *S) -> std::optional<std::pair<const SCEV *, APInt>> {
12225 return std::nullopt;
12227 if (
auto MatchedMore = MatchConstMul(More)) {
12228 if (
auto MatchedLess = MatchConstMul(
Less)) {
12229 if (MatchedMore->second == MatchedLess->second) {
12230 More = MatchedMore->first;
12231 Less = MatchedLess->first;
12232 DiffMul *= MatchedMore->second;
12243 Diff +=
C->getAPInt() * DiffMul;
12246 Diff -=
C->getAPInt() * DiffMul;
12249 Multiplicity[S] +=
Mul;
12251 auto Decompose = [&](
const SCEV *S,
int Mul) {
12258 Decompose(More, 1);
12259 Decompose(
Less, -1);
12263 const SCEV *NewMore =
nullptr, *NewLess =
nullptr;
12264 for (
const auto &[S,
Mul] : Multiplicity) {
12269 return std::nullopt;
12271 }
else if (
Mul == -1) {
12273 return std::nullopt;
12276 return std::nullopt;
12280 if (NewMore == More || NewLess ==
Less)
12281 return std::nullopt;
12287 if (!More && !
Less)
12291 if (!More || !
Less)
12292 return std::nullopt;
12296 return std::nullopt;
12299bool ScalarEvolution::isImpliedCondOperandsViaAddRecStart(
12323 if (!L->contains(ContextBB) || !DT.
dominates(ContextBB, L->getLoopLatch()))
12334 if (!L->contains(ContextBB) || !DT.
dominates(ContextBB, L->getLoopLatch()))
12344bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow(CmpPredicate Pred,
12347 const SCEV *FoundLHS,
12348 const SCEV *FoundRHS) {
12357 if (!AddRecFoundLHS)
12364 const Loop *
L = AddRecFoundLHS->getLoop();
12365 if (L != AddRecLHS->getLoop())
12404 if (!RDiff || *LDiff != *RDiff)
12407 if (LDiff->isMinValue())
12410 APInt FoundRHSLimit;
12413 FoundRHSLimit = -(*RDiff);
12425bool ScalarEvolution::isImpliedViaMerge(CmpPredicate Pred,
const SCEV *
LHS,
12426 const SCEV *
RHS,
const SCEV *FoundLHS,
12427 const SCEV *FoundRHS,
unsigned Depth) {
12428 const PHINode *LPhi =
nullptr, *RPhi =
nullptr;
12432 bool Erased = PendingMerges.erase(LPhi);
12433 assert(Erased &&
"Failed to erase LPhi!");
12437 bool Erased = PendingMerges.erase(RPhi);
12438 assert(Erased &&
"Failed to erase RPhi!");
12446 if (!PendingMerges.insert(Phi).second)
12460 if (!PendingMerges.insert(Phi).second)
12466 if (!LPhi && !RPhi)
12477 assert(LPhi &&
"LPhi should definitely be a SCEVUnknown Phi!");
12481 auto ProvedEasily = [&](
const SCEV *
S1,
const SCEV *S2) {
12482 return isKnownViaNonRecursiveReasoning(Pred,
S1, S2) ||
12483 isImpliedCondOperandsViaRanges(Pred,
S1, S2, Pred, FoundLHS, FoundRHS) ||
12484 isImpliedViaOperations(Pred,
S1, S2, FoundLHS, FoundRHS,
Depth);
12487 if (RPhi && RPhi->getParent() == LBB) {
12494 const SCEV *
R =
getSCEV(RPhi->getIncomingValueForBlock(IncBB));
12495 if (!ProvedEasily(L, R))
12506 auto *RLoop = RAR->
getLoop();
12507 auto *Predecessor = RLoop->getLoopPredecessor();
12508 assert(Predecessor &&
"Loop with AddRec with no predecessor?");
12510 if (!ProvedEasily(L1, RAR->
getStart()))
12512 auto *Latch = RLoop->getLoopLatch();
12513 assert(Latch &&
"Loop with AddRec with no latch?");
12534 if (
auto *Loop = LI.getLoopFor(LBB))
12537 if (!ProvedEasily(L,
RHS))
12544bool ScalarEvolution::isImpliedCondOperandsViaShift(CmpPredicate Pred,
12547 const SCEV *FoundLHS,
12548 const SCEV *FoundRHS) {
12551 if (
RHS == FoundRHS) {
12556 if (
LHS != FoundLHS)
12563 Value *Shiftee, *ShiftValue;
12565 using namespace PatternMatch;
12566 if (
match(SUFoundRHS->getValue(),
12568 auto *ShifteeS =
getSCEV(Shiftee);
12586bool ScalarEvolution::isImpliedCondOperands(CmpPredicate Pred,
const SCEV *
LHS,
12588 const SCEV *FoundLHS,
12589 const SCEV *FoundRHS,
12590 const Instruction *CtxI) {
12591 return isImpliedCondOperandsViaRanges(Pred,
LHS,
RHS, Pred, FoundLHS,
12593 isImpliedCondOperandsViaNoOverflow(Pred,
LHS,
RHS, FoundLHS,
12595 isImpliedCondOperandsViaShift(Pred,
LHS,
RHS, FoundLHS, FoundRHS) ||
12596 isImpliedCondOperandsViaAddRecStart(Pred,
LHS,
RHS, FoundLHS, FoundRHS,
12598 isImpliedCondOperandsHelper(Pred,
LHS,
RHS, FoundLHS, FoundRHS);
12602template <
typename MinMaxExprType>
12604 const SCEV *Candidate) {
12609 return is_contained(MinMaxExpr->operands(), Candidate);
12622 const SCEV *LStart, *RStart, *Step;
12672bool ScalarEvolution::isImpliedViaOperations(CmpPredicate Pred,
const SCEV *
LHS,
12674 const SCEV *FoundLHS,
12675 const SCEV *FoundRHS,
12679 "LHS and RHS have different sizes?");
12682 "FoundLHS and FoundRHS have different sizes?");
12716 auto GetOpFromSExt = [&](
const SCEV *S) {
12718 return Ext->getOperand();
12725 auto *OrigLHS =
LHS;
12726 auto *OrigFoundLHS = FoundLHS;
12727 LHS = GetOpFromSExt(
LHS);
12728 FoundLHS = GetOpFromSExt(FoundLHS);
12731 auto IsSGTViaContext = [&](
const SCEV *
S1,
const SCEV *S2) {
12734 FoundRHS,
Depth + 1);
12747 if (!LHSAddExpr->hasNoSignedWrap())
12750 auto *LL = LHSAddExpr->getOperand(0);
12751 auto *LR = LHSAddExpr->getOperand(1);
12755 auto IsSumGreaterThanRHS = [&](
const SCEV *
S1,
const SCEV *S2) {
12756 return IsSGTViaContext(
S1, MinusOne) && IsSGTViaContext(S2,
RHS);
12761 if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
12767 using namespace llvm::PatternMatch;
12786 if (!Numerator || Numerator->getType() != FoundLHS->
getType())
12794 auto *DTy = Denominator->getType();
12795 auto *FRHSTy = FoundRHS->
getType();
12796 if (DTy->isPointerTy() != FRHSTy->isPointerTy())
12815 IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
12826 auto *NegDenomMinusOne =
getMinusSCEV(MinusOne, DenominatorExt);
12828 IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
12836 if (isImpliedViaMerge(Pred, OrigLHS,
RHS, OrigFoundLHS, FoundRHS,
Depth + 1))
12869bool ScalarEvolution::isKnownViaNonRecursiveReasoning(CmpPredicate Pred,
12873 isKnownPredicateViaConstantRanges(Pred,
LHS,
RHS) ||
12876 isKnownPredicateViaNoOverflow(Pred,
LHS,
RHS);
12879bool ScalarEvolution::isImpliedCondOperandsHelper(CmpPredicate Pred,
12882 const SCEV *FoundLHS,
12883 const SCEV *FoundRHS) {
12919 if (isImpliedViaOperations(Pred,
LHS,
RHS, FoundLHS, FoundRHS))
12925bool ScalarEvolution::isImpliedCondOperandsViaRanges(
12926 CmpPredicate Pred,
const SCEV *
LHS,
const SCEV *
RHS, CmpPredicate FoundPred,
12927 const SCEV *FoundLHS,
const SCEV *FoundRHS) {
12941 ConstantRange FoundLHSRange =
12945 ConstantRange LHSRange = FoundLHSRange.
add(ConstantRange(*Addend));
12952 return LHSRange.
icmp(Pred, ConstRHS);
12955bool ScalarEvolution::canIVOverflowOnLT(
const SCEV *
RHS,
const SCEV *Stride,
12968 return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS);
12976 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS);
12979bool ScalarEvolution::canIVOverflowOnGT(
const SCEV *
RHS,
const SCEV *Stride,
12991 return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS);
12999 return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS);
13011const SCEV *ScalarEvolution::computeMaxBECountForLT(
const SCEV *Start,
13012 const SCEV *Stride,
13043 APInt Limit = MaxValue - (StrideForMaxBECount - 1);
13054 :
APIntOps::umax(MaxEnd, MinStart);
13061ScalarEvolution::howManyLessThans(
const SCEV *
LHS,
const SCEV *
RHS,
13062 const Loop *L,
bool IsSigned,
13063 bool ControlsOnlyExit,
bool AllowPredicates) {
13067 bool PredicatedIV =
false;
13072 auto canProveNUW = [&]() {
13075 if (!ControlsOnlyExit)
13096 Limit = Limit.
zext(OuterBitWidth);
13108 Type *Ty = ZExt->getType();
13119 if (!
IV && AllowPredicates) {
13124 PredicatedIV =
true;
13128 if (!
IV ||
IV->getLoop() != L || !
IV->isAffine())
13142 bool NoWrap = ControlsOnlyExit &&
IV->getNoWrapFlags(WrapType);
13145 const SCEV *Stride =
IV->getStepRecurrence(*
this);
13150 if (!PositiveStride) {
13202 auto wouldZeroStrideBeUB = [&]() {
13214 if (!wouldZeroStrideBeUB()) {
13218 }
else if (!NoWrap) {
13221 if (canIVOverflowOnLT(
RHS, Stride, IsSigned))
13234 const SCEV *
Start =
IV->getStart();
13240 const SCEV *OrigStart =
Start;
13241 const SCEV *OrigRHS =
RHS;
13242 if (
Start->getType()->isPointerTy()) {
13253 const SCEV *End =
nullptr, *BECount =
nullptr,
13254 *BECountIfBackedgeTaken =
nullptr;
13257 if (PositiveStride && RHSAddRec !=
nullptr && RHSAddRec->getLoop() == L &&
13258 RHSAddRec->getNoWrapFlags()) {
13271 const SCEV *RHSStart = RHSAddRec->getStart();
13272 const SCEV *RHSStride = RHSAddRec->getStepRecurrence(*
this);
13284 const SCEV *Denominator =
getMinusSCEV(Stride, RHSStride);
13293 BECountIfBackedgeTaken =
13298 if (BECount ==
nullptr) {
13303 const SCEV *MaxBECount = computeMaxBECountForLT(
13306 MaxBECount,
false , Predicates);
13313 auto *OrigStartMinusStride =
getMinusSCEV(OrigStart, Stride);
13340 const SCEV *Numerator =
13346 auto canProveRHSGreaterThanEqualStart = [&]() {
13365 auto *StartMinusOne =
13372 if (canProveRHSGreaterThanEqualStart()) {
13387 BECountIfBackedgeTaken =
13403 bool MayAddOverflow = [&] {
13449 if (Start == Stride || Start ==
getMinusSCEV(Stride, One)) {
13463 if (!MayAddOverflow) {
13475 const SCEV *ConstantMaxBECount;
13476 bool MaxOrZero =
false;
13478 ConstantMaxBECount = BECount;
13479 }
else if (BECountIfBackedgeTaken &&
13484 ConstantMaxBECount = BECountIfBackedgeTaken;
13487 ConstantMaxBECount = computeMaxBECountForLT(
13495 const SCEV *SymbolicMaxBECount =
13497 return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount, MaxOrZero,
13501ScalarEvolution::ExitLimit ScalarEvolution::howManyGreaterThans(
13502 const SCEV *
LHS,
const SCEV *
RHS,
const Loop *L,
bool IsSigned,
13503 bool ControlsOnlyExit,
bool AllowPredicates) {
13510 if (!
IV && AllowPredicates)
13517 if (!
IV ||
IV->getLoop() != L || !
IV->isAffine())
13521 bool NoWrap = ControlsOnlyExit &&
IV->getNoWrapFlags(WrapType);
13534 if (!Stride->
isOne() && !NoWrap)
13535 if (canIVOverflowOnGT(
RHS, Stride, IsSigned))
13538 const SCEV *
Start =
IV->getStart();
13539 const SCEV *End =
RHS;
13550 if (
Start->getType()->isPointerTy()) {
13585 const SCEV *ConstantMaxBECount =
13592 ConstantMaxBECount = BECount;
13593 const SCEV *SymbolicMaxBECount =
13596 return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount,
false,
13602 if (
Range.isFullSet())
13607 if (!SC->getValue()->isZero()) {
13613 return ShiftedAddRec->getNumIterationsInRange(
13614 Range.subtract(SC->getAPInt()), SE);
13645 APInt ExitVal = (End +
A).udiv(
A);
13658 ConstantInt::get(SE.
getContext(), ExitVal - 1), SE)->getValue()) &&
13659 "Linear scev computation is off in a bad way!");
13690 assert(!
Last->isZero() &&
"Recurrency with zero step?");
13715 Ty = Store->getValueOperand()->getType();
13717 Ty = Load->getType();
13730 assert(SE &&
"SCEVCallbackVH called with a null ScalarEvolution!");
13732 SE->ConstantEvolutionLoopExitValue.erase(PN);
13733 SE->eraseValueFromMap(getValPtr());
13737void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(
Value *V) {
13738 assert(SE &&
"SCEVCallbackVH called with a null ScalarEvolution!");
13748 : CallbackVH(
V), SE(se) {}
13757 : F(F), DL(F.
getDataLayout()), TLI(TLI), AC(AC), DT(DT), LI(LI),
13759 LoopDispositions(64), BlockDispositions(64) {
13771 F.getParent(), Intrinsic::experimental_guard);
13772 HasGuards = GuardDecl && !GuardDecl->use_empty();
13776 : F(Arg.F), DL(Arg.DL), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC),
13777 DT(Arg.DT), LI(Arg.LI), CouldNotCompute(
std::
move(Arg.CouldNotCompute)),
13778 ValueExprMap(
std::
move(Arg.ValueExprMap)),
13779 PendingLoopPredicates(
std::
move(Arg.PendingLoopPredicates)),
13780 PendingPhiRanges(
std::
move(Arg.PendingPhiRanges)),
13781 PendingMerges(
std::
move(Arg.PendingMerges)),
13782 ConstantMultipleCache(
std::
move(Arg.ConstantMultipleCache)),
13783 BackedgeTakenCounts(
std::
move(Arg.BackedgeTakenCounts)),
13784 PredicatedBackedgeTakenCounts(
13785 std::
move(Arg.PredicatedBackedgeTakenCounts)),
13786 BECountUsers(
std::
move(Arg.BECountUsers)),
13787 ConstantEvolutionLoopExitValue(
13788 std::
move(Arg.ConstantEvolutionLoopExitValue)),
13789 ValuesAtScopes(
std::
move(Arg.ValuesAtScopes)),
13790 ValuesAtScopesUsers(
std::
move(Arg.ValuesAtScopesUsers)),
13791 LoopDispositions(
std::
move(Arg.LoopDispositions)),
13792 LoopPropertiesCache(
std::
move(Arg.LoopPropertiesCache)),
13793 BlockDispositions(
std::
move(Arg.BlockDispositions)),
13794 SCEVUsers(
std::
move(Arg.SCEVUsers)),
13795 UnsignedRanges(
std::
move(Arg.UnsignedRanges)),
13796 SignedRanges(
std::
move(Arg.SignedRanges)),
13797 UniqueSCEVs(
std::
move(Arg.UniqueSCEVs)),
13798 UniquePreds(
std::
move(Arg.UniquePreds)),
13799 SCEVAllocator(
std::
move(Arg.SCEVAllocator)),
13800 LoopUsers(
std::
move(Arg.LoopUsers)),
13801 PredicatedSCEVRewrites(
std::
move(Arg.PredicatedSCEVRewrites)),
13802 FirstUnknown(Arg.FirstUnknown) {
13803 Arg.FirstUnknown =
nullptr;
13812 Tmp->~SCEVUnknown();
13814 FirstUnknown =
nullptr;
13816 ExprValueMap.clear();
13817 ValueExprMap.clear();
13819 BackedgeTakenCounts.clear();
13820 PredicatedBackedgeTakenCounts.clear();
13822 assert(PendingLoopPredicates.empty() &&
"isImpliedCond garbage");
13823 assert(PendingPhiRanges.empty() &&
"getRangeRef garbage");
13824 assert(PendingMerges.empty() &&
"isImpliedViaMerge garbage");
13825 assert(!WalkingBEDominatingConds &&
"isLoopBackedgeGuardedByCond garbage!");
13826 assert(!ProvingSplitPredicate &&
"ProvingSplitPredicate garbage!");
13848 L->getHeader()->printAsOperand(OS,
false);
13852 L->getExitingBlocks(ExitingBlocks);
13853 if (ExitingBlocks.
size() != 1)
13854 OS <<
"<multiple exits> ";
13858 OS <<
"backedge-taken count is ";
13861 OS <<
"Unpredictable backedge-taken count.";
13864 if (ExitingBlocks.
size() > 1)
13865 for (
BasicBlock *ExitingBlock : ExitingBlocks) {
13866 OS <<
" exit count for " << ExitingBlock->
getName() <<
": ";
13874 OS <<
"\n predicated exit count for " << ExitingBlock->
getName()
13877 OS <<
"\n Predicates:\n";
13878 for (
const auto *
P : Predicates)
13886 L->getHeader()->printAsOperand(OS,
false);
13891 OS <<
"constant max backedge-taken count is ";
13894 OS <<
", actual taken count either this or zero.";
13896 OS <<
"Unpredictable constant max backedge-taken count. ";
13901 L->getHeader()->printAsOperand(OS,
false);
13906 OS <<
"symbolic max backedge-taken count is ";
13909 OS <<
", actual taken count either this or zero.";
13911 OS <<
"Unpredictable symbolic max backedge-taken count. ";
13915 if (ExitingBlocks.
size() > 1)
13916 for (
BasicBlock *ExitingBlock : ExitingBlocks) {
13917 OS <<
" symbolic max exit count for " << ExitingBlock->
getName() <<
": ";
13927 OS <<
"\n predicated symbolic max exit count for "
13928 << ExitingBlock->
getName() <<
": ";
13930 OS <<
"\n Predicates:\n";
13931 for (
const auto *
P : Predicates)
13941 assert(!Preds.
empty() &&
"Different predicated BTC, but no predicates");
13943 L->getHeader()->printAsOperand(OS,
false);
13946 OS <<
"Predicated backedge-taken count is ";
13949 OS <<
"Unpredictable predicated backedge-taken count.";
13951 OS <<
" Predicates:\n";
13952 for (
const auto *
P : Preds)
13957 auto *PredConstantMax =
13959 if (PredConstantMax != ConstantBTC) {
13961 "different predicated constant max BTC but no predicates");
13963 L->getHeader()->printAsOperand(OS,
false);
13966 OS <<
"Predicated constant max backedge-taken count is ";
13969 OS <<
"Unpredictable predicated constant max backedge-taken count.";
13971 OS <<
" Predicates:\n";
13972 for (
const auto *
P : Preds)
13977 auto *PredSymbolicMax =
13979 if (SymbolicBTC != PredSymbolicMax) {
13981 "Different predicated symbolic max BTC, but no predicates");
13983 L->getHeader()->printAsOperand(OS,
false);
13986 OS <<
"Predicated symbolic max backedge-taken count is ";
13989 OS <<
"Unpredictable predicated symbolic max backedge-taken count.";
13991 OS <<
" Predicates:\n";
13992 for (
const auto *
P : Preds)
13998 L->getHeader()->printAsOperand(OS,
false);
14022 OS <<
"Computable";
14032 OS <<
"DoesNotDominate";
14038 OS <<
"ProperlyDominates";
14055 OS <<
"Classifying expressions for: ";
14056 F.printAsOperand(OS,
false);
14071 const Loop *L = LI.getLoopFor(
I.getParent());
14086 OS <<
"\t\t" "Exits: ";
14089 OS <<
"<<Unknown>>";
14095 for (
const auto *Iter = L; Iter; Iter = Iter->getParentLoop()) {
14097 OS <<
"\t\t" "LoopDispositions: { ";
14103 Iter->getHeader()->printAsOperand(OS,
false);
14111 OS <<
"\t\t" "LoopDispositions: { ";
14117 InnerL->getHeader()->printAsOperand(OS,
false);
14128 OS <<
"Determining loop execution counts for: ";
14129 F.printAsOperand(OS,
false);
14137 auto &Values = LoopDispositions[S];
14138 for (
auto &V : Values) {
14139 if (V.getPointer() == L)
14144 auto &Values2 = LoopDispositions[S];
14146 if (V.getPointer() == L) {
14155ScalarEvolution::computeLoopDisposition(
const SCEV *S,
const Loop *L) {
14174 assert(!L->contains(AR->
getLoop()) &&
"Containing loop's header does not"
14175 " dominate the contained loop's header?");
14202 bool HasVarying =
false;
14236 auto &Values = BlockDispositions[S];
14237 for (
auto &V : Values) {
14238 if (V.getPointer() == BB)
14243 auto &Values2 = BlockDispositions[S];
14245 if (V.getPointer() == BB) {
14254ScalarEvolution::computeBlockDisposition(
const SCEV *S,
const BasicBlock *BB) {
14283 bool Proper =
true;
14294 if (Instruction *
I =
14296 if (
I->getParent() == BB)
14298 if (DT.properlyDominates(
I->getParent(), BB))
14321void ScalarEvolution::forgetBackedgeTakenCounts(
const Loop *L,
14324 Predicated ? PredicatedBackedgeTakenCounts : BackedgeTakenCounts;
14325 auto It = BECounts.find(L);
14326 if (It != BECounts.end()) {
14327 for (
const ExitNotTakenInfo &ENT : It->second.ExitNotTaken) {
14328 for (
const SCEV *S : {ENT.ExactNotTaken, ENT.SymbolicMaxNotTaken}) {
14330 auto UserIt = BECountUsers.find(S);
14331 assert(UserIt != BECountUsers.end());
14336 BECounts.erase(It);
14344 while (!Worklist.
empty()) {
14346 auto Users = SCEVUsers.find(Curr);
14347 if (
Users != SCEVUsers.end())
14348 for (
const auto *User :
Users->second)
14349 if (ToForget.
insert(User).second)
14353 for (
const auto *S : ToForget)
14354 forgetMemoizedResultsImpl(S);
14356 for (
auto I = PredicatedSCEVRewrites.begin();
14357 I != PredicatedSCEVRewrites.end();) {
14358 std::pair<const SCEV *, const Loop *>
Entry =
I->first;
14359 if (ToForget.count(
Entry.first))
14360 PredicatedSCEVRewrites.erase(
I++);
14366void ScalarEvolution::forgetMemoizedResultsImpl(
const SCEV *S) {
14367 LoopDispositions.erase(S);
14368 BlockDispositions.erase(S);
14369 UnsignedRanges.erase(S);
14370 SignedRanges.erase(S);
14371 HasRecMap.erase(S);
14372 ConstantMultipleCache.erase(S);
14375 UnsignedWrapViaInductionTried.erase(AR);
14376 SignedWrapViaInductionTried.erase(AR);
14379 auto ExprIt = ExprValueMap.find(S);
14380 if (ExprIt != ExprValueMap.end()) {
14381 for (
Value *V : ExprIt->second) {
14382 auto ValueIt = ValueExprMap.find_as(V);
14383 if (ValueIt != ValueExprMap.end())
14384 ValueExprMap.erase(ValueIt);
14386 ExprValueMap.erase(ExprIt);
14389 auto ScopeIt = ValuesAtScopes.find(S);
14390 if (ScopeIt != ValuesAtScopes.end()) {
14391 for (
const auto &Pair : ScopeIt->second)
14394 std::make_pair(Pair.first, S));
14395 ValuesAtScopes.erase(ScopeIt);
14398 auto ScopeUserIt = ValuesAtScopesUsers.find(S);
14399 if (ScopeUserIt != ValuesAtScopesUsers.end()) {
14400 for (
const auto &Pair : ScopeUserIt->second)
14401 llvm::erase(ValuesAtScopes[Pair.second], std::make_pair(Pair.first, S));
14402 ValuesAtScopesUsers.erase(ScopeUserIt);
14405 auto BEUsersIt = BECountUsers.find(S);
14406 if (BEUsersIt != BECountUsers.end()) {
14408 auto Copy = BEUsersIt->second;
14409 for (
const auto &Pair : Copy)
14410 forgetBackedgeTakenCounts(Pair.getPointer(), Pair.getInt());
14411 BECountUsers.erase(BEUsersIt);
14414 auto FoldUser = FoldCacheUser.find(S);
14415 if (FoldUser != FoldCacheUser.end())
14416 for (
auto &KV : FoldUser->second)
14417 FoldCache.erase(KV);
14418 FoldCacheUser.erase(S);
14422ScalarEvolution::getUsedLoops(
const SCEV *S,
14424 struct FindUsedLoops {
14425 FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed)
14426 : LoopsUsed(LoopsUsed) {}
14427 SmallPtrSetImpl<const Loop *> &LoopsUsed;
14428 bool follow(
const SCEV *S) {
14434 bool isDone()
const {
return false; }
14437 FindUsedLoops
F(LoopsUsed);
14438 SCEVTraversal<FindUsedLoops>(F).visitAll(S);
14441void ScalarEvolution::getReachableBlocks(
14444 Worklist.
push_back(&F.getEntryBlock());
14445 while (!Worklist.
empty()) {
14447 if (!Reachable.
insert(BB).second)
14455 Worklist.
push_back(
C->isOne() ? TrueBB : FalseBB);
14462 if (isKnownPredicateViaConstantRanges(
Cmp->getCmpPredicate(), L, R)) {
14466 if (isKnownPredicateViaConstantRanges(
Cmp->getInverseCmpPredicate(), L,
14501 SCEVMapper SCM(SE2);
14503 SE2.getReachableBlocks(ReachableBlocks, F);
14505 auto GetDelta = [&](
const SCEV *Old,
const SCEV *New) ->
const SCEV * {
14523 while (!LoopStack.
empty()) {
14529 if (!ReachableBlocks.
contains(L->getHeader()))
14534 auto It = BackedgeTakenCounts.find(L);
14535 if (It == BackedgeTakenCounts.end())
14539 SCM.visit(It->second.getExact(L,
const_cast<ScalarEvolution *
>(
this)));
14559 const SCEV *Delta = GetDelta(CurBECount, NewBECount);
14560 if (Delta && !Delta->
isZero()) {
14561 dbgs() <<
"Trip Count for " << *L <<
" Changed!\n";
14562 dbgs() <<
"Old: " << *CurBECount <<
"\n";
14563 dbgs() <<
"New: " << *NewBECount <<
"\n";
14564 dbgs() <<
"Delta: " << *Delta <<
"\n";
14572 while (!Worklist.
empty()) {
14574 if (ValidLoops.
insert(L).second)
14575 Worklist.
append(L->begin(), L->end());
14577 for (
const auto &KV : ValueExprMap) {
14582 "AddRec references invalid loop");
14587 auto It = ExprValueMap.find(KV.second);
14588 if (It == ExprValueMap.end() || !It->second.contains(KV.first)) {
14589 dbgs() <<
"Value " << *KV.first
14590 <<
" is in ValueExprMap but not in ExprValueMap\n";
14595 if (!ReachableBlocks.
contains(
I->getParent()))
14597 const SCEV *OldSCEV = SCM.visit(KV.second);
14599 const SCEV *Delta = GetDelta(OldSCEV, NewSCEV);
14600 if (Delta && !Delta->
isZero()) {
14601 dbgs() <<
"SCEV for value " << *
I <<
" changed!\n"
14602 <<
"Old: " << *OldSCEV <<
"\n"
14603 <<
"New: " << *NewSCEV <<
"\n"
14604 <<
"Delta: " << *Delta <<
"\n";
14610 for (
const auto &KV : ExprValueMap) {
14611 for (
Value *V : KV.second) {
14612 const SCEV *S = ValueExprMap.lookup(V);
14614 dbgs() <<
"Value " << *V
14615 <<
" is in ExprValueMap but not in ValueExprMap\n";
14618 if (S != KV.first) {
14619 dbgs() <<
"Value " << *V <<
" mapped to " << *S <<
" rather than "
14620 << *KV.first <<
"\n";
14627 for (
const auto &S : UniqueSCEVs) {
14632 auto It = SCEVUsers.find(
Op);
14633 if (It != SCEVUsers.end() && It->second.count(&S))
14635 dbgs() <<
"Use of operand " << *
Op <<
" by user " << S
14636 <<
" is not being tracked!\n";
14642 for (
const auto &ValueAndVec : ValuesAtScopes) {
14644 for (
const auto &LoopAndValueAtScope : ValueAndVec.second) {
14645 const Loop *L = LoopAndValueAtScope.first;
14646 const SCEV *ValueAtScope = LoopAndValueAtScope.second;
14648 auto It = ValuesAtScopesUsers.find(ValueAtScope);
14649 if (It != ValuesAtScopesUsers.end() &&
14652 dbgs() <<
"Value: " << *
Value <<
", Loop: " << *L <<
", ValueAtScope: "
14653 << *ValueAtScope <<
" missing in ValuesAtScopesUsers\n";
14659 for (
const auto &ValueAtScopeAndVec : ValuesAtScopesUsers) {
14660 const SCEV *ValueAtScope = ValueAtScopeAndVec.first;
14661 for (
const auto &LoopAndValue : ValueAtScopeAndVec.second) {
14662 const Loop *L = LoopAndValue.first;
14663 const SCEV *
Value = LoopAndValue.second;
14665 auto It = ValuesAtScopes.find(
Value);
14666 if (It != ValuesAtScopes.end() &&
14667 is_contained(It->second, std::make_pair(L, ValueAtScope)))
14669 dbgs() <<
"Value: " << *
Value <<
", Loop: " << *L <<
", ValueAtScope: "
14670 << *ValueAtScope <<
" missing in ValuesAtScopes\n";
14676 auto VerifyBECountUsers = [&](
bool Predicated) {
14678 Predicated ? PredicatedBackedgeTakenCounts : BackedgeTakenCounts;
14679 for (
const auto &LoopAndBEInfo : BECounts) {
14680 for (
const ExitNotTakenInfo &ENT : LoopAndBEInfo.second.ExitNotTaken) {
14681 for (
const SCEV *S : {ENT.ExactNotTaken, ENT.SymbolicMaxNotTaken}) {
14683 auto UserIt = BECountUsers.find(S);
14684 if (UserIt != BECountUsers.end() &&
14685 UserIt->second.contains({ LoopAndBEInfo.first, Predicated }))
14687 dbgs() <<
"Value " << *S <<
" for loop " << *LoopAndBEInfo.first
14688 <<
" missing from BECountUsers\n";
14695 VerifyBECountUsers(
false);
14696 VerifyBECountUsers(
true);
14699 for (
auto &[S, Values] : LoopDispositions) {
14700 for (
auto [
Loop, CachedDisposition] : Values) {
14702 if (CachedDisposition != RecomputedDisposition) {
14703 dbgs() <<
"Cached disposition of " << *S <<
" for loop " << *
Loop
14704 <<
" is incorrect: cached " << CachedDisposition <<
", actual "
14705 << RecomputedDisposition <<
"\n";
14712 for (
auto &[S, Values] : BlockDispositions) {
14713 for (
auto [BB, CachedDisposition] : Values) {
14715 if (CachedDisposition != RecomputedDisposition) {
14716 dbgs() <<
"Cached disposition of " << *S <<
" for block %"
14717 << BB->
getName() <<
" is incorrect: cached " << CachedDisposition
14718 <<
", actual " << RecomputedDisposition <<
"\n";
14725 for (
auto [
FoldID, Expr] : FoldCache) {
14726 auto I = FoldCacheUser.find(Expr);
14727 if (
I == FoldCacheUser.end()) {
14728 dbgs() <<
"Missing entry in FoldCacheUser for cached expression " << *Expr
14733 dbgs() <<
"Missing FoldID in cached users of " << *Expr <<
"!\n";
14737 for (
auto [Expr, IDs] : FoldCacheUser) {
14738 for (
auto &
FoldID : IDs) {
14741 dbgs() <<
"Missing entry in FoldCache for expression " << *Expr
14746 dbgs() <<
"Entry in FoldCache doesn't match FoldCacheUser: " << *S
14747 <<
" != " << *Expr <<
"!\n";
14758 for (
auto [S, Multiple] : ConstantMultipleCache) {
14760 if ((Multiple != 0 && RecomputedMultiple != 0 &&
14761 Multiple.
urem(RecomputedMultiple) != 0 &&
14762 RecomputedMultiple.
urem(Multiple) != 0)) {
14763 dbgs() <<
"Incorrect cached computation in ConstantMultipleCache for "
14764 << *S <<
" : Computed " << RecomputedMultiple
14765 <<
" but cache contains " << Multiple <<
"!\n";
14773 FunctionAnalysisManager::Invalidator &Inv) {
14805 OS <<
"Printing analysis 'Scalar Evolution Analysis' for function '"
14806 <<
F.getName() <<
"':\n";
14812 "Scalar Evolution Analysis",
false,
true)
14861 const SCEV *LHS,
const SCEV *RHS) {
14863 assert(LHS->getType() == RHS->getType() &&
14864 "Type mismatch between LHS and RHS");
14867 ID.AddInteger(Pred);
14868 ID.AddPointer(LHS);
14869 ID.AddPointer(RHS);
14870 void *IP =
nullptr;
14871 if (
const auto *S = UniquePreds.FindNodeOrInsertPos(
ID, IP))
14875 UniquePreds.InsertNode(Eq, IP);
14886 ID.AddInteger(AddedFlags);
14887 void *IP =
nullptr;
14888 if (
const auto *S = UniquePreds.FindNodeOrInsertPos(
ID, IP))
14890 auto *OF =
new (SCEVAllocator)
14892 UniquePreds.InsertNode(OF, IP);
14912 SCEVPredicateRewriter
Rewriter(L, SE, NewPreds, Pred);
14913 return Rewriter.visit(S);
14919 for (
const auto *Pred : U->getPredicates())
14921 if (IPred->getLHS() == Expr &&
14923 return IPred->getRHS();
14925 if (IPred->getLHS() == Expr &&
14926 IPred->getPredicate() == ICmpInst::ICMP_EQ)
14927 return IPred->getRHS();
14930 return convertToAddRecWithPreds(Expr);
14933 const SCEV *visitZeroExtendExpr(
const SCEVZeroExtendExpr *Expr) {
14949 const SCEV *visitSignExtendExpr(
const SCEVSignExtendExpr *Expr) {
14966 explicit SCEVPredicateRewriter(
14967 const Loop *L, ScalarEvolution &SE,
14968 SmallVectorImpl<const SCEVPredicate *> *NewPreds,
14969 const SCEVPredicate *Pred)
14970 : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred),
L(
L) {}
14972 bool addOverflowAssumption(
const SCEVPredicate *
P) {
14975 return Pred && Pred->
implies(
P, SE);
14981 bool addOverflowAssumption(
const SCEVAddRecExpr *AR,
14984 return addOverflowAssumption(
A);
14993 const SCEV *convertToAddRecWithPreds(
const SCEVUnknown *Expr) {
14997 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
14999 if (!PredicatedRewrite)
15001 for (
const auto *
P : PredicatedRewrite->second){
15004 if (L != WP->getExpr()->getLoop())
15007 if (!addOverflowAssumption(
P))
15010 return PredicatedRewrite->first;
15013 SmallVectorImpl<const SCEVPredicate *> *NewPreds;
15014 const SCEVPredicate *Pred;
15023 return SCEVPredicateRewriter::rewrite(S, L, *
this,
nullptr, &Preds);
15030 S = SCEVPredicateRewriter::rewrite(S, L, *
this, &TransformPreds,
nullptr);
15050 if (!Step->
isOne())
15075 assert(LHS->getType() == RHS->getType() &&
"LHS and RHS types don't match");
15076 assert(LHS != RHS &&
"LHS and RHS are the same SCEV");
15089 return Op->LHS == LHS &&
Op->RHS == RHS;
15096 OS.
indent(
Depth) <<
"Equal predicate: " << *LHS <<
" == " << *RHS <<
"\n";
15098 OS.
indent(
Depth) <<
"Compare predicate: " << *LHS <<
" " << Pred <<
") "
15123 const SCEV *Start = AR->getStart();
15124 const SCEV *OpStart =
Op->AR->getStart();
15129 if (Start->getType()->isPointerTy() && Start->getType() != OpStart->
getType())
15132 const SCEV *Step = AR->getStepRecurrence(SE);
15133 const SCEV *OpStep =
Op->AR->getStepRecurrence(SE);
15186 if (Step->getValue()->getValue().isNonNegative())
15190 return ImpliedFlags;
15197 for (
const auto *
P : Preds)
15210 return this->implies(I, SE);
15218 for (
const auto *Pred : Preds)
15219 Pred->print(OS,
Depth);
15224 for (
const auto *Pred : Set->Preds)
15232 bool CheckImplies = Preds.
size() < 16;
15235 if (CheckImplies &&
implies(
N, SE))
15241 for (
auto *
P : Preds) {
15242 if (CheckImplies &&
N->implies(
P, SE))
15246 Preds = std::move(PrunedPreds);
15247 Preds.push_back(
N);
15254 Preds = std::make_unique<SCEVUnionPredicate>(
Empty, SE);
15259 for (
const auto *
Op :
Ops)
15264 SCEVUsers[
Op].insert(
User);
15268 const SCEV *Expr = SE.getSCEV(V);
15269 RewriteEntry &Entry = RewriteMap[Expr];
15272 if (Entry.second && Generation == Entry.first)
15273 return Entry.second;
15278 Expr = Entry.second;
15280 const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, *Preds);
15281 Entry = {Generation, NewSCEV};
15287 if (!BackedgeCount) {
15289 BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, Preds);
15290 for (
const auto *
P : Preds)
15293 return BackedgeCount;
15297 if (!SymbolicMaxBackedgeCount) {
15299 SymbolicMaxBackedgeCount =
15300 SE.getPredicatedSymbolicMaxBackedgeTakenCount(&L, Preds);
15301 for (
const auto *
P : Preds)
15304 return SymbolicMaxBackedgeCount;
15308 if (!SmallConstantMaxTripCount) {
15310 SmallConstantMaxTripCount = SE.getSmallConstantMaxTripCount(&L, &Preds);
15311 for (
const auto *
P : Preds)
15314 return *SmallConstantMaxTripCount;
15318 if (Preds->implies(&Pred, SE))
15323 Preds = std::make_unique<SCEVUnionPredicate>(NewPreds, SE);
15324 updateGeneration();
15331void PredicatedScalarEvolution::updateGeneration() {
15333 if (++Generation == 0) {
15334 for (
auto &
II : RewriteMap) {
15335 const SCEV *Rewritten =
II.second.second;
15352 auto II = FlagsMap.insert({V, Flags});
15365 auto II = FlagsMap.find(V);
15367 if (
II != FlagsMap.end())
15376 auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds);
15381 for (
const auto *
P : NewPreds)
15384 RewriteMap[SE.getSCEV(V)] = {Generation, New};
15390 : RewriteMap(
Init.RewriteMap), SE(
Init.SE), L(
Init.L),
15393 Generation(
Init.Generation), BackedgeCount(
Init.BackedgeCount) {
15394 for (
auto I :
Init.FlagsMap)
15395 FlagsMap.insert(
I);
15400 for (
auto *BB : L.getBlocks())
15401 for (
auto &
I : *BB) {
15402 if (!SE.isSCEVable(
I.getType()))
15405 auto *Expr = SE.getSCEV(&
I);
15406 auto II = RewriteMap.find(Expr);
15408 if (
II == RewriteMap.end())
15412 if (
II->second.second == Expr)
15417 OS.
indent(
Depth + 2) <<
"--> " << *
II->second.second <<
"\n";
15425 LoopGuards Guards(SE);
15433void ScalarEvolution::LoopGuards::collectFromPHI(
15441 using MinMaxPattern = std::pair<const SCEVConstant *, SCEVTypes>;
15442 auto GetMinMaxConst = [&](
unsigned IncomingIdx) -> MinMaxPattern {
15456 auto &RewriteMap =
G->second.RewriteMap;
15457 if (RewriteMap.empty())
15459 auto S = RewriteMap.find(SE.
getSCEV(
Phi.getIncomingValue(IncomingIdx)));
15460 if (S == RewriteMap.end())
15466 return {C0, SM->getSCEVType()};
15469 auto MergeMinMaxConst = [](MinMaxPattern
P1,
15470 MinMaxPattern P2) -> MinMaxPattern {
15471 auto [C1,
T1] =
P1;
15472 auto [C2, T2] = P2;
15473 if (!C1 || !C2 ||
T1 != T2)
15477 return {C1->getAPInt().
ult(C2->getAPInt()) ? C1 : C2,
T1};
15479 return {C1->getAPInt().
slt(C2->getAPInt()) ? C1 : C2,
T1};
15481 return {C1->getAPInt().
ugt(C2->getAPInt()) ? C1 : C2,
T1};
15483 return {C1->getAPInt().
sgt(C2->getAPInt()) ? C1 : C2,
T1};
15488 auto P = GetMinMaxConst(0);
15489 for (
unsigned int In = 1;
In <
Phi.getNumIncomingValues();
In++) {
15492 P = MergeMinMaxConst(
P, GetMinMaxConst(In));
15495 const SCEV *
LHS = SE.
getSCEV(
const_cast<PHINode *
>(&Phi));
15498 Guards.RewriteMap.insert({
LHS,
RHS});
15506 const APInt &DivisorVal,
15508 const APInt *ExprVal;
15521 const APInt &DivisorVal,
15523 const APInt *ExprVal;
15531 return SE.
getConstant(*ExprVal + DivisorVal - Rem);
15545 const SCEV *URemRHS =
nullptr;
15549 const SCEV *Multiple =
15551 DivInfo[URemLHS] = Multiple;
15553 Multiples[URemLHS] =
C->getAPInt();
15573 auto IsMinMaxSCEVWithNonNegativeConstant =
15577 if (
MinMax->getNumOperands() != 2)
15580 if (
C->getAPInt().isNegative())
15582 SCTy =
MinMax->getSCEVType();
15591 const SCEV *MinMaxLHS =
nullptr, *MinMaxRHS =
nullptr;
15593 if (!IsMinMaxSCEVWithNonNegativeConstant(MinMaxExpr, SCTy, MinMaxLHS,
15598 auto *DivisibleExpr =
15606void ScalarEvolution::LoopGuards::collectFromBlock(
15608 const BasicBlock *
Block,
const BasicBlock *Pred,
15616 DenseMap<const SCEV *, const SCEV *> &RewriteMap,
15627 &ExprsToRewrite]() {
15628 const SCEVConstant *C1;
15641 if (ExactRegion.isWrappedSet() || ExactRegion.isFullSet())
15643 auto [
I,
Inserted] = RewriteMap.try_emplace(LHSUnknown);
15644 const SCEV *RewrittenLHS =
Inserted ? LHSUnknown :
I->second;
15652 if (MatchRangeCheckIdiom())
15669 auto AddRewrite = [&](
const SCEV *From,
const SCEV *FromRewritten,
15671 if (From == FromRewritten)
15673 RewriteMap[From] = To;
15679 auto GetMaybeRewritten = [&](
const SCEV *S) {
15680 return RewriteMap.lookup_or(S, S);
15683 const SCEV *RewrittenLHS = GetMaybeRewritten(
LHS);
15685 const APInt &DividesBy =
15700 switch (Predicate) {
15729 SmallPtrSet<const SCEV *, 16> Visited;
15731 auto EnqueueOperands = [&Worklist](
const SCEVNAryExpr *S) {
15735 while (!Worklist.
empty()) {
15739 if (!Visited.
insert(From).second)
15741 const SCEV *FromRewritten = GetMaybeRewritten(From);
15742 const SCEV *To =
nullptr;
15744 switch (Predicate) {
15749 EnqueueOperands(
UMax);
15755 EnqueueOperands(
SMax);
15761 EnqueueOperands(
UMin);
15767 EnqueueOperands(
SMin);
15775 const SCEV *OneAlignedUp =
15777 To = SE.
getUMaxExpr(FromRewritten, OneAlignedUp);
15789 const SCEVConstant *
C;
15798 Guards.NotEqual.insert({
LHS,
RHS});
15807 AddRewrite(From, FromRewritten, To);
15824 SE.F.
getParent(), Intrinsic::experimental_guard);
15826 for (
const auto *GU : GuardDecl->users())
15828 if (Guard->getFunction() ==
Block->getParent() &&
15837 unsigned NumCollectedConditions = 0;
15839 std::pair<const BasicBlock *, const BasicBlock *> Pair(Pred,
Block);
15841 Pair = SE.getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
15843 const BranchInst *LoopEntryPredicate =
15850 NumCollectedConditions++;
15854 if (
Depth > 0 && NumCollectedConditions == 2)
15862 if (Pair.second->hasNPredecessorsOrMore(2) &&
15864 SmallDenseMap<const BasicBlock *, LoopGuards> IncomingGuards;
15865 for (
auto &Phi : Pair.second->phis())
15876 for (
auto [Term, EnterIfTrue] :
reverse(Terms)) {
15877 SmallVector<Value *, 8> Worklist;
15878 SmallPtrSet<Value *, 8> Visited;
15880 while (!Worklist.
empty()) {
15887 EnterIfTrue ?
Cmp->getPredicate() :
Cmp->getInversePredicate();
15911 DenseMap<const SCEV *, APInt> Multiples;
15913 for (
const auto &[Predicate,
LHS,
RHS] : GuardsToProcess) {
15920 for (
const auto &[Predicate,
LHS,
RHS] : GuardsToProcess)
15921 CollectCondition(Predicate,
LHS,
RHS, Guards.RewriteMap, DivGuards);
15925 for (
const auto &[K, Divisor] : Multiples) {
15926 const SCEV *DivisorSCEV = SE.
getConstant(Divisor);
15927 Guards.RewriteMap[
K] =
15929 Guards.
rewrite(K), Divisor, SE),
15938 Guards.PreserveNUW =
true;
15939 Guards.PreserveNSW =
true;
15940 for (
const SCEV *Expr : ExprsToRewrite) {
15941 const SCEV *RewriteTo = Guards.RewriteMap[Expr];
15942 Guards.PreserveNUW &=
15944 Guards.PreserveNSW &=
15951 if (ExprsToRewrite.size() > 1) {
15952 for (
const SCEV *Expr : ExprsToRewrite) {
15953 const SCEV *RewriteTo = Guards.RewriteMap[Expr];
15954 Guards.RewriteMap.erase(Expr);
15955 Guards.RewriteMap.insert({Expr, Guards.
rewrite(RewriteTo)});
15964 class SCEVLoopGuardRewriter
15975 NotEqual(Guards.NotEqual) {
15976 if (Guards.PreserveNUW)
15978 if (Guards.PreserveNSW)
15985 return Map.lookup_or(Expr, Expr);
15989 if (
const SCEV *S = Map.lookup(Expr))
15996 unsigned Bitwidth = Ty->getScalarSizeInBits() / 2;
15997 while (Bitwidth % 8 == 0 && Bitwidth >= 8 &&
15998 Bitwidth >
Op->getType()->getScalarSizeInBits()) {
16000 auto *NarrowExt = SE.getZeroExtendExpr(
Op, NarrowTy);
16001 if (
const SCEV *S = Map.lookup(NarrowExt))
16002 return SE.getZeroExtendExpr(S, Ty);
16003 Bitwidth = Bitwidth / 2;
16011 if (
const SCEV *S = Map.lookup(Expr))
16018 if (
const SCEV *S = Map.lookup(Expr))
16024 if (
const SCEV *S = Map.lookup(Expr))
16032 auto RewriteSubtraction = [&](
const SCEV *S) ->
const SCEV * {
16033 const SCEV *LHS, *RHS;
16037 if (NotEqual.contains({LHS, RHS})) {
16039 SE.getOne(S->
getType()), SE.getConstantMultiple(S), SE);
16040 return SE.getUMaxExpr(OneAlignedUp, S);
16047 if (
const SCEV *Rewritten = RewriteSubtraction(Expr))
16058 if (
const SCEV *Rewritten = RewriteSubtraction(
Add))
16059 return SE.getAddExpr(
16062 if (
const SCEV *S = Map.lookup(
Add))
16063 return SE.getAddExpr(Expr->
getOperand(0), S);
16075 : SE.getAddExpr(Operands,
16091 : SE.getMulExpr(Operands,
16097 if (RewriteMap.empty() && NotEqual.empty())
16100 SCEVLoopGuardRewriter
Rewriter(SE, *
this);
16101 return Rewriter.visit(Expr);
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
SmallPtrSet< const BasicBlock *, 8 > VisitedBlocks
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
static bool isSigned(unsigned int Opcode)
This file defines a hash set that can be used to remove duplication of nodes in a graph.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This defines the Use class.
iv Induction Variable Users
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
PowerPC Reduce CR logical Operation
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
const SmallVectorImpl< MachineOperand > & Cond
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
SI optimize exec mask operations pre RA
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
This file provides utility classes that use RAII to save and restore values.
bool SCEVMinMaxExprContains(const SCEV *Root, const SCEV *OperandToFind, SCEVTypes RootKind)
static cl::opt< unsigned > MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden, cl::desc("Max coefficients in AddRec during evolving"), cl::init(8))
static cl::opt< unsigned > RangeIterThreshold("scev-range-iter-threshold", cl::Hidden, cl::desc("Threshold for switching to iteratively computing SCEV ranges"), cl::init(32))
static const Loop * isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI)
static unsigned getConstantTripCount(const SCEVConstant *ExitCount)
static int CompareValueComplexity(const LoopInfo *const LI, Value *LV, Value *RV, unsigned Depth)
Compare the two values LV and RV in terms of their "complexity" where "complexity" is a partial (and ...
static const SCEV * getNextSCEVDivisibleByDivisor(const SCEV *Expr, const APInt &DivisorVal, ScalarEvolution &SE)
static void PushLoopPHIs(const Loop *L, SmallVectorImpl< Instruction * > &Worklist, SmallPtrSetImpl< Instruction * > &Visited)
Push PHI nodes in the header of the given loop onto the given Worklist.
static void insertFoldCacheEntry(const ScalarEvolution::FoldID &ID, const SCEV *S, DenseMap< ScalarEvolution::FoldID, const SCEV * > &FoldCache, DenseMap< const SCEV *, SmallVector< ScalarEvolution::FoldID, 2 > > &FoldCacheUser)
static cl::opt< bool > ClassifyExpressions("scalar-evolution-classify-expressions", cl::Hidden, cl::init(true), cl::desc("When printing analysis, include information on every instruction"))
static bool CanConstantFold(const Instruction *I)
Return true if we can constant fold an instruction of the specified type, assuming that all operands ...
static cl::opt< unsigned > AddOpsInlineThreshold("scev-addops-inline-threshold", cl::Hidden, cl::desc("Threshold for inlining addition operands into a SCEV"), cl::init(500))
static cl::opt< unsigned > MaxLoopGuardCollectionDepth("scalar-evolution-max-loop-guard-collection-depth", cl::Hidden, cl::desc("Maximum depth for recursive loop guard collection"), cl::init(1))
static cl::opt< bool > VerifyIR("scev-verify-ir", cl::Hidden, cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"), cl::init(false))
static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge, Value *&C, Value *&LHS, Value *&RHS)
static const SCEV * getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty, ScalarEvolution *SE, unsigned Depth)
static std::optional< APInt > MinOptional(std::optional< APInt > X, std::optional< APInt > Y)
Helper function to compare optional APInts: (a) if X and Y both exist, return min(X,...
static cl::opt< unsigned > MulOpsInlineThreshold("scev-mulops-inline-threshold", cl::Hidden, cl::desc("Threshold for inlining multiplication operands into a SCEV"), cl::init(32))
static void GroupByComplexity(SmallVectorImpl< const SCEV * > &Ops, LoopInfo *LI, DominatorTree &DT)
Given a list of SCEV objects, order them by their complexity, and group objects of the same complexit...
static const SCEV * constantFoldAndGroupOps(ScalarEvolution &SE, LoopInfo &LI, DominatorTree &DT, SmallVectorImpl< const SCEV * > &Ops, FoldT Fold, IsIdentityT IsIdentity, IsAbsorberT IsAbsorber)
Performs a number of common optimizations on the passed Ops.
static bool isDivisibilityGuard(const SCEV *LHS, const SCEV *RHS, ScalarEvolution &SE)
static std::optional< const SCEV * > createNodeForSelectViaUMinSeq(ScalarEvolution *SE, const SCEV *CondExpr, const SCEV *TrueExpr, const SCEV *FalseExpr)
static Constant * BuildConstantFromSCEV(const SCEV *V)
This builds up a Constant using the ConstantExpr interface.
static ConstantInt * EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, ScalarEvolution &SE)
static const SCEV * BinomialCoefficient(const SCEV *It, unsigned K, ScalarEvolution &SE, Type *ResultTy)
Compute BC(It, K). The result has width W. Assume, K > 0.
static cl::opt< unsigned > MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden, cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"), cl::init(8))
static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr, const SCEV *Candidate)
Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values?
static PHINode * getConstantEvolvingPHI(Value *V, const Loop *L)
getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node in the loop that V is deri...
static const SCEV * SolveLinEquationWithOverflow(const APInt &A, const SCEV *B, SmallVectorImpl< const SCEVPredicate * > *Predicates, ScalarEvolution &SE, const Loop *L)
Finds the minimum unsigned root of the following equation:
static cl::opt< unsigned > MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden, cl::desc("Maximum number of iterations SCEV will " "symbolically execute a constant " "derived loop"), cl::init(100))
static bool MatchBinarySub(const SCEV *S, const SCEV *&LHS, const SCEV *&RHS)
static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow)
static void PrintSCEVWithTypeHint(raw_ostream &OS, const SCEV *S)
When printing a top-level SCEV for trip counts, it's helpful to include a type for constants which ar...
static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, const Loop *L)
static bool containsConstantInAddMulChain(const SCEV *StartExpr)
Determine if any of the operands in this SCEV are a constant or if any of the add or multiply express...
static const SCEV * getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty, ScalarEvolution *SE, unsigned Depth)
static bool hasHugeExpression(ArrayRef< const SCEV * > Ops)
Returns true if Ops contains a huge SCEV (the subtree of S contains at least HugeExprThreshold nodes)...
static cl::opt< unsigned > MaxPhiSCCAnalysisSize("scalar-evolution-max-scc-analysis-depth", cl::Hidden, cl::desc("Maximum amount of nodes to process while searching SCEVUnknown " "Phi strongly connected components"), cl::init(8))
static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
static bool collectDivisibilityInformation(ICmpInst::Predicate Predicate, const SCEV *LHS, const SCEV *RHS, DenseMap< const SCEV *, const SCEV * > &DivInfo, DenseMap< const SCEV *, APInt > &Multiples, ScalarEvolution &SE)
static cl::opt< unsigned > MaxSCEVOperationsImplicationDepth("scalar-evolution-max-scev-operations-implication-depth", cl::Hidden, cl::desc("Maximum depth of recursive SCEV operations implication analysis"), cl::init(2))
static void PushDefUseChildren(Instruction *I, SmallVectorImpl< Instruction * > &Worklist, SmallPtrSetImpl< Instruction * > &Visited)
Push users of the given Instruction onto the given Worklist.
static std::optional< APInt > SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec, const ConstantRange &Range, ScalarEvolution &SE)
Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n iterations.
static cl::opt< bool > UseContextForNoWrapFlagInference("scalar-evolution-use-context-for-no-wrap-flag-strenghening", cl::Hidden, cl::desc("Infer nuw/nsw flags using context where suitable"), cl::init(true))
static cl::opt< bool > EnableFiniteLoopControl("scalar-evolution-finite-loop", cl::Hidden, cl::desc("Handle <= and >= in finite loops"), cl::init(true))
static std::optional< std::tuple< APInt, APInt, APInt, APInt, unsigned > > GetQuadraticEquation(const SCEVAddRecExpr *AddRec)
For a given quadratic addrec, generate coefficients of the corresponding quadratic equation,...
static bool isKnownPredicateExtendIdiom(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
static std::optional< BinaryOp > MatchBinaryOp(Value *V, const DataLayout &DL, AssumptionCache &AC, const DominatorTree &DT, const Instruction *CxtI)
Try to map V into a BinaryOp, and return std::nullopt on failure.
static std::optional< APInt > SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE)
Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n iterations.
static std::optional< APInt > TruncIfPossible(std::optional< APInt > X, unsigned BitWidth)
Helper function to truncate an optional APInt to a given BitWidth.
static cl::opt< unsigned > MaxSCEVCompareDepth("scalar-evolution-max-scev-compare-depth", cl::Hidden, cl::desc("Maximum depth of recursive SCEV complexity comparisons"), cl::init(32))
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE, const SCEVConstant *ConstantTerm, const SCEVAddExpr *WholeAddExpr)
static cl::opt< unsigned > MaxConstantEvolvingDepth("scalar-evolution-max-constant-evolving-depth", cl::Hidden, cl::desc("Maximum depth of recursive constant evolving"), cl::init(32))
static ConstantRange getRangeForAffineARHelper(APInt Step, const ConstantRange &StartRange, const APInt &MaxBECount, bool Signed)
static std::optional< ConstantRange > GetRangeFromMetadata(Value *V)
Helper method to assign a range to V from metadata present in the IR.
static cl::opt< unsigned > HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden, cl::desc("Size of the expression which is considered huge"), cl::init(4096))
static SCEV::NoWrapFlags StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type, ArrayRef< const SCEV * > Ops, SCEV::NoWrapFlags Flags)
static Type * isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI, bool &Signed, ScalarEvolution &SE)
Helper function to createAddRecFromPHIWithCasts.
static Constant * EvaluateExpression(Value *V, const Loop *L, DenseMap< Instruction *, Constant * > &Vals, const DataLayout &DL, const TargetLibraryInfo *TLI)
EvaluateExpression - Given an expression that passes the getConstantEvolvingPHI predicate,...
static const SCEV * getPreviousSCEVDivisibleByDivisor(const SCEV *Expr, const APInt &DivisorVal, ScalarEvolution &SE)
static const SCEV * MatchNotExpr(const SCEV *Expr)
If Expr computes ~A, return A else return nullptr.
static cl::opt< unsigned > MaxValueCompareDepth("scalar-evolution-max-value-compare-depth", cl::Hidden, cl::desc("Maximum depth of recursive value complexity comparisons"), cl::init(2))
static const SCEV * applyDivisibilityOnMinMaxExpr(const SCEV *MinMaxExpr, APInt Divisor, ScalarEvolution &SE)
static cl::opt< bool, true > VerifySCEVOpt("verify-scev", cl::Hidden, cl::location(VerifySCEV), cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"))
static const SCEV * getSignedOverflowLimitForStep(const SCEV *Step, ICmpInst::Predicate *Pred, ScalarEvolution *SE)
static cl::opt< unsigned > MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden, cl::desc("Maximum depth of recursive arithmetics"), cl::init(32))
static bool HasSameValue(const SCEV *A, const SCEV *B)
SCEV structural equivalence is usually sufficient for testing whether two expressions are equal,...
static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow)
Compute the result of "n choose k", the binomial coefficient.
static std::optional< int > CompareSCEVComplexity(const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS, DominatorTree &DT, unsigned Depth=0)
static bool CollectAddOperandsWithScales(SmallDenseMap< const SCEV *, APInt, 16 > &M, SmallVectorImpl< const SCEV * > &NewOps, APInt &AccumulatedConstant, ArrayRef< const SCEV * > Ops, const APInt &Scale, ScalarEvolution &SE)
Process the given Ops list, which is a list of operands to be added under the given scale,...
static bool canConstantEvolve(Instruction *I, const Loop *L)
Determine whether this instruction can constant evolve within this loop assuming its operands can all...
static PHINode * getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L, DenseMap< Instruction *, PHINode * > &PHIMap, unsigned Depth)
getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by recursing through each instructi...
static bool scevUnconditionallyPropagatesPoisonFromOperands(SCEVTypes Kind)
static cl::opt< bool > VerifySCEVStrict("verify-scev-strict", cl::Hidden, cl::desc("Enable stricter verification with -verify-scev is passed"))
static Constant * getOtherIncomingValue(PHINode *PN, BasicBlock *BB)
static cl::opt< bool > UseExpensiveRangeSharpening("scalar-evolution-use-expensive-range-sharpening", cl::Hidden, cl::init(false), cl::desc("Use more powerful methods of sharpening expression ranges. May " "be costly in terms of compile time"))
static const SCEV * getUnsignedOverflowLimitForStep(const SCEV *Step, ICmpInst::Predicate *Pred, ScalarEvolution *SE)
static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Is LHS Pred RHS true on the virtue of LHS or RHS being a Min or Max expression?
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
static bool InBlock(const Value *V, const BasicBlock *BB)
Provides some synthesis utilities to produce sequences of values.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static SymbolRef::Type getType(const Symbol *Sym)
LocallyHashedType DenseMapInfo< LocallyHashedType >::Empty
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
static std::optional< bool > isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS, const Value *ARHS, const Value *BLHS, const Value *BRHS)
Return true if "icmp Pred BLHS BRHS" is true whenever "icmp PredALHS ARHS" is true.
Virtual Register Rewriter
static const uint32_t IV[8]
Class for arbitrary precision integers.
LLVM_ABI APInt umul_ov(const APInt &RHS, bool &Overflow) const
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
LLVM_ABI APInt getHiBits(unsigned numBits) const
Compute an APInt containing numBits highbits from this APInt.
unsigned getActiveBits() const
Compute the number of active bits in the value.
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
APInt abs() const
Get the absolute value.
bool sgt(const APInt &RHS) const
Signed greater than comparison.
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
bool isSignMask() const
Check if the APInt's value is returned by getSignMask.
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
static APInt getMinValue(unsigned numBits)
Gets minimum unsigned value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
bool sle(const APInt &RHS) const
Signed less or equal comparison.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
bool isNonPositive() const
Determine if this APInt Value is non-positive (<= 0).
unsigned countTrailingZeros() const
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
unsigned logBase2() const
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
LLVM_ABI APInt multiplicativeInverse() const
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
LLVM_ABI APInt sext(unsigned width) const
Sign extend to a new width.
APInt shl(unsigned shiftAmt) const
Left-shift function.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
bool isSignBitSet() const
Determine if sign bit of this APInt is set.
bool slt(const APInt &RHS) const
Signed less than comparison.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
This templated class represents "all analyses that operate over <a particular IR unit>" (e.g. all function analyses).
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
AnalysisUsage & addRequiredTransitive()
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
MutableArrayRef< WeakVH > assumptions()
Access the list of assumption handles currently tracked for this function.
LLVM_ABI bool isSingleEdge() const
Check if this is the only edge between Start and End.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
const Function * getParent() const
Return the enclosing method, or null if none.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Instruction & front() const
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well formed.
LLVM_ABI unsigned getNoWrapKind() const
Returns one of OBO::NoSignedWrap or OBO::NoUnsignedWrap.
LLVM_ABI Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
BinaryOps getOpcode() const
Conditional or Unconditional Branch instruction.
bool isConditional() const
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
This class represents a function call, abstracting a target machine's calling convention.
virtual void deleted()
Callback for Value destruction.
bool isFalseWhenEqual() const
This is just a convenience.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ ICMP_ULT
unsigned less than
@ ICMP_SGE
signed greater or equal
@ ICMP_ULE
unsigned less or equal
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
bool isTrueWhenEqual() const
This is just a convenience.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static LLVM_ABI std::optional< CmpPredicate > getMatching(CmpPredicate A, CmpPredicate B)
Compares two CmpPredicates taking samesign into account and returns the canonicalized CmpPredicate if...
LLVM_ABI CmpInst::Predicate getPreferredSignedPredicate() const
Attempts to return a signed CmpInst::Predicate from the CmpPredicate.
CmpInst::Predicate dropSameSign() const
Drops samesign information.
static LLVM_ABI Constant * getNot(Constant *C)
static LLVM_ABI Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
static LLVM_ABI Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)
static LLVM_ABI Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
This is the shared class of boolean and integer constants.
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
This class represents a range of values.
LLVM_ABI ConstantRange add(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an addition of a value in this ran...
LLVM_ABI ConstantRange zextOrTrunc(uint32_t BitWidth) const
Make this range have the bit width given by BitWidth.
PreferredRangeType
If represented precisely, the result of some range operations may consist of multiple disjoint ranges...
LLVM_ABI bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
const APInt & getLower() const
Return the lower value for this range.
LLVM_ABI bool isFullSet() const
Return true if this set contains all of the elements possible for this data-type.
LLVM_ABI bool icmp(CmpInst::Predicate Pred, const ConstantRange &Other) const
Does the predicate Pred hold between ranges this and Other?
LLVM_ABI bool isEmptySet() const
Return true if this set contains no members.
LLVM_ABI ConstantRange zeroExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type.
LLVM_ABI bool isSignWrappedSet() const
Return true if this set wraps around the signed domain.
LLVM_ABI APInt getSignedMin() const
Return the smallest signed value contained in the ConstantRange.
LLVM_ABI bool isWrappedSet() const
Return true if this set wraps around the unsigned domain.
LLVM_ABI void print(raw_ostream &OS) const
Print out the bounds to a stream.
LLVM_ABI ConstantRange truncate(uint32_t BitWidth, unsigned NoWrapKind=0) const
Return a new range in the specified integer type, which must be strictly smaller than the current type.
LLVM_ABI ConstantRange signExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
const APInt & getUpper() const
Return the upper value for this range.
LLVM_ABI ConstantRange unionWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the union of this range with another range.
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
LLVM_ABI APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
LLVM_ABI APInt getSignedMax() const
Return the largest signed value contained in the ConstantRange.
static ConstantRange getNonEmpty(APInt Lower, APInt Upper)
Create non-empty constant range with the given bounds.
static LLVM_ABI ConstantRange makeGuaranteedNoWrapRegion(Instruction::BinaryOps BinOp, const ConstantRange &Other, unsigned NoWrapKind)
Produce the largest range containing all X such that "X BinOp Y" is guaranteed not to wrap (overflow)...
LLVM_ABI unsigned getMinSignedBits() const
Compute the maximal number of bits needed to represent every value in this signed range.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
LLVM_ABI ConstantRange sub(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a subtraction of a value in this r...
LLVM_ABI ConstantRange sextOrTrunc(uint32_t BitWidth) const
Make this range have the bit width given by BitWidth.
static LLVM_ABI ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
This is an important base class in LLVM.
A parsed version of the target data layout string in and methods for querying it.
LLVM_ABI const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
LLVM_ABI IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
LLVM_ABI unsigned getIndexTypeSizeInBits(Type *Ty) const
The size in bits of the index used in GEP calculation for this type.
LLVM_ABI IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of a GEP index in AddressSpace.
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exists.
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
iterator find_as(const LookupKeyT &Val)
Alternate version of find() which allows a different, and possibly less expensive,...
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Analysis pass which computes a DominatorTree.
Legacy analysis pass which computes a DominatorTree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
LLVM_ABI bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
FoldingSetNodeIDRef - This class describes a reference to an interned FoldingSetNodeID,...
FoldingSetNodeID - This class is used to gather all the unique data bits of a node.
Represents flags for the getelementptr instruction/expression.
bool hasNoUnsignedSignedWrap() const
bool hasNoUnsignedWrap() const
static GEPNoWrapFlags none()
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
Module * getParent()
Get the module that this global value is contained inside of...
static bool isPrivateLinkage(LinkageTypes Linkage)
static bool isInternalLinkage(LinkageTypes Linkage)
This instruction compares its operands according to the predicate given to the constructor.
CmpPredicate getCmpPredicate() const
static bool isGE(Predicate P)
Return true if the predicate is SGE or UGE.
CmpPredicate getSwappedCmpPredicate() const
static LLVM_ABI bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
static bool isLT(Predicate P)
Return true if the predicate is SLT or ULT.
CmpPredicate getInverseCmpPredicate() const
Predicate getNonStrictCmpPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
static bool isGT(Predicate P)
Return true if the predicate is SGT or UGT.
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
static CmpPredicate getInverseCmpPredicate(CmpPredicate Pred)
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
static bool isLE(Predicate P)
Return true if the predicate is SLE or ULE.
LLVM_ABI bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
LLVM_ABI bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
LLVM_ABI bool isIdenticalToWhenDefined(const Instruction *I, bool IntersectAttrs=false) const LLVM_READONLY
This is like isIdenticalTo, except that it ignores the SubclassOptionalData flags,...
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
An instruction for reading from memory.
Analysis pass that exposes the LoopInfo for a function.
bool contains(const LoopT *L) const
Return true if the specified loop is contained within this loop.
BlockT * getHeader() const
unsigned getLoopDepth() const
Return the nesting level of this loop.
BlockT * getLoopPredecessor() const
If the given loop's header has exactly one unique predecessor outside the loop, return it.
LoopT * getParentLoop() const
Return the parent loop if it exists or nullptr for top level loops.
unsigned getLoopDepth(const BlockT *BB) const
Return the loop nesting level of the specified block.
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
The legacy pass manager's analysis pass to compute loop information.
Represents a single loop in the control flow graph.
bool isLoopInvariant(const Value *V) const
Return true if the specified value is loop invariant.
A Module instance is used to store all the information related to an LLVM module.
unsigned getOpcode() const
Return the opcode for this Instruction or ConstantExpr.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
iterator_range< const_block_iterator > blocks() const
op_range incoming_values()
Value * getIncomingValueForBlock(const BasicBlock *BB) const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
AnalysisType & getAnalysis() const
getAnalysis<AnalysisType>() - This function is used by subclasses to get to the analysis information ...
PointerIntPair - This class implements a pair of a pointer and small integer.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
LLVM_ABI void addPredicate(const SCEVPredicate &Pred)
Adds a new predicate.
LLVM_ABI const SCEVPredicate & getPredicate() const
LLVM_ABI bool hasNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Returns true if we've proved that V doesn't wrap by means of a SCEV predicate.
LLVM_ABI void setNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Proves that V doesn't overflow by adding SCEV predicate.
LLVM_ABI void print(raw_ostream &OS, unsigned Depth) const
Print the SCEV mappings done by the Predicated Scalar Evolution.
LLVM_ABI bool areAddRecsEqualWithPreds(const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const
Check if AR1 and AR2 are equal, while taking into account Equal predicates in Preds.
LLVM_ABI PredicatedScalarEvolution(ScalarEvolution &SE, Loop &L)
LLVM_ABI const SCEVAddRecExpr * getAsAddRec(Value *V)
Attempts to produce an AddRecExpr for V by adding additional SCEV predicates.
LLVM_ABI unsigned getSmallConstantMaxTripCount()
Returns the upper bound of the loop trip count as a normal unsigned value, or 0 if the trip count is ...
LLVM_ABI const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
LLVM_ABI const SCEV * getSymbolicMaxBackedgeTakenCount()
Get the (predicated) symbolic max backedge count for the analyzed loop.
LLVM_ABI const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
PreservedAnalysisChecker getChecker() const
Build a checker for this PreservedAnalyses and the specified analysis type.
constexpr bool isValid() const
This node represents an addition of some number of SCEVs.
This node represents a polynomial recurrence on the trip count of the specified loop.
friend class ScalarEvolution
const SCEV * getStart() const
LLVM_ABI const SCEV * evaluateAtIteration(const SCEV *It, ScalarEvolution &SE) const
Return the value of this chain of recurrences at the specified iteration number.
const SCEV * getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
void setNoWrapFlags(NoWrapFlags Flags)
Set flags for a recurrence without clearing any previously set flags.
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values.
bool isQuadratic() const
Return true if this represents an expression A + B*x + C*x^2 where A, B and C are loop invariant values.
LLVM_ABI const SCEV * getNumIterationsInRange(const ConstantRange &Range, ScalarEvolution &SE) const
Return the number of iterations of this loop that produce values in the specified constant range.
LLVM_ABI const SCEVAddRecExpr * getPostIncExpr(ScalarEvolution &SE) const
Return an expression representing the value of this expression one iteration of the loop ahead.
const Loop * getLoop() const
This is the base class for unary cast operator classes.
const SCEV * getOperand() const
LLVM_ABI SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, const SCEV *op, Type *ty)
void setNoWrapFlags(NoWrapFlags Flags)
Set flags for a non-recurrence without clearing previously set flags.
This class represents an assumption that the expression LHS Pred RHS evaluates to true,...
SCEVComparePredicate(const FoldingSetNodeIDRef ID, const ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
bool isAlwaysTrue() const override
Returns true if the predicate is always true.
void print(raw_ostream &OS, unsigned Depth=0) const override
Prints a textual representation of this predicate with an indentation of Depth.
bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const override
Implementation of the SCEVPredicate interface.
This class represents a constant integer value.
ConstantInt * getValue() const
const APInt & getAPInt() const
This is the base class for unary integral cast operator classes.
LLVM_ABI SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, const SCEV *op, Type *ty)
This node is the base class min/max selections.
static enum SCEVTypes negate(enum SCEVTypes T)
This node represents multiplication of some number of SCEVs.
This node is a base class providing common functionality for n'ary operators.
bool hasNoUnsignedWrap() const
bool hasNoSelfWrap() const
size_t getNumOperands() const
bool hasNoSignedWrap() const
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask=NoWrapMask) const
const SCEV * getOperand(unsigned i) const
const SCEV *const * Operands
ArrayRef< const SCEV * > operands() const
This class represents an assumption made using SCEV expressions which can be checked at run-time.
SCEVPredicate(const SCEVPredicate &)=default
virtual bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const =0
Returns true if this predicate implies N.
This class represents a cast from a pointer to a pointer-sized integer value.
This visitor recursively visits a SCEV expression and re-writes it.
const SCEV * visitSignExtendExpr(const SCEVSignExtendExpr *Expr)
const SCEV * visit(const SCEV *S)
const SCEV * visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr)
const SCEV * visitSMinExpr(const SCEVSMinExpr *Expr)
const SCEV * visitUMinExpr(const SCEVUMinExpr *Expr)
This class represents a signed minimum selection.
This node is the base class for sequential/in-order min/max selections.
static SCEVTypes getEquivalentNonSequentialSCEVType(SCEVTypes Ty)
This class represents a sign extension of a small integer value to a larger integer value.
Visit all nodes in the expression tree using worklist traversal.
This class represents a truncation of an integer value to a smaller integer value.
This class represents a binary unsigned division operation.
const SCEV * getLHS() const
const SCEV * getRHS() const
This class represents an unsigned minimum selection.
This class represents a composition of other SCEV predicates, and is the class that most clients will...
void print(raw_ostream &OS, unsigned Depth) const override
Prints a textual representation of this predicate with an indentation of Depth.
bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const override
Returns true if this predicate implies N.
SCEVUnionPredicate(ArrayRef< const SCEVPredicate * > Preds, ScalarEvolution &SE)
Union predicates don't get cached so create a dummy set ID for it.
bool isAlwaysTrue() const override
Implementation of the SCEVPredicate interface.
This means that we are dealing with an entirely unknown SCEV value, and only represent it as its LLVM Value.
This class represents the value of vscale, as used when defining the length of a scalable vector or r...
This class represents an assumption made on an AddRec expression.
IncrementWrapFlags
Similar to SCEV::NoWrapFlags, but with slightly different semantics for FlagNUSW.
SCEVWrapPredicate(const FoldingSetNodeIDRef ID, const SCEVAddRecExpr *AR, IncrementWrapFlags Flags)
bool implies(const SCEVPredicate *N, ScalarEvolution &SE) const override
Returns true if this predicate implies N.
static SCEVWrapPredicate::IncrementWrapFlags setFlags(SCEVWrapPredicate::IncrementWrapFlags Flags, SCEVWrapPredicate::IncrementWrapFlags OnFlags)
void print(raw_ostream &OS, unsigned Depth=0) const override
Prints a textual representation of this predicate with an indentation of Depth.
bool isAlwaysTrue() const override
Returns true if the predicate is always true.
const SCEVAddRecExpr * getExpr() const
Implementation of the SCEVPredicate interface.
static SCEVWrapPredicate::IncrementWrapFlags clearFlags(SCEVWrapPredicate::IncrementWrapFlags Flags, SCEVWrapPredicate::IncrementWrapFlags OffFlags)
Convenient IncrementWrapFlags manipulation methods.
static SCEVWrapPredicate::IncrementWrapFlags getImpliedFlags(const SCEVAddRecExpr *AR, ScalarEvolution &SE)
Returns the set of SCEVWrapPredicate no wrap flags implied by a SCEVAddRecExpr.
IncrementWrapFlags getFlags() const
Returns the set assumed no overflow flags.
This class represents a zero extension of a small integer value to a larger integer value.
This class represents an analyzed expression in the program.
LLVM_ABI ArrayRef< const SCEV * > operands() const
Return operands of this SCEV expression.
unsigned short getExpressionSize() const
LLVM_ABI bool isOne() const
Return true if the expression is a constant one.
LLVM_ABI bool isZero() const
Return true if the expression is a constant zero.
LLVM_ABI void dump() const
This method is used for debugging.
LLVM_ABI bool isAllOnesValue() const
Return true if the expression is a constant all-ones value.
LLVM_ABI bool isNonConstantNegative() const
Return true if the specified scev is negated, but not a constant.
LLVM_ABI void print(raw_ostream &OS) const
Print out the internal representation of this scalar to the specified stream.
SCEV(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, unsigned short ExpressionSize)
SCEVTypes getSCEVType() const
LLVM_ABI Type * getType() const
Return the LLVM type of this SCEV expression.
NoWrapFlags
NoWrapFlags are bitfield indices into SubclassData.
Analysis pass that exposes the ScalarEvolution for a function.
LLVM_ABI ScalarEvolution run(Function &F, FunctionAnalysisManager &AM)
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overridden by passes that need analysis information to do their job.
void print(raw_ostream &OS, const Module *=nullptr) const override
print - Print out the internal state of the pass.
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overridden by subclasses to do the per-function processing of the pass.
void releaseMemory() override
releaseMemory() - This member can be implemented by a pass if it wants to be able to release its memory when it is no longer needed.
void verifyAnalysis() const override
verifyAnalysis() - This member can be implemented by an analysis pass to check the state of analysis information.
ScalarEvolutionWrapperPass()
static LLVM_ABI LoopGuards collect(const Loop *L, ScalarEvolution &SE)
Collect rewrite map for loop guards for loop L, together with flags indicating if NUW and NSW can be ...
LLVM_ABI const SCEV * rewrite(const SCEV *Expr) const
Try to apply the collected loop guards to Expr.
The main scalar evolution driver.
const SCEV * getConstantMaxBackedgeTakenCount(const Loop *L)
When successful, this returns a SCEVConstant that is greater than or equal to (i.e.
static bool hasFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags TestFlags)
const DataLayout & getDataLayout() const
Return the DataLayout associated with the module this SCEV instance is operating on.
LLVM_ABI bool isKnownNonNegative(const SCEV *S)
Test if the given expression is known to be non-negative.
LLVM_ABI bool isKnownOnEveryIteration(CmpPredicate Pred, const SCEVAddRecExpr *LHS, const SCEV *RHS)
Test if the condition described by Pred, LHS, RHS is known to be true on every iteration of the loop ...
LLVM_ABI const SCEV * getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
Return the SCEV object corresponding to -V.
LLVM_ABI std::optional< LoopInvariantPredicate > getLoopInvariantExitCondDuringFirstIterationsImpl(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI, const SCEV *MaxIter)
LLVM_ABI const SCEV * getSMaxExpr(const SCEV *LHS, const SCEV *RHS)
LLVM_ABI const SCEV * getUDivCeilSCEV(const SCEV *N, const SCEV *D)
Compute ceil(N / D).
LLVM_ABI std::optional< LoopInvariantPredicate > getLoopInvariantExitCondDuringFirstIterations(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI, const SCEV *MaxIter)
If the result of the predicate LHS Pred RHS is loop invariant with respect to L at given Context duri...
LLVM_ABI Type * getWiderType(Type *Ty1, Type *Ty2) const
LLVM_ABI const SCEV * getAbsExpr(const SCEV *Op, bool IsNSW)
LLVM_ABI bool isKnownNonPositive(const SCEV *S)
Test if the given expression is known to be non-positive.
LLVM_ABI const SCEV * getURemExpr(const SCEV *LHS, const SCEV *RHS)
Represents an unsigned remainder expression based on unsigned division.
LLVM_ABI bool isKnownNegative(const SCEV *S)
Test if the given expression is known to be negative.
LLVM_ABI const SCEV * getPredicatedConstantMaxBackedgeTakenCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Predicates)
Similar to getConstantMaxBackedgeTakenCount, except it will add a set of SCEV predicates to Predicate...
LLVM_ABI const SCEV * removePointerBase(const SCEV *S)
Compute an expression equivalent to S - getPointerBase(S).
LLVM_ABI bool isLoopEntryGuardedByCond(const Loop *L, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether entry to the loop is protected by a conditional between LHS and RHS.
LLVM_ABI bool isKnownNonZero(const SCEV *S)
Test if the given expression is known to be non-zero.
LLVM_ABI const SCEV * getSCEVAtScope(const SCEV *S, const Loop *L)
Return a SCEV expression for the specified value at the specified scope in the program.
LLVM_ABI const SCEV * getSMinExpr(const SCEV *LHS, const SCEV *RHS)
LLVM_ABI const SCEV * getBackedgeTakenCount(const Loop *L, ExitCountKind Kind=Exact)
If the specified loop has a predictable backedge-taken count, return it, otherwise return a SCEVCould...
LLVM_ABI const SCEV * getUMaxExpr(const SCEV *LHS, const SCEV *RHS)
LLVM_ABI void setNoWrapFlags(SCEVAddRecExpr *AddRec, SCEV::NoWrapFlags Flags)
Update no-wrap flags of an AddRec.
LLVM_ABI const SCEV * getUMaxFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS)
Promote the operands to the wider of the types using zero-extension, and then perform a umax operatio...
const SCEV * getZero(Type *Ty)
Return a SCEV for the constant 0 of a specific type.
LLVM_ABI bool willNotOverflow(Instruction::BinaryOps BinOp, bool Signed, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI=nullptr)
Is operation BinOp between LHS and RHS provably does not have a signed/unsigned overflow (Signed)?
LLVM_ABI ExitLimit computeExitLimitFromCond(const Loop *L, Value *ExitCond, bool ExitIfTrue, bool ControlsOnlyExit, bool AllowPredicates=false)
Compute the number of times the backedge of the specified loop will execute if its exit condition wer...
LLVM_ABI const SCEV * getZeroExtendExprImpl(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI const SCEVPredicate * getEqualPredicate(const SCEV *LHS, const SCEV *RHS)
LLVM_ABI unsigned getSmallConstantTripMultiple(const Loop *L, const SCEV *ExitCount)
Returns the largest constant divisor of the trip count as a normal unsigned value,...
LLVM_ABI uint64_t getTypeSizeInBits(Type *Ty) const
Return the size in bits of the specified type, for which isSCEVable must return true.
LLVM_ABI const SCEV * getConstant(ConstantInt *V)
LLVM_ABI const SCEV * getPredicatedBackedgeTakenCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Predicates)
Similar to getBackedgeTakenCount, except it will add a set of SCEV predicates to Predicates that are ...
LLVM_ABI const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
ConstantRange getSignedRange(const SCEV *S)
Determine the signed range for a particular SCEV.
LLVM_ABI const SCEV * getNoopOrSignExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
bool loopHasNoAbnormalExits(const Loop *L)
Return true if the loop has no abnormal exits.
LLVM_ABI const SCEV * getTripCountFromExitCount(const SCEV *ExitCount)
A version of getTripCountFromExitCount below which always picks an evaluation type which can not resu...
LLVM_ABI ScalarEvolution(Function &F, TargetLibraryInfo &TLI, AssumptionCache &AC, DominatorTree &DT, LoopInfo &LI)
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
LLVM_ABI const SCEV * getTruncateOrNoop(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI const SCEV * getCastExpr(SCEVTypes Kind, const SCEV *Op, Type *Ty)
LLVM_ABI const SCEV * getSequentialMinMaxExpr(SCEVTypes Kind, SmallVectorImpl< const SCEV * > &Operands)
LLVM_ABI const SCEV * getLosslessPtrToIntExpr(const SCEV *Op, unsigned Depth=0)
LLVM_ABI std::optional< bool > evaluatePredicateAt(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI)
Check whether the condition described by Pred, LHS, and RHS is true or false in the given Context.
LLVM_ABI unsigned getSmallConstantMaxTripCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > *Predicates=nullptr)
Returns the upper bound of the loop trip count as a normal unsigned value.
LLVM_ABI const SCEV * getPtrToIntExpr(const SCEV *Op, Type *Ty)
LLVM_ABI bool isBackedgeTakenCountMaxOrZero(const Loop *L)
Return true if the backedge taken count is either the value returned by getConstantMaxBackedgeTakenCo...
LLVM_ABI void forgetLoop(const Loop *L)
This method should be called by the client when it has changed a loop in a way that may effect Scalar...
LLVM_ABI bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
LLVM_ABI bool isKnownPositive(const SCEV *S)
Test if the given expression is known to be positive.
APInt getUnsignedRangeMin(const SCEV *S)
Determine the min of the unsigned range for a particular SCEV.
LLVM_ABI bool SimplifyICmpOperands(CmpPredicate &Pred, const SCEV *&LHS, const SCEV *&RHS, unsigned Depth=0)
Simplify LHS and RHS in a comparison with predicate Pred.
LLVM_ABI const SCEV * getOffsetOfExpr(Type *IntTy, StructType *STy, unsigned FieldNo)
Return an expression for offsetof on the given field with type IntTy.
LLVM_ABI LoopDisposition getLoopDisposition(const SCEV *S, const Loop *L)
Return the "disposition" of the given SCEV with respect to the given loop.
LLVM_ABI bool containsAddRecurrence(const SCEV *S)
Return true if the SCEV is a scAddRecExpr or it contains scAddRecExpr.
LLVM_ABI const SCEV * getSignExtendExprImpl(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI const SCEV * getAddRecExpr(const SCEV *Start, const SCEV *Step, const Loop *L, SCEV::NoWrapFlags Flags)
Get an add recurrence expression for the specified loop.
LLVM_ABI bool hasOperand(const SCEV *S, const SCEV *Op) const
Test whether the given SCEV has Op as a direct or indirect operand.
LLVM_ABI const SCEV * getUDivExpr(const SCEV *LHS, const SCEV *RHS)
Get a canonical unsigned division expression, or something simpler if possible.
LLVM_ABI const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI bool isSCEVable(Type *Ty) const
Test if values of the given type are analyzable within the SCEV framework.
LLVM_ABI Type * getEffectiveSCEVType(Type *Ty) const
Return a type with the same bitwidth as the given type and which represents how SCEV will treat the g...
LLVM_ABI const SCEVPredicate * getComparePredicate(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
LLVM_ABI bool haveSameSign(const SCEV *S1, const SCEV *S2)
Return true if we know that S1 and S2 must have the same sign.
LLVM_ABI const SCEV * getNotSCEV(const SCEV *V)
Return the SCEV object corresponding to ~V.
LLVM_ABI const SCEV * getElementCount(Type *Ty, ElementCount EC, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
LLVM_ABI bool instructionCouldExistWithOperands(const SCEV *A, const SCEV *B)
Return true if there exists a point in the program at which both A and B could be operands to the sam...
ConstantRange getUnsignedRange(const SCEV *S)
Determine the unsigned range for a particular SCEV.
LLVM_ABI void print(raw_ostream &OS) const
LLVM_ABI const SCEV * getUMinExpr(const SCEV *LHS, const SCEV *RHS, bool Sequential=false)
LLVM_ABI const SCEV * getPredicatedExitCount(const Loop *L, const BasicBlock *ExitingBlock, SmallVectorImpl< const SCEVPredicate * > *Predicates, ExitCountKind Kind=Exact)
Same as above except this uses the predicated backedge taken info and may require predicates.
static SCEV::NoWrapFlags clearFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OffFlags)
LLVM_ABI void forgetTopmostLoop(const Loop *L)
LLVM_ABI void forgetValue(Value *V)
This method should be called by the client when it has changed a value in a way that may effect its v...
APInt getSignedRangeMin(const SCEV *S)
Determine the min of the signed range for a particular SCEV.
LLVM_ABI const SCEV * getNoopOrAnyExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI void forgetBlockAndLoopDispositions(Value *V=nullptr)
Called when the client has changed the disposition of values in a loop or block.
LLVM_ABI const SCEV * getTruncateExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
@ MonotonicallyDecreasing
@ MonotonicallyIncreasing
LLVM_ABI std::optional< LoopInvariantPredicate > getLoopInvariantPredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI=nullptr)
If the result of the predicate LHS Pred RHS is loop invariant with respect to L, return a LoopInvaria...
LLVM_ABI const SCEV * getStoreSizeOfExpr(Type *IntTy, Type *StoreTy)
Return an expression for the store size of StoreTy that is type IntTy.
LLVM_ABI const SCEVPredicate * getWrapPredicate(const SCEVAddRecExpr *AR, SCEVWrapPredicate::IncrementWrapFlags AddedFlags)
LLVM_ABI bool isLoopBackedgeGuardedByCond(const Loop *L, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether the backedge of the loop is protected by a conditional between LHS and RHS.
LLVM_ABI const SCEV * getMinusSCEV(const SCEV *LHS, const SCEV *RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS.
LLVM_ABI APInt getNonZeroConstantMultiple(const SCEV *S)
const SCEV * getMinusOne(Type *Ty)
Return a SCEV for the constant -1 of a specific type.
static SCEV::NoWrapFlags setFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OnFlags)
LLVM_ABI bool hasLoopInvariantBackedgeTakenCount(const Loop *L)
Return true if the specified loop has an analyzable loop-invariant backedge-taken count.
LLVM_ABI BlockDisposition getBlockDisposition(const SCEV *S, const BasicBlock *BB)
Return the "disposition" of the given SCEV with respect to the given block.
LLVM_ABI const SCEV * getNoopOrZeroExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)
LLVM_ABI const SCEV * getUMinFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS, bool Sequential=false)
Promote the operands to the wider of the types using zero-extension, and then perform a umin operatio...
LLVM_ABI bool loopIsFiniteByAssumption(const Loop *L)
Return true if this loop is finite by assumption.
LLVM_ABI const SCEV * getExistingSCEV(Value *V)
Return an existing SCEV for V if there is one, otherwise return nullptr.
LLVM_ABI APInt getConstantMultiple(const SCEV *S, const Instruction *CtxI=nullptr)
Returns the max constant multiple of S.
LoopDisposition
An enum describing the relationship between a SCEV and a loop.
@ LoopComputable
The SCEV varies predictably with the loop.
@ LoopVariant
The SCEV is loop-variant (unknown).
@ LoopInvariant
The SCEV is loop-invariant.
friend class SCEVCallbackVH
LLVM_ABI bool isKnownMultipleOf(const SCEV *S, uint64_t M, SmallVectorImpl< const SCEVPredicate * > &Assumptions)
Check that S is a multiple of M.
LLVM_ABI const SCEV * getAnyExtendExpr(const SCEV *Op, Type *Ty)
getAnyExtendExpr - Return a SCEV for the given operand extended with unspecified bits out to the give...
LLVM_ABI bool isKnownToBeAPowerOfTwo(const SCEV *S, bool OrZero=false, bool OrNegative=false)
Test if the given expression is known to be a power of 2.
LLVM_ABI std::optional< SCEV::NoWrapFlags > getStrengthenedNoWrapFlagsFromBinOp(const OverflowingBinaryOperator *OBO)
Parse NSW/NUW flags from add/sub/mul IR binary operation Op into SCEV no-wrap flags,...
LLVM_ABI void forgetLcssaPhiWithNewPredecessor(Loop *L, PHINode *V)
Forget LCSSA phi node V of loop L to which a new predecessor was added, such that it may no longer be...
LLVM_ABI bool containsUndefs(const SCEV *S) const
Return true if the SCEV expression contains an undef value.
LLVM_ABI std::optional< MonotonicPredicateType > getMonotonicPredicateType(const SCEVAddRecExpr *LHS, ICmpInst::Predicate Pred)
If, for all loop invariant X, the predicate "LHS `Pred` X" is monotonically increasing or decreasing,...
LLVM_ABI const SCEV * getCouldNotCompute()
LLVM_ABI bool isAvailableAtLoopEntry(const SCEV *S, const Loop *L)
Determine if the SCEV can be evaluated at loop's entry.
LLVM_ABI uint32_t getMinTrailingZeros(const SCEV *S, const Instruction *CtxI=nullptr)
Determine the minimum number of zero bits that S is guaranteed to end in (at every loop iteration).
BlockDisposition
An enum describing the relationship between a SCEV and a basic block.
@ DominatesBlock
The SCEV dominates the block.
@ ProperlyDominatesBlock
The SCEV properly dominates the block.
@ DoesNotDominateBlock
The SCEV does not dominate the block.
LLVM_ABI const SCEV * getGEPExpr(GEPOperator *GEP, ArrayRef< const SCEV * > IndexExprs)
Returns an expression for a GEP.
LLVM_ABI const SCEV * getExitCount(const Loop *L, const BasicBlock *ExitingBlock, ExitCountKind Kind=Exact)
Return the number of times the backedge executes before the given exit would be taken; if not exactly...
LLVM_ABI const SCEV * getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI void getPoisonGeneratingValues(SmallPtrSetImpl< const Value * > &Result, const SCEV *S)
Return the set of Values that, if poison, will definitively result in S being poison as well.
LLVM_ABI void forgetLoopDispositions()
Called when the client has changed the disposition of values in this loop.
LLVM_ABI const SCEV * getVScale(Type *Ty)
LLVM_ABI unsigned getSmallConstantTripCount(const Loop *L)
Returns the exact trip count of the loop if we can compute it, and the result is a small constant.
LLVM_ABI bool hasComputableLoopEvolution(const SCEV *S, const Loop *L)
Return true if the given SCEV changes value in a known way in the specified loop.
LLVM_ABI const SCEV * getPointerBase(const SCEV *V)
Transitively follow the chain of pointer-type operands until reaching a SCEV that does not have a sin...
LLVM_ABI const SCEV * getMinMaxExpr(SCEVTypes Kind, SmallVectorImpl< const SCEV * > &Operands)
LLVM_ABI void forgetAllLoops()
LLVM_ABI bool dominates(const SCEV *S, const BasicBlock *BB)
Return true if elements that makes up the given SCEV dominate the specified basic block.
APInt getUnsignedRangeMax(const SCEV *S)
Determine the max of the unsigned range for a particular SCEV.
ExitCountKind
The terms "backedge taken count" and "exit count" are used interchangeably to refer to the number of ...
@ SymbolicMaximum
An expression which provides an upper bound on the exact trip count.
@ ConstantMaximum
A constant which provides an upper bound on the exact trip count.
@ Exact
An expression exactly describing the number of times the backedge has executed when a loop is exited.
LLVM_ABI const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
LLVM_ABI const SCEV * getMulExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
LLVM_ABI const SCEVAddRecExpr * convertSCEVToAddRecWithPredicates(const SCEV *S, const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Preds)
Tries to convert the S expression to an AddRec expression, adding additional predicates to Preds as r...
LLVM_ABI const SCEV * getElementSize(Instruction *Inst)
Return the size of an element read or written by Inst.
LLVM_ABI const SCEV * getSizeOfExpr(Type *IntTy, TypeSize Size)
Return an expression for a TypeSize.
LLVM_ABI std::optional< bool > evaluatePredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Check whether the condition described by Pred, LHS, and RHS is true or false.
LLVM_ABI const SCEV * getUnknown(Value *V)
LLVM_ABI std::optional< std::pair< const SCEV *, SmallVector< const SCEVPredicate *, 3 > > > createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI)
Checks if SymbolicPHI can be rewritten as an AddRecExpr under some Predicates.
LLVM_ABI const SCEV * getTruncateOrZeroExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
static SCEV::NoWrapFlags maskFlags(SCEV::NoWrapFlags Flags, int Mask)
Convenient NoWrapFlags manipulation that hides enum casts and is visible in the ScalarEvolution name ...
LLVM_ABI std::optional< APInt > computeConstantDifference(const SCEV *LHS, const SCEV *RHS)
Compute LHS - RHS and returns the result as an APInt if it is a constant, and std::nullopt if it isn'...
LLVM_ABI bool properlyDominates(const SCEV *S, const BasicBlock *BB)
Return true if elements that makes up the given SCEV properly dominate the specified basic block.
LLVM_ABI const SCEV * rewriteUsingPredicate(const SCEV *S, const Loop *L, const SCEVPredicate &A)
Re-writes the SCEV according to the Predicates in A.
LLVM_ABI std::pair< const SCEV *, const SCEV * > SplitIntoInitAndPostInc(const Loop *L, const SCEV *S)
Splits SCEV expression S into two SCEVs.
LLVM_ABI bool canReuseInstruction(const SCEV *S, Instruction *I, SmallVectorImpl< Instruction * > &DropPoisonGeneratingInsts)
Check whether it is poison-safe to represent the expression S using the instruction I.
LLVM_ABI bool isKnownPredicateAt(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
LLVM_ABI const SCEV * getPredicatedSymbolicMaxBackedgeTakenCount(const Loop *L, SmallVectorImpl< const SCEVPredicate * > &Predicates)
Similar to getSymbolicMaxBackedgeTakenCount, except it will add a set of SCEV predicates to Predicate...
LLVM_ABI ~ScalarEvolution()
LLVM_ABI const SCEV * getUDivExactExpr(const SCEV *LHS, const SCEV *RHS)
Get a canonical unsigned division expression, or something simpler if possible.
LLVM_ABI void registerUser(const SCEV *User, ArrayRef< const SCEV * > Ops)
Notify this ScalarEvolution that User directly uses SCEVs in Ops.
LLVM_ABI const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
LLVM_ABI bool isBasicBlockEntryGuardedByCond(const BasicBlock *BB, CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether entry to the basic block is protected by a conditional between LHS and RHS.
LLVM_ABI const SCEV * getTruncateOrSignExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI bool containsErasedValue(const SCEV *S) const
Return true if the SCEV expression contains a Value that has been optimised out and is now a nullptr.
LLVM_ABI bool isKnownPredicate(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
LLVM_ABI bool isKnownViaInduction(CmpPredicate Pred, const SCEV *LHS, const SCEV *RHS)
We'd like to check the predicate on every iteration of the most dominated loop between loops used in ...
const SCEV * getSymbolicMaxBackedgeTakenCount(const Loop *L)
When successful, this returns a SCEV that is greater than or equal to (i.e.
APInt getSignedRangeMax(const SCEV *S)
Determine the max of the signed range for a particular SCEV.
LLVM_ABI void verify() const
LLVMContext & getContext() const
Implements a dense probed hash-table based set with some number of buckets stored inline.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
iterator insert(iterator I, T &&Elt)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
Used to lazily calculate structure layout information for a target machine, based on the DataLayout s...
TypeSize getElementOffset(unsigned Idx) const
TypeSize getSizeInBits() const
Class to represent struct types.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
The instances of the Type class are immutable: once they are created, they are never changed.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
bool isPointerTy() const
True if this is an instance of PointerType.
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
unsigned getValueID() const
Return an ID for the concrete type of this object.
LLVM_ABI void printAsOperand(raw_ostream &O, bool PrintType=true, const Module *M=nullptr) const
Print the name of this Value out to the specified raw_ostream.
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
const ParentTy * getParent() const
This class implements an extremely fast bulk output stream that can only output to a stream.
raw_ostream & indent(unsigned NumSpaces)
indent - Insert 'NumSpaces' spaces.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
const APInt & smin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be signed.
const APInt & smax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be signed.
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
LLVM_ABI std::optional< APInt > SolveQuadraticEquationWrap(APInt A, APInt B, APInt C, unsigned RangeWidth)
Let q(n) = An^2 + Bn + C, and BW = bit width of the value range (e.g.
const APInt & umax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be unsigned.
LLVM_ABI APInt GreatestCommonDivisor(APInt A, APInt B)
Compute GCD of two unsigned APInt values.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
int getMinValue(MCInstrInfo const &MCII, MCInst const &MCI)
Return the minimum value of an extendable operand.
@ BasicBlock
Various leaf nodes.
LLVM_ABI Function * getDeclarationIfExists(const Module *M, ID id)
Look up the Function declaration of the intrinsic id in the Module M and return it if it exists.
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
bool match(Val *V, const Pattern &P)
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
bind_ty< WithOverflowInst > m_WithOverflowInst(WithOverflowInst *&I)
Match a with overflow intrinsic, capturing it if we match.
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
class_match< BasicBlock > m_BasicBlock()
Match an arbitrary basic block value and ignore it.
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
class_match< const SCEVVScale > m_SCEVVScale()
bind_cst_ty m_scev_APInt(const APInt *&C)
Match an SCEV constant and bind it to an APInt.
cst_pred_ty< is_all_ones > m_scev_AllOnes()
Match an integer with all bits set.
SCEVUnaryExpr_match< SCEVZeroExtendExpr, Op0_t > m_scev_ZExt(const Op0_t &Op0)
is_undef_or_poison m_scev_UndefOrPoison()
Match an SCEVUnknown wrapping undef or poison.
class_match< const SCEVConstant > m_SCEVConstant()
cst_pred_ty< is_one > m_scev_One()
Match an integer 1.
specificloop_ty m_SpecificLoop(const Loop *L)
SCEVAffineAddRec_match< Op0_t, Op1_t, class_match< const Loop > > m_scev_AffineAddRec(const Op0_t &Op0, const Op1_t &Op1)
bind_ty< const SCEVMulExpr > m_scev_Mul(const SCEVMulExpr *&V)
SCEVUnaryExpr_match< SCEVSignExtendExpr, Op0_t > m_scev_SExt(const Op0_t &Op0)
cst_pred_ty< is_zero > m_scev_Zero()
Match an integer 0.
SCEVUnaryExpr_match< SCEVTruncateExpr, Op0_t > m_scev_Trunc(const Op0_t &Op0)
bool match(const SCEV *S, const Pattern &P)
SCEVBinaryExpr_match< SCEVUDivExpr, Op0_t, Op1_t > m_scev_UDiv(const Op0_t &Op0, const Op1_t &Op1)
specificscev_ty m_scev_Specific(const SCEV *S)
Match if we have a specific specified SCEV.
SCEVBinaryExpr_match< SCEVMulExpr, Op0_t, Op1_t, SCEV::FlagNUW, true > m_scev_c_NUWMul(const Op0_t &Op0, const Op1_t &Op1)
class_match< const Loop > m_Loop()
bind_ty< const SCEVAddExpr > m_scev_Add(const SCEVAddExpr *&V)
bind_ty< const SCEVUnknown > m_SCEVUnknown(const SCEVUnknown *&V)
SCEVBinaryExpr_match< SCEVMulExpr, Op0_t, Op1_t, SCEV::FlagAnyWrap, true > m_scev_c_Mul(const Op0_t &Op0, const Op1_t &Op1)
SCEVBinaryExpr_match< SCEVSMaxExpr, Op0_t, Op1_t > m_scev_SMax(const Op0_t &Op0, const Op1_t &Op1)
SCEVURem_match< Op0_t, Op1_t > m_scev_URem(Op0_t LHS, Op1_t RHS, ScalarEvolution &SE)
Match the mathematical pattern A - (A / B) * B, where A and B can be arbitrary expressions.
class_match< const SCEV > m_SCEV()
@ Valid
The data is already valid.
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
@ Switch
The "resume-switch" lowering, where there are separate resume and destroy functions that are shared b...
NodeAddr< PhiNode * > Phi
friend class Instruction
Iterator for Instructions in a `BasicBlock.
This is an optimization pass for GlobalISel generic memory operations.
void visitAll(const SCEV *Root, SV &Visitor)
Use SCEVTraversal to visit all nodes in the given expression tree.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
FunctionAddr VTableAddr Value
LLVM_ATTRIBUTE_ALWAYS_INLINE DynamicAPInt gcd(const DynamicAPInt &A, const DynamicAPInt &B)
void stable_sort(R &&Range)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
SaveAndRestore(T &) -> SaveAndRestore< T >
Printable print(const GCNRegPressure &RP, const GCNSubtarget *ST=nullptr, unsigned DynamicVGPRBlockSize=0)
LLVM_ABI bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
LLVM_ABI bool mustTriggerUB(const Instruction *I, const SmallPtrSetImpl< const Value * > &KnownPoison)
Return true if the given instruction must trigger undefined behavior when I is executed with any oper...
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
LLVM_ABI bool canConstantFoldCallTo(const CallBase *Call, const Function *F)
canConstantFoldCallTo - Return true if its even possible to fold a call to the specified function.
InterleavedRange< Range > interleaved(const Range &R, StringRef Separator=", ", StringRef Prefix="", StringRef Suffix="")
Output range R as a sequence of interleaved elements.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
auto successors(const MachineBasicBlock *BB)
constexpr from_range_t from_range
auto dyn_cast_if_present(const Y &Val)
dyn_cast_if_present<X> - Functionally identical to dyn_cast, except that a null (or none in the case ...
bool set_is_subset(const S1Ty &S1, const S2Ty &S2)
set_is_subset(A, B) - Return true iff A in B
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
unsigned short computeExpressionSize(ArrayRef< const SCEV * > Args)
auto uninitialized_copy(R &&Src, IterTy Dst)
bool isa_and_nonnull(const Y &Val)
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
LLVM_ABI bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO, const DominatorTree &DT)
Returns true if the arithmetic part of the WO 's result is used only along the paths control dependen...
DomTreeNodeBase< BasicBlock > DomTreeNode
LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
auto dyn_cast_or_null(const Y &Val)
void erase(Container &C, ValueType V)
Wrapper function to remove a value from a container:
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
iterator_range< pointee_iterator< WrappedIteratorT > > make_pointee_range(RangeT &&Range)
auto reverse(ContainerTy &&C)
LLVM_ABI bool isMustProgress(const Loop *L)
Return true if this loop can be assumed to make progress.
LLVM_ABI bool impliesPoison(const Value *ValAssumedPoison, const Value *V)
Return true if V is poison given that ValAssumedPoison is already poison.
LLVM_ABI bool isFinite(const Loop *L)
Return true if this loop can be assumed to run for a finite number of iterations.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
LLVM_ABI bool programUndefinedIfPoison(const Instruction *Inst)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool isPointerTy(const Type *T)
FunctionAddr VTableAddr Count
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
LLVM_ABI bool propagatesPoison(const Use &PoisonOp)
Return true if PoisonOp's user yields poison or raises UB if its operand PoisonOp is poison.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Mul
Product of integers.
@ SMax
Signed integer max implemented in terms of select(cmp()).
@ SMin
Signed integer min implemented in terms of select(cmp()).
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
auto count(R &&Range, const E &Element)
Wrapper function around std::count to count the number of times an element Element occurs in the give...
DWARFExpression::Operation Op
auto max_element(R &&Range)
Provide wrappers to std::max_element which take ranges instead of having to pass begin/end explicitly...
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return the number of times the sign bit of the register is replicated into the other bits.
constexpr unsigned BitWidth
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
constexpr bool isIntN(unsigned N, int64_t x)
Checks if a signed integer fits into the given (dynamic) bit width.
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
iterator_range< df_iterator< T > > depth_first(const T &G)
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
LLVM_ABI bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
LLVM_ABI Constant * ConstantFoldInstOperands(const Instruction *I, ArrayRef< Constant * > Ops, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldInstOperands - Attempt to constant fold an instruction with the specified operands.
bool SCEVExprContains(const SCEV *Root, PredTy Pred)
Return true if any node in Root satisfies the predicate Pred.
Implement std::hash so that hash_code can be used in STL containers.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
A special type used by analysis passes to provide an address that identifies that particular analysis...
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
bool isNonNegative() const
Returns true if this value is known to be non-negative.
static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
unsigned getBitWidth() const
Get the bit width of this value.
static LLVM_ABI KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for lshr(LHS, RHS).
KnownBits zextOrTrunc(unsigned BitWidth) const
Return known bits for a zero extension or truncation of the value we're tracking.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
bool isNegative() const
Returns true if this value is known to be negative.
static LLVM_ABI KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
An object of this class is returned by queries that could not be answered.
LLVM_ABI SCEVCouldNotCompute()
static LLVM_ABI bool classof(const SCEV *S)
Methods for support type inquiry through isa, cast, and dyn_cast:
This class defines a simple visitor class that may be used for various SCEV analysis purposes.
A utility class that uses RAII to save and restore the value of a variable.
Information about the number of loop iterations for which a loop exit's branch condition evaluates to...
LLVM_ABI ExitLimit(const SCEV *E)
Construct either an exact exit limit from a constant, or an unknown one from a SCEVCouldNotCompute.
const SCEV * ExactNotTaken
const SCEV * SymbolicMaxNotTaken
SmallVector< const SCEVPredicate *, 4 > Predicates
A vector of predicate guards for this ExitLimit.
const SCEV * ConstantMaxNotTaken