31 #ifdef LLVM_ENABLE_ABI_BREAKING_CHECKS
32 #define SCEV_DEBUG_WITH_TYPE(TYPE, X) DEBUG_WITH_TYPE(TYPE, X)
34 #define SCEV_DEBUG_WITH_TYPE(TYPE, X)
41     cl::desc("When performing SCEV expansion only if it is cheap to do, this "
42              "controls the budget that is considered cheap (default = 4)"));
44 using namespace PatternMatch;
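
For orientation, the budget option whose description appears above is an ordinary llvm::cl command-line option. Below is a minimal sketch of such a declaration; only the variable name and description text come from this listing, while the flag string and the cl::Hidden attribute are assumptions.

#include "llvm/Support/CommandLine.h"
using namespace llvm;

// Sketch only: illustrative declaration of the expansion-budget option.
// The flag string "scev-cheap-expansion-budget" is assumed, not taken from
// this listing.
cl::opt<unsigned> SCEVCheapExpansionBudget(
    "scev-cheap-expansion-budget", cl::Hidden, cl::init(4),
    cl::desc("When performing SCEV expansion only if it is cheap to do, this "
             "controls the budget that is considered cheap (default = 4)"));
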
66   for (User *U : V->users()) {
67     if (U->getType() != Ty)
69     CastInst *CI = dyn_cast<CastInst>(U);
75     if (IP->getParent() == CI->getParent() && &*BIP != CI &&
84   SCEVInsertPointGuard Guard(Builder, this);
92   assert(!isa<Instruction>(Ret) ||
93          SE.DT.dominates(cast<Instruction>(Ret), &*BIP));
102   if (auto *II = dyn_cast<InvokeInst>(I))
103     IP = II->getNormalDest()->begin();
105   while (isa<PHINode>(IP))
108   if (isa<FuncletPadInst>(IP) || isa<LandingPadInst>(IP)) {
110   } else if (isa<CatchSwitchInst>(IP)) {
113   assert(!IP->isEHPad() && "unexpected eh pad!");
126 SCEVExpander::GetOptimalInsertionPointForCastOf(Value *V) const {
129   if (Argument *A = dyn_cast<Argument>(V)) {
131     while ((isa<BitCastInst>(IP) &&
132             isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
133             cast<BitCastInst>(IP)->getOperand(0) != A) ||
134            isa<DbgInfoIntrinsic>(IP))
145   assert(isa<Constant>(V) &&
146          "Expected the cast argument to be a global/constant");
158   assert((Op == Instruction::BitCast ||
159           Op == Instruction::PtrToInt ||
160           Op == Instruction::IntToPtr) &&
161          "InsertNoopCastOfTo cannot perform non-noop casts!");
163          "InsertNoopCastOfTo cannot change sizes!");
170   if (Op == Instruction::IntToPtr) {
171     auto *PtrTy = cast<PointerType>(Ty);
173     auto *Int8PtrTy = Builder.getInt8PtrTy(PtrTy->getAddressSpace());
175            "alloc size of i8 must be 1 byte for the GEP to be correct");
182   if (Op == Instruction::BitCast) {
183     if (V->getType() == Ty)
185     if (CastInst *CI = dyn_cast<CastInst>(V)) {
191   if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
193     if (CastInst *CI = dyn_cast<CastInst>(V))
194       if ((CI->getOpcode() == Instruction::PtrToInt ||
195            CI->getOpcode() == Instruction::IntToPtr) &&
200       if ((CE->getOpcode() == Instruction::PtrToInt ||
201            CE->getOpcode() == Instruction::IntToPtr) &&
204         return CE->getOperand(0);
212   return ReuseOrCreateCast(V, Ty, Op, GetOptimalInsertionPointForCastOf(V));
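212

The asserts above pin down what InsertNoopCastOfTo accepts: only bitcast, ptrtoint, and inttoptr, and only when the bit width is unchanged. The following standalone sketch restates that invariant through the generic CastInst::isNoopCast helper; the helper, the function name, and its parameters are illustrative assumptions, not code from this file.

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// Sketch: accept only casts that cannot change the underlying bits.
// SrcTy, DstTy and DL are placeholders supplied by a hypothetical caller.
static bool isNoopCastOpcode(Instruction::CastOps Op, Type *SrcTy, Type *DstTy,
                             const DataLayout &DL) {
  if (Op != Instruction::BitCast && Op != Instruction::PtrToInt &&
      Op != Instruction::IntToPtr)
    return false;
  // isNoopCast returns true only when the cast preserves the bit pattern.
  return CastInst::isNoopCast(Op, SrcTy, DstTy, DL);
}
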
222   if (Constant *CLHS = dyn_cast<Constant>(LHS))
223     if (Constant *CRHS = dyn_cast<Constant>(RHS))
228   unsigned ScanLimit = 6;
232   if (IP != BlockBegin) {
234     for (; ScanLimit; --IP, --ScanLimit) {
237       if (isa<DbgInfoIntrinsic>(IP))
242       if (isa<OverflowingBinaryOperator>(I)) {
250       if (isa<PossiblyExactOperator>(I) && I->isExact())
254       if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
255           IP->getOperand(1) == RHS && !canGenerateIncompatiblePoison(&*IP))
257       if (IP == BlockBegin) break;
263   SCEVInsertPointGuard Guard(Builder, this);
268     if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
270     if (!Preheader) break;
313   if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
323         Remainder, SE.getConstant(C->getAPInt().srem(FC->getAPInt())));
331   if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
334     if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor))
335       if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
336         if (!C->getAPInt().srem(FC->getAPInt())) {
338           NewMulOps[0] = SE.getConstant(C->getAPInt().sdiv(FC->getAPInt()));
346     const SCEV *Step = A->getStepRecurrence(SE);
352     const SCEV *Start = A->getStart();
370   unsigned NumAddRecs = 0;
371   for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
401   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
403       const SCEV *Start = A->getStart();
404       if (Start->isZero()) break;
407                            A->getStepRecurrence(SE),
413         e += Add->getNumOperands();
418   if (!AddRecs.empty()) {
453 Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
454                                     const SCEV *const *op_end,
460   bool AnyNonZeroIndices = false;
469   if (!PTy->isOpaque()) {
475     Type *ElTy = PTy->getNonOpaquePointerElementType();
484       for (const SCEV *Op : Ops) {
491           AnyNonZeroIndices = true;
499       if (!ScaledOps.empty()) {
513               : expandCodeForImpl(SE.getAddExpr(ScaledOps), Ty);
517     while (StructType *STy = dyn_cast<StructType>(ElTy)) {
518       bool FoundFieldNo = false;
520       if (STy->getNumElements() == 0) break;
526             !STy->containsScalableVectorType() &&
527             "GEPs are not supported on structures containing scalable vectors");
531           uint64_t FullOffset = C->getValue()->getZExtValue();
536             ElTy = STy->getTypeAtIndex(ElIdx);
539             AnyNonZeroIndices = true;
547         ElTy = STy->getTypeAtIndex(0u);
553     if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
554       ElTy = ATy->getElementType();
567   if (!AnyNonZeroIndices) {
569     if (!PTy->isOpaque())
570       V = InsertNoopCastOfTo(V,
573     assert(!isa<Instruction>(V) ||
580     if (Constant *CLHS = dyn_cast<Constant>(V))
585     unsigned ScanLimit = 6;
589     if (IP != BlockBegin) {
591       for (; ScanLimit; --IP, --ScanLimit) {
594         if (isa<DbgInfoIntrinsic>(IP))
596         if (IP->getOpcode() == Instruction::GetElementPtr &&
597             IP->getOperand(0) == V && IP->getOperand(1) == Idx &&
598             cast<GEPOperator>(&*IP)->getSourceElementType() ==
601         if (IP == BlockBegin) break;
606       SCEVInsertPointGuard Guard(Builder, this);
610         if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
612         if (!Preheader) break;
623   SCEVInsertPointGuard Guard(Builder, this);
627       if (!L->isLoopInvariant(V)) break;
629       bool AnyIndexNotLoopInvariant = any_of(
630           GepIndices, [L](Value *Op) { return !L->isLoopInvariant(Op); });
632       if (AnyIndexNotLoopInvariant)
636       if (!Preheader) break;
646   if (V->getType() != PTy)
647     Casted = InsertNoopCastOfTo(Casted, PTy);
649                                  Casted, GepIndices, "scevgep");
658   const SCEV *const Ops[1] = {Op};
659   return expandAddToGEP(Ops, Ops + 1, PTy, Ty, V);
669   if (A->contains(B)) return B;
670   if (B->contains(A)) return A;
671   if (DT.dominates(A->getHeader(), B->getHeader())) return B;
672   if (DT.dominates(B->getHeader(), A->getHeader())) return A;
678 const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
680   auto Pair = RelevantLoops.insert(std::make_pair(S, nullptr));
682     return Pair.first->second;
701     const Loop *L = nullptr;
706     return RelevantLoops[S] = L;
710     if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
711       return Pair.first->second = SE.LI.getLoopFor(I->getParent());
729   bool operator()(std::pair<const Loop *, const SCEV *> LHS,
730                   std::pair<const Loop *, const SCEV *> RHS) const {
737     if (LHS.first != RHS.first)
743     if (LHS.second->isNonConstantNegative()) {
744       if (!RHS.second->isNonConstantNegative())
746     } else if (RHS.second->isNonConstantNegative())
765     OpsAndLoops.push_back(std::make_pair(getRelevantLoop(Op), Op));
773   Value *Sum = nullptr;
774   for (auto I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E;) {
775     const Loop *CurLoop = I->first;
784       assert(!Op->getType()->isPointerTy() && "Only first op can be pointer");
789       for (; I != E && I->first == CurLoop; ++I) {
792         const SCEV *X = I->second;
794           if (!isa<Instruction>(U->getValue()))
798       Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
799     } else if (Op->isNonConstantNegative()) {
802       Sum = InsertNoopCastOfTo(Sum, Ty);
808       Value *W = expandCodeForImpl(Op, Ty);
809       Sum = InsertNoopCastOfTo(Sum, Ty);
811       if (isa<Constant>(Sum)) std::swap(Sum, W);
828     OpsAndLoops.push_back(std::make_pair(getRelevantLoop(Op), Op));
835   Value *Prod = nullptr;
836   auto I = OpsAndLoops.begin();
841   const auto ExpandOpBinPowN = [this, &I, &OpsAndLoops, &Ty]() {
851     while (E != OpsAndLoops.end() && *I == *E && Exponent != MaxExponent) {
855     assert(Exponent > 0 && "Trying to calculate a zeroth exponent of operand?");
859     Value *P = expandCodeForImpl(I->second, Ty);
874     assert(Result && "Nothing was expanded?");
878   while (I != OpsAndLoops.end()) {
881       Prod = ExpandOpBinPowN();
882     } else if (I->second->isAllOnesValue()) {
884       Prod = InsertNoopCastOfTo(Prod, Ty);
890       Value *W = ExpandOpBinPowN();
891       Prod = InsertNoopCastOfTo(Prod, Ty);
893       if (isa<Constant>(Prod)) std::swap(Prod, W);
900         if (RHS->logBase2() == RHS->getBitWidth() - 1)
902           Prod = InsertBinop(Instruction::Shl, Prod,
906         Prod = InsertBinop(Instruction::Mul, Prod, W, S->getNoWrapFlags(),
921     if (RHS.isPowerOf2())
922       return InsertBinop(Instruction::LShr, LHS,
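
The last two lines above strength-reduce an unsigned division by a power-of-two constant into a logical shift right. A small self-contained illustration of the underlying arithmetic with APInt follows; the values and the function name are made up for the example.

#include "llvm/ADT/APInt.h"
using namespace llvm;

// X udiv 8 can be emitted as X lshr 3; logBase2 gives the shift amount.
void powerOfTwoDivisionExample() {
  APInt Divisor(/*numBits=*/32, /*val=*/8);
  if (Divisor.isPowerOf2()) {
    unsigned ShiftAmt = Divisor.logBase2(); // == 3
    (void)ShiftAmt;
  }
}
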
937       (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
942   if (L == IVIncInsertLoop) {
944     if (Instruction *OInst = dyn_cast<Instruction>(Op))
945       if (!SE.DT.dominates(OInst, IVIncInsertPos))
949   IncV = dyn_cast<Instruction>(IncV->getOperand(0));
959   return isNormalAddRecExprPHI(PN, IncV, L);
974   if (IncV == InsertPos)
981   case Instruction::Add:
982   case Instruction::Sub: {
984     if (!OInst || SE.DT.dominates(OInst, InsertPos))
985       return dyn_cast<Instruction>(IncV->getOperand(0));
988   case Instruction::BitCast:
989     return dyn_cast<Instruction>(IncV->getOperand(0));
990   case Instruction::GetElementPtr:
992       if (isa<Constant>(U))
994       if (Instruction *OInst = dyn_cast<Instruction>(U)) {
1008       unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace();
1014     return dyn_cast<Instruction>(IncV->getOperand(0));
1030   for (auto *InsertPtGuard : InsertPointGuards)
1031     if (InsertPtGuard->GetInsertPoint() == It)
1032       InsertPtGuard->SetInsertPoint(NewInsertPt);
1039                          bool RecomputePoisonFlags) {
1043     I->dropPoisonGeneratingFlags();
1044     if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(I))
1046         auto *BO = cast<BinaryOperator>(I);
1055   if (RecomputePoisonFlags)
1056     FixupPoisonFlags(IncV);
1062   if (isa<PHINode>(InsertPos) ||
1082     fixupInsertPoints(I);
1083     I->moveBefore(InsertPos);
1084     if (RecomputePoisonFlags)
1085       FixupPoisonFlags(I);
1098          (IVOper = getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
1115     PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
1118     if (!isa<ConstantInt>(StepV))
1120                           GEPPtrTy->getAddressSpace());
1121     IncV = expandAddToGEP(SE.getSCEV(StepV), GEPPtrTy, IntTy, PN);
1125     IncV = useSubtract ?
1154   if (Phi == Requested) {
1169   if (!isa<IntegerType>(AR->getType()))
1177   const SCEV *ExtendAfterOp =
1179   return ExtendAfterOp == OpAfterExtend;
1183   if (!isa<IntegerType>(AR->getType()))
1191   const SCEV *ExtendAfterOp =
1193   return ExtendAfterOp == OpAfterExtend;
1200 SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
1206   assert((!IVIncInsertLoop || IVIncInsertPos) && "Uninitialized insert position");
1211   PHINode *AddRecPhiMatch = nullptr;
1218   bool TryNonMatchingSCEV =
1222   for (PHINode &PN : L->getHeader()->phis()) {
1230           DebugType, dbgs() << "One incomplete PHI is found: " << PN << "\n");
1238     bool IsMatchingSCEV = PhiSCEV == Normalized;
1242     if (!IsMatchingSCEV && !TryNonMatchingSCEV)
1253       if (!isExpandedAddRecExprPHI(&PN, TempIncV, L))
1256       if (!isNormalAddRecExprPHI(&PN, TempIncV, L))
1261     if (IsMatchingSCEV) {
1265       AddRecPhiMatch = &PN;
1271     if ((!TruncTy || InvertStep) &&
1275       AddRecPhiMatch = &PN;
1281   if (AddRecPhiMatch) {
1284     InsertedValues.insert(AddRecPhiMatch);
1286       rememberInstruction(IncV);
1288     ReusedValues.insert(AddRecPhiMatch);
1289     ReusedValues.insert(IncV);
1290     return AddRecPhiMatch;
1295   SCEVInsertPointGuard Guard(Builder, this);
1305   PostIncLoops.clear();
1308   assert(L->getLoopPreheader() &&
1309          "Can't expand add recurrences without a loop preheader!");
1311       expandCodeForImpl(Normalized->getStart(), ExpandTy,
1312                         L->getLoopPreheader()->getTerminator());
1316   assert(!isa<Instruction>(StartV) ||
1330   Value *StepV = expandCodeForImpl(
1331       Step, IntTy, &*L->getHeader()->getFirstInsertionPt());
1336   bool IncrementIsNUW = !useSubtract && IsIncrementNUW(SE, Normalized);
1337   bool IncrementIsNSW = !useSubtract && IsIncrementNSW(SE, Normalized);
1344                        Twine(IVName) + ".iv");
1351     if (!L->contains(Pred)) {
1362   Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
1364   if (isa<OverflowingBinaryOperator>(IncV)) {
1366       cast<BinaryOperator>(IncV)->setHasNoUnsignedWrap();
1368       cast<BinaryOperator>(IncV)->setHasNoSignedWrap();
1375   PostIncLoops = SavedPostIncLoops;
1379   InsertedValues.insert(PN);
1380   InsertedIVs.push_back(PN);
1392   if (PostIncLoops.count(L)) {
1400   const SCEV *PostLoopOffset = nullptr;
1402     PostLoopOffset = Start;
1404     Normalized = cast<SCEVAddRecExpr>(
1412   const SCEV *PostLoopScale = nullptr;
1414     PostLoopScale = Step;
1416   if (!Start->isZero()) {
1419     assert(!PostLoopOffset && "Start not-null but PostLoopOffset set?");
1420     PostLoopOffset = Start;
1425         Start, Step, Normalized->getLoop(),
1431   Type *ExpandTy = PostLoopScale ? IntTy : STy;
1434   Type *AddRecPHIExpandTy =
1439   Type *TruncTy = nullptr;
1440   bool InvertStep = false;
1441   PHINode *PN = getAddRecExprPHILiterally(Normalized, L, AddRecPHIExpandTy,
1442                                           IntTy, TruncTy, InvertStep);
1446   if (!PostIncLoops.count(L))
1451     assert(LatchBlock && "PostInc mode requires a unique loop latch!");
1457     if (isa<OverflowingBinaryOperator>(Result)) {
1458       auto *I = cast<Instruction>(Result);
1460         I->setHasNoUnsignedWrap(false);
1462         I->setHasNoSignedWrap(false);
1468     if (isa<Instruction>(Result) &&
1469         !SE.DT.dominates(cast<Instruction>(Result),
1487       SCEVInsertPointGuard Guard(Builder, this);
1488       StepV = expandCodeForImpl(
1489           Step, IntTy, &*L->getHeader()->getFirstInsertionPt());
1491       Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
1503     if (TruncTy != Result->getType())
1509           expandCodeForImpl(Normalized->getStart(), TruncTy), Result);
1513   if (PostLoopScale) {
1514     assert(S->isAffine() && "Can't linearly scale non-affine recurrences.");
1515     Result = InsertNoopCastOfTo(Result, IntTy);
1517         expandCodeForImpl(PostLoopScale, IntTy));
1521   if (PostLoopOffset) {
1522     if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
1523       if (Result->getType()->isIntegerTy()) {
1524         Value *Base = expandCodeForImpl(PostLoopOffset, ExpandTy);
1527         Result = expandAddToGEP(PostLoopOffset, PTy, IntTy, Result);
1530       Result = InsertNoopCastOfTo(Result, IntTy);
1532           Result, expandCodeForImpl(PostLoopOffset, IntTy));
1551   return expandAddRecExprLiterally(S);
1557   PHINode *CanonicalIV = nullptr;
1558   if (PHINode *PN = L->getCanonicalInductionVariable())
1583       assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
1598     return expand(SE.getAddExpr(AddExprLHS, AddExprRHS));
1609   rememberInstruction(CanonicalIV);
1615     if (!PredSeen.insert(HP).second) {
1622     if (L->contains(HP)) {
1629       rememberInstruction(Add);
1640          "IVs with types different from the canonical IV should "
1641          "already have been handled!");
1663   const SCEV *NewS = S;
1665   if (isa<SCEVAddRecExpr>(Ext))
1668   const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
1678   return ReuseOrCreateCast(V, S->getType(), CastInst::PtrToInt,
1679                            GetOptimalInsertionPointForCastOf(V));
1684   Value *V = expandCodeForImpl(
1692   Value *V = expandCodeForImpl(
1700   Value *V = expandCodeForImpl(
1708                                        bool IsSequential) {
1715     if (IsSequential && i != 0)
1732   return expandMinMaxExpr(S, Intrinsic::smax, "smax");
1736   return expandMinMaxExpr(S, Intrinsic::umax, "umax");
1740   return expandMinMaxExpr(S, Intrinsic::smin, "smin");
1744   return expandMinMaxExpr(S, Intrinsic::umin, "umin");
1748   return expandMinMaxExpr(S, Intrinsic::umin, "umin", true);
1755 Value *SCEVExpander::expandCodeForImpl(const SCEV *SH, Type *Ty,
1758   Value *V = expandCodeForImpl(SH, Ty);
1762 Value *SCEVExpander::expandCodeForImpl(const SCEV *SH, Type *Ty) {
1768            "non-trivial casts should be done with the SCEVs directly!");
1769     V = InsertNoopCastOfTo(V, Ty);
1774 Value *SCEVExpander::FindValueInExprValueMap(const SCEV *S,
1782   if (isa<SCEVConstant>(S))
1788   for (Value *V : SE.getSCEVValues(S)) {
1794     if (S->getType() == V->getType() &&
1809 Value *SCEVExpander::expand(const SCEV *S) {
1816   auto SafeToHoist = [](const SCEV *S) {
1818       if (const auto *D = dyn_cast<SCEVUDivExpr>(S)) {
1819         if (const auto *SC = dyn_cast<SCEVConstant>(D->getRHS()))
1821           return SC->getValue()->isZero();
1831   if (SafeToHoist(S)) {
1833          L = L->getParentLoop()) {
1836         if (BasicBlock *Preheader = L->getLoopPreheader())
1842           InsertPt = &*L->getHeader()->getFirstInsertionPt();
1848         InsertPt = &*L->getHeader()->getFirstInsertionPt();
1852          isa<DbgInfoIntrinsic>(InsertPt))) {
1861   auto I = InsertedExpressions.find(std::make_pair(S, InsertPt));
1862   if (I != InsertedExpressions.end())
1865   SCEVInsertPointGuard Guard(Builder, this);
1869   Value *V = FindValueInExprValueMap(S, InsertPt);
1872     V = fixupLCSSAFormFor(V);
1878     if (auto *I = dyn_cast<Instruction>(V))
1880       I->dropPoisonGeneratingFlags();
1888   InsertedExpressions[std::make_pair(S, InsertPt)] = V;
1892 void SCEVExpander::rememberInstruction(Value *I) {
1893   auto DoInsert = [this](Value *V) {
1894     if (!PostIncLoops.empty())
1895       InsertedPostIncValues.insert(V);
1897       InsertedValues.insert(V);
1914   for (PHINode &PN : L->getHeader()->phis())
1928   unsigned NumElim = 0;
1938       auto *Const = dyn_cast<SCEVConstant>(SE.getSCEV(PN));
1941       return Const->getValue();
1946     if (Value *V = SimplifyPHINode(Phi)) {
1947       if (V->getType() != Phi->getType())
1950       Phi->replaceAllUsesWith(V);
1954                            dbgs() << "INDVARS: Eliminated constant iv: " << *Phi
1965     if (Phi->getType()->isIntegerTy() && TTI &&
1969       const SCEV *TruncExpr =
1971       ExprToIVMap[TruncExpr] = Phi;
1981     if (BasicBlock *LatchBlock = L->getLoopLatch()) {
1985           dyn_cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));
1987       if (OrigInc && IsomorphicInc) {
1991         if (OrigPhiRef->getType() == Phi->getType() &&
1992             !(ChainedPhis.count(Phi) ||
1993               isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L)) &&
1994             (ChainedPhis.count(Phi) ||
1995              isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
2008           const SCEV *TruncExpr =
2010           if (OrigInc != IsomorphicInc &&
2011               TruncExpr == SE.getSCEV(IsomorphicInc) &&
2015                 DebugType, dbgs() << "INDVARS: Eliminated congruent iv.inc: "
2016                                   << *IsomorphicInc << '\n');
2017             Value *NewInc = OrigInc;
2020             if (PHINode *PN = dyn_cast<PHINode>(OrigInc))
2027               NewInc = Builder.CreateTruncOrBitCast(
2028                   OrigInc, IsomorphicInc->getType(), IVName);
2036           dbgs() << "INDVARS: Eliminated congruent iv: " << *Phi
2039           DebugType, dbgs() << "INDVARS: Original iv: " << *OrigPhiRef << '\n');
2041       Value *NewIV = OrigPhiRef;
2042       if (OrigPhiRef->getType() != Phi->getType()) {
2043         IRBuilder<> Builder(&*L->getHeader()->getFirstInsertionPt());
2044         Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
2045         NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
2047       Phi->replaceAllUsesWith(NewIV);
2059   L->getExitingBlocks(ExitingBlocks);
2066     if (!match(BB->getTerminator(),
2082   return FindValueInExprValueMap(S, At);
2093   struct OperationIndices {
2094     OperationIndices(unsigned Opc, size_t min, size_t max) :
2095         Opcode(Opc), MinIdx(min), MaxIdx(max) { }
2109                               S->getOperand(0)->getType(),
2113   auto ArithCost = [&](unsigned Opcode, unsigned NumRequired,
2114                        unsigned MinIdx = 0,
2117     return NumRequired *
2121   auto CmpSelCost = [&](unsigned Opcode, unsigned NumRequired, unsigned MinIdx,
2124   Type *OpType = S->getType();
2130   switch (S->getSCEVType()) {
2138     Cost = CastCost(Instruction::PtrToInt);
2141     Cost = CastCost(Instruction::Trunc);
2144     Cost = CastCost(Instruction::ZExt);
2147     Cost = CastCost(Instruction::SExt);
2150     unsigned Opcode = Instruction::UDiv;
2151     if (auto *SC = dyn_cast<SCEVConstant>(S->getOperand(1)))
2152       if (SC->getAPInt().isPowerOf2())
2153         Opcode = Instruction::LShr;
2154     Cost = ArithCost(Opcode, 1);
2158     Cost = ArithCost(Instruction::Add, S->getNumOperands() - 1);
2164     Cost = ArithCost(Instruction::Mul, S->getNumOperands() - 1);
2173     Cost += CmpSelCost(Instruction::ICmp, S->getNumOperands() - 1, 0, 1);
2174     Cost += CmpSelCost(Instruction::Select, S->getNumOperands() - 1, 0, 2);
2175     switch (S->getSCEVType()) {
2179       Cost += CmpSelCost(Instruction::ICmp, S->getNumOperands() - 1, 0, 0);
2180       Cost += ArithCost(Instruction::Or,
2181                         S->getNumOperands() > 2 ? S->getNumOperands() - 2 : 0);
2182       Cost += CmpSelCost(Instruction::Select, 1, 0, 1);
2186       assert(!isa<SCEVSequentialMinMaxExpr>(S) &&
2187              "Unhandled SCEV expression type?");
2196       return !Op->isZero();
2199     assert(NumTerms >= 1 && "Polynomial should have at least one term.");
2200     assert(!(*std::prev(S->operands().end()))->isZero() &&
2201            "Last operand should not be zero");
2204     int NumNonZeroDegreeNonOneTerms =
2206           auto *SConst = dyn_cast<SCEVConstant>(Op);
2207           return !SConst || SConst->getAPInt().ugt(1);
2216         ArithCost(Instruction::Mul, NumNonZeroDegreeNonOneTerms);
2217     Cost = AddCost + MulCost;
2220     int PolyDegree = S->getNumOperands() - 1;
2221     assert(PolyDegree >= 1 && "Should be at least affine.");
2229     Cost += MulCost * (PolyDegree - 1);
2234   for (auto &CostOp : Operations) {
2235     for (auto SCEVOp : enumerate(S->operands())) {
2237       size_t MinIdx = std::max(SCEVOp.index(), CostOp.MinIdx);
2238       size_t OpIdx = std::min(MinIdx, CostOp.MaxIdx);
2239       Worklist.emplace_back(CostOp.Opcode, OpIdx, SCEVOp.value());
2245 bool SCEVExpander::isHighCostExpansionHelper(
2255   if (!isa<SCEVConstant>(S) && !Processed.insert(S).second)
2264       L->getHeader()->getParent()->hasMinSize()
2279     const APInt &Imm = cast<SCEVConstant>(S)->getAPInt();
2283     return Cost > Budget;
2317     assert(cast<SCEVNAryExpr>(S)->getNumOperands() > 1 &&
2318            "Nary expr should have more than 1 operand.");
2323     return Cost > Budget;
2326     assert(cast<SCEVAddRecExpr>(S)->getNumOperands() >= 2 &&
2327            "Polynomial should be at least linear");
2328     Cost += costAndCollectOperands<SCEVAddRecExpr>(
2330     return Cost > Budget;
2345   auto *AddRecPred = cast<SCEVWrapPredicate>(Pred);
2361   auto *I = Builder.CreateICmp(InvPred, Expr0, Expr1, "ident.check");
2368          "non-affine expression");
2372   const SCEV *ExitCount =
2375   assert(!isa<SCEVCouldNotCompute>(ExitCount) && "Invalid loop count");
2391   Value *TripCountVal = expandCodeForImpl(ExitCount, CountTy, Loc);
2396   Value *StepValue = expandCodeForImpl(Step, Ty, Loc);
2397   Value *NegStepValue =
2399   Value *StartValue = expandCodeForImpl(Start, ARTy, Loc);
2417   auto ComputeEndCheck = [&]() -> Value * {
2425     Value *MulV, *OfMul;
2426     if (Step->isOne()) {
2430       MulV = TruncTripCount;
2434           Intrinsic::umul_with_overflow, Ty);
2436           Builder.CreateCall(MulF, {AbsStep, TruncTripCount}, "mul");
2441     Value *Add = nullptr, *Sub = nullptr;
2445     if (PointerType *ARPtrTy = dyn_cast<PointerType>(ARTy)) {
2446       StartValue = InsertNoopCastOfTo(
2447           StartValue, Builder.getInt8PtrTy(ARPtrTy->getAddressSpace()));
2457         Sub = Builder.CreateSub(StartValue, MulV);
2460     Value *EndCompareLT = nullptr;
2461     Value *EndCompareGT = nullptr;
2462     Value *EndCheck = nullptr;
2464       EndCheck = EndCompareLT = Builder.CreateICmp(
2467       EndCheck = EndCompareGT = Builder.CreateICmp(
2469     if (NeedPosCheck && NeedNegCheck) {
2471       EndCheck = Builder.CreateSelect(StepCompare, EndCompareGT, EndCompareLT);
2473     return Builder.CreateOr(EndCheck, OfMul);
2475   Value *EndCheck = ComputeEndCheck();
2482     auto *BackedgeCheck =
2488     EndCheck = Builder.CreateOr(EndCheck, BackedgeCheck);
2496   const auto *A = cast<SCEVAddRecExpr>(Pred->getExpr());
2497   Value *NSSWCheck = nullptr, *NUSWCheck = nullptr;
2507   if (NUSWCheck && NSSWCheck)
2508     return Builder.CreateOr(NUSWCheck, NSSWCheck);
2523   for (const auto *Pred : Union->getPredicates()) {
2533 Value *SCEVExpander::fixupLCSSAFormFor(Value *V) {
2534   auto *DefI = dyn_cast<Instruction>(V);
2535   if (!PreserveLCSSA || !DefI)
2541   if (!DefLoop || UseLoop == DefLoop || DefLoop->contains(UseLoop))
2552   if (DefI->getType()->isIntegerTy())
2558   auto RemoveUserOnExit =
2567   for (PHINode *PN : InsertedPHIs)
2568     rememberInstruction(PN);
2569   for (PHINode *PN : PHIsToRemove) {
2572     InsertedValues.erase(PN);
2573     InsertedPostIncValues.erase(PN);
2599 struct SCEVFindUnsafe {
2602   bool IsUnsafe = false;
2605       : SE(SE), CanonicalMode(CanonicalMode) {}
2607   bool follow(const SCEV *S) {
2615       const SCEV *Step = AR->getStepRecurrence(SE);
2616       if (!AR->isAffine() && !SE.dominates(Step, AR->getLoop()->getHeader())) {
2623       if (!AR->getLoop()->getLoopPreheader() &&
2624           (!CanonicalMode || !AR->isAffine())) {
2631   bool isDone() const { return IsUnsafe; }
2636   SCEVFindUnsafe Search(SE, CanonicalMode);
2638   return !Search.IsUnsafe;
2656     if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S))
2671                                InsertedInstructions.end());
2681                       [&InsertedSet](Value *U) {
2682                         return InsertedSet.contains(cast<Instruction>(U));
2684            "removed instruction should only be used by instructions inserted "
2685            "during expansion");
2687     assert(!I->getType()->isVoidTy() &&
2688            "inserted instruction should have non-void types");
2690     I->eraseFromParent();
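
For completeness, here is a hedged sketch of how a client typically drives this class: construct a SCEVExpander, ask ScalarEvolution for an expression, and materialize it before a chosen instruction. SE, DL, V, and InsertBefore are placeholders, and the expandCodeFor and isSafeToExpand entry points are assumed public API rather than taken from the excerpts above.

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
using namespace llvm;

// Sketch of typical client usage; all parameters are placeholders.
Value *materialize(ScalarEvolution &SE, const DataLayout &DL, Value *V,
                   Instruction *InsertBefore) {
  SCEVExpander Expander(SE, DL, "expander");
  const SCEV *Expr = SE.getSCEV(V);
  if (!Expander.isSafeToExpand(Expr))
    return nullptr; // the caller must cope with unexpandable expressions
  return Expander.expandCodeFor(Expr, Expr->getType(), InsertBefore);
}
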