32#if LLVM_ENABLE_ABI_BREAKING_CHECKS
33#define SCEV_DEBUG_WITH_TYPE(TYPE, X) DEBUG_WITH_TYPE(TYPE, X)
35#define SCEV_DEBUG_WITH_TYPE(TYPE, X)
42 cl::desc(
"When performing SCEV expansion only if it is cheap to do, this "
43 "controls the budget that is considered cheap (default = 4)"));
57 NUW = OBO->hasNoUnsignedWrap();
58 NSW = OBO->hasNoSignedWrap();
61 Exact = PEO->isExact();
65 NNeg = PNI->hasNonNeg();
67 NUW = TI->hasNoUnsignedWrap();
68 NSW = TI->hasNoSignedWrap();
78 I->setHasNoUnsignedWrap(
NUW);
79 I->setHasNoSignedWrap(
NSW);
88 I->setHasNoUnsignedWrap(
NUW);
89 I->setHasNoSignedWrap(
NSW);
114 Value *Ret =
nullptr;
119 if (U->getType() != Ty)
128 if (IP->getParent() == CI->
getParent() && &*BIP != CI &&
138 SCEVInsertPointGuard Guard(Builder,
this);
139 Builder.SetInsertPoint(&*IP);
140 Ret = Builder.CreateCast(
Op, V, Ty,
V->getName());
157 IP =
II->getNormalDest()->begin();
165 IP = MustDominate->
getParent()->getFirstInsertionPt();
167 assert(!IP->isEHPad() &&
"unexpected eh pad!");
183 while (!WorkList.
empty()) {
192 InsertedValues.erase(
I);
193 InsertedPostIncValues.erase(
I);
195 I->eraseFromParent();
200SCEVExpander::GetOptimalInsertionPointForCastOf(
Value *V)
const {
219 "Expected the cast argument to be a global/constant");
220 return Builder.GetInsertBlock()
223 .getFirstInsertionPt();
231 assert((
Op == Instruction::BitCast ||
232 Op == Instruction::PtrToInt ||
233 Op == Instruction::IntToPtr) &&
234 "InsertNoopCastOfTo cannot perform non-noop casts!");
235 assert(SE.getTypeSizeInBits(
V->getType()) == SE.getTypeSizeInBits(Ty) &&
236 "InsertNoopCastOfTo cannot change sizes!");
243 if (
Op == Instruction::IntToPtr) {
245 if (DL.isNonIntegralPointerType(PtrTy))
249 if (
Op == Instruction::BitCast) {
250 if (
V->getType() == Ty)
258 if ((
Op == Instruction::PtrToInt ||
Op == Instruction::IntToPtr) &&
259 SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(
V->getType())) {
261 if ((CI->
getOpcode() == Instruction::PtrToInt ||
262 CI->
getOpcode() == Instruction::IntToPtr) &&
263 SE.getTypeSizeInBits(CI->
getType()) ==
267 if ((
CE->getOpcode() == Instruction::PtrToInt ||
268 CE->getOpcode() == Instruction::IntToPtr) &&
269 SE.getTypeSizeInBits(
CE->getType()) ==
270 SE.getTypeSizeInBits(
CE->getOperand(0)->getType()))
271 return CE->getOperand(0);
279 return ReuseOrCreateCast(V, Ty,
Op, GetOptimalInsertionPointForCastOf(V));
295 unsigned ScanLimit = 6;
299 if (IP != BlockBegin) {
301 for (; ScanLimit; --IP, --ScanLimit) {
316 if (IP->getOpcode() == (
unsigned)Opcode && IP->getOperand(0) ==
LHS &&
317 IP->getOperand(1) ==
RHS && !canGenerateIncompatiblePoison(&*IP))
319 if (IP == BlockBegin)
break;
324 DebugLoc Loc = Builder.GetInsertPoint()->getDebugLoc();
325 SCEVInsertPointGuard Guard(Builder,
this);
329 while (
const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
330 if (!
L->isLoopInvariant(
LHS) || !
L->isLoopInvariant(
RHS))
break;
332 if (!Preheader)
break;
387 : GEPNoWrapFlags::
none();
392 return Builder.CreatePtrAdd(CLHS, CRHS,
"", NW);
395 unsigned ScanLimit = 6;
399 if (IP != BlockBegin) {
401 for (; ScanLimit; --IP, --ScanLimit) {
403 if (
GEP->getPointerOperand() == V &&
404 GEP->getSourceElementType() == Builder.getInt8Ty() &&
405 GEP->getOperand(1) == Idx) {
407 GEP->setNoWrapFlags(
GEP->getNoWrapFlags() & NW);
411 if (IP == BlockBegin)
break;
416 SCEVInsertPointGuard Guard(Builder,
this);
419 while (
const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
420 if (!
L->isLoopInvariant(V) || !
L->isLoopInvariant(Idx))
break;
422 if (!Preheader)
break;
429 return Builder.CreatePtrAdd(V, Idx,
"scevgep", NW);
439 if (
A->contains(
B))
return B;
440 if (
B->contains(
A))
return A;
441 if (DT.
dominates(
A->getHeader(),
B->getHeader()))
return B;
442 if (DT.
dominates(
B->getHeader(),
A->getHeader()))
return A;
448const Loop *SCEVExpander::getRelevantLoop(
const SCEV *S) {
450 auto Pair = RelevantLoops.try_emplace(S);
452 return Pair.first->second;
472 const Loop *
L =
nullptr;
477 return RelevantLoops[S] =
L;
482 return Pair.first->second = SE.LI.getLoopFor(
I->getParent());
498 explicit LoopCompare(DominatorTree &dt) : DT(dt) {}
500 bool operator()(std::pair<const Loop *, const SCEV *>
LHS,
501 std::pair<const Loop *, const SCEV *>
RHS)
const {
508 if (
LHS.first !=
RHS.first)
514 if (
LHS.second->isNonConstantNegative()) {
515 if (!
RHS.second->isNonConstantNegative())
517 }
else if (
RHS.second->isNonConstantNegative())
529 const SCEV *URemLHS =
nullptr;
530 const SCEV *URemRHS =
nullptr;
543 for (
const SCEV *
Op :
reverse(S->operands()))
544 OpsAndLoops.
push_back(std::make_pair(getRelevantLoop(
Op),
Op));
552 Value *Sum =
nullptr;
553 for (
auto I = OpsAndLoops.
begin(),
E = OpsAndLoops.
end();
I !=
E;) {
554 const Loop *CurLoop =
I->first;
555 const SCEV *
Op =
I->second;
563 assert(!
Op->getType()->isPointerTy() &&
"Only first op can be pointer");
568 for (;
I !=
E &&
I->first == CurLoop; ++
I) {
571 const SCEV *
X =
I->second;
574 X = SE.getSCEV(
U->getValue());
577 Sum = expandAddToGEP(SE.getAddExpr(NewOps), Sum, S.
getNoWrapFlags());
578 }
else if (
Op->isNonConstantNegative()) {
580 Value *
W = expand(SE.getNegativeSCEV(
Op));
600 Type *Ty = S->getType();
605 for (
const SCEV *
Op :
reverse(S->operands()))
606 OpsAndLoops.
push_back(std::make_pair(getRelevantLoop(
Op),
Op));
613 Value *Prod =
nullptr;
614 auto I = OpsAndLoops.
begin();
619 const auto ExpandOpBinPowN = [
this, &
I, &OpsAndLoops]() {
629 while (
E != OpsAndLoops.
end() && *
I == *
E &&
Exponent != MaxExponent) {
633 assert(
Exponent > 0 &&
"Trying to calculate a zeroth exponent of operand?");
641 for (uint64_t BinExp = 2; BinExp <=
Exponent; BinExp <<= 1) {
652 assert(Result &&
"Nothing was expanded?");
656 while (
I != OpsAndLoops.
end()) {
659 Prod = ExpandOpBinPowN();
660 }
else if (
I->second->isAllOnesValue()) {
667 Value *
W = ExpandOpBinPowN();
676 if (
RHS->logBase2() ==
RHS->getBitWidth() - 1)
678 Prod = InsertBinop(Instruction::Shl, Prod,
679 ConstantInt::get(Ty,
RHS->logBase2()), NWFlags,
694 const APInt &
RHS = SC->getAPInt();
695 if (
RHS.isPowerOf2())
696 return InsertBinop(Instruction::LShr,
LHS,
697 ConstantInt::get(SC->getType(),
RHS.logBase2()),
701 const SCEV *RHSExpr = S->getRHS();
704 bool GuaranteedNotPoison =
705 ScalarEvolution::isGuaranteedNotToBePoison(RHSExpr);
706 if (!GuaranteedNotPoison)
707 RHS = Builder.CreateFreeze(
RHS);
712 if (!SE.isKnownNonZero(RHSExpr) || !GuaranteedNotPoison)
713 RHS = Builder.CreateIntrinsic(
RHS->
getType(), Intrinsic::umax,
714 {RHS, ConstantInt::get(RHS->getType(), 1)});
717 SE.isKnownNonZero(S->getRHS()));
730 if (L == IVIncInsertLoop) {
733 if (!SE.DT.dominates(OInst, IVIncInsertPos))
747 return isNormalAddRecExprPHI(PN, IncV, L);
762 if (IncV == InsertPos)
769 case Instruction::Add:
770 case Instruction::Sub: {
772 if (!OInst || SE.DT.dominates(OInst, InsertPos))
776 case Instruction::BitCast:
778 case Instruction::GetElementPtr:
783 if (!SE.DT.dominates(OInst, InsertPos))
809 if (Builder.GetInsertPoint() == It)
810 Builder.SetInsertPoint(&*NewInsertPt);
811 for (
auto *InsertPtGuard : InsertPointGuards)
812 if (InsertPtGuard->GetInsertPoint() == It)
813 InsertPtGuard->SetInsertPoint(NewInsertPt);
820 bool RecomputePoisonFlags) {
825 I->dropPoisonGeneratingFlags();
827 if (
auto Flags = SE.getStrengthenedNoWrapFlagsFromBinOp(OBO)) {
836 if (SE.DT.dominates(IncV, InsertPos)) {
837 if (RecomputePoisonFlags)
838 FixupPoisonFlags(IncV);
848 if (!SE.LI.movementPreservesLCSSAForm(IncV, InsertPos))
860 if (SE.DT.dominates(IncV, InsertPos))
864 fixupInsertPoints(
I);
866 if (RecomputePoisonFlags)
889 (IVOper =
getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
906 IncV = Builder.CreatePtrAdd(PN, StepV,
"scevgep");
909 Builder.CreateSub(PN, StepV, Twine(IVName) +
".iv.next") :
910 Builder.CreateAdd(PN, StepV, Twine(IVName) +
".iv.next");
922 Type *PhiTy = Phi->getType();
936 if (Phi == Requested) {
959 const SCEV *ExtendAfterOp =
961 return ExtendAfterOp == OpAfterExtend;
973 const SCEV *ExtendAfterOp =
975 return ExtendAfterOp == OpAfterExtend;
982SCEVExpander::getAddRecExprPHILiterally(
const SCEVAddRecExpr *Normalized,
985 assert((!IVIncInsertLoop || IVIncInsertPos) &&
986 "Uninitialized insert position");
991 PHINode *AddRecPhiMatch =
nullptr;
998 bool TryNonMatchingSCEV =
1000 SE.DT.properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());
1002 for (PHINode &PN :
L->getHeader()->phis()) {
1003 if (!SE.isSCEVable(PN.
getType()))
1010 DebugType,
dbgs() <<
"One incomplete PHI is found: " << PN <<
"\n");
1018 bool IsMatchingSCEV = PhiSCEV == Normalized;
1022 if (!IsMatchingSCEV && !TryNonMatchingSCEV)
1033 if (!isExpandedAddRecExprPHI(&PN, TempIncV, L))
1036 if (!isNormalAddRecExprPHI(&PN, TempIncV, L))
1041 if (IsMatchingSCEV) {
1045 AddRecPhiMatch = &PN;
1051 if ((!TruncTy || InvertStep) &&
1055 AddRecPhiMatch = &PN;
1057 TruncTy = Normalized->
getType();
1061 if (AddRecPhiMatch) {
1064 InsertedValues.insert(AddRecPhiMatch);
1066 rememberInstruction(IncV);
1068 ReusedValues.insert(AddRecPhiMatch);
1069 ReusedValues.insert(IncV);
1070 return AddRecPhiMatch;
1075 SCEVInsertPointGuard Guard(Builder,
this);
1085 PostIncLoops.
clear();
1088 assert(
L->getLoopPreheader() &&
1089 "Can't expand add recurrences without a loop preheader!");
1091 expand(Normalized->
getStart(),
L->getLoopPreheader()->getTerminator());
1108 Step = SE.getNegativeSCEV(Step);
1110 Value *StepV = expand(Step,
L->getHeader()->getFirstInsertionPt());
1115 bool IncrementIsNUW = !useSubtract &&
IsIncrementNUW(SE, Normalized);
1116 bool IncrementIsNSW = !useSubtract &&
IsIncrementNSW(SE, Normalized);
1120 Builder.SetInsertPoint(Header, Header->begin());
1122 Builder.CreatePHI(ExpandTy,
pred_size(Header), Twine(IVName) +
".iv");
1127 if (!
L->contains(Pred)) {
1136 IVIncInsertPos : Pred->getTerminator();
1137 Builder.SetInsertPoint(InsertPos);
1138 Value *IncV = expandIVInc(PN, StepV, L, useSubtract);
1151 PostIncLoops = SavedPostIncLoops;
1155 InsertedValues.
insert(PN);
1156 InsertedIVs.push_back(PN);
1162 const Loop *
L = S->getLoop();
1166 const SCEVAddRecExpr *Normalized = S;
1167 if (PostIncLoops.count(L)) {
1174 [[maybe_unused]]
const SCEV *
Start = Normalized->
getStart();
1176 assert(SE.properlyDominates(Start,
L->getHeader()) &&
1177 "Start does not properly dominate loop header");
1178 assert(SE.dominates(Step,
L->getHeader()) &&
"Step not dominate loop header");
1182 Type *TruncTy =
nullptr;
1183 bool InvertStep =
false;
1184 PHINode *PN = getAddRecExprPHILiterally(Normalized, L, TruncTy, InvertStep);
1188 if (!PostIncLoops.count(L))
1193 assert(LatchBlock &&
"PostInc mode requires a unique loop latch!");
1201 if (!S->hasNoUnsignedWrap())
1202 I->setHasNoUnsignedWrap(
false);
1203 if (!S->hasNoSignedWrap())
1204 I->setHasNoSignedWrap(
false);
1212 &*Builder.GetInsertPoint())) {
1225 Step = SE.getNegativeSCEV(Step);
1229 SCEVInsertPointGuard Guard(Builder,
this);
1230 StepV = expand(Step,
L->getHeader()->getFirstInsertionPt());
1232 Result = expandIVInc(PN, StepV, L, useSubtract);
1240 if (TruncTy !=
Result->getType())
1241 Result = Builder.CreateTrunc(Result, TruncTy);
1245 Result = Builder.CreateSub(expand(Normalized->
getStart()), Result);
1252 Type *STy = S->getType();
1253 const Loop *
L = S->getLoop();
1256 !SE.DT.dominates(EB, Builder.GetInsertBlock()))
1261 auto CanReuse = [&](
const SCEV *ExitSCEV) ->
const SCEV * {
1264 const SCEV *Diff = SE.getMinusSCEV(S, ExitSCEV);
1265 const SCEV *
Op = Diff;
1275 for (
auto &PN : EB->
phis()) {
1276 if (!SE.isSCEVable(PN.
getType()))
1278 auto *ExitSCEV = SE.getSCEV(&PN);
1282 const SCEV *Diff =
nullptr;
1284 DL.getAddressType(PhiTy) == STy) {
1286 const SCEV *AddrSCEV = SE.getPtrToAddrExpr(ExitSCEV);
1287 Diff = CanReuse(AddrSCEV);
1289 const SCEV *IntSCEV = SE.getPtrToIntExpr(ExitSCEV, STy);
1290 Diff = CanReuse(IntSCEV);
1292 }
else if (STy == PhiTy) {
1293 Diff = CanReuse(ExitSCEV);
1299 "difference must be of integer type");
1300 Value *DiffV = expand(Diff);
1301 Value *BaseV = fixupLCSSAFormFor(&PN);
1304 return Builder.CreatePtrAdd(BaseV, DiffV);
1305 BaseV = Builder.CreatePtrToAddr(BaseV);
1307 return Builder.CreateAdd(BaseV, DiffV);
1324 if (!CanonicalMode || (S->getNumOperands() > 2))
1325 return expandAddRecExprLiterally(S);
1327 Type *Ty = SE.getEffectiveSCEVType(S->getType());
1328 const Loop *
L = S->getLoop();
1331 PHINode *CanonicalIV =
nullptr;
1332 if (PHINode *PN =
L->getCanonicalInductionVariable())
1333 if (SE.getTypeSizeInBits(PN->
getType()) >= SE.getTypeSizeInBits(Ty))
1339 SE.getTypeSizeInBits(CanonicalIV->
getType()) > SE.getTypeSizeInBits(Ty) &&
1340 !S->getType()->isPointerTy()) {
1342 for (
unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
1343 NewOps[i] = SE.getAnyExtendExpr(S->getOperand(i), CanonicalIV->
getType());
1348 V = expand(SE.getTruncateExpr(SE.getUnknown(V), Ty), NewInsertPt);
1354 if (
Value *V = tryToReuseLCSSAPhi(S))
1358 if (!S->getStart()->isZero()) {
1360 Value *StartV = expand(SE.getPointerBase(S));
1361 return expandAddToGEP(SE.removePointerBase(S), StartV,
1366 NewOps[0] = SE.getConstant(Ty, 0);
1374 const SCEV *AddExprLHS = SE.getUnknown(expand(S->getStart()));
1375 const SCEV *AddExprRHS = SE.getUnknown(expand(Rest));
1376 return expand(SE.getAddExpr(AddExprLHS, AddExprRHS));
1387 rememberInstruction(CanonicalIV);
1389 SmallPtrSet<BasicBlock *, 4> PredSeen;
1390 Constant *One = ConstantInt::get(Ty, 1);
1393 if (!PredSeen.
insert(HP).second) {
1400 if (
L->contains(HP)) {
1407 rememberInstruction(
Add);
1416 if (S->isAffine() && S->getOperand(1)->isOne()) {
1417 assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->
getType()) &&
1418 "IVs with types different from the canonical IV should "
1419 "already have been handled!");
1428 expand(SE.getTruncateOrNoop(
1429 SE.getMulExpr(SE.getUnknown(CanonicalIV),
1430 SE.getNoopOrAnyExtend(S->getOperand(1),
1438 const SCEV *IH = SE.getUnknown(CanonicalIV);
1441 const SCEV *NewS = S;
1442 const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->
getType());
1449 const SCEV *
T = SE.getTruncateOrNoop(V, Ty);
1454 Value *
V = expand(S->getOperand());
1455 Type *Ty = S->getType();
1460 for (User *U :
V->users()) {
1462 if (CI && CI->
getType() == Ty &&
1463 (CI->
getOpcode() == CastInst::PtrToAddr ||
1464 CI->
getOpcode() == CastInst::PtrToInt) &&
1465 &*BIP != CI && SE.DT.dominates(CI, &*BIP))
1469 return ReuseOrCreateCast(V, Ty, CastInst::PtrToAddr,
1470 GetOptimalInsertionPointForCastOf(V));
1474 Value *
V = expand(S->getOperand());
1475 return ReuseOrCreateCast(V, S->getType(), CastInst::PtrToInt,
1476 GetOptimalInsertionPointForCastOf(V));
1480 Value *
V = expand(S->getOperand());
1481 return Builder.CreateTrunc(V, S->getType());
1486 Value *
V = expand(S->getOperand());
1487 return Builder.CreateZExt(V, S->getType(),
"",
1488 SE.isKnownNonNegative(S->getOperand()));
1493 Value *
V = expand(S->getOperand());
1494 return Builder.CreateSExt(V, S->getType());
1499 bool IsSequential) {
1500 bool PrevSafeMode = SafeUDivMode;
1501 SafeUDivMode |= IsSequential;
1502 Value *
LHS = expand(S->getOperand(S->getNumOperands() - 1));
1505 LHS = Builder.CreateFreeze(
LHS);
1506 for (
int i = S->getNumOperands() - 2; i >= 0; --i) {
1507 SafeUDivMode = (IsSequential && i != 0) || PrevSafeMode;
1508 Value *
RHS = expand(S->getOperand(i));
1509 if (IsSequential && i != 0)
1510 RHS = Builder.CreateFreeze(
RHS);
1513 Sel = Builder.CreateIntrinsic(IntrinID, {Ty}, {
LHS,
RHS},
1518 Sel = Builder.CreateSelect(ICmp,
LHS,
RHS, Name);
1522 SafeUDivMode = PrevSafeMode;
1527 return expandMinMaxExpr(S, Intrinsic::smax,
"smax");
1531 return expandMinMaxExpr(S, Intrinsic::umax,
"umax");
1535 return expandMinMaxExpr(S, Intrinsic::smin,
"smin");
1539 return expandMinMaxExpr(S, Intrinsic::umin,
"umin");
1542Value *SCEVExpander::visitSequentialUMinExpr(
1544 return expandMinMaxExpr(S, Intrinsic::umin,
"umin",
1549 return Builder.CreateVScale(S->getType());
1560 Value *V = expand(SH);
1562 if (Ty && Ty != V->getType()) {
1563 assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->
getType()) &&
1564 "non-trivial casts should be done with the SCEVs directly!");
1565 V = InsertNoopCastOfTo(V, Ty);
1570Value *SCEVExpander::FindValueInExprValueMap(
1582 for (
Value *V : SE.getSCEVValues(S)) {
1599 DropPoisonGeneratingInsts.
clear();
1617 auto SafeToHoist = [](
const SCEV *S) {
1622 return SC->getValue()->isZero();
1632 if (SafeToHoist(S)) {
1633 for (Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock());;
1634 L =
L->getParentLoop()) {
1635 if (SE.isLoopInvariant(S, L)) {
1637 if (BasicBlock *Preheader =
L->getLoopPreheader()) {
1643 InsertPt =
L->getHeader()->getFirstInsertionPt();
1649 if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
1650 InsertPt =
L->getHeader()->getFirstInsertionPt();
1652 while (InsertPt != Builder.GetInsertPoint() &&
1654 InsertPt = std::next(InsertPt);
1662 auto I = InsertedExpressions.find(std::make_pair(S, &*InsertPt));
1663 if (
I != InsertedExpressions.end())
1666 SCEVInsertPointGuard Guard(Builder,
this);
1667 Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);
1671 Value *
V = FindValueInExprValueMap(S, &*InsertPt, DropPoisonGeneratingInsts);
1674 V = fixupLCSSAFormFor(V);
1676 for (Instruction *
I : DropPoisonGeneratingInsts) {
1678 I->dropPoisonGeneratingAnnotations();
1682 if (
auto Flags = SE.getStrengthenedNoWrapFlagsFromBinOp(OBO)) {
1690 auto *Src = NNI->getOperand(0);
1693 DL).value_or(
false))
1694 NNI->setNonNeg(
true);
1704 InsertedExpressions[std::make_pair(S, &*InsertPt)] =
V;
1708void SCEVExpander::rememberInstruction(
Value *
I) {
1709 auto DoInsert = [
this](
Value *
V) {
1710 if (!PostIncLoops.empty())
1711 InsertedPostIncValues.insert(V);
1713 InsertedValues.insert(V);
1720 OrigFlags.try_emplace(
I, PoisonFlags(
I));
1723void SCEVExpander::replaceCongruentIVInc(
1734 if (!OrigInc || !IsomorphicInc)
1741 bool Chained = ChainedPhis.contains(Phi);
1742 if (!(Chained || isExpandedAddRecExprPHI(OrigPhi, OrigInc, L)) &&
1743 (Chained || isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
1758 const SCEV *TruncExpr =
1759 SE.getTruncateOrNoop(SE.getSCEV(OrigInc), IsomorphicInc->
getType());
1760 if (OrigInc == IsomorphicInc || TruncExpr != SE.getSCEV(IsomorphicInc) ||
1761 !SE.LI.replacementPreservesLCSSAForm(IsomorphicInc, OrigInc))
1764 bool BothHaveNUW =
false;
1765 bool BothHaveNSW =
false;
1768 if (OBOIncV && OBOIsomorphic) {
1770 OBOIncV->hasNoUnsignedWrap() && OBOIsomorphic->hasNoUnsignedWrap();
1772 OBOIncV->hasNoSignedWrap() && OBOIsomorphic->hasNoSignedWrap();
1785 "Should only replace an increment with a wider one.");
1786 if (BothHaveNUW || BothHaveNSW) {
1792 dbgs() <<
"INDVARS: Eliminated congruent iv.inc: "
1793 << *IsomorphicInc <<
'\n');
1794 Value *NewInc = OrigInc;
1798 IP = PN->
getParent()->getFirstInsertionPt();
1803 Builder.SetCurrentDebugLocation(IsomorphicInc->
getDebugLoc());
1805 Builder.CreateTruncOrBitCast(OrigInc, IsomorphicInc->
getType(), IVName);
1830 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
1831 return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy();
1832 return RHS->getType()->getPrimitiveSizeInBits().getFixedValue() <
1833 LHS->getType()->getPrimitiveSizeInBits().getFixedValue();
1836 unsigned NumElim = 0;
1844 if (!SE.isSCEVable(PN->
getType()))
1849 return Const->getValue();
1854 if (
Value *V = SimplifyPHINode(Phi)) {
1855 if (V->getType() != Phi->getType())
1857 SE.forgetValue(Phi);
1858 Phi->replaceAllUsesWith(V);
1862 dbgs() <<
"INDVARS: Eliminated constant iv: " << *Phi
1867 if (!SE.isSCEVable(Phi->getType()))
1870 PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
1873 if (Phi->getType()->isIntegerTy() &&
TTI &&
1874 TTI->isTruncateFree(Phi->getType(), Phis.
back()->getType())) {
1878 const SCEV *PhiExpr = SE.getSCEV(Phi);
1882 const SCEV *TruncExpr =
1883 SE.getTruncateExpr(PhiExpr, Phis.
back()->getType());
1884 ExprToIVMap[TruncExpr] = Phi;
1895 replaceCongruentIVInc(Phi, OrigPhiRef, L, DT, DeadInsts);
1897 dbgs() <<
"INDVARS: Eliminated congruent iv: " << *Phi
1900 DebugType,
dbgs() <<
"INDVARS: Original iv: " << *OrigPhiRef <<
'\n');
1902 Value *NewIV = OrigPhiRef;
1903 if (OrigPhiRef->
getType() != Phi->getType()) {
1905 L->getHeader()->getFirstInsertionPt());
1906 Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
1907 NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
1909 Phi->replaceAllUsesWith(NewIV);
1921 L->getExitingBlocks(ExitingBlocks);
1928 if (!
match(BB->getTerminator(),
1933 if (SE.getSCEV(LHS) == S && SE.DT.dominates(LHS, At))
1936 if (SE.getSCEV(RHS) == S && SE.DT.dominates(RHS, At))
1945 return FindValueInExprValueMap(S, At, DropPoisonGeneratingInsts) !=
nullptr;
1956 struct OperationIndices {
1957 OperationIndices(
unsigned Opc,
size_t min,
size_t max) :
1958 Opcode(
Opc), MinIdx(min), MaxIdx(
max) { }
1971 return TTI.getCastInstrCost(Opcode, S->getType(),
1972 S->getOperand(0)->getType(),
1976 auto ArithCost = [&](
unsigned Opcode,
unsigned NumRequired,
1977 unsigned MinIdx = 0,
1980 return NumRequired *
1981 TTI.getArithmeticInstrCost(Opcode, S->getType(),
CostKind);
1984 auto CmpSelCost = [&](
unsigned Opcode,
unsigned NumRequired,
unsigned MinIdx,
1987 Type *OpType = S->getType();
1988 return NumRequired *
TTI.getCmpSelInstrCost(
1993 switch (S->getSCEVType()) {
2001 Cost = CastCost(Instruction::PtrToAddr);
2004 Cost = CastCost(Instruction::PtrToInt);
2007 Cost = CastCost(Instruction::Trunc);
2010 Cost = CastCost(Instruction::ZExt);
2013 Cost = CastCost(Instruction::SExt);
2016 unsigned Opcode = Instruction::UDiv;
2018 if (SC->getAPInt().isPowerOf2())
2019 Opcode = Instruction::LShr;
2020 Cost = ArithCost(Opcode, 1);
2024 Cost = ArithCost(Instruction::Add, S->getNumOperands() - 1);
2035 unsigned OpCode = Instruction::Mul;
2036 if (S->getNumOperands() == 2)
2038 if (SC->getAPInt().isAllOnes())
2039 OpCode = Instruction::Sub;
2040 else if (SC->getAPInt().isPowerOf2())
2041 OpCode = Instruction::Shl;
2043 Cost = ArithCost(OpCode, S->getNumOperands() - 1);
2053 Cost += CmpSelCost(Instruction::ICmp, S->getNumOperands() - 1, 0, 1);
2054 Cost += CmpSelCost(Instruction::Select, S->getNumOperands() - 1, 0, 2);
2055 switch (S->getSCEVType()) {
2059 Cost += CmpSelCost(Instruction::ICmp, S->getNumOperands() - 1, 0, 0);
2060 Cost += ArithCost(Instruction::Or,
2061 S->getNumOperands() > 2 ? S->getNumOperands() - 2 : 0);
2062 Cost += CmpSelCost(Instruction::Select, 1, 0, 1);
2067 "Unhandled SCEV expression type?");
2074 unsigned NumRecurrences = S->getNumOperands() - 1;
2075 Cost +=
TTI.getCFInstrCost(Instruction::PHI,
CostKind) * NumRecurrences;
2077 TTI.getArithmeticInstrCost(Instruction::Add, S->getType(),
CostKind) *
2080 Worklist.
emplace_back(Instruction::PHI, 0, S->getOperand(0));
2082 for (
const SCEV *
Op : S->operands().drop_front())
2088 for (
auto &CostOp : Operations) {
2089 for (
auto SCEVOp :
enumerate(S->operands())) {
2091 size_t MinIdx = std::max(SCEVOp.index(), CostOp.MinIdx);
2092 size_t OpIdx = std::min(MinIdx, CostOp.MaxIdx);
2099bool SCEVExpander::isHighCostExpansionHelper(
2107 const SCEV *S = WorkItem.
S;
2118 L->getHeader()->getParent()->hasMinSize()
2137 return Cost > Budget;
2158 SE.getAddExpr(S, SE.getConstant(S->
getType(), 1)), &At, L))
2173 "Nary expr should have more than 1 operand.");
2178 return Cost > Budget;
2182 "Polynomial should be at least linear");
2185 return Cost > Budget;
2194 switch (Pred->getKind()) {
2209 Value *Expr0 = expand(Pred->getLHS(), IP);
2210 Value *Expr1 = expand(Pred->getRHS(), IP);
2212 Builder.SetInsertPoint(IP);
2214 auto *
I = Builder.CreateICmp(InvPred, Expr0, Expr1,
"ident.check");
2221 "non-affine expression");
2225 const SCEV *ExitCount =
2226 SE.getPredicatedSymbolicMaxBackedgeTakenCount(AR->
getLoop(), Pred);
2234 unsigned SrcBits = SE.getTypeSizeInBits(ExitCount->
getType());
2235 unsigned DstBits = SE.getTypeSizeInBits(ARTy);
2242 Builder.SetInsertPoint(
Loc);
2243 Value *TripCountVal = expand(ExitCount,
Loc);
2248 Value *StepValue = expand(Step,
Loc);
2249 Value *NegStepValue = expand(SE.getNegativeSCEV(Step),
Loc);
2250 Value *StartValue = expand(Start,
Loc);
2255 Builder.SetInsertPoint(
Loc);
2258 Value *AbsStep = Builder.CreateSelect(StepCompare, NegStepValue, StepValue);
2268 auto ComputeEndCheck = [&]() ->
Value * {
2270 Value *TruncTripCount = Builder.CreateZExtOrTrunc(TripCountVal, Ty);
2272 CallInst *
Mul = Builder.CreateIntrinsic(Intrinsic::umul_with_overflow, Ty,
2273 {AbsStep, TruncTripCount},
2275 Value *MulV = Builder.CreateExtractValue(
Mul, 0,
"mul.result");
2276 Value *OfMul = Builder.CreateExtractValue(
Mul, 1,
"mul.overflow");
2279 bool NeedPosCheck = !SE.isKnownNegative(Step);
2280 bool NeedNegCheck = !SE.isKnownPositive(Step);
2283 Value *NegMulV = Builder.CreateNeg(MulV);
2285 Add = Builder.CreatePtrAdd(StartValue, MulV);
2287 Sub = Builder.CreatePtrAdd(StartValue, NegMulV);
2290 Add = Builder.CreateAdd(StartValue, MulV);
2292 Sub = Builder.CreateSub(StartValue, MulV);
2295 Value *EndCompareLT =
nullptr;
2296 Value *EndCompareGT =
nullptr;
2297 Value *EndCheck =
nullptr;
2299 EndCheck = EndCompareLT = Builder.CreateICmp(
2302 EndCheck = EndCompareGT = Builder.CreateICmp(
2304 if (NeedPosCheck && NeedNegCheck) {
2306 EndCheck = Builder.CreateSelect(StepCompare, EndCompareGT, EndCompareLT);
2308 return Builder.CreateOr(EndCheck, OfMul);
2310 Value *EndCheck = ComputeEndCheck();
2315 if (SrcBits > DstBits) {
2317 auto *BackedgeCheck =
2319 ConstantInt::get(
Loc->getContext(), MaxVal));
2320 BackedgeCheck = Builder.CreateAnd(
2323 EndCheck = Builder.CreateOr(EndCheck, BackedgeCheck);
2332 Value *NSSWCheck =
nullptr, *NUSWCheck =
nullptr;
2342 if (NUSWCheck && NSSWCheck)
2343 return Builder.CreateOr(NUSWCheck, NSSWCheck);
2358 for (
const auto *Pred : Union->getPredicates()) {
2360 Builder.SetInsertPoint(IP);
2365 return Builder.CreateOr(Checks);
2368Value *SCEVExpander::fixupLCSSAFormFor(
Value *V) {
2370 if (!PreserveLCSSA || !DefI)
2376 if (!DefLoop || UseLoop == DefLoop || DefLoop->
contains(UseLoop))
2387 if (DefI->getType()->isIntegerTy())
2401 for (
PHINode *PN : InsertedPHIs)
2402 rememberInstruction(PN);
2403 for (
PHINode *PN : PHIsToRemove) {
2406 InsertedValues.erase(PN);
2407 InsertedPostIncValues.erase(PN);
2411 return User->getOperand(0);
2433struct SCEVFindUnsafe {
2434 ScalarEvolution &SE;
2436 bool IsUnsafe =
false;
2438 SCEVFindUnsafe(ScalarEvolution &SE,
bool CanonicalMode)
2439 : SE(SE), CanonicalMode(CanonicalMode) {}
2441 bool follow(
const SCEV *S) {
2451 if (!AR->getLoop()->getLoopPreheader() &&
2452 (!CanonicalMode || !AR->isAffine())) {
2459 bool isDone()
const {
return IsUnsafe; }
2464 SCEVFindUnsafe Search(SE, CanonicalMode);
2466 return !Search.IsUnsafe;
2497 for (
auto [
I, Flags] : Expander.OrigFlags)
2500 auto InsertedInstructions = Expander.getAllInsertedInstructions();
2503 InsertedInstructions);
2513 [&InsertedSet](
Value *U) {
2514 return InsertedSet.contains(cast<Instruction>(U));
2516 "removed instruction should only be used by instructions inserted "
2517 "during expansion");
2519 assert(!
I->getType()->isVoidTy() &&
2520 "inserted instruction should have non-void types");
2522 I->eraseFromParent();
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static cl::opt< OutputCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(OutputCostKind::RecipThroughput), cl::values(clEnumValN(OutputCostKind::RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(OutputCostKind::Latency, "latency", "Instruction latency"), clEnumValN(OutputCostKind::CodeSize, "code-size", "Code size"), clEnumValN(OutputCostKind::SizeAndLatency, "size-latency", "Code size and latency"), clEnumValN(OutputCostKind::All, "all", "Print all cost kinds")))
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
static bool IsIncrementNUW(ScalarEvolution &SE, const SCEVAddRecExpr *AR)
static const Loop * PickMostRelevantLoop(const Loop *A, const Loop *B, DominatorTree &DT)
PickMostRelevantLoop - Given two loops pick the one that's most relevant for SCEV expansion.
static InstructionCost costAndCollectOperands(const SCEVOperand &WorkItem, const TargetTransformInfo &TTI, TargetTransformInfo::TargetCostKind CostKind, SmallVectorImpl< SCEVOperand > &Worklist)
static bool IsIncrementNSW(ScalarEvolution &SE, const SCEVAddRecExpr *AR)
static bool canBeCheaplyTransformed(ScalarEvolution &SE, const SCEVAddRecExpr *Phi, const SCEVAddRecExpr *Requested, bool &InvertStep)
Check whether we can cheaply express the requested SCEV in terms of the available PHI SCEV by truncat...
#define SCEV_DEBUG_WITH_TYPE(TYPE, X)
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
This class represents an incoming formal argument to a Function.
LLVM Basic Block Representation.
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
InstListType::iterator iterator
Instruction iterators...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
This class represents a function call, abstracting a target machine's calling convention.
This is the base class for all instructions that perform data casts.
static LLVM_ABI Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
Instruction::CastOps getOpcode() const
Return the opcode of this CastInst.
static LLVM_ABI CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, a PtrToInt, or an IntToPTr cast instruction.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
@ ICMP_SLT
signed less than
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ ICMP_ULT
unsigned less than
@ ICMP_SGE
signed greater or equal
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static LLVM_ABI Constant * getCast(unsigned ops, Constant *C, Type *Ty, bool OnlyIfReduced=false)
Convenience function for getting a Cast operation.
This is the shared class of boolean and integer constants.
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
static GEPNoWrapFlags noUnsignedWrap()
static GEPNoWrapFlags none()
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
LLVM_ABI void setHasNoUnsignedWrap(bool b=true)
Set or clear the nuw flag on this instruction, which must be an operator which supports this flag.
LLVM_ABI void setHasNoSignedWrap(bool b=true)
Set or clear the nsw flag on this instruction, which must be an operator which supports this flag.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI void insertBefore(InstListType::iterator InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified position.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
LLVM_ABI bool mayHaveSideEffects() const LLVM_READONLY
Return true if the instruction may have side effects.
LLVM_ABI bool comesBefore(const Instruction *Other) const
Given an instruction Other in the same basic block as this instruction, return true if this instructi...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
bool contains(const LoopT *L) const
Return true if the specified loop is contained within this loop.
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
Represents a single loop in the control flow graph.
ICmpInst::Predicate getPredicate() const
Returns the comparison predicate underlying the intrinsic.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
bool isComplete() const
If the PHI node is complete which means all of its parent's predecessors have incoming value in this ...
Value * getIncomingValueForBlock(const BasicBlock *BB) const
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
This node represents a polynomial recurrence on the trip count of the specified loop.
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values.
const Loop * getLoop() const
SCEVUse getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
This class represents an assumption that the expression LHS Pred RHS evaluates to true,...
LLVM_ABI Value * generateOverflowCheck(const SCEVAddRecExpr *AR, Instruction *Loc, bool Signed)
Generates code that evaluates if the AR expression will overflow.
LLVM_ABI bool hasRelatedExistingExpansion(const SCEV *S, const Instruction *At, Loop *L)
Determine whether there is an existing expansion of S that can be reused.
SmallVector< Instruction *, 32 > getAllInsertedInstructions() const
Return a vector containing all instructions inserted during expansion.
LLVM_ABI bool isSafeToExpand(const SCEV *S) const
Return true if the given expression is safe to expand in the sense that all materialized values are s...
LLVM_ABI bool isSafeToExpandAt(const SCEV *S, const Instruction *InsertionPoint) const
Return true if the given expression is safe to expand in the sense that all materialized values are d...
LLVM_ABI unsigned replaceCongruentIVs(Loop *L, const DominatorTree *DT, SmallVectorImpl< WeakTrackingVH > &DeadInsts, const TargetTransformInfo *TTI=nullptr)
replace congruent phis with their most canonical representative.
LLVM_ABI Value * expandUnionPredicate(const SCEVUnionPredicate *Pred, Instruction *Loc)
A specialized variant of expandCodeForPredicate, handling the case when we are expanding code for a S...
LLVM_ABI bool hoistIVInc(Instruction *IncV, Instruction *InsertPos, bool RecomputePoisonFlags=false)
Utility for hoisting IncV (with all subexpressions required for its computation) before InsertPos.
bool isInsertedInstruction(Instruction *I) const
Return true if the specified instruction was inserted by the code rewriter.
LLVM_ABI Value * expandCodeForPredicate(const SCEVPredicate *Pred, Instruction *Loc)
Generates a code sequence that evaluates this predicate.
static LLVM_ABI bool canReuseFlagsFromOriginalIVInc(PHINode *OrigPhi, PHINode *WidePhi, Instruction *OrigInc, Instruction *WideInc)
Return true if both increments directly increment the corresponding IV PHI nodes and have the same op...
LLVM_ABI Value * expandCodeFor(SCEVUse SH, Type *Ty, BasicBlock::iterator I)
Insert code to directly compute the specified SCEV expression into the program.
LLVM_ABI Value * expandComparePredicate(const SCEVComparePredicate *Pred, Instruction *Loc)
A specialized variant of expandCodeForPredicate, handling the case when we are expanding code for a S...
LLVM_ABI Value * expandWrapPredicate(const SCEVWrapPredicate *P, Instruction *Loc)
A specialized variant of expandCodeForPredicate, handling the case when we are expanding code for a S...
LLVM_ABI Instruction * getIVIncOperand(Instruction *IncV, Instruction *InsertPos, bool allowScale)
Return the induction variable increment's IV operand.
void eraseDeadInstructions(Value *Root)
Remove inserted instructions that are dead, e.g.
LLVM_ABI BasicBlock::iterator findInsertPointAfter(Instruction *I, Instruction *MustDominate) const
Returns a suitable insert point after I, that dominates MustDominate.
void setInsertPoint(Instruction *IP)
Set the current insertion point.
This class represents an assumption made using SCEV expressions which can be checked at run-time.
This class represents a composition of other SCEV predicates, and is the class that most clients will...
This means that we are dealing with an entirely unknown SCEV value, and only represent it as its LLVM...
This class represents an assumption made on an AddRec expression.
This class represents an analyzed expression in the program.
SCEVNoWrapFlags NoWrapFlags
static constexpr auto FlagNUW
static constexpr auto FlagAnyWrap
LLVM_ABI bool isNonConstantNegative() const
Return true if the specified scev is negated, but not a constant.
static constexpr auto FlagNSW
LLVM_ABI ArrayRef< SCEVUse > operands() const
Return operands of this SCEV expression.
SCEVTypes getSCEVType() const
static constexpr auto FlagNW
LLVM_ABI Type * getType() const
Return the LLVM type of this SCEV expression.
The main scalar evolution driver.
LLVM_ABI bool isKnownNonZero(const SCEV *S)
Test if the given expression is known to be non-zero.
LLVM_ABI const SCEV * getMinusSCEV(SCEVUse LHS, SCEVUse RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS.
LLVM_ABI const SCEV * getTruncateOrNoop(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
LLVM_ABI bool containsAddRecurrence(const SCEV *S)
Return true if the SCEV is a scAddRecExpr or it contains scAddRecExpr.
LLVM_ABI const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
static SCEV::NoWrapFlags clearFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OffFlags)
static SCEV::NoWrapFlags maskFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags Mask)
Convenient NoWrapFlags manipulation.
LLVM_ABI const SCEV * getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
LLVM_ABI const SCEV * getAddExpr(SmallVectorImpl< SCEVUse > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
LLVM_ABI bool canReuseInstruction(const SCEV *S, Instruction *I, SmallVectorImpl< Instruction * > &DropPoisonGeneratingInsts)
Check whether it is poison-safe to represent the expression S using the instruction I.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI unsigned getIntegerBitWidth() const
bool isVectorTy() const
True if this is an instance of VectorType.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
bool isPointerTy() const
True if this is an instance of PointerType.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
LLVMContext & getContext() const
All values hold a context through their type.
iterator_range< user_iterator > users()
const ParentTy * getParent() const
self_iterator getIterator()
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr bool any(E Val)
@ C
The default llvm calling convention, compatible with C.
@ BasicBlock
Various leaf nodes.
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
bool match(Val *V, const Pattern &P)
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
auto m_BasicBlock()
Match an arbitrary basic block value and ignore it.
auto m_Value()
Match an arbitrary value and ignore it.
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
cst_pred_ty< is_all_ones > m_scev_AllOnes()
Match an integer with all bits set.
bind_ty< const SCEVMulExpr > m_scev_Mul(const SCEVMulExpr *&V)
SCEVUnaryExpr_match< SCEVPtrToAddrExpr, Op0_t > m_scev_PtrToAddr(const Op0_t &Op0)
bind_ty< const SCEVAddExpr > m_scev_Add(const SCEVAddExpr *&V)
SCEVUnaryExpr_match< SCEVPtrToIntExpr, Op0_t > m_scev_PtrToInt(const Op0_t &Op0)
SCEVURem_match< Op0_t, Op1_t > m_scev_URem(Op0_t LHS, Op1_t RHS, ScalarEvolution &SE)
Match the mathematical pattern A - (A / B) * B, where A and B can be arbitrary expressions.
@ CE
Windows NT (Windows on ARM)
initializer< Ty > init(const Ty &Val)
@ User
could "use" a pointer
NodeAddr< PhiNode * > Phi
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
This is an optimization pass for GlobalISel generic memory operations.
void visitAll(const SCEV *Root, SV &Visitor)
Use SCEVTraversal to visit all nodes in the given expression tree.
GCNRegPressure max(const GCNRegPressure &P1, const GCNRegPressure &P2)
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
FunctionAddr VTableAddr Value
void stable_sort(R &&Range)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
auto pred_end(const MachineBasicBlock *BB)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
constexpr from_range_t from_range
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
auto pred_size(const MachineBasicBlock *BB)
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
auto reverse(ContainerTy &&C)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI cl::opt< unsigned > SCEVCheapExpansionBudget
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
LLVM_ABI const SCEV * normalizeForPostIncUse(const SCEV *S, const PostIncLoopSet &Loops, ScalarEvolution &SE, bool CheckInvertible=true)
Normalize S to be post-increment for all loops present in Loops.
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
@ Mul
Product of integers.
@ Sub
Subtraction of integers.
DWARFExpression::Operation Op
PredIterator< BasicBlock, Value::user_iterator > pred_iterator
constexpr unsigned BitWidth
LLVM_ABI bool formLCSSAForInstructions(SmallVectorImpl< Instruction * > &Worklist, const DominatorTree &DT, const LoopInfo &LI, ScalarEvolution *SE, SmallVectorImpl< PHINode * > *PHIsToRemove=nullptr, SmallVectorImpl< PHINode * > *InsertedPHIs=nullptr)
Ensures LCSSA form for every instruction from the Worklist in the scope of innermost containing loop.
auto pred_begin(const MachineBasicBlock *BB)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
SmallPtrSet< const Loop *, 2 > PostIncLoopSet
auto predecessors(const MachineBasicBlock *BB)
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
LLVM_ABI std::optional< bool > isImpliedByDomCondition(const Value *Cond, const Instruction *ContextI, const DataLayout &DL)
Return the boolean condition value in the context of the given instruction if it is known based on do...
SCEVUseT< const SCEV * > SCEVUse
bool SCEVExprContains(const SCEV *Root, PredTy Pred)
Return true if any node in Root satisfies the predicate Pred.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
LLVM_ABI void apply(Instruction *I)
LLVM_ABI PoisonFlags(const Instruction *I)
struct for holding enough information to help calculate the cost of the given SCEV when expanded into...
const SCEV * S
The SCEV operand to be costed.
unsigned ParentOpcode
LLVM instruction opcode that uses the operand.
int OperandIdx
The use index of an expanded instruction.
SCEVNoWrapFlags getNoWrapFlags(SCEVNoWrapFlags Mask=SCEVNoWrapFlags::NoWrapMask) const
Return the no-wrap flags for this SCEVUse, which is the union of the use-specific flags and the under...