32 using namespace PatternMatch;
56 if (U->getType() == Ty)
57 if (
CastInst *CI = dyn_cast<CastInst>(U))
58 if (CI->getOpcode() ==
Op) {
68 CI->replaceAllUsesWith(Ret);
83 assert(SE.DT.dominates(Ret, &*BIP));
85 rememberInstruction(Ret);
92 if (
auto *II = dyn_cast<InvokeInst>(I))
93 IP = II->getNormalDest()->begin();
95 while (isa<PHINode>(IP))
98 if (isa<FuncletPadInst>(IP) || isa<LandingPadInst>(IP)) {
100 }
else if (isa<CatchSwitchInst>(IP)) {
103 assert(!IP->isEHPad() &&
"unexpected eh pad!");
114 assert((Op == Instruction::BitCast ||
115 Op == Instruction::PtrToInt ||
116 Op == Instruction::IntToPtr) &&
117 "InsertNoopCastOfTo cannot perform non-noop casts!");
118 assert(SE.getTypeSizeInBits(V->
getType()) == SE.getTypeSizeInBits(Ty) &&
119 "InsertNoopCastOfTo cannot change sizes!");
122 if (Op == Instruction::BitCast) {
125 if (
CastInst *CI = dyn_cast<CastInst>(V)) {
126 if (CI->getOperand(0)->getType() == Ty)
127 return CI->getOperand(0);
131 if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
132 SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->
getType())) {
133 if (
CastInst *CI = dyn_cast<CastInst>(V))
134 if ((CI->getOpcode() == Instruction::PtrToInt ||
135 CI->getOpcode() == Instruction::IntToPtr) &&
136 SE.getTypeSizeInBits(CI->getType()) ==
137 SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
138 return CI->getOperand(0);
140 if ((
CE->getOpcode() == Instruction::PtrToInt ||
141 CE->getOpcode() == Instruction::IntToPtr) &&
142 SE.getTypeSizeInBits(
CE->getType()) ==
143 SE.getTypeSizeInBits(
CE->getOperand(0)->getType()))
144 return CE->getOperand(0);
153 if (
Argument *
A = dyn_cast<Argument>(V)) {
155 while ((isa<BitCastInst>(IP) &&
156 isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
157 cast<BitCastInst>(IP)->getOperand(0) !=
A) ||
158 isa<DbgInfoIntrinsic>(IP))
160 return ReuseOrCreateCast(
A, Ty, Op, IP);
166 return ReuseOrCreateCast(I, Ty, Op, IP);
174 if (
Constant *CLHS = dyn_cast<Constant>(LHS))
175 if (
Constant *CRHS = dyn_cast<Constant>(RHS))
179 unsigned ScanLimit = 6;
183 if (IP != BlockBegin) {
185 for (; ScanLimit; --IP, --ScanLimit) {
188 if (isa<DbgInfoIntrinsic>(IP))
190 if (IP->getOpcode() == (
unsigned)Opcode && IP->getOperand(0) == LHS &&
191 IP->getOperand(1) == RHS)
193 if (IP == BlockBegin)
break;
198 DebugLoc Loc = Builder.GetInsertPoint()->getDebugLoc();
199 SCEVInsertPointGuard Guard(Builder,
this);
202 while (
const Loop *
L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
203 if (!
L->isLoopInvariant(LHS) || !
L->isLoopInvariant(RHS))
break;
205 if (!Preheader)
break;
212 Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
214 rememberInstruction(BO);
263 if (
const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
267 if (
const SCEVConstant *
C = dyn_cast<SCEVConstant>(M->getOperand(0)))
268 if (!
C->getAPInt().srem(FC->
getAPInt())) {
278 const SCEV *Step =
A->getStepRecurrence(SE);
284 const SCEV *Start =
A->getStart();
302 unsigned NumAddRecs = 0;
303 for (
unsigned i = Ops.
size();
i > 0 && isa<SCEVAddRecExpr>(Ops[
i-1]); --
i)
309 const SCEV *Sum = NoAddRecs.empty() ?
317 else if (!Sum->isZero())
320 Ops.
append(AddRecs.begin(), AddRecs.end());
333 for (
unsigned i = 0, e = Ops.
size();
i != e; ++
i)
335 const SCEV *Start =
A->getStart();
336 if (Start->
isZero())
break;
339 A->getStepRecurrence(SE),
345 e +=
Add->getNumOperands();
350 if (!AddRecs.
empty()) {
385 Value *SCEVExpander::expandAddToGEP(
const SCEV *
const *op_begin,
386 const SCEV *
const *op_end,
391 Type *ElTy = OriginalElTy;
394 bool AnyNonZeroIndices =
false;
400 Type *IntPtrTy =
DL.getIntPtrType(PTy);
412 const SCEV *ElSize = SE.getSizeOfExpr(IntPtrTy, ElTy);
415 for (
const SCEV *Op : Ops) {
416 const SCEV *Remainder = SE.getConstant(Ty, 0);
422 AnyNonZeroIndices =
true;
430 if (!ScaledOps.
empty()) {
443 expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
447 while (
StructType *STy = dyn_cast<StructType>(ElTy)) {
448 bool FoundFieldNo =
false;
450 if (STy->getNumElements() == 0)
break;
456 if (SE.getTypeSizeInBits(
C->getType()) <= 64) {
458 uint64_t FullOffset =
C->getValue()->getZExtValue();
463 ElTy = STy->getTypeAtIndex(ElIdx);
466 AnyNonZeroIndices =
true;
474 ElTy = STy->getTypeAtIndex(0u);
480 if (
ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
481 ElTy = ATy->getElementType();
489 if (!AnyNonZeroIndices) {
491 V = InsertNoopCastOfTo(V,
494 assert(!isa<Instruction>(V) ||
495 SE.DT.dominates(cast<Instruction>(V), &*Builder.GetInsertPoint()));
498 Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);
501 if (
Constant *CLHS = dyn_cast<Constant>(V))
502 if (
Constant *CRHS = dyn_cast<Constant>(Idx))
507 unsigned ScanLimit = 6;
511 if (IP != BlockBegin) {
513 for (; ScanLimit; --IP, --ScanLimit) {
516 if (isa<DbgInfoIntrinsic>(IP))
518 if (IP->getOpcode() == Instruction::GetElementPtr &&
519 IP->getOperand(0) == V && IP->getOperand(1) == Idx)
521 if (IP == BlockBegin)
break;
526 SCEVInsertPointGuard Guard(Builder,
this);
529 while (
const Loop *
L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
530 if (!
L->isLoopInvariant(V) || !
L->isLoopInvariant(Idx))
break;
532 if (!Preheader)
break;
539 Value *
GEP = Builder.CreateGEP(Builder.getInt8Ty(), V, Idx,
"uglygep");
540 rememberInstruction(GEP);
546 SCEVInsertPointGuard Guard(Builder,
this);
549 while (
const Loop *
L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
550 if (!
L->isLoopInvariant(V))
break;
552 bool AnyIndexNotLoopInvariant =
any_of(
553 GepIndices, [
L](
Value *Op) {
return !
L->isLoopInvariant(Op); });
555 if (AnyIndexNotLoopInvariant)
559 if (!Preheader)
break;
570 Casted = InsertNoopCastOfTo(Casted, PTy);
571 Value *GEP = Builder.CreateGEP(OriginalElTy, Casted, GepIndices,
"scevgep");
572 Ops.push_back(SE.getUnknown(GEP));
573 rememberInstruction(GEP);
576 return expand(SE.getAddExpr(Ops));
595 const Loop *SCEVExpander::getRelevantLoop(
const SCEV *S) {
597 auto Pair = RelevantLoops.insert(std::make_pair(S,
nullptr));
599 return Pair.first->second;
601 if (isa<SCEVConstant>(S))
604 if (
const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
605 if (
const Instruction *I = dyn_cast<Instruction>(U->getValue()))
606 return Pair.first->second = SE.LI.getLoopFor(I->
getParent());
611 const Loop *
L =
nullptr;
614 for (
const SCEV *Op :
N->operands())
616 return RelevantLoops[
N] =
L;
619 const Loop *Result = getRelevantLoop(
C->getOperand());
620 return RelevantLoops[
C] = Result;
624 getRelevantLoop(
D->getLHS()), getRelevantLoop(
D->getRHS()), SE.DT);
625 return RelevantLoops[
D] = Result;
638 bool operator()(std::pair<const Loop *, const SCEV *> LHS,
639 std::pair<const Loop *, const SCEV *> RHS)
const {
641 if (LHS.second->getType()->isPointerTy() !=
642 RHS.second->getType()->isPointerTy())
643 return LHS.second->getType()->isPointerTy();
646 if (LHS.first != RHS.first)
652 if (LHS.second->isNonConstantNegative()) {
653 if (!RHS.second->isNonConstantNegative())
655 }
else if (RHS.second->isNonConstantNegative())
673 for (std::reverse_iterator<SCEVAddExpr::op_iterator>
I(S->
op_end()),
675 OpsAndLoops.
push_back(std::make_pair(getRelevantLoop(*I), *I));
679 std::stable_sort(OpsAndLoops.
begin(), OpsAndLoops.
end(), LoopCompare(SE.DT));
683 Value *Sum =
nullptr;
684 for (
auto I = OpsAndLoops.
begin(),
E = OpsAndLoops.
end(); I !=
E;) {
685 const Loop *CurLoop = I->first;
686 const SCEV *Op = I->second;
691 }
else if (
PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
695 for (; I !=
E && I->first == CurLoop; ++
I) {
698 const SCEV *
X = I->second;
699 if (
const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
700 if (!isa<Instruction>(U->getValue()))
701 X = SE.getSCEV(U->getValue());
704 Sum = expandAddToGEP(NewOps.
begin(), NewOps.
end(), PTy, Ty, Sum);
710 NewOps.
push_back(isa<Instruction>(Sum) ? SE.getUnknown(Sum) :
712 for (++I; I !=
E && I->first == CurLoop; ++
I)
714 Sum = expandAddToGEP(NewOps.
begin(), NewOps.
end(), PTy, Ty,
expand(Op));
717 Value *W = expandCodeFor(SE.getNegativeSCEV(Op), Ty);
718 Sum = InsertNoopCastOfTo(Sum, Ty);
719 Sum = InsertBinop(Instruction::Sub, Sum, W);
723 Value *W = expandCodeFor(Op, Ty);
724 Sum = InsertNoopCastOfTo(Sum, Ty);
726 if (isa<Constant>(Sum))
std::swap(Sum, W);
741 for (std::reverse_iterator<SCEVMulExpr::op_iterator>
I(S->
op_end()),
743 OpsAndLoops.
push_back(std::make_pair(getRelevantLoop(*I), *I));
746 std::stable_sort(OpsAndLoops.
begin(), OpsAndLoops.
end(), LoopCompare(SE.DT));
750 Value *Prod =
nullptr;
751 for (
const auto &I : OpsAndLoops) {
752 const SCEV *Op = I.second;
758 Prod = InsertNoopCastOfTo(Prod, Ty);
762 Value *W = expandCodeFor(Op, Ty);
763 Prod = InsertNoopCastOfTo(Prod, Ty);
765 if (isa<Constant>(Prod))
std::swap(Prod, W);
770 Prod = InsertBinop(Instruction::Shl, Prod,
773 Prod = InsertBinop(Instruction::Mul, Prod, W);
786 const APInt &RHS =
SC->getAPInt();
788 return InsertBinop(Instruction::LShr, LHS,
793 return InsertBinop(Instruction::UDiv, LHS, RHS);
802 Base =
A->getStart();
805 A->getStepRecurrence(SE),
809 if (
const SCEVAddExpr *
A = dyn_cast<SCEVAddExpr>(Base)) {
810 Base =
A->getOperand(
A->getNumOperands()-1);
812 NewAddOps.
back() = Rest;
823 (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
828 if (L == IVIncInsertLoop) {
830 OE = IncV->
op_end(); OI != OE; ++OI)
831 if (
Instruction *OInst = dyn_cast<Instruction>(OI))
832 if (!SE.DT.dominates(OInst, IVIncInsertPos))
846 return isNormalAddRecExprPHI(PN, IncV, L);
861 if (IncV == InsertPos)
869 case Instruction::Sub: {
871 if (!OInst || SE.DT.dominates(OInst, InsertPos))
872 return dyn_cast<Instruction>(IncV->
getOperand(0));
875 case Instruction::BitCast:
877 case Instruction::GetElementPtr:
879 if (isa<Constant>(*I))
881 if (
Instruction *OInst = dyn_cast<Instruction>(*I)) {
882 if (!SE.DT.dominates(OInst, InsertPos))
895 unsigned AS = cast<PointerType>(IncV->
getType())->getAddressSpace();
912 void SCEVExpander::fixupInsertPoints(
Instruction *I) {
915 if (Builder.GetInsertPoint() == It)
916 Builder.SetInsertPoint(&*NewInsertPt);
917 for (
auto *InsertPtGuard : InsertPointGuards)
918 if (InsertPtGuard->GetInsertPoint() == It)
919 InsertPtGuard->SetInsertPoint(NewInsertPt);
926 if (SE.DT.dominates(IncV, InsertPos))
931 if (isa<PHINode>(InsertPos) ||
935 if (!SE.LI.movementPreservesLCSSAForm(IncV, InsertPos))
941 Instruction *Oper = getIVIncOperand(IncV, InsertPos,
true);
947 if (SE.DT.dominates(IncV, InsertPos))
950 for (
auto I = IVIncs.
rbegin(),
E = IVIncs.
rend(); I !=
E; ++
I) {
951 fixupInsertPoints(*I);
952 (*I)->moveBefore(InsertPos);
982 PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
985 if (!isa<ConstantInt>(StepV))
988 const SCEV *
const StepArray[1] = { SE.getSCEV(StepV) };
989 IncV = expandAddToGEP(StepArray, StepArray+1, GEPPtrTy, IntTy, PN);
991 IncV = Builder.CreateBitCast(IncV, PN->
getType());
992 rememberInstruction(IncV);
996 Builder.CreateSub(PN, StepV,
Twine(IVName) +
".iv.next") :
997 Builder.CreateAdd(PN, StepV,
Twine(IVName) +
".iv.next");
998 rememberInstruction(IncV);
1012 fixupInsertPoints(InstToHoist);
1015 InstToHoist = cast<Instruction>(InstToHoist->
getOperand(0));
1016 }
while (InstToHoist != LoopPhi);
1037 if (Phi == Requested) {
1053 if (!isa<IntegerType>(AR->
getType()))
1061 const SCEV *ExtendAfterOp =
1063 return ExtendAfterOp == OpAfterExtend;
1067 if (!isa<IntegerType>(AR->
getType()))
1075 const SCEV *ExtendAfterOp =
1077 return ExtendAfterOp == OpAfterExtend;
1084 SCEVExpander::getAddRecExprPHILiterally(
const SCEVAddRecExpr *Normalized,
1090 assert((!IVIncInsertLoop||IVIncInsertPos) &&
"Uninitialized insert position");
1095 PHINode *AddRecPhiMatch =
nullptr;
1102 bool TryNonMatchingSCEV =
1104 SE.DT.properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());
1108 if (!PN || !SE.isSCEVable(PN->
getType()))
1115 bool IsMatchingSCEV = PhiSCEV == Normalized;
1119 if (!IsMatchingSCEV && !TryNonMatchingSCEV)
1127 if (!isExpandedAddRecExprPHI(PN, TempIncV, L))
1129 if (L == IVIncInsertLoop && !hoistIVInc(TempIncV, IVIncInsertPos))
1132 if (!isNormalAddRecExprPHI(PN, TempIncV, L))
1137 if (IsMatchingSCEV) {
1141 AddRecPhiMatch = PN;
1147 if ((!TruncTy || InvertStep) &&
1151 AddRecPhiMatch = PN;
1153 TruncTy = SE.getEffectiveSCEVType(Normalized->
getType());
1157 if (AddRecPhiMatch) {
1160 if (L == IVIncInsertLoop)
1161 hoistBeforePos(&SE.DT, IncV, IVIncInsertPos, AddRecPhiMatch);
1165 InsertedValues.insert(AddRecPhiMatch);
1167 rememberInstruction(IncV);
1168 return AddRecPhiMatch;
1173 SCEVInsertPointGuard Guard(Builder,
this);
1183 PostIncLoops.
clear();
1187 "Can't expand add recurrences without a loop preheader!");
1188 Value *StartV = expandCodeFor(Normalized->
getStart(), ExpandTy,
1193 assert(!isa<Instruction>(StartV) ||
1194 SE.DT.properlyDominates(cast<Instruction>(StartV)->getParent(),
1205 Step = SE.getNegativeSCEV(Step);
1207 Value *StepV = expandCodeFor(Step, IntTy, &L->
getHeader()->front());
1212 bool IncrementIsNUW = !useSubtract &&
IsIncrementNUW(SE, Normalized);
1213 bool IncrementIsNSW = !useSubtract &&
IsIncrementNSW(SE, Normalized);
1217 Builder.SetInsertPoint(Header, Header->
begin());
1219 PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
1220 Twine(IVName) +
".iv");
1221 rememberInstruction(PN);
1238 Builder.SetInsertPoint(InsertPos);
1239 Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
1241 if (isa<OverflowingBinaryOperator>(IncV)) {
1243 cast<BinaryOperator>(IncV)->setHasNoUnsignedWrap();
1245 cast<BinaryOperator>(IncV)->setHasNoSignedWrap();
1252 PostIncLoops = SavedPostIncLoops;
1255 InsertedValues.
insert(PN);
1262 Type *IntTy = SE.getEffectiveSCEVType(STy);
1268 if (PostIncLoops.count(L)) {
1272 Normalize, S,
nullptr,
nullptr, Loops, SE, SE.DT));
1277 const SCEV *PostLoopOffset =
nullptr;
1278 if (!SE.properlyDominates(Start, L->
getHeader())) {
1279 PostLoopOffset = Start;
1280 Start = SE.getConstant(Normalized->
getType(), 0);
1281 Normalized = cast<SCEVAddRecExpr>(
1289 const SCEV *PostLoopScale =
nullptr;
1290 if (!SE.dominates(Step, L->
getHeader())) {
1291 PostLoopScale = Step;
1292 Step = SE.getConstant(Normalized->
getType(), 1);
1296 assert(!PostLoopOffset &&
"Start not-null but PostLoopOffset set?");
1297 PostLoopOffset = Start;
1298 Start = SE.getConstant(Normalized->
getType(), 0);
1301 cast<SCEVAddRecExpr>(SE.getAddRecExpr(
1302 Start, Step, Normalized->
getLoop(),
1308 Type *ExpandTy = PostLoopScale ? IntTy : STy;
1311 Type *TruncTy =
nullptr;
1312 bool InvertStep =
false;
1313 PHINode *PN = getAddRecExprPHILiterally(Normalized, L, ExpandTy, IntTy,
1314 TruncTy, InvertStep);
1318 if (!PostIncLoops.count(L))
1323 assert(LatchBlock &&
"PostInc mode requires a unique loop latch!");
1329 if (isa<Instruction>(Result) &&
1330 !SE.DT.dominates(cast<Instruction>(Result),
1331 &*Builder.GetInsertPoint())) {
1344 Step = SE.getNegativeSCEV(Step);
1348 SCEVInsertPointGuard Guard(Builder,
this);
1349 StepV = expandCodeFor(Step, IntTy, &L->
getHeader()->front());
1351 Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
1360 if (ResTy != SE.getEffectiveSCEVType(ResTy))
1361 Result = InsertNoopCastOfTo(Result, SE.getEffectiveSCEVType(ResTy));
1363 if (TruncTy != Result->
getType()) {
1364 Result = Builder.CreateTrunc(Result, TruncTy);
1365 rememberInstruction(Result);
1369 Result = Builder.CreateSub(expandCodeFor(Normalized->
getStart(), TruncTy),
1371 rememberInstruction(Result);
1376 if (PostLoopScale) {
1377 assert(S->
isAffine() &&
"Can't linearly scale non-affine recurrences.");
1378 Result = InsertNoopCastOfTo(Result, IntTy);
1379 Result = Builder.CreateMul(Result,
1380 expandCodeFor(PostLoopScale, IntTy));
1381 rememberInstruction(Result);
1385 if (PostLoopOffset) {
1386 if (
PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
1387 const SCEV *
const OffsetArray[1] = { PostLoopOffset };
1388 Result = expandAddToGEP(OffsetArray, OffsetArray+1, PTy, IntTy, Result);
1390 Result = InsertNoopCastOfTo(Result, IntTy);
1391 Result = Builder.CreateAdd(Result,
1392 expandCodeFor(PostLoopOffset, IntTy));
1393 rememberInstruction(Result);
1401 if (!CanonicalMode)
return expandAddRecExprLiterally(S);
1407 PHINode *CanonicalIV =
nullptr;
1409 if (SE.getTypeSizeInBits(PN->
getType()) >= SE.getTypeSizeInBits(Ty))
1415 SE.getTypeSizeInBits(CanonicalIV->
getType()) >
1416 SE.getTypeSizeInBits(Ty)) {
1424 V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty),
nullptr,
1432 NewOps[0] = SE.getConstant(Ty, 0);
1433 const SCEV *Rest = SE.getAddRecExpr(NewOps, L,
1439 const SCEV *RestArray[1] = { Rest };
1447 if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
1449 assert(StartV->
getType() == PTy &&
"Pointer type mismatch for GEP!");
1450 return expandAddToGEP(RestArray, RestArray+1, PTy, Ty, StartV);
1459 const SCEV *AddExprRHS = SE.getUnknown(
expand(Rest));
1460 return expand(SE.getAddExpr(AddExprLHS, AddExprRHS));
1471 rememberInstruction(CanonicalIV);
1477 if (!PredSeen.
insert(HP).second) {
1491 rememberInstruction(Add);
1501 assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->
getType()) &&
1502 "IVs with types different from the canonical IV should "
1503 "already have been handled!");
1512 expand(SE.getTruncateOrNoop(
1513 SE.getMulExpr(SE.getUnknown(CanonicalIV),
1522 const SCEV *IH = SE.getUnknown(CanonicalIV);
1525 const SCEV *NewS = S;
1526 const SCEV *
Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->
getType());
1527 if (isa<SCEVAddRecExpr>(Ext))
1530 const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
1534 const SCEV *
T = SE.getTruncateOrNoop(V, Ty);
1542 Value *I = Builder.CreateTrunc(V, Ty);
1543 rememberInstruction(I);
1551 Value *I = Builder.CreateZExt(V, Ty);
1552 rememberInstruction(I);
1560 Value *I = Builder.CreateSExt(V, Ty);
1561 rememberInstruction(I);
1572 Ty = SE.getEffectiveSCEVType(Ty);
1573 LHS = InsertNoopCastOfTo(LHS, Ty);
1576 Value *ICmp = Builder.CreateICmpSGT(LHS, RHS);
1577 rememberInstruction(ICmp);
1578 Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS,
"smax");
1579 rememberInstruction(Sel);
1585 LHS = InsertNoopCastOfTo(LHS, S->
getType());
1596 Ty = SE.getEffectiveSCEVType(Ty);
1597 LHS = InsertNoopCastOfTo(LHS, Ty);
1600 Value *ICmp = Builder.CreateICmpUGT(LHS, RHS);
1601 rememberInstruction(ICmp);
1602 Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS,
"umax");
1603 rememberInstruction(Sel);
1609 LHS = InsertNoopCastOfTo(LHS, S->
getType());
1616 return expandCodeFor(SH, Ty);
1623 assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->
getType()) &&
1624 "non-trivial casts should be done with the SCEVs directly!");
1625 V = InsertNoopCastOfTo(V, Ty);
1630 ScalarEvolution::ValueOffsetPair
1631 SCEVExpander::FindValueInExprValueMap(
const SCEV *S,
1636 if (CanonicalMode || !SE.containsAddRecurrence(S)) {
1642 for (
auto const &VOPair : *Set) {
1643 Value *V = VOPair.first;
1646 if (V && isa<Instruction>(V) && (EntInst = cast<Instruction>(V)) &&
1649 SE.DT.dominates(EntInst, InsertPt) &&
1650 (SE.LI.getLoopFor(EntInst->
getParent()) ==
nullptr ||
1656 return {
nullptr,
nullptr};
1665 Value *SCEVExpander::expand(
const SCEV *S) {
1668 Instruction *InsertPt = &*Builder.GetInsertPoint();
1669 for (
Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock());;
1671 if (SE.isLoopInvariant(S, L)) {
1679 InsertPt = &*L->
getHeader()->getFirstInsertionPt();
1685 if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
1686 InsertPt = &*L->
getHeader()->getFirstInsertionPt();
1687 while (InsertPt->
getIterator() != Builder.GetInsertPoint() &&
1688 (isInsertedInstruction(InsertPt) ||
1689 isa<DbgInfoIntrinsic>(InsertPt))) {
1696 auto I = InsertedExpressions.find(std::make_pair(S, InsertPt));
1697 if (I != InsertedExpressions.end())
1700 SCEVInsertPointGuard Guard(Builder,
this);
1701 Builder.SetInsertPoint(InsertPt);
1704 ScalarEvolution::ValueOffsetPair VO = FindValueInExprValueMap(S, InsertPt);
1705 Value *V = VO.first;
1709 else if (VO.second) {
1713 int64_t ESize = SE.getTypeSizeInBits(Ety);
1714 if ((Offset * 8) % ESize == 0) {
1717 V = Builder.CreateGEP(Ety, V, Idx,
"scevgep");
1721 unsigned AS = Vty->getAddressSpace();
1725 V = Builder.CreateBitCast(V, Vty);
1728 V = Builder.CreateSub(V, VO.second);
1737 InsertedExpressions[std::make_pair(S, InsertPt)] = V;
1741 void SCEVExpander::rememberInstruction(
Value *I) {
1742 if (!PostIncLoops.empty())
1743 InsertedPostIncValues.insert(I);
1745 InsertedValues.insert(I);
1759 const SCEV *
H = SE.getAddRecExpr(SE.getConstant(Ty, 0),
1763 SCEVInsertPointGuard Guard(Builder,
this);
1765 cast<PHINode>(expandCodeFor(H,
nullptr, &L->
getHeader()->front()));
1782 if (
auto *PN = dyn_cast<PHINode>(&I))
1797 unsigned NumElim = 0;
1805 if (!SE.isSCEVable(PN->
getType()))
1810 return Const->getValue();
1816 if (V->
getType() != Phi->getType())
1818 Phi->replaceAllUsesWith(V);
1822 <<
"INDVARS: Eliminated constant iv: " << *Phi <<
'\n');
1826 if (!SE.isSCEVable(Phi->getType()))
1829 PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
1832 if (Phi->getType()->isIntegerTy() && TTI &&
1836 const SCEV *TruncExpr =
1837 SE.getTruncateExpr(SE.getSCEV(Phi), Phis.back()->getType());
1838 ExprToIVMap[TruncExpr] = Phi;
1854 if (OrigInc && IsomorphicInc) {
1858 if (OrigPhiRef->
getType() == Phi->getType() &&
1859 !(ChainedPhis.count(Phi) ||
1860 isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L)) &&
1861 (ChainedPhis.count(Phi) ||
1862 isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
1873 const SCEV *TruncExpr =
1874 SE.getTruncateOrNoop(SE.getSCEV(OrigInc), IsomorphicInc->getType());
1875 if (OrigInc != IsomorphicInc &&
1876 TruncExpr == SE.getSCEV(IsomorphicInc) &&
1877 SE.LI.replacementPreservesLCSSAForm(IsomorphicInc, OrigInc) &&
1878 hoistIVInc(OrigInc, IsomorphicInc)) {
1880 dbgs() <<
"INDVARS: Eliminated congruent iv.inc: "
1881 << *IsomorphicInc <<
'\n');
1882 Value *NewInc = OrigInc;
1883 if (OrigInc->
getType() != IsomorphicInc->getType()) {
1885 if (
PHINode *PN = dyn_cast<PHINode>(OrigInc))
1893 OrigInc, IsomorphicInc->
getType(), IVName);
1903 Value *NewIV = OrigPhiRef;
1904 if (OrigPhiRef->
getType() != Phi->getType()) {
1907 NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->
getType(), IVName);
1918 getRelatedExistingExpansion(S, At, L);
1919 if (VO && VO.
getValue().second ==
nullptr)
1927 using namespace llvm::PatternMatch;
1938 if (!
match(BB->getTerminator(),
1943 if (SE.getSCEV(LHS) == S && SE.DT.dominates(LHS, At))
1944 return ScalarEvolution::ValueOffsetPair(LHS,
nullptr);
1946 if (SE.getSCEV(RHS) == S && SE.DT.dominates(RHS, At))
1947 return ScalarEvolution::ValueOffsetPair(RHS,
nullptr);
1952 ScalarEvolution::ValueOffsetPair VO = FindValueInExprValueMap(S, At);
1963 bool SCEVExpander::isHighCostExpansionHelper(
1969 if (At && getRelatedExistingExpansion(S, At, L))
1978 return isHighCostExpansionHelper(cast<SCEVTruncateExpr>(S)->getOperand(),
1981 return isHighCostExpansionHelper(cast<SCEVZeroExtendExpr>(S)->getOperand(),
1984 return isHighCostExpansionHelper(cast<SCEVSignExtendExpr>(S)->getOperand(),
1988 if (!Processed.
insert(S).second)
1991 if (
auto *UDivExpr = dyn_cast<SCEVUDivExpr>(S)) {
1995 if (
auto *
SC = dyn_cast<SCEVConstant>(UDivExpr->getRHS()))
1996 if (
SC->getAPInt().isPowerOf2()) {
1998 L->
getHeader()->getParent()->getParent()->getDataLayout();
1999 unsigned Width = cast<IntegerType>(UDivExpr->getType())->
getBitWidth();
2016 At = &ExitingBB->
back();
2017 if (!getRelatedExistingExpansion(
2018 SE.getAddExpr(S, SE.getConstant(S->
getType(), 1)), At,
L))
2024 if (isa<SCEVSMaxExpr>(S) || isa<SCEVUMaxExpr>(S))
2030 if (
const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(S)) {
2031 for (
auto *Op : NAry->operands())
2032 if (isHighCostExpansionHelper(Op, L, At, Processed))
2046 return expandUnionPredicate(cast<SCEVUnionPredicate>(Pred), IP);
2048 return expandEqualPredicate(cast<SCEVEqualPredicate>(Pred), IP);
2050 auto *AddRecPred = cast<SCEVWrapPredicate>(Pred);
2051 return expandWrapPredicate(AddRecPred, IP);
2062 Builder.SetInsertPoint(IP);
2063 auto *I = Builder.CreateICmpNE(Expr0, Expr1,
"ident.check");
2070 "non-affine expression");
2073 const SCEV *ExitCount =
2074 SE.getPredicatedBackedgeTakenCount(AR->
getLoop(), Pred);
2076 assert(ExitCount != SE.getCouldNotCompute() &&
"Invalid loop count");
2081 unsigned SrcBits = SE.getTypeSizeInBits(ExitCount->
getType());
2082 unsigned DstBits = SE.getTypeSizeInBits(AR->
getType());
2090 Builder.SetInsertPoint(Loc);
2091 Value *TripCountVal = expandCodeFor(ExitCount, CountTy, Loc);
2096 Value *StepValue = expandCodeFor(Step, Ty, Loc);
2097 Value *NegStepValue = expandCodeFor(SE.getNegativeSCEV(Step), Ty, Loc);
2098 Value *StartValue = expandCodeFor(Start, Ty, Loc);
2103 Builder.SetInsertPoint(Loc);
2106 Value *AbsStep = Builder.CreateSelect(StepCompare, NegStepValue, StepValue);
2109 Value *TruncTripCount = Builder.CreateZExtOrTrunc(TripCountVal, Ty);
2111 Intrinsic::umul_with_overflow, Ty);
2114 CallInst *Mul = Builder.CreateCall(MulF, {AbsStep, TruncTripCount},
"mul");
2115 Value *MulV = Builder.CreateExtractValue(Mul, 0,
"mul.result");
2116 Value *OfMul = Builder.CreateExtractValue(Mul, 1,
"mul.overflow");
2121 Value *Add = Builder.CreateAdd(StartValue, MulV);
2122 Value *Sub = Builder.CreateSub(StartValue, MulV);
2124 Value *EndCompareGT = Builder.CreateICmp(
2127 Value *EndCompareLT = Builder.CreateICmp(
2132 Builder.CreateSelect(StepCompare, EndCompareGT, EndCompareLT);
2137 if (SE.getTypeSizeInBits(CountTy) > SE.getTypeSizeInBits(Ty)) {
2139 auto *BackedgeCheck =
2140 Builder.CreateICmp(ICmpInst::ICMP_UGT, TripCountVal,
2142 BackedgeCheck = Builder.CreateAnd(
2145 EndCheck = Builder.CreateOr(EndCheck, BackedgeCheck);
2148 EndCheck = Builder.CreateOr(EndCheck, OfMul);
2154 const auto *
A = cast<SCEVAddRecExpr>(Pred->
getExpr());
2155 Value *NSSWCheck =
nullptr, *NUSWCheck =
nullptr;
2159 NUSWCheck = generateOverflowCheck(
A, IP,
false);
2163 NSSWCheck = generateOverflowCheck(
A, IP,
true);
2165 if (NUSWCheck && NSSWCheck)
2166 return Builder.CreateOr(NUSWCheck, NSSWCheck);
2184 auto *NextCheck = expandCodeForPredicate(Pred, IP);
2185 Builder.SetInsertPoint(IP);
2186 Check = Builder.CreateOr(Check, NextCheck);
2213 struct SCEVFindUnsafe {
2219 bool follow(
const SCEV *S) {
2228 const SCEV *Step = AR->getStepRecurrence(SE);
2229 if (!AR->isAffine() && !SE.dominates(Step, AR->getLoop()->getHeader())) {
2236 bool isDone()
const {
return IsUnsafe; }
2242 SCEVFindUnsafe Search(SE);
2244 return !Search.IsUnsafe;
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask=NoWrapMask) const
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type (if unknown returns 0).
const SCEV * getTruncateOrNoop(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
static bool Check(DecodeStatus &Out, DecodeStatus In)
IncrementWrapFlags getFlags() const
Returns the set assumed no overflow flags.
void push_back(const T &Elt)
A parsed version of the target data layout string in and methods for querying it. ...
bool hoistIVInc(Instruction *IncV, Instruction *InsertPos)
Utility for hoisting an IV increment.
static ConstantInt * getFalse(LLVMContext &Context)
static IntegerType * getInt1Ty(LLVMContext &C)
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
Value * getExactExistingExpansion(const SCEV *S, const Instruction *At, Loop *L)
Try to find existing LLVM IR value for S available at the point At.
LLVM Argument representation.
const SCEV * TransformForPostIncUse(TransformKind Kind, const SCEV *S, Instruction *User, Value *OperandValToReplace, PostIncLoopSet &Loops, ScalarEvolution &SE, DominatorTree &DT)
TransformForPostIncUse - Transform the given expression according to the given transformation kind...
const Instruction & back() const
bool isOne() const
Return true if the expression is a constant one.
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
const SCEV * getConstant(ConstantInt *V)
LLVMContext & getContext() const
bool isZero() const
Return true if the expression is a constant zero.
unsigned getNumOperands() const
The main scalar evolution driver.
This class represents a function call, abstracting a target machine's calling convention.
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space...
bool mayHaveSideEffects() const
Return true if the instruction may have side effects.
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, bool InBounds=false, Optional< unsigned > InRangeIndex=None, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
const SCEV * getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
Optional< ScalarEvolution::ValueOffsetPair > getRelatedExistingExpansion(const SCEV *S, const Instruction *At, Loop *L)
Try to find the ValueOffsetPair for S.
This class represents a truncation of an integer value to a smaller integer value.
Value * expandWrapPredicate(const SCEVWrapPredicate *P, Instruction *Loc)
A specialized variant of expandCodeForPredicate, handling the case when we are expanding code for a S...
LoopT * getParentLoop() const
const Instruction & front() const
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Type * getElementType() const
BlockT * getHeader() const
This is the base class for unary cast operator classes.
return AArch64::GPR64RegClass contains(Reg)
Type * getPointerElementType() const
const SCEV * getStart() const
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
StringRef getName() const
Return a constant reference to the value's name.
BlockT * getLoopLatch() const
If there is a single latch block for this loop, return it.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
iterator begin()
Instruction iterator methods.
#define DEBUG_WITH_TYPE(TYPE, X)
DEBUG_WITH_TYPE macro - This macro should be used by passes to emit debug information.
bool match(Val *V, const Pattern &P)
Used to lazily calculate structure layout information for a target machine, based on the DataLayout s...
Value * expandCodeForPredicate(const SCEVPredicate *Pred, Instruction *Loc)
Generates a code sequence that evaluates this predicate.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
static const Loop * PickMostRelevantLoop(const Loop *A, const Loop *B, DominatorTree &DT)
PickMostRelevantLoop - Given two loops pick the one that's most relevant for SCEV expansion...
This is the base class for all instructions that perform data casts.
Class to represent struct types.
A Use represents the edge between a Value definition and its users.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
static GCRegistry::Add< StatepointGC > D("statepoint-example","an example strategy for statepoint")
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
void getExitingBlocks(SmallVectorImpl< BlockT * > &ExitingBlocks) const
Return all blocks inside the loop that have successors outside of the loop.
Windows NT (Windows on ARM)
op_iterator op_begin() const
This node represents multiplication of some number of SCEVs.
Value * generateOverflowCheck(const SCEVAddRecExpr *AR, Instruction *Loc, bool Signed)
Generates code that evaluates if the AR expression will overflow.
LLVM_NODISCARD bool empty() const
static Constant * get(unsigned Opcode, Constant *C1, Constant *C2, unsigned Flags=0, Type *OnlyIfReducedTy=nullptr)
get - Return a binary or shift operator constant expression, folding if possible. ...
A constant value that is initialized with an expression using other constant values.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
This node represents a polynomial recurrence on the trip count of the specified loop.
static Value * SimplifyPHINode(PHINode *PN, const Query &Q)
See if we can fold the given phi. If not, returns null.
const T & getValue() const LLVM_LVALUE_FUNCTION
Class to represent array types.
Function Alias Analysis false
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
static GCRegistry::Add< OcamlGC > B("ocaml","ocaml 3.10-compatible GC")
const Function * getFunction() const
Return the function this instruction belongs to.
void SetCurrentDebugLocation(DebugLoc L)
Set location information used by debugging information.
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power of 2.
Type * getEffectiveSCEVType(Type *Ty) const
Return a type with the same bitwidth as the given type and which represents how SCEV will treat the g...
void takeName(Value *V)
Transfer the name from V to this value.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree...
const SCEV * getAddRecExpr(const SCEV *Start, const SCEV *Step, const Loop *L, SCEV::NoWrapFlags Flags)
Get an add recurrence expression for the specified loop.
static BinaryOperator * CreateAdd(Value *S1, Value *S2, const Twine &Name, Instruction *InsertBefore, Value *FlagsOp)
size_t getNumOperands() const
Class to represent pointers.
static GCRegistry::Add< CoreCLRGC > E("coreclr","CoreCLR-compatible GC")
uint64_t getElementOffset(unsigned Idx) const
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
This means that we are dealing with an entirely unknown SCEV value, and only represent it as its LLVM...
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values...
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
BlockT * getLoopPreheader() const
If there is a preheader for this loop, return it.
LLVM Basic Block Representation.
This class represents a binary unsigned division operation.
The instances of the Type class are immutable: once they are created, they are never changed...
Type * getType() const
Return the LLVM type of this SCEV expression.
static void SplitAddRecs(SmallVectorImpl< const SCEV * > &Ops, Type *Ty, ScalarEvolution &SE)
SplitAddRecs - Flatten a list of add operands, moving addrec start values out to the top level...
bool isVectorTy() const
True if this is an instance of VectorType.
This is an important base class in LLVM.
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator begin()
const SCEV * getOperand(unsigned i) const
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Normalize - Normalize according to the given loops.
const SCEV * getExpr() const override
Implementation of the SCEVPredicate interface.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Interval::pred_iterator pred_begin(Interval *I)
pred_begin/pred_end - define methods so that Intervals may be used just like BasicBlocks can with the...
bool isIllegalInteger(uint64_t Width) const
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
brc_match< Cond_t > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
const SCEVUnknown * getLHS() const
Returns the left hand side of the equality.
bool contains(const LoopT *L) const
Return true if the specified loop is contained within this loop.
bool any_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly...
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang","erlang-compatible garbage collector")
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
static bool FactorOutConstant(const SCEV *&S, const SCEV *&Remainder, const SCEV *Factor, ScalarEvolution &SE, const DataLayout &DL)
FactorOutConstant - Test if S is divisible by Factor, using signed division.
Value * expandCodeFor(const SCEV *SH, Type *Ty, Instruction *I)
Insert code to directly compute the specified SCEV expression into the program.
BlockT * getExitingBlock() const
If getExitingBlocks would return exactly one block, return that block.
Value * getOperand(unsigned i) const
Interval::pred_iterator pred_end(Interval *I)
self_iterator getIterator()
unsigned getIntegerBitWidth() const
Class to represent integer types.
std::pair< NoneType, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
static Expected< BitVector > expand(StringRef S, StringRef Original)
const SCEV * getLHS() const
static void SimplifyAddOperands(SmallVectorImpl< const SCEV * > &Ops, Type *Ty, ScalarEvolution &SE)
SimplifyAddOperands - Sort and simplify a list of add operands.
const APInt & getAPInt() const
void append(in_iter in_start, in_iter in_end)
Add the specified range to the end of the SmallVector.
bool isPointerTy() const
True if this is an instance of PointerType.
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
bool isNonConstantNegative() const
Return true if the specified scev is negated, but not a constant.
SCEVPredicateKind getKind() const
LLVMContext & getContext() const
All values hold a context through their type.
static PointerType * getInt8PtrTy(LLVMContext &C, unsigned AS=0)
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
const SCEV * getRHS() const
This class represents an assumption made using SCEV expressions which can be checked at run-time...
static bool IsIncrementNSW(ScalarEvolution &SE, const SCEVAddRecExpr *AR)
bool dominates(const Instruction *Def, const Use &U) const
Return true if Def dominates a use in User.
static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest, ScalarEvolution &SE)
Move parts of Base into Rest to leave Base with the minimal expression that provides a pointer operan...
unsigned replaceCongruentIVs(Loop *L, const DominatorTree *DT, SmallVectorImpl< WeakVH > &DeadInsts, const TargetTransformInfo *TTI=nullptr)
replace congruent phis with their most canonical representative.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
const SCEVConstant * getRHS() const
Returns the right hand side of the equality.
Iterator for intrusive lists based on ilist_node.
static PointerType * getInt1PtrTy(LLVMContext &C, unsigned AS=0)
This is the shared class of boolean and integer constants.
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
unsigned logBase2() const
PHINode * getOrInsertCanonicalInductionVariable(const Loop *L, Type *Ty)
This method returns the canonical induction variable of the specified type for the specified loop (in...
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
bool isAllOnesValue() const
Return true if the expression is a constant all-ones value.
Module.h This file contains the declarations for the Module class.
Type * getType() const
All values are typed, get the type of this value.
CHAIN = SC CHAIN, Imm128 - System call.
uint64_t getSizeInBytes() const
PHINode * getCanonicalInductionVariable() const
Check to see if the loop has a canonical induction variable: an integer recurrence that starts at 0 a...
unsigned getElementContainingOffset(uint64_t Offset) const
Given a valid byte offset into the structure, returns the structure index that contains it...
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
static ConstantInt * getSigned(IntegerType *Ty, int64_t V)
Return a ConstantInt with the specified value for the specified type.
bool isZero() const
This is just a convenience method to make client code smaller for a common case.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
ConstantInt * getValue() const
static GCRegistry::Add< ShadowStackGC > C("shadow-stack","Very portable GC for uncooperative code generators")
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Class for arbitrary precision integers.
This node represents an addition of some number of SCEVs.
static BasicBlock::iterator findInsertPointAfter(Instruction *I, BasicBlock *MustDominate)
const SCEV * getSignExtendExpr(const SCEV *Op, Type *Ty)
Value * getIncomingValueForBlock(const BasicBlock *BB) const
bool isIntegerTy() const
True if this is an instance of IntegerType.
This class represents a signed maximum selection.
iterator_range< user_iterator > users()
Value * expandUnionPredicate(const SCEVUnionPredicate *Pred, Instruction *Loc)
A specialized variant of expandCodeForPredicate, handling the case when we are expanding code for a S...
static Constant * getCast(unsigned ops, Constant *C, Type *Ty, bool OnlyIfReduced=false)
Convenience function for getting a Cast operation.
const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
Get a canonical add expression, or something simpler if possible.
void visitAll(const SCEV *Root, SV &Visitor)
Use SCEVTraversal to visit all nodes in the given expression tree.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
This class represents a zero extension of a small integer value to a larger integer value...
Value * CreateTruncOrBitCast(Value *V, Type *DestTy, const Twine &Name="")
static bool IsIncrementNUW(ScalarEvolution &SE, const SCEVAddRecExpr *AR)
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", Instruction *InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator end()
void emplace_back(ArgTypes &&...Args)
This class represents an analyzed expression in the program.
static IntegerType * getInt32Ty(LLVMContext &C)
Represents a single loop in the control flow graph.
static Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
TerminatorInst * getTerminator()
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
LLVM_ATTRIBUTE_ALWAYS_INLINE size_type size() const
This class represents a sign extension of a small integer value to a larger integer value...
This class represents an unsigned maximum selection.
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Instruction * getIVIncOperand(Instruction *IncV, Instruction *InsertPos, bool allowScale)
Return the induction variable increment's IV operand.
const Loop * getLoop() const
reverse_iterator rbegin()
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This class represents a composition of other SCEV predicates, and is the class that most clients will...
unsigned getSCEVType() const
unsigned getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
const SCEV * getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
Return the SCEV object corresponding to -V.
Value * expandEqualPredicate(const SCEVEqualPredicate *Pred, Instruction *Loc)
A specialized variant of expandCodeForPredicate, handling the case when we are expanding code for a S...
LLVM Value Representation.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
A vector that has set insertion semantics.
void moveBefore(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
op_iterator op_end() const
APInt zext(unsigned width) const
Zero extend to a new width.
Value * SimplifyInstruction(Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr)
See if we can compute a simplified version of this instruction.
iterator getFirstInsertionPt()
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
static APInt getNullValue(unsigned numBits)
Get the '0' value.
const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty)
This node is a base class providing common functionality for n'ary operators.
This class represents an assumption made on an AddRec expression.
int64_t getSExtValue() const
Return the constant as a 64-bit integer value after it has been sign extended as appropriate for the ...
bool isSafeToExpand(const SCEV *S, ScalarEvolution &SE)
Return true if the given expression is safe to expand in the sense that all materialized values are s...
const SCEV * getOperand() const
static GCRegistry::Add< ErlangGC > A("erlang","erlang-compatible garbage collector")
const SCEV * getMulExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
Get a canonical multiply expression, or something simpler if possible.
This class represents an assumption that two SCEV expressions are equal, and this can be checked at r...
static IntegerType * getInt8Ty(LLVMContext &C)
const BasicBlock * getParent() const
const SmallVectorImpl< const SCEVPredicate * > & getPredicates() const
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
static bool canBeCheaplyTransformed(ScalarEvolution &SE, const SCEVAddRecExpr *Phi, const SCEVAddRecExpr *Requested, bool &InvertStep)
Check whether we can cheaply express the requested SCEV in terms of the available PHI SCEV by truncat...
This class represents a constant integer value.
CmpClass_match< LHS, RHS, ICmpInst, ICmpInst::Predicate > m_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R)