// Debug type tag consumed by the LLVM_DEBUG / STATISTIC machinery; must be
// defined before any STATISTIC() below so the counters are grouped under
// the "instcombine" debug type.
#define DEBUG_TYPE "instcombine"
118 "Number of instruction combining iterations performed");
119STATISTIC(NumOneIteration,
"Number of functions with one iteration");
120STATISTIC(NumTwoIterations,
"Number of functions with two iterations");
121STATISTIC(NumThreeIterations,
"Number of functions with three iterations");
123 "Number of functions with four or more iterations");
127STATISTIC(NumDeadInst ,
"Number of dead inst eliminated");
133 "Controls which instructions are visited");
140 "instcombine-max-sink-users",
cl::init(32),
141 cl::desc(
"Maximum number of undroppable users for instruction sinking"));
145 cl::desc(
"Maximum array size considered when doing a combine"));
149 cl::desc(
"Maximum number of users to visit in alloc-site "
150 "removability analysis"));
166std::optional<Instruction *>
169 if (
II.getCalledFunction()->isTargetIntrinsic()) {
170 return TTIForTargetIntrinsicsOnly.instCombineIntrinsic(*
this,
II);
177 bool &KnownBitsComputed) {
179 if (
II.getCalledFunction()->isTargetIntrinsic()) {
180 return TTIForTargetIntrinsicsOnly.simplifyDemandedUseBitsIntrinsic(
181 *
this,
II, DemandedMask, Known, KnownBitsComputed);
192 if (
II.getCalledFunction()->isTargetIntrinsic()) {
193 return TTIForTargetIntrinsicsOnly.simplifyDemandedVectorEltsIntrinsic(
194 *
this,
II, DemandedElts, PoisonElts, PoisonElts2, PoisonElts3,
204 return TTIForTargetIntrinsicsOnly.isValidAddrSpaceCast(FromAS, ToAS);
214 Builder.SetInsertPoint(Inst);
218 if (Inst && !
GEP->hasAllConstantIndices() &&
219 !
GEP->getSourceElementType()->isIntegerTy(8)) {
221 *Inst, Builder.CreateGEP(Builder.getInt8Ty(),
GEP->getPointerOperand(),
239 Value *Sum =
nullptr;
240 Value *OneUseSum =
nullptr;
241 Value *OneUseBase =
nullptr;
248 IRBuilderBase::InsertPointGuard Guard(
Builder);
250 if (RewriteGEPs && Inst)
254 if (
Offset->getType() != IdxTy)
257 if (
GEP->hasOneUse()) {
262 OneUseBase =
GEP->getPointerOperand();
271 if (RewriteGEPs && Inst &&
272 Offset->getType()->isVectorTy() ==
GEP->getType()->isVectorTy() &&
273 !(
GEP->getSourceElementType()->isIntegerTy(8) &&
278 OneUseBase ? OneUseBase :
GEP->getPointerOperand(),
Offset,
"",
285 OneUseSum = OneUseBase =
nullptr;
289 Sum =
Add(Sum, OneUseSum);
300bool InstCombinerImpl::isDesirableIntType(
unsigned BitWidth)
const {
319bool InstCombinerImpl::shouldChangeType(
unsigned FromWidth,
320 unsigned ToWidth)
const {
321 bool FromLegal = FromWidth == 1 ||
DL.isLegalInteger(FromWidth);
322 bool ToLegal = ToWidth == 1 ||
DL.isLegalInteger(ToWidth);
326 if (ToWidth < FromWidth && isDesirableIntType(ToWidth))
331 if ((FromLegal || isDesirableIntType(FromWidth)) && !ToLegal)
336 if (!FromLegal && !ToLegal && ToWidth > FromWidth)
347bool InstCombinerImpl::shouldChangeType(
Type *From,
Type *To)
const {
355 return shouldChangeType(FromWidth, ToWidth);
365 if (!OBO || !OBO->hasNoSignedWrap())
368 const APInt *BVal, *CVal;
373 bool Overflow =
false;
374 switch (
I.getOpcode()) {
375 case Instruction::Add:
376 (void)BVal->
sadd_ov(*CVal, Overflow);
378 case Instruction::Sub:
379 (void)BVal->
ssub_ov(*CVal, Overflow);
381 case Instruction::Mul:
382 (void)BVal->
smul_ov(*CVal, Overflow);
393 return OBO && OBO->hasNoUnsignedWrap();
398 return OBO && OBO->hasNoSignedWrap();
407 I.clearSubclassOptionalData();
412 I.clearSubclassOptionalData();
413 I.setFastMathFlags(FMF);
423 if (!Cast || !Cast->hasOneUse())
427 auto CastOpcode = Cast->getOpcode();
428 if (CastOpcode != Instruction::ZExt)
437 if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode)
463 Cast->dropPoisonGeneratingFlags();
469Value *InstCombinerImpl::simplifyIntToPtrRoundTripCast(
Value *Val) {
471 if (IntToPtr &&
DL.getTypeSizeInBits(IntToPtr->getDestTy()) ==
472 DL.getTypeSizeInBits(IntToPtr->getSrcTy())) {
474 Type *CastTy = IntToPtr->getDestTy();
477 PtrToInt->getSrcTy()->getPointerAddressSpace() &&
478 DL.getTypeSizeInBits(PtrToInt->getSrcTy()) ==
479 DL.getTypeSizeInBits(PtrToInt->getDestTy()))
480 return PtrToInt->getOperand(0);
517 if (
I.isCommutative()) {
518 if (
auto Pair = matchSymmetricPair(
I.getOperand(0),
I.getOperand(1))) {
528 if (
I.isAssociative()) {
551 I.setHasNoUnsignedWrap(
true);
554 I.setHasNoSignedWrap(
true);
583 if (
I.isAssociative() &&
I.isCommutative()) {
660 I.setHasNoUnsignedWrap(
true);
678 if (LOp == Instruction::And)
679 return ROp == Instruction::Or || ROp == Instruction::Xor;
682 if (LOp == Instruction::Or)
683 return ROp == Instruction::And;
687 if (LOp == Instruction::Mul)
688 return ROp == Instruction::Add || ROp == Instruction::Sub;
725 assert(
Op &&
"Expected a binary operator");
726 LHS =
Op->getOperand(0);
727 RHS =
Op->getOperand(1);
728 if (TopOpcode == Instruction::Add || TopOpcode == Instruction::Sub) {
733 Instruction::Shl, ConstantInt::get(
Op->getType(), 1),
C);
734 assert(
RHS &&
"Constant folding of immediate constants failed");
735 return Instruction::Mul;
740 if (OtherOp && OtherOp->
getOpcode() == Instruction::AShr &&
743 return Instruction::AShr;
746 return Op->getOpcode();
755 assert(
A &&
B &&
C &&
D &&
"All values must be provided");
758 Value *RetVal =
nullptr;
769 if (
A ==
C || (InnerCommutative &&
A ==
D)) {
778 if (!V && (
LHS->hasOneUse() ||
RHS->hasOneUse()))
779 V = Builder.CreateBinOp(TopLevelOpcode,
B,
D,
RHS->getName());
781 RetVal = Builder.CreateBinOp(InnerOpcode,
A, V);
789 if (
B ==
D || (InnerCommutative &&
B ==
C)) {
798 if (!V && (
LHS->hasOneUse() ||
RHS->hasOneUse()))
799 V = Builder.CreateBinOp(TopLevelOpcode,
A,
C,
LHS->getName());
801 RetVal = Builder.CreateBinOp(InnerOpcode, V,
B);
816 HasNSW =
I.hasNoSignedWrap();
817 HasNUW =
I.hasNoUnsignedWrap();
820 HasNSW &= LOBO->hasNoSignedWrap();
821 HasNUW &= LOBO->hasNoUnsignedWrap();
825 HasNSW &= ROBO->hasNoSignedWrap();
826 HasNUW &= ROBO->hasNoUnsignedWrap();
829 if (TopLevelOpcode == Instruction::Add && InnerOpcode == Instruction::Mul) {
857 unsigned Opc =
I->getOpcode();
858 unsigned ConstIdx = 1;
865 case Instruction::Sub:
868 case Instruction::ICmp:
875 case Instruction::Or:
879 case Instruction::Add:
885 if (!
match(
I->getOperand(1 - ConstIdx),
895 Constant *BitWidthC = ConstantInt::get(Ty, Ty->getScalarSizeInBits());
901 if (!Cmp || !Cmp->isNullValue())
906 bool Consumes =
false;
910 assert(NotOp !=
nullptr &&
911 "Desync between isFreeToInvert and getFreelyInverted");
913 Value *CtpopOfNotOp =
Builder.CreateIntrinsic(Ty, Intrinsic::ctpop, NotOp);
920 case Instruction::Sub:
923 case Instruction::Or:
924 case Instruction::Add:
927 case Instruction::ICmp:
963 auto IsValidBinOpc = [](
unsigned Opc) {
967 case Instruction::And:
968 case Instruction::Or:
969 case Instruction::Xor:
970 case Instruction::Add:
979 auto IsCompletelyDistributable = [](
unsigned BinOpc1,
unsigned BinOpc2,
981 assert(ShOpc != Instruction::AShr);
982 return (BinOpc1 != Instruction::Add && BinOpc2 != Instruction::Add) ||
983 ShOpc == Instruction::Shl;
986 auto GetInvShift = [](
unsigned ShOpc) {
987 assert(ShOpc != Instruction::AShr);
988 return ShOpc == Instruction::LShr ? Instruction::Shl : Instruction::LShr;
991 auto CanDistributeBinops = [&](
unsigned BinOpc1,
unsigned BinOpc2,
995 if (BinOpc1 == Instruction::And)
1000 if (!IsCompletelyDistributable(BinOpc1, BinOpc2, ShOpc))
1006 if (BinOpc2 == Instruction::And)
1017 auto MatchBinOp = [&](
unsigned ShOpnum) ->
Instruction * {
1019 Value *
X, *
Y, *ShiftedX, *Mask, *Shift;
1020 if (!
match(
I.getOperand(ShOpnum),
1024 I.getOperand(1 - ShOpnum),
1037 unsigned ShOpc = IY->getOpcode();
1038 if (ShOpc != IX->getOpcode())
1046 unsigned BinOpc = BO2->getOpcode();
1048 if (!IsValidBinOpc(
I.getOpcode()) || !IsValidBinOpc(BinOpc))
1051 if (ShOpc == Instruction::AShr) {
1065 if (BinOpc ==
I.getOpcode() &&
1066 IsCompletelyDistributable(
I.getOpcode(), BinOpc, ShOpc)) {
1081 if (!CanDistributeBinops(
I.getOpcode(), BinOpc, ShOpc, CMask, CShift))
1088 Value *NewBinOp1 =
Builder.CreateBinOp(
I.getOpcode(),
Y, NewBinOp2);
1095 return MatchBinOp(1);
1112 Value *LHS =
I.getOperand(0), *RHS =
I.getOperand(1);
1113 Value *
A, *CondVal, *TrueVal, *FalseVal;
1115 Constant *CastTrueVal, *CastFalseVal;
1117 auto MatchSelectAndCast = [&](
Value *CastOp,
Value *SelectOp) {
1126 if (MatchSelectAndCast(LHS, RHS))
1128 else if (MatchSelectAndCast(RHS, LHS))
1137 auto NewFoldedConst = [&](
bool IsTrueArm,
Value *V) {
1138 bool IsCastOpRHS = (CastOp == RHS);
1139 Value *CastVal = IsTrueArm ? CastFalseVal : CastTrueVal;
1141 return IsCastOpRHS ?
Builder.CreateBinOp(
Opc, V, CastVal)
1148 Value *NewTrueVal = NewFoldedConst(
false, TrueVal);
1150 NewFoldedConst(
true, FalseVal),
"",
nullptr,
SI);
1153 Value *NewTrueVal = NewFoldedConst(
true, TrueVal);
1155 NewFoldedConst(
false, FalseVal),
"",
nullptr,
SI);
1162 Value *LHS =
I.getOperand(0), *RHS =
I.getOperand(1);
1176 if (Op0 && Op1 && LHSOpcode == RHSOpcode)
1205 Value *LHS =
I.getOperand(0), *RHS =
I.getOperand(1);
1222 auto SQDistributive =
SQ.getWithInstruction(&
I).getWithoutUndef();
1230 C =
Builder.CreateBinOp(InnerOpcode, L, R);
1239 C =
Builder.CreateBinOp(TopLevelOpcode,
B,
C);
1248 C =
Builder.CreateBinOp(TopLevelOpcode,
A,
C);
1261 auto SQDistributive =
SQ.getWithInstruction(&
I).getWithoutUndef();
1269 A =
Builder.CreateBinOp(InnerOpcode, L, R);
1278 A =
Builder.CreateBinOp(TopLevelOpcode,
A,
C);
1287 A =
Builder.CreateBinOp(TopLevelOpcode,
A,
B);
1296static std::optional<std::pair<Value *, Value *>>
1298 if (
LHS->getParent() !=
RHS->getParent())
1299 return std::nullopt;
1301 if (
LHS->getNumIncomingValues() < 2)
1302 return std::nullopt;
1305 return std::nullopt;
1307 Value *L0 =
LHS->getIncomingValue(0);
1308 Value *R0 =
RHS->getIncomingValue(0);
1310 for (
unsigned I = 1,
E =
LHS->getNumIncomingValues();
I !=
E; ++
I) {
1314 if ((L0 == L1 && R0 == R1) || (L0 == R1 && R0 == L1))
1317 return std::nullopt;
1320 return std::optional(std::pair(L0, R0));
1323std::optional<std::pair<Value *, Value *>>
1328 return std::nullopt;
1330 case Instruction::PHI:
1332 case Instruction::Select: {
1338 return std::pair(TrueVal, FalseVal);
1339 return std::nullopt;
1341 case Instruction::Call: {
1345 if (LHSMinMax && RHSMinMax &&
1352 return std::pair(LHSMinMax->
getLHS(), LHSMinMax->
getRHS());
1353 return std::nullopt;
1356 return std::nullopt;
1366 if (!LHSIsSelect && !RHSIsSelect)
1376 FMF = FPOp->getFastMathFlags();
1377 Builder.setFastMathFlags(FMF);
1383 Value *
Cond, *True =
nullptr, *False =
nullptr;
1391 if (Opcode != Instruction::Add || (!True && !False) || (True && False))
1405 if (LHSIsSelect && RHSIsSelect &&
A ==
D) {
1411 if (LHS->hasOneUse() && RHS->hasOneUse()) {
1413 True =
Builder.CreateBinOp(Opcode,
B, E);
1414 else if (True && !False)
1415 False =
Builder.CreateBinOp(Opcode,
C,
F);
1417 }
else if (LHSIsSelect && LHS->hasOneUse()) {
1422 if (
Value *NewSel = foldAddNegate(
B,
C, RHS))
1424 }
else if (RHSIsSelect && RHS->hasOneUse()) {
1429 if (
Value *NewSel = foldAddNegate(E,
F, LHS))
1433 if (!True || !False)
1446 if (U == IgnoredUser)
1449 case Instruction::Select: {
1452 SI->swapProfMetadata();
1455 case Instruction::CondBr: {
1459 BPI->swapSuccEdgesProbabilities(BI->getParent());
1462 case Instruction::Xor:
1469 "canFreelyInvertAllUsersOf() ?");
1479 for (
unsigned Idx = 0, End = DbgVal->getNumVariableLocationOps();
1481 if (DbgVal->getVariableLocationOp(Idx) ==
I)
1482 DbgVal->setExpression(
1489Value *InstCombinerImpl::dyn_castNegVal(
Value *V)
const {
1499 if (
C->getType()->getElementType()->isIntegerTy())
1503 for (
unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1519 if (CV->getType()->isVectorTy() &&
1520 CV->getType()->getScalarType()->isIntegerTy() && CV->getSplatValue())
1533Instruction *InstCombinerImpl::foldFBinOpOfIntCastsFromSign(
1534 BinaryOperator &BO,
bool OpsFromSigned, std::array<Value *, 2> IntOps,
1538 Type *IntTy = IntOps[0]->getType();
1543 unsigned MaxRepresentableBits =
1548 unsigned NumUsedLeadingBits[2] = {IntSz, IntSz};
1552 auto IsNonZero = [&](
unsigned OpNo) ->
bool {
1553 if (OpsKnown[OpNo].hasKnownBits() &&
1554 OpsKnown[OpNo].getKnownBits(
SQ).isNonZero())
1559 auto IsNonNeg = [&](
unsigned OpNo) ->
bool {
1563 return OpsKnown[OpNo].getKnownBits(
SQ).isNonNegative();
1567 auto IsValidPromotion = [&](
unsigned OpNo) ->
bool {
1578 if (MaxRepresentableBits < IntSz) {
1588 NumUsedLeadingBits[OpNo] =
1589 IntSz - OpsKnown[OpNo].getKnownBits(
SQ).countMinLeadingZeros();
1597 if (MaxRepresentableBits < NumUsedLeadingBits[OpNo])
1600 return !OpsFromSigned || BO.
getOpcode() != Instruction::FMul ||
1605 if (Op1FpC !=
nullptr) {
1607 if (OpsFromSigned && BO.
getOpcode() == Instruction::FMul &&
1612 OpsFromSigned ? Instruction::FPToSI : Instruction::FPToUI, Op1FpC,
1614 if (Op1IntC ==
nullptr)
1617 : Instruction::UIToFP,
1618 Op1IntC, FPTy,
DL) != Op1FpC)
1622 IntOps[1] = Op1IntC;
1626 if (IntTy != IntOps[1]->
getType())
1629 if (Op1FpC ==
nullptr) {
1630 if (!IsValidPromotion(1))
1633 if (!IsValidPromotion(0))
1639 bool NeedsOverflowCheck =
true;
1642 unsigned OverflowMaxOutputBits = OpsFromSigned ? 2 : 1;
1643 unsigned OverflowMaxCurBits =
1644 std::max(NumUsedLeadingBits[0], NumUsedLeadingBits[1]);
1645 bool OutputSigned = OpsFromSigned;
1647 case Instruction::FAdd:
1648 IntOpc = Instruction::Add;
1649 OverflowMaxOutputBits += OverflowMaxCurBits;
1651 case Instruction::FSub:
1652 IntOpc = Instruction::Sub;
1653 OverflowMaxOutputBits += OverflowMaxCurBits;
1655 case Instruction::FMul:
1656 IntOpc = Instruction::Mul;
1657 OverflowMaxOutputBits += OverflowMaxCurBits * 2;
1663 if (OverflowMaxOutputBits < IntSz) {
1664 NeedsOverflowCheck =
false;
1667 if (IntOpc == Instruction::Sub)
1668 OutputSigned =
true;
1674 if (NeedsOverflowCheck &&
1675 !willNotOverflow(IntOpc, IntOps[0], IntOps[1], BO, OutputSigned))
1678 Value *IntBinOp =
Builder.CreateBinOp(IntOpc, IntOps[0], IntOps[1]);
1680 IntBO->setHasNoSignedWrap(OutputSigned);
1681 IntBO->setHasNoUnsignedWrap(!OutputSigned);
1684 return new SIToFPInst(IntBinOp, FPTy);
1685 return new UIToFPInst(IntBinOp, FPTy);
1699 std::array<Value *, 2> IntOps = {
nullptr,
nullptr};
1717 if (Instruction *R = foldFBinOpOfIntCastsFromSign(BO,
false,
1718 IntOps, Op1FpC, OpsKnown))
1720 return foldFBinOpOfIntCastsFromSign(BO,
true, IntOps,
1736 !
X->getType()->isIntOrIntVectorTy(1))
1744 return createSelectInstWithUnknownProfile(
X, TVal, FVal);
1753 V = IsTrueArm ?
SI->getTrueValue() :
SI->getFalseValue();
1754 }
else if (
match(
SI->getCondition(),
1761 V = IsTrueArm ? ConstantInt::get(
Op->getType(), 1)
1782 bool FoldWithMultiUse,
1783 bool SimplifyBothArms) {
1785 if (!
SI->hasOneUser() && !FoldWithMultiUse)
1788 Value *TV =
SI->getTrueValue();
1789 Value *FV =
SI->getFalseValue();
1792 if (
SI->getType()->isIntOrIntVectorTy(1))
1798 for (
Value *IntrinOp :
Op.operands())
1800 for (
Value *PhiOp : PN->operands())
1812 if (CI->hasOneUse()) {
1813 Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
1814 if (((TV == Op0 && FV == Op1) || (FV == Op0 && TV == Op1)) &&
1815 !CI->isCommutative())
1824 if (!NewTV && !NewFV)
1827 if (SimplifyBothArms && !(NewTV && NewFV))
1847 Ops.push_back(InValue);
1887 assert(
Op.isAssociative() &&
"The operation must be associative!");
1893 !
Op.hasOneUse() || !
SI->hasOneUse())
1896 Value *TV =
SI->getTrueValue();
1897 Value *FV =
SI->getFalseValue();
1915 if (!NewTV || !NewFV)
1919 Builder.CreateSelect(
SI->getCondition(), NewTV, NewFV,
"",
1925 bool AllowMultipleUses) {
1927 if (NumPHIValues == 0)
1934 bool IdenticalUsers =
false;
1935 if (!AllowMultipleUses && !OneUse) {
1939 if (UI != &
I && !
I.isIdenticalTo(UI))
1943 IdenticalUsers =
true;
1973 bool SeenNonSimplifiedInVal =
false;
1974 for (
unsigned i = 0; i != NumPHIValues; ++i) {
1985 auto WillFold = [&]() {
1990 const APInt *Ignored;
2011 if (!OneUse && !IdenticalUsers)
2014 if (SeenNonSimplifiedInVal)
2016 SeenNonSimplifiedInVal =
true;
2024 if (!BI || !
DT.isReachableFromEntry(InBB))
2040 for (
auto OpIndex : OpsToMoveUseToIncomingBB) {
2051 U = U->DoPHITranslation(PN->
getParent(), OpBB);
2054 Clones.
insert({OpBB, Clone});
2059 NewPhiValues[
OpIndex] = Clone;
2068 for (
unsigned i = 0; i != NumPHIValues; ++i)
2071 if (IdenticalUsers) {
2102 BO0->getOpcode() !=
Opc || BO1->getOpcode() !=
Opc ||
2103 !BO0->isAssociative() || !BO1->isAssociative() ||
2104 BO0->getParent() != BO1->getParent())
2108 "Expected commutative instructions!");
2112 Value *Start0, *Step0, *Start1, *Step1;
2119 "Expected PHIs with two incoming values!");
2126 if (!Init0 || !Init1 || !C0 || !C1)
2141 if (
Opc == Instruction::FAdd ||
Opc == Instruction::FMul) {
2145 NewBO->setFastMathFlags(Intersect);
2149 Flags.AllKnownNonZero =
false;
2150 Flags.mergeFlags(*BO0);
2151 Flags.mergeFlags(*BO1);
2152 Flags.mergeFlags(BO);
2153 Flags.applyFlags(*NewBO);
2155 NewBO->takeName(&BO);
2165 "Invalid incoming block!");
2166 NewPN->addIncoming(
Init, BB);
2167 }
else if (V == BO0) {
2172 "Invalid incoming block!");
2173 NewPN->addIncoming(NewBO, BB);
2179 <<
"\n with " << *PN1 <<
"\n " << *BO1
2206 if (!Phi0 || !Phi1 || !Phi0->hasOneUse() || !Phi1->hasOneUse() ||
2207 Phi0->getNumOperands() != Phi1->getNumOperands())
2211 if (BO.
getParent() != Phi0->getParent() ||
2228 auto CanFoldIncomingValuePair = [&](std::tuple<Use &, Use &>
T) {
2229 auto &Phi0Use = std::get<0>(
T);
2230 auto &Phi1Use = std::get<1>(
T);
2231 if (Phi0->getIncomingBlock(Phi0Use) != Phi1->getIncomingBlock(Phi1Use))
2233 Value *Phi0UseV = Phi0Use.get();
2234 Value *Phi1UseV = Phi1Use.get();
2237 else if (Phi1UseV ==
C)
2244 if (
all_of(
zip(Phi0->operands(), Phi1->operands()),
2245 CanFoldIncomingValuePair)) {
2248 assert(NewIncomingValues.
size() == Phi0->getNumOperands() &&
2249 "The number of collected incoming values should equal the number "
2250 "of the original PHINode operands!");
2251 for (
unsigned I = 0;
I < Phi0->getNumOperands();
I++)
2252 NewPhi->
addIncoming(NewIncomingValues[
I], Phi0->getIncomingBlock(
I));
2257 if (Phi0->getNumOperands() != 2 || Phi1->getNumOperands() != 2)
2264 ConstBB = Phi0->getIncomingBlock(0);
2265 OtherBB = Phi0->getIncomingBlock(1);
2267 ConstBB = Phi0->getIncomingBlock(1);
2268 OtherBB = Phi0->getIncomingBlock(0);
2279 if (!PredBlockBranch || !
DT.isReachableFromEntry(OtherBB))
2285 for (
auto BBIter = BO.
getParent()->begin(); &*BBIter != &BO; ++BBIter)
2296 Builder.SetInsertPoint(PredBlockBranch);
2298 Phi0->getIncomingValueForBlock(OtherBB),
2299 Phi1->getIncomingValueForBlock(OtherBB));
2301 NotFoldedNewBO->copyIRFlags(&BO);
2311 auto TryFoldOperand = [&](
unsigned OpIdx,
2330 if (
GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
2359 for (
unsigned I = 0;
I < NumElts; ++
I) {
2361 if (ShMask[
I] >= 0) {
2362 assert(ShMask[
I] < (
int)NumElts &&
"Not expecting narrowing shuffle");
2373 NewVecC[ShMask[
I]] = CElt;
2391template <Intrinsic::ID SpliceID>
2410 (
LHS->hasOneUse() ||
RHS->hasOneUse() ||
2412 return CreateBinOpSplice(V1, V2,
Offset);
2417 return CreateBinOpSplice(V1,
RHS,
Offset);
2424 return CreateBinOpSplice(
LHS, V2,
Offset);
2444 auto foldConstantsThroughSubVectorInsertSplat =
2445 [&](
Value *MaybeSubVector,
Value *MaybeSplat,
2450 !
match(MaybeSubVector,
2457 if (!SubVector || !Dest)
2459 auto *InsertVector =
2460 Builder.CreateInsertVector(Dest->
getType(), Dest, SubVector, Idx);
2468 if (
Instruction *Folded = foldConstantsThroughSubVectorInsertSplat(
2471 if (
Instruction *Folded = foldConstantsThroughSubVectorInsertSplat(
2478 Value *L0, *L1, *R0, *R1;
2482 LHS->hasOneUse() && RHS->hasOneUse() &&
2505 M, Intrinsic::vector_reverse, V->getType());
2516 (LHS->hasOneUse() || RHS->hasOneUse() ||
2517 (LHS == RHS && LHS->hasNUses(2))))
2518 return createBinOpReverse(V1, V2);
2522 return createBinOpReverse(V1, RHS);
2526 return createBinOpReverse(LHS, V2);
2537 M, Intrinsic::experimental_vp_reverse, V->getType());
2547 (LHS->hasOneUse() || RHS->hasOneUse() ||
2548 (LHS == RHS && LHS->hasNUses(2))))
2549 return createBinOpVPReverse(V1, V2, EVL);
2553 return createBinOpVPReverse(V1, RHS, EVL);
2559 return createBinOpVPReverse(LHS, V2, EVL);
2586 (LHS->hasOneUse() || RHS->hasOneUse() || LHS == RHS)) {
2588 return createBinOpShuffle(V1, V2, Mask);
2603 if (LShuf->isSelect() &&
2605 RShuf->isSelect() &&
2627 "Shuffle should not change scalar type");
2639 Value *NewLHS = ConstOp1 ? V1 : NewC;
2640 Value *NewRHS = ConstOp1 ? NewC : V1;
2641 return createBinOpShuffle(NewLHS, NewRHS, Mask);
2676 Value *NewSplat =
Builder.CreateShuffleVector(NewBO, NewMask);
2682 R->copyFastMathFlags(&Inst);
2686 NewInstBO->copyIRFlags(R);
2716 (Op0->
hasOneUse() || Op1->hasOneUse()))) {
2742 NewBinOp->setHasNoSignedWrap();
2744 NewBinOp->setHasNoUnsignedWrap();
2760 if (!
GEP.hasAllConstantIndices())
2776 Type *Ty =
GEP.getSourceElementType();
2777 Value *NewTrueC = Builder.CreateGEP(Ty, TrueC, IndexC,
"", NW);
2778 Value *NewFalseC = Builder.CreateGEP(Ty, FalseC, IndexC,
"", NW);
2788 if (
GEP.getNumIndices() != 1)
2798 unsigned IndexSizeInBits =
DL.getIndexTypeSizeInBits(PtrTy);
2809 if (NewOffset.
isZero() ||
2810 (Src->hasOneUse() &&
GEP.getOperand(1)->hasOneUse())) {
2812 if (
GEP.hasNoUnsignedWrap() &&
2832 if (!
GEP.hasAllConstantIndices())
2843 if (InnerGEP->hasAllConstantIndices())
2846 if (!InnerGEP->hasOneUse())
2849 Skipped.push_back(InnerGEP);
2855 if (Skipped.empty())
2860 if (!InnerGEP->hasOneUse())
2865 if (InnerGEP->getType() != Ty)
2871 !InnerGEP->accumulateConstantOffset(
DL,
Offset))
2874 IC.
replaceOperand(*Skipped.back(), 0, InnerGEP->getPointerOperand());
2876 SkippedGEP->setNoWrapFlags(NW);
2898 if (Src->getResultElementType() !=
GEP.getSourceElementType())
2904 if (Src->hasOneUse() &&
GEP.getNumIndices() == 1 &&
2905 Src->getNumIndices() == 1) {
2906 Value *SrcIdx = *Src->idx_begin();
2908 const APInt *ConstOffset, *TrueVal, *FalseVal;
2921 if (!
Select->hasOneUse())
2924 if (TrueVal->getBitWidth() != ConstOffset->
getBitWidth() ||
2925 FalseVal->getBitWidth() != ConstOffset->
getBitWidth())
2928 APInt NewTrueVal = *ConstOffset + *TrueVal;
2929 APInt NewFalseVal = *ConstOffset + *FalseVal;
2930 Constant *NewTrue = ConstantInt::get(
Select->getType(), NewTrueVal);
2931 Constant *NewFalse = ConstantInt::get(
Select->getType(), NewFalseVal);
2933 Cond, NewTrue, NewFalse,
"",
2938 Builder.CreateGEP(
GEP.getResultElementType(),
2939 Src->getPointerOperand(),
2940 NewSelect,
"", Flags));
2945 bool EndsWithSequential =
false;
2948 EndsWithSequential =
I.isSequential();
2949 if (!EndsWithSequential)
2954 Value *SO1 = Src->getOperand(Src->getNumOperands() - 1);
2972 Indices.
append(Src->op_begin() + 1, Src->op_end() - 1);
2977 unsigned NumNonZeroIndices =
count_if(Indices, [](
Value *Idx) {
2979 return !
C || !
C->isNullValue();
2981 if (NumNonZeroIndices > 1)
2986 Src->getSourceElementType(), Src->getOperand(0), Indices,
"",
2992 bool &DoesConsume,
unsigned Depth) {
2993 static Value *
const NonNull =
reinterpret_cast<Value *
>(uintptr_t(1));
3011 if (!WillInvertAllUses)
3018 return Builder->CreateCmp(
I->getInversePredicate(),
I->getOperand(0),
3027 DoesConsume,
Depth))
3030 DoesConsume,
Depth))
3039 DoesConsume,
Depth))
3042 DoesConsume,
Depth))
3051 DoesConsume,
Depth))
3060 DoesConsume,
Depth))
3072 bool LocalDoesConsume = DoesConsume;
3074 LocalDoesConsume,
Depth))
3077 LocalDoesConsume,
Depth)) {
3078 DoesConsume = LocalDoesConsume;
3081 DoesConsume,
Depth);
3082 assert(NotB !=
nullptr &&
3083 "Unable to build inverted value for known freely invertable op");
3085 return Builder->CreateBinaryIntrinsic(
3088 Cond, NotA, NotB,
"",
3096 bool LocalDoesConsume = DoesConsume;
3098 for (
Use &U : PN->operands()) {
3099 BasicBlock *IncomingBlock = PN->getIncomingBlock(U);
3103 if (NewIncomingVal ==
nullptr)
3106 if (NewIncomingVal == V)
3109 IncomingValues.
emplace_back(NewIncomingVal, IncomingBlock);
3112 DoesConsume = LocalDoesConsume;
3117 Builder->CreatePHI(PN->getType(), PN->getNumIncomingValues());
3118 for (
auto [Val, Pred] : IncomingValues)
3127 DoesConsume,
Depth))
3128 return Builder ?
Builder->CreateSExt(AV, V->getType()) : NonNull;
3134 DoesConsume,
Depth))
3135 return Builder ?
Builder->CreateTrunc(AV, V->getType()) : NonNull;
3143 bool IsLogical,
Value *
A,
3145 bool LocalDoesConsume = DoesConsume;
3147 LocalDoesConsume,
Depth))
3150 LocalDoesConsume,
Depth)) {
3152 LocalDoesConsume,
Depth);
3153 DoesConsume = LocalDoesConsume;
3155 return Builder ?
Builder->CreateLogicalOp(Opcode, NotA, NotB) : NonNull;
3156 return Builder ?
Builder->CreateBinOp(Opcode, NotA, NotB) : NonNull;
3163 return TryInvertAndOrUsingDeMorgan(Instruction::And,
false,
A,
3167 return TryInvertAndOrUsingDeMorgan(Instruction::Or,
false,
A,
3171 return TryInvertAndOrUsingDeMorgan(Instruction::And,
true,
A,
3175 return TryInvertAndOrUsingDeMorgan(Instruction::Or,
true,
A,
3184 Type *GEPEltType =
GEP.getSourceElementType();
3195 if (
GEP.getNumIndices() == 1 &&
3204 return PtrOpGep && PtrOpGep->hasAllConstantIndices() &&
3207 return match(V, m_APInt(C)) && !C->isZero();
3231 if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands() ||
3232 Op1->getSourceElementType() != Op2->getSourceElementType())
3240 Type *CurTy =
nullptr;
3242 for (
unsigned J = 0,
F = Op1->getNumOperands(); J !=
F; ++J) {
3243 if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())
3246 if (Op1->getOperand(J) != Op2->getOperand(J)) {
3255 assert(CurTy &&
"No current type?");
3275 CurTy = Op1->getSourceElementType();
3283 NW &= Op2->getNoWrapFlags();
3293 NewGEP->setNoWrapFlags(NW);
3305 Builder.SetInsertPoint(PN);
3306 NewPN = Builder.CreatePHI(Op1->getOperand(DI)->getType(),
3314 NewGEP->setOperand(DI, NewPN);
3317 NewGEP->insertBefore(*
GEP.getParent(),
GEP.getParent()->getFirstInsertionPt());
3324 Type *GEPType =
GEP.getType();
3325 Type *GEPEltType =
GEP.getSourceElementType();
3328 SQ.getWithInstruction(&
GEP)))
3335 auto VWidth = GEPFVTy->getNumElements();
3336 APInt PoisonElts(VWidth, 0);
3348 bool MadeChange =
false;
3352 Type *NewScalarIndexTy =
3353 DL.getIndexType(
GEP.getPointerOperandType()->getScalarType());
3362 Type *IndexTy = (*I)->getType();
3363 Type *NewIndexType =
3372 if (EltTy->
isSized() &&
DL.getTypeAllocSize(EltTy).isZero())
3378 if (IndexTy != NewIndexType) {
3384 if (
GEP.hasNoUnsignedWrap() &&
GEP.hasNoUnsignedSignedWrap())
3385 *
I =
Builder.CreateZExt(*
I, NewIndexType,
"",
true);
3387 *
I =
Builder.CreateSExt(*
I, NewIndexType);
3389 *
I =
Builder.CreateTrunc(*
I, NewIndexType,
"",
GEP.hasNoUnsignedWrap(),
3390 GEP.hasNoUnsignedSignedWrap());
3399 if (!GEPEltType->
isIntegerTy(8) &&
GEP.hasAllConstantIndices()) {
3404 GEP.getNoWrapFlags()));
3416 if (LastIdx && LastIdx->isNullValue() && !LastIdx->getType()->isVectorTy()) {
3424 if (FirstIdx && FirstIdx->isNullValue() &&
3425 !FirstIdx->getType()->isVectorTy()) {
3431 GEP.getPointerOperand(),
3433 GEP.getNoWrapFlags()));
3440 return Op->getType()->isVectorTy() && getSplatValue(Op);
3443 for (
auto &
Op :
GEP.operands()) {
3444 if (
Op->getType()->isVectorTy())
3454 GEP.getNoWrapFlags());
3457 Res =
Builder.CreateVectorSplat(EC, Res);
3462 bool SeenNonZeroIndex =
false;
3463 for (
auto [IdxNum, Idx] :
enumerate(Indices)) {
3466 if (
C &&
C->isNullValue() && IdxNum == 0)
3469 if (!SeenNonZeroIndex) {
3470 SeenNonZeroIndex =
true;
3477 Builder.CreateGEP(GEPEltType, PtrOp, FrontIndices,
3478 GEP.getName() +
".split",
GEP.getNoWrapFlags());
3485 BackIndices,
GEP.getNoWrapFlags());
3489 auto IsCanonicalType = [](
Type *Ty) {
3491 Ty = AT->getElementType();
3492 return Ty->isIntegerTy(8);
3494 if (Indices.
size() == 1 && !IsCanonicalType(GEPEltType)) {
3495 TypeSize Scale =
DL.getTypeAllocSize(GEPEltType);
3500 GEP.setSourceElementType(NewElemTy);
3501 GEP.setResultElementType(NewElemTy);
3516 if (
GEP.getNumIndices() == 1) {
3517 unsigned AS =
GEP.getPointerAddressSpace();
3518 if (
GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
3519 DL.getIndexSizeInBits(AS)) {
3520 uint64_t TyAllocSize =
DL.getTypeAllocSize(GEPEltType).getFixedValue();
3522 if (TyAllocSize == 1) {
3531 GEPType ==
Y->getType()) {
3532 bool HasNonAddressBits =
3533 DL.getAddressSizeInBits(AS) !=
DL.getPointerSizeInBits(AS);
3540 }
else if (
auto *ExactIns =
3544 if (ExactIns->isExact()) {
3552 GEP.getPointerOperand(), V,
3553 GEP.getNoWrapFlags());
3556 if (ExactIns->isExact() && ExactIns->hasOneUse()) {
3562 std::optional<APInt> NewC;
3582 if (NewC.has_value()) {
3585 ConstantInt::get(V->getType(), *NewC));
3588 GEP.getPointerOperand(), NewOp,
3589 GEP.getNoWrapFlags());
3599 if (!
GEP.isInBounds()) {
3602 APInt BasePtrOffset(IdxWidth, 0);
3603 Value *UnderlyingPtrOp =
3605 bool CanBeNull, CanBeFreed;
3607 DL, CanBeNull, CanBeFreed);
3608 if (!CanBeNull && !CanBeFreed && DerefBytes != 0) {
3609 if (
GEP.accumulateConstantOffset(
DL, BasePtrOffset) &&
3611 APInt AllocSize(IdxWidth, DerefBytes);
3612 if (BasePtrOffset.
ule(AllocSize)) {
3614 GEP.getSourceElementType(), PtrOp, Indices,
GEP.getName());
3621 if (
GEP.hasNoUnsignedSignedWrap() && !
GEP.hasNoUnsignedWrap() &&
3623 return isKnownNonNegative(Idx, SQ.getWithInstruction(&GEP));
3631 if (
GEP.getNumIndices() == 1) {
3634 auto GetPreservedNoWrapFlags = [&](
bool AddIsNUW) {
3637 if (
GEP.hasNoUnsignedWrap() && AddIsNUW)
3638 return GEP.getNoWrapFlags();
3654 Builder.CreateGEP(
GEP.getSourceElementType(),
GEP.getPointerOperand(),
3657 Builder.CreateGEP(
GEP.getSourceElementType(),
3658 NewPtr, Idx2,
"", NWFlags));
3669 bool NUW =
match(
GEP.getOperand(1),
3672 auto *NewPtr =
Builder.CreateGEP(
3673 GEP.getSourceElementType(),
GEP.getPointerOperand(),
3674 Builder.CreateSExt(Idx1,
GEP.getOperand(1)->getType()),
"", NWFlags);
3677 Builder.CreateGEP(
GEP.getSourceElementType(), NewPtr,
3678 Builder.CreateSExt(
C,
GEP.getOperand(1)->getType()),
3687 if (Indices.
size() == 1 &&
GEP.isInBounds() &&
GEP.hasNoUnsignedWrap()) {
3701 GEP.getNoWrapFlags());
3737 return Dest && Dest->Ptr == UsedV;
3740static std::optional<ModRefInfo>
3753 return std::nullopt;
3754 switch (
I->getOpcode()) {
3757 return std::nullopt;
3759 case Instruction::AddrSpaceCast:
3760 case Instruction::BitCast:
3761 case Instruction::GetElementPtr:
3766 case Instruction::ICmp: {
3772 return std::nullopt;
3773 unsigned OtherIndex = (ICI->
getOperand(0) == PI) ? 1 : 0;
3775 return std::nullopt;
3780 auto AlignmentAndSizeKnownValid = [](
CallBase *CB) {
3784 const APInt *Alignment;
3786 return match(CB->getArgOperand(0),
m_APInt(Alignment)) &&
3792 if (CB && TLI.
getLibFunc(*CB->getCalledFunction(), TheLibFunc) &&
3793 TLI.
has(TheLibFunc) && TheLibFunc == LibFunc_aligned_alloc &&
3794 !AlignmentAndSizeKnownValid(CB))
3795 return std::nullopt;
3800 case Instruction::Call:
3803 switch (
II->getIntrinsicID()) {
3805 return std::nullopt;
3807 case Intrinsic::memmove:
3808 case Intrinsic::memcpy:
3809 case Intrinsic::memset: {
3811 if (
MI->isVolatile())
3812 return std::nullopt;
3818 return std::nullopt;
3822 case Intrinsic::assume:
3823 case Intrinsic::invariant_start:
3824 case Intrinsic::invariant_end:
3825 case Intrinsic::lifetime_start:
3826 case Intrinsic::lifetime_end:
3827 case Intrinsic::objectsize:
3830 case Intrinsic::launder_invariant_group:
3831 case Intrinsic::strip_invariant_group:
3858 return std::nullopt;
3860 case Instruction::Store: {
3862 if (
SI->isVolatile() ||
SI->getPointerOperand() != PI)
3863 return std::nullopt;
3865 return std::nullopt;
3871 case Instruction::Load: {
3874 return std::nullopt;
3876 return std::nullopt;
3884 }
while (!Worklist.
empty());
3912 std::unique_ptr<DIBuilder> DIB;
3920 bool KnowInitUndef =
false;
3921 bool KnowInitZero =
false;
3926 KnowInitUndef =
true;
3927 else if (
Init->isNullValue())
3928 KnowInitZero =
true;
3932 auto &
F = *
MI.getFunction();
3933 if (
F.hasFnAttribute(Attribute::SanitizeMemory) ||
3934 F.hasFnAttribute(Attribute::SanitizeAddress))
3935 KnowInitUndef =
false;
3950 if (
II->getIntrinsicID() == Intrinsic::objectsize) {
3953 II,
DL, &
TLI,
AA,
true, &InsertedInstructions);
3954 for (
Instruction *Inserted : InsertedInstructions)
3962 if (KnowInitZero &&
isRefSet(*Removable)) {
3965 auto *M =
Builder.CreateMemSet(
3968 MTI->getLength(), MTI->getDestAlign());
3969 M->copyMetadata(*MTI);
3983 C->isFalseWhenEqual()));
3985 for (
auto *DVR : DVRs)
3986 if (DVR->isAddressOfVariable())
3993 assert(KnowInitZero || KnowInitUndef);
4008 F,
II->getNormalDest(),
II->getUnwindDest(), {},
"",
II->getParent());
4009 NewII->setDebugLoc(
II->getDebugLoc());
4037 for (
auto *DVR : DVRs)
4038 if (DVR->isAddressOfVariable() || DVR->getExpression()->startsWithDeref())
4039 DVR->eraseFromParent();
4085 if (FreeInstrBB->
size() != 2) {
4087 if (&Inst == &FI || &Inst == FreeInstrBBTerminator ||
4091 if (!Cast || !Cast->isNoopCast(
DL))
4112 "Broken CFG: missing edge from predecessor to successor");
4117 if (&Instr == FreeInstrBBTerminator)
4122 "Only the branch instruction should remain");
4133 Attrs = Attrs.removeParamAttribute(FI.
getContext(), 0, Attribute::NonNull);
4134 Attribute Dereferenceable = Attrs.getParamAttr(0, Attribute::Dereferenceable);
4135 if (Dereferenceable.
isValid()) {
4137 Attrs = Attrs.removeParamAttribute(FI.
getContext(), 0,
4138 Attribute::Dereferenceable);
4139 Attrs = Attrs.addDereferenceableOrNullParamAttr(FI.
getContext(), 0, Bytes);
4178 if (
TLI.getLibFunc(FI, Func) &&
TLI.has(Func) && Func == LibFunc_free)
4194 bool HasDereferenceable =
4195 F->getAttributes().getRetDereferenceableBytes() > 0;
4196 if (
F->hasRetAttribute(Attribute::NonNull) ||
4197 (HasDereferenceable &&
4199 if (
Value *V = simplifyNonNullOperand(RetVal, HasDereferenceable))
4204 if (!AttributeFuncs::isNoFPClassCompatibleType(RetTy))
4207 FPClassTest ReturnClass =
F->getAttributes().getRetNoFPClass();
4208 if (ReturnClass ==
fcNone)
4213 SQ.getWithInstruction(&RI)))
4230 if (Prev->isEHPad())
4260 if (BBI != FirstInstr)
4262 }
while (BBI != FirstInstr && BBI->isDebugOrPseudoInst());
4276 if (!
DeadEdges.insert({From, To}).second)
4281 for (
Use &U : PN.incoming_values())
4298 std::next(
I->getReverseIterator())))) {
4299 if (!Inst.use_empty() && !Inst.getType()->isTokenTy()) {
4303 if (Inst.isEHPad() || Inst.getType()->isTokenTy())
4306 Inst.dropDbgRecords();
4328 return DeadEdges.contains({Pred, BB}) ||
DT.dominates(BB, Pred);
4341 if (Succ == LiveSucc)
4358 BPI->swapSuccEdgesProbabilities(BI.getParent());
4379 "Unexpected number of branch weights!");
4388 BPI->swapSuccEdgesProbabilities(BI.getParent());
4406 BPI->swapSuccEdgesProbabilities(BI.getParent());
4427 if (
DT.dominates(Edge0, U)) {
4433 if (
DT.dominates(Edge1, U)) {
4440 DC.registerBranch(&BI);
4450 unsigned CstOpIdx = IsTrueArm ? 1 : 2;
4455 BasicBlock *CstBB =
SI.findCaseValue(
C)->getCaseSuccessor();
4456 if (CstBB !=
SI.getDefaultDest())
4469 for (
auto Case :
SI.cases())
4470 if (!CR.
contains(Case.getCaseValue()->getValue()))
4479 const APInt *CondOpC;
4482 auto MaybeInvertible = [&](
Value *
Cond) -> InvertFn {
4485 return [](
const APInt &Case,
const APInt &
C) {
return Case -
C; };
4489 return [](
const APInt &Case,
const APInt &
C) {
return C - Case; };
4495 return [](
const APInt &Case,
const APInt &
C) {
return Case ^
C; };
4502 if (
auto InvertFn = MaybeInvertible(
Cond); InvertFn &&
Cond->hasOneUse()) {
4503 for (
auto &Case :
SI.cases()) {
4504 const APInt &New = InvertFn(Case.getCaseValue()->getValue(), *CondOpC);
4505 Case.setValue(ConstantInt::get(
SI.getContext(), New));
4513 all_of(
SI.cases(), [&](
const auto &Case) {
4514 return Case.getCaseValue()->getValue().countr_zero() >= ShiftAmt;
4520 Value *NewCond = Op0;
4527 for (
auto Case :
SI.cases()) {
4528 const APInt &CaseVal = Case.getCaseValue()->getValue();
4530 : CaseVal.
lshr(ShiftAmt);
4531 Case.setValue(ConstantInt::get(
SI.getContext(), ShiftedCase));
4543 if (
all_of(
SI.cases(), [&](
const auto &Case) {
4544 const APInt &CaseVal = Case.getCaseValue()->getValue();
4545 return IsZExt ? CaseVal.isIntN(NewWidth)
4546 : CaseVal.isSignedIntN(NewWidth);
4548 for (
auto &Case :
SI.cases()) {
4549 APInt TruncatedCase = Case.getCaseValue()->getValue().
trunc(NewWidth);
4550 Case.setValue(ConstantInt::get(
SI.getContext(), TruncatedCase));
4572 for (
const auto &
C :
SI.cases()) {
4574 std::min(LeadingKnownZeros,
C.getCaseValue()->getValue().countl_zero());
4576 std::min(LeadingKnownOnes,
C.getCaseValue()->getValue().countl_one());
4579 unsigned NewWidth = Known.
getBitWidth() - std::max(LeadingKnownZeros, LeadingKnownOnes);
4585 if (NewWidth > 0 && NewWidth < Known.
getBitWidth() &&
4586 shouldChangeType(Known.
getBitWidth(), NewWidth)) {
4591 for (
auto Case :
SI.cases()) {
4592 APInt TruncatedCase = Case.getCaseValue()->getValue().
trunc(NewWidth);
4593 Case.setValue(ConstantInt::get(
SI.getContext(), TruncatedCase));
4604 SI.findCaseValue(CI)->getCaseSuccessor());
4618 const APInt *
C =
nullptr;
4620 if (*EV.
idx_begin() == 0 && (OvID == Intrinsic::smul_with_overflow ||
4621 OvID == Intrinsic::umul_with_overflow)) {
4626 if (
C->isPowerOf2()) {
4627 return BinaryOperator::CreateShl(
4629 ConstantInt::get(WO->getLHS()->getType(),
C->logBase2()));
4637 if (!WO->hasOneUse())
4651 assert(*EV.
idx_begin() == 1 &&
"Unexpected extract index for overflow inst");
4654 if (OvID == Intrinsic::usub_with_overflow)
4659 if (OvID == Intrinsic::smul_with_overflow &&
4660 WO->getLHS()->getType()->isIntOrIntVectorTy(1))
4661 return BinaryOperator::CreateAnd(WO->getLHS(), WO->getRHS());
4664 if (OvID == Intrinsic::umul_with_overflow && WO->getLHS() == WO->getRHS()) {
4665 unsigned BitWidth = WO->getLHS()->getType()->getScalarSizeInBits();
4668 return new ICmpInst(
4670 ConstantInt::get(WO->getLHS()->getType(),
4681 WO->getBinaryOp(), *
C, WO->getNoWrapKind());
4686 auto *OpTy = WO->getRHS()->getType();
4687 auto *NewLHS = WO->getLHS();
4689 NewLHS =
Builder.CreateAdd(NewLHS, ConstantInt::get(OpTy,
Offset));
4691 ConstantInt::get(OpTy, NewRHSC));
4708 const APFloat *ConstVal =
nullptr;
4709 Value *VarOp =
nullptr;
4710 bool ConstIsTrue =
false;
4717 ConstIsTrue =
false;
4722 Builder.SetInsertPoint(&EV);
4728 Value *NewEV = Builder.CreateExtractValue(NewFrexp, 0,
"mantissa");
4733 Constant *ConstantMantissa = ConstantFP::get(TrueVal->getType(), Mantissa);
4735 Value *NewSel = Builder.CreateSelectFMF(
4736 Cond, ConstIsTrue ? ConstantMantissa : NewEV,
4737 ConstIsTrue ? NewEV : ConstantMantissa,
SelectInst,
"select.frexp");
4747 SQ.getWithInstruction(&EV)))
4761 const unsigned *exti, *exte, *insi, *inse;
4762 for (exti = EV.
idx_begin(), insi =
IV->idx_begin(),
4763 exte = EV.
idx_end(), inse =
IV->idx_end();
4764 exti != exte && insi != inse;
4778 if (exti == exte && insi == inse)
4793 Value *NewEV =
Builder.CreateExtractValue(
IV->getAggregateOperand(),
4811 if (
Instruction *R = foldExtractOfOverflowIntrinsic(EV))
4817 STy && STy->isScalableTy())
4825 if (L->isSimple() && L->hasOneUse()) {
4830 for (
unsigned Idx : EV.
indices())
4837 L->getPointerOperand(), Indices);
4871 switch (Personality) {
4915 bool MakeNewInstruction =
false;
4921 bool isLastClause = i + 1 == e;
4929 if (AlreadyCaught.
insert(TypeInfo).second) {
4934 MakeNewInstruction =
true;
4941 MakeNewInstruction =
true;
4942 CleanupFlag =
false;
4961 if (!NumTypeInfos) {
4964 MakeNewInstruction =
true;
4965 CleanupFlag =
false;
4969 bool MakeNewFilter =
false;
4973 assert(NumTypeInfos > 0 &&
"Should have handled empty filter already!");
4979 MakeNewInstruction =
true;
4986 if (NumTypeInfos > 1)
4987 MakeNewFilter =
true;
4991 NewFilterElts.
reserve(NumTypeInfos);
4996 bool SawCatchAll =
false;
4997 for (
unsigned j = 0; j != NumTypeInfos; ++j) {
5025 if (SeenInFilter.
insert(TypeInfo).second)
5031 MakeNewInstruction =
true;
5036 if (NewFilterElts.
size() < NumTypeInfos)
5037 MakeNewFilter =
true;
5039 if (MakeNewFilter) {
5041 NewFilterElts.
size());
5043 MakeNewInstruction =
true;
5052 if (MakeNewFilter && !NewFilterElts.
size()) {
5053 assert(MakeNewInstruction &&
"New filter but not a new instruction!");
5054 CleanupFlag =
false;
5065 for (
unsigned i = 0, e = NewClauses.
size(); i + 1 < e; ) {
5068 for (j = i; j != e; ++j)
5075 for (
unsigned k = i; k + 1 < j; ++k)
5079 std::stable_sort(NewClauses.
begin() + i, NewClauses.
begin() + j,
5081 MakeNewInstruction =
true;
5100 for (
unsigned i = 0; i + 1 < NewClauses.
size(); ++i) {
5110 for (
unsigned j = NewClauses.
size() - 1; j != i; --j) {
5111 Value *LFilter = NewClauses[j];
5122 NewClauses.
erase(J);
5123 MakeNewInstruction =
true;
5127 unsigned LElts = LTy->getNumElements();
5137 assert(FElts <= LElts &&
"Should have handled this case earlier!");
5139 NewClauses.
erase(J);
5140 MakeNewInstruction =
true;
5149 assert(FElts > 0 &&
"Should have eliminated the empty filter earlier!");
5150 for (
unsigned l = 0; l != LElts; ++l)
5153 NewClauses.
erase(J);
5154 MakeNewInstruction =
true;
5165 bool AllFound =
true;
5166 for (
unsigned f = 0; f != FElts; ++f) {
5169 for (
unsigned l = 0; l != LElts; ++l) {
5171 if (LTypeInfo == FTypeInfo) {
5181 NewClauses.
erase(J);
5182 MakeNewInstruction =
true;
5190 if (MakeNewInstruction) {
5198 if (NewClauses.empty())
5207 assert(!CleanupFlag &&
"Adding a cleanup, not removing one?!");
5237 if (!OrigOpInst || !OrigOpInst->hasOneUse() ||
isa<PHINode>(OrigOp))
5251 Value *MaybePoisonOperand =
nullptr;
5252 for (
Value *V : OrigOpInst->operands()) {
5255 (MaybePoisonOperand && MaybePoisonOperand == V))
5257 if (!MaybePoisonOperand)
5258 MaybePoisonOperand = V;
5263 OrigOpInst->dropPoisonGeneratingAnnotations();
5266 if (!MaybePoisonOperand)
5269 Builder.SetInsertPoint(OrigOpInst);
5270 Value *FrozenMaybePoisonOperand =
Builder.CreateFreeze(
5271 MaybePoisonOperand, MaybePoisonOperand->
getName() +
".fr");
5273 OrigOpInst->replaceUsesOfWith(MaybePoisonOperand, FrozenMaybePoisonOperand);
5284 Use *StartU =
nullptr;
5302 Value *StartV = StartU->get();
5314 if (!Visited.
insert(V).second)
5317 if (Visited.
size() > 32)
5334 I->dropPoisonGeneratingAnnotations();
5336 if (StartNeedsFreeze) {
5364 MoveBefore = *MoveBeforeOpt;
5368 MoveBefore.setHeadBit(
false);
5371 if (&FI != &*MoveBefore) {
5372 FI.
moveBefore(*MoveBefore->getParent(), MoveBefore);
5377 Changed |=
Op->replaceUsesWithIf(&FI, [&](
Use &U) ->
bool {
5378 if (!
DT.dominates(&FI, U))
5381 Users.push_back(U.getUser());
5385 for (
auto *U :
Users) {
5386 for (
auto &AssumeVH :
AC.assumptionsFor(U)) {
5398 for (
auto *U : V->users()) {
5408 Value *Op0 =
I.getOperand(0);
5438 auto getUndefReplacement = [&](
Type *Ty) {
5439 auto pickCommonConstantFromPHI = [](
PHINode &PN) ->
Value * {
5443 for (
Value *V : PN.incoming_values()) {
5454 if (BestValue && BestValue !=
C)
5463 Value *BestValue =
nullptr;
5464 for (
auto *U :
I.users()) {
5465 Value *V = NullValue;
5474 if (
Value *MaybeV = pickCommonConstantFromPHI(*
PHI))
5480 else if (BestValue != V)
5481 BestValue = NullValue;
5483 assert(BestValue &&
"Must have at least one use");
5484 assert(BestValue != &
I &&
"Cannot replace with itself");
5498 Type *Ty =
C->getType();
5502 unsigned NumElts = VTy->getNumElements();
5504 for (
unsigned i = 0; i != NumElts; ++i) {
5505 Constant *EltC =
C->getAggregateElement(i);
5516 !
C->containsConstantExpression()) {
5517 if (
Constant *Repl = getFreezeVectorReplacement(
C))
5551 for (
const User *U :
I.users()) {
5552 if (Visited.
insert(U).second)
5557 while (!AllocaUsers.
empty()) {
5580 if (
isa<PHINode>(
I) ||
I->isEHPad() ||
I->mayThrow() || !
I->willReturn() ||
5597 if (CI->isConvergent())
5603 if (
I->mayWriteToMemory()) {
5610 if (
I->mayReadFromMemory() &&
5611 !
I->hasMetadata(LLVMContext::MD_invariant_load)) {
5618 E =
I->getParent()->end();
5620 if (Scan->mayWriteToMemory())
5624 I->dropDroppableUses([&](
const Use *U) {
5626 if (
I &&
I->getParent() != DestBlock) {
5636 I->moveBefore(*DestBlock, InsertPos);
5646 if (!DbgVariableRecords.
empty())
5648 DbgVariableRecords);
5671 for (
auto &DVR : DbgVariableRecords)
5672 if (DVR->getParent() != DestBlock)
5673 DbgVariableRecordsToSalvage.
push_back(DVR);
5679 if (DVR->getParent() == SrcBlock)
5680 DbgVariableRecordsToSink.
push_back(DVR);
5687 return B->getInstruction()->comesBefore(
A->getInstruction());
5694 using InstVarPair = std::pair<const Instruction *, DebugVariable>;
5696 if (DbgVariableRecordsToSink.
size() > 1) {
5702 DVR->getDebugLoc()->getInlinedAt());
5703 CountMap[std::make_pair(DVR->getInstruction(), DbgUserVariable)] += 1;
5709 for (
auto It : CountMap) {
5710 if (It.second > 1) {
5711 FilterOutMap[It.first] =
nullptr;
5712 DupSet.
insert(It.first.first);
5723 DVR.getDebugLoc()->getInlinedAt());
5725 FilterOutMap.
find(std::make_pair(Inst, DbgUserVariable));
5726 if (FilterIt == FilterOutMap.
end())
5728 if (FilterIt->second !=
nullptr)
5730 FilterIt->second = &DVR;
5745 DVR->getDebugLoc()->getInlinedAt());
5749 if (!FilterOutMap.
empty()) {
5750 InstVarPair IVP = std::make_pair(DVR->getInstruction(), DbgUserVariable);
5751 auto It = FilterOutMap.
find(IVP);
5754 if (It != FilterOutMap.
end() && It->second != DVR)
5758 if (!SunkVariables.
insert(DbgUserVariable).second)
5761 if (DVR->isDbgAssign())
5769 if (DVRClones.
empty())
5783 assert(InsertPos.getHeadBit());
5785 InsertPos->getParent()->insertDbgRecordBefore(DVRClone, InsertPos);
5809 if (
I ==
nullptr)
continue;
5824 auto getOptionalSinkBlockForInst =
5825 [
this](
Instruction *
I) -> std::optional<BasicBlock *> {
5827 return std::nullopt;
5831 unsigned NumUsers = 0;
5833 for (
Use &U :
I->uses()) {
5839 if (
II->getIntrinsicID() != Intrinsic::assume ||
5840 !
II->getOperandBundle(
"dereferenceable"))
5845 return std::nullopt;
5851 UserBB = PN->getIncomingBlock(U);
5855 if (UserParent && UserParent != UserBB)
5856 return std::nullopt;
5857 UserParent = UserBB;
5861 if (NumUsers == 0) {
5864 if (UserParent == BB || !
DT.isReachableFromEntry(UserParent))
5865 return std::nullopt;
5877 return std::nullopt;
5879 assert(
DT.dominates(BB, UserParent) &&
"Dominance relation broken?");
5887 return std::nullopt;
5892 auto OptBB = getOptionalSinkBlockForInst(
I);
5894 auto *UserParent = *OptBB;
5902 for (
Use &U :
I->operands())
5910 Builder.CollectMetadataToCopy(
5911 I, {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
5924 <<
" New = " << *Result <<
'\n');
5929 Result->setDebugLoc(Result->getDebugLoc().orElse(
I->getDebugLoc()));
5931 Result->copyMetadata(*
I, LLVMContext::MD_annotation);
5933 I->replaceAllUsesWith(Result);
5936 Result->takeName(
I);
5951 Result->insertInto(InstParent, InsertPos);
5954 Worklist.pushUsersToWorkList(*Result);
5960 <<
" New = " << *
I <<
'\n');
5992 if (!
I->hasMetadataOtherThanDebugLoc())
5995 auto Track = [](
Metadata *ScopeList,
auto &Container) {
5997 if (!MDScopeList || !Container.insert(MDScopeList).second)
5999 for (
const auto &
MDOperand : MDScopeList->operands())
6001 Container.insert(MDScope);
6004 Track(
I->getMetadata(LLVMContext::MD_alias_scope), UsedAliasScopesAndLists);
6005 Track(
I->getMetadata(LLVMContext::MD_noalias), UsedNoAliasScopesAndLists);
6014 "llvm.experimental.noalias.scope.decl in use ?");
6017 "llvm.experimental.noalias.scope should refer to a single scope");
6020 return !UsedAliasScopesAndLists.contains(MD) ||
6021 !UsedNoAliasScopesAndLists.contains(MD);
6045 if (Succ != LiveSucc &&
DeadEdges.insert({BB, Succ}).second)
6046 for (
PHINode &PN : Succ->phis())
6047 for (
Use &U : PN.incoming_values())
6056 return DeadEdges.contains({Pred, BB}) ||
DT.dominates(BB, Pred);
6058 HandleOnlyLiveSuccessor(BB,
nullptr);
6065 if (!Inst.use_empty() &&
6066 (Inst.getNumOperands() == 0 ||
isa<Constant>(Inst.getOperand(0))))
6070 Inst.replaceAllUsesWith(
C);
6073 Inst.eraseFromParent();
6079 for (
Use &U : Inst.operands()) {
6084 Constant *&FoldRes = FoldedConstants[
C];
6090 <<
"\n Old = " << *
C
6091 <<
"\n New = " << *FoldRes <<
'\n');
6100 if (!Inst.isDebugOrPseudoInst()) {
6101 InstrsForInstructionWorklist.
push_back(&Inst);
6102 SeenAliasScopes.
analyse(&Inst);
6112 HandleOnlyLiveSuccessor(BB,
nullptr);
6116 bool CondVal =
Cond->getZExtValue();
6117 HandleOnlyLiveSuccessor(BB, BI->getSuccessor(!CondVal));
6123 HandleOnlyLiveSuccessor(BB,
nullptr);
6127 HandleOnlyLiveSuccessor(BB,
6128 SI->findCaseValue(
Cond)->getCaseSuccessor());
6138 if (LiveBlocks.
count(&BB))
6141 unsigned NumDeadInstInBB;
6145 NumDeadInst += NumDeadInstInBB;
6162 Inst->eraseFromParent();
6177 Visited[BB->getNumber()] =
true;
6179 if (Visited[Succ->getNumber()])
6191 auto &
DL =
F.getDataLayout();
6193 !
F.hasFnAttribute(
"instcombine-no-verify-fixpoint");
6209 bool MadeIRChange =
false;
6214 unsigned Iteration = 0;
6218 <<
" on " <<
F.getName()
6219 <<
" reached; stopping without verifying fixpoint\n");
6224 ++NumWorklistIterations;
6225 LLVM_DEBUG(
dbgs() <<
"\n\nINSTCOMBINE ITERATION #" << Iteration <<
" on "
6226 <<
F.getName() <<
"\n");
6228 InstCombinerImpl IC(Worklist, Builder,
F,
AA, AC, TLI,
TTI, DT, ORE, BFI,
6229 BPI, PSI,
DL, RPOT);
6232 MadeChangeInThisIteration |= IC.
run();
6233 if (!MadeChangeInThisIteration)
6236 MadeIRChange =
true;
6239 "Instruction Combining on " +
Twine(
F.getName()) +
6242 "Use 'instcombine<no-verify-fixpoint>' or function attribute "
6243 "'instcombine-no-verify-fixpoint' to suppress this error.");
6249 else if (Iteration == 2)
6251 else if (Iteration == 3)
6252 ++NumThreeIterations;
6254 ++NumFourOrMoreIterations;
6256 return MadeIRChange;
6264 OS, MapClassName2PassName);
6266 OS <<
"max-iterations=" << Options.MaxIterations <<
";";
6267 OS << (Options.VerifyFixpoint ?
"" :
"no-") <<
"verify-fixpoint";
6271char InstCombinePass::ID = 0;
6277 if (LRT.shouldSkip(&ID))
6290 auto *BFI = (PSI && PSI->hasProfileSummary()) ?
6295 BFI, BPI, PSI, Options)) {
6297 LRT.update(&ID,
false);
6303 LRT.update(&ID,
true);
6345 if (
auto *WrapperPass =
6347 BPI = &WrapperPass->getBPI();
6358 "Combine redundant instructions",
false,
false)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Register Bank Select
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This is the interface for LLVM's primary stateless and local alias analysis.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file provides an implementation of debug counters.
#define DEBUG_COUNTER(VARNAME, COUNTERNAME, DESC)
This file defines the DenseMap class.
static bool isSigned(unsigned Opcode)
This is the interface for a simple mod/ref and alias analysis over globals.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
iv Induction Variable Users
static bool rightDistributesOverLeft(Instruction::BinaryOps LOp, bool HasNUW, bool HasNSW, Intrinsic::ID ROp)
Return whether "(X ROp Y) LOp Z" is always equal to "(X LOp Z) ROp (Y LOp Z)".
static bool leftDistributesOverRight(Instruction::BinaryOps LOp, bool HasNUW, bool HasNSW, Intrinsic::ID ROp)
Return whether "X LOp (Y ROp Z)" is always equal to "(X LOp Y) ROp (X LOp Z)".
This file provides internal interfaces used to implement the InstCombine.
This file provides the primary interface to the instcombine pass.
static Value * simplifySwitchOnSelectUsingRanges(SwitchInst &SI, SelectInst *Select, bool IsTrueArm)
static bool isUsedWithinShuffleVector(Value *V)
static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI, Instruction *AI)
static Constant * constantFoldBinOpWithSplat(unsigned Opcode, Constant *Vector, Constant *Splat, bool SplatLHS, const DataLayout &DL)
static bool shorter_filter(const Value *LHS, const Value *RHS)
static Instruction * combineConstantOffsets(GetElementPtrInst &GEP, InstCombinerImpl &IC)
Combine constant offsets separated by variable offsets.
static Instruction * foldSelectGEP(GetElementPtrInst &GEP, InstCombiner::BuilderTy &Builder)
Thread a GEP operation with constant indices through the constant true/false arms of a select.
static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src)
static cl::opt< unsigned > MaxArraySize("instcombine-maxarray-size", cl::init(1024), cl::desc("Maximum array size considered when doing a combine"))
static Instruction * foldSpliceBinOp(BinaryOperator &Inst, InstCombiner::BuilderTy &Builder)
static cl::opt< unsigned > ShouldLowerDbgDeclare("instcombine-lower-dbg-declare", cl::Hidden, cl::init(true))
static bool hasNoSignedWrap(BinaryOperator &I)
static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1, InstCombinerImpl &IC)
Combine constant operands of associative operations either before or after a cast to eliminate one of...
static bool combineInstructionsOverFunction(Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA, AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI, DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI, BranchProbabilityInfo *BPI, ProfileSummaryInfo *PSI, const InstCombineOptions &Opts)
static Value * simplifyInstructionWithPHI(Instruction &I, PHINode *PN, Value *InValue, BasicBlock *InBB, const DataLayout &DL, const SimplifyQuery SQ)
static bool shouldCanonicalizeGEPToPtrAdd(GetElementPtrInst &GEP)
Return true if we should canonicalize the gep to an i8 ptradd.
static void ClearSubclassDataAfterReassociation(BinaryOperator &I)
Conservatively clears subclassOptionalData after a reassociation or commutation.
static Value * getIdentityValue(Instruction::BinaryOps Opcode, Value *V)
This function returns identity value for given opcode, which can be used to factor patterns like (X *...
static Value * foldFrexpOfSelect(ExtractValueInst &EV, IntrinsicInst *FrexpCall, SelectInst *SelectInst, InstCombiner::BuilderTy &Builder)
static std::optional< std::pair< Value *, Value * > > matchSymmetricPhiNodesPair(PHINode *LHS, PHINode *RHS)
static std::optional< ModRefInfo > isAllocSiteRemovable(Instruction *AI, SmallVectorImpl< Instruction * > &Users, const TargetLibraryInfo &TLI, bool KnowInit)
static cl::opt< unsigned > MaxAllocSiteRemovableUsers("instcombine-max-allocsite-removable-users", cl::Hidden, cl::init(2048), cl::desc("Maximum number of users to visit in alloc-site " "removability analysis"))
static Value * foldOperationIntoSelectOperand(Instruction &I, SelectInst *SI, Value *NewOp, InstCombiner &IC)
static Instruction * canonicalizeGEPOfConstGEPI8(GetElementPtrInst &GEP, GEPOperator *Src, InstCombinerImpl &IC)
static Instruction * tryToMoveFreeBeforeNullTest(CallInst &FI, const DataLayout &DL)
Move the call to free before a NULL test.
static Value * simplifyOperationIntoSelectOperand(Instruction &I, SelectInst *SI, bool IsTrueArm)
static Value * tryFactorization(BinaryOperator &I, const SimplifyQuery &SQ, InstCombiner::BuilderTy &Builder, Instruction::BinaryOps InnerOpcode, Value *A, Value *B, Value *C, Value *D)
This tries to simplify binary operations by factorizing out common terms (e.
static bool isRemovableWrite(CallBase &CB, Value *UsedV, const TargetLibraryInfo &TLI)
Given a call CB which uses an address UsedV, return true if we can prove the call's only possible eff...
static Instruction::BinaryOps getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op, Value *&LHS, Value *&RHS, BinaryOperator *OtherOp)
This function predicates factorization using distributive laws.
static bool hasNoUnsignedWrap(BinaryOperator &I)
static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI)
Check for case where the call writes to an otherwise dead alloca.
static cl::opt< unsigned > MaxSinkNumUsers("instcombine-max-sink-users", cl::init(32), cl::desc("Maximum number of undroppable users for instruction sinking"))
static Instruction * foldGEPOfPhi(GetElementPtrInst &GEP, PHINode *PN, IRBuilderBase &Builder)
static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo)
Return 'true' if the given typeinfo will match anything.
static cl::opt< bool > EnableCodeSinking("instcombine-code-sinking", cl::desc("Enable code sinking"), cl::init(true))
static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C)
static GEPNoWrapFlags getMergedGEPNoWrapFlags(GEPOperator &GEP1, GEPOperator &GEP2)
Determine nowrap flags for (gep (gep p, x), y) to (gep p, (x + y)) transform.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
static bool IsSelect(MachineInstr &MI)
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
const SmallVectorImpl< MachineOperand > & Cond
static unsigned getNumElements(Type *Ty)
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static SymbolRef::Type getType(const Symbol *Sym)
static const uint32_t IV[8]
bool isNoAliasScopeDeclDead(Instruction *Inst)
void analyse(Instruction *I)
A manager for alias analyses.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
static constexpr roundingMode rmNearestTiesToEven
static LLVM_ABI unsigned int semanticsPrecision(const fltSemantics &)
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
static LLVM_ABI void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Dual division/remainder interface.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
static LLVM_ABI void sdivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
unsigned getBitWidth() const
Return the number of bits in the APInt.
LLVM_ABI APInt sadd_ov(const APInt &RHS, bool &Overflow) const
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
LLVM_ABI APInt smul_ov(const APInt &RHS, bool &Overflow) const
bool isMaxSignedValue() const
Determine if this is the largest signed value.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
LLVM_ABI APInt ssub_ov(const APInt &RHS, bool &Overflow) const
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff they do not:
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
ArrayRef< T > take_front(size_t N=1) const
Return a copy of *this with only the first N elements.
size_t size() const
size - Get the array size.
Class to represent array types.
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
uint64_t getNumElements() const
Type * getElementType() const
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
LLVM_ABI void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
Functions, function parameters, and return types can have attributes to indicate how they should be t...
LLVM_ABI uint64_t getDereferenceableBytes() const
Returns the number of dereferenceable bytes from the dereferenceable attribute.
bool isValid() const
Return true if the attribute is any kind of attribute.
Legacy wrapper pass to provide the BasicAAResult object.
LLVM Basic Block Representation.
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Instruction & front() const
LLVM_ABI const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
InstListType::iterator iterator
Instruction iterators...
LLVM_ABI const_iterator getFirstNonPHIOrDbgOrAlloca() const
Returns an iterator to the first instruction in this block that is not a PHINode, a debug intrinsic,...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
static LLVM_ABI BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
BinaryOps getOpcode() const
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
static BinaryOperator * CreateNUW(BinaryOps Opc, Value *V1, Value *V2, const Twine &Name="")
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Analysis pass which computes BranchProbabilityInfo.
Analysis providing branch probability information.
Represents analyses that only rely on functions' control flow.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
void setAttributes(AttributeList A)
Set the attributes for this call.
bool doesNotThrow() const
Determine if the call cannot unwind.
Value * getArgOperand(unsigned i) const
AttributeList getAttributes() const
Return the attributes for this call.
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_UGT
unsigned greater than
@ ICMP_ULT
unsigned less than
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
Conditional Branch instruction.
LLVM_ABI void swapSuccessors()
Swap the successors of this branch instruction.
Value * getCondition() const
BasicBlock * getSuccessor(unsigned i) const
ConstantArray - Constant Array Declarations.
static LLVM_ABI Constant * get(ArrayType *T, ArrayRef< Constant * > V)
A vector constant whose element type is a simple 1/2/4/8-byte integer or float/double,...
static LLVM_ABI Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getNot(Constant *C)
static LLVM_ABI Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getBinOpIdentity(unsigned Opcode, Type *Ty, bool AllowRHSConstant=false, bool NSZ=false)
Return the identity constant for a binary opcode.
static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)
This is the shared class of boolean and integer constants.
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
This class represents a range of values.
LLVM_ABI bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
static LLVM_ABI ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
Constant Vector Declarations.
static LLVM_ABI Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
static LLVM_ABI Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
static LLVM_ABI Constant * replaceUndefsWith(Constant *C, Constant *Replacement)
Try to replace undefined constant C or undefined elements in C with Replacement.
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
const Constant * stripPointerCasts() const
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
A parsed version of the target data layout string in and methods for querying it.
Record of a variable value-assignment, aka a non instruction representation of the dbg....
static bool shouldExecute(CounterInfo &Counter)
Identifies a unique instance of a variable.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Analysis pass which computes a DominatorTree.
Legacy analysis pass which computes a DominatorTree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Utility class for floating point operations which can have information about relaxed accuracy require...
Convenience struct for specifying and reasoning about fast-math flags.
This class represents a freeze function that returns random concrete value if an operand is either a ...
FunctionPass class - This class is used to implement most global optimizations.
bool skipFunction(const Function &F) const
Optional passes call this function to check whether the pass should be skipped.
const BasicBlock & getEntryBlock() const
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags inBounds()
static GEPNoWrapFlags all()
static GEPNoWrapFlags noUnsignedWrap()
GEPNoWrapFlags intersectForReassociate(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep (gep, p, y), x).
bool hasNoUnsignedWrap() const
GEPNoWrapFlags intersectForOffsetAdd(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep p, x+y).
static GEPNoWrapFlags none()
GEPNoWrapFlags getNoWrapFlags() const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
static GetElementPtrInst * CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Create an "inbounds" getelementptr.
Legacy wrapper pass to provide the GlobalsAAResult object.
This instruction compares its operands according to the predicate given to the constructor.
CmpPredicate getCmpPredicate() const
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
Common base class shared among various IRBuilders.
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
ConstantInt * getInt(const APInt &AI)
Get a constant integer value.
Provides an 'InsertHelper' that calls a user-provided callback after performing the default insertion...
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
This instruction inserts a struct field or array element value into an aggregate value.
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
LLVM_ABI InstCombinePass(InstCombineOptions Opts={})
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Instruction * foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I)
Tries to simplify binops of select and cast of the select condition.
Instruction * visitCondBrInst(CondBrInst &BI)
Instruction * foldBinOpIntoSelectOrPhi(BinaryOperator &I)
This is a convenience wrapper function for the above two functions.
bool SimplifyAssociativeOrCommutative(BinaryOperator &I)
Performs a few simplifications for operators which are associative or commutative.
Instruction * visitGEPOfGEP(GetElementPtrInst &GEP, GEPOperator *Src)
Value * foldUsingDistributiveLaws(BinaryOperator &I)
Tries to simplify binary operations which some other binary operation distributes over.
Instruction * foldBinOpShiftWithShift(BinaryOperator &I)
Instruction * visitUnreachableInst(UnreachableInst &I)
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...
void handleUnreachableFrom(Instruction *I, SmallVectorImpl< BasicBlock * > &Worklist)
Value * SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &PoisonElts, unsigned Depth=0, bool AllowMultipleUsers=false) override
The specified value produces a vector with any number of elements.
Instruction * visitFreeze(FreezeInst &I)
Instruction * foldBinOpSelectBinOp(BinaryOperator &Op)
In some cases it is beneficial to fold a select into a binary operator.
void handlePotentiallyDeadBlocks(SmallVectorImpl< BasicBlock * > &Worklist)
bool prepareWorklist(Function &F)
Perform early cleanup and prepare the InstCombine worklist.
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false, bool SimplifyBothArms=false)
Given an instruction with a select as one operand and a constant as the other operand,...
Instruction * visitFree(CallInst &FI, Value *FreedOp)
Instruction * visitExtractValueInst(ExtractValueInst &EV)
void handlePotentiallyDeadSuccessors(BasicBlock *BB, BasicBlock *LiveSucc)
Instruction * foldBinopWithRecurrence(BinaryOperator &BO)
Try to fold binary operators whose operands are simple interleaved recurrences to a single recurrence...
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * visitLandingPadInst(LandingPadInst &LI)
Instruction * visitReturnInst(ReturnInst &RI)
Instruction * visitSwitchInst(SwitchInst &SI)
Instruction * foldBinopWithPhiOperands(BinaryOperator &BO)
For a binary operator with 2 phi operands, try to hoist the binary operation before the phi.
bool SimplifyDemandedFPClass(Instruction *I, unsigned Op, FPClassTest DemandedMask, KnownFPClass &Known, const SimplifyQuery &Q, unsigned Depth=0)
bool mergeStoreIntoSuccessor(StoreInst &SI)
Try to transform: if () { *P = v1; } else { *P = v2 } or: *P = v1; if () { *P = v2; }...
Instruction * tryFoldInstWithCtpopWithNot(Instruction *I)
Instruction * visitUncondBrInst(UncondBrInst &BI)
void CreateNonTerminatorUnreachable(Instruction *InsertAt)
Create and insert the idiom we use to indicate a block is unreachable without having to rewrite the C...
Value * pushFreezeToPreventPoisonFromPropagating(FreezeInst &FI)
bool run()
Run the combiner over the entire worklist until it is empty.
Instruction * foldVectorBinop(BinaryOperator &Inst)
Canonicalize the position of binops relative to shufflevector.
bool removeInstructionsBeforeUnreachable(Instruction &I)
Value * SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS, Value *RHS)
void tryToSinkInstructionDbgVariableRecords(Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock, BasicBlock *DestBlock, SmallVectorImpl< DbgVariableRecord * > &DPUsers)
void addDeadEdge(BasicBlock *From, BasicBlock *To, SmallVectorImpl< BasicBlock * > &Worklist)
Constant * unshuffleConstant(ArrayRef< int > ShMask, Constant *C, VectorType *NewCTy)
Find a constant NewC that has property: shuffle(NewC, ShMask) = C Returns nullptr if such a constant ...
Instruction * visitAllocSite(Instruction &FI)
Instruction * visitGetElementPtrInst(GetElementPtrInst &GEP)
Value * tryFactorizationFolds(BinaryOperator &I)
This tries to simplify binary operations by factorizing out common terms (e.
Instruction * foldFreezeIntoRecurrence(FreezeInst &I, PHINode *PN)
bool tryToSinkInstruction(Instruction *I, BasicBlock *DestBlock)
Try to move the specified instruction from its current block into the beginning of DestBlock,...
bool freezeOtherUses(FreezeInst &FI)
void freelyInvertAllUsersOf(Value *V, Value *IgnoredUser=nullptr)
Freely adapt every user of V as-if V was changed to !V.
The core instruction combiner logic.
const DataLayout & getDataLayout() const
IRBuilder< TargetFolder, IRBuilderCallbackInserter > BuilderTy
An IRBuilder that automatically inserts new instructions into the worklist.
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
static unsigned getComplexity(Value *V)
Assign a complexity or rank value to LLVM Values.
unsigned ComputeNumSignBits(const Value *Op, const Instruction *CxtI=nullptr, unsigned Depth=0) const
Instruction * InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old)
Inserts an instruction New before instruction Old.
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
uint64_t MaxArraySizeForCombine
Maximum size of array considered when transforming.
static bool shouldAvoidAbsorbingNotIntoSelect(const SelectInst &SI)
void replaceUse(Use &U, Value *NewValue)
Replace use and add the previously used value to the worklist.
static bool isCanonicalPredicate(CmpPredicate Pred)
Predicate canonicalization reduces the number of patterns that need to be matched by other transforms...
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
Instruction * InsertNewInstWith(Instruction *New, BasicBlock::iterator Old)
Same as InsertNewInstBefore, but also sets the debug loc.
BranchProbabilityInfo * BPI
ReversePostOrderTraversal< BasicBlock * > & RPOT
void computeKnownBits(const Value *V, KnownBits &Known, const Instruction *CxtI, unsigned Depth=0) const
std::optional< Instruction * > targetInstCombineIntrinsic(IntrinsicInst &II)
void addToWorklist(Instruction *I)
Value * getFreelyInvertedImpl(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume, unsigned Depth)
Return nonnull value if V is free to invert under the condition of WillInvertAllUses.
SmallDenseSet< std::pair< const BasicBlock *, const BasicBlock * >, 8 > BackEdges
Backedges, used to avoid pushing instructions across backedges in cases where this may result in infi...
std::optional< Value * > targetSimplifyDemandedVectorEltsIntrinsic(IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp)
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
static Constant * getSafeVectorConstantForBinop(BinaryOperator::BinaryOps Opcode, Constant *In, bool IsRHSConstant)
Some binary operators require special handling to avoid poison and undefined behavior.
SmallDenseSet< std::pair< BasicBlock *, BasicBlock * >, 8 > DeadEdges
Edges that are known to never be taken.
std::optional< Value * > targetSimplifyDemandedUseBitsIntrinsic(IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed)
bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
bool isBackEdge(const BasicBlock *From, const BasicBlock *To)
bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero=false, const Instruction *CxtI=nullptr, unsigned Depth=0)
void visit(Iterator Start, Iterator End)
The legacy pass manager's instcombine pass.
InstructionCombiningPass()
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overridden by passes that need analysis information to do t...
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overridden by subclasses to do the per-function processing of the pass.
InstructionWorklist - This is the worklist management logic for InstCombine and other simplification ...
void add(Instruction *I)
Add instruction to the worklist.
LLVM_ABI void dropUBImplyingAttrsAndMetadata(ArrayRef< unsigned > Keep={})
Drop any attributes or metadata that can cause immediate undefined behavior.
static bool isBitwiseLogicOp(unsigned Opcode)
Determine if the Opcode is and/or/xor.
LLVM_ABI void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
LLVM_ABI void setAAMetadata(const AAMDNodes &N)
Sets the AA metadata on this instruction from the AAMDNodes structure.
LLVM_ABI bool isAssociative() const LLVM_READONLY
Return true if the instruction is associative:
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
LLVM_ABI void setFastMathFlags(FastMathFlags FMF)
Convenience function for setting multiple fast-math flags on this instruction, which must be an opera...
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
bool isTerminator() const
LLVM_ABI FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
LLVM_ABI bool willReturn() const LLVM_READONLY
Return true if the instruction will return (unwinding is considered as a form of returning control fl...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
bool isBitwiseLogicOp() const
Return true if this is and/or/xor.
LLVM_ABI void dropPoisonGeneratingFlags()
Drops flags that may cause this instruction to evaluate to poison despite having non-poison inputs.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A wrapper class for inspecting calls to intrinsic functions.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
static LLVM_ABI LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...
LLVM_ABI void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
A function/module analysis which provides an empty LastRunTrackingInfo.
This is an alternative analysis pass to BlockFrequencyInfoWrapperPass.
static void getLazyBFIAnalysisUsage(AnalysisUsage &AU)
Helper for client passes to set up the analysis usage on behalf of this pass.
An instruction for reading from memory.
Value * getPointerOperand()
bool isVolatile() const
Return true if this is a load from a volatile memory location.
const MDOperand & getOperand(unsigned I) const
unsigned getNumOperands() const
Return number of MDNode operands.
Tracking metadata reference owned by Metadata.
This is the common base class for memset/memcpy/memmove.
static LLVM_ABI MemoryLocation getForDest(const MemIntrinsic *MI)
Return a location representing the destination of a memory set or transfer.
static ICmpInst::Predicate getPredicate(Intrinsic::ID ID)
Returns the comparison predicate underlying the intrinsic.
A Module instance is used to store all the information related to an LLVM module.
MDNode * getScopeList() const
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
PassRegistry - This class manages the registration and initialization of the pass subsystem as appli...
AnalysisType & getAnalysis() const
getAnalysis<AnalysisType>() - This function is used by subclasses to get to the analysis information ...
AnalysisType * getAnalysisIfAvailable() const
getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to get analysis information tha...
In order to facilitate speculative execution, many instructions do not invoke immediate undefined beh...
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
PreservedAnalyses & preserve()
Mark an analysis as preserved.
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
An analysis pass based on legacy pass manager to deliver ProfileSummaryInfo.
Analysis providing profile information.
bool hasProfileSummary() const
Returns true if profile summary is available.
A global registry used in conjunction with static constructors to make pluggable components (like tar...
Return a value (possibly void), from a function.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
This class represents the LLVM 'select' instruction.
const Value * getFalseValue() const
const Value * getCondition() const
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, const Instruction *MDFrom=nullptr)
const Value * getTrueValue() const
bool insert(const value_type &X)
Insert a new element into the SetVector.
This instruction constructs a fixed permutation of two input vectors.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
typename SuperClass::iterator iterator
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
TargetFolder - Create constants with target dependent folding.
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
bool has(LibFunc F) const
Tests whether a library function is available.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
bool isPointerTy() const
True if this is an instance of PointerType.
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
bool isStructTy() const
True if this is an instance of StructType.
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
LLVM_ABI const fltSemantics & getFltSemantics() const
Unconditional Branch instruction.
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
LLVM_ABI bool isDroppable() const
A droppable user is a user for which uses can be dropped without affecting correctness and should be ...
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
LLVM_ABI bool hasOneUser() const
Return true if there is exactly one user of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
LLVMContext & getContext() const
All values hold a context through their type.
iterator_range< user_iterator > users()
bool hasUseList() const
Check if this Value has a use-list.
LLVM_ABI bool hasNUses(unsigned N) const
Return true if this Value has exactly N uses.
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
LLVM_ABI uint64_t getPointerDereferenceableBytes(const DataLayout &DL, bool &CanBeNull, bool &CanBeFreed) const
Returns the number of bytes known to be dereferenceable for the pointer value.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Value handle that is nullable, but tries to track the Value.
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
An efficient, type-erasing, non-owning reference to a callable.
TypeSize getSequentialElementStride(const DataLayout &DL) const
Type * getIndexedType() const
const ParentTy * getParent() const
reverse_self_iterator getReverseIterator()
self_iterator getIterator()
This class implements an extremely fast bulk output stream that can only output to a stream.
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Abstract Attribute helper functions.
@ C
The default llvm calling convention, compatible with C.
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > OverloadTys={})
Look up the Function declaration of the intrinsic id in the Module M.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
match_combine_or< Ty... > m_CombineOr(const Ty &...Ps)
Combine pattern matchers matching any of Ps patterns.
match_combine_and< Ty... > m_CombineAnd(const Ty &...Ps)
Combine pattern matchers matching all of Ps patterns.
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
PtrAdd_match< PointerOpTy, OffsetOpTy > m_PtrAdd(const PointerOpTy &PointerOp, const OffsetOpTy &OffsetOp)
Matches GEP with i8 source element type.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, FCmpInst > m_FCmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
auto m_PtrToIntOrAddr(const OpTy &Op)
Matches PtrToInt or PtrToAddr.
OneOps_match< OpTy, Instruction::Freeze > m_Freeze(const OpTy &Op)
Matches FreezeInst.
auto m_Poison()
Match an arbitrary poison constant.
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
br_match m_UnconditionalBr(BasicBlock *&Succ)
ap_match< APInt > m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
match_bind< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
DisjointOr_match< LHS, RHS > m_DisjointOr(const LHS &L, const RHS &R)
constantexpr_match m_ConstantExpr()
Match a constant expression or a constant that contains a constant expression.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
ap_match< APFloat > m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
auto m_BinOp()
Match an arbitrary binary operation and ignore it.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
match_combine_or< CastInst_match< OpTy, UIToFPInst >, CastInst_match< OpTy, SIToFPInst > > m_IToFP(const OpTy &Op)
auto m_Value()
Match an arbitrary value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
auto m_Constant()
Match an arbitrary Constant and ignore it.
NNegZExt_match< OpTy > m_NNegZExt(const OpTy &Op)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
Splat_match< T > m_ConstantSplat(const T &SubPattern)
Match a constant splat. TODO: Extend this to non-constant splats.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
ThreeOps_match< decltype(m_Value()), LHS, RHS, Instruction::Select, true > m_c_Select(const LHS &L, const RHS &R)
Match Select(C, LHS, RHS) or Select(C, RHS, LHS)
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
SelectLike_match< CondTy, LTy, RTy > m_SelectLike(const CondTy &C, const LTy &TrueC, const RTy &FalseC)
Matches a value that behaves like a boolean-controlled select, i.e.
auto m_MaxOrMin(const LHS &L, const RHS &R)
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
match_combine_or< CastInst_match< OpTy, SExtInst >, NNegZExt_match< OpTy > > m_SExtLike(const OpTy &Op)
Match either "sext" or "zext nneg".
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap >, DisjointOr_match< LHS, RHS > > m_NSWAddLike(const LHS &L, const RHS &R)
Match either "add nsw" or "or disjoint".
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
cstfp_pred_ty< is_non_zero_fp > m_NonZeroFP()
Match a floating-point non-zero.
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
brc_match< Cond_t, match_bind< BasicBlock >, match_bind< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap >, DisjointOr_match< LHS, RHS > > m_NUWAddLike(const LHS &L, const RHS &R)
Match either "add nuw" or "or disjoint".
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_VectorInsert(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
auto m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
initializer< Ty > init(const Ty &Val)
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
LLVM_ABI Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
FunctionAddr VTableAddr Value
void stable_sort(R &&Range)
LLVM_ABI void initializeInstructionCombiningPassPass(PassRegistry &)
cl::opt< bool > ProfcheckDisableMetadataFixes
LLVM_ABI unsigned removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB)
Remove all instructions from a basic block other than its terminator and any present EH pad instructions.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI Value * simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef< Value * > Indices, GEPNoWrapFlags NW, const SimplifyQuery &Q)
Given operands for a GetElementPtrInst, fold the result or return null.
LLVM_ABI Constant * getInitialValueOfAllocation(const Value *V, const TargetLibraryInfo *TLI, Type *Ty)
If this is a call to an allocation function that initializes memory to a fixed value, return said value in the requested type.
bool succ_empty(const Instruction *I)
LLVM_ABI Value * simplifyFreezeInst(Value *Op, const SimplifyQuery &Q)
Given an operand for a Freeze, see if we can fold the result.
LLVM_ABI FunctionPass * createInstructionCombiningPass()
LLVM_ABI void findDbgValues(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the dbg.values describing a value.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing the effect of MI in a DIExpression.
auto successors(const MachineBasicBlock *BB)
LLVM_ABI Constant * ConstantFoldInstruction(const Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstruction - Try to constant fold the specified instruction.
LLVM_ABI bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI)
Return true if this is a call to an allocation function that does not have side effects that we are required to preserve beyond the effect of allocating a new object.
LLVM_ABI std::optional< StringRef > getAllocationFamily(const Value *I, const TargetLibraryInfo *TLI)
If a function is part of an allocation family (e.g. malloc/realloc), return the identifier for its family of functions.
OuterAnalysisManagerProxy< ModuleAnalysisManager, Function > ModuleAnalysisManagerFunctionProxy
Provide the ModuleAnalysisManager to Function proxy.
LLVM_ABI Value * lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL, const TargetLibraryInfo *TLI, bool MustSucceed)
Try to turn a call to @llvm.objectsize into an integer value of the given Type.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
LLVM_ABI Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &Q)
Like simplifyInstruction but the operands of I are replaced with NewOps.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iterators.
gep_type_iterator gep_type_end(const User *GEP)
LLVM_ABI Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
LLVM_ABI Value * getReallocatedOperand(const CallBase *CB)
If this is a call to a realloc function, return the reallocated operand.
APFloat frexp(const APFloat &X, int &Exp, APFloat::roundingMode RM)
Equivalent of C standard library function.
LLVM_ABI bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates memory (either malloc, calloc, or strdup like).
LLVM_ABI bool handleUnreachableTerminator(Instruction *I, SmallVectorImpl< Value * > &PoisonedValues)
If a terminator in an unreachable basic block has an operand of type Instruction, transform it into a poison value.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
LLVM_ABI void setBranchWeights(Instruction &I, ArrayRef< uint32_t > Weights, bool IsExpected, bool ElideAllZero=false)
Create a new branch_weights metadata node and add or overwrite a prof metadata reference to instruction I.
LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
LLVM_ABI Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
LLVM_ABI Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
auto dyn_cast_or_null(const Y &Val)
constexpr bool has_single_bit(T Value) noexcept
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
LLVM_ABI bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned element.
LLVM_ABI Value * emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL, User *GEP, bool NoAssumptions=false)
Given a getelementptr instruction/constantexpr, emit the code necessary to compute the offset from the base pointer (without adding in the base pointer).
constexpr unsigned MaxAnalysisRecursionDepth
auto reverse(ContainerTy &&C)
bool isModSet(const ModRefInfo MRI)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI bool LowerDbgDeclare(Function &F)
Lowers dbg.declare records into appropriate set of dbg.value records.
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an address space.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
generic_gep_type_iterator<> gep_type_iterator
LLVM_ABI void ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, StoreInst *SI, DIBuilder &Builder)
Inserts a dbg.value record before a store to an alloca'd value that has an associated dbg.declare record.
LLVM_ABI void salvageDebugInfoForDbgValues(Instruction &I, ArrayRef< DbgVariableRecord * > DPInsns)
Implementation of salvageDebugInfo, applying only to instructions in Insns, rather than all debug use...
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
LLVM_ABI bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison operands.
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ABI Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an ExtractValueInst, fold the result or return null.
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
LLVM_ABI bool replaceAllDbgUsesWith(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT)
Point debug users of From to To or salvage them.
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr int PoisonMaskElem
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
ModRefInfo
Flags indicating whether a memory access modifies or references memory.
@ Ref
The access may reference the value stored in memory.
@ ModRef
The access may reference and may modify the value stored in memory.
@ Mod
The access may modify the value stored in memory.
@ NoModRef
The access neither references nor modifies the value stored in memory.
LLVM_ABI Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
@ Sub
Subtraction of integers.
DWARFExpression::Operation Op
bool isSafeToSpeculativelyExecuteWithVariableReplaced(const Instruction *I, bool IgnoreUBImplyingAttrs=true)
Don't use information from its non-constant operands.
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)
If this if a call to a free function, return the freed operand.
constexpr unsigned BitWidth
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
LLVM_ABI Constant * getLosslessInvCast(Constant *C, Type *InvCastTo, unsigned CastOp, const DataLayout &DL, PreservedCastFlags *Flags=nullptr)
Try to cast C to InvC losslessly, satisfying CastOp(InvC) equals C, or CastOp(InvC) is a refined valu...
LLVM_ABI bool extractBranchWeights(const MDNode *ProfileData, SmallVectorImpl< uint32_t > &Weights)
Extract branch weights from MD_prof metadata.
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given predicate occurs in a range.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
gep_type_iterator gep_type_begin(const User *GEP)
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
bool equal(L &&LRange, R &&RRange)
Wrapper function around std::equal to detect if pair-wise elements between two ranges are the same.
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal.address from the specified value V, returning the original object being addressed.
AAResults AliasAnalysis
Temporary typedef for legacy code that uses a generic AliasAnalysis pointer or reference.
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
LLVM_ABI void initializeInstCombine(PassRegistry &)
Initialize all passes linked into the InstCombine library.
LLVM_ABI void findDbgUsers(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the debug info records describing a value.
LLVM_ABI Constant * ConstantFoldBinaryInstruction(unsigned Opcode, Constant *V1, Constant *V2)
bool isRefSet(const ModRefInfo MRI)
LLVM_ABI std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
unsigned countMinLeadingOnes() const
Returns the minimum number of leading one bits.
unsigned getBitWidth() const
Get the bit width of this value.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
A CRTP mix-in to automatically provide informational APIs needed for passes.
SimplifyQuery getWithInstruction(const Instruction *I) const