108#define DEBUG_TYPE "instcombine"
116 "Number of instruction combining iterations performed");
117STATISTIC(NumOneIteration,
"Number of functions with one iteration");
118STATISTIC(NumTwoIterations,
"Number of functions with two iterations");
119STATISTIC(NumThreeIterations,
"Number of functions with three iterations");
121 "Number of functions with four or more iterations");
125STATISTIC(NumDeadInst ,
"Number of dead inst eliminated");
131 "Controls which instructions are visited");
138 "instcombine-max-sink-users",
cl::init(32),
139 cl::desc(
"Maximum number of undroppable users for instruction sinking"));
143 cl::desc(
"Maximum array size considered when doing a combine"));
155std::optional<Instruction *>
158 if (
II.getCalledFunction()->isTargetIntrinsic()) {
166 bool &KnownBitsComputed) {
168 if (
II.getCalledFunction()->isTargetIntrinsic()) {
170 *
this,
II, DemandedMask, Known, KnownBitsComputed);
181 if (
II.getCalledFunction()->isTargetIntrinsic()) {
183 *
this,
II, DemandedElts, PoisonElts, PoisonElts2, PoisonElts3,
201 auto *Inst = dyn_cast<Instruction>(
GEP);
208 if (Inst && !
GEP->hasOneUse() && !
GEP->hasAllConstantIndices() &&
209 !
GEP->getSourceElementType()->isIntegerTy(8)) {
223bool InstCombinerImpl::isDesirableIntType(
unsigned BitWidth)
const {
242bool InstCombinerImpl::shouldChangeType(
unsigned FromWidth,
243 unsigned ToWidth)
const {
249 if (ToWidth < FromWidth && isDesirableIntType(ToWidth))
254 if ((FromLegal || isDesirableIntType(FromWidth)) && !ToLegal)
259 if (!FromLegal && !ToLegal && ToWidth > FromWidth)
270bool InstCombinerImpl::shouldChangeType(
Type *
From,
Type *To)
const {
276 unsigned FromWidth =
From->getPrimitiveSizeInBits();
278 return shouldChangeType(FromWidth, ToWidth);
287 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&
I);
288 if (!OBO || !OBO->hasNoSignedWrap())
293 if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
296 const APInt *BVal, *CVal;
300 bool Overflow =
false;
301 if (Opcode == Instruction::Add)
302 (void)BVal->
sadd_ov(*CVal, Overflow);
304 (
void)BVal->
ssub_ov(*CVal, Overflow);
310 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&
I);
311 return OBO && OBO->hasNoUnsignedWrap();
315 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&
I);
316 return OBO && OBO->hasNoSignedWrap();
325 I.clearSubclassOptionalData();
330 I.clearSubclassOptionalData();
331 I.setFastMathFlags(FMF);
340 auto *Cast = dyn_cast<CastInst>(BinOp1->
getOperand(0));
341 if (!Cast || !Cast->hasOneUse())
345 auto CastOpcode = Cast->getOpcode();
346 if (CastOpcode != Instruction::ZExt)
354 auto *BinOp2 = dyn_cast<BinaryOperator>(Cast->getOperand(0));
355 if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode)
381 Cast->dropPoisonGeneratingFlags();
387Value *InstCombinerImpl::simplifyIntToPtrRoundTripCast(
Value *Val) {
388 auto *IntToPtr = dyn_cast<IntToPtrInst>(Val);
391 auto *PtrToInt = dyn_cast<PtrToIntInst>(IntToPtr->getOperand(0));
392 Type *CastTy = IntToPtr->getDestTy();
395 PtrToInt->getSrcTy()->getPointerAddressSpace() &&
398 return PtrToInt->getOperand(0);
425 bool Changed =
false;
433 Changed = !
I.swapOperands();
435 if (
I.isCommutative()) {
436 if (
auto Pair = matchSymmetricPair(
I.getOperand(0),
I.getOperand(1))) {
446 if (
I.isAssociative()) {
469 I.setHasNoUnsignedWrap(
true);
472 I.setHasNoSignedWrap(
true);
501 if (
I.isAssociative() &&
I.isCommutative()) {
564 if (isa<FPMathOperator>(NewBO)) {
578 I.setHasNoUnsignedWrap(
true);
596 if (LOp == Instruction::And)
597 return ROp == Instruction::Or || ROp == Instruction::Xor;
600 if (LOp == Instruction::Or)
601 return ROp == Instruction::And;
605 if (LOp == Instruction::Mul)
606 return ROp == Instruction::Add || ROp == Instruction::Sub;
629 if (isa<Constant>(V))
643 assert(
Op &&
"Expected a binary operator");
644 LHS =
Op->getOperand(0);
645 RHS =
Op->getOperand(1);
646 if (TopOpcode == Instruction::Add || TopOpcode == Instruction::Sub) {
651 Instruction::Shl, ConstantInt::get(
Op->getType(), 1),
C);
652 assert(
RHS &&
"Constant folding of immediate constants failed");
653 return Instruction::Mul;
658 if (OtherOp && OtherOp->
getOpcode() == Instruction::AShr &&
661 return Instruction::AShr;
664 return Op->getOpcode();
673 assert(
A &&
B &&
C &&
D &&
"All values must be provided");
676 Value *RetVal =
nullptr;
687 if (
A ==
C || (InnerCommutative &&
A ==
D)) {
707 if (
B ==
D || (InnerCommutative &&
B ==
C)) {
730 if (isa<OverflowingBinaryOperator>(RetVal)) {
733 if (isa<OverflowingBinaryOperator>(&
I)) {
734 HasNSW =
I.hasNoSignedWrap();
735 HasNUW =
I.hasNoUnsignedWrap();
737 if (
auto *LOBO = dyn_cast<OverflowingBinaryOperator>(
LHS)) {
738 HasNSW &= LOBO->hasNoSignedWrap();
739 HasNUW &= LOBO->hasNoUnsignedWrap();
742 if (
auto *ROBO = dyn_cast<OverflowingBinaryOperator>(
RHS)) {
743 HasNSW &= ROBO->hasNoSignedWrap();
744 HasNUW &= ROBO->hasNoUnsignedWrap();
747 if (TopLevelOpcode == Instruction::Add && InnerOpcode == Instruction::Mul) {
757 cast<Instruction>(RetVal)->setHasNoSignedWrap(HasNSW);
760 cast<Instruction>(RetVal)->setHasNoUnsignedWrap(HasNUW);
775 unsigned Opc =
I->getOpcode();
776 unsigned ConstIdx = 1;
783 case Instruction::Sub:
786 case Instruction::ICmp:
793 case Instruction::Or:
797 case Instruction::Add:
803 if (!
match(
I->getOperand(1 - ConstIdx),
816 if (Opc == Instruction::ICmp && !cast<ICmpInst>(
I)->isEquality()) {
819 if (!Cmp || !Cmp->isZeroValue())
824 bool Consumes =
false;
828 assert(NotOp !=
nullptr &&
829 "Desync between isFreeToInvert and getFreelyInverted");
838 case Instruction::Sub:
841 case Instruction::Or:
842 case Instruction::Add:
845 case Instruction::ICmp:
881 auto IsValidBinOpc = [](
unsigned Opc) {
885 case Instruction::And:
886 case Instruction::Or:
887 case Instruction::Xor:
888 case Instruction::Add:
897 auto IsCompletelyDistributable = [](
unsigned BinOpc1,
unsigned BinOpc2,
899 assert(ShOpc != Instruction::AShr);
900 return (BinOpc1 != Instruction::Add && BinOpc2 != Instruction::Add) ||
901 ShOpc == Instruction::Shl;
904 auto GetInvShift = [](
unsigned ShOpc) {
905 assert(ShOpc != Instruction::AShr);
906 return ShOpc == Instruction::LShr ? Instruction::Shl : Instruction::LShr;
909 auto CanDistributeBinops = [&](
unsigned BinOpc1,
unsigned BinOpc2,
913 if (BinOpc1 == Instruction::And)
918 if (!IsCompletelyDistributable(BinOpc1, BinOpc2, ShOpc))
924 if (BinOpc2 == Instruction::And)
935 auto MatchBinOp = [&](
unsigned ShOpnum) ->
Instruction * {
937 Value *
X, *
Y, *ShiftedX, *Mask, *Shift;
938 if (!
match(
I.getOperand(ShOpnum),
941 if (!
match(
I.getOperand(1 - ShOpnum),
949 auto *IY = dyn_cast<Instruction>(
I.getOperand(ShOpnum));
950 auto *IX = dyn_cast<Instruction>(ShiftedX);
955 unsigned ShOpc = IY->getOpcode();
956 if (ShOpc != IX->getOpcode())
960 auto *BO2 = dyn_cast<Instruction>(
I.getOperand(1 - ShOpnum));
964 unsigned BinOpc = BO2->getOpcode();
966 if (!IsValidBinOpc(
I.getOpcode()) || !IsValidBinOpc(BinOpc))
969 if (ShOpc == Instruction::AShr) {
983 if (BinOpc ==
I.getOpcode() &&
984 IsCompletelyDistributable(
I.getOpcode(), BinOpc, ShOpc)) {
999 if (!CanDistributeBinops(
I.getOpcode(), BinOpc, ShOpc, CMask, CShift))
1013 return MatchBinOp(1);
1031 Value *
A, *CondVal, *TrueVal, *FalseVal;
1034 auto MatchSelectAndCast = [&](
Value *CastOp,
Value *SelectOp) {
1036 A->getType()->getScalarSizeInBits() == 1 &&
1043 if (MatchSelectAndCast(
LHS,
RHS))
1045 else if (MatchSelectAndCast(
RHS,
LHS))
1050 auto NewFoldedConst = [&](
bool IsTrueArm,
Value *V) {
1051 bool IsCastOpRHS = (CastOp ==
RHS);
1052 bool IsZExt = isa<ZExtInst>(CastOp);
1057 }
else if (IsZExt) {
1058 unsigned BitWidth = V->getType()->getScalarSizeInBits();
1071 Value *NewTrueVal = NewFoldedConst(
false, TrueVal);
1073 NewFoldedConst(
true, FalseVal));
1077 Value *NewTrueVal = NewFoldedConst(
true, TrueVal);
1079 NewFoldedConst(
false, FalseVal));
1100 if (Op0 && Op1 && LHSOpcode == RHSOpcode)
1220static std::optional<std::pair<Value *, Value *>>
1222 if (
LHS->getParent() !=
RHS->getParent())
1223 return std::nullopt;
1225 if (
LHS->getNumIncomingValues() < 2)
1226 return std::nullopt;
1229 return std::nullopt;
1231 Value *L0 =
LHS->getIncomingValue(0);
1232 Value *R0 =
RHS->getIncomingValue(0);
1234 for (
unsigned I = 1, E =
LHS->getNumIncomingValues();
I != E; ++
I) {
1238 if ((L0 == L1 && R0 == R1) || (L0 == R1 && R0 == L1))
1241 return std::nullopt;
1244 return std::optional(std::pair(L0, R0));
1247std::optional<std::pair<Value *, Value *>>
1248InstCombinerImpl::matchSymmetricPair(
Value *LHS,
Value *RHS) {
1249 Instruction *LHSInst = dyn_cast<Instruction>(LHS);
1250 Instruction *RHSInst = dyn_cast<Instruction>(RHS);
1252 return std::nullopt;
1254 case Instruction::PHI:
1256 case Instruction::Select: {
1262 return std::pair(TrueVal, FalseVal);
1263 return std::nullopt;
1265 case Instruction::Call: {
1269 if (LHSMinMax && RHSMinMax &&
1276 return std::pair(LHSMinMax->
getLHS(), LHSMinMax->
getRHS());
1277 return std::nullopt;
1280 return std::nullopt;
1290 if (!LHSIsSelect && !RHSIsSelect)
1295 if (isa<FPMathOperator>(&
I)) {
1296 FMF =
I.getFastMathFlags();
1303 Value *
Cond, *True =
nullptr, *False =
nullptr;
1311 if (Opcode != Instruction::Add || (!True && !False) || (True && False))
1326 if (LHSIsSelect && RHSIsSelect &&
A ==
D) {
1335 else if (True && !False)
1343 if (
Value *NewSel = foldAddNegate(
B,
C,
RHS))
1350 if (
Value *NewSel = foldAddNegate(E,
F,
LHS))
1354 if (!True || !False)
1365 assert(!isa<Constant>(
I) &&
"Shouldn't invert users of constant");
1367 if (U == IgnoredUser)
1369 switch (cast<Instruction>(U)->
getOpcode()) {
1370 case Instruction::Select: {
1371 auto *SI = cast<SelectInst>(U);
1373 SI->swapProfMetadata();
1376 case Instruction::Br: {
1383 case Instruction::Xor:
1390 "canFreelyInvertAllUsersOf() ?");
1397Value *InstCombinerImpl::dyn_castNegVal(
Value *V)
const {
1407 if (
C->getType()->getElementType()->isIntegerTy())
1411 for (
unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1416 if (isa<UndefValue>(Elt))
1419 if (!isa<ConstantInt>(Elt))
1426 if (
auto *CV = dyn_cast<Constant>(V))
1427 if (CV->getType()->isVectorTy() &&
1428 CV->getType()->getScalarType()->isIntegerTy() && CV->getSplatValue())
1441Instruction *InstCombinerImpl::foldFBinOpOfIntCastsFromSign(
1442 BinaryOperator &BO,
bool OpsFromSigned, std::array<Value *, 2> IntOps,
1446 Type *IntTy = IntOps[0]->getType();
1451 unsigned MaxRepresentableBits =
1456 unsigned NumUsedLeadingBits[2] = {IntSz, IntSz};
1460 auto IsNonZero = [&](
unsigned OpNo) ->
bool {
1461 if (OpsKnown[OpNo].hasKnownBits() &&
1462 OpsKnown[OpNo].getKnownBits(
SQ).isNonZero())
1467 auto IsNonNeg = [&](
unsigned OpNo) ->
bool {
1471 return OpsKnown[OpNo].getKnownBits(
SQ).isNonNegative();
1475 auto IsValidPromotion = [&](
unsigned OpNo) ->
bool {
1477 if (OpsFromSigned != isa<SIToFPInst>(BO.
getOperand(OpNo)) &&
1486 if (MaxRepresentableBits < IntSz) {
1496 NumUsedLeadingBits[OpNo] =
1497 IntSz - OpsKnown[OpNo].getKnownBits(
SQ).countMinLeadingZeros();
1505 if (MaxRepresentableBits < NumUsedLeadingBits[OpNo])
1508 return !OpsFromSigned || BO.
getOpcode() != Instruction::FMul ||
1513 if (Op1FpC !=
nullptr) {
1515 if (OpsFromSigned && BO.
getOpcode() == Instruction::FMul &&
1520 OpsFromSigned ? Instruction::FPToSI : Instruction::FPToUI, Op1FpC,
1522 if (Op1IntC ==
nullptr)
1525 : Instruction::UIToFP,
1526 Op1IntC, FPTy,
DL) != Op1FpC)
1530 IntOps[1] = Op1IntC;
1534 if (IntTy != IntOps[1]->
getType())
1537 if (Op1FpC ==
nullptr) {
1538 if (!IsValidPromotion(1))
1541 if (!IsValidPromotion(0))
1547 bool NeedsOverflowCheck =
true;
1550 unsigned OverflowMaxOutputBits = OpsFromSigned ? 2 : 1;
1551 unsigned OverflowMaxCurBits =
1552 std::max(NumUsedLeadingBits[0], NumUsedLeadingBits[1]);
1553 bool OutputSigned = OpsFromSigned;
1555 case Instruction::FAdd:
1556 IntOpc = Instruction::Add;
1557 OverflowMaxOutputBits += OverflowMaxCurBits;
1559 case Instruction::FSub:
1560 IntOpc = Instruction::Sub;
1561 OverflowMaxOutputBits += OverflowMaxCurBits;
1563 case Instruction::FMul:
1564 IntOpc = Instruction::Mul;
1565 OverflowMaxOutputBits += OverflowMaxCurBits * 2;
1571 if (OverflowMaxOutputBits < IntSz) {
1572 NeedsOverflowCheck =
false;
1575 if (IntOpc == Instruction::Sub)
1576 OutputSigned =
true;
1582 if (NeedsOverflowCheck &&
1583 !willNotOverflow(IntOpc, IntOps[0], IntOps[1], BO, OutputSigned))
1587 if (
auto *IntBO = dyn_cast<BinaryOperator>(IntBinOp)) {
1588 IntBO->setHasNoSignedWrap(OutputSigned);
1589 IntBO->setHasNoUnsignedWrap(!OutputSigned);
1602 std::array<Value *, 2> IntOps = {
nullptr,
nullptr};
1622 if (
Instruction *R = foldFBinOpOfIntCastsFromSign(BO,
false,
1623 IntOps, Op1FpC, OpsKnown))
1625 return foldFBinOpOfIntCastsFromSign(BO,
true, IntOps,
1641 !
X->getType()->isIntOrIntVectorTy(1))
1658 V = IsTrueArm ? SI->getTrueValue() : SI->getFalseValue();
1659 }
else if (
match(SI->getCondition(),
1684 bool FoldWithMultiUse) {
1686 if (!SI->hasOneUse() && !FoldWithMultiUse)
1689 Value *TV = SI->getTrueValue();
1690 Value *FV = SI->getFalseValue();
1693 if (SI->getType()->isIntOrIntVectorTy(1))
1703 if (
auto *CI = dyn_cast<FCmpInst>(SI->getCondition())) {
1704 if (CI->hasOneUse()) {
1705 Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
1706 if ((TV == Op0 && FV == Op1) || (FV == Op0 && TV == Op1))
1715 if (!NewTV && !NewFV)
1752 const ICmpInst *ICmp = dyn_cast<ICmpInst>(&
I);
1767 bool AllowMultipleUses) {
1769 if (NumPHIValues == 0)
1776 bool IdenticalUsers =
false;
1777 if (!AllowMultipleUses && !OneUse) {
1781 if (UI != &
I && !
I.isIdenticalTo(UI))
1785 IdenticalUsers =
true;
1794 auto *
I = dyn_cast<Instruction>(
Op);
1799 if (isa<PHINode>(
I))
1815 bool SeenNonSimplifiedInVal =
false;
1816 for (
unsigned i = 0; i != NumPHIValues; ++i) {
1828 const APInt *Ignored;
1829 if (isa<CmpIntrinsic>(InVal) && InVal->
hasOneUser() &&
1836 if (!OneUse && !IdenticalUsers)
1839 if (SeenNonSimplifiedInVal)
1841 SeenNonSimplifiedInVal =
true;
1857 if (isa<InvokeInst>(InVal))
1858 if (cast<Instruction>(InVal)->
getParent() == InBB)
1871 for (
auto OpIndex : OpsToMoveUseToIncomingBB) {
1882 U = U->DoPHITranslation(PN->
getParent(), OpBB);
1885 Clones.
insert({OpBB, Clone});
1888 NewPhiValues[
OpIndex] = Clone;
1897 for (
unsigned i = 0; i != NumPHIValues; ++i)
1900 if (IdenticalUsers) {
1913 const_cast<PHINode &
>(*NewPN),
1923 auto *Phi0 = dyn_cast<PHINode>(BO.
getOperand(0));
1924 auto *Phi1 = dyn_cast<PHINode>(BO.
getOperand(1));
1925 if (!Phi0 || !Phi1 || !Phi0->hasOneUse() || !Phi1->hasOneUse() ||
1926 Phi0->getNumOperands() != Phi1->getNumOperands())
1930 if (BO.
getParent() != Phi0->getParent() ||
1947 auto CanFoldIncomingValuePair = [&](std::tuple<Use &, Use &>
T) {
1948 auto &Phi0Use = std::get<0>(
T);
1949 auto &Phi1Use = std::get<1>(
T);
1950 if (Phi0->getIncomingBlock(Phi0Use) != Phi1->getIncomingBlock(Phi1Use))
1952 Value *Phi0UseV = Phi0Use.get();
1953 Value *Phi1UseV = Phi1Use.get();
1956 else if (Phi1UseV ==
C)
1963 if (
all_of(
zip(Phi0->operands(), Phi1->operands()),
1964 CanFoldIncomingValuePair)) {
1967 assert(NewIncomingValues.
size() == Phi0->getNumOperands() &&
1968 "The number of collected incoming values should equal the number "
1969 "of the original PHINode operands!");
1970 for (
unsigned I = 0;
I < Phi0->getNumOperands();
I++)
1971 NewPhi->
addIncoming(NewIncomingValues[
I], Phi0->getIncomingBlock(
I));
1976 if (Phi0->getNumOperands() != 2 || Phi1->getNumOperands() != 2)
1983 ConstBB = Phi0->getIncomingBlock(0);
1984 OtherBB = Phi0->getIncomingBlock(1);
1986 ConstBB = Phi0->getIncomingBlock(1);
1987 OtherBB = Phi0->getIncomingBlock(0);
1997 auto *PredBlockBranch = dyn_cast<BranchInst>(OtherBB->
getTerminator());
1998 if (!PredBlockBranch || PredBlockBranch->isConditional() ||
2005 for (
auto BBIter = BO.
getParent()->begin(); &*BBIter != &BO; ++BBIter)
2018 Phi0->getIncomingValueForBlock(OtherBB),
2019 Phi1->getIncomingValueForBlock(OtherBB));
2020 if (
auto *NotFoldedNewBO = dyn_cast<BinaryOperator>(NewBO))
2021 NotFoldedNewBO->copyIRFlags(&BO);
2031 if (!isa<Constant>(
I.getOperand(1)))
2034 if (
auto *Sel = dyn_cast<SelectInst>(
I.getOperand(0))) {
2037 }
else if (
auto *PN = dyn_cast<PHINode>(
I.getOperand(0))) {
2048 if (
GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
2055 if (!isa<VectorType>(Inst.
getType()))
2061 cast<VectorType>(Inst.
getType())->getElementCount());
2063 cast<VectorType>(Inst.
getType())->getElementCount());
2068 Value *L0, *L1, *R0, *R1;
2073 cast<ShuffleVectorInst>(
LHS)->isConcat() &&
2074 cast<ShuffleVectorInst>(
RHS)->isConcat()) {
2081 if (
auto *BO = dyn_cast<BinaryOperator>(NewBO0))
2084 if (
auto *BO = dyn_cast<BinaryOperator>(NewBO1))
2091 if (
auto *BO = dyn_cast<BinaryOperator>(V))
2095 M, Intrinsic::vector_reverse, V->getType());
2108 return createBinOpReverse(V1, V2);
2112 return createBinOpReverse(V1,
RHS);
2116 return createBinOpReverse(
LHS, V2);
2126 if (
auto *BO = dyn_cast<BinaryOperator>(XY))
2135 V1->
getType() == V2->getType() &&
2138 return createBinOpShuffle(V1, V2, Mask);
2147 auto *LShuf = cast<ShuffleVectorInst>(
LHS);
2148 auto *RShuf = cast<ShuffleVectorInst>(
RHS);
2153 if (LShuf->isSelect() &&
2155 RShuf->isSelect() &&
2173 auto *InstVTy = dyn_cast<FixedVectorType>(Inst.
getType());
2178 cast<FixedVectorType>(V1->
getType())->getNumElements() <=
2179 InstVTy->getNumElements()) {
2181 "Shuffle should not change scalar type");
2188 bool ConstOp1 = isa<Constant>(
RHS);
2190 unsigned SrcVecNumElts =
2191 cast<FixedVectorType>(V1->
getType())->getNumElements();
2194 bool MayChange =
true;
2195 unsigned NumElts = InstVTy->getNumElements();
2196 for (
unsigned I = 0;
I < NumElts; ++
I) {
2198 if (ShMask[
I] >= 0) {
2199 assert(ShMask[
I] < (
int)NumElts &&
"Not expecting narrowing shuffle");
2207 if (!CElt || (!isa<PoisonValue>(NewCElt) && NewCElt != CElt) ||
2208 I >= SrcVecNumElts) {
2212 NewVecC[ShMask[
I]] = CElt;
2223 if (
I >= SrcVecNumElts || ShMask[
I] < 0) {
2228 if (!MaybePoison || !isa<PoisonValue>(MaybePoison)) {
2245 Value *NewLHS = ConstOp1 ? V1 : NewC;
2246 Value *NewRHS = ConstOp1 ? NewC : V1;
2247 return createBinOpShuffle(NewLHS, NewRHS, Mask);
2254 if (isa<ShuffleVectorInst>(
RHS))
2287 if (isa<FPMathOperator>(R)) {
2288 R->copyFastMathFlags(&Inst);
2291 if (
auto *NewInstBO = dyn_cast<BinaryOperator>(NewBO))
2292 NewInstBO->copyIRFlags(R);
2321 cast<Operator>(Op1)->getOpcode() == CastOpc &&
2322 (Op0->
hasOneUse() || Op1->hasOneUse()))) {
2340 if (!willNotOverflow(BO.
getOpcode(),
X,
Y, BO, IsSext))
2346 if (
auto *NewBinOp = dyn_cast<BinaryOperator>(NarrowBO)) {
2348 NewBinOp->setHasNoSignedWrap();
2350 NewBinOp->setHasNoUnsignedWrap();
2366 if (!
GEP.hasAllConstantIndices())
2382 Type *Ty =
GEP.getSourceElementType();
2384 Value *NewFalseC = Builder.
CreateGEP(Ty, FalseC, IndexC,
"", NW);
2394 if (
GEP.getNumIndices() != 1)
2403 Type *PtrTy = Src->getType()->getScalarType();
2404 unsigned IndexSizeInBits =
DL.getIndexTypeSizeInBits(PtrTy);
2411 if (isa<ScalableVectorType>(
BaseType))
2415 if (NewOffset.
isZero() ||
2416 (Src->hasOneUse() &&
GEP.getOperand(1)->hasOneUse())) {
2437 Type *PtrTy = Src->getType()->getScalarType();
2438 if (
GEP.hasAllConstantIndices() &&
2439 (Src->hasOneUse() || Src->hasAllConstantIndices())) {
2443 bool IsFirstType =
true;
2444 unsigned NumVarIndices = 0;
2445 for (
auto Pair :
enumerate(Src->indices())) {
2446 if (!isa<ConstantInt>(Pair.value())) {
2448 IsFirstType =
false;
2449 NumVarIndices = Pair.index() + 1;
2456 if (NumVarIndices != Src->getNumIndices()) {
2476 if (!
Offset.isZero() || (!IsFirstType && !ConstIndices[0].isZero()))
2482 Src->getNumIndices() - NumVarIndices));
2489 if (
Idx.isNonNegative() != ConstIndices[0].isNonNegative())
2491 if (!
Idx.isNonNegative())
2500 if (Src->getResultElementType() !=
GEP.getSourceElementType())
2506 bool EndsWithSequential =
false;
2509 EndsWithSequential =
I.isSequential();
2512 if (EndsWithSequential) {
2515 Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
2532 Indices.
append(Src->op_begin()+1, Src->op_end()-1);
2535 }
else if (isa<Constant>(*
GEP.idx_begin()) &&
2536 cast<Constant>(*
GEP.idx_begin())->isNullValue() &&
2537 Src->getNumOperands() != 1) {
2539 Indices.
append(Src->op_begin()+1, Src->op_end());
2543 if (!Indices.
empty())
2546 Src->getSourceElementType(), Src->getOperand(0), Indices,
"",
2554 bool &DoesConsume,
unsigned Depth) {
2555 static Value *
const NonNull =
reinterpret_cast<Value *
>(uintptr_t(1));
2573 if (!WillInvertAllUses)
2578 if (
auto *
I = dyn_cast<CmpInst>(V)) {
2589 DoesConsume,
Depth))
2592 DoesConsume,
Depth))
2601 DoesConsume,
Depth))
2604 DoesConsume,
Depth))
2613 DoesConsume,
Depth))
2622 DoesConsume,
Depth))
2634 bool LocalDoesConsume = DoesConsume;
2636 LocalDoesConsume,
Depth))
2639 LocalDoesConsume,
Depth)) {
2640 DoesConsume = LocalDoesConsume;
2643 DoesConsume,
Depth);
2644 assert(NotB !=
nullptr &&
2645 "Unable to build inverted value for known freely invertable op");
2646 if (
auto *
II = dyn_cast<IntrinsicInst>(V))
2655 if (
PHINode *PN = dyn_cast<PHINode>(V)) {
2656 bool LocalDoesConsume = DoesConsume;
2658 for (
Use &U : PN->operands()) {
2659 BasicBlock *IncomingBlock = PN->getIncomingBlock(U);
2663 if (NewIncomingVal ==
nullptr)
2666 if (NewIncomingVal == V)
2669 IncomingValues.
emplace_back(NewIncomingVal, IncomingBlock);
2672 DoesConsume = LocalDoesConsume;
2678 for (
auto [Val, Pred] : IncomingValues)
2687 DoesConsume,
Depth))
2694 DoesConsume,
Depth))
2703 bool IsLogical,
Value *
A,
2705 bool LocalDoesConsume = DoesConsume;
2707 LocalDoesConsume,
Depth))
2710 LocalDoesConsume,
Depth)) {
2712 LocalDoesConsume,
Depth);
2713 DoesConsume = LocalDoesConsume;
2723 return TryInvertAndOrUsingDeMorgan(Instruction::And,
false,
A,
2727 return TryInvertAndOrUsingDeMorgan(Instruction::Or,
false,
A,
2731 return TryInvertAndOrUsingDeMorgan(Instruction::And,
true,
A,
2735 return TryInvertAndOrUsingDeMorgan(Instruction::Or,
true,
A,
2744 Type *GEPEltType =
GEP.getSourceElementType();
2755 if (
GEP.getNumIndices() == 1 &&
2763 auto PtrOpGep = dyn_cast<GEPOperator>(PtrOp);
2764 return PtrOpGep && PtrOpGep->hasAllConstantIndices() &&
2767 return match(V, m_APInt(C)) && !C->isZero();
2773 auto *Op1 = dyn_cast<GetElementPtrInst>(PN->
getOperand(0));
2789 auto *Op2 = dyn_cast<GetElementPtrInst>(*
I);
2790 if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands() ||
2791 Op1->getSourceElementType() != Op2->getSourceElementType())
2799 Type *CurTy =
nullptr;
2801 for (
unsigned J = 0,
F = Op1->getNumOperands(); J !=
F; ++J) {
2802 if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())
2805 if (Op1->getOperand(J) != Op2->getOperand(J)) {
2814 assert(CurTy &&
"No current type?");
2834 CurTy = Op1->getSourceElementType();
2849 auto *NewGEP = cast<GetElementPtrInst>(Op1->clone());
2861 NewPN = Builder.
CreatePHI(Op1->getOperand(DI)->getType(),
2866 NewPN->
addIncoming(cast<GEPOperator>(
I)->getOperand(DI),
2869 NewGEP->setOperand(DI, NewPN);
2872 NewGEP->insertBefore(*
GEP.getParent(),
GEP.getParent()->getFirstInsertionPt());
2879 Type *GEPType =
GEP.getType();
2880 Type *GEPEltType =
GEP.getSourceElementType();
2889 if (
auto *GEPFVTy = dyn_cast<FixedVectorType>(GEPType)) {
2890 auto VWidth = GEPFVTy->getNumElements();
2891 APInt PoisonElts(VWidth, 0);
2907 bool MadeChange =
false;
2911 Type *NewScalarIndexTy =
2921 Type *IndexTy = (*I)->getType();
2922 Type *NewIndexType =
2925 cast<VectorType>(IndexTy)->getElementCount())
2937 if (IndexTy != NewIndexType) {
2949 if (!GEPEltType->
isIntegerTy(8) &&
GEP.hasAllConstantIndices()) {
2954 GEP.getNoWrapFlags()));
2965 if (
auto *PN = dyn_cast<PHINode>(PtrOp)) {
2970 if (
auto *Src = dyn_cast<GEPOperator>(PtrOp))
2974 if (
GEP.getNumIndices() == 1) {
2975 unsigned AS =
GEP.getPointerAddressSpace();
2976 if (
GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
2980 if (TyAllocSize == 1) {
2989 GEPType ==
Y->getType()) {
2990 bool HasSameUnderlyingObject =
2992 bool Changed =
false;
2993 GEP.replaceUsesWithIf(
Y, [&](
Use &U) {
2994 bool ShouldReplace = HasSameUnderlyingObject ||
2995 isa<ICmpInst>(U.getUser()) ||
2996 isa<PtrToIntInst>(U.getUser());
2997 Changed |= ShouldReplace;
2998 return ShouldReplace;
3000 return Changed ? &
GEP :
nullptr;
3002 }
else if (
auto *ExactIns =
3003 dyn_cast<PossiblyExactOperator>(
GEP.getOperand(1))) {
3006 if (ExactIns->isExact()) {
3014 GEP.getPointerOperand(), V,
3015 GEP.getNoWrapFlags());
3018 if (ExactIns->isExact() && ExactIns->hasOneUse()) {
3024 std::optional<APInt> NewC;
3044 if (NewC.has_value()) {
3047 ConstantInt::get(V->getType(), *NewC));
3048 cast<BinaryOperator>(NewOp)->setIsExact();
3050 GEP.getPointerOperand(), NewOp,
3051 GEP.getNoWrapFlags());
3061 if (
GEP.getNumIndices() == 1) {
3064 auto CanPreserveInBounds = [&](
bool AddIsNSW,
Value *Idx1,
Value *Idx2) {
3079 bool IsInBounds = CanPreserveInBounds(
3080 cast<OverflowingBinaryOperator>(
GEP.getOperand(1))->hasNoSignedWrap(),
3084 Idx1,
"", IsInBounds);
3098 bool IsInBounds = CanPreserveInBounds(
3101 GEP.getSourceElementType(),
GEP.getPointerOperand(),
3112 if (!
GEP.isInBounds()) {
3115 APInt BasePtrOffset(IdxWidth, 0);
3116 Value *UnderlyingPtrOp =
3119 bool CanBeNull, CanBeFreed;
3121 DL, CanBeNull, CanBeFreed);
3122 if (!CanBeNull && !CanBeFreed && DerefBytes != 0) {
3123 if (
GEP.accumulateConstantOffset(
DL, BasePtrOffset) &&
3125 APInt AllocSize(IdxWidth, DerefBytes);
3126 if (BasePtrOffset.
ule(AllocSize)) {
3128 GEP.getSourceElementType(), PtrOp, Indices,
GEP.getName());
3135 if (
GEP.hasNoUnsignedSignedWrap() && !
GEP.hasNoUnsignedWrap() &&
3137 return isKnownNonNegative(Idx, SQ.getWithInstruction(&GEP));
3151 if (isa<ConstantPointerNull>(V))
3153 if (
auto *LI = dyn_cast<LoadInst>(V))
3154 return isa<GlobalVariable>(LI->getPointerOperand());
3178 return Dest && Dest->Ptr == UsedV;
3192 switch (
I->getOpcode()) {
3197 case Instruction::AddrSpaceCast:
3198 case Instruction::BitCast:
3199 case Instruction::GetElementPtr:
3204 case Instruction::ICmp: {
3211 unsigned OtherIndex = (ICI->
getOperand(0) == PI) ? 1 : 0;
3218 auto AlignmentAndSizeKnownValid = [](
CallBase *CB) {
3222 const APInt *Alignment;
3224 return match(CB->getArgOperand(0),
m_APInt(Alignment)) &&
3228 auto *CB = dyn_cast<CallBase>(AI);
3230 if (CB && TLI.
getLibFunc(*CB->getCalledFunction(), TheLibFunc) &&
3231 TLI.
has(TheLibFunc) && TheLibFunc == LibFunc_aligned_alloc &&
3232 !AlignmentAndSizeKnownValid(CB))
3238 case Instruction::Call:
3241 switch (
II->getIntrinsicID()) {
3245 case Intrinsic::memmove:
3246 case Intrinsic::memcpy:
3247 case Intrinsic::memset: {
3249 if (
MI->isVolatile() ||
MI->getRawDest() != PI)
3253 case Intrinsic::assume:
3254 case Intrinsic::invariant_start:
3255 case Intrinsic::invariant_end:
3256 case Intrinsic::lifetime_start:
3257 case Intrinsic::lifetime_end:
3258 case Intrinsic::objectsize:
3261 case Intrinsic::launder_invariant_group:
3262 case Intrinsic::strip_invariant_group:
3291 case Instruction::Store: {
3293 if (SI->isVolatile() || SI->getPointerOperand() != PI)
3301 }
while (!Worklist.
empty());
3324 std::unique_ptr<DIBuilder> DIB;
3325 if (isa<AllocaInst>(
MI)) {
3331 for (
unsigned i = 0, e =
Users.size(); i != e; ++i) {
3340 if (
II->getIntrinsicID() == Intrinsic::objectsize) {
3343 II,
DL, &
TLI,
AA,
true, &InsertedInstructions);
3344 for (
Instruction *Inserted : InsertedInstructions)
3352 for (
unsigned i = 0, e =
Users.size(); i != e; ++i) {
3361 C->isFalseWhenEqual()));
3362 }
else if (
auto *SI = dyn_cast<StoreInst>(
I)) {
3363 for (
auto *DVI : DVIs)
3364 if (DVI->isAddressOfVariable())
3366 for (
auto *DVR : DVRs)
3367 if (DVR->isAddressOfVariable())
3410 for (
auto *DVI : DVIs)
3411 if (DVI->isAddressOfVariable() || DVI->getExpression()->startsWithDeref())
3412 DVI->eraseFromParent();
3413 for (
auto *DVR : DVRs)
3414 if (DVR->isAddressOfVariable() || DVR->getExpression()->startsWithDeref())
3415 DVR->eraseFromParent();
3461 if (FreeInstrBB->
size() != 2) {
3463 if (&Inst == &FI || &Inst == FreeInstrBBTerminator)
3465 auto *Cast = dyn_cast<CastInst>(&Inst);
3466 if (!Cast || !Cast->isNoopCast(
DL))
3487 "Broken CFG: missing edge from predecessor to successor");
3492 if (&Instr == FreeInstrBBTerminator)
3494 Instr.moveBeforePreserving(TI);
3497 "Only the branch instruction should remain");
3508 Attrs = Attrs.removeParamAttribute(FI.
getContext(), 0, Attribute::NonNull);
3509 Attribute Dereferenceable = Attrs.getParamAttr(0, Attribute::Dereferenceable);
3510 if (Dereferenceable.
isValid()) {
3512 Attrs = Attrs.removeParamAttribute(FI.
getContext(), 0,
3513 Attribute::Dereferenceable);
3514 Attrs = Attrs.addDereferenceableOrNullParamAttr(FI.
getContext(), 0, Bytes);
3523 if (isa<UndefValue>(
Op)) {
3531 if (isa<ConstantPointerNull>(
Op))
3567 FPClassTest ReturnClass =
F->getAttributes().getRetNoFPClass();
3568 if (ReturnClass ==
fcNone)
3585 bool Changed =
false;
3586 while (
Instruction *Prev =
I.getPrevNonDebugInstruction()) {
3591 if (Prev->isEHPad())
3622 return BBI->isDebugOrPseudoInst() ||
3623 (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy());
3628 if (BBI != FirstInstr)
3630 }
while (BBI != FirstInstr && IsNoopInstrForStoreMerging(BBI));
3632 return dyn_cast<StoreInst>(BBI);
3644 if (!
DeadEdges.insert({From, To}).second)
3649 for (
Use &U : PN.incoming_values())
3650 if (PN.getIncomingBlock(U) ==
From && !isa<PoisonValue>(U)) {
3666 std::next(
I->getReverseIterator())))) {
3667 if (!Inst.use_empty() && !Inst.getType()->isTokenTy()) {
3671 if (Inst.isEHPad() || Inst.getType()->isTokenTy())
3674 Inst.dropDbgRecords();
3682 for (
Value *V : Changed)
3709 if (Succ == LiveSucc)
3737 if (isa<SelectInst>(
Cond) &&
3758 auto *Cmp = cast<CmpInst>(
Cond);
3767 if (isa<UndefValue>(
Cond)) {
3771 if (
auto *CI = dyn_cast<ConstantInt>(
Cond)) {
3806 unsigned CstOpIdx = IsTrueArm ? 1 : 2;
3807 auto *
C = dyn_cast<ConstantInt>(
Select->getOperand(CstOpIdx));
3811 BasicBlock *CstBB = SI.findCaseValue(
C)->getCaseSuccessor();
3812 if (CstBB != SI.getDefaultDest())
3825 for (
auto Case : SI.cases())
3826 if (!CR.
contains(Case.getCaseValue()->getValue()))
3838 for (
auto Case : SI.cases()) {
3840 assert(isa<ConstantInt>(NewCase) &&
3841 "Result of expression should be constant");
3842 Case.setValue(cast<ConstantInt>(NewCase));
3850 for (
auto Case : SI.cases()) {
3852 assert(isa<ConstantInt>(NewCase) &&
3853 "Result of expression should be constant");
3854 Case.setValue(cast<ConstantInt>(NewCase));
3862 all_of(SI.cases(), [&](
const auto &Case) {
3863 return Case.getCaseValue()->getValue().countr_zero() >= ShiftAmt;
3869 Value *NewCond = Op0;
3876 for (
auto Case : SI.cases()) {
3877 const APInt &CaseVal = Case.getCaseValue()->getValue();
3879 : CaseVal.
lshr(ShiftAmt);
3880 Case.setValue(ConstantInt::get(SI.getContext(), ShiftedCase));
3888 bool IsZExt = isa<ZExtInst>(
Cond);
3892 if (
all_of(SI.cases(), [&](
const auto &Case) {
3893 const APInt &CaseVal = Case.getCaseValue()->getValue();
3894 return IsZExt ? CaseVal.isIntN(NewWidth)
3895 : CaseVal.isSignedIntN(NewWidth);
3897 for (
auto &Case : SI.cases()) {
3898 APInt TruncatedCase = Case.getCaseValue()->getValue().
trunc(NewWidth);
3899 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
3906 if (
auto *
Select = dyn_cast<SelectInst>(
Cond)) {
3921 for (
const auto &
C : SI.cases()) {
3923 std::min(LeadingKnownZeros,
C.getCaseValue()->getValue().countl_zero());
3925 std::min(LeadingKnownOnes,
C.getCaseValue()->getValue().countl_one());
3928 unsigned NewWidth = Known.
getBitWidth() - std::max(LeadingKnownZeros, LeadingKnownOnes);
3934 if (NewWidth > 0 && NewWidth < Known.
getBitWidth() &&
3935 shouldChangeType(Known.
getBitWidth(), NewWidth)) {
3940 for (
auto Case : SI.cases()) {
3941 APInt TruncatedCase = Case.getCaseValue()->getValue().
trunc(NewWidth);
3942 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
3947 if (isa<UndefValue>(
Cond)) {
3951 if (
auto *CI = dyn_cast<ConstantInt>(
Cond)) {
3953 SI.findCaseValue(CI)->getCaseSuccessor());
3967 const APInt *
C =
nullptr;
3969 if (*EV.
idx_begin() == 0 && (OvID == Intrinsic::smul_with_overflow ||
3970 OvID == Intrinsic::umul_with_overflow)) {
3975 if (
C->isPowerOf2()) {
3976 return BinaryOperator::CreateShl(
3978 ConstantInt::get(WO->getLHS()->getType(),
C->logBase2()));
3986 if (!WO->hasOneUse())
4000 assert(*EV.
idx_begin() == 1 &&
"Unexpected extract index for overflow inst");
4003 if (OvID == Intrinsic::usub_with_overflow)
4008 if (OvID == Intrinsic::smul_with_overflow &&
4009 WO->getLHS()->getType()->isIntOrIntVectorTy(1))
4010 return BinaryOperator::CreateAnd(WO->getLHS(), WO->getRHS());
4013 if (OvID == Intrinsic::umul_with_overflow && WO->getLHS() == WO->getRHS()) {
4014 unsigned BitWidth = WO->getLHS()->getType()->getScalarSizeInBits();
4019 ConstantInt::get(WO->getLHS()->getType(),
4030 WO->getBinaryOp(), *
C, WO->getNoWrapKind());
4035 auto *OpTy = WO->getRHS()->getType();
4036 auto *NewLHS = WO->getLHS();
4040 ConstantInt::get(OpTy, NewRHSC));
4058 const unsigned *exti, *exte, *insi, *inse;
4059 for (exti = EV.
idx_begin(), insi =
IV->idx_begin(),
4060 exte = EV.
idx_end(), inse =
IV->idx_end();
4061 exti != exte && insi != inse;
4075 if (exti == exte && insi == inse)
4108 if (
Instruction *R = foldExtractOfOverflowIntrinsic(EV))
4111 if (
LoadInst *L = dyn_cast<LoadInst>(Agg)) {
4113 if (
auto *STy = dyn_cast<StructType>(Agg->
getType());
4114 STy && STy->isScalableTy())
4122 if (L->isSimple() && L->hasOneUse()) {
4134 L->getPointerOperand(), Indices);
4145 if (
auto *PN = dyn_cast<PHINode>(Agg))
4151 if (
auto *SI = dyn_cast<SelectInst>(Agg))
4168 switch (Personality) {
4198 cast<ArrayType>(
LHS->
getType())->getNumElements()
4200 cast<ArrayType>(
RHS->
getType())->getNumElements();
4212 bool MakeNewInstruction =
false;
4218 bool isLastClause = i + 1 == e;
4226 if (AlreadyCaught.
insert(TypeInfo).second) {
4231 MakeNewInstruction =
true;
4238 MakeNewInstruction =
true;
4239 CleanupFlag =
false;
4258 if (!NumTypeInfos) {
4261 MakeNewInstruction =
true;
4262 CleanupFlag =
false;
4266 bool MakeNewFilter =
false;
4268 if (isa<ConstantAggregateZero>(FilterClause)) {
4270 assert(NumTypeInfos > 0 &&
"Should have handled empty filter already!");
4276 MakeNewInstruction =
true;
4283 if (NumTypeInfos > 1)
4284 MakeNewFilter =
true;
4288 NewFilterElts.
reserve(NumTypeInfos);
4293 bool SawCatchAll =
false;
4294 for (
unsigned j = 0; j != NumTypeInfos; ++j) {
4322 if (SeenInFilter.
insert(TypeInfo).second)
4323 NewFilterElts.
push_back(cast<Constant>(Elt));
4328 MakeNewInstruction =
true;
4333 if (NewFilterElts.
size() < NumTypeInfos)
4334 MakeNewFilter =
true;
4336 if (MakeNewFilter) {
4338 NewFilterElts.
size());
4340 MakeNewInstruction =
true;
4349 if (MakeNewFilter && !NewFilterElts.
size()) {
4350 assert(MakeNewInstruction &&
"New filter but not a new instruction!");
4351 CleanupFlag =
false;
4362 for (
unsigned i = 0, e = NewClauses.
size(); i + 1 < e; ) {
4365 for (j = i; j != e; ++j)
4366 if (!isa<ArrayType>(NewClauses[j]->
getType()))
4372 for (
unsigned k = i; k + 1 < j; ++k)
4376 std::stable_sort(NewClauses.
begin() + i, NewClauses.
begin() + j,
4378 MakeNewInstruction =
true;
4397 for (
unsigned i = 0; i + 1 < NewClauses.
size(); ++i) {
4407 for (
unsigned j = NewClauses.
size() - 1; j != i; --j) {
4408 Value *LFilter = NewClauses[j];
4419 NewClauses.
erase(J);
4420 MakeNewInstruction =
true;
4430 if (isa<ConstantAggregateZero>(LFilter)) {
4433 if (isa<ConstantAggregateZero>(
Filter)) {
4434 assert(FElts <= LElts &&
"Should have handled this case earlier!");
4436 NewClauses.
erase(J);
4437 MakeNewInstruction =
true;
4443 if (isa<ConstantAggregateZero>(
Filter)) {
4446 assert(FElts > 0 &&
"Should have eliminated the empty filter earlier!");
4447 for (
unsigned l = 0; l != LElts; ++l)
4450 NewClauses.
erase(J);
4451 MakeNewInstruction =
true;
4462 bool AllFound =
true;
4463 for (
unsigned f = 0; f != FElts; ++f) {
4466 for (
unsigned l = 0; l != LElts; ++l) {
4468 if (LTypeInfo == FTypeInfo) {
4478 NewClauses.
erase(J);
4479 MakeNewInstruction =
true;
4487 if (MakeNewInstruction) {
4495 if (NewClauses.empty())
4504 assert(!CleanupFlag &&
"Adding a cleanup, not removing one?!");
4529 auto *OrigOpInst = dyn_cast<Instruction>(OrigOp);
4534 if (!OrigOpInst || !OrigOpInst->hasOneUse() || isa<PHINode>(OrigOp))
4548 Use *MaybePoisonOperand =
nullptr;
4549 for (
Use &U : OrigOpInst->operands()) {
4550 if (isa<MetadataAsValue>(U.get()) ||
4553 if (!MaybePoisonOperand)
4554 MaybePoisonOperand = &U;
4559 OrigOpInst->dropPoisonGeneratingAnnotations();
4562 if (!MaybePoisonOperand)
4567 MaybePoisonOperand->get(), MaybePoisonOperand->get()->
getName() +
".fr");
4569 replaceUse(*MaybePoisonOperand, FrozenMaybePoisonOperand);
4580 Use *StartU =
nullptr;
4598 Value *StartV = StartU->get();
4610 if (!Visited.
insert(V).second)
4613 if (Visited.
size() > 32)
4630 I->dropPoisonGeneratingAnnotations();
4632 if (StartNeedsFreeze) {
4644 if (isa<Constant>(
Op) ||
Op->hasOneUse())
4653 if (isa<Argument>(
Op)) {
4657 auto MoveBeforeOpt = cast<Instruction>(
Op)->getInsertionPointAfterDef();
4660 MoveBefore = *MoveBeforeOpt;
4664 if (isa<DbgInfoIntrinsic>(MoveBefore))
4665 MoveBefore = MoveBefore->getNextNonDebugInstruction()->getIterator();
4668 MoveBefore.setHeadBit(
false);
4670 bool Changed =
false;
4671 if (&FI != &*MoveBefore) {
4672 FI.
moveBefore(*MoveBefore->getParent(), MoveBefore);
4676 Op->replaceUsesWithIf(&FI, [&](
Use &U) ->
bool {
4678 Changed |= Dominates;
4687 for (
auto *U : V->users()) {
4688 if (isa<ShuffleVectorInst>(U))
4697 Value *Op0 =
I.getOperand(0);
4703 if (
auto *PN = dyn_cast<PHINode>(Op0)) {
4726 auto getUndefReplacement = [&
I](
Type *Ty) {
4729 for (
const auto *U :
I.users()) {
4738 else if (BestValue !=
C)
4739 BestValue = NullValue;
4741 assert(BestValue &&
"Must have at least one use");
4756 Constant *ReplaceC = getUndefReplacement(
I.getType()->getScalarType());
4771 auto *CB = dyn_cast<CallBase>(
I);
4790 for (
const User *U :
I.users()) {
4791 if (Visited.
insert(U).second)
4796 while (!AllocaUsers.
empty()) {
4797 auto *UserI = cast<Instruction>(AllocaUsers.
pop_back_val());
4798 if (isa<GetElementPtrInst>(UserI) || isa<AddrSpaceCastInst>(UserI)) {
4819 if (isa<PHINode>(
I) ||
I->isEHPad() ||
I->mayThrow() || !
I->willReturn() ||
4827 if (isa<AllocaInst>(
I))
4835 if (
auto *CI = dyn_cast<CallInst>(
I)) {
4836 if (CI->isConvergent())
4842 if (
I->mayWriteToMemory()) {
4849 if (
I->mayReadFromMemory() &&
4850 !
I->hasMetadata(LLVMContext::MD_invariant_load)) {
4857 E =
I->getParent()->end();
4859 if (Scan->mayWriteToMemory())
4863 I->dropDroppableUses([&](
const Use *U) {
4864 auto *
I = dyn_cast<Instruction>(U->getUser());
4865 if (
I &&
I->getParent() != DestBlock) {
4875 I->moveBefore(*DestBlock, InsertPos);
4886 if (!DbgUsers.
empty())
4888 if (!DbgVariableRecords.
empty())
4890 DbgVariableRecords);
4910 for (
auto &DbgUser : DbgUsers)
4911 if (DbgUser->getParent() != DestBlock)
4918 if (DVI->getParent() == SrcBlock)
4921 [](
auto *
A,
auto *
B) {
return B->comesBefore(
A); });
4925 for (
auto *
User : DbgUsersToSink) {
4930 if (isa<DbgDeclareInst>(
User))
4935 User->getDebugLoc()->getInlinedAt());
4937 if (!SunkVariables.
insert(DbgUserVariable).second)
4942 if (isa<DbgAssignIntrinsic>(
User))
4945 DIIClones.emplace_back(cast<DbgVariableIntrinsic>(
User->clone()));
4946 if (isa<DbgDeclareInst>(
User) && isa<CastInst>(
I))
4947 DIIClones.back()->replaceVariableLocationOp(
I,
I->getOperand(0));
4952 if (!DIIClones.empty()) {
4957 DIIClone->insertBefore(&*InsertPos);
4972 for (
auto &DVR : DbgVariableRecords)
4973 if (DVR->getParent() != DestBlock)
4974 DbgVariableRecordsToSalvage.
push_back(DVR);
4980 if (DVR->getParent() == SrcBlock)
4981 DbgVariableRecordsToSink.
push_back(DVR);
4988 return B->getInstruction()->comesBefore(
A->getInstruction());
4995 using InstVarPair = std::pair<const Instruction *, DebugVariable>;
4997 if (DbgVariableRecordsToSink.
size() > 1) {
5003 DVR->getDebugLoc()->getInlinedAt());
5004 CountMap[std::make_pair(DVR->getInstruction(), DbgUserVariable)] += 1;
5010 for (
auto It : CountMap) {
5011 if (It.second > 1) {
5012 FilterOutMap[It.first] =
nullptr;
5013 DupSet.
insert(It.first.first);
5024 DVR.getDebugLoc()->getInlinedAt());
5026 FilterOutMap.
find(std::make_pair(Inst, DbgUserVariable));
5027 if (FilterIt == FilterOutMap.
end())
5029 if (FilterIt->second !=
nullptr)
5031 FilterIt->second = &DVR;
5046 DVR->getDebugLoc()->getInlinedAt());
5050 if (!FilterOutMap.
empty()) {
5051 InstVarPair IVP = std::make_pair(DVR->getInstruction(), DbgUserVariable);
5052 auto It = FilterOutMap.
find(IVP);
5055 if (It != FilterOutMap.
end() && It->second != DVR)
5059 if (!SunkVariables.
insert(DbgUserVariable).second)
5062 if (DVR->isDbgAssign())
5070 if (DVRClones.
empty())
5084 assert(InsertPos.getHeadBit());
5086 InsertPos->getParent()->insertDbgRecordBefore(DVRClone, InsertPos);
5110 if (
I ==
nullptr)
continue;
5125 auto getOptionalSinkBlockForInst =
5126 [
this](
Instruction *
I) -> std::optional<BasicBlock *> {
5128 return std::nullopt;
5132 unsigned NumUsers = 0;
5134 for (
Use &U :
I->uses()) {
5139 return std::nullopt;
5144 if (
PHINode *PN = dyn_cast<PHINode>(UserInst))
5145 UserBB = PN->getIncomingBlock(U);
5149 if (UserParent && UserParent != UserBB)
5150 return std::nullopt;
5151 UserParent = UserBB;
5155 if (NumUsers == 0) {
5159 return std::nullopt;
5171 return std::nullopt;
5181 return std::nullopt;
5186 auto OptBB = getOptionalSinkBlockForInst(
I);
5188 auto *UserParent = *OptBB;
5196 for (
Use &U :
I->operands())
5197 if (
Instruction *OpI = dyn_cast<Instruction>(U.get()))
5205 I, {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
5218 <<
" New = " << *Result <<
'\n');
5223 if (!Result->getDebugLoc())
5224 Result->setDebugLoc(
I->getDebugLoc());
5226 Result->copyMetadata(*
I, LLVMContext::MD_annotation);
5228 I->replaceAllUsesWith(Result);
5231 Result->takeName(
I);
5238 if (isa<PHINode>(Result) != isa<PHINode>(
I)) {
5240 if (isa<PHINode>(
I))
5246 Result->insertInto(InstParent, InsertPos);
5255 <<
" New = " << *
I <<
'\n');
5287 if (!
I->hasMetadataOtherThanDebugLoc())
5290 auto Track = [](
Metadata *ScopeList,
auto &Container) {
5291 const auto *MDScopeList = dyn_cast_or_null<MDNode>(ScopeList);
5292 if (!MDScopeList || !Container.insert(MDScopeList).second)
5294 for (
const auto &
MDOperand : MDScopeList->operands())
5295 if (
auto *MDScope = dyn_cast<MDNode>(
MDOperand))
5296 Container.insert(MDScope);
5299 Track(
I->getMetadata(LLVMContext::MD_alias_scope), UsedAliasScopesAndLists);
5300 Track(
I->getMetadata(LLVMContext::MD_noalias), UsedNoAliasScopesAndLists);
5309 "llvm.experimental.noalias.scope.decl in use ?");
5312 "llvm.experimental.noalias.scope should refer to a single scope");
5314 if (
auto *MD = dyn_cast<MDNode>(
MDOperand))
5315 return !UsedAliasScopesAndLists.
contains(MD) ||
5316 !UsedNoAliasScopesAndLists.
contains(MD);
5340 if (Succ != LiveSucc &&
DeadEdges.insert({BB, Succ}).second)
5341 for (
PHINode &PN : Succ->phis())
5342 for (
Use &U : PN.incoming_values())
5343 if (PN.getIncomingBlock(U) == BB && !isa<PoisonValue>(U)) {
5353 HandleOnlyLiveSuccessor(BB,
nullptr);
5360 if (!Inst.use_empty() &&
5361 (Inst.getNumOperands() == 0 || isa<Constant>(Inst.getOperand(0))))
5365 Inst.replaceAllUsesWith(
C);
5368 Inst.eraseFromParent();
5374 for (
Use &U : Inst.operands()) {
5375 if (!isa<ConstantVector>(U) && !isa<ConstantExpr>(U))
5378 auto *
C = cast<Constant>(U);
5379 Constant *&FoldRes = FoldedConstants[
C];
5385 <<
"\n Old = " << *
C
5386 <<
"\n New = " << *FoldRes <<
'\n');
5395 if (!Inst.isDebugOrPseudoInst()) {
5396 InstrsForInstructionWorklist.
push_back(&Inst);
5397 SeenAliasScopes.
analyse(&Inst);
5405 if (isa<UndefValue>(BI->getCondition())) {
5407 HandleOnlyLiveSuccessor(BB,
nullptr);
5410 if (
auto *
Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
5411 bool CondVal =
Cond->getZExtValue();
5412 HandleOnlyLiveSuccessor(BB, BI->getSuccessor(!CondVal));
5415 }
else if (
SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
5416 if (isa<UndefValue>(SI->getCondition())) {
5418 HandleOnlyLiveSuccessor(BB,
nullptr);
5421 if (
auto *
Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
5422 HandleOnlyLiveSuccessor(BB,
5423 SI->findCaseValue(
Cond)->getCaseSuccessor());
5433 if (LiveBlocks.
count(&BB))
5436 unsigned NumDeadInstInBB;
5437 unsigned NumDeadDbgInstInBB;
5438 std::tie(NumDeadInstInBB, NumDeadDbgInstInBB) =
5441 MadeIRChange |= NumDeadInstInBB + NumDeadDbgInstInBB > 0;
5442 NumDeadInst += NumDeadInstInBB;
5459 Inst->eraseFromParent();
5488 auto &
DL =
F.getDataLayout();
5490 !
F.hasFnAttribute(
"instcombine-no-verify-fixpoint");
5498 if (
auto *Assume = dyn_cast<AssumeInst>(
I))
5506 bool MadeIRChange =
false;
5511 unsigned Iteration = 0;
5517 <<
" on " <<
F.getName()
5518 <<
" reached; stopping without verifying fixpoint\n");
5522 ++NumWorklistIterations;
5523 LLVM_DEBUG(
dbgs() <<
"\n\nINSTCOMBINE ITERATION #" << Iteration <<
" on "
5524 <<
F.getName() <<
"\n");
5527 ORE, BFI, BPI, PSI,
DL, RPOT);
5530 MadeChangeInThisIteration |= IC.
run();
5531 if (!MadeChangeInThisIteration)
5534 MadeIRChange =
true;
5537 "Instruction Combining on " +
Twine(
F.getName()) +
5540 "Use 'instcombine<no-verify-fixpoint>' or function attribute "
5541 "'instcombine-no-verify-fixpoint' to suppress this error.",
5548 else if (Iteration == 2)
5550 else if (Iteration == 3)
5551 ++NumThreeIterations;
5553 ++NumFourOrMoreIterations;
5555 return MadeIRChange;
5563 OS, MapClassName2PassName);
5570char InstCombinePass::ID = 0;
5576 if (LRT.shouldSkip(&
ID))
5589 auto *BFI = (PSI && PSI->hasProfileSummary()) ?
5594 BFI, BPI, PSI, Options)) {
5596 LRT.update(&
ID,
false);
5602 LRT.update(&
ID,
true);
5629 auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
5630 auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
F);
5631 auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
F);
5632 auto &
TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
F);
5633 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
5634 auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
5638 &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
5641 &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI() :
5644 if (
auto *WrapperPass =
5645 getAnalysisIfAvailable<BranchProbabilityInfoWrapperPass>())
5646 BPI = &WrapperPass->getBPI();
5659 "Combine redundant instructions",
false,
false)
AMDGPU Register Bank Select
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
static const Function * getParent(const Value *V)
This is the interface for LLVM's primary stateless and local alias analysis.
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file provides an implementation of debug counters.
#define DEBUG_COUNTER(VARNAME, COUNTERNAME, DESC)
This file defines the DenseMap class.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static bool isSigned(unsigned int Opcode)
This is the interface for a simple mod/ref and alias analysis over globals.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
iv Induction Variable Users
static bool leftDistributesOverRight(Instruction::BinaryOps LOp, bool HasNUW, bool HasNSW, Intrinsic::ID ROp)
Return whether "X LOp (Y ROp Z)" is always equal to "(X LOp Y) ROp (X LOp Z)".
This file provides internal interfaces used to implement the InstCombine.
This file provides the primary interface to the instcombine pass.
static Value * simplifySwitchOnSelectUsingRanges(SwitchInst &SI, SelectInst *Select, bool IsTrueArm)
static bool isUsedWithinShuffleVector(Value *V)
static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI, Instruction *AI)
static bool shorter_filter(const Value *LHS, const Value *RHS)
static Instruction * foldSelectGEP(GetElementPtrInst &GEP, InstCombiner::BuilderTy &Builder)
Thread a GEP operation with constant indices through the constant true/false arms of a select.
static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src)
static cl::opt< unsigned > MaxArraySize("instcombine-maxarray-size", cl::init(1024), cl::desc("Maximum array size considered when doing a combine"))
static cl::opt< unsigned > ShouldLowerDbgDeclare("instcombine-lower-dbg-declare", cl::Hidden, cl::init(true))
static bool hasNoSignedWrap(BinaryOperator &I)
static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1, InstCombinerImpl &IC)
Combine constant operands of associative operations either before or after a cast to eliminate one of...
static bool combineInstructionsOverFunction(Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA, AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI, DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI, BranchProbabilityInfo *BPI, ProfileSummaryInfo *PSI, const InstCombineOptions &Opts)
static Value * simplifyInstructionWithPHI(Instruction &I, PHINode *PN, Value *InValue, BasicBlock *InBB, const DataLayout &DL, const SimplifyQuery SQ)
static bool shouldCanonicalizeGEPToPtrAdd(GetElementPtrInst &GEP)
Return true if we should canonicalize the gep to an i8 ptradd.
static void ClearSubclassDataAfterReassociation(BinaryOperator &I)
Conservatively clears subclassOptionalData after a reassociation or commutation.
static bool isAllocSiteRemovable(Instruction *AI, SmallVectorImpl< WeakTrackingVH > &Users, const TargetLibraryInfo &TLI)
static Value * getIdentityValue(Instruction::BinaryOps Opcode, Value *V)
This function returns identity value for given opcode, which can be used to factor patterns like (X *...
static std::optional< std::pair< Value *, Value * > > matchSymmetricPhiNodesPair(PHINode *LHS, PHINode *RHS)
static Value * foldOperationIntoSelectOperand(Instruction &I, SelectInst *SI, Value *NewOp, InstCombiner &IC)
static Instruction * canonicalizeGEPOfConstGEPI8(GetElementPtrInst &GEP, GEPOperator *Src, InstCombinerImpl &IC)
static Instruction * tryToMoveFreeBeforeNullTest(CallInst &FI, const DataLayout &DL)
Move the call to free before a NULL test.
static Value * simplifyOperationIntoSelectOperand(Instruction &I, SelectInst *SI, bool IsTrueArm)
static bool rightDistributesOverLeft(Instruction::BinaryOps LOp, Instruction::BinaryOps ROp)
Return whether "(X LOp Y) ROp Z" is always equal to "(X ROp Z) LOp (Y ROp Z)".
static Value * tryFactorization(BinaryOperator &I, const SimplifyQuery &SQ, InstCombiner::BuilderTy &Builder, Instruction::BinaryOps InnerOpcode, Value *A, Value *B, Value *C, Value *D)
This tries to simplify binary operations by factorizing out common terms (e.
static bool isRemovableWrite(CallBase &CB, Value *UsedV, const TargetLibraryInfo &TLI)
Given a call CB which uses an address UsedV, return true if we can prove the call's only possible eff...
static Instruction::BinaryOps getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op, Value *&LHS, Value *&RHS, BinaryOperator *OtherOp)
This function predicates factorization using distributive laws.
static bool hasNoUnsignedWrap(BinaryOperator &I)
static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI)
Check for case where the call writes to an otherwise dead alloca.
static cl::opt< unsigned > MaxSinkNumUsers("instcombine-max-sink-users", cl::init(32), cl::desc("Maximum number of undroppable users for instruction sinking"))
static Instruction * foldGEPOfPhi(GetElementPtrInst &GEP, PHINode *PN, IRBuilderBase &Builder)
static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo)
Return 'true' if the given typeinfo will match anything.
static cl::opt< bool > EnableCodeSinking("instcombine-code-sinking", cl::desc("Enable code sinking"), cl::init(true))
static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C)
static GEPNoWrapFlags getMergedGEPNoWrapFlags(GEPOperator &GEP1, GEPOperator &GEP2)
Determine nowrap flags for (gep (gep p, x), y) to (gep p, (x + y)) transform.
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
static bool IsSelect(MachineInstr &MI)
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static SymbolRef::Type getType(const Symbol *Sym)
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
static const uint32_t IV[8]
bool isNoAliasScopeDeclDead(Instruction *Inst)
void analyse(Instruction *I)
A manager for alias analyses.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
static void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Dual division/remainder interface.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
static void sdivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
APInt trunc(unsigned width) const
Truncate to new width.
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
unsigned getBitWidth() const
Return the number of bits in the APInt.
APInt sadd_ov(const APInt &RHS, bool &Overflow) const
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
APInt ssub_ov(const APInt &RHS, bool &Overflow) const
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
A container for analyses that lazily runs them and caches their results.
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
void setPreservesCFG()
This function should be called by the pass, iff they do not:
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
Class to represent array types.
uint64_t getNumElements() const
static ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
Type * getElementType() const
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
uint64_t getDereferenceableBytes() const
Returns the number of dereferenceable bytes from the dereferenceable attribute.
bool isValid() const
Return true if the attribute is any kind of attribute.
Legacy wrapper pass to provide the BasicAAResult object.
LLVM Basic Block Representation.
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
iterator_range< filter_iterator< BasicBlock::const_iterator, std::function< bool(const Instruction &)> > > instructionsWithoutDebug(bool SkipPseudoOp=true) const
Return a const iterator range over the instructions in the block, skipping any debug instructions.
InstListType::const_iterator getFirstNonPHIIt() const
Iterator returning form of getFirstNonPHI.
const Instruction & front() const
bool isEntryBlock() const
Return true if this is the entry block of the containing function.
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
InstListType::iterator iterator
Instruction iterators...
const_iterator getFirstNonPHIOrDbgOrAlloca() const
Returns an iterator to the first instruction in this block that is not a PHINode, a debug intrinsic,...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
static BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
BinaryOps getOpcode() const
static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
static BinaryOperator * CreateNUW(BinaryOps Opc, Value *V1, Value *V2, const Twine &Name="")
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Conditional or Unconditional Branch instruction.
void swapSuccessors()
Swap the successors of this branch instruction.
bool isConditional() const
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
Analysis pass which computes BranchProbabilityInfo.
Analysis providing branch probability information.
void swapSuccEdgesProbabilities(const BasicBlock *Src)
Swap outgoing edges probabilities for Src with branch terminator.
Represents analyses that only rely on functions' control flow.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
void setAttributes(AttributeList A)
Set the attributes for this call.
bool doesNotThrow() const
Determine if the call cannot unwind.
Value * getArgOperand(unsigned i) const
AttributeList getAttributes() const
Return the attributes for this call.
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_UGT
unsigned greater than
@ ICMP_ULT
unsigned less than
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
ConstantArray - Constant Array Declarations.
static Constant * get(ArrayType *T, ArrayRef< Constant * > V)
A vector constant whose element type is a simple 1/2/4/8-byte integer or float/double,...
static Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static Constant * getNot(Constant *C)
static Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static Constant * getBinOpIdentity(unsigned Opcode, Type *Ty, bool AllowRHSConstant=false, bool NSZ=false)
Return the identity constant for a binary opcode.
static Constant * getNeg(Constant *C, bool HasNSW=false)
This is the shared class of boolean and integer constants.
static ConstantInt * getTrue(LLVMContext &Context)
static ConstantInt * getFalse(LLVMContext &Context)
static ConstantInt * getBool(LLVMContext &Context, bool V)
This class represents a range of values.
bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
static ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
bool contains(const APInt &Val) const
Return true if the specified value is in the set.
static ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
Constant Vector Declarations.
static Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
static Constant * getIntegerValue(Type *Ty, const APInt &V)
Return the value for an integer or pointer constant, or a vector thereof, with the given scalar value...
static Constant * replaceUndefsWith(Constant *C, Constant *Replacement)
Try to replace undefined constant C or undefined elements in C with Replacement.
static Constant * getAllOnesValue(Type *Ty)
const Constant * stripPointerCasts() const
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
SmallVector< APInt > getGEPIndicesForOffset(Type *&ElemTy, APInt &Offset) const
Get GEP indices to access Offset inside ElemTy.
bool isLegalInteger(uint64_t Width) const
Returns true if the specified type is known to be a native integer type supported by the CPU.
unsigned getIndexTypeSizeInBits(Type *Ty) const
Layout size of the index used in GEP calculation.
IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of a GEP index in AddressSpace.
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
unsigned getIndexSizeInBits(unsigned AS) const
Size in bits of index used for address calculation in getelementptr.
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
int64_t getIndexedOffsetInType(Type *ElemTy, ArrayRef< Value * > Indices) const
Returns the offset from the beginning of the type for the specified indices.
This is the common base class for debug info intrinsics for variables.
Record of a variable value-assignment, aka a non instruction representation of the dbg....
static bool shouldExecute(unsigned CounterName)
Identifies a unique instance of a variable.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
void registerBranch(BranchInst *BI)
Add a branch condition to the cache.
Analysis pass which computes a DominatorTree.
Legacy analysis pass which computes a DominatorTree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Utility class for floating point operations which can have information about relaxed accuracy require...
Convenience struct for specifying and reasoning about fast-math flags.
This class represents a freeze function that returns a random concrete value if an operand is either a ...
FunctionPass class - This class is used to implement most global optimizations.
bool skipFunction(const Function &F) const
Optional passes call this function to check whether the pass should be skipped.
const BasicBlock & getEntryBlock() const
Represents flags for the getelementptr instruction/expression.
GEPNoWrapFlags withoutNoUnsignedSignedWrap() const
static GEPNoWrapFlags noUnsignedWrap()
GEPNoWrapFlags intersectForOffsetAdd(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep p, x+y).
GEPNoWrapFlags withoutNoUnsignedWrap() const
GEPNoWrapFlags getNoWrapFlags() const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
static Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static GetElementPtrInst * CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Create an "inbounds" getelementptr.
Legacy wrapper pass to provide the GlobalsAAResult object.
This instruction compares its operands according to the predicate given to the constructor.
CmpPredicate getCmpPredicate() const
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
Common base class shared among various IRBuilders.
Value * CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, Instruction *FMFSource=nullptr, const Twine &Name="")
Create a call to intrinsic ID with 2 operands which is mangled on the first type.
Value * CreateLogicalOp(Instruction::BinaryOps Opc, Value *Cond1, Value *Cond2, const Twine &Name="")
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, Instruction *FMFSource=nullptr, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
Value * CreateSExt(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateFreeze(Value *V, const Twine &Name="")
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
void setFastMathFlags(FastMathFlags NewFMF)
Set the fast-math flags to be used with generated fp-math operators.
Value * CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="")
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
void CollectMetadataToCopy(Instruction *Src, ArrayRef< unsigned > MetadataKinds)
Collect metadata with IDs MetadataKinds from Src which should be added to all created instructions.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Value * CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Value * CreateNot(Value *V, const Twine &Name="")
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Value * CreateAShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateLogicalOr(Value *Cond1, Value *Cond2, const Twine &Name="")
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
ConstantInt * getInt(const APInt &AI)
Get a constant integer value.
Provides an 'InsertHelper' that calls a user-provided callback after performing the default insertion...
This instruction inserts a struct field or array element value into an aggregate value.
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
InstCombinePass(InstCombineOptions Opts={})
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false)
Given an instruction with a select as one operand and a constant as the other operand,...
Instruction * foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I)
Tries to simplify binops of select and cast of the select condition.
Instruction * foldBinOpIntoSelectOrPhi(BinaryOperator &I)
This is a convenience wrapper function for the above two functions.
bool SimplifyAssociativeOrCommutative(BinaryOperator &I)
Performs a few simplifications for operators which are associative or commutative.
Instruction * visitGEPOfGEP(GetElementPtrInst &GEP, GEPOperator *Src)
Value * foldUsingDistributiveLaws(BinaryOperator &I)
Tries to simplify binary operations which some other binary operation distributes over.
Instruction * foldBinOpShiftWithShift(BinaryOperator &I)
Instruction * visitUnreachableInst(UnreachableInst &I)
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...
void handleUnreachableFrom(Instruction *I, SmallVectorImpl< BasicBlock * > &Worklist)
Value * SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &PoisonElts, unsigned Depth=0, bool AllowMultipleUsers=false) override
The specified value produces a vector with any number of elements.
Instruction * visitFreeze(FreezeInst &I)
void handlePotentiallyDeadBlocks(SmallVectorImpl< BasicBlock * > &Worklist)
bool prepareWorklist(Function &F)
Perform early cleanup and prepare the InstCombine worklist.
Instruction * visitFree(CallInst &FI, Value *FreedOp)
Instruction * visitExtractValueInst(ExtractValueInst &EV)
void handlePotentiallyDeadSuccessors(BasicBlock *BB, BasicBlock *LiveSucc)
Instruction * visitUnconditionalBranchInst(BranchInst &BI)
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * visitLandingPadInst(LandingPadInst &LI)
Instruction * visitReturnInst(ReturnInst &RI)
Instruction * visitSwitchInst(SwitchInst &SI)
Instruction * foldBinopWithPhiOperands(BinaryOperator &BO)
For a binary operator with 2 phi operands, try to hoist the binary operation before the phi.
Constant * getLosslessTrunc(Constant *C, Type *TruncTy, unsigned ExtOp)
Value * SimplifyDemandedUseFPClass(Value *V, FPClassTest DemandedMask, KnownFPClass &Known, unsigned Depth, Instruction *CxtI)
Attempts to replace V with a simpler value based on the demanded floating-point classes.
bool mergeStoreIntoSuccessor(StoreInst &SI)
Try to transform: if () { *P = v1; } else { *P = v2 } or: *P = v1; if () { *P = v2; } into a phi node...
Instruction * tryFoldInstWithCtpopWithNot(Instruction *I)
void tryToSinkInstructionDbgValues(Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock, BasicBlock *DestBlock, SmallVectorImpl< DbgVariableIntrinsic * > &DbgUsers)
void CreateNonTerminatorUnreachable(Instruction *InsertAt)
Create and insert the idiom we use to indicate a block is unreachable without having to rewrite the C...
Value * pushFreezeToPreventPoisonFromPropagating(FreezeInst &FI)
bool run()
Run the combiner over the entire worklist until it is empty.
Instruction * foldVectorBinop(BinaryOperator &Inst)
Canonicalize the position of binops relative to shufflevector.
bool removeInstructionsBeforeUnreachable(Instruction &I)
Value * SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS, Value *RHS)
void tryToSinkInstructionDbgVariableRecords(Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock, BasicBlock *DestBlock, SmallVectorImpl< DbgVariableRecord * > &DPUsers)
void addDeadEdge(BasicBlock *From, BasicBlock *To, SmallVectorImpl< BasicBlock * > &Worklist)
Instruction * visitAllocSite(Instruction &FI)
Instruction * visitGetElementPtrInst(GetElementPtrInst &GEP)
Instruction * visitBranchInst(BranchInst &BI)
Value * tryFactorizationFolds(BinaryOperator &I)
This tries to simplify binary operations by factorizing out common terms (e.
Instruction * foldFreezeIntoRecurrence(FreezeInst &I, PHINode *PN)
bool tryToSinkInstruction(Instruction *I, BasicBlock *DestBlock)
Try to move the specified instruction from its current block into the beginning of DestBlock,...
bool freezeOtherUses(FreezeInst &FI)
void freelyInvertAllUsersOf(Value *V, Value *IgnoredUser=nullptr)
Freely adapt every user of V as-if V was changed to !V.
The core instruction combiner logic.
const DataLayout & getDataLayout() const
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
static unsigned getComplexity(Value *V)
Assign a complexity or rank value to LLVM Values.
Instruction * InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old)
Inserts an instruction New before instruction Old.
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
uint64_t MaxArraySizeForCombine
Maximum size of array considered when transforming.
static bool shouldAvoidAbsorbingNotIntoSelect(const SelectInst &SI)
void replaceUse(Use &U, Value *NewValue)
Replace use and add the previously used value to the worklist.
static bool isCanonicalPredicate(CmpPredicate Pred)
Predicate canonicalization reduces the number of patterns that need to be matched by other transforms...
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
Instruction * InsertNewInstWith(Instruction *New, BasicBlock::iterator Old)
Same as InsertNewInstBefore, but also sets the debug loc.
BranchProbabilityInfo * BPI
ReversePostOrderTraversal< BasicBlock * > & RPOT
unsigned ComputeNumSignBits(const Value *Op, unsigned Depth=0, const Instruction *CxtI=nullptr) const
std::optional< Instruction * > targetInstCombineIntrinsic(IntrinsicInst &II)
void addToWorklist(Instruction *I)
Value * getFreelyInvertedImpl(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume, unsigned Depth)
Return nonnull value if V is free to invert under the condition of WillInvertAllUses.
SmallDenseSet< std::pair< const BasicBlock *, const BasicBlock * >, 8 > BackEdges
Backedges, used to avoid pushing instructions across backedges in cases where this may result in infi...
std::optional< Value * > targetSimplifyDemandedVectorEltsIntrinsic(IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp)
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
static Constant * getSafeVectorConstantForBinop(BinaryOperator::BinaryOps Opcode, Constant *In, bool IsRHSConstant)
Some binary operators require special handling to avoid poison and undefined behavior.
SmallDenseSet< std::pair< BasicBlock *, BasicBlock * >, 8 > DeadEdges
Edges that are known to never be taken.
std::optional< Value * > targetSimplifyDemandedUseBitsIntrinsic(IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed)
void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth, const Instruction *CxtI) const
bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
bool isBackEdge(const BasicBlock *From, const BasicBlock *To)
void visit(Iterator Start, Iterator End)
The legacy pass manager's instcombine pass.
InstructionCombiningPass()
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass.
InstructionWorklist - This is the worklist management logic for InstCombine and other simplification ...
Instruction * removeOne()
void pushUsersToWorkList(Instruction &I)
When an instruction is simplified, add all users of the instruction to the work lists because they mi...
void add(Instruction *I)
Add instruction to the worklist.
void push(Instruction *I)
Push the instruction onto the worklist stack.
Instruction * popDeferred()
void zap()
Check that the worklist is empty and nuke the backing store for the map.
void reserve(size_t Size)
static bool isBitwiseLogicOp(unsigned Opcode)
Determine if the Opcode is and/or/xor.
void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
void setAAMetadata(const AAMDNodes &N)
Sets the AA metadata on this instruction from the AAMDNodes structure.
bool isAssociative() const LLVM_READONLY
Return true if the instruction is associative:
bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
void setFastMathFlags(FastMathFlags FMF)
Convenience function for setting multiple fast-math flags on this instruction, which must be an opera...
const Function * getFunction() const
Return the function this instruction belongs to.
bool isTerminator() const
void dropUBImplyingAttrsAndMetadata()
Drop any attributes or metadata that can cause immediate undefined behavior.
FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
bool willReturn() const LLVM_READONLY
Return true if the instruction will return (unwinding is considered as a form of returning control fl...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
bool isBitwiseLogicOp() const
Return true if this is and/or/xor.
void dropPoisonGeneratingFlags()
Drops flags that may cause this instruction to evaluate to poison despite having non-poison inputs.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
void moveBefore(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
Class to represent integer types.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A wrapper class for inspecting calls to intrinsic functions.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
static LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...
void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
bool isCatch(unsigned Idx) const
Return 'true' if the clause and index Idx is a catch clause.
bool isFilter(unsigned Idx) const
Return 'true' if the clause and index Idx is a filter clause.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
A function/module analysis which provides an empty LastRunTrackingInfo.
This is an alternative analysis pass to BlockFrequencyInfoWrapperPass.
static void getLazyBFIAnalysisUsage(AnalysisUsage &AU)
Helper for client passes to set up the analysis usage on behalf of this pass.
An instruction for reading from memory.
const MDOperand & getOperand(unsigned I) const
unsigned getNumOperands() const
Return number of MDNode operands.
Tracking metadata reference owned by Metadata.
This is the common base class for memset/memcpy/memmove.
static MemoryLocation getForDest(const MemIntrinsic *MI)
Return a location representing the destination of a memory set or transfer.
This class represents min/max intrinsics.
static ICmpInst::Predicate getPredicate(Intrinsic::ID ID)
Returns the comparison predicate underlying the intrinsic.
A Module instance is used to store all the information related to an LLVM module.
MDNode * getScopeList() const
An analysis over an "inner" IR unit that provides access to an analysis manager over a "outer" IR uni...
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
PassRegistry - This class manages the registration and initialization of the pass subsystem as appli...
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
In order to facilitate speculative execution, many instructions do not invoke immediate undefined beh...
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
void preserveSet()
Mark an analysis set as preserved.
void preserve()
Mark an analysis as preserved.
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
An analysis pass based on legacy pass manager to deliver ProfileSummaryInfo.
Analysis providing profile information.
bool hasProfileSummary() const
Returns true if profile summary is available.
A global registry used in conjunction with static constructors to make pluggable components (like tar...
Return a value (possibly void), from a function.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
This class represents a cast from signed integer to floating point.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)
This instruction constructs a fixed permutation of two input vectors.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
typename SuperClass::iterator iterator
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
TargetFolder - Create constants with target dependent folding.
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
bool has(LibFunc F) const
Tests whether a library function is available.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
const fltSemantics & getFltSemantics() const
bool isVectorTy() const
True if this is an instance of VectorType.
static IntegerType * getInt1Ty(LLVMContext &C)
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isStructTy() const
True if this is an instance of StructType.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
static IntegerType * getInt32Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
This class represents a cast unsigned integer to floating point.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
bool isDroppable() const
A droppable user is a user for which uses can be dropped without affecting correctness and should be ...
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
bool hasOneUser() const
Return true if there is exactly one user of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
iterator_range< user_iterator > users()
bool hasNUses(unsigned N) const
Return true if this Value has exactly N uses.
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
LLVMContext & getContext() const
All values hold a context through their type.
uint64_t getPointerDereferenceableBytes(const DataLayout &DL, bool &CanBeNull, bool &CanBeFreed) const
Returns the number of bytes known to be dereferenceable for the pointer value.
StringRef getName() const
Return a constant reference to the value's name.
void takeName(Value *V)
Transfer the name from V to this value.
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
constexpr ScalarTy getFixedValue() const
constexpr bool isZero() const
An efficient, type-erasing, non-owning reference to a callable.
Type * getIndexedType() const
const ParentTy * getParent() const
reverse_self_iterator getReverseIterator()
self_iterator getIterator()
This class implements an extremely fast bulk output stream that can only output to a stream.
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
bool isNoFPClassCompatibleType(Type *Ty)
Returns true if this is a type legal for the 'nofpclass' attribute.
@ C
The default llvm calling convention, compatible with C.
Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
class_match< PoisonValue > m_Poison()
Match an arbitrary poison constant.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
PtrAdd_match< PointerOpTy, OffsetOpTy > m_PtrAdd(const PointerOpTy &PointerOp, const OffsetOpTy &OffsetOp)
Matches GEP with i8 source element type.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
CmpClass_match< LHS, RHS, FCmpInst > m_FCmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
br_match m_UnconditionalBr(BasicBlock *&Succ)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
DisjointOr_match< LHS, RHS > m_DisjointOr(const LHS &L, const RHS &R)
constantexpr_match m_ConstantExpr()
Match a constant expression or a constant that contains a constant expression.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
apint_match m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
OneUse_match< T > m_OneUse(const T &SubPattern)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
BinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub > m_Neg(const ValTy &V)
Matches a 'Neg' as 'sub 0, V'.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
match_combine_and< class_match< Constant >, match_unless< constantexpr_match > > m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
match_combine_or< CastInst_match< OpTy, SExtInst >, NNegZExt_match< OpTy > > m_SExtLike(const OpTy &Op)
Match either "sext" or "zext nneg".
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap > m_NSWAdd(const LHS &L, const RHS &R)
CastInst_match< OpTy, SIToFPInst > m_SIToFP(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
cstfp_pred_ty< is_non_zero_fp > m_NonZeroFP()
Match a floating-point non-zero.
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
void stable_sort(R &&Range)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Value * simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef< Value * > Indices, GEPNoWrapFlags NW, const SimplifyQuery &Q)
Given operands for a GetElementPtrInst, fold the result or return null.
bool succ_empty(const Instruction *I)
Value * simplifyFreezeInst(Value *Op, const SimplifyQuery &Q)
Given an operand for a Freeze, see if we can fold the result.
FunctionPass * createInstructionCombiningPass()
bool isSafeToSpeculativelyExecuteWithVariableReplaced(const Instruction *I)
Don't use information from its non-constant operands.
std::pair< unsigned, unsigned > removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB)
Remove all instructions from a basic block other than its terminator and any present EH pad instructions.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
void salvageDebugInfoForDbgValues(Instruction &I, ArrayRef< DbgVariableIntrinsic * > Insns, ArrayRef< DbgVariableRecord * > DPInsns)
Implementation of salvageDebugInfo, applying only to instructions in Insns, rather than all debug users of I.
void findDbgUsers(SmallVectorImpl< DbgVariableIntrinsic * > &DbgInsts, Value *V, SmallVectorImpl< DbgVariableRecord * > *DbgVariableRecords=nullptr)
Finds the debug info intrinsics describing a value.
void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing the effect of MI in a debug instruction.
auto successors(const MachineBasicBlock *BB)
bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI)
Return true if this is a call to an allocation function that does not have side effects that we are required to preserve beyond the effect of allocating a new object.
std::optional< StringRef > getAllocationFamily(const Value *I, const TargetLibraryInfo *TLI)
If a function is part of an allocation family (e.g. malloc/realloc), return a unique identifier for it.
Value * lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL, const TargetLibraryInfo *TLI, bool MustSucceed)
Try to turn a call to @llvm.objectsize into an integer value of the given Type.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &Q)
Like simplifyInstruction but the operands of I are replaced with NewOps.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal.address from the specified value, returning the original object being addressed.
Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iteration.
gep_type_iterator gep_type_end(const User *GEP)
Value * getReallocatedOperand(const CallBase *CB)
If this is a call to a realloc function, return the reallocated operand.
bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates memory (either malloc,...
bool handleUnreachableTerminator(Instruction *I, SmallVectorImpl< Value * > &PoisonedValues)
If a terminator in an unreachable basic block has an operand of type Instruction, transform it into poison.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
constexpr bool has_single_bit(T Value) noexcept
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned element.
Value * emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL, User *GEP, bool NoAssumptions=false)
Given a getelementptr instruction/constantexpr, emit the code necessary to compute the offset from th...
constexpr unsigned MaxAnalysisRecursionDepth
auto reverse(ContainerTy &&C)
void sort(IteratorTy Start, IteratorTy End)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
bool LowerDbgDeclare(Function &F)
Lowers llvm.dbg.declare intrinsics into appropriate set of llvm.dbg.value intrinsics.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
void ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII, StoreInst *SI, DIBuilder &Builder)
Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value that has an associated llvm.dbg.declare intrinsic.
Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison operands.
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an ExtractValueInst, fold the result or return null.
Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
bool replaceAllDbgUsesWith(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT)
Point debug users of From to To or salvage them.
bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr int PoisonMaskElem
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
@ Or
Bitwise or logical OR of integers.
DWARFExpression::Operation Op
Constant * ConstantFoldInstruction(Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstruction - Try to constant fold the specified instruction.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)
If this if a call to a free function, return the freed operand.
constexpr unsigned BitWidth
bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one of its successors.
gep_type_iterator gep_type_begin(const User *GEP)
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
bool equal(L &&LRange, R &&RRange)
Wrapper function around std::equal to detect if pair-wise elements between two ranges are the same.
bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the give value is known to be non-negative.
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
void initializeInstCombine(PassRegistry &)
Initialize all passes linked into the InstCombine library.
void initializeInstructionCombiningPassPass(PassRegistry &)
Constant * ConstantFoldBinaryInstruction(unsigned Opcode, Constant *V1, Constant *V2)
std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static unsigned int semanticsPrecision(const fltSemantics &)
unsigned countMinLeadingOnes() const
Returns the minimum number of leading one bits.
unsigned getBitWidth() const
Get the bit width of this value.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
A CRTP mix-in to automatically provide informational APIs needed for passes.
SimplifyQuery getWithInstruction(const Instruction *I) const
SimplifyQuery getWithoutUndef() const