26#define DEBUG_TYPE "instcombine"
30 cl::desc(
"Verify that computeKnownBits() and "
31 "SimplifyDemandedBits() are consistent"),
35 "instcombine-simplify-vector-elts-depth",
37 "Depth limit when simplifying vector instructions and their operands"),
44 const APInt &Demanded) {
46 assert(OpNo < I->getNumOperands() &&
"Operand index too large");
55 if (
C->isSubsetOf(Demanded))
59 I->setOperand(OpNo, ConstantInt::get(
Op->getType(), *
C & Demanded));
71 const APInt &DemandedMask,
74 assert(
I->getOpcode() == Instruction::LShr &&
75 "Only lshr instruction supported");
79 if (!
match(
I->getOperand(0),
89 if (DemandedBitWidth > ShlAmt)
93 if (
Upper->getType()->getScalarSizeInBits() < ShlAmt + DemandedBitWidth)
100 Value *ShrAmt =
I->getOperand(1);
105 if (~KnownShrBits.
Zero != ShlAmt)
124 if (
unsigned BitWidth = Ty->getScalarSizeInBits())
127 return DL.getPointerTypeSizeInBits(Ty);
136 SQ.getWithInstruction(&Inst));
137 if (!V)
return false;
138 if (V == &Inst)
return true;
167 const APInt &DemandedMask,
171 Use &U =
I->getOperandUse(OpNo);
179 if (DemandedMask.
isZero()) {
204 if (!NewVal)
return false;
236 const APInt &DemandedMask,
240 assert(
I !=
nullptr &&
"Null pointer of Value???");
243 Type *VTy =
I->getType();
247 "Value *V, DemandedMask and Known must have same BitWidth");
253 auto disableWrapFlagsBasedOnUnusedHighBits = [](
Instruction *
I,
259 I->setHasNoSignedWrap(
false);
260 I->setHasNoUnsignedWrap(
false);
267 auto simplifyOperandsBasedOnUnusedHighBits = [&](
APInt &DemandedFromOps) {
276 disableWrapFlagsBasedOnUnusedHighBits(
I, NLZ);
282 switch (
I->getOpcode()) {
286 case Instruction::And: {
304 return I->getOperand(0);
306 return I->getOperand(1);
314 case Instruction::Or: {
320 I->dropPoisonGeneratingFlags();
335 return I->getOperand(0);
337 return I->getOperand(1);
346 RHSCache(
I->getOperand(1), RHSKnown);
355 case Instruction::Xor: {
360 if (DemandedMask == 1 &&
367 return Builder.CreateUnaryIntrinsic(Intrinsic::ctpop,
Xor);
381 return I->getOperand(0);
383 return I->getOperand(1);
390 BinaryOperator::CreateOr(
I->getOperand(0),
I->getOperand(1));
404 ~RHSKnown.
One & DemandedMask);
414 if ((*
C | ~DemandedMask).isAllOnes()) {
430 if (LHSInst->getOpcode() == Instruction::And && LHSInst->hasOneUse() &&
433 (LHSKnown.One & RHSKnown.
One & DemandedMask) != 0) {
434 APInt NewMask = ~(LHSKnown.One & RHSKnown.
One & DemandedMask);
437 Instruction *NewAnd = BinaryOperator::CreateAnd(
I->getOperand(0), AndC);
441 Instruction *NewXor = BinaryOperator::CreateXor(NewAnd, XorC);
447 case Instruction::Select: {
457 auto CanonicalizeSelectConstant = [](
Instruction *
I,
unsigned OpNo,
458 const APInt &DemandedMask) {
478 if ((*CmpC & DemandedMask) == (*SelC & DemandedMask)) {
479 I->setOperand(OpNo, ConstantInt::get(
I->getType(), *CmpC));
484 if (CanonicalizeSelectConstant(
I, 1, DemandedMask) ||
485 CanonicalizeSelectConstant(
I, 2, DemandedMask))
496 case Instruction::Trunc: {
510 return Builder.CreateLShr(Trunc,
C->getZExtValue());
515 case Instruction::ZExt: {
516 unsigned SrcBitWidth =
I->getOperand(0)->getType()->getScalarSizeInBits();
524 I->dropPoisonGeneratingFlags();
528 if (
I->getOpcode() == Instruction::ZExt &&
I->hasNonNeg() &&
535 case Instruction::SExt: {
537 unsigned SrcBitWidth =
I->getOperand(0)->getType()->getScalarSizeInBits();
539 APInt InputDemandedBits = DemandedMask.
trunc(SrcBitWidth);
544 InputDemandedBits.
setBit(SrcBitWidth-1);
565 case Instruction::Add: {
566 if ((DemandedMask & 1) == 0) {
572 X->getType()->isIntOrIntVectorTy(1) &&
X->getType() ==
Y->getType()) {
582 return Builder.CreateSExt(AndNot, VTy);
587 X->getType()->isIntOrIntVectorTy(1) &&
X->getType() ==
Y->getType() &&
588 (
I->getOperand(0)->hasOneUse() ||
I->getOperand(1)->hasOneUse())) {
609 return disableWrapFlagsBasedOnUnusedHighBits(
I, NLZ);
615 APInt DemandedFromLHS = DemandedFromOps;
619 return disableWrapFlagsBasedOnUnusedHighBits(
I, NLZ);
621 unsigned NtzLHS = (~DemandedMask & LHSKnown.Zero).
countr_one();
622 APInt DemandedFromRHS = DemandedFromOps;
625 return disableWrapFlagsBasedOnUnusedHighBits(
I, NLZ);
630 return I->getOperand(0);
631 if (DemandedFromOps.
isSubsetOf(LHSKnown.Zero))
632 return I->getOperand(1);
641 return Builder.CreateXor(
I->getOperand(0), ConstantInt::get(VTy, *
C));
651 case Instruction::Sub: {
658 return disableWrapFlagsBasedOnUnusedHighBits(
I, NLZ);
664 APInt DemandedFromLHS = DemandedFromOps;
668 return disableWrapFlagsBasedOnUnusedHighBits(
I, NLZ);
673 return I->getOperand(0);
676 if (DemandedFromOps.
isOne() && DemandedFromOps.
isSubsetOf(LHSKnown.Zero))
677 return I->getOperand(1);
685 return Builder.CreateNot(
I->getOperand(1));
694 case Instruction::Mul: {
695 APInt DemandedFromOps;
696 if (simplifyOperandsBasedOnUnusedHighBits(DemandedFromOps))
706 Constant *ShiftC = ConstantInt::get(VTy, CTZ);
707 Instruction *Shl = BinaryOperator::CreateShl(
I->getOperand(0), ShiftC);
714 if (
I->getOperand(0) ==
I->getOperand(1) && DemandedMask.
ult(4)) {
715 Constant *One = ConstantInt::get(VTy, 1);
716 Instruction *And1 = BinaryOperator::CreateAnd(
I->getOperand(0), One);
723 case Instruction::Shl: {
730 DemandedMask, Known))
734 if (
I->hasOneUse()) {
736 if (Inst && Inst->getOpcode() == BinaryOperator::Or) {
738 auto [IID, FShiftArgs] = *Opt;
739 if ((IID == Intrinsic::fshl || IID == Intrinsic::fshr) &&
740 FShiftArgs[0] == FShiftArgs[1]) {
752 if (
I->hasNoSignedWrap()) {
756 if (SignBits > ShiftAmt && SignBits - ShiftAmt >= NumHiDemandedBits)
757 return I->getOperand(0);
767 Constant *LeftShiftAmtC = ConstantInt::get(VTy, ShiftAmt);
771 LeftShiftAmtC,
DL) ==
C) {
772 Instruction *Lshr = BinaryOperator::CreateLShr(NewC,
X);
778 APInt DemandedMaskIn(DemandedMask.
lshr(ShiftAmt));
802 I->dropPoisonGeneratingFlags();
810 case Instruction::LShr: {
816 if (
I->hasOneUse()) {
818 if (Inst && Inst->getOpcode() == BinaryOperator::Or) {
820 auto [IID, FShiftArgs] = *Opt;
821 if ((IID == Intrinsic::fshl || IID == Intrinsic::fshr) &&
822 FShiftArgs[0] == FShiftArgs[1]) {
838 if (SignBits >= NumHiDemandedBits)
839 return I->getOperand(0);
848 Constant *RightShiftAmtC = ConstantInt::get(VTy, ShiftAmt);
852 RightShiftAmtC,
DL) ==
C) {
859 if (
match(
I->getOperand(0),
863 X, ConstantInt::get(
X->getType(), Factor->
lshr(ShiftAmt)));
869 APInt DemandedMaskIn(DemandedMask.
shl(ShiftAmt));
872 I->dropPoisonGeneratingFlags();
887 case Instruction::AShr: {
893 if (SignBits >= NumHiDemandedBits)
894 return I->getOperand(0);
900 if (DemandedMask.
isOne()) {
903 I->getOperand(0),
I->getOperand(1),
I->getName());
912 APInt DemandedMaskIn(DemandedMask.
shl(ShiftAmt));
915 bool ShiftedInBitsDemanded = DemandedMask.
countl_zero() < ShiftAmt;
916 if (ShiftedInBitsDemanded)
920 I->dropPoisonGeneratingFlags();
926 if (Known.
Zero[
BitWidth - 1] || !ShiftedInBitsDemanded) {
936 ShiftAmt != 0,
I->isExact());
942 case Instruction::UDiv: {
948 APInt DemandedMaskIn =
953 I->dropPoisonGeneratingFlags();
964 case Instruction::SRem: {
967 if (DemandedMask.
ult(*Rem))
968 return I->getOperand(0);
970 APInt LowBits = *Rem - 1;
981 case Instruction::Call: {
982 bool KnownBitsComputed =
false;
984 switch (
II->getIntrinsicID()) {
985 case Intrinsic::abs: {
986 if (DemandedMask == 1)
987 return II->getArgOperand(0);
990 case Intrinsic::ctpop: {
998 II->getModule(), Intrinsic::ctpop, VTy);
1003 case Intrinsic::bswap: {
1020 NewVal = BinaryOperator::CreateLShr(
1021 II->getArgOperand(0), ConstantInt::get(VTy, NLZ - NTZ));
1023 NewVal = BinaryOperator::CreateShl(
1024 II->getArgOperand(0), ConstantInt::get(VTy, NTZ - NLZ));
1030 case Intrinsic::ptrmask: {
1031 unsigned MaskWidth =
I->getOperand(1)->getType()->getScalarSizeInBits();
1036 I, 1, (DemandedMask & ~LHSKnown.Zero).zextOrTrunc(MaskWidth),
1037 RHSKnown, Q,
Depth + 1))
1043 Known = LHSKnown & RHSKnown;
1044 KnownBitsComputed =
true;
1059 if (DemandedMask.
isSubsetOf(RHSKnown.One | LHSKnown.Zero))
1060 return I->getOperand(0);
1064 I, 1, (DemandedMask & ~LHSKnown.Zero).zextOrTrunc(MaskWidth)))
1079 if (!LHSKnown.isZero()) {
1080 const unsigned trailingZeros = LHSKnown.countMinTrailingZeros();
1083 uint64_t HighBitsGEPIndex = GEPIndex & ~PointerAlignBits;
1085 GEPIndex & PointerAlignBits & PtrMaskImmediate;
1087 uint64_t MaskedGEPIndex = HighBitsGEPIndex | MaskedLowBitsGEPIndex;
1089 if (MaskedGEPIndex != GEPIndex) {
1092 Type *GEPIndexType =
1093 DL.getIndexType(
GEP->getPointerOperand()->getType());
1095 GEP->getSourceElementType(), InnerPtr,
1096 ConstantInt::get(GEPIndexType, MaskedGEPIndex),
1097 GEP->getName(),
GEP->isInBounds());
1108 case Intrinsic::fshr:
1109 case Intrinsic::fshl: {
1117 if (
II->getIntrinsicID() == Intrinsic::fshr)
1120 APInt DemandedMaskLHS(DemandedMask.
lshr(ShiftAmt));
1122 if (
I->getOperand(0) !=
I->getOperand(1)) {
1128 I->dropPoisonGeneratingAnnotations();
1135 if (DemandedMaskLHS.
isSubsetOf(LHSKnown.Zero | LHSKnown.One) &&
1149 LHSKnown <<= ShiftAmt;
1152 KnownBitsComputed =
true;
1155 case Intrinsic::umax: {
1162 CTZ >=
C->getActiveBits())
1163 return II->getArgOperand(0);
1166 case Intrinsic::umin: {
1174 CTZ >=
C->getBitWidth() -
C->countl_one())
1175 return II->getArgOperand(0);
1181 *
II, DemandedMask, Known, KnownBitsComputed);
1189 if (!KnownBitsComputed)
1195 if (
I->getType()->isPointerTy()) {
1196 Align Alignment =
I->getPointerAlignment(
DL);
1204 if (!
I->getType()->isPointerTy() &&
1210 if (Known != ReferenceKnown) {
1211 errs() <<
"Mismatched known bits for " << *
I <<
" in "
1212 <<
I->getFunction()->getName() <<
"\n";
1213 errs() <<
"computeKnownBits(): " << ReferenceKnown <<
"\n";
1214 errs() <<
"SimplifyDemandedBits(): " << Known <<
"\n";
1229 Type *ITy =
I->getType();
1238 switch (
I->getOpcode()) {
1239 case Instruction::And: {
1254 return I->getOperand(0);
1256 return I->getOperand(1);
1260 case Instruction::Or: {
1277 return I->getOperand(0);
1279 return I->getOperand(1);
1283 case Instruction::Xor: {
1299 return I->getOperand(0);
1301 return I->getOperand(1);
1305 case Instruction::Add: {
1313 return I->getOperand(0);
1317 return I->getOperand(1);
1325 case Instruction::Sub: {
1333 return I->getOperand(0);
1342 case Instruction::AShr: {
1355 const APInt *ShiftRC;
1356 const APInt *ShiftLC;
1404 if (!ShlOp1 || !ShrOp1)
1409 unsigned BitWidth = Ty->getScalarSizeInBits();
1418 Known.
Zero &= DemandedMask;
1423 bool isLshr = (Shr->
getOpcode() == Instruction::LShr);
1424 BitMask1 = isLshr ? (BitMask1.
lshr(ShrAmt) << ShlAmt) :
1425 (BitMask1.
ashr(ShrAmt) << ShlAmt);
1427 if (ShrAmt <= ShlAmt) {
1428 BitMask2 <<= (ShlAmt - ShrAmt);
1430 BitMask2 = isLshr ? BitMask2.
lshr(ShrAmt - ShlAmt):
1431 BitMask2.
ashr(ShrAmt - ShlAmt);
1435 if ((BitMask1 & DemandedMask) == (BitMask2 & DemandedMask)) {
1436 if (ShrAmt == ShlAmt)
1443 if (ShrAmt < ShlAmt) {
1445 New = BinaryOperator::CreateShl(VarX, Amt);
1451 New = isLshr ? BinaryOperator::CreateLShr(VarX, Amt) :
1452 BinaryOperator::CreateAShr(VarX, Amt);
1454 New->setIsExact(
true);
1480 bool AllowMultipleUsers) {
1488 assert((DemandedElts & ~EltMask) == 0 &&
"Invalid DemandedElts!");
1492 PoisonElts = EltMask;
1496 if (DemandedElts.
isZero()) {
1497 PoisonElts = EltMask;
1512 for (
unsigned i = 0; i != VWidth; ++i) {
1513 if (!DemandedElts[i]) {
1519 Constant *Elt =
C->getAggregateElement(i);
1520 if (!Elt)
return nullptr;
1529 return NewCV !=
C ? NewCV :
nullptr;
1536 if (!AllowMultipleUsers) {
1540 if (!V->hasOneUse()) {
1549 DemandedElts = EltMask;
1554 if (!
I)
return nullptr;
1556 bool MadeChange =
false;
1557 auto simplifyAndSetOp = [&](
Instruction *Inst,
unsigned OpNum,
1567 APInt PoisonElts2(VWidth, 0);
1568 APInt PoisonElts3(VWidth, 0);
1569 switch (
I->getOpcode()) {
1572 case Instruction::GetElementPtr: {
1590 for (
unsigned i = 0; i <
I->getNumOperands(); i++) {
1594 PoisonElts = EltMask;
1597 if (
I->getOperand(i)->getType()->isVectorTy()) {
1598 APInt PoisonEltsOp(VWidth, 0);
1599 simplifyAndSetOp(
I, i, DemandedElts, PoisonEltsOp);
1604 PoisonElts |= PoisonEltsOp;
1610 case Instruction::InsertElement: {
1617 simplifyAndSetOp(
I, 0, DemandedElts, PoisonElts2);
1624 APInt PreInsertDemandedElts = DemandedElts;
1626 PreInsertDemandedElts.
clearBit(IdxNo);
1634 if (PreInsertDemandedElts == 0 &&
1641 simplifyAndSetOp(
I, 0, PreInsertDemandedElts, PoisonElts);
1645 if (IdxNo >= VWidth || !DemandedElts[IdxNo]) {
1647 return I->getOperand(0);
1654 case Instruction::ShuffleVector: {
1656 assert(Shuffle->getOperand(0)->getType() ==
1657 Shuffle->getOperand(1)->getType() &&
1658 "Expected shuffle operands to have same type");
1669 APInt LeftDemanded(OpWidth, 1);
1670 APInt LHSPoisonElts(OpWidth, 0);
1671 simplifyAndSetOp(
I, 0, LeftDemanded, LHSPoisonElts);
1672 if (LHSPoisonElts[0])
1673 PoisonElts = EltMask;
1679 APInt LeftDemanded(OpWidth, 0), RightDemanded(OpWidth, 0);
1680 for (
unsigned i = 0; i < VWidth; i++) {
1681 if (DemandedElts[i]) {
1682 unsigned MaskVal = Shuffle->getMaskValue(i);
1683 if (MaskVal != -1u) {
1684 assert(MaskVal < OpWidth * 2 &&
1685 "shufflevector mask index out of range!");
1686 if (MaskVal < OpWidth)
1687 LeftDemanded.setBit(MaskVal);
1689 RightDemanded.
setBit(MaskVal - OpWidth);
1694 APInt LHSPoisonElts(OpWidth, 0);
1695 simplifyAndSetOp(
I, 0, LeftDemanded, LHSPoisonElts);
1697 APInt RHSPoisonElts(OpWidth, 0);
1698 simplifyAndSetOp(
I, 1, RightDemanded, RHSPoisonElts);
1711 if (VWidth == OpWidth) {
1712 bool IsIdentityShuffle =
true;
1713 for (
unsigned i = 0; i < VWidth; i++) {
1714 unsigned MaskVal = Shuffle->getMaskValue(i);
1715 if (DemandedElts[i] && i != MaskVal) {
1716 IsIdentityShuffle =
false;
1720 if (IsIdentityShuffle)
1721 return Shuffle->getOperand(0);
1724 bool NewPoisonElts =
false;
1725 unsigned LHSIdx = -1u, LHSValIdx = -1u;
1726 unsigned RHSIdx = -1u, RHSValIdx = -1u;
1727 bool LHSUniform =
true;
1728 bool RHSUniform =
true;
1729 for (
unsigned i = 0; i < VWidth; i++) {
1730 unsigned MaskVal = Shuffle->getMaskValue(i);
1731 if (MaskVal == -1u) {
1733 }
else if (!DemandedElts[i]) {
1734 NewPoisonElts =
true;
1736 }
else if (MaskVal < OpWidth) {
1737 if (LHSPoisonElts[MaskVal]) {
1738 NewPoisonElts =
true;
1741 LHSIdx = LHSIdx == -1u ? i : OpWidth;
1742 LHSValIdx = LHSValIdx == -1u ? MaskVal : OpWidth;
1743 LHSUniform = LHSUniform && (MaskVal == i);
1746 if (RHSPoisonElts[MaskVal - OpWidth]) {
1747 NewPoisonElts =
true;
1750 RHSIdx = RHSIdx == -1u ? i : OpWidth;
1751 RHSValIdx = RHSValIdx == -1u ? MaskVal - OpWidth : OpWidth;
1752 RHSUniform = RHSUniform && (MaskVal - OpWidth == i);
1768 if (LHSIdx < OpWidth && RHSUniform) {
1770 Op = Shuffle->getOperand(1);
1771 Value = CV->getOperand(LHSValIdx);
1775 if (RHSIdx < OpWidth && LHSUniform) {
1777 Op = Shuffle->getOperand(0);
1778 Value = CV->getOperand(RHSValIdx);
1791 if (NewPoisonElts) {
1794 for (
unsigned i = 0; i < VWidth; ++i) {
1798 Elts.
push_back(Shuffle->getMaskValue(i));
1800 Shuffle->setShuffleMask(Elts);
1805 case Instruction::Select: {
1815 simplifyAndSetOp(
I, 0, DemandedElts, PoisonElts);
1819 APInt DemandedLHS(DemandedElts), DemandedRHS(DemandedElts);
1821 for (
unsigned i = 0; i < VWidth; i++) {
1826 DemandedLHS.clearBit(i);
1832 simplifyAndSetOp(
I, 1, DemandedLHS, PoisonElts2);
1833 simplifyAndSetOp(
I, 2, DemandedRHS, PoisonElts3);
1837 PoisonElts = PoisonElts2 & PoisonElts3;
1840 case Instruction::BitCast: {
1845 APInt InputDemandedElts(InVWidth, 0);
1846 PoisonElts2 =
APInt(InVWidth, 0);
1849 if (VWidth == InVWidth) {
1853 InputDemandedElts = DemandedElts;
1854 }
else if ((VWidth % InVWidth) == 0) {
1858 Ratio = VWidth / InVWidth;
1859 for (
unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx)
1860 if (DemandedElts[OutIdx])
1861 InputDemandedElts.
setBit(OutIdx / Ratio);
1862 }
else if ((InVWidth % VWidth) == 0) {
1866 Ratio = InVWidth / VWidth;
1867 for (
unsigned InIdx = 0; InIdx != InVWidth; ++InIdx)
1868 if (DemandedElts[InIdx / Ratio])
1869 InputDemandedElts.
setBit(InIdx);
1875 simplifyAndSetOp(
I, 0, InputDemandedElts, PoisonElts2);
1877 if (VWidth == InVWidth) {
1878 PoisonElts = PoisonElts2;
1879 }
else if ((VWidth % InVWidth) == 0) {
1883 for (
unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx)
1884 if (PoisonElts2[OutIdx / Ratio])
1885 PoisonElts.
setBit(OutIdx);
1886 }
else if ((InVWidth % VWidth) == 0) {
1890 for (
unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx) {
1893 PoisonElts.
setBit(OutIdx);
1900 case Instruction::FPTrunc:
1901 case Instruction::FPExt:
1902 simplifyAndSetOp(
I, 0, DemandedElts, PoisonElts);
1905 case Instruction::Call: {
1908 switch (
II->getIntrinsicID()) {
1909 case Intrinsic::masked_gather:
1910 case Intrinsic::masked_load: {
1915 DemandedPassThrough(DemandedElts);
1917 for (
unsigned i = 0; i < VWidth; i++) {
1919 if (CElt->isNullValue())
1920 DemandedPtrs.clearBit(i);
1921 else if (CElt->isAllOnesValue())
1927 if (
II->getIntrinsicID() == Intrinsic::masked_gather)
1928 simplifyAndSetOp(
II, 0, DemandedPtrs, PoisonElts2);
1929 simplifyAndSetOp(
II, 2, DemandedPassThrough, PoisonElts3);
1933 PoisonElts = PoisonElts2 & PoisonElts3;
1939 *
II, DemandedElts, PoisonElts, PoisonElts2, PoisonElts3,
1973 if (DemandedElts == 1 && !
X->hasOneUse() && !
Y->hasOneUse() &&
1976 auto findShufBO = [&](
bool MatchShufAsOp0) ->
User * {
1981 Value *OtherOp = MatchShufAsOp0 ?
Y :
X;
1986 Value *ShufOp = MatchShufAsOp0 ?
X :
Y;
1997 if (
DT.dominates(U,
I))
2003 if (
User *ShufBO = findShufBO(
true))
2005 if (
User *ShufBO = findShufBO(
false))
2009 simplifyAndSetOp(
I, 0, DemandedElts, PoisonElts);
2010 simplifyAndSetOp(
I, 1, DemandedElts, PoisonElts2);
2014 PoisonElts &= PoisonElts2;
2022 return MadeChange ?
I :
nullptr;
2028 bool IsCanonicalizing =
false) {
2036 if (Ty->isAggregateType())
2040 if (Mask ==
fcNan && IsCanonicalizing)
2069 if (KnownSrc.
SignBit ==
false ||
2113 return DemandedMask;
2131 if (InferredFMF != FMF) {
2173 if ((DemandedMask & ~NegOrZero) ==
fcNone &&
2177 if ((DemandedMask & ~PosOrZero) ==
fcNone &&
2196 bool OrderedZeroSign = !NSZ;
2200 case Intrinsic::maximum: {
2217 case Intrinsic::minimum: {
2234 case Intrinsic::maxnum:
2235 case Intrinsic::maximumnum: {
2251 case Intrinsic::minnum:
2252 case Intrinsic::minimumnum: {
2287 if (DemandedMask &
fcNan)
2288 SrcDemandedMask |=
fcNan;
2326 Type *VTy =
I->getType();
2330 FMF = FPOp->getFastMathFlags();
2334 switch (
I->getOpcode()) {
2335 case Instruction::FNeg: {
2338 Value *FNegSrc =
I->getOperand(0);
2353 Known, FNegFAbsSrc, ThisDemandedMask, KnownSrc, IsNSZ))
2362 if (KnownSrc.
SignBit ==
false)
2380 case Instruction::FAdd:
2381 case Instruction::FSub: {
2385 if (
I->getOperand(0) ==
I->getOperand(1) &&
2386 I->getOpcode() == Instruction::FAdd &&
2393 if (DemandedMask &
fcNan)
2394 SrcDemandedMask |=
fcNan;
2407 if (Mode.inputsMayBePositiveZero() || Mode.outputsMayBePositiveZero())
2425 KnownRHS = KnownLHS;
2430 if (DemandedMask &
fcNan)
2433 if (DemandedMask &
fcInf)
2434 SrcDemandedMask |=
fcInf;
2443 Known =
I->getOpcode() == Instruction::FAdd
2455 bool ResultNotNan = (DemandedMask &
fcNan) ==
fcNone;
2458 if (ResultNotNan &&
I->getOpcode() == Instruction::FAdd &&
2460 return I->getOperand(1);
2466 return I->getOperand(0);
2470 if (InferredFMF != FMF) {
2471 I->setFastMathFlags(InferredFMF);
2477 case Instruction::FMul: {
2486 if (DemandedMask &
fcInf) {
2492 if (DemandedMask &
fcNan) {
2506 if (DemandedMask &
fcZero)
2532 Value *Fabs =
Builder.CreateUnaryIntrinsic(Intrinsic::fabs,
X, FMF);
2550 bool NonNanResult = (DemandedMask &
fcNan) ==
fcNone;
2641 if (InferredFMF != FMF) {
2642 I->setFastMathFlags(InferredFMF);
2648 case Instruction::FDiv: {
2660 Builder.CreateUnaryIntrinsic(Intrinsic::fabs,
I->getOperand(0), FMF);
2664 Value *IsInfOrZeroOrNan =
Builder.CreateOr(IsInfOrNan, IsZeroOrNan);
2666 return Builder.CreateSelectFMFWithUnknownProfile(
2687 if (DemandedMask &
fcNan) {
2698 if (DemandedMask &
fcZero)
2705 if (DemandedMask &
fcZero) {
2707 "should not have to worry about daz here");
2708 LHSDemandedMask |=
fcZero;
2709 RHSDemandedMask |=
fcInf;
2716 if (DemandedMask &
fcInf) {
2742 bool ResultNotNan = (DemandedMask &
fcNan) ==
fcNone;
2743 bool ResultNotInf = (DemandedMask &
fcInf) ==
fcNone;
2745 if (!ResultNotInf &&
2774 if (InferredFMF != FMF) {
2775 I->setFastMathFlags(InferredFMF);
2781 case Instruction::FPTrunc:
2784 case Instruction::FPExt: {
2786 if (DemandedMask &
fcNan)
2787 SrcDemandedMask |=
fcNan;
2801 I->getOperand(0)->getType()->getScalarType()->getFltSemantics();
2809 case Instruction::Call: {
2813 case Intrinsic::fabs: {
2816 KnownSrc,
Depth + 1))
2825 case Intrinsic::arithmetic_fence:
2829 case Intrinsic::copysign: {
2837 if ((DemandedMask &
fcNegative) == DemandedMask) {
2839 CI->
setOperand(1, ConstantFP::get(VTy, -1.0));
2843 if ((DemandedMask &
fcPositive) == DemandedMask) {
2865 if (KnownSign.
SignBit ==
false) {
2871 if (KnownSign.
SignBit ==
true) {
2873 CI->
setOperand(1, ConstantFP::get(VTy, -1.0));
2881 case Intrinsic::fma:
2882 case Intrinsic::fmuladd: {
2886 if (DemandedMask &
fcNan)
2887 SrcDemandedMask |=
fcNan;
2901 KnownSrc[1] = KnownSrc[0];
2918 case Intrinsic::maximum:
2919 case Intrinsic::minimum:
2920 case Intrinsic::maximumnum:
2921 case Intrinsic::minimumnum:
2922 case Intrinsic::maxnum:
2923 case Intrinsic::minnum: {
2924 const bool PropagateNaN =
2925 IID == Intrinsic::maximum || IID == Intrinsic::minimum;
2931 PropagateNaN && ((DemandedMask &
fcNan) ==
fcNone)
2932 ? DemandedMask | ~
fcNan
2962 bool ResultNotLogical0 = (ValidResults & ZeroMask) ==
fcNone;
2971 ((PropagateNaN && (ValidResults &
fcNan) ==
fcNone) ||
2977 if (InferredFMF != FMF) {
2984 case Intrinsic::exp:
2985 case Intrinsic::exp2:
2986 case Intrinsic::exp10: {
3000 if (DemandedMask &
fcNan)
3001 SrcDemandedMask |=
fcNan;
3003 if (DemandedMask &
fcZero) {
3042 return ConstantFP::get(VTy, 1.0);
3057 ConstantFP::get(VTy, 1.0), FMF);
3072 Value *ZeroOrInf =
Builder.CreateSelectFMFWithUnknownProfile(
3083 case Intrinsic::log:
3084 case Intrinsic::log2:
3085 case Intrinsic::log10: {
3087 if (DemandedMask &
fcNan)
3088 DemandedSrcMask |=
fcNan;
3094 if (DemandedMask &
fcNan)
3099 DemandedSrcMask |=
fcZero;
3102 if (Mode.inputsMayBeZero())
3110 if (DemandedMask &
fcZero)
3123 case Intrinsic::sqrt: {
3127 if (DemandedMask &
fcNan)
3172 case Intrinsic::ldexp: {
3174 if (DemandedMask &
fcNan)
3175 SrcDemandedMask |=
fcNan;
3209 case Intrinsic::trunc:
3210 case Intrinsic::floor:
3211 case Intrinsic::ceil:
3212 case Intrinsic::rint:
3213 case Intrinsic::nearbyint:
3214 case Intrinsic::round:
3215 case Intrinsic::roundeven: {
3217 if (DemandedMask &
fcNan)
3218 DemandedSrcMask |=
fcNan;
3235 bool IsRoundNearestOrTrunc =
3236 IID == Intrinsic::round || IID == Intrinsic::roundeven ||
3237 IID == Intrinsic::nearbyint || IID == Intrinsic::rint ||
3238 IID == Intrinsic::trunc;
3241 if ((IID == Intrinsic::floor || IsRoundNearestOrTrunc) &&
3245 if ((IID == Intrinsic::ceil || IsRoundNearestOrTrunc) &&
3250 return ConstantFP::get(VTy, -1.0);
3253 return ConstantFP::get(VTy, 1.0);
3256 KnownSrc, IID == Intrinsic::trunc,
3265 if ((IID == Intrinsic::trunc || IsRoundNearestOrTrunc) &&
3278 if (InferredFMF != FMF) {
3286 case Intrinsic::fptrunc_round:
3289 case Intrinsic::canonicalize: {
3302 SrcDemandedMask |=
fcSNan;
3346 if (InferredFMF != FMF) {
3365 case Instruction::Select: {
3372 return I->getOperand(2);
3374 return I->getOperand(1);
3384 case Instruction::ExtractElement: {
3391 case Instruction::InsertElement: {
3398 Known = KnownVec | KnownInserted;
3402 case Instruction::ShuffleVector: {
3410 Known = KnownLHS | KnownRHS;
3414 case Instruction::ExtractValue: {
3420 case Intrinsic::frexp: {
3422 if (DemandedMask &
fcNan)
3423 SrcDemandedMask |=
fcNan;
3450 return II->getArgOperand(0);
3478 FMF = FPOp->getFastMathFlags();
3482 switch (
I->getOpcode()) {
3483 case Instruction::Select: {
3489 return I->getOperand(1);
3494 return I->getOperand(2);
3504 case Instruction::FNeg: {
3508 Value *FNegSrc =
I->getOperand(0);
3524 Known, Src, ThisDemandedMask, KnownSrc,
false))
3528 case Instruction::Call: {
3532 case Intrinsic::fabs: {
3545 case Intrinsic::copysign: {
3557 Mag, DemandedMask, KnownMag,
false))
3575 case Intrinsic::maxnum:
3576 case Intrinsic::minnum:
3577 case Intrinsic::maximum:
3578 case Intrinsic::minimum:
3579 case Intrinsic::maximumnum:
3580 case Intrinsic::minimumnum: {
3591 KnownLHS, KnownRHS,
F,
3613 Use &U =
I->getOperandUse(OpNo);
3615 Type *VTy = V->getType();
3617 if (DemandedMask ==
fcNone) {
3645 if (!FoldedToConst || FoldedToConst == V)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Register Bank Select
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file provides internal interfaces used to implement the InstCombine.
static Value * simplifyDemandedUseFPClassFPTrunc(InstCombinerImpl &IC, Instruction &I, FastMathFlags FMF, FPClassTest DemandedMask, KnownFPClass &Known, unsigned Depth)
static cl::opt< unsigned > SimplifyDemandedVectorEltsDepthLimit("instcombine-simplify-vector-elts-depth", cl::desc("Depth limit when simplifying vector instructions and their operands"), cl::Hidden, cl::init(10))
static Constant * getFPClassConstant(Type *Ty, FPClassTest Mask, bool IsCanonicalizing=false)
For floating-point classes that resolve to a single bit pattern, return that value.
static cl::opt< bool > VerifyKnownBits("instcombine-verify-known-bits", cl::desc("Verify that computeKnownBits() and " "SimplifyDemandedBits() are consistent"), cl::Hidden, cl::init(false))
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
static Value * simplifyDemandedFPClassFabs(KnownFPClass &Known, Value *Src, FPClassTest DemandedMask, KnownFPClass KnownSrc, bool NSZ)
Perform multiple-use aware simplfications for fabs(Src).
static Value * simplifyDemandedFPClassFnegFabs(KnownFPClass &Known, Value *Src, FPClassTest DemandedMask, KnownFPClass KnownSrc, bool NSZ)
Perform multiple-use aware simplfications for fneg(fabs(Src)).
static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo, const APInt &Demanded)
Check to see if the specified operand of the specified instruction is a constant integer.
static Value * simplifyShiftSelectingPackedElement(Instruction *I, const APInt &DemandedMask, InstCombinerImpl &IC, unsigned Depth)
Let N = 2 * M.
static Value * simplifyDemandedFPClassMinMax(KnownFPClass &Known, Intrinsic::ID IID, const CallInst *CI, FPClassTest DemandedMask, KnownFPClass KnownLHS, KnownFPClass KnownRHS, const Function &F, bool NSZ)
static Value * simplifyDemandedFPClassCopysignMag(Value *MagSrc, FPClassTest DemandedMask, KnownFPClass KnownSrc, bool NSZ)
static FPClassTest adjustDemandedMaskFromFlags(FPClassTest DemandedMask, FastMathFlags FMF)
static FastMathFlags inferFastMathValueFlags(FastMathFlags FMF, FPClassTest ValidResults, ArrayRef< KnownFPClass > Known)
Try to set an inferred no-nans or no-infs in FMF.
static Value * simplifyDemandedFPClassResult(Instruction *FPOp, FastMathFlags FMF, FPClassTest DemandedMask, KnownFPClass &Known, ArrayRef< KnownFPClass > KnownSrcs)
Apply epilog fixups to a floating-point intrinsic.
This file provides the interface for the instcombine pass implementation.
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
This file contains the declarations for profiling metadata utility functions.
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
static APFloat getOne(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative One.
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
void clearBit(unsigned BitPosition)
Set a given bit to 0.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
unsigned popcount() const
Count the number of bits set.
LLVM_ABI APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
unsigned getActiveBits() const
Compute the number of active bits in the value.
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
void setSignBit()
Set the sign bit to 1.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
void clearAllBits()
Set every bit to 0.
unsigned countr_zero() const
Count the number of trailing zero bits.
unsigned countl_zero() const
The APInt version of std::countl_zero.
void clearLowBits(unsigned loBits)
Set bottom loBits bits to 0.
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
APInt shl(unsigned shiftAmt) const
Left-shift function.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
bool isOne() const
Determine if this is a value of 1.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
BinaryOps getOpcode() const
Value * getArgOperand(unsigned i) const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
This is the base class for all instructions that perform data casts.
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
static LLVM_ABI Constant * getInfinity(Type *Ty, bool Negative=false)
static LLVM_ABI Constant * getZero(Type *Ty, bool Negative=false)
static LLVM_ABI Constant * getQNaN(Type *Ty, bool Negative=false, APInt *Payload=nullptr)
This is the shared class of boolean and integer constants.
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
static LLVM_ABI Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
static LLVM_ABI Constant * getIntegerValue(Type *Ty, const APInt &V)
Return the value for an integer or pointer constant, or a vector thereof, with the given scalar value.
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
LLVM_ABI bool isOneValue() const
Returns true if the value is one.
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if possible, or null if not.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
A parsed version of the target data layout string in and methods for querying it.
Convenience struct for specifying and reasoning about fast-math flags.
bool noSignedZeros() const
void setNoSignedZeros(bool B=true)
void setNoNaNs(bool B=true)
void setNoInfs(bool B=true)
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
LLVM_ABI Value * CreateSelectWithUnknownProfile(Value *C, Value *True, Value *False, StringRef PassName, const Twine &Name="")
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
static InsertElementInst * Create(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
KnownFPClass computeKnownFPClass(Value *Val, FastMathFlags FMF, FPClassTest Interested=fcAllFlags, const Instruction *CtxI=nullptr, unsigned Depth=0) const
Value * SimplifyDemandedUseFPClass(Instruction *I, FPClassTest DemandedMask, KnownFPClass &Known, Instruction *CxtI, unsigned Depth=0)
Attempts to replace V with a simpler value based on the demanded floating-point classes.
bool SimplifyDemandedInstructionFPClass(Instruction &Inst)
Value * SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &PoisonElts, unsigned Depth=0, bool AllowMultipleUsers=false) override
The specified value produces a vector with any number of elements.
Value * SimplifyMultipleUseDemandedFPClass(Instruction *I, FPClassTest DemandedMask, KnownFPClass &Known, Instruction *CxtI, unsigned Depth)
Helper routine of SimplifyDemandedUseFPClass.
bool SimplifyDemandedBits(Instruction *I, unsigned Op, const APInt &DemandedMask, KnownBits &Known, const SimplifyQuery &Q, unsigned Depth=0) override
This form of SimplifyDemandedBits simplifies the specified instruction operand if possible, updating it in place.
std::optional< std::pair< Intrinsic::ID, SmallVector< Value *, 3 > > > convertOrOfShiftsToFunnelShift(Instruction &Or)
Value * simplifyShrShlDemandedBits(Instruction *Shr, const APInt &ShrOp1, Instruction *Shl, const APInt &ShlOp1, const APInt &DemandedMask, KnownBits &Known)
Helper routine of SimplifyDemandedUseBits.
Value * SimplifyDemandedUseBits(Instruction *I, const APInt &DemandedMask, KnownBits &Known, const SimplifyQuery &Q, unsigned Depth=0)
Attempts to replace I with a simpler value based on the demanded bits.
bool SimplifyDemandedFPClass(Instruction *I, unsigned Op, FPClassTest DemandedMask, KnownFPClass &Known, unsigned Depth=0)
bool SimplifyDemandedInstructionBits(Instruction &Inst)
Tries to simplify operands to an integer instruction based on its demanded bits.
Value * SimplifyMultipleUseDemandedBits(Instruction *I, const APInt &DemandedMask, KnownBits &Known, const SimplifyQuery &Q, unsigned Depth=0)
Helper routine of SimplifyDemandedUseBits.
unsigned ComputeNumSignBits(const Value *Op, const Instruction *CxtI=nullptr, unsigned Depth=0) const
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
void replaceUse(Use &U, Value *NewValue)
Replace use and add the previously used value to the worklist.
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
Instruction * InsertNewInstWith(Instruction *New, BasicBlock::iterator Old)
Same as InsertNewInstBefore, but also sets the debug loc.
void computeKnownBits(const Value *V, KnownBits &Known, const Instruction *CxtI, unsigned Depth=0) const
std::optional< Value * > targetSimplifyDemandedVectorEltsIntrinsic(IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp)
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
std::optional< Value * > targetSimplifyDemandedUseBitsIntrinsic(IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed)
LLVM_ABI void dropUBImplyingAttrsAndMetadata(ArrayRef< unsigned > Keep={})
Drop any attributes or metadata that can cause immediate undefined behavior.
LLVM_ABI bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
LLVM_ABI bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
LLVM_ABI void setFastMathFlags(FastMathFlags FMF)
Convenience function for setting multiple fast-math flags on this instruction, which must be an operator which supports these flags.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
LLVM_ABI void setIsExact(bool b=true)
Set or clear the exact flag on this instruction, which must be an operator which supports this flag.
A wrapper class for inspecting calls to intrinsic functions.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
This class represents the LLVM 'select' instruction.
const Value * getCondition() const
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
The instances of the Type class are immutable: once they are created, they are never changed.
static LLVM_ABI IntegerType * getInt64Ty(LLVMContext &C)
bool isVectorTy() const
True if this is an instance of VectorType.
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
bool isMultiUnitFPType() const
Returns true if this is a floating-point type that is an unevaluated sum of multiple floating-point units.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isIEEELikeFPTy() const
Return true if this is a well-behaved IEEE-like type, which has a IEEE compatible layout,...
LLVM_ABI const fltSemantics & getFltSemantics() const
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
void setOperand(unsigned i, Value *Val)
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
iterator_range< user_iterator > users()
bool hasUseList() const
Check if this Value has a use-list.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Base class of all SIMD vector types.
This class represents zero extension of integer types.
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
class_match< PoisonValue > m_Poison()
Match an arbitrary poison constant.
cst_pred_ty< is_lowbit_mask > m_LowBitMask()
Match an integer or vector with only the low bit(s) set.
PtrAdd_match< PointerOpTy, OffsetOpTy > m_PtrAdd(const PointerOpTy &PointerOp, const OffsetOpTy &OffsetOp)
Matches GEP with i8 source element type.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
DisjointOr_match< LHS, RHS, true > m_c_DisjointOr(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches a Add with LHS and RHS in either order.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI bool haveNoCommonBitsSet(const WithCache< const Value * > &LHSCache, const WithCache< const Value * > &RHSCache, const SimplifyQuery &SQ)
Return true if LHS and RHS have no common bits set.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI void computeKnownBitsFromContext(const Value *V, KnownBits &Known, const SimplifyQuery &Q, unsigned Depth=0)
Merge bits known from context-dependent facts into Known.
@ Undef
Value of the register doesn't matter.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value and is Skew mod Align.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
gep_type_iterator gep_type_end(const User *GEP)
constexpr auto equal_to(T &&Arg)
Functor variant of std::equal_to that can be used as a UnaryPredicate in functional algorithms like all_of.
LLVM_ABI bool isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be undef, but may be poison.
LLVM_ABI bool cannotOrderStrictlyLess(FPClassTest LHS, FPClassTest RHS, bool OrderedZeroSign=false)
Returns true if all values in LHS must be greater than or equal to those in RHS.
LLVM_ABI bool cannotOrderStrictlyGreater(FPClassTest LHS, FPClassTest RHS, bool OrderedZeroSign=false)
Returns true if all values in LHS must be less than or equal to those in RHS.
constexpr unsigned MaxAnalysisRecursionDepth
LLVM_ABI void adjustKnownBitsForSelectArm(KnownBits &Known, Value *Cond, Value *Arm, bool Invert, const SimplifyQuery &Q, unsigned Depth=0)
Adjust Known for the given select Arm to include information from the select Cond.
LLVM_ABI FPClassTest fneg(FPClassTest Mask)
Return the test mask which returns true if the value's sign bit is flipped.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOne bit sets.
LLVM_ABI void adjustKnownFPClassForSelectArm(KnownFPClass &Known, Value *Cond, Value *Arm, bool Invert, const SimplifyQuery &Q, unsigned Depth=0)
Adjust Known for the given select Arm to include information from the select Cond.
LLVM_ABI FPClassTest inverse_fabs(FPClassTest Mask)
Return the test mask which returns true after fabs is applied to the value.
bool isa(const From &Val)
isa&lt;X&gt; - Return true if the parameter to the template is an instance of one of the template type arguments.
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
constexpr int PoisonMaskElem
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
LLVM_ABI FPClassTest unknown_sign(FPClassTest Mask)
Return the test mask which returns true if the value could have the same set of classes,...
DWARFExpression::Operation Op
constexpr unsigned BitWidth
LLVM_ABI KnownBits analyzeKnownBitsFromAndXorOr(const Operator *I, const KnownBits &KnownLHS, const KnownBits &KnownRHS, const SimplifyQuery &SQ, unsigned Depth=0)
Using KnownBits LHS/RHS produce the known bits for logic op (and/xor/or).
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
gep_type_iterator gep_type_begin(const User *GEP)
unsigned Log2(Align A)
Returns the log2 of the alignment.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Represent subnormal handling kind for floating point instruction inputs and outputs.
static constexpr DenormalMode getPreserveSign()
static constexpr DenormalMode getIEEE()
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
KnownBits anyextOrTrunc(unsigned BitWidth) const
Return known bits for an "any" extension or truncation of the value we're tracking.
bool isNonNegative() const
Returns true if this value is known to be non-negative.
void makeNonNegative()
Make this value non-negative.
static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
unsigned getBitWidth() const
Get the bit width of this value.
void resetAll()
Resets the known state of all bits.
KnownBits unionWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for either this or RHS or both.
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
static KnownBits add(const KnownBits &LHS, const KnownBits &RHS, bool NSW=false, bool NUW=false)
Compute knownbits resulting from addition of LHS and RHS.
KnownBits zextOrTrunc(unsigned BitWidth) const
Return known bits for a zero extension or truncation of the value we're tracking.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
static LLVM_ABI KnownBits srem(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for srem(LHS, RHS).
static LLVM_ABI KnownBits udiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for udiv(LHS, RHS).
bool isNegative() const
Returns true if this value is known to be negative.
static KnownBits sub(const KnownBits &LHS, const KnownBits &RHS, bool NSW=false, bool NUW=false)
Compute knownbits resulting from subtraction of LHS and RHS.
static LLVM_ABI KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
bool isKnownNeverInfOrNaN() const
Return true if it's known this can never be an infinity or nan.
FPClassTest KnownFPClasses
Floating-point classes the value could be one of.
bool isKnownNeverInfinity() const
Return true if it's known this can never be an infinity.
static constexpr FPClassTest OrderedGreaterThanZeroMask
static constexpr FPClassTest OrderedLessThanZeroMask
void knownNot(FPClassTest RuleOut)
static LLVM_ABI KnownFPClass fmul(const KnownFPClass &LHS, const KnownFPClass &RHS, DenormalMode Mode=DenormalMode::getDynamic())
Report known values for fmul.
static LLVM_ABI KnownFPClass fadd_self(const KnownFPClass &Src, DenormalMode Mode=DenormalMode::getDynamic())
Report known values for fadd x, x.
void copysign(const KnownFPClass &Sign)
static KnownFPClass square(const KnownFPClass &Src, DenormalMode Mode=DenormalMode::getDynamic())
static LLVM_ABI KnownFPClass fsub(const KnownFPClass &LHS, const KnownFPClass &RHS, DenormalMode Mode=DenormalMode::getDynamic())
Report known values for fsub.
bool isKnownNeverSubnormal() const
Return true if it's known this can never be a subnormal.
bool isKnownAlways(FPClassTest Mask) const
static LLVM_ABI KnownFPClass canonicalize(const KnownFPClass &Src, DenormalMode DenormMode=DenormalMode::getDynamic())
Apply the canonicalize intrinsic to this value.
LLVM_ABI bool isKnownNeverLogicalZero(DenormalMode Mode) const
Return true if it's known this can never be interpreted as a zero.
static LLVM_ABI KnownFPClass log(const KnownFPClass &Src, DenormalMode Mode=DenormalMode::getDynamic())
Propagate known class for log/log2/log10.
static LLVM_ABI KnownFPClass fdiv(const KnownFPClass &LHS, const KnownFPClass &RHS, DenormalMode Mode=DenormalMode::getDynamic())
Report known values for fdiv.
static LLVM_ABI KnownFPClass roundToIntegral(const KnownFPClass &Src, bool IsTrunc, bool IsMultiUnitFPType)
Propagate known class for rounding intrinsics (trunc, floor, ceil, rint, nearbyint,...
static LLVM_ABI KnownFPClass ldexp(const KnownFPClass &Src, const KnownBits &N, const fltSemantics &Flt, DenormalMode Mode=DenormalMode::getDynamic())
Propagate known class for ldexp.
static LLVM_ABI KnownFPClass minMaxLike(const KnownFPClass &LHS, const KnownFPClass &RHS, MinMaxKind Kind, DenormalMode DenormMode=DenormalMode::getDynamic())
KnownFPClass intersectWith(const KnownFPClass &RHS) const
static LLVM_ABI KnownFPClass exp(const KnownFPClass &Src)
Report known values for exp, exp2 and exp10.
static LLVM_ABI KnownFPClass frexp_mant(const KnownFPClass &Src, DenormalMode Mode=DenormalMode::getDynamic())
Propagate known class for mantissa component of frexp.
std::optional< bool > SignBit
std::nullopt if the sign bit is unknown, true if the sign bit is definitely set, or false if the sign bit is definitely unset.
bool isKnownNeverNaN() const
Return true if it's known this can never be a nan.
bool isKnownNever(FPClassTest Mask) const
Return true if it's known this can never be one of the mask entries.
static LLVM_ABI KnownFPClass fpext(const KnownFPClass &KnownSrc, const fltSemantics &DstTy, const fltSemantics &SrcTy)
Propagate known class for fpext.
static LLVM_ABI KnownFPClass fma(const KnownFPClass &LHS, const KnownFPClass &RHS, const KnownFPClass &Addend, DenormalMode Mode=DenormalMode::getDynamic())
Report known values for fma.
static LLVM_ABI KnownFPClass fptrunc(const KnownFPClass &KnownSrc)
Propagate known class for fptrunc.
static LLVM_ABI KnownFPClass sqrt(const KnownFPClass &Src, DenormalMode Mode=DenormalMode::getDynamic())
Propagate known class for sqrt.
LLVM_ABI bool isKnownNeverLogicalPosZero(DenormalMode Mode) const
Return true if it's known this can never be interpreted as a positive zero.
bool cannotBeOrderedGreaterEqZero(DenormalMode Mode) const
Return true if it's know this can never be a negative value or a logical 0.
static LLVM_ABI KnownFPClass fadd(const KnownFPClass &LHS, const KnownFPClass &RHS, DenormalMode Mode=DenormalMode::getDynamic())
Report known values for fadd.
LLVM_ABI bool isKnownNeverLogicalNegZero(DenormalMode Mode) const
Return true if it's known this can never be interpreted as a negative zero.
static LLVM_ABI KnownFPClass fma_square(const KnownFPClass &Squared, const KnownFPClass &Addend, DenormalMode Mode=DenormalMode::getDynamic())
Report known values for fma squared, squared, addend.