35#define DEBUG_TYPE "instcombine"
54 unsigned Opc =
I->getOpcode();
56 case Instruction::Add:
57 case Instruction::Sub:
58 case Instruction::Mul:
59 case Instruction::And:
61 case Instruction::Xor:
62 case Instruction::AShr:
63 case Instruction::LShr:
64 case Instruction::Shl:
65 case Instruction::UDiv:
66 case Instruction::URem: {
72 if (
Opc == Instruction::LShr ||
Opc == Instruction::AShr)
76 case Instruction::Trunc:
77 case Instruction::ZExt:
78 case Instruction::SExt:
82 if (
I->getOperand(0)->getType() == Ty)
83 return I->getOperand(0);
88 Opc == Instruction::SExt);
90 case Instruction::Select: {
98 case Instruction::PHI: {
109 case Instruction::FPToUI:
110 case Instruction::FPToSI:
112 I->getOperand(0), Ty);
114 case Instruction::Call:
116 switch (
II->getIntrinsicID()) {
119 case Intrinsic::vscale: {
121 I->getModule(), Intrinsic::vscale, {Ty});
128 case Instruction::ShuffleVector: {
151 Processed[V] = Result;
165InstCombinerImpl::isEliminableCastPair(
const CastInst *CI1,
182 if ((Res == Instruction::IntToPtr && SrcTy != DstIntPtrTy) ||
183 (Res == Instruction::PtrToInt && DstTy != SrcIntPtrTy))
205 if (CSrc->hasOneUse())
218 if (!Cmp || Cmp->getOperand(0)->getType() != Sel->getType() ||
224 if (CI.
getOpcode() != Instruction::BitCast ||
254 if (SrcTy && DestTy &&
255 SrcTy->getNumElements() == DestTy->getNumElements() &&
256 SrcTy->getPrimitiveSizeInBits() == DestTy->getPrimitiveSizeInBits()) {
269class TypeEvaluationHelper {
274 [[nodiscard]]
static bool canEvaluateTruncated(
Value *V,
Type *Ty,
280 [[nodiscard]]
static bool canEvaluateZExtd(
Value *V,
Type *Ty,
281 unsigned &BitsToClear,
288 [[nodiscard]]
static bool canEvaluateSExtd(
Value *V,
Type *Ty);
293 [[nodiscard]]
static bool canAlwaysEvaluateInType(
Value *V,
Type *Ty);
296 [[nodiscard]]
bool allPendingVisited()
const {
298 [
this](
Value *V) {
return Visited.contains(V); });
306 if (canAlwaysEvaluateInType(V, Ty))
315 const auto [It,
Inserted] = Visited.insert({
V,
false});
332 return It->getSecond();
396 const bool Result = Pred(V, Ty);
405 [[nodiscard]]
bool canNotEvaluateInType(
Value *V,
Type *Ty);
407 [[nodiscard]]
bool canEvaluateTruncatedImpl(
Value *V,
Type *Ty,
408 InstCombinerImpl &IC,
410 [[nodiscard]]
bool canEvaluateTruncatedPred(
Value *V,
Type *Ty,
411 InstCombinerImpl &IC,
413 [[nodiscard]]
bool canEvaluateZExtdImpl(
Value *V,
Type *Ty,
414 unsigned &BitsToClear,
415 InstCombinerImpl &IC,
417 [[nodiscard]]
bool canEvaluateSExtdImpl(
Value *V,
Type *Ty);
418 [[nodiscard]]
bool canEvaluateSExtdPred(
Value *V,
Type *Ty);
422 SmallDenseMap<Value *, bool, 8> Visited;
425 SmallVector<Value *, 8> Pending;
432bool TypeEvaluationHelper::canAlwaysEvaluateInType(
Value *V,
Type *Ty) {
446bool TypeEvaluationHelper::canNotEvaluateInType(
Value *V,
Type *Ty) {
468bool TypeEvaluationHelper::canEvaluateTruncated(
Value *V,
Type *Ty,
471 TypeEvaluationHelper TYH;
472 return TYH.canEvaluateTruncatedImpl(V, Ty, IC, CxtI) &&
475 TYH.allPendingVisited();
478bool TypeEvaluationHelper::canEvaluateTruncatedImpl(
Value *V,
Type *Ty,
481 return canEvaluate(V, Ty, [
this, &IC, CxtI](
Value *V,
Type *Ty) {
482 return canEvaluateTruncatedPred(V, Ty, IC, CxtI);
486bool TypeEvaluationHelper::canEvaluateTruncatedPred(
Value *V,
Type *Ty,
490 Type *OrigTy =
V->getType();
491 switch (
I->getOpcode()) {
492 case Instruction::Add:
493 case Instruction::Sub:
494 case Instruction::Mul:
495 case Instruction::And:
496 case Instruction::Or:
497 case Instruction::Xor:
499 return canEvaluateTruncatedImpl(
I->getOperand(0), Ty, IC, CxtI) &&
500 canEvaluateTruncatedImpl(
I->getOperand(1), Ty, IC, CxtI);
502 case Instruction::UDiv:
503 case Instruction::URem: {
513 return canEvaluateTruncatedImpl(
I->getOperand(0), Ty, IC, CxtI) &&
514 canEvaluateTruncatedImpl(
I->getOperand(1), Ty, IC, CxtI);
518 case Instruction::Shl: {
525 return canEvaluateTruncatedImpl(
I->getOperand(0), Ty, IC, CxtI) &&
526 canEvaluateTruncatedImpl(
I->getOperand(1), Ty, IC, CxtI);
529 case Instruction::LShr: {
544 auto DemandedBits = Trunc->getType()->getScalarSizeInBits();
546 return canEvaluateTruncatedImpl(
I->getOperand(0), Ty, IC, CxtI) &&
547 canEvaluateTruncatedImpl(
I->getOperand(1), Ty, IC, CxtI);
550 return canEvaluateTruncatedImpl(
I->getOperand(0), Ty, IC, CxtI) &&
551 canEvaluateTruncatedImpl(
I->getOperand(1), Ty, IC, CxtI);
555 case Instruction::AShr: {
565 unsigned ShiftedBits = OrigBitWidth -
BitWidth;
568 return canEvaluateTruncatedImpl(
I->getOperand(0), Ty, IC, CxtI) &&
569 canEvaluateTruncatedImpl(
I->getOperand(1), Ty, IC, CxtI);
572 case Instruction::Trunc:
575 case Instruction::ZExt:
576 case Instruction::SExt:
580 case Instruction::Select: {
582 return canEvaluateTruncatedImpl(
SI->getTrueValue(), Ty, IC, CxtI) &&
583 canEvaluateTruncatedImpl(
SI->getFalseValue(), Ty, IC, CxtI);
585 case Instruction::PHI: {
592 return canEvaluateTruncatedImpl(IncValue, Ty, IC, CxtI);
595 case Instruction::FPToUI:
596 case Instruction::FPToSI: {
603 Semantics,
I->getOpcode() == Instruction::FPToSI);
606 case Instruction::ShuffleVector:
607 return canEvaluateTruncatedImpl(
I->getOperand(0), Ty, IC, CxtI) &&
608 canEvaluateTruncatedImpl(
I->getOperand(1), Ty, IC, CxtI);
631 Value *VecInput =
nullptr;
640 unsigned VecWidth = VecType->getPrimitiveSizeInBits();
642 unsigned ShiftAmount = ShiftVal ? ShiftVal->
getZExtValue() : 0;
644 if ((VecWidth % DestWidth != 0) || (ShiftAmount % DestWidth != 0))
649 unsigned NumVecElts = VecWidth / DestWidth;
650 if (VecType->getElementType() != DestType) {
655 unsigned Elt = ShiftAmount / DestWidth;
657 Elt = NumVecElts - 1 - Elt;
677 Type *SrcType = Src->getType();
683 unsigned DstBits = DstType->getScalarSizeInBits();
684 unsigned TruncRatio = SrcBits / DstBits;
685 if ((SrcBits % DstBits) != 0)
690 const APInt *ShiftAmount =
nullptr;
698 auto VecElts = VecOpTy->getElementCount();
700 uint64_t BitCastNumElts = VecElts.getKnownMinValue() * TruncRatio;
703 if (Cst->
uge(std::numeric_limits<uint64_t>::max() / TruncRatio))
707 ? (VecOpIdx + 1) * TruncRatio - 1
708 : VecOpIdx * TruncRatio;
714 if (ShiftAmount->
uge(SrcBits) || ShiftAmount->
urem(DstBits) != 0)
720 assert(IdxOfs < TruncRatio &&
721 "IdxOfs is expected to be less than TruncRatio.");
726 assert(BitCastNumElts <= std::numeric_limits<uint32_t>::max() &&
740 "Don't narrow to an illegal scalar type");
752 BinaryOperator *Or0, *Or1;
756 Value *ShVal0, *ShVal1, *ShAmt0, *ShAmt1;
763 if (Or0->
getOpcode() == BinaryOperator::LShr) {
769 Or1->
getOpcode() == BinaryOperator::LShr &&
770 "Illegal or(shift,shift) pair");
779 unsigned MaxShiftAmountWidth =
Log2_32(NarrowWidth);
780 APInt HiBitMask = ~APInt::getLowBitsSet(WideWidth, MaxShiftAmountWidth);
787 if (ShVal0 != ShVal1)
793 unsigned Mask = Width - 1;
806 Value *ShAmt = matchShiftAmount(ShAmt0, ShAmt1, NarrowWidth);
809 ShAmt = matchShiftAmount(ShAmt1, ShAmt0, NarrowWidth);
827 Value *NarrowShAmt =
Builder.CreateZExtOrTrunc(ShAmt, DestTy);
830 X =
Y =
Builder.CreateTrunc(ShVal0, DestTy);
831 if (ShVal0 != ShVal1)
832 Y =
Builder.CreateTrunc(ShVal1, DestTy);
833 Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
850 BinaryOperator *BinOp;
857 case Instruction::And:
858 case Instruction::Or:
859 case Instruction::Xor:
860 case Instruction::Add:
861 case Instruction::Sub:
862 case Instruction::Mul: {
889 case Instruction::LShr:
890 case Instruction::AShr: {
895 unsigned MaxShiftAmt = SrcWidth - DestWidth;
899 APInt(SrcWidth, MaxShiftAmt)))) {
901 bool IsExact = OldShift->isExact();
906 OldShift->getOpcode() == Instruction::AShr
907 ?
Builder.CreateAShr(
A, ShAmt, OldShift->getName(), IsExact)
908 :
Builder.CreateLShr(
A, ShAmt, OldShift->getName(), IsExact);
918 if (Instruction *NarrowOr = narrowFunnelShift(Trunc))
930 if (Shuf && Shuf->hasOneUse() &&
match(Shuf->getOperand(1),
m_Undef()) &&
934 ->getElementCount())) {
939 Value *NarrowOp = Builder.CreateTrunc(Shuf->getOperand(0), NewTruncTy);
954 assert((Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) &&
955 "Unexpected instruction for shrinking");
958 if (!InsElt || !InsElt->hasOneUse())
963 Value *VecOp = InsElt->getOperand(0);
964 Value *ScalarOp = InsElt->getOperand(1);
965 Value *Index = InsElt->getOperand(2);
971 Value *NarrowOp = Builder.CreateCast(Opcode, ScalarOp, DestScalarTy);
983 Type *DestTy = Trunc.
getType(), *SrcTy = Src->getType();
985 unsigned SrcWidth = SrcTy->getScalarSizeInBits();
991 if ((DestTy->
isVectorTy() || shouldChangeType(SrcTy, DestTy)) &&
992 TypeEvaluationHelper::canEvaluateTruncated(Src, DestTy, *
this, &Trunc)) {
997 dbgs() <<
"ICE: EvaluateInDifferentType converting expression type"
1010 if (DestWidth * 2 < SrcWidth) {
1011 auto *NewDestTy = DestITy->getExtendedType();
1012 if (shouldChangeType(SrcTy, NewDestTy) &&
1013 TypeEvaluationHelper::canEvaluateTruncated(Src, NewDestTy, *
this,
1016 dbgs() <<
"ICE: EvaluateInDifferentType converting expression type"
1017 " to reduce the width of operand of"
1030 if (DestWidth == 1) {
1053 Constant *One = ConstantInt::get(SrcTy,
APInt(SrcWidth, 1));
1061 Constant *One = ConstantInt::get(SrcTy,
APInt(SrcWidth, 1));
1097 A->getType() == DestTy &&
B->getType() == DestTy) {
1099 Trunc,
Builder.CreateBinaryIntrinsic(Intrinsic::uadd_sat,
A,
B));
1106 A->getType() == DestTy &&
B->getType() == DestTy) {
1108 Trunc,
Builder.CreateBinaryIntrinsic(Intrinsic::usub_sat,
A,
B));
1112 unsigned AWidth =
A->getType()->getScalarSizeInBits();
1113 unsigned MaxShiftAmt = SrcWidth - std::max(DestWidth, AWidth);
1115 bool IsExact = OldSh->isExact();
1120 APInt(SrcWidth, MaxShiftAmt)))) {
1121 auto GetNewShAmt = [&](
unsigned Width) {
1122 Constant *MaxAmt = ConstantInt::get(SrcTy, Width - 1,
false);
1131 if (
A->getType() == DestTy) {
1132 Constant *ShAmt = GetNewShAmt(DestWidth);
1134 return IsExact ? BinaryOperator::CreateExactAShr(
A, ShAmt)
1135 : BinaryOperator::CreateAShr(
A, ShAmt);
1139 if (Src->hasOneUse()) {
1140 Constant *ShAmt = GetNewShAmt(AWidth);
1157 if (Src->hasOneUse() &&
1165 APInt Threshold =
APInt(
C->getType()->getScalarSizeInBits(), DestWidth);
1167 Value *NewTrunc =
Builder.CreateTrunc(
A, DestTy,
A->getName() +
".tr");
1176 if (SrcTy->isIntegerTy() &&
isPowerOf2_64(SrcTy->getPrimitiveSizeInBits()) &&
1184 APInt UpperBound =
C->getUniqueInteger();
1187 if (!UpperBound.
isZero() && UpperBound - 1 == TruncatedMax) {
1189 {ConstantInt::get(SrcTy, 0),
A});
1191 Intrinsic::smin, {SrcTy},
1192 {
SMax, ConstantInt::get(SrcTy, TruncatedMax)});
1205 unsigned AWidth =
A->getType()->getScalarSizeInBits();
1206 if (AWidth == DestWidth && AWidth >
Log2_32(SrcWidth)) {
1207 Value *WidthDiff = ConstantInt::get(
A->getType(), SrcWidth - AWidth);
1210 return BinaryOperator::CreateAdd(NarrowCtlz, WidthDiff);
1220 if (
Log2_32(*MaxVScale) < DestWidth)
1225 if (DestWidth == 1 &&
1268 return Changed ? &Trunc :
nullptr;
1288 Value *In = Cmp->getOperand(0);
1289 Value *Sh = ConstantInt::get(In->getType(),
1290 In->getType()->getScalarSizeInBits() - 1);
1291 In = Builder.CreateLShr(In, Sh, In->getName() +
".lobit");
1292 if (In->getType() != Zext.
getType())
1293 In = Builder.CreateIntCast(In, Zext.
getType(),
false );
1303 if (Op1CV->
isZero() && Cmp->isEquality()) {
1308 uint32_t ShAmt = KnownZeroMask.logBase2();
1309 bool IsExpectShAmt = KnownZeroMask.isPowerOf2() &&
1311 if (IsExpectShAmt &&
1312 (Cmp->getOperand(0)->getType() == Zext.
getType() ||
1314 Value *In = Cmp->getOperand(0);
1318 In = Builder.CreateLShr(In, ConstantInt::get(In->getType(), ShAmt),
1319 In->getName() +
".lobit");
1324 In =
Builder.CreateXor(In, ConstantInt::get(
In->getType(), 1));
1335 if (
Cmp->isEquality()) {
1344 Value *Shift =
And->getOperand(
X ==
And->getOperand(0) ? 1 : 0);
1351 Builder.CreateAnd(Lshr, ConstantInt::get(
X->getType(), 1));
1379bool TypeEvaluationHelper::canEvaluateZExtd(
Value *V,
Type *Ty,
1380 unsigned &BitsToClear,
1383 TypeEvaluationHelper TYH;
1384 return TYH.canEvaluateZExtdImpl(V, Ty, BitsToClear, IC, CxtI);
1386bool TypeEvaluationHelper::canEvaluateZExtdImpl(
Value *V,
Type *Ty,
1387 unsigned &BitsToClear,
1391 if (canAlwaysEvaluateInType(V, Ty))
1395 if (canNotEvaluateInType(V, Ty))
1400 switch (
I->getOpcode()) {
1401 case Instruction::ZExt:
1402 case Instruction::SExt:
1403 case Instruction::Trunc:
1405 case Instruction::And:
1406 case Instruction::Or:
1407 case Instruction::Xor:
1408 case Instruction::Add:
1409 case Instruction::Sub:
1410 case Instruction::Mul:
1411 if (!canEvaluateZExtdImpl(
I->getOperand(0), Ty, BitsToClear, IC, CxtI) ||
1412 !canEvaluateZExtdImpl(
I->getOperand(1), Ty, Tmp, IC, CxtI))
1415 if (BitsToClear == 0 && Tmp == 0)
1420 if (Tmp == 0 &&
I->isBitwiseLogicOp()) {
1423 unsigned VSize =
V->getType()->getScalarSizeInBits();
1429 if (
I->getOpcode() == Instruction::And)
1438 case Instruction::Shl: {
1443 if (!canEvaluateZExtdImpl(
I->getOperand(0), Ty, BitsToClear, IC, CxtI))
1445 BitsToClear = ShiftAmt < BitsToClear ? BitsToClear - ShiftAmt : 0;
1450 case Instruction::LShr: {
1455 if (!canEvaluateZExtdImpl(
I->getOperand(0), Ty, BitsToClear, IC, CxtI))
1457 BitsToClear += ShiftAmt;
1458 if (BitsToClear >
V->getType()->getScalarSizeInBits())
1459 BitsToClear =
V->getType()->getScalarSizeInBits();
1465 case Instruction::Select:
1466 if (!canEvaluateZExtdImpl(
I->getOperand(1), Ty, Tmp, IC, CxtI) ||
1467 !canEvaluateZExtdImpl(
I->getOperand(2), Ty, BitsToClear, IC, CxtI) ||
1474 case Instruction::PHI: {
1490 case Instruction::Call:
1494 if (
II->getIntrinsicID() == Intrinsic::vscale)
1515 Type *SrcTy = Src->getType(), *DestTy = Zext.
getType();
1518 if (SrcTy->isIntOrIntVectorTy(1) && Zext.
hasNonNeg())
1522 unsigned BitsToClear;
1523 if (shouldChangeType(SrcTy, DestTy) &&
1524 TypeEvaluationHelper::canEvaluateZExtd(Src, DestTy, BitsToClear, *
this,
1527 "Can't clear more bits than in SrcTy");
1531 dbgs() <<
"ICE: EvaluateInDifferentType converting expression type"
1532 " to avoid zero extend: "
1539 if (
SrcOp->hasOneUse())
1542 uint32_t SrcBitsKept = SrcTy->getScalarSizeInBits() - BitsToClear;
1555 return BinaryOperator::CreateAnd(Res,
C);
1566 Value *
A = CSrc->getOperand(0);
1567 unsigned SrcSize =
A->getType()->getScalarSizeInBits();
1568 unsigned MidSize = CSrc->getType()->getScalarSizeInBits();
1574 if (SrcSize < DstSize) {
1576 Constant *AndConst = ConstantInt::get(
A->getType(), AndValue);
1581 if (SrcSize == DstSize) {
1583 return BinaryOperator::CreateAnd(
A, ConstantInt::get(
A->getType(),
1586 if (SrcSize > DstSize) {
1589 return BinaryOperator::CreateAnd(Trunc,
1590 ConstantInt::get(Trunc->
getType(),
1596 return transformZExtICmp(Cmp, Zext);
1602 X->getType() == DestTy)
1603 return BinaryOperator::CreateAnd(
X,
Builder.CreateZExt(
C, DestTy));
1609 X->getType() == DestTy) {
1611 return BinaryOperator::CreateXor(
Builder.CreateAnd(
X, ZC), ZC);
1620 X->getType() == DestTy) {
1622 return BinaryOperator::CreateAnd(
X, ZextC);
1631 unsigned TypeWidth = Src->getType()->getScalarSizeInBits();
1632 if (
Log2_32(*MaxVScale) < TypeWidth)
1641 SrcTy->getScalarSizeInBits() >
1660 Value *Op0 = Cmp->getOperand(0), *Op1 = Cmp->getOperand(1);
1664 if (!Op1->getType()->isIntOrIntVectorTy())
1671 Value *In = Builder.CreateAShr(Op0, Sh, Op0->
getName() +
".lobit");
1672 if (In->getType() != Sext.
getType())
1673 In = Builder.CreateIntCast(In, Sext.
getType(),
true );
1682 if (Cmp->hasOneUse() &&
1683 Cmp->isEquality() && (Op1C->isZero() || Op1C->getValue().isPowerOf2())){
1687 if (KnownZeroMask.isPowerOf2()) {
1688 Value *In = Cmp->getOperand(0);
1691 if (!Op1C->isZero() && Op1C->getValue() != KnownZeroMask) {
1701 unsigned ShiftAmt = KnownZeroMask.countr_zero();
1705 ConstantInt::get(
In->getType(), ShiftAmt));
1715 unsigned ShiftAmt = KnownZeroMask.countl_zero();
1719 ConstantInt::get(
In->getType(), ShiftAmt));
1722 In =
Builder.CreateAShr(In, ConstantInt::get(
In->getType(),
1723 KnownZeroMask.getBitWidth() - 1),
"sext");
1743bool TypeEvaluationHelper::canEvaluateSExtd(
Value *V,
Type *Ty) {
1744 TypeEvaluationHelper TYH;
1745 return TYH.canEvaluateSExtdImpl(V, Ty) && TYH.allPendingVisited();
1748bool TypeEvaluationHelper::canEvaluateSExtdImpl(
Value *V,
Type *Ty) {
1749 return canEvaluate(V, Ty, [
this](
Value *V,
Type *Ty) {
1750 return canEvaluateSExtdPred(V, Ty);
1754bool TypeEvaluationHelper::canEvaluateSExtdPred(
Value *V,
Type *Ty) {
1756 "Can't sign extend type to a smaller type");
1759 switch (
I->getOpcode()) {
1760 case Instruction::SExt:
1761 case Instruction::ZExt:
1762 case Instruction::Trunc:
1764 case Instruction::And:
1765 case Instruction::Or:
1766 case Instruction::Xor:
1767 case Instruction::Add:
1768 case Instruction::Sub:
1769 case Instruction::Mul:
1771 return canEvaluateSExtdImpl(
I->getOperand(0), Ty) &&
1772 canEvaluateSExtdImpl(
I->getOperand(1), Ty);
1777 case Instruction::Select:
1778 return canEvaluateSExtdImpl(
I->getOperand(1), Ty) &&
1779 canEvaluateSExtdImpl(
I->getOperand(2), Ty);
1781 case Instruction::PHI: {
1787 if (!canEvaluateSExtdImpl(IncValue, Ty))
1809 Type *SrcTy = Src->getType(), *DestTy = Sext.
getType();
1816 CI->setNonNeg(
true);
1821 bool ShouldExtendExpression =
true;
1822 Value *TruncSrc =
nullptr;
1827 ShouldExtendExpression =
false;
1828 if (ShouldExtendExpression && shouldChangeType(SrcTy, DestTy) &&
1829 TypeEvaluationHelper::canEvaluateSExtd(Src, DestTy)) {
1832 dbgs() <<
"ICE: EvaluateInDifferentType converting expression type"
1833 " to avoid sign extend: "
1844 Value *ShAmt = ConstantInt::get(DestTy, DestBitSize - SrcBitSize);
1845 return BinaryOperator::CreateAShr(
Builder.CreateShl(Res, ShAmt,
"sext"),
1853 unsigned XBitSize =
X->getType()->getScalarSizeInBits();
1858 ResTrunc->setHasNoSignedWrap(
true);
1863 if (Src->hasOneUse() &&
X->getType() == DestTy) {
1865 Constant *ShAmt = ConstantInt::get(DestTy, DestBitSize - SrcBitSize);
1866 return BinaryOperator::CreateAShr(
Builder.CreateShl(
X, ShAmt), ShAmt);
1874 if (Src->hasOneUse() &&
1883 return transformSExtICmp(Cmp, Sext);
1900 Constant *BA =
nullptr, *CA =
nullptr;
1906 assert(WideCurrShAmt &&
"Constant folding of ImmConstant cannot fail");
1915 return BinaryOperator::CreateAShr(
A, NewShAmt);
1923 Type *XTy =
X->getType();
1925 Constant *ShlAmtC = ConstantInt::get(XTy, XBitSize - SrcBitSize);
1926 Constant *AshrAmtC = ConstantInt::get(XTy, XBitSize - 1);
1928 return BinaryOperator::CreateAShr(
Builder.CreateShl(
X, ShlAmtC),
1942 if (
Log2_32(*MaxVScale) < (SrcBitSize - 1))
1953 if ((IID == Intrinsic::scmp || IID == Intrinsic::ucmp) &&
II->hasOneUse())
1955 Sext,
Builder.CreateIntrinsic(
1956 DestTy, IID, {II->getArgOperand(0), II->getArgOperand(1)}));
1971 bool PreferBFloat) {
1992 if (Ty->getScalarType()->isPPC_FP128Ty())
2012 Type *MinType =
nullptr;
2014 unsigned NumElts = CVVTy->getNumElements();
2018 for (
unsigned i = 0; i != NumElts; ++i) {
2043 return FPExt->getOperand(0)->getType();
2071 return V->getType();
2077 Type *SrcTy = V->getType();
2078 assert(SrcTy->isIntOrIntVectorTy() &&
"Expected an integer type");
2079 int SrcSize = (int)SrcTy->getScalarSizeInBits() - IsSigned;
2084 if (SrcSize <= DestNumSigBits)
2093 int SrcNumSigBits =
F->getType()->getFPMantissaWidth();
2100 if (SrcNumSigBits > 0 && DestNumSigBits > 0 &&
2101 SrcNumSigBits <= DestNumSigBits)
2108 int SigBits = (int)SrcTy->getScalarSizeInBits() -
2111 if (SigBits <= DestNumSigBits)
2118 if (SigBits <= DestNumSigBits)
2127 assert((Opcode == CastInst::SIToFP || Opcode == CastInst::UIToFP) &&
2129 Value *Src =
I.getOperand(0);
2130 Type *FPTy =
I.getType();
2147 if (BO && BO->hasOneUse()) {
2150 unsigned OpWidth = BO->getType()->getFPMantissaWidth();
2153 unsigned SrcWidth = std::max(LHSWidth, RHSWidth);
2154 unsigned DstWidth = Ty->getFPMantissaWidth();
2155 switch (BO->getOpcode()) {
2157 case Instruction::FAdd:
2158 case Instruction::FSub:
2177 if (OpWidth >= 2*DstWidth+1 && DstWidth >= SrcWidth) {
2178 Value *LHS =
Builder.CreateFPTrunc(BO->getOperand(0), Ty);
2179 Value *RHS =
Builder.CreateFPTrunc(BO->getOperand(1), Ty);
2185 case Instruction::FMul:
2191 if (OpWidth >= LHSWidth + RHSWidth && DstWidth >= SrcWidth) {
2192 Value *LHS =
Builder.CreateFPTrunc(BO->getOperand(0), Ty);
2193 Value *RHS =
Builder.CreateFPTrunc(BO->getOperand(1), Ty);
2197 case Instruction::FDiv:
2204 if (OpWidth >= 2*DstWidth && DstWidth >= SrcWidth) {
2205 Value *LHS =
Builder.CreateFPTrunc(BO->getOperand(0), Ty);
2206 Value *RHS =
Builder.CreateFPTrunc(BO->getOperand(1), Ty);
2210 case Instruction::FRem: {
2215 if (SrcWidth == OpWidth)
2218 if (LHSWidth == SrcWidth) {
2219 LHS =
Builder.CreateFPTrunc(BO->getOperand(0), LHSMinType);
2220 RHS =
Builder.CreateFPTrunc(BO->getOperand(1), LHSMinType);
2222 LHS =
Builder.CreateFPTrunc(BO->getOperand(0), RHSMinType);
2223 RHS =
Builder.CreateFPTrunc(BO->getOperand(1), RHSMinType);
2226 Value *ExactResult =
Builder.CreateFRemFMF(LHS, RHS, BO);
2235 if (
Op &&
Op->hasOneUse()) {
2238 FMF &= FPMO->getFastMathFlags();
2250 X->getType() == Ty) {
2254 Builder.CreateSelectFMF(
Cond,
X, NarrowY, FMF,
"narrow.sel",
Op);
2258 X->getType() == Ty) {
2262 Builder.CreateSelectFMF(
Cond, NarrowY,
X, FMF,
"narrow.sel",
Op);
2268 switch (
II->getIntrinsicID()) {
2270 case Intrinsic::ceil:
2271 case Intrinsic::fabs:
2272 case Intrinsic::floor:
2273 case Intrinsic::nearbyint:
2274 case Intrinsic::rint:
2275 case Intrinsic::round:
2276 case Intrinsic::roundeven:
2277 case Intrinsic::trunc: {
2278 Value *Src =
II->getArgOperand(0);
2279 if (!Src->hasOneUse())
2285 if (
II->getIntrinsicID() != Intrinsic::fabs) {
2287 if (!FPExtSrc || FPExtSrc->
getSrcTy() != Ty)
2297 II->getOperandBundlesAsDefs(OpBundles);
2339template <
typename FPToIntTy>
2341 constexpr bool IsSaturating = std::is_same_v<FPToIntTy, IntrinsicInst>;
2347 Value *
X = OpI->getOperand(0);
2348 Type *XType =
X->getType();
2349 Type *DestType = FI.getType();
2352 bool IsOutputSigned;
2353 if constexpr (IsSaturating)
2354 IsOutputSigned = FI.getIntrinsicID() == Intrinsic::fptosi_sat;
2365 if constexpr (!IsSaturating) {
2373 if (OutputSize > OpI->getType()->getFPMantissaWidth())
2385 if constexpr (IsSaturating) {
2388 if (IsInputSigned != IsOutputSigned || DestWidth < SrcWidth)
2392 if (DestWidth > SrcWidth) {
2393 if (IsInputSigned && IsOutputSigned)
2397 if (DestWidth < SrcWidth)
2400 assert(XType == DestType &&
"Unexpected types for int to FP to int casts");
2456 UI->setNonNeg(
true);
2468 DL.getPointerSizeInBits(AS)) {
2480 auto UsesPointerAsInt = [](
User *U) {
2491 Base->getType()->getPointerAddressSpace() &&
2508 if (!
GEP || !
GEP->hasOneUse())
2511 Ptr =
GEP->getPointerOperand();
2520 Type *IdxTy =
DL.getIndexType(PtrTy);
2522 Res->
getType() == IntTy && IntTy == IdxTy) {
2535 return Builder.CreateZExtOrTrunc(Res, IntTy);
2546 unsigned TySize = Ty->getScalarSizeInBits();
2547 unsigned PtrSize =
DL.getPointerSizeInBits(AS);
2548 if (TySize != PtrSize) {
2561 Mask->getType() == Ty)
2562 return BinaryOperator::CreateAnd(
Builder.CreatePtrToInt(Ptr, Ty), Mask);
2567 Value *Vec, *Scalar, *Index;
2574 Value *NewCast =
Builder.CreatePtrToInt(Scalar, Ty->getScalarType());
2591 Mask->getType() == Ty)
2592 return BinaryOperator::CreateAnd(
Builder.CreatePtrToAddr(Ptr), Mask);
2625 if (SrcTy->getElementType() != DestTy->getElementType()) {
2630 if (SrcTy->getElementType()->getPrimitiveSizeInBits() !=
2631 DestTy->getElementType()->getPrimitiveSizeInBits())
2644 assert(SrcElts != DestElts &&
"Element counts should be different.");
2653 if (SrcElts > DestElts) {
2662 ShuffleMask = ShuffleMaskStorage;
2664 ShuffleMask = ShuffleMask.take_back(DestElts);
2666 ShuffleMask = ShuffleMask.take_front(DestElts);
2677 unsigned DeltaElts = DestElts - SrcElts;
2679 ShuffleMaskStorage.insert(ShuffleMaskStorage.begin(), DeltaElts, NullElt);
2681 ShuffleMaskStorage.append(DeltaElts, NullElt);
2682 ShuffleMask = ShuffleMaskStorage;
2689 return Value % Ty->getPrimitiveSizeInBits() == 0;
2693 return Value / Ty->getPrimitiveSizeInBits();
2710 "Shift should be a multiple of the element type size");
2717 if (V->getType() == VecEltTy) {
2720 if (
C->isNullValue())
2725 ElementIndex = Elements.size() - ElementIndex - 1;
2728 if (Elements[ElementIndex])
2731 Elements[ElementIndex] = V;
2750 C->getType()->getPrimitiveSizeInBits()));
2754 for (
unsigned i = 0; i != NumElts; ++i) {
2755 unsigned ShiftI = i * ElementSize;
2757 Instruction::LShr,
C, ConstantInt::get(
C->getType(), ShiftI));
2769 if (!V->hasOneUse())
return false;
2772 if (!
I)
return false;
2773 switch (
I->getOpcode()) {
2774 default:
return false;
2775 case Instruction::BitCast:
2776 if (
I->getOperand(0)->getType()->isVectorTy())
2780 case Instruction::ZExt:
2782 I->getOperand(0)->getType()->getPrimitiveSizeInBits(),
2787 case Instruction::Or:
2792 case Instruction::Shl: {
2795 if (!CI)
return false;
2832 DestVecTy->getElementType(),
2840 for (
unsigned i = 0, e = Elements.size(); i != e; ++i) {
2841 if (!Elements[i])
continue;
2856 Value *VecOp, *Index;
2874 if (DestType->
isVectorTy() && FixedVType && FixedVType->getNumElements() == 1)
2901 if (
X->getType()->isFPOrFPVectorTy() &&
2902 Y->getType()->isIntOrIntVectorTy()) {
2904 Builder.CreateBitCast(BO->
getOperand(0),
Y->getType());
2908 if (
X->getType()->isIntOrIntVectorTy() &&
2909 Y->getType()->isFPOrFPVectorTy()) {
2911 Builder.CreateBitCast(BO->
getOperand(1),
X->getType());
2945 Value *CastedC = Builder.CreateBitCast(
C, DestTy);
2968 CondVTy->getElementCount() != DestVecTy->getElementCount())
2977 SrcVecTy->getElementCount())))) {
2980 Value *CastedTVal = Builder.CreateBitCast(TVal, DestTy);
2981 Value *CastedFVal = Builder.CreateBitCast(FVal, DestTy);
2989 if ((DestVecTy !=
nullptr) != (SrcVecTy !=
nullptr))
2996 Value *CastedVal = Builder.CreateBitCast(FVal, DestTy);
3003 Value *CastedVal = Builder.CreateBitCast(TVal, DestTy);
3034 Type *SrcTy = Src->getType();
3038 SmallSetVector<PHINode *, 4> OldPhiNodes;
3046 while (!PhiWorklist.
empty()) {
3048 for (
Value *IncValue : OldPN->incoming_values()) {
3057 Value *Addr = LI->getOperand(0);
3066 if (LI->hasOneUse() && LI->isSimple())
3074 if (OldPhiNodes.
insert(PNode))
3085 Type *TyA = BCI->getOperand(0)->getType();
3086 Type *TyB = BCI->getType();
3087 if (TyA != DestTy || TyB != SrcTy)
3094 for (
auto *OldPN : OldPhiNodes) {
3095 for (User *V : OldPN->users()) {
3097 if (!
SI->isSimple() ||
SI->getOperand(0) != OldPN)
3101 Type *TyB = BCI->getOperand(0)->getType();
3102 Type *TyA = BCI->getType();
3103 if (TyA != DestTy || TyB != SrcTy)
3109 if (!OldPhiNodes.contains(
PHI))
3118 SmallDenseMap<PHINode *, PHINode *> NewPNodes;
3119 for (
auto *OldPN : OldPhiNodes) {
3120 Builder.SetInsertPoint(OldPN);
3121 PHINode *NewPN =
Builder.CreatePHI(DestTy, OldPN->getNumOperands());
3122 NewPNodes[OldPN] = NewPN;
3126 for (
auto *OldPN : OldPhiNodes) {
3127 PHINode *NewPN = NewPNodes[OldPN];
3128 for (
unsigned j = 0, e = OldPN->getNumOperands(); j != e; ++j) {
3129 Value *
V = OldPN->getOperand(j);
3130 Value *NewV =
nullptr;
3143 NewV = BCI->getOperand(0);
3145 NewV = NewPNodes[PrevPN];
3148 NewPN->
addIncoming(NewV, OldPN->getIncomingBlock(j));
3162 for (
auto *OldPN : OldPhiNodes) {
3163 PHINode *NewPN = NewPNodes[OldPN];
3166 assert(
SI->isSimple() &&
SI->getOperand(0) == OldPN);
3170 SI->setOperand(0, NewBC);
3175 Type *TyB = BCI->getOperand(0)->getType();
3176 Type *TyA = BCI->getType();
3177 assert(TyA == DestTy && TyB == SrcTy);
3208 if (
X->getType() != FTy)
3213 return Builder.CreateCopySign(Builder.CreateBitCast(
Y, FTy),
X);
3220 Type *SrcTy = Src->getType();
3225 if (DestTy == Src->getType())
3251 if (SrcVTy->getNumElements() == 1) {
3256 Builder.CreateExtractElement(Src,
3265 return new BitCastInst(InsElt->getOperand(1), DestTy);
3275 Y->getType()->isIntegerTy() && isDesirableIntType(
BitWidth)) {
3277 if (
DL.isBigEndian())
3278 IndexC = SrcVTy->getNumElements() - 1 - IndexC;
3284 unsigned EltWidth =
Y->getType()->getScalarSizeInBits();
3288 return BinaryOperator::CreateOr(AndX, ZextY);
3296 Value *ShufOp0 = Shuf->getOperand(0);
3297 Value *ShufOp1 = Shuf->getOperand(1);
3300 if (Shuf->hasOneUse() && DestTy->
isVectorTy() &&
3302 ShufElts == SrcVecElts) {
3323 if (DestTy->
isIntegerTy() && ShufElts.getKnownMinValue() % 2 == 0 &&
3324 Shuf->hasOneUse() && Shuf->isReverse()) {
3325 unsigned IntrinsicNum = 0;
3327 SrcTy->getScalarSizeInBits() == 8) {
3328 IntrinsicNum = Intrinsic::bswap;
3329 }
else if (SrcTy->getScalarSizeInBits() == 1) {
3330 IntrinsicNum = Intrinsic::bitreverse;
3332 if (IntrinsicNum != 0) {
3333 assert(ShufOp0->
getType() == SrcTy &&
"Unexpected shuffle mask");
3337 Value *ScalarX =
Builder.CreateBitCast(ShufOp0, DestTy);
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static std::optional< bool > isBigEndian(const SmallDenseMap< int64_t, int64_t, 8 > &MemOffset2Idx, int64_t LowestIdx)
Given a map from byte offsets in memory to indices in a load/store, determine if that map corresponds...
This file defines the DenseMap class.
static bool isSigned(unsigned Opcode)
static bool collectInsertionElements(Value *V, unsigned Shift, SmallVectorImpl< Value * > &Elements, Type *VecEltTy, bool isBigEndian)
V is a value which is inserted into a vector of VecEltTy.
static bool hasStoreUsersOnly(CastInst &CI)
Check if all users of CI are StoreInsts.
static Value * foldCopySignIdioms(BitCastInst &CI, InstCombiner::BuilderTy &Builder, const SimplifyQuery &SQ)
Fold (bitcast (or (and (bitcast X to int), signmask), nneg Y) to fp) to copysign((bitcast Y to fp),...
static Type * shrinkFPConstantVector(Value *V, bool PreferBFloat)
static Instruction * canonicalizeBitCastExtElt(BitCastInst &BitCast, InstCombinerImpl &IC)
Canonicalize scalar bitcasts of extracted elements into a bitcast of the vector followed by extract e...
static Instruction * shrinkSplatShuffle(TruncInst &Trunc, InstCombiner::BuilderTy &Builder)
Try to narrow the width of a splat shuffle.
static Instruction * foldFPtoI(Instruction &FI, InstCombiner &IC)
static Instruction * foldBitCastSelect(BitCastInst &BitCast, InstCombiner::BuilderTy &Builder)
Change the type of a select if we can eliminate a bitcast.
static Instruction * foldBitCastBitwiseLogic(BitCastInst &BitCast, InstCombiner::BuilderTy &Builder)
Change the type of a bitwise logic operation if we can eliminate a bitcast.
static bool fitsInFPType(APFloat F, const fltSemantics &Sem)
Return a Constant* for the specified floating-point constant if it fits in the specified FP type with...
static Instruction * optimizeVectorResizeWithIntegerBitCasts(Value *InVal, VectorType *DestTy, InstCombinerImpl &IC)
This input value (which is known to have vector type) is being zero extended or truncated to the spec...
static Instruction * shrinkInsertElt(CastInst &Trunc, InstCombiner::BuilderTy &Builder)
Try to narrow the width of an insert element.
SmallDenseMap< Value *, Value *, 8 > EvaluatedMap
static Type * getMinimumFPType(Value *V, Type *PreferredTy, InstCombiner &IC)
Find the minimum FP type we can safely truncate to.
static bool isMultipleOfTypeSize(unsigned Value, Type *Ty)
static Value * optimizeIntegerToVectorInsertions(BitCastInst &CI, InstCombinerImpl &IC)
If the input is an 'or' instruction, we may be doing shifts and ors to assemble the elements of the v...
static Type * shrinkFPConstant(LLVMContext &Ctx, const APFloat &F, bool PreferBFloat)
static Instruction * foldVecExtTruncToExtElt(TruncInst &Trunc, InstCombinerImpl &IC)
Whenever an element is extracted from a vector, optionally shifted down, and then truncated,...
static Value * EvaluateInDifferentTypeImpl(Value *V, Type *Ty, bool isSigned, InstCombinerImpl &IC, EvaluatedMap &Processed)
static unsigned getTypeSizeIndex(unsigned Value, Type *Ty)
static Instruction * foldVecTruncToExtElt(TruncInst &Trunc, InstCombinerImpl &IC)
Given a vector that is bitcast to an integer, optionally logically right-shifted, and truncated,...
This file provides internal interfaces used to implement the InstCombine.
This file provides the interface for the instcombine pass implementation.
uint64_t IntrinsicInst * II
const SmallVectorImpl< MachineOperand > & Cond
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallVector class.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static SymbolRef::Type getType(const Symbol *Sym)
static const fltSemantics & IEEEsingle()
static const fltSemantics & BFloat()
static const fltSemantics & IEEEdouble()
static constexpr roundingMode rmNearestTiesToEven
static const fltSemantics & IEEEhalf()
static LLVM_ABI unsigned int semanticsIntSizeInBits(const fltSemantics &, bool)
Class for arbitrary precision integers.
LLVM_ABI APInt udiv(const APInt &RHS) const
Unsigned division operation.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
uint64_t getZExtValue() const
Get zero extended value.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
int32_t exactLogBase2() const
unsigned countr_zero() const
Count the number of trailing zero bits.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
unsigned countr_one() const
Count the number of trailing one bits.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
This class represents a conversion between pointers from one address space to another.
Represent a constant reference to an array (0 or more elements consecutively in memory),...
Functions, function parameters, and return types can have attributes to indicate how they should be t...
LLVM_ABI std::optional< unsigned > getVScaleRangeMax() const
Returns the maximum value for the vscale_range attribute or std::nullopt when unknown.
BinaryOps getOpcode() const
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
static BinaryOperator * CreateFMulFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
static BinaryOperator * CreateFDivFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
This class represents a no-op cast from one type to another.
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
This is the base class for all instructions that perform data casts.
Type * getSrcTy() const
Return the source type, as a convenience.
Instruction::CastOps getOpcode() const
Return the opcode of this CastInst.
static LLVM_ABI unsigned isEliminableCastPair(Instruction::CastOps firstOpcode, Instruction::CastOps secondOpcode, Type *SrcTy, Type *MidTy, Type *DstTy, const DataLayout *DL)
Determine how a pair of casts can be eliminated, if they can be at all.
static LLVM_ABI CastInst * CreateIntegerCast(Value *S, Type *Ty, bool isSigned, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt, BitCast, or Trunc for int -> int casts.
static LLVM_ABI CastInst * CreateFPCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create an FPExt, BitCast, or FPTrunc for fp -> fp casts.
static LLVM_ABI CastInst * CreateTruncOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a Trunc or BitCast cast instruction.
static LLVM_ABI CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, a PtrToInt, or an IntToPtr cast instruction.
static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
Type * getDestTy() const
Return the destination type, as a convenience.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_SLT
signed less than
@ ICMP_UGE
unsigned greater or equal
@ ICMP_SGT
signed greater than
@ ICMP_ULT
unsigned less than
@ ICMP_ULE
unsigned less or equal
static LLVM_ABI Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getBitCast(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
ConstantFP - Floating Point Values [float, double].
const APFloat & getValueAPF() const
This is the shared class of boolean and integer constants.
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
bool uge(uint64_t Num) const
This function will return true iff this constant represents a value with active bits bigger than 64 b...
This is an important base class in LLVM.
static LLVM_ABI Constant * mergeUndefsWith(Constant *C, Constant *Other)
Merges undefs of a Constant with another Constant, along with the undefs already present.
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI bool isElementWiseEqual(Value *Y) const
Return true if this constant and a constant 'Y' are element-wise equal.
ValueT lookup(const_arg_type_t< KeyT > Val) const
Return the entry for the specified key, or a default constructed value if no such entry exists.
This class represents an extension of floating point types.
This class represents a cast from floating point to signed integer.
This class represents a cast from floating point to unsigned integer.
This class represents a truncation of floating point types.
Convenience struct for specifying and reasoning about fast-math flags.
Class to represent fixed width SIMD vectors.
static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
This instruction compares its operands according to the predicate given to the constructor.
Value * CreateInsertElement(Type *VecTy, Value *NewElt, Value *Idx, const Twine &Name="")
ConstantInt * getInt64(uint64_t C)
Get a constant 64-bit value.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
static InsertElementInst * Create(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Instruction * visitZExt(ZExtInst &Zext)
Instruction * visitAddrSpaceCast(AddrSpaceCastInst &CI)
Instruction * visitSExt(SExtInst &Sext)
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...
Instruction * visitFPToSI(FPToSIInst &FI)
Instruction * visitTrunc(TruncInst &CI)
Instruction * visitUIToFP(CastInst &CI)
Instruction * visitPtrToInt(PtrToIntInst &CI)
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false, bool SimplifyBothArms=false)
Given an instruction with a select as one operand and a constant as the other operand,...
Instruction * foldItoFPtoI(FPToIntTy &FI)
fpto{s/u}i.sat --> X or zext(X) or sext(X) or trunc(X) This is safe if the intermediate type has enou...
Instruction * visitSIToFP(CastInst &CI)
Instruction * commonCastTransforms(CastInst &CI)
Implement the transforms common to all CastInst visitors.
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * visitFPTrunc(FPTruncInst &CI)
Value * foldPtrToIntOrAddrOfGEP(Type *IntTy, Value *Ptr)
Instruction * visitBitCast(BitCastInst &CI)
Instruction * visitIntToPtr(IntToPtrInst &CI)
Instruction * visitFPToUI(FPToUIInst &FI)
Instruction * visitPtrToAddr(PtrToAddrInst &CI)
Value * EvaluateInDifferentType(Value *V, Type *Ty, bool isSigned)
Given an expression that CanEvaluateTruncated or CanEvaluateSExtd returns true for,...
bool SimplifyDemandedInstructionBits(Instruction &Inst)
Tries to simplify operands to an integer instruction based on its demanded bits.
Instruction * visitFPExt(CastInst &CI)
LoadInst * combineLoadToNewType(LoadInst &LI, Type *NewTy, const Twine &Suffix="")
Helper to combine a load to a new type.
The core instruction combiner logic.
const DataLayout & getDataLayout() const
unsigned ComputeMaxSignificantBits(const Value *Op, const Instruction *CxtI=nullptr, unsigned Depth=0) const
IRBuilder< TargetFolder, IRBuilderCallbackInserter > BuilderTy
An IRBuilder that automatically inserts new instructions into the worklist.
unsigned ComputeNumSignBits(const Value *Op, const Instruction *CxtI=nullptr, unsigned Depth=0) const
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
bool canBeCastedExactlyIntToFP(Value *V, Type *FPTy, bool IsSigned, const Instruction *CxtI=nullptr) const
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
Instruction * InsertNewInstWith(Instruction *New, BasicBlock::iterator Old)
Same as InsertNewInstBefore, but also sets the debug loc.
void computeKnownBits(const Value *V, KnownBits &Known, const Instruction *CxtI, unsigned Depth=0) const
bool isKnownExactCastIntToFP(CastInst &I) const
Return true if the cast from integer to FP can be proven to be exact for all possible inputs (the con...
bool MaskedValueIsZero(const Value *V, const APInt &Mask, const Instruction *CxtI=nullptr, unsigned Depth=0) const
const SimplifyQuery & getSimplifyQuery() const
LLVM_ABI void copyFastMathFlags(FastMathFlags FMF)
Convenience function for transferring all fast-math flag values to this instruction,...
static bool isBitwiseLogicOp(unsigned Opcode)
Determine if the Opcode is and/or/xor.
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
LLVM_ABI void setNonNeg(bool b=true)
Set or clear the nneg flag on this instruction, which must be a zext instruction.
LLVM_ABI bool hasNonNeg() const LLVM_READONLY
Determine whether the nneg flag is set.
LLVM_ABI FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
LLVM_ABI void setIsExact(bool b=true)
Set or clear the exact flag on this instruction, which must be an operator which supports this flag.
This class represents a cast from an integer to a pointer.
unsigned getAddressSpace() const
Returns the address space of this instruction's pointer type.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
This class represents a cast from a pointer to an address (non-capturing ptrtoint).
Value * getPointerOperand()
Gets the pointer operand.
This class represents a cast from a pointer to an integer.
Value * getPointerOperand()
Gets the pointer operand.
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
This class represents a sign extension of integer types.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, const Instruction *MDFrom=nullptr)
bool insert(const value_type &X)
Insert a new element into the SetVector.
This instruction constructs a fixed permutation of two input vectors.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class represents a truncation of integer types.
void setHasNoSignedWrap(bool B)
void setHasNoUnsignedWrap(bool B)
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI unsigned getIntegerBitWidth() const
bool isVectorTy() const
True if this is an instance of VectorType.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
bool isBFloatTy() const
Return true if this is 'bfloat', a 16-bit bfloat type.
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
LLVM_ABI Type * getWithNewType(Type *EltTy) const
Given vector type, change the element type, whilst keeping the old number of elements.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
bool isX86_AMXTy() const
Return true if this is X86 AMX.
bool isIntegerTy() const
True if this is an instance of IntegerType.
static LLVM_ABI Type * getDoubleTy(LLVMContext &C)
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
static LLVM_ABI Type * getFloatTy(LLVMContext &C)
LLVM_ABI int getFPMantissaWidth() const
Return the width of the mantissa of this type.
LLVM_ABI const fltSemantics & getFltSemantics() const
static LLVM_ABI Type * getBFloatTy(LLVMContext &C)
static LLVM_ABI Type * getHalfTy(LLVMContext &C)
'undef' values are things that do not have specified contents.
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
LLVMContext & getContext() const
All values hold a context through their type.
iterator_range< user_iterator > users()
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
static LLVM_ABI bool isValidElementType(Type *ElemTy)
Return true if the specified type is valid as a element type.
This class represents zero extension of integer types.
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ C
The default llvm calling convention, compatible with C.
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > OverloadTys={})
Look up the Function declaration of the intrinsic id in the Module M.
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
match_combine_or< Ty... > m_CombineOr(const Ty &...Ps)
Combine pattern matchers matching any of Ps patterns.
cst_pred_ty< is_lowbit_mask > m_LowBitMask()
Match an integer or vector with only the low bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
PtrToIntSameSize_match< OpTy > m_PtrToIntSameSize(const DataLayout &DL, const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
cst_pred_ty< is_sign_mask > m_SignMask()
Match an integer or vector with only the sign bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
match_deferred< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
specific_intval< true > m_SpecificIntAllowPoison(const APInt &V)
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
IntrinsicID_match m_VScale()
Matches a call to llvm.vscale().
auto m_BinOp()
Match an arbitrary binary operation and ignore it.
BinOpPred_match< LHS, RHS, is_logical_shift_op > m_LogicalShift(const LHS &L, const RHS &R)
Matches logical shift operations.
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > m_SMin(const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, UIToFPInst >, CastInst_match< OpTy, SIToFPInst > > m_IToFP(const OpTy &Op)
auto m_Value()
Match an arbitrary value and ignore it.
auto m_Constant()
Match an arbitrary Constant and ignore it.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
match_combine_or< CastInst_match< OpTy, FPToUIInst >, CastInst_match< OpTy, FPToSIInst > > m_FPToI(const OpTy &Op)
CastInst_match< OpTy, FPExtInst > m_FPExt(const OpTy &Op)
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
cst_pred_ty< is_negated_power2 > m_NegatedPower2()
Match an integer or vector negated power-of-2.
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches a Add with LHS and RHS in either order.
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
CastInst_match< OpTy, FPToSIInst > m_FPToSI(const OpTy &Op)
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty > m_SMax(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0, Opnd1 >::Ty m_Ctlz(const Opnd0 &Op0, const Opnd1 &Op1)
auto m_Undef()
Match an arbitrary undef constant.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
CastOperator_match< OpTy, Instruction::IntToPtr > m_IntToPtr(const OpTy &Op)
Matches IntToPtr.
ThreeOps_match< Val_t, Elt_t, Idx_t, Instruction::InsertElement > m_InsertElt(const Val_t &Val, const Elt_t &Elt, const Idx_t &Idx)
Matches InsertElementInst.
ElementWiseBitCast_match< OpTy > m_ElementWiseBitCast(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > m_UMin(const LHS &L, const RHS &R)
cst_pred_ty< icmp_pred_with_threshold > m_SpecificInt_ICMP(ICmpInst::Predicate Predicate, const APInt &Threshold)
Match an integer or vector with every element comparing 'pred' (eg/ne/...) to Threshold.
auto m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
friend class Instruction
Iterator for Instructions in a BasicBlock.
This is an optimization pass for GlobalISel generic memory operations.
FunctionAddr VTableAddr Value
LLVM_ABI KnownFPClass computeKnownFPClass(const Value *V, const APInt &DemandedElts, FPClassTest InterestedClasses, const SimplifyQuery &SQ, unsigned Depth=0)
Determine which floating-point classes are valid for V, and return them in KnownFPClass bit sets.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI Constant * ConstantFoldSelectInstruction(Constant *Cond, Constant *V1, Constant *V2)
Attempt to constant fold a select instruction with the specified operands.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
unsigned Log2_64_Ceil(uint64_t Value)
Return the ceil log base 2 of the specified value, 64 if the value is zero.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
LLVM_ABI Value * simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty, const SimplifyQuery &Q)
Given operands for a CastInst, fold the result or return null.
auto dyn_cast_or_null(const Y &Val)
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
auto reverse(ContainerTy &&C)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
SmallVector< ValueTypeFromRangeType< R >, Size > to_vector(R &&Range)
Given a range of type R, iterate the entire range and return a SmallVector with elements of the vecto...
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ABI bool replaceAllDbgUsesWith(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT)
Point debug users of From to To or salvage them.
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
@ SMax
Signed integer max implemented in terms of select(cmp()).
@ And
Bitwise or logical AND of integers.
@ SMin
Signed integer min implemented in terms of select(cmp()).
DWARFExpression::Operation Op
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer list are equal or the list is empty.
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
LLVM_ABI Constant * ConstantFoldIntegerCast(Constant *C, Type *DestTy, bool IsSigned, const DataLayout &DL)
Constant fold a zext, sext or trunc, depending on IsSigned and whether the DestTy is wider or narrowe...
LLVM_ABI bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the give value is known to be non-negative.
LLVM_ABI Constant * ConstantFoldBinaryInstruction(unsigned Opcode, Constant *V1, Constant *V2)
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
bool isKnownNever(FPClassTest Mask) const
Return true if it's known this can never be one of the mask entries.
SimplifyQuery getWithInstruction(const Instruction *I) const