28 #define DEBUG_TYPE "instcombine"
40 unsigned Opc = I->getOpcode();
42 case Instruction::Add:
43 case Instruction::Sub:
44 case Instruction::Mul:
45 case Instruction::And:
47 case Instruction::Xor:
48 case Instruction::AShr:
49 case Instruction::LShr:
50 case Instruction::Shl:
51 case Instruction::UDiv:
52 case Instruction::URem: {
56 if (Opc == Instruction::LShr || Opc == Instruction::AShr)
60 case Instruction::Trunc:
61 case Instruction::ZExt:
62 case Instruction::SExt:
66 if (I->getOperand(0)->getType() == Ty)
67 return I->getOperand(0);
72 Opc == Instruction::SExt);
74 case Instruction::Select: {
80 case Instruction::PHI: {
91 case Instruction::FPToUI:
92 case Instruction::FPToSI:
96 case Instruction::Call:
98 switch (II->getIntrinsicID()) {
101 case Intrinsic::vscale: {
103 I->getModule(), Intrinsic::vscale, {Ty});
110 case Instruction::ShuffleVector: {
130 InstCombinerImpl::isEliminableCastPair(const CastInst *CI1,
147 if ((Res == Instruction::IntToPtr && SrcTy != DstIntPtrTy) ||
148 (Res == Instruction::PtrToInt && DstTy != SrcIntPtrTy))
170 if (CSrc->hasOneUse())
183 if (!Cmp || Cmp->getOperand(0)->getType() != Sel->getType() ||
189 if (CI.getOpcode() != Instruction::BitCast ||
219 if (SrcTy && DestTy &&
220 SrcTy->getNumElements() == DestTy->getNumElements() &&
221 SrcTy->getPrimitiveSizeInBits() == DestTy->getPrimitiveSizeInBits()) {
276 Type *OrigTy = V->getType();
277 switch (I->getOpcode()) {
278 case Instruction::Add:
279 case Instruction::Sub:
280 case Instruction::Mul:
281 case Instruction::And:
282 case Instruction::Or:
283 case Instruction::Xor:
288 case Instruction::UDiv:
289 case Instruction::URem: {
304 case Instruction::Shl: {
315 case Instruction::LShr: {
330 auto DemandedBits = Trunc->getType()->getScalarSizeInBits();
341 case Instruction::AShr: {
351 unsigned ShiftedBits = OrigBitWidth - BitWidth;
358 case Instruction::Trunc:
361 case Instruction::ZExt:
362 case Instruction::SExt:
366 case Instruction::Select: {
371 case Instruction::PHI: {
381 case Instruction::FPToUI:
382 case Instruction::FPToSI: {
386 Type *InputTy = I->getOperand(0)->getType()->getScalarType();
390 I->getOpcode() == Instruction::FPToSI);
391 return Ty->getScalarSizeInBits() >= MinBitWidth;
393 case Instruction::ShuffleVector:
417 Value *VecInput = nullptr;
426 unsigned VecWidth = VecType->getPrimitiveSizeInBits();
428 unsigned ShiftAmount = ShiftVal ? ShiftVal->getZExtValue() : 0;
430 if ((VecWidth % DestWidth != 0) || (ShiftAmount % DestWidth != 0))
435 unsigned NumVecElts = VecWidth / DestWidth;
436 if (VecType->getElementType() != DestType) {
441 unsigned Elt = ShiftAmount / DestWidth;
443 Elt = NumVecElts - 1 - Elt;
463 Type *SrcType = Src->getType();
469 unsigned DstBits = DstType->getScalarSizeInBits();
470 unsigned TruncRatio = SrcBits / DstBits;
471 if ((SrcBits % DstBits) != 0)
476 const APInt *ShiftAmount = nullptr;
484 auto VecElts = VecOpTy->getElementCount();
486 uint64_t BitCastNumElts = VecElts.getKnownMinValue() * TruncRatio;
489 ? (VecOpIdx + 1) * TruncRatio - 1
490 : VecOpIdx * TruncRatio;
496 if (ShiftAmount->uge(SrcBits) || ShiftAmount->urem(DstBits) != 0
504 assert(BitCastNumElts <= std::numeric_limits<uint32_t>::max() &&
505 NewIdx <= std::numeric_limits<uint32_t>::max() && "overflow 32-bits");
518 "Don't narrow to an illegal scalar type");
530 BinaryOperator *Or0, *Or1;
534 Value *ShVal0, *ShVal1, *ShAmt0, *ShAmt1;
541 if (Or0->getOpcode() == BinaryOperator::LShr) {
547 Or1->getOpcode() == BinaryOperator::LShr &&
548 "Illegal or(shift,shift) pair");
557 unsigned MaxShiftAmountWidth = Log2_32(NarrowWidth);
558 APInt HiBitMask = ~APInt::getLowBitsSet(WideWidth, MaxShiftAmountWidth);
565 if (ShVal0 != ShVal1)
571 unsigned Mask = Width - 1;
584 Value *ShAmt = matchShiftAmount(ShAmt0, ShAmt1, NarrowWidth);
587 ShAmt = matchShiftAmount(ShAmt1, ShAmt0, NarrowWidth);
605 Value *NarrowShAmt = Builder.CreateZExtOrTrunc(ShAmt, DestTy);
608 X = Y = Builder.CreateTrunc(ShVal0, DestTy);
609 if (ShVal0 != ShVal1)
610 Y = Builder.CreateTrunc(ShVal1, DestTy);
611 Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
628 BinaryOperator *BinOp;
635 case Instruction::And:
636 case Instruction::Or:
637 case Instruction::Xor:
638 case Instruction::Add:
639 case Instruction::Sub:
640 case Instruction::Mul: {
667 case Instruction::LShr:
668 case Instruction::AShr: {
673 unsigned MaxShiftAmt = SrcWidth - DestWidth;
677 APInt(SrcWidth, MaxShiftAmt)))) {
679 bool IsExact = OldShift->isExact();
684 OldShift->getOpcode() == Instruction::AShr
685 ? Builder.CreateAShr(A, ShAmt, OldShift->getName(), IsExact)
686 : Builder.CreateLShr(A, ShAmt, OldShift->getName(), IsExact);
696 if (Instruction *NarrowOr = narrowFunnelShift(Trunc))
708 if (Shuf && Shuf->hasOneUse() && match(Shuf->getOperand(1), m_Undef()) &&
712 ->getElementCount())) {
717 Value *NarrowOp = Builder.CreateTrunc(Shuf->getOperand(0), NewTruncTy);
732 assert((Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) &&
733 "Unexpected instruction for shrinking");
736 if (!InsElt || !InsElt->hasOneUse())
741 Value *VecOp = InsElt->getOperand(0);
742 Value *ScalarOp = InsElt->getOperand(1);
743 Value *Index = InsElt->getOperand(2);
749 Value *NarrowOp = Builder.CreateCast(Opcode, ScalarOp, DestScalarTy);
761 Type *DestTy = Trunc.getType(), *SrcTy = Src->getType();
763 unsigned SrcWidth = SrcTy->getScalarSizeInBits();
769 if ((DestTy->isVectorTy() || shouldChangeType(SrcTy, DestTy)) &&
775 dbgs() << "ICE: EvaluateInDifferentType converting expression type"
788 if (DestWidth * 2 < SrcWidth) {
789 auto *NewDestTy = DestITy->getExtendedType();
790 if (shouldChangeType(SrcTy, NewDestTy) &&
793 dbgs() << "ICE: EvaluateInDifferentType converting expression type"
794 " to reduce the width of operand of"
807 if (DestWidth == 1) {
862 unsigned AWidth = A->getType()->getScalarSizeInBits();
863 unsigned MaxShiftAmt = SrcWidth - std::max(DestWidth, AWidth);
865 bool IsExact = OldSh->isExact();
870 APInt(SrcWidth, MaxShiftAmt)))) {
871 auto GetNewShAmt = [&](unsigned Width) {
872 Constant *MaxAmt = ConstantInt::get(SrcTy, Width - 1, false);
881 if (A->getType() == DestTy) {
882 Constant *ShAmt = GetNewShAmt(DestWidth);
884 return IsExact ? BinaryOperator::CreateExactAShr(A, ShAmt)
885 : BinaryOperator::CreateAShr(A, ShAmt);
889 if (Src->hasOneUse()) {
890 Constant *ShAmt = GetNewShAmt(AWidth);
907 if (Src->hasOneUse() &&
915 APInt Threshold = APInt(C->getType()->getScalarSizeInBits(), DestWidth);
917 Value *NewTrunc = Builder.CreateTrunc(A, DestTy, A->getName() + ".tr");
933 unsigned AWidth = A->getType()->getScalarSizeInBits();
934 if (AWidth == DestWidth && AWidth > Log2_32(SrcWidth)) {
935 Value *WidthDiff = ConstantInt::get(A->getType(), SrcWidth - AWidth);
938 return BinaryOperator::CreateAdd(NarrowCtlz, WidthDiff);
948 if (Log2_32(*MaxVScale) < DestWidth)
953 if (DestWidth == 1 &&
996 return Changed ? &Trunc : nullptr;
1016 Value *In = Cmp->getOperand(0);
1017 Value *Sh = ConstantInt::get(In->getType(),
1018 In->getType()->getScalarSizeInBits() - 1);
1019 In = Builder.CreateLShr(In, Sh, In->getName() + ".lobit");
1020 if (In->getType() != Zext.getType())
1021 In = Builder.CreateIntCast(In, Zext.getType(), false);
1031 if (Op1CV->isZero() && Cmp->isEquality()) {
1036 uint32_t ShAmt = KnownZeroMask.logBase2();
1037 bool IsExpectShAmt = KnownZeroMask.isPowerOf2() &&
1039 if (IsExpectShAmt &&
1040 (Cmp->getOperand(0)->getType() == Zext.getType() ||
1042 Value *In = Cmp->getOperand(0);
1046 In = Builder.CreateLShr(In, ConstantInt::get(In->getType(), ShAmt),
1047 In->getName() + ".lobit");
1052 In = Builder.CreateXor(In, ConstantInt::get(In->getType(), 1));
1063 if (Cmp->isEquality()) {
1072 Value *Shift = And->getOperand(X == And->getOperand(0) ? 1 : 0);
1079 Builder.CreateAnd(Lshr, ConstantInt::get(X->getType(), 1));
1117 switch (I->getOpcode()) {
1118 case Instruction::ZExt:
1119 case Instruction::SExt:
1120 case Instruction::Trunc:
1122 case Instruction::And:
1123 case Instruction::Or:
1124 case Instruction::Xor:
1125 case Instruction::Add:
1126 case Instruction::Sub:
1127 case Instruction::Mul:
1132 if (BitsToClear == 0 && Tmp == 0)
1137 if (Tmp == 0 && I->isBitwiseLogicOp()) {
1140 unsigned VSize = V->getType()->getScalarSizeInBits();
1146 if (I->getOpcode() == Instruction::And)
1155 case Instruction::Shl: {
1162 BitsToClear = ShiftAmt < BitsToClear ? BitsToClear - ShiftAmt : 0;
1167 case Instruction::LShr: {
1174 BitsToClear += ShiftAmt;
1175 if (BitsToClear > V->getType()->getScalarSizeInBits())
1176 BitsToClear = V->getType()->getScalarSizeInBits();
1182 case Instruction::Select:
1191 case Instruction::PHI: {
1206 case Instruction::Call:
1210 if (II->getIntrinsicID() == Intrinsic::vscale)
1231 Type *SrcTy = Src->getType(), *DestTy = Zext.getType();
1234 if (SrcTy->isIntOrIntVectorTy(1) && Zext.hasNonNeg())
1238 unsigned BitsToClear;
1239 if (shouldChangeType(SrcTy, DestTy) &&
1242 "Can't clear more bits than in SrcTy");
1246 dbgs() << "ICE: EvaluateInDifferentType converting expression type"
1247 " to avoid zero extend: "
1254 if (SrcOp->hasOneUse())
1257 uint32_t SrcBitsKept = SrcTy->getScalarSizeInBits() - BitsToClear;
1270 return BinaryOperator::CreateAnd(Res, C);
1281 Value *A = CSrc->getOperand(0);
1282 unsigned SrcSize = A->getType()->getScalarSizeInBits();
1283 unsigned MidSize = CSrc->getType()->getScalarSizeInBits();
1289 if (SrcSize < DstSize) {
1291 Constant *AndConst = ConstantInt::get(A->getType(), AndValue);
1296 if (SrcSize == DstSize) {
1298 return BinaryOperator::CreateAnd(A, ConstantInt::get(A->getType(),
1301 if (SrcSize > DstSize) {
1304 return BinaryOperator::CreateAnd(Trunc,
1305 ConstantInt::get(Trunc->getType(),
1311 return transformZExtICmp(Cmp, Zext);
1317 X->getType() == DestTy)
1318 return BinaryOperator::CreateAnd(X, Builder.CreateZExt(C, DestTy));
1324 X->getType() == DestTy) {
1326 return BinaryOperator::CreateXor(Builder.CreateAnd(X, ZC), ZC);
1335 X->getType() == DestTy) {
1337 return BinaryOperator::CreateAnd(X, ZextC);
1346 unsigned TypeWidth = Src->getType()->getScalarSizeInBits();
1347 if (Log2_32(*MaxVScale) < TypeWidth)
1356 SrcTy->getScalarSizeInBits() >
1375 Value *Op0 = Cmp->getOperand(0), *Op1 = Cmp->getOperand(1);
1379 if (!Op1->getType()->isIntOrIntVectorTy())
1386 Value *In = Builder.CreateAShr(Op0, Sh, Op0->getName() + ".lobit");
1387 if (In->getType() != Sext.getType())
1388 In = Builder.CreateIntCast(In, Sext.getType(), true);
1397 if (Cmp->hasOneUse() &&
1398 Cmp->isEquality() && (Op1C->isZero() || Op1C->getValue().isPowerOf2())){
1402 if (KnownZeroMask.isPowerOf2()) {
1403 Value *In = Cmp->getOperand(0);
1406 if (!Op1C->isZero() && Op1C->getValue() != KnownZeroMask) {
1416 unsigned ShiftAmt = KnownZeroMask.countr_zero();
1420 ConstantInt::get(In->getType(), ShiftAmt));
1430 unsigned ShiftAmt = KnownZeroMask.countl_zero();
1434 ConstantInt::get(In->getType(), ShiftAmt));
1437 In = Builder.CreateAShr(In, ConstantInt::get(In->getType(),
1438 KnownZeroMask.getBitWidth() - 1), "sext");
1459 assert(V->getType()->getScalarSizeInBits() < Ty->getScalarSizeInBits() &&
1460 "Can't sign extend type to a smaller type");
1467 switch (I->getOpcode()) {
1468 case Instruction::SExt:
1469 case Instruction::ZExt:
1470 case Instruction::Trunc:
1472 case Instruction::And:
1473 case Instruction::Or:
1474 case Instruction::Xor:
1475 case Instruction::Add:
1476 case Instruction::Sub:
1477 case Instruction::Mul:
1485 case Instruction::Select:
1489 case Instruction::PHI: {
1516 Type *SrcTy = Src->getType(), *DestTy = Sext.getType();
1523 CI->setNonNeg(true);
1531 dbgs() << "ICE: EvaluateInDifferentType converting expression type"
1532 " to avoid sign extend: "
1543 Value *ShAmt = ConstantInt::get(DestTy, DestBitSize-SrcBitSize);
1544 return BinaryOperator::CreateAShr(Builder.CreateShl(Res, ShAmt, "sext"),
1552 unsigned XBitSize = X->getType()->getScalarSizeInBits();
1557 if (Src->hasOneUse() && X->getType() == DestTy) {
1559 Constant *ShAmt = ConstantInt::get(DestTy, DestBitSize - SrcBitSize);
1560 return BinaryOperator::CreateAShr(Builder.CreateShl(X, ShAmt), ShAmt);
1568 if (Src->hasOneUse() &&
1577 return transformSExtICmp(Cmp, Sext);
1594 Constant *BA = nullptr, *CA = nullptr;
1600 assert(WideCurrShAmt && "Constant folding of ImmConstant cannot fail");
1609 return BinaryOperator::CreateAShr(A, NewShAmt);
1617 Type *XTy = X->getType();
1619 Constant *ShlAmtC = ConstantInt::get(XTy, XBitSize - SrcBitSize);
1620 Constant *AshrAmtC = ConstantInt::get(XTy, XBitSize - 1);
1622 return BinaryOperator::CreateAShr(Builder.CreateShl(X, ShlAmtC),
1636 if (Log2_32(*MaxVScale) < (SrcBitSize - 1))
1653 bool PreferBFloat) {
1674 if (Ty->getScalarType()->isPPC_FP128Ty())
1694 Type *MinType = nullptr;
1696 unsigned NumElts = CVVTy->getNumElements();
1700 for (unsigned i = 0; i != NumElts; ++i) {
1725 return FPExt->getOperand(0)->getType();
1746 return V->getType();
1753 assert((Opcode == CastInst::SIToFP || Opcode == CastInst::UIToFP) &&
1755 Value *Src = I.getOperand(0);
1756 Type *SrcTy = Src->getType();
1757 Type *FPTy = I.getType();
1758 bool IsSigned = Opcode == Instruction::SIToFP;
1759 int SrcSize = (int)SrcTy->getScalarSizeInBits() - IsSigned;
1764 if (SrcSize <= DestNumSigBits)
1773 int SrcNumSigBits = F->getType()->getFPMantissaWidth();
1780 if (SrcNumSigBits > 0 && DestNumSigBits > 0 &&
1781 SrcNumSigBits <= DestNumSigBits)
1789 int SigBits = (int)SrcTy->getScalarSizeInBits() -
1792 if (SigBits <= DestNumSigBits)
1811 if (BO && BO->hasOneUse()) {
1812 bool PreferBFloat = Ty->getScalarType()->isBFloatTy();
1815 unsigned OpWidth = BO->getType()->getFPMantissaWidth();
1818 unsigned SrcWidth = std::max(LHSWidth, RHSWidth);
1819 unsigned DstWidth = Ty->getFPMantissaWidth();
1820 switch (BO->getOpcode()) {
1822 case Instruction::FAdd:
1823 case Instruction::FSub:
1842 if (OpWidth >= 2*DstWidth+1 && DstWidth >= SrcWidth) {
1843 Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty);
1844 Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty);
1850 case Instruction::FMul:
1856 if (OpWidth >= LHSWidth + RHSWidth && DstWidth >= SrcWidth) {
1857 Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty);
1858 Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty);
1862 case Instruction::FDiv:
1869 if (OpWidth >= 2*DstWidth && DstWidth >= SrcWidth) {
1870 Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty);
1871 Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty);
1875 case Instruction::FRem: {
1880 if (SrcWidth == OpWidth)
1883 if (LHSWidth == SrcWidth) {
1884 LHS = Builder.CreateFPTrunc(BO->getOperand(0), LHSMinType);
1885 RHS = Builder.CreateFPTrunc(BO->getOperand(1), LHSMinType);
1887 LHS = Builder.CreateFPTrunc(BO->getOperand(0), RHSMinType);
1888 RHS = Builder.CreateFPTrunc(BO->getOperand(1), RHSMinType);
1891 Value *ExactResult = Builder.CreateFRemFMF(LHS, RHS, BO);
1900 if (Op && Op->hasOneUse()) {
1903 FMF &= FPMO->getFastMathFlags();
1915 X->getType() == Ty) {
1919 Builder.CreateSelectFMF(Cond, X, NarrowY, FMF, "narrow.sel", Op);
1923 X->getType() == Ty) {
1927 Builder.CreateSelectFMF(Cond, NarrowY, X, FMF, "narrow.sel", Op);
1933 switch (II->getIntrinsicID()) {
1935 case Intrinsic::ceil:
1936 case Intrinsic::fabs:
1937 case Intrinsic::floor:
1938 case Intrinsic::nearbyint:
1939 case Intrinsic::rint:
1940 case Intrinsic::round:
1941 case Intrinsic::roundeven:
1942 case Intrinsic::trunc: {
1943 Value *Src = II->getArgOperand(0);
1944 if (!Src->hasOneUse())
1950 if (II->getIntrinsicID() != Intrinsic::fabs) {
1952 if (!FPExtSrc || FPExtSrc->getSrcTy() != Ty)
1962 II->getOperandBundlesAsDefs(OpBundles);
2009 Value *X = OpI->getOperand(0);
2010 Type *XType = X->getType();
2027 if (OutputSize > OpI->getType()->getFPMantissaWidth())
2033 if (IsInputSigned && IsOutputSigned)
2040 assert(XType == DestType && "Unexpected types for int to FP to int casts");
2092 UI->setNonNeg(true);
2104 DL.getPointerSizeInBits(AS)) {
2116 auto UsesPointerAsInt = [](User *U) {
2127 Base->getType()->getPointerAddressSpace() &&
2144 if (!GEP || !GEP->hasOneUse())
2147 Ptr = GEP->getPointerOperand();
2151 if (GEPs.empty() || PtrTy != Ptr->getType())
2156 Type *IdxTy = DL.getIndexType(PtrTy);
2158 Res->getType() == IntTy && IntTy == IdxTy) {
2171 return Builder.CreateZExtOrTrunc(Res, IntTy);
2182 unsigned TySize = Ty->getScalarSizeInBits();
2183 unsigned PtrSize = DL.getPointerSizeInBits(AS);
2184 if (TySize != PtrSize) {
2197 Mask->getType() == Ty)
2198 return BinaryOperator::CreateAnd(Builder.CreatePtrToInt(Ptr, Ty), Mask);
2203 Value *Vec, *Scalar, *Index;
2210 Value *NewCast = Builder.CreatePtrToInt(Scalar, Ty->getScalarType());
2246 if (SrcTy->getElementType() != DestTy->getElementType()) {
2251 if (SrcTy->getElementType()->getPrimitiveSizeInBits() !=
2252 DestTy->getElementType()->getPrimitiveSizeInBits())
2265 assert(SrcElts != DestElts && "Element counts should be different.");
2274 if (SrcElts > DestElts) {
2283 ShuffleMask = ShuffleMaskStorage;
2285 ShuffleMask = ShuffleMask.take_back(DestElts);
2287 ShuffleMask = ShuffleMask.take_front(DestElts);
2298 unsigned DeltaElts = DestElts - SrcElts;
2300 ShuffleMaskStorage.insert(ShuffleMaskStorage.begin(), DeltaElts, NullElt);
2302 ShuffleMaskStorage.append(DeltaElts, NullElt);
2303 ShuffleMask = ShuffleMaskStorage;
2310 return Value % Ty->getPrimitiveSizeInBits() == 0;
2314 return Value / Ty->getPrimitiveSizeInBits();
2331 "Shift should be a multiple of the element type size");
2338 if (V->getType() == VecEltTy) {
2341 if (C->isNullValue())
2346 ElementIndex = Elements.size() - ElementIndex - 1;
2349 if (Elements[ElementIndex])
2352 Elements[ElementIndex] = V;
2371 C->getType()->getPrimitiveSizeInBits()));
2375 for (unsigned i = 0; i != NumElts; ++i) {
2376 unsigned ShiftI = i * ElementSize;
2378 Instruction::LShr, C, ConstantInt::get(C->getType(), ShiftI));
2390 if (!V->hasOneUse()) return false;
2393 if (!I) return false;
2394 switch (I->getOpcode()) {
2395 default: return false;
2396 case Instruction::BitCast:
2397 if (I->getOperand(0)->getType()->isVectorTy())
2401 case Instruction::ZExt:
2403 I->getOperand(0)->getType()->getPrimitiveSizeInBits(),
2408 case Instruction::Or:
2413 case Instruction::Shl: {
2416 if (!CI) return false;
2453 DestVecTy->getElementType(),
2461 for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
2462 if (!Elements[i]) continue;
2477 Value *VecOp, *Index;
2495 if (DestType->isVectorTy() && FixedVType && FixedVType->getNumElements() == 1)
2522 if (X->getType()->isFPOrFPVectorTy() &&
2523 Y->getType()->isIntOrIntVectorTy()) {
2525 Builder.CreateBitCast(BO->getOperand(0), Y->getType());
2529 if (X->getType()->isIntOrIntVectorTy() &&
2530 Y->getType()->isFPOrFPVectorTy()) {
2532 Builder.CreateBitCast(BO->getOperand(1), X->getType());
2566 Value *CastedC = Builder.CreateBitCast(C, DestTy);
2586 CondVTy->getElementCount() !=
2602 Value *CastedVal = Builder.CreateBitCast(FVal, DestTy);
2609 Value *CastedVal = Builder.CreateBitCast(TVal, DestTy);
2640 Type *SrcTy = Src->getType();
2644 SmallSetVector<PHINode *, 4> OldPhiNodes;
2652 while (!PhiWorklist.empty()) {
2654 for (Value *IncValue : OldPN->incoming_values()) {
2663 Value *Addr = LI->getOperand(0);
2672 if (LI->hasOneUse() && LI->isSimple())
2680 if (OldPhiNodes.insert(PNode))
2691 Type *TyA = BCI->getOperand(0)->getType();
2692 Type *TyB = BCI->getType();
2693 if (TyA != DestTy || TyB != SrcTy)
2700 for (auto *OldPN : OldPhiNodes) {
2701 for (User *V : OldPN->users()) {
2703 if (!SI->isSimple() || SI->getOperand(0) != OldPN)
2707 Type *TyB = BCI->getOperand(0)->getType();
2708 Type *TyA = BCI->getType();
2709 if (TyA != DestTy || TyB != SrcTy)
2715 if (!OldPhiNodes.contains(PHI))
2724 SmallDenseMap<PHINode *, PHINode *> NewPNodes;
2725 for (auto *OldPN : OldPhiNodes) {
2726 Builder.SetInsertPoint(OldPN);
2727 PHINode *NewPN = Builder.CreatePHI(DestTy, OldPN->getNumOperands());
2728 NewPNodes[OldPN] = NewPN;
2732 for (auto *OldPN : OldPhiNodes) {
2733 PHINode *NewPN = NewPNodes[OldPN];
2734 for (unsigned j = 0, e = OldPN->getNumOperands(); j != e; ++j) {
2735 Value *V = OldPN->getOperand(j);
2736 Value *NewV = nullptr;
2749 NewV = BCI->getOperand(0);
2751 NewV = NewPNodes[PrevPN];
2754 NewPN->addIncoming(NewV, OldPN->getIncomingBlock(j));
2768 for (auto *OldPN : OldPhiNodes) {
2769 PHINode *NewPN = NewPNodes[OldPN];
2772 assert(SI->isSimple() && SI->getOperand(0) == OldPN);
2776 SI->setOperand(0, NewBC);
2781 Type *TyB = BCI->getOperand(0)->getType();
2782 Type *TyA = BCI->getType();
2783 assert(TyA == DestTy && TyB == SrcTy);
2814 if (X->getType() != FTy)
2819 return Builder.CreateCopySign(Builder.CreateBitCast(Y, FTy), X);
2826 Type *SrcTy = Src->getType();
2831 if (DestTy == Src->getType())
2857 if (SrcVTy->getNumElements() == 1) {
2862 Builder.CreateExtractElement(Src,
2871 return new BitCastInst(InsElt->getOperand(1), DestTy);
2881 Y->getType()->isIntegerTy() && isDesirableIntType(BitWidth)) {
2883 if (DL.isBigEndian())
2884 IndexC = SrcVTy->getNumElements() - 1 - IndexC;
2890 unsigned EltWidth = Y->getType()->getScalarSizeInBits();
2894 return BinaryOperator::CreateOr(AndX, ZextY);
2902 Value *ShufOp0 = Shuf->getOperand(0);
2903 Value *ShufOp1 = Shuf->getOperand(1);
2906 if (Shuf->hasOneUse() && DestTy->isVectorTy() &&
2908 ShufElts == SrcVecElts) {
2929 if (DestTy->isIntegerTy() && ShufElts.getKnownMinValue() % 2 == 0 &&
2930 Shuf->hasOneUse() && Shuf->isReverse()) {
2931 unsigned IntrinsicNum = 0;
2933 SrcTy->getScalarSizeInBits() == 8) {
2934 IntrinsicNum = Intrinsic::bswap;
2935 } else if (SrcTy->getScalarSizeInBits() == 1) {
2936 IntrinsicNum = Intrinsic::bitreverse;
2938 if (IntrinsicNum != 0) {
2939 assert(ShufOp0->getType() == SrcTy && "Unexpected shuffle mask");
2943 Value *ScalarX = Builder.CreateBitCast(ShufOp0, DestTy);
This file implements a class to represent arbitrary precision integral constant values and operations...
static bool collectInsertionElements(Value *V, unsigned Shift, SmallVectorImpl< Value * > &Elements, Type *VecEltTy, bool isBigEndian)
V is a value which is inserted into a vector of VecEltTy.
static bool canEvaluateSExtd(Value *V, Type *Ty)
Return true if we can take the specified value and return it as type Ty without inserting any new cas...
static bool hasStoreUsersOnly(CastInst &CI)
Check if all users of CI are StoreInsts.
static Value * foldCopySignIdioms(BitCastInst &CI, InstCombiner::BuilderTy &Builder, const SimplifyQuery &SQ)
Fold (bitcast (or (and (bitcast X to int), signmask), nneg Y) to fp) to copysign((bitcast Y to fp),...
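A minimal IRBuilder sketch of the replacement form this fold produces, assuming X is the FP value supplying the sign and YInt is the known-non-negative integer supplying the magnitude (names and types are illustrative, not from the file):
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
static llvm::Value *buildCopySignSketch(llvm::IRBuilder<> &B, llvm::Value *X,
                                        llvm::Value *YInt, llvm::Type *FPTy) {
  // Magnitude: reinterpret the non-negative integer bits as FP.
  llvm::Value *Mag = B.CreateBitCast(YInt, FPTy);
  // Sign: taken from X, i.e. llvm.copysign(Mag, X).
  return B.CreateBinaryIntrinsic(llvm::Intrinsic::copysign, Mag, X);
}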
static Type * shrinkFPConstantVector(Value *V, bool PreferBFloat)
static bool canEvaluateZExtd(Value *V, Type *Ty, unsigned &BitsToClear, InstCombinerImpl &IC, Instruction *CxtI)
Determine if the specified value can be computed in the specified wider type and produce the same low...
static Instruction * canonicalizeBitCastExtElt(BitCastInst &BitCast, InstCombinerImpl &IC)
Canonicalize scalar bitcasts of extracted elements into a bitcast of the vector followed by extract e...
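A hedged sketch of the canonical shape (helper name and types are illustrative): bitcast the whole vector first and then extract, rather than bitcasting the extracted scalar.
#include "llvm/IR/IRBuilder.h"
static llvm::Value *extractFromBitcastVector(llvm::IRBuilder<> &B,
                                             llvm::Value *Vec,
                                             llvm::VectorType *NewVecTy,
                                             uint64_t Idx) {
  // (bitcast (extractelement V, Idx)) --> (extractelement (bitcast V), Idx)
  llvm::Value *CastVec = B.CreateBitCast(Vec, NewVecTy);
  return B.CreateExtractElement(CastVec, Idx);
}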
static Instruction * shrinkSplatShuffle(TruncInst &Trunc, InstCombiner::BuilderTy &Builder)
Try to narrow the width of a splat shuffle.
static Instruction * foldFPtoI(Instruction &FI, InstCombiner &IC)
static Instruction * foldBitCastSelect(BitCastInst &BitCast, InstCombiner::BuilderTy &Builder)
Change the type of a select if we can eliminate a bitcast.
static Instruction * foldBitCastBitwiseLogic(BitCastInst &BitCast, InstCombiner::BuilderTy &Builder)
Change the type of a bitwise logic operation if we can eliminate a bitcast.
static bool fitsInFPType(APFloat F, const fltSemantics &Sem)
Return a Constant* for the specified floating-point constant if it fits in the specified FP type with...
static Instruction * optimizeVectorResizeWithIntegerBitCasts(Value *InVal, VectorType *DestTy, InstCombinerImpl &IC)
This input value (which is known to have vector type) is being zero extended or truncated to the spec...
static Instruction * shrinkInsertElt(CastInst &Trunc, InstCombiner::BuilderTy &Builder)
Try to narrow the width of an insert element.
static Type * getMinimumFPType(Value *V, bool PreferBFloat)
Find the minimum FP type we can safely truncate to.
static bool isMultipleOfTypeSize(unsigned Value, Type *Ty)
static Value * optimizeIntegerToVectorInsertions(BitCastInst &CI, InstCombinerImpl &IC)
If the input is an 'or' instruction, we may be doing shifts and ors to assemble the elements of the v...
static bool canAlwaysEvaluateInType(Value *V, Type *Ty)
Constants and extensions/truncates from the destination type are always free to be evaluated in that ...
static Type * shrinkFPConstant(LLVMContext &Ctx, const APFloat &F, bool PreferBFloat)
static Instruction * foldVecExtTruncToExtElt(TruncInst &Trunc, InstCombinerImpl &IC)
Whenever an element is extracted from a vector, optionally shifted down, and then truncated,...
static bool canNotEvaluateInType(Value *V, Type *Ty)
Filter out values that we can not evaluate in the destination type for free.
static bool isKnownExactCastIntToFP(CastInst &I, InstCombinerImpl &IC)
Return true if the cast from integer to FP can be proven to be exact for all possible inputs (the con...
static unsigned getTypeSizeIndex(unsigned Value, Type *Ty)
static Instruction * foldVecTruncToExtElt(TruncInst &Trunc, InstCombinerImpl &IC)
Given a vector that is bitcast to an integer, optionally logically right-shifted, and truncated,...
static bool canEvaluateTruncated(Value *V, Type *Ty, InstCombinerImpl &IC, Instruction *CxtI)
Return true if we can evaluate the specified expression tree as type Ty instead of its larger type,...
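A minimal sketch of the rewrite this predicate enables, assuming the expression is a single add (the helper name is illustrative): instead of computing in the wide type and truncating the result, truncate the operands and compute in the narrow type.
#include "llvm/IR/IRBuilder.h"
// Before: %w = add i64 %a, %b ; %t = trunc i64 %w to i32
// After:  %t = add i32 (trunc i64 %a to i32), (trunc i64 %b to i32)
static llvm::Value *narrowAddSketch(llvm::IRBuilder<> &B, llvm::Value *A,
                                    llvm::Value *BOp, llvm::Type *NarrowTy) {
  llvm::Value *NA = B.CreateTrunc(A, NarrowTy);
  llvm::Value *NB = B.CreateTrunc(BOp, NarrowTy);
  return B.CreateAdd(NA, NB); // low bits equal trunc(add %a, %b)
}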
This file provides internal interfaces used to implement the InstCombine.
This file provides the interface for the instcombine pass implementation.
This file implements a set that has insertion order iteration characteristics.
static const fltSemantics & IEEEsingle()
static const fltSemantics & BFloat()
static constexpr roundingMode rmNearestTiesToEven
static const fltSemantics & IEEEdouble()
static const fltSemantics & IEEEhalf()
static LLVM_ABI unsigned int semanticsIntSizeInBits(const fltSemantics &, bool)
Class for arbitrary precision integers.
LLVM_ABI APInt udiv(const APInt &RHS) const
Unsigned division operation.
uint64_t getZExtValue() const
Get zero extended value.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
int32_t exactLogBase2() const
unsigned countr_zero() const
Count the number of trailing zero bits.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
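For concreteness, assumed 32-bit examples of the mask constructors documented above:
#include "llvm/ADT/APInt.h"
llvm::APInt Lo   = llvm::APInt::getLowBitsSet(32, 8);   // 0x000000FF
llvm::APInt Hi   = llvm::APInt::getHighBitsSet(32, 8);  // 0xFF000000
llvm::APInt From = llvm::APInt::getBitsSetFrom(32, 8);  // 0xFFFFFF00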
unsigned countr_one() const
Count the number of trailing one bits.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
This class represents a conversion between pointers from one address space to another.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Functions, function parameters, and return types can have attributes to indicate how they should be t...
LLVM_ABI std::optional< unsigned > getVScaleRangeMax() const
Returns the maximum value for the vscale_range attribute or std::nullopt when unknown.
BinaryOps getOpcode() const
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
static BinaryOperator * CreateFMulFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
static BinaryOperator * CreateFDivFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
This class represents a no-op cast from one type to another.
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
This is the base class for all instructions that perform data casts.
Type * getSrcTy() const
Return the source type, as a convenience.
Instruction::CastOps getOpcode() const
Return the opcode of this CastInst.
static LLVM_ABI unsigned isEliminableCastPair(Instruction::CastOps firstOpcode, Instruction::CastOps secondOpcode, Type *SrcTy, Type *MidTy, Type *DstTy, const DataLayout *DL)
Determine how a pair of casts can be eliminated, if they can be at all.
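An illustrative query using the signature documented above (the concrete types are assumptions for the example): ask whether a zext i8->i32 followed by a trunc i32->i16 collapses into one cast.
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
static unsigned foldZExtThenTrunc(llvm::LLVMContext &Ctx,
                                  const llvm::DataLayout &DL) {
  llvm::Type *I8 = llvm::Type::getInt8Ty(Ctx);
  llvm::Type *I32 = llvm::Type::getInt32Ty(Ctx);
  llvm::Type *I16 = llvm::Type::getInt16Ty(Ctx);
  // A non-zero result names the single replacement cast opcode
  // (a ZExt i8 -> i16 would be expected here); zero means no fold.
  return llvm::CastInst::isEliminableCastPair(
      llvm::Instruction::ZExt, llvm::Instruction::Trunc, I8, I32, I16, &DL);
}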
static LLVM_ABI CastInst * CreateIntegerCast(Value *S, Type *Ty, bool isSigned, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt, BitCast, or Trunc for int -> int casts.
static LLVM_ABI CastInst * CreateFPCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create an FPExt, BitCast, or FPTrunc for fp -> fp casts.
static LLVM_ABI CastInst * CreateTruncOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a Trunc or BitCast cast instruction.
static LLVM_ABI CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, a PtrToInt, or an IntToPTr cast instruction.
static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
Type * getDestTy() const
Return the destination type, as a convenience.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_SLT
signed less than
@ ICMP_UGE
unsigned greater or equal
@ ICMP_ULT
unsigned less than
@ ICMP_ULE
unsigned less or equal
static LLVM_ABI Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getBitCast(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
ConstantFP - Floating Point Values [float, double].
const APFloat & getValueAPF() const
This is the shared class of boolean and integer constants.
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
This is an important base class in LLVM.
static LLVM_ABI Constant * mergeUndefsWith(Constant *C, Constant *Other)
Merges undefs of a Constant with another Constant, along with the undefs already present.
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI bool isElementWiseEqual(Value *Y) const
Return true if this constant and a constant 'Y' are element-wise equal.
This class represents an extension of floating point types.
This class represents a cast from floating point to signed integer.
This class represents a cast from floating point to unsigned integer.
This class represents a truncation of floating point types.
Convenience struct for specifying and reasoning about fast-math flags.
Class to represent fixed width SIMD vectors.
static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
This instruction compares its operands according to the predicate given to the constructor.
Value * CreateInsertElement(Type *VecTy, Value *NewElt, Value *Idx, const Twine &Name="")
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
static InsertElementInst * Create(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Instruction * visitZExt(ZExtInst &Zext)
Instruction * visitAddrSpaceCast(AddrSpaceCastInst &CI)
Instruction * visitSExt(SExtInst &Sext)
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...
Instruction * visitFPToSI(FPToSIInst &FI)
Instruction * visitTrunc(TruncInst &CI)
Instruction * visitUIToFP(CastInst &CI)
Instruction * visitPtrToInt(PtrToIntInst &CI)
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false, bool SimplifyBothArms=false)
Given an instruction with a select as one operand and a constant as the other operand,...
Instruction * visitSIToFP(CastInst &CI)
Value * foldPtrToIntOfGEP(Type *IntTy, Value *Ptr)
Instruction * commonCastTransforms(CastInst &CI)
Implement the transforms common to all CastInst visitors.
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * foldItoFPtoI(CastInst &FI)
fpto{s/u}i({u/s}itofp(X)) --> X or zext(X) or sext(X) or trunc(X) This is safe if the intermediate ty...
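A hedged matcher sketch for the signed round-trip named above, fptosi(sitofp X), written with the PatternMatch helpers listed later on this page:
#include "llvm/IR/PatternMatch.h"
static bool isSignedIntFPRoundTrip(llvm::Value *V, llvm::Value *&X) {
  using namespace llvm::PatternMatch;
  // Matches %f = sitofp ... X ; %v = fptosi ... %f
  return match(V, m_FPToSI(m_SIToFP(m_Value(X))));
}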
Instruction * visitFPTrunc(FPTruncInst &CI)
Instruction * visitBitCast(BitCastInst &CI)
Instruction * visitIntToPtr(IntToPtrInst &CI)
Instruction * visitFPToUI(FPToUIInst &FI)
Instruction * visitPtrToAddr(PtrToAddrInst &CI)
Value * EvaluateInDifferentType(Value *V, Type *Ty, bool isSigned)
Given an expression that CanEvaluateTruncated or CanEvaluateSExtd returns true for,...
bool SimplifyDemandedInstructionBits(Instruction &Inst)
Tries to simplify operands to an integer instruction based on its demanded bits.
Instruction * visitFPExt(CastInst &CI)
LoadInst * combineLoadToNewType(LoadInst &LI, Type *NewTy, const Twine &Suffix="")
Helper to combine a load to a new type.
The core instruction combiner logic.
const DataLayout & getDataLayout() const
unsigned ComputeMaxSignificantBits(const Value *Op, const Instruction *CxtI=nullptr, unsigned Depth=0) const
IRBuilder< TargetFolder, IRBuilderCallbackInserter > BuilderTy
An IRBuilder that automatically inserts new instructions into the worklist.
unsigned ComputeNumSignBits(const Value *Op, const Instruction *CxtI=nullptr, unsigned Depth=0) const
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
Instruction * InsertNewInstWith(Instruction *New, BasicBlock::iterator Old)
Same as InsertNewInstBefore, but also sets the debug loc.
void computeKnownBits(const Value *V, KnownBits &Known, const Instruction *CxtI, unsigned Depth=0) const
bool MaskedValueIsZero(const Value *V, const APInt &Mask, const Instruction *CxtI=nullptr, unsigned Depth=0) const
const SimplifyQuery & getSimplifyQuery() const
LLVM_ABI void copyFastMathFlags(FastMathFlags FMF)
Convenience function for transferring all fast-math flag values to this instruction,...
static bool isBitwiseLogicOp(unsigned Opcode)
Determine if the Opcode is and/or/xor.
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
LLVM_ABI void setNonNeg(bool b=true)
Set or clear the nneg flag on this instruction, which must be a zext instruction.
LLVM_ABI bool hasNonNeg() const LLVM_READONLY
Determine whether the nneg flag is set.
LLVM_ABI FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
LLVM_ABI void setIsExact(bool b=true)
Set or clear the exact flag on this instruction, which must be an operator which supports this flag.
This class represents a cast from an integer to a pointer.
unsigned getAddressSpace() const
Returns the address space of this instruction's pointer type.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
This class represents a cast from a pointer to an address (non-capturing ptrtoint).
This class represents a cast from a pointer to an integer.
Value * getPointerOperand()
Gets the pointer operand.
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
This class represents a sign extension of integer types.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, const Instruction *MDFrom=nullptr)
bool insert(const value_type &X)
Insert a new element into the SetVector.
This instruction constructs a fixed permutation of two input vectors.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class represents a truncation of integer types.
void setHasNoSignedWrap(bool B)
void setHasNoUnsignedWrap(bool B)
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
LLVM_ABI Type * getWithNewType(Type *EltTy) const
Given vector type, change the element type, whilst keeping the old number of elements.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
bool isX86_AMXTy() const
Return true if this is X86 AMX.
bool isIntegerTy() const
True if this is an instance of IntegerType.
static LLVM_ABI Type * getDoubleTy(LLVMContext &C)
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
static LLVM_ABI Type * getFloatTy(LLVMContext &C)
LLVM_ABI int getFPMantissaWidth() const
Return the width of the mantissa of this type.
LLVM_ABI const fltSemantics & getFltSemantics() const
static LLVM_ABI Type * getBFloatTy(LLVMContext &C)
static LLVM_ABI Type * getHalfTy(LLVMContext &C)
'undef' values are things that do not have specified contents.
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
iterator_range< user_iterator > users()
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
static LLVM_ABI bool isValidElementType(Type *ElemTy)
Return true if the specified type is valid as a element type.
This class represents zero extension of integer types.
static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
cst_pred_ty< is_lowbit_mask > m_LowBitMask()
Match an integer or vector with only the low bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
PtrToIntSameSize_match< OpTy > m_PtrToIntSameSize(const DataLayout &DL, const OpTy &Op)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
cst_pred_ty< is_sign_mask > m_SignMask()
Match an integer or vector with only the sign bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
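A typical use of match with the combinators above, shown as a hedged example (names are illustrative): recognize a one-use 'and X, C' where C is a constant integer or splat.
#include "llvm/IR/PatternMatch.h"
static bool matchOneUseAndWithConst(llvm::Value *V, llvm::Value *&X,
                                    const llvm::APInt *&C) {
  using namespace llvm::PatternMatch;
  return match(V, m_OneUse(m_And(m_Value(X), m_APInt(C))));
}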
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
specific_intval< true > m_SpecificIntAllowPoison(const APInt &V)
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
IntrinsicID_match m_VScale()
Matches a call to llvm.vscale().
BinOpPred_match< LHS, RHS, is_logical_shift_op > m_LogicalShift(const LHS &L, const RHS &R)
Matches logical shift operations.
CastInst_match< OpTy, FPToUIInst > m_FPToUI(const OpTy &Op)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
CastInst_match< OpTy, FPExtInst > m_FPExt(const OpTy &Op)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
cst_pred_ty< is_negated_power2 > m_NegatedPower2()
Match a integer or vector negated power-of-2.
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches a Add with LHS and RHS in either order.
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
CastInst_match< OpTy, FPToSIInst > m_FPToSI(const OpTy &Op)
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
CastOperator_match< OpTy, Instruction::IntToPtr > m_IntToPtr(const OpTy &Op)
Matches IntToPtr.
ThreeOps_match< Val_t, Elt_t, Idx_t, Instruction::InsertElement > m_InsertElt(const Val_t &Val, const Elt_t &Elt, const Idx_t &Idx)
Matches InsertElementInst.
ElementWiseBitCast_match< OpTy > m_ElementWiseBitCast(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
cst_pred_ty< icmp_pred_with_threshold > m_SpecificInt_ICMP(ICmpInst::Predicate Predicate, const APInt &Threshold)
Match an integer or vector with every element comparing 'pred' (eq/ne/...) to Threshold.
friend class Instruction
Iterator for Instructions in a BasicBlock.
LLVM_ABI KnownFPClass computeKnownFPClass(const Value *V, const APInt &DemandedElts, FPClassTest InterestedClasses, const SimplifyQuery &SQ, unsigned Depth=0)
Determine which floating-point classes are valid for V, and return them in KnownFPClass bit sets.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI Constant * ConstantFoldSelectInstruction(Constant *Cond, Constant *V1, Constant *V2)
Attempt to constant fold a select instruction with the specified operands.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
unsigned Log2_64_Ceil(uint64_t Value)
Return the ceil log base 2 of the specified value, 64 if the value is zero.
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
LLVM_ABI Value * simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty, const SimplifyQuery &Q)
Given operands for a CastInst, fold the result or return null.
auto dyn_cast_or_null(const Y &Val)
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
auto reverse(ContainerTy &&C)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
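Concrete values for the two integer-math helpers documented nearby (Log2_32 and isPowerOf2_32), shown as an assumed example:
#include "llvm/Support/MathExtras.h"
static_assert(llvm::isPowerOf2_32(64u), "64 is a power of two");
// Floor log2: Log2_32(1) == 0, Log2_32(32) == 5, Log2_32(33) == 5.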
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
SmallVector< ValueTypeFromRangeType< R >, Size > to_vector(R &&Range)
Given a range of type R, iterate the entire range and return a SmallVector with elements of the vecto...
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ABI bool replaceAllDbgUsesWith(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT)
Point debug users of From to To or salvage them.
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
LLVM_ABI Constant * ConstantFoldIntegerCast(Constant *C, Type *DestTy, bool IsSigned, const DataLayout &DL)
Constant fold a zext, sext or trunc, depending on IsSigned and whether the DestTy is wider or narrowe...
LLVM_ABI bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the give value is known to be non-negative.
LLVM_ABI Constant * ConstantFoldBinaryInstruction(unsigned Opcode, Constant *V1, Constant *V2)
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
bool isKnownNever(FPClassTest Mask) const
Return true if it's known this can never be one of the mask entries.
SimplifyQuery getWithInstruction(const Instruction *I) const