using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

unsigned Opc = I->getOpcode();
case Instruction::Add:
case Instruction::Sub:
case Instruction::Mul:
case Instruction::And:
case Instruction::Xor:
case Instruction::AShr:
case Instruction::LShr:
case Instruction::Shl:
case Instruction::UDiv:
case Instruction::URem: {
case Instruction::Trunc:
case Instruction::ZExt:
case Instruction::SExt:
if (I->getOperand(0)->getType() == Ty)
  return I->getOperand(0);
Opc == Instruction::SExt);
case Instruction::Select: {
case Instruction::PHI: {
case Instruction::FPToUI:
case Instruction::FPToSI:
case Instruction::Call:
switch (II->getIntrinsicID()) {
case Intrinsic::vscale: {
case Instruction::ShuffleVector: {
auto *ScalarTy = cast<VectorType>(Ty)->getElementType();
auto *VTy = cast<VectorType>(I->getOperand(0)->getType());
cast<ShuffleVectorInst>(I)->getShuffleMask());
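// The EvaluateInDifferentType switch above rebuilds an expression tree in a
// narrower integer type. This is sound for add/sub/mul and the bitwise ops
// because of ordinary modular arithmetic: the low bits of the result depend
// only on the low bits of the inputs. A standalone sketch of that fact
// (illustrative only, not LLVM code; build as C++17 or newer):

#include <cstdint>

constexpr uint8_t wide_then_trunc(uint32_t A, uint32_t B, uint32_t C) {
  return static_cast<uint8_t>(A + B * C); // compute in i32, truncate to i8
}
constexpr uint8_t narrow_eval(uint32_t A, uint32_t B, uint32_t C) {
  // Truncate the operands first and evaluate the whole tree in i8.
  return static_cast<uint8_t>(static_cast<uint8_t>(A) +
                              static_cast<uint8_t>(B) * static_cast<uint8_t>(C));
}
static_assert(wide_then_trunc(0x12345, 0xBEEF, 0x77) ==
              narrow_eval(0x12345, 0xBEEF, 0x77));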
InstCombinerImpl::isEliminableCastPair(const CastInst *CI1,
DstTy, SrcIntPtrTy, MidIntPtrTy,
if ((Res == Instruction::IntToPtr && SrcTy != DstIntPtrTy) ||
    (Res == Instruction::PtrToInt && DstTy != SrcIntPtrTy))
if (auto *SrcC = dyn_cast<Constant>(Src))
if (auto *CSrc = dyn_cast<CastInst>(Src)) {
if (CSrc->hasOneUse())
if (auto *Sel = dyn_cast<SelectInst>(Src)) {
auto *Cmp = dyn_cast<CmpInst>(Sel->getCondition());
if (!Cmp || Cmp->getOperand(0)->getType() != Sel->getType() ||
if (CI.getOpcode() != Instruction::BitCast ||
if (auto *PN = dyn_cast<PHINode>(Src)) {
auto *SrcTy = dyn_cast<FixedVectorType>(X->getType());
auto *DestTy = dyn_cast<FixedVectorType>(Ty);
if (SrcTy && DestTy &&
    SrcTy->getNumElements() == DestTy->getNumElements() &&
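// isEliminableCastPair asks whether two back-to-back casts collapse into one
// (or into nothing). For the pure integer case the identity is easy to see in
// plain C++ (a sketch, not the LLVM routine): zext i8 -> i32 followed by
// trunc i32 -> i16 equals a single zext i8 -> i16.

#include <cstdint>

constexpr bool pair_collapses(uint8_t X) {
  uint16_t ViaPair = static_cast<uint16_t>(static_cast<uint32_t>(X)); // zext, then trunc
  uint16_t Direct = static_cast<uint16_t>(X);                         // single zext
  return ViaPair == Direct;
}
static_assert(pair_collapses(0x00) && pair_collapses(0x7F) && pair_collapses(0xFF));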
if (isa<Constant>(V))
if (!isa<Instruction>(V))
auto *I = cast<Instruction>(V);
Type *OrigTy = V->getType();
switch (I->getOpcode()) {
case Instruction::Add:
case Instruction::Sub:
case Instruction::Mul:
case Instruction::And:
case Instruction::Or:
case Instruction::Xor:
case Instruction::UDiv:
case Instruction::URem: {
case Instruction::Shl: {
case Instruction::LShr: {
case Instruction::AShr: {
unsigned ShiftedBits = OrigBitWidth - BitWidth;
case Instruction::Trunc:
case Instruction::ZExt:
case Instruction::SExt:
case Instruction::Select: {
case Instruction::PHI: {
case Instruction::FPToUI:
case Instruction::FPToSI: {
Type *InputTy = I->getOperand(0)->getType()->getScalarType();
I->getOpcode() == Instruction::FPToSI);
case Instruction::ShuffleVector:

if (!TruncOp->hasOneUse() || !isa<IntegerType>(DestType))
Value *VecInput = nullptr;
!isa<VectorType>(VecInput->getType()))
unsigned VecWidth = VecType->getPrimitiveSizeInBits();
unsigned ShiftAmount = ShiftVal ? ShiftVal->getZExtValue() : 0;
if ((VecWidth % DestWidth != 0) || (ShiftAmount % DestWidth != 0))
unsigned NumVecElts = VecWidth / DestWidth;
if (VecType->getElementType() != DestType) {
unsigned Elt = ShiftAmount / DestWidth;
Elt = NumVecElts - 1 - Elt;
"Don't narrow to an illegal scalar type");
Value *ShVal0, *ShVal1, *ShAmt0, *ShAmt1;
if (Or0->getOpcode() == BinaryOperator::LShr) {
Or1->getOpcode() == BinaryOperator::LShr &&
       "Illegal or(shift,shift) pair");
unsigned MaxShiftAmountWidth = Log2_32(NarrowWidth);
APInt HiBitMask = ~APInt::getLowBitsSet(WideWidth, MaxShiftAmountWidth);
if (ShVal0 != ShVal1)
unsigned Mask = Width - 1;
Value *ShAmt = matchShiftAmount(ShAmt0, ShAmt1, NarrowWidth);
ShAmt = matchShiftAmount(ShAmt1, ShAmt0, NarrowWidth);
if (ShVal0 != ShVal1)
Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
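// narrowFunnelShift recognizes a rotate/funnel shift computed in a wider type
// and truncated back, and replaces it with llvm.fshl/llvm.fshr on the narrow
// type. The scalar identity behind it, as a standalone sketch (illustrative
// only; build as C++17 or newer):

#include <cstdint>

constexpr uint16_t rot16_via_i32(uint16_t X, unsigned S) {
  uint32_t Wide = X; // zext to i32
  return static_cast<uint16_t>((Wide << S) | (Wide >> (16 - S))); // or(shl, lshr), trunc
}
constexpr uint16_t rot16_direct(uint16_t X, unsigned S) {
  return static_cast<uint16_t>((X << S) | (X >> (16 - S))); // fshl(X, X, S) on i16
}
static_assert(rot16_via_i32(0xABCD, 3) == rot16_direct(0xABCD, 3));
static_assert(rot16_via_i32(0x8001, 15) == rot16_direct(0x8001, 15));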
if (!isa<VectorType>(SrcTy) && !shouldChangeType(SrcTy, DestTy))
case Instruction::And:
case Instruction::Or:
case Instruction::Xor:
case Instruction::Add:
case Instruction::Sub:
case Instruction::Mul: {
case Instruction::LShr:
case Instruction::AShr: {
unsigned MaxShiftAmt = SrcWidth - DestWidth;
APInt(SrcWidth, MaxShiftAmt)))) {
auto *OldShift = cast<Instruction>(Trunc.getOperand(0));
bool IsExact = OldShift->isExact();
OldShift->getOpcode() == Instruction::AShr
if (Instruction *NarrowOr = narrowFunnelShift(Trunc))
auto *Shuf = dyn_cast<ShuffleVectorInst>(Trunc.getOperand(0));
if (Shuf && Shuf->hasOneUse() && match(Shuf->getOperand(1), m_Undef()) &&
    Shuf->getType() == Shuf->getOperand(0)->getType()) {
assert((Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) &&
       "Unexpected instruction for shrinking");
auto *InsElt = dyn_cast<InsertElementInst>(Trunc.getOperand(0));
if (!InsElt || !InsElt->hasOneUse())
Value *VecOp = InsElt->getOperand(0);
Value *ScalarOp = InsElt->getOperand(1);

Type *DestTy = Trunc.getType(), *SrcTy = Src->getType();
if ((DestTy->isVectorTy() || shouldChangeType(SrcTy, DestTy)) &&
dbgs() << "ICE: EvaluateInDifferentType converting expression type"
if (auto *DestITy = dyn_cast<IntegerType>(DestTy)) {
if (DestWidth * 2 < SrcWidth) {
auto *NewDestTy = DestITy->getExtendedType();
if (shouldChangeType(SrcTy, NewDestTy) &&
dbgs() << "ICE: EvaluateInDifferentType converting expression type"
          " to reduce the width of operand of"
if (SelectInst *Sel = dyn_cast<SelectInst>(Src))
if (DestWidth == 1) {
unsigned AWidth = A->getType()->getScalarSizeInBits();
unsigned MaxShiftAmt = SrcWidth - std::max(DestWidth, AWidth);
auto *OldSh = cast<Instruction>(Src);
bool IsExact = OldSh->isExact();
APInt(SrcWidth, MaxShiftAmt)))) {
auto GetNewShAmt = [&](unsigned Width) {
Constant *MaxAmt = ConstantInt::get(SrcTy, Width - 1, false);
if (A->getType() == DestTy) {
Constant *ShAmt = GetNewShAmt(DestWidth);
return IsExact ? BinaryOperator::CreateExactAShr(A, ShAmt)
               : BinaryOperator::CreateAShr(A, ShAmt);
if (Src->hasOneUse()) {
Constant *ShAmt = GetNewShAmt(AWidth);
if (Src->hasOneUse() &&
    (isa<VectorType>(SrcTy) || shouldChangeType(SrcTy, DestTy))) {
APInt Threshold = APInt(C->getType()->getScalarSizeInBits(), DestWidth);
auto *VecOpTy = cast<VectorType>(VecOp->getType());
auto VecElts = VecOpTy->getElementCount();
if (SrcWidth % DestWidth == 0) {
uint64_t TruncRatio = SrcWidth / DestWidth;
uint64_t BitCastNumElts = VecElts.getKnownMinValue() * TruncRatio;
: VecOpIdx * TruncRatio;
assert(BitCastNumElts <= std::numeric_limits<uint32_t>::max() &&
unsigned AWidth = A->getType()->getScalarSizeInBits();
if (AWidth == DestWidth && AWidth > Log2_32(SrcWidth)) {
Value *WidthDiff = ConstantInt::get(A->getType(), SrcWidth - AWidth);
return BinaryOperator::CreateAdd(NarrowCtlz, WidthDiff);
if (Log2_32(*MaxVScale) < DestWidth) {
bool Changed = false;
return Changed ? &Trunc : nullptr;
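// One of the visitTrunc folds above rewrites trunc(ashr(sext A, C)) as an
// ashr of A with the shift amount clamped to the narrow width (the
// GetNewShAmt lambda). The scalar fact it relies on, as a standalone sketch;
// arithmetic right shift on signed values is assumed (guaranteed since C++20,
// and what mainstream compilers do anyway):

#include <algorithm>
#include <cstdint>

constexpr int8_t wide_ashr_then_trunc(int8_t A, unsigned C) {
  return static_cast<int8_t>(static_cast<int32_t>(A) >> C); // sext, ashr, trunc
}
constexpr int8_t clamped_narrow_ashr(int8_t A, unsigned C) {
  return static_cast<int8_t>(A >> std::min(C, 7u)); // ashr A, min(C, AWidth - 1)
}
static_assert(wide_ashr_then_trunc(-100, 3) == clamped_narrow_ashr(-100, 3));
static_assert(wide_ashr_then_trunc(-100, 12) == clamped_narrow_ashr(-100, 12));
static_assert(wide_ashr_then_trunc(100, 12) == clamped_narrow_ashr(100, 12));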
Value *In = Cmp->getOperand(0);
Value *Sh = ConstantInt::get(In->getType(),
                             In->getType()->getScalarSizeInBits() - 1);
if (In->getType() != Zext.getType())
if (Op1CV->isZero() && Cmp->isEquality()) {
uint32_t ShAmt = KnownZeroMask.logBase2();
bool IsExpectShAmt = KnownZeroMask.isPowerOf2() &&
(Cmp->getOperand(0)->getType() == Zext.getType() ||
Value *In = Cmp->getOperand(0);
In->getName() + ".lobit");
if (Cmp->isEquality() && Zext.getType() == Cmp->getOperand(0)->getType()) {
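// transformZExtICmp turns zext(icmp ne (and X, 2^K), 0) into the ".lobit"
// value computed above: shift the known bit down to bit zero and mask. The
// underlying identity, as a standalone sketch (build as C++17 or newer):

#include <cstdint>

constexpr uint32_t zext_of_icmp(uint32_t X, unsigned K) {
  return (X & (1u << K)) != 0 ? 1u : 0u; // zext(icmp ne (and X, 1 << K), 0)
}
constexpr uint32_t lobit(uint32_t X, unsigned K) {
  return (X >> K) & 1u;                  // (lshr X, K) & 1
}
static_assert(zext_of_icmp(0b1010, 1) == lobit(0b1010, 1));
static_assert(zext_of_icmp(0b1010, 2) == lobit(0b1010, 2));
static_assert(zext_of_icmp(0x80000000u, 31) == lobit(0x80000000u, 31));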
auto *I = cast<Instruction>(V);
switch (I->getOpcode()) {
case Instruction::ZExt:
case Instruction::SExt:
case Instruction::Trunc:
case Instruction::And:
case Instruction::Or:
case Instruction::Xor:
case Instruction::Add:
case Instruction::Sub:
case Instruction::Mul:
if (BitsToClear == 0 && Tmp == 0)
if (Tmp == 0 && I->isBitwiseLogicOp()) {
unsigned VSize = V->getType()->getScalarSizeInBits();
if (I->getOpcode() == Instruction::And)
case Instruction::Shl: {
BitsToClear = ShiftAmt < BitsToClear ? BitsToClear - ShiftAmt : 0;
case Instruction::LShr: {
if (BitsToClear > V->getType()->getScalarSizeInBits())
  BitsToClear = V->getType()->getScalarSizeInBits();
case Instruction::Select:
case Instruction::PHI: {
case Instruction::Call:
if (II->getIntrinsicID() == Intrinsic::vscale)
Type *SrcTy = Src->getType(), *DestTy = Zext.getType();
unsigned BitsToClear;
if (shouldChangeType(SrcTy, DestTy) &&
"Can't clear more bits than in SrcTy");
dbgs() << "ICE: EvaluateInDifferentType converting expression type"
          " to avoid zero extend: "
if (auto *SrcOp = dyn_cast<Instruction>(Src))
  if (SrcOp->hasOneUse())
DestBitSize - SrcBitsKept),
return BinaryOperator::CreateAnd(Res, C);
if (auto *CSrc = dyn_cast<TruncInst>(Src)) {
Value *A = CSrc->getOperand(0);
unsigned SrcSize = A->getType()->getScalarSizeInBits();
unsigned MidSize = CSrc->getType()->getScalarSizeInBits();
if (SrcSize < DstSize) {
Constant *AndConst = ConstantInt::get(A->getType(), AndValue);
if (SrcSize == DstSize) {
return BinaryOperator::CreateAnd(A, ConstantInt::get(A->getType(),
if (SrcSize > DstSize) {
return BinaryOperator::CreateAnd(Trunc,
                                 ConstantInt::get(Trunc->getType(),
if (auto *Cmp = dyn_cast<ICmpInst>(Src))
  return transformZExtICmp(Cmp, Zext);
X->getType() == DestTy)
X->getType() == DestTy) {
X->getType() == DestTy) {
return BinaryOperator::CreateAnd(X, ZextC);
unsigned TypeWidth = Src->getType()->getScalarSizeInBits();
if (Log2_32(*MaxVScale) < TypeWidth) {
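// The TruncInst case of visitZExt above replaces zext(trunc X) with a single
// 'and' that keeps the low bits. The identity, as a standalone sketch:

#include <cstdint>

constexpr uint32_t zext_of_trunc(uint32_t X) {
  return static_cast<uint32_t>(static_cast<uint8_t>(X)); // trunc to i8, zext to i32
}
constexpr uint32_t masked(uint32_t X) {
  return X & 0xFFu;                                      // and X, 255
}
static_assert(zext_of_trunc(0xDEADBEEFu) == masked(0xDEADBEEFu));
static_assert(zext_of_trunc(0x00000080u) == masked(0x00000080u));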
Value *Op0 = Cmp->getOperand(0), *Op1 = Cmp->getOperand(1);
if (!Op1->getType()->isIntOrIntVectorTy())
if (In->getType() != Sext.getType())
if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
if (Cmp->hasOneUse() &&
    Cmp->isEquality() && (Op1C->isZero() || Op1C->getValue().isPowerOf2())) {
if (KnownZeroMask.isPowerOf2()) {
Value *In = Cmp->getOperand(0);
if (!Op1C->isZero() && Op1C->getValue() != KnownZeroMask) {
unsigned ShiftAmt = KnownZeroMask.countr_zero();
ConstantInt::get(In->getType(), ShiftAmt));
unsigned ShiftAmt = KnownZeroMask.countl_zero();
ConstantInt::get(In->getType(), ShiftAmt));
KnownZeroMask.getBitWidth() - 1), "sext");
1382 "Can't sign extend type to a smaller type");
1388 auto *
I = cast<Instruction>(V);
1389 switch (
I->getOpcode()) {
1390 case Instruction::SExt:
1391 case Instruction::ZExt:
1392 case Instruction::Trunc:
1394 case Instruction::And:
1395 case Instruction::Or:
1396 case Instruction::Xor:
1397 case Instruction::Add:
1398 case Instruction::Sub:
1399 case Instruction::Mul:
1407 case Instruction::Select:
1411 case Instruction::PHI: {
1438 Type *SrcTy = Src->getType(), *DestTy = Sext.
getType();
1445 CI->setNonNeg(
true);
1453 dbgs() <<
"ICE: EvaluateInDifferentType converting expression type"
1454 " to avoid sign extend: "
1465 Value *ShAmt = ConstantInt::get(DestTy, DestBitSize-SrcBitSize);
1474 unsigned XBitSize =
X->getType()->getScalarSizeInBits();
1479 if (Src->hasOneUse() &&
X->getType() == DestTy) {
1481 Constant *ShAmt = ConstantInt::get(DestTy, DestBitSize - SrcBitSize);
1490 if (Src->hasOneUse() &&
1498 if (
auto *Cmp = dyn_cast<ICmpInst>(Src))
1499 return transformSExtICmp(Cmp, Sext);
1516 Constant *BA =
nullptr, *CA =
nullptr;
1522 assert(WideCurrShAmt &&
"Constant folding of ImmConstant cannot fail");
1531 return BinaryOperator::CreateAShr(
A, NewShAmt);
1539 Type *XTy =
X->getType();
1541 Constant *ShlAmtC = ConstantInt::get(XTy, XBitSize - SrcBitSize);
1542 Constant *AshrAmtC = ConstantInt::get(XTy, XBitSize - 1);
1546 if (cast<BinaryOperator>(Src)->getOperand(0)->hasOneUse()) {
1558 if (
Log2_32(*MaxVScale) < (SrcBitSize - 1)) {
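// visitSExt above materializes a sign extension of a truncated value as a
// shl/ashr pair using the DestBitSize - SrcBitSize shift amount. The scalar
// identity, as a standalone sketch; arithmetic right shift on signed int is
// assumed (guaranteed since C++20):

#include <cstdint>

constexpr int32_t sext_of_low_byte(int32_t X) {
  return static_cast<int8_t>(static_cast<uint8_t>(X)); // trunc to i8, sext to i32
}
constexpr int32_t shl_then_ashr(int32_t X) {
  return static_cast<int32_t>(static_cast<uint32_t>(X) << 24) >> 24; // shl 24, ashr 24
}
static_assert(sext_of_low_byte(0x000000F0) == shl_then_ashr(0x000000F0)); // low byte 0xF0 -> -16
static_assert(sext_of_low_byte(0x12345678) == shl_then_ashr(0x12345678)); // low byte 0x78 -> 120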
auto *CV = dyn_cast<Constant>(V);
auto *CVVTy = dyn_cast<FixedVectorType>(V->getType());
Type *MinType = nullptr;
unsigned NumElts = CVVTy->getNumElements();
for (unsigned i = 0; i != NumElts; ++i) {
if (isa<UndefValue>(CV->getAggregateElement(i)))
auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
if (auto *FPExt = dyn_cast<FPExtInst>(V))
  return FPExt->getOperand(0)->getType();
if (auto *CFP = dyn_cast<ConstantFP>(V))
if (auto *FPCExt = dyn_cast<ConstantExpr>(V))
  if (FPCExt->getOpcode() == Instruction::FPExt)
    return FPCExt->getOperand(0)->getType();
return V->getType();

assert((Opcode == CastInst::SIToFP || Opcode == CastInst::UIToFP) &&
Value *Src = I.getOperand(0);
Type *SrcTy = Src->getType();
Type *FPTy = I.getType();
bool IsSigned = Opcode == Instruction::SIToFP;
if (SrcSize <= DestNumSigBits)
int SrcNumSigBits = F->getType()->getFPMantissaWidth();
if (SrcNumSigBits > 0 && DestNumSigBits > 0 &&
    SrcNumSigBits <= DestNumSigBits)
if (SigBits <= DestNumSigBits)
auto *BO = dyn_cast<BinaryOperator>(FPT.getOperand(0));
if (BO && BO->hasOneUse()) {
unsigned OpWidth = BO->getType()->getFPMantissaWidth();
unsigned SrcWidth = std::max(LHSWidth, RHSWidth);
switch (BO->getOpcode()) {
case Instruction::FAdd:
case Instruction::FSub:
if (OpWidth >= 2*DstWidth+1 && DstWidth >= SrcWidth) {
case Instruction::FMul:
if (OpWidth >= LHSWidth + RHSWidth && DstWidth >= SrcWidth) {
case Instruction::FDiv:
if (OpWidth >= 2*DstWidth && DstWidth >= SrcWidth) {
case Instruction::FRem: {
if (SrcWidth == OpWidth)
if (LHSWidth == SrcWidth) {
if (Op && Op->hasOneUse()) {
if (isa<FPMathOperator>(Op))
X->getType() == Ty) {
X->getType() == Ty) {
if (auto *II = dyn_cast<IntrinsicInst>(FPT.getOperand(0))) {
switch (II->getIntrinsicID()) {
case Intrinsic::ceil:
case Intrinsic::fabs:
case Intrinsic::floor:
case Intrinsic::nearbyint:
case Intrinsic::rint:
case Intrinsic::round:
case Intrinsic::roundeven:
case Intrinsic::trunc: {
Value *Src = II->getArgOperand(0);
if (!Src->hasOneUse())
if (II->getIntrinsicID() != Intrinsic::fabs) {
FPExtInst *FPExtSrc = dyn_cast<FPExtInst>(Src);
if (!FPExtSrc || FPExtSrc->getSrcTy() != Ty)
II->getIntrinsicID(), Ty);
II->getOperandBundlesAsDefs(OpBundles);
if (isa<SIToFPInst>(Src) || isa<UIToFPInst>(Src)) {
auto *FPCast = cast<CastInst>(Src);
if (isa<SIToFPInst>(Src) || isa<UIToFPInst>(Src)) {
auto *FPCast = cast<CastInst>(Src);
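// The FAdd/FSub case of visitFPTrunc above requires OpWidth >= 2*DstWidth + 1
// before it narrows the arithmetic. For double -> float that is 53 >= 2*24 + 1,
// so adding two values that were merely fpext'ed from float and truncating
// back matches the float addition. A standalone sketch, assuming float
// expressions are evaluated in float precision (FLT_EVAL_METHOD == 0):

#include <cassert>

int main() {
  float A = 1.9999999f, B = 3.0000002f;
  float ViaDouble = static_cast<float>(static_cast<double>(A) + static_cast<double>(B));
  float Narrow = A + B;
  assert(ViaDouble == Narrow);
  return 0;
}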
auto *OpI = cast<CastInst>(FI.getOperand(0));
Value *X = OpI->getOperand(0);
Type *XType = X->getType();
bool IsOutputSigned = isa<FPToSIInst>(FI);
if (OutputSize > OpI->getType()->getFPMantissaWidth())
bool IsInputSigned = isa<SIToFPInst>(OpI);
if (IsInputSigned && IsOutputSigned)
assert(XType == DestType && "Unexpected types for int to FP to int casts");
UI->setNonNeg(true);
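// foldItoFPtoI above removes an int -> FP -> int round trip whenever the FP
// mantissa is wide enough to hold every value of the integer type (the
// getFPMantissaWidth check). For i32 through double that is always the case.
// A standalone sketch:

#include <cassert>
#include <cstdint>

int main() {
  for (int32_t X : {INT32_MIN, -1, 0, 1, 123456789, INT32_MAX}) {
    double D = static_cast<double>(X);    // sitofp i32 -> double, always exact
    assert(static_cast<int32_t>(D) == X); // fptosi double -> i32 round-trips
  }
  return 0;
}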
if (TySize != PtrSize) {
Mask->getType() == Ty)
if (auto *GEP = dyn_cast<GEPOperator>(SrcOp)) {
if (GEP->hasOneUse() &&
    isa<ConstantPointerNull>(GEP->getPointerOperand())) {
if (GEP->hasOneUse() &&
    Base->getType() == Ty) {
auto *NewOp = BinaryOperator::CreateAdd(Base, Offset);
if (GEP->hasNoUnsignedWrap() ||
    (GEP->hasNoUnsignedSignedWrap() &&
NewOp->setHasNoUnsignedWrap(true);
if (SrcTy->getElementType() != DestTy->getElementType()) {
if (SrcTy->getElementType()->getPrimitiveSizeInBits() !=
    DestTy->getElementType()->getPrimitiveSizeInBits())
cast<FixedVectorType>(SrcTy)->getNumElements());
unsigned SrcElts = cast<FixedVectorType>(SrcTy)->getNumElements();
unsigned DestElts = cast<FixedVectorType>(DestTy)->getNumElements();
assert(SrcElts != DestElts && "Element counts should be different.");
auto ShuffleMaskStorage = llvm::to_vector<16>(llvm::seq<int>(0, SrcElts));
if (SrcElts > DestElts) {
ShuffleMask = ShuffleMaskStorage;
ShuffleMask = ShuffleMask.take_back(DestElts);
ShuffleMask = ShuffleMask.take_front(DestElts);
unsigned DeltaElts = DestElts - SrcElts;
ShuffleMaskStorage.insert(ShuffleMaskStorage.begin(), DeltaElts, NullElt);
ShuffleMaskStorage.append(DeltaElts, NullElt);
ShuffleMask = ShuffleMaskStorage;
2204 "Shift should be a multiple of the element type size");
2207 if (isa<UndefValue>(V))
return true;
2211 if (V->getType() == VecEltTy) {
2213 if (
Constant *
C = dyn_cast<Constant>(V))
2214 if (
C->isNullValue())
2219 ElementIndex = Elements.size() - ElementIndex - 1;
2222 if (Elements[ElementIndex])
2225 Elements[ElementIndex] = V;
2229 if (
Constant *
C = dyn_cast<Constant>(V)) {
2242 if (!isa<IntegerType>(
C->getType()))
2244 C->getType()->getPrimitiveSizeInBits()));
2248 for (
unsigned i = 0; i != NumElts; ++i) {
2249 unsigned ShiftI = i * ElementSize;
2251 Instruction::LShr,
C, ConstantInt::get(
C->getType(), ShiftI));
if (!V->hasOneUse())
  return false;
if (!I)
  return false;
switch (I->getOpcode()) {
default:
  return false;
case Instruction::BitCast:
if (I->getOperand(0)->getType()->isVectorTy())
case Instruction::ZExt:
I->getOperand(0)->getType()->getPrimitiveSizeInBits(),
case Instruction::Or:
case Instruction::Shl: {
ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1));
if (!CI)
  return false;
auto *DestVecTy = cast<FixedVectorType>(CI.getType());
DestVecTy->getElementType(),
for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
if (!Elements[i])
  continue;

auto *FixedVType = dyn_cast<FixedVectorType>(VecType);
if (DestType->isVectorTy() && FixedVType && FixedVType->getNumElements() == 1)
if (X->getType()->isFPOrFPVectorTy() &&
    Y->getType()->isIntOrIntVectorTy()) {
if (X->getType()->isIntOrIntVectorTy() &&
    Y->getType()->isFPOrFPVectorTy()) {
X->getType() == DestTy && !isa<Constant>(X)) {
X->getType() == DestTy && !isa<Constant>(X)) {
if (auto *CondVTy = dyn_cast<VectorType>(CondTy))
CondVTy->getElementCount() !=
    cast<VectorType>(DestTy)->getElementCount())
auto *Sel = cast<Instruction>(BitCast.getOperand(0));
!isa<Constant>(X)) {
!isa<Constant>(X)) {
if (!isa<StoreInst>(U))

Type *SrcTy = Src->getType();
while (!PhiWorklist.empty()) {
for (Value *IncValue : OldPN->incoming_values()) {
if (isa<Constant>(IncValue))
if (auto *LI = dyn_cast<LoadInst>(IncValue)) {
if (Addr == &CI || isa<LoadInst>(Addr))
if (LI->hasOneUse() && LI->isSimple())
if (auto *PNode = dyn_cast<PHINode>(IncValue)) {
if (OldPhiNodes.insert(PNode))
auto *BCI = dyn_cast<BitCastInst>(IncValue);
Type *TyA = BCI->getOperand(0)->getType();
Type *TyB = BCI->getType();
if (TyA != DestTy || TyB != SrcTy)
for (auto *OldPN : OldPhiNodes) {
if (auto *SI = dyn_cast<StoreInst>(V)) {
if (!SI->isSimple() || SI->getOperand(0) != OldPN)
} else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
Type *TyB = BCI->getOperand(0)->getType();
Type *TyA = BCI->getType();
if (TyA != DestTy || TyB != SrcTy)
} else if (auto *PHI = dyn_cast<PHINode>(V)) {
if (!OldPhiNodes.contains(PHI))
for (auto *OldPN : OldPhiNodes) {
NewPNodes[OldPN] = NewPN;
for (auto *OldPN : OldPhiNodes) {
PHINode *NewPN = NewPNodes[OldPN];
for (unsigned j = 0, e = OldPN->getNumOperands(); j != e; ++j) {
Value *V = OldPN->getOperand(j);
Value *NewV = nullptr;
if (auto *C = dyn_cast<Constant>(V)) {
} else if (auto *LI = dyn_cast<LoadInst>(V)) {
} else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
NewV = BCI->getOperand(0);
} else if (auto *PrevPN = dyn_cast<PHINode>(V)) {
NewV = NewPNodes[PrevPN];
NewPN->addIncoming(NewV, OldPN->getIncomingBlock(j));
for (auto *OldPN : OldPhiNodes) {
PHINode *NewPN = NewPNodes[OldPN];
if (auto *SI = dyn_cast<StoreInst>(V)) {
assert(SI->isSimple() && SI->getOperand(0) == OldPN);
SI->setOperand(0, NewBC);
} else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
Type *TyB = BCI->getOperand(0)->getType();
Type *TyA = BCI->getType();
assert(TyA == DestTy && TyB == SrcTy);
} else if (auto *PHI = dyn_cast<PHINode>(V)) {

if (X->getType() != FTy)
Type *SrcTy = Src->getType();
if (DestTy == Src->getType())
if (isa<FixedVectorType>(DestTy)) {
if (isa<IntegerType>(SrcTy)) {
if (isa<TruncInst>(Src) || isa<ZExtInst>(Src)) {
CastInst *SrcCast = cast<CastInst>(Src);
if (isa<VectorType>(BCIn->getOperand(0)->getType()))
BCIn->getOperand(0), cast<VectorType>(DestTy), *this))
if (SrcVTy->getNumElements() == 1) {
if (auto *InsElt = dyn_cast<InsertElementInst>(Src))
  return new BitCastInst(InsElt->getOperand(1), DestTy);
Y->getType()->isIntegerTy() && isDesirableIntType(BitWidth)) {
IndexC = SrcVTy->getNumElements() - 1 - IndexC;
unsigned EltWidth = Y->getType()->getScalarSizeInBits();
return BinaryOperator::CreateOr(AndX, ZextY);
if (auto *Shuf = dyn_cast<ShuffleVectorInst>(Src)) {
Value *ShufOp0 = Shuf->getOperand(0);
Value *ShufOp1 = Shuf->getOperand(1);
auto ShufElts = cast<VectorType>(Shuf->getType())->getElementCount();
auto SrcVecElts = cast<VectorType>(ShufOp0->getType())->getElementCount();
if (Shuf->hasOneUse() && DestTy->isVectorTy() &&
    cast<VectorType>(DestTy)->getElementCount() == ShufElts &&
    ShufElts == SrcVecElts) {
if (((Tmp = dyn_cast<BitCastInst>(ShufOp0)) &&
((Tmp = dyn_cast<BitCastInst>(ShufOp1)) &&
if (DestTy->isIntegerTy() && ShufElts.getKnownMinValue() % 2 == 0 &&
    Shuf->hasOneUse() && Shuf->isReverse()) {
unsigned IntrinsicNum = 0;
IntrinsicNum = Intrinsic::bswap;
IntrinsicNum = Intrinsic::bitreverse;
if (IntrinsicNum != 0) {
assert(ShufOp0->getType() == SrcTy && "Unexpected shuffle mask");
if (PHINode *PN = dyn_cast<PHINode>(Src))
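// The Intrinsic::bswap case above rewrites a bitcast of a reversing byte
// shuffle as a byte swap of the integer. The same fact at the C++ level, as a
// standalone sketch (the memcpy plays the role of the bitcast; the identity
// holds on either endianness):

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  uint8_t Bytes[4] = {0x11, 0x22, 0x33, 0x44};
  uint8_t Reversed[4] = {0x44, 0x33, 0x22, 0x11}; // the reversing shuffle
  uint32_t AsInt = 0, ReversedAsInt = 0;
  std::memcpy(&AsInt, Bytes, 4);
  std::memcpy(&ReversedAsInt, Reversed, 4);
  uint32_t Swapped = (AsInt >> 24) | ((AsInt >> 8) & 0x0000FF00u) |
                     ((AsInt << 8) & 0x00FF0000u) | (AsInt << 24); // bswap
  assert(Swapped == ReversedAsInt);
  return 0;
}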
Static helper functions defined in this file:

static bool collectInsertionElements(Value *V, unsigned Shift, SmallVectorImpl<Value *> &Elements, Type *VecEltTy, bool isBigEndian)
    V is a value which is inserted into a vector of VecEltTy.
static bool canEvaluateSExtd(Value *V, Type *Ty)
    Return true if we can take the specified value and return it as type Ty without inserting any new casts.
static bool hasStoreUsersOnly(CastInst &CI)
    Check if all users of CI are StoreInsts.
static Value *foldCopySignIdioms(BitCastInst &CI, InstCombiner::BuilderTy &Builder, const SimplifyQuery &SQ)
    Fold (bitcast (or (and (bitcast X to int), signmask), nneg Y) to fp) to copysign((bitcast Y to fp), X). A bit-level sketch of this idiom follows this list.
static Type *shrinkFPConstantVector(Value *V, bool PreferBFloat)
static bool canEvaluateZExtd(Value *V, Type *Ty, unsigned &BitsToClear, InstCombinerImpl &IC, Instruction *CxtI)
    Determine if the specified value can be computed in the specified wider type and produce the same low bits.
static Instruction *canonicalizeBitCastExtElt(BitCastInst &BitCast, InstCombinerImpl &IC)
    Canonicalize scalar bitcasts of extracted elements into a bitcast of the vector followed by extractelement.
static Instruction *shrinkSplatShuffle(TruncInst &Trunc, InstCombiner::BuilderTy &Builder)
    Try to narrow the width of a splat shuffle.
static Type *shrinkFPConstant(ConstantFP *CFP, bool PreferBFloat)
static Instruction *foldFPtoI(Instruction &FI, InstCombiner &IC)
static Instruction *foldBitCastSelect(BitCastInst &BitCast, InstCombiner::BuilderTy &Builder)
    Change the type of a select if we can eliminate a bitcast.
static Instruction *foldBitCastBitwiseLogic(BitCastInst &BitCast, InstCombiner::BuilderTy &Builder)
    Change the type of a bitwise logic operation if we can eliminate a bitcast.
static bool fitsInFPType(ConstantFP *CFP, const fltSemantics &Sem)
    Return a Constant* for the specified floating-point constant if it fits in the specified FP type without loss of precision.
static Instruction *optimizeVectorResizeWithIntegerBitCasts(Value *InVal, VectorType *DestTy, InstCombinerImpl &IC)
    This input value (which is known to have vector type) is being zero extended or truncated to the specified vector type.
static Instruction *shrinkInsertElt(CastInst &Trunc, InstCombiner::BuilderTy &Builder)
    Try to narrow the width of an insert element.
static Type *getMinimumFPType(Value *V, bool PreferBFloat)
    Find the minimum FP type we can safely truncate to.
static bool isMultipleOfTypeSize(unsigned Value, Type *Ty)
static Value *optimizeIntegerToVectorInsertions(BitCastInst &CI, InstCombinerImpl &IC)
    If the input is an 'or' instruction, we may be doing shifts and ors to assemble the elements of the vector manually.
static bool canAlwaysEvaluateInType(Value *V, Type *Ty)
    Constants and extensions/truncates from the destination type are always free to be evaluated in that type.
static bool canNotEvaluateInType(Value *V, Type *Ty)
    Filter out values that we can not evaluate in the destination type for free.
static bool isKnownExactCastIntToFP(CastInst &I, InstCombinerImpl &IC)
    Return true if the cast from integer to FP can be proven to be exact for all possible inputs (the conversion does not lose any precision).
static unsigned getTypeSizeIndex(unsigned Value, Type *Ty)
static Instruction *foldVecTruncToExtElt(TruncInst &Trunc, InstCombinerImpl &IC)
    Given a vector that is bitcast to an integer, optionally logically right-shifted, and truncated, convert it to an extractelement.
static bool canEvaluateTruncated(Value *V, Type *Ty, InstCombinerImpl &IC, Instruction *CxtI)
    Return true if we can evaluate the specified expression tree as type Ty instead of its larger type, and arrive with the same value.
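// foldCopySignIdioms, described above, recognizes the integer splice of a
// sign bit onto a magnitude and rewrites it as llvm.copysign. The same
// identity at the C++ level (a standalone sketch; requires C++20 for
// std::bit_cast):

#include <bit>
#include <cassert>
#include <cmath>
#include <cstdint>

int main() {
  float X = 2.5f, Y = -7.0f;
  uint32_t Magnitude = std::bit_cast<uint32_t>(X) & 0x7fffffffu; // clear sign bit
  uint32_t Sign = std::bit_cast<uint32_t>(Y) & 0x80000000u;      // keep only sign bit
  float Spliced = std::bit_cast<float>(Magnitude | Sign);
  assert(Spliced == std::copysign(X, Y));
  return 0;
}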