29#define DEBUG_TYPE "instcombine"
44 return Builder.CreateICmp(NewPred,
LHS,
RHS);
54 return Builder.CreateFCmpFMF(NewPred,
LHS,
RHS, FMF);
64 "Lo is not < Hi in range emission code!");
66 Type *Ty = V->getType();
71 if (isSigned ?
Lo.isMinSignedValue() :
Lo.isMinValue()) {
73 return Builder.CreateICmp(Pred, V, ConstantInt::get(Ty,
Hi));
79 Builder.CreateSub(V, ConstantInt::get(Ty,
Lo), V->getName() +
".off");
81 return Builder.CreateICmp(Pred, VMinusLo, HiMinusLo);
128 const APInt *ConstA =
nullptr, *ConstB =
nullptr, *ConstC =
nullptr;
133 bool IsAPow2 = ConstA && ConstA->
isPowerOf2();
134 bool IsBPow2 = ConstB && ConstB->isPowerOf2();
135 unsigned MaskVal = 0;
136 if (ConstC && ConstC->isZero()) {
155 }
else if (ConstA && ConstC && ConstC->
isSubsetOf(*ConstA)) {
165 }
else if (ConstB && ConstC && ConstC->isSubsetOf(*ConstB)) {
200 Y = ConstantInt::get(
X->getType(), Res->Mask);
201 Z = ConstantInt::get(
X->getType(), Res->C);
210static std::optional<std::pair<unsigned, unsigned>>
223 Value *L1, *L11, *L12, *L2, *L21, *L22;
225 L21 = L22 = L1 =
nullptr;
232 if (!LHSCMP->getOperand(0)->getType()->isIntOrIntVectorTy())
235 PredL = LHSCMP->getPredicate();
236 L1 = LHSCMP->getOperand(0);
237 L2 = LHSCMP->getOperand(1);
258 if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
261 }
else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
273 if (!RHSCMP->getOperand(0)->getType()->isIntOrIntVectorTy())
276 PredR = RHSCMP->getPredicate();
278 Value *R1 = RHSCMP->getOperand(0);
279 R2 = RHSCMP->getOperand(1);
288 if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
293 }
else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
311 if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
315 }
else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
332 }
else if (L12 ==
A) {
335 }
else if (L21 ==
A) {
338 }
else if (L22 ==
A) {
345 return std::optional<std::pair<unsigned, unsigned>>(
346 std::make_pair(LeftType, RightType));
368 const APInt *BCst, *DCst, *OrigECst;
379 APInt ECst = *OrigECst;
385 if (*BCst == 0 || *DCst == 0)
395 !Builder.GetInsertBlock()->getParent()->hasFnAttribute(
396 Attribute::StrictFP)) {
398 if (!Ty->isIEEELikeFPTy())
404 APInt FractionBits = ~ExpBits;
406 if (*BCst != FractionBits)
431 if ((((*BCst & *DCst) & ECst) == 0) &&
432 (*BCst & (*BCst ^ *DCst)).isPowerOf2()) {
433 APInt BorD = *BCst | *DCst;
434 APInt BandBxorDorE = (*BCst & (*BCst ^ *DCst)) | ECst;
435 Value *NewMask = ConstantInt::get(
A->getType(), BorD);
436 Value *NewMaskedValue = ConstantInt::get(
A->getType(), BandBxorDorE);
437 Value *NewAnd = Builder.CreateAnd(
A, NewMask);
438 return Builder.CreateICmp(NewCC, NewAnd, NewMaskedValue);
441 auto IsSubSetOrEqual = [](
const APInt *C1,
const APInt *C2) {
442 return (*C1 & *C2) == *C1;
444 auto IsSuperSetOrEqual = [](
const APInt *C1,
const APInt *C2) {
445 return (*C1 & *C2) == *C2;
454 if (!IsSubSetOrEqual(BCst, DCst) && !IsSuperSetOrEqual(BCst, DCst))
466 if (IsSubSetOrEqual(BCst, DCst))
467 return ConstantInt::get(
LHS->getType(), !IsAnd);
477 if (IsSuperSetOrEqual(BCst, DCst)) {
480 ICmp->setSameSign(
false);
486 assert(IsSubSetOrEqual(BCst, DCst) &&
"Precondition due to above code");
487 if ((*BCst & ECst) != 0) {
490 ICmp->setSameSign(
false);
497 return ConstantInt::get(
LHS->getType(), !IsAnd);
509 "Expected equality predicates for masked type of icmps.");
521 LHS,
RHS, IsAnd,
A,
B,
D,
E, PredL, PredR, Builder)) {
526 RHS,
LHS, IsAnd,
A,
D,
B,
C, PredR, PredL, Builder)) {
539 Value *
A =
nullptr, *
B =
nullptr, *
C =
nullptr, *
D =
nullptr, *
E =
nullptr;
541 std::optional<std::pair<unsigned, unsigned>> MaskPair =
546 "Expected equality predicates for masked type of icmps.");
547 unsigned LHSMask = MaskPair->first;
548 unsigned RHSMask = MaskPair->second;
549 unsigned Mask = LHSMask & RHSMask;
554 LHS,
RHS, IsAnd,
A,
B,
C,
D,
E, PredL, PredR, LHSMask, RHSMask,
584 Value *NewOr = Builder.CreateOr(
B,
D);
585 Value *NewAnd = Builder.CreateAnd(
A, NewOr);
590 return Builder.CreateICmp(NewCC, NewAnd, Zero);
597 Value *NewOr = Builder.CreateOr(
B,
D);
598 Value *NewAnd = Builder.CreateAnd(
A, NewOr);
599 return Builder.CreateICmp(NewCC, NewAnd, NewOr);
606 Value *NewAnd1 = Builder.CreateAnd(
B,
D);
607 Value *NewAnd2 = Builder.CreateAnd(
A, NewAnd1);
608 return Builder.CreateICmp(NewCC, NewAnd2,
A);
611 const APInt *ConstB, *ConstD;
619 APInt NewMask = *ConstB & *ConstD;
620 if (NewMask == *ConstB)
622 if (NewMask == *ConstD) {
625 RHSI->dropPoisonGeneratingFlags();
636 APInt NewMask = *ConstB | *ConstD;
637 if (NewMask == *ConstB)
639 if (NewMask == *ConstD)
666 const APInt *OldConstC, *OldConstE;
672 const APInt ConstC = PredL != CC ? *ConstB ^ *OldConstC : *OldConstC;
673 const APInt ConstE = PredR != CC ? *ConstD ^ *OldConstE : *OldConstE;
675 if (((*ConstB & *ConstD) & (ConstC ^ ConstE)).getBoolValue())
676 return IsNot ? nullptr : ConstantInt::get(
LHS->getType(), !IsAnd);
679 !ConstD->isSubsetOf(*ConstB))
684 BD = *ConstB & *ConstD;
685 CE = ConstC & ConstE;
687 BD = *ConstB | *ConstD;
688 CE = ConstC | ConstE;
690 Value *NewAnd = Builder.CreateAnd(
A, BD);
691 Value *CEVal = ConstantInt::get(
A->getType(), CE);
692 return Builder.CreateICmp(CC, NewAnd, CEVal);
696 return FoldBMixed(NewCC,
false);
698 return FoldBMixed(NewCC,
true);
713 D = Builder.CreateFreeze(
D);
714 Value *Mask = Builder.CreateOr(
B,
D);
716 return Builder.CreateICmp(NewCC,
Masked, Mask);
766 default:
return nullptr;
790 if (
LHS->getPredicate() != Pred ||
RHS->getPredicate() != Pred)
815 return Builder.CreateICmp(Pred,
And,
Op);
854 auto tryToMatchSignedTruncationCheck = [](
ICmpInst *ICmp,
Value *&
X,
855 APInt &SignBitMask) ->
bool {
856 const APInt *I01, *I1;
860 I1->ugt(*I01) && I01->
shl(1) == *I1))
872 if (tryToMatchSignedTruncationCheck(ICmp1, X1, HighestBit))
874 else if (tryToMatchSignedTruncationCheck(ICmp0, X1, HighestBit))
879 assert(HighestBit.
isPowerOf2() &&
"expected to be power of two (non-zero)");
883 APInt &UnsetBitsMask) ->
bool {
892 UnsetBitsMask = Res->Mask;
902 if (!tryToDecompose(OtherICmp, X0, UnsetBitsMask))
905 assert(!UnsetBitsMask.
isZero() &&
"empty mask makes no sense.");
920 APInt SignBitsMask = ~(HighestBit - 1U);
927 if (!UnsetBitsMask.
isSubsetOf(SignBitsMask)) {
928 APInt OtherHighestBit = (~UnsetBitsMask) + 1U;
936 return Builder.CreateICmpULT(
X, ConstantInt::get(
X->getType(), HighestBit),
937 CxtI.
getName() +
".simplified");
957 CtPop->dropPoisonGeneratingAnnotations();
959 return Builder.CreateICmpUGT(CtPop, ConstantInt::get(CtPop->getType(), 1));
963 CtPop->dropPoisonGeneratingAnnotations();
965 return Builder.CreateICmpULT(CtPop, ConstantInt::get(CtPop->getType(), 2));
992 CtPop->dropPoisonGeneratingAnnotations();
994 return Builder.CreateICmpEQ(CtPop, ConstantInt::get(CtPop->getType(), 1));
1004 CtPop->dropPoisonGeneratingAnnotations();
1006 return Builder.CreateICmpNE(CtPop, ConstantInt::get(CtPop->getType(), 1));
1020 "Expected equality predicates for masked type of icmps.");
1040 const APInt *BCst, *DCst, *ECst;
1054 if (!BFVTy || !BConst || !DConst || !EConst)
1057 for (
unsigned I = 0;
I != BFVTy->getNumElements(); ++
I) {
1058 const auto *BElt = BConst->getAggregateElement(
I);
1059 const auto *DElt = DConst->getAggregateElement(
I);
1060 const auto *EElt = EConst->getAggregateElement(
I);
1062 if (!BElt || !DElt || !EElt)
1064 if (!isReducible(BElt, DElt, EElt))
1069 if (!isReducible(
B,
D,
E))
1087 Value *
A =
nullptr, *
B =
nullptr, *
C =
nullptr, *
D =
nullptr, *
E =
nullptr;
1092 std::optional<std::pair<unsigned, unsigned>> MaskPair =
1098 unsigned CmpMask0 = MaskPair->first;
1099 unsigned CmpMask1 = MaskPair->second;
1100 if ((CmpMask0 &
Mask_AllZeros) && (CmpMask1 == compareBMask)) {
1104 }
else if ((CmpMask0 == compareBMask) && (CmpMask1 &
Mask_AllZeros)) {
1115 ICmpInst *UnsignedICmp,
bool IsAnd,
1127 if (
match(UnsignedICmp,
1143 IsAnd && GetKnownNonZeroAndOther(
B,
A))
1144 return Builder.CreateICmpULT(Builder.CreateNeg(
B),
A);
1146 !IsAnd && GetKnownNonZeroAndOther(
B,
A))
1147 return Builder.CreateICmpUGE(Builder.CreateNeg(
B),
A);
1163 return std::nullopt;
1165 unsigned NumOriginalBits =
X->getType()->getScalarSizeInBits();
1166 unsigned NumExtractedBits = V->getType()->getScalarSizeInBits();
1172 Shift->
ule(NumOriginalBits - NumExtractedBits))
1174 return {{
X, 0, NumExtractedBits}};
1181 V = Builder.CreateLShr(V,
P.StartBit);
1183 if (TruncTy != V->getType())
1184 V = Builder.CreateTrunc(V, TruncTy);
1191Value *InstCombinerImpl::foldEqOfParts(
Value *Cmp0,
Value *Cmp1,
bool IsAnd) {
1196 auto GetMatchPart = [&](
Value *CmpV,
1197 unsigned OpNo) -> std::optional<IntPart> {
1206 return {{OpNo == 0 ?
X :
Y, 0, 1}};
1210 return std::nullopt;
1212 if (Pred ==
Cmp->getPredicate())
1221 return std::nullopt;
1230 return std::nullopt;
1232 return std::nullopt;
1237 return {{
I->getOperand(OpNo), From,
C->getBitWidth() - From}};
1240 std::optional<IntPart> L0 = GetMatchPart(Cmp0, 0);
1241 std::optional<IntPart> R0 = GetMatchPart(Cmp0, 1);
1242 std::optional<IntPart> L1 = GetMatchPart(Cmp1, 0);
1243 std::optional<IntPart> R1 = GetMatchPart(Cmp1, 1);
1244 if (!L0 || !R0 || !L1 || !R1)
1249 if (L0->From != L1->From || R0->From != R1->From) {
1250 if (L0->From != R1->From || R0->From != L1->From)
1257 if (L0->StartBit + L0->NumBits != L1->StartBit ||
1258 R0->StartBit + R0->NumBits != R1->StartBit) {
1259 if (L1->StartBit + L1->NumBits != L0->StartBit ||
1260 R1->StartBit + R1->NumBits != R0->StartBit)
1267 IntPart
L = {L0->From, L0->StartBit, L0->NumBits + L1->NumBits};
1268 IntPart
R = {R0->From, R0->StartBit, R0->NumBits + R1->NumBits};
1278 bool IsAnd,
bool IsLogical,
1308 if (!SubstituteCmp) {
1313 SubstituteCmp = Builder.CreateICmp(Pred1,
Y,
C);
1318 return IsAnd ? Builder.CreateLogicalAnd(Cmp0, SubstituteCmp,
"", MDFrom)
1319 : Builder.CreateLogicalOr(Cmp0, SubstituteCmp,
"", MDFrom);
1321 return Builder.CreateBinOp(IsAnd ? Instruction::And : Instruction::Or, Cmp0,
1329Value *InstCombinerImpl::foldAndOrOfICmpsUsingRanges(
ICmpInst *ICmp1,
1333 auto MatchExactRangeCheck =
1334 [](ICmpInst *ICmp) -> std::optional<std::pair<Value *, ConstantRange>> {
1337 return std::nullopt;
1339 CmpPredicate Pred = ICmp->getPredicate();
1345 C->countr_zero() >=
Mask->countr_zero()) {
1346 ConstantRange CR(*
C, *
C - *Mask);
1349 return std::make_pair(
X, CR);
1356 return std::make_pair(
X, CR.
subtract(*C1));
1357 return std::make_pair(
LHS, CR);
1360 auto RC1 = MatchExactRangeCheck(ICmp1);
1364 auto RC2 = MatchExactRangeCheck(ICmp2);
1368 auto &[V1, CR1] = *RC1;
1369 auto &[V2, CR2] = *RC2;
1375 CR1 = CR1.inverse();
1376 CR2 = CR2.inverse();
1379 Type *Ty = V1->getType();
1389 APInt LowerDiff = CR1.getLower() ^ CR2.getLower();
1390 APInt UpperDiff = (CR1.getUpper() - 1) ^ (CR2.getUpper() - 1);
1391 APInt CR1Size = CR1.getUpper() - CR1.getLower();
1392 if (!LowerDiff.
isPowerOf2() || LowerDiff != UpperDiff ||
1393 CR1Size != CR2.getUpper() - CR2.getLower())
1396 CR = CR1.getLower().ult(CR2.getLower()) ? CR1 : CR2;
1397 NewV =
Builder.CreateAnd(NewV, ConstantInt::get(Ty, ~LowerDiff));
1405 CR->getEquivalentICmp(NewPred, NewC,
Offset);
1408 NewV =
Builder.CreateAdd(NewV, ConstantInt::get(Ty,
Offset));
1409 return Builder.CreateICmp(NewPred, NewV, ConstantInt::get(Ty, NewC));
1437 Value *LHS0 =
LHS->getOperand(0), *LHS1 =
LHS->getOperand(1);
1438 Value *RHS0 =
RHS->getOperand(0), *RHS1 =
RHS->getOperand(1);
1450 bool IsAnd,
bool IsLogicalSelect) {
1451 Value *LHS0 =
LHS->getOperand(0), *LHS1 =
LHS->getOperand(1);
1452 Value *RHS0 =
RHS->getOperand(0), *RHS1 =
RHS->getOperand(1);
1455 if (LHS0 == RHS1 && RHS0 == LHS1) {
1475 if (LHS0 == RHS0 && LHS1 == RHS1) {
1478 unsigned NewPred = IsAnd ? FCmpCodeL & FCmpCodeR : FCmpCodeL | FCmpCodeR;
1487 if (!IsLogicalSelect &&
1500 return Builder.CreateFCmpFMF(PredL, LHS0, RHS0,
1506 if (!IsLogicalSelect && IsAnd &&
1522 auto [ClassValRHS, ClassMaskRHS] =
1525 auto [ClassValLHS, ClassMaskLHS] =
1527 if (ClassValLHS == ClassValRHS) {
1528 unsigned CombinedMask = IsAnd ? (ClassMaskLHS & ClassMaskRHS)
1529 : (ClassMaskLHS | ClassMaskRHS);
1530 return Builder.CreateIntrinsic(
1531 Intrinsic::is_fpclass, {ClassValLHS->getType()},
1532 {ClassValLHS,
Builder.getInt32(CombinedMask)});
1560 if (IsLessThanOrLessEqual(IsAnd ? PredR : PredL)) {
1564 if (IsLessThanOrLessEqual(IsAnd ? PredL : PredR)) {
1565 FastMathFlags NewFlag =
LHS->getFastMathFlags();
1566 if (!IsLogicalSelect)
1567 NewFlag |=
RHS->getFastMathFlags();
1570 Builder.CreateUnaryIntrinsic(Intrinsic::fabs, LHS0, NewFlag);
1572 PredL, FAbs, ConstantFP::get(LHS0->
getType(), *LHSC), NewFlag);
1584 if (!FCmp || !FCmp->hasOneUse())
1587 std::tie(ClassVal, ClassMask) =
1588 fcmpToClassTest(FCmp->getPredicate(), *FCmp->getParent()->getParent(),
1589 FCmp->getOperand(0), FCmp->getOperand(1));
1590 return ClassVal !=
nullptr;
1601 Value *ClassVal0 =
nullptr;
1602 Value *ClassVal1 =
nullptr;
1603 uint64_t ClassMask0, ClassMask1;
1619 ClassVal0 == ClassVal1) {
1620 unsigned NewClassMask;
1622 case Instruction::And:
1623 NewClassMask = ClassMask0 & ClassMask1;
1625 case Instruction::Or:
1626 NewClassMask = ClassMask0 | ClassMask1;
1628 case Instruction::Xor:
1629 NewClassMask = ClassMask0 ^ ClassMask1;
1638 1, ConstantInt::get(
II->getArgOperand(1)->getType(), NewClassMask));
1645 1, ConstantInt::get(
II->getArgOperand(1)->getType(), NewClassMask));
1649 CallInst *NewClass =
1650 Builder.CreateIntrinsic(Intrinsic::is_fpclass, {ClassVal0->
getType()},
1651 {ClassVal0,
Builder.getInt32(NewClassMask)});
1665Instruction *InstCombinerImpl::canonicalizeConditionalNegationViaMathToSelect(
1667 assert(
I.getOpcode() == BinaryOperator::Xor &&
"Only for xor!");
1672 !
Cond->getType()->isIntOrIntVectorTy(1) ||
1675 return createSelectInstWithUnknownProfile(
1686 assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
1687 "Expecting and/or op for fcmp transform");
1706 X->getType() !=
Y->getType())
1710 X->getType() !=
Y->getType())
1727 assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
1728 "Trying to match De Morgan's Laws with something other than and/or");
1732 (Opcode == Instruction::And) ? Instruction::Or : Instruction::And;
1734 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
1760bool InstCombinerImpl::shouldOptimizeCast(
CastInst *CI) {
1770 if (isEliminableCastPair(PrecedingCI, CI))
1798 auto *ZExt =
new ZExtInst(NewOp, DestTy);
1799 ZExt->setNonNeg(Flags.NNeg);
1800 ZExt->andIRFlags(Cast);
1809 return new SExtInst(NewOp, DestTy);
1819 assert(
I.isBitwiseLogicOp() &&
"Unexpected opcode for bitwise logic folding");
1821 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
1827 auto FoldBitwiseICmpZeroWithICmp = [&](
Value *Op0,
1828 Value *Op1) -> Instruction * {
1843 auto *BitwiseOp =
Builder.CreateBinOp(LogicOpc, ICmpL, ICmpR);
1845 return new ZExtInst(BitwiseOp, Op0->
getType());
1848 if (
auto *Ret = FoldBitwiseICmpZeroWithICmp(Op0, Op1))
1851 if (
auto *Ret = FoldBitwiseICmpZeroWithICmp(Op1, Op0))
1860 Type *DestTy =
I.getType();
1886 unsigned XNumBits =
X->getType()->getScalarSizeInBits();
1887 unsigned YNumBits =
Y->getType()->getScalarSizeInBits();
1888 if (XNumBits != YNumBits) {
1896 if (XNumBits < YNumBits) {
1897 X =
Builder.CreateCast(CastOpcode,
X,
Y->getType());
1898 }
else if (YNumBits < XNumBits) {
1899 Y =
Builder.CreateCast(CastOpcode,
Y,
X->getType());
1904 Value *NarrowLogic =
Builder.CreateBinOp(LogicOpc,
X,
Y,
I.getName());
1907 if (Disjoint && NewDisjoint)
1908 NewDisjoint->setIsDisjoint(Disjoint->isDisjoint());
1920 if (shouldOptimizeCast(Cast0) && shouldOptimizeCast(Cast1)) {
1921 Value *NewOp =
Builder.CreateBinOp(LogicOpc, Cast0Src, Cast1Src,
1931 assert(
I.getOpcode() == Instruction::And);
1932 Value *Op0 =
I.getOperand(0);
1933 Value *Op1 =
I.getOperand(1);
1941 return BinaryOperator::CreateXor(
A,
B);
1957 assert(
I.getOpcode() == Instruction::Or);
1958 Value *Op0 =
I.getOperand(0);
1959 Value *Op1 =
I.getOperand(1);
1984 return BinaryOperator::CreateXor(
A,
B);
2004 Value *Op0 =
And.getOperand(0), *Op1 =
And.getOperand(1);
2025 if (
Opc == Instruction::LShr ||
Opc == Instruction::Shl)
2034 return new ZExtInst(
Builder.CreateAnd(NewBO,
X), Ty);
2042 assert(Opcode == Instruction::And || Opcode == Instruction::Or);
2046 (Opcode == Instruction::And) ? Instruction::Or : Instruction::And;
2048 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
2055 const auto matchNotOrAnd =
2056 [Opcode, FlippedOpcode](
Value *
Op,
auto m_A,
auto m_B,
auto m_C,
2057 Value *&
X,
bool CountUses =
false) ->
bool {
2058 if (CountUses && !
Op->hasOneUse())
2064 return !CountUses ||
X->hasOneUse();
2080 return (Opcode == Instruction::Or)
2081 ? BinaryOperator::CreateAnd(
Xor, Builder.CreateNot(
A))
2090 return (Opcode == Instruction::Or)
2091 ? BinaryOperator::CreateAnd(
Xor, Builder.CreateNot(
B))
2100 Opcode, Builder.CreateBinOp(FlippedOpcode,
B,
C),
A));
2107 Opcode, Builder.CreateBinOp(FlippedOpcode,
A,
C),
B));
2113 if (Opcode == Instruction::Or && Op0->
hasOneUse() &&
2151 return (Opcode == Instruction::Or)
2153 : BinaryOperator::CreateOr(
Xor,
X);
2161 FlippedOpcode, Builder.CreateBinOp(Opcode,
C, Builder.CreateNot(
B)),
2169 FlippedOpcode, Builder.CreateBinOp(Opcode,
B, Builder.CreateNot(
C)),
2189 if (!
X->hasOneUse()) {
2190 Value *YZ = Builder.CreateBinOp(Opcode,
Y, Z);
2194 if (!
Y->hasOneUse()) {
2195 Value *XZ = Builder.CreateBinOp(Opcode,
X, Z);
2215 Type *Ty =
I.getType();
2217 Value *Op0 =
I.getOperand(0);
2218 Value *Op1 =
I.getOperand(1);
2226 unsigned Width = Ty->getScalarSizeInBits();
2230 case Instruction::And:
2231 if (
C->countl_one() < LastOneMath)
2234 case Instruction::Xor:
2235 case Instruction::Or:
2236 if (
C->countl_zero() < LastOneMath)
2243 Value *NewBinOp = Builder.CreateBinOp(OpC,
X, ConstantInt::get(Ty, *
C));
2245 ConstantInt::get(Ty, *C2), Op0);
2252 assert((
I.isBitwiseLogicOp() ||
I.getOpcode() == Instruction::Add) &&
2253 "Unexpected opcode");
2256 Constant *ShiftedC1, *ShiftedC2, *AddC;
2257 Type *Ty =
I.getType();
2273 if (!Op0Inst || !Op1Inst)
2279 if (ShiftOp != Op1Inst->getOpcode())
2283 if (
I.getOpcode() == Instruction::Add && ShiftOp != Instruction::Shl)
2287 I.getOpcode(), ShiftedC1,
Builder.CreateBinOp(ShiftOp, ShiftedC2, AddC));
2303 assert(
I.isBitwiseLogicOp() &&
"Should and/or/xor");
2304 if (!
I.getOperand(0)->hasOneUse())
2311 if (
Y && (!
Y->hasOneUse() ||
X->getIntrinsicID() !=
Y->getIntrinsicID()))
2317 if (!
Y && (!(IID == Intrinsic::bswap || IID == Intrinsic::bitreverse) ||
2322 case Intrinsic::fshl:
2323 case Intrinsic::fshr: {
2324 if (
X->getOperand(2) !=
Y->getOperand(2))
2327 Builder.CreateBinOp(
I.getOpcode(),
X->getOperand(0),
Y->getOperand(0));
2329 Builder.CreateBinOp(
I.getOpcode(),
X->getOperand(1),
Y->getOperand(1));
2334 case Intrinsic::bswap:
2335 case Intrinsic::bitreverse: {
2336 Value *NewOp0 = Builder.CreateBinOp(
2337 I.getOpcode(),
X->getOperand(0),
2338 Y ?
Y->getOperand(0)
2339 : ConstantInt::get(
I.getType(), IID == Intrinsic::bswap
2359 unsigned Depth = 0) {
2367 if (!
I || !
I->isBitwiseLogicOp() ||
Depth >= 3)
2370 if (!
I->hasOneUse())
2371 SimplifyOnly =
true;
2374 SimplifyOnly, IC,
Depth + 1);
2376 SimplifyOnly, IC,
Depth + 1);
2377 if (!NewOp0 && !NewOp1)
2381 NewOp0 =
I->getOperand(0);
2383 NewOp1 =
I->getOperand(1);
2399 bool RHSIsLogical) {
2403 if (
Value *Res = foldBooleanAndOr(
LHS,
X,
I, IsAnd,
false))
2404 return RHSIsLogical ?
Builder.CreateLogicalOp(Opcode, Res,
Y)
2405 :
Builder.CreateBinOp(Opcode, Res,
Y);
2408 if (
Value *Res = foldBooleanAndOr(
LHS,
Y,
I, IsAnd,
false))
2409 return RHSIsLogical ?
Builder.CreateLogicalOp(Opcode,
X, Res)
2410 :
Builder.CreateBinOp(Opcode,
X, Res);
2418 Type *Ty =
I.getType();
2421 SQ.getWithInstruction(&
I)))
2452 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
2461 Value *IsZero =
Builder.CreateICmpEQ(
X, ConstantInt::get(Ty, 0));
2471 return createSelectInstWithUnknownProfile(Cmp,
2481 return BinaryOperator::CreateAnd(
Builder.CreateNot(
X),
Y);
2487 Constant *NewC = ConstantInt::get(Ty, *
C & *XorC);
2490 return BinaryOperator::CreateXor(
And, NewC);
2501 APInt Together = *
C & *OrC;
2504 return BinaryOperator::CreateOr(
And, ConstantInt::get(Ty, Together));
2507 unsigned Width = Ty->getScalarSizeInBits();
2508 const APInt *ShiftC;
2510 ShiftC->
ult(Width)) {
2515 Constant *ShAmtC = ConstantInt::get(Ty, ShiftC->
zext(Width));
2516 return BinaryOperator::CreateLShr(Sext, ShAmtC);
2524 return BinaryOperator::CreateLShr(
X, ConstantInt::get(Ty, *ShiftC));
2532 if (Op0->
hasOneUse() &&
C->isPowerOf2() && (*AddC & (*
C - 1)) == 0) {
2533 assert((*
C & *AddC) != 0 &&
"Expected common bit");
2535 return BinaryOperator::CreateXor(NewAnd, Op1);
2542 switch (
B->getOpcode()) {
2543 case Instruction::Xor:
2544 case Instruction::Or:
2545 case Instruction::Mul:
2546 case Instruction::Add:
2547 case Instruction::Sub:
2563 C->isIntN(
X->getType()->getScalarSizeInBits())) {
2564 unsigned XWidth =
X->getType()->getScalarSizeInBits();
2565 Constant *TruncC1 = ConstantInt::get(
X->getType(), C1->
trunc(XWidth));
2567 ?
Builder.CreateBinOp(BOpcode,
X, TruncC1)
2568 :
Builder.CreateBinOp(BOpcode, TruncC1,
X);
2569 Constant *TruncC = ConstantInt::get(
X->getType(),
C->trunc(XWidth));
2579 C->isMask(
X->getType()->getScalarSizeInBits())) {
2581 Value *TrY =
Builder.CreateTrunc(
Y,
X->getType(),
Y->getName() +
".tr");
2589 C->isMask(
X->getType()->getScalarSizeInBits())) {
2591 Value *TrY =
Builder.CreateTrunc(
Y,
X->getType(),
Y->getName() +
".tr");
2608 Value *NewRHS =
Builder.CreateAnd(
Y, Op1,
Y->getName() +
".masked");
2614 Value *NewLHS =
Builder.CreateAnd(
X, Op1,
X->getName() +
".masked");
2623 if (
C->isPowerOf2() &&
2626 int Log2C =
C->exactLogBase2();
2629 int BitNum = IsShiftLeft ? Log2C - Log2ShiftC : Log2ShiftC - Log2C;
2630 assert(BitNum >= 0 &&
"Expected demanded bits to handle impossible mask");
2631 Value *Cmp =
Builder.CreateICmpEQ(
X, ConstantInt::get(Ty, BitNum));
2632 return createSelectInstWithUnknownProfile(Cmp, ConstantInt::get(Ty, *
C),
2652 return createSelectInstWithUnknownProfile(
2663 if (Cmp && Cmp->isZeroValue()) {
2669 return createSelectInstWithUnknownProfile(
2687 !
Builder.GetInsertBlock()->getParent()->hasFnAttribute(
2688 Attribute::NoImplicitFloat)) {
2692 Value *FAbs =
Builder.CreateUnaryIntrinsic(Intrinsic::fabs, CastOp);
2703 APInt(Ty->getScalarSizeInBits(),
2704 Ty->getScalarSizeInBits() -
2705 X->getType()->getScalarSizeInBits())))) {
2706 auto *SExt =
Builder.CreateSExt(
X, Ty,
X->getName() +
".signext");
2707 return BinaryOperator::CreateAnd(SExt, Op1);
2713 if (
I.getType()->isIntOrIntVectorTy(1)) {
2716 foldAndOrOfSelectUsingImpliedCond(Op1, *SI0,
true))
2721 foldAndOrOfSelectUsingImpliedCond(Op0, *SI1,
true))
2736 return BinaryOperator::CreateAnd(Op0,
B);
2739 return BinaryOperator::CreateAnd(Op1,
B);
2747 if (NotC !=
nullptr)
2748 return BinaryOperator::CreateAnd(Op0, NotC);
2757 if (NotC !=
nullptr)
2758 return BinaryOperator::CreateAnd(Op1,
Builder.CreateNot(
C));
2767 return BinaryOperator::CreateAnd(
A,
B);
2775 return BinaryOperator::CreateAnd(
A,
B);
2783 return BinaryOperator::CreateAnd(
Builder.CreateNot(
A),
B);
2791 return BinaryOperator::CreateAnd(
Builder.CreateNot(
A),
B);
2795 foldBooleanAndOr(Op0, Op1,
I,
true,
false))
2800 if (
auto *V = reassociateBooleanAndOr(Op0,
X,
Y,
I,
true,
2806 if (
auto *V = reassociateBooleanAndOr(Op1,
X,
Y,
I,
true,
2814 if (
Instruction *CastedAnd = foldCastedBitwiseLogic(
I))
2827 A->getType()->isIntOrIntVectorTy(1))
2833 A->getType()->isIntOrIntVectorTy(1))
2838 A->getType()->isIntOrIntVectorTy(1))
2839 return createSelectInstWithUnknownProfile(
2840 A,
Builder.CreateAnd(
B, ConstantInt::get(Ty, 1)),
2846 if (
A->getType()->isIntOrIntVectorTy(1))
2850 return createSelectInstWithUnknownProfile(
2860 *
C ==
X->getType()->getScalarSizeInBits() - 1) {
2862 return createSelectInstWithUnknownProfile(IsNeg,
Y,
2870 *
C ==
X->getType()->getScalarSizeInBits() - 1) {
2872 return createSelectInstWithUnknownProfile(IsNeg,
2882 Value *Start =
nullptr, *Step =
nullptr;
2890 return Canonicalized;
2892 if (
Instruction *Folded = foldLogicOfIsFPClass(
I, Op0, Op1))
2904 return BinaryOperator::CreateAnd(V, Op1);
2908 return BinaryOperator::CreateAnd(Op0, V);
2915 bool MatchBitReversals) {
2923 for (
auto *Inst : Insts) {
2924 Inst->setDebugLoc(
I.getDebugLoc());
2930std::optional<std::pair<Intrinsic::ID, SmallVector<Value *, 3>>>
2934 assert(
Or.getOpcode() == BinaryOperator::Or &&
"Expecting or instruction");
2936 unsigned Width =
Or.getType()->getScalarSizeInBits();
2941 return std::nullopt;
2949 Value *ShVal0, *ShVal1, *ShAmt0, *ShAmt1;
2955 return std::nullopt;
2958 if (Or0->
getOpcode() == BinaryOperator::LShr) {
2964 Or1->
getOpcode() == BinaryOperator::LShr &&
2965 "Illegal or(shift,shift) pair");
2969 auto matchShiftAmount = [&](
Value *L,
Value *R,
unsigned Width) ->
Value * {
2971 const APInt *LI, *RI;
2973 if (LI->
ult(Width) && RI->
ult(Width) && (*LI + *RI) == Width)
2974 return ConstantInt::get(L->getType(), *LI);
2998 if (ShVal0 != ShVal1)
3009 unsigned Mask = Width - 1;
3033 Value *ShAmt = matchShiftAmount(ShAmt0, ShAmt1, Width);
3035 ShAmt = matchShiftAmount(ShAmt1, ShAmt0, Width);
3039 return std::nullopt;
3041 FShiftArgs = {ShVal0, ShVal1, ShAmt};
3058 const APInt *ZextHighShlAmt;
3061 return std::nullopt;
3065 return std::nullopt;
3067 unsigned HighSize =
High->getType()->getScalarSizeInBits();
3068 unsigned LowSize =
Low->getType()->getScalarSizeInBits();
3071 if (ZextHighShlAmt->
ult(LowSize) || ZextHighShlAmt->
ugt(Width - HighSize))
3072 return std::nullopt;
3082 const APInt *ZextLowShlAmt;
3089 if (*ZextLowShlAmt + *ZextHighShlAmt != Width)
3095 ZextLowShlAmt->
ule(Width - LowSize) &&
"Invalid concat");
3104 FShiftArgs = {U, U, ConstantInt::get(Or0->
getType(), *ZextHighShlAmt)};
3109 if (FShiftArgs.
empty())
3110 return std::nullopt;
3112 Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
3113 return std::make_pair(IID, FShiftArgs);
3119 auto [IID, FShiftArgs] = *Opt;
3130 assert(
Or.getOpcode() == Instruction::Or &&
"bswap requires an 'or'");
3131 Value *Op0 =
Or.getOperand(0), *Op1 =
Or.getOperand(1);
3134 unsigned Width = Ty->getScalarSizeInBits();
3135 if ((Width & 1) != 0)
3137 unsigned HalfWidth = Width / 2;
3144 Value *LowerSrc, *ShlVal, *UpperSrc;
3155 Value *NewLower = Builder.CreateZExt(
Lo, Ty);
3156 Value *NewUpper = Builder.CreateZExt(
Hi, Ty);
3157 NewUpper = Builder.CreateShl(NewUpper, HalfWidth);
3158 Value *BinOp = Builder.CreateOr(NewLower, NewUpper);
3159 return Builder.CreateIntrinsic(
id, Ty, BinOp);
3164 Value *LowerBSwap, *UpperBSwap;
3167 return ConcatIntrinsicCalls(Intrinsic::bswap, UpperBSwap, LowerBSwap);
3171 Value *LowerBRev, *UpperBRev;
3174 return ConcatIntrinsicCalls(Intrinsic::bitreverse, UpperBRev, LowerBRev);
3186 return Builder.CreateSExt(
X, Ty);
3194 for (
unsigned i = 0; i != NumElts; ++i) {
3197 if (!EltC1 || !EltC2)
3216 Type *Ty =
A->getType();
3232 if (
A->getType()->isIntOrIntVectorTy()) {
3234 if (NumSignBits ==
A->getType()->getScalarSizeInBits() &&
3257 Cond->getType()->isIntOrIntVectorTy(1)) {
3283 Cond->getType()->isIntOrIntVectorTy(1) &&
3297 Value *
D,
bool InvertFalseVal) {
3303 if (
Value *
Cond = getSelectCondition(
A,
C, InvertFalseVal)) {
3308 Type *SelTy =
A->getType();
3311 unsigned Elts = VecTy->getElementCount().getKnownMinValue();
3315 Type *EltTy =
Builder.getIntNTy(SelEltSize / Elts);
3332 bool IsAnd,
bool IsLogical,
3339 IsAnd ?
LHS->getInversePredicate() :
LHS->getPredicate();
3341 IsAnd ?
RHS->getInversePredicate() :
RHS->getPredicate();
3347 !(
LHS->hasOneUse() ||
RHS->hasOneUse()))
3350 auto MatchRHSOp = [LHS0, CInt](
const Value *RHSOp) {
3353 (CInt->
isZero() && RHSOp == LHS0);
3367 return Builder.CreateICmp(
3369 Builder.CreateSub(LHS0, ConstantInt::get(LHS0->
getType(), *CInt + 1)),
3379 const SimplifyQuery Q =
SQ.getWithInstruction(&
I);
3382 Value *LHS0 =
LHS->getOperand(0), *RHS0 =
RHS->getOperand(0);
3383 Value *LHS1 =
LHS->getOperand(1), *RHS1 =
RHS->getOperand(1);
3385 const APInt *LHSC =
nullptr, *RHSC =
nullptr;
3392 if (LHS0 == RHS1 && LHS1 == RHS0) {
3396 if (LHS0 == RHS0 && LHS1 == RHS1) {
3399 bool IsSigned =
LHS->isSigned() ||
RHS->isSigned();
3422 RHS->setSameSign(
false);
3448 if (IsAnd && !IsLogical)
3474 return Builder.CreateICmp(PredL, NewOr,
3485 return Builder.CreateICmp(PredL, NewAnd,
3505 const APInt *AndC, *SmallC =
nullptr, *BigC =
nullptr;
3519 if (SmallC && BigC) {
3520 unsigned BigBitSize = BigC->getBitWidth();
3527 APInt
N = SmallC->
zext(BigBitSize) | *BigC;
3529 return Builder.CreateICmp(PredL, NewAnd, NewVal);
3539 bool TrueIfSignedL, TrueIfSignedR;
3545 if ((TrueIfSignedL && !TrueIfSignedR &&
3548 (!TrueIfSignedL && TrueIfSignedR &&
3552 return Builder.CreateIsNeg(NewXor);
3555 if ((TrueIfSignedL && !TrueIfSignedR &&
3558 (!TrueIfSignedL && TrueIfSignedR &&
3562 return Builder.CreateIsNotNeg(NewXor);
3571 if (LHS0 == RHS0 && PredL == PredR &&
3573 !
I.getFunction()->hasFnAttribute(Attribute::NoImplicitFloat) &&
3576 X->getType()->getScalarType()->isIEEELikeFPTy() &&
3577 APFloat(
X->getType()->getScalarType()->getFltSemantics(), *MaskC)
3579 ((LHSC->
isZero() && *RHSC == *MaskC) ||
3580 (RHSC->
isZero() && *LHSC == *MaskC)))
3584 return foldAndOrOfICmpsUsingRanges(
LHS,
RHS, IsAnd);
3599 SQ.getWithInstruction(&
I)))
3604 if (
Value *Res = foldAndOrOfICmps(LHSCmp, RHSCmp,
I, IsAnd, IsLogical))
3609 if (
Value *Res = foldLogicOfFCmps(LHSCmp, RHSCmp, IsAnd, IsLogical))
3620 assert(
I.getOpcode() == Instruction::Or &&
3621 "Simplification only supports or at the moment.");
3623 Value *Cmp1, *Cmp2, *Cmp3, *Cmp4;
3630 return Builder.CreateXor(Cmp1, Cmp4);
3632 return Builder.CreateXor(Cmp1, Cmp3);
3662 const unsigned EltBitWidth = EltTy->getBitWidth();
3664 if (TargetBitWidth % EltBitWidth != 0 || ShlAmt % EltBitWidth != 0)
3666 const unsigned TargetEltWidth = TargetBitWidth / EltBitWidth;
3667 const unsigned ShlEltAmt = ShlAmt / EltBitWidth;
3669 const unsigned MaskIdx =
3670 DL.isLittleEndian() ? ShlEltAmt : TargetEltWidth - ShlEltAmt - 1;
3672 VecOffset =
static_cast<int64_t
>(VecIdx) -
static_cast<int64_t
>(MaskIdx);
3673 Mask.resize(TargetEltWidth);
3687 Mask.resize(SrcTy->getNumElements());
3701 const unsigned NumVecElts = VecTy->getNumElements();
3702 bool FoundVecOffset =
false;
3703 for (
unsigned Idx = 0; Idx < ShuffleMask.size(); ++Idx) {
3706 const unsigned ShuffleIdx = ShuffleMask[Idx];
3707 if (ShuffleIdx >= NumVecElts) {
3708 const unsigned ConstIdx = ShuffleIdx - NumVecElts;
3711 if (!ConstElt || !ConstElt->isNullValue())
3716 if (FoundVecOffset) {
3717 if (VecOffset + Idx != ShuffleIdx)
3720 if (ShuffleIdx < Idx)
3722 VecOffset = ShuffleIdx - Idx;
3723 FoundVecOffset =
true;
3727 return FoundVecOffset;
3740 bool AlreadyInsertedMaskedElt = Mask.test(InsertIdx);
3742 if (!AlreadyInsertedMaskedElt)
3743 Mask.reset(InsertIdx);
3752 assert(
I.getOpcode() == Instruction::Or);
3753 Value *LhsVec, *RhsVec;
3754 int64_t LhsVecOffset, RhsVecOffset;
3762 if (LhsVec != RhsVec || LhsVecOffset != RhsVecOffset)
3766 const unsigned ZeroVecIdx =
3769 for (
unsigned Idx : Mask.set_bits()) {
3770 assert(LhsVecOffset + Idx >= 0);
3771 ShuffleMask[Idx] = LhsVecOffset + Idx;
3774 Value *MaskedVec = Builder.CreateShuffleVector(
3776 I.getName() +
".v");
3802 const APInt *ShiftedMaskConst =
nullptr;
3809 if (!
match(MaskedOp0,
3814 if (LShrAmt > ShlAmt)
3816 Offset = ShlAmt - LShrAmt;
3818 Mask = ShiftedMaskConst ? ShiftedMaskConst->
shl(LShrAmt)
3820 Int->getType()->getScalarSizeInBits(), LShrAmt);
3830 Value *LhsInt, *RhsInt;
3831 APInt LhsMask, RhsMask;
3833 bool IsLhsShlNUW, IsLhsShlNSW, IsRhsShlNUW, IsRhsShlNSW;
3840 if (LhsInt != RhsInt || LhsOffset != RhsOffset)
3843 APInt Mask = LhsMask | RhsMask;
3846 Value *Res = Builder.CreateShl(
3848 Builder.CreateAnd(LhsInt, Mask, LhsInt->
getName() +
".mask"), DestTy,
3850 ConstantInt::get(DestTy, LhsOffset),
"", IsLhsShlNUW && IsRhsShlNUW,
3851 IsLhsShlNSW && IsRhsShlNSW);
3876 return std::nullopt;
3879 Value *Original =
nullptr;
3880 const APInt *Mask =
nullptr;
3881 const APInt *MulConst =
nullptr;
3884 if (MulConst->
isZero() || Mask->isZero())
3885 return std::nullopt;
3887 return std::optional<DecomposedBitMaskMul>(
3888 {Original, *MulConst, *Mask,
3894 const APInt *EqZero =
nullptr, *NeZero =
nullptr;
3898 auto ICmpDecompose =
3901 if (!ICmpDecompose.has_value())
3902 return std::nullopt;
3905 ICmpDecompose->C.isZero());
3910 if (!EqZero->
isZero() || NeZero->isZero())
3911 return std::nullopt;
3913 if (!ICmpDecompose->Mask.isPowerOf2() || ICmpDecompose->Mask.isZero() ||
3914 NeZero->getBitWidth() != ICmpDecompose->Mask.getBitWidth())
3915 return std::nullopt;
3917 if (!NeZero->urem(ICmpDecompose->Mask).isZero())
3918 return std::nullopt;
3920 return std::optional<DecomposedBitMaskMul>(
3921 {ICmpDecompose->X, NeZero->udiv(ICmpDecompose->Mask),
3922 ICmpDecompose->Mask,
false,
false});
3925 return std::nullopt;
3941 if (Decomp0->isCombineableWith(*Decomp1)) {
3942 Value *NewAnd = Builder.CreateAnd(
3944 ConstantInt::get(Decomp0->X->getType(), Decomp0->Mask + Decomp1->Mask));
3946 return Builder.CreateMul(
3947 NewAnd, ConstantInt::get(NewAnd->
getType(), Decomp1->Factor),
"",
3948 Decomp0->NUW && Decomp1->NUW, Decomp0->NSW && Decomp1->NSW);
3967 if (
Value *Res = foldDisjointOr(
LHS,
X))
3968 return Builder.CreateOr(Res,
Y,
"",
true);
3969 if (
Value *Res = foldDisjointOr(
LHS,
Y))
3970 return Builder.CreateOr(Res,
X,
"",
true);
3974 if (
Value *Res = foldDisjointOr(
X,
RHS))
3975 return Builder.CreateOr(Res,
Y,
"",
true);
3976 if (
Value *Res = foldDisjointOr(
Y,
RHS))
3977 return Builder.CreateOr(Res,
X,
"",
true);
3991 const APInt *C1, *C2;
4000 Constant *NewC = ConstantInt::get(
X->getType(), C2->
udiv(*C1));
4021 return Builder.CreateBinaryIntrinsic(Intrinsic::abs,
X,
4022 Builder.getFalse());
4032 SQ.getWithInstruction(&
I)))
4068 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
4069 Type *Ty =
I.getType();
4070 if (Ty->isIntOrIntVectorTy(1)) {
4073 foldAndOrOfSelectUsingImpliedCond(Op1, *SI0,
false))
4078 foldAndOrOfSelectUsingImpliedCond(Op0, *SI1,
false))
4115 if (
Value *Res = foldDisjointOr(
I.getOperand(0),
I.getOperand(1)))
4118 if (
Value *Res = reassociateDisjointOr(
I.getOperand(0),
I.getOperand(1)))
4129 return BinaryOperator::CreateXor(
Or, ConstantInt::get(Ty, *CV));
4136 Value *IncrementY =
Builder.CreateAdd(
Y, ConstantInt::get(Ty, 1));
4137 return BinaryOperator::CreateMul(
X, IncrementY);
4146 const APInt *C0, *C1;
4152 return BinaryOperator::CreateOr(
Builder.CreateAnd(
X, *C0),
B);
4155 return BinaryOperator::CreateOr(
Builder.CreateAnd(
X, *C1),
A);
4159 return BinaryOperator::CreateXor(
Builder.CreateAnd(
X, *C0),
B);
4162 return BinaryOperator::CreateXor(
Builder.CreateAnd(
X, *C1),
A);
4165 if ((*C0 & *C1).
isZero()) {
4170 Constant *C01 = ConstantInt::get(Ty, *C0 | *C1);
4171 return BinaryOperator::CreateAnd(
A, C01);
4177 Constant *C01 = ConstantInt::get(Ty, *C0 | *C1);
4178 return BinaryOperator::CreateAnd(
B, C01);
4182 const APInt *C2, *C3;
4187 Constant *C01 = ConstantInt::get(Ty, *C0 | *C1);
4188 return BinaryOperator::CreateAnd(
Or, C01);
4198 if (
Value *V = matchSelectFromAndOr(
A,
C,
B,
D))
4200 if (
Value *V = matchSelectFromAndOr(
A,
C,
D,
B))
4202 if (
Value *V = matchSelectFromAndOr(
C,
A,
B,
D))
4204 if (
Value *V = matchSelectFromAndOr(
C,
A,
D,
B))
4206 if (
Value *V = matchSelectFromAndOr(
B,
D,
A,
C))
4208 if (
Value *V = matchSelectFromAndOr(
B,
D,
C,
A))
4210 if (
Value *V = matchSelectFromAndOr(
D,
B,
A,
C))
4212 if (
Value *V = matchSelectFromAndOr(
D,
B,
C,
A))
4221 if (
Value *V = matchSelectFromAndOr(
A,
C,
B,
D,
true))
4223 if (
Value *V = matchSelectFromAndOr(
A,
C,
D,
B,
true))
4225 if (
Value *V = matchSelectFromAndOr(
C,
A,
B,
D,
true))
4227 if (
Value *V = matchSelectFromAndOr(
C,
A,
D,
B,
true))
4236 return BinaryOperator::CreateOr(Op0,
C);
4243 return BinaryOperator::CreateOr(Op1,
C);
4249 bool SwappedForXor =
false;
4252 SwappedForXor =
true;
4259 return BinaryOperator::CreateOr(Op0,
B);
4261 return BinaryOperator::CreateOr(Op0,
A);
4266 return BinaryOperator::CreateOr(
A,
B);
4294 return BinaryOperator::CreateOr(Nand,
C);
4302 foldBooleanAndOr(Op0, Op1,
I,
false,
false))
4307 if (
auto *V = reassociateBooleanAndOr(Op0,
X,
Y,
I,
false,
4313 if (
auto *V = reassociateBooleanAndOr(Op1,
X,
Y,
I,
false,
4333 A->getType()->isIntOrIntVectorTy(1))
4334 return createSelectInstWithUnknownProfile(
4356 return IsDisjointOuter && IsDisjointInner
4357 ? BinaryOperator::CreateDisjointOr(Inner, CI)
4358 : BinaryOperator::CreateOr(Inner, CI);
4365 Value *
X =
nullptr, *
Y =
nullptr;
4384 return createSelectInstWithUnknownProfile(NewICmpInst,
AllOnes,
X);
4397 return BinaryOperator::CreateXor(
A,
B);
4413 Value *
Mul, *Ov, *MulIsNotZero, *UMulWithOv;
4431 return BinaryOperator::CreateAnd(NotNullA, NotNullB);
4440 const APInt *C1, *C2;
4455 : C2->
uadd_ov(*C1, Overflow));
4459 return BinaryOperator::CreateOr(Ov, NewCmp);
4478 ConstantInt::get(Ty, Ty->getScalarSizeInBits() - 1),
X);
4484 Value *Start =
nullptr, *Step =
nullptr;
4502 return BinaryOperator::CreateOr(
4514 return BinaryOperator::CreateOr(
4522 return Canonicalized;
4524 if (
Instruction *Folded = foldLogicOfIsFPClass(
I, Op0, Op1))
4544 !
Builder.GetInsertBlock()->getParent()->hasFnAttribute(
4545 Attribute::NoImplicitFloat)) {
4549 Value *FAbs =
Builder.CreateUnaryIntrinsic(Intrinsic::fabs, CastOp);
4559 if ((KnownX.
One & *C2) == *C2)
4560 return BinaryOperator::CreateAnd(
X, ConstantInt::get(Ty, *C1 | *C2));
4569 return BinaryOperator::CreateOr(V, Op1);
4573 return BinaryOperator::CreateOr(Op0, V);
4589 assert(
I.getOpcode() == Instruction::Xor);
4590 Value *Op0 =
I.getOperand(0);
4591 Value *Op1 =
I.getOperand(1);
4602 return BinaryOperator::CreateXor(
A,
B);
4610 return BinaryOperator::CreateXor(
A,
B);
4618 return BinaryOperator::CreateXor(
A,
B);
4640 assert(
I.getOpcode() == Instruction::Xor &&
I.getOperand(0) ==
LHS &&
4641 I.getOperand(1) ==
RHS &&
"Should be 'xor' with these operands");
4644 Value *LHS0 =
LHS->getOperand(0), *LHS1 =
LHS->getOperand(1);
4645 Value *RHS0 =
RHS->getOperand(0), *RHS1 =
RHS->getOperand(1);
4648 if (LHS0 == RHS1 && LHS1 == RHS0) {
4652 if (LHS0 == RHS0 && LHS1 == RHS1) {
4655 bool IsSigned =
LHS->isSigned() ||
RHS->isSigned();
4660 const APInt *LC, *RC;
4669 bool TrueIfSignedL, TrueIfSignedR;
4674 return TrueIfSignedL == TrueIfSignedR ?
Builder.CreateIsNeg(XorLR) :
4675 Builder.CreateIsNotNeg(XorLR);
4685 if (CRUnion && CRIntersect)
4686 if (
auto CR = CRUnion->exactIntersectWith(CRIntersect->inverse())) {
4687 if (CR->isFullSet())
4689 if (CR->isEmptySet())
4694 CR->getEquivalentICmp(NewPred, NewC,
Offset);
4701 NewV =
Builder.CreateAdd(NewV, ConstantInt::get(Ty,
Offset));
4702 return Builder.CreateICmp(NewPred, NewV,
4703 ConstantInt::get(Ty, NewC));
4735 ICmpInst *
X =
nullptr, *
Y =
nullptr;
4736 if (OrICmp ==
LHS && AndICmp ==
RHS) {
4741 if (OrICmp ==
RHS && AndICmp ==
LHS) {
4748 Y->setPredicate(
Y->getInversePredicate());
4750 if (!
Y->hasOneUse()) {
4757 Builder.SetInsertPoint(
Y->getParent(), ++(
Y->getIterator()));
4761 Y->replaceUsesWithIf(NotY,
4762 [NotY](Use &U) {
return U.getUser() != NotY; });
4800 Value *NewA = Builder.CreateAnd(
D, NotM);
4801 return BinaryOperator::CreateXor(NewA,
X);
4807 Type *EltTy =
C->getType()->getScalarType();
4811 Value *NotC = Builder.CreateNot(
C);
4812 Value *
RHS = Builder.CreateAnd(
B, NotC);
4813 return BinaryOperator::CreateOr(
LHS,
RHS);
4828 return A ==
C ||
A ==
D ||
B ==
C ||
B ==
D;
4836 Value *NotY = Builder.CreateNot(
Y);
4837 return BinaryOperator::CreateOr(
X, NotY);
4844 Value *NotX = Builder.CreateNot(
X);
4845 return BinaryOperator::CreateOr(
Y, NotX);
4855 assert(
Xor.getOpcode() == Instruction::Xor &&
"Expected an xor instruction.");
4861 Value *Op0 =
Xor.getOperand(0), *Op1 =
Xor.getOperand(1);
4869 Op1->
hasNUses(2) && *ShAmt == Ty->getScalarSizeInBits() - 1 &&
4874 Value *IsNeg = Builder.CreateIsNeg(
A);
4877 Value *NegA =
Add->hasNoUnsignedWrap()
4879 : Builder.CreateNeg(
A,
"",
Add->hasNoSignedWrap());
4897 Op->replaceUsesWithIf(NotOp,
4898 [NotOp](
Use &U) {
return U.getUser() != NotOp; });
4939 Builder.SetInsertPoint(*
I.getInsertionPointAfterDef());
4942 NewLogicOp =
Builder.CreateBinOp(NewOpc, Op0, Op1,
I.getName() +
".not");
4945 Builder.CreateLogicalOp(NewOpc, Op0, Op1,
I.getName() +
".not");
4968 Value *NotOp0 =
nullptr;
4969 Value *NotOp1 =
nullptr;
4970 Value **OpToInvert =
nullptr;
4987 Builder.SetInsertPoint(*
I.getInsertionPointAfterDef());
4990 NewBinOp =
Builder.CreateBinOp(NewOpc, Op0, Op1,
I.getName() +
".not");
4992 NewBinOp =
Builder.CreateLogicalOp(NewOpc, Op0, Op1,
I.getName() +
".not");
5015 Type *Ty =
I.getType();
5018 Value *NotY = Builder.CreateNot(
Y,
Y->getName() +
".not");
5019 return BinaryOperator::CreateOr(
X, NotY);
5022 Value *NotY = Builder.CreateNot(
Y,
Y->getName() +
".not");
5030 return BinaryOperator::CreateAnd(
X, NotY);
5038 BinaryOperator *NotVal;
5045 return BinaryOperator::CreateAnd(DecX, NotY);
5050 return BinaryOperator::CreateAShr(
X,
Y);
5056 return BinaryOperator::CreateAShr(
X,
Y);
5063 return new SExtInst(IsNotNeg, Ty);
5090 return BinaryOperator::CreateAdd(
Builder.CreateNot(
X),
Y);
5113 return new BitCastInst(
X, Ty);
5119 X->getType()->isIntOrIntVectorTy(1)) {
5123 return new BitCastInst(Sext, Ty);
5134 if (
II &&
II->hasOneUse()) {
5138 Value *InvMaxMin =
Builder.CreateBinaryIntrinsic(InvID,
X, NotY);
5142 if (
II->getIntrinsicID() == Intrinsic::is_fpclass) {
5145 1, ConstantInt::get(ClassMask->
getType(),
5161 Value *TV = Sel->getTrueValue();
5162 Value *FV = Sel->getFalseValue();
5165 bool InvertibleT = (CmpT && CmpT->hasOneUse()) ||
isa<Constant>(TV);
5166 bool InvertibleF = (CmpF && CmpF->hasOneUse()) ||
isa<Constant>(FV);
5167 if (InvertibleT && InvertibleF) {
5169 CmpT->setPredicate(CmpT->getInversePredicate());
5173 CmpF->setPredicate(CmpF->getInversePredicate());
5197 SQ.getWithInstruction(&
I)))
5227 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
5235 return BinaryOperator::CreateXor(XorAC,
Y);
5238 return BinaryOperator::CreateXor(XorBC,
X);
5248 return BinaryOperator::CreateDisjointOr(Op0, Op1);
5250 return BinaryOperator::CreateOr(Op0, Op1);
5267 return BinaryOperator::CreateXor(
5290 *CA ==
X->getType()->getScalarSizeInBits() - 1 &&
5294 return createSelectInstWithUnknownProfile(IsNotNeg, Op1,
5299 Type *Ty =
I.getType();
5307 return BinaryOperator::CreateSub(ConstantInt::get(Ty, *
C + *RHSC),
X);
5311 return BinaryOperator::CreateAdd(
X, ConstantInt::get(Ty, *
C + *RHSC));
5316 return BinaryOperator::CreateXor(
X, ConstantInt::get(Ty, *
C ^ *RHSC));
5322 if (
II &&
II->hasOneUse() && *RHSC == Ty->getScalarSizeInBits() - 1) {
5324 if ((IID == Intrinsic::ctlz || IID == Intrinsic::cttz) &&
5327 IID = (IID == Intrinsic::ctlz) ? Intrinsic::cttz : Intrinsic::ctlz;
5340 return BinaryOperator::CreateShl(NotX, ConstantInt::get(Ty, *
C));
5346 return BinaryOperator::CreateLShr(NotX, ConstantInt::get(Ty, *
C));
5364 !
Builder.GetInsertBlock()->getParent()->hasFnAttribute(
5365 Attribute::NoImplicitFloat)) {
5388 auto *Opnd0 =
Builder.CreateLShr(
X, C2);
5389 Opnd0->takeName(Op0);
5390 return BinaryOperator::CreateXor(Opnd0, ConstantInt::get(Ty, FoldConst));
5403 return BinaryOperator::CreateAnd(
X,
Builder.CreateNot(Op0));
5407 return BinaryOperator::CreateAnd(
X,
Builder.CreateNot(Op1));
5412 return BinaryOperator::CreateAnd(Op0,
Builder.CreateNot(
X));
5420 return BinaryOperator::CreateAnd(Op1,
Builder.CreateNot(
X));
5426 return BinaryOperator::CreateXor(
5432 return BinaryOperator::CreateXor(
5438 return BinaryOperator::CreateOr(
A,
B);
5442 return BinaryOperator::CreateOr(
A,
B);
5452 return BinaryOperator::CreateOr(
A,
B);
5467 if (
B ==
C ||
B ==
D)
5473 return BinaryOperator::CreateAnd(
Builder.CreateXor(
B,
C), NotA);
5478 if (
I.getType()->isIntOrIntVectorTy(1) &&
5483 if (
B ==
C ||
B ==
D) {
5494 ? createSelectInstWithUnknownProfile(
A, NotB,
C)
5501 if (
Value *V = foldXorOfICmps(LHS, RHS,
I))
5504 if (
Instruction *CastedXor = foldCastedBitwiseLogic(
I))
5517 return BinaryOperator::CreateXor(
Builder.CreateXor(
X,
Y), C1);
5523 return Canonicalized;
5525 if (
Instruction *Folded = foldLogicOfIsFPClass(
I, Op0, Op1))
5528 if (
Instruction *Folded = canonicalizeConditionalNegationViaMathToSelect(
I))
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Register Bank Select
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static Value * foldAndOrOfICmpsWithConstEq(ICmpInst *Cmp0, ICmpInst *Cmp1, bool IsAnd, bool IsLogical, InstCombiner::BuilderTy &Builder, const SimplifyQuery &Q, Instruction &I)
Reduce logic-of-compares with equality to a constant by substituting a common operand with the consta...
static Value * foldIsPowerOf2OrZero(ICmpInst *Cmp0, ICmpInst *Cmp1, bool IsAnd, InstCombiner::BuilderTy &Builder, InstCombinerImpl &IC)
Fold (icmp eq ctpop(X) 1) | (icmp eq X 0) into (icmp ult ctpop(X) 2) and fold (icmp ne ctpop(X) 1) & ...
static Value * foldBitmaskMul(Value *Op0, Value *Op1, InstCombiner::BuilderTy &Builder)
(A & N) * C + (A & M) * C -> (A & (N + M)) & C This also accepts the equivalent select form of (A & N...
static unsigned conjugateICmpMask(unsigned Mask)
Convert an analysis of a masked ICmp into its equivalent if all boolean operations had the opposite s...
static Instruction * foldNotXor(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
static Value * foldLogOpOfMaskedICmps(Value *LHS, Value *RHS, bool IsAnd, bool IsLogical, InstCombiner::BuilderTy &Builder, const SimplifyQuery &Q)
Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E) into a single (icmp(A & X) ==/!...
static Value * getFCmpValue(unsigned Code, Value *LHS, Value *RHS, InstCombiner::BuilderTy &Builder, FMFSource FMF)
This is the complement of getFCmpCode, which turns an opcode and two operands into either a FCmp inst...
static bool matchIsFPClassLikeFCmp(Value *Op, Value *&ClassVal, uint64_t &ClassMask)
Match an fcmp against a special value that performs a test possible by llvm.is.fpclass.
static Value * foldSignedTruncationCheck(ICmpInst *ICmp0, ICmpInst *ICmp1, Instruction &CxtI, InstCombiner::BuilderTy &Builder)
General pattern: X & Y.
static Instruction * visitMaskedMerge(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
If we have a masked merge, in the canonical form of: (assuming that A only has one use....
static Instruction * canonicalizeAbs(BinaryOperator &Xor, InstCombiner::BuilderTy &Builder)
Canonicalize a shifty way to code absolute value to the more common pattern that uses negation and se...
static Value * foldIsPowerOf2(ICmpInst *Cmp0, ICmpInst *Cmp1, bool JoinedByAnd, InstCombiner::BuilderTy &Builder, InstCombinerImpl &IC)
Reduce a pair of compares that check if a value has exactly 1 bit set.
static Value * foldUnsignedUnderflowCheck(ICmpInst *ZeroICmp, ICmpInst *UnsignedICmp, bool IsAnd, const SimplifyQuery &Q, InstCombiner::BuilderTy &Builder)
Commuted variants are assumed to be handled by calling this function again with the parameters swappe...
static Instruction * foldOrToXor(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
static Value * simplifyAndOrWithOpReplaced(Value *V, Value *Op, Value *RepOp, bool SimplifyOnly, InstCombinerImpl &IC, unsigned Depth=0)
static Instruction * matchDeMorgansLaws(BinaryOperator &I, InstCombiner &IC)
Match variations of De Morgan's Laws: (~A & ~B) == (~(A | B)) (~A | ~B) == (~(A & B))
static Value * foldLogOpOfMaskedICmpsAsymmetric(Value *LHS, Value *RHS, bool IsAnd, Value *A, Value *B, Value *C, Value *D, Value *E, ICmpInst::Predicate PredL, ICmpInst::Predicate PredR, unsigned LHSMask, unsigned RHSMask, InstCombiner::BuilderTy &Builder)
Try to fold (icmp(A & B) ==/!= 0) &/| (icmp(A & D) ==/!= E) into a single (icmp(A & X) ==/!...
static Value * FoldOrOfSelectSmaxToAbs(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
Fold select(X >s 0, 0, -X) | smax(X, 0) --> abs(X) select(X <s 0, -X, 0) | smax(X,...
static Instruction * foldAndToXor(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
static unsigned getMaskedICmpType(Value *A, Value *B, Value *C, ICmpInst::Predicate Pred)
Return the set of patterns (from MaskedICmpType) that (icmp SCC (A & B), C) satisfies.
static Instruction * foldXorToXor(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
A ^ B can be specified using other logic ops in a variety of patterns.
static bool canNarrowShiftAmt(Constant *C, unsigned BitWidth)
Return true if a constant shift amount is always less than the specified bit-width.
static Instruction * foldLogicCastConstant(BinaryOperator &Logic, CastInst *Cast, InstCombinerImpl &IC)
Fold {and,or,xor} (cast X), C.
static Value * foldAndOrOfICmpEqConstantAndICmp(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd, bool IsLogical, IRBuilderBase &Builder)
static bool canFreelyInvert(InstCombiner &IC, Value *Op, Instruction *IgnoredUser)
static Value * foldNegativePower2AndShiftedMask(Value *A, Value *B, Value *D, Value *E, ICmpInst::Predicate PredL, ICmpInst::Predicate PredR, InstCombiner::BuilderTy &Builder)
Try to fold (icmp(A & B) == 0) & (icmp(A & D) != E) into (icmp A u< D) iff B is a contiguous set of o...
static Value * matchIsFiniteTest(InstCombiner::BuilderTy &Builder, FCmpInst *LHS, FCmpInst *RHS)
and (fcmp ord x, 0), (fcmp u* x, inf) -> fcmp o* x, inf
static Value * foldPowerOf2AndShiftedMask(ICmpInst *Cmp0, ICmpInst *Cmp1, bool JoinedByAnd, InstCombiner::BuilderTy &Builder)
Try to fold ((icmp X u< P) & (icmp(X & M) != M)) or ((icmp X s> -1) & (icmp(X & M) !...
static Value * stripSignOnlyFPOps(Value *Val)
Ignore all operations which only change the sign of a value, returning the underlying magnitude value...
static Value * foldOrUnsignedUMulOverflowICmp(BinaryOperator &I, InstCombiner::BuilderTy &Builder, const DataLayout &DL)
Fold Res, Overflow = (umul.with.overflow x c1); (or Overflow (ugt Res c2)) --> (ugt x (c2/c1)).
static Value * freelyInvert(InstCombinerImpl &IC, Value *Op, Instruction *IgnoredUser)
static Value * foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed(Value *LHS, Value *RHS, bool IsAnd, Value *A, Value *B, Value *D, Value *E, ICmpInst::Predicate PredL, ICmpInst::Predicate PredR, InstCombiner::BuilderTy &Builder)
Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E) into a single (icmp(A & X) ==/!...
static std::optional< IntPart > matchIntPart(Value *V)
Match an extraction of bits from an integer.
static Instruction * canonicalizeLogicFirst(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
static Instruction * reassociateFCmps(BinaryOperator &BO, InstCombiner::BuilderTy &Builder)
This a limited reassociation for a special case (see above) where we are checking if two values are e...
static Value * getNewICmpValue(unsigned Code, bool Sign, Value *LHS, Value *RHS, InstCombiner::BuilderTy &Builder)
This is the complement of getICmpCode, which turns an opcode and two operands into either a constant ...
static Value * extractIntPart(const IntPart &P, IRBuilderBase &Builder)
Materialize an extraction of bits from an integer in IR.
static bool matchUnorderedInfCompare(FCmpInst::Predicate P, Value *LHS, Value *RHS)
Matches fcmp u__ x, +/-inf.
static bool matchIsNotNaN(FCmpInst::Predicate P, Value *LHS, Value *RHS)
Matches canonical form of isnan, fcmp ord x, 0.
static bool areInverseVectorBitmasks(Constant *C1, Constant *C2)
If all elements of two constant vectors are 0/-1 and inverses, return true.
MaskedICmpType
Classify (icmp eq (A & B), C) and (icmp ne (A & B), C) as matching patterns that can be simplified.
static Instruction * foldComplexAndOrPatterns(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
Try folding relatively complex patterns for both And and Or operations with all And and Or swapped.
static bool matchZExtedSubInteger(Value *V, Value *&Int, APInt &Mask, uint64_t &Offset, bool &IsShlNUW, bool &IsShlNSW)
Match V as "lshr -> mask -> zext -> shl".
static std::optional< DecomposedBitMaskMul > matchBitmaskMul(Value *V)
static Value * foldOrOfInversions(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
static bool matchSubIntegerPackFromVector(Value *V, Value *&Vec, int64_t &VecOffset, SmallBitVector &Mask, const DataLayout &DL)
Match V as "shufflevector -> bitcast" or "extractelement -> zext -> shl" patterns,...
static Instruction * matchFunnelShift(Instruction &Or, InstCombinerImpl &IC)
Match UB-safe variants of the funnel shift intrinsic.
static Instruction * reassociateForUses(BinaryOperator &BO, InstCombinerImpl::BuilderTy &Builder)
Try to reassociate a pair of binops so that values with one use only are part of the same instruction...
static Value * matchOrConcat(Instruction &Or, InstCombiner::BuilderTy &Builder)
Attempt to combine or(zext(x),shl(zext(y),bw/2) concat packing patterns.
static Value * foldAndOrOfICmpsWithPow2AndWithZero(InstCombiner::BuilderTy &Builder, ICmpInst *LHS, ICmpInst *RHS, bool IsAnd, const SimplifyQuery &Q)
static Instruction * foldBitwiseLogicWithIntrinsics(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
static std::optional< std::pair< unsigned, unsigned > > getMaskedTypeForICmpPair(Value *&A, Value *&B, Value *&C, Value *&D, Value *&E, Value *LHS, Value *RHS, ICmpInst::Predicate &PredL, ICmpInst::Predicate &PredR)
Handle (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E).
static Instruction * foldIntegerPackFromVector(Instruction &I, InstCombiner::BuilderTy &Builder, const DataLayout &DL)
Try to fold the join of two scalar integers whose contents are packed elements of the same vector.
static Value * foldIntegerRepackThroughZExt(Value *Lhs, Value *Rhs, InstCombiner::BuilderTy &Builder)
Try to fold the join of two scalar integers whose bits are unpacked and zexted from the same source i...
This file provides internal interfaces used to implement the InstCombine.
This file provides the interface for the instcombine pass implementation.
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
uint64_t IntrinsicInst * II
const SmallVectorImpl< MachineOperand > & Cond
This file implements the SmallBitVector class.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static constexpr int Concat[]
static LLVM_ABI bool hasSignBitInMSB(const fltSemantics &)
bool bitwiseIsEqual(const APFloat &RHS) const
APInt bitcastToAPInt() const
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
Class for arbitrary precision integers.
LLVM_ABI APInt udiv(const APInt &RHS) const
Unsigned division operation.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
uint64_t getZExtValue() const
Get zero extended value.
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
unsigned countLeadingOnes() const
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
LLVM_ABI APInt usub_ov(const APInt &RHS, bool &Overflow) const
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
bool isSignMask() const
Check if the APInt's value is returned by getSignMask.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
LLVM_ABI APInt sadd_ov(const APInt &RHS, bool &Overflow) const
bool intersects(const APInt &RHS) const
This operation tests if there are any pairs of corresponding bits between this APInt and RHS that are...
int32_t exactLogBase2() const
LLVM_ABI APInt reverseBits() const
LLVM_ABI APInt uadd_ov(const APInt &RHS, bool &Overflow) const
unsigned countr_zero() const
Count the number of trailing zero bits.
unsigned countLeadingZeros() const
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
APInt shl(unsigned shiftAmt) const
Left-shift function.
LLVM_ABI APInt byteSwap() const
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
LLVM_ABI APInt ssub_ov(const APInt &RHS, bool &Overflow) const
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
void clearSignBit()
Set the sign bit to 0.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
LLVM_ABI bool isSigned() const
Whether the intrinsic is signed or unsigned.
LLVM_ABI Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
BinaryOps getOpcode() const
static LLVM_ABI BinaryOperator * CreateNot(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
static BinaryOperator * CreateWithCopiedFlags(BinaryOps Opc, Value *V1, Value *V2, Value *CopyO, const Twine &Name="", InsertPosition InsertBefore=nullptr)
This class represents a no-op cast from one type to another.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
This is the base class for all instructions that perform data casts.
Type * getSrcTy() const
Return the source type, as a convenience.
Instruction::CastOps getOpcode() const
Return the opcode of this CastInst.
static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
Type * getDestTy() const
Return the destination type, as a convenience.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ ICMP_ULT
unsigned less than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Predicate getPredicate() const
Return the predicate for this instruction.
static LLVM_ABI bool isUnordered(Predicate predicate)
Determine if the predicate is an unordered operation.
static Predicate getOrderedPredicate(Predicate Pred)
Returns the ordered variant of a floating point compare.
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static LLVM_ABI Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getNot(Constant *C)
static LLVM_ABI Constant * getXor(Constant *C1, Constant *C2)
static LLVM_ABI Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getExactLogBase2(Constant *C)
If C is a scalar/fixed width vector of known powers of 2, then this function returns a new scalar/fix...
static LLVM_ABI Constant * getZero(Type *Ty, bool Negative=false)
This is the shared class of boolean and integer constants.
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
LLVM_ABI std::optional< ConstantRange > exactUnionWith(const ConstantRange &CR) const
Union the two ranges and return the result if it can be represented exactly, otherwise return std::nu...
LLVM_ABI ConstantRange subtract(const APInt &CI) const
Subtract the specified constant from the endpoints of this constant range.
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
LLVM_ABI std::optional< ConstantRange > exactIntersectWith(const ConstantRange &CR) const
Intersect the two ranges and return the result if it can be represented exactly, otherwise return std...
This is an important base class in LLVM.
static LLVM_ABI Constant * replaceUndefsWith(Constant *C, Constant *Replacement)
Try to replace undefined constant C or undefined elements in C with Replacement.
static LLVM_ABI Constant * mergeUndefsWith(Constant *C, Constant *Other)
Merges undefs of a Constant with another Constant, along with the undefs already present.
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
LLVM_ABI bool isZeroValue() const
Return true if the value is negative zero or null value.
A parsed version of the target data layout string in and methods for querying it.
This instruction compares its operands according to the predicate given to the constructor.
This provides a helper for copying FMF from an instruction or setting specified flags.
static FMFSource intersect(Value *A, Value *B)
Intersect the FMF from two instructions.
This instruction compares its operands according to the predicate given to the constructor.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
bool isEquality() const
Return true if this predicate is either EQ or NE.
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
Common base class shared among various IRBuilders.
Value * CreateNot(Value *V, const Twine &Name="")
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Instruction * canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(BinaryOperator &I)
Instruction * foldBinOpIntoSelectOrPhi(BinaryOperator &I)
This is a convenience wrapper function for the above two functions.
Instruction * visitOr(BinaryOperator &I)
bool SimplifyAssociativeOrCommutative(BinaryOperator &I)
Performs a few simplifications for operators which are associative or commutative.
Value * foldUsingDistributiveLaws(BinaryOperator &I)
Tries to simplify binary operations which some other binary operation distributes over.
Instruction * foldBinOpShiftWithShift(BinaryOperator &I)
Value * insertRangeTest(Value *V, const APInt &Lo, const APInt &Hi, bool isSigned, bool Inside)
Emit a computation of: (V >= Lo && V < Hi) if Inside is true, otherwise (V < Lo || V >= Hi).
Instruction * foldBinOpSelectBinOp(BinaryOperator &Op)
In some cases it is beneficial to fold a select into a binary operator.
bool sinkNotIntoLogicalOp(Instruction &I)
std::optional< std::pair< Intrinsic::ID, SmallVector< Value *, 3 > > > convertOrOfShiftsToFunnelShift(Instruction &Or)
Instruction * visitAnd(BinaryOperator &I)
bool sinkNotIntoOtherHandOfLogicalOp(Instruction &I)
Instruction * foldBinopWithPhiOperands(BinaryOperator &BO)
For a binary operator with 2 phi operands, try to hoist the binary operation before the phi.
Instruction * foldAddLikeCommutative(Value *LHS, Value *RHS, bool NSW, bool NUW)
Common transforms for add / disjoint or.
Value * simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1, bool Inverted)
Try to fold a signed range checked with lower bound 0 to an unsigned icmp.
Instruction * tryFoldInstWithCtpopWithNot(Instruction *I)
Value * SimplifyAddWithRemainder(BinaryOperator &I)
Tries to simplify add operations using the definition of remainder.
Instruction * visitXor(BinaryOperator &I)
bool SimplifyDemandedInstructionBits(Instruction &Inst)
Tries to simplify operands to an integer instruction based on its demanded bits.
Instruction * foldVectorBinop(BinaryOperator &Inst)
Canonicalize the position of binops relative to shufflevector.
Instruction * matchBSwapOrBitReverse(Instruction &I, bool MatchBSwaps, bool MatchBitReversals)
Given an initial instruction, check to see if it is the root of a bswap/bitreverse idiom.
void freelyInvertAllUsersOf(Value *V, Value *IgnoredUser=nullptr)
Freely adapt every user of V as-if V was changed to !V.
The core instruction combiner logic.
const DataLayout & getDataLayout() const
IRBuilder< TargetFolder, IRBuilderCallbackInserter > BuilderTy
An IRBuilder that automatically inserts new instructions into the worklist.
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
unsigned ComputeNumSignBits(const Value *Op, const Instruction *CxtI=nullptr, unsigned Depth=0) const
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
void computeKnownBits(const Value *V, KnownBits &Known, const Instruction *CxtI, unsigned Depth=0) const
static Value * peekThroughBitcast(Value *V, bool OneUseOnly=false)
Return the source operand of a potentially bitcasted value while optionally checking if it has one us...
bool canFreelyInvertAllUsersOf(Instruction *V, Value *IgnoredUser)
Given i1 V, can every user of V be freely adapted if V is changed to !V ?
void addToWorklist(Instruction *I)
bool MaskedValueIsZero(const Value *V, const APInt &Mask, const Instruction *CxtI=nullptr, unsigned Depth=0) const
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
const SimplifyQuery & getSimplifyQuery() const
bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero=false, const Instruction *CxtI=nullptr, unsigned Depth=0)
LLVM_ABI void removeFromParent()
This method unlinks 'this' from the containing basic block, but does not delete it.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
A wrapper class for inspecting calls to intrinsic functions.
This class represents a sign extension of integer types.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, const Instruction *MDFrom=nullptr)
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI unsigned getIntegerBitWidth() const
bool isVectorTy() const
True if this is an instance of VectorType.
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
LLVM_ABI Type * getWithNewBitWidth(unsigned NewBitWidth) const
Given an integer or vector type, change the lane bitwidth to NewBitwidth, whilst keeping the old numb...
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
LLVM_ABI const fltSemantics & getFltSemantics() const
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
iterator_range< user_iterator > users()
LLVM_ABI bool hasNUsesOrMore(unsigned N) const
Return true if this value has N uses or more.
LLVM_ABI bool hasNUses(unsigned N) const
Return true if this Value has exactly N uses.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Represents an op.with.overflow intrinsic.
This class represents zero extension of integer types.
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ C
The default llvm calling convention, compatible with C.
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
cst_pred_ty< is_lowbit_mask > m_LowBitMask()
Match an integer or vector with only the low bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
cst_pred_ty< is_negative > m_Negative()
Match an integer or vector of negative values.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
CmpClass_match< LHS, RHS, FCmpInst > m_FCmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
cst_pred_ty< is_sign_mask > m_SignMask()
Match an integer or vector with only the sign bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
cstfp_pred_ty< is_inf > m_Inf()
Match a positive or negative infinity FP constant.
m_Intrinsic_Ty< Opnd0 >::Ty m_BitReverse(const Opnd0 &Op0)
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
match_combine_or< CastInst_match< OpTy, TruncInst >, OpTy > m_TruncOrSelf(const OpTy &Op)
auto m_LogicalOp()
Matches either L && R or L || R where L and R are arbitrary values.
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
ap_match< APInt > m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWSub(const LHS &L, const RHS &R)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
match_combine_or< CastInst_match< OpTy, ZExtInst >, OpTy > m_ZExtOrSelf(const OpTy &Op)
bool match(Val *V, const Pattern &P)
cst_pred_ty< is_shifted_mask > m_ShiftedMask()
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
cstfp_pred_ty< is_any_zero_fp > m_AnyZeroFP()
Match a floating-point negative zero or positive zero.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
DisjointOr_match< LHS, RHS > m_DisjointOr(const LHS &L, const RHS &R)
constantexpr_match m_ConstantExpr()
Match a constant expression or a constant that contains a constant expression.
specific_intval< true > m_SpecificIntAllowPoison(const APInt &V)
ap_match< APFloat > m_APFloatAllowPoison(const APFloat *&Res)
Match APFloat while allowing poison in splat vector constants.
CmpClass_match< LHS, RHS, ICmpInst, true > m_c_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
Matches an ICmp with a predicate over LHS and RHS in either order.
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
match_combine_or< CastInst_match< OpTy, SExtInst >, OpTy > m_SExtOrSelf(const OpTy &Op)
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
BinOpPred_match< LHS, RHS, is_logical_shift_op > m_LogicalShift(const LHS &L, const RHS &R)
Matches logical shift operations.
ShiftLike_match< LHS, Instruction::Shl > m_ShlOrSelf(const LHS &L, uint64_t &R)
Matches shl L, ConstShAmt or L itself (R will be set to zero in this case).
bind_ty< WithOverflowInst > m_WithOverflowInst(WithOverflowInst *&I)
Match a with overflow intrinsic, capturing it if we match.
BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)
Matches an Xor with LHS and RHS in either order.
SpecificCmpClass_match< LHS, RHS, CmpInst > m_SpecificCmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
class_match< CmpInst > m_Cmp()
Matches any compare instruction and ignore it.
cst_pred_ty< is_negated_power2 > m_NegatedPower2()
Match an integer or vector negated power-of-2.
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
DisjointOr_match< LHS, RHS, true > m_c_DisjointOr(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches a Add with LHS and RHS in either order.
SpecificCmpClass_match< LHS, RHS, FCmpInst > m_SpecificFCmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty, true > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty, true > > > m_c_MaxOrMin(const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, SExtInst >, NNegZExt_match< OpTy > > m_SExtLike(const OpTy &Op)
Match either "sext" or "zext nneg".
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty > m_SMax(const LHS &L, const RHS &R)
cst_pred_ty< is_maxsignedvalue > m_MaxSignedValue()
Match an integer or vector with values having all bits except for the high bit set (0x7f....
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
cstfp_pred_ty< is_pos_zero_fp > m_PosZeroFP()
Match a floating-point positive zero.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_BSwap(const Opnd0 &Op0)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
ThreeOps_match< Val_t, Elt_t, Idx_t, Instruction::InsertElement > m_InsertElt(const Val_t &Val, const Elt_t &Elt, const Idx_t &Idx)
Matches InsertElementInst.
ElementWiseBitCast_match< OpTy > m_ElementWiseBitCast(const OpTy &Op)
m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)
m_Intrinsic_Ty< Opnd0, Opnd1 >::Ty m_CopySign(const Opnd0 &Op0, const Opnd1 &Op1)
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
match_unless< Ty > m_Unless(const Ty &M)
Match if the inner matcher does NOT match.
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
cst_pred_ty< icmp_pred_with_threshold > m_SpecificInt_ICMP(ICmpInst::Predicate Predicate, const APInt &Threshold)
Match an integer or vector with every element comparing 'pred' (eg/ne/...) to Threshold.
NodeAddr< CodeNode * > Code
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
FunctionAddr VTableAddr Value
Constant * getPredForFCmpCode(unsigned Code, Type *OpTy, CmpInst::Predicate &Pred)
This is the complement of getFCmpCode.
LLVM_ABI bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS, bool &TrueIfSigned)
Given an exploded icmp instruction, return true if the comparison only checks the sign bit.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
bool predicatesFoldable(CmpInst::Predicate P1, CmpInst::Predicate P2)
Return true if both predicates match sign or if at least one of them is an equality comparison (which...
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
LLVM_ABI Value * simplifyOrInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an Or, fold the result or return null.
LLVM_ABI Value * simplifyXorInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an Xor, fold the result or return null.
LLVM_ABI bool isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be undef, but may be poison.
LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
LLVM_ABI bool isKnownNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known be negative (i.e.
LLVM_ABI Constant * getLosslessUnsignedTrunc(Constant *C, Type *DestTy, const DataLayout &DL, PreservedCastFlags *Flags=nullptr)
LLVM_ABI bool recognizeBSwapOrBitReverseIdiom(Instruction *I, bool MatchBSwaps, bool MatchBitReversals, SmallVectorImpl< Instruction * > &InsertedInsts)
Try to match a bswap or bitreverse idiom.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
LLVM_ABI Value * simplifyICmpInst(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an ICmpInst, fold the result or return null.
LLVM_ABI Constant * getLosslessSignedTrunc(Constant *C, Type *DestTy, const DataLayout &DL, PreservedCastFlags *Flags=nullptr)
LLVM_ABI Value * simplifyAndInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an And, fold the result or return null.
LLVM_ABI bool isKnownInversion(const Value *X, const Value *Y)
Return true iff:
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr int PoisonMaskElem
LLVM_ABI Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
std::optional< DecomposedBitTest > decomposeBitTest(Value *Cond, bool LookThroughTrunc=true, bool AllowNonZeroC=false, bool DecomposeAnd=false)
Decompose an icmp into the form ((X & Mask) pred C) if possible.
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ And
Bitwise or logical AND of integers.
@ Sub
Subtraction of integers.
DWARFExpression::Operation Op
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
APFloat neg(APFloat X)
Returns the negated value of the argument.
cl::opt< bool > ProfcheckDisableMetadataFixes("profcheck-disable-metadata-fixes", cl::Hidden, cl::init(false), cl::desc("Disable metadata propagation fixes discovered through Issue #147390"))
unsigned getICmpCode(CmpInst::Predicate Pred)
Encode a icmp predicate into a three bit mask.
LLVM_ABI bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return true if the given value is known to have exactly one bit set when defined.
LLVM_ABI bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
std::pair< Value *, FPClassTest > fcmpToClassTest(FCmpInst::Predicate Pred, const Function &F, Value *LHS, Value *RHS, bool LookThroughSrc=true)
Returns a pair of values, which if passed to llvm.is.fpclass, returns the same result as an fcmp with...
unsigned getFCmpCode(CmpInst::Predicate CC)
Similar to getICmpCode but for FCmpInst.
std::optional< DecomposedBitTest > decomposeBitTestICmp(Value *LHS, Value *RHS, CmpInst::Predicate Pred, bool LookThroughTrunc=true, bool AllowNonZeroC=false, bool DecomposeAnd=false)
Decompose an icmp into the form ((X & Mask) pred C) if possible.
Constant * getPredForICmpCode(unsigned Code, bool Sign, Type *OpTy, CmpInst::Predicate &Pred)
This is the complement of getICmpCode.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
bool isCombineableWith(const DecomposedBitMaskMul Other)
bool isNonNegative() const
Returns true if this value is known to be non-negative.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
SimplifyQuery getWithInstruction(const Instruction *I) const