23using namespace PatternMatch;
25#define DEBUG_TYPE "instcombine"
56 "Lo is not < Hi in range emission code!");
58 Type *Ty = V->getType();
63 if (
isSigned ?
Lo.isMinSignedValue() :
Lo.isMinValue()) {
120 const APInt *ConstA =
nullptr, *ConstB =
nullptr, *ConstC =
nullptr;
125 bool IsAPow2 = ConstA && ConstA->
isPowerOf2();
126 bool IsBPow2 = ConstB && ConstB->isPowerOf2();
127 unsigned MaskVal = 0;
128 if (ConstC && ConstC->isZero()) {
147 }
else if (ConstA && ConstC && ConstC->
isSubsetOf(*ConstA)) {
157 }
else if (ConstB && ConstC && ConstC->isSubsetOf(*ConstB)) {
185 LHS,
RHS, Pred,
true,
true);
191 Y = ConstantInt::get(
X->getType(), Res->Mask);
192 Z = ConstantInt::get(
X->getType(), Res->C);
217 Value *L11, *L12, *L21, *L22;
220 L21 = L22 = L1 =
nullptr;
245 if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
248 }
else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
265 if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
270 }
else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
293 if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
298 }
else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
307 assert(Ok &&
"Failed to find AND on the right side of the RHS icmp.");
313 }
else if (L12 ==
A) {
316 }
else if (L21 ==
A) {
319 }
else if (L22 ==
A) {
326 return std::optional<std::pair<unsigned, unsigned>>(
327 std::make_pair(LeftType, RightType));
349 const APInt *BCst, *DCst, *OrigECst;
360 APInt ECst = *OrigECst;
366 if (*BCst == 0 || *DCst == 0)
377 Attribute::StrictFP)) {
378 Type *Ty = Src->getType()->getScalarType();
385 APInt FractionBits = ~ExpBits;
387 if (*BCst != FractionBits)
412 if ((((*BCst & *DCst) & ECst) == 0) &&
413 (*BCst & (*BCst ^ *DCst)).isPowerOf2()) {
414 APInt BorD = *BCst | *DCst;
415 APInt BandBxorDorE = (*BCst & (*BCst ^ *DCst)) | ECst;
416 Value *NewMask = ConstantInt::get(
A->getType(), BorD);
417 Value *NewMaskedValue = ConstantInt::get(
A->getType(), BandBxorDorE);
419 return Builder.
CreateICmp(NewCC, NewAnd, NewMaskedValue);
422 auto IsSubSetOrEqual = [](
const APInt *C1,
const APInt *C2) {
423 return (*C1 & *C2) == *C1;
425 auto IsSuperSetOrEqual = [](
const APInt *C1,
const APInt *C2) {
426 return (*C1 & *C2) == *C2;
435 if (!IsSubSetOrEqual(BCst, DCst) && !IsSuperSetOrEqual(BCst, DCst))
447 if (IsSubSetOrEqual(BCst, DCst))
448 return ConstantInt::get(
LHS->
getType(), !IsAnd);
458 if (IsSuperSetOrEqual(BCst, DCst)) {
460 RHS->setSameSign(
false);
466 assert(IsSubSetOrEqual(BCst, DCst) &&
"Precondition due to above code");
467 if ((*BCst & ECst) != 0) {
469 RHS->setSameSign(
false);
476 return ConstantInt::get(
LHS->
getType(), !IsAnd);
488 "Expected equality predicates for masked type of icmps.");
500 LHS,
RHS, IsAnd,
A,
B,
D, E, PredL, PredR, Builder)) {
505 RHS,
LHS, IsAnd,
A,
D,
B,
C, PredR, PredL, Builder)) {
518 Value *
A =
nullptr, *
B =
nullptr, *
C =
nullptr, *
D =
nullptr, *E =
nullptr;
520 std::optional<std::pair<unsigned, unsigned>> MaskPair =
525 "Expected equality predicates for masked type of icmps.");
526 unsigned LHSMask = MaskPair->first;
527 unsigned RHSMask = MaskPair->second;
528 unsigned Mask = LHSMask & RHSMask;
533 LHS,
RHS, IsAnd,
A,
B,
C,
D, E, PredL, PredR, LHSMask, RHSMask,
569 return Builder.
CreateICmp(NewCC, NewAnd, Zero);
578 return Builder.
CreateICmp(NewCC, NewAnd, NewOr);
590 const APInt *ConstB, *ConstD;
598 APInt NewMask = *ConstB & *ConstD;
599 if (NewMask == *ConstB)
601 if (NewMask == *ConstD)
610 APInt NewMask = *ConstB | *ConstD;
611 if (NewMask == *ConstB)
613 if (NewMask == *ConstD)
640 const APInt *OldConstC, *OldConstE;
646 const APInt ConstC = PredL !=
CC ? *ConstB ^ *OldConstC : *OldConstC;
647 const APInt ConstE = PredR !=
CC ? *ConstD ^ *OldConstE : *OldConstE;
649 if (((*ConstB & *ConstD) & (ConstC ^ ConstE)).getBoolValue())
650 return IsNot ? nullptr : ConstantInt::get(
LHS->
getType(), !IsAnd);
653 !ConstD->isSubsetOf(*ConstB))
658 BD = *ConstB & *ConstD;
659 CE = ConstC & ConstE;
661 BD = *ConstB | *ConstD;
662 CE = ConstC | ConstE;
665 Value *CEVal = ConstantInt::get(
A->getType(), CE);
670 return FoldBMixed(NewCC,
false);
672 return FoldBMixed(NewCC,
true);
740 default:
return nullptr;
764 if (
LHS->getPredicate() != Pred ||
RHS->getPredicate() != Pred)
829 auto tryToMatchSignedTruncationCheck = [](
ICmpInst *ICmp,
Value *&
X,
830 APInt &SignBitMask) ->
bool {
831 const APInt *I01, *I1;
835 I1->ugt(*I01) && I01->
shl(1) == *I1))
847 if (tryToMatchSignedTruncationCheck(ICmp1, X1, HighestBit))
849 else if (tryToMatchSignedTruncationCheck(ICmp0, X1, HighestBit))
854 assert(HighestBit.
isPowerOf2() &&
"expected to be power of two (non-zero)");
858 APInt &UnsetBitsMask) ->
bool {
866 UnsetBitsMask = Res->Mask;
874 UnsetBitsMask = *Mask;
883 if (!tryToDecompose(OtherICmp, X0, UnsetBitsMask))
886 assert(!UnsetBitsMask.
isZero() &&
"empty mask makes no sense.");
901 APInt SignBitsMask = ~(HighestBit - 1U);
908 if (!UnsetBitsMask.
isSubsetOf(SignBitsMask)) {
909 APInt OtherHighestBit = (~UnsetBitsMask) + 1U;
917 return Builder.
CreateICmpULT(
X, ConstantInt::get(
X->getType(), HighestBit),
918 CxtI.
getName() +
".simplified");
935 auto *CtPop = cast<Instruction>(Cmp0->
getOperand(0));
938 CtPop->dropPoisonGeneratingAnnotations();
940 return Builder.
CreateICmpUGT(CtPop, ConstantInt::get(CtPop->getType(), 1));
944 CtPop->dropPoisonGeneratingAnnotations();
946 return Builder.
CreateICmpULT(CtPop, ConstantInt::get(CtPop->getType(), 2));
971 auto *CtPop = cast<Instruction>(Cmp1->
getOperand(0));
973 CtPop->dropPoisonGeneratingAnnotations();
975 return Builder.
CreateICmpEQ(CtPop, ConstantInt::get(CtPop->getType(), 1));
983 auto *CtPop = cast<Instruction>(Cmp1->
getOperand(0));
985 CtPop->dropPoisonGeneratingAnnotations();
987 return Builder.
CreateICmpNE(CtPop, ConstantInt::get(CtPop->getType(), 1));
1001 "Expected equality predicates for masked type of icmps.");
1021 const APInt *BCst, *DCst, *ECst;
1024 (isa<PoisonValue>(
B) ||
1029 if (
const auto *BVTy = dyn_cast<VectorType>(
B->getType())) {
1030 const auto *BFVTy = dyn_cast<FixedVectorType>(BVTy);
1031 const auto *BConst = dyn_cast<Constant>(
B);
1032 const auto *DConst = dyn_cast<Constant>(
D);
1033 const auto *EConst = dyn_cast<Constant>(E);
1035 if (!BFVTy || !BConst || !DConst || !EConst)
1038 for (
unsigned I = 0;
I != BFVTy->getNumElements(); ++
I) {
1039 const auto *BElt = BConst->getAggregateElement(
I);
1040 const auto *DElt = DConst->getAggregateElement(
I);
1041 const auto *EElt = EConst->getAggregateElement(
I);
1043 if (!BElt || !DElt || !EElt)
1045 if (!isReducible(BElt, DElt, EElt))
1050 if (!isReducible(
B,
D, E))
1068 Value *
A =
nullptr, *
B =
nullptr, *
C =
nullptr, *
D =
nullptr, *E =
nullptr;
1074 std::optional<std::pair<unsigned, unsigned>> MaskPair =
1080 unsigned CmpMask0 = MaskPair->first;
1081 unsigned CmpMask1 = MaskPair->second;
1082 if ((CmpMask0 &
Mask_AllZeros) && (CmpMask1 == compareBMask)) {
1086 }
else if ((CmpMask0 == compareBMask) && (CmpMask1 &
Mask_AllZeros)) {
1097 ICmpInst *UnsignedICmp,
bool IsAnd,
1109 if (
match(UnsignedICmp,
1125 IsAnd && GetKnownNonZeroAndOther(
B,
A))
1128 !IsAnd && GetKnownNonZeroAndOther(
B,
A))
1145 return std::nullopt;
1147 unsigned NumOriginalBits =
X->getType()->getScalarSizeInBits();
1148 unsigned NumExtractedBits = V->getType()->getScalarSizeInBits();
1154 Shift->
ule(NumOriginalBits - NumExtractedBits))
1156 return {{
X, 0, NumExtractedBits}};
1164 Type *TruncTy = V->getType()->getWithNewBitWidth(
P.NumBits);
1165 if (TruncTy != V->getType())
1173Value *InstCombinerImpl::foldEqOfParts(
Value *Cmp0,
Value *Cmp1,
bool IsAnd) {
1178 auto GetMatchPart = [&](
Value *CmpV,
1179 unsigned OpNo) -> std::optional<IntPart> {
1188 return {{OpNo == 0 ?
X :
Y, 0, 1}};
1190 auto *
Cmp = dyn_cast<ICmpInst>(CmpV);
1192 return std::nullopt;
1194 if (Pred ==
Cmp->getPredicate())
1203 return std::nullopt;
1212 return std::nullopt;
1214 return std::nullopt;
1219 return {{
I->getOperand(OpNo),
From,
C->getBitWidth() -
From}};
1222 std::optional<IntPart> L0 = GetMatchPart(Cmp0, 0);
1223 std::optional<IntPart> R0 = GetMatchPart(Cmp0, 1);
1224 std::optional<IntPart> L1 = GetMatchPart(Cmp1, 0);
1225 std::optional<IntPart> R1 = GetMatchPart(Cmp1, 1);
1226 if (!L0 || !R0 || !L1 || !R1)
1231 if (L0->From != L1->From || R0->From != R1->From) {
1232 if (L0->From != R1->From || R0->From != L1->From)
1239 if (L0->StartBit + L0->NumBits != L1->StartBit ||
1240 R0->StartBit + R0->NumBits != R1->StartBit) {
1241 if (L1->StartBit + L1->NumBits != L0->StartBit ||
1242 R1->StartBit + R1->NumBits != R0->StartBit)
1249 IntPart L = {L0->From, L0->StartBit, L0->NumBits + L1->NumBits};
1250 IntPart R = {R0->From, R0->StartBit, R0->NumBits + R1->NumBits};
1260 bool IsAnd,
bool IsLogical,
1289 if (!SubstituteCmp) {
1299 return Builder.
CreateBinOp(IsAnd ? Instruction::And : Instruction::Or, Cmp0,
1307Value *InstCombinerImpl::foldAndOrOfICmpsUsingRanges(
ICmpInst *ICmp1,
1312 const APInt *C1, *C2;
1319 const APInt *Offset1 =
nullptr, *Offset2 =
nullptr;
1354 if (!LowerDiff.
isPowerOf2() || LowerDiff != UpperDiff ||
1367 CR->getEquivalentICmp(NewPred, NewC,
Offset);
1399 Value *LHS0 =
LHS->getOperand(0), *LHS1 =
LHS->getOperand(1);
1400 Value *RHS0 =
RHS->getOperand(0), *RHS1 =
RHS->getOperand(1);
1412 bool IsAnd,
bool IsLogicalSelect) {
1413 Value *LHS0 =
LHS->getOperand(0), *LHS1 =
LHS->getOperand(1);
1414 Value *RHS0 =
RHS->getOperand(0), *RHS1 =
RHS->getOperand(1);
1417 if (LHS0 == RHS1 && RHS0 == LHS1) {
1437 if (LHS0 == RHS0 && LHS1 == RHS1) {
1440 unsigned NewPred = IsAnd ? FCmpCodeL & FCmpCodeR : FCmpCodeL | FCmpCodeR;
1449 if (!IsLogicalSelect &&
1482 auto [ClassValRHS, ClassMaskRHS] =
1485 auto [ClassValLHS, ClassMaskLHS] =
1487 if (ClassValLHS == ClassValRHS) {
1488 unsigned CombinedMask = IsAnd ? (ClassMaskLHS & ClassMaskRHS)
1489 : (ClassMaskLHS | ClassMaskRHS);
1491 Intrinsic::is_fpclass, {ClassValLHS->getType()},
1520 if (IsLessThanOrLessEqual(IsAnd ? PredR : PredL)) {
1524 if (IsLessThanOrLessEqual(IsAnd ? PredL : PredR)) {
1526 if (!IsLogicalSelect)
1527 NewFlag |=
RHS->getFastMathFlags();
1532 PredL, FAbs, ConstantFP::get(LHS0->
getType(), *LHSC), NewFlag);
1543 auto *FCmp = dyn_cast<FCmpInst>(
Op);
1544 if (!FCmp || !FCmp->hasOneUse())
1547 std::tie(ClassVal, ClassMask) =
1548 fcmpToClassTest(FCmp->getPredicate(), *FCmp->getParent()->getParent(),
1549 FCmp->getOperand(0), FCmp->getOperand(1));
1550 return ClassVal !=
nullptr;
1561 Value *ClassVal0 =
nullptr;
1562 Value *ClassVal1 =
nullptr;
1579 ClassVal0 == ClassVal1) {
1580 unsigned NewClassMask;
1582 case Instruction::And:
1583 NewClassMask = ClassMask0 & ClassMask1;
1585 case Instruction::Or:
1586 NewClassMask = ClassMask0 | ClassMask1;
1588 case Instruction::Xor:
1589 NewClassMask = ClassMask0 ^ ClassMask1;
1596 auto *
II = cast<IntrinsicInst>(Op0);
1598 1, ConstantInt::get(
II->getArgOperand(1)->getType(), NewClassMask));
1603 auto *
II = cast<IntrinsicInst>(Op1);
1605 1, ConstantInt::get(
II->getArgOperand(1)->getType(), NewClassMask));
1625Instruction *InstCombinerImpl::canonicalizeConditionalNegationViaMathToSelect(
1627 assert(
I.getOpcode() == BinaryOperator::Xor &&
"Only for xor!");
1632 !
Cond->getType()->isIntOrIntVectorTy(1) ||
1646 assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
1647 "Expecting and/or op for fcmp transform");
1666 X->getType() !=
Y->getType())
1670 X->getType() !=
Y->getType())
1676 if (
auto *NewFCmpInst = dyn_cast<FCmpInst>(NewFCmp)) {
1678 NewFCmpInst->copyIRFlags(Op0);
1679 NewFCmpInst->andIRFlags(BO10);
1690 assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
1691 "Trying to match De Morgan's Laws with something other than and/or");
1695 (Opcode == Instruction::And) ? Instruction::Or : Instruction::And;
1697 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
1723bool InstCombinerImpl::shouldOptimizeCast(
CastInst *CI) {
1732 if (
const auto *PrecedingCI = dyn_cast<CastInst>(CastSrc))
1733 if (isEliminableCastPair(PrecedingCI, CI))
1759 return new ZExtInst(NewOp, DestTy);
1767 return new SExtInst(NewOp, DestTy);
1776 auto LogicOpc =
I.getOpcode();
1777 assert(
I.isBitwiseLogicOp() &&
"Unexpected opcode for bitwise logic folding");
1779 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
1785 auto FoldBitwiseICmpZeroWithICmp = [&](
Value *Op0,
1800 auto *ICmpR = cast<ZExtInst>(Op1)->getOperand(0);
1806 if (
auto *Ret = FoldBitwiseICmpZeroWithICmp(Op0, Op1))
1809 if (
auto *Ret = FoldBitwiseICmpZeroWithICmp(Op1, Op0))
1812 CastInst *Cast0 = dyn_cast<CastInst>(Op0);
1818 Type *DestTy =
I.getType();
1826 CastInst *Cast1 = dyn_cast<CastInst>(Op1);
1843 unsigned XNumBits =
X->getType()->getScalarSizeInBits();
1844 unsigned YNumBits =
Y->getType()->getScalarSizeInBits();
1845 if (XNumBits < YNumBits)
1863 shouldOptimizeCast(Cast0) && shouldOptimizeCast(Cast1)) {
1874 assert(
I.getOpcode() == Instruction::And);
1875 Value *Op0 =
I.getOperand(0);
1876 Value *Op1 =
I.getOperand(1);
1884 return BinaryOperator::CreateXor(
A,
B);
1900 assert(
I.getOpcode() == Instruction::Or);
1901 Value *Op0 =
I.getOperand(0);
1902 Value *Op1 =
I.getOperand(1);
1927 return BinaryOperator::CreateXor(
A,
B);
1947 Value *Op0 =
And.getOperand(0), *Op1 =
And.getOperand(1);
1961 if (!isa<VectorType>(Ty) && !shouldChangeType(Ty,
X->getType()))
1968 if (Opc == Instruction::LShr || Opc == Instruction::Shl)
1985 assert(Opcode == Instruction::And || Opcode == Instruction::Or);
1989 (Opcode == Instruction::And) ? Instruction::Or : Instruction::And;
1991 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
1998 const auto matchNotOrAnd =
1999 [Opcode, FlippedOpcode](
Value *
Op,
auto m_A,
auto m_B,
auto m_C,
2000 Value *&
X,
bool CountUses =
false) ->
bool {
2001 if (CountUses && !
Op->hasOneUse())
2008 return !CountUses ||
X->hasOneUse();
2024 return (Opcode == Instruction::Or)
2034 return (Opcode == Instruction::Or)
2057 if (Opcode == Instruction::Or && Op0->
hasOneUse() &&
2064 Value *
Or = cast<BinaryOperator>(
X)->getOperand(0);
2096 return (Opcode == Instruction::Or)
2098 : BinaryOperator::CreateOr(
Xor,
X);
2132 if (!isa<Constant>(
X) && !isa<Constant>(
Y) && !isa<Constant>(Z)) {
2134 if (!
X->hasOneUse()) {
2139 if (!
Y->hasOneUse()) {
2160 Type *Ty =
I.getType();
2162 Value *Op0 =
I.getOperand(0);
2163 Value *Op1 =
I.getOperand(1);
2175 case Instruction::And:
2176 if (
C->countl_one() < LastOneMath)
2179 case Instruction::Xor:
2180 case Instruction::Or:
2181 if (
C->countl_zero() < LastOneMath)
2190 ConstantInt::get(Ty, *C2), Op0);
2197 assert((
I.isBitwiseLogicOp() ||
I.getOpcode() == Instruction::Add) &&
2198 "Unexpected opcode");
2201 Constant *ShiftedC1, *ShiftedC2, *AddC;
2202 Type *Ty =
I.getType();
2216 auto *Op0Inst = dyn_cast<Instruction>(
I.getOperand(0));
2217 auto *Op1Inst = dyn_cast<Instruction>(
I.getOperand(1));
2218 if (!Op0Inst || !Op1Inst)
2224 if (ShiftOp != Op1Inst->getOpcode())
2228 if (
I.getOpcode() == Instruction::Add && ShiftOp != Instruction::Shl)
2248 assert(
I.isBitwiseLogicOp() &&
"Should and/or/xor");
2249 if (!
I.getOperand(0)->hasOneUse())
2256 if (
Y && (!
Y->hasOneUse() ||
X->getIntrinsicID() !=
Y->getIntrinsicID()))
2262 if (!
Y && (!(IID == Intrinsic::bswap || IID == Intrinsic::bitreverse) ||
2267 case Intrinsic::fshl:
2268 case Intrinsic::fshr: {
2269 if (
X->getOperand(2) !=
Y->getOperand(2))
2272 Builder.
CreateBinOp(
I.getOpcode(),
X->getOperand(0),
Y->getOperand(0));
2274 Builder.
CreateBinOp(
I.getOpcode(),
X->getOperand(1),
Y->getOperand(1));
2279 case Intrinsic::bswap:
2280 case Intrinsic::bitreverse: {
2282 I.getOpcode(),
X->getOperand(0),
2283 Y ?
Y->getOperand(0)
2284 : ConstantInt::get(
I.getType(), IID == Intrinsic::bswap
2304 unsigned Depth = 0) {
2311 auto *
I = dyn_cast<BinaryOperator>(V);
2312 if (!
I || !
I->isBitwiseLogicOp() ||
Depth >= 3)
2315 if (!
I->hasOneUse())
2316 SimplifyOnly =
true;
2319 SimplifyOnly, IC,
Depth + 1);
2321 SimplifyOnly, IC,
Depth + 1);
2322 if (!NewOp0 && !NewOp1)
2326 NewOp0 =
I->getOperand(0);
2328 NewOp1 =
I->getOperand(1);
2344 bool RHSIsLogical) {
2348 if (
Value *Res = foldBooleanAndOr(LHS,
X,
I, IsAnd,
false))
2353 if (
Value *Res = foldBooleanAndOr(LHS,
Y,
I, IsAnd,
false))
2363 Type *Ty =
I.getType();
2397 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
2432 Constant *NewC = ConstantInt::get(Ty, *
C & *XorC);
2435 return BinaryOperator::CreateXor(
And, NewC);
2446 APInt Together = *
C & *OrC;
2449 return BinaryOperator::CreateOr(
And, ConstantInt::get(Ty, Together));
2453 const APInt *ShiftC;
2455 ShiftC->
ult(Width)) {
2460 Constant *ShAmtC = ConstantInt::get(Ty, ShiftC->
zext(Width));
2461 return BinaryOperator::CreateLShr(Sext, ShAmtC);
2469 return BinaryOperator::CreateLShr(
X, ConstantInt::get(Ty, *ShiftC));
2477 if (Op0->
hasOneUse() &&
C->isPowerOf2() && (*AddC & (*
C - 1)) == 0) {
2478 assert((*
C & *AddC) != 0 &&
"Expected common bit");
2480 return BinaryOperator::CreateXor(NewAnd, Op1);
2487 switch (
B->getOpcode()) {
2488 case Instruction::Xor:
2489 case Instruction::Or:
2490 case Instruction::Mul:
2491 case Instruction::Add:
2492 case Instruction::Sub:
2508 C->isIntN(
X->getType()->getScalarSizeInBits())) {
2509 unsigned XWidth =
X->getType()->getScalarSizeInBits();
2510 Constant *TruncC1 = ConstantInt::get(
X->getType(), C1->
trunc(XWidth));
2514 Constant *TruncC = ConstantInt::get(
X->getType(),
C->trunc(XWidth));
2524 C->isMask(
X->getType()->getScalarSizeInBits())) {
2534 C->isMask(
X->getType()->getScalarSizeInBits())) {
2568 if (
C->isPowerOf2() &&
2571 int Log2C =
C->exactLogBase2();
2573 cast<BinaryOperator>(Op0)->getOpcode() == Instruction::Shl;
2574 int BitNum = IsShiftLeft ? Log2C - Log2ShiftC : Log2ShiftC - Log2C;
2575 assert(BitNum >= 0 &&
"Expected demanded bits to handle impossible mask");
2608 if (Cmp && Cmp->isZeroValue()) {
2633 Attribute::NoImplicitFloat)) {
2649 X->getType()->getScalarSizeInBits())))) {
2651 return BinaryOperator::CreateAnd(SExt, Op1);
2657 if (
I.getType()->isIntOrIntVectorTy(1)) {
2658 if (
auto *SI0 = dyn_cast<SelectInst>(Op0)) {
2660 foldAndOrOfSelectUsingImpliedCond(Op1, *SI0,
true))
2663 if (
auto *SI1 = dyn_cast<SelectInst>(Op1)) {
2665 foldAndOrOfSelectUsingImpliedCond(Op0, *SI1,
true))
2680 return BinaryOperator::CreateAnd(Op0,
B);
2683 return BinaryOperator::CreateAnd(Op1,
B);
2691 if (NotC !=
nullptr)
2692 return BinaryOperator::CreateAnd(Op0, NotC);
2701 if (NotC !=
nullptr)
2711 return BinaryOperator::CreateAnd(
A,
B);
2719 return BinaryOperator::CreateAnd(
A,
B);
2739 foldBooleanAndOr(Op0, Op1,
I,
true,
false))
2743 bool IsLogical = isa<SelectInst>(Op1);
2744 if (
auto *V = reassociateBooleanAndOr(Op0,
X,
Y,
I,
true,
2749 bool IsLogical = isa<SelectInst>(Op0);
2750 if (
auto *V = reassociateBooleanAndOr(Op1,
X,
Y,
I,
true,
2758 if (
Instruction *CastedAnd = foldCastedBitwiseLogic(
I))
2771 A->getType()->isIntOrIntVectorTy(1))
2777 A->getType()->isIntOrIntVectorTy(1))
2782 A->getType()->isIntOrIntVectorTy(1))
2789 if (
A->getType()->isIntOrIntVectorTy(1))
2802 *
C ==
X->getType()->getScalarSizeInBits() - 1) {
2811 *
C ==
X->getType()->getScalarSizeInBits() - 1) {
2822 Value *Start =
nullptr, *Step =
nullptr;
2830 return Canonicalized;
2832 if (
Instruction *Folded = foldLogicOfIsFPClass(
I, Op0, Op1))
2844 return BinaryOperator::CreateAnd(V, Op1);
2848 return BinaryOperator::CreateAnd(Op0, V);
2855 bool MatchBitReversals) {
2863 for (
auto *Inst : Insts) {
2864 Inst->setDebugLoc(
I.getDebugLoc());
2870std::optional<std::pair<Intrinsic::ID, SmallVector<Value *, 3>>>
2874 assert(
Or.getOpcode() == BinaryOperator::Or &&
"Expecting or instruction");
2876 unsigned Width =
Or.getType()->getScalarSizeInBits();
2881 return std::nullopt;
2888 if (isa<BinaryOperator>(Or0) && isa<BinaryOperator>(Or1)) {
2889 Value *ShVal0, *ShVal1, *ShAmt0, *ShAmt1;
2895 return std::nullopt;
2898 if (Or0->
getOpcode() == BinaryOperator::LShr) {
2904 Or1->
getOpcode() == BinaryOperator::LShr &&
2905 "Illegal or(shift,shift) pair");
2909 auto matchShiftAmount = [&](
Value *L,
Value *R,
unsigned Width) ->
Value * {
2911 const APInt *LI, *RI;
2913 if (LI->
ult(Width) && RI->
ult(Width) && (*LI + *RI) == Width)
2914 return ConstantInt::get(L->getType(), *LI);
2938 if (ShVal0 != ShVal1)
2949 unsigned Mask = Width - 1;
2973 Value *ShAmt = matchShiftAmount(ShAmt0, ShAmt1, Width);
2975 ShAmt = matchShiftAmount(ShAmt1, ShAmt0, Width);
2979 return std::nullopt;
2981 FShiftArgs = {ShVal0, ShVal1, ShAmt};
2982 }
else if (isa<ZExtInst>(Or0) || isa<ZExtInst>(Or1)) {
2994 if (!isa<ZExtInst>(Or1))
2998 const APInt *ZextHighShlAmt;
3001 return std::nullopt;
3005 return std::nullopt;
3007 unsigned HighSize =
High->getType()->getScalarSizeInBits();
3008 unsigned LowSize =
Low->getType()->getScalarSizeInBits();
3011 if (ZextHighShlAmt->
ult(LowSize) || ZextHighShlAmt->
ugt(Width - HighSize))
3012 return std::nullopt;
3019 if (!isa<ZExtInst>(
Y))
3022 const APInt *ZextLowShlAmt;
3029 if (*ZextLowShlAmt + *ZextHighShlAmt != Width)
3035 ZextLowShlAmt->
ule(Width - LowSize) &&
"Invalid concat");
3037 FShiftArgs = {U, U, ConstantInt::get(Or0->
getType(), *ZextHighShlAmt)};
3042 if (FShiftArgs.
empty())
3043 return std::nullopt;
3045 Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
3046 return std::make_pair(IID, FShiftArgs);
3052 auto [IID, FShiftArgs] = *Opt;
3064 assert(
Or.getOpcode() == Instruction::Or &&
"bswap requires an 'or'");
3065 Value *Op0 =
Or.getOperand(0), *Op1 =
Or.getOperand(1);
3069 if ((Width & 1) != 0)
3071 unsigned HalfWidth = Width / 2;
3074 if (!isa<ZExtInst>(Op0))
3078 Value *LowerSrc, *ShlVal, *UpperSrc;
3091 NewUpper = Builder.
CreateShl(NewUpper, HalfWidth);
3098 Value *LowerBSwap, *UpperBSwap;
3101 return ConcatIntrinsicCalls(Intrinsic::bswap, UpperBSwap, LowerBSwap);
3105 Value *LowerBRev, *UpperBRev;
3108 return ConcatIntrinsicCalls(Intrinsic::bitreverse, UpperBRev, LowerBRev);
3115 unsigned NumElts = cast<FixedVectorType>(C1->
getType())->getNumElements();
3116 for (
unsigned i = 0; i != NumElts; ++i) {
3119 if (!EltC1 || !EltC2)
3138 Type *Ty =
A->getType();
3154 if (
A->getType()->isIntOrIntVectorTy()) {
3156 if (NumSignBits ==
A->getType()->getScalarSizeInBits() &&
3179 Cond->getType()->isIntOrIntVectorTy(1)) {
3205 Cond->getType()->isIntOrIntVectorTy(1) &&
3219 Value *
D,
bool InvertFalseVal) {
3222 Type *OrigType =
A->getType();
3225 if (
Value *
Cond = getSelectCondition(
A,
C, InvertFalseVal)) {
3230 Type *SelTy =
A->getType();
3231 if (
auto *VecTy = dyn_cast<VectorType>(
Cond->getType())) {
3233 unsigned Elts = VecTy->getElementCount().getKnownMinValue();
3254 bool IsAnd,
bool IsLogical,
3261 IsAnd ?
LHS->getInversePredicate() :
LHS->getPredicate();
3263 IsAnd ?
RHS->getInversePredicate() :
RHS->getPredicate();
3272 auto MatchRHSOp = [LHS0, CInt](
const Value *RHSOp) {
3275 (CInt->
isZero() && RHSOp == LHS0);
3304 Value *LHS0 =
LHS->getOperand(0), *RHS0 =
RHS->getOperand(0);
3305 Value *LHS1 =
LHS->getOperand(1), *RHS1 =
RHS->getOperand(1);
3307 const APInt *LHSC =
nullptr, *RHSC =
nullptr;
3314 if (LHS0 == RHS1 && LHS1 == RHS0) {
3318 if (LHS0 == RHS0 && LHS1 == RHS1) {
3321 bool IsSigned =
LHS->isSigned() ||
RHS->isSigned();
3350 RHS->setSameSign(
false);
3376 if (IsAnd && !IsLogical)
3433 const APInt *AndC, *SmallC =
nullptr, *BigC =
nullptr;
3447 if (SmallC && BigC) {
3448 unsigned BigBitSize = BigC->getBitWidth();
3467 bool TrueIfSignedL, TrueIfSignedR;
3473 if ((TrueIfSignedL && !TrueIfSignedR &&
3476 (!TrueIfSignedL && TrueIfSignedR &&
3483 if ((TrueIfSignedL && !TrueIfSignedR &&
3486 (!TrueIfSignedL && TrueIfSignedR &&
3495 return foldAndOrOfICmpsUsingRanges(LHS, RHS, IsAnd);
3506 if (
auto *LHSCmp = dyn_cast<ICmpInst>(LHS))
3507 if (
auto *RHSCmp = dyn_cast<ICmpInst>(RHS))
3508 if (
Value *Res = foldAndOrOfICmps(LHSCmp, RHSCmp,
I, IsAnd, IsLogical))
3511 if (
auto *LHSCmp = dyn_cast<FCmpInst>(LHS))
3512 if (
auto *RHSCmp = dyn_cast<FCmpInst>(RHS))
3513 if (
Value *Res = foldLogicOfFCmps(LHSCmp, RHSCmp, IsAnd, IsLogical))
3516 if (
Value *Res = foldEqOfParts(LHS, RHS, IsAnd))
3524 assert(
I.getOpcode() == Instruction::Or &&
3525 "Simplification only supports or at the moment.");
3527 Value *Cmp1, *Cmp2, *Cmp3, *Cmp4;
3579 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
3580 Type *Ty =
I.getType();
3582 if (
auto *SI0 = dyn_cast<SelectInst>(Op0)) {
3584 foldAndOrOfSelectUsingImpliedCond(Op1, *SI0,
false))
3587 if (
auto *SI1 = dyn_cast<SelectInst>(Op1)) {
3589 foldAndOrOfSelectUsingImpliedCond(Op0, *SI1,
false))
3613 if (cast<PossiblyDisjointInst>(
I).isDisjoint()) {
3631 return BinaryOperator::CreateXor(
Or, ConstantInt::get(Ty, *CV));
3639 return BinaryOperator::CreateMul(
X, IncrementY);
3648 const APInt *C0, *C1;
3667 if ((*C0 & *C1).
isZero()) {
3672 Constant *C01 = ConstantInt::get(Ty, *C0 | *C1);
3673 return BinaryOperator::CreateAnd(
A, C01);
3679 Constant *C01 = ConstantInt::get(Ty, *C0 | *C1);
3680 return BinaryOperator::CreateAnd(
B, C01);
3684 const APInt *C2, *C3;
3689 Constant *C01 = ConstantInt::get(Ty, *C0 | *C1);
3690 return BinaryOperator::CreateAnd(
Or, C01);
3700 if (
Value *V = matchSelectFromAndOr(
A,
C,
B,
D))
3702 if (
Value *V = matchSelectFromAndOr(
A,
C,
D,
B))
3704 if (
Value *V = matchSelectFromAndOr(
C,
A,
B,
D))
3706 if (
Value *V = matchSelectFromAndOr(
C,
A,
D,
B))
3708 if (
Value *V = matchSelectFromAndOr(
B,
D,
A,
C))
3710 if (
Value *V = matchSelectFromAndOr(
B,
D,
C,
A))
3712 if (
Value *V = matchSelectFromAndOr(
D,
B,
A,
C))
3714 if (
Value *V = matchSelectFromAndOr(
D,
B,
C,
A))
3723 if (
Value *V = matchSelectFromAndOr(
A,
C,
B,
D,
true))
3725 if (
Value *V = matchSelectFromAndOr(
A,
C,
D,
B,
true))
3727 if (
Value *V = matchSelectFromAndOr(
C,
A,
B,
D,
true))
3729 if (
Value *V = matchSelectFromAndOr(
C,
A,
D,
B,
true))
3738 return BinaryOperator::CreateOr(Op0,
C);
3745 return BinaryOperator::CreateOr(Op1,
C);
3751 bool SwappedForXor =
false;
3754 SwappedForXor =
true;
3761 return BinaryOperator::CreateOr(Op0,
B);
3763 return BinaryOperator::CreateOr(Op0,
A);
3768 return BinaryOperator::CreateOr(
A,
B);
3796 return BinaryOperator::CreateOr(Nand,
C);
3804 foldBooleanAndOr(Op0, Op1,
I,
false,
false))
3808 bool IsLogical = isa<SelectInst>(Op1);
3809 if (
auto *V = reassociateBooleanAndOr(Op0,
X,
Y,
I,
false,
3814 bool IsLogical = isa<SelectInst>(Op0);
3815 if (
auto *V = reassociateBooleanAndOr(Op1,
X,
Y,
I,
false,
3835 A->getType()->isIntOrIntVectorTy(1))
3848 return BinaryOperator::CreateOr(Inner, CI);
3855 Value *
X =
nullptr, *
Y =
nullptr;
3887 return BinaryOperator::CreateXor(
A,
B);
3903 Value *
Mul, *Ov, *MulIsNotZero, *UMulWithOv;
3920 if (
match(UMulWithOv, m_Intrinsic<Intrinsic::umul_with_overflow>(
3924 return BinaryOperator::CreateAnd(NotNullA, NotNullB);
3933 const APInt *C1, *C2;
3949 : C2->
uadd_ov(*C1, Overflow));
3953 return BinaryOperator::CreateOr(Ov, NewCmp);
3973 Value *Start =
nullptr, *Step =
nullptr;
3991 return BinaryOperator::CreateOr(
4003 return BinaryOperator::CreateOr(
4011 return Canonicalized;
4013 if (
Instruction *Folded = foldLogicOfIsFPClass(
I, Op0, Op1))
4034 Attribute::NoImplicitFloat)) {
4047 if ((KnownX.
One & *C2) == *C2)
4048 return BinaryOperator::CreateAnd(
X, ConstantInt::get(Ty, *C1 | *C2));
4057 return BinaryOperator::CreateOr(V, Op1);
4061 return BinaryOperator::CreateOr(Op0, V);
4063 if (cast<PossiblyDisjointInst>(
I).isDisjoint())
4074 assert(
I.getOpcode() == Instruction::Xor);
4075 Value *Op0 =
I.getOperand(0);
4076 Value *Op1 =
I.getOperand(1);
4087 return BinaryOperator::CreateXor(
A,
B);
4095 return BinaryOperator::CreateXor(
A,
B);
4103 return BinaryOperator::CreateXor(
A,
B);
4125 assert(
I.getOpcode() == Instruction::Xor &&
I.getOperand(0) == LHS &&
4126 I.getOperand(1) == RHS &&
"Should be 'xor' with these operands");
4129 Value *LHS0 =
LHS->getOperand(0), *LHS1 =
LHS->getOperand(1);
4130 Value *RHS0 =
RHS->getOperand(0), *RHS1 =
RHS->getOperand(1);
4133 if (LHS0 == RHS1 && LHS1 == RHS0) {
4137 if (LHS0 == RHS0 && LHS1 == RHS1) {
4140 bool IsSigned =
LHS->isSigned() ||
RHS->isSigned();
4148 const APInt *LC, *RC;
4157 bool TrueIfSignedL, TrueIfSignedR;
4173 if (CRUnion && CRIntersect)
4174 if (
auto CR = CRUnion->exactIntersectWith(CRIntersect->inverse())) {
4175 if (CR->isFullSet())
4177 if (CR->isEmptySet())
4182 CR->getEquivalentICmp(NewPred, NewC,
Offset);
4191 ConstantInt::get(Ty, NewC));
4209 if (OrICmp == LHS && AndICmp == RHS) {
4214 if (OrICmp == RHS && AndICmp == LHS) {
4221 Y->setPredicate(
Y->getInversePredicate());
4223 if (!
Y->hasOneUse()) {
4234 Y->replaceUsesWithIf(NotY,
4235 [NotY](
Use &U) {
return U.getUser() != NotY; });
4275 return BinaryOperator::CreateXor(NewA,
X);
4281 Type *EltTy =
C->getType()->getScalarType();
4287 return BinaryOperator::CreateOr(
LHS,
RHS);
4302 return A ==
C ||
A ==
D ||
B ==
C ||
B ==
D;
4311 return BinaryOperator::CreateOr(
X, NotY);
4319 return BinaryOperator::CreateOr(
Y, NotX);
4329 assert(
Xor.getOpcode() == Instruction::Xor &&
"Expected an xor instruction.");
4335 Value *Op0 =
Xor.getOperand(0), *Op1 =
Xor.getOperand(1);
4350 auto *
Add = cast<BinaryOperator>(Op0);
4351 Value *NegA =
Add->hasNoUnsignedWrap()
4361 auto *
I = dyn_cast<Instruction>(
Op);
4368 auto *
I = cast<Instruction>(
Op);
4371 Op->replaceUsesWithIf(NotOp,
4372 [NotOp](
Use &U) {
return U.getUser() != NotOp; });
4394 bool IsBinaryOp = isa<BinaryOperator>(
I);
4434 bool IsBinaryOp = isa<BinaryOperator>(
I);
4436 Value *NotOp0 =
nullptr;
4437 Value *NotOp1 =
nullptr;
4438 Value **OpToInvert =
nullptr;
4483 Type *Ty =
I.getType();
4487 return BinaryOperator::CreateOr(
X, NotY);
4498 return BinaryOperator::CreateAnd(
X, NotY);
4513 return BinaryOperator::CreateAnd(DecX, NotY);
4518 return BinaryOperator::CreateAShr(
X,
Y);
4524 return BinaryOperator::CreateAShr(
X,
Y);
4580 Type *SextTy = cast<BitCastOperator>(NotOp)->getSrcTy();
4586 if (
auto *NotOpI = dyn_cast<Instruction>(NotOp))
4593 auto *
II = dyn_cast<IntrinsicInst>(NotOp);
4594 if (
II &&
II->hasOneUse()) {
4602 if (
II->getIntrinsicID() == Intrinsic::is_fpclass) {
4603 ConstantInt *ClassMask = cast<ConstantInt>(
II->getArgOperand(1));
4605 1, ConstantInt::get(ClassMask->
getType(),
4620 if (
auto *Sel = dyn_cast<SelectInst>(NotOp)) {
4621 Value *TV = Sel->getTrueValue();
4622 Value *FV = Sel->getFalseValue();
4623 auto *CmpT = dyn_cast<CmpInst>(TV);
4624 auto *CmpF = dyn_cast<CmpInst>(FV);
4625 bool InvertibleT = (CmpT && CmpT->hasOneUse()) || isa<Constant>(TV);
4626 bool InvertibleF = (CmpF && CmpF->hasOneUse()) || isa<Constant>(FV);
4627 if (InvertibleT && InvertibleF) {
4629 CmpT->setPredicate(CmpT->getInversePredicate());
4633 CmpF->setPredicate(CmpF->getInversePredicate());
4687 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
4695 return BinaryOperator::CreateXor(XorAC,
Y);
4698 return BinaryOperator::CreateXor(XorBC,
X);
4708 return BinaryOperator::CreateDisjointOr(Op0, Op1);
4710 return BinaryOperator::CreateOr(Op0, Op1);
4727 return BinaryOperator::CreateXor(
4750 *CA ==
X->getType()->getScalarSizeInBits() - 1 &&
4758 Type *Ty =
I.getType();
4766 return BinaryOperator::CreateSub(ConstantInt::get(Ty, *
C + *RHSC),
X);
4770 return BinaryOperator::CreateAdd(
X, ConstantInt::get(Ty, *
C + *RHSC));
4775 return BinaryOperator::CreateXor(
X, ConstantInt::get(Ty, *
C ^ *RHSC));
4780 auto *
II = dyn_cast<IntrinsicInst>(Op0);
4783 if ((IID == Intrinsic::ctlz || IID == Intrinsic::cttz) &&
4786 IID = (IID == Intrinsic::ctlz) ? Intrinsic::cttz : Intrinsic::ctlz;
4799 return BinaryOperator::CreateShl(NotX, ConstantInt::get(Ty, *
C));
4805 return BinaryOperator::CreateLShr(NotX, ConstantInt::get(Ty, *
C));
4824 Attribute::NoImplicitFloat)) {
4848 return BinaryOperator::CreateXor(Opnd0, ConstantInt::get(Ty, FoldConst));
4881 return BinaryOperator::CreateXor(
4887 return BinaryOperator::CreateXor(
4893 return BinaryOperator::CreateOr(
A,
B);
4897 return BinaryOperator::CreateOr(
A,
B);
4907 return BinaryOperator::CreateOr(
A,
B);
4922 if (
B ==
C ||
B ==
D)
4933 if (
I.getType()->isIntOrIntVectorTy(1) &&
4936 bool NeedFreeze = isa<SelectInst>(Op0) && isa<SelectInst>(Op1) &&
B ==
D;
4937 if (
B ==
C ||
B ==
D)
4949 if (
auto *
LHS = dyn_cast<ICmpInst>(
I.getOperand(0)))
4950 if (
auto *
RHS = dyn_cast<ICmpInst>(
I.getOperand(1)))
4954 if (
Instruction *CastedXor = foldCastedBitwiseLogic(
I))
4974 return Canonicalized;
4976 if (
Instruction *Folded = foldLogicOfIsFPClass(
I, Op0, Op1))
4979 if (
Instruction *Folded = canonicalizeConditionalNegationViaMathToSelect(
I))
AMDGPU Register Bank Select
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static bool isSigned(unsigned int Opcode)
static Value * foldIsPowerOf2OrZero(ICmpInst *Cmp0, ICmpInst *Cmp1, bool IsAnd, InstCombiner::BuilderTy &Builder, InstCombinerImpl &IC)
Fold (icmp eq ctpop(X) 1) | (icmp eq X 0) into (icmp ult ctpop(X) 2) and fold (icmp ne ctpop(X) 1) & ...
static unsigned conjugateICmpMask(unsigned Mask)
Convert an analysis of a masked ICmp into its equivalent if all boolean operations had the opposite s...
static Instruction * foldNotXor(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
static Value * getFCmpValue(unsigned Code, Value *LHS, Value *RHS, InstCombiner::BuilderTy &Builder, FMFSource FMF)
This is the complement of getFCmpCode, which turns an opcode and two operands into either a FCmp inst...
static bool matchIsFPClassLikeFCmp(Value *Op, Value *&ClassVal, uint64_t &ClassMask)
Match an fcmp against a special value that performs a test possible by llvm.is.fpclass.
static Value * foldSignedTruncationCheck(ICmpInst *ICmp0, ICmpInst *ICmp1, Instruction &CxtI, InstCombiner::BuilderTy &Builder)
General pattern: X & Y.
static Instruction * visitMaskedMerge(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
If we have a masked merge, in the canonical form of: (assuming that A only has one use....
static Instruction * canonicalizeAbs(BinaryOperator &Xor, InstCombiner::BuilderTy &Builder)
Canonicalize a shifty way to code absolute value to the more common pattern that uses negation and se...
static Value * foldIsPowerOf2(ICmpInst *Cmp0, ICmpInst *Cmp1, bool JoinedByAnd, InstCombiner::BuilderTy &Builder, InstCombinerImpl &IC)
Reduce a pair of compares that check if a value has exactly 1 bit set.
static Value * foldUnsignedUnderflowCheck(ICmpInst *ZeroICmp, ICmpInst *UnsignedICmp, bool IsAnd, const SimplifyQuery &Q, InstCombiner::BuilderTy &Builder)
Commuted variants are assumed to be handled by calling this function again with the parameters swappe...
static Instruction * foldOrToXor(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
static Value * simplifyAndOrWithOpReplaced(Value *V, Value *Op, Value *RepOp, bool SimplifyOnly, InstCombinerImpl &IC, unsigned Depth=0)
static Instruction * matchDeMorgansLaws(BinaryOperator &I, InstCombiner &IC)
Match variations of De Morgan's Laws: (~A & ~B) == (~(A | B)) (~A | ~B) == (~(A & B))
static Instruction * foldAndToXor(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
static unsigned getMaskedICmpType(Value *A, Value *B, Value *C, ICmpInst::Predicate Pred)
Return the set of patterns (from MaskedICmpType) that (icmp SCC (A & B), C) satisfies.
static Instruction * foldXorToXor(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
A ^ B can be specified using other logic ops in a variety of patterns.
static bool canNarrowShiftAmt(Constant *C, unsigned BitWidth)
Return true if a constant shift amount is always less than the specified bit-width.
static Instruction * foldLogicCastConstant(BinaryOperator &Logic, CastInst *Cast, InstCombinerImpl &IC)
Fold {and,or,xor} (cast X), C.
static Value * foldAndOrOfICmpEqConstantAndICmp(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd, bool IsLogical, IRBuilderBase &Builder)
static bool canFreelyInvert(InstCombiner &IC, Value *Op, Instruction *IgnoredUser)
static Value * foldNegativePower2AndShiftedMask(Value *A, Value *B, Value *D, Value *E, ICmpInst::Predicate PredL, ICmpInst::Predicate PredR, InstCombiner::BuilderTy &Builder)
Try to fold (icmp(A & B) == 0) & (icmp(A & D) != E) into (icmp A u< D) iff B is a contiguous set of o...
static Value * matchIsFiniteTest(InstCombiner::BuilderTy &Builder, FCmpInst *LHS, FCmpInst *RHS)
and (fcmp ord x, 0), (fcmp u* x, inf) -> fcmp o* x, inf
static Value * foldPowerOf2AndShiftedMask(ICmpInst *Cmp0, ICmpInst *Cmp1, bool JoinedByAnd, InstCombiner::BuilderTy &Builder)
Try to fold ((icmp X u< P) & (icmp(X & M) != M)) or ((icmp X s> -1) & (icmp(X & M) !...
static Value * stripSignOnlyFPOps(Value *Val)
Ignore all operations which only change the sign of a value, returning the underlying magnitude value...
static Value * freelyInvert(InstCombinerImpl &IC, Value *Op, Instruction *IgnoredUser)
static Value * foldLogOpOfMaskedICmpsAsymmetric(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd, Value *A, Value *B, Value *C, Value *D, Value *E, ICmpInst::Predicate PredL, ICmpInst::Predicate PredR, unsigned LHSMask, unsigned RHSMask, InstCombiner::BuilderTy &Builder)
Try to fold (icmp(A & B) ==/!= 0) &/| (icmp(A & D) ==/!= E) into a single (icmp(A & X) ==/!...
static std::optional< IntPart > matchIntPart(Value *V)
Match an extraction of bits from an integer.
static Instruction * canonicalizeLogicFirst(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
static Instruction * reassociateFCmps(BinaryOperator &BO, InstCombiner::BuilderTy &Builder)
This a limited reassociation for a special case (see above) where we are checking if two values are e...
static Value * foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd, Value *A, Value *B, Value *D, Value *E, ICmpInst::Predicate PredL, ICmpInst::Predicate PredR, InstCombiner::BuilderTy &Builder)
Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E) into a single (icmp(A & X) ==/!...
static Value * getNewICmpValue(unsigned Code, bool Sign, Value *LHS, Value *RHS, InstCombiner::BuilderTy &Builder)
This is the complement of getICmpCode, which turns an opcode and two operands into either a constant ...
static std::optional< std::pair< unsigned, unsigned > > getMaskedTypeForICmpPair(Value *&A, Value *&B, Value *&C, Value *&D, Value *&E, ICmpInst *LHS, ICmpInst *RHS, ICmpInst::Predicate &PredL, ICmpInst::Predicate &PredR)
Handle (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E).
static Value * extractIntPart(const IntPart &P, IRBuilderBase &Builder)
Materialize an extraction of bits from an integer in IR.
static bool matchUnorderedInfCompare(FCmpInst::Predicate P, Value *LHS, Value *RHS)
Matches fcmp u__ x, +/-inf.
static Instruction * matchOrConcat(Instruction &Or, InstCombiner::BuilderTy &Builder)
Attempt to combine or(zext(x),shl(zext(y),bw/2) concat packing patterns.
static bool matchIsNotNaN(FCmpInst::Predicate P, Value *LHS, Value *RHS)
Matches canonical form of isnan, fcmp ord x, 0.
static bool areInverseVectorBitmasks(Constant *C1, Constant *C2)
If all elements of two constant vectors are 0/-1 and inverses, return true.
MaskedICmpType
Classify (icmp eq (A & B), C) and (icmp ne (A & B), C) as matching patterns that can be simplified.
static Instruction * foldComplexAndOrPatterns(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
Try folding relatively complex patterns for both And and Or operations with all And and Or swapped.
static Value * foldOrOfInversions(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
static Value * foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd, bool IsLogical, InstCombiner::BuilderTy &Builder, const SimplifyQuery &Q)
Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E) into a single (icmp(A & X) ==/!...
static Instruction * matchFunnelShift(Instruction &Or, InstCombinerImpl &IC)
Match UB-safe variants of the funnel shift intrinsic.
static Instruction * reassociateForUses(BinaryOperator &BO, InstCombinerImpl::BuilderTy &Builder)
Try to reassociate a pair of binops so that values with one use only are part of the same instruction...
static Value * foldAndOrOfICmpsWithPow2AndWithZero(InstCombiner::BuilderTy &Builder, ICmpInst *LHS, ICmpInst *RHS, bool IsAnd, const SimplifyQuery &Q)
static Instruction * foldBitwiseLogicWithIntrinsics(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
static Value * foldAndOrOfICmpsWithConstEq(ICmpInst *Cmp0, ICmpInst *Cmp1, bool IsAnd, bool IsLogical, InstCombiner::BuilderTy &Builder, const SimplifyQuery &Q)
Reduce logic-of-compares with equality to a constant by substituting a common operand with the consta...
This file provides internal interfaces used to implement the InstCombine.
This file provides the interface for the instcombine pass implementation.
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static constexpr int Concat[]
support::ulittle16_t & Lo
support::ulittle16_t & Hi
bool bitwiseIsEqual(const APFloat &RHS) const
APInt bitcastToAPInt() const
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
APInt zext(unsigned width) const
Zero extend to a new width.
uint64_t getZExtValue() const
Get zero extended value.
APInt trunc(unsigned width) const
Truncate to new width.
unsigned countLeadingOnes() const
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
APInt usub_ov(const APInt &RHS, bool &Overflow) const
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
bool isSignMask() const
Check if the APInt's value is returned by getSignMask.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
APInt sadd_ov(const APInt &RHS, bool &Overflow) const
bool intersects(const APInt &RHS) const
This operation tests if there are any pairs of corresponding bits between this APInt and RHS that are...
int32_t exactLogBase2() const
APInt reverseBits() const
APInt uadd_ov(const APInt &RHS, bool &Overflow) const
unsigned countr_zero() const
Count the number of trailing zero bits.
unsigned countLeadingZeros() const
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
APInt shl(unsigned shiftAmt) const
Left-shift function.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
APInt ssub_ov(const APInt &RHS, bool &Overflow) const
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
void clearSignBit()
Set the sign bit to 0.
const Function * getParent() const
Return the enclosing method, or null if none.
bool isSigned() const
Whether the intrinsic is signed or unsigned.
Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
BinaryOps getOpcode() const
static BinaryOperator * CreateNot(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
static BinaryOperator * CreateWithCopiedFlags(BinaryOps Opc, Value *V1, Value *V2, Value *CopyO, const Twine &Name="", InsertPosition InsertBefore=nullptr)
This class represents a no-op cast from one type to another.
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
This is the base class for all instructions that perform data casts.
Type * getSrcTy() const
Return the source type, as a convenience.
Instruction::CastOps getOpcode() const
Return the opcode of this CastInst.
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
Type * getDestTy() const
Return the destination type, as a convenience.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ ICMP_ULT
unsigned less than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Predicate getOrderedPredicate() const
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Predicate getPredicate() const
Return the predicate for this instruction.
static bool isUnordered(Predicate predicate)
Determine if the predicate is an unordered operation.
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static Constant * getNot(Constant *C)
static Constant * getXor(Constant *C1, Constant *C2)
static Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getExactLogBase2(Constant *C)
If C is a scalar/fixed width vector of known powers of 2, then this function returns a new scalar/fix...
static Constant * getZero(Type *Ty, bool Negative=false)
This is the shared class of boolean and integer constants.
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
static ConstantInt * getTrue(LLVMContext &Context)
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
static ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
This class represents a range of values.
std::optional< ConstantRange > exactUnionWith(const ConstantRange &CR) const
Union the two ranges and return the result if it can be represented exactly, otherwise return std::nu...
ConstantRange subtract(const APInt &CI) const
Subtract the specified constant from the endpoints of this constant range.
const APInt & getLower() const
Return the lower value for this range.
bool isWrappedSet() const
Return true if this set wraps around the unsigned domain.
const APInt & getUpper() const
Return the upper value for this range.
static ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
std::optional< ConstantRange > exactIntersectWith(const ConstantRange &CR) const
Intersect the two ranges and return the result if it can be represented exactly, otherwise return std...
This is an important base class in LLVM.
static Constant * replaceUndefsWith(Constant *C, Constant *Replacement)
Try to replace undefined constant C or undefined elements in C with Replacement.
static Constant * mergeUndefsWith(Constant *C, Constant *Other)
Merges undefs of a Constant with another Constant, along with the undefs already present.
static Constant * getAllOnesValue(Type *Ty)
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
bool isZeroValue() const
Return true if the value is negative zero or null value.
This class represents an Operation in the Expression.
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
This instruction compares its operands according to the predicate given to the constructor.
This provides a helper for copying FMF from an instruction or setting specified flags.
static FMFSource intersect(Value *A, Value *B)
Intersect the FMF from two instructions.
Convenience struct for specifying and reasoning about fast-math flags.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
This instruction compares its operands according to the predicate given to the constructor.
CmpPredicate getInverseCmpPredicate() const
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
bool isEquality() const
Return true if this predicate is either EQ or NE.
Common base class shared among various IRBuilders.
Value * CreateICmpULT(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateFCmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateLogicalOp(Instruction::BinaryOps Opc, Value *Cond1, Value *Cond2, const Twine &Name="")
IntegerType * getIntNTy(unsigned N)
Fetch the type representing an N-bit integer.
Value * CreateICmpSGT(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateZExtOrTrunc(Value *V, Type *DestTy, const Twine &Name="")
Create a ZExt or Trunc from the integer value V to DestTy.
ConstantInt * getTrue()
Get the constant value for i1 true.
Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
Value * CreateSExt(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateFreeze(Value *V, const Twine &Name="")
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Value * CreateCast(Instruction::CastOps Op, Value *V, Type *DestTy, const Twine &Name="", MDNode *FPMathTag=nullptr, FMFSource FMFSource={})
Value * CreateIsNotNeg(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg > -1.
BasicBlock * GetInsertBlock() const
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateFCmpFMF(CmpInst::Predicate P, Value *LHS, Value *RHS, FMFSource FMFSource, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateNeg(Value *V, const Twine &Name="", bool HasNSW=false)
Value * CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with 2 operands which is mangled on the first type.
CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Value * CreateNot(Value *V, const Twine &Name="")
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateIsNeg(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg < 0.
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateICmpUGT(Value *LHS, Value *RHS, const Twine &Name="")
CallInst * CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with 1 operand which is mangled on its type.
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateIsNotNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg != 0.
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateLogicalAnd(Value *Cond1, Value *Cond2, const Twine &Name="")
Value * CreateICmpSLT(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateIsNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg == 0.
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateLogicalOr(Value *Cond1, Value *Cond2, const Twine &Name="")
Value * CreateFNeg(Value *V, const Twine &Name="", MDNode *FPMathTag=nullptr)
Instruction * canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(BinaryOperator &I)
Instruction * foldBinOpIntoSelectOrPhi(BinaryOperator &I)
This is a convenience wrapper function for the above two functions.
Instruction * visitOr(BinaryOperator &I)
bool SimplifyAssociativeOrCommutative(BinaryOperator &I)
Performs a few simplifications for operators which are associative or commutative.
Value * foldUsingDistributiveLaws(BinaryOperator &I)
Tries to simplify binary operations which some other binary operation distributes over.
Instruction * foldBinOpShiftWithShift(BinaryOperator &I)
Value * insertRangeTest(Value *V, const APInt &Lo, const APInt &Hi, bool isSigned, bool Inside)
Emit a computation of: (V >= Lo && V < Hi) if Inside is true, otherwise (V < Lo || V >= Hi).
bool sinkNotIntoLogicalOp(Instruction &I)
std::optional< std::pair< Intrinsic::ID, SmallVector< Value *, 3 > > > convertOrOfShiftsToFunnelShift(Instruction &Or)
Constant * getLosslessUnsignedTrunc(Constant *C, Type *TruncTy)
Instruction * visitAnd(BinaryOperator &I)
bool sinkNotIntoOtherHandOfLogicalOp(Instruction &I)
Instruction * foldBinopWithPhiOperands(BinaryOperator &BO)
For a binary operator with 2 phi operands, try to hoist the binary operation before the phi.
Instruction * foldAddLikeCommutative(Value *LHS, Value *RHS, bool NSW, bool NUW)
Common transforms for add / disjoint or.
Value * simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1, bool Inverted)
Try to fold a signed range checked with lower bound 0 to an unsigned icmp.
Instruction * tryFoldInstWithCtpopWithNot(Instruction *I)
Value * SimplifyAddWithRemainder(BinaryOperator &I)
Tries to simplify add operations using the definition of remainder.
Constant * getLosslessSignedTrunc(Constant *C, Type *TruncTy)
Instruction * visitXor(BinaryOperator &I)
bool SimplifyDemandedInstructionBits(Instruction &Inst)
Tries to simplify operands to an integer instruction based on its demanded bits.
Instruction * foldVectorBinop(BinaryOperator &Inst)
Canonicalize the position of binops relative to shufflevector.
Instruction * matchBSwapOrBitReverse(Instruction &I, bool MatchBSwaps, bool MatchBitReversals)
Given an initial instruction, check to see if it is the root of a bswap/bitreverse idiom.
void freelyInvertAllUsersOf(Value *V, Value *IgnoredUser=nullptr)
Freely adapt every user of V as-if V was changed to !V.
The core instruction combiner logic.
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero=false, unsigned Depth=0, const Instruction *CxtI=nullptr)
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
unsigned ComputeNumSignBits(const Value *Op, unsigned Depth=0, const Instruction *CxtI=nullptr) const
static Value * peekThroughBitcast(Value *V, bool OneUseOnly=false)
Return the source operand of a potentially bitcasted value while optionally checking if it has one us...
bool canFreelyInvertAllUsersOf(Instruction *V, Value *IgnoredUser)
Given i1 V, can every user of V be freely adapted if V is changed to !V ? InstCombine's freelyInvertA...
void addToWorklist(Instruction *I)
void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth, const Instruction *CxtI) const
bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth=0, const Instruction *CxtI=nullptr) const
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
const SimplifyQuery & getSimplifyQuery() const
void pushUsersToWorkList(Instruction &I)
When an instruction is simplified, add all users of the instruction to the work lists because they mi...
void push(Instruction *I)
Push the instruction onto the worklist stack.
void removeFromParent()
This method unlinks 'this' from the containing basic block, but does not delete it.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
A wrapper class for inspecting calls to intrinsic functions.
This class represents a sign extension of integer types.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
The instances of the Type class are immutable: once they are created, they are never changed.
const fltSemantics & getFltSemantics() const
bool isVectorTy() const
True if this is an instance of VectorType.
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isIEEE() const
Return whether the type is IEEE compatible, as defined by the eponymous method in APFloat.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isIEEELikeFPTy() const
Return true if this is a well-behaved IEEE-like type, which has a IEEE compatible layout as defined b...
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
iterator_range< user_iterator > users()
bool hasNUsesOrMore(unsigned N) const
Return true if this value has N uses or more.
bool hasNUses(unsigned N) const
Return true if this Value has exactly N uses.
StringRef getName() const
Return a constant reference to the value's name.
void takeName(Value *V)
Transfer the name from V to this value.
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Represents an op.with.overflow intrinsic.
This class represents zero extension of integer types.
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
@ C
The default llvm calling convention, compatible with C.
Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
cst_pred_ty< is_lowbit_mask > m_LowBitMask()
Match an integer or vector with only the low bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
cst_pred_ty< is_negative > m_Negative()
Match an integer or vector of negative values.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
CmpClass_match< LHS, RHS, FCmpInst > m_FCmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
cst_pred_ty< is_sign_mask > m_SignMask()
Match an integer or vector with only the sign bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
cstfp_pred_ty< is_inf > m_Inf()
Match a positive or negative infinity FP constant.
m_Intrinsic_Ty< Opnd0 >::Ty m_BitReverse(const Opnd0 &Op0)
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
match_combine_or< CastInst_match< OpTy, TruncInst >, OpTy > m_TruncOrSelf(const OpTy &Op)
auto m_LogicalOp()
Matches either L && R or L || R where L and R are arbitrary values.
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWSub(const LHS &L, const RHS &R)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
match_combine_or< CastInst_match< OpTy, ZExtInst >, OpTy > m_ZExtOrSelf(const OpTy &Op)
bool match(Val *V, const Pattern &P)
cst_pred_ty< is_shifted_mask > m_ShiftedMask()
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
cstfp_pred_ty< is_any_zero_fp > m_AnyZeroFP()
Match a floating-point negative zero or positive zero.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
DisjointOr_match< LHS, RHS > m_DisjointOr(const LHS &L, const RHS &R)
constantexpr_match m_ConstantExpr()
Match a constant expression or a constant that contains a constant expression.
specific_intval< true > m_SpecificIntAllowPoison(const APInt &V)
CmpClass_match< LHS, RHS, ICmpInst, true > m_c_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
Matches an ICmp with a predicate over LHS and RHS in either order.
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
match_combine_or< CastInst_match< OpTy, SExtInst >, OpTy > m_SExtOrSelf(const OpTy &Op)
BinOpPred_match< LHS, RHS, is_logical_shift_op > m_LogicalShift(const LHS &L, const RHS &R)
Matches logical shift operations.
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
bind_ty< WithOverflowInst > m_WithOverflowInst(WithOverflowInst *&I)
Match a with overflow intrinsic, capturing it if we match.
BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)
Matches an Xor with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
cst_pred_ty< is_zero_int > m_ZeroInt()
Match an integer 0 or a vector with all elements equal to 0.
apint_match m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
OneUse_match< T > m_OneUse(const T &SubPattern)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
BinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub > m_Neg(const ValTy &V)
Matches a 'Neg' as 'sub 0, V'.
match_combine_and< class_match< Constant >, match_unless< constantexpr_match > > m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
class_match< CmpInst > m_Cmp()
Matches any compare instruction and ignore it.
cst_pred_ty< is_negated_power2 > m_NegatedPower2()
Match an integer or vector negated power-of-2.
DisjointOr_match< LHS, RHS, true > m_c_DisjointOr(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches a Add with LHS and RHS in either order.
SpecificCmpClass_match< LHS, RHS, FCmpInst > m_SpecificFCmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
apfloat_match m_APFloatAllowPoison(const APFloat *&Res)
Match APFloat while allowing poison in splat vector constants.
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty, true > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty, true > > > m_c_MaxOrMin(const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, SExtInst >, NNegZExt_match< OpTy > > m_SExtLike(const OpTy &Op)
Match either "sext" or "zext nneg".
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
cst_pred_ty< is_maxsignedvalue > m_MaxSignedValue()
Match an integer or vector with values having all bits except for the high bit set (0x7f....
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
cstfp_pred_ty< is_pos_zero_fp > m_PosZeroFP()
Match a floating-point positive zero.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_BSwap(const Opnd0 &Op0)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
ElementWiseBitCast_match< OpTy > m_ElementWiseBitCast(const OpTy &Op)
m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)
m_Intrinsic_Ty< Opnd0, Opnd1 >::Ty m_CopySign(const Opnd0 &Op0, const Opnd1 &Op1)
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
match_unless< Ty > m_Unless(const Ty &M)
Match if the inner matcher does NOT match.
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
cst_pred_ty< icmp_pred_with_threshold > m_SpecificInt_ICMP(ICmpInst::Predicate Predicate, const APInt &Threshold)
Match an integer or vector with every element comparing 'pred' (eq/ne/...) to Threshold.
NodeAddr< CodeNode * > Code
This is an optimization pass for GlobalISel generic memory operations.
Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Constant * getPredForFCmpCode(unsigned Code, Type *OpTy, CmpInst::Predicate &Pred)
This is the complement of getFCmpCode.
bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS, bool &TrueIfSigned)
Given an exploded icmp instruction, return true if the comparison only checks the sign bit.
bool predicatesFoldable(CmpInst::Predicate P1, CmpInst::Predicate P2)
Return true if both predicates match sign or if at least one of them is an equality comparison (which...
Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given value is known to have exactly one bit set when defined.
Value * simplifyOrInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an Or, fold the result or return null.
Value * simplifyXorInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an Xor, fold the result or return null.
bool isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be undef, but may be poison.
bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
bool isKnownNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known be negative (i.e.
bool recognizeBSwapOrBitReverseIdiom(Instruction *I, bool MatchBSwaps, bool MatchBitReversals, SmallVectorImpl< Instruction * > &InsertedInsts)
Try to match a bswap or bitreverse idiom.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Value * simplifyICmpInst(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an ICmpInst, fold the result or return null.
Value * simplifyAndInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an And, fold the result or return null.
bool isKnownInversion(const Value *X, const Value *Y)
Return true iff:
bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
@ Or
Bitwise or logical OR of integers.
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ And
Bitwise or logical AND of integers.
std::optional< DecomposedBitTest > decomposeBitTestICmp(Value *LHS, Value *RHS, CmpInst::Predicate Pred, bool LookThroughTrunc=true, bool AllowNonZeroC=false)
Decompose an icmp into the form ((X & Mask) pred C) if possible.
DWARFExpression::Operation Op
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
constexpr unsigned BitWidth
std::pair< Value *, FPClassTest > fcmpToClassTest(CmpInst::Predicate Pred, const Function &F, Value *LHS, Value *RHS, bool LookThroughSrc=true)
Returns a pair of values, which if passed to llvm.is.fpclass, returns the same result as an fcmp with...
APFloat neg(APFloat X)
Returns the negated value of the argument.
unsigned getICmpCode(CmpInst::Predicate Pred)
Encode a icmp predicate into a three bit mask.
bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
unsigned getFCmpCode(CmpInst::Predicate CC)
Similar to getICmpCode but for FCmpInst.
Constant * getPredForICmpCode(unsigned Code, bool Sign, Type *OpTy, CmpInst::Predicate &Pred)
This is the complement of getICmpCode.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
bool isNonNegative() const
Returns true if this value is known to be non-negative.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
SimplifyQuery getWithInstruction(const Instruction *I) const