23 using namespace PatternMatch;
25 #define DEBUG_TYPE "instcombine"
31 "Unexpected FCmp predicate!");
62 return Builder.CreateICmp(NewPred, LHS, RHS);
71 "Unexpected FCmp predicate!");
76 return Builder.CreateFCmp(Pred, LHS, RHS);
86 assert(I.isBitwiseLogicOp() && "Unexpected opcode for bswap simplifying");
88 Value *OldLHS = I.getOperand(0);
89 Value *OldRHS = I.getOperand(1);
111 Value *BinOp = Builder.CreateBinOp(I.getOpcode(), NewLHS, NewRHS);
114 return Builder.CreateCall(F, BinOp);
121 const APInt &Hi, bool isSigned,
124 "Lo is not < Hi in range emission code!");
131 if (isSigned ? Lo.isMinSignedValue() : Lo.isMinValue()) {
141 return Builder.CreateICmp(Pred, VMinusLo, HiMinusLo);
194 unsigned MaskVal = 0;
195 if (CCst && CCst->isZero()) {
285 Value *L11, *L12, *L21, *L22;
288 L21 = L22 = L1 = nullptr;
313 if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
316 } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
333 if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
338 } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
357 if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
362 } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
377 } else if (L12 == A) {
380 } else if (L21 == A) {
383 } else if (L22 == A) {
461 return Builder.CreateICmp(NewCC, NewAnd, NewMaskedValue);
465 return (C1->getValue() & C2->getValue()) == C1->getValue();
468 return (C1->getValue() & C2->getValue()) == C2->getValue();
477 if (!IsSubSetOrEqual(BCst, DCst) && !IsSuperSetOrEqual(BCst, DCst))
489 if (IsSubSetOrEqual(BCst, DCst))
500 if (IsSuperSetOrEqual(BCst, DCst))
505 assert(IsSubSetOrEqual(BCst, DCst) && "Precondition due to above code");
523 "Expected equality predicates for masked type of icmps.");
535 LHS, RHS, IsAnd, A, B, C, D, E,
541 RHS, LHS, IsAnd, A, D, E, B, C,
553 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr, *E = nullptr;
560 "Expected equality predicates for masked type of icmps.");
561 unsigned LHSMask = MaskPair->first;
562 unsigned RHSMask = MaskPair->second;
563 unsigned Mask = LHSMask & RHSMask;
568 LHS, RHS, IsAnd, A, B, C, D, E, PredL, PredR, LHSMask, RHSMask,
602 return Builder.CreateICmp(NewCC, NewAnd, Zero);
609 return Builder.CreateICmp(NewCC, NewAnd, NewOr);
616 return Builder.CreateICmp(NewCC, NewAnd2, A);
636 else if (NewMask == DCst->getValue())
649 else if (NewMask == DCst->getValue())
681 return Builder.CreateICmp(NewCC, NewAnd, NewOr2);
728 default:
      return nullptr;
739 return Builder.CreateICmp(NewPred, Input, RangeEnd);
769 if (Xor.isPowerOf2()) {
782 if (C1->isNullValue() && C2->isAllOnesValue())
785 if (*C1 == *C2 - 1) {
799 Value *InstCombinerImpl::foldAndOrOfICmpsOfAndWithPow2(ICmpInst *LHS,
802 bool JoinedByAnd = Logic.getOpcode() == Instruction::And;
820 if (A == D || B == D)
874 auto tryToMatchSignedTruncationCheck = [](ICmpInst *ICmp, Value *&X,
875                                           APInt &SignBitMask) -> bool {
877 const APInt *I01, *I1;
892 if (tryToMatchSignedTruncationCheck(ICmp1, X1, HighestBit))
894 else if (tryToMatchSignedTruncationCheck(ICmp0, X1, HighestBit))
899 assert(HighestBit.isPowerOf2() && "expected to be power of two (non-zero)");
903 APInt &UnsetBitsMask) -> bool {
907 Pred, X, UnsetBitsMask,
915 UnsetBitsMask = *Mask;
924 if (!tryToDecompose(OtherICmp, X0, UnsetBitsMask))
942 APInt SignBitsMask = ~(HighestBit - 1U);
949 if (!UnsetBitsMask.isSubsetOf(SignBitsMask)) {
950 APInt OtherHighestBit = (~UnsetBitsMask) + 1U;
959 CxtI.getName() + ".simplified");
1004 auto IsKnownNonZero = [&](Value *V) {
1011 if (match(UnsignedICmp,
1016 if (!IsKnownNonZero(NonZero))
1018 return IsKnownNonZero(NonZero);
1033 IsAnd && GetKnownNonZeroAndOther(B, A))
1039 !IsAnd && GetKnownNonZeroAndOther(B, A))
1047 if (!match(UnsignedICmp,
1086 bool IsAnd = Logic.getOpcode() == Instruction::And;
1087 assert((IsAnd || Logic.getOpcode() == Instruction::Or) && "Wrong logic op");
1115 if (!SubstituteCmp) {
1120 SubstituteCmp = Builder.CreateICmp(Pred1, Y, C);
1132 if (Value *V = foldAndOrOfICmpsOfAndWithPow2(LHS, RHS, And))
1161 if (Value *V = simplifyRangeCheck(LHS, RHS, false))
1165 if (Value *V = simplifyRangeCheck(RHS, LHS, false))
1192 if (LHSC == RHSC && PredL == PredR) {
1199 return Builder.CreateICmp(PredL, NewOr, LHSC);
1209 ConstantInt *AndC, *SmallC = nullptr, *BigC = nullptr;
1223 if (SmallC && BigC) {
1229 if ((Low & AndC->getValue()).isNullValue() &&
1230     (Low & BigC->getValue()).isNullValue()) {
1234 return Builder.CreateICmp(PredL, NewAnd, NewVal);
1275 assert(LHSC != RHSC && "Compares not folded above?");
1287 return Builder.CreateICmpULT(LHS0, LHSC);
1295 return Builder.CreateICmpSLT(LHS0, LHSC);
1313 return Builder.CreateICmp(PredL, LHS0, RHSC);
1331 return Builder.CreateICmp(PredL, LHS0, RHSC);
1353 if (LHS0 == RHS1 && RHS0 == LHS1) {
1373 if (LHS0 == RHS0 && LHS1 == RHS1) {
1376 unsigned NewPred = IsAnd ? FCmpCodeL & FCmpCodeR : FCmpCodeL | FCmpCodeR;
1391 return Builder.CreateFCmp(PredL, LHS0, RHS0);
1404 assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
1405 "Expecting and/or op for fcmp transform");
1425 Pred != NanPred || X->getType() != Y->getType())
1429 Pred != NanPred || X->getType() != Y->getType())
1435 if (auto *NewFCmpInst = dyn_cast<FCmpInst>(NewFCmp)) {
1437 NewFCmpInst->copyIRFlags(Op0);
1438 NewFCmpInst->andIRFlags(BO10);
1448 auto Opcode = I.getOpcode();
1449 assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
1450 "Trying to match De Morgan's Laws with something other than and/or");
1453 Opcode = (Opcode == Instruction::And) ? Instruction::Or : Instruction::And;
1460 Value *AndOr = Builder.CreateBinOp(Opcode, A, B, I.getName() + ".demorgan");
1467 bool InstCombinerImpl::shouldOptimizeCast(CastInst *CI) {
1476 if (const auto *PrecedingCI = dyn_cast<CastInst>(CastSrc))
1477 if (isEliminableCastPair(PrecedingCI, CI))
1502 if (ZextTruncC == C) {
1505 return new ZExtInst(NewOp, DestTy);
1512 if (SextTruncC == C) {
1515 return new SExtInst(NewOp, DestTy);
1524 auto LogicOpc = I.getOpcode();
1525 assert(I.isBitwiseLogicOp() && "Unexpected opcode for bitwise logic folding");
1527 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1528 CastInst *Cast0 = dyn_cast<CastInst>(Op0);
1534 Type *DestTy = I.getType();
1542 CastInst *Cast1 = dyn_cast<CastInst>(Op1);
1556 if (shouldOptimizeCast(Cast0) && shouldOptimizeCast(Cast1)) {
1557 Value *NewOp = Builder.CreateBinOp(LogicOpc, Cast0Src, Cast1Src,
1563 if (LogicOpc == Instruction::Xor)
1568 ICmpInst *ICmp0 = dyn_cast<ICmpInst>(Cast0Src);
1569 ICmpInst *ICmp1 = dyn_cast<ICmpInst>(Cast1Src);
1570 if (ICmp0 && ICmp1) {
1571 Value *Res = LogicOpc == Instruction::And ? foldAndOfICmps(ICmp0, ICmp1, I)
1572                                            : foldOrOfICmps(ICmp0, ICmp1, I);
1580 FCmpInst *FCmp0 = dyn_cast<FCmpInst>(Cast0Src);
1581 FCmpInst *FCmp1 = dyn_cast<FCmpInst>(Cast1Src);
1583 if (Value *R = foldLogicOfFCmps(FCmp0, FCmp1, LogicOpc == Instruction::And))
1591 assert(I.getOpcode() == Instruction::And);
1592 Value *Op0 = I.getOperand(0);
1593 Value *Op1 = I.getOperand(1);
1601 return BinaryOperator::CreateXor(A, B);
1617 assert(I.getOpcode() == Instruction::Or);
1618 Value *Op0 = I.getOperand(0);
1619 Value *Op1 = I.getOperand(1);
1644 return BinaryOperator::CreateXor(A, B);
1664 Value *Op0 = And.getOperand(0), *Op1 = And.getOperand(1);
1678 if (!isa<VectorType>(Ty) && !shouldChangeType(Ty, X->getType()))
1685 if (Opc == Instruction::LShr || Opc == Instruction::Shl)
1692 Value *NewBO = Opc == Instruction::Sub ? Builder.CreateBinOp(Opc, NewC, X)
1693                                        : Builder.CreateBinOp(Opc, X, NewC);
1701 Type *Ty = I.getType();
1704 SQ.getWithInstruction(&I)))
1705 return replaceInstUsesWith(I, V);
1707 if (SimplifyAssociativeOrCommutative(I))
1715 if (SimplifyDemandedInstructionBits(I))
1723 if (Value *V = SimplifyUsingDistributiveLaws(I))
1724 return replaceInstUsesWith(I, V);
1727 return replaceInstUsesWith(I, V);
1729 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1748 return BinaryOperator::CreateXor(And, NewC);
1759 APInt Together = *C & *OrC;
1773 Value *NewRHS = Builder.CreateAnd(Y, Op1, Y->getName() + ".masked");
1779 Value *NewLHS = Builder.CreateAnd(X, Op1, X->getName() + ".masked");
1785 const APInt *ShiftC;
1792 return BinaryOperator::CreateLShr(Sext, ShAmtC);
1802 if ((*AddC & LowMask).isNullValue())
1803 return BinaryOperator::CreateAnd(X, Op1);
1809 if (Op0->hasOneUse() && C->isPowerOf2() && (*AddC & (*C - 1)) == 0) {
1810 assert((*C & *AddC) != 0 && "Expected common bit");
1812 return BinaryOperator::CreateXor(NewAnd, Op1);
1826 switch (Op0I->getOpcode()) {
1829 case Instruction::Xor:
1830 case Instruction::Or:
1831 case Instruction::Mul:
1833 case Instruction::Sub:
1840 if (AndRHSMask.isIntN(X->getType()->getScalarSizeInBits())) {
1843 Value *Op0LHS = Op0I->getOperand(0);
1844 if (isa<ZExtInst>(Op0LHS))
1845 BinOp = Builder.CreateBinOp(Op0I->getOpcode(), X, TruncC1);
1847 BinOp = Builder.CreateBinOp(Op0I->getOpcode(), TruncC1, X);
1849 auto *And = Builder.CreateAnd(BinOp, TruncC2);
1860 ICmpInst::Predicate::ICMP_EQ,
1863 X->getType()->getScalarSizeInBits())))) {
1864 auto *SExt = Builder.CreateSExt(X, Ty, X->getName() + ".signext");
1865 auto *SanitizedSignMask = cast<Constant>(Op1);
1873 return BinaryOperator::CreateAnd(SExt, SanitizedSignMask);
1879 if (Instruction *FoldedLogic = foldBinOpIntoSelectOrPhi(I))
1889 return BinaryOperator::CreateAnd(Op0, Builder.CreateNot(B));
1892 return BinaryOperator::CreateAnd(Op1, Builder.CreateNot(B));
1896 return BinaryOperator::CreateAnd(Op0, B);
1899 return BinaryOperator::CreateAnd(Op1, B);
1904 if (Op1->hasOneUse() || isFreeToInvert(C, C->hasOneUse()))
1905 return BinaryOperator::CreateAnd(Op0, Builder.CreateNot(C));
1910 if (Op0->hasOneUse() || isFreeToInvert(C, C->hasOneUse()))
1911 return BinaryOperator::CreateAnd(Op1, Builder.CreateNot(C));
1919 return BinaryOperator::CreateAnd(A, B);
1927 return BinaryOperator::CreateAnd(A, B);
1931 ICmpInst *LHS = dyn_cast<ICmpInst>(Op0);
1932 ICmpInst *RHS = dyn_cast<ICmpInst>(Op1);
1934 if (Value *Res = foldAndOfICmps(LHS, RHS, I))
1935 return replaceInstUsesWith(I, Res);
1940 if (auto *Cmp = dyn_cast<ICmpInst>(X))
1941 if (Value *Res = foldAndOfICmps(LHS, Cmp, I))
1942 return replaceInstUsesWith(I, Builder.CreateAnd(Res, Y));
1943 if (auto *Cmp = dyn_cast<ICmpInst>(Y))
1944 if (Value *Res = foldAndOfICmps(LHS, Cmp, I))
1945 return replaceInstUsesWith(I, Builder.CreateAnd(Res, X));
1948 if (auto *Cmp = dyn_cast<ICmpInst>(X))
1949 if (Value *Res = foldAndOfICmps(Cmp, RHS, I))
1950 return replaceInstUsesWith(I, Builder.CreateAnd(Res, Y));
1951 if (auto *Cmp = dyn_cast<ICmpInst>(Y))
1952 if (Value *Res = foldAndOfICmps(Cmp, RHS, I))
1953 return replaceInstUsesWith(I, Builder.CreateAnd(Res, X));
1957 if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0)))
1958 if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
1959 if (Value *Res = foldLogicOfFCmps(LHS, RHS, true))
1960 return replaceInstUsesWith(I, Res);
1965 if (Instruction *CastedAnd = foldCastedBitwiseLogic(I))
1971 A->getType()->isIntOrIntVectorTy(1))
1974 A->getType()->isIntOrIntVectorTy(1))
1987 if (sinkNotIntoOtherHandOfAndOrOr(I))
1995 bool MatchBitReversals) {
2003 for (auto *Inst : Insts)
2004 Worklist.push(Inst);
2012 unsigned Width = Or.getType()->getScalarSizeInBits();
2021 Value *ShVal0, *ShVal1, *ShAmt0, *ShAmt1;
2028 if (Or0->getOpcode() == BinaryOperator::LShr) {
2034 Or1->getOpcode() == BinaryOperator::LShr &&
2035 "Illegal or(shift,shift) pair");
2041 const APInt *LI, *RI;
2066 if (ShVal0 != ShVal1)
2096 Value *ShAmt = matchShiftAmount(ShAmt0, ShAmt1, Width);
2099 ShAmt = matchShiftAmount(ShAmt1, ShAmt0, Width);
2105 Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
2113 assert(Or.getOpcode() == Instruction::Or && "bswap requires an 'or'");
2114 Value *Op0 = Or.getOperand(0), *Op1 = Or.getOperand(1);
2118 if ((Width & 1) != 0)
2120 unsigned HalfWidth = Width / 2;
2123 if (!isa<ZExtInst>(Op0))
2127 Value *LowerSrc, *ShlVal, *UpperSrc;
2140 NewUpper = Builder.CreateShl(NewUpper, HalfWidth);
2143 return Builder.CreateCall(F, BinOp);
2148 Value *LowerBSwap, *UpperBSwap;
2151 return ConcatIntrinsicCalls(Intrinsic::bswap, UpperBSwap, LowerBSwap);
2155 Value *LowerBRev, *UpperBRev;
2158 return ConcatIntrinsicCalls(Intrinsic::bitreverse, UpperBRev, LowerBRev);
2165 unsigned NumElts = cast<FixedVectorType>(C1->getType())->getNumElements();
2166 for (unsigned i = 0; i != NumElts; ++i) {
2169 if (!EltC1 || !EltC2)
2186 Type *Ty = A->getType();
2213 Cond->getType()->isIntOrIntVectorTy(1) &&
2230 Cond->getType()->isIntOrIntVectorTy(1) &&
2244 Type *OrigType = A->getType();
2247 if (Value *Cond = getSelectCondition(A, B)) {
2267 if (Value *V = foldAndOrOfICmpsOfAndWithPow2(LHS, RHS, Or))
2273 auto *LHSC = dyn_cast<ConstantInt>(LHS1);
2274 auto *RHSC = dyn_cast<ConstantInt>(RHS1);
2310 APInt LowRangeDiff = RRangeLow ^ LRangeLow;
2311 APInt HighRangeDiff = RRangeHigh ^ LRangeHigh;
2312 APInt RangeDiff = LRangeLow.sgt(RRangeLow) ? LRangeLow - RRangeLow
2313                                            : RRangeLow - LRangeLow;
2315 if (LowRangeDiff.isPowerOf2() && LowRangeDiff == HighRangeDiff &&
2329 if (LHS0 == RHS1 && LHS1 == RHS0)
2331 if (LHS0 == RHS0 && LHS1 == RHS1) {
2346 Value *A = nullptr, *B = nullptr;
2363 if (A && B && B->getType()->isIntOrIntVectorTy())
2375 if (Value *V = simplifyRangeCheck(LHS, RHS, true))
2379 if (Value *V = simplifyRangeCheck(RHS, LHS, true))
2402 return Builder.CreateICmp(PredL, NewOr,
2416 return Builder.CreateICmpULE(LHS0, LHSC);
2455 assert(LHSC != RHSC && "Compares not folded above?");
2525 SQ.getWithInstruction(&I)))
2526 return replaceInstUsesWith(I, V);
2528 if (SimplifyAssociativeOrCommutative(I))
2536 if (SimplifyDemandedInstructionBits(I))
2544 if (Value *V = SimplifyUsingDistributiveLaws(I))
2545 return replaceInstUsesWith(I, V);
2548 return replaceInstUsesWith(I, V);
2550 if (Instruction *FoldedLogic = foldBinOpIntoSelectOrPhi(I))
2553 if (Instruction *BSwap = matchBSwapOrBitReverse(I, true,
2561 return replaceInstUsesWith(I, Concat);
2574 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2581 Value *V1 = nullptr, *V2 = nullptr;
2582 if ((C1->getValue() & C2->getValue()).isNullValue()) {
2590 return BinaryOperator::CreateAnd(A,
2598 return BinaryOperator::CreateAnd(B,
2605 (C3->getValue() & ~C1->getValue()).isNullValue() &&
2607 (C4->getValue() & ~C2->getValue()).isNullValue()) {
2609 return BinaryOperator::CreateAnd(V2,
2619 return BinaryOperator::CreateOr(Builder.CreateAnd(X, C1), B);
2622 return BinaryOperator::CreateOr(Builder.CreateAnd(X, C2), A);
2626 return BinaryOperator::CreateXor(Builder.CreateAnd(X, C1), B);
2629 return BinaryOperator::CreateXor(Builder.CreateAnd(X, C2), A);
2636 if (Op0->hasOneUse() || Op1->hasOneUse()) {
2638 if (Value *V = matchSelectFromAndOr(A, C, B, D))
2639 return replaceInstUsesWith(I, V);
2640 if (Value *V = matchSelectFromAndOr(A, C, D, B))
2641 return replaceInstUsesWith(I, V);
2642 if (Value *V = matchSelectFromAndOr(C, A, B, D))
2643 return replaceInstUsesWith(I, V);
2644 if (Value *V = matchSelectFromAndOr(C, A, D, B))
2645 return replaceInstUsesWith(I, V);
2646 if (Value *V = matchSelectFromAndOr(B, D, A, C))
2647 return replaceInstUsesWith(I, V);
2648 if (Value *V = matchSelectFromAndOr(B, D, C, A))
2649 return replaceInstUsesWith(I, V);
2650 if (Value *V = matchSelectFromAndOr(D, B, A, C))
2651 return replaceInstUsesWith(I, V);
2652 if (Value *V = matchSelectFromAndOr(D, B, C, A))
2653 return replaceInstUsesWith(I, V);
2660 return BinaryOperator::CreateOr(Op0, C);
2665 return BinaryOperator::CreateOr(Op1, C);
2669 return BinaryOperator::CreateOr(Op1, Builder.CreateAnd(A, C));
2675 bool SwappedForXor = false;
2678 SwappedForXor = true;
2685 if (Op0 == A || Op0 == B)
2686 return BinaryOperator::CreateOr(A, B);
2690 return BinaryOperator::CreateOr(A, B);
2694 return BinaryOperator::CreateOr(Not, Op0);
2697 Value *Not = Builder.CreateNot(A, A->getName() + ".not");
2698 return BinaryOperator::CreateOr(Not, Op0);
2706 if ((Op0 == B->getOperand(0) || Op0 == B->getOperand(1)) &&
2707     Op1->hasOneUse() && (B->getOpcode() == Instruction::Or ||
2708                          B->getOpcode() == Instruction::Xor)) {
2709 Value *NotOp = Op0 == B->getOperand(0) ? B->getOperand(1) :
2712 return BinaryOperator::CreateOr(Not, Op0);
2719 ICmpInst *LHS = dyn_cast<ICmpInst>(Op0);
2720 ICmpInst *RHS = dyn_cast<ICmpInst>(Op1);
2722 if (Value *Res = foldOrOfICmps(LHS, RHS, I))
2723 return replaceInstUsesWith(I, Res);
2729 if (auto *Cmp = dyn_cast<ICmpInst>(X))
2730 if (Value *Res = foldOrOfICmps(LHS, Cmp, I))
2731 return replaceInstUsesWith(I, Builder.CreateOr(Res, Y));
2732 if (auto *Cmp = dyn_cast<ICmpInst>(Y))
2733 if (Value *Res = foldOrOfICmps(LHS, Cmp, I))
2734 return replaceInstUsesWith(I, Builder.CreateOr(Res, X));
2737 if (auto *Cmp = dyn_cast<ICmpInst>(X))
2738 if (Value *Res = foldOrOfICmps(Cmp, RHS, I))
2739 return replaceInstUsesWith(I, Builder.CreateOr(Res, Y));
2740 if (auto *Cmp = dyn_cast<ICmpInst>(Y))
2741 if (Value *Res = foldOrOfICmps(Cmp, RHS, I))
2742 return replaceInstUsesWith(I, Builder.CreateOr(Res, X));
2746 if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0)))
2747 if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
2748 if (Value *Res = foldLogicOfFCmps(LHS, RHS, false))
2749 return replaceInstUsesWith(I, Res);
2759 A->getType()->isIntOrIntVectorTy(1))
2762 A->getType()->isIntOrIntVectorTy(1))
2775 return BinaryOperator::CreateOr(Inner, CI);
2782 Value *X = nullptr, *Y = nullptr;
2783 if (Op0->hasOneUse() && Op1->hasOneUse() &&
2795 Type *Ty = I.getType();
2807 canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(I))
2811 Value *Mul, *Ov, *MulIsNotZero, *UMulWithOv;
2828 if (match(UMulWithOv, m_Intrinsic<Intrinsic::umul_with_overflow>(
2832 return BinaryOperator::CreateAnd(NotNullA, NotNullB);
2837 if (sinkNotIntoOtherHandOfAndOrOr(I))
2847 assert(I.getOpcode() == Instruction::Xor);
2848 Value *Op0 = I.getOperand(0);
2849 Value *Op1 = I.getOperand(1);
2860 return BinaryOperator::CreateXor(A, B);
2868 return BinaryOperator::CreateXor(A, B);
2876 return BinaryOperator::CreateXor(A, B);
2898 assert(I.getOpcode() == Instruction::Xor && I.getOperand(0) == LHS &&
2899        I.getOperand(1) == RHS && "Should be 'xor' with these operands");
2931 return Builder.CreateICmpSLT(Builder.CreateXor(LHS0, RHS0), Zero);
2940 return Builder.CreateICmpSGT(Builder.CreateXor(LHS0, RHS0), MinusOne);
2956 if (OrICmp == LHS && AndICmp == RHS) {
2961 if (OrICmp == RHS && AndICmp == LHS) {
2966 if (X && Y && (Y->hasOneUse() || canFreelyInvertAllUsersOf(Y, &I))) {
2968 Y->setPredicate(Y->getInversePredicate());
2970 if (!Y->hasOneUse()) {
2977 Builder.SetInsertPoint(Y->getParent(), ++(Y->getIterator()));
2980 Worklist.pushUsersToWorkList(*Y);
2981 Y->replaceUsesWithIf(NotY,
2982                      [NotY](Use &U) { return U.getUser() != NotY; });
2985 return Builder.CreateAnd(LHS, RHS);
3022 return BinaryOperator::CreateXor(NewA, X);
3034 return BinaryOperator::CreateOr(LHS, RHS);
3063 return BinaryOperator::CreateXor(NotX, Y, I.getName() + ".demorgan");
3073 switch (I.getOpcode()) {
3074 case Instruction::And:
3075 NewOpc = Instruction::Or;
3077 case Instruction::Or:
3078 NewOpc = Instruction::And;
3100 replaceInstUsesWith(I, NewBinOp);
3104 freelyInvertAllUsersOf(NewBinOp);
3113 SQ.getWithInstruction(&I)))
3114 return replaceInstUsesWith(I, V);
3116 if (SimplifyAssociativeOrCommutative(I))
3126 if (Value *V = SimplifyUsingDistributiveLaws(I))
3127 return replaceInstUsesWith(I, V);
3131 if (SimplifyDemandedInstructionBits(I))
3135 return replaceInstUsesWith(I, V);
3137 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3138 Type *Ty = I.getType();
3147 return BinaryOperator::CreateOr(Op0, Op1);
3158 return BinaryOperator::CreateOr(X, NotY);
3164 return BinaryOperator::CreateAnd(X, NotY);
3173 if (NotVal->getOpcode() == Instruction::And ||
3174     NotVal->getOpcode() == Instruction::Or) {
3184 if (NotVal->getOpcode() == Instruction::And)
3185 return BinaryOperator::CreateOr(NotX, NotY);
3186 return BinaryOperator::CreateAnd(NotX, NotY);
3197 return BinaryOperator::CreateAShr(X, Y);
3255 return replaceInstUsesWith(I, Op0);
3309 auto *Opnd0 = cast<Instruction>(Builder.CreateLShr(X, C2));
3310 Opnd0->takeName(cast<Instruction>(Op0));
3311 Opnd0->setDebugLoc(I.getDebugLoc());
3316 if (Instruction *FoldedLogic = foldBinOpIntoSelectOrPhi(I))
3322 return BinaryOperator::CreateAnd(X, Builder.CreateNot(Op0));
3326 return BinaryOperator::CreateAnd(X, Builder.CreateNot(Op1));
3331 return BinaryOperator::CreateAnd(Op0, Builder.CreateNot(X));
3339 return BinaryOperator::CreateAnd(Op1, Builder.CreateNot(X));
3345 return BinaryOperator::CreateXor(
3351 return BinaryOperator::CreateXor(
3357 return BinaryOperator::CreateOr(A, B);
3361 return BinaryOperator::CreateOr(A, B);
3371 return BinaryOperator::CreateOr(A, B);
3378 if (B == C || B == D)
3384 return BinaryOperator::CreateAnd(Builder.CreateXor(B, C), NotA);
3388 if (auto *LHS = dyn_cast<ICmpInst>(I.getOperand(0)))
3389 if (auto *RHS = dyn_cast<ICmpInst>(I.getOperand(1)))
3390 if (Value *V = foldXorOfICmps(LHS, RHS, I))
3391 return replaceInstUsesWith(I, V);
3393 if (Instruction *CastedXor = foldCastedBitwiseLogic(I))
3413 auto *Add = cast<BinaryOperator>(Op0);
3414 Value *Neg = Builder.CreateNeg(A, "", Add->hasNoUnsignedWrap(),
3415                                Add->hasNoSignedWrap());
3471 if (auto *Sel = dyn_cast<SelectInst>(Op0)) {
3472 Value *TV = Sel->getTrueValue();
3473 Value *FV = Sel->getFalseValue();
3474 auto *CmpT = dyn_cast<CmpInst>(TV);
3475 auto *CmpF = dyn_cast<CmpInst>(FV);
3476 bool InvertibleT = (CmpT && CmpT->hasOneUse()) || isa<Constant>(TV);
3477 bool InvertibleF = (CmpF && CmpF->hasOneUse()) || isa<Constant>(FV);
3478 if (InvertibleT && InvertibleF) {
3480 CmpT->setPredicate(CmpT->getInversePredicate());
3484 CmpF->setPredicate(CmpF->getInversePredicate());
3487 return replaceInstUsesWith(I, Sel);
3503 return BinaryOperator::CreateXor(Builder.CreateXor(X, Y), C1);
static Value * foldIsPowerOf2(ICmpInst *Cmp0, ICmpInst *Cmp1, bool JoinedByAnd, InstCombiner::BuilderTy &Builder)
Reduce a pair of compares that check if a value has exactly 1 bit set.
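Illustration (a standalone sketch, not code from this file; the helper names below are local stand-ins): the pair of compares in question is the usual "x != 0 && (x & (x - 1)) == 0" form, which agrees with counting set bits.

  #include <cassert>
  #include <cstdint>

  // Two-compare form that such a fold recognizes.
  static bool isPowerOf2TwoCmps(uint32_t X) {
    return X != 0 && (X & (X - 1)) == 0;
  }

  // Same property via a manual popcount.
  static bool isPowerOf2Popcount(uint32_t X) {
    unsigned Bits = 0;
    for (uint32_t V = X; V != 0; V &= V - 1)
      ++Bits;
    return Bits == 1;
  }

  int main() {
    for (uint64_t X = 0; X <= 0x1FFFF; ++X)
      assert(isPowerOf2TwoCmps(uint32_t(X)) == isPowerOf2Popcount(uint32_t(X)));
    return 0;
  }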
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
static Constant * getNot(Constant *C)
bool MaskedValueIsZero(const Value *V, const APInt &Mask, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if 'V & Mask' is known to be zero.
@ Or
Bitwise or logical OR of integers.
static bool isFreeToInvert(Value *V, bool WillInvertAllUses)
Return true if the specified value is free to invert (apply ~ to).
bool hasOneUse() const
Return true if there is exactly one use of this value.
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
IntegerType * getType() const
getType - Specialize the getType() method to always return an IntegerType, which reduces the amount o...
static Constant * getZExt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Instruction * visitXor(BinaryOperator &I)
Value * SimplifyAndInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an And, fold the result or return null.
static BinaryOperator * CreateNot(Value *Op, const Twine &Name="", Instruction *InsertBefore=nullptr)
SelectPatternFlavor Flavor
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
bool isMaxValue(bool isSigned) const
This function will return true iff this constant represents the largest value that may be represented...
static Instruction * reassociateFCmps(BinaryOperator &BO, InstCombiner::BuilderTy &Builder)
This a limited reassociation for a special case (see above) where we are checking if two values are e...
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
const APInt & getValue() const
Return the constant as an APInt value reference.
static Constant * getSExt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", Instruction *InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
static Instruction * matchOrConcat(Instruction &Or, InstCombiner::BuilderTy &Builder)
Attempt to combine or(zext(x),shl(zext(y),bw/2) concat packing patterns.
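A hedged standalone check of the packing identity behind this matcher, assuming 16-bit halves packed into a 32-bit value; the bswap helpers below are local functions, not the LLVM intrinsics.

  #include <cassert>
  #include <cstdint>

  static uint16_t bswap16(uint16_t X) { return uint16_t((X << 8) | (X >> 8)); }
  static uint32_t bswap32(uint32_t X) {
    return (X >> 24) | ((X >> 8) & 0xFF00u) | ((X << 8) & 0xFF0000u) | (X << 24);
  }

  int main() {
    uint16_t A = 0x1234, B = 0xBEEF;
    // or(zext(bswap(A)), shl(zext(bswap(B)), 16)) packs two swapped halves...
    uint32_t Packed = uint32_t(bswap16(A)) | (uint32_t(bswap16(B)) << 16);
    // ...which equals a single byte swap of the plain concatenation.
    uint32_t Concat = (uint32_t(A) << 16) | B;
    assert(Packed == bswap32(Concat));
    return 0;
  }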
static unsigned getMaskedICmpType(Value *A, Value *B, Value *C, ICmpInst::Predicate Pred)
Return the set of patterns (from MaskedICmpType) that (icmp SCC (A & B), C) satisfies.
m_Intrinsic_Ty< Opnd0 >::Ty m_BitReverse(const Opnd0 &Op0)
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given value is known to be non-zero when defined.
static Value * getFCmpValue(unsigned Code, Value *LHS, Value *RHS, InstCombiner::BuilderTy &Builder)
This is the complement of getFCmpCode, which turns an opcode and two operands into either a FCmp inst...
@ ICMP_SGT
signed greater than
The instances of the Type class are immutable: once they are created, they are never changed.
bool isEquality() const
Return true if this predicate is either EQ or NE.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return the number of times the sign bit of the register is replicated into the other bits.
@ ICMP_SLE
signed less or equal
bool intersects(const APInt &RHS) const
This operation tests if there are any pairs of corresponding bits between this APInt and RHS that are...
LLVM_NODISCARD T pop_back_val()
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
bool isMinValue(bool isSigned) const
This function will return true iff this constant represents the smallest value that may be represente...
BinaryOp_match< ValTy, cst_pred_ty< is_all_ones >, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
SelectPatternFlavor
Specific patterns of select instructions we can match.
void swapOperands()
Exchange the two operands to this instruction in such a way that it does not modify the semantics of ...
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
deferredval_ty< Value > m_Deferred(Value *const &V)
A commutative-friendly version of m_Specific().
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
Type * getDestTy() const
Return the destination type, as a convenience.
match_unless< Ty > m_Unless(const Ty &M)
Match if the inner matcher does NOT match.
bool isSignMask() const
Check if the APInt's value is returned by getSignMask.
CmpClass_match< LHS, RHS, ICmpInst, ICmpInst::Predicate, true > m_c_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R)
Matches an ICmp with a predicate over LHS and RHS in either order.
OneUse_match< T > m_OneUse(const T &SubPattern)
@ FCMP_ULT
1 1 0 0 True if unordered or less than
static Value * foldAndOrOfEqualityCmpsWithConstants(ICmpInst *LHS, ICmpInst *RHS, bool JoinedByAnd, InstCombiner::BuilderTy &Builder)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
Instruction * matchBSwapOrBitReverse(Instruction &I, bool MatchBSwaps, bool MatchBitReversals)
Given an initial instruction, check to see if it is the root of a bswap/bitreverse idiom.
This is the shared class of boolean and integer constants.
bool isNonNegative() const
Returns true if this value is known to be non-negative.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", Instruction *InsertBefore=nullptr, Instruction *MDFrom=nullptr)
@ And
Bitwise or logical AND of integers.
bool predicatesFoldable(CmpInst::Predicate P1, CmpInst::Predicate P2)
Return true if both predicates match sign or if at least one of them is an equality comparison (which...
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
bool match(Val *V, const Pattern &P)
specific_intval< true > m_SpecificIntAllowUndef(APInt V)
static Instruction * foldLogicCastConstant(BinaryOperator &Logic, CastInst *Cast, InstCombiner::BuilderTy &Builder)
Fold {and,or,xor} (cast X), C.
@ ICMP_ULE
unsigned less or equal
static bool isMinOrMax(SelectPatternFlavor SPF)
When implementing this min/max pattern as fcmp; select, does the fcmp have to be ordered?
Value * SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an ICmpInst, fold the result or return null.
This instruction compares its operands according to the predicate given to the constructor.
cstfp_pred_ty< is_pos_zero_fp > m_PosZeroFP()
Match a floating-point positive zero.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
bool isVectorTy() const
True if this is an instance of VectorType.
CastClass_match< OpTy, Instruction::ZExt > m_ZExt(const OpTy &Op)
Matches ZExt.
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches a Add with LHS and RHS in either order.
MaskedICmpType
Classify (icmp eq (A & B), C) and (icmp ne (A & B), C) as matching patterns that can be simplified.
static Constant * getAllOnesValue(Type *Ty)
BinaryOps getOpcode() const
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
uint64_t getZExtValue() const
Get zero extended value.
static Value * foldUnsignedUnderflowCheck(ICmpInst *ZeroICmp, ICmpInst *UnsignedICmp, bool IsAnd, const SimplifyQuery &Q, InstCombiner::BuilderTy &Builder)
Commuted variants are assumed to be handled by calling this function again with the parameters swappe...
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
static Constant * getXor(Constant *C1, Constant *C2)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWSub(const LHS &L, const RHS &R)
User * getUser() const
Returns the User that contains this Use.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
void removeFromParent()
This method unlinks 'this' from the containing basic block, but does not delete it.
apint_match m_APIntAllowUndef(const APInt *&Res)
Match APInt while allowing undefs in splat vector constants.
Type * getSrcTy() const
Return the source type, as a convenience.
unsigned getICmpCode(const ICmpInst *ICI, bool InvertPred=false)
Encode an icmp predicate into a three-bit mask.
static Instruction * foldXorToXor(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
A ^ B can be specified using other logic ops in a variety of patterns.
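For instance (a standalone sketch, checked exhaustively over small values; the identities hold at any width), three common spellings of xor that such a fold can normalize:

  #include <cassert>
  #include <cstdint>

  int main() {
    for (uint32_t A = 0; A < 256; ++A)
      for (uint32_t B = 0; B < 256; ++B) {
        uint32_t X = A ^ B;
        assert(((A | B) & ~(A & B)) == X); // or/and/not form
        assert(((A & ~B) | (~A & B)) == X); // two masked halves
        assert(((A | B) ^ (A & B)) == X);   // or minus the overlap
      }
    return 0;
  }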
bool isAllOnesValue() const
Determine if all bits are set.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
Instruction * visitAnd(BinaryOperator &I)
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
static Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Value * foldSignedTruncationCheck(ICmpInst *ICmp0, ICmpInst *ICmp1, Instruction &CxtI, InstCombiner::BuilderTy &Builder)
General pattern: X & Y.
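A standalone illustration of the kind of signed-truncation check involved, assuming an i32 value tested for fitting in i8; the add-then-unsigned-compare spelling is one of the forms such a check can take, and the helper names are illustrative only.

  #include <cassert>
  #include <cstdint>

  // "Does X fit in a signed 8-bit value?" written two ways.
  static bool fitsInI8ViaTwoCmps(int32_t X) {
    return X >= -128 && X <= 127;       // two signed compares
  }
  static bool fitsInI8ViaBias(int32_t X) {
    return uint32_t(X) + 128u < 256u;   // single unsigned compare after biasing
  }

  int main() {
    for (int64_t X = -70000; X <= 70000; ++X)
      assert(fitsInI8ViaTwoCmps(int32_t(X)) == fitsInI8ViaBias(int32_t(X)));
    return 0;
  }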
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
static Instruction * foldAndToXor(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
static Instruction * matchFunnelShift(Instruction &Or, InstCombinerImpl &IC)
Match UB-safe variants of the funnel shift intrinsic.
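For reference, a rotate spelled with masked shift amounts is the kind of UB-safe shift/or pattern that maps onto a funnel shift; this is a standalone sketch, not the matcher itself.

  #include <cassert>
  #include <cstdint>

  // Masking both amounts keeps every shift in range, so the problematic
  // "shift by 32" case never appears even when Shift is 0 or >= 32.
  static uint32_t rotlSafe(uint32_t X, uint32_t Shift) {
    return (X << (Shift & 31)) | (X >> ((32 - Shift) & 31));
  }

  int main() {
    const uint32_t V = 0x12345678u;
    for (uint32_t S = 0; S < 64; ++S) {
      uint32_t R = S % 32;
      uint32_t Expected = R == 0 ? V : ((V << R) | (V >> (32 - R)));
      assert(rotlSafe(V, S) == Expected);
    }
    return 0;
  }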
This is an important base class in LLVM.
static Value * foldLogOpOfMaskedICmpsAsymmetric(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd, Value *A, Value *B, Value *C, Value *D, Value *E, ICmpInst::Predicate PredL, ICmpInst::Predicate PredR, unsigned LHSMask, unsigned RHSMask, InstCombiner::BuilderTy &Builder)
Try to fold (icmp(A & B) ==/!= 0) &/| (icmp(A & D) ==/!= E) into a single (icmp(A & X) ==/!...
Value * insertRangeTest(Value *V, const APInt &Lo, const APInt &Hi, bool isSigned, bool Inside)
Emit a computation of: (V >= Lo && V < Hi) if Inside is true, otherwise (V < Lo || V >= Hi).
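The emitted form relies on the usual subtract-and-unsigned-compare trick (visible at line 141 of the listing, where V - Lo is compared against Hi - Lo); a standalone check of that equivalence with arbitrary constants:

  #include <cassert>
  #include <cstdint>

  int main() {
    const uint32_t Lo = 10, Hi = 20;
    for (uint32_t V = 0; V < 100; ++V) {
      bool Inside = V >= Lo && V < Hi;      // the two-compare range test
      bool Folded = (V - Lo) < (Hi - Lo);   // single compare after biasing
      assert(Inside == Folded);
      assert((V < Lo || V >= Hi) == !Folded); // the "outside" form
    }
    return 0;
  }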
match_combine_and< class_match< Constant >, match_unless< class_match< ConstantExpr > > > m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
This instruction compares its operands according to the predicate given to the constructor.
static Constant * replaceUndefsWith(Constant *C, Constant *Replacement)
Try to replace undefined constant C or undefined elements in C with Replacement.
static Value * foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd, Value *A, Value *B, Value *C, Value *D, Value *E, ICmpInst::Predicate PredL, ICmpInst::Predicate PredR, InstCombiner::BuilderTy &Builder)
Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E) into a single (icmp(A & X) ==/!...
Value * SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
bool hasNUsesOrMore(unsigned N) const
Return true if this value has N uses or more.
static Instruction * visitMaskedMerge(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
If we have a masked merge, in the canonical form of: (assuming that A only has one use....
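A standalone check of the masked-merge identity, with the xor-based canonical form on one side and the plain and/or spelling on the other (helper names are illustrative, not from this file):

  #include <cassert>
  #include <cstdint>

  static uint32_t mergeXorForm(uint32_t X, uint32_t Y, uint32_t M) {
    return ((X ^ Y) & M) ^ Y;   // bits of X where M is set, bits of Y elsewhere
  }
  static uint32_t mergeAndOrForm(uint32_t X, uint32_t Y, uint32_t M) {
    return (X & M) | (Y & ~M);
  }

  int main() {
    uint32_t Vals[] = {0u, 1u, 0xDEADBEEFu, 0xFFFFFFFFu, 0x0F0F0F0Fu};
    for (uint32_t X : Vals)
      for (uint32_t Y : Vals)
        for (uint32_t M : Vals)
          assert(mergeXorForm(X, Y, M) == mergeAndOrForm(X, Y, M));
    return 0;
  }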
static Constant * getOr(Constant *C1, Constant *C2)
static unsigned conjugateICmpMask(unsigned Mask)
Convert an analysis of a masked ICmp into its equivalent if all boolean operations had the opposite s...
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
static bool canFreelyInvertAllUsersOf(Value *V, Value *IgnoredUser)
Given i1 V, can every user of V be freely adapted if V is changed to !V ? InstCombine's freelyInvertA...
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, OptimizationRemarkEmitter *ORE=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
static Instruction * sinkNotIntoXor(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Value * simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1, bool Inverted)
Try to fold a signed range checked with lower bound 0 to an unsigned icmp.
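A small standalone check of the underlying equivalence, assuming a known-non-negative upper bound C (the constant here is arbitrary):

  #include <cassert>
  #include <cstdint>

  int main() {
    const int32_t C = 1000; // known-positive upper bound
    for (int64_t V = -5000; V <= 5000; ++V) {
      int32_t X = int32_t(V);
      bool SignedRange = X >= 0 && X < C;                 // icmp sge + icmp slt
      bool UnsignedCmp = uint32_t(X) < uint32_t(C);       // single icmp ult
      assert(SignedRange == UnsignedCmp);
    }
    return 0;
  }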
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
SimplifyQuery getWithInstruction(Instruction *I) const
cst_pred_ty< is_negative > m_Negative()
Match an integer or vector of negative values.
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
This class represents zero extension of integer types.
Class for arbitrary precision integers.
Instruction * visitOr(BinaryOperator &I)
@ ICMP_SLT
signed less than
CastClass_match< OpTy, Instruction::SExt > m_SExt(const OpTy &Op)
Matches SExt.
specific_intval< false > m_SpecificInt(APInt V)
Match a specific integer value or vector with all elements equal to the value.
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
static APInt getAllOnesValue(unsigned numBits)
Get the all-ones value.
cst_pred_ty< icmp_pred_with_threshold > m_SpecificInt_ICMP(ICmpInst::Predicate Predicate, const APInt &Threshold)
Match an integer or vector with every element comparing 'pred' (eq/ne/...) to Threshold.
@ ICMP_ULT
unsigned less than
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
Type * getType() const
All values are typed, get the type of this value.
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
bool recognizeBSwapOrBitReverseIdiom(Instruction *I, bool MatchBSwaps, bool MatchBitReversals, SmallVectorImpl< Instruction * > &InsertedInsts)
Try to match a bswap or bitreverse idiom.
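For context, the classic shift-and-mask byte-swap idiom that this kind of recognition targets, written as a standalone sketch (the helper is local, not the intrinsic):

  #include <cassert>
  #include <cstdint>

  // Open-coded byte swap that can be collapsed into a single bswap call.
  static uint32_t bswapIdiom(uint32_t X) {
    return ((X & 0x000000FFu) << 24) | ((X & 0x0000FF00u) << 8) |
           ((X & 0x00FF0000u) >> 8) | ((X & 0xFF000000u) >> 24);
  }

  int main() {
    assert(bswapIdiom(0x11223344u) == 0x44332211u);
    assert(bswapIdiom(bswapIdiom(0xDEADBEEFu)) == 0xDEADBEEFu);
    return 0;
  }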
@ Mul
Product of integers.
static unsigned getFCmpCode(FCmpInst::Predicate CC)
Similar to getICmpCode but for FCmpInst.
This is the base class for all instructions that perform data casts.
CmpInst::Predicate getInverseMinMaxPred(SelectPatternFlavor SPF)
Return the canonical inverse comparison predicate for the specified minimum/maximum flavor.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
This class represents a sign extension of integer types.
StringRef getName() const
Return a constant reference to the value's name.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
static Instruction * foldOrToXor(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
static Value * getNewICmpValue(unsigned Code, bool Sign, Value *LHS, Value *RHS, InstCombiner::BuilderTy &Builder)
This is the complement of getICmpCode, which turns an opcode and two operands into either a constant ...
APInt zext(unsigned width) const
Zero extend to a new width.
static Value * foldAndOrOfICmpsWithConstEq(ICmpInst *Cmp0, ICmpInst *Cmp1, BinaryOperator &Logic, InstCombiner::BuilderTy &Builder, const SimplifyQuery &Q)
Reduce logic-of-compares with equality to a constant by substituting a common operand with the consta...
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
unsigned countLeadingZeros() const
The APInt version of the countLeadingZeros functions in MathExtras.h.
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
bool decomposeBitTestICmp(Value *LHS, Value *RHS, CmpInst::Predicate &Pred, Value *&X, APInt &Mask, bool LookThroughTrunc=true)
Decompose an icmp into the form ((X & Mask) pred 0) if possible.
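A few concrete instances of such decompositions, checked in a standalone sketch (the predicates shown are ones that reduce to a mask test; constants are arbitrary):

  #include <cassert>
  #include <cstdint>

  int main() {
    for (int64_t V = -70000; V <= 70000; V += 7) {
      int32_t X = int32_t(V);
      // icmp slt X, 0   ->  (X & 0x80000000) != 0
      assert((X < 0) == ((uint32_t(X) & 0x80000000u) != 0));
      // icmp sgt X, -1  ->  (X & 0x80000000) == 0
      assert((X > -1) == ((uint32_t(X) & 0x80000000u) == 0));
      // icmp ult X, 8   ->  (X & ~7) == 0   (power-of-two bound)
      assert((uint32_t(X) < 8u) == ((uint32_t(X) & ~7u) == 0));
    }
    return 0;
  }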
static Constant * getNeg(Constant *C, bool HasNUW=false, bool HasNSW=false)
static Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
static ConstantInt * getSigned(IntegerType *Ty, int64_t V)
Return a ConstantInt with the specified value for the specified type.
bool sinkNotIntoOtherHandOfAndOrOr(BinaryOperator &I)
Constant * getPredForICmpCode(unsigned Code, bool Sign, Type *OpTy, CmpInst::Predicate &Pred)
This is the complement of getICmpCode.
cst_pred_ty< is_sign_mask > m_SignMask()
Match an integer or vector with only the sign bit(s) set.
bool hasNUses(unsigned N) const
Return true if this Value has exactly N uses.
constexpr unsigned BitWidth
static Constant * getAnd(Constant *C1, Constant *C2)
m_Intrinsic_Ty< Opnd0 >::Ty m_BSwap(const Opnd0 &Op0)
cst_pred_ty< is_zero_int > m_ZeroInt()
Match an integer 0 or a vector with all elements equal to 0.
@ ICMP_SGE
signed greater or equal
BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)
Matches an Xor with LHS and RHS in either order.
BinOpPred_match< LHS, RHS, is_logical_shift_op > m_LogicalShift(const LHS &L, const RHS &R)
Matches logical shift operations.
cstfp_pred_ty< is_any_zero_fp > m_AnyZeroFP()
Match a floating-point negative zero or positive zero.
static Constant * mergeUndefsWith(Constant *C, Constant *Other)
Merges undefs of a Constant with another Constant, along with the undefs already present.
bool isNullValue() const
Determine if all bits are clear.
class_match< ConstantExpr > m_ConstantExpr()
Match an arbitrary ConstantExpr and ignore it.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
CmpClass_match< LHS, RHS, ICmpInst, ICmpInst::Predicate > m_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R)
@ ICMP_UGT
unsigned greater than
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Instruction::CastOps getOpcode() const
Return the opcode of this CastInst.
Predicate getPredicate() const
Return the predicate for this instruction.
static Value * SimplifyBSwap(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
Transform BITWISE_OP(BSWAP(A),BSWAP(B)) or BITWISE_OP(BSWAP(A), Constant) to BSWAP(BITWISE_OP(A,...
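The transform is justified because byte swapping commutes with bitwise logic; a standalone check with a local bswap helper (not the intrinsic):

  #include <cassert>
  #include <cstdint>

  static uint32_t bswap32(uint32_t X) {
    return (X >> 24) | ((X >> 8) & 0xFF00u) | ((X << 8) & 0xFF0000u) | (X << 24);
  }

  int main() {
    uint32_t A = 0x12345678u, B = 0xA5A5F00Fu;
    // Permuting bytes commutes with per-bit logic, so two bswaps merge into one.
    assert((bswap32(A) & bswap32(B)) == bswap32(A & B));
    assert((bswap32(A) | bswap32(B)) == bswap32(A | B));
    assert((bswap32(A) ^ bswap32(B)) == bswap32(A ^ B));
    return 0;
  }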
unsigned getBitWidth() const
Get the number of bits in this IntegerType.
Value * SimplifyOrInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an Or, fold the result or return null.
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
CmpClass_match< LHS, RHS, FCmpInst, FCmpInst::Predicate > m_FCmp(FCmpInst::Predicate &Pred, const LHS &L, const RHS &R)
static bool canNarrowShiftAmt(Constant *C, unsigned BitWidth)
Return true if a constant shift amount is always less than the specified bit-width.
bool sgt(const APInt &RHS) const
Signed greater than comparison.
static Optional< std::pair< unsigned, unsigned > > getMaskedTypeForICmpPair(Value *&A, Value *&B, Value *&C, Value *&D, Value *&E, ICmpInst *LHS, ICmpInst *RHS, ICmpInst::Predicate &PredL, ICmpInst::Predicate &PredR)
Handle (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E).
static bool areInverseVectorBitmasks(Constant *C1, Constant *C2)
If all elements of two constant vectors are 0/-1 and inverses, return true.
BinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub > m_Neg(const ValTy &V)
Matches a 'Neg' as 'sub 0, V'.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Get a value with low bits set.
bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given value is known to have exactly one bit set when defined.
static BinaryOperator * CreateAdd(Value *S1, Value *S2, const Twine &Name, Instruction *InsertBefore, Value *FlagsOp)
void takeName(Value *V)
Transfer the name from V to this value.
APInt shl(unsigned shiftAmt) const
Left-shift function.
Value * getOperand(unsigned i) const
CastClass_match< OpTy, Instruction::Trunc > m_Trunc(const OpTy &Op)
Matches Trunc.
static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), Instruction *InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
static Instruction * matchDeMorgansLaws(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
Match De Morgan's Laws: (~A & ~B) == (~(A | B)) (~A | ~B) == (~(A & B))
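Both laws are easy to spot-check in a standalone sketch; the identities hold at any bit width, and the payoff is a single 'not' instead of two.

  #include <cassert>
  #include <cstdint>

  int main() {
    for (uint32_t A = 0; A < 256; ++A)
      for (uint32_t B = 0; B < 256; ++B) {
        assert((~A & ~B) == ~(A | B));
        assert((~A | ~B) == ~(A & B));
      }
    return 0;
  }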
static BinaryOperator * CreateWithCopiedFlags(BinaryOps Opc, Value *V1, Value *V2, Instruction *CopyO, const Twine &Name="")
class_match< CmpInst > m_Cmp()
Matches any compare instruction and ignore it.
LLVM Value Representation.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
Value * SimplifyXorInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an Xor, fold the result or return null.
void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth, const Instruction *CxtI) const
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
@ Xor
Bitwise or logical XOR of integers.
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
A Use represents the edge between a Value definition and its users.
static Value * foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd, InstCombiner::BuilderTy &Builder)
Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E) into a single (icmp(A & X) ==/!...
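A concrete instance of this family of folds, checked standalone with arbitrary masks B = 0x0F and D = 0xF0 (so the merged mask X is B | D):

  #include <cassert>
  #include <cstdint>

  int main() {
    for (uint32_t A = 0; A < 0x400u; ++A) {
      // (icmp eq (A & B), 0) & (icmp eq (A & D), 0)
      bool TwoCmps = ((A & 0x0Fu) == 0) && ((A & 0xF0u) == 0);
      // (icmp eq (A & (B | D)), 0) -- one 'and' plus one compare
      bool OneCmp = (A & 0xFFu) == 0;
      assert(TwoCmps == OneCmp);
      // The ne / or-joined dual folds the same way.
      assert((((A & 0x0Fu) != 0) || ((A & 0xF0u) != 0)) == ((A & 0xFFu) != 0));
    }
    return 0;
  }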