40#define DEBUG_TYPE "instcombine"
52 bool IsSigned =
false) {
55 Result = In1.
sadd_ov(In2, Overflow);
57 Result = In1.
uadd_ov(In2, Overflow);
65 bool IsSigned =
false) {
68 Result = In1.
ssub_ov(In2, Overflow);
70 Result = In1.
usub_ov(In2, Overflow);
78 for (
auto *U :
I.users())
100 }
else if (
C.isAllOnes()) {
121 if (LI->
isVolatile() || !GV || !GV->isConstant() ||
122 !GV->hasDefinitiveInitializer())
126 TypeSize EltSize =
DL.getTypeStoreSize(EltTy);
142 if (!ConstOffset.
ult(Stride))
156 enum { Overdefined = -3, Undefined = -2 };
165 int FirstTrueElement = Undefined, SecondTrueElement = Undefined;
169 int FirstFalseElement = Undefined, SecondFalseElement = Undefined;
177 int TrueRangeEnd = Undefined, FalseRangeEnd = Undefined;
187 for (
unsigned i = 0, e = ArrayElementCount; i != e; ++i,
Offset += Stride) {
201 CompareRHS,
DL, &
TLI);
209 if (TrueRangeEnd == (
int)i - 1)
211 if (FalseRangeEnd == (
int)i - 1)
228 if (FirstTrueElement == Undefined)
229 FirstTrueElement = TrueRangeEnd = i;
232 if (SecondTrueElement == Undefined)
233 SecondTrueElement = i;
235 SecondTrueElement = Overdefined;
238 if (TrueRangeEnd == (
int)i - 1)
241 TrueRangeEnd = Overdefined;
245 if (FirstFalseElement == Undefined)
246 FirstFalseElement = FalseRangeEnd = i;
249 if (SecondFalseElement == Undefined)
250 SecondFalseElement = i;
252 SecondFalseElement = Overdefined;
255 if (FalseRangeEnd == (
int)i - 1)
258 FalseRangeEnd = Overdefined;
263 if (i < 64 && IsTrueForElt)
264 MagicBitvector |= 1ULL << i;
269 if ((i & 8) == 0 && i >= 64 && SecondTrueElement == Overdefined &&
270 SecondFalseElement == Overdefined && TrueRangeEnd == Overdefined &&
271 FalseRangeEnd == Overdefined)
285 auto MaskIdx = [&](
Value *Idx) {
289 Idx =
Builder.CreateAnd(Idx, Mask);
296 if (SecondTrueElement != Overdefined) {
299 if (FirstTrueElement == Undefined)
302 Value *FirstTrueIdx = ConstantInt::get(Idx->
getType(), FirstTrueElement);
305 if (SecondTrueElement == Undefined)
310 Value *SecondTrueIdx = ConstantInt::get(Idx->
getType(), SecondTrueElement);
312 return BinaryOperator::CreateOr(C1, C2);
317 if (SecondFalseElement != Overdefined) {
320 if (FirstFalseElement == Undefined)
323 Value *FirstFalseIdx = ConstantInt::get(Idx->
getType(), FirstFalseElement);
326 if (SecondFalseElement == Undefined)
331 Value *SecondFalseIdx =
332 ConstantInt::get(Idx->
getType(), SecondFalseElement);
334 return BinaryOperator::CreateAnd(C1, C2);
339 if (TrueRangeEnd != Overdefined) {
340 assert(TrueRangeEnd != FirstTrueElement &&
"Should emit single compare");
344 if (FirstTrueElement) {
346 Idx =
Builder.CreateAdd(Idx, Offs);
350 ConstantInt::get(Idx->
getType(), TrueRangeEnd - FirstTrueElement + 1);
355 if (FalseRangeEnd != Overdefined) {
356 assert(FalseRangeEnd != FirstFalseElement &&
"Should emit single compare");
359 if (FirstFalseElement) {
361 Idx =
Builder.CreateAdd(Idx, Offs);
365 ConstantInt::get(Idx->
getType(), FalseRangeEnd - FirstFalseElement);
378 if (ArrayElementCount <= Idx->
getType()->getIntegerBitWidth())
381 Ty =
DL.getSmallestLegalIntType(
Init->getContext(), ArrayElementCount);
386 V =
Builder.CreateLShr(ConstantInt::get(Ty, MagicBitvector), V);
387 V =
Builder.CreateAnd(ConstantInt::get(Ty, 1), V);
412 while (!WorkList.
empty()) {
415 while (!WorkList.
empty()) {
416 if (Explored.
size() >= 100)
434 if (!
GEP->isInBounds() ||
count_if(
GEP->indices(), IsNonConst) > 1)
442 if (WorkList.
back() == V) {
458 for (
auto *PN : PHIs)
459 for (
Value *
Op : PN->incoming_values())
467 for (
Value *Val : Explored) {
473 if (Inst ==
Base || Inst ==
PHI || !Inst || !
PHI ||
477 if (
PHI->getParent() == Inst->getParent())
487 bool Before =
true) {
495 I = &*std::next(
I->getIterator());
496 Builder.SetInsertPoint(
I);
501 BasicBlock &Entry =
A->getParent()->getEntryBlock();
502 Builder.SetInsertPoint(&Entry, Entry.getFirstInsertionPt());
524 Base->getContext(),
DL.getIndexTypeSizeInBits(Start->getType()));
530 for (
Value *Val : Explored) {
538 PHI->getName() +
".idx",
PHI->getIterator());
543 for (
Value *Val : Explored) {
552 NewInsts[
GEP] = OffsetV;
554 NewInsts[
GEP] = Builder.CreateAdd(
555 Op, OffsetV,
GEP->getOperand(0)->getName() +
".add",
567 for (
Value *Val : Explored) {
574 for (
unsigned I = 0,
E =
PHI->getNumIncomingValues();
I <
E; ++
I) {
575 Value *NewIncoming =
PHI->getIncomingValue(
I);
577 auto It = NewInsts.
find(NewIncoming);
578 if (It != NewInsts.
end())
579 NewIncoming = It->second;
586 for (
Value *Val : Explored) {
592 Value *NewVal = Builder.CreateGEP(Builder.getInt8Ty(),
Base, NewInsts[Val],
593 Val->getName() +
".ptr", NW);
600 return NewInsts[Start];
686 if (
Base.Ptr == RHS && CanFold(
Base.LHSNW) && !
Base.isExpensive()) {
690 EmitGEPOffsets(
Base.LHSGEPs,
Base.LHSNW, IdxTy,
true);
698 RHS->getType()->getPointerAddressSpace())) {
729 if (GEPLHS->
getOperand(0) != GEPRHS->getOperand(0)) {
730 bool IndicesTheSame =
733 GEPRHS->getPointerOperand()->getType() &&
737 if (GEPLHS->
getOperand(i) != GEPRHS->getOperand(i)) {
738 IndicesTheSame =
false;
744 if (IndicesTheSame &&
752 if (GEPLHS->
isInBounds() && GEPRHS->isInBounds() &&
754 (GEPRHS->hasAllConstantIndices() || GEPRHS->hasOneUse()) &&
758 Value *LOffset = EmitGEPOffset(GEPLHS);
759 Value *ROffset = EmitGEPOffset(GEPRHS);
766 if (LHSIndexTy != RHSIndexTy) {
769 ROffset =
Builder.CreateTrunc(ROffset, LHSIndexTy);
771 LOffset =
Builder.CreateTrunc(LOffset, RHSIndexTy);
780 if (GEPLHS->
getOperand(0) == GEPRHS->getOperand(0) &&
784 unsigned NumDifferences = 0;
785 unsigned DiffOperand = 0;
786 for (
unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
787 if (GEPLHS->
getOperand(i) != GEPRHS->getOperand(i)) {
789 Type *RHSType = GEPRHS->getOperand(i)->getType();
800 if (NumDifferences++)
805 if (NumDifferences == 0)
813 Value *RHSV = GEPRHS->getOperand(DiffOperand);
814 return NewICmp(NW, LHSV, RHSV);
822 EmitGEPOffsets(
Base.LHSGEPs,
Base.LHSNW, IdxTy,
true);
824 EmitGEPOffsets(
Base.RHSGEPs,
Base.RHSNW, IdxTy,
true);
825 return NewICmp(
Base.LHSNW &
Base.RHSNW, L, R);
851 bool Captured =
false;
856 CmpCaptureTracker(
AllocaInst *Alloca) : Alloca(Alloca) {}
858 void tooManyUses()
override { Captured =
true; }
870 ICmps[ICmp] |= 1u << U->getOperandNo();
879 CmpCaptureTracker Tracker(Alloca);
881 if (Tracker.Captured)
885 for (
auto [ICmp, Operands] : Tracker.ICmps) {
891 auto *Res = ConstantInt::get(ICmp->getType(),
917 assert(!!
C &&
"C should not be zero!");
933 ConstantInt::get(
X->getType(), -
C));
945 ConstantInt::get(
X->getType(),
SMax -
C));
956 ConstantInt::get(
X->getType(),
SMax - (
C - 1)));
965 assert(
I.isEquality() &&
"Cannot fold icmp gt/lt");
968 if (
I.getPredicate() ==
I.ICMP_NE)
970 return new ICmpInst(Pred, LHS, RHS);
989 return getICmp(
I.ICMP_UGT,
A,
990 ConstantInt::get(
A->getType(), AP2.
logBase2()));
1002 if (IsAShr && AP1 == AP2.
ashr(Shift)) {
1006 return getICmp(
I.ICMP_UGE,
A, ConstantInt::get(
A->getType(), Shift));
1007 return getICmp(
I.ICMP_EQ,
A, ConstantInt::get(
A->getType(), Shift));
1008 }
else if (AP1 == AP2.
lshr(Shift)) {
1009 return getICmp(
I.ICMP_EQ,
A, ConstantInt::get(
A->getType(), Shift));
1015 auto *TorF = ConstantInt::get(
I.getType(),
I.getPredicate() ==
I.ICMP_NE);
1024 assert(
I.isEquality() &&
"Cannot fold icmp gt/lt");
1027 if (
I.getPredicate() ==
I.ICMP_NE)
1029 return new ICmpInst(Pred, LHS, RHS);
1038 if (!AP1 && AP2TrailingZeros != 0)
1041 ConstantInt::get(
A->getType(), AP2.
getBitWidth() - AP2TrailingZeros));
1049 if (Shift > 0 && AP2.
shl(Shift) == AP1)
1050 return getICmp(
I.ICMP_EQ,
A, ConstantInt::get(
A->getType(), Shift));
1054 auto *TorF = ConstantInt::get(
I.getType(),
I.getPredicate() ==
I.ICMP_NE);
1083 if (NewWidth != 7 && NewWidth != 15 && NewWidth != 31)
1107 if (U == AddWithCst)
1125 I.getModule(), Intrinsic::sadd_with_overflow, NewType);
1133 Value *TruncA = Builder.CreateTrunc(
A, NewType,
A->getName() +
".trunc");
1134 Value *TruncB = Builder.CreateTrunc(
B, NewType,
B->getName() +
".trunc");
1135 CallInst *
Call = Builder.CreateCall(
F, {TruncA, TruncB},
"sadd");
1136 Value *
Add = Builder.CreateExtractValue(
Call, 0,
"sadd.result");
1154 if (!
I.isEquality())
1185 APInt(XBitWidth, XBitWidth - 1))))
1212 return new ICmpInst(Pred,
B, Cmp.getOperand(1));
1214 return new ICmpInst(Pred,
A, Cmp.getOperand(1));
1231 return new ICmpInst(Pred,
X, Cmp.getOperand(1));
1243 return new ICmpInst(Pred,
Y, Cmp.getOperand(1));
1249 return new ICmpInst(Pred,
X, Cmp.getOperand(1));
1252 if (BO0->hasNoUnsignedWrap() || BO0->hasNoSignedWrap()) {
1260 return new ICmpInst(Pred,
Y, Cmp.getOperand(1));
1265 return new ICmpInst(Pred,
X, Cmp.getOperand(1));
1281 return new ICmpInst(Pred, Stripped,
1294 const APInt *Mask, *Neg;
1310 auto *NewAnd =
Builder.CreateAnd(Num, *Mask);
1313 return new ICmpInst(Pred, NewAnd, Zero);
1334 Value *Op0 = Cmp.getOperand(0), *Op1 = Cmp.getOperand(1);
1350 for (
Value *V : Phi->incoming_values()) {
1358 PHINode *NewPhi =
Builder.CreatePHI(Cmp.getType(), Phi->getNumOperands());
1359 for (
auto [V, Pred] :
zip(
Ops, Phi->blocks()))
1374 Value *
X = Cmp.getOperand(0), *
Y = Cmp.getOperand(1);
1407 if (Cmp.isEquality() || (IsSignBit &&
hasBranchUse(Cmp)))
1412 if (Cmp.hasOneUse() &&
1426 if (!
match(BI->getCondition(),
1431 if (
DT.dominates(Edge0, Cmp.getParent())) {
1432 if (
auto *V = handleDomCond(DomPred, DomC))
1436 if (
DT.dominates(Edge1, Cmp.getParent()))
1452 Type *SrcTy =
X->getType();
1454 SrcBits = SrcTy->getScalarSizeInBits();
1458 if (shouldChangeType(Trunc->
getType(), SrcTy)) {
1460 return new ICmpInst(Pred,
X, ConstantInt::get(SrcTy,
C.sext(SrcBits)));
1462 return new ICmpInst(Pred,
X, ConstantInt::get(SrcTy,
C.zext(SrcBits)));
1465 if (
C.isOne() &&
C.getBitWidth() > 1) {
1470 ConstantInt::get(V->getType(), 1));
1482 auto NewPred = (Pred == Cmp.ICMP_EQ) ? Cmp.ICMP_UGE : Cmp.ICMP_ULT;
1484 ConstantInt::get(SrcTy, DstBits - Pow2->
logBase2()));
1490 Pred,
Y, ConstantInt::get(SrcTy,
C.logBase2() - Pow2->
logBase2()));
1496 if (!SrcTy->isVectorTy() && shouldChangeType(DstBits, SrcBits)) {
1500 Constant *WideC = ConstantInt::get(SrcTy,
C.zext(SrcBits));
1509 if ((Known.
Zero | Known.
One).countl_one() >= SrcBits - DstBits) {
1511 APInt NewRHS =
C.zext(SrcBits);
1513 return new ICmpInst(Pred,
X, ConstantInt::get(SrcTy, NewRHS));
1525 DstBits == SrcBits - ShAmt) {
1542 bool YIsSExt =
false;
1545 unsigned NoWrapFlags =
cast<TruncInst>(Cmp.getOperand(0))->getNoWrapKind() &
1547 if (Cmp.isSigned()) {
1558 if (
X->getType() !=
Y->getType() &&
1559 (!Cmp.getOperand(0)->hasOneUse() || !Cmp.getOperand(1)->hasOneUse()))
1561 if (!isDesirableIntType(
X->getType()->getScalarSizeInBits()) &&
1562 isDesirableIntType(
Y->getType()->getScalarSizeInBits())) {
1564 Pred = Cmp.getSwappedPredicate(Pred);
1569 else if (!Cmp.isSigned() &&
1583 Type *TruncTy = Cmp.getOperand(0)->getType();
1588 if (isDesirableIntType(TruncBits) &&
1589 !isDesirableIntType(
X->getType()->getScalarSizeInBits()))
1612 bool TrueIfSigned =
false;
1629 if (
Xor->hasOneUse()) {
1631 if (!Cmp.isEquality() && XorC->
isSignMask()) {
1632 Pred = Cmp.getFlippedSignednessPredicate();
1633 return new ICmpInst(Pred,
X, ConstantInt::get(
X->getType(),
C ^ *XorC));
1638 Pred = Cmp.getFlippedSignednessPredicate();
1639 Pred = Cmp.getSwappedPredicate(Pred);
1640 return new ICmpInst(Pred,
X, ConstantInt::get(
X->getType(),
C ^ *XorC));
1647 if (*XorC == ~
C && (
C + 1).isPowerOf2())
1650 if (*XorC ==
C && (
C + 1).isPowerOf2())
1655 if (*XorC == -
C &&
C.isPowerOf2())
1657 ConstantInt::get(
X->getType(), ~
C));
1659 if (*XorC ==
C && (-
C).isPowerOf2())
1661 ConstantInt::get(
X->getType(), ~
C));
1683 const APInt *ShiftC;
1688 Type *XType =
X->getType();
1694 return new ICmpInst(Pred,
Add, ConstantInt::get(XType, Bound));
1703 if (!Shift || !Shift->
isShift())
1711 unsigned ShiftOpcode = Shift->
getOpcode();
1712 bool IsShl = ShiftOpcode == Instruction::Shl;
1715 APInt NewAndCst, NewCmpCst;
1716 bool AnyCmpCstBitsShiftedOut;
1717 if (ShiftOpcode == Instruction::Shl) {
1725 NewCmpCst = C1.
lshr(*C3);
1726 NewAndCst = C2.
lshr(*C3);
1727 AnyCmpCstBitsShiftedOut = NewCmpCst.
shl(*C3) != C1;
1728 }
else if (ShiftOpcode == Instruction::LShr) {
1733 NewCmpCst = C1.
shl(*C3);
1734 NewAndCst = C2.
shl(*C3);
1735 AnyCmpCstBitsShiftedOut = NewCmpCst.
lshr(*C3) != C1;
1741 assert(ShiftOpcode == Instruction::AShr &&
"Unknown shift opcode");
1742 NewCmpCst = C1.
shl(*C3);
1743 NewAndCst = C2.
shl(*C3);
1744 AnyCmpCstBitsShiftedOut = NewCmpCst.
ashr(*C3) != C1;
1745 if (NewAndCst.
ashr(*C3) != C2)
1749 if (AnyCmpCstBitsShiftedOut) {
1759 Shift->
getOperand(0), ConstantInt::get(
And->getType(), NewAndCst));
1760 return new ICmpInst(Cmp.getPredicate(), NewAnd,
1761 ConstantInt::get(
And->getType(), NewCmpCst));
1778 return new ICmpInst(Cmp.getPredicate(), NewAnd, Cmp.getOperand(1));
1792 return new TruncInst(
And->getOperand(0), Cmp.getType());
1803 ConstantInt::get(
X->getType(), ~*C2));
1808 ConstantInt::get(
X->getType(), -*C2));
1811 if (!
And->hasOneUse())
1814 if (Cmp.isEquality() && C1.
isZero()) {
1832 Constant *NegBOC = ConstantInt::get(
And->getType(), -NewC2);
1834 return new ICmpInst(NewPred,
X, NegBOC);
1852 if (!Cmp.getType()->isVectorTy()) {
1853 Type *WideType = W->getType();
1855 Constant *ZextC1 = ConstantInt::get(WideType, C1.
zext(WideScalarBits));
1856 Constant *ZextC2 = ConstantInt::get(WideType, C2->
zext(WideScalarBits));
1858 return new ICmpInst(Cmp.getPredicate(), NewAnd, ZextC1);
1869 if (!Cmp.isSigned() && C1.
isZero() &&
And->getOperand(0)->hasOneUse() &&
1876 unsigned UsesRemoved = 0;
1877 if (
And->hasOneUse())
1879 if (
Or->hasOneUse())
1886 if (UsesRemoved >= RequireUsesRemoved) {
1890 One,
Or->getName());
1892 return new ICmpInst(Cmp.getPredicate(), NewAnd, Cmp.getOperand(1));
1906 if (!Cmp.getParent()->getParent()->hasFnAttribute(
1907 Attribute::NoImplicitFloat) &&
1910 Type *FPType = V->getType()->getScalarType();
1911 if (FPType->isIEEELikeFPTy() && (C1.
isZero() || C1 == *C2)) {
1912 APInt ExponentMask =
1914 if (*C2 == ExponentMask) {
1915 unsigned Mask = C1.
isZero()
1949 Constant *MinSignedC = ConstantInt::get(
1953 return new ICmpInst(NewPred,
X, MinSignedC);
1968 if (!Cmp.isEquality())
1974 if (Cmp.getOperand(1) ==
Y &&
C.isNegatedPowerOf2()) {
1985 X->getType()->isIntOrIntVectorTy(1) && (
C.isZero() ||
C.isOne())) {
1991 return BinaryOperator::CreateAnd(TruncY,
X);
2009 const APInt *Addend, *Msk;
2013 APInt NewComperand = (
C - *Addend) & *Msk;
2014 Value *MaskA =
Builder.CreateAnd(
A, ConstantInt::get(
A->getType(), *Msk));
2016 ConstantInt::get(MaskA->
getType(), NewComperand));
2038 while (!WorkList.
empty()) {
2039 auto MatchOrOperatorArgument = [&](
Value *OrOperatorArgument) {
2042 if (
match(OrOperatorArgument,
2048 if (
match(OrOperatorArgument,
2058 Value *OrOperatorLhs, *OrOperatorRhs;
2060 if (!
match(CurrentValue,
2065 MatchOrOperatorArgument(OrOperatorRhs);
2066 MatchOrOperatorArgument(OrOperatorLhs);
2071 Value *LhsCmp = Builder.CreateICmp(Pred, CmpValues.
rbegin()->first,
2072 CmpValues.
rbegin()->second);
2074 for (
auto It = CmpValues.
rbegin() + 1; It != CmpValues.
rend(); ++It) {
2075 Value *RhsCmp = Builder.CreateICmp(Pred, It->first, It->second);
2076 LhsCmp = Builder.CreateBinOp(BOpc, LhsCmp, RhsCmp);
2092 ConstantInt::get(V->getType(), 1));
2095 Value *OrOp0 =
Or->getOperand(0), *OrOp1 =
Or->getOperand(1);
2102 Builder.CreateXor(OrOp1, ConstantInt::get(OrOp1->getType(),
C));
2103 return new ICmpInst(Pred, OrOp0, NewC);
2107 if (
match(OrOp1,
m_APInt(MaskC)) && Cmp.isEquality()) {
2108 if (*MaskC ==
C && (
C + 1).isPowerOf2()) {
2113 return new ICmpInst(Pred, OrOp0, OrOp1);
2120 if (
Or->hasOneUse()) {
2122 Constant *NewC = ConstantInt::get(
Or->getType(),
C ^ (*MaskC));
2134 Constant *NewC = ConstantInt::get(
X->getType(), TrueIfSigned ? 1 : 0);
2162 if (!Cmp.isEquality() || !
C.isZero() || !
Or->hasOneUse())
2194 if (Cmp.isEquality() &&
C.isZero() &&
X ==
Mul->getOperand(1) &&
2195 (
Mul->hasNoUnsignedWrap() ||
Mul->hasNoSignedWrap()))
2217 if (Cmp.isEquality()) {
2219 if (
Mul->hasNoSignedWrap() &&
C.srem(*MulC).isZero()) {
2220 Constant *NewC = ConstantInt::get(MulTy,
C.sdiv(*MulC));
2228 if (
C.urem(*MulC).isZero()) {
2231 if ((*MulC & 1).isOne() ||
Mul->hasNoUnsignedWrap()) {
2232 Constant *NewC = ConstantInt::get(MulTy,
C.udiv(*MulC));
2245 if (
C.isMinSignedValue() && MulC->
isAllOnes())
2251 NewC = ConstantInt::get(
2255 "Unexpected predicate");
2256 NewC = ConstantInt::get(
2261 NewC = ConstantInt::get(
2265 "Unexpected predicate");
2266 NewC = ConstantInt::get(
2271 return NewC ?
new ICmpInst(Pred,
X, NewC) :
nullptr;
2283 unsigned TypeBits =
C.getBitWidth();
2285 if (Cmp.isUnsigned()) {
2305 return new ICmpInst(Pred,
Y, ConstantInt::get(ShiftType, CLog2));
2306 }
else if (Cmp.isSigned() && C2->
isOne()) {
2307 Constant *BitWidthMinusOne = ConstantInt::get(ShiftType, TypeBits - 1);
2328 const APInt *ShiftVal;
2358 const APInt *ShiftAmt;
2364 unsigned TypeBits =
C.getBitWidth();
2365 if (ShiftAmt->
uge(TypeBits))
2377 APInt ShiftedC =
C.ashr(*ShiftAmt);
2378 return new ICmpInst(Pred,
X, ConstantInt::get(ShType, ShiftedC));
2381 C.ashr(*ShiftAmt).shl(*ShiftAmt) ==
C) {
2382 APInt ShiftedC =
C.ashr(*ShiftAmt);
2383 return new ICmpInst(Pred,
X, ConstantInt::get(ShType, ShiftedC));
2390 assert(!
C.isMinSignedValue() &&
"Unexpected icmp slt");
2391 APInt ShiftedC = (
C - 1).ashr(*ShiftAmt) + 1;
2392 return new ICmpInst(Pred,
X, ConstantInt::get(ShType, ShiftedC));
2402 APInt ShiftedC =
C.lshr(*ShiftAmt);
2403 return new ICmpInst(Pred,
X, ConstantInt::get(ShType, ShiftedC));
2406 C.lshr(*ShiftAmt).shl(*ShiftAmt) ==
C) {
2407 APInt ShiftedC =
C.lshr(*ShiftAmt);
2408 return new ICmpInst(Pred,
X, ConstantInt::get(ShType, ShiftedC));
2415 assert(
C.ugt(0) &&
"ult 0 should have been eliminated");
2416 APInt ShiftedC = (
C - 1).lshr(*ShiftAmt) + 1;
2417 return new ICmpInst(Pred,
X, ConstantInt::get(ShType, ShiftedC));
2421 if (Cmp.isEquality() && Shl->
hasOneUse()) {
2427 Constant *LShrC = ConstantInt::get(ShType,
C.lshr(*ShiftAmt));
2432 bool TrueIfSigned =
false;
2444 if (Cmp.isUnsigned() && Shl->
hasOneUse()) {
2446 if ((
C + 1).isPowerOf2() &&
2454 if (
C.isPowerOf2() &&
2484 Pred, ConstantInt::get(ShType->
getContext(),
C))) {
2485 CmpPred = FlippedStrictness->first;
2493 ConstantInt::get(TruncTy, RHSC.
ashr(*ShiftAmt).
trunc(TypeBits - Amt));
2495 Builder.CreateTrunc(
X, TruncTy,
"",
false,
2512 if (Cmp.isEquality() && Shr->
isExact() &&
C.isZero())
2513 return new ICmpInst(Pred,
X, Cmp.getOperand(1));
2515 bool IsAShr = Shr->
getOpcode() == Instruction::AShr;
2516 const APInt *ShiftValC;
2518 if (Cmp.isEquality())
2536 assert(ShiftValC->
uge(
C) &&
"Expected simplify of compare");
2537 assert((IsUGT || !
C.isZero()) &&
"Expected X u< 0 to simplify");
2539 unsigned CmpLZ = IsUGT ?
C.countl_zero() : (
C - 1).
countl_zero();
2547 const APInt *ShiftAmtC;
2553 unsigned TypeBits =
C.getBitWidth();
2555 if (ShAmtVal >= TypeBits || ShAmtVal == 0)
2558 bool IsExact = Shr->
isExact();
2566 (
C - 1).isPowerOf2() &&
C.countLeadingZeros() > ShAmtVal) {
2572 APInt ShiftedC = (
C - 1).shl(ShAmtVal) + 1;
2573 return new ICmpInst(Pred,
X, ConstantInt::get(ShrTy, ShiftedC));
2579 APInt ShiftedC =
C.shl(ShAmtVal);
2580 if (ShiftedC.
ashr(ShAmtVal) ==
C)
2581 return new ICmpInst(Pred,
X, ConstantInt::get(ShrTy, ShiftedC));
2585 APInt ShiftedC = (
C + 1).shl(ShAmtVal) - 1;
2586 if (!
C.isMaxSignedValue() && !(
C + 1).shl(ShAmtVal).isMinSignedValue() &&
2587 (ShiftedC + 1).ashr(ShAmtVal) == (
C + 1))
2588 return new ICmpInst(Pred,
X, ConstantInt::get(ShrTy, ShiftedC));
2594 APInt ShiftedC = (
C + 1).shl(ShAmtVal) - 1;
2595 if ((ShiftedC + 1).ashr(ShAmtVal) == (
C + 1) ||
2596 (
C + 1).shl(ShAmtVal).isMinSignedValue())
2597 return new ICmpInst(Pred,
X, ConstantInt::get(ShrTy, ShiftedC));
2604 if (
C.getBitWidth() > 2 &&
C.getNumSignBits() <= ShAmtVal) {
2614 }
else if (!IsAShr) {
2618 APInt ShiftedC =
C.shl(ShAmtVal);
2619 if (ShiftedC.
lshr(ShAmtVal) ==
C)
2620 return new ICmpInst(Pred,
X, ConstantInt::get(ShrTy, ShiftedC));
2624 APInt ShiftedC = (
C + 1).shl(ShAmtVal) - 1;
2625 if ((ShiftedC + 1).lshr(ShAmtVal) == (
C + 1))
2626 return new ICmpInst(Pred,
X, ConstantInt::get(ShrTy, ShiftedC));
2630 if (!Cmp.isEquality())
2638 assert(((IsAShr &&
C.shl(ShAmtVal).ashr(ShAmtVal) ==
C) ||
2639 (!IsAShr &&
C.shl(ShAmtVal).lshr(ShAmtVal) ==
C)) &&
2640 "Expected icmp+shr simplify did not occur.");
2645 return new ICmpInst(Pred,
X, ConstantInt::get(ShrTy,
C << ShAmtVal));
2651 Constant *Mask = ConstantInt::get(ShrTy, Val);
2653 return new ICmpInst(Pred,
And, ConstantInt::get(ShrTy,
C << ShAmtVal));
2670 const APInt *DivisorC;
2679 "ult X, 0 should have been simplified already.");
2684 if (!NormalizedC.
uge(DivisorC->
abs() - 1))
2707 const APInt *DivisorC;
2716 !
C.isStrictlyPositive()))
2722 Constant *MaskC = ConstantInt::get(Ty, SignMask | (*DivisorC - 1));
2726 return new ICmpInst(Pred,
And, ConstantInt::get(Ty,
C));
2753 assert(*C2 != 0 &&
"udiv 0, X should have been simplified already.");
2758 "icmp ugt X, UINT_MAX should have been simplified already.");
2760 ConstantInt::get(Ty, C2->
udiv(
C + 1)));
2765 assert(
C != 0 &&
"icmp ult X, 0 should have been simplified already.");
2767 ConstantInt::get(Ty, C2->
udiv(
C)));
2781 bool DivIsSigned = Div->
getOpcode() == Instruction::SDiv;
2791 if (Cmp.isEquality() && Div->
hasOneUse() &&
C.isSignBitSet() &&
2792 (!DivIsSigned ||
C.isMinSignedValue())) {
2793 Value *XBig =
Builder.CreateICmp(Pred,
X, ConstantInt::get(Ty,
C));
2794 Value *YOne =
Builder.CreateICmp(Pred,
Y, ConstantInt::get(Ty, 1));
2817 if (!Cmp.isEquality() && DivIsSigned != Cmp.isSigned())
2836 bool ProdOV = (DivIsSigned ? Prod.
sdiv(*C2) : Prod.
udiv(*C2)) !=
C;
2849 int LoOverflow = 0, HiOverflow = 0;
2850 APInt LoBound, HiBound;
2855 HiOverflow = LoOverflow = ProdOV;
2864 LoBound = -(RangeSize - 1);
2865 HiBound = RangeSize;
2866 }
else if (
C.isStrictlyPositive()) {
2868 HiOverflow = LoOverflow = ProdOV;
2874 LoOverflow = HiOverflow = ProdOV ? -1 : 0;
2876 APInt DivNeg = -RangeSize;
2877 LoOverflow =
addWithOverflow(LoBound, HiBound, DivNeg,
true) ? -1 : 0;
2885 LoBound = RangeSize + 1;
2886 HiBound = -RangeSize;
2887 if (HiBound == *C2) {
2891 }
else if (
C.isStrictlyPositive()) {
2894 HiOverflow = LoOverflow = ProdOV ? -1 : 0;
2900 LoOverflow = HiOverflow = ProdOV;
2913 if (LoOverflow && HiOverflow)
2917 X, ConstantInt::get(Ty, LoBound));
2920 X, ConstantInt::get(Ty, HiBound));
2924 if (LoOverflow && HiOverflow)
2928 X, ConstantInt::get(Ty, LoBound));
2931 X, ConstantInt::get(Ty, HiBound));
2936 if (LoOverflow == +1)
2938 if (LoOverflow == -1)
2940 return new ICmpInst(Pred,
X, ConstantInt::get(Ty, LoBound));
2943 if (HiOverflow == +1)
2945 if (HiOverflow == -1)
2975 bool HasNSW =
Sub->hasNoSignedWrap();
2976 bool HasNUW =
Sub->hasNoUnsignedWrap();
2978 ((Cmp.isUnsigned() && HasNUW) || (Cmp.isSigned() && HasNSW)) &&
2980 return new ICmpInst(SwappedPred,
Y, ConstantInt::get(Ty, SubResult));
2988 if (Cmp.isEquality() &&
C.isZero() &&
2989 none_of((
Sub->users()), [](
const User *U) { return isa<PHINode>(U); }))
2997 if (!
Sub->hasOneUse())
3000 if (
Sub->hasNoSignedWrap()) {
3024 (*C2 & (
C - 1)) == (
C - 1))
3037 return new ICmpInst(SwappedPred,
Add, ConstantInt::get(Ty, ~
C));
3043 auto FoldConstant = [&](
bool Val) {
3044 Constant *Res = Val ? Builder.getTrue() : Builder.getFalse();
3051 switch (Table.to_ulong()) {
3053 return FoldConstant(
false);
3055 return HasOneUse ? Builder.CreateNot(Builder.CreateOr(Op0, Op1)) :
nullptr;
3057 return HasOneUse ? Builder.CreateAnd(Builder.CreateNot(Op0), Op1) :
nullptr;
3059 return Builder.CreateNot(Op0);
3061 return HasOneUse ? Builder.CreateAnd(Op0, Builder.CreateNot(Op1)) :
nullptr;
3063 return Builder.CreateNot(Op1);
3065 return Builder.CreateXor(Op0, Op1);
3067 return HasOneUse ? Builder.CreateNot(Builder.CreateAnd(Op0, Op1)) :
nullptr;
3069 return Builder.CreateAnd(Op0, Op1);
3071 return HasOneUse ? Builder.CreateNot(Builder.CreateXor(Op0, Op1)) :
nullptr;
3075 return HasOneUse ? Builder.CreateOr(Builder.CreateNot(Op0), Op1) :
nullptr;
3079 return HasOneUse ? Builder.CreateOr(Op0, Builder.CreateNot(Op1)) :
nullptr;
3081 return Builder.CreateOr(Op0, Op1);
3083 return FoldConstant(
true);
3098 Cmp.getType() !=
A->getType() || Cmp.getType() !=
B->getType())
3101 std::bitset<4> Table;
3102 auto ComputeTable = [&](
bool First,
bool Second) -> std::optional<bool> {
3106 auto *Val = Res->getType()->isVectorTy() ? Res->getSplatValue() : Res;
3110 return std::nullopt;
3113 for (
unsigned I = 0;
I < 4; ++
I) {
3114 bool First = (
I >> 1) & 1;
3115 bool Second =
I & 1;
3116 if (
auto Res = ComputeTable(
First, Second))
3138 const APInt *ShAmtC;
3146 return new ICmpInst(Pred,
A, ConstantInt::get(
A->getType(),
C));
3158 if (
Add->hasNoUnsignedWrap() &&
3161 APInt NewC =
C.usub_ov(*C2, Overflow);
3165 return new ICmpInst(Pred,
X, ConstantInt::get(Ty, NewC));
3170 if (
Add->hasNoSignedWrap() &&
3173 APInt NewC =
C.ssub_ov(*C2, Overflow);
3177 return new ICmpInst(ChosenPred,
X, ConstantInt::get(Ty, NewC));
3181 C.isNonNegative() && (
C - *C2).isNonNegative() &&
3184 .isAllNonNegative())
3186 ConstantInt::get(Ty,
C - *C2));
3191 if (Cmp.isSigned()) {
3192 if (
Lower.isSignMask())
3194 if (
Upper.isSignMask())
3197 if (
Lower.isMinValue())
3199 if (
Upper.isMinValue())
3232 if (!
Add->hasOneUse())
3247 ConstantInt::get(Ty,
C * 2));
3261 Builder.CreateAdd(
X, ConstantInt::get(Ty, *C2 -
C - 1)),
3262 ConstantInt::get(Ty, ~
C));
3267 Type *NewCmpTy = V->getType();
3269 if (shouldChangeType(Ty, NewCmpTy)) {
3280 :
Builder.CreateAdd(V, ConstantInt::get(NewCmpTy, EquivOffset)),
3281 ConstantInt::get(NewCmpTy, EquivInt));
3303 Value *EqualVal =
SI->getTrueValue();
3304 Value *UnequalVal =
SI->getFalseValue();
3327 auto FlippedStrictness =
3329 if (!FlippedStrictness)
3332 "basic correctness failure");
3333 RHS2 = FlippedStrictness->second;
3345 assert(
C &&
"Cmp RHS should be a constant int!");
3351 Value *OrigLHS, *OrigRHS;
3352 ConstantInt *C1LessThan, *C2Equal, *C3GreaterThan;
3353 if (Cmp.hasOneUse() &&
3356 assert(C1LessThan && C2Equal && C3GreaterThan);
3359 C1LessThan->
getValue(),
C->getValue(), Cmp.getPredicate());
3361 Cmp.getPredicate());
3363 C3GreaterThan->
getValue(),
C->getValue(), Cmp.getPredicate());
3374 if (TrueWhenLessThan)
3380 if (TrueWhenGreaterThan)
3395 Value *Op1 = Cmp.getOperand(1);
3396 Value *BCSrcOp = Bitcast->getOperand(0);
3397 Type *SrcType = Bitcast->getSrcTy();
3398 Type *DstType = Bitcast->getType();
3402 if (SrcType->isVectorTy() == DstType->isVectorTy() &&
3403 SrcType->getScalarSizeInBits() == DstType->getScalarSizeInBits()) {
3418 return new ICmpInst(Pred,
X, ConstantInt::get(
X->getType(), 1));
3445 Type *XType =
X->getType();
3448 if (!(XType->
isPPC_FP128Ty() || SrcType->isPPC_FP128Ty())) {
3463 Type *FPType = SrcType->getScalarType();
3464 if (!Cmp.getParent()->getParent()->hasFnAttribute(
3465 Attribute::NoImplicitFloat) &&
3466 Cmp.isEquality() && FPType->isIEEELikeFPTy()) {
3472 Builder.createIsFPClass(BCSrcOp, Mask));
3479 if (!
match(Cmp.getOperand(1),
m_APInt(
C)) || !DstType->isIntegerTy() ||
3480 !SrcType->isIntOrIntVectorTy())
3490 if (Cmp.isEquality() &&
C->isAllOnes() && Bitcast->hasOneUse()) {
3491 if (
Value *NotBCSrcOp =
3493 Value *Cast =
Builder.CreateBitCast(NotBCSrcOp, DstType);
3502 if (Cmp.isEquality() &&
C->isZero() && Bitcast->hasOneUse() &&
3505 Type *NewType =
Builder.getIntNTy(VecTy->getPrimitiveSizeInBits());
3525 if (
C->isSplat(EltTy->getBitWidth())) {
3532 Value *Extract =
Builder.CreateExtractElement(Vec, Elem);
3533 Value *NewC = ConstantInt::get(EltTy,
C->trunc(EltTy->getBitWidth()));
3534 return new ICmpInst(Pred, Extract, NewC);
3570 Value *Cmp0 = Cmp.getOperand(0);
3572 if (
C->isZero() && Cmp.isEquality() && Cmp0->
hasOneUse() &&
3579 return new ICmpInst(Cmp.getPredicate(),
X,
Y);
3594 if (!Cmp.isEquality())
3603 case Instruction::SRem:
3614 case Instruction::Add: {
3621 }
else if (
C.isZero()) {
3624 if (
Value *NegVal = dyn_castNegVal(BOp1))
3625 return new ICmpInst(Pred, BOp0, NegVal);
3626 if (
Value *NegVal = dyn_castNegVal(BOp0))
3627 return new ICmpInst(Pred, NegVal, BOp1);
3636 return new ICmpInst(Pred, BOp0, Neg);
3641 case Instruction::Xor:
3646 }
else if (
C.isZero()) {
3648 return new ICmpInst(Pred, BOp0, BOp1);
3651 case Instruction::Or: {
3672 Cond->getType() == Cmp.getType()) {
3710 case Instruction::UDiv:
3711 case Instruction::SDiv:
3721 return new ICmpInst(Pred, BOp0, BOp1);
3724 Instruction::Mul, BO->
getOpcode() == Instruction::SDiv, BOp1,
3725 Cmp.getOperand(1), BO);
3729 return new ICmpInst(Pred, YC, BOp0);
3733 if (BO->
getOpcode() == Instruction::UDiv &&
C.isZero()) {
3736 return new ICmpInst(NewPred, BOp1, BOp0);
3750 "Non-ctpop intrin in ctpop fold");
3785 Type *Ty =
II->getType();
3789 switch (
II->getIntrinsicID()) {
3790 case Intrinsic::abs:
3793 if (
C.isZero() ||
C.isMinSignedValue())
3794 return new ICmpInst(Pred,
II->getArgOperand(0), ConstantInt::get(Ty,
C));
3797 case Intrinsic::bswap:
3799 return new ICmpInst(Pred,
II->getArgOperand(0),
3800 ConstantInt::get(Ty,
C.byteSwap()));
3802 case Intrinsic::bitreverse:
3804 return new ICmpInst(Pred,
II->getArgOperand(0),
3805 ConstantInt::get(Ty,
C.reverseBits()));
3807 case Intrinsic::ctlz:
3808 case Intrinsic::cttz: {
3811 return new ICmpInst(Pred,
II->getArgOperand(0),
3817 unsigned Num =
C.getLimitedValue(
BitWidth);
3819 bool IsTrailing =
II->getIntrinsicID() == Intrinsic::cttz;
3822 APInt Mask2 = IsTrailing
3826 ConstantInt::get(Ty, Mask2));
3831 case Intrinsic::ctpop: {
3834 bool IsZero =
C.isZero();
3836 return new ICmpInst(Pred,
II->getArgOperand(0),
3843 case Intrinsic::fshl:
3844 case Intrinsic::fshr:
3845 if (
II->getArgOperand(0) ==
II->getArgOperand(1)) {
3846 const APInt *RotAmtC;
3850 return new ICmpInst(Pred,
II->getArgOperand(0),
3851 II->getIntrinsicID() == Intrinsic::fshl
3852 ? ConstantInt::get(Ty,
C.rotr(*RotAmtC))
3853 : ConstantInt::get(Ty,
C.rotl(*RotAmtC)));
3857 case Intrinsic::umax:
3858 case Intrinsic::uadd_sat: {
3861 if (
C.isZero() &&
II->hasOneUse()) {
3868 case Intrinsic::ssub_sat:
3873 if (
C.isZero() &&
II->getType()->getScalarSizeInBits() > 1)
3874 return new ICmpInst(Pred,
II->getArgOperand(0),
II->getArgOperand(1));
3876 case Intrinsic::usub_sat: {
3881 return new ICmpInst(NewPred,
II->getArgOperand(0),
II->getArgOperand(1));
3896 assert(Cmp.isEquality());
3899 Value *Op0 = Cmp.getOperand(0);
3900 Value *Op1 = Cmp.getOperand(1);
3903 if (!IIOp0 || !IIOp1 || IIOp0->getIntrinsicID() != IIOp1->getIntrinsicID())
3906 switch (IIOp0->getIntrinsicID()) {
3907 case Intrinsic::bswap:
3908 case Intrinsic::bitreverse:
3911 return new ICmpInst(Pred, IIOp0->getOperand(0), IIOp1->getOperand(0));
3912 case Intrinsic::fshl:
3913 case Intrinsic::fshr: {
3916 if (IIOp0->getOperand(0) != IIOp0->getOperand(1))
3918 if (IIOp1->getOperand(0) != IIOp1->getOperand(1))
3920 if (IIOp0->getOperand(2) == IIOp1->getOperand(2))
3921 return new ICmpInst(Pred, IIOp0->getOperand(0), IIOp1->getOperand(0));
3927 unsigned OneUses = IIOp0->hasOneUse() + IIOp1->hasOneUse();
3932 Builder.CreateSub(IIOp0->getOperand(2), IIOp1->getOperand(2));
3933 Value *CombinedRotate = Builder.CreateIntrinsic(
3934 Op0->
getType(), IIOp0->getIntrinsicID(),
3935 {IIOp0->getOperand(0), IIOp0->getOperand(0), SubAmt});
3936 return new ICmpInst(Pred, IIOp1->getOperand(0), CombinedRotate);
3954 switch (
II->getIntrinsicID()) {
3957 case Intrinsic::fshl:
3958 case Intrinsic::fshr:
3959 if (Cmp.isEquality() &&
II->getArgOperand(0) ==
II->getArgOperand(1)) {
3961 if (
C.isZero() ||
C.isAllOnes())
3962 return new ICmpInst(Pred,
II->getArgOperand(0), Cmp.getOperand(1));
3976 case Instruction::Xor:
3980 case Instruction::And:
3984 case Instruction::Or:
3988 case Instruction::Mul:
3992 case Instruction::Shl:
3996 case Instruction::LShr:
3997 case Instruction::AShr:
4001 case Instruction::SRem:
4005 case Instruction::UDiv:
4009 case Instruction::SDiv:
4013 case Instruction::Sub:
4017 case Instruction::Add:
4041 if (!
II->hasOneUse())
4057 Value *Op0 =
II->getOperand(0);
4058 Value *Op1 =
II->getOperand(1);
4067 switch (
II->getIntrinsicID()) {
4070 "This function only works with usub_sat and uadd_sat for now!");
4071 case Intrinsic::uadd_sat:
4074 case Intrinsic::usub_sat:
4084 II->getBinaryOp(), *COp1,
II->getNoWrapKind());
4091 if (
II->getBinaryOp() == Instruction::Add)
4097 SatValCheck ? Instruction::BinaryOps::Or : Instruction::BinaryOps::And;
4099 std::optional<ConstantRange> Combination;
4100 if (CombiningOp == Instruction::BinaryOps::Or)
4112 Combination->getEquivalentICmp(EquivPred, EquivInt, EquivOffset);
4116 Builder.CreateAdd(Op0, ConstantInt::get(Op1->
getType(), EquivOffset)),
4117 ConstantInt::get(Op1->
getType(), EquivInt));
4124 std::optional<ICmpInst::Predicate> NewPredicate = std::nullopt;
4129 NewPredicate = Pred;
4133 else if (
C.isAllOnes())
4141 else if (
C.isZero())
4158 if (!
C.isZero() && !
C.isAllOnes())
4169 if (
I->getIntrinsicID() == Intrinsic::scmp)
4183 switch (
II->getIntrinsicID()) {
4186 case Intrinsic::uadd_sat:
4187 case Intrinsic::usub_sat:
4192 case Intrinsic::ctpop: {
4197 case Intrinsic::scmp:
4198 case Intrinsic::ucmp:
4204 if (Cmp.isEquality())
4207 Type *Ty =
II->getType();
4209 switch (
II->getIntrinsicID()) {
4210 case Intrinsic::ctpop: {
4222 case Intrinsic::ctlz: {
4225 unsigned Num =
C.getLimitedValue();
4228 II->getArgOperand(0), ConstantInt::get(Ty, Limit));
4233 unsigned Num =
C.getLimitedValue();
4236 II->getArgOperand(0), ConstantInt::get(Ty, Limit));
4240 case Intrinsic::cttz: {
4242 if (!
II->hasOneUse())
4249 Builder.CreateAnd(
II->getArgOperand(0), Mask),
4257 Builder.CreateAnd(
II->getArgOperand(0), Mask),
4262 case Intrinsic::ssub_sat:
4269 return new ICmpInst(Pred,
II->getArgOperand(0),
II->getArgOperand(1));
4273 II->getArgOperand(1));
4277 II->getArgOperand(1));
4280 case Intrinsic::abs: {
4281 if (!
II->hasOneUse())
4285 bool IsIntMinPoison =
4292 Builder.CreateAdd(
X, ConstantInt::get(Ty,
C)),
4293 ConstantInt::get(Ty, 2 *
C));
4300 Builder.CreateAdd(
X, ConstantInt::get(Ty,
C - 1)),
4301 ConstantInt::get(Ty, 2 * (
C - 1)));
4315 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
4322 case Instruction::IntToPtr:
4331 case Instruction::Load:
4348 auto SimplifyOp = [&](
Value *
Op,
bool SelectCondIsTrue) ->
Value * {
4352 SI->getCondition(), Pred,
Op, RHS,
DL, SelectCondIsTrue))
4353 return ConstantInt::get(
I.getType(), *Impl);
4358 Value *Op1 = SimplifyOp(
SI->getOperand(1),
true);
4362 Value *Op2 = SimplifyOp(
SI->getOperand(2),
false);
4366 auto Simplifies = [&](
Value *
Op,
unsigned Idx) {
4381 bool Transform =
false;
4384 else if (Simplifies(Op1, 1) || Simplifies(Op2, 2)) {
4386 if (
SI->hasOneUse())
4389 else if (CI && !CI->
isZero())
4397 Op1 =
Builder.CreateICmp(Pred,
SI->getOperand(1), RHS,
I.getName());
4399 Op2 =
Builder.CreateICmp(Pred,
SI->getOperand(2), RHS,
I.getName());
4409 unsigned Depth = 0) {
4412 if (V->getType()->getScalarSizeInBits() == 1)
4420 switch (
I->getOpcode()) {
4421 case Instruction::ZExt:
4424 case Instruction::SExt:
4428 case Instruction::And:
4429 case Instruction::Or:
4436 case Instruction::Xor:
4446 case Instruction::Select:
4450 case Instruction::Shl:
4453 case Instruction::LShr:
4456 case Instruction::AShr:
4460 case Instruction::Add:
4466 case Instruction::Sub:
4472 case Instruction::Call: {
4474 switch (
II->getIntrinsicID()) {
4477 case Intrinsic::umax:
4478 case Intrinsic::smax:
4479 case Intrinsic::umin:
4480 case Intrinsic::smin:
4485 case Intrinsic::bitreverse:
4575 auto IsLowBitMask = [&]() {
4593 auto Check = [&]() {
4611 auto Check = [&]() {
4630 if (!IsLowBitMask())
4649 const APInt *C0, *C1;
4666 const APInt &MaskedBits = *C0;
4667 assert(MaskedBits != 0 &&
"shift by zero should be folded away already.");
4688 auto *XType =
X->getType();
4689 const unsigned XBitWidth = XType->getScalarSizeInBits();
4691 assert(
BitWidth.ugt(MaskedBits) &&
"shifts should leave some bits untouched");
4704 Value *T0 = Builder.CreateAdd(
X, ConstantInt::get(XType, AddCst));
4706 Value *
T1 = Builder.CreateICmp(DstPred, T0, ConstantInt::get(XType, ICmpCst));
4722 !
I.getOperand(0)->hasOneUse())
4747 assert(NarrowestTy ==
I.getOperand(0)->getType() &&
4748 "We did not look past any shifts while matching XShift though.");
4749 bool HadTrunc = WidestTy !=
I.getOperand(0)->getType();
4756 auto XShiftOpcode = XShift->
getOpcode();
4757 if (XShiftOpcode == YShift->
getOpcode())
4760 Value *
X, *XShAmt, *
Y, *YShAmt;
4769 if (!
match(
I.getOperand(0),
4795 unsigned MaximalPossibleTotalShiftAmount =
4798 APInt MaximalRepresentableShiftAmount =
4800 if (MaximalRepresentableShiftAmount.
ult(MaximalPossibleTotalShiftAmount))
4809 if (NewShAmt->getType() != WidestTy) {
4819 if (!
match(NewShAmt,
4821 APInt(WidestBitWidth, WidestBitWidth))))
4826 auto CanFold = [NewShAmt, WidestBitWidth, NarrowestShift, SQ,
4832 ? NewShAmt->getSplatValue()
4835 if (NewShAmtSplat &&
4845 unsigned MaxActiveBits = Known.
getBitWidth() - MinLeadZero;
4846 if (MaxActiveBits <= 1)
4856 unsigned MaxActiveBits = Known.
getBitWidth() - MinLeadZero;
4857 if (MaxActiveBits <= 1)
4860 if (NewShAmtSplat) {
4863 if (AdjNewShAmt.
ule(MinLeadZero))
4874 X = Builder.CreateZExt(
X, WidestTy);
4875 Y = Builder.CreateZExt(
Y, WidestTy);
4877 Value *T0 = XShiftOpcode == Instruction::BinaryOps::LShr
4878 ? Builder.CreateLShr(
X, NewShAmt)
4879 : Builder.CreateShl(
X, NewShAmt);
4880 Value *
T1 = Builder.CreateAnd(T0,
Y);
4881 return Builder.CreateICmp(
I.getPredicate(),
T1,
4899 if (!
I.isEquality() &&
4909 NeedNegation =
false;
4912 NeedNegation =
true;
4918 if (
I.isEquality() &&
4933 bool MulHadOtherUses =
Mul && !
Mul->hasOneUse();
4934 if (MulHadOtherUses)
4938 Div->
getOpcode() == Instruction::UDiv ? Intrinsic::umul_with_overflow
4939 : Intrinsic::smul_with_overflow,
4940 X->getType(), {X, Y},
nullptr,
"mul");
4945 if (MulHadOtherUses)
4950 Res =
Builder.CreateNot(Res,
"mul.not.ov");
4954 if (MulHadOtherUses)
4980 Type *Ty =
X->getType();
4984 Value *
And = Builder.CreateAnd(
X, MaxSignedVal);
4994 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1), *
A;
5056 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1), *
A;
5091 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1), *
A;
5107 return new ICmpInst(PredOut, Op0, Op1);
5127 return new ICmpInst(NewPred, Op0, Const);
5139 if (!
C.isPowerOf2())
5152 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
5220 return new ICmpInst(NewPred, Op1, Zero);
5229 return new ICmpInst(NewPred, Op0, Zero);
5233 bool NoOp0WrapProblem =
false, NoOp1WrapProblem =
false;
5234 bool Op0HasNUW =
false, Op1HasNUW =
false;
5235 bool Op0HasNSW =
false, Op1HasNSW =
false;
5239 bool &HasNSW,
bool &HasNUW) ->
bool {
5246 }
else if (BO.
getOpcode() == Instruction::Or) {
5254 Value *
A =
nullptr, *
B =
nullptr, *
C =
nullptr, *
D =
nullptr;
5258 NoOp0WrapProblem = hasNoWrapProblem(*BO0, Pred, Op0HasNSW, Op0HasNUW);
5262 NoOp1WrapProblem = hasNoWrapProblem(*BO1, Pred, Op1HasNSW, Op1HasNUW);
5267 if ((
A == Op1 ||
B == Op1) && NoOp0WrapProblem)
5273 if ((
C == Op0 ||
D == Op0) && NoOp1WrapProblem)
5278 if (
A &&
C && (
A ==
C ||
A ==
D ||
B ==
C ||
B ==
D) && NoOp0WrapProblem &&
5286 }
else if (
A ==
D) {
5290 }
else if (
B ==
C) {
5307 bool IsNegative) ->
bool {
5308 const APInt *OffsetC;
5320 if (!
C.isStrictlyPositive())
5341 if (
A && NoOp0WrapProblem &&
5342 ShareCommonDivisor(
A, Op1,
B,
5353 if (
C && NoOp1WrapProblem &&
5354 ShareCommonDivisor(Op0,
C,
D,
5367 if (
A &&
C && NoOp0WrapProblem && NoOp1WrapProblem &&
5369 const APInt *AP1, *AP2;
5377 if (AP1Abs.
uge(AP2Abs)) {
5378 APInt Diff = *AP1 - *AP2;
5381 A, C3,
"", Op0HasNUW && Diff.
ule(*AP1), Op0HasNSW);
5384 APInt Diff = *AP2 - *AP1;
5387 C, C3,
"", Op1HasNUW && Diff.
ule(*AP2), Op1HasNSW);
5406 if (BO0 && BO0->
getOpcode() == Instruction::Sub) {
5410 if (BO1 && BO1->
getOpcode() == Instruction::Sub) {
5416 if (
A == Op1 && NoOp0WrapProblem)
5419 if (
C == Op0 && NoOp1WrapProblem)
5439 if (
B &&
D &&
B ==
D && NoOp0WrapProblem && NoOp1WrapProblem)
5443 if (
A &&
C &&
A ==
C && NoOp0WrapProblem && NoOp1WrapProblem)
5451 if (RHSC->isNotMinSignedValue())
5452 return new ICmpInst(
I.getSwappedPredicate(),
X,
5470 if (Op0HasNSW && Op1HasNSW) {
5477 SQ.getWithInstruction(&
I));
5482 SQ.getWithInstruction(&
I));
5483 if (GreaterThan &&
match(GreaterThan,
m_One()))
5490 if (((Op0HasNSW && Op1HasNSW) || (Op0HasNUW && Op1HasNUW)) &&
5502 if (NonZero && BO0 && BO1 && Op0HasNSW && Op1HasNSW)
5509 if (NonZero && BO0 && BO1 && Op0HasNUW && Op1HasNUW)
5520 else if (BO1 && BO1->
getOpcode() == Instruction::SRem &&
5550 case Instruction::Add:
5551 case Instruction::Sub:
5552 case Instruction::Xor: {
5559 if (
C->isSignMask()) {
5565 if (BO0->
getOpcode() == Instruction::Xor &&
C->isMaxSignedValue()) {
5567 NewPred =
I.getSwappedPredicate(NewPred);
5573 case Instruction::Mul: {
5574 if (!
I.isEquality())
5582 if (
unsigned TZs =
C->countr_zero()) {
5588 return new ICmpInst(Pred, And1, And2);
5593 case Instruction::UDiv:
5594 case Instruction::LShr:
5599 case Instruction::SDiv:
5605 case Instruction::AShr:
5610 case Instruction::Shl: {
5611 bool NUW = Op0HasNUW && Op1HasNUW;
5612 bool NSW = Op0HasNSW && Op1HasNSW;
5615 if (!NSW &&
I.isSigned())
5679 auto IsCondKnownTrue = [](
Value *Val) -> std::optional<bool> {
5681 return std::nullopt;
5686 return std::nullopt;
5692 Pred = Pred.dropSameSign();
5695 if (!CmpXZ.has_value() && !CmpYZ.has_value())
5697 if (!CmpXZ.has_value()) {
5703 if (CmpYZ.has_value())
5727 if (!MinMaxCmpXZ.has_value()) {
5735 if (!MinMaxCmpXZ.has_value())
5751 return FoldIntoCmpYZ();
5778 return FoldIntoCmpYZ();
5787 return FoldIntoCmpYZ();
5819 const APInt *
Lo =
nullptr, *
Hi =
nullptr;
5842 I,
Builder.CreateICmp(Pred,
X, ConstantInt::get(
X->getType(),
C)));
5848 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
5852 if (
I.isEquality()) {
5887 Type *Ty =
A->getType();
5888 CallInst *CtPop = Builder.CreateUnaryIntrinsic(Intrinsic::ctpop,
A);
5890 ConstantInt::get(Ty, 2))
5892 ConstantInt::get(Ty, 1));
5899using OffsetOp = std::pair<Instruction::BinaryOps, Value *>;
5901 bool AllowRecursion) {
5907 case Instruction::Add:
5908 Offsets.emplace_back(Instruction::Sub, Inst->
getOperand(1));
5909 Offsets.emplace_back(Instruction::Sub, Inst->
getOperand(0));
5911 case Instruction::Sub:
5912 Offsets.emplace_back(Instruction::Add, Inst->
getOperand(1));
5914 case Instruction::Xor:
5915 Offsets.emplace_back(Instruction::Xor, Inst->
getOperand(1));
5916 Offsets.emplace_back(Instruction::Xor, Inst->
getOperand(0));
5918 case Instruction::Shl:
5920 Offsets.emplace_back(Instruction::AShr, Inst->
getOperand(1));
5922 Offsets.emplace_back(Instruction::LShr, Inst->
getOperand(1));
5924 case Instruction::Select:
5925 if (AllowRecursion) {
5960 return Builder.CreateSelect(
5973 assert(
I.isEquality() &&
"Expected an equality icmp");
5974 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
5985 case Instruction::AShr: {
5986 const APInt *CV, *CRHS;
5988 CV->
ashr(*CRHS).
shl(*CRHS) == *CV) &&
5994 case Instruction::LShr: {
5995 const APInt *CV, *CRHS;
5997 CV->
lshr(*CRHS).
shl(*CRHS) == *CV) &&
6016 auto ApplyOffset = [&](
Value *V,
unsigned BinOpc,
6019 if (!Sel->hasOneUse())
6021 Value *TrueVal = ApplyOffsetImpl(Sel->getTrueValue(), BinOpc,
RHS);
6024 Value *FalseVal = ApplyOffsetImpl(Sel->getFalseValue(), BinOpc,
RHS);
6029 if (
Value *Simplified = ApplyOffsetImpl(V, BinOpc,
RHS))
6034 for (
auto [BinOp,
RHS] : OffsetOps) {
6035 auto BinOpc =
static_cast<unsigned>(BinOp);
6037 auto Op0Result = ApplyOffset(Op0, BinOpc,
RHS);
6038 if (!Op0Result.isValid())
6040 auto Op1Result = ApplyOffset(Op1, BinOpc,
RHS);
6041 if (!Op1Result.isValid())
6044 Value *NewLHS = Op0Result.materialize(Builder);
6045 Value *NewRHS = Op1Result.materialize(Builder);
6046 return new ICmpInst(
I.getPredicate(), NewLHS, NewRHS);
6053 if (!
I.isEquality())
6056 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
6060 if (
A == Op1 ||
B == Op1) {
6061 Value *OtherVal =
A == Op1 ?
B :
A;
6089 Value *OtherVal =
A == Op0 ?
B :
A;
6096 Value *
X =
nullptr, *
Y =
nullptr, *Z =
nullptr;
6102 }
else if (
A ==
D) {
6106 }
else if (
B ==
C) {
6110 }
else if (
B ==
D) {
6120 const APInt *C0, *C1;
6122 (*C0 ^ *C1).isNegatedPowerOf2();
6128 int(Op0->
hasOneUse()) + int(Op1->hasOneUse()) +
6130 if (XorIsNegP2 || UseCnt >= 2) {
6133 Op1 =
Builder.CreateAnd(Op1, Z);
6153 (Op0->
hasOneUse() || Op1->hasOneUse())) {
6158 MaskC->
countr_one() ==
A->getType()->getScalarSizeInBits())
6164 const APInt *AP1, *AP2;
6173 if (ShAmt < TypeBits && ShAmt != 0) {
6178 return new ICmpInst(NewPred,
Xor, ConstantInt::get(
A->getType(), CmpVal));
6188 if (ShAmt < TypeBits && ShAmt != 0) {
6208 if (ShAmt < ASize) {
6231 A->getType()->getScalarSizeInBits() ==
BitWidth * 2 &&
6232 (
I.getOperand(0)->hasOneUse() ||
I.getOperand(1)->hasOneUse())) {
6237 Add, ConstantInt::get(
A->getType(),
C.shl(1)));
6264 Builder.CreateIntrinsic(Op0->
getType(), Intrinsic::fshl, {A, A, B}));
6279 std::optional<bool> IsZero = std::nullopt;
6321 Constant *
C = ConstantInt::get(Res->X->getType(), Res->C);
6325 unsigned SrcBits =
X->getType()->getScalarSizeInBits();
6327 if (
II->getIntrinsicID() == Intrinsic::cttz ||
6328 II->getIntrinsicID() == Intrinsic::ctlz) {
6329 unsigned MaxRet = SrcBits;
6355 bool IsSignedExt = CastOp0->getOpcode() == Instruction::SExt;
6356 bool IsSignedCmp = ICmp.
isSigned();
6364 if (IsZext0 != IsZext1) {
6369 if (ICmp.
isEquality() &&
X->getType()->isIntOrIntVectorTy(1) &&
6370 Y->getType()->isIntOrIntVectorTy(1))
6380 bool IsNonNeg0 = NonNegInst0 && NonNegInst0->hasNonNeg();
6381 bool IsNonNeg1 = NonNegInst1 && NonNegInst1->hasNonNeg();
6383 if ((IsZext0 && IsNonNeg0) || (IsZext1 && IsNonNeg1))
6390 Type *XTy =
X->getType(), *YTy =
Y->getType();
6397 IsSignedExt ? Instruction::SExt : Instruction::ZExt;
6399 X =
Builder.CreateCast(CastOpcode,
X, YTy);
6401 Y =
Builder.CreateCast(CastOpcode,
Y, XTy);
6413 if (IsSignedCmp && IsSignedExt)
6426 Type *SrcTy = CastOp0->getSrcTy();
6434 if (IsSignedExt && IsSignedCmp)
6465 Value *SimplifiedOp0 = simplifyIntToPtrRoundTripCast(ICmp.
getOperand(0));
6466 Value *SimplifiedOp1 = simplifyIntToPtrRoundTripCast(ICmp.
getOperand(1));
6467 if (SimplifiedOp0 || SimplifiedOp1)
6469 SimplifiedOp0 ? SimplifiedOp0 : ICmp.
getOperand(0),
6470 SimplifiedOp1 ? SimplifiedOp1 : ICmp.
getOperand(1));
6478 Value *Op0Src = CastOp0->getOperand(0);
6479 Type *SrcTy = CastOp0->getSrcTy();
6480 Type *DestTy = CastOp0->getDestTy();
6484 auto CompatibleSizes = [&](
Type *PtrTy,
Type *IntTy) {
6489 return DL.getPointerTypeSizeInBits(PtrTy) == IntTy->getIntegerBitWidth();
6491 if (CastOp0->getOpcode() == Instruction::PtrToInt &&
6492 CompatibleSizes(SrcTy, DestTy)) {
6493 Value *NewOp1 =
nullptr;
6495 Value *PtrSrc = PtrToIntOp1->getOperand(0);
6497 NewOp1 = PtrToIntOp1->getOperand(0);
6507 if (CastOp0->getOpcode() == Instruction::IntToPtr &&
6508 CompatibleSizes(DestTy, SrcTy)) {
6509 Value *NewOp1 =
nullptr;
6511 Value *IntSrc = IntToPtrOp1->getOperand(0);
6513 NewOp1 = IntToPtrOp1->getOperand(0);
6533 case Instruction::Add:
6534 case Instruction::Sub:
6536 case Instruction::Mul:
6537 return !(
RHS->getType()->isIntOrIntVectorTy(1) && IsSigned) &&
6549 case Instruction::Add:
6554 case Instruction::Sub:
6559 case Instruction::Mul:
6568 bool IsSigned,
Value *LHS,
6579 Builder.SetInsertPoint(&OrigI);
6596 Result = Builder.CreateBinOp(BinaryOp,
LHS,
RHS);
6597 Result->takeName(&OrigI);
6601 Result = Builder.CreateBinOp(BinaryOp,
LHS,
RHS);
6602 Result->takeName(&OrigI);
6606 Inst->setHasNoSignedWrap();
6608 Inst->setHasNoUnsignedWrap();
6631 const APInt *OtherVal,
6641 assert(MulInstr->getOpcode() == Instruction::Mul);
6645 assert(
LHS->getOpcode() == Instruction::ZExt);
6646 assert(
RHS->getOpcode() == Instruction::ZExt);
6650 Type *TyA =
A->getType(), *TyB =
B->getType();
6652 WidthB = TyB->getPrimitiveSizeInBits();
6655 if (WidthB > WidthA) {
6672 unsigned TruncWidth = TI->getType()->getPrimitiveSizeInBits();
6673 if (TruncWidth > MulWidth)
6677 if (BO->getOpcode() != Instruction::And)
6680 const APInt &CVal = CI->getValue();
6696 switch (
I.getPredicate()) {
6703 if (MaxVal.
eq(*OtherVal))
6713 if (MaxVal.
eq(*OtherVal))
6727 if (WidthA < MulWidth)
6728 MulA = Builder.CreateZExt(
A, MulType);
6729 if (WidthB < MulWidth)
6730 MulB = Builder.CreateZExt(
B, MulType);
6732 Builder.CreateIntrinsic(Intrinsic::umul_with_overflow, MulType,
6733 {MulA, MulB},
nullptr,
"umul");
6740 Value *
Mul = Builder.CreateExtractValue(
Call, 0,
"umul.value");
6745 if (TI->getType()->getPrimitiveSizeInBits() == MulWidth)
6750 assert(BO->getOpcode() == Instruction::And);
6754 Value *ShortAnd = Builder.CreateAnd(
Mul, ShortMask);
6755 Value *Zext = Builder.CreateZExt(ShortAnd, BO->
getType());
6767 Value *Res = Builder.CreateExtractValue(
Call, 1);
6788 switch (
I.getPredicate()) {
6819 assert(DI && UI &&
"Instruction not defined\n");
6831 if (Usr != UI && !
DT.dominates(DB, Usr->getParent()))
6846 if (!IC || (IC->getOperand(0) !=
SI && IC->getOperand(1) !=
SI))
6893 const unsigned SIOpd) {
6894 assert((SIOpd == 1 || SIOpd == 2) &&
"Invalid select operand!");
6896 BasicBlock *Succ =
SI->getParent()->getTerminator()->getSuccessor(1);
6910 SI->replaceUsesOutsideBlock(
SI->getOperand(SIOpd),
SI->getParent());
6920 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
6925 unsigned BitWidth = Ty->isIntOrIntVectorTy()
6926 ? Ty->getScalarSizeInBits()
6927 :
DL.getPointerTypeSizeInBits(Ty->getScalarType());
6980 if (!Cmp.hasOneUse())
6989 if (!isMinMaxCmp(
I)) {
6994 if (Op1Min == Op0Max)
6999 if (*CmpC == Op0Min + 1)
7001 ConstantInt::get(Op1->getType(), *CmpC - 1));
7011 if (Op1Max == Op0Min)
7016 if (*CmpC == Op0Max - 1)
7018 ConstantInt::get(Op1->getType(), *CmpC + 1));
7028 if (Op1Min == Op0Max)
7032 if (*CmpC == Op0Min + 1)
7034 ConstantInt::get(Op1->getType(), *CmpC - 1));
7039 if (Op1Max == Op0Min)
7043 if (*CmpC == Op0Max - 1)
7045 ConstantInt::get(Op1->getType(), *CmpC + 1));
7062 APInt Op0KnownZeroInverted = ~Op0Known.Zero;
7065 Value *LHS =
nullptr;
7068 *LHSC != Op0KnownZeroInverted)
7074 Type *XTy =
X->getType();
7076 APInt C2 = Op0KnownZeroInverted;
7077 APInt C2Pow2 = (C2 & ~(*C1 - 1)) + *C1;
7083 auto *CmpC = ConstantInt::get(XTy, Log2C2 - Log2C1);
7093 (Op0Known & Op1Known) == Op0Known)
7099 if (Op1Min == Op0Max)
7103 if (Op1Max == Op0Min)
7107 if (Op1Min == Op0Max)
7111 if (Op1Max == Op0Min)
7119 if ((
I.isSigned() || (
I.isUnsigned() && !
I.hasSameSign())) &&
7122 I.setPredicate(
I.getUnsignedPredicate());
7140 return BinaryOperator::CreateAnd(
Builder.CreateIsNull(
X),
Y);
7146 return BinaryOperator::CreateOr(
Builder.CreateIsNull(
X),
Y);
7157 bool IsSExt = ExtI->
getOpcode() == Instruction::SExt;
7159 auto CreateRangeCheck = [&] {
7174 }
else if (!IsSExt || HasOneUse) {
7179 return CreateRangeCheck();
7181 }
else if (IsSExt ?
C->isAllOnes() :
C->isOne()) {
7189 }
else if (!IsSExt || HasOneUse) {
7194 return CreateRangeCheck();
7208 Instruction::ICmp, Pred1,
X,
7227 Value *Op0 =
I.getOperand(0);
7228 Value *Op1 =
I.getOperand(1);
7234 if (!FlippedStrictness)
7237 return new ICmpInst(FlippedStrictness->first, Op0, FlippedStrictness->second);
7255 I.setName(
I.getName() +
".not");
7266 Value *
A =
I.getOperand(0), *
B =
I.getOperand(1);
7267 assert(
A->getType()->isIntOrIntVectorTy(1) &&
"Bools only");
7273 switch (
I.getPredicate()) {
7282 switch (
I.getPredicate()) {
7292 switch (
I.getPredicate()) {
7301 return BinaryOperator::CreateXor(
A,
B);
7309 return BinaryOperator::CreateAnd(Builder.CreateNot(
A),
B);
7317 return BinaryOperator::CreateAnd(Builder.CreateNot(
B),
A);
7325 return BinaryOperator::CreateOr(Builder.CreateNot(
A),
B);
7333 return BinaryOperator::CreateOr(Builder.CreateNot(
B),
A);
7381 Value *NewX = Builder.CreateLShr(
X,
Y,
X->getName() +
".highbits");
7389 Value *
LHS = Cmp.getOperand(0), *
RHS = Cmp.getOperand(1);
7393 Value *V = Builder.CreateCmp(Pred,
X,
Y, Cmp.getName());
7395 I->copyIRFlags(&Cmp);
7396 Module *M = Cmp.getModule();
7398 M, Intrinsic::vector_reverse, V->getType());
7405 (
LHS->hasOneUse() ||
RHS->hasOneUse()))
7406 return createCmpReverse(Pred, V1, V2);
7410 return createCmpReverse(Pred, V1,
RHS);
7414 return createCmpReverse(Pred,
LHS, V2);
7425 V1Ty == V2->
getType() && (
LHS->hasOneUse() ||
RHS->hasOneUse())) {
7426 Value *NewCmp = Builder.CreateCmp(Pred, V1, V2);
7439 Constant *ScalarC =
C->getSplatValue(
true);
7447 Value *NewCmp = Builder.CreateCmp(Pred, V1,
C);
7458 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
7464 if (
match(Op0, UAddOvResultPat) &&
7475 (Op0 ==
A || Op0 ==
B))
7485 if (!
I.getOperand(0)->getType()->isPointerTy() ||
7487 I.getParent()->getParent(),
7488 I.getOperand(0)->getType()->getPointerAddressSpace())) {
7494 Op->isLaunderOrStripInvariantGroup()) {
7496 Op->getOperand(0),
I.getOperand(1));
7508 Value *Const =
I.getOperand(1);
7526 Type *VecEltTy = VecTy->getElementType();
7528 DL.getTypeSizeInBits(VecEltTy) * VecTy->getNumElements();
7529 if (!
DL.fitsInLegalInteger(ScalarBW))
7533 ? ConstantInt::get(ScalarTy, 0)
7536 Builder.CreateBitCast(Vec, ScalarTy), NewConst);
7548 if (
I.getType()->isVectorTy())
7571 if (!LHSTy || !LHSTy->getElementType()->isIntegerTy())
7574 LHSTy->getNumElements() * LHSTy->getElementType()->getIntegerBitWidth();
7576 if (!
DL.isLegalInteger(NumBits))
7580 auto *ScalarTy = Builder.getIntNTy(NumBits);
7581 LHS = Builder.CreateBitCast(
LHS, ScalarTy,
LHS->getName() +
".scalar");
7582 RHS = Builder.CreateBitCast(
RHS, ScalarTy,
RHS->getName() +
".scalar");
7638 bool IsIntMinPosion =
C->isAllOnesValue();
7650 CxtI, IsIntMinPosion
7651 ?
Builder.CreateICmpSGT(
X, AllOnesValue)
7653 X, ConstantInt::get(
X->getType(),
SMin + 1)));
7659 CxtI, IsIntMinPosion
7660 ?
Builder.CreateICmpSLT(
X, NullValue)
7662 X, ConstantInt::get(
X->getType(),
SMin)));
7675 auto CheckUGT1 = [](
const APInt &Divisor) {
return Divisor.ugt(1); };
7690 auto CheckNE0 = [](
const APInt &Shift) {
return !Shift.isZero(); };
7710 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
7717 if (Op0Cplxity < Op1Cplxity) {
7732 if (
Value *V = dyn_castNegVal(SelectTrue)) {
7733 if (V == SelectFalse)
7735 }
else if (
Value *V = dyn_castNegVal(SelectFalse)) {
7736 if (V == SelectTrue)
7796 if (
C->isNonNegative())
7800 ConstantInt::get(
X->getType(), ~*
C));
7806 if (
C->isNonNegative())
7810 ConstantInt::get(
X->getType(), ~*
C));
7866 if (
I.isCommutative()) {
7867 if (
auto Pair = matchSymmetricPair(
I.getOperand(0),
I.getOperand(1))) {
7896 (Op0->
hasOneUse() || Op1->hasOneUse())) {
7901 Cond, Res, NewICMP,
"",
nullptr,
7908 Cond, NewICMP, Res,
"",
nullptr,
7924 bool I0NUW = I0->hasNoUnsignedWrap();
7925 bool I1NUW = I1->hasNoUnsignedWrap();
7926 bool I0NSW = I0->hasNoSignedWrap();
7927 bool I1NSW = I1->hasNoSignedWrap();
7931 ((I0NUW || I0NSW) && (I1NUW || I1NSW)))) {
7933 ConstantInt::get(Op0->
getType(), 0));
7940 assert(Op1->getType()->isPointerTy() &&
7941 "Comparing pointer with non-pointer?");
7970 bool ConsumesOp0, ConsumesOp1;
7973 (ConsumesOp0 || ConsumesOp1)) {
7976 assert(InvOp0 && InvOp1 &&
7977 "Mismatch between isFreeToInvert and getFreelyInverted");
7978 return new ICmpInst(
I.getSwappedPredicate(), InvOp0, InvOp1);
7990 if (AddI->
getOpcode() == Instruction::Add &&
7991 OptimizeOverflowCheck(Instruction::Add,
false,
X,
Y, *AddI,
7992 Result, Overflow)) {
8010 if ((
I.isUnsigned() ||
I.isEquality()) &&
8013 Y->getType()->getScalarSizeInBits() == 1 &&
8014 (Op0->
hasOneUse() || Op1->hasOneUse())) {
8021 unsigned ShiftOpc = ShiftI->
getOpcode();
8022 if ((ExtOpc == Instruction::ZExt && ShiftOpc == Instruction::LShr) ||
8023 (ExtOpc == Instruction::SExt && ShiftOpc == Instruction::AShr)) {
8057 if (EVI->getIndices()[0] == 0 && ACXI->getCompareOperand() == Op1 &&
8064 if (
I.getType()->isVectorTy())
8076 const APInt *C1, *C2;
8083 Type *InputTy =
A->getType();
8090 TruncC1.
setBit(InputBitWidth - 1);
8094 ConstantInt::get(InputTy, C2->
trunc(InputBitWidth)));
8114 if (MantissaWidth == -1)
8121 if (
I.isEquality()) {
8123 bool IsExact =
false;
8124 APSInt RHSCvt(IntWidth, LHSUnsigned);
8133 if (*RHS != RHSRoundInt) {
8153 if ((
int)IntWidth > MantissaWidth) {
8155 int Exp =
ilogb(*RHS);
8158 if (MaxExponent < (
int)IntWidth - !LHSUnsigned)
8164 if (MantissaWidth <= Exp && Exp <= (
int)IntWidth - !LHSUnsigned)
8173 assert(!RHS->isNaN() &&
"NaN comparison not already folded!");
8176 switch (
I.getPredicate()) {
8267 APSInt RHSInt(IntWidth, LHSUnsigned);
8270 if (!RHS->isZero()) {
8285 if (RHS->isNegative())
8291 if (RHS->isNegative())
8297 if (RHS->isNegative())
8304 if (!RHS->isNegative())
8310 if (RHS->isNegative())
8316 if (RHS->isNegative())
8322 if (RHS->isNegative())
8329 if (!RHS->isNegative())
8348 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
8359 unsigned Pred =
I.getPredicate();
8367 if (!Res00 || !Res01 || !Res10 || !Res11)
8376 std::bitset<4> Table;
8434 if (
C->isNegative())
8435 Pred =
I.getSwappedPredicate();
8462 "X ord/uno NaN should be folded away by simplifyFCmpInst()");
8468 bool RoundDown =
false;
8489 auto NextValue = [](
const APFloat &
Value,
bool RoundDown) {
8491 NextValue.
next(RoundDown);
8495 APFloat NextCValue = NextValue(*CValue, RoundDown);
8500 APFloat ExtCValue = ConvertFltSema(*CValue, DestFltSema);
8501 APFloat ExtNextCValue = ConvertFltSema(NextCValue, DestFltSema);
8508 APFloat PrevCValue = NextValue(*CValue, !RoundDown);
8509 APFloat Bias = ConvertFltSema(*CValue - PrevCValue, DestFltSema);
8511 ExtNextCValue = ExtCValue + Bias;
8518 C.getType()->getScalarType()->getFltSemantics();
8521 APFloat MidValue = ConvertFltSema(ExtMidValue, SrcFltSema);
8522 if (MidValue != *CValue)
8523 ExtMidValue.
next(!RoundDown);
8531 if (ConvertFltSema(ExtMidValue, SrcFltSema).isInfinity())
8535 APFloat NextExtMidValue = NextValue(ExtMidValue, RoundDown);
8536 if (ConvertFltSema(NextExtMidValue, SrcFltSema).
isFinite())
8541 ConstantFP::get(DestType, ExtMidValue),
"", &
I);
8554 if (!
C->isPosZero()) {
8555 if (!
C->isSmallestNormalized())
8568 switch (
I.getPredicate()) {
8594 switch (
I.getPredicate()) {
8619 assert(!
I.hasNoNaNs() &&
"fcmp should have simplified");
8624 assert(!
I.hasNoNaNs() &&
"fcmp should have simplified");
8638 return replacePredAndOp0(&
I,
I.getPredicate(),
X);
8661 I.setHasNoInfs(
false);
8663 switch (
I.getPredicate()) {
8708 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
8713 Pred =
I.getSwappedPredicate();
8722 return new FCmpInst(Pred, Op0, Zero,
"", &
I);
8758 I.getFunction()->getDenormalMode(
8765 I.setHasNoNaNs(
true);
8790 if (MantissaWidth != -1 &&
ilogb(*
C) < MantissaWidth) {
8792 I.setPredicate(
I.getSwappedPredicate());
8829 if (!IsStrictLt && !IsStrictGt && !IsGe)
8851 }
else if (
match(FAbsArg,
8859 if (
A->getType() !=
B->getType())
8874 Type *OpType =
LHS->getType();
8880 if (!FloorX && !CeilX) {
8884 Pred =
I.getSwappedPredicate();
8960 if (!
I || !(
I->getOpcode() == Instruction::SIToFP ||
8961 I->getOpcode() == Instruction::UIToFP))
8964 bool IsUnsigned =
I->getOpcode() == Instruction::UIToFP;
8965 unsigned BitWidth =
I->getOperand(0)->getType()->getScalarSizeInBits();
8988 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
8990 SQ.getWithInstruction(&
I)))
8995 assert(OpType == Op1->getType() &&
"fcmp with different-typed operands?");
9020 if (
I.isCommutative()) {
9021 if (
auto Pair = matchSymmetricPair(
I.getOperand(0),
I.getOperand(1))) {
9043 return new FCmpInst(
I.getSwappedPredicate(),
X,
Y,
"", &
I);
9059 bool IsRedundantMinMaxClamp =
9121 !
F.getDenormalMode(Op1->getType()->getScalarType()->getFltSemantics())
9122 .inputsMayBeZero()) {
9130 Type *IntTy =
X->getType();
9131 const APInt &SignMask =
~APInt::getSignMask(IntTy->getScalarSizeInBits());
9132 Value *MaskX =
Builder.CreateAnd(
X, ConstantInt::get(IntTy, SignMask));
9142 case Instruction::Select:
9150 case Instruction::FSub:
9155 case Instruction::PHI:
9159 case Instruction::SIToFP:
9160 case Instruction::UIToFP:
9164 case Instruction::FDiv:
9168 case Instruction::Load:
9174 case Instruction::FPTrunc:
9201 return new FCmpInst(
I.getSwappedPredicate(),
X, NegC,
"", &
I);
9215 X->getType() ==
Y->getType())
9226 X->getType()->getScalarType()->getFltSemantics();
9262 Constant *NewC = ConstantFP::get(
X->getType(), TruncC);
9275 Type *IntType =
Builder.getIntNTy(
X->getType()->getScalarSizeInBits());
9288 Value *CanonLHS =
nullptr;
9291 if (CanonLHS == Op1)
9292 return new FCmpInst(Pred, Op1, Op1,
"", &
I);
9294 Value *CanonRHS =
nullptr;
9297 if (CanonRHS == Op0)
9298 return new FCmpInst(Pred, Op0, Op0,
"", &
I);
9301 if (CanonLHS && CanonRHS)
9302 return new FCmpInst(Pred, CanonLHS, CanonRHS,
"", &
I);
9305 if (
I.getType()->isVectorTy())
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Register Bank Select
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements the APSInt class, which is a simple class that represents an arbitrary sized int...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static Instruction * foldFCmpReciprocalAndZero(FCmpInst &I, Instruction *LHSI, Constant *RHSC)
Fold (C / X) < 0.0 --> X < 0.0 if possible. Swap predicate if necessary.
static Instruction * foldFabsWithFcmpZero(FCmpInst &I, InstCombinerImpl &IC)
Optimize fabs(X) compared with zero.
static void collectOffsetOp(Value *V, SmallVectorImpl< OffsetOp > &Offsets, bool AllowRecursion)
static Value * rewriteGEPAsOffset(Value *Start, Value *Base, GEPNoWrapFlags NW, const DataLayout &DL, SetVector< Value * > &Explored, InstCombiner &IC)
Returns a re-written value of Start as an indexed GEP using Base as a pointer.
static bool isMinMaxCmpSelectEliminable(SelectPatternFlavor Flavor, Value *A, Value *B)
Returns true if a select that implements a min/max is redundant and select result can be replaced wit...
static Instruction * foldICmpEqualityWithOffset(ICmpInst &I, InstCombiner::BuilderTy &Builder, const SimplifyQuery &SQ)
Offset both sides of an equality icmp to see if we can save some instructions: icmp eq/ne X,...
static bool addWithOverflow(APInt &Result, const APInt &In1, const APInt &In2, bool IsSigned=false)
Compute Result = In1+In2, returning true if the result overflowed for this type.
static Instruction * foldICmpOfVectorReduce(ICmpInst &I, const DataLayout &DL, IRBuilderBase &Builder)
static Instruction * foldICmpAndXX(ICmpInst &I, const SimplifyQuery &Q, InstCombinerImpl &IC)
static Instruction * foldVectorCmp(CmpInst &Cmp, InstCombiner::BuilderTy &Builder)
static bool isMaskOrZero(const Value *V, bool Not, const SimplifyQuery &Q, unsigned Depth=0)
static Value * createLogicFromTable(const std::bitset< 4 > &Table, Value *Op0, Value *Op1, IRBuilderBase &Builder, bool HasOneUse)
static Instruction * foldICmpOfUAddOv(ICmpInst &I)
static bool isChainSelectCmpBranch(const SelectInst *SI)
Return true when the instruction sequence within a block is select-cmp-br.
static Instruction * foldICmpInvariantGroup(ICmpInst &I)
std::pair< Instruction::BinaryOps, Value * > OffsetOp
Find all possible pairs (BinOp, RHS) that BinOp V, RHS can be simplified.
static Instruction * foldReductionIdiom(ICmpInst &I, InstCombiner::BuilderTy &Builder, const DataLayout &DL)
This function folds patterns produced by lowering of reduce idioms, such as llvm.vector....
static Instruction * canonicalizeICmpBool(ICmpInst &I, InstCombiner::BuilderTy &Builder)
Integer compare with boolean values can always be turned into bitwise ops.
static Instruction * foldFCmpFSubIntoFCmp(FCmpInst &I, Instruction *LHSI, Constant *RHSC, InstCombinerImpl &CI)
static Value * foldICmpOrXorSubChain(ICmpInst &Cmp, BinaryOperator *Or, InstCombiner::BuilderTy &Builder)
Fold icmp eq/ne (or (xor/sub (X1, X2), xor/sub (X3, X4))), 0.
static bool hasBranchUse(ICmpInst &I)
Given an icmp instruction, return true if any use of this comparison is a branch on sign bit comparis...
static Value * foldICmpWithLowBitMaskedVal(CmpPredicate Pred, Value *Op0, Value *Op1, const SimplifyQuery &Q, InstCombiner &IC)
Some comparisons can be simplified.
static APInt getDemandedBitsLHSMask(ICmpInst &I, unsigned BitWidth)
When performing a comparison against a constant, it is possible that not all the bits in the LHS are ...
static Instruction * foldICmpShlLHSC(ICmpInst &Cmp, Instruction *Shl, const APInt &C)
Fold icmp (shl nuw C2, Y), C.
static Instruction * foldFCmpWithFloorAndCeil(FCmpInst &I, InstCombinerImpl &IC)
static Instruction * foldICmpXorXX(ICmpInst &I, const SimplifyQuery &Q, InstCombinerImpl &IC)
static Instruction * foldICmpOfCmpIntrinsicWithConstant(CmpPredicate Pred, IntrinsicInst *I, const APInt &C, InstCombiner::BuilderTy &Builder)
static Instruction * processUMulZExtIdiom(ICmpInst &I, Value *MulVal, const APInt *OtherVal, InstCombinerImpl &IC)
Recognize and process idiom involving test for multiplication overflow.
static Instruction * foldSqrtWithFcmpZero(FCmpInst &I, InstCombinerImpl &IC)
Optimize sqrt(X) compared with zero.
static Instruction * foldFCmpFNegCommonOp(FCmpInst &I)
static Instruction * foldICmpWithHighBitMask(ICmpInst &Cmp, InstCombiner::BuilderTy &Builder)
static ICmpInst * canonicalizeCmpWithConstant(ICmpInst &I)
If we have an icmp le or icmp ge instruction with a constant operand, turn it into the appropriate ic...
static Instruction * foldICmpIntrinsicWithIntrinsic(ICmpInst &Cmp, InstCombiner::BuilderTy &Builder)
Fold an icmp with LLVM intrinsics.
static Instruction * foldICmpUSubSatOrUAddSatWithConstant(CmpPredicate Pred, SaturatingInst *II, const APInt &C, InstCombiner::BuilderTy &Builder)
static Instruction * foldICmpPow2Test(ICmpInst &I, InstCombiner::BuilderTy &Builder)
static bool subWithOverflow(APInt &Result, const APInt &In1, const APInt &In2, bool IsSigned=false)
Compute Result = In1-In2, returning true if the result overflowed for this type.
static bool canRewriteGEPAsOffset(Value *Start, Value *Base, GEPNoWrapFlags &NW, const DataLayout &DL, SetVector< Value * > &Explored)
Returns true if we can rewrite Start as a GEP with pointer Base and some integer offset.
static Instruction * foldFCmpFpTrunc(FCmpInst &I, const Instruction &FPTrunc, const Constant &C)
static Instruction * foldICmpXNegX(ICmpInst &I, InstCombiner::BuilderTy &Builder)
static Instruction * processUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B, ConstantInt *CI2, ConstantInt *CI1, InstCombinerImpl &IC)
The caller has matched a pattern of the form: I = icmp ugt (add (add A, B), CI2), CI1 If this is of t...
static Value * foldShiftIntoShiftInAnotherHandOfAndInICmp(ICmpInst &I, const SimplifyQuery SQ, InstCombiner::BuilderTy &Builder)
static bool isSignTest(ICmpInst::Predicate &Pred, const APInt &C)
Returns true if the exploded icmp can be expressed as a signed comparison to zero and updates the pre...
static Instruction * transformToIndexedCompare(GEPOperator *GEPLHS, Value *RHS, CmpPredicate Cond, const DataLayout &DL, InstCombiner &IC)
Converts (CMP GEPLHS, RHS) if this change would make RHS a constant.
static Instruction * foldCtpopPow2Test(ICmpInst &I, IntrinsicInst *CtpopLhs, const APInt &CRhs, InstCombiner::BuilderTy &Builder, const SimplifyQuery &Q)
static Instruction * foldFCmpFAbsFSubIntToFP(FCmpInst &I, InstCombinerImpl &IC)
Fold: fabs(uitofp(a) - uitofp(b)) pred C --> a == b where 'pred' is olt, ult, ogt,...
static void setInsertionPoint(IRBuilder<> &Builder, Value *V, bool Before=true)
static bool isNeutralValue(Instruction::BinaryOps BinaryOp, Value *RHS, bool IsSigned)
static bool isMultipleOf(Value *X, const APInt &C, const SimplifyQuery &Q)
Return true if X is a multiple of C.
static Value * foldICmpWithTruncSignExtendedVal(ICmpInst &I, InstCombiner::BuilderTy &Builder)
Some comparisons can be simplified.
static Instruction * foldICmpOrXX(ICmpInst &I, const SimplifyQuery &Q, InstCombinerImpl &IC)
This file provides internal interfaces used to implement the InstCombine.
This file provides the interface for the instcombine pass implementation.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
uint64_t IntrinsicInst * II
const SmallVectorImpl< MachineOperand > & Cond
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
This file implements a set that has insertion order iteration characteristics.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static SymbolRef::Type getType(const Symbol *Sym)
cmpResult
IEEE-754R 5.11: Floating Point Comparison Relations.
static constexpr roundingMode rmTowardZero
static constexpr roundingMode rmNearestTiesToEven
opStatus
IEEE-754R 7: Default exception handling.
LLVM_ABI opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
static APFloat getOne(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative One.
static APFloat getSmallestNormalized(const fltSemantics &Sem, bool Negative=false)
Returns the smallest (by magnitude) normalized finite number in the given semantics.
APInt bitcastToAPInt() const
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
opStatus convertToInteger(MutableArrayRef< integerPart > Input, unsigned int Width, bool IsSigned, roundingMode RM, bool *IsExact) const
opStatus next(bool nextDown)
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
LLVM_ABI FPClassTest classify() const
Return the FPClassTest which will return true for the value.
opStatus roundToIntegral(roundingMode RM)
Class for arbitrary precision integers.
LLVM_ABI APInt udiv(const APInt &RHS) const
Unsigned division operation.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
static LLVM_ABI void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Dual division/remainder interface.
bool isNegatedPowerOf2() const
Check if this APInt's negated value is a power of two greater than zero.
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
unsigned getActiveBits() const
Compute the number of active bits in the value.
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
APInt abs() const
Get the absolute value.
unsigned ceilLogBase2() const
bool sgt(const APInt &RHS) const
Signed greater than comparison.
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
LLVM_ABI APInt usub_ov(const APInt &RHS, bool &Overflow) const
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
bool isSignMask() const
Check if the APInt's value is returned by getSignMask.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
static APInt getMinValue(unsigned numBits)
Gets minimum unsigned value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
LLVM_ABI APInt sadd_ov(const APInt &RHS, bool &Overflow) const
bool eq(const APInt &RHS) const
Equality comparison.
LLVM_ABI APInt sdiv(const APInt &RHS) const
Signed division function for APInt.
LLVM_ABI APInt uadd_ov(const APInt &RHS, bool &Overflow) const
void negate()
Negate this APInt in place.
unsigned countr_zero() const
Count the number of trailing zero bits.
unsigned countl_zero() const
The APInt version of std::countl_zero.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
void flipAllBits()
Toggle every bit to its opposite value.
unsigned countl_one() const
Count the number of leading one bits.
unsigned logBase2() const
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
bool isMaxSignedValue() const
Determine if this is the largest signed value.
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
APInt shl(unsigned shiftAmt) const
Left-shift function.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
LLVM_ABI APInt ssub_ov(const APInt &RHS, bool &Overflow) const
bool isOne() const
Determine if this is a value of 1.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
unsigned countr_one() const
Count the number of trailing one bits.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
An arbitrary precision integer that knows its signedness.
static APSInt getMinValue(uint32_t numBits, bool Unsigned)
Return the APSInt representing the minimum integer value with the given bit width and signedness.
static APSInt getMaxValue(uint32_t numBits, bool Unsigned)
Return the APSInt representing the maximum integer value with the given bit width and signedness.
an instruction to allocate memory on the stack
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
LLVM Basic Block Representation.
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
BinaryOps getOpcode() const
static LLVM_ABI BinaryOperator * CreateNot(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
Value * getArgOperand(unsigned i) const
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
This class is the base class for the comparison instructions.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Predicate getStrictPredicate() const
For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
static LLVM_ABI Predicate getFlippedStrictnessPredicate(Predicate pred)
This is a static version that you can use without an instruction available.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
bool isTrueWhenEqual() const
This is just a convenience.
static LLVM_ABI CmpInst * Create(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate and the two operands.
Predicate getNonStrictPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
static LLVM_ABI bool isStrictPredicate(Predicate predicate)
This is a static version that you can use without an instruction available.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Predicate getPredicate() const
Return the predicate for this instruction.
static bool isIntPredicate(Predicate P)
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static LLVM_ABI CmpPredicate getSwapped(CmpPredicate P)
Get the swapped predicate of a CmpPredicate.
Conditional Branch instruction.
static LLVM_ABI Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getPointerBitCastOrAddrSpaceCast(Constant *C, Type *Ty)
Create a BitCast or AddrSpaceCast for a pointer type depending on the address space.
static LLVM_ABI Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getNot(Constant *C)
static LLVM_ABI Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getXor(Constant *C1, Constant *C2)
static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)
static LLVM_ABI Constant * getZero(Type *Ty, bool Negative=false)
This is the shared class of boolean and integer constants.
uint64_t getLimitedValue(uint64_t Limit=~0ULL) const
getLimitedValue - If the value is smaller than the specified limit, return it, otherwise return the l...
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static ConstantInt * getSigned(IntegerType *Ty, int64_t V, bool ImplicitTrunc=false)
Return a ConstantInt with the specified value for the specified type.
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
const APInt & getValue() const
Return the constant as an APInt value reference.
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
This class represents a range of values.
LLVM_ABI ConstantRange add(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an addition of a value in this ran...
LLVM_ABI std::optional< ConstantRange > exactUnionWith(const ConstantRange &CR) const
Union the two ranges and return the result if it can be represented exactly, otherwise return std::nu...
LLVM_ABI bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
LLVM_ABI ConstantRange subtract(const APInt &CI) const
Subtract the specified constant from the endpoints of this constant range.
const APInt * getSingleElement() const
If this set contains a single element, return it, otherwise return null.
LLVM_ABI ConstantRange difference(const ConstantRange &CR) const
Subtract the specified range from this range (aka relative complement of the sets).
LLVM_ABI bool isEmptySet() const
Return true if this set contains no members.
LLVM_ABI ConstantRange truncate(uint32_t BitWidth, unsigned NoWrapKind=0) const
Return a new range in the specified integer type, which must be strictly smaller than the current typ...
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
LLVM_ABI ConstantRange inverse() const
Return a new range that is the logical not of the current set.
LLVM_ABI std::optional< ConstantRange > exactIntersectWith(const ConstantRange &CR) const
Intersect the two ranges and return the result if it can be represented exactly, otherwise return std...
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
static ConstantRange getNonEmpty(APInt Lower, APInt Upper)
Create non-empty constant range with the given bounds.
LLVM_ABI ConstantRange sub(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a subtraction of a value in this r...
static LLVM_ABI ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
static LLVM_ABI Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
This is an important base class in LLVM.
static LLVM_ABI Constant * getIntegerValue(Type *Ty, const APInt &V)
Return the value for an integer or pointer constant, or a vector thereof, with the given scalar value...
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
LLVM_ABI bool isAllOnesValue() const
Return true if this is the value that would be returned by getAllOnesValue.
LLVM_ABI const APInt & getUniqueInteger() const
If C is a constant integer then return its value, otherwise C must be a vector of constant integers,...
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
A parsed version of the target data layout string in and methods for querying it.
iterator find(const_arg_type_t< KeyT > Val)
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
This instruction compares its operands according to the predicate given to the constructor.
static bool isCommutative(Predicate Pred)
static bool isEquality(Predicate Pred)
Represents flags for the getelementptr instruction/expression.
bool hasNoUnsignedSignedWrap() const
bool hasNoUnsignedWrap() const
GEPNoWrapFlags intersectForOffsetAdd(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep p, x+y).
static GEPNoWrapFlags none()
bool isInBounds() const
Test whether this is an inbounds GEP, as defined by LangRef.html.
LLVM_ABI Type * getSourceElementType() const
Value * getPointerOperand()
GEPNoWrapFlags getNoWrapFlags() const
bool hasAllConstantIndices() const
Return true if all of the indices of this GEP are constant integers.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
This instruction compares its operands according to the predicate given to the constructor.
static bool isGE(Predicate P)
Return true if the predicate is SGE or UGE.
static LLVM_ABI bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
static bool isLT(Predicate P)
Return true if the predicate is SLT or ULT.
static bool isGT(Predicate P)
Return true if the predicate is SGT or UGT.
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
bool isEquality() const
Return true if this predicate is either EQ or NE.
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
static bool isLE(Predicate P)
Return true if the predicate is SLE or ULE.
Common base class shared among various IRBuilders.
LLVM_ABI CallInst * CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with 1 operand which is mangled on its type.
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="", bool IsDisjoint=false)
ConstantInt * getInt(const APInt &AI)
Get a constant integer value.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Instruction * foldICmpShrConstant(ICmpInst &Cmp, BinaryOperator *Shr, const APInt &C)
Fold icmp ({al}shr X, Y), C.
Instruction * foldICmpWithZextOrSext(ICmpInst &ICmp)
Instruction * foldICmpSelectConstant(ICmpInst &Cmp, SelectInst *Select, ConstantInt *C)
Instruction * foldICmpSRemConstant(ICmpInst &Cmp, BinaryOperator *UDiv, const APInt &C)
Instruction * foldICmpBinOpWithConstant(ICmpInst &Cmp, BinaryOperator *BO, const APInt &C)
Fold an icmp with BinaryOp and constant operand: icmp Pred BO, C.
Instruction * foldICmpOrConstant(ICmpInst &Cmp, BinaryOperator *Or, const APInt &C)
Fold icmp (or X, Y), C.
Instruction * foldICmpTruncWithTruncOrExt(ICmpInst &Cmp, const SimplifyQuery &Q)
Fold icmp (trunc nuw/nsw X), (trunc nuw/nsw Y).
Instruction * foldSignBitTest(ICmpInst &I)
Fold equality-comparison between zero and any (maybe truncated) right-shift by one-less-than-bitwidth...
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...
Value * insertRangeTest(Value *V, const APInt &Lo, const APInt &Hi, bool isSigned, bool Inside)
Emit a computation of: (V >= Lo && V < Hi) if Inside is true, otherwise (V < Lo || V >= Hi).
Instruction * foldICmpBinOp(ICmpInst &Cmp, const SimplifyQuery &SQ)
Try to fold icmp (binop), X or icmp X, (binop).
Instruction * foldCmpLoadFromIndexedGlobal(LoadInst *LI, GetElementPtrInst *GEP, CmpInst &ICI, ConstantInt *AndCst=nullptr)
This is called when we see this pattern: cmp pred (load (gep GV, ...)), cmpcst where GV is a global v...
Instruction * foldICmpSubConstant(ICmpInst &Cmp, BinaryOperator *Sub, const APInt &C)
Fold icmp (sub X, Y), C.
Instruction * foldICmpWithClamp(ICmpInst &Cmp, Value *X, MinMaxIntrinsic *Min)
Match and fold patterns like: icmp eq/ne X, min(max(X, Lo), Hi) which represents a range check and ca...
Instruction * foldICmpInstWithConstantNotInt(ICmpInst &Cmp)
Handle icmp with constant (but not simple integer constant) RHS.
bool SimplifyDemandedBits(Instruction *I, unsigned Op, const APInt &DemandedMask, KnownBits &Known, const SimplifyQuery &Q, unsigned Depth=0) override
This form of SimplifyDemandedBits simplifies the specified instruction operand if possible,...
Instruction * foldICmpShlConstConst(ICmpInst &I, Value *ShAmt, const APInt &C1, const APInt &C2)
Handle "(icmp eq/ne (shl AP2, A), AP1)" -> (icmp eq/ne A, TrailingZeros(AP1) - TrailingZeros(AP2)).
Value * reassociateShiftAmtsOfTwoSameDirectionShifts(BinaryOperator *Sh0, const SimplifyQuery &SQ, bool AnalyzeForSignBitExtraction=false)
Instruction * foldICmpEqIntrinsicWithConstant(ICmpInst &ICI, IntrinsicInst *II, const APInt &C)
Fold an equality icmp with LLVM intrinsic and constant operand.
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false, bool SimplifyBothArms=false)
Given an instruction with a select as one operand and a constant as the other operand,...
Value * foldMultiplicationOverflowCheck(ICmpInst &Cmp)
Fold (-1 u/ x) u< y and ((x * y) ?/ x) != y into @llvm.?mul.with.overflow(x, y) plus extraction of the overflow bit.
Instruction * foldICmpWithConstant(ICmpInst &Cmp)
Fold icmp Pred X, C.
CmpInst * canonicalizeICmpPredicate(CmpInst &I)
If we have a comparison with a non-canonical predicate, if we can update all the users,...
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * foldICmpWithZero(ICmpInst &Cmp)
Instruction * foldICmpCommutative(CmpPredicate Pred, Value *Op0, Value *Op1, ICmpInst &CxtI)
Instruction * foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp, BinaryOperator *BO, const APInt &C)
Fold an icmp equality instruction with binary operator LHS and constant RHS: icmp eq/ne BO,...
Instruction * foldICmpUsingBoolRange(ICmpInst &I)
If one operand of an icmp is effectively a bool (value range of {0,1}), then try to reduce patterns b...
Instruction * foldICmpWithTrunc(ICmpInst &Cmp)
Instruction * foldCmpSelectOfConstants(CmpInst &I)
Fold fcmp/icmp pred (select C1, TV1, FV1), (select C2, TV2, FV2) where all true/false values are cons...
Instruction * foldICmpIntrinsicWithConstant(ICmpInst &ICI, IntrinsicInst *II, const APInt &C)
Fold an icmp with LLVM intrinsic and constant operand: icmp Pred II, C.
bool matchThreeWayIntCompare(SelectInst *SI, Value *&LHS, Value *&RHS, ConstantInt *&Less, ConstantInt *&Equal, ConstantInt *&Greater)
Match a select chain which produces one of three values based on whether the LHS is less than,...
Instruction * visitFCmpInst(FCmpInst &I)
Instruction * foldICmpUsingKnownBits(ICmpInst &Cmp)
Try to fold the comparison based on range information we can get by checking whether bits are known t...
Instruction * foldICmpDivConstant(ICmpInst &Cmp, BinaryOperator *Div, const APInt &C)
Fold icmp ({su}div X, Y), C.
Instruction * foldIRemByPowerOfTwoToBitTest(ICmpInst &I)
If we have: icmp eq/ne (urem/srem x, y), 0 iff y is a power-of-two, we can replace this with a bit te...
Instruction * foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI, Constant *RHSC)
Fold fcmp ([us]itofp x, cst) if possible.
Instruction * foldICmpUDivConstant(ICmpInst &Cmp, BinaryOperator *UDiv, const APInt &C)
Fold icmp (udiv X, Y), C.
Instruction * foldICmpAddOpConst(Value *X, const APInt &C, CmpPredicate Pred)
Fold "icmp pred (X+C), X".
Instruction * foldICmpWithCastOp(ICmpInst &ICmp)
Handle icmp (cast x), (cast or constant).
Instruction * foldICmpTruncConstant(ICmpInst &Cmp, TruncInst *Trunc, const APInt &C)
Fold icmp (trunc X), C.
Instruction * foldICmpAddConstant(ICmpInst &Cmp, BinaryOperator *Add, const APInt &C)
Fold icmp (add X, Y), C.
Instruction * foldICmpMulConstant(ICmpInst &Cmp, BinaryOperator *Mul, const APInt &C)
Fold icmp (mul X, Y), C.
Instruction * tryFoldInstWithCtpopWithNot(Instruction *I)
Instruction * foldICmpXorConstant(ICmpInst &Cmp, BinaryOperator *Xor, const APInt &C)
Fold icmp (xor X, Y), C.
Instruction * foldSelectICmp(CmpPredicate Pred, SelectInst *SI, Value *RHS, const ICmpInst &I)
Instruction * foldICmpInstWithConstantAllowPoison(ICmpInst &Cmp, const APInt &C)
Try to fold integer comparisons with a constant operand: icmp Pred X, C where X is some kind of instr...
Instruction * foldIsMultipleOfAPowerOfTwo(ICmpInst &Cmp)
Fold icmp eq (num + mask) & ~mask, num to icmp eq (and num, mask), 0, where mask is a low-bit mask.
Instruction * foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And, const APInt &C1, const APInt &C2)
Fold icmp (and (sh X, Y), C2), C1.
Instruction * foldICmpBinOpWithConstantViaTruthTable(ICmpInst &Cmp, BinaryOperator *BO, const APInt &C)
Instruction * foldICmpInstWithConstant(ICmpInst &Cmp)
Try to fold integer comparisons with a constant operand: icmp Pred X, C where X is some kind of instr...
Instruction * foldICmpXorShiftConst(ICmpInst &Cmp, BinaryOperator *Xor, const APInt &C)
For power-of-2 C: ((X s>> ShiftC) ^ X) u< C --> (X + C) u< (C << 1) ((X s>> ShiftC) ^ X) u> (C - 1) -...
Instruction * foldICmpShlConstant(ICmpInst &Cmp, BinaryOperator *Shl, const APInt &C)
Fold icmp (shl X, Y), C.
Instruction * foldICmpAndConstant(ICmpInst &Cmp, BinaryOperator *And, const APInt &C)
Fold icmp (and X, Y), C.
Instruction * foldICmpEquality(ICmpInst &Cmp)
Instruction * foldICmpWithMinMax(Instruction &I, MinMaxIntrinsic *MinMax, Value *Z, CmpPredicate Pred)
Fold icmp Pred min|max(X, Y), Z.
bool dominatesAllUses(const Instruction *DI, const Instruction *UI, const BasicBlock *DB) const
True when DB dominates all uses of DI except UI.
bool foldAllocaCmp(AllocaInst *Alloca)
Instruction * visitICmpInst(ICmpInst &I)
OverflowResult computeOverflow(Instruction::BinaryOps BinaryOp, bool IsSigned, Value *LHS, Value *RHS, Instruction *CxtI) const
Instruction * foldICmpWithDominatingICmp(ICmpInst &Cmp)
Canonicalize icmp instructions based on dominating conditions.
bool replacedSelectWithOperand(SelectInst *SI, const ICmpInst *Icmp, const unsigned SIOpd)
Try to replace select with select operand SIOpd in SI-ICmp sequence.
Instruction * foldICmpShrConstConst(ICmpInst &I, Value *ShAmt, const APInt &C1, const APInt &C2)
Handle "(icmp eq/ne (ashr/lshr AP2, A), AP1)" -> (icmp eq/ne A, Log2(AP2/AP1)) -> (icmp eq/ne A,...
void freelyInvertAllUsersOf(Value *V, Value *IgnoredUser=nullptr)
Freely adapt every user of V as-if V was changed to !V.
Instruction * foldICmpAndConstConst(ICmpInst &Cmp, BinaryOperator *And, const APInt &C1)
Fold icmp (and X, C2), C1.
Instruction * foldICmpBitCast(ICmpInst &Cmp)
Instruction * foldGEPICmp(GEPOperator *GEPLHS, Value *RHS, CmpPredicate Cond, Instruction &I)
Fold comparisons between a GEP instruction and something else.
The core instruction combiner logic.
OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS, const Instruction *CxtI) const
unsigned ComputeMaxSignificantBits(const Value *Op, const Instruction *CxtI=nullptr, unsigned Depth=0) const
IRBuilder< TargetFolder, IRBuilderCallbackInserter > BuilderTy
An IRBuilder that automatically inserts new instructions into the worklist.
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
OverflowResult computeOverflowForUnsignedMul(const Value *LHS, const Value *RHS, const Instruction *CxtI, bool IsNSW=false) const
static unsigned getComplexity(Value *V)
Assign a complexity or rank value to LLVM Values.
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
uint64_t MaxArraySizeForCombine
Maximum size of array considered when transforming.
bool canBeCastedExactlyIntToFP(Value *V, Type *FPTy, bool IsSigned, const Instruction *CxtI=nullptr) const
OverflowResult computeOverflowForSignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const Instruction *CxtI) const
static Constant * SubOne(Constant *C)
Subtract one from a Constant.
OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS, const Instruction *CxtI) const
static bool isCanonicalPredicate(CmpPredicate Pred)
Predicate canonicalization reduces the number of patterns that need to be matched by other transforms...
void computeKnownBits(const Value *V, KnownBits &Known, const Instruction *CxtI, unsigned Depth=0) const
bool canFreelyInvertAllUsersOf(Instruction *V, Value *IgnoredUser)
Given i1 V, can every user of V be freely adapted if V is changed to !V ?
void addToWorklist(Instruction *I)
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS, const Instruction *CxtI) const
OverflowResult computeOverflowForUnsignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const Instruction *CxtI) const
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
const SimplifyQuery & getSimplifyQuery() const
bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero=false, const Instruction *CxtI=nullptr, unsigned Depth=0)
LLVM_ABI bool hasNoNaNs() const LLVM_READONLY
Determine whether the no-NaNs flag is set.
LLVM_ABI bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
LLVM_ABI bool hasNoInfs() const LLVM_READONLY
Determine whether the no-infs flag is set.
bool isArithmeticShift() const
Return true if this is an arithmetic shift right.
LLVM_ABI bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
LLVM_ABI bool isExact() const LLVM_READONLY
Determine whether the exact flag is set.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A wrapper class for inspecting calls to intrinsic functions.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
An instruction for reading from memory.
bool isVolatile() const
Return true if this is a load from a volatile memory location.
This class represents min/max intrinsics.
static bool isMin(Intrinsic::ID ID)
Whether the intrinsic is a smin or umin.
static bool isSigned(Intrinsic::ID ID)
Whether the intrinsic is signed or unsigned.
A Module instance is used to store all the information related to an LLVM module.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
Represents a saturating add/sub intrinsic.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, const Instruction *MDFrom=nullptr)
A vector that has set insertion semantics.
size_type size() const
Determine the number of elements in the SetVector.
bool contains(const_arg_type key) const
Check if the SetVector contains the given key.
bool insert(const value_type &X)
Insert a new element into the SetVector.
This instruction constructs a fixed permutation of two input vectors.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
reverse_iterator rbegin()
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class represents a truncation of integer types.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
bool isPointerTy() const
True if this is an instance of PointerType.
bool isPPC_FP128Ty() const
Return true if this is powerpc long double.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
LLVM_ABI Type * getWithNewBitWidth(unsigned NewBitWidth) const
Given an integer or vector type, change the lane bitwidth to NewBitwidth, whilst keeping the old numb...
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
LLVM_ABI int getFPMantissaWidth() const
Return the width of the mantissa of this type.
LLVM_ABI const fltSemantics & getFltSemantics() const
A Use represents the edge between a Value definition and its users.
void setOperand(unsigned i, Value *Val)
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
LLVMContext & getContext() const
All values hold a context through their type.
iterator_range< user_iterator > users()
LLVM_ABI bool hasNUsesOrMore(unsigned N) const
Return true if this value has N uses or more.
LLVM_ABI const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr, bool LookThroughIntToPtr=false) const
Accumulate the constant offset this value has compared to a base pointer.
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
iterator_range< use_iterator > uses()
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
const ParentTy * getParent() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ABI APInt RoundingUDiv(const APInt &A, const APInt &B, APInt::Rounding RM)
Return A unsign-divided by B, rounded by the given rounding mode.
LLVM_ABI APInt RoundingSDiv(const APInt &A, const APInt &B, APInt::Rounding RM)
Return A sign-divided by B, rounded by the given rounding mode.
@ C
The default llvm calling convention, compatible with C.
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > OverloadTys={})
Look up the Function declaration of the intrinsic id in the Module M.
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
match_combine_or< Ty... > m_CombineOr(const Ty &...Ps)
Combine pattern matchers matching any of Ps patterns.
match_combine_and< Ty... > m_CombineAnd(const Ty &...Ps)
Combine pattern matchers matching all of Ps patterns.
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
cst_pred_ty< is_lowbit_mask > m_LowBitMask()
Match an integer or vector with only the low bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
cst_pred_ty< is_negative > m_Negative()
Match an integer or vector of negative values.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
cst_pred_ty< is_sign_mask > m_SignMask()
Match an integer or vector with only the sign bit(s) set.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWAdd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::FSub > m_FSub(const LHS &L, const RHS &R)
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
BinaryOp_match< LHS, RHS, Instruction::URem > m_URem(const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, TruncInst >, OpTy > m_TruncOrSelf(const OpTy &Op)
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
ap_match< APInt > m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
match_combine_or< CastInst_match< OpTy, ZExtInst >, OpTy > m_ZExtOrSelf(const OpTy &Op)
bool match(Val *V, const Pattern &P)
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
match_bind< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
match_deferred< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
cstfp_pred_ty< is_any_zero_fp > m_AnyZeroFP()
Match a floating-point negative zero or positive zero.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches right shift operations (lshr/ashr).
specific_intval< true > m_SpecificIntAllowPoison(const APInt &V)
ap_match< APFloat > m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
CmpClass_match< LHS, RHS, ICmpInst, true > m_c_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
Matches an ICmp with a predicate over LHS and RHS in either order.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap, true > m_c_NUWAdd(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWNeg(const ValTy &V)
Matches a 'Neg' as 'sub nsw 0, V'.
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
auto m_BinOp()
Match an arbitrary binary operation and ignore it.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
BinOpPred_match< LHS, RHS, is_logical_shift_op > m_LogicalShift(const LHS &L, const RHS &R)
Matches logical shift operations.
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > m_SMin(const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, UIToFPInst >, CastInst_match< OpTy, SIToFPInst > > m_IToFP(const OpTy &Op)
auto m_Value()
Match an arbitrary value and ignore it.
m_Intrinsic_Ty< Opnd0 >::Ty m_Sqrt(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)
Matches an Xor with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::FAdd > m_FAdd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
auto m_Constant()
Match an arbitrary Constant and ignore it.
NoWrapTrunc_match< OpTy, TruncInst::NoSignedWrap > m_NSWTrunc(const OpTy &Op)
Matches trunc nsw.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
ThreeOps_match< decltype(m_Value()), LHS, RHS, Instruction::Select, true > m_c_Select(const LHS &L, const RHS &R)
Match Select(C, LHS, RHS) or Select(C, RHS, LHS)
CastInst_match< OpTy, FPExtInst > m_FPExt(const OpTy &Op)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoSignedWrap > m_NSWShl(const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWShl(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWMul(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty > m_UMax(const LHS &L, const RHS &R)
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
cst_pred_ty< is_negated_power2_or_zero > m_NegatedPower2OrZero()
Match an integer or vector negated power-of-2.
NoWrapTrunc_match< OpTy, TruncInst::NoUnsignedWrap > m_NUWTrunc(const OpTy &Op)
Matches trunc nuw.
cst_pred_ty< custom_checkfn< APInt > > m_CheckedInt(function_ref< bool(const APInt &)> CheckFn)
Match an integer or vector where CheckFn(ele) for each element is true.
SelectLike_match< CondTy, LTy, RTy > m_SelectLike(const CondTy &C, const LTy &TrueC, const RTy &FalseC)
Matches a value that behaves like a boolean-controlled select, i.e.
cst_pred_ty< is_lowbit_mask_or_zero > m_LowBitMaskOrZero()
Match an integer or vector with only the low bit(s) set, or zero.
auto m_MaxOrMin(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches a Add with LHS and RHS in either order.
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
cstfp_pred_ty< is_finitenonzero > m_FiniteNonZero()
Match a finite non-zero FP constant.
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty > m_SMax(const LHS &L, const RHS &R)
Signum_match< Val_t > m_Signum(const Val_t &V)
Matches a signum pattern.
CastInst_match< OpTy, SIToFPInst > m_SIToFP(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
cstfp_pred_ty< is_pos_zero_fp > m_PosZeroFP()
Match a floating-point positive zero.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
UAddWithOverflow_match< LHS_t, RHS_t, Sum_t > m_UAddWithOverflow(const LHS_t &L, const RHS_t &R, const Sum_t &S)
Match an icmp instruction checking for unsigned overflow on addition.
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
BinOpPred_match< LHS, RHS, is_irem_op > m_IRem(const LHS &L, const RHS &R)
Matches integer remainder operations.
CastInst_match< OpTy, FPTruncInst > m_FPTrunc(const OpTy &Op)
auto m_Undef()
Match an arbitrary undef constant.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
ElementWiseBitCast_match< OpTy > m_ElementWiseBitCast(const OpTy &Op)
m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Mul, true > m_c_Mul(const LHS &L, const RHS &R)
Matches a Mul with LHS and RHS in either order.
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
match_unless< Ty > m_Unless(const Ty &M)
Match if the inner matcher does NOT match.
cst_pred_ty< icmp_pred_with_threshold > m_SpecificInt_ICMP(ICmpInst::Predicate Predicate, const APInt &Threshold)
Match an integer or vector with every element comparing 'pred' (eq/ne/...) to Threshold.
auto m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
This is an optimization pass for GlobalISel generic memory operations.
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
@ NeverOverflows
Never overflows.
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
@ MayOverflow
May or may not overflow.
cl::opt< bool > ProfcheckDisableMetadataFixes
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI bool isKnownNeverInfinity(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point scalar value is not an infinity or if the floating-point vector val...
LLVM_ABI bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS, bool &TrueIfSigned)
Given an exploded icmp instruction, return true if the comparison only checks the sign bit.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI Value * stripNullTest(Value *V)
Returns the inner value X if the expression has the form f(X) where f(X) == 0 if and only if X == 0,...
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
LLVM_ABI Value * simplifyFCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q)
Given operands for an FCmpInst, fold the result or return null.
int ilogb(const APFloat &Arg)
Returns the exponent of the internal representation of the APFloat.
LLVM_ABI bool MaskedValueIsZero(const Value *V, const APInt &Mask, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if 'V & Mask' is known to be zero.
LLVM_ABI Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
LLVM_ABI Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
auto dyn_cast_or_null(const Y &Val)
LLVM_ABI bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned el...
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
int countl_zero(T Val)
Count number of 0's from the most significant bit to the least stopping at the first 1.
LLVM_ABI Value * emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL, User *GEP, bool NoAssumptions=false)
Given a getelementptr instruction/constantexpr, emit the code necessary to compute the offset from th...
constexpr unsigned MaxAnalysisRecursionDepth
LLVM_ABI Constant * ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op, const DataLayout &DL)
Attempt to constant fold a unary operation with the specified operand.
LLVM_ABI bool isKnownNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known be negative (i.e.
SelectPatternFlavor
Specific patterns of select instructions we can match.
@ SPF_FMAXNUM
Floating point maxnum.
@ SPF_FMINNUM
Floating point minnum.
LLVM_ABI bool impliesPoison(const Value *ValAssumedPoison, const Value *V)
Return true if V is poison given that ValAssumedPoison is already poison.
LLVM_ABI LinearExpression decomposeLinearExpression(const DataLayout &DL, Value *Ptr)
Decompose a pointer into a linear expression.
LLVM_ABI bool isFinite(const Loop *L)
Return true if this loop can be assumed to run for a finite number of iterations.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
APFloat scalbn(APFloat X, int Exp, APFloat::roundingMode RM)
Returns: X * 2^Exp for integral exponents.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
LLVM_ABI SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI Value * simplifyICmpInst(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an ICmpInst, fold the result or return null.
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
LLVM_ABI Constant * ConstantFoldLoadFromConst(Constant *C, Type *Ty, const APInt &Offset, const DataLayout &DL)
Extract value of C at the given Offset reinterpreted as Ty.
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
LLVM_ABI Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ SMax
Signed integer max implemented in terms of select(cmp()).
@ SMin
Signed integer min implemented in terms of select(cmp()).
@ Sub
Subtraction of integers.
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
LLVM_ABI bool isKnownNonEqual(const Value *V1, const Value *V2, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the given values are known to be non-equal when defined.
DWARFExpression::Operation Op
LLVM_ABI bool PointerMayBeCaptured(const Value *V, bool ReturnCaptures, unsigned MaxUsesToExplore=0)
PointerMayBeCaptured - Return true if this pointer value may be captured by the enclosing function (w...
constexpr unsigned BitWidth
LLVM_ABI Constant * getLosslessInvCast(Constant *C, Type *InvCastTo, unsigned CastOp, const DataLayout &DL, PreservedCastFlags *Flags=nullptr)
Try to cast C to InvC losslessly, satisfying CastOp(InvC) equals C, or CastOp(InvC) is a refined valu...
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI bool isKnownNeverNaN(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point scalar value is not a NaN or if the floating-point vector value has...
LLVM_ABI std::optional< std::pair< CmpPredicate, Constant * > > getFlippedStrictnessPredicateAndConstant(CmpPredicate Pred, Constant *C)
Convert an integer comparison with a constant RHS into an equivalent form with the strictness flipped...
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer list are equal or the list is empty.
LLVM_ABI bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return true if the given value is known to have exactly one bit set when defined.
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
LLVM_ABI bool isKnownPositive(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known be positive (i.e.
LLVM_ABI bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the give value is known to be non-negative.
constexpr detail::IsaCheckPredicate< Types... > IsaPred
Function object wrapper for the llvm::isa type check.
LLVM_ABI std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
std::optional< DecomposedBitTest > decomposeBitTestICmp(Value *LHS, Value *RHS, CmpInst::Predicate Pred, bool LookThroughTrunc=true, bool AllowNonZeroC=false, bool DecomposeAnd=false)
Decompose an icmp into the form ((X & Mask) pred C) if possible.
LLVM_ABI ConstantRange computeConstantRange(const Value *V, bool ForSigned, const SimplifyQuery &SQ, unsigned Depth=0)
Determine the possible constant range of an integer or vector of integer value.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Value * materialize(InstCombiner::BuilderTy &Builder) const
static OffsetResult select(Value *Cond, Value *TrueV, Value *FalseV, Instruction *MDFrom)
static OffsetResult value(Value *V)
static OffsetResult invalid()
This callback is used in conjunction with PointerMayBeCaptured.
static CommonPointerBase compute(Value *LHS, Value *RHS)
Represent subnormal handling kind for floating point instruction inputs and outputs.
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ PositiveZero
Denormals are flushed to positive zero.
static constexpr DenormalMode getIEEE()
bool isNonNegative() const
Returns true if this value is known to be non-negative.
bool isZero() const
Returns true if value is all zero.
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
APInt getSignedMaxValue() const
Return the maximal signed value possible given these KnownBits.
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
unsigned getBitWidth() const
Get the bit width of this value.
bool isConstant() const
Returns true if we know the value of all bits.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
bool isStrictlyPositive() const
Returns true if this value is known to be positive.
bool isNegative() const
Returns true if this value is known to be negative.
unsigned countMinPopulation() const
Returns the number of bits known to be one.
APInt getSignedMinValue() const
Return the minimal signed value possible given these KnownBits.
const APInt & getConstant() const
Returns the value when all bits have a known value.
Linear expression BasePtr + Index * Scale + Offset.
SelectPatternFlavor Flavor
static bool isMinOrMax(SelectPatternFlavor SPF)
Returns true if SPF is one of the min/max select pattern flavors.
SimplifyQuery getWithInstruction(const Instruction *I) const
A MapVector that performs no allocations if smaller than a certain size.
Capture information for a specific Use.