   39#define DEBUG_TYPE "instcombine"
   47                            bool IsSigned = false) {

   50    Result = In1.sadd_ov(In2, Overflow);

   52    Result = In1.uadd_ov(In2, Overflow);


   60                            bool IsSigned = false) {

   63    Result = In1.ssub_ov(In2, Overflow);

   65    Result = In1.usub_ov(In2, Overflow);
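
// --- Illustrative sketch (not part of the listed source) ---
// The addWithOverflow/subWithOverflow helpers excerpted above are thin
// wrappers over APInt's overflow-reporting arithmetic. A minimal standalone
// use of that same APInt API might look like this (assumes an LLVM build
// environment; the helper name is made up for illustration):
#include "llvm/ADT/APInt.h"

static bool addWouldOverflow(const llvm::APInt &LHS, const llvm::APInt &RHS,
                             llvm::APInt &Result, bool IsSigned) {
  bool Overflow = false;
  // sadd_ov/uadd_ov return the (wrapped) sum and report overflow via the flag.
  Result = IsSigned ? LHS.sadd_ov(RHS, Overflow)
                    : LHS.uadd_ov(RHS, Overflow);
  return Overflow;
}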
 
 
   73  for (auto *U : I.users())


   95  } else if (C.isAllOnes()) {
 
 
  116  if (LI->isVolatile() || !GV || !GV->isConstant() ||

  117      !GV->hasDefinitiveInitializer())

  121  TypeSize EltSize = DL.getTypeStoreSize(EltTy);

  137  if (!ConstOffset.ult(Stride))

  151  enum { Overdefined = -3, Undefined = -2 };

  160  int FirstTrueElement = Undefined, SecondTrueElement = Undefined;

  164  int FirstFalseElement = Undefined, SecondFalseElement = Undefined;

  172  int TrueRangeEnd = Undefined, FalseRangeEnd = Undefined;

  182  for (unsigned i = 0, e = ArrayElementCount; i != e; ++i, Offset += Stride) {

  196                                                  CompareRHS, DL, &TLI);
 
  204      if (TrueRangeEnd == (
int)i - 1)
 
  206      if (FalseRangeEnd == (
int)i - 1)
 
  223      if (FirstTrueElement == Undefined)
 
  224        FirstTrueElement = TrueRangeEnd = i; 
 
  227        if (SecondTrueElement == Undefined)
 
  228          SecondTrueElement = i;
 
  230          SecondTrueElement = Overdefined;
 
  233        if (TrueRangeEnd == (
int)i - 1)
 
  236          TrueRangeEnd = Overdefined;
 
  240      if (FirstFalseElement == Undefined)
 
  241        FirstFalseElement = FalseRangeEnd = i; 
 
  244        if (SecondFalseElement == Undefined)
 
  245          SecondFalseElement = i;
 
  247          SecondFalseElement = Overdefined;
 
  250        if (FalseRangeEnd == (
int)i - 1)
 
  253          FalseRangeEnd = Overdefined;
 
  258    if (i < 64 && IsTrueForElt)
 
  259      MagicBitvector |= 1ULL << i;
 
  264    if ((i & 8) == 0 && i >= 64 && SecondTrueElement == Overdefined &&
 
  265        SecondFalseElement == Overdefined && TrueRangeEnd == Overdefined &&
 
  266        FalseRangeEnd == Overdefined)
 
  280  auto MaskIdx = [&](
Value *Idx) {
 
  284      Idx = 
Builder.CreateAnd(Idx, Mask);
 
  291  if (SecondTrueElement != Overdefined) {
 
  294    if (FirstTrueElement == Undefined)
 
  297    Value *FirstTrueIdx = ConstantInt::get(Idx->
getType(), FirstTrueElement);
 
  300    if (SecondTrueElement == Undefined)
 
  305    Value *SecondTrueIdx = ConstantInt::get(Idx->
getType(), SecondTrueElement);
 
  307    return BinaryOperator::CreateOr(C1, C2);
 
  312  if (SecondFalseElement != Overdefined) {
 
  315    if (FirstFalseElement == Undefined)
 
  318    Value *FirstFalseIdx = ConstantInt::get(Idx->
getType(), FirstFalseElement);
 
  321    if (SecondFalseElement == Undefined)
 
  326    Value *SecondFalseIdx =
 
  327        ConstantInt::get(Idx->
getType(), SecondFalseElement);
 
  329    return BinaryOperator::CreateAnd(C1, C2);
 
  334  if (TrueRangeEnd != Overdefined) {
 
  335    assert(TrueRangeEnd != FirstTrueElement && 
"Should emit single compare");
 
  339    if (FirstTrueElement) {
 
  340      Value *Offs = ConstantInt::get(Idx->
getType(), -FirstTrueElement);
 
  341      Idx = 
Builder.CreateAdd(Idx, Offs);
 
  345        ConstantInt::get(Idx->
getType(), TrueRangeEnd - FirstTrueElement + 1);
 
  350  if (FalseRangeEnd != Overdefined) {

  351    assert(FalseRangeEnd != FirstFalseElement && "Should emit single compare");

  354    if (FirstFalseElement) {

  355      Value *Offs = ConstantInt::get(Idx->getType(), -FirstFalseElement);

  356      Idx = Builder.CreateAdd(Idx, Offs);

  360        ConstantInt::get(Idx->getType(), FalseRangeEnd - FirstFalseElement);

  373    if (ArrayElementCount <= Idx->getType()->getIntegerBitWidth())

  376      Ty = DL.getSmallestLegalIntType(Init->getContext(), ArrayElementCount);

  381      V = Builder.CreateLShr(ConstantInt::get(Ty, MagicBitvector), V);

  382      V = Builder.CreateAnd(ConstantInt::get(Ty, 1), V);
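
// --- Illustrative sketch (not part of the listed source) ---
// The "MagicBitvector" fold above evaluates the comparison once for every
// element of a constant array and packs the results into a 64-bit mask, so
//   icmp pred (load arr[idx]), RHS   becomes roughly   (Magic >> idx) & 1.
// A plain C++ model of the idea (table contents and the predicate "< 50"
// are made-up example data):
#include <cassert>
#include <cstdint>

int main() {
  const int Table[8] = {3, 99, 7, 42, 100, 5, 64, 1};
  // Precompute: bit i is set iff the comparison is true for element i.
  uint64_t Magic = 0;
  for (unsigned I = 0; I != 8; ++I)
    if (Table[I] < 50)
      Magic |= 1ULL << I;
  // The load + compare collapses to a shift and a mask of the magic constant.
  for (unsigned Idx = 0; Idx != 8; ++Idx)
    assert(((Magic >> Idx) & 1) == (Table[Idx] < 50 ? 1u : 0u));
  return 0;
}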
 
 
  407  while (!WorkList.
empty()) {
 
  410    while (!WorkList.
empty()) {
 
  411      if (Explored.
size() >= 100)
 
  429        if (!
GEP->isInBounds() || 
count_if(
GEP->indices(), IsNonConst) > 1)
 
  437      if (WorkList.
back() == V) {
 
  453    for (
auto *PN : PHIs)
 
  454      for (
Value *
Op : PN->incoming_values())
 
  462  for (
Value *Val : Explored) {
 
  468      if (Inst == 
Base || Inst == 
PHI || !Inst || !
PHI ||
 
  472      if (
PHI->getParent() == Inst->getParent())
 
 
  482                              bool Before = 
true) {
 
  490      I = &*std::next(
I->getIterator());
 
  491    Builder.SetInsertPoint(
I);
 
  496    BasicBlock &Entry = 
A->getParent()->getEntryBlock();
 
  497    Builder.SetInsertPoint(&Entry, Entry.getFirstInsertionPt());
 
 
  519      Base->getContext(), 
DL.getIndexTypeSizeInBits(Start->getType()));
 
  525  for (
Value *Val : Explored) {
 
  533                          PHI->getName() + 
".idx", 
PHI->getIterator());
 
  538  for (
Value *Val : Explored) {
 
  547        NewInsts[
GEP] = OffsetV;
 
  549        NewInsts[
GEP] = Builder.CreateAdd(
 
  550            Op, OffsetV, 
GEP->getOperand(0)->getName() + 
".add",
 
  562  for (
Value *Val : Explored) {
 
  569      for (
unsigned I = 0, 
E = 
PHI->getNumIncomingValues(); 
I < 
E; ++
I) {
 
  570        Value *NewIncoming = 
PHI->getIncomingValue(
I);
 
  572        auto It = NewInsts.
find(NewIncoming);
 
  573        if (It != NewInsts.
end())
 
  574          NewIncoming = It->second;
 
  581  for (Value *Val : Explored) {

  587    Value *NewVal = Builder.CreateGEP(Builder.getInt8Ty(), Base, NewInsts[Val],

  588                                      Val->getName() + ".ptr", NW);

  595  return NewInsts[Start];
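
// --- Illustrative sketch (not part of the listed source) ---
// The rewrite above re-expresses every pointer in the explored def-use web as
// "Base + byte offset" (an i8 GEP), so a comparison of two such pointers can
// be replaced by a comparison of the integer offsets. The underlying identity,
// in plain C++ (array and indices are made-up example data):
#include <cassert>
#include <cstdint>

int main() {
  int Arr[16] = {};
  for (unsigned I = 0; I != 16; ++I)
    for (unsigned J = 0; J != 16; ++J) {
      // &Arr[I] == &Arr[J]  <=>  the byte offsets from the common base agree.
      bool PtrEq = (&Arr[I] == &Arr[J]);
      bool OffEq = (I * sizeof(int) == J * sizeof(int));
      assert(PtrEq == OffEq);
    }
  return 0;
}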
 
 
  681  if (
Base.Ptr == RHS && CanFold(
Base.LHSNW) && !
Base.isExpensive()) {
 
  685        EmitGEPOffsets(
Base.LHSGEPs, 
Base.LHSNW, IdxTy, 
true);
 
  693                            RHS->getType()->getPointerAddressSpace())) {
 
  724    if (GEPLHS->
getOperand(0) != GEPRHS->getOperand(0)) {
 
  725      bool IndicesTheSame =
 
  728              GEPRHS->getPointerOperand()->getType() &&
 
  732          if (GEPLHS->
getOperand(i) != GEPRHS->getOperand(i)) {
 
  733            IndicesTheSame = 
false;
 
  739      if (IndicesTheSame &&
 
  747      if (GEPLHS->
isInBounds() && GEPRHS->isInBounds() &&
 
  749          (GEPRHS->hasAllConstantIndices() || GEPRHS->hasOneUse()) &&
 
  753        Value *LOffset = EmitGEPOffset(GEPLHS);
 
  754        Value *ROffset = EmitGEPOffset(GEPRHS);
 
  761        if (LHSIndexTy != RHSIndexTy) {
 
  764            ROffset = 
Builder.CreateTrunc(ROffset, LHSIndexTy);
 
  766            LOffset = 
Builder.CreateTrunc(LOffset, RHSIndexTy);
 
  775    if (GEPLHS->
getOperand(0) == GEPRHS->getOperand(0) &&
 
  779      unsigned NumDifferences = 0; 
 
  780      unsigned DiffOperand = 0;    
 
  781      for (
unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
 
  782        if (GEPLHS->
getOperand(i) != GEPRHS->getOperand(i)) {
 
  784          Type *RHSType = GEPRHS->getOperand(i)->getType();
 
  795          if (NumDifferences++)
 
  800      if (NumDifferences == 0) 
 
  808        Value *RHSV = GEPRHS->getOperand(DiffOperand);
 
  809        return NewICmp(NW, LHSV, RHSV);
 
  817          EmitGEPOffsets(
Base.LHSGEPs, 
Base.LHSNW, IdxTy, 
true);
 
  819          EmitGEPOffsets(
Base.RHSGEPs, 
Base.RHSNW, IdxTy, 
true);
 
  820      return NewICmp(
Base.LHSNW & 
Base.RHSNW, L, R);
 
 
  846    bool Captured = 
false;
 
  851    CmpCaptureTracker(
AllocaInst *Alloca) : Alloca(Alloca) {}
 
  853    void tooManyUses()
 override { Captured = 
true; }
 
  865        ICmps[ICmp] |= 1u << U->getOperandNo();
 
  874  CmpCaptureTracker Tracker(Alloca);
 
  876  if (Tracker.Captured)
 
  880  for (
auto [ICmp, Operands] : Tracker.ICmps) {
 
  886      auto *Res = ConstantInt::get(ICmp->getType(),
 
 
  912  assert(!!
C && 
"C should not be zero!");
 
  928                        ConstantInt::get(
X->getType(), -
C));
 
  940                        ConstantInt::get(
X->getType(), 
SMax - 
C));
 
  951                      ConstantInt::get(
X->getType(), 
SMax - (
C - 1)));
 
 
  960  assert(
I.isEquality() && 
"Cannot fold icmp gt/lt");
 
  963    if (
I.getPredicate() == 
I.ICMP_NE)
 
  965    return new ICmpInst(Pred, LHS, RHS);
 
  984    return getICmp(I.ICMP_UGT, A,

  985                   ConstantInt::get(A->getType(), AP2.logBase2()));

  997    if (IsAShr && AP1 == AP2.ashr(Shift)) {

 1001        return getICmp(I.ICMP_UGE, A, ConstantInt::get(A->getType(), Shift));

 1002      return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));

 1003    } else if (AP1 == AP2.lshr(Shift)) {

 1004      return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));

 1010  auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE);
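
// --- Illustrative sketch (not part of the listed source) ---
// The const/const shift fold above handles "icmp eq (C2 >> A), C1" where both
// constants are known: at most one shift amount maps C2 onto C1, so the whole
// compare collapses to "A == Shift". Checking that identity in plain C++ for
// one made-up constant pair:
#include <cassert>
#include <cstdint>

int main() {
  const uint32_t C2 = 0xF0u, C1 = 0x0Fu; // C1 == C2 >> 4, and only for 4
  const unsigned Shift = 4;
  for (unsigned A = 0; A != 32; ++A)
    assert(((C2 >> A) == C1) == (A == Shift));
  return 0;
}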
 
 
 1019  assert(
I.isEquality() && 
"Cannot fold icmp gt/lt");
 
 1022    if (
I.getPredicate() == 
I.ICMP_NE)
 
 1024    return new ICmpInst(Pred, LHS, RHS);
 
 1033  if (!AP1 && AP2TrailingZeros != 0)
 
 1036        ConstantInt::get(
A->getType(), AP2.
getBitWidth() - AP2TrailingZeros));
 
 1044  if (Shift > 0 && AP2.
shl(Shift) == AP1)
 
 1045    return getICmp(
I.ICMP_EQ, 
A, ConstantInt::get(
A->getType(), Shift));
 
 1049  auto *TorF = ConstantInt::get(
I.getType(), 
I.getPredicate() == 
I.ICMP_NE);
 
 
 1078  if (NewWidth != 7 && NewWidth != 15 && NewWidth != 31)

 1102    if (U == AddWithCst)

 1120      I.getModule(), Intrinsic::sadd_with_overflow, NewType);

 1128  Value *TruncA = Builder.CreateTrunc(A, NewType, A->getName() + ".trunc");

 1129  Value *TruncB = Builder.CreateTrunc(B, NewType, B->getName() + ".trunc");

 1130  CallInst *Call = Builder.CreateCall(F, {TruncA, TruncB}, "sadd");

 1131  Value *Add = Builder.CreateExtractValue(Call, 0, "sadd.result");
 
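// --- Illustrative sketch (not part of the listed source) ---
// The fold above recognizes the "widen, add, compare against the narrow
// type's range" idiom and rewrites it to llvm.sadd.with.overflow on the
// narrow type. The equivalence it relies on, modeled in plain C++ for i8
// (uses the GCC/Clang __builtin_add_overflow builtin):
#include <cassert>
#include <cstdint>

int main() {
  for (int A = -128; A <= 127; ++A)
    for (int B = -128; B <= 127; ++B) {
      // Widened form: do the add in a wider type, then range-check it.
      int Wide = A + B;
      bool OverflowsWide = Wide < -128 || Wide > 127;
      // Narrow form: the overflow flag of the 8-bit signed add.
      int8_t Ignored;
      bool OverflowsNarrow =
          __builtin_add_overflow((int8_t)A, (int8_t)B, &Ignored);
      assert(OverflowsWide == OverflowsNarrow);
    }
  return 0;
}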
 
 1149  if (!
I.isEquality())
 
 
 1180                                     APInt(XBitWidth, XBitWidth - 1))))
 
 
 1207        return new ICmpInst(Pred, 
B, Cmp.getOperand(1));
 
 1209        return new ICmpInst(Pred, 
A, Cmp.getOperand(1));
 
 1226      return new ICmpInst(Pred, 
X, Cmp.getOperand(1));
 
 1238      return new ICmpInst(Pred, 
Y, Cmp.getOperand(1));
 
 1244      return new ICmpInst(Pred, 
X, Cmp.getOperand(1));
 
 1247    if (BO0->hasNoUnsignedWrap() || BO0->hasNoSignedWrap()) {
 
 1255        return new ICmpInst(Pred, 
Y, Cmp.getOperand(1));
 
 1260        return new ICmpInst(Pred, 
X, Cmp.getOperand(1));
 
 1276      return new ICmpInst(Pred, Stripped,
 
 
 1289  const APInt *Mask, *Neg;
 
 1305  auto *NewAnd = 
Builder.CreateAnd(Num, *Mask);
 
 1308  return new ICmpInst(Pred, NewAnd, Zero);
 
 
 1329  Value *Op0 = Cmp.getOperand(0), *Op1 = Cmp.getOperand(1);
 
 1345      for (
Value *V : Phi->incoming_values()) {
 
 1353      PHINode *NewPhi = 
Builder.CreatePHI(Cmp.getType(), Phi->getNumOperands());
 
 1354      for (
auto [V, Pred] : 
zip(
Ops, Phi->blocks()))
 
 
 1369  Value *
X = Cmp.getOperand(0), *
Y = Cmp.getOperand(1);
 
 1402    if (Cmp.isEquality() || (IsSignBit && 
hasBranchUse(Cmp)))
 
 1407    if (Cmp.hasOneUse() &&
 
 1421    if (!
match(BI->getCondition(),
 
 1426    if (
DT.dominates(Edge0, Cmp.getParent())) {
 
 1427      if (
auto *V = handleDomCond(DomPred, DomC))
 
 1431      if (
DT.dominates(Edge1, Cmp.getParent()))
 
 
 1447  Type *SrcTy = X->getType();

 1449           SrcBits = SrcTy->getScalarSizeInBits();

 1453  if (shouldChangeType(Trunc->getType(), SrcTy)) {

 1455      return new ICmpInst(Pred, X, ConstantInt::get(SrcTy, C.sext(SrcBits)));

 1457      return new ICmpInst(Pred, X, ConstantInt::get(SrcTy, C.zext(SrcBits)));

 1460  if (C.isOne() && C.getBitWidth() > 1) {

 1465                          ConstantInt::get(V->getType(), 1));

 1475      auto NewPred = (Pred == Cmp.ICMP_EQ) ? Cmp.ICMP_UGE : Cmp.ICMP_ULT;

 1476      return new ICmpInst(NewPred, Y, ConstantInt::get(SrcTy, DstBits));

 1481      return new ICmpInst(Pred, Y, ConstantInt::get(SrcTy, C.logBase2()));

 1487    if (!SrcTy->isVectorTy() && shouldChangeType(DstBits, SrcBits)) {

 1491      Constant *WideC = ConstantInt::get(SrcTy, C.zext(SrcBits));

 1500    if ((Known.Zero | Known.One).countl_one() >= SrcBits - DstBits) {

 1502      APInt NewRHS = C.zext(SrcBits);

 1504      return new ICmpInst(Pred, X, ConstantInt::get(SrcTy, NewRHS));

 1516      DstBits == SrcBits - ShAmt) {
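
// --- Illustrative sketch (not part of the listed source) ---
// When the truncation is known to be lossless (the nuw/known-bits cases
// above), comparing the narrow value against C is the same as comparing the
// wide value against zext(C). In plain C++, for values that fit in 8 bits:
#include <cassert>
#include <cstdint>

int main() {
  const uint8_t C = 42;
  for (uint32_t X = 0; X != 256; ++X) {
    // X < 256, so truncating to 8 bits drops no set bits (a "nuw" truncate).
    bool Narrow = (uint8_t)X == C;
    bool Wide = X == (uint32_t)C; // compare against the zero-extended constant
    assert(Narrow == Wide);
  }
  return 0;
}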
 
 
 1533  bool YIsSExt = 
false;
 
 1536    unsigned NoWrapFlags = 
cast<TruncInst>(Cmp.getOperand(0))->getNoWrapKind() &
 
 1538    if (Cmp.isSigned()) {
 
 1549    if (
X->getType() != 
Y->getType() &&
 
 1550        (!Cmp.getOperand(0)->hasOneUse() || !Cmp.getOperand(1)->hasOneUse()))
 
 1552    if (!isDesirableIntType(
X->getType()->getScalarSizeInBits()) &&
 
 1553        isDesirableIntType(
Y->getType()->getScalarSizeInBits())) {
 
 1555      Pred = Cmp.getSwappedPredicate(Pred);
 
 1560  else if (!Cmp.isSigned() &&
 
 1574  Type *TruncTy = Cmp.getOperand(0)->getType();
 
 1579  if (isDesirableIntType(TruncBits) &&
 
 1580      !isDesirableIntType(
X->getType()->getScalarSizeInBits()))
 
 
 1603  bool TrueIfSigned = false;

 1620  if (Xor->hasOneUse()) {

 1622    if (!Cmp.isEquality() && XorC->isSignMask()) {

 1623      Pred = Cmp.getFlippedSignednessPredicate();

 1624      return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), C ^ *XorC));

 1629      Pred = Cmp.getFlippedSignednessPredicate();

 1630      Pred = Cmp.getSwappedPredicate(Pred);

 1631      return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), C ^ *XorC));

 1638    if (*XorC == ~C && (C + 1).isPowerOf2())

 1641    if (*XorC == C && (C + 1).isPowerOf2())

 1646    if (*XorC == -C && C.isPowerOf2())

 1648                          ConstantInt::get(X->getType(), ~C));

 1650    if (*XorC == C && (-C).isPowerOf2())

 1652                          ConstantInt::get(X->getType(), ~C));
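
// --- Illustrative sketch (not part of the listed source) ---
// The sign-mask case above relies on the classic identity that XOR with the
// sign bit converts signed order into unsigned order, so the xor can be
// removed by flipping the predicate's signedness and adjusting the constant:
//   (X ^ SignMask) slt C   <=>   X ult (C ^ SignMask)
// Exhaustive 8-bit check in plain C++:
#include <cassert>
#include <cstdint>

int main() {
  for (int X = 0; X != 256; ++X)
    for (int C = 0; C != 256; ++C) {
      bool SltForm = (int8_t)(X ^ 0x80) < (int8_t)C;        // (X ^ SignMask) slt C
      bool UltForm = (uint8_t)X < (uint8_t)(C ^ 0x80);      // X ult (C ^ SignMask)
      assert(SltForm == UltForm);
    }
  return 0;
}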
 
 
 1674  const APInt *ShiftC;
 
 1679  Type *XType = 
X->getType();
 
 1685  return new ICmpInst(Pred, 
Add, ConstantInt::get(XType, Bound));
 
 
 1694  if (!Shift || !Shift->
isShift())
 
 1702  unsigned ShiftOpcode = Shift->
getOpcode();
 
 1703  bool IsShl = ShiftOpcode == Instruction::Shl;
 
 1706    APInt NewAndCst, NewCmpCst;
 
 1707    bool AnyCmpCstBitsShiftedOut;
 
 1708    if (ShiftOpcode == Instruction::Shl) {
 
 1716      NewCmpCst = C1.
lshr(*C3);
 
 1717      NewAndCst = C2.
lshr(*C3);
 
 1718      AnyCmpCstBitsShiftedOut = NewCmpCst.
shl(*C3) != C1;
 
 1719    } 
else if (ShiftOpcode == Instruction::LShr) {
 
 1724      NewCmpCst = C1.
shl(*C3);
 
 1725      NewAndCst = C2.
shl(*C3);
 
 1726      AnyCmpCstBitsShiftedOut = NewCmpCst.
lshr(*C3) != C1;
 
 1732      assert(ShiftOpcode == Instruction::AShr && 
"Unknown shift opcode");
 
 1733      NewCmpCst = C1.
shl(*C3);
 
 1734      NewAndCst = C2.
shl(*C3);
 
 1735      AnyCmpCstBitsShiftedOut = NewCmpCst.
ashr(*C3) != C1;
 
 1736      if (NewAndCst.
ashr(*C3) != C2)
 
 1740    if (AnyCmpCstBitsShiftedOut) {

 1750          Shift->getOperand(0), ConstantInt::get(And->getType(), NewAndCst));

 1751      return new ICmpInst(Cmp.getPredicate(), NewAnd,

 1752                          ConstantInt::get(And->getType(), NewCmpCst));

 1769    return new ICmpInst(Cmp.getPredicate(), NewAnd, Cmp.getOperand(1));
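
// --- Illustrative sketch (not part of the listed source) ---
// The and+shift re-association above moves the shift off the variable and
// onto the two constants, e.g. for a logical right shift:
//   ((X >> C3) & C2) == C1   <=>   (X & (C2 << C3)) == (C1 << C3)
// provided no set bits of C1 are lost when shifting it back. Exhaustive check
// for one made-up constant triple in plain C++:
#include <cassert>
#include <cstdint>

int main() {
  const unsigned C3 = 3;
  const uint8_t C2 = 0x1B, C1 = 0x0A; // (C1 << C3) >> C3 == C1, so no bits lost
  for (unsigned X = 0; X != 256; ++X) {
    bool Original = (((uint8_t)X >> C3) & C2) == C1;
    bool Rewritten = ((uint8_t)X & (uint8_t)(C2 << C3)) == (uint8_t)(C1 << C3);
    assert(Original == Rewritten);
  }
  return 0;
}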
 
 
 1784  if (isICMP_NE && Cmp.getType()->isVectorTy() && C1.
isZero() &&
 
 1786    return new TruncInst(
And->getOperand(0), Cmp.getType());
 
 1797                        ConstantInt::get(
X->getType(), ~*C2));
 
 1802                        ConstantInt::get(
X->getType(), -*C2));
 
 1805  if (!
And->hasOneUse())
 
 1808  if (Cmp.isEquality() && C1.
isZero()) {
 
 1826      Constant *NegBOC = ConstantInt::get(
And->getType(), -NewC2);
 
 1828      return new ICmpInst(NewPred, 
X, NegBOC);
 
 1846    if (!Cmp.getType()->isVectorTy()) {
 
 1847      Type *WideType = W->getType();
 
 1849      Constant *ZextC1 = ConstantInt::get(WideType, C1.
zext(WideScalarBits));
 
 1850      Constant *ZextC2 = ConstantInt::get(WideType, C2->
zext(WideScalarBits));
 
 1852      return new ICmpInst(Cmp.getPredicate(), NewAnd, ZextC1);
 
 1863  if (!Cmp.isSigned() && C1.
isZero() && 
And->getOperand(0)->hasOneUse() &&
 
 1870      unsigned UsesRemoved = 0;
 
 1871      if (
And->hasOneUse())
 
 1873      if (
Or->hasOneUse())
 
 1880      if (UsesRemoved >= RequireUsesRemoved) {
 
 1884                             One, 
Or->getName());
 
 1886        return new ICmpInst(Cmp.getPredicate(), NewAnd, Cmp.getOperand(1));
 
 1900  if (!Cmp.getParent()->getParent()->hasFnAttribute(

 1901          Attribute::NoImplicitFloat) &&

 1904    Type *FPType = V->getType()->getScalarType();

 1905    if (FPType->isIEEELikeFPTy() && (C1.isZero() || C1 == *C2)) {

 1906      APInt ExponentMask =

 1908      if (*C2 == ExponentMask) {

 1909        unsigned Mask = C1.isZero()
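
// --- Illustrative sketch (not part of the listed source) ---
// The NoImplicitFloat-guarded case above looks at integer tests of a float's
// exponent field (seen through a bitcast) and maps them to a floating-point
// class check. The bit-level fact it builds on, in C++20 (binary32):
#include <bit>
#include <cassert>
#include <cmath>
#include <cstdint>
#include <limits>

int main() {
  const uint32_t ExponentMask = 0x7F800000u;
  auto Bits = [](float F) { return std::bit_cast<uint32_t>(F); };
  // Exponent all ones  <=>  the value is an infinity or a NaN.
  assert((Bits(std::numeric_limits<float>::infinity()) & ExponentMask) ==
         ExponentMask);
  assert((Bits(std::nanf("")) & ExponentMask) == ExponentMask);
  assert((Bits(1.5f) & ExponentMask) != ExponentMask);
  // Exponent all zeros  <=>  the value is zero or a subnormal.
  assert((Bits(0.0f) & ExponentMask) == 0);
  assert((Bits(1.5f) & ExponentMask) != 0);
  return 0;
}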
 
 
 1943      Constant *MinSignedC = ConstantInt::get(
 
 1947      return new ICmpInst(NewPred, 
X, MinSignedC);
 
 1962  if (!Cmp.isEquality())
 
 1968  if (Cmp.getOperand(1) == 
Y && 
C.isNegatedPowerOf2()) {
 
 1979      X->getType()->isIntOrIntVectorTy(1) && (
C.isZero() || 
C.isOne())) {
 
 1985    return BinaryOperator::CreateAnd(TruncY, 
X);
 
 2003    const APInt *Addend, *Msk;
 
 2007      APInt NewComperand = (
C - *Addend) & *Msk;
 
 2008      Value *MaskA = 
Builder.CreateAnd(
A, ConstantInt::get(
A->getType(), *Msk));
 
 2010                          ConstantInt::get(MaskA->
getType(), NewComperand));
 
 
 2032  while (!WorkList.
empty()) {
 
 2033    auto MatchOrOperatorArgument = [&](
Value *OrOperatorArgument) {
 
 2036      if (
match(OrOperatorArgument,
 
 2042      if (
match(OrOperatorArgument,
 
 2052    Value *OrOperatorLhs, *OrOperatorRhs;
 
 2054    if (!
match(CurrentValue,
 
 2059    MatchOrOperatorArgument(OrOperatorRhs);
 
 2060    MatchOrOperatorArgument(OrOperatorLhs);
 
 2065  Value *LhsCmp = Builder.CreateICmp(Pred, CmpValues.rbegin()->first,

 2066                                     CmpValues.rbegin()->second);

 2068  for (auto It = CmpValues.rbegin() + 1; It != CmpValues.rend(); ++It) {

 2069    Value *RhsCmp = Builder.CreateICmp(Pred, It->first, It->second);

 2070    LhsCmp = Builder.CreateBinOp(BOpc, LhsCmp, RhsCmp);
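
// --- Illustrative sketch (not part of the listed source) ---
// The or-of-xors chain handling above splits "((a ^ b) | (c ^ d)) == 0"
// (a common memcmp-style idiom) into "a == b && c == d". The identity in
// plain C++:
#include <cassert>
#include <cstdint>

int main() {
  for (unsigned A = 0; A != 4; ++A)
    for (unsigned B = 0; B != 4; ++B)
      for (unsigned C = 0; C != 4; ++C)
        for (unsigned D = 0; D != 4; ++D) {
          bool Fused = ((A ^ B) | (C ^ D)) == 0;
          bool Split = (A == B) && (C == D);
          assert(Fused == Split);
        }
  return 0;
}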
 
 
 2086                          ConstantInt::get(V->getType(), 1));
 
 2089  Value *OrOp0 = 
Or->getOperand(0), *OrOp1 = 
Or->getOperand(1);
 
 2096        Builder.CreateXor(OrOp1, ConstantInt::get(OrOp1->getType(), 
C));
 
 2097    return new ICmpInst(Pred, OrOp0, NewC);
 
 2101  if (
match(OrOp1, 
m_APInt(MaskC)) && Cmp.isEquality()) {
 
 2102    if (*MaskC == 
C && (
C + 1).isPowerOf2()) {
 
 2107      return new ICmpInst(Pred, OrOp0, OrOp1);
 
 2114    if (
Or->hasOneUse()) {
 
 2116      Constant *NewC = ConstantInt::get(
Or->getType(), 
C ^ (*MaskC));
 
 2128    Constant *NewC = ConstantInt::get(
X->getType(), TrueIfSigned ? 1 : 0);
 
 2156  if (!Cmp.isEquality() || !
C.isZero() || !
Or->hasOneUse())
 
 
 2188  if (Cmp.isEquality() && 
C.isZero() && 
X == 
Mul->getOperand(1) &&
 
 2189      (
Mul->hasNoUnsignedWrap() || 
Mul->hasNoSignedWrap()))
 
 2211  if (Cmp.isEquality()) {
 
 2213    if (
Mul->hasNoSignedWrap() && 
C.srem(*MulC).isZero()) {
 
 2214      Constant *NewC = ConstantInt::get(MulTy, 
C.sdiv(*MulC));
 
 2222    if (
C.urem(*MulC).isZero()) {
 
 2225      if ((*MulC & 1).isOne() || 
Mul->hasNoUnsignedWrap()) {
 
 2226        Constant *NewC = ConstantInt::get(MulTy, 
C.udiv(*MulC));
 
 2239    if (
C.isMinSignedValue() && MulC->
isAllOnes())
 
 2245      NewC = ConstantInt::get(
 
 2249             "Unexpected predicate");
 
 2250      NewC = ConstantInt::get(
 
 2255      NewC = ConstantInt::get(
 
 2259             "Unexpected predicate");
 
 2260      NewC = ConstantInt::get(
 
 2265  return NewC ? 
new ICmpInst(Pred, 
X, NewC) : 
nullptr;
 
 
 2277  unsigned TypeBits = 
C.getBitWidth();
 
 2279  if (Cmp.isUnsigned()) {
 
 2299    return new ICmpInst(Pred, 
Y, ConstantInt::get(ShiftType, CLog2));
 
 2300  } 
else if (Cmp.isSigned() && C2->
isOne()) {
 
 2301    Constant *BitWidthMinusOne = ConstantInt::get(ShiftType, TypeBits - 1);
 
 
 2322  const APInt *ShiftVal;
 
 2352  const APInt *ShiftAmt;
 
 2358  unsigned TypeBits = 
C.getBitWidth();
 
 2359  if (ShiftAmt->
uge(TypeBits))
 
 2371      APInt ShiftedC = 
C.ashr(*ShiftAmt);
 
 2372      return new ICmpInst(Pred, 
X, ConstantInt::get(ShType, ShiftedC));
 
 2375        C.ashr(*ShiftAmt).shl(*ShiftAmt) == 
C) {
 
 2376      APInt ShiftedC = 
C.ashr(*ShiftAmt);
 
 2377      return new ICmpInst(Pred, 
X, ConstantInt::get(ShType, ShiftedC));
 
 2384      assert(!
C.isMinSignedValue() && 
"Unexpected icmp slt");
 
 2385      APInt ShiftedC = (
C - 1).ashr(*ShiftAmt) + 1;
 
 2386      return new ICmpInst(Pred, 
X, ConstantInt::get(ShType, ShiftedC));
 
 2396      APInt ShiftedC = 
C.lshr(*ShiftAmt);
 
 2397      return new ICmpInst(Pred, 
X, ConstantInt::get(ShType, ShiftedC));
 
 2400        C.lshr(*ShiftAmt).shl(*ShiftAmt) == 
C) {
 
 2401      APInt ShiftedC = 
C.lshr(*ShiftAmt);
 
 2402      return new ICmpInst(Pred, 
X, ConstantInt::get(ShType, ShiftedC));
 
 2409      assert(
C.ugt(0) && 
"ult 0 should have been eliminated");
 
 2410      APInt ShiftedC = (
C - 1).lshr(*ShiftAmt) + 1;
 
 2411      return new ICmpInst(Pred, 
X, ConstantInt::get(ShType, ShiftedC));
 
 2415  if (Cmp.isEquality() && Shl->
hasOneUse()) {
 
 2421    Constant *LShrC = ConstantInt::get(ShType, 
C.lshr(*ShiftAmt));
 
 2426  bool TrueIfSigned = 
false;
 
 2438  if (Cmp.isUnsigned() && Shl->
hasOneUse()) {
 
 2440    if ((
C + 1).isPowerOf2() &&
 
 2448    if (
C.isPowerOf2() &&
 
 2478              Pred, ConstantInt::get(ShType->
getContext(), 
C))) {
 
 2479        CmpPred = FlippedStrictness->first;
 
 2487          ConstantInt::get(TruncTy, RHSC.
ashr(*ShiftAmt).
trunc(TypeBits - Amt));
 
 2489                          Builder.CreateTrunc(
X, TruncTy, 
"", 
false,
 
 
 2506  if (Cmp.isEquality() && Shr->
isExact() && 
C.isZero())
 
 2507    return new ICmpInst(Pred, 
X, Cmp.getOperand(1));
 
 2509  bool IsAShr = Shr->
getOpcode() == Instruction::AShr;
 
 2510  const APInt *ShiftValC;
 
 2512    if (Cmp.isEquality())
 
 2530      assert(ShiftValC->
uge(
C) && 
"Expected simplify of compare");
 
 2531      assert((IsUGT || !
C.isZero()) && 
"Expected X u< 0 to simplify");
 
 2533      unsigned CmpLZ = IsUGT ? 
C.countl_zero() : (
C - 1).
countl_zero();
 
 2541  const APInt *ShiftAmtC;
 
 2547  unsigned TypeBits = 
C.getBitWidth();
 
 2549  if (ShAmtVal >= TypeBits || ShAmtVal == 0)
 
 2552  bool IsExact = Shr->
isExact();
 
 2560        (
C - 1).isPowerOf2() && 
C.countLeadingZeros() > ShAmtVal) {
 
 2566      APInt ShiftedC = (
C - 1).shl(ShAmtVal) + 1;
 
 2567      return new ICmpInst(Pred, 
X, ConstantInt::get(ShrTy, ShiftedC));
 
 2573      APInt ShiftedC = 
C.shl(ShAmtVal);
 
 2574      if (ShiftedC.
ashr(ShAmtVal) == 
C)
 
 2575        return new ICmpInst(Pred, 
X, ConstantInt::get(ShrTy, ShiftedC));
 
 2579      APInt ShiftedC = (
C + 1).shl(ShAmtVal) - 1;
 
 2580      if (!
C.isMaxSignedValue() && !(
C + 1).shl(ShAmtVal).isMinSignedValue() &&
 
 2581          (ShiftedC + 1).ashr(ShAmtVal) == (
C + 1))
 
 2582        return new ICmpInst(Pred, 
X, ConstantInt::get(ShrTy, ShiftedC));
 
 2588      APInt ShiftedC = (
C + 1).shl(ShAmtVal) - 1;
 
 2589      if ((ShiftedC + 1).ashr(ShAmtVal) == (
C + 1) ||
 
 2590          (
C + 1).shl(ShAmtVal).isMinSignedValue())
 
 2591        return new ICmpInst(Pred, 
X, ConstantInt::get(ShrTy, ShiftedC));
 
 2598    if (
C.getBitWidth() > 2 && 
C.getNumSignBits() <= ShAmtVal) {
 
 2608  } 
else if (!IsAShr) {
 
 2612      APInt ShiftedC = 
C.shl(ShAmtVal);
 
 2613      if (ShiftedC.
lshr(ShAmtVal) == 
C)
 
 2614        return new ICmpInst(Pred, 
X, ConstantInt::get(ShrTy, ShiftedC));
 
 2618      APInt ShiftedC = (
C + 1).shl(ShAmtVal) - 1;
 
 2619      if ((ShiftedC + 1).lshr(ShAmtVal) == (
C + 1))
 
 2620        return new ICmpInst(Pred, 
X, ConstantInt::get(ShrTy, ShiftedC));
 
 2624  if (!Cmp.isEquality())
 
 2632  assert(((IsAShr && 
C.shl(ShAmtVal).ashr(ShAmtVal) == 
C) ||
 
 2633          (!IsAShr && 
C.shl(ShAmtVal).lshr(ShAmtVal) == 
C)) &&
 
 2634         "Expected icmp+shr simplify did not occur.");
 
 2639    return new ICmpInst(Pred, 
X, ConstantInt::get(ShrTy, 
C << ShAmtVal));
 
 2645                          ConstantInt::get(ShrTy, (
C + 1).shl(ShAmtVal)));
 
 2648                          ConstantInt::get(ShrTy, (
C + 1).shl(ShAmtVal) - 1));
 
 2655    Constant *Mask = ConstantInt::get(ShrTy, Val);
 
 2657    return new ICmpInst(Pred, 
And, ConstantInt::get(ShrTy, 
C << ShAmtVal));
 
 
 2674    const APInt *DivisorC;
 
 2681             "ult X, 0 should have been simplified already.");
 
 2687           "srem X, 0 should have been simplified already.");
 
 2688    if (!NormalizedC.
uge(DivisorC->
abs() - 1))
 
 2711  const APInt *DivisorC;
 
 2720       !
C.isStrictlyPositive()))
 
 2726  Constant *MaskC = ConstantInt::get(Ty, SignMask | (*DivisorC - 1));
 
 2730    return new ICmpInst(Pred, 
And, ConstantInt::get(Ty, 
C));
 
 
 2757  assert(*C2 != 0 && 
"udiv 0, X should have been simplified already.");
 
 2762           "icmp ugt X, UINT_MAX should have been simplified already.");
 
 2764                        ConstantInt::get(Ty, C2->
udiv(
C + 1)));
 
 2769    assert(
C != 0 && 
"icmp ult X, 0 should have been simplified already.");
 
 2771                        ConstantInt::get(Ty, C2->
udiv(
C)));
 
 
 2785  bool DivIsSigned = Div->
getOpcode() == Instruction::SDiv;
 
 2795  if (Cmp.isEquality() && Div->
hasOneUse() && 
C.isSignBitSet() &&
 
 2796      (!DivIsSigned || 
C.isMinSignedValue())) {
 
 2797    Value *XBig = 
Builder.CreateICmp(Pred, 
X, ConstantInt::get(Ty, 
C));
 
 2798    Value *YOne = 
Builder.CreateICmp(Pred, 
Y, ConstantInt::get(Ty, 1));
 
 2821  if (!Cmp.isEquality() && DivIsSigned != Cmp.isSigned())
 
 2840  bool ProdOV = (DivIsSigned ? Prod.
sdiv(*C2) : Prod.
udiv(*C2)) != 
C;
 
 2853  int LoOverflow = 0, HiOverflow = 0;
 
 2854  APInt LoBound, HiBound;
 
 2859    HiOverflow = LoOverflow = ProdOV;
 
 2868      LoBound = -(RangeSize - 1);
 
 2869      HiBound = RangeSize;
 
 2870    } 
else if (
C.isStrictlyPositive()) { 
 
 2872      HiOverflow = LoOverflow = ProdOV;
 
 2878      LoOverflow = HiOverflow = ProdOV ? -1 : 0;
 
 2880        APInt DivNeg = -RangeSize;
 
 2881        LoOverflow = 
addWithOverflow(LoBound, HiBound, DivNeg, 
true) ? -1 : 0;
 
 2889      LoBound = RangeSize + 1;
 
 2890      HiBound = -RangeSize;
 
 2891      if (HiBound == *C2) { 
 
 2895    } 
else if (
C.isStrictlyPositive()) { 
 
 2898      HiOverflow = LoOverflow = ProdOV ? -1 : 0;
 
 2904      LoOverflow = HiOverflow = ProdOV;
 
 2917    if (LoOverflow && HiOverflow)
 
 2921                          X, ConstantInt::get(Ty, LoBound));
 
 2924                          X, ConstantInt::get(Ty, HiBound));
 
 2928    if (LoOverflow && HiOverflow)
 
 2932                          X, ConstantInt::get(Ty, LoBound));
 
 2935                          X, ConstantInt::get(Ty, HiBound));
 
 2940    if (LoOverflow == +1) 
 
 2942    if (LoOverflow == -1) 
 
 2944    return new ICmpInst(Pred, 
X, ConstantInt::get(Ty, LoBound));
 
 2947    if (HiOverflow == +1) 
 
 2949    if (HiOverflow == -1) 
 
 
 2979  bool HasNSW = 
Sub->hasNoSignedWrap();
 
 2980  bool HasNUW = 
Sub->hasNoUnsignedWrap();
 
 2982      ((Cmp.isUnsigned() && HasNUW) || (Cmp.isSigned() && HasNSW)) &&
 
 2984    return new ICmpInst(SwappedPred, 
Y, ConstantInt::get(Ty, SubResult));
 
 2992  if (Cmp.isEquality() && 
C.isZero() &&
 
 2993      none_of((
Sub->users()), [](
const User *U) { return isa<PHINode>(U); }))
 
 3001  if (!
Sub->hasOneUse())
 
 3004  if (
Sub->hasNoSignedWrap()) {
 
 3028      (*C2 & (
C - 1)) == (
C - 1))
 
 3041  return new ICmpInst(SwappedPred, 
Add, ConstantInt::get(Ty, ~
C));
 
 
 3047  auto FoldConstant = [&](
bool Val) {
 
 3048    Constant *Res = Val ? Builder.getTrue() : Builder.getFalse();
 
 3055  switch (Table.to_ulong()) {

 3057    return FoldConstant(false);

 3059    return HasOneUse ? Builder.CreateNot(Builder.CreateOr(Op0, Op1)) : nullptr;

 3061    return HasOneUse ? Builder.CreateAnd(Builder.CreateNot(Op0), Op1) : nullptr;

 3063    return Builder.CreateNot(Op0);

 3065    return HasOneUse ? Builder.CreateAnd(Op0, Builder.CreateNot(Op1)) : nullptr;

 3067    return Builder.CreateNot(Op1);

 3069    return Builder.CreateXor(Op0, Op1);

 3071    return HasOneUse ? Builder.CreateNot(Builder.CreateAnd(Op0, Op1)) : nullptr;

 3073    return Builder.CreateAnd(Op0, Op1);

 3075    return HasOneUse ? Builder.CreateNot(Builder.CreateXor(Op0, Op1)) : nullptr;

 3079    return HasOneUse ? Builder.CreateOr(Builder.CreateNot(Op0), Op1) : nullptr;

 3083    return HasOneUse ? Builder.CreateOr(Op0, Builder.CreateNot(Op1)) : nullptr;

 3085    return Builder.CreateOr(Op0, Op1);

 3087    return FoldConstant(true);
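
// --- Illustrative sketch (not part of the listed source) ---
// The switch above keys on a 4-entry truth table of the combined condition
// over the two i1 operands: bit (Op0 << 1 | Op1) holds the outcome for that
// input pair, so e.g. table 0b1000 is AND, 0b1110 is OR, 0b0110 is XOR.
// A plain C++ model of that encoding:
#include <cassert>

int main() {
  auto TableFor = [](bool (*F)(bool, bool)) {
    unsigned Table = 0;
    for (unsigned I = 0; I != 4; ++I) {
      bool Op0 = (I >> 1) & 1, Op1 = I & 1;
      Table |= unsigned(F(Op0, Op1)) << I;
    }
    return Table;
  };
  assert(TableFor([](bool A, bool B) { return A && B; }) == 0b1000u);
  assert(TableFor([](bool A, bool B) { return A || B; }) == 0b1110u);
  assert(TableFor([](bool A, bool B) { return A != B; }) == 0b0110u);
  return 0;
}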
 
 
 3102      Cmp.getType() != 
A->getType())
 
 3105  std::bitset<4> Table;
 
 3106  auto ComputeTable = [&](
bool First, 
bool Second) -> std::optional<bool> {
 
 3110      auto *Val = Res->getType()->isVectorTy() ? Res->getSplatValue() : Res;
 
 3114    return std::nullopt;
 
 3117  for (
unsigned I = 0; 
I < 4; ++
I) {
 
 3118    bool First = (
I >> 1) & 1;
 
 3119    bool Second = 
I & 1;
 
 3120    if (
auto Res = ComputeTable(
First, Second))
 
 
 3148    unsigned BW = 
C.getBitWidth();
 
 3149    std::bitset<4> Table;
 
 3150    auto ComputeTable = [&](
bool Op0Val, 
bool Op1Val) {
 
 3159    Table[0] = ComputeTable(
false, 
false);
 
 3160    Table[1] = ComputeTable(
false, 
true);
 
 3161    Table[2] = ComputeTable(
true, 
false);
 
 3162    Table[3] = ComputeTable(
true, 
true);
 
 3177  if ((
Add->hasNoSignedWrap() &&
 
 3179      (
Add->hasNoUnsignedWrap() &&
 
 3183        Cmp.isSigned() ? 
C.ssub_ov(*C2, Overflow) : 
C.usub_ov(*C2, Overflow);
 
 3189      return new ICmpInst(Pred, 
X, ConstantInt::get(Ty, NewC));
 
 3193      C.isNonNegative() && (
C - *C2).isNonNegative() &&
 
 3196                        ConstantInt::get(Ty, 
C - *C2));
 
 3201  if (Cmp.isSigned()) {
 
 3202    if (
Lower.isSignMask())
 
 3204    if (
Upper.isSignMask())
 
 3207    if (
Lower.isMinValue())
 
 3209    if (
Upper.isMinValue())
 
 3242  if (!
Add->hasOneUse())
 
 3257                        ConstantInt::get(Ty, 
C * 2));
 
 3271                        Builder.CreateAdd(
X, ConstantInt::get(Ty, *C2 - 
C - 1)),
 
 3272                        ConstantInt::get(Ty, ~
C));
 
 3277    Type *NewCmpTy = V->getType();
 
 3279    if (shouldChangeType(Ty, NewCmpTy)) {
 
 3290              : 
Builder.CreateAdd(V, ConstantInt::get(NewCmpTy, EquivOffset)),
 
 3291          ConstantInt::get(NewCmpTy, EquivInt));
 
 
 3313  Value *EqualVal = 
SI->getTrueValue();
 
 3314  Value *UnequalVal = 
SI->getFalseValue();
 
 3337    auto FlippedStrictness =
 
 3339    if (!FlippedStrictness)
 
 3342           "basic correctness failure");
 
 3343    RHS2 = FlippedStrictness->second;
 
 
 3355  assert(
C && 
"Cmp RHS should be a constant int!");
 
 3361  Value *OrigLHS, *OrigRHS;
 
 3362  ConstantInt *C1LessThan, *C2Equal, *C3GreaterThan;
 
 3363  if (Cmp.hasOneUse() &&
 
 3366    assert(C1LessThan && C2Equal && C3GreaterThan);
 
 3369        C1LessThan->
getValue(), 
C->getValue(), Cmp.getPredicate());
 
 3371                                           Cmp.getPredicate());
 
 3373        C3GreaterThan->
getValue(), 
C->getValue(), Cmp.getPredicate());
 
 3384    if (TrueWhenLessThan)
 
 3390    if (TrueWhenGreaterThan)
 
 
 3405  Value *Op1 = Cmp.getOperand(1);
 
 3406  Value *BCSrcOp = Bitcast->getOperand(0);
 
 3407  Type *SrcType = Bitcast->getSrcTy();
 
 3408  Type *DstType = Bitcast->getType();
 
 3412  if (SrcType->isVectorTy() == DstType->isVectorTy() &&
 
 3413      SrcType->getScalarSizeInBits() == DstType->getScalarSizeInBits()) {
 
 3428        return new ICmpInst(Pred, 
X, ConstantInt::get(
X->getType(), 1));
 
 3455        Type *XType = 
X->getType();
 
 3458        if (!(XType->
isPPC_FP128Ty() || SrcType->isPPC_FP128Ty())) {
 
 3473      Type *FPType = SrcType->getScalarType();
 
 3474      if (!Cmp.getParent()->getParent()->hasFnAttribute(
 
 3475              Attribute::NoImplicitFloat) &&
 
 3476          Cmp.isEquality() && FPType->isIEEELikeFPTy()) {
 
 3482                                     Builder.createIsFPClass(BCSrcOp, Mask));
 
 3489  if (!
match(Cmp.getOperand(1), 
m_APInt(
C)) || !DstType->isIntegerTy() ||
 
 3490      !SrcType->isIntOrIntVectorTy())
 
 3500  if (Cmp.isEquality() && 
C->isAllOnes() && Bitcast->hasOneUse()) {
 
 3501    if (
Value *NotBCSrcOp =
 
 3503      Value *Cast = 
Builder.CreateBitCast(NotBCSrcOp, DstType);
 
 3512  if (Cmp.isEquality() && 
C->isZero() && Bitcast->hasOneUse() &&
 
 3515      Type *NewType = 
Builder.getIntNTy(VecTy->getPrimitiveSizeInBits());
 
 3535      if (
C->isSplat(EltTy->getBitWidth())) {
 
 3542        Value *Extract = 
Builder.CreateExtractElement(Vec, Elem);
 
 3543        Value *NewC = ConstantInt::get(EltTy, 
C->trunc(EltTy->getBitWidth()));
 
 3544        return new ICmpInst(Pred, Extract, NewC);
 
 
 3580    Value *Cmp0 = Cmp.getOperand(0);
 
 3582    if (
C->isZero() && Cmp.isEquality() && Cmp0->
hasOneUse() &&
 
 3589      return new ICmpInst(Cmp.getPredicate(), 
X, 
Y);
 
 
 3604  if (!Cmp.isEquality())
 
 3613  case Instruction::SRem:
 
 3624  case Instruction::Add: {
 
 3631    } 
else if (
C.isZero()) {
 
 3634      if (
Value *NegVal = dyn_castNegVal(BOp1))
 
 3635        return new ICmpInst(Pred, BOp0, NegVal);
 
 3636      if (
Value *NegVal = dyn_castNegVal(BOp0))
 
 3637        return new ICmpInst(Pred, NegVal, BOp1);
 
 3646        return new ICmpInst(Pred, BOp0, Neg);
 
 3651  case Instruction::Xor:
 
 3656    } 
else if (
C.isZero()) {
 
 3658      return new ICmpInst(Pred, BOp0, BOp1);
 
 3661  case Instruction::Or: {
 
 3682        Cond->getType() == Cmp.getType()) {
 
 3720  case Instruction::UDiv:
 
 3721  case Instruction::SDiv:
 
 3731        return new ICmpInst(Pred, BOp0, BOp1);
 
 3734            Instruction::Mul, BO->
getOpcode() == Instruction::SDiv, BOp1,
 
 3735            Cmp.getOperand(1), BO);
 
 3739          return new ICmpInst(Pred, YC, BOp0);
 
 3743    if (BO->
getOpcode() == Instruction::UDiv && 
C.isZero()) {
 
 3746      return new ICmpInst(NewPred, BOp1, BOp0);
 
 
 3760         "Non-ctpop intrin in ctpop fold");
 
 
 3795  Type *Ty = 
II->getType();
 
 3799  switch (
II->getIntrinsicID()) {
 
 3800  case Intrinsic::abs:
 
 3803    if (
C.isZero() || 
C.isMinSignedValue())
 
 3804      return new ICmpInst(Pred, 
II->getArgOperand(0), ConstantInt::get(Ty, 
C));
 
 3807  case Intrinsic::bswap:

 3809    return new ICmpInst(Pred, II->getArgOperand(0),

 3810                        ConstantInt::get(Ty, C.byteSwap()));

 3812  case Intrinsic::bitreverse:

 3814    return new ICmpInst(Pred, II->getArgOperand(0),

 3815                        ConstantInt::get(Ty, C.reverseBits()));
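
// --- Illustrative sketch (not part of the listed source) ---
// bswap and bitreverse are self-inverse bijections, so an equality compare
// against a constant moves the operation onto the constant:
//   bswap(X) == C   <=>   X == bswap(C)
// Spot check with the GCC/Clang __builtin_bswap32 builtin:
#include <cassert>
#include <cstdint>

int main() {
  const uint32_t C = 0x11223344u;
  const uint32_t Samples[] = {0u, 0x44332211u, 0xDEADBEEFu, C};
  for (uint32_t X : Samples)
    assert((__builtin_bswap32(X) == C) == (X == __builtin_bswap32(C)));
  return 0;
}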
 
 3817  case Intrinsic::ctlz:
 
 3818  case Intrinsic::cttz: {
 
 3821      return new ICmpInst(Pred, 
II->getArgOperand(0),
 
 3827    unsigned Num = 
C.getLimitedValue(
BitWidth);
 
 3829      bool IsTrailing = 
II->getIntrinsicID() == Intrinsic::cttz;
 
 3832      APInt Mask2 = IsTrailing
 
 3836                          ConstantInt::get(Ty, Mask2));
 
 3841  case Intrinsic::ctpop: {
 
 3844    bool IsZero = 
C.isZero();
 
 3846      return new ICmpInst(Pred, 
II->getArgOperand(0),
 
 3853  case Intrinsic::fshl:
 
 3854  case Intrinsic::fshr:
 
 3855    if (
II->getArgOperand(0) == 
II->getArgOperand(1)) {
 
 3856      const APInt *RotAmtC;
 
 3860        return new ICmpInst(Pred, 
II->getArgOperand(0),
 
 3861                            II->getIntrinsicID() == Intrinsic::fshl
 
 3862                                ? ConstantInt::get(Ty, 
C.rotr(*RotAmtC))
 
 3863                                : ConstantInt::get(Ty, 
C.rotl(*RotAmtC)));
 
 3867  case Intrinsic::umax:
 
 3868  case Intrinsic::uadd_sat: {
 
 3871    if (
C.isZero() && 
II->hasOneUse()) {
 
 3878  case Intrinsic::ssub_sat:
 
 3881      return new ICmpInst(Pred, 
II->getArgOperand(0), 
II->getArgOperand(1));
 
 3883  case Intrinsic::usub_sat: {
 
 3888      return new ICmpInst(NewPred, 
II->getArgOperand(0), 
II->getArgOperand(1));
 
 
 3903  assert(Cmp.isEquality());
 
 3906  Value *Op0 = Cmp.getOperand(0);
 
 3907  Value *Op1 = Cmp.getOperand(1);
 
 3910  if (!IIOp0 || !IIOp1 || IIOp0->getIntrinsicID() != IIOp1->getIntrinsicID())
 
 3913  switch (IIOp0->getIntrinsicID()) {
 
 3914  case Intrinsic::bswap:
 
 3915  case Intrinsic::bitreverse:
 
 3918    return new ICmpInst(Pred, IIOp0->getOperand(0), IIOp1->getOperand(0));
 
 3919  case Intrinsic::fshl:
 
 3920  case Intrinsic::fshr: {
 
 3923    if (IIOp0->getOperand(0) != IIOp0->getOperand(1))
 
 3925    if (IIOp1->getOperand(0) != IIOp1->getOperand(1))
 
 3927    if (IIOp0->getOperand(2) == IIOp1->getOperand(2))
 
 3928      return new ICmpInst(Pred, IIOp0->getOperand(0), IIOp1->getOperand(0));
 
 3934    unsigned OneUses = IIOp0->hasOneUse() + IIOp1->hasOneUse();
 
 3939          Builder.CreateSub(IIOp0->getOperand(2), IIOp1->getOperand(2));
 
 3940      Value *CombinedRotate = Builder.CreateIntrinsic(
 
 3941          Op0->
getType(), IIOp0->getIntrinsicID(),
 
 3942          {IIOp0->getOperand(0), IIOp0->getOperand(0), SubAmt});
 
 3943      return new ICmpInst(Pred, IIOp1->getOperand(0), CombinedRotate);
 
 
 3961    switch (
II->getIntrinsicID()) {
 
 3964    case Intrinsic::fshl:
 
 3965    case Intrinsic::fshr:
 
 3966      if (Cmp.isEquality() && 
II->getArgOperand(0) == 
II->getArgOperand(1)) {
 
 3968        if (
C.isZero() || 
C.isAllOnes())
 
 3969          return new ICmpInst(Pred, 
II->getArgOperand(0), Cmp.getOperand(1));
 
 
 3983  case Instruction::Xor:
 
 3987  case Instruction::And:
 
 3991  case Instruction::Or:
 
 3995  case Instruction::Mul:
 
 3999  case Instruction::Shl:
 
 4003  case Instruction::LShr:
 
 4004  case Instruction::AShr:
 
 4008  case Instruction::SRem:
 
 4012  case Instruction::UDiv:
 
 4016  case Instruction::SDiv:
 
 4020  case Instruction::Sub:
 
 4024  case Instruction::Add:
 
 
 4048  if (!
II->hasOneUse())
 
 4064  Value *Op0 = 
II->getOperand(0);
 
 4065  Value *Op1 = 
II->getOperand(1);
 
 4074  switch (
II->getIntrinsicID()) {
 
 4077        "This function only works with usub_sat and uadd_sat for now!");
 
 4078  case Intrinsic::uadd_sat:
 
 4081  case Intrinsic::usub_sat:
 
 4091      II->getBinaryOp(), *COp1, 
II->getNoWrapKind());
 
 4098  if (
II->getBinaryOp() == Instruction::Add)
 
 4104      SatValCheck ? Instruction::BinaryOps::Or : Instruction::BinaryOps::And;
 
 4106  std::optional<ConstantRange> Combination;
 
 4107  if (CombiningOp == Instruction::BinaryOps::Or)
 
 4119  Combination->getEquivalentICmp(EquivPred, EquivInt, EquivOffset);
 
 4123      Builder.CreateAdd(Op0, ConstantInt::get(Op1->
getType(), EquivOffset)),
 
 4124      ConstantInt::get(Op1->
getType(), EquivInt));
 
 
 4131  std::optional<ICmpInst::Predicate> NewPredicate = std::nullopt;
 
 4136      NewPredicate = Pred;
 
 4140    else if (
C.isAllOnes())
 
 4148    else if (
C.isZero())
 
 4165    if (!
C.isZero() && !
C.isAllOnes())
 
 4176  if (
I->getIntrinsicID() == Intrinsic::scmp)
 
 
 4190  switch (
II->getIntrinsicID()) {
 
 4193  case Intrinsic::uadd_sat:
 
 4194  case Intrinsic::usub_sat:
 
 4199  case Intrinsic::ctpop: {
 
 4204  case Intrinsic::scmp:
 
 4205  case Intrinsic::ucmp:
 
 4211  if (Cmp.isEquality())
 
 4214  Type *Ty = 
II->getType();
 
 4216  switch (
II->getIntrinsicID()) {
 
 4217  case Intrinsic::ctpop: {
 
 4229  case Intrinsic::ctlz: {
 
 4232      unsigned Num = 
C.getLimitedValue();
 
 4235                             II->getArgOperand(0), ConstantInt::get(Ty, Limit));
 
 4240      unsigned Num = 
C.getLimitedValue();
 
 4243                             II->getArgOperand(0), ConstantInt::get(Ty, Limit));
 
 4247  case Intrinsic::cttz: {
 
 4249    if (!
II->hasOneUse())
 
 4256                             Builder.CreateAnd(
II->getArgOperand(0), Mask),
 
 4264                             Builder.CreateAnd(
II->getArgOperand(0), Mask),
 
 4269  case Intrinsic::ssub_sat:
 
 4273        return new ICmpInst(Pred, 
II->getArgOperand(0), 
II->getArgOperand(1));
 
 4277                            II->getArgOperand(1));
 
 4281                            II->getArgOperand(1));
 
 
 4293  Value *Op0 = 
I.getOperand(0), *Op1 = 
I.getOperand(1);
 
 4300  case Instruction::IntToPtr:
 
 4309  case Instruction::Load:
 
 
 4326  auto SimplifyOp = [&](
Value *
Op, 
bool SelectCondIsTrue) -> 
Value * {
 
 4330            SI->getCondition(), Pred, 
Op, RHS, 
DL, SelectCondIsTrue))
 
 4331      return ConstantInt::get(
I.getType(), *Impl);
 
 4336  Value *Op1 = SimplifyOp(
SI->getOperand(1), 
true);
 
 4340  Value *Op2 = SimplifyOp(
SI->getOperand(2), 
false);
 
 4344  auto Simplifies = [&](
Value *
Op, 
unsigned Idx) {
 
 4359  bool Transform = 
false;
 
 4362  else if (Simplifies(Op1, 1) || Simplifies(Op2, 2)) {
 
 4364    if (
SI->hasOneUse())
 
 4367    else if (CI && !CI->
isZero())
 
 4375      Op1 = 
Builder.CreateICmp(Pred, 
SI->getOperand(1), RHS, 
I.getName());
 
 4377      Op2 = 
Builder.CreateICmp(Pred, 
SI->getOperand(2), RHS, 
I.getName());
 
 
 4386                         unsigned Depth = 0) {
 
 4389  if (V->getType()->getScalarSizeInBits() == 1)
 
 4397  switch (
I->getOpcode()) {
 
 4398  case Instruction::ZExt:
 
 4401  case Instruction::SExt:
 
 4405  case Instruction::And:
 
 4406  case Instruction::Or:
 
 4413  case Instruction::Xor:
 
 4423  case Instruction::Select:
 
 4427  case Instruction::Shl:
 
 4430  case Instruction::LShr:
 
 4433  case Instruction::AShr:
 
 4437  case Instruction::Add:
 
 4443  case Instruction::Sub:
 
 4449  case Instruction::Call: {
 
 4451      switch (
II->getIntrinsicID()) {
 
 4454      case Intrinsic::umax:
 
 4455      case Intrinsic::smax:
 
 4456      case Intrinsic::umin:
 
 4457      case Intrinsic::smin:
 
 4462      case Intrinsic::bitreverse:
 
 
 4552  auto IsLowBitMask = [&]() {
 
 4570      auto Check = [&]() {
 
 4588      auto Check = [&]() {
 
 4607  if (!IsLowBitMask())
 
 
 4626  const APInt *C0, *C1;

 4643  const APInt &MaskedBits = *C0;

 4644  assert(MaskedBits != 0 && "shift by zero should be folded away already.");

 4665  auto *XType = X->getType();

 4666  const unsigned XBitWidth = XType->getScalarSizeInBits();

 4668  assert(BitWidth.ugt(MaskedBits) && "shifts should leave some bits untouched");

 4681  Value *T0 = Builder.CreateAdd(X, ConstantInt::get(XType, AddCst));

 4683  Value *T1 = Builder.CreateICmp(DstPred, T0, ConstantInt::get(XType, ICmpCst));
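
// --- Illustrative sketch (not part of the listed source) ---
// The sign-extension-check fold above turns "does X survive a shl/ashr round
// trip that keeps N bits" (i.e. X fits in an N-bit signed value) into one add
// plus an unsigned compare:  X + (1 << (N-1))  u<  (1 << N).
// Exhaustive check for 8-bit X and N = 5 kept bits, in plain C++:
#include <cassert>
#include <cstdint>

int main() {
  const unsigned N = 5;
  for (int X = -128; X <= 127; ++X) {
    bool FitsIn5SignedBits = X >= -16 && X <= 15;          // [-2^(N-1), 2^(N-1))
    bool AddUlt = (uint8_t)(X + (1 << (N - 1))) < (1u << N);
    assert(FitsIn5SignedBits == AddUlt);
  }
  return 0;
}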
 
 
 4699      !
I.getOperand(0)->hasOneUse())
 
 4724  assert(NarrowestTy == 
I.getOperand(0)->getType() &&
 
 4725         "We did not look past any shifts while matching XShift though.");
 
 4726  bool HadTrunc = WidestTy != 
I.getOperand(0)->getType();
 
 4733  auto XShiftOpcode = XShift->
getOpcode();
 
 4734  if (XShiftOpcode == YShift->
getOpcode())
 
 4737  Value *
X, *XShAmt, *
Y, *YShAmt;
 
 4746    if (!
match(
I.getOperand(0),
 
 4772  unsigned MaximalPossibleTotalShiftAmount =
 
 4775  APInt MaximalRepresentableShiftAmount =
 
 4777  if (MaximalRepresentableShiftAmount.
ult(MaximalPossibleTotalShiftAmount))
 
 4786  if (NewShAmt->getType() != WidestTy) {
 
 4796  if (!
match(NewShAmt,
 
 4798                                APInt(WidestBitWidth, WidestBitWidth))))
 
 4803    auto CanFold = [NewShAmt, WidestBitWidth, NarrowestShift, SQ,
 
 4809                                    ? NewShAmt->getSplatValue()
 
 4812      if (NewShAmtSplat &&
 
 4822        unsigned MaxActiveBits = Known.
getBitWidth() - MinLeadZero;
 
 4823        if (MaxActiveBits <= 1)
 
 4833        unsigned MaxActiveBits = Known.
getBitWidth() - MinLeadZero;
 
 4834        if (MaxActiveBits <= 1)
 
 4837        if (NewShAmtSplat) {
 
 4840          if (AdjNewShAmt.
ule(MinLeadZero))
 
 4851  X = Builder.CreateZExt(
X, WidestTy);
 
 4852  Y = Builder.CreateZExt(
Y, WidestTy);
 
 4854  Value *T0 = XShiftOpcode == Instruction::BinaryOps::LShr
 
 4855                  ? Builder.CreateLShr(
X, NewShAmt)
 
 4856                  : Builder.CreateShl(
X, NewShAmt);
 
 4857  Value *
T1 = Builder.CreateAnd(T0, 
Y);
 
 4858  return Builder.CreateICmp(
I.getPredicate(), 
T1,
 
 
 4876  if (!
I.isEquality() &&
 
 4886      NeedNegation = 
false;
 
 4889      NeedNegation = 
true;
 
 4895    if (
I.isEquality() &&
 
 4910  bool MulHadOtherUses = Mul && !Mul->hasOneUse();

 4911  if (MulHadOtherUses)

 4915      Div->getOpcode() == Instruction::UDiv ? Intrinsic::umul_with_overflow

 4916                                            : Intrinsic::smul_with_overflow,

 4917      X->getType(), {X, Y}, nullptr, "mul");

 4922  if (MulHadOtherUses)

 4927    Res = Builder.CreateNot(Res, "mul.not.ov");

 4931  if (MulHadOtherUses)
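
// --- Illustrative sketch (not part of the listed source) ---
// The fold above recognizes "did X * Y overflow?" written as a divide-back
// test, (X * Y) / Y != X, and rewrites it to the multiply-with-overflow
// intrinsic. The equivalence for unsigned 8-bit values, checked exhaustively
// in plain C++:
#include <cassert>
#include <cstdint>

int main() {
  for (unsigned X = 0; X != 256; ++X)
    for (unsigned Y = 1; Y != 256; ++Y) { // Y != 0: the udiv form requires it
      uint8_t WrappedProduct = (uint8_t)(X * Y);
      bool DivideBackSaysOverflow = (WrappedProduct / Y) != X;
      bool ReallyOverflows = X * Y > 0xFFu; // exact product, computed widely
      assert(DivideBackSaysOverflow == ReallyOverflows);
    }
  return 0;
}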
 
 
 4957    Type *Ty = 
X->getType();
 
 4961    Value *
And = Builder.CreateAnd(
X, MaxSignedVal);
 
 
 4971  Value *Op0 = 
I.getOperand(0), *Op1 = 
I.getOperand(1), *
A;
 
 
 5033  Value *Op0 = 
I.getOperand(0), *Op1 = 
I.getOperand(1), *
A;
 
 
 5068  Value *Op0 = 
I.getOperand(0), *Op1 = 
I.getOperand(1), *
A;
 
 5084    return new ICmpInst(PredOut, Op0, Op1);
 
 5104    return new ICmpInst(NewPred, Op0, Const);
 
 
 5116  if (!
C.isPowerOf2())
 
 
 5129  Value *Op0 = 
I.getOperand(0), *Op1 = 
I.getOperand(1);
 
 5197      return new ICmpInst(NewPred, Op1, Zero);
 
 5206      return new ICmpInst(NewPred, Op0, Zero);
 
 5210  bool NoOp0WrapProblem = 
false, NoOp1WrapProblem = 
false;
 
 5211  bool Op0HasNUW = 
false, Op1HasNUW = 
false;
 
 5212  bool Op0HasNSW = 
false, Op1HasNSW = 
false;
 
 5216                             bool &HasNSW, 
bool &HasNUW) -> 
bool {
 
 5223    } 
else if (BO.
getOpcode() == Instruction::Or) {
 
 5231  Value *
A = 
nullptr, *
B = 
nullptr, *
C = 
nullptr, *
D = 
nullptr;
 
 5235    NoOp0WrapProblem = hasNoWrapProblem(*BO0, Pred, Op0HasNSW, Op0HasNUW);
 
 5239    NoOp1WrapProblem = hasNoWrapProblem(*BO1, Pred, Op1HasNSW, Op1HasNUW);
 
 5244  if ((
A == Op1 || 
B == Op1) && NoOp0WrapProblem)
 
 5250  if ((
C == Op0 || 
D == Op0) && NoOp1WrapProblem)
 
 5255  if (
A && 
C && (
A == 
C || 
A == 
D || 
B == 
C || 
B == 
D) && NoOp0WrapProblem &&
 
 5263    } 
else if (
A == 
D) {
 
 5267    } 
else if (
B == 
C) {
 
 5284                                   bool IsNegative) -> 
bool {
 
 5285      const APInt *OffsetC;
 
 5297      if (!
C.isStrictlyPositive())
 
 5318    if (
A && NoOp0WrapProblem &&
 
 5319        ShareCommonDivisor(
A, Op1, 
B,
 
 5330    if (
C && NoOp1WrapProblem &&
 
 5331        ShareCommonDivisor(Op0, 
C, 
D,
 
 5344  if (
A && 
C && NoOp0WrapProblem && NoOp1WrapProblem &&
 
 5346    const APInt *AP1, *AP2;
 
 5354      if (AP1Abs.
uge(AP2Abs)) {
 
 5355        APInt Diff = *AP1 - *AP2;
 
 5358            A, C3, 
"", Op0HasNUW && Diff.
ule(*AP1), Op0HasNSW);
 
 5361        APInt Diff = *AP2 - *AP1;
 
 5364            C, C3, 
"", Op1HasNUW && Diff.
ule(*AP2), Op1HasNSW);
 
 5383  if (BO0 && BO0->
getOpcode() == Instruction::Sub) {
 
 5387  if (BO1 && BO1->
getOpcode() == Instruction::Sub) {
 
 5393  if (
A == Op1 && NoOp0WrapProblem)
 
 5396  if (
C == Op0 && NoOp1WrapProblem)
 
 5416  if (
B && 
D && 
B == 
D && NoOp0WrapProblem && NoOp1WrapProblem)
 
 5420  if (
A && 
C && 
A == 
C && NoOp0WrapProblem && NoOp1WrapProblem)
 
 5428        if (RHSC->isNotMinSignedValue())
 
 5429          return new ICmpInst(
I.getSwappedPredicate(), 
X,
 
 5447        if (Op0HasNSW && Op1HasNSW) {
 
 5454                                             SQ.getWithInstruction(&
I));
 
 5459                                                SQ.getWithInstruction(&
I));
 
 5460          if (GreaterThan && 
match(GreaterThan, 
m_One()))
 
 5467          if (((Op0HasNSW && Op1HasNSW) || (Op0HasNUW && Op1HasNUW)) &&
 
 5479          if (NonZero && BO0 && BO1 && Op0HasNSW && Op1HasNSW)
 
 5486        if (NonZero && BO0 && BO1 && Op0HasNUW && Op1HasNUW)
 
 5497  else if (BO1 && BO1->
getOpcode() == Instruction::SRem &&
 
 5527    case Instruction::Add:
 
 5528    case Instruction::Sub:
 
 5529    case Instruction::Xor: {
 
 5536        if (
C->isSignMask()) {
 
 5542        if (BO0->
getOpcode() == Instruction::Xor && 
C->isMaxSignedValue()) {
 
 5544          NewPred = 
I.getSwappedPredicate(NewPred);
 
 5550    case Instruction::Mul: {
 
 5551      if (!
I.isEquality())
 
 5559        if (
unsigned TZs = 
C->countr_zero()) {
 
 5565          return new ICmpInst(Pred, And1, And2);
 
 5570    case Instruction::UDiv:
 
 5571    case Instruction::LShr:
 
 5576    case Instruction::SDiv:
 
 5582    case Instruction::AShr:
 
 5587    case Instruction::Shl: {
 
 5588      bool NUW = Op0HasNUW && Op1HasNUW;
 
 5589      bool NSW = Op0HasNSW && Op1HasNSW;
 
 5592      if (!NSW && 
I.isSigned())
 
 
 5656  auto IsCondKnownTrue = [](
Value *Val) -> std::optional<bool> {
 
 5658      return std::nullopt;
 
 5663    return std::nullopt;
 
 5669  Pred = Pred.dropSameSign();
 
 5672  if (!CmpXZ.has_value() && !CmpYZ.has_value())
 
 5674  if (!CmpXZ.has_value()) {
 
 5680    if (CmpYZ.has_value())
 
 5704    if (!MinMaxCmpXZ.has_value()) {
 
 5712    if (!MinMaxCmpXZ.has_value())
 
 5728      return FoldIntoCmpYZ();
 
 5755        return FoldIntoCmpYZ();
 
 5764        return FoldIntoCmpYZ();
 
 
 5796  const APInt *
Lo = 
nullptr, *
Hi = 
nullptr;
 
 5819      I, 
Builder.CreateICmp(Pred, 
X, ConstantInt::get(
X->getType(), 
C)));
 
 
 5825  Value *Op0 = 
I.getOperand(0), *Op1 = 
I.getOperand(1);
 
 5829  if (
I.isEquality()) {
 
 5864    Type *Ty = 
A->getType();
 
 5865    CallInst *CtPop = Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, 
A);
 
 5867                                  ConstantInt::get(Ty, 2))
 
 5869                                  ConstantInt::get(Ty, 1));
 
 
 5876using OffsetOp = std::pair<Instruction::BinaryOps, Value *>;
 
 5878                            bool AllowRecursion) {
 
 5884  case Instruction::Add:
 
 5885    Offsets.emplace_back(Instruction::Sub, Inst->
getOperand(1));
 
 5886    Offsets.emplace_back(Instruction::Sub, Inst->
getOperand(0));
 
 5888  case Instruction::Sub:
 
 5889    Offsets.emplace_back(Instruction::Add, Inst->
getOperand(1));
 
 5891  case Instruction::Xor:
 
 5892    Offsets.emplace_back(Instruction::Xor, Inst->
getOperand(1));
 
 5893    Offsets.emplace_back(Instruction::Xor, Inst->
getOperand(0));
 
 5895  case Instruction::Select:
 
 5896    if (AllowRecursion) {
 
 
 5929      return Builder.CreateSelect(
V0, 
V1, 
V2);
 
 
 
 5941  assert(
I.isEquality() && 
"Expected an equality icmp");
 
 5942  Value *Op0 = 
I.getOperand(0), *Op1 = 
I.getOperand(1);
 
 5953    if (!Simplified || Simplified == V)
 
 5962  auto ApplyOffset = [&](
Value *V, 
unsigned BinOpc,
 
 5965      if (!Sel->hasOneUse())
 
 5967      Value *TrueVal = ApplyOffsetImpl(Sel->getTrueValue(), BinOpc, 
RHS);
 
 5970      Value *FalseVal = ApplyOffsetImpl(Sel->getFalseValue(), BinOpc, 
RHS);
 
 5975    if (
Value *Simplified = ApplyOffsetImpl(V, BinOpc, 
RHS))
 
 5980  for (
auto [BinOp, 
RHS] : OffsetOps) {
 
 5981    auto BinOpc = 
static_cast<unsigned>(BinOp);
 
 5983    auto Op0Result = ApplyOffset(Op0, BinOpc, 
RHS);
 
 5984    if (!Op0Result.isValid())
 
 5986    auto Op1Result = ApplyOffset(Op1, BinOpc, 
RHS);
 
 5987    if (!Op1Result.isValid())
 
 5990    Value *NewLHS = Op0Result.materialize(Builder);
 
 5991    Value *NewRHS = Op1Result.materialize(Builder);
 
 5992    return new ICmpInst(
I.getPredicate(), NewLHS, NewRHS);
 
 
 5999  if (!
I.isEquality())
 
 6002  Value *Op0 = 
I.getOperand(0), *Op1 = 
I.getOperand(1);
 
 6006    if (
A == Op1 || 
B == Op1) { 
 
 6007      Value *OtherVal = 
A == Op1 ? 
B : 
A;
 
 6035    Value *OtherVal = 
A == Op0 ? 
B : 
A;
 
 6042    Value *
X = 
nullptr, *
Y = 
nullptr, *Z = 
nullptr;
 
 6048    } 
else if (
A == 
D) {
 
 6052    } 
else if (
B == 
C) {
 
 6056    } 
else if (
B == 
D) {
 
 6066      const APInt *C0, *C1;
 
 6068                        (*C0 ^ *C1).isNegatedPowerOf2();
 
 6074          int(Op0->
hasOneUse()) + int(Op1->hasOneUse()) +
 
 6076      if (XorIsNegP2 || UseCnt >= 2) {
 
 6079        Op1 = 
Builder.CreateAnd(Op1, Z);
 
 6099      (Op0->
hasOneUse() || Op1->hasOneUse())) {
 
 6104        MaskC->
countr_one() == 
A->getType()->getScalarSizeInBits())
 
 6110  const APInt *AP1, *AP2;
 
 6119    if (ShAmt < TypeBits && ShAmt != 0) {
 
 6124      return new ICmpInst(NewPred, 
Xor, ConstantInt::get(
A->getType(), CmpVal));
 
 6134    if (ShAmt < TypeBits && ShAmt != 0) {
 
 6154    if (ShAmt < ASize) {
 
 6177      A->getType()->getScalarSizeInBits() == 
BitWidth * 2 &&
 
 6178      (
I.getOperand(0)->hasOneUse() || 
I.getOperand(1)->hasOneUse())) {
 
 6183                        Add, ConstantInt::get(
A->getType(), 
C.shl(1)));
 
 6210        Builder.CreateIntrinsic(Op0->
getType(), Intrinsic::fshl, {A, A, B}));
 
 6225    std::optional<bool> IsZero = std::nullopt;
 
 
 6267    Constant *
C = ConstantInt::get(Res->X->getType(), Res->C);
 
 6271  unsigned SrcBits = 
X->getType()->getScalarSizeInBits();
 
 6273    if (
II->getIntrinsicID() == Intrinsic::cttz ||
 
 6274        II->getIntrinsicID() == Intrinsic::ctlz) {
 
 6275      unsigned MaxRet = SrcBits;
 
 
 6301  bool IsSignedExt = CastOp0->getOpcode() == Instruction::SExt;
 
 6302  bool IsSignedCmp = ICmp.
isSigned();
 
 6310    if (IsZext0 != IsZext1) {
 
 6315      if (ICmp.
isEquality() && 
X->getType()->isIntOrIntVectorTy(1) &&
 
 6316          Y->getType()->isIntOrIntVectorTy(1))
 
 6326      bool IsNonNeg0 = NonNegInst0 && NonNegInst0->hasNonNeg();
 
 6327      bool IsNonNeg1 = NonNegInst1 && NonNegInst1->hasNonNeg();
 
 6329      if ((IsZext0 && IsNonNeg0) || (IsZext1 && IsNonNeg1))
 
 6336    Type *XTy = 
X->getType(), *YTy = 
Y->getType();
 
 6343          IsSignedExt ? Instruction::SExt : Instruction::ZExt;
 
 6345        X = 
Builder.CreateCast(CastOpcode, 
X, YTy);
 
 6347        Y = 
Builder.CreateCast(CastOpcode, 
Y, XTy);
 
 6359    if (IsSignedCmp && IsSignedExt)
 
 6372  Type *SrcTy = CastOp0->getSrcTy();
 
 6380    if (IsSignedExt && IsSignedCmp)
 
 
 6411  Value *SimplifiedOp0 = simplifyIntToPtrRoundTripCast(ICmp.
getOperand(0));
 
 6412  Value *SimplifiedOp1 = simplifyIntToPtrRoundTripCast(ICmp.
getOperand(1));
 
 6413  if (SimplifiedOp0 || SimplifiedOp1)
 
 6415                        SimplifiedOp0 ? SimplifiedOp0 : ICmp.
getOperand(0),
 
 6416                        SimplifiedOp1 ? SimplifiedOp1 : ICmp.
getOperand(1));
 
 6424  Value *Op0Src = CastOp0->getOperand(0);
 
 6425  Type *SrcTy = CastOp0->getSrcTy();
 
 6426  Type *DestTy = CastOp0->getDestTy();
 
 6430  auto CompatibleSizes = [&](
Type *PtrTy, 
Type *IntTy) {
 
 6435    return DL.getPointerTypeSizeInBits(PtrTy) == IntTy->getIntegerBitWidth();
 
 6437  if (CastOp0->getOpcode() == Instruction::PtrToInt &&
 
 6438      CompatibleSizes(SrcTy, DestTy)) {
 
 6439    Value *NewOp1 = 
nullptr;
 
 6441      Value *PtrSrc = PtrToIntOp1->getOperand(0);
 
 6443        NewOp1 = PtrToIntOp1->getOperand(0);
 
 6453  if (CastOp0->getOpcode() == Instruction::IntToPtr &&
 
 6454      CompatibleSizes(DestTy, SrcTy)) {
 
 6455    Value *NewOp1 = 
nullptr;
 
 6457      Value *IntSrc = IntToPtrOp1->getOperand(0);
 
 6459        NewOp1 = IntToPtrOp1->getOperand(0);
 
 
 6479  case Instruction::Add:
 
 6480  case Instruction::Sub:
 
 6482  case Instruction::Mul:
 
 6483    return !(
RHS->getType()->isIntOrIntVectorTy(1) && IsSigned) &&
 
 
 6495  case Instruction::Add:
 
 6500  case Instruction::Sub:
 
 6505  case Instruction::Mul:
 
 
 6514                                             bool IsSigned, 
Value *LHS,
 
 6525  Builder.SetInsertPoint(&OrigI);
 
 6542    Result = Builder.CreateBinOp(BinaryOp, 
LHS, 
RHS);
 
 6543    Result->takeName(&OrigI);
 
 6547    Result = Builder.CreateBinOp(BinaryOp, 
LHS, 
RHS);
 
 6548    Result->takeName(&OrigI);
 
 6552        Inst->setHasNoSignedWrap();
 
 6554        Inst->setHasNoUnsignedWrap();
 
 6577                                         const APInt *OtherVal,
 
 6587  assert(MulInstr->getOpcode() == Instruction::Mul);
 
 6591  assert(
LHS->getOpcode() == Instruction::ZExt);
 
 6592  assert(
RHS->getOpcode() == Instruction::ZExt);
 
 6596  Type *TyA = 
A->getType(), *TyB = 
B->getType();
 
 6598           WidthB = TyB->getPrimitiveSizeInBits();
 
 6601  if (WidthB > WidthA) {
 
 6618        unsigned TruncWidth = TI->getType()->getPrimitiveSizeInBits();
 
 6619        if (TruncWidth > MulWidth)
 
 6623        if (BO->getOpcode() != Instruction::And)
 
 6626          const APInt &CVal = CI->getValue();
 
 6642  switch (I.getPredicate()) {

 6649    if (MaxVal.eq(*OtherVal))

 6659    if (MaxVal.eq(*OtherVal))
 
 6673  if (WidthA < MulWidth)
 6674    MulA = Builder.CreateZExt(A, MulType);
 6675  if (WidthB < MulWidth)
 6676    MulB = Builder.CreateZExt(B, MulType);

 6678      Builder.CreateIntrinsic(Intrinsic::umul_with_overflow, MulType,
 6679                              {MulA, MulB}, nullptr, "umul");

 6686    Value *Mul = Builder.CreateExtractValue(Call, 0, "umul.value");

 6691        if (TI->getType()->getPrimitiveSizeInBits() == MulWidth)

 6696        assert(BO->getOpcode() == Instruction::And);

 6700        Value *ShortAnd = Builder.CreateAnd(Mul, ShortMask);
 6701        Value *Zext = Builder.CreateZExt(ShortAnd, BO->getType());

 6713    Value *Res = Builder.CreateExtractValue(Call, 1);
 
 
 6734  switch (I.getPredicate()) {

 6765  assert(DI && UI && "Instruction not defined\n");

 6777    if (Usr != UI && !DT.dominates(DB, Usr->getParent()))
 
 
 6789  if (!BI || BI->getNumSuccessors() != 2)
 
 6792  if (!IC || (IC->getOperand(0) != SI && IC->getOperand(1) != SI))

 6839                                                 const unsigned SIOpd) {
 6840  assert((SIOpd == 1 || SIOpd == 2) && "Invalid select operand!");

 6842    BasicBlock *Succ = SI->getParent()->getTerminator()->getSuccessor(1);

 6856      SI->replaceUsesOutsideBlock(SI->getOperand(SIOpd), SI->getParent());

 6866  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
 
 6871  unsigned BitWidth = Ty->isIntOrIntVectorTy()
 6872                          ? Ty->getScalarSizeInBits()
 6873                          : DL.getPointerTypeSizeInBits(Ty->getScalarType());

 6926    if (!Cmp.hasOneUse())

 6935  if (!isMinMaxCmp(I)) {
 
 6940      if (Op1Min == Op0Max) 
 
 6945        if (*CmpC == Op0Min + 1)
 
 6947                              ConstantInt::get(Op1->getType(), *CmpC - 1));
 
 6957      if (Op1Max == Op0Min) 
 
 6962        if (*CmpC == Op0Max - 1)
 
 6964                              ConstantInt::get(Op1->getType(), *CmpC + 1));
 
 6974      if (Op1Min == Op0Max) 
 
 6978        if (*CmpC == Op0Min + 1) 
 
 6980                              ConstantInt::get(Op1->getType(), *CmpC - 1));
 
 6985      if (Op1Max == Op0Min) 
 
 6989        if (*CmpC == Op0Max - 1) 
 
 6991                              ConstantInt::get(Op1->getType(), *CmpC + 1));
 
 7008    APInt Op0KnownZeroInverted = ~Op0Known.Zero;
 
 7011      Value *LHS = nullptr;

 7014          *LHSC != Op0KnownZeroInverted)

 7020        Type *XTy = X->getType();
 
 7022        APInt C2 = Op0KnownZeroInverted;
 
 7023        APInt C2Pow2 = (C2 & ~(*C1 - 1)) + *C1;
 
 7029          auto *CmpC = ConstantInt::get(XTy, Log2C2 - Log2C1);
 
 7039        (Op0Known & Op1Known) == Op0Known)
 
 7045    if (Op1Min == Op0Max) 
 
 7049    if (Op1Max == Op0Min) 
 
 7053    if (Op1Min == Op0Max) 
 
 7057    if (Op1Max == Op0Min) 
 
 7065  if ((I.isSigned() || (I.isUnsigned() && !I.hasSameSign())) &&

 7068    I.setPredicate(I.getUnsignedPredicate());
 
 
 7086    return BinaryOperator::CreateAnd(Builder.CreateIsNull(X), Y);

 7092    return BinaryOperator::CreateOr(Builder.CreateIsNull(X), Y);

 7103    bool IsSExt = ExtI->getOpcode() == Instruction::SExt;

 7105    auto CreateRangeCheck = [&] {

 7120      } else if (!IsSExt || HasOneUse) {

 7125        return CreateRangeCheck();

 7127    } else if (IsSExt ? C->isAllOnes() : C->isOne()) {

 7135      } else if (!IsSExt || HasOneUse) {

 7140        return CreateRangeCheck();

 7154          Instruction::ICmp, Pred1, X,
 
 
 7173  Value *Op0 = I.getOperand(0);
 7174  Value *Op1 = I.getOperand(1);

 7180  if (!FlippedStrictness)

 7183  return new ICmpInst(FlippedStrictness->first, Op0, FlippedStrictness->second);

 7201  I.setName(I.getName() + ".not");
 
 
 7212  Value *A = I.getOperand(0), *B = I.getOperand(1);
 7213  assert(A->getType()->isIntOrIntVectorTy(1) && "Bools only");

 7219    switch (I.getPredicate()) {

 7228    switch (I.getPredicate()) {

 7238  switch (I.getPredicate()) {

 7247    return BinaryOperator::CreateXor(A, B);

 7255    return BinaryOperator::CreateAnd(Builder.CreateNot(A), B);

 7263    return BinaryOperator::CreateAnd(Builder.CreateNot(B), A);

 7271    return BinaryOperator::CreateOr(Builder.CreateNot(A), B);

 7279    return BinaryOperator::CreateOr(Builder.CreateNot(B), A);

 7327  Value *NewX = Builder.CreateLShr(X, Y, X->getName() + ".highbits");
 
 
 7335  Value *LHS = Cmp.getOperand(0), *RHS = Cmp.getOperand(1);

 7339    Value *V = Builder.CreateCmp(Pred, X, Y, Cmp.getName());

 7341      I->copyIRFlags(&Cmp);
 7342    Module *M = Cmp.getModule();

 7344        M, Intrinsic::vector_reverse, V->getType());

 7351        (LHS->hasOneUse() || RHS->hasOneUse()))
 7352      return createCmpReverse(Pred, V1, V2);

 7356      return createCmpReverse(Pred, V1, RHS);

 7360    return createCmpReverse(Pred, LHS, V2);

 7371      V1Ty == V2->getType() && (LHS->hasOneUse() || RHS->hasOneUse())) {
 7372    Value *NewCmp = Builder.CreateCmp(Pred, V1, V2);

 7385  Constant *ScalarC = C->getSplatValue(true);

 7393    Value *NewCmp = Builder.CreateCmp(Pred, V1, C);
 
 
 7404  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

 7410  if (match(Op0, UAddOvResultPat) &&

 7421           (Op0 == A || Op0 == B))

 7431  if (!I.getOperand(0)->getType()->isPointerTy() ||

 7433          I.getParent()->getParent(),
 7434          I.getOperand(0)->getType()->getPointerAddressSpace())) {

 7440      Op->isLaunderOrStripInvariantGroup()) {

 7442                            Op->getOperand(0), I.getOperand(1));
 
 
 7454  if (I.getType()->isVectorTy())

 7477  if (!LHSTy || !LHSTy->getElementType()->isIntegerTy())

 7480      LHSTy->getNumElements() * LHSTy->getElementType()->getIntegerBitWidth();

 7482  if (!DL.isLegalInteger(NumBits))

 7486    auto *ScalarTy = Builder.getIntNTy(NumBits);
 7487    LHS = Builder.CreateBitCast(LHS, ScalarTy, LHS->getName() + ".scalar");
 7488    RHS = Builder.CreateBitCast(RHS, ScalarTy, RHS->getName() + ".scalar");
 
 
 7544      bool IsIntMinPosion = C->isAllOnesValue();

 7556            CxtI, IsIntMinPosion
 7557                      ? Builder.CreateICmpSGT(X, AllOnesValue)

 7559                            X, ConstantInt::get(X->getType(), SMin + 1)));

 7565            CxtI, IsIntMinPosion
 7566                      ? Builder.CreateICmpSLT(X, NullValue)

 7568                            X, ConstantInt::get(X->getType(), SMin)));

 7581  auto CheckUGT1 = [](const APInt &Divisor) { return Divisor.ugt(1); };

 7596  auto CheckNE0 = [](const APInt &Shift) { return !Shift.isZero(); };
 
 
 7616  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

 7623  if (Op0Cplxity < Op1Cplxity) {

 7638      if (Value *V = dyn_castNegVal(SelectTrue)) {
 7639        if (V == SelectFalse)
 7641      } else if (Value *V = dyn_castNegVal(SelectFalse)) {
 7642        if (V == SelectTrue)

 7751  if (I.isCommutative()) {
 7752    if (auto Pair = matchSymmetricPair(I.getOperand(0), I.getOperand(1))) {
 
 7776        (Op0->hasOneUse() || Op1->hasOneUse())) {

 7800      bool I0NUW = I0->hasNoUnsignedWrap();
 7801      bool I1NUW = I1->hasNoUnsignedWrap();
 7802      bool I0NSW = I0->hasNoSignedWrap();
 7803      bool I1NSW = I1->hasNoSignedWrap();

 7807           ((I0NUW || I0NSW) && (I1NUW || I1NSW)))) {

 7809                            ConstantInt::get(Op0->getType(), 0));
 
 7816    assert(Op1->getType()->isPointerTy() &&
 
 7817           "Comparing pointer with non-pointer?");
 
 7846      bool ConsumesOp0, ConsumesOp1;
 
 7849          (ConsumesOp0 || ConsumesOp1)) {
 
 7852        assert(InvOp0 && InvOp1 &&
 
 7853               "Mismatch between isFreeToInvert and getFreelyInverted");
 
 7854        return new ICmpInst(I.getSwappedPredicate(), InvOp0, InvOp1);

 7866      if (AddI->getOpcode() == Instruction::Add &&
 7867          OptimizeOverflowCheck(Instruction::Add, false, X, Y, *AddI,
 7868                                Result, Overflow)) {

 7886    if ((I.isUnsigned() || I.isEquality()) &&

 7889        Y->getType()->getScalarSizeInBits() == 1 &&
 7890        (Op0->hasOneUse() || Op1->hasOneUse())) {

 7897        unsigned ShiftOpc = ShiftI->getOpcode();
 
 7898        if ((ExtOpc == Instruction::ZExt && ShiftOpc == Instruction::LShr) ||
 
 7899            (ExtOpc == Instruction::SExt && ShiftOpc == Instruction::AShr)) {
 
 7930        if (EVI->getIndices()[0] == 0 && ACXI->getCompareOperand() == Op1 &&
 
 7937  if (I.getType()->isVectorTy())

 7949    const APInt *C1, *C2;

 7956        Type *InputTy = A->getType();

 7963            TruncC1.setBit(InputBitWidth - 1);

 7967              ConstantInt::get(InputTy, C2->trunc(InputBitWidth)));
 
 
 7987  if (MantissaWidth == -1)
 
 7994  if (I.isEquality()) {

 7996    bool IsExact = false;
 7997    APSInt RHSCvt(IntWidth, LHSUnsigned);

 8006      if (*RHS != RHSRoundInt) {

 8026  if ((int)IntWidth > MantissaWidth) {

 8028    int Exp = ilogb(*RHS);

 8031      if (MaxExponent < (int)IntWidth - !LHSUnsigned)

 8037      if (MantissaWidth <= Exp && Exp <= (int)IntWidth - !LHSUnsigned)

 8046  assert(!RHS->isNaN() && "NaN comparison not already folded!");

 8049  switch (I.getPredicate()) {
 
 8140  APSInt RHSInt(IntWidth, LHSUnsigned);
 
 8143  if (!RHS->isZero()) {
 
 8158        if (RHS->isNegative())
 
 8164        if (RHS->isNegative())
 
 8170        if (RHS->isNegative())
 
 8177        if (!RHS->isNegative())
 
 8183        if (RHS->isNegative())
 
 8189        if (RHS->isNegative())
 
 8195        if (RHS->isNegative())
 
 8202        if (!RHS->isNegative())
 
 
 8256  if (C->isNegative())
 8257    Pred = I.getSwappedPredicate();

 8273  bool RoundDown = false;

 8298  auto NextValue = [](const APFloat &Value, bool RoundDown) {

 8300    NextValue.next(RoundDown);
 
 8304  APFloat NextCValue = NextValue(*CValue, RoundDown);
 
 8310  APFloat ExtCValue = ConvertFltSema(*CValue, DestFltSema);
 
 8311  APFloat ExtNextCValue = ConvertFltSema(NextCValue, DestFltSema);
 
 8318    APFloat PrevCValue = NextValue(*CValue, !RoundDown);
 
 8319    APFloat Bias = ConvertFltSema(*CValue - PrevCValue, DestFltSema);
 
 8321    ExtNextCValue = ExtCValue + Bias;
 
 8328      C.getType()->getScalarType()->getFltSemantics();
 
 8331  APFloat MidValue = ConvertFltSema(ExtMidValue, SrcFltSema);
 
 8332  if (MidValue != *CValue)
 
 8333    ExtMidValue.next(!RoundDown);

 8341    if (ConvertFltSema(ExtMidValue, SrcFltSema).isInfinity())

 8345    APFloat NextExtMidValue = NextValue(ExtMidValue, RoundDown);
 8346    if (ConvertFltSema(NextExtMidValue, SrcFltSema).isFinite())

 8351                      ConstantFP::get(DestType, ExtMidValue), "", &I);
 
 
 8364  if (!C->isPosZero()) {
 8365    if (!C->isSmallestNormalized())

 8378      switch (I.getPredicate()) {

 8404  switch (I.getPredicate()) {

 8429    assert(!I.hasNoNaNs() && "fcmp should have simplified");

 8434    assert(!I.hasNoNaNs() && "fcmp should have simplified");

 8448    return replacePredAndOp0(&I, I.getPredicate(), X);
 
 
 8471    I.setHasNoInfs(false);

 8473  switch (I.getPredicate()) {

 8518  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

 8523    Pred = I.getSwappedPredicate();

 8532  return new FCmpInst(Pred, Op0, Zero, "", &I);
 
 
 8568        I.getFunction()->getDenormalMode(
 
 8575        I.setHasNoNaNs(true);

 8587  Type *OpType = LHS->getType();

 8593  if (!FloorX && !CeilX) {

 8597      Pred = I.getSwappedPredicate();
 
 
 8665  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

 8667                                  SQ.getWithInstruction(&I)))

 8672  assert(OpType == Op1->getType() && "fcmp with different-typed operands?");

 8697  if (I.isCommutative()) {
 8698    if (auto Pair = matchSymmetricPair(I.getOperand(0), I.getOperand(1))) {

 8720    return new FCmpInst(I.getSwappedPredicate(), X, Y, "", &I);

 8802      Type *IntTy = X->getType();
 8803      const APInt &SignMask = ~APInt::getSignMask(IntTy->getScalarSizeInBits());
 8804      Value *MaskX = Builder.CreateAnd(X, ConstantInt::get(IntTy, SignMask));
 
 8814    case Instruction::Select:
 
 8822    case Instruction::FSub:
 
 8827    case Instruction::PHI:
 
 8831    case Instruction::SIToFP:
 
 8832    case Instruction::UIToFP:
 
 8836    case Instruction::FDiv:
 
 8840    case Instruction::Load:
 
 8846    case Instruction::FPTrunc:
 
 8867        return new FCmpInst(I.getSwappedPredicate(), X, NegC, "", &I);

 8886          X->getType()->getScalarType()->getFltSemantics();

 8922        Constant *NewC = ConstantFP::get(X->getType(), TruncC);

 8935    Type *IntType = Builder.getIntNTy(X->getType()->getScalarSizeInBits());

 8948    Value *CanonLHS = nullptr;

 8951    if (CanonLHS == Op1)
 8952      return new FCmpInst(Pred, Op1, Op1, "", &I);

 8954    Value *CanonRHS = nullptr;

 8957    if (CanonRHS == Op0)
 8958      return new FCmpInst(Pred, Op0, Op0, "", &I);

 8961    if (CanonLHS && CanonRHS)
 8962      return new FCmpInst(Pred, CanonLHS, CanonRHS, "", &I);

 8965  if (I.getType()->isVectorTy())
 
 
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements the APSInt class, which is a simple class that represents an arbitrary sized int...
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static Instruction * foldFCmpReciprocalAndZero(FCmpInst &I, Instruction *LHSI, Constant *RHSC)
Fold (C / X) < 0.0 --> X < 0.0 if possible. Swap predicate if necessary.
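A minimal standalone C++ sketch (not part of this file) of the sign reasoning behind this fold, checked on a few sample values: for a positive constant C, C / x is negative exactly when x is negative; zeros, infinities and NaNs are what make the fold conditional, hence "if possible" above, and a negative C requires the swapped predicate.
#include <cassert>

int main() {
  const double C = 2.0;
  const double samples[] = {-3.5, -1.0, -0.25, 0.25, 1.0, 3.5};
  for (double x : samples) {
    // Positive constant: sign of C / x matches the sign of x.
    assert(((C / x) < 0.0) == (x < 0.0));
    // Negative constant: the predicate has to be swapped.
    assert(((-C / x) < 0.0) == (x > 0.0));
  }
  return 0;
}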
static Instruction * foldFabsWithFcmpZero(FCmpInst &I, InstCombinerImpl &IC)
Optimize fabs(X) compared with zero.
static void collectOffsetOp(Value *V, SmallVectorImpl< OffsetOp > &Offsets, bool AllowRecursion)
static Value * rewriteGEPAsOffset(Value *Start, Value *Base, GEPNoWrapFlags NW, const DataLayout &DL, SetVector< Value * > &Explored, InstCombiner &IC)
Returns a re-written value of Start as an indexed GEP using Base as a pointer.
static Instruction * foldICmpEqualityWithOffset(ICmpInst &I, InstCombiner::BuilderTy &Builder, const SimplifyQuery &SQ)
Offset both sides of an equality icmp to see if we can save some instructions: icmp eq/ne X,...
static bool addWithOverflow(APInt &Result, const APInt &In1, const APInt &In2, bool IsSigned=false)
Compute Result = In1+In2, returning true if the result overflowed for this type.
static Instruction * foldICmpAndXX(ICmpInst &I, const SimplifyQuery &Q, InstCombinerImpl &IC)
static Instruction * foldVectorCmp(CmpInst &Cmp, InstCombiner::BuilderTy &Builder)
static bool isMaskOrZero(const Value *V, bool Not, const SimplifyQuery &Q, unsigned Depth=0)
static Value * createLogicFromTable(const std::bitset< 4 > &Table, Value *Op0, Value *Op1, IRBuilderBase &Builder, bool HasOneUse)
static Instruction * foldICmpOfUAddOv(ICmpInst &I)
static bool isChainSelectCmpBranch(const SelectInst *SI)
Return true when the instruction sequence within a block is select-cmp-br.
static Instruction * foldICmpInvariantGroup(ICmpInst &I)
std::pair< Instruction::BinaryOps, Value * > OffsetOp
Find all possible pairs (BinOp, RHS) that BinOp V, RHS can be simplified.
static Instruction * foldReductionIdiom(ICmpInst &I, InstCombiner::BuilderTy &Builder, const DataLayout &DL)
This function folds patterns produced by lowering of reduce idioms, such as llvm.vector....
static Instruction * canonicalizeICmpBool(ICmpInst &I, InstCombiner::BuilderTy &Builder)
Integer compare with boolean values can always be turned into bitwise ops.
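For reference, a small standalone C++ check (not taken from this file) of the boolean identities this canonicalization relies on; the predicate-to-bitwise-op mapping in the comments is my reading of the listing above (lines 7247-7279), not a verbatim copy of the pass.
#include <cassert>

int main() {
  // Exhaustive over all i1 (bool) pairs.
  for (int a = 0; a <= 1; ++a) {
    for (int b = 0; b <= 1; ++b) {
      bool A = a, B = b;
      assert((A != B) == ((A ^ B) != 0)); // icmp ne  -> xor
      assert((A == B) == !(A ^ B));       // icmp eq  -> not(xor)
      assert((a < b)  == (!A && B));      // unsigned lt -> and(not A, B)
      assert((a > b)  == (A && !B));      // unsigned gt -> and(not B, A)
      assert((a <= b) == (!A || B));      // unsigned le -> or(not A, B)
      assert((a >= b) == (A || !B));      // unsigned ge -> or(not B, A)
    }
  }
  return 0;
}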
static Instruction * foldFCmpFSubIntoFCmp(FCmpInst &I, Instruction *LHSI, Constant *RHSC, InstCombinerImpl &CI)
static Value * foldICmpOrXorSubChain(ICmpInst &Cmp, BinaryOperator *Or, InstCombiner::BuilderTy &Builder)
Fold icmp eq/ne (or (xor/sub (X1, X2), xor/sub (X3, X4))), 0.
static bool hasBranchUse(ICmpInst &I)
Given an icmp instruction, return true if any use of this comparison is a branch on sign bit comparis...
static Value * foldICmpWithLowBitMaskedVal(CmpPredicate Pred, Value *Op0, Value *Op1, const SimplifyQuery &Q, InstCombiner &IC)
Some comparisons can be simplified.
static APInt getDemandedBitsLHSMask(ICmpInst &I, unsigned BitWidth)
When performing a comparison against a constant, it is possible that not all the bits in the LHS are ...
static Instruction * foldICmpShlLHSC(ICmpInst &Cmp, Instruction *Shl, const APInt &C)
Fold icmp (shl nuw C2, Y), C.
static Instruction * foldFCmpWithFloorAndCeil(FCmpInst &I, InstCombinerImpl &IC)
static Instruction * foldICmpXorXX(ICmpInst &I, const SimplifyQuery &Q, InstCombinerImpl &IC)
static Instruction * foldICmpOfCmpIntrinsicWithConstant(CmpPredicate Pred, IntrinsicInst *I, const APInt &C, InstCombiner::BuilderTy &Builder)
static Instruction * processUMulZExtIdiom(ICmpInst &I, Value *MulVal, const APInt *OtherVal, InstCombinerImpl &IC)
Recognize and process idiom involving test for multiplication overflow.
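The source-level shape involved is roughly the following hedged sketch in plain C++ (the helper name mul_overflows_u32 is illustrative, not from the pass): both operands are zero-extended, multiplied in a wider type, and the product is compared against the narrow type's maximum, which is equivalent to asking whether the narrow multiplication overflows. That is the shape the pass rewrites to a call to llvm.umul.with.overflow (see the Intrinsic::umul_with_overflow call in the listing above).
#include <cassert>
#include <cstdint>

// The wide product exceeds UINT32_MAX exactly when the 32-bit multiply would wrap.
bool mul_overflows_u32(uint32_t a, uint32_t b) {
  uint64_t wide = static_cast<uint64_t>(a) * static_cast<uint64_t>(b);
  return wide > UINT32_MAX;
}

int main() {
  assert(!mul_overflows_u32(65535u, 65537u)); // 0xFFFFFFFF, still fits
  assert(mul_overflows_u32(65536u, 65536u));  // 0x100000000, wraps in 32 bits
  return 0;
}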
static Instruction * foldSqrtWithFcmpZero(FCmpInst &I, InstCombinerImpl &IC)
Optimize sqrt(X) compared with zero.
static Instruction * foldFCmpFNegCommonOp(FCmpInst &I)
static Instruction * foldICmpWithHighBitMask(ICmpInst &Cmp, InstCombiner::BuilderTy &Builder)
static ICmpInst * canonicalizeCmpWithConstant(ICmpInst &I)
If we have an icmp le or icmp ge instruction with a constant operand, turn it into the appropriate ic...
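A spot-check of the canonicalization on plain integers, assuming the usual rewrite of a non-strict predicate with a constant into the strict one with an adjusted constant ("x <= C" becomes "x < C + 1", "x >= C" becomes "x > C - 1"); the adjustment only works when C + 1 or C - 1 does not wrap, which is presumably why the extreme constants are excluded.
#include <cassert>

int main() {
  const int C = 7;
  for (int x = -10; x <= 10; ++x) {
    assert((x <= C) == (x < C + 1)); // sle -> slt with C + 1
    assert((x >= C) == (x > C - 1)); // sge -> sgt with C - 1
  }
  return 0;
}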
static Instruction * foldICmpIntrinsicWithIntrinsic(ICmpInst &Cmp, InstCombiner::BuilderTy &Builder)
Fold an icmp with LLVM intrinsics.
static Instruction * foldICmpUSubSatOrUAddSatWithConstant(CmpPredicate Pred, SaturatingInst *II, const APInt &C, InstCombiner::BuilderTy &Builder)
static Instruction * foldICmpPow2Test(ICmpInst &I, InstCombiner::BuilderTy &Builder)
static bool subWithOverflow(APInt &Result, const APInt &In1, const APInt &In2, bool IsSigned=false)
Compute Result = In1-In2, returning true if the result overflowed for this type.
static bool canRewriteGEPAsOffset(Value *Start, Value *Base, GEPNoWrapFlags &NW, const DataLayout &DL, SetVector< Value * > &Explored)
Returns true if we can rewrite Start as a GEP with pointer Base and some integer offset.
static Instruction * foldFCmpFpTrunc(FCmpInst &I, const Instruction &FPTrunc, const Constant &C)
static Instruction * foldICmpXNegX(ICmpInst &I, InstCombiner::BuilderTy &Builder)
static Instruction * processUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B, ConstantInt *CI2, ConstantInt *CI1, InstCombinerImpl &IC)
The caller has matched a pattern of the form: I = icmp ugt (add (add A, B), CI2), CI1 If this is of t...
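As I understand it, one way this "add, add constant, icmp ugt constant" shape arises is when a narrow signed addition is checked for overflow by performing it in a wider type, biasing the sum, and comparing unsigned. A hedged standalone C++ sketch for int16_t (the helper name sadd16_overflows and the specific constants are illustrative; the constants the pass actually matches depend on the narrow type's width):
#include <cassert>
#include <cstdint>

// sum fits in int16_t iff sum + 32768 lies in [0, 65535], so the biased value
// compared unsigned-greater-than 65535 signals overflow of the 16-bit add.
bool sadd16_overflows(int16_t a, int16_t b) {
  int32_t sum = static_cast<int32_t>(a) + static_cast<int32_t>(b);
  return static_cast<uint32_t>(sum + 32768) > 65535u;
}

int main() {
  assert(!sadd16_overflows(32767, 0));
  assert(sadd16_overflows(32767, 1));   // 32768 does not fit in int16_t
  assert(sadd16_overflows(-32768, -1)); // -32769 does not fit either
  return 0;
}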
static Value * foldShiftIntoShiftInAnotherHandOfAndInICmp(ICmpInst &I, const SimplifyQuery SQ, InstCombiner::BuilderTy &Builder)
static bool isSignTest(ICmpInst::Predicate &Pred, const APInt &C)
Returns true if the exploded icmp can be expressed as a signed comparison to zero and updates the pre...
static Instruction * transformToIndexedCompare(GEPOperator *GEPLHS, Value *RHS, CmpPredicate Cond, const DataLayout &DL, InstCombiner &IC)
Converts (CMP GEPLHS, RHS) if this change would make RHS a constant.
static Instruction * foldCtpopPow2Test(ICmpInst &I, IntrinsicInst *CtpopLhs, const APInt &CRhs, InstCombiner::BuilderTy &Builder, const SimplifyQuery &Q)
static void setInsertionPoint(IRBuilder<> &Builder, Value *V, bool Before=true)
static bool isNeutralValue(Instruction::BinaryOps BinaryOp, Value *RHS, bool IsSigned)
static bool isMultipleOf(Value *X, const APInt &C, const SimplifyQuery &Q)
Return true if X is a multiple of C.
static Value * foldICmpWithTruncSignExtendedVal(ICmpInst &I, InstCombiner::BuilderTy &Builder)
Some comparisons can be simplified.
static Instruction * foldICmpOrXX(ICmpInst &I, const SimplifyQuery &Q, InstCombinerImpl &IC)
This file provides internal interfaces used to implement the InstCombine.
This file provides the interface for the instcombine pass implementation.
This file implements a set that has insertion order iteration characteristics.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static constexpr roundingMode rmNearestTiesToEven
static constexpr roundingMode rmTowardZero
LLVM_ABI opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
static APFloat getSmallestNormalized(const fltSemantics &Sem, bool Negative=false)
Returns the smallest (by magnitude) normalized finite number in the given semantics.
APInt bitcastToAPInt() const
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
opStatus next(bool nextDown)
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
LLVM_ABI FPClassTest classify() const
Return the FPClassTest which will return true for the value.
opStatus roundToIntegral(roundingMode RM)
Class for arbitrary precision integers.
LLVM_ABI APInt udiv(const APInt &RHS) const
Unsigned division operation.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
static LLVM_ABI void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Dual division/remainder interface.
bool isNegatedPowerOf2() const
Check if this APInt's negated value is a power of two greater than zero.
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
unsigned getActiveBits() const
Compute the number of active bits in the value.
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
APInt abs() const
Get the absolute value.
unsigned ceilLogBase2() const
bool sgt(const APInt &RHS) const
Signed greater than comparison.
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
LLVM_ABI APInt usub_ov(const APInt &RHS, bool &Overflow) const
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
bool isSignMask() const
Check if the APInt's value is returned by getSignMask.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
static APInt getMinValue(unsigned numBits)
Gets minimum unsigned value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
LLVM_ABI APInt sadd_ov(const APInt &RHS, bool &Overflow) const
bool eq(const APInt &RHS) const
Equality comparison.
LLVM_ABI APInt sdiv(const APInt &RHS) const
Signed division function for APInt.
LLVM_ABI APInt uadd_ov(const APInt &RHS, bool &Overflow) const
void negate()
Negate this APInt in place.
unsigned countr_zero() const
Count the number of trailing zero bits.
unsigned countl_zero() const
The APInt version of std::countl_zero.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
void flipAllBits()
Toggle every bit to its opposite value.
unsigned countl_one() const
Count the number of leading one bits.
unsigned logBase2() const
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
bool isMaxSignedValue() const
Determine if this is the largest signed value.
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
APInt shl(unsigned shiftAmt) const
Left-shift function.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
LLVM_ABI APInt ssub_ov(const APInt &RHS, bool &Overflow) const
bool isOne() const
Determine if this is a value of 1.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
unsigned countr_one() const
Count the number of trailing one bits.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
An arbitrary precision integer that knows its signedness.
an instruction to allocate memory on the stack
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
LLVM Basic Block Representation.
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
LLVM_ABI const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
BinaryOps getOpcode() const
static LLVM_ABI BinaryOperator * CreateNot(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
Conditional or Unconditional Branch instruction.
Value * getArgOperand(unsigned i) const
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
This class is the base class for the comparison instructions.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Predicate getStrictPredicate() const
For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
static LLVM_ABI Predicate getFlippedStrictnessPredicate(Predicate pred)
This is a static version that you can use without an instruction available.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
bool isTrueWhenEqual() const
This is just a convenience.
static LLVM_ABI CmpInst * Create(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate and the two operands.
Predicate getNonStrictPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
static LLVM_ABI bool isStrictPredicate(Predicate predicate)
This is a static version that you can use without an instruction available.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Predicate getPredicate() const
Return the predicate for this instruction.
static bool isIntPredicate(Predicate P)
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static LLVM_ABI CmpPredicate getSwapped(CmpPredicate P)
Get the swapped predicate of a CmpPredicate.
static LLVM_ABI Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getPointerBitCastOrAddrSpaceCast(Constant *C, Type *Ty)
Create a BitCast or AddrSpaceCast for a pointer type depending on the address space.
static LLVM_ABI Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getNot(Constant *C)
static LLVM_ABI Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getXor(Constant *C1, Constant *C2)
static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)
static LLVM_ABI Constant * getZero(Type *Ty, bool Negative=false)
This is the shared class of boolean and integer constants.
uint64_t getLimitedValue(uint64_t Limit=~0ULL) const
getLimitedValue - If the value is smaller than the specified limit, return it, otherwise return the l...
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
static ConstantInt * getSigned(IntegerType *Ty, int64_t V)
Return a ConstantInt with the specified value for the specified type.
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
const APInt & getValue() const
Return the constant as an APInt value reference.
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
This class represents a range of values.
LLVM_ABI ConstantRange add(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an addition of a value in this ran...
LLVM_ABI std::optional< ConstantRange > exactUnionWith(const ConstantRange &CR) const
Union the two ranges and return the result if it can be represented exactly, otherwise return std::nu...
LLVM_ABI bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
LLVM_ABI ConstantRange subtract(const APInt &CI) const
Subtract the specified constant from the endpoints of this constant range.
const APInt * getSingleElement() const
If this set contains a single element, return it, otherwise return null.
LLVM_ABI ConstantRange difference(const ConstantRange &CR) const
Subtract the specified range from this range (aka relative complement of the sets).
LLVM_ABI bool isEmptySet() const
Return true if this set contains no members.
LLVM_ABI ConstantRange truncate(uint32_t BitWidth, unsigned NoWrapKind=0) const
Return a new range in the specified integer type, which must be strictly smaller than the current typ...
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
LLVM_ABI ConstantRange inverse() const
Return a new range that is the logical not of the current set.
LLVM_ABI std::optional< ConstantRange > exactIntersectWith(const ConstantRange &CR) const
Intersect the two ranges and return the result if it can be represented exactly, otherwise return std...
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
static ConstantRange getNonEmpty(APInt Lower, APInt Upper)
Create non-empty constant range with the given bounds.
LLVM_ABI ConstantRange sub(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a subtraction of a value in this r...
static LLVM_ABI ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
static LLVM_ABI Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
This is an important base class in LLVM.
static LLVM_ABI Constant * getIntegerValue(Type *Ty, const APInt &V)
Return the value for an integer or pointer constant, or a vector thereof, with the given scalar value...
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
LLVM_ABI const APInt & getUniqueInteger() const
If C is a constant integer then return its value, otherwise C must be a vector of constant integers,...
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
A parsed version of the target data layout string in and methods for querying it.
iterator find(const_arg_type_t< KeyT > Val)
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
This instruction compares its operands according to the predicate given to the constructor.
static bool isEquality(Predicate Pred)
Represents flags for the getelementptr instruction/expression.
bool hasNoUnsignedSignedWrap() const
bool hasNoUnsignedWrap() const
GEPNoWrapFlags intersectForOffsetAdd(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep p, x+y).
static GEPNoWrapFlags none()
bool isInBounds() const
Test whether this is an inbounds GEP, as defined by LangRef.html.
LLVM_ABI Type * getSourceElementType() const
Value * getPointerOperand()
GEPNoWrapFlags getNoWrapFlags() const
bool hasAllConstantIndices() const
Return true if all of the indices of this GEP are constant integers.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
This instruction compares its operands according to the predicate given to the constructor.
static bool isGE(Predicate P)
Return true if the predicate is SGE or UGE.
static LLVM_ABI bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
static bool isLT(Predicate P)
Return true if the predicate is SLT or ULT.
static bool isGT(Predicate P)
Return true if the predicate is SGT or UGT.
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
bool isEquality() const
Return true if this predicate is either EQ or NE.
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
static bool isLE(Predicate P)
Return true if the predicate is SLE or ULE.
Common base class shared among various IRBuilders.
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="", bool IsDisjoint=false)
ConstantInt * getInt(const APInt &AI)
Get a constant integer value.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Instruction * foldICmpShrConstant(ICmpInst &Cmp, BinaryOperator *Shr, const APInt &C)
Fold icmp ({al}shr X, Y), C.
Instruction * foldICmpWithZextOrSext(ICmpInst &ICmp)
Instruction * foldICmpSelectConstant(ICmpInst &Cmp, SelectInst *Select, ConstantInt *C)
Instruction * foldICmpSRemConstant(ICmpInst &Cmp, BinaryOperator *UDiv, const APInt &C)
Instruction * foldICmpBinOpWithConstant(ICmpInst &Cmp, BinaryOperator *BO, const APInt &C)
Fold an icmp with BinaryOp and constant operand: icmp Pred BO, C.
Instruction * foldICmpOrConstant(ICmpInst &Cmp, BinaryOperator *Or, const APInt &C)
Fold icmp (or X, Y), C.
Instruction * foldICmpTruncWithTruncOrExt(ICmpInst &Cmp, const SimplifyQuery &Q)
Fold icmp (trunc nuw/nsw X), (trunc nuw/nsw Y).
Instruction * foldSignBitTest(ICmpInst &I)
Fold equality-comparison between zero and any (maybe truncated) right-shift by one-less-than-bitwidth...
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...
Value * insertRangeTest(Value *V, const APInt &Lo, const APInt &Hi, bool isSigned, bool Inside)
Emit a computation of: (V >= Lo && V < Hi) if Inside is true, otherwise (V < Lo || V >= Hi).
Instruction * foldICmpBinOp(ICmpInst &Cmp, const SimplifyQuery &SQ)
Try to fold icmp (binop), X or icmp X, (binop).
Instruction * foldCmpLoadFromIndexedGlobal(LoadInst *LI, GetElementPtrInst *GEP, CmpInst &ICI, ConstantInt *AndCst=nullptr)
This is called when we see this pattern: cmp pred (load (gep GV, ...)), cmpcst where GV is a global v...
Instruction * foldICmpSubConstant(ICmpInst &Cmp, BinaryOperator *Sub, const APInt &C)
Fold icmp (sub X, Y), C.
Instruction * foldICmpWithClamp(ICmpInst &Cmp, Value *X, MinMaxIntrinsic *Min)
Match and fold patterns like: icmp eq/ne X, min(max(X, Lo), Hi) which represents a range check and ca...
Instruction * foldICmpInstWithConstantNotInt(ICmpInst &Cmp)
Handle icmp with constant (but not simple integer constant) RHS.
bool SimplifyDemandedBits(Instruction *I, unsigned Op, const APInt &DemandedMask, KnownBits &Known, const SimplifyQuery &Q, unsigned Depth=0) override
This form of SimplifyDemandedBits simplifies the specified instruction operand if possible,...
Instruction * foldICmpShlConstConst(ICmpInst &I, Value *ShAmt, const APInt &C1, const APInt &C2)
Handle "(icmp eq/ne (shl AP2, A), AP1)" -> (icmp eq/ne A, TrailingZeros(AP1) - TrailingZeros(AP2)).
Value * reassociateShiftAmtsOfTwoSameDirectionShifts(BinaryOperator *Sh0, const SimplifyQuery &SQ, bool AnalyzeForSignBitExtraction=false)
Instruction * foldICmpEqIntrinsicWithConstant(ICmpInst &ICI, IntrinsicInst *II, const APInt &C)
Fold an equality icmp with LLVM intrinsic and constant operand.
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false, bool SimplifyBothArms=false)
Given an instruction with a select as one operand and a constant as the other operand,...
Value * foldMultiplicationOverflowCheck(ICmpInst &Cmp)
Fold (-1 u/ x) u< y ((x * y) ?
Instruction * foldICmpWithConstant(ICmpInst &Cmp)
Fold icmp Pred X, C.
CmpInst * canonicalizeICmpPredicate(CmpInst &I)
If we have a comparison with a non-canonical predicate, if we can update all the users,...
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * foldICmpWithZero(ICmpInst &Cmp)
Instruction * foldICmpCommutative(CmpPredicate Pred, Value *Op0, Value *Op1, ICmpInst &CxtI)
Instruction * foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp, BinaryOperator *BO, const APInt &C)
Fold an icmp equality instruction with binary operator LHS and constant RHS: icmp eq/ne BO,...
Instruction * foldICmpUsingBoolRange(ICmpInst &I)
If one operand of an icmp is effectively a bool (value range of {0,1}), then try to reduce patterns b...
Instruction * foldICmpWithTrunc(ICmpInst &Cmp)
Instruction * foldICmpIntrinsicWithConstant(ICmpInst &ICI, IntrinsicInst *II, const APInt &C)
Fold an icmp with LLVM intrinsic and constant operand: icmp Pred II, C.
bool matchThreeWayIntCompare(SelectInst *SI, Value *&LHS, Value *&RHS, ConstantInt *&Less, ConstantInt *&Equal, ConstantInt *&Greater)
Match a select chain which produces one of three values based on whether the LHS is less than,...
Instruction * visitFCmpInst(FCmpInst &I)
Instruction * foldICmpUsingKnownBits(ICmpInst &Cmp)
Try to fold the comparison based on range information we can get by checking whether bits are known t...
Instruction * foldICmpDivConstant(ICmpInst &Cmp, BinaryOperator *Div, const APInt &C)
Fold icmp ({su}div X, Y), C.
Instruction * foldIRemByPowerOfTwoToBitTest(ICmpInst &I)
If we have: icmp eq/ne (urem/srem x, y), 0 iff y is a power-of-two, we can replace this with a bit te...
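The bit test behind this replacement, checked on plain integers in a small standalone C++ sketch (not from the pass): for a power of two y, "x % y == 0" is the same as "(x & (y - 1)) == 0"; in two's complement the same holds for the signed remainder as long as only the "== 0" outcome is queried.
#include <cassert>
#include <cstdint>

int main() {
  const uint32_t y = 8; // power of two
  for (uint32_t x = 0; x < 64; ++x)
    assert((x % y == 0) == ((x & (y - 1)) == 0));
  // Signed remainder: the zero test still reduces to the same mask test.
  for (int32_t sx = -64; sx < 64; ++sx)
    assert((sx % 8 == 0) == ((sx & 7) == 0));
  return 0;
}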
Instruction * foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI, Constant *RHSC)
Fold fcmp ([us]itofp x, cst) if possible.
Instruction * foldICmpUDivConstant(ICmpInst &Cmp, BinaryOperator *UDiv, const APInt &C)
Fold icmp (udiv X, Y), C.
Instruction * foldICmpAddOpConst(Value *X, const APInt &C, CmpPredicate Pred)
Fold "icmp pred (X+C), X".
Instruction * foldICmpWithCastOp(ICmpInst &ICmp)
Handle icmp (cast x), (cast or constant).
Instruction * foldICmpTruncConstant(ICmpInst &Cmp, TruncInst *Trunc, const APInt &C)
Fold icmp (trunc X), C.
Instruction * foldICmpAddConstant(ICmpInst &Cmp, BinaryOperator *Add, const APInt &C)
Fold icmp (add X, Y), C.
Instruction * foldICmpMulConstant(ICmpInst &Cmp, BinaryOperator *Mul, const APInt &C)
Fold icmp (mul X, Y), C.
Instruction * tryFoldInstWithCtpopWithNot(Instruction *I)
Instruction * foldICmpXorConstant(ICmpInst &Cmp, BinaryOperator *Xor, const APInt &C)
Fold icmp (xor X, Y), C.
Instruction * foldSelectICmp(CmpPredicate Pred, SelectInst *SI, Value *RHS, const ICmpInst &I)
Instruction * foldICmpInstWithConstantAllowPoison(ICmpInst &Cmp, const APInt &C)
Try to fold integer comparisons with a constant operand: icmp Pred X, C where X is some kind of instr...
Instruction * foldIsMultipleOfAPowerOfTwo(ICmpInst &Cmp)
Fold icmp eq (num + mask) & ~mask, num to icmp eq (and num, mask), 0 Where mask is a low bit mask.
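An exhaustive 8-bit check of the identity behind this fold, as a standalone C++ sketch (not from the pass): with mask a low-bit mask (2^k - 1), rounding num up to a multiple of 2^k gives num back exactly when num has no bits inside the mask, and wrapping arithmetic does not break the equivalence.
#include <cassert>
#include <cstdint>

int main() {
  const uint8_t mask = 0x07; // low-bit mask for 2^3
  for (unsigned n = 0; n < 256; ++n) {
    uint8_t num = static_cast<uint8_t>(n);
    bool roundTripEqual =
        static_cast<uint8_t>((num + mask) & static_cast<uint8_t>(~mask)) == num;
    bool lowBitsClear = (num & mask) == 0;
    assert(roundTripEqual == lowBitsClear);
  }
  return 0;
}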
Instruction * foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And, const APInt &C1, const APInt &C2)
Fold icmp (and (sh X, Y), C2), C1.
Instruction * foldICmpBinOpWithConstantViaTruthTable(ICmpInst &Cmp, BinaryOperator *BO, const APInt &C)
Instruction * foldICmpInstWithConstant(ICmpInst &Cmp)
Try to fold integer comparisons with a constant operand: icmp Pred X, C where X is some kind of instr...
Instruction * foldICmpXorShiftConst(ICmpInst &Cmp, BinaryOperator *Xor, const APInt &C)
For power-of-2 C: ((X s>> ShiftC) ^ X) u< C --> (X + C) u< (C << 1) ((X s>> ShiftC) ^ X) u> (C - 1) -...
Instruction * foldICmpShlConstant(ICmpInst &Cmp, BinaryOperator *Shl, const APInt &C)
Fold icmp (shl X, Y), C.
Instruction * foldICmpAndConstant(ICmpInst &Cmp, BinaryOperator *And, const APInt &C)
Fold icmp (and X, Y), C.
Instruction * foldICmpEquality(ICmpInst &Cmp)
Instruction * foldICmpWithMinMax(Instruction &I, MinMaxIntrinsic *MinMax, Value *Z, CmpPredicate Pred)
Fold icmp Pred min|max(X, Y), Z.
bool dominatesAllUses(const Instruction *DI, const Instruction *UI, const BasicBlock *DB) const
True when DB dominates all uses of DI except UI.
bool foldAllocaCmp(AllocaInst *Alloca)
Instruction * visitICmpInst(ICmpInst &I)
OverflowResult computeOverflow(Instruction::BinaryOps BinaryOp, bool IsSigned, Value *LHS, Value *RHS, Instruction *CxtI) const
Instruction * foldICmpWithDominatingICmp(ICmpInst &Cmp)
Canonicalize icmp instructions based on dominating conditions.
bool replacedSelectWithOperand(SelectInst *SI, const ICmpInst *Icmp, const unsigned SIOpd)
Try to replace select with select operand SIOpd in SI-ICmp sequence.
Instruction * foldICmpShrConstConst(ICmpInst &I, Value *ShAmt, const APInt &C1, const APInt &C2)
Handle "(icmp eq/ne (ashr/lshr AP2, A), AP1)" -> (icmp eq/ne A, Log2(AP2/AP1)) -> (icmp eq/ne A,...
void freelyInvertAllUsersOf(Value *V, Value *IgnoredUser=nullptr)
Freely adapt every user of V as-if V was changed to !V.
Instruction * foldICmpAndConstConst(ICmpInst &Cmp, BinaryOperator *And, const APInt &C1)
Fold icmp (and X, C2), C1.
Instruction * foldICmpBitCast(ICmpInst &Cmp)
Instruction * foldGEPICmp(GEPOperator *GEPLHS, Value *RHS, CmpPredicate Cond, Instruction &I)
Fold comparisons between a GEP instruction and something else.
The core instruction combiner logic.
OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS, const Instruction *CxtI) const
unsigned ComputeMaxSignificantBits(const Value *Op, const Instruction *CxtI=nullptr, unsigned Depth=0) const
IRBuilder< TargetFolder, IRBuilderCallbackInserter > BuilderTy
An IRBuilder that automatically inserts new instructions into the worklist.
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
OverflowResult computeOverflowForUnsignedMul(const Value *LHS, const Value *RHS, const Instruction *CxtI, bool IsNSW=false) const
static unsigned getComplexity(Value *V)
Assign a complexity or rank value to LLVM Values.
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
uint64_t MaxArraySizeForCombine
Maximum size of array considered when transforming.
OverflowResult computeOverflowForSignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const Instruction *CxtI) const
static Constant * SubOne(Constant *C)
Subtract one from a Constant.
OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS, const Instruction *CxtI) const
static bool isCanonicalPredicate(CmpPredicate Pred)
Predicate canonicalization reduces the number of patterns that need to be matched by other transforms...
void computeKnownBits(const Value *V, KnownBits &Known, const Instruction *CxtI, unsigned Depth=0) const
bool canFreelyInvertAllUsersOf(Instruction *V, Value *IgnoredUser)
Given i1 V, can every user of V be freely adapted if V is changed to !V ?
void addToWorklist(Instruction *I)
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS, const Instruction *CxtI) const
OverflowResult computeOverflowForUnsignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const Instruction *CxtI) const
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
const SimplifyQuery & getSimplifyQuery() const
bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero=false, const Instruction *CxtI=nullptr, unsigned Depth=0)
LLVM_ABI bool hasNoNaNs() const LLVM_READONLY
Determine whether the no-NaNs flag is set.
LLVM_ABI bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
LLVM_ABI bool hasNoInfs() const LLVM_READONLY
Determine whether the no-infs flag is set.
bool isArithmeticShift() const
Return true if this is an arithmetic shift right.
LLVM_ABI bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
LLVM_ABI bool isExact() const LLVM_READONLY
Determine whether the exact flag is set.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A wrapper class for inspecting calls to intrinsic functions.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
An instruction for reading from memory.
bool isVolatile() const
Return true if this is a load from a volatile memory location.
This class represents min/max intrinsics.
static bool isMin(Intrinsic::ID ID)
Whether the intrinsic is a smin or umin.
static bool isSigned(Intrinsic::ID ID)
Whether the intrinsic is signed or unsigned.
A Module instance is used to store all the information related to an LLVM module.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
Represents a saturating add/sub intrinsic.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, const Instruction *MDFrom=nullptr)
A vector that has set insertion semantics.
size_type size() const
Determine the number of elements in the SetVector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
bool contains(const key_type &key) const
Check if the SetVector contains the given key.
This instruction constructs a fixed permutation of two input vectors.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
reverse_iterator rbegin()
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class represents a truncation of integer types.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
bool isPointerTy() const
True if this is an instance of PointerType.
bool isPPC_FP128Ty() const
Return true if this is powerpc long double.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
LLVM_ABI Type * getWithNewBitWidth(unsigned NewBitWidth) const
Given an integer or vector type, change the lane bitwidth to NewBitwidth, whilst keeping the old numb...
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
LLVM_ABI int getFPMantissaWidth() const
Return the width of the mantissa of this type.
LLVM_ABI const fltSemantics & getFltSemantics() const
A Use represents the edge between a Value definition and its users.
void setOperand(unsigned i, Value *Val)
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
iterator_range< user_iterator > users()
LLVM_ABI bool hasNUsesOrMore(unsigned N) const
Return true if this value has N uses or more.
LLVM_ABI const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr, bool LookThroughIntToPtr=false) const
Accumulate the constant offset this value has compared to a base pointer.
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
iterator_range< use_iterator > uses()
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
const ParentTy * getParent() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ABI APInt RoundingUDiv(const APInt &A, const APInt &B, APInt::Rounding RM)
Return A unsign-divided by B, rounded by the given rounding mode.
LLVM_ABI APInt RoundingSDiv(const APInt &A, const APInt &B, APInt::Rounding RM)
Return A sign-divided by B, rounded by the given rounding mode.
@ C
The default llvm calling convention, compatible with C.
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
cst_pred_ty< is_lowbit_mask > m_LowBitMask()
Match an integer or vector with only the low bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
cst_pred_ty< is_negative > m_Negative()
Match an integer or vector of negative values.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
cst_pred_ty< is_sign_mask > m_SignMask()
Match an integer or vector with only the sign bit(s) set.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWAdd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
BinaryOp_match< LHS, RHS, Instruction::URem > m_URem(const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, TruncInst >, OpTy > m_TruncOrSelf(const OpTy &Op)
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
ap_match< APInt > m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
match_combine_or< CastInst_match< OpTy, ZExtInst >, OpTy > m_ZExtOrSelf(const OpTy &Op)
bool match(Val *V, const Pattern &P)
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
cstfp_pred_ty< is_any_zero_fp > m_AnyZeroFP()
Match a floating-point negative zero or positive zero.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches right shift operations (lshr and ashr).
specific_intval< true > m_SpecificIntAllowPoison(const APInt &V)
ap_match< APFloat > m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
CmpClass_match< LHS, RHS, ICmpInst, true > m_c_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
Matches an ICmp with a predicate over LHS and RHS in either order.
OverflowingBinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWNeg(const ValTy &V)
Matches a 'Neg' as 'sub nsw 0, V'.
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
BinOpPred_match< LHS, RHS, is_logical_shift_op > m_LogicalShift(const LHS &L, const RHS &R)
Matches logical shift operations.
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > m_SMin(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_Sqrt(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)
Matches an Xor with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::FAdd > m_FAdd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
NoWrapTrunc_match< OpTy, TruncInst::NoSignedWrap > m_NSWTrunc(const OpTy &Op)
Matches trunc nsw.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
ThreeOps_match< decltype(m_Value()), LHS, RHS, Instruction::Select, true > m_c_Select(const LHS &L, const RHS &R)
Match Select(C, LHS, RHS) or Select(C, RHS, LHS)
CastInst_match< OpTy, FPExtInst > m_FPExt(const OpTy &Op)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWShl(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWMul(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty > m_UMax(const LHS &L, const RHS &R)
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
cst_pred_ty< is_negated_power2_or_zero > m_NegatedPower2OrZero()
Match an integer or vector negated power-of-2.
NoWrapTrunc_match< OpTy, TruncInst::NoUnsignedWrap > m_NUWTrunc(const OpTy &Op)
Matches trunc nuw.
cst_pred_ty< custom_checkfn< APInt > > m_CheckedInt(function_ref< bool(const APInt &)> CheckFn)
Match an integer or vector where CheckFn(ele) for each element is true.
cst_pred_ty< is_lowbit_mask_or_zero > m_LowBitMaskOrZero()
Match an integer or vector with only the low bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches a Add with LHS and RHS in either order.
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty > m_SMax(const LHS &L, const RHS &R)
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Signum_match< Val_t > m_Signum(const Val_t &V)
Matches a signum pattern.
CastInst_match< OpTy, SIToFPInst > m_SIToFP(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
cstfp_pred_ty< is_pos_zero_fp > m_PosZeroFP()
Match a floating-point positive zero.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
UAddWithOverflow_match< LHS_t, RHS_t, Sum_t > m_UAddWithOverflow(const LHS_t &L, const RHS_t &R, const Sum_t &S)
Match an icmp instruction checking for unsigned overflow on addition.
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
BinOpPred_match< LHS, RHS, is_irem_op > m_IRem(const LHS &L, const RHS &R)
Matches integer remainder operations.
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)
CastInst_match< OpTy, FPTruncInst > m_FPTrunc(const OpTy &Op)
auto m_Undef()
Match an arbitrary undef constant.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
ElementWiseBitCast_match< OpTy > m_ElementWiseBitCast(const OpTy &Op)
m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Mul, true > m_c_Mul(const LHS &L, const RHS &R)
Matches a Mul with LHS and RHS in either order.
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
match_unless< Ty > m_Unless(const Ty &M)
Match if the inner matcher does NOT match.
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
cst_pred_ty< icmp_pred_with_threshold > m_SpecificInt_ICMP(ICmpInst::Predicate Predicate, const APInt &Threshold)
Match an integer or vector with every element comparing 'pred' (eq/ne/...) to Threshold.
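All of the PatternMatch helpers above feed into match(). As a minimal sketch (an illustrative standalone function using the usual PatternMatch setup, not code from this file), a masked-zero test of the form icmp eq (and X, Mask), 0 can be recognized and its operands captured like this:

#include "llvm/IR/Instructions.h"
#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

// Illustrative: bind X and Mask if V has the form `icmp eq (and X, Mask), 0`.
static bool matchMaskedZeroTest(Value *V, Value *&X, const APInt *&Mask) {
  CmpPredicate Pred;
  return match(V, m_ICmp(Pred, m_And(m_Value(X), m_APInt(Mask)), m_Zero())) &&
         Pred == ICmpInst::ICMP_EQ;
}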
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iterable types.
@ NeverOverflows
Never overflows.
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
@ MayOverflow
May or may not overflow.
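A minimal sketch (illustrative helper, not from this file) of how these OverflowResult values are typically consumed, e.g. when deciding whether an add may keep a no-wrap flag:

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;

// Illustrative: only NeverOverflows justifies keeping a nuw/nsw flag.
static bool canKeepNoWrapFlag(OverflowResult OR) {
  switch (OR) {
  case OverflowResult::NeverOverflows:
    return true;
  case OverflowResult::AlwaysOverflowsLow:
  case OverflowResult::AlwaysOverflowsHigh:
  case OverflowResult::MayOverflow:
    return false;
  }
  llvm_unreachable("covered switch");
}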
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI bool isKnownNeverInfinity(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point scalar value is not an infinity or if the floating-point vector val...
LLVM_ABI bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS, bool &TrueIfSigned)
Given an exploded icmp instruction, return true if the comparison only checks the sign bit.
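A minimal sketch of isSignBitCheck() as declared above (illustrative helper; the include is an assumption about where the declaration lives): given the predicate and constant RHS of an exploded icmp, it reports whether the compare is only a sign-bit test and which branch corresponds to a negative value.

#include "llvm/Analysis/ValueTracking.h" // assumed header for isSignBitCheck
using namespace llvm;

// Illustrative: true for patterns like `icmp slt X, 0` or `icmp sgt X, -1`.
static bool isNegativityTest(ICmpInst::Predicate Pred, const APInt &RHS) {
  bool TrueIfSigned;
  return isSignBitCheck(Pred, RHS, TrueIfSigned) && TrueIfSigned;
}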
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI Value * stripNullTest(Value *V)
Returns the inner value X if the expression has the form f(X) where f(X) == 0 if and only if X == 0,...
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
LLVM_ABI Value * simplifyFCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q)
Given operands for an FCmpInst, fold the result or return null.
int ilogb(const APFloat &Arg)
Returns the exponent of the internal representation of the APFloat.
LLVM_ABI ConstantRange computeConstantRange(const Value *V, bool ForSigned, bool UseInstrInfo=true, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Determine the possible constant range of an integer or vector of integer value.
LLVM_ABI bool MaskedValueIsZero(const Value *V, const APInt &Mask, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if 'V & Mask' is known to be zero.
LLVM_ABI Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
LLVM_ABI Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
auto dyn_cast_or_null(const Y &Val)
LLVM_ABI bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned el...
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
int countl_zero(T Val)
Count number of 0's from the most significant bit to the least stopping at the first 1.
LLVM_ABI Value * emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL, User *GEP, bool NoAssumptions=false)
Given a getelementptr instruction/constantexpr, emit the code necessary to compute the offset from th...
constexpr unsigned MaxAnalysisRecursionDepth
LLVM_ABI Constant * ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op, const DataLayout &DL)
Attempt to constant fold a unary operation with the specified operand.
LLVM_ABI bool isKnownNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be negative (i.e. non-positive and non-zero) when defined.
SelectPatternFlavor
Specific patterns of select instructions we can match.
LLVM_ABI bool impliesPoison(const Value *ValAssumedPoison, const Value *V)
Return true if V is poison given that ValAssumedPoison is already poison.
LLVM_ABI LinearExpression decomposeLinearExpression(const DataLayout &DL, Value *Ptr)
Decompose a pointer into a linear expression.
LLVM_ABI bool isFinite(const Loop *L)
Return true if this loop can be assumed to run for a finite number of iterations.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
APFloat scalbn(APFloat X, int Exp, APFloat::roundingMode RM)
Returns: X * 2^Exp for integral exponents.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
LLVM_ABI SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI Value * simplifyICmpInst(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an ICmpInst, fold the result or return null.
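A minimal sketch (illustrative wrapper, not from this file) of simplifyICmpInst() combined with SimplifyQuery::getWithInstruction(): a non-null result is an existing value, often a constant, that can replace the compare without emitting new instructions.

#include "llvm/Analysis/InstructionSimplify.h"
using namespace llvm;

// Illustrative: try to fold `icmp Pred L, R` at the given context instruction.
static Value *foldCompareAt(CmpPredicate Pred, Value *L, Value *R,
                            const SimplifyQuery &Q, const Instruction *CxtI) {
  // Refine the query to the context instruction, then ask InstSimplify.
  return simplifyICmpInst(Pred, L, R, Q.getWithInstruction(CxtI));
}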
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
LLVM_ABI Constant * ConstantFoldLoadFromConst(Constant *C, Type *Ty, const APInt &Offset, const DataLayout &DL)
Extract value of C at the given Offset reinterpreted as Ty.
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
LLVM_ABI Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ SMax
Signed integer max implemented in terms of select(cmp()).
@ SMin
Signed integer min implemented in terms of select(cmp()).
@ Sub
Subtraction of integers.
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
LLVM_ABI bool isKnownNonEqual(const Value *V1, const Value *V2, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the given values are known to be non-equal when defined.
DWARFExpression::Operation Op
LLVM_ABI bool PointerMayBeCaptured(const Value *V, bool ReturnCaptures, unsigned MaxUsesToExplore=0)
PointerMayBeCaptured - Return true if this pointer value may be captured by the enclosing function (w...
constexpr unsigned BitWidth
LLVM_ABI Constant * getLosslessInvCast(Constant *C, Type *InvCastTo, unsigned CastOp, const DataLayout &DL, PreservedCastFlags *Flags=nullptr)
Try to cast C to InvC losslessly, satisfying CastOp(InvC) equals C, or CastOp(InvC) is a refined valu...
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI bool isKnownNeverNaN(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point scalar value is not a NaN or if the floating-point vector value has...
LLVM_ABI std::optional< std::pair< CmpPredicate, Constant * > > getFlippedStrictnessPredicateAndConstant(CmpPredicate Pred, Constant *C)
Convert an integer comparison with a constant RHS into an equivalent form with the strictness flipped...
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
LLVM_ABI bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return true if the given value is known to have exactly one bit set when defined.
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
LLVM_ABI bool isKnownPositive(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be positive (i.e. non-negative and non-zero) when defined.
LLVM_ABI bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be non-negative.
constexpr detail::IsaCheckPredicate< Types... > IsaPred
Function object wrapper for the llvm::isa type check.
LLVM_ABI std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
std::optional< DecomposedBitTest > decomposeBitTestICmp(Value *LHS, Value *RHS, CmpInst::Predicate Pred, bool LookThroughTrunc=true, bool AllowNonZeroC=false, bool DecomposeAnd=false)
Decompose an icmp into the form ((X & Mask) pred C) if possible.
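A minimal sketch (illustrative; the result field name follows the ((X & Mask) pred C) form described above and should be treated as an assumption) of using decomposeBitTestICmp() to detect a single-bit test:

#include "llvm/Analysis/CmpInstAnalysis.h"
using namespace llvm;

// Illustrative: recognize compares that reduce to testing exactly one bit.
static bool isSingleBitTest(Value *LHS, Value *RHS, CmpInst::Predicate Pred) {
  std::optional<DecomposedBitTest> Res = decomposeBitTestICmp(LHS, RHS, Pred);
  return Res && Res->Mask.isPowerOf2(); // Mask per the decomposed form
}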
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Value * materialize(InstCombiner::BuilderTy &Builder) const
static OffsetResult value(Value *V)
static OffsetResult select(Value *Cond, Value *TrueV, Value *FalseV)
static OffsetResult invalid()
This callback is used in conjunction with PointerMayBeCaptured.
static CommonPointerBase compute(Value *LHS, Value *RHS)
Represent subnormal handling kind for floating point instruction inputs and outputs.
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ PositiveZero
Denormals are flushed to positive zero.
static constexpr DenormalMode getIEEE()
bool isNonNegative() const
Returns true if this value is known to be non-negative.
bool isZero() const
Returns true if value is all zero.
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
APInt getSignedMaxValue() const
Return the maximal signed value possible given these KnownBits.
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
unsigned getBitWidth() const
Get the bit width of this value.
bool isConstant() const
Returns true if we know the value of all bits.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
bool isStrictlyPositive() const
Returns true if this value is known to be positive.
bool isNegative() const
Returns true if this value is known to be negative.
unsigned countMinPopulation() const
Returns the number of bits known to be one.
APInt getSignedMinValue() const
Return the minimal signed value possible given these KnownBits.
const APInt & getConstant() const
Returns the value when all bits have a known value.
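A minimal sketch (illustrative helper assuming an integer-typed value) combining computeKnownBits() with the KnownBits accessors above, e.g. proving that a value is a multiple of eight:

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Support/KnownBits.h"
using namespace llvm;

// Illustrative: three known-zero low bits imply divisibility by 8.
static bool knownMultipleOfEight(const Value *V, const DataLayout &DL) {
  KnownBits Known(V->getType()->getScalarSizeInBits());
  computeKnownBits(V, Known, DL);
  return Known.countMinTrailingZeros() >= 3;
}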
Linear expression BasePtr + Index * Scale + Offset.
SelectPatternFlavor Flavor
static bool isMinOrMax(SelectPatternFlavor SPF)
When implementing this min/max pattern as fcmp; select, does the fcmp have to be ordered?
SimplifyQuery getWithInstruction(const Instruction *I) const
A MapVector that performs no allocations if smaller than a certain size.
Capture information for a specific Use.