49#define DEBUG_TYPE "instsimplify"
86 if (
auto *BO = dyn_cast<BinaryOperator>(
Cond))
87 BinOpCode = BO->getOpcode();
92 if (BinOpCode == BinaryOperator::Or) {
93 ExpectedPred = ICmpInst::ICMP_NE;
94 }
else if (BinOpCode == BinaryOperator::And) {
95 ExpectedPred = ICmpInst::ICMP_EQ;
116 Pred1 != Pred2 || Pred1 != ExpectedPred)
119 if (
X == TrueVal ||
X == FalseVal ||
Y == TrueVal ||
Y == FalseVal)
120 return BinOpCode == BinaryOperator::Or ? TrueVal : FalseVal;
136 CmpInst *Cmp = dyn_cast<CmpInst>(V);
140 Value *CLHS = Cmp->getOperand(0), *CRHS = Cmp->getOperand(1);
141 if (CPred == Pred && CLHS ==
LHS && CRHS ==
RHS)
157 if (SimplifiedCmp ==
Cond) {
165 return SimplifiedCmp;
172 unsigned MaxRecurse) {
181 unsigned MaxRecurse) {
191 unsigned MaxRecurse) {
228 if (
I->getParent()->isEntryBlock() && !isa<InvokeInst>(
I) &&
241 auto *
B = dyn_cast<BinaryOperator>(V);
242 if (!
B ||
B->getOpcode() != OpcodeToExpand)
244 Value *B0 =
B->getOperand(0), *B1 =
B->getOperand(1);
255 if ((L == B0 && R == B1) ||
276 unsigned MaxRecurse) {
293 unsigned MaxRecurse) {
396 unsigned MaxRecurse) {
402 if (isa<SelectInst>(
LHS)) {
403 SI = cast<SelectInst>(
LHS);
405 assert(isa<SelectInst>(
RHS) &&
"No select instruction operand!");
406 SI = cast<SelectInst>(
RHS);
433 if (TV == SI->getTrueValue() && FV == SI->getFalseValue())
439 if ((FV && !TV) || (TV && !FV)) {
442 Instruction *Simplified = dyn_cast<Instruction>(FV ? FV : TV);
443 if (Simplified && Simplified->getOpcode() ==
unsigned(Opcode)) {
447 Value *UnsimplifiedBranch = FV ? SI->getTrueValue() : SI->getFalseValue();
448 Value *UnsimplifiedLHS = SI ==
LHS ? UnsimplifiedBranch :
LHS;
449 Value *UnsimplifiedRHS = SI ==
LHS ?
RHS : UnsimplifiedBranch;
450 if (Simplified->getOperand(0) == UnsimplifiedLHS &&
451 Simplified->getOperand(1) == UnsimplifiedRHS)
453 if (Simplified->isCommutative() &&
454 Simplified->getOperand(1) == UnsimplifiedLHS &&
455 Simplified->getOperand(0) == UnsimplifiedRHS)
474 unsigned MaxRecurse) {
480 if (!isa<SelectInst>(
LHS)) {
484 assert(isa<SelectInst>(
LHS) &&
"Not comparing with a select instruction!");
487 Value *TV = SI->getTrueValue();
488 Value *FV = SI->getFalseValue();
520 unsigned MaxRecurse) {
526 if (isa<PHINode>(
LHS)) {
527 PI = cast<PHINode>(
LHS);
532 assert(isa<PHINode>(
RHS) &&
"No PHI instruction operand!");
533 PI = cast<PHINode>(
RHS);
540 Value *CommonValue =
nullptr;
553 if (!V || (CommonValue && V != CommonValue))
572 if (!isa<PHINode>(
LHS)) {
576 assert(isa<PHINode>(
LHS) &&
"Not comparing with a phi instruction!");
584 Value *CommonValue =
nullptr;
598 if (!V || (CommonValue && V != CommonValue))
609 if (
auto *CLHS = dyn_cast<Constant>(Op0)) {
610 if (
auto *CRHS = dyn_cast<Constant>(Op1)) {
614 case Instruction::FAdd:
615 case Instruction::FSub:
616 case Instruction::FMul:
617 case Instruction::FDiv:
618 case Instruction::FRem:
619 if (Q.
CxtI !=
nullptr)
640 if (isa<PoisonValue>(Op1))
703 return ::simplifyAddInst(Op0, Op1, IsNSW, IsNUW, Query,
RecursionLimit);
716 bool AllowNonInbounds =
false) {
717 assert(V->getType()->isPtrOrPtrVectorTy());
720 V = V->stripAndAccumulateConstantOffsets(
DL,
Offset, AllowNonInbounds);
723 return Offset.sextOrTrunc(
DL.getIndexTypeSizeInBits(V->getType()));
743 if (
auto *VecTy = dyn_cast<VectorType>(
LHS->
getType()))
758 std::optional<bool> Imp =
763 case Instruction::Sub:
764 case Instruction::Xor:
765 case Instruction::URem:
766 case Instruction::SRem:
769 case Instruction::SDiv:
770 case Instruction::UDiv:
771 return ConstantInt::get(Ty, 1);
773 case Instruction::And:
774 case Instruction::Or:
793 if (isa<PoisonValue>(Op0) || isa<PoisonValue>(Op1))
829 Value *
X =
nullptr, *
Y =
nullptr, *Z = Op1;
887 if (
X->getType() ==
Y->getType())
924 return ::simplifySubInst(Op0, Op1, IsNSW, IsNUW, Q,
RecursionLimit);
935 if (isa<PoisonValue>(Op1))
959 return ConstantInt::getNullValue(Op0->
getType());
974 Instruction::Add, Q, MaxRecurse))
979 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
986 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
996 return ::simplifyMulInst(Op0, Op1, IsNSW, IsNUW, Q,
RecursionLimit);
1005 Constant *
C = dyn_cast_or_null<Constant>(V);
1006 return (
C &&
C->isAllOnesValue());
1012 unsigned MaxRecurse,
bool IsSigned) {
1029 Type *Ty =
X->getType();
1035 Constant *PosDividendC = ConstantInt::get(Ty,
C->abs());
1036 Constant *NegDividendC = ConstantInt::get(Ty, -
C->abs());
1045 if (
C->isMinSignedValue())
1051 Constant *PosDivisorC = ConstantInt::get(Ty,
C->abs());
1052 Constant *NegDivisorC = ConstantInt::get(Ty, -
C->abs());
1072 return isICmpTrue(ICmpInst::ICMP_ULT,
X,
Y, Q, MaxRecurse);
1079 unsigned MaxRecurse) {
1080 bool IsDiv = (Opcode == Instruction::SDiv || Opcode == Instruction::UDiv);
1081 bool IsSigned = (Opcode == Instruction::SDiv || Opcode == Instruction::SRem);
1098 auto *Op1C = dyn_cast<Constant>(Op1);
1099 auto *VTy = dyn_cast<FixedVectorType>(Ty);
1101 unsigned NumElts = VTy->getNumElements();
1102 for (
unsigned i = 0; i != NumElts; ++i) {
1111 if (isa<PoisonValue>(Op0))
1151 auto *
Mul = cast<OverflowingBinaryOperator>(Op0);
1162 if (
isDivZero(Op0, Op1, Q, MaxRecurse, IsSigned))
1170 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1176 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1186 unsigned MaxRecurse) {
1209 (Opcode == Instruction::UDiv
1229 ((Opcode == Instruction::SRem &&
1231 (Opcode == Instruction::URem &&
1246 return simplifyDiv(Instruction::SDiv, Op0, Op1, IsExact, Q, MaxRecurse);
1258 return simplifyDiv(Instruction::UDiv, Op0, Op1, IsExact, Q, MaxRecurse);
1269 unsigned MaxRecurse) {
1274 return ConstantInt::getNullValue(Op0->
getType());
1278 return ConstantInt::getNullValue(Op0->
getType());
1280 return simplifyRem(Instruction::SRem, Op0, Op1, Q, MaxRecurse);
1290 unsigned MaxRecurse) {
1291 return simplifyRem(Instruction::URem, Op0, Op1, Q, MaxRecurse);
1300 Constant *
C = dyn_cast<Constant>(Amount);
1310 const APInt *AmountC;
1316 if (isa<ConstantVector>(
C) || isa<ConstantDataVector>(
C)) {
1317 for (
unsigned I = 0,
1318 E = cast<FixedVectorType>(
C->getType())->getNumElements();
1332 unsigned MaxRecurse) {
1337 if (isa<PoisonValue>(Op0))
1358 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1364 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1382 assert(Opcode == Instruction::Shl &&
"Expected shl for nsw instruction");
1401 Value *Op1,
bool IsExact,
1420 if (Op0Known.
One[0])
1432 simplifyShift(Instruction::Shl, Op0, Op1, IsNSW, Q, MaxRecurse))
1456 if (IsNSW && IsNUW &&
1465 return ::simplifyShlInst(Op0, Op1, IsNSW, IsNUW, Q,
RecursionLimit);
1487 const APInt *ShRAmt, *ShLAmt;
1490 *ShRAmt == *ShLAmt) {
1493 if (ShRAmt->
uge(EffWidthY))
1541 ICmpInst *UnsignedICmp,
bool IsAnd,
1555 if (
match(UnsignedICmp,
1557 ICmpInst::isUnsigned(UnsignedPred)) {
1559 if ((UnsignedPred == ICmpInst::ICMP_UGE ||
1560 UnsignedPred == ICmpInst::ICMP_ULE) &&
1561 EqPred == ICmpInst::ICMP_NE && !IsAnd)
1564 if ((UnsignedPred == ICmpInst::ICMP_ULT ||
1565 UnsignedPred == ICmpInst::ICMP_UGT) &&
1566 EqPred == ICmpInst::ICMP_EQ && IsAnd)
1571 if (EqPred == ICmpInst::ICMP_NE && (UnsignedPred == ICmpInst::ICMP_ULT ||
1572 UnsignedPred == ICmpInst::ICMP_UGT))
1573 return IsAnd ? UnsignedICmp : ZeroICmp;
1577 if (EqPred == ICmpInst::ICMP_EQ && (UnsignedPred == ICmpInst::ICMP_ULE ||
1578 UnsignedPred == ICmpInst::ICMP_UGE))
1579 return IsAnd ? ZeroICmp : UnsignedICmp;
1585 if (
match(UnsignedICmp,
1587 if (UnsignedPred == ICmpInst::ICMP_UGE && IsAnd &&
1588 EqPred == ICmpInst::ICMP_NE &&
1590 return UnsignedICmp;
1591 if (UnsignedPred == ICmpInst::ICMP_ULT && !IsAnd &&
1592 EqPred == ICmpInst::ICMP_EQ &&
1594 return UnsignedICmp;
1599 ICmpInst::isUnsigned(UnsignedPred))
1601 else if (
match(UnsignedICmp,
1603 ICmpInst::isUnsigned(UnsignedPred))
1604 UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred);
1610 if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ &&
1612 return IsAnd ? ZeroICmp : UnsignedICmp;
1616 if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE &&
1618 return IsAnd ? UnsignedICmp : ZeroICmp;
1627 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE)
1628 return IsAnd ? UnsignedICmp : ZeroICmp;
1632 if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_EQ)
1633 return IsAnd ? ZeroICmp : UnsignedICmp;
1636 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_EQ &&
1641 if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_NE &&
1657 const APInt *C0, *C1;
1667 if (IsAnd && Range0.intersectWith(Range1).isEmptySet())
1672 if (!IsAnd && Range0.unionWith(Range1).isFullSet())
1680 if (Range0.contains(Range1))
1681 return IsAnd ? Cmp1 : Cmp0;
1682 if (Range1.contains(Range0))
1683 return IsAnd ? Cmp0 : Cmp1;
1692 const APInt *C0, *C1;
1700 auto *AddInst = cast<OverflowingBinaryOperator>(Op0->
getOperand(0));
1701 if (AddInst->getOperand(1) != Op1->
getOperand(1))
1708 const APInt Delta = *C1 - *C0;
1711 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_SGT)
1713 if (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT && IsNSW)
1717 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_SGT)
1719 if (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGT && IsNSW)
1725 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT)
1728 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGT)
1747 if (!IsAnd && Pred0 == ICmpInst::ICMP_EQ && Pred1 == ICmpInst::ICMP_NE)
1750 if (IsAnd && Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_EQ)
1783 const APInt *C0, *C1;
1791 auto *AddInst = cast<BinaryOperator>(Op0->
getOperand(0));
1792 if (AddInst->getOperand(1) != Op1->
getOperand(1))
1799 const APInt Delta = *C1 - *C0;
1802 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE)
1804 if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && IsNSW)
1808 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE)
1810 if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && IsNSW)
1816 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE)
1819 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE)
1851 Value *LHS0 =
LHS->getOperand(0), *LHS1 =
LHS->getOperand(1);
1852 Value *RHS0 =
RHS->getOperand(0), *RHS1 =
RHS->getOperand(1);
1857 if ((PredL == FCmpInst::FCMP_ORD || PredL == FCmpInst::FCMP_UNO) &&
1858 ((FCmpInst::isOrdered(PredR) && IsAnd) ||
1859 (FCmpInst::isUnordered(PredR) && !IsAnd))) {
1864 if (((LHS1 == RHS0 || LHS1 == RHS1) &&
1866 ((LHS0 == RHS0 || LHS0 == RHS1) &&
1868 return FCmpInst::isOrdered(PredL) == FCmpInst::isOrdered(PredR)
1873 if ((PredR == FCmpInst::FCMP_ORD || PredR == FCmpInst::FCMP_UNO) &&
1874 ((FCmpInst::isOrdered(PredL) && IsAnd) ||
1875 (FCmpInst::isUnordered(PredL) && !IsAnd))) {
1880 if (((RHS1 == LHS0 || RHS1 == LHS1) &&
1882 ((RHS0 == LHS0 || RHS0 == LHS1) &&
1884 return FCmpInst::isOrdered(PredL) == FCmpInst::isOrdered(PredR)
1893 Value *Op1,
bool IsAnd) {
1895 auto *Cast0 = dyn_cast<CastInst>(Op0);
1896 auto *Cast1 = dyn_cast<CastInst>(Op1);
1897 if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() &&
1898 Cast0->getSrcTy() == Cast1->getSrcTy()) {
1899 Op0 = Cast0->getOperand(0);
1900 Op1 = Cast1->getOperand(0);
1904 auto *ICmp0 = dyn_cast<ICmpInst>(Op0);
1905 auto *ICmp1 = dyn_cast<ICmpInst>(Op1);
1910 auto *FCmp0 = dyn_cast<FCmpInst>(Op0);
1911 auto *FCmp1 = dyn_cast<FCmpInst>(Op1);
1922 if (
auto *
C = dyn_cast<Constant>(V))
1931 bool AllowRefinement,
1933 unsigned MaxRecurse);
1937 unsigned MaxRecurse) {
1938 assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
1953 (Opcode == Instruction::And ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE)) {
1954 if (Res == Absorber)
1964 if (Res == Absorber)
1971 nullptr, MaxRecurse))
1972 return Simplify(Res);
1975 nullptr, MaxRecurse))
1976 return Simplify(Res);
1986 assert(BinaryOperator::isBitwiseLogicOp(Opcode) &&
"Expected logic op");
1998 return Opcode == Instruction::And ? ConstantInt::getNullValue(Ty)
1999 : ConstantInt::getAllOnesValue(Ty);
2008 unsigned MaxRecurse) {
2042 const APInt *Shift1, *Shift2;
2047 Shift1->
uge(*Shift2))
2060 unsigned MaxRecurse) {
2065 if (isa<PoisonValue>(Op1))
2100 (~(*Mask)).lshr(*ShAmt).isZero())
2106 (~(*Mask)).shl(*ShAmt).isZero())
2111 const APInt *PowerC;
2120 return ConstantInt::getNullValue(Op1->
getType());
2133 Instruction::Or, Q, MaxRecurse))
2138 Instruction::Xor, Q, MaxRecurse))
2141 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) {
2159 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
2183 if (EffWidthY <= ShftCnt) {
2216 if (*Implied ==
true)
2219 if (*Implied ==
false)
2244 assert(
X->getType() ==
Y->getType() &&
"Expected same type for 'or' ops");
2245 Type *Ty =
X->getType();
2249 return ConstantInt::getAllOnesValue(Ty);
2253 return ConstantInt::getAllOnesValue(Ty);
2271 return ConstantInt::getAllOnesValue(Ty);
2295 return ConstantInt::getAllOnesValue(Ty);
2337 unsigned MaxRecurse) {
2342 if (isa<PoisonValue>(Op1))
2376 C->ule(
X->getType()->getScalarSizeInBits())) {
2377 return ConstantInt::getAllOnesValue(
X->getType());
2431 Instruction::And, Q, MaxRecurse))
2434 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) {
2452 const APInt *C1, *C2;
2478 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
2488 if (std::optional<bool> Implied =
2491 if (*Implied ==
false)
2494 if (*Implied ==
true)
2497 if (std::optional<bool> Implied =
2500 if (*Implied ==
false)
2503 if (*Implied ==
true)
2521 unsigned MaxRecurse) {
2526 if (isa<PoisonValue>(Op1))
2564 if (
Value *R = foldAndOrNot(Op0, Op1))
2566 if (
Value *R = foldAndOrNot(Op1, Op0))
2608 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
2611 Value *CmpLHS = Cmp->getOperand(0), *CmpRHS = Cmp->getOperand(1);
2612 if (Pred == Cmp->getPredicate() &&
LHS == CmpLHS &&
RHS == CmpRHS)
2615 LHS == CmpRHS &&
RHS == CmpLHS)
2628 if (
const AllocaInst *AI = dyn_cast<AllocaInst>(V))
2629 return AI->isStaticAlloca();
2630 if (
const GlobalValue *GV = dyn_cast<GlobalValue>(V))
2631 return (GV->hasLocalLinkage() || GV->hasHiddenVisibility() ||
2632 GV->hasProtectedVisibility() || GV->hasGlobalUnnamedAddr()) &&
2633 !GV->isThreadLocal();
2634 if (
const Argument *
A = dyn_cast<Argument>(V))
2635 return A->hasByValAttr();
2668 auto isByValArg = [](
const Value *V) {
2669 const Argument *
A = dyn_cast<Argument>(V);
2670 return A &&
A->hasByValAttr();
2676 return isa<AllocaInst>(V2) || isa<GlobalVariable>(V2) || isByValArg(V2);
2678 return isa<AllocaInst>(V1) || isa<GlobalVariable>(V1) || isByValArg(V1);
2680 return isa<AllocaInst>(V1) &&
2681 (isa<AllocaInst>(V2) || isa<GlobalVariable>(V2));
2752 unsigned IndexSize =
DL.getIndexTypeSizeInBits(
LHS->
getType());
2753 APInt LHSOffset(IndexSize, 0), RHSOffset(IndexSize, 0);
2773 Opts.
EvalMode = ObjectSizeOpts::Mode::Min;
2775 if (
auto *
I = dyn_cast<Instruction>(V))
2776 return I->getFunction();
2777 if (
auto *
A = dyn_cast<Argument>(V))
2778 return A->getParent();
2784 APInt Dist = LHSOffset - RHSOffset;
2812 if ((IsNAC(LHSUObjs) && IsAllocDisjoint(RHSUObjs)) ||
2813 (IsNAC(RHSUObjs) && IsAllocDisjoint(LHSUObjs)))
2835 bool Captured =
false;
2838 if (
auto *ICmp = dyn_cast<ICmpInst>(U->getUser())) {
2842 unsigned OtherIdx = 1 - U->getOperandNo();
2843 auto *LI = dyn_cast<LoadInst>(ICmp->getOperand(OtherIdx));
2844 if (LI && isa<GlobalVariable>(LI->getPointerOperand()))
2852 CustomCaptureTracker Tracker;
2854 if (!Tracker.Captured)
2876 auto ExtractNotLHS = [](
Value *V) ->
Value * {
2938 case ICmpInst::ICMP_UGE:
2942 case ICmpInst::ICMP_SGE:
2953 case ICmpInst::ICMP_ULE:
2957 case ICmpInst::ICMP_SLE:
2977 case ICmpInst::ICMP_ULT:
2979 case ICmpInst::ICMP_UGE:
2981 case ICmpInst::ICMP_EQ:
2982 case ICmpInst::ICMP_ULE:
2986 case ICmpInst::ICMP_NE:
2987 case ICmpInst::ICMP_UGT:
2991 case ICmpInst::ICMP_SLT: {
2999 case ICmpInst::ICMP_SLE: {
3008 case ICmpInst::ICMP_SGE: {
3016 case ICmpInst::ICMP_SGT: {
3070 *MulC != 0 &&
C->urem(*MulC) != 0) ||
3072 *MulC != 0 &&
C->srem(*MulC) != 0)))
3073 return ConstantInt::get(ITy, Pred == ICmpInst::ICMP_NE);
3081 unsigned MaxRecurse) {
3087 if (Pred == ICmpInst::ICMP_ULT)
3089 if (Pred == ICmpInst::ICMP_UGE)
3092 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGE) {
3104 if (Pred == ICmpInst::ICMP_UGT)
3106 if (Pred == ICmpInst::ICMP_ULE)
3115 case ICmpInst::ICMP_SGT:
3116 case ICmpInst::ICMP_SGE: {
3122 case ICmpInst::ICMP_EQ:
3123 case ICmpInst::ICMP_UGT:
3124 case ICmpInst::ICMP_UGE:
3126 case ICmpInst::ICMP_SLT:
3127 case ICmpInst::ICMP_SLE: {
3133 case ICmpInst::ICMP_NE:
3134 case ICmpInst::ICMP_ULT:
3135 case ICmpInst::ICMP_ULE:
3142 if (Pred == ICmpInst::ICMP_ULE)
3144 if (Pred == ICmpInst::ICMP_UGT)
3155 if (Pred == ICmpInst::ICMP_UGT)
3157 if (Pred == ICmpInst::ICMP_ULE)
3178 case ICmpInst::ICMP_EQ:
3179 case ICmpInst::ICMP_UGE:
3181 case ICmpInst::ICMP_NE:
3182 case ICmpInst::ICMP_ULT:
3184 case ICmpInst::ICMP_UGT:
3185 case ICmpInst::ICMP_ULE:
3201 const APInt *C1, *C2;
3208 if (Pred == ICmpInst::ICMP_UGT)
3210 if (Pred == ICmpInst::ICMP_ULE)
3248 const APInt *C1, *C2;
3262 unsigned MaxRecurse) {
3265 if (MaxRecurse && (LBO || RBO)) {
3267 Value *
A =
nullptr, *
B =
nullptr, *
C =
nullptr, *
D =
nullptr;
3269 bool NoLHSWrapProblem =
false, NoRHSWrapProblem =
false;
3270 if (LBO && LBO->
getOpcode() == Instruction::Add) {
3280 if (RBO && RBO->
getOpcode() == Instruction::Add) {
3292 if ((
A ==
RHS ||
B ==
RHS) && NoLHSWrapProblem)
3299 if ((
C ==
LHS ||
D ==
LHS) && NoRHSWrapProblem)
3302 C ==
LHS ?
D :
C, Q, MaxRecurse - 1))
3306 bool CanSimplify = (NoLHSWrapProblem && NoRHSWrapProblem) ||
3308 if (
A &&
C && (
A ==
C ||
A ==
D ||
B ==
C ||
B ==
D) && CanSimplify) {
3315 }
else if (
A ==
D) {
3319 }
else if (
B ==
C) {
3340 ICmpInst::getSwappedPredicate(Pred), RBO,
LHS, Q, MaxRecurse))
3347 if (
C->isStrictlyPositive()) {
3348 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_NE)
3350 if (Pred == ICmpInst::ICMP_SGE || Pred == ICmpInst::ICMP_EQ)
3353 if (
C->isNonNegative()) {
3354 if (Pred == ICmpInst::ICMP_SLE)
3356 if (Pred == ICmpInst::ICMP_SGT)
3379 if (Pred == ICmpInst::ICMP_EQ)
3381 if (Pred == ICmpInst::ICMP_NE)
3390 if (Pred == ICmpInst::ICMP_UGT)
3392 if (Pred == ICmpInst::ICMP_ULE)
3403 case Instruction::Shl: {
3406 if (!NUW || (ICmpInst::isSigned(Pred) && !NSW) ||
3419 case Instruction::And:
3420 case Instruction::Or: {
3421 const APInt *C1, *C2;
3427 Pred = ICmpInst::getSwappedPredicate(Pred);
3430 if (Pred == ICmpInst::ICMP_ULE)
3432 if (Pred == ICmpInst::ICMP_UGT)
3435 if (Pred == ICmpInst::ICMP_SLE)
3437 if (Pred == ICmpInst::ICMP_SGT)
3451 case Instruction::UDiv:
3452 case Instruction::LShr:
3453 if (ICmpInst::isSigned(Pred) || !Q.
IIQ.
isExact(LBO) ||
3460 case Instruction::SDiv:
3468 case Instruction::AShr:
3475 case Instruction::Shl: {
3480 if (!NSW && ICmpInst::isSigned(Pred))
3496 unsigned MaxRecurse) {
3652 Pred = ICmpInst::getSwappedPredicate(Pred);
3658 (
A ==
C ||
A ==
D ||
B ==
C ||
B ==
D)) {
3667 (
A ==
C ||
A ==
D ||
B ==
C ||
B ==
D)) {
3691 CallInst *Assume = cast<CallInst>(AssumeVH);
3704 auto *II = dyn_cast<IntrinsicInst>(
LHS);
3708 switch (II->getIntrinsicID()) {
3709 case Intrinsic::uadd_sat:
3711 if (II->getArgOperand(0) ==
RHS || II->getArgOperand(1) ==
RHS) {
3712 if (Pred == ICmpInst::ICMP_UGE)
3714 if (Pred == ICmpInst::ICMP_ULT)
3718 case Intrinsic::usub_sat:
3720 if (II->getArgOperand(0) ==
RHS) {
3721 if (Pred == ICmpInst::ICMP_ULE)
3723 if (Pred == ICmpInst::ICMP_UGT)
3740 if (
const Argument *
A = dyn_cast<Argument>(V)) {
3741 Range =
A->getAttribute(llvm::Attribute::Range);
3742 }
else if (
const CallBase *CB = dyn_cast<CallBase>(V)) {
3743 Range = CB->getRetAttr(llvm::Attribute::Range);
3746 if (Range.isValid())
3747 return Range.getRange();
3749 return std::nullopt;
3767 assert(!isa<UndefValue>(
LHS) &&
"Unexpected icmp undef,%X");
3772 if (isa<PoisonValue>(
RHS))
3801 if (LhsCr->icmp(Pred, *RhsCr))
3809 if (isa<CastInst>(
LHS) && (isa<Constant>(
RHS) || isa<CastInst>(
RHS))) {
3817 if (MaxRecurse && isa<PtrToIntInst>(LI) &&
3826 if (RI->getOperand(0)->getType() == SrcTy)
3834 if (isa<ZExtInst>(
LHS)) {
3838 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3842 RI->getOperand(0), Q, MaxRecurse - 1))
3846 else if (
SExtInst *RI = dyn_cast<SExtInst>(
RHS)) {
3847 if (
SrcOp == RI->getOperand(0)) {
3848 if (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_SGE)
3850 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_SLT)
3864 assert(Trunc &&
"Constant-fold of ImmConstant should not fail");
3867 assert(RExt &&
"Constant-fold of ImmConstant should not fail");
3870 assert(AnyEq &&
"Constant-fold of ImmConstant should not fail");
3877 SrcOp, Trunc, Q, MaxRecurse - 1))
3887 case ICmpInst::ICMP_EQ:
3888 case ICmpInst::ICMP_UGT:
3889 case ICmpInst::ICMP_UGE:
3892 case ICmpInst::ICMP_NE:
3893 case ICmpInst::ICMP_ULT:
3894 case ICmpInst::ICMP_ULE:
3899 case ICmpInst::ICMP_SGT:
3900 case ICmpInst::ICMP_SGE:
3904 case ICmpInst::ICMP_SLT:
3905 case ICmpInst::ICMP_SLE:
3914 if (isa<SExtInst>(
LHS)) {
3918 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3925 else if (
ZExtInst *RI = dyn_cast<ZExtInst>(
RHS)) {
3926 if (
SrcOp == RI->getOperand(0)) {
3927 if (Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_SLE)
3929 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SGT)
3942 assert(Trunc &&
"Constant-fold of ImmConstant should not fail");
3945 assert(RExt &&
"Constant-fold of ImmConstant should not fail");
3948 assert(AnyEq &&
"Constant-fold of ImmConstant should not fail");
3963 case ICmpInst::ICMP_EQ:
3965 case ICmpInst::ICMP_NE:
3970 case ICmpInst::ICMP_SGT:
3971 case ICmpInst::ICMP_SGE:
3974 case ICmpInst::ICMP_SLT:
3975 case ICmpInst::ICMP_SLE:
3981 case ICmpInst::ICMP_UGT:
3982 case ICmpInst::ICMP_UGE:
3990 case ICmpInst::ICMP_ULT:
3991 case ICmpInst::ICMP_ULE:
4022 ICmpInst::getSwappedPredicate(Pred),
RHS,
LHS))
4028 if (std::optional<bool> Res =
4037 if (
auto *CLHS = dyn_cast<PtrToIntOperator>(
LHS))
4038 if (
auto *CRHS = dyn_cast<PtrToIntOperator>(
RHS))
4039 if (CLHS->getPointerOperandType() == CRHS->getPointerOperandType() &&
4043 CRHS->getPointerOperand(), Q))
4048 if (isa<SelectInst>(
LHS) || isa<SelectInst>(
RHS))
4054 if (isa<PHINode>(
LHS) || isa<PHINode>(
RHS))
4070 unsigned MaxRecurse) {
4086 if (Pred == FCmpInst::FCMP_FALSE)
4088 if (Pred == FCmpInst::FCMP_TRUE)
4093 if (isa<PoisonValue>(
LHS) || isa<PoisonValue>(
RHS))
4116 if (Pred == FCmpInst::FCMP_ORD || Pred == FCmpInst::FCMP_UNO) {
4119 return ConstantInt::get(
RetTy, Pred == FCmpInst::FCMP_ORD);
4124 std::optional<KnownFPClass> FullKnownClassLHS;
4128 auto computeLHSClass = [=, &FullKnownClassLHS](
FPClassTest InterestedFlags =
4130 if (FullKnownClassLHS)
4131 return *FullKnownClassLHS;
4144 FullKnownClassLHS = computeLHSClass();
4145 if ((FullKnownClassLHS->KnownFPClasses & ClassTest) ==
fcNone)
4147 if ((FullKnownClassLHS->KnownFPClasses & ~ClassTest) ==
fcNone)
4162 if (
C->isNegative() && !
C->isNegZero()) {
4168 case FCmpInst::FCMP_UGE:
4169 case FCmpInst::FCMP_UGT:
4170 case FCmpInst::FCMP_UNE: {
4178 case FCmpInst::FCMP_OEQ:
4179 case FCmpInst::FCMP_OLE:
4180 case FCmpInst::FCMP_OLT: {
4199 cast<IntrinsicInst>(
LHS)->getIntrinsicID() == Intrinsic::maxnum;
4203 case FCmpInst::FCMP_OEQ:
4204 case FCmpInst::FCMP_UEQ:
4208 case FCmpInst::FCMP_ONE:
4209 case FCmpInst::FCMP_UNE:
4213 case FCmpInst::FCMP_OGE:
4214 case FCmpInst::FCMP_UGE:
4215 case FCmpInst::FCMP_OGT:
4216 case FCmpInst::FCMP_UGT:
4221 return ConstantInt::get(
RetTy, IsMaxNum);
4222 case FCmpInst::FCMP_OLE:
4223 case FCmpInst::FCMP_ULE:
4224 case FCmpInst::FCMP_OLT:
4225 case FCmpInst::FCMP_ULT:
4230 return ConstantInt::get(
RetTy, !IsMaxNum);
4242 case FCmpInst::FCMP_OGE:
4243 case FCmpInst::FCMP_ULT: {
4246 Interested |=
fcNan;
4257 case FCmpInst::FCMP_UGE:
4258 case FCmpInst::FCMP_OLT: {
4275 if (isa<SelectInst>(
LHS) || isa<SelectInst>(
RHS))
4281 if (isa<PHINode>(
LHS) || isa<PHINode>(
RHS))
4295 bool AllowRefinement,
4297 unsigned MaxRecurse) {
4306 if (isa<Constant>(
Op))
4309 auto *
I = dyn_cast<Instruction>(V);
4315 if (isa<PHINode>(
I))
4318 if (
Op->getType()->isVectorTy()) {
4321 if (!
I->getType()->isVectorTy() || isa<ShuffleVectorInst>(
I) ||
4322 isa<CallBase>(
I) || isa<BitCastInst>(
I))
4327 if (
match(
I, m_Intrinsic<Intrinsic::is_constant>()))
4332 bool AnyReplaced =
false;
4333 for (
Value *InstOp :
I->operands()) {
4335 InstOp,
Op, RepOp, Q, AllowRefinement, DropFlags, MaxRecurse)) {
4337 AnyReplaced = InstOp != NewInstOp;
4346 if (!AllowRefinement) {
4351 if (
auto *BO = dyn_cast<BinaryOperator>(
I)) {
4352 unsigned Opcode = BO->getOpcode();
4361 if ((Opcode == Instruction::And || Opcode == Instruction::Or) &&
4362 NewOps[0] == NewOps[1]) {
4364 if (
auto *PDI = dyn_cast<PossiblyDisjointInst>(BO)) {
4365 if (PDI->isDisjoint()) {
4377 if ((Opcode == Instruction::Sub || Opcode == Instruction::Xor) &&
4378 NewOps[0] == RepOp && NewOps[1] == RepOp)
4390 if ((NewOps[0] == Absorber || NewOps[1] == Absorber) &&
4395 if (isa<GetElementPtrInst>(
I)) {
4411 auto PreventSelfSimplify = [V](
Value *Simplified) {
4412 return Simplified != V ? Simplified :
nullptr;
4415 return PreventSelfSimplify(
4422 for (
Value *NewOp : NewOps) {
4423 if (
Constant *ConstOp = dyn_cast<Constant>(NewOp))
4438 if (!AllowRefinement) {
4441 if (
auto *II = dyn_cast<IntrinsicInst>(
I);
4442 II && II->getIntrinsicID() == Intrinsic::abs) {
4443 if (!ConstOps[0]->isNotMinSignedValue())
4449 if (DropFlags && Res &&
I->hasPoisonGeneratingFlagsOrMetadata())
4459 bool AllowRefinement,
4461 return ::simplifyWithOpReplaced(V,
Op, RepOp, Q, AllowRefinement, DropFlags,
4468 const APInt *
Y,
bool TrueWhenUnset) {
4475 return TrueWhenUnset ? FalseVal : TrueVal;
4481 return TrueWhenUnset ? FalseVal : TrueVal;
4483 if (
Y->isPowerOf2()) {
4489 if (TrueWhenUnset && cast<PossiblyDisjointInst>(TrueVal)->isDisjoint())
4491 return TrueWhenUnset ? TrueVal : FalseVal;
4499 if (!TrueWhenUnset && cast<PossiblyDisjointInst>(FalseVal)->isDisjoint())
4501 return TrueWhenUnset ? TrueVal : FalseVal;
4512 if (CmpRHS == TVal || CmpRHS == FVal) {
4514 Pred = ICmpInst::getSwappedPredicate(Pred);
4518 if (CmpLHS == FVal) {
4520 Pred = ICmpInst::getInversePredicate(Pred);
4525 Value *
X = CmpLHS, *
Y = CmpRHS;
4526 bool PeekedThroughSelectShuffle =
false;
4527 auto *Shuf = dyn_cast<ShuffleVectorInst>(FVal);
4528 if (Shuf && Shuf->isSelect()) {
4529 if (Shuf->getOperand(0) ==
Y)
4530 FVal = Shuf->getOperand(1);
4531 else if (Shuf->getOperand(1) ==
Y)
4532 FVal = Shuf->getOperand(0);
4535 PeekedThroughSelectShuffle =
true;
4539 auto *MMI = dyn_cast<MinMaxIntrinsic>(FVal);
4540 if (!MMI || TVal !=
X ||
4558 if (PeekedThroughSelectShuffle)
4591 Pred == ICmpInst::ICMP_EQ);
4599 unsigned MaxRecurse) {
4602 nullptr, MaxRecurse) == TrueVal)
4606 nullptr, MaxRecurse) == FalseVal)
4617 unsigned MaxRecurse) {
4619 Value *CmpLHS, *CmpRHS;
4627 if (Pred == ICmpInst::ICMP_NE) {
4628 Pred = ICmpInst::ICMP_EQ;
4635 if (TrueVal->getType()->isIntOrIntVectorTy()) {
4643 X->getType()->getScalarSizeInBits());
4649 if (Pred == ICmpInst::ICMP_EQ &&
match(CmpRHS,
m_Zero())) {
4663 if (
match(TrueVal, isFsh) && FalseVal ==
X && CmpLHS == ShAmt)
4676 if (
match(FalseVal, isRotate) && TrueVal ==
X && CmpLHS == ShAmt &&
4677 Pred == ICmpInst::ICMP_EQ)
4682 if (
match(TrueVal, m_Intrinsic<Intrinsic::abs>(
m_Specific(CmpLHS))) &&
4699 if (Pred == ICmpInst::ICMP_EQ) {
4749 bool HasNoSignedZeros =
4756 if (Pred == FCmpInst::FCMP_OEQ)
4761 if (Pred == FCmpInst::FCMP_UNE)
4772 if (
auto *CondC = dyn_cast<Constant>(
Cond)) {
4773 if (
auto *TrueC = dyn_cast<Constant>(TrueVal))
4774 if (
auto *FalseC = dyn_cast<Constant>(FalseVal))
4779 if (isa<PoisonValue>(CondC))
4784 return isa<Constant>(FalseVal) ? FalseVal : TrueVal;
4796 assert(
Cond->getType()->isIntOrIntVectorTy(1) &&
4797 "Select must have bool or bool vector condition");
4798 assert(TrueVal->getType() == FalseVal->getType() &&
4799 "Select must have same types for true/false ops");
4801 if (
Cond->getType() == TrueVal->getType()) {
4864 if (TrueVal == FalseVal)
4867 if (
Cond == TrueVal) {
4875 if (
Cond == FalseVal) {
4889 if (isa<PoisonValue>(TrueVal) ||
4894 if (isa<PoisonValue>(FalseVal) ||
4900 if (isa<FixedVectorType>(TrueVal->getType()) &&
4904 cast<FixedVectorType>(TrueC->
getType())->getNumElements();
4906 for (
unsigned i = 0; i != NumElts; ++i) {
4910 if (!TEltC || !FEltC)
4917 else if (isa<PoisonValue>(TEltC) ||
4920 else if (isa<PoisonValue>(FEltC) ||
4926 if (NewC.
size() == NumElts)
4942 return *Imp ? TrueVal : FalseVal;
4959 cast<PointerType>(
Ptr->getType()->getScalarType())->getAddressSpace();
4962 if (Indices.
empty())
4972 if (
VectorType *VT = dyn_cast<VectorType>(
Op->getType())) {
4973 GEPTy = VectorType::get(GEPTy, VT->getElementCount());
4980 if (
Ptr->getType() == GEPTy &&
4986 if (isa<PoisonValue>(
Ptr) ||
4987 any_of(Indices, [](
const auto *V) {
return isa<PoisonValue>(V); }))
4994 bool IsScalableVec =
4996 return isa<ScalableVectorType>(V->getType());
4999 if (Indices.
size() == 1) {
5001 if (!IsScalableVec && Ty->
isSized()) {
5006 if (TyAllocSize == 0 &&
Ptr->getType() == GEPTy)
5011 if (Indices[0]->
getType()->getScalarSizeInBits() ==
5013 auto CanSimplify = [GEPTy, &
P,
Ptr]() ->
bool {
5014 return P->getType() == GEPTy &&
5018 if (TyAllocSize == 1 &&
5029 TyAllocSize == 1ULL <<
C && CanSimplify())
5045 [](
Value *
Idx) { return match(Idx, m_Zero()); })) {
5049 APInt BasePtrOffset(IdxWidth, 0);
5050 Value *StrippedBasePtr =
5051 Ptr->stripAndAccumulateInBoundsConstantOffsets(Q.
DL, BasePtrOffset);
5060 !BasePtrOffset.
isZero()) {
5061 auto *CI = ConstantInt::get(GEPTy->
getContext(), BasePtrOffset);
5067 !BasePtrOffset.
isOne()) {
5068 auto *CI = ConstantInt::get(GEPTy->
getContext(), BasePtrOffset - 1);
5075 if (!isa<Constant>(
Ptr) ||
5076 !
all_of(Indices, [](
Value *V) {
return isa<Constant>(V); }))
5081 std::nullopt, Indices);
5098 if (
Constant *CAgg = dyn_cast<Constant>(Agg))
5099 if (
Constant *CVal = dyn_cast<Constant>(Val))
5104 if (isa<PoisonValue>(Val) ||
5110 if (EV->getAggregateOperand()->getType() == Agg->
getType() &&
5111 EV->getIndices() == Idxs) {
5114 if (isa<PoisonValue>(Agg) ||
5117 return EV->getAggregateOperand();
5120 if (Agg == EV->getAggregateOperand())
5130 return ::simplifyInsertValueInst(Agg, Val, Idxs, Q,
RecursionLimit);
5136 auto *VecC = dyn_cast<Constant>(Vec);
5137 auto *ValC = dyn_cast<Constant>(Val);
5138 auto *IdxC = dyn_cast<Constant>(
Idx);
5139 if (VecC && ValC && IdxC)
5143 if (
auto *CI = dyn_cast<ConstantInt>(
Idx)) {
5144 if (isa<FixedVectorType>(Vec->
getType()) &&
5145 CI->uge(cast<FixedVectorType>(Vec->
getType())->getNumElements()))
5155 if (isa<PoisonValue>(Val) ||
5172 if (
auto *CAgg = dyn_cast<Constant>(Agg))
5176 unsigned NumIdxs = Idxs.
size();
5177 for (
auto *IVI = dyn_cast<InsertValueInst>(Agg); IVI !=
nullptr;
5178 IVI = dyn_cast<InsertValueInst>(IVI->getAggregateOperand())) {
5180 unsigned NumInsertValueIdxs = InsertValueIdxs.
size();
5181 unsigned NumCommonIdxs = std::min(NumInsertValueIdxs, NumIdxs);
5182 if (InsertValueIdxs.
slice(0, NumCommonIdxs) ==
5183 Idxs.
slice(0, NumCommonIdxs)) {
5184 if (NumIdxs == NumInsertValueIdxs)
5185 return IVI->getInsertedValueOperand();
5202 auto *VecVTy = cast<VectorType>(Vec->
getType());
5203 if (
auto *CVec = dyn_cast<Constant>(Vec)) {
5204 if (
auto *CIdx = dyn_cast<Constant>(
Idx))
5218 if (
auto *IdxC = dyn_cast<ConstantInt>(
Idx)) {
5220 unsigned MinNumElts = VecVTy->getElementCount().getKnownMinValue();
5221 if (isa<FixedVectorType>(VecVTy) && IdxC->getValue().uge(MinNumElts))
5224 if (IdxC->getValue().ult(MinNumElts))
5234 auto *IE = dyn_cast<InsertElementInst>(Vec);
5235 if (IE && IE->getOperand(2) ==
Idx)
5236 return IE->getOperand(1);
5259 Value *CommonValue =
nullptr;
5260 bool HasUndefInput =
false;
5267 HasUndefInput =
true;
5270 if (CommonValue &&
Incoming != CommonValue)
5280 if (HasUndefInput) {
5292 if (
auto *
C = dyn_cast<Constant>(
Op))
5295 if (
auto *CI = dyn_cast<CastInst>(
Op)) {
5296 auto *Src = CI->getOperand(0);
5297 Type *SrcTy = Src->getType();
5298 Type *MidTy = CI->getType();
5300 if (Src->getType() == Ty) {
5310 SrcIntPtrTy, MidIntPtrTy,
5311 DstIntPtrTy) == Instruction::BitCast)
5317 if (CastOpc == Instruction::BitCast)
5318 if (
Op->getType() == Ty)
5333 int MaskVal,
Value *RootVec,
5334 unsigned MaxRecurse) {
5344 int InVecNumElts = cast<FixedVectorType>(Op0->
getType())->getNumElements();
5345 int RootElt = MaskVal;
5346 Value *SourceOp = Op0;
5347 if (MaskVal >= InVecNumElts) {
5348 RootElt = MaskVal - InVecNumElts;
5354 if (
auto *SourceShuf = dyn_cast<ShuffleVectorInst>(SourceOp)) {
5356 DestElt, SourceShuf->getOperand(0), SourceShuf->getOperand(1),
5357 SourceShuf->getMaskValue(RootElt), RootVec, MaxRecurse);
5369 if (RootVec != SourceOp)
5374 if (RootElt != DestElt)
5383 unsigned MaxRecurse) {
5387 auto *InVecTy = cast<VectorType>(Op0->
getType());
5388 unsigned MaskNumElts = Mask.size();
5389 ElementCount InVecEltCount = InVecTy->getElementCount();
5394 Indices.
assign(Mask.begin(), Mask.end());
5399 bool MaskSelects0 =
false, MaskSelects1 =
false;
5401 for (
unsigned i = 0; i != MaskNumElts; ++i) {
5402 if (Indices[i] == -1)
5404 if ((
unsigned)Indices[i] < InVecNumElts)
5405 MaskSelects0 =
true;
5407 MaskSelects1 =
true;
5415 auto *Op0Const = dyn_cast<Constant>(Op0);
5416 auto *Op1Const = dyn_cast<Constant>(Op1);
5421 if (Op0Const && Op1Const)
5427 if (!Scalable && Op0Const && !Op1Const) {
5445 if (
all_of(Indices, [InsertIndex](
int MaskElt) {
5446 return MaskElt == InsertIndex || MaskElt == -1;
5448 assert(isa<UndefValue>(Op1) &&
"Expected undef operand 1 for splat");
5452 for (
unsigned i = 0; i != MaskNumElts; ++i)
5453 if (Indices[i] == -1)
5461 if (
auto *OpShuf = dyn_cast<ShuffleVectorInst>(Op0))
5481 Value *RootVec =
nullptr;
5482 for (
unsigned i = 0; i != MaskNumElts; ++i) {
5504 if (
auto *
C = dyn_cast<Constant>(
Op))
5532 Type *Ty = In->getType();
5533 if (
auto *VecTy = dyn_cast<FixedVectorType>(Ty)) {
5534 unsigned NumElts = VecTy->getNumElements();
5536 for (
unsigned i = 0; i != NumElts; ++i) {
5537 Constant *EltC = In->getAggregateElement(i);
5540 if (EltC && isa<PoisonValue>(EltC))
5542 else if (EltC && EltC->
isNaN())
5543 NewC[i] = ConstantFP::get(
5544 EltC->
getType(), cast<ConstantFP>(EltC)->getValue().makeQuiet());
5558 if (isa<ScalableVectorType>(Ty)) {
5559 auto *
Splat = In->getSplatValue();
5561 "Found a scalable-vector NaN but not a splat");
5567 return ConstantFP::get(Ty, cast<ConstantFP>(In)->getValue().makeQuiet());
5582 for (
Value *V : Ops) {
5590 if (FMF.
noNaNs() && (IsNan || IsUndef))
5592 if (FMF.
noInfs() && (IsInf || IsUndef))
5618 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5684 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5794 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5800 return simplifyFMAFMul(Op0, Op1, FMF, Q, MaxRecurse, ExBehavior, Rounding);
5807 return ::simplifyFAddInst(Op0, Op1, FMF, Q,
RecursionLimit, ExBehavior,
5815 return ::simplifyFSubInst(Op0, Op1, FMF, Q,
RecursionLimit, ExBehavior,
5823 return ::simplifyFMulInst(Op0, Op1, FMF, Q,
RecursionLimit, ExBehavior,
5831 return ::simplifyFMAFMul(Op0, Op1, FMF, Q,
RecursionLimit, ExBehavior,
5839 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5864 return ConstantFP::get(Op0->
getType(), 1.0);
5876 return ConstantFP::get(Op0->
getType(), -1.0);
5890 return ::simplifyFDivInst(Op0, Op1, FMF, Q,
RecursionLimit, ExBehavior,
5898 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5928 return ::simplifyFRemInst(Op0, Op1, FMF, Q,
RecursionLimit, ExBehavior,
5937 unsigned MaxRecurse) {
5939 case Instruction::FNeg:
5951 unsigned MaxRecurse) {
5953 case Instruction::FNeg:
5974 case Instruction::Add:
5977 case Instruction::Sub:
5980 case Instruction::Mul:
5983 case Instruction::SDiv:
5985 case Instruction::UDiv:
5987 case Instruction::SRem:
5989 case Instruction::URem:
5991 case Instruction::Shl:
5994 case Instruction::LShr:
5996 case Instruction::AShr:
5998 case Instruction::And:
6000 case Instruction::Or:
6002 case Instruction::Xor:
6004 case Instruction::FAdd:
6006 case Instruction::FSub:
6008 case Instruction::FMul:
6010 case Instruction::FDiv:
6012 case Instruction::FRem:
6024 unsigned MaxRecurse) {
6026 case Instruction::FAdd:
6028 case Instruction::FSub:
6030 case Instruction::FMul:
6032 case Instruction::FDiv:
6068 case Intrinsic::fabs:
6069 case Intrinsic::floor:
6070 case Intrinsic::ceil:
6071 case Intrinsic::trunc:
6072 case Intrinsic::rint:
6073 case Intrinsic::nearbyint:
6074 case Intrinsic::round:
6075 case Intrinsic::roundeven:
6076 case Intrinsic::canonicalize:
6077 case Intrinsic::arithmetic_fence:
6089 case Intrinsic::floor:
6090 case Intrinsic::ceil:
6091 case Intrinsic::trunc:
6092 case Intrinsic::rint:
6093 case Intrinsic::nearbyint:
6094 case Intrinsic::round:
6095 case Intrinsic::roundeven:
6109 auto *OffsetConstInt = dyn_cast<ConstantInt>(
Offset);
6110 if (!OffsetConstInt || OffsetConstInt->getBitWidth() > 64)
6114 DL.getIndexTypeSizeInBits(
Ptr->getType()));
6115 if (OffsetInt.
srem(4) != 0)
6122 auto *LoadedCE = dyn_cast<ConstantExpr>(Loaded);
6126 if (LoadedCE->getOpcode() == Instruction::Trunc) {
6127 LoadedCE = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
6132 if (LoadedCE->getOpcode() != Instruction::Sub)
6135 auto *LoadedLHS = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
6136 if (!LoadedLHS || LoadedLHS->getOpcode() != Instruction::PtrToInt)
6138 auto *LoadedLHSPtr = LoadedLHS->getOperand(0);
6142 APInt LoadedRHSOffset;
6145 PtrSym != LoadedRHSSym || PtrOffset != LoadedRHSOffset)
6148 return LoadedLHSPtr;
6156 if (isa<PoisonValue>(Op0) || isa<PoisonValue>(Op1))
6179 if (
C && (
C->isZero() ||
C->isInfinity()))
6188 if (
C &&
C->isNaN())
6189 return ConstantFP::get(Op0->
getType(),
C->makeQuiet());
6207 if (
auto *II = dyn_cast<IntrinsicInst>(Op0))
6208 if (II->getIntrinsicID() == IID)
6217 auto *II = dyn_cast<IntrinsicInst>(Op0);
6225 case Intrinsic::fabs:
6229 case Intrinsic::bswap:
6234 case Intrinsic::bitreverse:
6239 case Intrinsic::ctpop: {
6243 return ConstantInt::get(Op0->
getType(), 1);
6252 case Intrinsic::exp:
6254 if (Call->hasAllowReassoc() &&
6258 case Intrinsic::exp2:
6260 if (Call->hasAllowReassoc() &&
6264 case Intrinsic::exp10:
6266 if (Call->hasAllowReassoc() &&
6270 case Intrinsic::log:
6272 if (Call->hasAllowReassoc() &&
6276 case Intrinsic::log2:
6278 if (Call->hasAllowReassoc() &&
6284 case Intrinsic::log10:
6287 if (Call->hasAllowReassoc() &&
6293 case Intrinsic::experimental_vector_reverse:
6301 case Intrinsic::frexp: {
6325 auto *MM0 = dyn_cast<IntrinsicInst>(Op0);
6330 if (Op1 ==
X || Op1 ==
Y ||
6347 assert((IID == Intrinsic::maxnum || IID == Intrinsic::minnum ||
6348 IID == Intrinsic::maximum || IID == Intrinsic::minimum) &&
6349 "Unsupported intrinsic");
6351 auto *
M0 = dyn_cast<IntrinsicInst>(Op0);
6355 if (!
M0 ||
M0->getIntrinsicID() != IID)
6357 Value *X0 =
M0->getOperand(0);
6358 Value *Y0 =
M0->getOperand(1);
6365 if (X0 == Op1 || Y0 == Op1)
6368 auto *
M1 = dyn_cast<IntrinsicInst>(Op1);
6371 Value *X1 =
M1->getOperand(0);
6372 Value *Y1 =
M1->getOperand(1);
6380 if ((X0 == X1 && Y0 == Y1) || (X0 == Y1 && Y0 == X1))
6391 unsigned BitWidth = ReturnType->getScalarSizeInBits();
6393 case Intrinsic::abs:
6401 case Intrinsic::cttz: {
6407 case Intrinsic::ctlz: {
6415 case Intrinsic::ptrmask: {
6416 if (isa<PoisonValue>(Op0) || isa<PoisonValue>(Op1))
6426 "Invalid mask width");
6443 APInt IrrelevantPtrBits =
6446 Instruction::Or,
C, ConstantInt::get(
C->getType(), IrrelevantPtrBits),
6448 if (
C !=
nullptr &&
C->isAllOnesValue())
6453 case Intrinsic::smax:
6454 case Intrinsic::smin:
6455 case Intrinsic::umax:
6456 case Intrinsic::umin: {
6467 return ConstantInt::get(
6475 return ConstantInt::get(ReturnType, *
C);
6486 auto *MinMax0 = dyn_cast<IntrinsicInst>(Op0);
6487 if (MinMax0 && MinMax0->getIntrinsicID() == IID) {
6489 Value *M00 = MinMax0->getOperand(0), *M01 = MinMax0->getOperand(1);
6490 const APInt *InnerC;
6493 ICmpInst::getNonStrictPredicate(
6513 case Intrinsic::usub_with_overflow:
6514 case Intrinsic::ssub_with_overflow:
6521 case Intrinsic::uadd_with_overflow:
6522 case Intrinsic::sadd_with_overflow:
6527 cast<StructType>(ReturnType),
6532 case Intrinsic::umul_with_overflow:
6533 case Intrinsic::smul_with_overflow:
6543 case Intrinsic::uadd_sat:
6549 case Intrinsic::sadd_sat:
6564 case Intrinsic::usub_sat:
6569 case Intrinsic::ssub_sat:
6577 case Intrinsic::load_relative:
6578 if (
auto *C0 = dyn_cast<Constant>(Op0))
6579 if (
auto *C1 = dyn_cast<Constant>(Op1))
6582 case Intrinsic::powi:
6583 if (
auto *Power = dyn_cast<ConstantInt>(Op1)) {
6585 if (Power->isZero())
6586 return ConstantFP::get(Op0->
getType(), 1.0);
6592 case Intrinsic::ldexp:
6594 case Intrinsic::copysign:
6604 case Intrinsic::is_fpclass: {
6605 if (isa<PoisonValue>(Op0))
6608 uint64_t Mask = cast<ConstantInt>(Op1)->getZExtValue();
6611 return ConstantInt::get(ReturnType,
true);
6613 return ConstantInt::get(ReturnType,
false);
6618 case Intrinsic::maxnum:
6619 case Intrinsic::minnum:
6620 case Intrinsic::maximum:
6621 case Intrinsic::minimum: {
6627 if (isa<Constant>(Op0))
6634 bool PropagateNaN = IID == Intrinsic::minimum || IID == Intrinsic::maximum;
6635 bool IsMin = IID == Intrinsic::minimum || IID == Intrinsic::minnum;
6642 return PropagateNaN ?
propagateNaN(cast<Constant>(Op1)) : Op0;
6648 (
C->isInfinity() || (Call && Call->hasNoInfs() &&
C->isLargest()))) {
6653 if (
C->isNegative() == IsMin &&
6654 (!PropagateNaN || (Call && Call->hasNoNaNs())))
6655 return ConstantFP::get(ReturnType, *
C);
6661 if (
C->isNegative() != IsMin &&
6662 (PropagateNaN || (Call && Call->hasNoNaNs())))
6675 case Intrinsic::vector_extract: {
6677 unsigned IdxN = cast<ConstantInt>(Op1)->getZExtValue();
6681 IdxN == 0 &&
X->getType() == ReturnType)
6697 assert(Call->arg_size() == Args.size());
6698 unsigned NumOperands = Args.size();
6706 case Intrinsic::vscale: {
6710 return ConstantInt::get(
RetTy,
C->getZExtValue());
6718 if (NumOperands == 1)
6721 if (NumOperands == 2)
6727 case Intrinsic::masked_load:
6728 case Intrinsic::masked_gather: {
6729 Value *MaskArg = Args[2];
6730 Value *PassthruArg = Args[3];
6736 case Intrinsic::fshl:
6737 case Intrinsic::fshr: {
6738 Value *Op0 = Args[0], *Op1 = Args[1], *ShAmtArg = Args[2];
6746 return Args[IID == Intrinsic::fshl ? 0 : 1];
6748 const APInt *ShAmtC;
6753 return Args[IID == Intrinsic::fshl ? 0 : 1];
6758 return ConstantInt::getNullValue(
F->getReturnType());
6762 return ConstantInt::getAllOnesValue(
F->getReturnType());
6766 case Intrinsic::experimental_constrained_fma: {
6767 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6769 *FPI->getRoundingMode()))
6773 case Intrinsic::fma:
6774 case Intrinsic::fmuladd: {
6776 RoundingMode::NearestTiesToEven))
6780 case Intrinsic::smul_fix:
6781 case Intrinsic::smul_fix_sat: {
6782 Value *Op0 = Args[0];
6783 Value *Op1 = Args[1];
6784 Value *Op2 = Args[2];
6785 Type *ReturnType =
F->getReturnType();
6790 if (isa<Constant>(Op0))
6804 cast<ConstantInt>(Op2)->getZExtValue());
6810 case Intrinsic::vector_insert: {
6811 Value *Vec = Args[0];
6812 Value *SubVec = Args[1];
6814 Type *ReturnType =
F->getReturnType();
6818 unsigned IdxN = cast<ConstantInt>(
Idx)->getZExtValue();
6823 X->getType() == ReturnType)
6828 case Intrinsic::experimental_constrained_fadd: {
6829 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6831 *FPI->getExceptionBehavior(),
6832 *FPI->getRoundingMode());
6834 case Intrinsic::experimental_constrained_fsub: {
6835 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6837 *FPI->getExceptionBehavior(),
6838 *FPI->getRoundingMode());
6840 case Intrinsic::experimental_constrained_fmul: {
6841 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6843 *FPI->getExceptionBehavior(),
6844 *FPI->getRoundingMode());
6846 case Intrinsic::experimental_constrained_fdiv: {
6847 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6849 *FPI->getExceptionBehavior(),
6850 *FPI->getRoundingMode());
6852 case Intrinsic::experimental_constrained_frem: {
6853 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6855 *FPI->getExceptionBehavior(),
6856 *FPI->getRoundingMode());
6858 case Intrinsic::experimental_constrained_ldexp:
6860 case Intrinsic::experimental_gc_relocate: {
6866 if (isa<UndefValue>(DerivedPtr) || isa<UndefValue>(BasePtr)) {
6870 if (
auto *PT = dyn_cast<PointerType>(GCR.
getType())) {
6874 if (isa<ConstantPointerNull>(DerivedPtr)) {
6889 auto *
F = dyn_cast<Function>(Callee);
6894 ConstantArgs.
reserve(Args.size());
6895 for (
Value *Arg : Args) {
6898 if (isa<MetadataAsValue>(Arg))
6911 assert(Call->arg_size() == Args.size());
6915 if (Call->isMustTailCall())
6920 if (isa<UndefValue>(Callee) || isa<ConstantPointerNull>(Callee))
6926 auto *
F = dyn_cast<Function>(Callee);
6927 if (
F &&
F->isIntrinsic())
6935 assert(isa<ConstrainedFPIntrinsic>(Call));
6954 return ::simplifyFreezeInst(Op0, Q);
6962 if (
auto *PtrOpC = dyn_cast<Constant>(PtrOp))
6968 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
6998 unsigned MaxRecurse) {
6999 assert(
I->getFunction() &&
"instruction should be inserted in a function");
7001 "context instruction should be in the same function");
7005 switch (
I->getOpcode()) {
7010 [](
Value *V) { return cast<Constant>(V); });
7014 case Instruction::FNeg:
7016 case Instruction::FAdd:
7019 case Instruction::Add:
7023 case Instruction::FSub:
7026 case Instruction::Sub:
7030 case Instruction::FMul:
7033 case Instruction::Mul:
7037 case Instruction::SDiv:
7041 case Instruction::UDiv:
7045 case Instruction::FDiv:
7048 case Instruction::SRem:
7050 case Instruction::URem:
7052 case Instruction::FRem:
7055 case Instruction::Shl:
7059 case Instruction::LShr:
7063 case Instruction::AShr:
7067 case Instruction::And:
7069 case Instruction::Or:
7071 case Instruction::Xor:
7073 case Instruction::ICmp:
7075 NewOps[1], Q, MaxRecurse);
7076 case Instruction::FCmp:
7078 NewOps[1],
I->getFastMathFlags(), Q, MaxRecurse);
7079 case Instruction::Select:
7082 case Instruction::GetElementPtr: {
7083 auto *GEPI = cast<GetElementPtrInst>(
I);
7085 ArrayRef(NewOps).slice(1), GEPI->isInBounds(), Q,
7088 case Instruction::InsertValue: {
7093 case Instruction::InsertElement:
7095 case Instruction::ExtractValue: {
7096 auto *EVI = cast<ExtractValueInst>(
I);
7100 case Instruction::ExtractElement:
7102 case Instruction::ShuffleVector: {
7103 auto *SVI = cast<ShuffleVectorInst>(
I);
7105 SVI->getShuffleMask(), SVI->getType(), Q,
7108 case Instruction::PHI:
7110 case Instruction::Call:
7112 cast<CallInst>(
I), NewOps.
back(),
7113 NewOps.
drop_back(1 + cast<CallInst>(
I)->getNumTotalBundleOperands()), Q);
7114 case Instruction::Freeze:
7116#define HANDLE_CAST_INST(num, opc, clas) case Instruction::opc:
7117#include "llvm/IR/Instruction.def"
7118#undef HANDLE_CAST_INST
7121 case Instruction::Alloca:
7124 case Instruction::Load:
7133 "Number of operands should match the instruction!");
7134 return ::simplifyInstructionWithOperands(
I, NewOps, SQ,
RecursionLimit);
7164 bool Simplified =
false;
7171 for (
User *U :
I->users())
7173 Worklist.
insert(cast<Instruction>(U));
7176 I->replaceAllUsesWith(SimpleV);
7178 if (!
I->isEHPad() && !
I->isTerminator() && !
I->mayHaveSideEffects())
7179 I->eraseFromParent();
7191 if (UnsimplifiedUsers)
7192 UnsimplifiedUsers->insert(
I);
7201 for (
User *U :
I->users())
7202 Worklist.
insert(cast<Instruction>(U));
7205 I->replaceAllUsesWith(SimpleV);
7207 if (!
I->isEHPad() && !
I->isTerminator() && !
I->mayHaveSideEffects())
7208 I->eraseFromParent();
7217 assert(
I != SimpleV &&
"replaceAndRecursivelySimplify(X,X) is not valid!");
7218 assert(SimpleV &&
"Must provide a simplified value.");
7226 auto *DT = DTWP ? &DTWP->
getDomTree() :
nullptr;
7228 auto *TLI = TLIWP ? &TLIWP->
getTLI(
F) :
nullptr;
7231 return {
F.getParent()->getDataLayout(), TLI, DT, AC};
7239template <
class T,
class... TArgs>
7242 auto *DT = AM.template getCachedResult<DominatorTreeAnalysis>(
F);
7243 auto *TLI = AM.template getCachedResult<TargetLibraryAnalysis>(
F);
7244 auto *AC = AM.template getCachedResult<AssumptionAnalysis>(
F);
7245 return {
F.getParent()->getDataLayout(), TLI, DT, AC};
7251void InstSimplifyFolder::anchor() {}
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static Value * simplifyFreezeInst(Value *Op0, const SimplifyQuery &Q)
Given operands for a Freeze, see if we can fold the result.
static Value * simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an LShr, see if we can fold the result.
static Value * simplifyUDivInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for a UDiv, see if we can fold the result.
static Value * simplifyShuffleVectorInst(Value *Op0, Value *Op1, ArrayRef< int > Mask, Type *RetTy, const SimplifyQuery &Q, unsigned MaxRecurse)
static Value * foldMinMaxSharedOp(Intrinsic::ID IID, Value *Op0, Value *Op1)
Given a min/max intrinsic, see if it can be removed based on having an operand that is another min/ma...
static Value * simplifyGEPInst(Type *, Value *, ArrayRef< Value * >, bool, const SimplifyQuery &, unsigned)
Given operands for an GetElementPtrInst, see if we can fold the result.
static Value * simplifySubInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for a Sub, see if we can fold the result.
static Value * simplifyICmpWithIntrinsicOnLHS(CmpInst::Predicate Pred, Value *LHS, Value *RHS)
static Value * expandCommutativeBinOp(Instruction::BinaryOps Opcode, Value *L, Value *R, Instruction::BinaryOps OpcodeToExpand, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify binops of form "A op (B op' C)" or the commuted variant by distributing op over op'.
static Constant * foldOrCommuteConstant(Instruction::BinaryOps Opcode, Value *&Op0, Value *&Op1, const SimplifyQuery &Q)
static bool haveNonOverlappingStorage(const Value *V1, const Value *V2)
Return true if V1 and V2 are each the base of some distict storage region [V, object_size(V)] which d...
static Constant * foldConstant(Instruction::UnaryOps Opcode, Value *&Op, const SimplifyQuery &Q)
static Value * handleOtherCmpSelSimplifications(Value *TCmp, Value *FCmp, Value *Cond, const SimplifyQuery &Q, unsigned MaxRecurse)
We know comparison with both branches of select can be simplified, but they are not equal.
static Constant * propagateNaN(Constant *In)
Try to propagate existing NaN values when possible.
static Value * simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an AShr, see if we can fold the result.
static Value * simplifyRelativeLoad(Constant *Ptr, Constant *Offset, const DataLayout &DL)
static Value * simplifyICmpWithDominatingAssume(CmpInst::Predicate Predicate, Value *LHS, Value *RHS, const SimplifyQuery &Q)
static Value * simplifyCmpSelTrueCase(CmpInst::Predicate Pred, Value *LHS, Value *RHS, Value *Cond, const SimplifyQuery &Q, unsigned MaxRecurse)
Simplify comparison with true branch of select.
static Value * simplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
These are simplifications common to SDiv and UDiv.
static Value * simplifyCmpSelOfMaxMin(Value *CmpLHS, Value *CmpRHS, ICmpInst::Predicate Pred, Value *TVal, Value *FVal)
static Value * simplifyPHINode(PHINode *PN, ArrayRef< Value * > IncomingValues, const SimplifyQuery &Q)
See if we can fold the given phi. If not, returns null.
static Value * simplifyICmpWithMinMax(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
simplify integer comparisons where at least one operand of the compare matches an integer min/max idi...
static Value * simplifySelectWithFakeICmpEq(Value *CmpLHS, Value *CmpRHS, ICmpInst::Predicate Pred, Value *TrueVal, Value *FalseVal)
An alternative way to test if a bit is set or not uses sgt/slt instead of eq/ne.
static Value * simplifyCmpInst(unsigned, Value *, Value *, const SimplifyQuery &, unsigned)
Given operands for a CmpInst, see if we can fold the result.
static Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &, unsigned)
Given operands for an ExtractValueInst, see if we can fold the result.
static Value * simplifySelectInst(Value *, Value *, Value *, const SimplifyQuery &, unsigned)
Given operands for a SelectInst, see if we can fold the result.
static Value * simplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an Add, see if we can fold the result.
static Value * threadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
In the case of a comparison with a select instruction, try to simplify the comparison by seeing wheth...
static Value * simplifyUnOp(unsigned, Value *, const SimplifyQuery &, unsigned)
Given the operand for a UnaryOperator, see if we can fold the result.
static Value * simplifyICmpWithBinOpOnLHS(CmpInst::Predicate Pred, BinaryOperator *LBO, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
static Value * simplifyAndCommutative(Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
static Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &SQ, unsigned MaxRecurse)
See if we can compute a simplified version of this instruction.
static bool isIdempotent(Intrinsic::ID ID)
static std::optional< ConstantRange > getRange(Value *V, const InstrInfoQuery &IIQ)
Helper method to get range from metadata or attribute.
static Value * simplifyAndOrOfICmpsWithCtpop(ICmpInst *Cmp0, ICmpInst *Cmp1, bool IsAnd)
Try to simplify and/or of icmp with ctpop intrinsic.
static Value * simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp, ICmpInst *UnsignedICmp, bool IsAnd, const SimplifyQuery &Q)
Commuted variants are assumed to be handled by calling this function again with the parameters swappe...
static Value * tryConstantFoldCall(CallBase *Call, Value *Callee, ArrayRef< Value * > Args, const SimplifyQuery &Q)
static Value * simplifyExtractElementInst(Value *Vec, Value *Idx, const SimplifyQuery &Q, unsigned)
Given operands for an ExtractElementInst, see if we can fold the result.
static Value * simplifyAndOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1, const InstrInfoQuery &IIQ)
static Value * threadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
In the case of a comparison with a PHI instruction, try to simplify the comparison by seeing whether ...
static Value * simplifyIntrinsic(CallBase *Call, Value *Callee, ArrayRef< Value * > Args, const SimplifyQuery &Q)
static bool isPoisonShift(Value *Amount, const SimplifyQuery &Q)
Returns true if a shift by Amount always yields poison.
static APInt stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V, bool AllowNonInbounds=false)
Compute the base pointer and cumulative constant offsets for V.
static Value * simplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF, const SimplifyQuery &Q, unsigned MaxRecurse, fp::ExceptionBehavior ExBehavior, RoundingMode Rounding)
static Value * simplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an LShr or AShr, see if we can fold the result.
static Value * simplifyICmpWithConstant(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const InstrInfoQuery &IIQ)
static Value * simplifySDivInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an SDiv, see if we can fold the result.
static Value * simplifyByDomEq(unsigned Opcode, Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
Test if there is a dominating equivalence condition for the two operands.
static Value * simplifyFPUnOp(unsigned, Value *, const FastMathFlags &, const SimplifyQuery &, unsigned)
Given the operand for a UnaryOperator, see if we can fold the result.
static bool isICmpTrue(ICmpInst::Predicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
Given a predicate and two operands, return true if the comparison is true.
static Value * simplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF, const SimplifyQuery &Q, unsigned MaxRecurse, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FAdd, see if we can fold the result.
static Value * simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1, const SimplifyQuery &Q)
static Value * expandBinOp(Instruction::BinaryOps Opcode, Value *V, Value *OtherOp, Instruction::BinaryOps OpcodeToExpand, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify a binary operator of form "V op OtherOp" where V is "(B0 opex B1)" by distributing 'o...
static Constant * getFalse(Type *Ty)
For a boolean type or a vector of boolean type, return false or a vector with every element false.
static Value * simplifyDivRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
Check for common or similar folds of integer division or integer remainder.
static bool removesFPFraction(Intrinsic::ID ID)
Return true if the intrinsic rounds a floating-point value to an integral floating-point value (not a...
static Value * simplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF, const SimplifyQuery &Q, unsigned, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
static Value * simplifyOrOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1, const InstrInfoQuery &IIQ)
static Value * simplifyMulInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for a Mul, see if we can fold the result.
static Value * simplifyFNegInst(Value *Op, FastMathFlags FMF, const SimplifyQuery &Q, unsigned MaxRecurse)
Given the operand for an FNeg, see if we can fold the result.
static Value * simplifyOrInst(Value *, Value *, const SimplifyQuery &, unsigned)
Given operands for an Or, see if we can fold the result.
static Value * simplifySelectBitTest(Value *TrueVal, Value *FalseVal, Value *X, const APInt *Y, bool TrueWhenUnset)
Try to simplify a select instruction when its condition operand is an integer comparison where one op...
static Value * simplifyAssociativeBinOp(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
Generic simplifications for associative binary operations.
static Value * simplifyICmpWithZero(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Try hard to fold icmp with zero RHS because this is a common case.
static Value * foldSelectWithBinaryOp(Value *Cond, Value *TrueVal, Value *FalseVal)
static Value * simplifyShlInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an Shl, see if we can fold the result.
static Value * threadBinOpOverPHI(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
In the case of a binary operation with an operand that is a PHI instruction, try to simplify the bino...
static Value * simplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF, const SimplifyQuery &Q, unsigned, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
static Value * simplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF, const SimplifyQuery &Q, unsigned MaxRecurse, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FSub, see if we can fold the result.
static bool trySimplifyICmpWithAdds(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const InstrInfoQuery &IIQ)
static Value * simplifyXorInst(Value *, Value *, const SimplifyQuery &, unsigned)
Given operands for a Xor, see if we can fold the result.
static Value * simplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for a URem, see if we can fold the result.
static Value * simplifySelectWithFCmp(Value *Cond, Value *T, Value *F, const SimplifyQuery &Q)
Try to simplify a select instruction when its condition operand is a floating-point comparison.
static Constant * simplifyFPOp(ArrayRef< Value * > Ops, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior, RoundingMode Rounding)
Perform folds that are common to any floating-point operation.
static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI, const DominatorTree *DT, AssumptionCache *AC, SmallSetVector< Instruction *, 8 > *UnsimplifiedUsers=nullptr)
Implementation of recursive simplification through an instruction's uses.
static Value * simplifySelectWithICmpEq(Value *CmpLHS, Value *CmpRHS, Value *TrueVal, Value *FalseVal, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify a select instruction when its condition operand is an integer equality comparison.
static bool isAllocDisjoint(const Value *V)
Return true if the underlying object (storage) must be disjoint from storage returned by any noalias ...
static Constant * getTrue(Type *Ty)
For a boolean type or a vector of boolean type, return true or a vector with every element true.
static bool isDivZero(Value *X, Value *Y, const SimplifyQuery &Q, unsigned MaxRecurse, bool IsSigned)
Return true if we can simplify X / Y to 0.
static Value * simplifyCmpSelCase(CmpInst::Predicate Pred, Value *LHS, Value *RHS, Value *Cond, const SimplifyQuery &Q, unsigned MaxRecurse, Constant *TrueOrFalse)
Simplify comparison with true or false branch of select: sel = select i1 cond, i32 tv,...
static Value * simplifyLdexp(Value *Op0, Value *Op1, const SimplifyQuery &Q, bool IsStrict)
static Value * simplifyLogicOfAddSub(Value *Op0, Value *Op1, Instruction::BinaryOps Opcode)
Given a bitwise logic op, check if the operands are add/sub with a common source value and inverted c...
static Value * simplifyOrLogic(Value *X, Value *Y)
static Type * getCompareTy(Value *Op)
static Value * simplifyCastInst(unsigned, Value *, Type *, const SimplifyQuery &, unsigned)
static Value * simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1, const SimplifyQuery &Q)
static Value * simplifyBinOp(unsigned, Value *, Value *, const SimplifyQuery &, unsigned)
Given operands for a BinaryOperator, see if we can fold the result.
static Value * simplifyInsertValueInst(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q, unsigned)
Given operands for an InsertValueInst, see if we can fold the result.
static Value * simplifyAndInst(Value *, Value *, const SimplifyQuery &, unsigned)
Given operands for an And, see if we can fold the result.
static Value * foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1, int MaskVal, Value *RootVec, unsigned MaxRecurse)
For the given destination element of a shuffle, peek through shuffles to match a root vector source o...
static Constant * computePointerICmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
static Value * simplifyAndOrOfFCmps(const SimplifyQuery &Q, FCmpInst *LHS, FCmpInst *RHS, bool IsAnd)
static Value * simplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an FCmpInst, see if we can fold the result.
static Value * simplifyAndOrOfCmps(const SimplifyQuery &Q, Value *Op0, Value *Op1, bool IsAnd)
static Value * simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp, const SimplifyQuery &Q, bool AllowRefinement, SmallVectorImpl< Instruction * > *DropFlags, unsigned MaxRecurse)
static Value * threadBinOpOverSelect(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
In the case of a binary operation with a select instruction as an operand, try to simplify the binop ...
static Constant * computePointerDifference(const DataLayout &DL, Value *LHS, Value *RHS)
Compute the constant difference between two pointer values.
static Value * simplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an SRem, see if we can fold the result.
static Value * simplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF, const SimplifyQuery &Q, unsigned MaxRecurse, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given the operands for an FMul, see if we can fold the result.
static Value * simplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an ICmpInst, see if we can fold the result.
static Value * simplifyICmpOfBools(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Fold an icmp when its operands have i1 scalar type.
static Value * simplifyAndOrOfICmpsWithConstants(ICmpInst *Cmp0, ICmpInst *Cmp1, bool IsAnd)
Test if a pair of compares with a shared operand and 2 constants has an empty set intersection,...
static Value * simplifyAndOrWithICmpEq(unsigned Opcode, Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
static Value * simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
TODO: A large part of this logic is duplicated in InstCombine's foldICmpBinOp().
static Value * simplifyShift(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, bool IsNSW, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an Shl, LShr or AShr, see if we can fold the result.
static Value * extractEquivalentCondition(Value *V, CmpInst::Predicate Pred, Value *LHS, Value *RHS)
Rummage around inside V looking for something equivalent to the comparison "LHS Pred RHS".
static Value * simplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
These are simplifications common to SRem and URem.
static bool valueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT)
Does the given value dominate the specified phi node?
static Value * simplifyCmpSelFalseCase(CmpInst::Predicate Pred, Value *LHS, Value *RHS, Value *Cond, const SimplifyQuery &Q, unsigned MaxRecurse)
Simplify comparison with false branch of select.
static Value * simplifySelectWithICmpCond(Value *CondVal, Value *TrueVal, Value *FalseVal, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify a select instruction when its condition operand is an integer comparison.
static bool isSameCompare(Value *V, CmpInst::Predicate Pred, Value *LHS, Value *RHS)
isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"?
static Value * foldMinimumMaximumSharedOp(Intrinsic::ID IID, Value *Op0, Value *Op1)
Given a min/max intrinsic, see if it can be removed based on having an operand that is another min/ma...
static Value * simplifyUnaryIntrinsic(Function *F, Value *Op0, const SimplifyQuery &Q, const CallBase *Call)
This header provides classes for managing per-loop analyses.
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file implements a set that has insertion order iteration characteristics.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static SymbolRef::Type getType(const Symbol *Sym)
static const uint32_t IV[8]
Class for arbitrary precision integers.
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
unsigned getActiveBits() const
Compute the number of active bits in the value.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
APInt urem(const APInt &RHS) const
Unsigned remainder operation.
void setSignBit()
Set the sign bit to 1.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
bool intersects(const APInt &RHS) const
This operation tests if there are any pairs of corresponding bits between this APInt and RHS that are...
unsigned countr_zero() const
Count the number of trailing zero bits.
bool isNonPositive() const
Determine if this APInt Value is non-positive (<= 0).
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
bool isStrictlyPositive() const
Determine if this APInt Value is strictly positive (> 0).
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
bool getBoolValue() const
Convert APInt to a boolean value.
APInt srem(const APInt &RHS) const
Function for signed remainder operation.
bool isMask(unsigned numBits) const
bool isMaxSignedValue() const
Determine if this is the largest signed value.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
bool isSignBitSet() const
Determine if sign bit of this APInt is set.
bool slt(const APInt &RHS) const
Signed less than comparison.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
bool isOne() const
Determine if this is a value of 1.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
an instruction to allocate memory on the stack
A container for analyses that lazily runs them and caches their results.
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
const T & back() const
back - Get the last element.
size_t size() const
size - Get the array size.
ArrayRef< T > drop_back(size_t N=1) const
Drop the last N elements of the array.
bool empty() const
empty - Check if the array is empty.
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
An immutable pass that tracks lazily created AssumptionCache objects.
AssumptionCache & getAssumptionCache(Function &F)
Get the cached assumptions for a function.
A cache of @llvm.assume calls within a function.
MutableArrayRef< ResultElem > assumptionsFor(const Value *V)
Access the list of assumptions which affect this value.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
BinaryOps getOpcode() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Value * getArgOperand(unsigned i) const
This class represents a function call, abstracting a target machine's calling convention.
static unsigned isEliminableCastPair(Instruction::CastOps firstOpcode, Instruction::CastOps secondOpcode, Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy, Type *DstIntPtrTy)
Determine how a pair of casts can be eliminated, if they can be at all.
This class is the base class for the comparison instructions.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Predicate getStrictPredicate() const
For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
bool isFalseWhenEqual() const
This is just a convenience.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ ICMP_ULT
unsigned less than
@ ICMP_SGE
signed greater or equal
@ ICMP_ULE
unsigned less or equal
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
bool isTrueWhenEqual() const
This is just a convenience.
bool isFPPredicate() const
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Predicate getPredicate() const
Return the predicate for this instruction.
static bool isUnordered(Predicate predicate)
Determine if the predicate is an unordered operation.
bool isIntPredicate() const
static Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getExtractElement(Constant *Vec, Constant *Idx, Type *OnlyIfReducedTy=nullptr)
static Constant * getNot(Constant *C)
static Constant * getInsertElement(Constant *Vec, Constant *Elt, Constant *Idx, Type *OnlyIfReducedTy=nullptr)
static Constant * getICmp(unsigned short pred, Constant *LHS, Constant *RHS, bool OnlyIfReduced=false)
get* - Return some common constants without having to specify the full Instruction::OPCODE identifier...
static Constant * getShuffleVector(Constant *V1, Constant *V2, ArrayRef< int > Mask, Type *OnlyIfReducedTy=nullptr)
static bool isSupportedGetElementPtr(const Type *SrcElemTy)
Whether creating a constant expression for this getelementptr type is supported.
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, bool InBounds=false, std::optional< unsigned > InRangeIndex=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
static Constant * getBinOpAbsorber(unsigned Opcode, Type *Ty)
Return the absorbing element for the given binary operation, i.e.
static Constant * getBinOpIdentity(unsigned Opcode, Type *Ty, bool AllowRHSConstant=false, bool NSZ=false)
Return the identity constant for a binary opcode.
static Constant * getZero(Type *Ty, bool Negative=false)
static Constant * getNegativeZero(Type *Ty)
static Constant * getNaN(Type *Ty, bool Negative=false, uint64_t Payload=0)
This is the shared class of boolean and integer constants.
static ConstantInt * getTrue(LLVMContext &Context)
static ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
static ConstantInt * getBool(LLVMContext &Context, bool V)
static ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
This class represents a range of values.
const APInt * getSingleElement() const
If this set contains a single element, return it, otherwise return null.
bool isFullSet() const
Return true if this set contains all of the elements possible for this data-type.
bool isEmptySet() const
Return true if this set contains no members.
static ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
ConstantRange inverse() const
Return a new range that is the logical not of the current set.
bool contains(const APInt &Val) const
Return true if the specified value is in the set.
static Constant * get(StructType *T, ArrayRef< Constant * > V)
static Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
static Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
static Constant * getAllOnesValue(Type *Ty)
bool isAllOnesValue() const
Return true if this is the value that would be returned by getAllOnesValue.
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
bool isNaN() const
Return true if this is a floating-point NaN constant or a vector floating-point constant with all NaN...
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
unsigned getPointerSizeInBits(unsigned AS=0) const
Layout pointer size, in bits FIXME: The defaults need to be removed once all of the backends/clients ...
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
unsigned getIndexTypeSizeInBits(Type *Ty) const
Layout size of the index used in GEP calculation.
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
unsigned getIndexSizeInBits(unsigned AS) const
Size in bits of index used for address calculation in getelementptr.
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
Legacy analysis pass which computes a DominatorTree.
DominatorTree & getDomTree()
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
This instruction compares its operands according to the predicate given to the constructor.
Convenience struct for specifying and reasoning about fast-math flags.
bool noSignedZeros() const
bool allowReassoc() const
Flag queries.
Represents calls to the gc.relocate intrinsic.
Value * getBasePtr() const
Value * getDerivedPtr() const
static Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
This instruction compares its operands according to the predicate given to the constructor.
static bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
bool isEquality() const
Return true if this predicate is either EQ or NE.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
This instruction inserts a struct field or array element value into an aggregate value.
bool hasNoSignedZeros() const LLVM_READONLY
Determine whether the no-signed-zeros flag is set.
bool isAssociative() const LLVM_READONLY
Return true if the instruction is associative:
bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
const Function * getFunction() const
Return the function this instruction belongs to.
An instruction for reading from memory.
bool isVolatile() const
Return true if this is a load from a volatile memory location.
static APInt getSaturationPoint(Intrinsic::ID ID, unsigned numBits)
Min/max intrinsics are monotonic, they operate on a fixed-bitwidth values, so there is a certain thre...
ICmpInst::Predicate getPredicate() const
Returns the comparison predicate underlying the intrinsic.
op_range incoming_values()
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Pass interface - Implemented by all 'passes'.
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
This class represents a cast from a pointer to an integer.
This class represents a sign extension of integer types.
This class represents the LLVM 'select' instruction.
size_type size() const
Determine the number of elements in the SetVector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
static void commuteShuffleMask(MutableArrayRef< int > Mask, unsigned InVecNumElts)
Change values in a shuffle permute mask assuming the two vector operands of length InVecNumElts have ...
A SetVector that performs no allocations if smaller than a certain size.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void assign(size_type NumElts, ValueParamT Elt)
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
TargetLibraryInfo & getTLI(const Function &F)
Provides information about what library functions are available for the current target.
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
bool isPointerTy() const
True if this is an instance of PointerType.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
bool isScalableTy() const
Return true if this is a type whose size is a known multiple of vscale.
static IntegerType * getInt32Ty(LLVMContext &C)
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr) const
Accumulate the constant offset this value has compared to a base pointer.
LLVMContext & getContext() const
All values hold a context through their type.
This class represents zero extension of integer types.
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
class_match< PoisonValue > m_Poison()
Match an arbitrary poison constant.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
specific_intval< false > m_SpecificInt(APInt V)
Match a specific integer value or vector with all elements equal to the value.
cst_pred_ty< is_negative > m_Negative()
Match an integer or vector of negative values.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
apfloat_match m_APFloatAllowUndef(const APFloat *&Res)
Match APFloat while allowing undefs in splat vector constants.
BinaryOp_match< LHS, RHS, Instruction::FMul, true > m_c_FMul(const LHS &L, const RHS &R)
Matches FMul with LHS and RHS in either order.
cst_pred_ty< is_sign_mask > m_SignMask()
Match an integer or vector with only the sign bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
cstfp_pred_ty< is_inf > m_Inf()
Match a positive or negative infinity FP constant.
m_Intrinsic_Ty< Opnd0 >::Ty m_BitReverse(const Opnd0 &Op0)
apint_match m_APIntAllowUndef(const APInt *&Res)
Match APInt while allowing undefs in splat vector constants.
BinaryOp_match< LHS, RHS, Instruction::FSub > m_FSub(const LHS &L, const RHS &R)
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
BinaryOp_match< cstfp_pred_ty< is_any_zero_fp >, RHS, Instruction::FSub > m_FNegNSZ(const RHS &X)
Match 'fneg X' as 'fsub +-0.0, X'.
BinaryOp_match< LHS, RHS, Instruction::URem > m_URem(const LHS &L, const RHS &R)
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
bool match(Val *V, const Pattern &P)
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
cstfp_pred_ty< is_any_zero_fp > m_AnyZeroFP()
Match a floating-point negative zero or positive zero.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
cstfp_pred_ty< is_neg_zero_fp > m_NegZeroFP()
Match a floating-point negative zero.
specific_fpval m_SpecificFP(double V)
Match a specific floating point value or vector with all elements equal to the value.
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > m_SMin(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, FCmpInst, FCmpInst::Predicate > m_FCmp(FCmpInst::Predicate &Pred, const LHS &L, const RHS &R)
CastOperator_match< OpTy, Instruction::Trunc > m_Trunc(const OpTy &Op)
Matches Trunc.
m_Intrinsic_Ty< Opnd0 >::Ty m_Sqrt(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)
Matches an Xor with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
cst_pred_ty< is_zero_int > m_ZeroInt()
Match an integer 0 or a vector with all elements equal to 0.
CmpClass_match< LHS, RHS, ICmpInst, ICmpInst::Predicate > m_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R)
BinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub > m_Neg(const ValTy &V)
Matches a 'Neg' as 'sub 0, V'.
match_combine_and< class_match< Constant >, match_unless< constantexpr_match > > m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoSignedWrap > m_NSWShl(const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWShl(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWMul(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty > m_UMax(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst, ICmpInst::Predicate, true > m_c_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R)
Matches an ICmp with a predicate over LHS and RHS in either order.
specific_fpval m_FPOne()
Match a float 1.0 or vector with all elements equal to 1.0.
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches a Add with LHS and RHS in either order.
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShl(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty, true > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty, true > > > m_c_MaxOrMin(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty > m_SMax(const LHS &L, const RHS &R)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap > m_NSWAdd(const LHS &L, const RHS &R)
CastInst_match< OpTy, SIToFPInst > m_SIToFP(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
Exact_match< T > m_Exact(const T &SubPattern)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
cstfp_pred_ty< is_pos_zero_fp > m_PosZeroFP()
Match a floating-point positive zero.
BinaryOp_match< LHS, RHS, Instruction::FAdd, true > m_c_FAdd(const LHS &L, const RHS &R)
Matches FAdd with LHS and RHS in either order.
LogicalOp_match< LHS, RHS, Instruction::And, true > m_c_LogicalAnd(const LHS &L, const RHS &R)
Matches L && R with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
NotForbidUndef_match< ValTy > m_NotForbidUndef(const ValTy &V)
Matches a bitwise 'not' as 'xor V, -1' or 'xor -1, V'.
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
apfloat_match m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShr(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)
cstfp_pred_ty< is_nan > m_NaN()
Match an arbitrary NaN constant.
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_BSwap(const Opnd0 &Op0)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
LogicalOp_match< LHS, RHS, Instruction::Or, true > m_c_LogicalOr(const LHS &L, const RHS &R)
Matches L || R with LHS and RHS in either order.
ThreeOps_match< Val_t, Elt_t, Idx_t, Instruction::InsertElement > m_InsertElt(const Val_t &Val, const Elt_t &Elt, const Idx_t &Idx)
Matches InsertElementInst.
ElementWiseBitCast_match< OpTy > m_ElementWiseBitCast(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::Mul, true > m_c_Mul(const LHS &L, const RHS &R)
Matches a Mul with LHS and RHS in either order.
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoSignedWrap > m_NSWMul(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > m_UMin(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
ExceptionBehavior
Exception behavior used for floating point operations.
@ ebStrict
This corresponds to "fpexcept.strict".
@ ebIgnore
This corresponds to "fpexcept.ignore".
This is an optimization pass for GlobalISel generic memory operations.
Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
Value * simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q)
Given operands for an AShr, fold the result or return null.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given value is known to be non-zero when defined.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Value * simplifyFMulInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FMul, fold the result or return null.
bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI, const DominatorTree *DT=nullptr, bool AllowEphemerals=false)
Return true if it is valid to use the assumptions provided by an assume intrinsic,...
bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
Constant * ConstantFoldSelectInstruction(Constant *Cond, Constant *V1, Constant *V2)
Attempt to constant fold a select instruction with the specified operands.
Value * simplifyFreezeInst(Value *Op, const SimplifyQuery &Q)
Given an operand for a Freeze, see if we can fold the result.
bool MaskedValueIsZero(const Value *V, const APInt &Mask, const SimplifyQuery &DL, unsigned Depth=0)
Return true if 'V & Mask' is known to be zero.
bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW=false)
Return true if the two given values are negation.
bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS, bool &TrueIfSigned)
Given an exploded icmp instruction, return true if the comparison only checks the sign bit.
bool canConstantFoldCallTo(const CallBase *Call, const Function *F)
canConstantFoldCallTo - Return true if it's even possible to fold a call to the specified function.
APInt getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth)
Return the minimum or maximum constant value for the specified integer min/max flavor and type.
Value * simplifySDivInst(Value *LHS, Value *RHS, bool IsExact, const SimplifyQuery &Q)
Given operands for an SDiv, fold the result or return null.
Constant * ConstantFoldGetElementPtr(Type *Ty, Constant *C, bool InBounds, std::optional< unsigned > InRangeIndex, ArrayRef< Value * > Idxs)
Value * simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q)
Given operand for a UnaryOperator, fold the result or return null.
bool isDefaultFPEnvironment(fp::ExceptionBehavior EB, RoundingMode RM)
Returns true if the exception handling behavior and rounding mode match what is used in the default f...
Constant * ConstantFoldFPInstOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL, const Instruction *I)
Attempt to constant fold a floating point binary operation with the specified operands,...
Value * simplifyMulInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for a Mul, fold the result or return null.
bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV, APInt &Offset, const DataLayout &DL, DSOLocalEquivalent **DSOEquiv=nullptr)
If this constant is a constant offset from a global, return the global and the constant.
Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &Q)
Like simplifyInstruction but the operands of I are replaced with NewOps.
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments and pointer casts from the specified value,...
Value * simplifyCall(CallBase *Call, Value *Callee, ArrayRef< Value * > Args, const SimplifyQuery &Q)
Given a callsite, callee, and arguments, fold the result or return null.
Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given value is known to have exactly one bit set when defined.
bool canRoundingModeBe(RoundingMode RM, RoundingMode QRM)
Returns true if the rounding mode RM may be QRM at compile time or at run time.
bool isNoAliasCall(const Value *V)
Return true if this pointer is returned by a noalias function.
Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF, bool Ordered=false)
Return the canonical comparison predicate for the specified minimum/maximum flavor.
Value * simplifyShuffleVectorInst(Value *Op0, Value *Op1, ArrayRef< int > Mask, Type *RetTy, const SimplifyQuery &Q)
Given operands for a ShuffleVectorInst, fold the result or return null.
Value * simplifyOrInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an Or, fold the result or return null.
Value * simplifyXorInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an Xor, fold the result or return null.
ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
ConstantRange computeConstantRange(const Value *V, bool ForSigned, bool UseInstrInfo=true, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Determine the possible constant range of an integer or vector of integer value.
Constant * ConstantFoldExtractValueInstruction(Constant *Agg, ArrayRef< unsigned > Idxs)
Attempt to constant fold an extractvalue instruction with the specified operands and indices.
bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates memory (either malloc,...
Value * simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty, const SimplifyQuery &Q)
Given operands for a CastInst, fold the result or return null.
Constant * ConstantFoldCall(const CallBase *Call, Function *F, ArrayRef< Constant * > Operands, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldCall - Attempt to constant fold a call to the specified function with the specified argum...
Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
unsigned M1(unsigned Val)
Value * simplifySubInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for a Sub, fold the result or return null.
Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)
Wrapper function around std::transform to apply a function to a range and store the result elsewhere.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL, const TargetLibraryInfo *TLI, ObjectSizeOpts Opts={})
Compute the size of the object pointed by Ptr.
bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned el...
Constant * ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty, const DataLayout &DL)
If C is a uniform value where all bits are the same (either all zero, all ones, all undef or all pois...
SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF)
Return the inverse minimum/maximum flavor of the specified flavor.
bool replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, SmallSetVector< Instruction *, 8 > *UnsimplifiedUsers=nullptr)
Replace all uses of 'I' with 'SimpleV' and simplify the uses recursively.
Constant * ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op, const DataLayout &DL)
Attempt to constant fold a unary operation with the specified operand.
SelectPatternFlavor
Specific patterns of select instructions we can match.
Value * simplifyShlInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for a Shl, fold the result or return null.
Value * simplifyFNegInst(Value *Op, FastMathFlags FMF, const SimplifyQuery &Q)
Given operand for an FNeg, fold the result or return null.
Value * simplifyFSubInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FSub, fold the result or return null.
bool impliesPoison(const Value *ValAssumedPoison, const Value *V)
Return true if V is poison given that ValAssumedPoison is already poison.
Constant * ConstantFoldInstOperands(Instruction *I, ArrayRef< Constant * > Ops, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstOperands - Attempt to constant fold an instruction with the specified operands.
Value * simplifyFRemInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FRem, fold the result or return null.
Value * simplifyFAddInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FAdd, fold the result or return null.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
bool PointerMayBeCaptured(const Value *V, bool ReturnCaptures, bool StoreCaptures, unsigned MaxUsesToExplore=0)
PointerMayBeCaptured - Return true if this pointer value may be captured by the enclosing function (w...
Value * simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q)
Given operands for a LShr, fold the result or return null.
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
Value * simplifyAndInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an And, fold the result or return null.
Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an ExtractValueInst, fold the result or return null.
Value * simplifyInsertValueInst(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an InsertValueInst, fold the result or return null.
Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
Value * simplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an ICmpInst, fold the result or return null.
Value * simplifyFDivInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FDiv, fold the result or return null.
constexpr int PoisonMaskElem
Value * simplifyLoadInst(LoadInst *LI, Value *PtrOp, const SimplifyQuery &Q)
Given a load instruction and its pointer operand, fold the result or return null.
Value * simplifyFMAFMul(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for the multiplication of a FMA, fold the result or return null.
void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
Value * simplifyConstrainedFPCall(CallBase *Call, const SimplifyQuery &Q)
Given a constrained FP intrinsic call, tries to compute its simplified version.
Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given values are known to be non-equal when defined.
@ Or
Bitwise or logical OR of integers.
Value * simplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a CmpInst, fold the result or return null.
Value * findScalarElement(Value *V, unsigned EltNo)
Given a vector and an element number, see if the scalar value is already around as a register,...
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
Value * simplifyUDivInst(Value *LHS, Value *RHS, bool IsExact, const SimplifyQuery &Q)
Given operands for a UDiv, fold the result or return null.
Value * simplifyBinaryIntrinsic(Intrinsic::ID IID, Type *ReturnType, Value *Op0, Value *Op1, const SimplifyQuery &Q, const CallBase *Call)
Given operands for a BinaryIntrinsic, fold the result or return null.
RoundingMode
Rounding mode.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
unsigned M0(unsigned Val)
Value * simplifyInsertElementInst(Value *Vec, Value *Elt, Value *Idx, const SimplifyQuery &Q)
Given operands for an InsertElement, fold the result or return null.
constexpr unsigned BitWidth
SelectPatternResult matchDecomposedSelectPattern(CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Determine the pattern that a select with the given compare as its predicate and given values as its t...
Value * simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp, const SimplifyQuery &Q, bool AllowRefinement, SmallVectorImpl< Instruction * > *DropFlags=nullptr)
See if V simplifies when its operand Op is replaced with RepOp.
bool maskIsAllZeroOrUndef(Value *Mask)
Given a mask vector of i1, Return true if all of the elements of this predicate mask are known to be ...
std::pair< Value *, FPClassTest > fcmpToClassTest(CmpInst::Predicate Pred, const Function &F, Value *LHS, Value *RHS, bool LookThroughSrc=true)
Returns a pair of values, which if passed to llvm.is.fpclass, returns the same result as an fcmp with...
Value * simplifySRemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an SRem, fold the result or return null.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
std::optional< bool > computeKnownFPSignBit(const Value *V, unsigned Depth, const SimplifyQuery &SQ)
Return false if we can prove that the specified FP value's sign bit is 0.
unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return the number of times the sign bit of the register is replicated into the other bits.
bool decomposeBitTestICmp(Value *LHS, Value *RHS, CmpInst::Predicate &Pred, Value *&X, APInt &Mask, bool LookThroughTrunc=true)
Decompose an icmp into the form ((X & Mask) pred 0) if possible.
bool cannotBeNegativeZero(const Value *V, unsigned Depth, const SimplifyQuery &SQ)
Return true if we can prove that the specified FP value is never equal to -0.0.
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
Constant * ConstantFoldInsertValueInstruction(Constant *Agg, Constant *Val, ArrayRef< unsigned > Idxs)
ConstantFoldInsertValueInstruction - Attempt to constant fold an insertvalue instruction with the spe...
Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset, const DataLayout &DL)
Return the value that a load from C with offset Offset would produce if it is constant and determinab...
bool isKnownNeverNaN(const Value *V, unsigned Depth, const SimplifyQuery &SQ)
Return true if the floating-point scalar value is not a NaN or if the floating-point vector value has...
std::optional< bool > isImpliedByDomCondition(const Value *Cond, const Instruction *ContextI, const DataLayout &DL)
Return the boolean condition value in the context of the given instruction if it is known based on do...
bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
KnownFPClass computeKnownFPClass(const Value *V, const APInt &DemandedElts, FPClassTest InterestedClasses, unsigned Depth, const SimplifyQuery &SQ)
Determine which floating-point classes are valid for V, and return them in KnownFPClass bit sets.
Constant * ConstantFoldIntegerCast(Constant *C, Type *DestTy, bool IsSigned, const DataLayout &DL)
Constant fold a zext, sext or trunc, depending on IsSigned and whether the DestTy is wider or narrowe...
const SimplifyQuery getBestSimplifyQuery(Pass &, Function &)
Value * simplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q)
Given operands for an FCmpInst, fold the result or return null.
Value * simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef< Value * > Indices, bool InBounds, const SimplifyQuery &Q)
Given operands for a GetElementPtrInst, fold the result or return null.
bool isCheckForZeroAndMulWithOverflow(Value *Op0, Value *Op1, bool IsAnd, Use *&Y)
Match one of the patterns up to the select/logic op: Op0 = icmp ne i4 X, 0 Agg = call { i4,...
bool canIgnoreSNaN(fp::ExceptionBehavior EB, FastMathFlags FMF)
Returns true if the possibility of a signaling NaN can be safely ignored.
Value * simplifyURemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a URem, fold the result or return null.
Value * simplifyExtractElementInst(Value *Vec, Value *Idx, const SimplifyQuery &Q)
Given operands for an ExtractElementInst, fold the result or return null.
Value * simplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal, const SimplifyQuery &Q)
Given operands for a SelectInst, fold the result or return null.
std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This callback is used in conjunction with PointerMayBeCaptured.
virtual void tooManyUses()=0
tooManyUses - The depth of traversal has breached a limit.
virtual bool captured(const Use *U)=0
captured - Information about the pointer was captured by the user of use U.
Incoming for lane mask phi as machine instruction, incoming register Reg and incoming block Block are...
InstrInfoQuery provides an interface to query additional information for instructions like metadata o...
bool isExact(const BinaryOperator *Op) const
MDNode * getMetadata(const Instruction *I, unsigned KindID) const
bool hasNoSignedWrap(const InstT *Op) const
bool hasNoUnsignedWrap(const InstT *Op) const
bool isNonNegative() const
Returns true if this value is known to be non-negative.
bool isZero() const
Returns true if value is all zero.
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
bool hasConflict() const
Returns true if there is conflicting information.
unsigned getBitWidth() const
Get the bit width of this value.
unsigned countMaxActiveBits() const
Returns the maximum number of bits needed to represent all possible unsigned values with these known ...
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
bool isNegative() const
Returns true if this value is known to be negative.
static KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
static constexpr FPClassTest OrderedLessThanZeroMask
std::optional< bool > SignBit
std::nullopt if the sign bit is unknown, true if the sign bit is definitely set or false if the sign ...
bool isKnownNeverNaN() const
Return true if it's known this can never be a nan.
bool isKnownNever(FPClassTest Mask) const
Return true if it's known this can never be one of the mask entries.
bool cannotBeOrderedLessThanZero() const
Return true if we can prove that the analyzed floating-point value is either NaN or never less than -...
The adaptor from a function pass to a loop pass computes these analyses and makes them available to t...
Various options to control the behavior of getObjectSize.
bool NullIsUnknownSize
If this is true, null pointers in address space 0 will be treated as though they can't be evaluated.
Mode EvalMode
How we want to evaluate this object's size.
SelectPatternFlavor Flavor
static bool isMinOrMax(SelectPatternFlavor SPF)
When implementing this min/max pattern as fcmp; select, does the fcmp have to be ordered?
SimplifyQuery getWithInstruction(const Instruction *I) const
bool isUndefValue(Value *V) const
If CanUseUndef is true, returns whether V is undef.
const TargetLibraryInfo * TLI
SimplifyQuery getWithoutUndef() const