49#define DEBUG_TYPE "instsimplify"
86 if (
auto *BO = dyn_cast<BinaryOperator>(
Cond))
87 BinOpCode = BO->getOpcode();
92 if (BinOpCode == BinaryOperator::Or) {
93 ExpectedPred = ICmpInst::ICMP_NE;
94 }
else if (BinOpCode == BinaryOperator::And) {
95 ExpectedPred = ICmpInst::ICMP_EQ;
116 Pred1 != Pred2 || Pred1 != ExpectedPred)
119 if (
X == TrueVal ||
X == FalseVal ||
Y == TrueVal ||
Y == FalseVal)
120 return BinOpCode == BinaryOperator::Or ? TrueVal : FalseVal;
136 CmpInst *Cmp = dyn_cast<CmpInst>(V);
140 Value *CLHS = Cmp->getOperand(0), *CRHS = Cmp->getOperand(1);
141 if (CPred == Pred && CLHS ==
LHS && CRHS ==
RHS)
157 if (SimplifiedCmp ==
Cond) {
165 return SimplifiedCmp;
172 unsigned MaxRecurse) {
181 unsigned MaxRecurse) {
191 unsigned MaxRecurse) {
228 if (
I->getParent()->isEntryBlock() && !isa<InvokeInst>(
I) &&
241 auto *
B = dyn_cast<BinaryOperator>(V);
242 if (!
B ||
B->getOpcode() != OpcodeToExpand)
244 Value *B0 =
B->getOperand(0), *B1 =
B->getOperand(1);
255 if ((L == B0 && R == B1) ||
276 unsigned MaxRecurse) {
293 unsigned MaxRecurse) {
396 unsigned MaxRecurse) {
402 if (isa<SelectInst>(
LHS)) {
403 SI = cast<SelectInst>(
LHS);
405 assert(isa<SelectInst>(
RHS) &&
"No select instruction operand!");
406 SI = cast<SelectInst>(
RHS);
433 if (TV == SI->getTrueValue() && FV == SI->getFalseValue())
439 if ((FV && !TV) || (TV && !FV)) {
442 Instruction *Simplified = dyn_cast<Instruction>(FV ? FV : TV);
443 if (Simplified && Simplified->getOpcode() ==
unsigned(Opcode) &&
444 !Simplified->hasPoisonGeneratingFlags()) {
448 Value *UnsimplifiedBranch = FV ? SI->getTrueValue() : SI->getFalseValue();
449 Value *UnsimplifiedLHS = SI ==
LHS ? UnsimplifiedBranch :
LHS;
450 Value *UnsimplifiedRHS = SI ==
LHS ?
RHS : UnsimplifiedBranch;
451 if (Simplified->getOperand(0) == UnsimplifiedLHS &&
452 Simplified->getOperand(1) == UnsimplifiedRHS)
454 if (Simplified->isCommutative() &&
455 Simplified->getOperand(1) == UnsimplifiedLHS &&
456 Simplified->getOperand(0) == UnsimplifiedRHS)
475 unsigned MaxRecurse) {
481 if (!isa<SelectInst>(
LHS)) {
485 assert(isa<SelectInst>(
LHS) &&
"Not comparing with a select instruction!");
488 Value *TV = SI->getTrueValue();
489 Value *FV = SI->getFalseValue();
521 unsigned MaxRecurse) {
527 if (isa<PHINode>(
LHS)) {
528 PI = cast<PHINode>(
LHS);
533 assert(isa<PHINode>(
RHS) &&
"No PHI instruction operand!");
534 PI = cast<PHINode>(
RHS);
541 Value *CommonValue =
nullptr;
554 if (!V || (CommonValue && V != CommonValue))
573 if (!isa<PHINode>(
LHS)) {
577 assert(isa<PHINode>(
LHS) &&
"Not comparing with a phi instruction!");
585 Value *CommonValue =
nullptr;
599 if (!V || (CommonValue && V != CommonValue))
610 if (
auto *CLHS = dyn_cast<Constant>(Op0)) {
611 if (
auto *CRHS = dyn_cast<Constant>(Op1)) {
615 case Instruction::FAdd:
616 case Instruction::FSub:
617 case Instruction::FMul:
618 case Instruction::FDiv:
619 case Instruction::FRem:
620 if (Q.
CxtI !=
nullptr)
641 if (isa<PoisonValue>(Op1))
704 return ::simplifyAddInst(Op0, Op1, IsNSW, IsNUW, Query,
RecursionLimit);
717 bool AllowNonInbounds =
false) {
718 assert(V->getType()->isPtrOrPtrVectorTy());
721 V = V->stripAndAccumulateConstantOffsets(
DL,
Offset, AllowNonInbounds);
724 return Offset.sextOrTrunc(
DL.getIndexTypeSizeInBits(V->getType()));
744 if (
auto *VecTy = dyn_cast<VectorType>(
LHS->
getType()))
759 std::optional<bool> Imp =
764 case Instruction::Sub:
765 case Instruction::Xor:
766 case Instruction::URem:
767 case Instruction::SRem:
770 case Instruction::SDiv:
771 case Instruction::UDiv:
772 return ConstantInt::get(Ty, 1);
774 case Instruction::And:
775 case Instruction::Or:
794 if (isa<PoisonValue>(Op0) || isa<PoisonValue>(Op1))
830 Value *
X =
nullptr, *
Y =
nullptr, *Z = Op1;
888 if (
X->getType() ==
Y->getType())
925 return ::simplifySubInst(Op0, Op1, IsNSW, IsNUW, Q,
RecursionLimit);
936 if (isa<PoisonValue>(Op1))
960 return ConstantInt::getNullValue(Op0->
getType());
975 Instruction::Add, Q, MaxRecurse))
980 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
987 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
997 return ::simplifyMulInst(Op0, Op1, IsNSW, IsNUW, Q,
RecursionLimit);
1006 Constant *
C = dyn_cast_or_null<Constant>(V);
1007 return (
C &&
C->isAllOnesValue());
1013 unsigned MaxRecurse,
bool IsSigned) {
1030 Type *Ty =
X->getType();
1036 Constant *PosDividendC = ConstantInt::get(Ty,
C->abs());
1037 Constant *NegDividendC = ConstantInt::get(Ty, -
C->abs());
1046 if (
C->isMinSignedValue())
1052 Constant *PosDivisorC = ConstantInt::get(Ty,
C->abs());
1053 Constant *NegDivisorC = ConstantInt::get(Ty, -
C->abs());
1073 return isICmpTrue(ICmpInst::ICMP_ULT,
X,
Y, Q, MaxRecurse);
1080 unsigned MaxRecurse) {
1081 bool IsDiv = (Opcode == Instruction::SDiv || Opcode == Instruction::UDiv);
1082 bool IsSigned = (Opcode == Instruction::SDiv || Opcode == Instruction::SRem);
1099 auto *Op1C = dyn_cast<Constant>(Op1);
1100 auto *VTy = dyn_cast<FixedVectorType>(Ty);
1102 unsigned NumElts = VTy->getNumElements();
1103 for (
unsigned i = 0; i != NumElts; ++i) {
1112 if (isa<PoisonValue>(Op0))
1152 auto *
Mul = cast<OverflowingBinaryOperator>(Op0);
1163 if (
isDivZero(Op0, Op1, Q, MaxRecurse, IsSigned))
1171 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1177 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1187 unsigned MaxRecurse) {
1210 (Opcode == Instruction::UDiv
1230 if ((Opcode == Instruction::SRem &&
1232 (Opcode == Instruction::URem &&
1240 if (Opcode == Instruction::SRem
1243 return C.srem(*C0).isZero();
1247 return C.urem(*C0).isZero();
1263 return simplifyDiv(Instruction::SDiv, Op0, Op1, IsExact, Q, MaxRecurse);
1275 return simplifyDiv(Instruction::UDiv, Op0, Op1, IsExact, Q, MaxRecurse);
1286 unsigned MaxRecurse) {
1291 return ConstantInt::getNullValue(Op0->
getType());
1295 return ConstantInt::getNullValue(Op0->
getType());
1297 return simplifyRem(Instruction::SRem, Op0, Op1, Q, MaxRecurse);
1307 unsigned MaxRecurse) {
1308 return simplifyRem(Instruction::URem, Op0, Op1, Q, MaxRecurse);
1317 Constant *
C = dyn_cast<Constant>(Amount);
1327 const APInt *AmountC;
1333 if (isa<ConstantVector>(
C) || isa<ConstantDataVector>(
C)) {
1334 for (
unsigned I = 0,
1335 E = cast<FixedVectorType>(
C->getType())->getNumElements();
1349 unsigned MaxRecurse) {
1354 if (isa<PoisonValue>(Op0))
1375 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1381 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1399 assert(Opcode == Instruction::Shl &&
"Expected shl for nsw instruction");
1418 Value *Op1,
bool IsExact,
1437 if (Op0Known.
One[0])
1449 simplifyShift(Instruction::Shl, Op0, Op1, IsNSW, Q, MaxRecurse))
1473 if (IsNSW && IsNUW &&
1482 return ::simplifyShlInst(Op0, Op1, IsNSW, IsNUW, Q,
RecursionLimit);
1504 const APInt *ShRAmt, *ShLAmt;
1507 *ShRAmt == *ShLAmt) {
1510 if (ShRAmt->
uge(EffWidthY))
1558 ICmpInst *UnsignedICmp,
bool IsAnd,
1572 if (
match(UnsignedICmp,
1574 ICmpInst::isUnsigned(UnsignedPred)) {
1576 if ((UnsignedPred == ICmpInst::ICMP_UGE ||
1577 UnsignedPred == ICmpInst::ICMP_ULE) &&
1578 EqPred == ICmpInst::ICMP_NE && !IsAnd)
1581 if ((UnsignedPred == ICmpInst::ICMP_ULT ||
1582 UnsignedPred == ICmpInst::ICMP_UGT) &&
1583 EqPred == ICmpInst::ICMP_EQ && IsAnd)
1588 if (EqPred == ICmpInst::ICMP_NE && (UnsignedPred == ICmpInst::ICMP_ULT ||
1589 UnsignedPred == ICmpInst::ICMP_UGT))
1590 return IsAnd ? UnsignedICmp : ZeroICmp;
1594 if (EqPred == ICmpInst::ICMP_EQ && (UnsignedPred == ICmpInst::ICMP_ULE ||
1595 UnsignedPred == ICmpInst::ICMP_UGE))
1596 return IsAnd ? ZeroICmp : UnsignedICmp;
1602 if (
match(UnsignedICmp,
1604 if (UnsignedPred == ICmpInst::ICMP_UGE && IsAnd &&
1606 return UnsignedICmp;
1607 if (UnsignedPred == ICmpInst::ICMP_ULT && !IsAnd &&
1609 return UnsignedICmp;
1614 ICmpInst::isUnsigned(UnsignedPred))
1616 else if (
match(UnsignedICmp,
1618 ICmpInst::isUnsigned(UnsignedPred))
1619 UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred);
1625 if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ &&
1627 return IsAnd ? ZeroICmp : UnsignedICmp;
1631 if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE &&
1633 return IsAnd ? UnsignedICmp : ZeroICmp;
1642 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE)
1643 return IsAnd ? UnsignedICmp : ZeroICmp;
1647 if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_EQ)
1648 return IsAnd ? ZeroICmp : UnsignedICmp;
1651 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_EQ &&
1656 if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_NE &&
1672 const APInt *C0, *C1;
1682 if (IsAnd && Range0.intersectWith(Range1).isEmptySet())
1687 if (!IsAnd && Range0.unionWith(Range1).isFullSet())
1695 if (Range0.contains(Range1))
1696 return IsAnd ? Cmp1 : Cmp0;
1697 if (Range1.contains(Range0))
1698 return IsAnd ? Cmp0 : Cmp1;
1707 const APInt *C0, *C1;
1715 auto *AddInst = cast<OverflowingBinaryOperator>(Op0->
getOperand(0));
1716 if (AddInst->getOperand(1) != Op1->
getOperand(1))
1723 const APInt Delta = *C1 - *C0;
1726 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_SGT)
1728 if (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT && IsNSW)
1732 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_SGT)
1734 if (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGT && IsNSW)
1740 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT)
1743 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGT)
1762 if (!IsAnd && Pred0 == ICmpInst::ICMP_EQ && Pred1 == ICmpInst::ICMP_NE)
1765 if (IsAnd && Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_EQ)
1798 const APInt *C0, *C1;
1806 auto *AddInst = cast<BinaryOperator>(Op0->
getOperand(0));
1807 if (AddInst->getOperand(1) != Op1->
getOperand(1))
1814 const APInt Delta = *C1 - *C0;
1817 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE)
1819 if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && IsNSW)
1823 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE)
1825 if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && IsNSW)
1831 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE)
1834 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE)
1866 Value *LHS0 =
LHS->getOperand(0), *LHS1 =
LHS->getOperand(1);
1867 Value *RHS0 =
RHS->getOperand(0), *RHS1 =
RHS->getOperand(1);
1872 if ((PredL == FCmpInst::FCMP_ORD || PredL == FCmpInst::FCMP_UNO) &&
1873 ((FCmpInst::isOrdered(PredR) && IsAnd) ||
1874 (FCmpInst::isUnordered(PredR) && !IsAnd))) {
1880 return FCmpInst::isOrdered(PredL) == FCmpInst::isOrdered(PredR)
1885 if ((PredR == FCmpInst::FCMP_ORD || PredR == FCmpInst::FCMP_UNO) &&
1886 ((FCmpInst::isOrdered(PredL) && IsAnd) ||
1887 (FCmpInst::isUnordered(PredL) && !IsAnd))) {
1893 return FCmpInst::isOrdered(PredL) == FCmpInst::isOrdered(PredR)
1902 Value *Op1,
bool IsAnd) {
1904 auto *Cast0 = dyn_cast<CastInst>(Op0);
1905 auto *Cast1 = dyn_cast<CastInst>(Op1);
1906 if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() &&
1907 Cast0->getSrcTy() == Cast1->getSrcTy()) {
1908 Op0 = Cast0->getOperand(0);
1909 Op1 = Cast1->getOperand(0);
1913 auto *ICmp0 = dyn_cast<ICmpInst>(Op0);
1914 auto *ICmp1 = dyn_cast<ICmpInst>(Op1);
1919 auto *FCmp0 = dyn_cast<FCmpInst>(Op0);
1920 auto *FCmp1 = dyn_cast<FCmpInst>(Op1);
1931 if (
auto *
C = dyn_cast<Constant>(V))
1940 bool AllowRefinement,
1942 unsigned MaxRecurse);
1946 unsigned MaxRecurse) {
1947 assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
1962 (Opcode == Instruction::And ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE)) {
1963 if (Res == Absorber)
1973 if (Res == Absorber)
1983 nullptr, MaxRecurse))
1984 return Simplify(Res);
1987 nullptr, MaxRecurse))
1988 return Simplify(Res);
1998 assert(BinaryOperator::isBitwiseLogicOp(Opcode) &&
"Expected logic op");
2010 return Opcode == Instruction::And ? ConstantInt::getNullValue(Ty)
2011 : ConstantInt::getAllOnesValue(Ty);
2020 unsigned MaxRecurse) {
2054 const APInt *Shift1, *Shift2;
2059 Shift1->
uge(*Shift2))
2072 unsigned MaxRecurse) {
2077 if (isa<PoisonValue>(Op1))
2112 (~(*Mask)).lshr(*ShAmt).isZero())
2118 (~(*Mask)).shl(*ShAmt).isZero())
2123 const APInt *PowerC;
2132 return ConstantInt::getNullValue(Op1->
getType());
2145 Instruction::Or, Q, MaxRecurse))
2150 Instruction::Xor, Q, MaxRecurse))
2153 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) {
2171 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
2195 if (EffWidthY <= ShftCnt) {
2228 if (*Implied ==
true)
2231 if (*Implied ==
false)
2256 assert(
X->getType() ==
Y->getType() &&
"Expected same type for 'or' ops");
2257 Type *Ty =
X->getType();
2261 return ConstantInt::getAllOnesValue(Ty);
2265 return ConstantInt::getAllOnesValue(Ty);
2283 return ConstantInt::getAllOnesValue(Ty);
2307 return ConstantInt::getAllOnesValue(Ty);
2347 unsigned MaxRecurse) {
2352 if (isa<PoisonValue>(Op1))
2386 C->ule(
X->getType()->getScalarSizeInBits())) {
2387 return ConstantInt::getAllOnesValue(
X->getType());
2441 Instruction::And, Q, MaxRecurse))
2444 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) {
2462 const APInt *C1, *C2;
2488 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
2498 if (std::optional<bool> Implied =
2501 if (*Implied ==
false)
2504 if (*Implied ==
true)
2507 if (std::optional<bool> Implied =
2510 if (*Implied ==
false)
2513 if (*Implied ==
true)
2531 unsigned MaxRecurse) {
2536 if (isa<PoisonValue>(Op1))
2573 if (
Value *R = foldAndOrNot(Op0, Op1))
2575 if (
Value *R = foldAndOrNot(Op1, Op0))
2617 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
2620 Value *CmpLHS = Cmp->getOperand(0), *CmpRHS = Cmp->getOperand(1);
2621 if (Pred == Cmp->getPredicate() &&
LHS == CmpLHS &&
RHS == CmpRHS)
2624 LHS == CmpRHS &&
RHS == CmpLHS)
2637 if (
const AllocaInst *AI = dyn_cast<AllocaInst>(V))
2638 return AI->isStaticAlloca();
2639 if (
const GlobalValue *GV = dyn_cast<GlobalValue>(V))
2640 return (GV->hasLocalLinkage() || GV->hasHiddenVisibility() ||
2641 GV->hasProtectedVisibility() || GV->hasGlobalUnnamedAddr()) &&
2642 !GV->isThreadLocal();
2643 if (
const Argument *
A = dyn_cast<Argument>(V))
2644 return A->hasByValAttr();
2677 auto isByValArg = [](
const Value *V) {
2678 const Argument *
A = dyn_cast<Argument>(V);
2679 return A &&
A->hasByValAttr();
2685 return isa<AllocaInst>(V2) || isa<GlobalVariable>(V2) || isByValArg(V2);
2687 return isa<AllocaInst>(V1) || isa<GlobalVariable>(V1) || isByValArg(V1);
2689 return isa<AllocaInst>(V1) &&
2690 (isa<AllocaInst>(V2) || isa<GlobalVariable>(V2));
2759 unsigned IndexSize =
DL.getIndexTypeSizeInBits(
LHS->
getType());
2760 APInt LHSOffset(IndexSize, 0), RHSOffset(IndexSize, 0);
2780 Opts.
EvalMode = ObjectSizeOpts::Mode::Min;
2782 if (
auto *
I = dyn_cast<Instruction>(V))
2783 return I->getFunction();
2784 if (
auto *
A = dyn_cast<Argument>(V))
2785 return A->getParent();
2791 APInt Dist = LHSOffset - RHSOffset;
2819 if ((IsNAC(LHSUObjs) && IsAllocDisjoint(RHSUObjs)) ||
2820 (IsNAC(RHSUObjs) && IsAllocDisjoint(LHSUObjs)))
2840 bool Captured =
false;
2843 if (
auto *ICmp = dyn_cast<ICmpInst>(U->getUser())) {
2847 unsigned OtherIdx = 1 - U->getOperandNo();
2848 auto *LI = dyn_cast<LoadInst>(ICmp->getOperand(OtherIdx));
2849 if (LI && isa<GlobalVariable>(LI->getPointerOperand()))
2857 CustomCaptureTracker Tracker;
2859 if (!Tracker.Captured)
2881 auto ExtractNotLHS = [](
Value *V) ->
Value * {
2943 case ICmpInst::ICMP_UGE:
2947 case ICmpInst::ICMP_SGE:
2958 case ICmpInst::ICMP_ULE:
2962 case ICmpInst::ICMP_SLE:
2982 case ICmpInst::ICMP_ULT:
2984 case ICmpInst::ICMP_UGE:
2986 case ICmpInst::ICMP_EQ:
2987 case ICmpInst::ICMP_ULE:
2991 case ICmpInst::ICMP_NE:
2992 case ICmpInst::ICMP_UGT:
2996 case ICmpInst::ICMP_SLT: {
3004 case ICmpInst::ICMP_SLE: {
3012 case ICmpInst::ICMP_SGE: {
3020 case ICmpInst::ICMP_SGT: {
3073 *MulC != 0 &&
C->urem(*MulC) != 0) ||
3075 *MulC != 0 &&
C->srem(*MulC) != 0)))
3076 return ConstantInt::get(ITy, Pred == ICmpInst::ICMP_NE);
3084 unsigned MaxRecurse) {
3090 if (Pred == ICmpInst::ICMP_ULT)
3092 if (Pred == ICmpInst::ICMP_UGE)
3095 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGE) {
3107 if (Pred == ICmpInst::ICMP_UGT)
3109 if (Pred == ICmpInst::ICMP_ULE)
3118 case ICmpInst::ICMP_SGT:
3119 case ICmpInst::ICMP_SGE: {
3125 case ICmpInst::ICMP_EQ:
3126 case ICmpInst::ICMP_UGT:
3127 case ICmpInst::ICMP_UGE:
3129 case ICmpInst::ICMP_SLT:
3130 case ICmpInst::ICMP_SLE: {
3136 case ICmpInst::ICMP_NE:
3137 case ICmpInst::ICMP_ULT:
3138 case ICmpInst::ICMP_ULE:
3145 if (Pred == ICmpInst::ICMP_ULE)
3147 if (Pred == ICmpInst::ICMP_UGT)
3158 if (Pred == ICmpInst::ICMP_UGT)
3160 if (Pred == ICmpInst::ICMP_ULE)
3181 case ICmpInst::ICMP_EQ:
3182 case ICmpInst::ICMP_UGE:
3184 case ICmpInst::ICMP_NE:
3185 case ICmpInst::ICMP_ULT:
3187 case ICmpInst::ICMP_UGT:
3188 case ICmpInst::ICMP_ULE:
3204 const APInt *C1, *C2;
3211 if (Pred == ICmpInst::ICMP_UGT)
3213 if (Pred == ICmpInst::ICMP_ULE)
3251 const APInt *C1, *C2;
3265 unsigned MaxRecurse) {
3268 if (MaxRecurse && (LBO || RBO)) {
3270 Value *
A =
nullptr, *
B =
nullptr, *
C =
nullptr, *
D =
nullptr;
3272 bool NoLHSWrapProblem =
false, NoRHSWrapProblem =
false;
3273 if (LBO && LBO->
getOpcode() == Instruction::Add) {
3283 if (RBO && RBO->
getOpcode() == Instruction::Add) {
3295 if ((
A ==
RHS ||
B ==
RHS) && NoLHSWrapProblem)
3302 if ((
C ==
LHS ||
D ==
LHS) && NoRHSWrapProblem)
3305 C ==
LHS ?
D :
C, Q, MaxRecurse - 1))
3309 bool CanSimplify = (NoLHSWrapProblem && NoRHSWrapProblem) ||
3311 if (
A &&
C && (
A ==
C ||
A ==
D ||
B ==
C ||
B ==
D) && CanSimplify) {
3318 }
else if (
A ==
D) {
3322 }
else if (
B ==
C) {
3343 ICmpInst::getSwappedPredicate(Pred), RBO,
LHS, Q, MaxRecurse))
3350 if (
C->isStrictlyPositive()) {
3351 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_NE)
3353 if (Pred == ICmpInst::ICMP_SGE || Pred == ICmpInst::ICMP_EQ)
3356 if (
C->isNonNegative()) {
3357 if (Pred == ICmpInst::ICMP_SLE)
3359 if (Pred == ICmpInst::ICMP_SGT)
3382 if (Pred == ICmpInst::ICMP_EQ)
3384 if (Pred == ICmpInst::ICMP_NE)
3393 if (Pred == ICmpInst::ICMP_UGT)
3395 if (Pred == ICmpInst::ICMP_ULE)
3406 case Instruction::Shl: {
3409 if (!NUW || (ICmpInst::isSigned(Pred) && !NSW) ||
3422 case Instruction::And:
3423 case Instruction::Or: {
3424 const APInt *C1, *C2;
3430 Pred = ICmpInst::getSwappedPredicate(Pred);
3433 if (Pred == ICmpInst::ICMP_ULE)
3435 if (Pred == ICmpInst::ICMP_UGT)
3438 if (Pred == ICmpInst::ICMP_SLE)
3440 if (Pred == ICmpInst::ICMP_SGT)
3454 case Instruction::UDiv:
3455 case Instruction::LShr:
3456 if (ICmpInst::isSigned(Pred) || !Q.
IIQ.
isExact(LBO) ||
3463 case Instruction::SDiv:
3471 case Instruction::AShr:
3478 case Instruction::Shl: {
3483 if (!NSW && ICmpInst::isSigned(Pred))
3499 unsigned MaxRecurse) {
3655 Pred = ICmpInst::getSwappedPredicate(Pred);
3661 (
A ==
C ||
A ==
D ||
B ==
C ||
B ==
D)) {
3670 (
A ==
C ||
A ==
D ||
B ==
C ||
B ==
D)) {
3694 CallInst *Assume = cast<CallInst>(AssumeVH);
3707 auto *
II = dyn_cast<IntrinsicInst>(
LHS);
3711 switch (
II->getIntrinsicID()) {
3712 case Intrinsic::uadd_sat:
3714 if (
II->getArgOperand(0) ==
RHS ||
II->getArgOperand(1) ==
RHS) {
3715 if (Pred == ICmpInst::ICMP_UGE)
3717 if (Pred == ICmpInst::ICMP_ULT)
3721 case Intrinsic::usub_sat:
3723 if (
II->getArgOperand(0) ==
RHS) {
3724 if (Pred == ICmpInst::ICMP_ULE)
3726 if (Pred == ICmpInst::ICMP_UGT)
3742 if (
const Argument *
A = dyn_cast<Argument>(V))
3743 return A->getRange();
3744 else if (
const CallBase *CB = dyn_cast<CallBase>(V))
3745 return CB->getRange();
3747 return std::nullopt;
3765 assert(!isa<UndefValue>(
LHS) &&
"Unexpected icmp undef,%X");
3770 if (isa<PoisonValue>(
RHS))
3799 if (LhsCr->icmp(Pred, *RhsCr))
3807 if (isa<CastInst>(
LHS) && (isa<Constant>(
RHS) || isa<CastInst>(
RHS))) {
3815 if (MaxRecurse && isa<PtrToIntInst>(LI) &&
3824 if (RI->getOperand(0)->getType() == SrcTy)
3832 if (isa<ZExtInst>(
LHS)) {
3836 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3840 RI->getOperand(0), Q, MaxRecurse - 1))
3844 else if (
SExtInst *RI = dyn_cast<SExtInst>(
RHS)) {
3845 if (
SrcOp == RI->getOperand(0)) {
3846 if (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_SGE)
3848 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_SLT)
3862 assert(Trunc &&
"Constant-fold of ImmConstant should not fail");
3865 assert(RExt &&
"Constant-fold of ImmConstant should not fail");
3868 assert(AnyEq &&
"Constant-fold of ImmConstant should not fail");
3875 SrcOp, Trunc, Q, MaxRecurse - 1))
3885 case ICmpInst::ICMP_EQ:
3886 case ICmpInst::ICMP_UGT:
3887 case ICmpInst::ICMP_UGE:
3890 case ICmpInst::ICMP_NE:
3891 case ICmpInst::ICMP_ULT:
3892 case ICmpInst::ICMP_ULE:
3897 case ICmpInst::ICMP_SGT:
3898 case ICmpInst::ICMP_SGE:
3902 case ICmpInst::ICMP_SLT:
3903 case ICmpInst::ICMP_SLE:
3912 if (isa<SExtInst>(
LHS)) {
3916 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3923 else if (
ZExtInst *RI = dyn_cast<ZExtInst>(
RHS)) {
3924 if (
SrcOp == RI->getOperand(0)) {
3925 if (Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_SLE)
3927 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SGT)
3940 assert(Trunc &&
"Constant-fold of ImmConstant should not fail");
3943 assert(RExt &&
"Constant-fold of ImmConstant should not fail");
3946 assert(AnyEq &&
"Constant-fold of ImmConstant should not fail");
3961 case ICmpInst::ICMP_EQ:
3963 case ICmpInst::ICMP_NE:
3968 case ICmpInst::ICMP_SGT:
3969 case ICmpInst::ICMP_SGE:
3973 case ICmpInst::ICMP_SLT:
3974 case ICmpInst::ICMP_SLE:
3981 case ICmpInst::ICMP_UGT:
3982 case ICmpInst::ICMP_UGE:
3990 case ICmpInst::ICMP_ULT:
3991 case ICmpInst::ICMP_ULE:
4022 ICmpInst::getSwappedPredicate(Pred),
RHS,
LHS))
4028 if (std::optional<bool> Res =
4037 if (
auto *CLHS = dyn_cast<PtrToIntOperator>(
LHS))
4038 if (
auto *CRHS = dyn_cast<PtrToIntOperator>(
RHS))
4039 if (CLHS->getPointerOperandType() == CRHS->getPointerOperandType() &&
4043 CRHS->getPointerOperand(), Q))
4048 if (isa<SelectInst>(
LHS) || isa<SelectInst>(
RHS))
4054 if (isa<PHINode>(
LHS) || isa<PHINode>(
RHS))
4070 unsigned MaxRecurse) {
4086 if (Pred == FCmpInst::FCMP_FALSE)
4088 if (Pred == FCmpInst::FCMP_TRUE)
4093 if (isa<PoisonValue>(
LHS) || isa<PoisonValue>(
RHS))
4116 if (Pred == FCmpInst::FCMP_ORD || Pred == FCmpInst::FCMP_UNO) {
4124 return ConstantInt::get(
RetTy, Pred == FCmpInst::FCMP_ORD);
4132 std::optional<KnownFPClass> FullKnownClassLHS;
4136 auto computeLHSClass = [=, &FullKnownClassLHS](
FPClassTest InterestedFlags =
4138 if (FullKnownClassLHS)
4139 return *FullKnownClassLHS;
4152 FullKnownClassLHS = computeLHSClass();
4153 if ((FullKnownClassLHS->KnownFPClasses & ClassTest) ==
fcNone)
4155 if ((FullKnownClassLHS->KnownFPClasses & ~ClassTest) ==
fcNone)
4170 if (
C->isNegative() && !
C->isNegZero()) {
4176 case FCmpInst::FCMP_UGE:
4177 case FCmpInst::FCMP_UGT:
4178 case FCmpInst::FCMP_UNE: {
4186 case FCmpInst::FCMP_OEQ:
4187 case FCmpInst::FCMP_OLE:
4188 case FCmpInst::FCMP_OLT: {
4207 cast<IntrinsicInst>(
LHS)->getIntrinsicID() == Intrinsic::maxnum;
4211 case FCmpInst::FCMP_OEQ:
4212 case FCmpInst::FCMP_UEQ:
4216 case FCmpInst::FCMP_ONE:
4217 case FCmpInst::FCMP_UNE:
4221 case FCmpInst::FCMP_OGE:
4222 case FCmpInst::FCMP_UGE:
4223 case FCmpInst::FCMP_OGT:
4224 case FCmpInst::FCMP_UGT:
4229 return ConstantInt::get(
RetTy, IsMaxNum);
4230 case FCmpInst::FCMP_OLE:
4231 case FCmpInst::FCMP_ULE:
4232 case FCmpInst::FCMP_OLT:
4233 case FCmpInst::FCMP_ULT:
4238 return ConstantInt::get(
RetTy, !IsMaxNum);
4250 case FCmpInst::FCMP_OGE:
4251 case FCmpInst::FCMP_ULT: {
4254 Interested |=
fcNan;
4265 case FCmpInst::FCMP_UGE:
4266 case FCmpInst::FCMP_OLT: {
4283 if (isa<SelectInst>(
LHS) || isa<SelectInst>(
RHS))
4289 if (isa<PHINode>(
LHS) || isa<PHINode>(
RHS))
4303 bool AllowRefinement,
4305 unsigned MaxRecurse) {
4307 "If AllowRefinement=false then CanUseUndef=false");
4317 if (isa<Constant>(
Op))
4320 auto *
I = dyn_cast<Instruction>(V);
4326 if (isa<PHINode>(
I))
4329 if (
Op->getType()->isVectorTy()) {
4332 if (!
I->getType()->isVectorTy() || isa<ShuffleVectorInst>(
I) ||
4333 isa<CallBase>(
I) || isa<BitCastInst>(
I))
4338 if (
match(
I, m_Intrinsic<Intrinsic::is_constant>()))
4342 if (isa<FreezeInst>(
I))
4347 bool AnyReplaced =
false;
4348 for (
Value *InstOp :
I->operands()) {
4350 InstOp,
Op, RepOp, Q, AllowRefinement, DropFlags, MaxRecurse)) {
4352 AnyReplaced = InstOp != NewInstOp;
4366 if (!AllowRefinement) {
4371 if (
auto *BO = dyn_cast<BinaryOperator>(
I)) {
4372 unsigned Opcode = BO->getOpcode();
4381 if ((Opcode == Instruction::And || Opcode == Instruction::Or) &&
4382 NewOps[0] == NewOps[1]) {
4384 if (
auto *PDI = dyn_cast<PossiblyDisjointInst>(BO)) {
4385 if (PDI->isDisjoint()) {
4397 if ((Opcode == Instruction::Sub || Opcode == Instruction::Xor) &&
4398 NewOps[0] == RepOp && NewOps[1] == RepOp)
4410 if ((NewOps[0] == Absorber || NewOps[1] == Absorber) &&
4415 if (isa<GetElementPtrInst>(
I)) {
4431 auto PreventSelfSimplify = [V](
Value *Simplified) {
4432 return Simplified != V ? Simplified :
nullptr;
4435 return PreventSelfSimplify(
4442 for (
Value *NewOp : NewOps) {
4443 if (
Constant *ConstOp = dyn_cast<Constant>(NewOp))
4458 if (!AllowRefinement) {
4461 if (
auto *
II = dyn_cast<IntrinsicInst>(
I);
4462 II &&
II->getIntrinsicID() == Intrinsic::abs) {
4463 if (!ConstOps[0]->isNotMinSignedValue())
4469 if (DropFlags && Res &&
I->hasPoisonGeneratingAnnotations())
4479 bool AllowRefinement,
4483 if (!AllowRefinement)
4486 return ::simplifyWithOpReplaced(V,
Op, RepOp, Q, AllowRefinement, DropFlags,
4493 const APInt *
Y,
bool TrueWhenUnset) {
4500 return TrueWhenUnset ? FalseVal : TrueVal;
4506 return TrueWhenUnset ? FalseVal : TrueVal;
4508 if (
Y->isPowerOf2()) {
4514 if (TrueWhenUnset && cast<PossiblyDisjointInst>(TrueVal)->isDisjoint())
4516 return TrueWhenUnset ? TrueVal : FalseVal;
4524 if (!TrueWhenUnset && cast<PossiblyDisjointInst>(FalseVal)->isDisjoint())
4526 return TrueWhenUnset ? TrueVal : FalseVal;
4537 if (CmpRHS == TVal || CmpRHS == FVal) {
4539 Pred = ICmpInst::getSwappedPredicate(Pred);
4543 if (CmpLHS == FVal) {
4545 Pred = ICmpInst::getInversePredicate(Pred);
4550 Value *
X = CmpLHS, *
Y = CmpRHS;
4551 bool PeekedThroughSelectShuffle =
false;
4552 auto *Shuf = dyn_cast<ShuffleVectorInst>(FVal);
4553 if (Shuf && Shuf->isSelect()) {
4554 if (Shuf->getOperand(0) ==
Y)
4555 FVal = Shuf->getOperand(1);
4556 else if (Shuf->getOperand(1) ==
Y)
4557 FVal = Shuf->getOperand(0);
4560 PeekedThroughSelectShuffle =
true;
4564 auto *MMI = dyn_cast<MinMaxIntrinsic>(FVal);
4565 if (!MMI || TVal !=
X ||
4583 if (PeekedThroughSelectShuffle)
4616 Pred == ICmpInst::ICMP_EQ);
4624 unsigned MaxRecurse) {
4627 nullptr, MaxRecurse) == TrueVal)
4631 nullptr, MaxRecurse) == FalseVal)
4642 unsigned MaxRecurse) {
4644 Value *CmpLHS, *CmpRHS;
4652 if (Pred == ICmpInst::ICMP_NE) {
4653 Pred = ICmpInst::ICMP_EQ;
4660 if (TrueVal->getType()->isIntOrIntVectorTy()) {
4668 X->getType()->getScalarSizeInBits());
4674 if (Pred == ICmpInst::ICMP_EQ &&
match(CmpRHS,
m_Zero())) {
4688 if (
match(TrueVal, isFsh) && FalseVal ==
X && CmpLHS == ShAmt)
4701 if (
match(FalseVal, isRotate) && TrueVal ==
X && CmpLHS == ShAmt &&
4702 Pred == ICmpInst::ICMP_EQ)
4707 if (
match(TrueVal, m_Intrinsic<Intrinsic::abs>(
m_Specific(CmpLHS))) &&
4724 if (Pred == ICmpInst::ICMP_EQ) {
4774 bool HasNoSignedZeros =
4781 if (Pred == FCmpInst::FCMP_OEQ)
4786 if (Pred == FCmpInst::FCMP_UNE)
4797 if (
auto *CondC = dyn_cast<Constant>(
Cond)) {
4798 if (
auto *TrueC = dyn_cast<Constant>(TrueVal))
4799 if (
auto *FalseC = dyn_cast<Constant>(FalseVal))
4804 if (isa<PoisonValue>(CondC))
4809 return isa<Constant>(FalseVal) ? FalseVal : TrueVal;
4821 assert(
Cond->getType()->isIntOrIntVectorTy(1) &&
4822 "Select must have bool or bool vector condition");
4823 assert(TrueVal->getType() == FalseVal->getType() &&
4824 "Select must have same types for true/false ops");
4826 if (
Cond->getType() == TrueVal->getType()) {
4889 if (TrueVal == FalseVal)
4892 if (
Cond == TrueVal) {
4900 if (
Cond == FalseVal) {
4914 if (isa<PoisonValue>(TrueVal) ||
4919 if (isa<PoisonValue>(FalseVal) ||
4925 if (isa<FixedVectorType>(TrueVal->getType()) &&
4929 cast<FixedVectorType>(TrueC->
getType())->getNumElements();
4931 for (
unsigned i = 0; i != NumElts; ++i) {
4935 if (!TEltC || !FEltC)
4942 else if (isa<PoisonValue>(TEltC) ||
4945 else if (isa<PoisonValue>(FEltC) ||
4951 if (NewC.
size() == NumElts)
4967 return *Imp ? TrueVal : FalseVal;
4984 cast<PointerType>(
Ptr->getType()->getScalarType())->getAddressSpace();
4987 if (Indices.
empty())
4997 if (
VectorType *VT = dyn_cast<VectorType>(
Op->getType())) {
4998 GEPTy = VectorType::get(GEPTy, VT->getElementCount());
5005 if (
Ptr->getType() == GEPTy &&
5011 if (isa<PoisonValue>(
Ptr) ||
5012 any_of(Indices, [](
const auto *V) {
return isa<PoisonValue>(V); }))
5019 bool IsScalableVec =
5021 return isa<ScalableVectorType>(V->getType());
5024 if (Indices.
size() == 1) {
5026 if (!IsScalableVec && Ty->
isSized()) {
5031 if (TyAllocSize == 0 &&
Ptr->getType() == GEPTy)
5036 if (Indices[0]->
getType()->getScalarSizeInBits() ==
5038 auto CanSimplify = [GEPTy, &
P,
Ptr]() ->
bool {
5039 return P->getType() == GEPTy &&
5043 if (TyAllocSize == 1 &&
5054 TyAllocSize == 1ULL <<
C && CanSimplify())
5070 [](
Value *
Idx) { return match(Idx, m_Zero()); })) {
5074 APInt BasePtrOffset(IdxWidth, 0);
5075 Value *StrippedBasePtr =
5076 Ptr->stripAndAccumulateInBoundsConstantOffsets(Q.
DL, BasePtrOffset);
5085 !BasePtrOffset.
isZero()) {
5086 auto *CI = ConstantInt::get(GEPTy->
getContext(), BasePtrOffset);
5092 !BasePtrOffset.
isOne()) {
5093 auto *CI = ConstantInt::get(GEPTy->
getContext(), BasePtrOffset - 1);
5100 if (!isa<Constant>(
Ptr) ||
5101 !
all_of(Indices, [](
Value *V) {
return isa<Constant>(V); }))
5123 if (
Constant *CAgg = dyn_cast<Constant>(Agg))
5124 if (
Constant *CVal = dyn_cast<Constant>(Val))
5129 if (isa<PoisonValue>(Val) ||
5135 if (EV->getAggregateOperand()->getType() == Agg->
getType() &&
5136 EV->getIndices() == Idxs) {
5139 if (isa<PoisonValue>(Agg) ||
5142 return EV->getAggregateOperand();
5145 if (Agg == EV->getAggregateOperand())
5155 return ::simplifyInsertValueInst(Agg, Val, Idxs, Q,
RecursionLimit);
5161 auto *VecC = dyn_cast<Constant>(Vec);
5162 auto *ValC = dyn_cast<Constant>(Val);
5163 auto *IdxC = dyn_cast<Constant>(
Idx);
5164 if (VecC && ValC && IdxC)
5168 if (
auto *CI = dyn_cast<ConstantInt>(
Idx)) {
5169 if (isa<FixedVectorType>(Vec->
getType()) &&
5170 CI->uge(cast<FixedVectorType>(Vec->
getType())->getNumElements()))
5180 if (isa<PoisonValue>(Val) ||
5197 if (
auto *CAgg = dyn_cast<Constant>(Agg))
5201 unsigned NumIdxs = Idxs.
size();
5202 for (
auto *IVI = dyn_cast<InsertValueInst>(Agg); IVI !=
nullptr;
5203 IVI = dyn_cast<InsertValueInst>(IVI->getAggregateOperand())) {
5205 unsigned NumInsertValueIdxs = InsertValueIdxs.
size();
5206 unsigned NumCommonIdxs = std::min(NumInsertValueIdxs, NumIdxs);
5207 if (InsertValueIdxs.
slice(0, NumCommonIdxs) ==
5208 Idxs.
slice(0, NumCommonIdxs)) {
5209 if (NumIdxs == NumInsertValueIdxs)
5210 return IVI->getInsertedValueOperand();
5227 auto *VecVTy = cast<VectorType>(Vec->
getType());
5228 if (
auto *CVec = dyn_cast<Constant>(Vec)) {
5229 if (
auto *CIdx = dyn_cast<Constant>(
Idx))
5243 if (
auto *IdxC = dyn_cast<ConstantInt>(
Idx)) {
5245 unsigned MinNumElts = VecVTy->getElementCount().getKnownMinValue();
5246 if (isa<FixedVectorType>(VecVTy) && IdxC->getValue().uge(MinNumElts))
5249 if (IdxC->getValue().ult(MinNumElts))
5259 auto *IE = dyn_cast<InsertElementInst>(Vec);
5260 if (IE && IE->getOperand(2) ==
Idx)
5261 return IE->getOperand(1);
5284 Value *CommonValue =
nullptr;
5285 bool HasPoisonInput =
false;
5286 bool HasUndefInput =
false;
5292 HasPoisonInput =
true;
5297 HasUndefInput =
true;
5300 if (CommonValue &&
Incoming != CommonValue)
5311 if (HasPoisonInput || HasUndefInput) {
5323 if (
auto *
C = dyn_cast<Constant>(
Op))
5326 if (
auto *CI = dyn_cast<CastInst>(
Op)) {
5327 auto *Src = CI->getOperand(0);
5328 Type *SrcTy = Src->getType();
5329 Type *MidTy = CI->getType();
5331 if (Src->getType() == Ty) {
5341 SrcIntPtrTy, MidIntPtrTy,
5342 DstIntPtrTy) == Instruction::BitCast)
5348 if (CastOpc == Instruction::BitCast)
5349 if (
Op->getType() == Ty)
5354 if (CastOpc == Instruction::PtrToInt &&
5372 int MaskVal,
Value *RootVec,
5373 unsigned MaxRecurse) {
5383 int InVecNumElts = cast<FixedVectorType>(Op0->
getType())->getNumElements();
5384 int RootElt = MaskVal;
5385 Value *SourceOp = Op0;
5386 if (MaskVal >= InVecNumElts) {
5387 RootElt = MaskVal - InVecNumElts;
5393 if (
auto *SourceShuf = dyn_cast<ShuffleVectorInst>(SourceOp)) {
5395 DestElt, SourceShuf->getOperand(0), SourceShuf->getOperand(1),
5396 SourceShuf->getMaskValue(RootElt), RootVec, MaxRecurse);
5405 if (RootVec != SourceOp)
5410 if (RootElt != DestElt)
5419 unsigned MaxRecurse) {
5423 auto *InVecTy = cast<VectorType>(Op0->
getType());
5424 unsigned MaskNumElts = Mask.size();
5425 ElementCount InVecEltCount = InVecTy->getElementCount();
5430 Indices.
assign(Mask.begin(), Mask.end());
5435 bool MaskSelects0 =
false, MaskSelects1 =
false;
5437 for (
unsigned i = 0; i != MaskNumElts; ++i) {
5438 if (Indices[i] == -1)
5440 if ((
unsigned)Indices[i] < InVecNumElts)
5441 MaskSelects0 =
true;
5443 MaskSelects1 =
true;
5451 auto *Op0Const = dyn_cast<Constant>(Op0);
5452 auto *Op1Const = dyn_cast<Constant>(Op1);
5457 if (Op0Const && Op1Const)
5463 if (!Scalable && Op0Const && !Op1Const) {
5481 if (
all_of(Indices, [InsertIndex](
int MaskElt) {
5482 return MaskElt == InsertIndex || MaskElt == -1;
5484 assert(isa<UndefValue>(Op1) &&
"Expected undef operand 1 for splat");
5488 for (
unsigned i = 0; i != MaskNumElts; ++i)
5489 if (Indices[i] == -1)
5497 if (
auto *OpShuf = dyn_cast<ShuffleVectorInst>(Op0))
5517 Value *RootVec =
nullptr;
5518 for (
unsigned i = 0; i != MaskNumElts; ++i) {
5540 if (
auto *
C = dyn_cast<Constant>(
Op))
5568 Type *Ty = In->getType();
5569 if (
auto *VecTy = dyn_cast<FixedVectorType>(Ty)) {
5570 unsigned NumElts = VecTy->getNumElements();
5572 for (
unsigned i = 0; i != NumElts; ++i) {
5573 Constant *EltC = In->getAggregateElement(i);
5576 if (EltC && isa<PoisonValue>(EltC))
5578 else if (EltC && EltC->
isNaN())
5579 NewC[i] = ConstantFP::get(
5580 EltC->
getType(), cast<ConstantFP>(EltC)->getValue().makeQuiet());
5594 if (isa<ScalableVectorType>(Ty)) {
5595 auto *
Splat = In->getSplatValue();
5597 "Found a scalable-vector NaN but not a splat");
5603 return ConstantFP::get(Ty, cast<ConstantFP>(In)->getValue().makeQuiet());
5618 for (
Value *V : Ops) {
5626 if (FMF.
noNaNs() && (IsNan || IsUndef))
5628 if (FMF.
noInfs() && (IsInf || IsUndef))
5654 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5720 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5835 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5841 return simplifyFMAFMul(Op0, Op1, FMF, Q, MaxRecurse, ExBehavior, Rounding);
5848 return ::simplifyFAddInst(Op0, Op1, FMF, Q,
RecursionLimit, ExBehavior,
5856 return ::simplifyFSubInst(Op0, Op1, FMF, Q,
RecursionLimit, ExBehavior,
5864 return ::simplifyFMulInst(Op0, Op1, FMF, Q,
RecursionLimit, ExBehavior,
5872 return ::simplifyFMAFMul(Op0, Op1, FMF, Q,
RecursionLimit, ExBehavior,
5880 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5905 return ConstantFP::get(Op0->
getType(), 1.0);
5917 return ConstantFP::get(Op0->
getType(), -1.0);
5931 return ::simplifyFDivInst(Op0, Op1, FMF, Q,
RecursionLimit, ExBehavior,
5939 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5969 return ::simplifyFRemInst(Op0, Op1, FMF, Q,
RecursionLimit, ExBehavior,
5978 unsigned MaxRecurse) {
5980 case Instruction::FNeg:
5992 unsigned MaxRecurse) {
5994 case Instruction::FNeg:
6015 case Instruction::Add:
6018 case Instruction::Sub:
6021 case Instruction::Mul:
6024 case Instruction::SDiv:
6026 case Instruction::UDiv:
6028 case Instruction::SRem:
6030 case Instruction::URem:
6032 case Instruction::Shl:
6035 case Instruction::LShr:
6037 case Instruction::AShr:
6039 case Instruction::And:
6041 case Instruction::Or:
6043 case Instruction::Xor:
6045 case Instruction::FAdd:
6047 case Instruction::FSub:
6049 case Instruction::FMul:
6051 case Instruction::FDiv:
6053 case Instruction::FRem:
6065 unsigned MaxRecurse) {
6067 case Instruction::FAdd:
6069 case Instruction::FSub:
6071 case Instruction::FMul:
6073 case Instruction::FDiv:
6109 case Intrinsic::fabs:
6110 case Intrinsic::floor:
6111 case Intrinsic::ceil:
6112 case Intrinsic::trunc:
6113 case Intrinsic::rint:
6114 case Intrinsic::nearbyint:
6115 case Intrinsic::round:
6116 case Intrinsic::roundeven:
6117 case Intrinsic::canonicalize:
6118 case Intrinsic::arithmetic_fence:
6130 case Intrinsic::floor:
6131 case Intrinsic::ceil:
6132 case Intrinsic::trunc:
6133 case Intrinsic::rint:
6134 case Intrinsic::nearbyint:
6135 case Intrinsic::round:
6136 case Intrinsic::roundeven:
6150 auto *OffsetConstInt = dyn_cast<ConstantInt>(
Offset);
6151 if (!OffsetConstInt || OffsetConstInt->getBitWidth() > 64)
6155 DL.getIndexTypeSizeInBits(
Ptr->getType()));
6156 if (OffsetInt.
srem(4) != 0)
6164 auto *LoadedCE = dyn_cast<ConstantExpr>(Loaded);
6168 if (LoadedCE->getOpcode() == Instruction::Trunc) {
6169 LoadedCE = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
6174 if (LoadedCE->getOpcode() != Instruction::Sub)
6177 auto *LoadedLHS = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
6178 if (!LoadedLHS || LoadedLHS->getOpcode() != Instruction::PtrToInt)
6180 auto *LoadedLHSPtr = LoadedLHS->getOperand(0);
6184 APInt LoadedRHSOffset;
6187 PtrSym != LoadedRHSSym || PtrOffset != LoadedRHSOffset)
6190 return LoadedLHSPtr;
6198 if (isa<PoisonValue>(Op0) || isa<PoisonValue>(Op1))
6221 if (
C && (
C->isZero() ||
C->isInfinity()))
6230 if (
C &&
C->isNaN())
6231 return ConstantFP::get(Op0->
getType(),
C->makeQuiet());
6249 if (
auto *
II = dyn_cast<IntrinsicInst>(Op0))
6250 if (
II->getIntrinsicID() == IID)
6259 auto *
II = dyn_cast<IntrinsicInst>(Op0);
6267 case Intrinsic::fabs:
6271 case Intrinsic::bswap:
6276 case Intrinsic::bitreverse:
6281 case Intrinsic::ctpop: {
6285 return ConstantInt::get(Op0->
getType(), 1);
6294 case Intrinsic::exp:
6296 if (Call->hasAllowReassoc() &&
6300 case Intrinsic::exp2:
6302 if (Call->hasAllowReassoc() &&
6306 case Intrinsic::exp10:
6308 if (Call->hasAllowReassoc() &&
6312 case Intrinsic::log:
6314 if (Call->hasAllowReassoc() &&
6318 case Intrinsic::log2:
6320 if (Call->hasAllowReassoc() &&
6326 case Intrinsic::log10:
6329 if (Call->hasAllowReassoc() &&
6335 case Intrinsic::vector_reverse:
6343 case Intrinsic::frexp: {
6367 auto *MM0 = dyn_cast<IntrinsicInst>(Op0);
6372 if (Op1 ==
X || Op1 ==
Y ||
6389 assert((IID == Intrinsic::maxnum || IID == Intrinsic::minnum ||
6390 IID == Intrinsic::maximum || IID == Intrinsic::minimum) &&
6391 "Unsupported intrinsic");
6393 auto *
M0 = dyn_cast<IntrinsicInst>(Op0);
6397 if (!
M0 ||
M0->getIntrinsicID() != IID)
6399 Value *X0 =
M0->getOperand(0);
6400 Value *Y0 =
M0->getOperand(1);
6407 if (X0 == Op1 || Y0 == Op1)
6410 auto *
M1 = dyn_cast<IntrinsicInst>(Op1);
6413 Value *X1 =
M1->getOperand(0);
6414 Value *Y1 =
M1->getOperand(1);
6422 if ((X0 == X1 && Y0 == Y1) || (X0 == Y1 && Y0 == X1))
6433 unsigned BitWidth = ReturnType->getScalarSizeInBits();
6435 case Intrinsic::abs:
6443 case Intrinsic::cttz: {
6449 case Intrinsic::ctlz: {
6457 case Intrinsic::ptrmask: {
6458 if (isa<PoisonValue>(Op0) || isa<PoisonValue>(Op1))
6468 "Invalid mask width");
6485 APInt IrrelevantPtrBits =
6488 Instruction::Or,
C, ConstantInt::get(
C->getType(), IrrelevantPtrBits),
6490 if (
C !=
nullptr &&
C->isAllOnesValue())
6495 case Intrinsic::smax:
6496 case Intrinsic::smin:
6497 case Intrinsic::umax:
6498 case Intrinsic::umin: {
6509 return ConstantInt::get(
6517 return ConstantInt::get(ReturnType, *
C);
6528 auto *MinMax0 = dyn_cast<IntrinsicInst>(Op0);
6529 if (MinMax0 && MinMax0->getIntrinsicID() == IID) {
6531 Value *M00 = MinMax0->getOperand(0), *M01 = MinMax0->getOperand(1);
6532 const APInt *InnerC;
6535 ICmpInst::getNonStrictPredicate(
6555 case Intrinsic::scmp:
6556 case Intrinsic::ucmp: {
6563 IID == Intrinsic::scmp ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
6565 return ConstantInt::get(ReturnType, 1);
6568 IID == Intrinsic::scmp ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
6574 case Intrinsic::usub_with_overflow:
6575 case Intrinsic::ssub_with_overflow:
6582 case Intrinsic::uadd_with_overflow:
6583 case Intrinsic::sadd_with_overflow:
6588 cast<StructType>(ReturnType),
6593 case Intrinsic::umul_with_overflow:
6594 case Intrinsic::smul_with_overflow:
6604 case Intrinsic::uadd_sat:
6610 case Intrinsic::sadd_sat:
6625 case Intrinsic::usub_sat:
6630 case Intrinsic::ssub_sat:
6638 case Intrinsic::load_relative:
6639 if (
auto *C0 = dyn_cast<Constant>(Op0))
6640 if (
auto *C1 = dyn_cast<Constant>(Op1))
6643 case Intrinsic::powi:
6644 if (
auto *Power = dyn_cast<ConstantInt>(Op1)) {
6646 if (Power->isZero())
6647 return ConstantFP::get(Op0->
getType(), 1.0);
6653 case Intrinsic::ldexp:
6655 case Intrinsic::copysign:
6665 case Intrinsic::is_fpclass: {
6666 if (isa<PoisonValue>(Op0))
6669 uint64_t Mask = cast<ConstantInt>(Op1)->getZExtValue();
6672 return ConstantInt::get(ReturnType,
true);
6674 return ConstantInt::get(ReturnType,
false);
6679 case Intrinsic::maxnum:
6680 case Intrinsic::minnum:
6681 case Intrinsic::maximum:
6682 case Intrinsic::minimum: {
6688 if (isa<Constant>(Op0))
6695 bool PropagateNaN = IID == Intrinsic::minimum || IID == Intrinsic::maximum;
6696 bool IsMin = IID == Intrinsic::minimum || IID == Intrinsic::minnum;
6703 return PropagateNaN ?
propagateNaN(cast<Constant>(Op1)) : Op0;
6709 (
C->isInfinity() || (Call && Call->hasNoInfs() &&
C->isLargest()))) {
6714 if (
C->isNegative() == IsMin &&
6715 (!PropagateNaN || (Call && Call->hasNoNaNs())))
6716 return ConstantFP::get(ReturnType, *
C);
6722 if (
C->isNegative() != IsMin &&
6723 (PropagateNaN || (Call && Call->hasNoNaNs())))
6736 case Intrinsic::vector_extract: {
6738 unsigned IdxN = cast<ConstantInt>(Op1)->getZExtValue();
6742 IdxN == 0 &&
X->getType() == ReturnType)
6758 assert(Call->arg_size() == Args.size());
6759 unsigned NumOperands = Args.size();
6767 case Intrinsic::vscale: {
6771 return ConstantInt::get(
RetTy,
C->getZExtValue());
6779 if (NumOperands == 1)
6782 if (NumOperands == 2)
6788 case Intrinsic::masked_load:
6789 case Intrinsic::masked_gather: {
6790 Value *MaskArg = Args[2];
6791 Value *PassthruArg = Args[3];
6797 case Intrinsic::fshl:
6798 case Intrinsic::fshr: {
6799 Value *Op0 = Args[0], *Op1 = Args[1], *ShAmtArg = Args[2];
6807 return Args[IID == Intrinsic::fshl ? 0 : 1];
6809 const APInt *ShAmtC;
6814 return Args[IID == Intrinsic::fshl ? 0 : 1];
6819 return ConstantInt::getNullValue(
F->getReturnType());
6823 return ConstantInt::getAllOnesValue(
F->getReturnType());
6827 case Intrinsic::experimental_constrained_fma: {
6828 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6830 *FPI->getRoundingMode()))
6834 case Intrinsic::fma:
6835 case Intrinsic::fmuladd: {
6837 RoundingMode::NearestTiesToEven))
6841 case Intrinsic::smul_fix:
6842 case Intrinsic::smul_fix_sat: {
6843 Value *Op0 = Args[0];
6844 Value *Op1 = Args[1];
6845 Value *Op2 = Args[2];
6846 Type *ReturnType =
F->getReturnType();
6851 if (isa<Constant>(Op0))
6865 cast<ConstantInt>(Op2)->getZExtValue());
6871 case Intrinsic::vector_insert: {
6872 Value *Vec = Args[0];
6873 Value *SubVec = Args[1];
6875 Type *ReturnType =
F->getReturnType();
6879 unsigned IdxN = cast<ConstantInt>(
Idx)->getZExtValue();
6884 X->getType() == ReturnType)
6889 case Intrinsic::experimental_constrained_fadd: {
6890 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6892 *FPI->getExceptionBehavior(),
6893 *FPI->getRoundingMode());
6895 case Intrinsic::experimental_constrained_fsub: {
6896 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6898 *FPI->getExceptionBehavior(),
6899 *FPI->getRoundingMode());
6901 case Intrinsic::experimental_constrained_fmul: {
6902 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6904 *FPI->getExceptionBehavior(),
6905 *FPI->getRoundingMode());
6907 case Intrinsic::experimental_constrained_fdiv: {
6908 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6910 *FPI->getExceptionBehavior(),
6911 *FPI->getRoundingMode());
6913 case Intrinsic::experimental_constrained_frem: {
6914 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6916 *FPI->getExceptionBehavior(),
6917 *FPI->getRoundingMode());
6919 case Intrinsic::experimental_constrained_ldexp:
6921 case Intrinsic::experimental_gc_relocate: {
6927 if (isa<UndefValue>(DerivedPtr) || isa<UndefValue>(BasePtr)) {
6931 if (
auto *PT = dyn_cast<PointerType>(GCR.
getType())) {
6935 if (isa<ConstantPointerNull>(DerivedPtr)) {
6950 auto *
F = dyn_cast<Function>(Callee);
6955 ConstantArgs.
reserve(Args.size());
6956 for (
Value *Arg : Args) {
6959 if (isa<MetadataAsValue>(Arg))
6972 assert(Call->arg_size() == Args.size());
6976 if (Call->isMustTailCall())
6981 if (isa<UndefValue>(Callee) || isa<ConstantPointerNull>(Callee))
6987 auto *
F = dyn_cast<Function>(Callee);
6988 if (
F &&
F->isIntrinsic())
6996 assert(isa<ConstrainedFPIntrinsic>(Call));
7015 return ::simplifyFreezeInst(Op0, Q);
7023 if (
auto *PtrOpC = dyn_cast<Constant>(PtrOp))
7029 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
7060 unsigned MaxRecurse) {
7061 assert(
I->getFunction() &&
"instruction should be inserted in a function");
7063 "context instruction should be in the same function");
7067 switch (
I->getOpcode()) {
7072 [](
Value *V) { return cast<Constant>(V); });
7076 case Instruction::FNeg:
7078 case Instruction::FAdd:
7081 case Instruction::Add:
7085 case Instruction::FSub:
7088 case Instruction::Sub:
7092 case Instruction::FMul:
7095 case Instruction::Mul:
7099 case Instruction::SDiv:
7103 case Instruction::UDiv:
7107 case Instruction::FDiv:
7110 case Instruction::SRem:
7112 case Instruction::URem:
7114 case Instruction::FRem:
7117 case Instruction::Shl:
7121 case Instruction::LShr:
7125 case Instruction::AShr:
7129 case Instruction::And:
7131 case Instruction::Or:
7133 case Instruction::Xor:
7135 case Instruction::ICmp:
7137 NewOps[1], Q, MaxRecurse);
7138 case Instruction::FCmp:
7140 NewOps[1],
I->getFastMathFlags(), Q, MaxRecurse);
7141 case Instruction::Select:
7144 case Instruction::GetElementPtr: {
7145 auto *GEPI = cast<GetElementPtrInst>(
I);
7147 ArrayRef(NewOps).slice(1), GEPI->getNoWrapFlags(), Q,
7150 case Instruction::InsertValue: {
7155 case Instruction::InsertElement:
7157 case Instruction::ExtractValue: {
7158 auto *EVI = cast<ExtractValueInst>(
I);
7162 case Instruction::ExtractElement:
7164 case Instruction::ShuffleVector: {
7165 auto *SVI = cast<ShuffleVectorInst>(
I);
7167 SVI->getShuffleMask(), SVI->getType(), Q,
7170 case Instruction::PHI:
7172 case Instruction::Call:
7174 cast<CallInst>(
I), NewOps.
back(),
7175 NewOps.
drop_back(1 + cast<CallInst>(
I)->getNumTotalBundleOperands()), Q);
7176 case Instruction::Freeze:
7178#define HANDLE_CAST_INST(num, opc, clas) case Instruction::opc:
7179#include "llvm/IR/Instruction.def"
7180#undef HANDLE_CAST_INST
7183 case Instruction::Alloca:
7186 case Instruction::Load:
7195 "Number of operands should match the instruction!");
7196 return ::simplifyInstructionWithOperands(
I, NewOps, SQ,
RecursionLimit);
7226 bool Simplified =
false;
7233 for (
User *U :
I->users())
7235 Worklist.
insert(cast<Instruction>(U));
7238 I->replaceAllUsesWith(SimpleV);
7240 if (!
I->isEHPad() && !
I->isTerminator() && !
I->mayHaveSideEffects())
7241 I->eraseFromParent();
7253 if (UnsimplifiedUsers)
7254 UnsimplifiedUsers->insert(
I);
7263 for (
User *U :
I->users())
7264 Worklist.
insert(cast<Instruction>(U));
7267 I->replaceAllUsesWith(SimpleV);
7269 if (!
I->isEHPad() && !
I->isTerminator() && !
I->mayHaveSideEffects())
7270 I->eraseFromParent();
7279 assert(
I != SimpleV &&
"replaceAndRecursivelySimplify(X,X) is not valid!");
7280 assert(SimpleV &&
"Must provide a simplified value.");
7288 auto *DT = DTWP ? &DTWP->
getDomTree() :
nullptr;
7290 auto *TLI = TLIWP ? &TLIWP->
getTLI(
F) :
nullptr;
7293 return {
F.getDataLayout(), TLI, DT, AC};
7301template <
class T,
class... TArgs>
7304 auto *DT = AM.template getCachedResult<DominatorTreeAnalysis>(
F);
7305 auto *TLI = AM.template getCachedResult<TargetLibraryAnalysis>(
F);
7306 auto *AC = AM.template getCachedResult<AssumptionAnalysis>(
F);
7307 return {
F.getDataLayout(), TLI, DT, AC};
7321void InstSimplifyFolder::anchor() {}
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
Returns the sub type a function will return at a given Idx. Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static Value * simplifyFreezeInst(Value *Op0, const SimplifyQuery &Q)
Given operands for a Freeze, see if we can fold the result.
static Value * simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an LShr, see if we can fold the result.
static Value * simplifyUDivInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for a UDiv, see if we can fold the result.
static Value * simplifyShuffleVectorInst(Value *Op0, Value *Op1, ArrayRef< int > Mask, Type *RetTy, const SimplifyQuery &Q, unsigned MaxRecurse)
static Value * foldMinMaxSharedOp(Intrinsic::ID IID, Value *Op0, Value *Op1)
Given a min/max intrinsic, see if it can be removed based on having an operand that is another min/ma...
static Value * simplifySubInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for a Sub, see if we can fold the result.
static Value * simplifyICmpWithIntrinsicOnLHS(CmpInst::Predicate Pred, Value *LHS, Value *RHS)
static Value * expandCommutativeBinOp(Instruction::BinaryOps Opcode, Value *L, Value *R, Instruction::BinaryOps OpcodeToExpand, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify binops of form "A op (B op' C)" or the commuted variant by distributing op over op'.
static Constant * foldOrCommuteConstant(Instruction::BinaryOps Opcode, Value *&Op0, Value *&Op1, const SimplifyQuery &Q)
static bool haveNonOverlappingStorage(const Value *V1, const Value *V2)
Return true if V1 and V2 are each the base of some distinct storage region [V, object_size(V)] which d...
static Constant * foldConstant(Instruction::UnaryOps Opcode, Value *&Op, const SimplifyQuery &Q)
static Value * handleOtherCmpSelSimplifications(Value *TCmp, Value *FCmp, Value *Cond, const SimplifyQuery &Q, unsigned MaxRecurse)
We know comparison with both branches of select can be simplified, but they are not equal.
static Constant * propagateNaN(Constant *In)
Try to propagate existing NaN values when possible.
static Value * simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an AShr, see if we can fold the result.
static Value * simplifyRelativeLoad(Constant *Ptr, Constant *Offset, const DataLayout &DL)
static Value * simplifyICmpWithDominatingAssume(CmpInst::Predicate Predicate, Value *LHS, Value *RHS, const SimplifyQuery &Q)
static Value * simplifyCmpSelTrueCase(CmpInst::Predicate Pred, Value *LHS, Value *RHS, Value *Cond, const SimplifyQuery &Q, unsigned MaxRecurse)
Simplify comparison with true branch of select.
static Value * simplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
These are simplifications common to SDiv and UDiv.
static Value * simplifyCmpSelOfMaxMin(Value *CmpLHS, Value *CmpRHS, ICmpInst::Predicate Pred, Value *TVal, Value *FVal)
static Value * simplifyPHINode(PHINode *PN, ArrayRef< Value * > IncomingValues, const SimplifyQuery &Q)
See if we can fold the given phi. If not, returns null.
static Value * simplifyICmpWithMinMax(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
simplify integer comparisons where at least one operand of the compare matches an integer min/max idi...
static Value * simplifySelectWithFakeICmpEq(Value *CmpLHS, Value *CmpRHS, ICmpInst::Predicate Pred, Value *TrueVal, Value *FalseVal)
An alternative way to test if a bit is set or not uses sgt/slt instead of eq/ne.
static Value * simplifyCmpInst(unsigned, Value *, Value *, const SimplifyQuery &, unsigned)
Given operands for a CmpInst, see if we can fold the result.
static Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &, unsigned)
Given operands for an ExtractValueInst, see if we can fold the result.
static Value * simplifySelectInst(Value *, Value *, Value *, const SimplifyQuery &, unsigned)
Given operands for a SelectInst, see if we can fold the result.
static Value * simplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an Add, see if we can fold the result.
static Value * threadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
In the case of a comparison with a select instruction, try to simplify the comparison by seeing wheth...
static Value * simplifyUnOp(unsigned, Value *, const SimplifyQuery &, unsigned)
Given the operand for a UnaryOperator, see if we can fold the result.
static Value * simplifyICmpWithBinOpOnLHS(CmpInst::Predicate Pred, BinaryOperator *LBO, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
static Value * simplifyAndCommutative(Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
static Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &SQ, unsigned MaxRecurse)
See if we can compute a simplified version of this instruction.
static bool isIdempotent(Intrinsic::ID ID)
static std::optional< ConstantRange > getRange(Value *V, const InstrInfoQuery &IIQ)
Helper method to get range from metadata or attribute.
static Value * simplifyAndOrOfICmpsWithCtpop(ICmpInst *Cmp0, ICmpInst *Cmp1, bool IsAnd)
Try to simplify and/or of icmp with ctpop intrinsic.
static Value * simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp, ICmpInst *UnsignedICmp, bool IsAnd, const SimplifyQuery &Q)
Commuted variants are assumed to be handled by calling this function again with the parameters swappe...
static Value * tryConstantFoldCall(CallBase *Call, Value *Callee, ArrayRef< Value * > Args, const SimplifyQuery &Q)
static Value * simplifyExtractElementInst(Value *Vec, Value *Idx, const SimplifyQuery &Q, unsigned)
Given operands for an ExtractElementInst, see if we can fold the result.
static Value * simplifyAndOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1, const InstrInfoQuery &IIQ)
static Value * threadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
In the case of a comparison with a PHI instruction, try to simplify the comparison by seeing whether ...
static Value * simplifyIntrinsic(CallBase *Call, Value *Callee, ArrayRef< Value * > Args, const SimplifyQuery &Q)
static bool isPoisonShift(Value *Amount, const SimplifyQuery &Q)
Returns true if a shift by Amount always yields poison.
static APInt stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V, bool AllowNonInbounds=false)
Compute the base pointer and cumulative constant offsets for V.
static Value * simplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF, const SimplifyQuery &Q, unsigned MaxRecurse, fp::ExceptionBehavior ExBehavior, RoundingMode Rounding)
static Value * simplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an LShr or AShr, see if we can fold the result.
static Value * simplifyICmpWithConstant(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const InstrInfoQuery &IIQ)
static Value * simplifySDivInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an SDiv, see if we can fold the result.
static Value * simplifyByDomEq(unsigned Opcode, Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
Test if there is a dominating equivalence condition for the two operands.
static Value * simplifyFPUnOp(unsigned, Value *, const FastMathFlags &, const SimplifyQuery &, unsigned)
Given the operand for a UnaryOperator, see if we can fold the result.
static bool isICmpTrue(ICmpInst::Predicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
Given a predicate and two operands, return true if the comparison is true.
static Value * simplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF, const SimplifyQuery &Q, unsigned MaxRecurse, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FAdd, see if we can fold the result.
static Value * simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1, const SimplifyQuery &Q)
static Value * expandBinOp(Instruction::BinaryOps Opcode, Value *V, Value *OtherOp, Instruction::BinaryOps OpcodeToExpand, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify a binary operator of form "V op OtherOp" where V is "(B0 opex B1)" by distributing 'o...
static Constant * getFalse(Type *Ty)
For a boolean type or a vector of boolean type, return false or a vector with every element false.
static Value * simplifyDivRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
Check for common or similar folds of integer division or integer remainder.
static bool removesFPFraction(Intrinsic::ID ID)
Return true if the intrinsic rounds a floating-point value to an integral floating-point value (not a...
static Value * simplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF, const SimplifyQuery &Q, unsigned, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
static Value * simplifyOrOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1, const InstrInfoQuery &IIQ)
static Value * simplifyMulInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for a Mul, see if we can fold the result.
static Value * simplifyFNegInst(Value *Op, FastMathFlags FMF, const SimplifyQuery &Q, unsigned MaxRecurse)
Given the operand for an FNeg, see if we can fold the result.
static Value * simplifyOrInst(Value *, Value *, const SimplifyQuery &, unsigned)
Given operands for an Or, see if we can fold the result.
static Value * simplifySelectBitTest(Value *TrueVal, Value *FalseVal, Value *X, const APInt *Y, bool TrueWhenUnset)
Try to simplify a select instruction when its condition operand is an integer comparison where one op...
static Value * simplifyAssociativeBinOp(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
Generic simplifications for associative binary operations.
static Value * simplifyICmpWithZero(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Try hard to fold icmp with zero RHS because this is a common case.
static Value * foldSelectWithBinaryOp(Value *Cond, Value *TrueVal, Value *FalseVal)
static Value * simplifyShlInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an Shl, see if we can fold the result.
static Value * threadBinOpOverPHI(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
In the case of a binary operation with an operand that is a PHI instruction, try to simplify the bino...
static Value * simplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF, const SimplifyQuery &Q, unsigned, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
static Value * simplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF, const SimplifyQuery &Q, unsigned MaxRecurse, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FSub, see if we can fold the result.
static bool trySimplifyICmpWithAdds(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const InstrInfoQuery &IIQ)
static Value * simplifyXorInst(Value *, Value *, const SimplifyQuery &, unsigned)
Given operands for a Xor, see if we can fold the result.
static Value * simplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for a URem, see if we can fold the result.
static Value * simplifySelectWithFCmp(Value *Cond, Value *T, Value *F, const SimplifyQuery &Q)
Try to simplify a select instruction when its condition operand is a floating-point comparison.
static Constant * simplifyFPOp(ArrayRef< Value * > Ops, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior, RoundingMode Rounding)
Perform folds that are common to any floating-point operation.
static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI, const DominatorTree *DT, AssumptionCache *AC, SmallSetVector< Instruction *, 8 > *UnsimplifiedUsers=nullptr)
Implementation of recursive simplification through an instruction's uses.
static Value * simplifySelectWithICmpEq(Value *CmpLHS, Value *CmpRHS, Value *TrueVal, Value *FalseVal, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify a select instruction when its condition operand is an integer equality comparison.
static bool isAllocDisjoint(const Value *V)
Return true if the underlying object (storage) must be disjoint from storage returned by any noalias ...
static Constant * getTrue(Type *Ty)
For a boolean type or a vector of boolean type, return true or a vector with every element true.
static Value * simplifyGEPInst(Type *, Value *, ArrayRef< Value * >, GEPNoWrapFlags, const SimplifyQuery &, unsigned)
Given operands for an GetElementPtrInst, see if we can fold the result.
static bool isDivZero(Value *X, Value *Y, const SimplifyQuery &Q, unsigned MaxRecurse, bool IsSigned)
Return true if we can simplify X / Y to 0.
static Value * simplifyCmpSelCase(CmpInst::Predicate Pred, Value *LHS, Value *RHS, Value *Cond, const SimplifyQuery &Q, unsigned MaxRecurse, Constant *TrueOrFalse)
Simplify comparison with true or false branch of select: sel = select i1 cond, i32 tv,...
static Value * simplifyLdexp(Value *Op0, Value *Op1, const SimplifyQuery &Q, bool IsStrict)
static Value * simplifyLogicOfAddSub(Value *Op0, Value *Op1, Instruction::BinaryOps Opcode)
Given a bitwise logic op, check if the operands are add/sub with a common source value and inverted c...
static Value * simplifyOrLogic(Value *X, Value *Y)
static Type * getCompareTy(Value *Op)
static Value * simplifyCastInst(unsigned, Value *, Type *, const SimplifyQuery &, unsigned)
static Value * simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1, const SimplifyQuery &Q)
static Value * simplifyBinOp(unsigned, Value *, Value *, const SimplifyQuery &, unsigned)
Given operands for a BinaryOperator, see if we can fold the result.
static Value * simplifyInsertValueInst(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q, unsigned)
Given operands for an InsertValueInst, see if we can fold the result.
static Value * simplifyAndInst(Value *, Value *, const SimplifyQuery &, unsigned)
Given operands for an And, see if we can fold the result.
static Value * foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1, int MaskVal, Value *RootVec, unsigned MaxRecurse)
For the given destination element of a shuffle, peek through shuffles to match a root vector source o...
static Constant * computePointerICmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
static Value * simplifyAndOrOfFCmps(const SimplifyQuery &Q, FCmpInst *LHS, FCmpInst *RHS, bool IsAnd)
static Value * simplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an FCmpInst, see if we can fold the result.
static Value * simplifyAndOrOfCmps(const SimplifyQuery &Q, Value *Op0, Value *Op1, bool IsAnd)
static Value * simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp, const SimplifyQuery &Q, bool AllowRefinement, SmallVectorImpl< Instruction * > *DropFlags, unsigned MaxRecurse)
static Value * threadBinOpOverSelect(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
In the case of a binary operation with a select instruction as an operand, try to simplify the binop ...
static Constant * computePointerDifference(const DataLayout &DL, Value *LHS, Value *RHS)
Compute the constant difference between two pointer values.
static Value * simplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an SRem, see if we can fold the result.
static Value * simplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF, const SimplifyQuery &Q, unsigned MaxRecurse, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given the operands for an FMul, see if we can fold the result.
static Value * simplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an ICmpInst, see if we can fold the result.
static Value * simplifyICmpOfBools(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Fold an icmp when its operands have i1 scalar type.
static Value * simplifyAndOrOfICmpsWithConstants(ICmpInst *Cmp0, ICmpInst *Cmp1, bool IsAnd)
Test if a pair of compares with a shared operand and 2 constants has an empty set intersection,...
static Value * simplifyAndOrWithICmpEq(unsigned Opcode, Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
static Value * simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
TODO: A large part of this logic is duplicated in InstCombine's foldICmpBinOp().
static Value * simplifyShift(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, bool IsNSW, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an Shl, LShr or AShr, see if we can fold the result.
static Value * extractEquivalentCondition(Value *V, CmpInst::Predicate Pred, Value *LHS, Value *RHS)
Rummage around inside V looking for something equivalent to the comparison "LHS Pred RHS".
static Value * simplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
These are simplifications common to SRem and URem.
static bool valueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT)
Does the given value dominate the specified phi node?
static Value * simplifyCmpSelFalseCase(CmpInst::Predicate Pred, Value *LHS, Value *RHS, Value *Cond, const SimplifyQuery &Q, unsigned MaxRecurse)
Simplify comparison with false branch of select.
static Value * simplifySelectWithICmpCond(Value *CondVal, Value *TrueVal, Value *FalseVal, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify a select instruction when its condition operand is an integer comparison.
static bool isSameCompare(Value *V, CmpInst::Predicate Pred, Value *LHS, Value *RHS)
isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"?
static Value * foldMinimumMaximumSharedOp(Intrinsic::ID IID, Value *Op0, Value *Op1)
Given a min/max intrinsic, see if it can be removed based on having an operand that is another min/ma...
static Value * simplifyUnaryIntrinsic(Function *F, Value *Op0, const SimplifyQuery &Q, const CallBase *Call)
This header provides classes for managing per-loop analyses.
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file implements a set that has insertion order iteration characteristics.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static SymbolRef::Type getType(const Symbol *Sym)
static const uint32_t IV[8]
Class for arbitrary precision integers.
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
unsigned getActiveBits() const
Compute the number of active bits in the value.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
APInt urem(const APInt &RHS) const
Unsigned remainder operation.
void setSignBit()
Set the sign bit to 1.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
bool intersects(const APInt &RHS) const
This operation tests if there are any pairs of corresponding bits between this APInt and RHS that are...
unsigned countr_zero() const
Count the number of trailing zero bits.
bool isNonPositive() const
Determine if this APInt Value is non-positive (<= 0).
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
bool isStrictlyPositive() const
Determine if this APInt Value is strictly positive (> 0).
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
bool getBoolValue() const
Convert APInt to a boolean value.
APInt srem(const APInt &RHS) const
Function for signed remainder operation.
bool isMask(unsigned numBits) const
bool isMaxSignedValue() const
Determine if this is the largest signed value.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
bool isSignBitSet() const
Determine if sign bit of this APInt is set.
bool slt(const APInt &RHS) const
Signed less than comparison.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
bool isOne() const
Determine if this is a value of 1.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
an instruction to allocate memory on the stack
A container for analyses that lazily runs them and caches their results.
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
const T & back() const
back - Get the last element.
size_t size() const
size - Get the array size.
ArrayRef< T > drop_back(size_t N=1) const
Drop the last N elements of the array.
bool empty() const
empty - Check if the array is empty.
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
An immutable pass that tracks lazily created AssumptionCache objects.
AssumptionCache & getAssumptionCache(Function &F)
Get the cached assumptions for a function.
A cache of @llvm.assume calls within a function.
MutableArrayRef< ResultElem > assumptionsFor(const Value *V)
Access the list of assumptions which affect this value.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
BinaryOps getOpcode() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Value * getArgOperand(unsigned i) const
This class represents a function call, abstracting a target machine's calling convention.
static unsigned isEliminableCastPair(Instruction::CastOps firstOpcode, Instruction::CastOps secondOpcode, Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy, Type *DstIntPtrTy)
Determine how a pair of casts can be eliminated, if they can be at all.
This class is the base class for the comparison instructions.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Predicate getStrictPredicate() const
For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
bool isFalseWhenEqual() const
This is just a convenience.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ ICMP_ULT
unsigned less than
@ ICMP_SGE
signed greater or equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
bool isTrueWhenEqual() const
This is just a convenience.
bool isFPPredicate() const
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Predicate getPredicate() const
Return the predicate for this instruction.
static bool isUnordered(Predicate predicate)
Determine if the predicate is an unordered operation.
bool isIntPredicate() const
static Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getExtractElement(Constant *Vec, Constant *Idx, Type *OnlyIfReducedTy=nullptr)
static Constant * getNot(Constant *C)
static Constant * getInsertElement(Constant *Vec, Constant *Elt, Constant *Idx, Type *OnlyIfReducedTy=nullptr)
static Constant * getShuffleVector(Constant *V1, Constant *V2, ArrayRef< int > Mask, Type *OnlyIfReducedTy=nullptr)
static bool isSupportedGetElementPtr(const Type *SrcElemTy)
Whether creating a constant expression for this getelementptr type is supported.
static Constant * getBinOpAbsorber(unsigned Opcode, Type *Ty)
Return the absorbing element for the given binary operation, i.e.
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
static Constant * getBinOpIdentity(unsigned Opcode, Type *Ty, bool AllowRHSConstant=false, bool NSZ=false)
Return the identity constant for a binary opcode.
static Constant * getZero(Type *Ty, bool Negative=false)
static Constant * getNegativeZero(Type *Ty)
static Constant * getNaN(Type *Ty, bool Negative=false, uint64_t Payload=0)
This is the shared class of boolean and integer constants.
static ConstantInt * getTrue(LLVMContext &Context)
static ConstantInt * getSigned(IntegerType *Ty, int64_t V)
Return a ConstantInt with the specified value for the specified type.
static ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
static ConstantInt * getBool(LLVMContext &Context, bool V)
static ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
This class represents a range of values.
const APInt * getSingleElement() const
If this set contains a single element, return it, otherwise return null.
bool isFullSet() const
Return true if this set contains all of the elements possible for this data-type.
bool isEmptySet() const
Return true if this set contains no members.
static ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
ConstantRange inverse() const
Return a new range that is the logical not of the current set.
bool contains(const APInt &Val) const
Return true if the specified value is in the set.
static Constant * get(StructType *T, ArrayRef< Constant * > V)
static Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
static Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
static Constant * getAllOnesValue(Type *Ty)
bool isAllOnesValue() const
Return true if this is the value that would be returned by getAllOnesValue.
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
bool isNaN() const
Return true if this is a floating-point NaN constant or a vector floating-point constant with all NaN...
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
unsigned getPointerSizeInBits(unsigned AS=0) const
Layout pointer size, in bits FIXME: The defaults need to be removed once all of the backends/clients ...
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
unsigned getIndexTypeSizeInBits(Type *Ty) const
Layout size of the index used in GEP calculation.
IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of a GEP index in AddressSpace.
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
unsigned getIndexSizeInBits(unsigned AS) const
Size in bits of index used for address calculation in getelementptr.
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
Legacy analysis pass which computes a DominatorTree.
DominatorTree & getDomTree()
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
This instruction compares its operands according to the predicate given to the constructor.
Convenience struct for specifying and reasoning about fast-math flags.
bool noSignedZeros() const
bool allowReassoc() const
Flag queries.
Represents calls to the gc.relocate intrinsic.
Value * getBasePtr() const
Value * getDerivedPtr() const
Represents flags for the getelementptr instruction/expression.
static Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
This instruction compares its operands according to the predicate given to the constructor.
static bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
bool isEquality() const
Return true if this predicate is either EQ or NE.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
This instruction inserts a struct field of array element value into an aggregate value.
bool hasNoSignedZeros() const LLVM_READONLY
Determine whether the no-signed-zeros flag is set.
bool isAssociative() const LLVM_READONLY
Return true if the instruction is associative:
bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
const Function * getFunction() const
Return the function this instruction belongs to.
An instruction for reading from memory.
bool isVolatile() const
Return true if this is a load from a volatile memory location.
static APInt getSaturationPoint(Intrinsic::ID ID, unsigned numBits)
Min/max intrinsics are monotonic, they operate on a fixed-bitwidth values, so there is a certain thre...
ICmpInst::Predicate getPredicate() const
Returns the comparison predicate underlying the intrinsic.
op_range incoming_values()
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Pass interface - Implemented by all 'passes'.
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
This class represents a cast from a pointer to an integer.
This class represents a sign extension of integer types.
This class represents the LLVM 'select' instruction.
size_type size() const
Determine the number of elements in the SetVector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
static void commuteShuffleMask(MutableArrayRef< int > Mask, unsigned InVecNumElts)
Change values in a shuffle permute mask assuming the two vector operands of length InVecNumElts have ...
A SetVector that performs no allocations if smaller than a certain size.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void assign(size_type NumElts, ValueParamT Elt)
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
TargetLibraryInfo & getTLI(const Function &F)
Provides information about what library functions are available for the current target.
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
bool isPointerTy() const
True if this is an instance of PointerType.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
bool isScalableTy() const
Return true if this is a type whose size is a known multiple of vscale.
static IntegerType * getInt32Ty(LLVMContext &C)
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr) const
Accumulate the constant offset this value has compared to a base pointer.
LLVMContext & getContext() const
All values hold a context through their type.
This class represents zero extension of integer types.
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
class_match< PoisonValue > m_Poison()
Match an arbitrary poison constant.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
PtrAdd_match< PointerOpTy, OffsetOpTy > m_PtrAdd(const PointerOpTy &PointerOp, const OffsetOpTy &OffsetOp)
Matches GEP with i8 source element type.
cst_pred_ty< is_negative > m_Negative()
Match an integer or vector of negative values.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
BinaryOp_match< LHS, RHS, Instruction::FMul, true > m_c_FMul(const LHS &L, const RHS &R)
Matches FMul with LHS and RHS in either order.
cst_pred_ty< is_sign_mask > m_SignMask()
Match an integer or vector with only the sign bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
cstfp_pred_ty< is_inf > m_Inf()
Match a positive or negative infinity FP constant.
m_Intrinsic_Ty< Opnd0 >::Ty m_BitReverse(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::FSub > m_FSub(const LHS &L, const RHS &R)
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
BinaryOp_match< cstfp_pred_ty< is_any_zero_fp >, RHS, Instruction::FSub > m_FNegNSZ(const RHS &X)
Match 'fneg X' as 'fsub +-0.0, X'.
BinaryOp_match< LHS, RHS, Instruction::URem > m_URem(const LHS &L, const RHS &R)
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
cstfp_pred_ty< is_any_zero_fp > m_AnyZeroFP()
Match a floating-point negative zero or positive zero.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
cstfp_pred_ty< is_neg_zero_fp > m_NegZeroFP()
Match a floating-point negative zero.
specific_fpval m_SpecificFP(double V)
Match a specific floating point value or vector with all elements equal to the value.
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > m_SMin(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, FCmpInst, FCmpInst::Predicate > m_FCmp(FCmpInst::Predicate &Pred, const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_Sqrt(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)
Matches an Xor with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
cst_pred_ty< is_zero_int > m_ZeroInt()
Match an integer 0 or a vector with all elements equal to 0.
apint_match m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
CmpClass_match< LHS, RHS, ICmpInst, ICmpInst::Predicate > m_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R)
BinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub > m_Neg(const ValTy &V)
Matches a 'Neg' as 'sub 0, V'.
match_combine_and< class_match< Constant >, match_unless< constantexpr_match > > m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoSignedWrap > m_NSWShl(const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWShl(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWMul(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty > m_UMax(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst, ICmpInst::Predicate, true > m_c_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R)
Matches an ICmp with a predicate over LHS and RHS in either order.
cst_pred_ty< custom_checkfn< APInt > > m_CheckedInt(function_ref< bool(const APInt &)> CheckFn)
Match an integer or vector where CheckFn(ele) for each element is true.
specific_fpval m_FPOne()
Match a float 1.0 or vector with all elements equal to 1.0.
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches a Add with LHS and RHS in either order.
apfloat_match m_APFloatAllowPoison(const APFloat *&Res)
Match APFloat while allowing poison in splat vector constants.
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShl(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty, true > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty, true > > > m_c_MaxOrMin(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty > m_SMax(const LHS &L, const RHS &R)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap > m_NSWAdd(const LHS &L, const RHS &R)
CastInst_match< OpTy, SIToFPInst > m_SIToFP(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
Exact_match< T > m_Exact(const T &SubPattern)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
cstfp_pred_ty< is_pos_zero_fp > m_PosZeroFP()
Match a floating-point positive zero.
BinaryOp_match< LHS, RHS, Instruction::FAdd, true > m_c_FAdd(const LHS &L, const RHS &R)
Matches FAdd with LHS and RHS in either order.
LogicalOp_match< LHS, RHS, Instruction::And, true > m_c_LogicalAnd(const LHS &L, const RHS &R)
Matches L && R with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
apfloat_match m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShr(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
cstfp_pred_ty< is_nan > m_NaN()
Match an arbitrary NaN constant.
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_BSwap(const Opnd0 &Op0)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
LogicalOp_match< LHS, RHS, Instruction::Or, true > m_c_LogicalOr(const LHS &L, const RHS &R)
Matches L || R with LHS and RHS in either order.
ThreeOps_match< Val_t, Elt_t, Idx_t, Instruction::InsertElement > m_InsertElt(const Val_t &Val, const Elt_t &Elt, const Idx_t &Idx)
Matches InsertElementInst.
ElementWiseBitCast_match< OpTy > m_ElementWiseBitCast(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::Mul, true > m_c_Mul(const LHS &L, const RHS &R)
Matches a Mul with LHS and RHS in either order.
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoSignedWrap > m_NSWMul(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > m_UMin(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
ExceptionBehavior
Exception behavior used for floating point operations.
@ ebStrict
This corresponds to "fpexcept.strict".
@ ebIgnore
This corresponds to "fpexcept.ignore".
This is an optimization pass for GlobalISel generic memory operations.
Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
Value * simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q)
Given operands for an AShr, fold the result or return null.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Value * simplifyFMulInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FMul, fold the result or return null.
Value * simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef< Value * > Indices, GEPNoWrapFlags NW, const SimplifyQuery &Q)
Given operands for a GetElementPtrInst, fold the result or return null.
bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI, const DominatorTree *DT=nullptr, bool AllowEphemerals=false)
Return true if it is valid to use the assumptions provided by an assume intrinsic,...
bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
Constant * ConstantFoldSelectInstruction(Constant *Cond, Constant *V1, Constant *V2)
Attempt to constant fold a select instruction with the specified operands.
Value * simplifyFreezeInst(Value *Op, const SimplifyQuery &Q)
Given an operand for a Freeze, see if we can fold the result.
bool MaskedValueIsZero(const Value *V, const APInt &Mask, const SimplifyQuery &DL, unsigned Depth=0)
Return true if 'V & Mask' is known to be zero.
Constant * ConstantFoldFPInstOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL, const Instruction *I, bool AllowNonDeterministic=true)
Attempt to constant fold a floating point binary operation with the specified operands,...
bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS, bool &TrueIfSigned)
Given an exploded icmp instruction, return true if the comparison only checks the sign bit.
bool canConstantFoldCallTo(const CallBase *Call, const Function *F)
canConstantFoldCallTo - Return true if its even possible to fold a call to the specified function.
APInt getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth)
Return the minimum or maximum constant value for the specified integer min/max flavor and type.
Value * simplifySDivInst(Value *LHS, Value *RHS, bool IsExact, const SimplifyQuery &Q)
Given operands for an SDiv, fold the result or return null.
Value * simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q)
Given operand for a UnaryOperator, fold the result or return null.
bool isDefaultFPEnvironment(fp::ExceptionBehavior EB, RoundingMode RM)
Returns true if the exception handling behavior and rounding mode match what is used in the default f...
Value * simplifyMulInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for a Mul, fold the result or return null.
bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV, APInt &Offset, const DataLayout &DL, DSOLocalEquivalent **DSOEquiv=nullptr)
If this constant is a constant offset from a global, return the global and the constant.
Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &Q)
Like simplifyInstruction but the operands of I are replaced with NewOps.
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
Value * simplifyCall(CallBase *Call, Value *Callee, ArrayRef< Value * > Args, const SimplifyQuery &Q)
Given a callsite, callee, and arguments, fold the result or return null.
Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given value is known to have exactly one bit set when defined.
bool canRoundingModeBe(RoundingMode RM, RoundingMode QRM)
Returns true if the rounding mode RM may be QRM at compile time or at run time.
bool isNoAliasCall(const Value *V)
Return true if this pointer is returned by a noalias function.
Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
Constant * ConstantFoldGetElementPtr(Type *Ty, Constant *C, std::optional< ConstantRange > InRange, ArrayRef< Value * > Idxs)
CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF, bool Ordered=false)
Return the canonical comparison predicate for the specified minimum/maximum flavor.
Value * simplifyShuffleVectorInst(Value *Op0, Value *Op1, ArrayRef< int > Mask, Type *RetTy, const SimplifyQuery &Q)
Given operands for a ShuffleVectorInst, fold the result or return null.
Constant * ConstantFoldCall(const CallBase *Call, Function *F, ArrayRef< Constant * > Operands, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldCall - Attempt to constant fold a call to the specified function with the specified argum...
Value * simplifyOrInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an Or, fold the result or return null.
Value * simplifyXorInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an Xor, fold the result or return null.
ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
ConstantRange computeConstantRange(const Value *V, bool ForSigned, bool UseInstrInfo=true, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Determine the possible constant range of an integer or vector of integer value.
Constant * ConstantFoldExtractValueInstruction(Constant *Agg, ArrayRef< unsigned > Idxs)
Attempt to constant fold an extractvalue instruction with the specified operands and indices.
bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates memory (either malloc,...
Value * simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty, const SimplifyQuery &Q)
Given operands for a CastInst, fold the result or return null.
Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
unsigned M1(unsigned Val)
Value * simplifySubInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for a Sub, fold the result or return null.
Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)
Wrapper function around std::transform to apply a function to a range and store the result elsewhere.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL, const TargetLibraryInfo *TLI, ObjectSizeOpts Opts={})
Compute the size of the object pointed by Ptr.
bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned el...
Constant * ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty, const DataLayout &DL)
If C is a uniform value where all bits are the same (either all zero, all ones, all undef or all pois...
SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF)
Return the inverse minimum/maximum flavor of the specified flavor.
bool replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, SmallSetVector< Instruction *, 8 > *UnsimplifiedUsers=nullptr)
Replace all uses of 'I' with 'SimpleV' and simplify the uses recursively.
Constant * ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op, const DataLayout &DL)
Attempt to constant fold a unary operation with the specified operand.
SelectPatternFlavor
Specific patterns of select instructions we can match.
Value * simplifyShlInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for a Shl, fold the result or return null.
Value * simplifyFNegInst(Value *Op, FastMathFlags FMF, const SimplifyQuery &Q)
Given operand for an FNeg, fold the result or return null.
Value * simplifyFSubInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FSub, fold the result or return null.
bool impliesPoison(const Value *ValAssumedPoison, const Value *V)
Return true if V is poison given that ValAssumedPoison is already poison.
Value * simplifyFRemInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FRem, fold the result or return null.
Value * simplifyFAddInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FAdd, fold the result or return null.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
bool PointerMayBeCaptured(const Value *V, bool ReturnCaptures, bool StoreCaptures, unsigned MaxUsesToExplore=0)
PointerMayBeCaptured - Return true if this pointer value may be captured by the enclosing function (w...
Value * simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q)
Given operands for a LShr, fold the result or return null.
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
Constant * ConstantFoldInstOperands(Instruction *I, ArrayRef< Constant * > Ops, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldInstOperands - Attempt to constant fold an instruction with the specified operands.
Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
Value * simplifyAndInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an And, fold the result or return null.
Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an ExtractValueInst, fold the result or return null.
Value * simplifyInsertValueInst(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an InsertValueInst, fold the result or return null.
Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
Value * simplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an ICmpInst, fold the result or return null.
Value * simplifyFDivInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FDiv, fold the result or return null.
bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr int PoisonMaskElem
Value * simplifyLoadInst(LoadInst *LI, Value *PtrOp, const SimplifyQuery &Q)
Given a load instruction and its pointer operand, fold the result or return null.
Value * simplifyFMAFMul(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for the multiplication of a FMA, fold the result or return null.
Value * simplifyConstrainedFPCall(CallBase *Call, const SimplifyQuery &Q)
Given a constrained FP intrinsic call, tries to compute its simplified version.
Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given values are known to be non-equal when defined.
@ Or
Bitwise or logical OR of integers.
Value * simplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a CmpInst, fold the result or return null.
Value * findScalarElement(Value *V, unsigned EltNo)
Given a vector and an element number, see if the scalar value is already around as a register,...
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
Value * simplifyUDivInst(Value *LHS, Value *RHS, bool IsExact, const SimplifyQuery &Q)
Given operands for a UDiv, fold the result or return null.
Value * simplifyBinaryIntrinsic(Intrinsic::ID IID, Type *ReturnType, Value *Op0, Value *Op1, const SimplifyQuery &Q, const CallBase *Call)
Given operands for a BinaryIntrinsic, fold the result or return null.
RoundingMode
Rounding mode.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
unsigned M0(unsigned Val)
Value * simplifyInsertElementInst(Value *Vec, Value *Elt, Value *Idx, const SimplifyQuery &Q)
Given operands for an InsertElement, fold the result or return null.
constexpr unsigned BitWidth
SelectPatternResult matchDecomposedSelectPattern(CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Determine the pattern that a select with the given compare as its predicate and given values as its t...
Value * simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp, const SimplifyQuery &Q, bool AllowRefinement, SmallVectorImpl< Instruction * > *DropFlags=nullptr)
See if V simplifies when its operand Op is replaced with RepOp.
bool maskIsAllZeroOrUndef(Value *Mask)
Given a mask vector of i1, Return true if all of the elements of this predicate mask are known to be ...
std::pair< Value *, FPClassTest > fcmpToClassTest(CmpInst::Predicate Pred, const Function &F, Value *LHS, Value *RHS, bool LookThroughSrc=true)
Returns a pair of values, which if passed to llvm.is.fpclass, returns the same result as an fcmp with...
Value * simplifySRemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an SRem, fold the result or return null.
void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
std::optional< bool > computeKnownFPSignBit(const Value *V, unsigned Depth, const SimplifyQuery &SQ)
Return false if we can prove that the specified FP value's sign bit is 0.
unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return the number of times the sign bit of the register is replicated into the other bits.
bool decomposeBitTestICmp(Value *LHS, Value *RHS, CmpInst::Predicate &Pred, Value *&X, APInt &Mask, bool LookThroughTrunc=true)
Decompose an icmp into the form ((X & Mask) pred 0) if possible.
bool cannotBeNegativeZero(const Value *V, unsigned Depth, const SimplifyQuery &SQ)
Return true if we can prove that the specified FP value is never equal to -0.0.
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
Constant * ConstantFoldInsertValueInstruction(Constant *Agg, Constant *Val, ArrayRef< unsigned > Idxs)
ConstantFoldInsertValueInstruction - Attempt to constant fold an insertvalue instruction with the spe...
Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset, const DataLayout &DL)
Return the value that a load from C with offset Offset would produce if it is constant and determinab...
std::optional< bool > isImpliedByDomCondition(const Value *Cond, const Instruction *ContextI, const DataLayout &DL)
Return the boolean condition value in the context of the given instruction if it is known based on do...
bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
KnownFPClass computeKnownFPClass(const Value *V, const APInt &DemandedElts, FPClassTest InterestedClasses, unsigned Depth, const SimplifyQuery &SQ)
Determine which floating-point classes are valid for V, and return them in KnownFPClass bit sets.
bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW=false, bool AllowPoison=true)
Return true if the two given values are negation.
Constant * ConstantFoldIntegerCast(Constant *C, Type *DestTy, bool IsSigned, const DataLayout &DL)
Constant fold a zext, sext or trunc, depending on IsSigned and whether the DestTy is wider or narrowe...
const SimplifyQuery getBestSimplifyQuery(Pass &, Function &)
Value * simplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q)
Given operands for an FCmpInst, fold the result or return null.
bool isCheckForZeroAndMulWithOverflow(Value *Op0, Value *Op1, bool IsAnd, Use *&Y)
Match one of the patterns up to the select/logic op: Op0 = icmp ne i4 X, 0 Agg = call { i4,...
bool canIgnoreSNaN(fp::ExceptionBehavior EB, FastMathFlags FMF)
Returns true if the possibility of a signaling NaN can be safely ignored.
Value * simplifyURemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a URem, fold the result or return null.
Value * simplifyExtractElementInst(Value *Vec, Value *Idx, const SimplifyQuery &Q)
Given operands for an ExtractElementInst, fold the result or return null.
Value * simplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal, const SimplifyQuery &Q)
Given operands for a SelectInst, fold the result or return null.
std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This callback is used in conjunction with PointerMayBeCaptured.
virtual void tooManyUses()=0
tooManyUses - The depth of traversal has breached a limit.
virtual bool captured(const Use *U)=0
captured - Information about the pointer was captured by the user of use U.
Incoming for lane maks phi as machine instruction, incoming register Reg and incoming block Block are...
InstrInfoQuery provides an interface to query additional information for instructions like metadata o...
bool isExact(const BinaryOperator *Op) const
MDNode * getMetadata(const Instruction *I, unsigned KindID) const
bool hasNoSignedWrap(const InstT *Op) const
bool hasNoUnsignedWrap(const InstT *Op) const
bool isNonNegative() const
Returns true if this value is known to be non-negative.
bool isZero() const
Returns true if value is all zero.
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
bool hasConflict() const
Returns true if there is conflicting information.
unsigned getBitWidth() const
Get the bit width of this value.
unsigned countMaxActiveBits() const
Returns the maximum number of bits needed to represent all possible unsigned values with these known ...
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
bool isNegative() const
Returns true if this value is known to be negative.
static KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
bool isKnownAlwaysNaN() const
Return true if it's known this must always be a nan.
static constexpr FPClassTest OrderedLessThanZeroMask
std::optional< bool > SignBit
std::nullopt if the sign bit is unknown, true if the sign bit is definitely set or false if the sign ...
bool isKnownNeverNaN() const
Return true if it's known this can never be a nan.
bool isKnownNever(FPClassTest Mask) const
Return true if it's known this can never be one of the mask entries.
bool cannotBeOrderedLessThanZero() const
Return true if we can prove that the analyzed floating-point value is either NaN or never less than -...
The adaptor from a function pass to a loop pass computes these analyses and makes them available to t...
Various options to control the behavior of getObjectSize.
bool NullIsUnknownSize
If this is true, null pointers in address space 0 will be treated as though they can't be evaluated.
Mode EvalMode
How we want to evaluate this object's size.
SelectPatternFlavor Flavor
static bool isMinOrMax(SelectPatternFlavor SPF)
When implementing this min/max pattern as fcmp; select, does the fcmp have to be ordered?
bool CanUseUndef
Controls whether simplifications are allowed to constrain the range of possible values for uses of un...
SimplifyQuery getWithInstruction(const Instruction *I) const
bool isUndefValue(Value *V) const
If CanUseUndef is true, returns whether V is undef.
const TargetLibraryInfo * TLI
SimplifyQuery getWithoutUndef() const