50#define DEBUG_TYPE "instsimplify"
94 CmpInst *Cmp = dyn_cast<CmpInst>(V);
98 Value *CLHS = Cmp->getOperand(0), *CRHS = Cmp->getOperand(1);
99 if (CPred == Pred && CLHS == LHS && CRHS == RHS)
112 unsigned MaxRecurse, Constant *TrueOrFalse) {
114 if (SimplifiedCmp == Cond) {
122 return SimplifiedCmp;
128 unsigned MaxRecurse) {
136 unsigned MaxRecurse) {
146 unsigned MaxRecurse) {
183 if (I->getParent()->isEntryBlock() && !isa<InvokeInst>(I) &&
196 auto *B = dyn_cast<BinaryOperator>(V);
197 if (!B || B->getOpcode() != OpcodeToExpand)
199 Value *B0 = B->getOperand(0), *B1 = B->getOperand(1);
210 if ((L == B0 && R == B1) ||
231 unsigned MaxRecurse) {
248 unsigned MaxRecurse) {
351 unsigned MaxRecurse) {
357 if (isa<SelectInst>(LHS)) {
358 SI = cast<SelectInst>(LHS);
360 assert(isa<SelectInst>(RHS) && "No select instruction operand!");
361 SI = cast<SelectInst>(RHS);
388 if (TV == SI->getTrueValue() && FV == SI->getFalseValue())
394 if ((FV && !TV) || (TV && !FV)) {
397 Instruction *Simplified = dyn_cast<Instruction>(FV ? FV : TV);
398 if (Simplified && Simplified->getOpcode() == unsigned(Opcode) &&
399 !Simplified->hasPoisonGeneratingFlags()) {
403 Value *UnsimplifiedBranch = FV ? SI->getTrueValue() : SI->getFalseValue();
404 Value *UnsimplifiedLHS = SI == LHS ? UnsimplifiedBranch : LHS;
405 Value *UnsimplifiedRHS = SI == LHS ? RHS : UnsimplifiedBranch;
406 if (Simplified->getOperand(0) == UnsimplifiedLHS &&
407 Simplified->getOperand(1) == UnsimplifiedRHS)
409 if (Simplified->isCommutative() &&
410 Simplified->getOperand(1) == UnsimplifiedLHS &&
411 Simplified->getOperand(0) == UnsimplifiedRHS)
435 if (!isa<SelectInst>(LHS)) {
439 assert(isa<SelectInst>(LHS) && "Not comparing with a select instruction!");
442 Value *TV = SI->getTrueValue();
443 Value *FV = SI->getFalseValue();
475 unsigned MaxRecurse) {
481 if (isa<PHINode>(LHS)) {
482 PI = cast<PHINode>(LHS);
487 assert(isa<PHINode>(RHS) && "No PHI instruction operand!");
488 PI = cast<PHINode>(RHS);
495 Value *CommonValue = nullptr;
508 if (!V || (CommonValue && V != CommonValue))
527 if (!isa<PHINode>(LHS)) {
531 assert(isa<PHINode>(LHS) && "Not comparing with a phi instruction!");
539 Value *CommonValue = nullptr;
553 if (!V || (CommonValue && V != CommonValue))
564 if (auto *CLHS = dyn_cast<Constant>(Op0)) {
565 if (auto *CRHS = dyn_cast<Constant>(Op1)) {
569 case Instruction::FAdd:
570 case Instruction::FSub:
571 case Instruction::FMul:
572 case Instruction::FDiv:
573 case Instruction::FRem:
574 if (Q.CxtI != nullptr)
595 if (isa<PoisonValue>(Op1))
658 return ::simplifyAddInst(Op0, Op1, IsNSW, IsNUW, Query, RecursionLimit);
671 bool AllowNonInbounds = false) {
672 assert(V->getType()->isPtrOrPtrVectorTy());
675 V = V->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds);
678 return Offset.sextOrTrunc(DL.getIndexTypeSizeInBits(V->getType()));
698 if (auto *VecTy = dyn_cast<VectorType>(LHS->getType()))
713 std::optional<bool> Imp =
718 case Instruction::Sub:
719 case Instruction::Xor:
720 case Instruction::URem:
721 case Instruction::SRem:
724 case Instruction::SDiv:
725 case Instruction::UDiv:
726 return ConstantInt::get(Ty, 1);
728 case Instruction::And:
729 case Instruction::Or:
748 if (isa<PoisonValue>(Op0) || isa<PoisonValue>(Op1))
784 Value *X = nullptr, *Y = nullptr, *Z = Op1;
842 if (X->getType() == Y->getType())
879 return ::simplifySubInst(Op0, Op1, IsNSW, IsNUW, Q, RecursionLimit);
890 if (isa<PoisonValue>(Op1))
914 return ConstantInt::getNullValue(Op0->getType());
929 Instruction::Add, Q, MaxRecurse))
934 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
941 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
951 return ::simplifyMulInst(Op0, Op1, IsNSW, IsNUW, Q, RecursionLimit);
960 Constant *C = dyn_cast_or_null<Constant>(V);
961 return (C && C->isAllOnesValue());
967 unsigned MaxRecurse, bool IsSigned) {
984 Type *Ty = X->getType();
990 Constant *PosDividendC = ConstantInt::get(Ty, C->abs());
991 Constant *NegDividendC = ConstantInt::get(Ty, -C->abs());
1000 if (C->isMinSignedValue())
1006 Constant *PosDivisorC = ConstantInt::get(Ty, C->abs());
1007 Constant *NegDivisorC = ConstantInt::get(Ty, -C->abs());
1027 return isICmpTrue(ICmpInst::ICMP_ULT, X, Y, Q, MaxRecurse);
1034 unsigned MaxRecurse) {
1035 bool IsDiv = (Opcode == Instruction::SDiv || Opcode == Instruction::UDiv);
1036 bool IsSigned = (Opcode == Instruction::SDiv || Opcode == Instruction::SRem);
1053 if (isa<PoisonValue>(Op0))
1093 auto *Mul = cast<OverflowingBinaryOperator>(Op0);
1104 if (isDivZero(Op0, Op1, Q, MaxRecurse, IsSigned))
1112 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1118 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1128 unsigned MaxRecurse) {
1151 (Opcode == Instruction::UDiv
1171 if ((Opcode == Instruction::SRem &&
1173 (Opcode == Instruction::URem &&
1181 if (Opcode == Instruction::SRem
1184 return C.srem(*C0).isZero();
1188 return C.urem(*C0).isZero();
1204 return simplifyDiv(Instruction::SDiv, Op0, Op1, IsExact, Q, MaxRecurse);
1216 return simplifyDiv(Instruction::UDiv, Op0, Op1, IsExact, Q, MaxRecurse);
1227 unsigned MaxRecurse) {
1232 return ConstantInt::getNullValue(Op0->getType());
1236 return ConstantInt::getNullValue(Op0->getType());
1238 return simplifyRem(Instruction::SRem, Op0, Op1, Q, MaxRecurse);
1248 unsigned MaxRecurse) {
1249 return simplifyRem(Instruction::URem, Op0, Op1, Q, MaxRecurse);
1258 Constant *C = dyn_cast<Constant>(Amount);
1268 const APInt *AmountC;
1274 if (isa<ConstantVector>(C) || isa<ConstantDataVector>(C)) {
1275 for (unsigned I = 0,
1276 E = cast<FixedVectorType>(C->getType())->getNumElements();
1290 unsigned MaxRecurse) {
1295 if (isa<PoisonValue>(Op0))
1316 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1322 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1340 assert(Opcode == Instruction::Shl && "Expected shl for nsw instruction");
1359 Value *Op1, bool IsExact,
1378 if (Op0Known.One[0])
1390 simplifyShift(Instruction::Shl, Op0, Op1, IsNSW, Q, MaxRecurse))
1414 if (IsNSW && IsNUW &&
1423 return ::simplifyShlInst(Op0, Op1, IsNSW, IsNUW, Q, RecursionLimit);
1445 const APInt *ShRAmt, *ShLAmt;
1448 *ShRAmt == *ShLAmt) {
1451 if (ShRAmt->uge(EffWidthY))
1499 ICmpInst *UnsignedICmp, bool IsAnd,
1513 if (match(UnsignedICmp,
1515 ICmpInst::isUnsigned(UnsignedPred)) {
1517 if ((UnsignedPred == ICmpInst::ICMP_UGE ||
1518 UnsignedPred == ICmpInst::ICMP_ULE) &&
1519 EqPred == ICmpInst::ICMP_NE && !IsAnd)
1522 if ((UnsignedPred == ICmpInst::ICMP_ULT ||
1523 UnsignedPred == ICmpInst::ICMP_UGT) &&
1524 EqPred == ICmpInst::ICMP_EQ && IsAnd)
1529 if (EqPred == ICmpInst::ICMP_NE && (UnsignedPred == ICmpInst::ICMP_ULT ||
1530 UnsignedPred == ICmpInst::ICMP_UGT))
1531 return IsAnd ? UnsignedICmp : ZeroICmp;
1535 if (EqPred == ICmpInst::ICMP_EQ && (UnsignedPred == ICmpInst::ICMP_ULE ||
1536 UnsignedPred == ICmpInst::ICMP_UGE))
1537 return IsAnd ? ZeroICmp : UnsignedICmp;
1543 if (match(UnsignedICmp,
1545 if (UnsignedPred == ICmpInst::ICMP_UGE && IsAnd &&
1547 return UnsignedICmp;
1548 if (UnsignedPred == ICmpInst::ICMP_ULT && !IsAnd &&
1550 return UnsignedICmp;
1555 ICmpInst::isUnsigned(UnsignedPred))
1557 else if (match(UnsignedICmp,
1559 ICmpInst::isUnsigned(UnsignedPred))
1560 UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred);
1566 if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ &&
1568 return IsAnd ? ZeroICmp : UnsignedICmp;
1572 if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE &&
1574 return IsAnd ? UnsignedICmp : ZeroICmp;
1583 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE)
1584 return IsAnd ? UnsignedICmp : ZeroICmp;
1588 if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_EQ)
1589 return IsAnd ? ZeroICmp : UnsignedICmp;
1592 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_EQ &&
1597 if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_NE &&
1613 const APInt *C0, *C1;
1623 if (IsAnd && Range0.intersectWith(Range1).isEmptySet())
1628 if (!IsAnd && Range0.unionWith(Range1).isFullSet())
1636 if (Range0.contains(Range1))
1637 return IsAnd ? Cmp1 : Cmp0;
1638 if (Range1.contains(Range0))
1639 return IsAnd ? Cmp0 : Cmp1;
1648 const APInt *C0, *C1;
1656 auto *AddInst = cast<OverflowingBinaryOperator>(Op0->getOperand(0));
1657 if (AddInst->getOperand(1) != Op1->getOperand(1))
1664 const APInt Delta = *C1 - *C0;
1667 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_SGT)
1669 if (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT && IsNSW)
1673 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_SGT)
1675 if (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGT && IsNSW)
1681 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT)
1684 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGT)
1703 if (!IsAnd && Pred0 == ICmpInst::ICMP_EQ && Pred1 == ICmpInst::ICMP_NE)
1706 if (IsAnd && Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_EQ)
1739 const APInt *C0, *C1;
1747 auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
1748 if (AddInst->getOperand(1) != Op1->getOperand(1))
1755 const APInt Delta = *C1 - *C0;
1758 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE)
1760 if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && IsNSW)
1764 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE)
1766 if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && IsNSW)
1772 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE)
1775 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE)
1807 Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
1808 Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
1814 if ((PredL == FCmpInst::FCMP_ORD || PredL == FCmpInst::FCMP_UNO) &&
1815 ((FCmpInst::isOrdered(PredR) && IsAnd) ||
1816 (FCmpInst::isUnordered(PredR) && !IsAnd))) {
1821 if ((match(RHS0, AbsOrSelfLHS0) || match(RHS1, AbsOrSelfLHS0)) &&
1823 return FCmpInst::isOrdered(PredL) == FCmpInst::isOrdered(PredR)
1829 if ((PredR == FCmpInst::FCMP_ORD || PredR == FCmpInst::FCMP_UNO) &&
1830 ((FCmpInst::isOrdered(PredL) && IsAnd) ||
1831 (FCmpInst::isUnordered(PredL) && !IsAnd))) {
1836 if ((match(LHS0, AbsOrSelfRHS0) || match(LHS1, AbsOrSelfRHS0)) &&
1838 return FCmpInst::isOrdered(PredL) == FCmpInst::isOrdered(PredR)
1847 Value *Op1, bool IsAnd) {
1849 auto *Cast0 = dyn_cast<CastInst>(Op0);
1850 auto *Cast1 = dyn_cast<CastInst>(Op1);
1851 if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() &&
1852 Cast0->getSrcTy() == Cast1->getSrcTy()) {
1853 Op0 = Cast0->getOperand(0);
1854 Op1 = Cast1->getOperand(0);
1858 auto *ICmp0 = dyn_cast<ICmpInst>(Op0);
1859 auto *ICmp1 = dyn_cast<ICmpInst>(Op1);
1864 auto *FCmp0 = dyn_cast<FCmpInst>(Op0);
1865 auto *FCmp1 = dyn_cast<FCmpInst>(Op1);
1876 if (auto *C = dyn_cast<Constant>(V))
1885 bool AllowRefinement,
1887 unsigned MaxRecurse);
1891 unsigned MaxRecurse) {
1892 assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
1907 (Opcode == Instruction::And ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE)) {
1908 if (Res == Absorber)
1918 if (Res == Absorber)
1928 nullptr, MaxRecurse))
1929 return Simplify(Res);
1932 nullptr, MaxRecurse))
1933 return Simplify(Res);
1943 assert(BinaryOperator::isBitwiseLogicOp(Opcode) && "Expected logic op");
1955 return Opcode == Instruction::And ? ConstantInt::getNullValue(Ty)
1956 : ConstantInt::getAllOnesValue(Ty);
1965 unsigned MaxRecurse) {
1999 const APInt *Shift1, *Shift2;
2004 Shift1->uge(*Shift2))
2017 unsigned MaxRecurse) {
2022 if (isa<PoisonValue>(Op1))
2057 (~(*Mask)).lshr(*ShAmt).isZero())
2063 (~(*Mask)).shl(*ShAmt).isZero())
2068 const APInt *PowerC;
2077 return ConstantInt::getNullValue(Op1->getType());
2090 Instruction::Or, Q, MaxRecurse))
2095 Instruction::Xor, Q, MaxRecurse))
2098 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) {
2116 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
2140 if (EffWidthY <= ShftCnt) {
2173 if (*Implied == true)
2176 if (*Implied == false)
2201 assert(X->getType() == Y->getType() && "Expected same type for 'or' ops");
2202 Type *Ty = X->getType();
2206 return ConstantInt::getAllOnesValue(Ty);
2210 return ConstantInt::getAllOnesValue(Ty);
2228 return ConstantInt::getAllOnesValue(Ty);
2252 return ConstantInt::getAllOnesValue(Ty);
2292 unsigned MaxRecurse) {
2297 if (isa<PoisonValue>(Op1))
2331 C->ule(X->getType()->getScalarSizeInBits())) {
2332 return ConstantInt::getAllOnesValue(X->getType());
2386 Instruction::And, Q, MaxRecurse))
2389 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) {
2407 const APInt *C1, *C2;
2433 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
2443 if (std::optional<bool> Implied =
2446 if (*Implied == false)
2449 if (*Implied == true)
2452 if (std::optional<bool> Implied =
2455 if (*Implied == false)
2458 if (*Implied == true)
2476 unsigned MaxRecurse) {
2481 if (isa<PoisonValue>(Op1))
2518 if (Value *R = foldAndOrNot(Op0, Op1))
2520 if (Value *R = foldAndOrNot(Op1, Op0))
2562 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
2565 Value *CmpLHS = Cmp->getOperand(0), *CmpRHS = Cmp->getOperand(1);
2566 if (Pred == Cmp->getPredicate() && LHS == CmpLHS && RHS == CmpRHS)
2569 LHS == CmpRHS && RHS == CmpLHS)
2582 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
2583 return AI->isStaticAlloca();
2584 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
2585 return (GV->hasLocalLinkage() || GV->hasHiddenVisibility() ||
2586 GV->hasProtectedVisibility() || GV->hasGlobalUnnamedAddr()) &&
2587 !GV->isThreadLocal();
2588 if (const Argument *A = dyn_cast<Argument>(V))
2589 return A->hasByValAttr();
2622 auto isByValArg = [](const Value *V) {
2623 const Argument *A = dyn_cast<Argument>(V);
2624 return A && A->hasByValAttr();
2630 return isa<AllocaInst>(V2) || isa<GlobalVariable>(V2) || isByValArg(V2);
2632 return isa<AllocaInst>(V1) || isa<GlobalVariable>(V1) || isByValArg(V1);
2634 return isa<AllocaInst>(V1) &&
2635 (isa<AllocaInst>(V2) || isa<GlobalVariable>(V2));
2704 unsigned IndexSize = DL.getIndexTypeSizeInBits(LHS->getType());
2705 APInt LHSOffset(IndexSize, 0), RHSOffset(IndexSize, 0);
2725 Opts.EvalMode = ObjectSizeOpts::Mode::Min;
2727 if (auto *I = dyn_cast<Instruction>(V))
2728 return I->getFunction();
2729 if (auto *A = dyn_cast<Argument>(V))
2730 return A->getParent();
2736 APInt Dist = LHSOffset - RHSOffset;
2764 if ((IsNAC(LHSUObjs) && IsAllocDisjoint(RHSUObjs)) ||
2765 (IsNAC(RHSUObjs) && IsAllocDisjoint(LHSUObjs)))
2785 bool Captured = false;
2788 if (auto *ICmp = dyn_cast<ICmpInst>(U->getUser())) {
2792 unsigned OtherIdx = 1 - U->getOperandNo();
2793 auto *LI = dyn_cast<LoadInst>(ICmp->getOperand(OtherIdx));
2794 if (LI && isa<GlobalVariable>(LI->getPointerOperand()))
2802 CustomCaptureTracker Tracker;
2804 if (!Tracker.Captured)
2826 auto ExtractNotLHS = [](Value *V) -> Value * {
2888 case ICmpInst::ICMP_UGE:
2892 case ICmpInst::ICMP_SGE:
2903 case ICmpInst::ICMP_ULE:
2907 case ICmpInst::ICMP_SLE:
2927 case ICmpInst::ICMP_ULT:
2929 case ICmpInst::ICMP_UGE:
2931 case ICmpInst::ICMP_EQ:
2932 case ICmpInst::ICMP_ULE:
2936 case ICmpInst::ICMP_NE:
2937 case ICmpInst::ICMP_UGT:
2941 case ICmpInst::ICMP_SLT: {
2949 case ICmpInst::ICMP_SLE: {
2957 case ICmpInst::ICMP_SGE: {
2965 case ICmpInst::ICMP_SGT: {
3018 *MulC != 0 && C->urem(*MulC) != 0) ||
3020 *MulC != 0 && C->srem(*MulC) != 0)))
3021 return ConstantInt::get(ITy, Pred == ICmpInst::ICMP_NE);
3031 if (!Res.insert(V).second)
3038 auto *I = dyn_cast<Instruction>(V);
3051 switch (I->getOpcode()) {
3052 case Instruction::And:
3056 case Instruction::URem:
3057 case Instruction::UDiv:
3058 case Instruction::LShr:
3061 case Instruction::Call:
3073 if (Pred != ICmpInst::ICMP_UGE && Pred != ICmpInst::ICMP_ULT)
3082 for (Value *GV : GreaterValues)
3085 Pred == ICmpInst::ICMP_UGE);
3091 unsigned MaxRecurse) {
3097 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGE) {
3112 case ICmpInst::ICMP_SGT:
3113 case ICmpInst::ICMP_SGE: {
3119 case ICmpInst::ICMP_EQ:
3120 case ICmpInst::ICMP_UGT:
3121 case ICmpInst::ICMP_UGE:
3123 case ICmpInst::ICMP_SLT:
3124 case ICmpInst::ICMP_SLE: {
3130 case ICmpInst::ICMP_NE:
3131 case ICmpInst::ICMP_ULT:
3132 case ICmpInst::ICMP_ULE:
3154 case ICmpInst::ICMP_EQ:
3155 case ICmpInst::ICMP_UGE:
3156 case ICmpInst::ICMP_UGT:
3158 case ICmpInst::ICMP_NE:
3159 case ICmpInst::ICMP_ULT:
3160 case ICmpInst::ICMP_ULE:
3175 const APInt *C1, *C2;
3182 if (Pred == ICmpInst::ICMP_UGT)
3184 if (Pred == ICmpInst::ICMP_ULE)
3222 const APInt *C1, *C2;
3236 unsigned MaxRecurse) {
3239 if (MaxRecurse && (LBO || RBO)) {
3241 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
3243 bool NoLHSWrapProblem = false, NoRHSWrapProblem = false;
3244 if (LBO && LBO->getOpcode() == Instruction::Add) {
3254 if (RBO && RBO->getOpcode() == Instruction::Add) {
3266 if ((A == RHS || B == RHS) && NoLHSWrapProblem)
3273 if ((C == LHS || D == LHS) && NoRHSWrapProblem)
3276 C == LHS ? D : C, Q, MaxRecurse - 1))
3280 bool CanSimplify = (NoLHSWrapProblem && NoRHSWrapProblem) ||
3282 if (A && C && (A == C || A == D || B == C || B == D) && CanSimplify) {
3289 } else if (A == D) {
3293 } else if (B == C) {
3314 ICmpInst::getSwappedPredicate(Pred), RBO, LHS, Q, MaxRecurse))
3321 if (C->isStrictlyPositive()) {
3322 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_NE)
3324 if (Pred == ICmpInst::ICMP_SGE || Pred == ICmpInst::ICMP_EQ)
3327 if (C->isNonNegative()) {
3328 if (Pred == ICmpInst::ICMP_SLE)
3330 if (Pred == ICmpInst::ICMP_SGT)
3353 if (Pred == ICmpInst::ICMP_EQ)
3355 if (Pred == ICmpInst::ICMP_NE)
3364 if (Pred == ICmpInst::ICMP_UGT)
3366 if (Pred == ICmpInst::ICMP_ULE)
3377 case Instruction::Shl: {
3380 if (!NUW || (ICmpInst::isSigned(Pred) && !NSW) ||
3393 case Instruction::And:
3394 case Instruction::Or: {
3395 const APInt *C1, *C2;
3401 Pred = ICmpInst::getSwappedPredicate(Pred);
3404 if (Pred == ICmpInst::ICMP_ULE)
3406 if (Pred == ICmpInst::ICMP_UGT)
3409 if (Pred == ICmpInst::ICMP_SLE)
3411 if (Pred == ICmpInst::ICMP_SGT)
3425 case Instruction::UDiv:
3426 case Instruction::LShr:
3427 if (ICmpInst::isSigned(Pred) || !Q.IIQ.isExact(LBO) ||
3434 case Instruction::SDiv:
3442 case Instruction::AShr:
3449 case Instruction::Shl: {
3454 if (!NSW && ICmpInst::isSigned(Pred))
3470 unsigned MaxRecurse) {
3626 Pred = ICmpInst::getSwappedPredicate(Pred);
3632 (A == C || A == D || B == C || B == D)) {
3641 (A == C || A == D || B == C || B == D)) {
3665 CallInst *Assume = cast<CallInst>(AssumeVH);
3667 Assume->getArgOperand(0), Predicate, LHS, RHS, Q.DL))
3678 auto *II = dyn_cast<IntrinsicInst>(LHS);
3682 switch (II->getIntrinsicID()) {
3683 case Intrinsic::uadd_sat:
3687 if (Pred == ICmpInst::ICMP_UGE)
3689 if (Pred == ICmpInst::ICMP_ULT)
3693 case Intrinsic::usub_sat:
3697 if (Pred == ICmpInst::ICMP_ULE)
3699 if (Pred == ICmpInst::ICMP_UGT)
3715 if (const Argument *A = dyn_cast<Argument>(V))
3716 return A->getRange();
3717 else if (const CallBase *CB = dyn_cast<CallBase>(V))
3718 return CB->getRange();
3720 return std::nullopt;
3737 assert(!isa<UndefValue>(LHS) && "Unexpected icmp undef,%X");
3742 if (isa<PoisonValue>(RHS))
3771 if (LhsCr->icmp(Pred, *RhsCr))
3779 if (isa<CastInst>(LHS) && (isa<Constant>(RHS) || isa<CastInst>(RHS))) {
3787 if (MaxRecurse && isa<PtrToIntInst>(LI) &&
3796 if (RI->getOperand(0)->getType() == SrcTy)
3804 if (isa<ZExtInst>(LHS)) {
3808 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3812 RI->getOperand(0), Q, MaxRecurse - 1))
3816 else if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
3817 if (SrcOp == RI->getOperand(0)) {
3818 if (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_SGE)
3820 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_SLT)
3834 assert(Trunc && "Constant-fold of ImmConstant should not fail");
3837 assert(RExt && "Constant-fold of ImmConstant should not fail");
3840 assert(AnyEq && "Constant-fold of ImmConstant should not fail");
3847 SrcOp, Trunc, Q, MaxRecurse - 1))
3857 case ICmpInst::ICMP_EQ:
3858 case ICmpInst::ICMP_UGT:
3859 case ICmpInst::ICMP_UGE:
3862 case ICmpInst::ICMP_NE:
3863 case ICmpInst::ICMP_ULT:
3864 case ICmpInst::ICMP_ULE:
3869 case ICmpInst::ICMP_SGT:
3870 case ICmpInst::ICMP_SGE:
3874 case ICmpInst::ICMP_SLT:
3875 case ICmpInst::ICMP_SLE:
3884 if (isa<SExtInst>(LHS)) {
3888 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3895 else if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
3896 if (SrcOp == RI->getOperand(0)) {
3897 if (Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_SLE)
3899 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SGT)
3912 assert(Trunc && "Constant-fold of ImmConstant should not fail");
3915 assert(RExt && "Constant-fold of ImmConstant should not fail");
3918 assert(AnyEq && "Constant-fold of ImmConstant should not fail");
3933 case ICmpInst::ICMP_EQ:
3935 case ICmpInst::ICMP_NE:
3940 case ICmpInst::ICMP_SGT:
3941 case ICmpInst::ICMP_SGE:
3945 case ICmpInst::ICMP_SLT:
3946 case ICmpInst::ICMP_SLE:
3953 case ICmpInst::ICMP_UGT:
3954 case ICmpInst::ICMP_UGE:
3962 case ICmpInst::ICMP_ULT:
3963 case ICmpInst::ICMP_ULE:
3994 ICmpInst::getSwappedPredicate(Pred), RHS, LHS))
4000 ICmpInst::getSwappedPredicate(Pred), RHS, LHS))
4006 if (std::optional<bool> Res =
4015 if (auto *CLHS = dyn_cast<PtrToIntOperator>(LHS))
4016 if (auto *CRHS = dyn_cast<PtrToIntOperator>(RHS))
4017 if (CLHS->getPointerOperandType() == CRHS->getPointerOperandType() &&
4021 CRHS->getPointerOperand(), Q))
4026 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
4032 if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
4048 unsigned MaxRecurse) {
4063 if (Pred == FCmpInst::FCMP_FALSE)
4065 if (Pred == FCmpInst::FCMP_TRUE)
4070 if (isa<PoisonValue>(LHS) || isa<PoisonValue>(RHS))
4093 if (Pred == FCmpInst::FCMP_ORD || Pred == FCmpInst::FCMP_UNO) {
4101 return ConstantInt::get(RetTy, Pred == FCmpInst::FCMP_ORD);
4109 std::optional<KnownFPClass> FullKnownClassLHS;
4113 auto computeLHSClass = [=, &FullKnownClassLHS](FPClassTest InterestedFlags =
4115 if (FullKnownClassLHS)
4116 return *FullKnownClassLHS;
4129 FullKnownClassLHS = computeLHSClass();
4130 if ((FullKnownClassLHS->KnownFPClasses & ClassTest) == fcNone)
4132 if ((FullKnownClassLHS->KnownFPClasses & ~ClassTest) == fcNone)
4147 if (C->isNegative() && !C->isNegZero()) {
4153 case FCmpInst::FCMP_UGE:
4154 case FCmpInst::FCMP_UGT:
4155 case FCmpInst::FCMP_UNE: {
4163 case FCmpInst::FCMP_OEQ:
4164 case FCmpInst::FCMP_OLE:
4165 case FCmpInst::FCMP_OLT: {
4184 cast<IntrinsicInst>(LHS)->getIntrinsicID() == Intrinsic::maxnum;
4188 case FCmpInst::FCMP_OEQ:
4189 case FCmpInst::FCMP_UEQ:
4193 case FCmpInst::FCMP_ONE:
4194 case FCmpInst::FCMP_UNE:
4198 case FCmpInst::FCMP_OGE:
4199 case FCmpInst::FCMP_UGE:
4200 case FCmpInst::FCMP_OGT:
4201 case FCmpInst::FCMP_UGT:
4206 return ConstantInt::get(RetTy, IsMaxNum);
4207 case FCmpInst::FCMP_OLE:
4208 case FCmpInst::FCMP_ULE:
4209 case FCmpInst::FCMP_OLT:
4210 case FCmpInst::FCMP_ULT:
4215 return ConstantInt::get(RetTy, !IsMaxNum);
4227 case FCmpInst::FCMP_OGE:
4228 case FCmpInst::FCMP_ULT: {
4231 Interested |= fcNan;
4242 case FCmpInst::FCMP_UGE:
4243 case FCmpInst::FCMP_OLT: {
4260 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
4266 if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
4280 bool AllowRefinement,
4282 unsigned MaxRecurse) {
4284 "If AllowRefinement=false then CanUseUndef=false");
4294 if (isa<Constant>(Op))
4297 auto *I = dyn_cast<Instruction>(V);
4303 if (isa<PHINode>(I))
4312 if (match(I, m_Intrinsic<Intrinsic::is_constant>()))
4316 if (isa<FreezeInst>(I))
4321 bool AnyReplaced = false;
4322 for (Value *InstOp : I->operands()) {
4324 InstOp, Op, RepOp, Q, AllowRefinement, DropFlags, MaxRecurse)) {
4326 AnyReplaced = InstOp != NewInstOp;
4340 if (!AllowRefinement) {
4345 if (auto *BO = dyn_cast<BinaryOperator>(I)) {
4346 unsigned Opcode = BO->getOpcode();
4349 if (!BO->getType()->isFPOrFPVectorTy()) {
4358 if ((Opcode == Instruction::And || Opcode == Instruction::Or) &&
4359 NewOps[0] == NewOps[1]) {
4361 if (auto *PDI = dyn_cast<PossiblyDisjointInst>(BO)) {
4362 if (PDI->isDisjoint()) {
4374 if ((Opcode == Instruction::Sub || Opcode == Instruction::Xor) &&
4375 NewOps[0] == RepOp && NewOps[1] == RepOp)
4387 if ((NewOps[0] == Absorber || NewOps[1] == Absorber) &&
4392 if (isa<GetElementPtrInst>(I)) {
4408 auto PreventSelfSimplify = [V](Value *Simplified) {
4409 return Simplified != V ? Simplified : nullptr;
4412 return PreventSelfSimplify(
4419 for (Value *NewOp : NewOps) {
4420 if (Constant *ConstOp = dyn_cast<Constant>(NewOp))
4435 if (!AllowRefinement) {
4438 if (auto *II = dyn_cast<IntrinsicInst>(I);
4439 II && II->getIntrinsicID() == Intrinsic::abs) {
4440 if (!ConstOps[0]->isNotMinSignedValue())
4447 if (DropFlags && Res && I->hasPoisonGeneratingAnnotations())
4458 bool AllowRefinement,
4462 if (!AllowRefinement)
4465 return ::simplifyWithOpReplaced(V, Op, RepOp, Q, AllowRefinement, DropFlags,
4472 const APInt *Y, bool TrueWhenUnset) {
4479 return TrueWhenUnset ? FalseVal : TrueVal;
4485 return TrueWhenUnset ? FalseVal : TrueVal;
4487 if (Y->isPowerOf2()) {
4493 if (TrueWhenUnset && cast<PossiblyDisjointInst>(TrueVal)->isDisjoint())
4495 return TrueWhenUnset ? TrueVal : FalseVal;
4503 if (!TrueWhenUnset && cast<PossiblyDisjointInst>(FalseVal)->isDisjoint())
4505 return TrueWhenUnset ? TrueVal : FalseVal;
4516 if (CmpRHS == TVal || CmpRHS == FVal) {
4518 Pred = ICmpInst::getSwappedPredicate(Pred);
4522 if (CmpLHS == FVal) {
4524 Pred = ICmpInst::getInversePredicate(Pred);
4529 Value *X = CmpLHS, *Y = CmpRHS;
4530 bool PeekedThroughSelectShuffle = false;
4531 auto *Shuf = dyn_cast<ShuffleVectorInst>(FVal);
4532 if (Shuf && Shuf->isSelect()) {
4533 if (Shuf->getOperand(0) == Y)
4534 FVal = Shuf->getOperand(1);
4535 else if (Shuf->getOperand(1) == Y)
4536 FVal = Shuf->getOperand(0);
4539 PeekedThroughSelectShuffle = true;
4543 auto *MMI = dyn_cast<MinMaxIntrinsic>(FVal);
4544 if (!MMI || TVal != X ||
4562 if (PeekedThroughSelectShuffle)
4591 Res->Pred == ICmpInst::ICMP_EQ);
4601 unsigned MaxRecurse) {
4602 Value *SimplifiedFalseVal =
4605 nullptr, MaxRecurse);
4606 if (!SimplifiedFalseVal)
4607 SimplifiedFalseVal = FalseVal;
4609 Value *SimplifiedTrueVal =
4612 nullptr, MaxRecurse);
4613 if (!SimplifiedTrueVal)
4614 SimplifiedTrueVal = TrueVal;
4616 if (SimplifiedFalseVal == SimplifiedTrueVal)
4627 unsigned MaxRecurse) {
4629 Value *CmpLHS, *CmpRHS;
4637 if (Pred == ICmpInst::ICMP_NE) {
4638 Pred = ICmpInst::ICMP_EQ;
4645 if (TrueVal->getType()->isIntOrIntVectorTy()) {
4653 X->getType()->getScalarSizeInBits());
4659 if (Pred == ICmpInst::ICMP_EQ && match(CmpRHS, m_Zero())) {
4673 if (match(TrueVal, isFsh) && FalseVal == X && CmpLHS == ShAmt)
4686 if (match(FalseVal, isRotate) && TrueVal == X && CmpLHS == ShAmt &&
4687 Pred == ICmpInst::ICMP_EQ)
4692 if (match(TrueVal, m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS))) &&
4709 if (Pred == ICmpInst::ICMP_EQ) {
4711 FalseVal, Q, MaxRecurse))
4714 FalseVal, Q, MaxRecurse))
4751 unsigned MaxRecurse) {
4753 Value *CmpLHS, *CmpRHS;
4758 bool IsEquiv = I->isEquivalence();
4759 if (I->isEquivalence(true)) {
4761 Pred = FCmpInst::getInversePredicate(Pred);
4777 if (CmpLHS == F && CmpRHS == T)
4780 if (CmpLHS != T || CmpRHS != F)
4786 if (Pred == FCmpInst::FCMP_OEQ)
4790 if (Pred == FCmpInst::FCMP_UNE)
4801 if (auto *CondC = dyn_cast<Constant>(Cond)) {
4802 if (auto *TrueC = dyn_cast<Constant>(TrueVal))
4803 if (auto *FalseC = dyn_cast<Constant>(FalseVal))
4808 if (isa<PoisonValue>(CondC))
4813 return isa<Constant>(FalseVal) ? FalseVal : TrueVal;
4825 assert(Cond->getType()->isIntOrIntVectorTy(1) &&
4826 "Select must have bool or bool vector condition");
4827 assert(TrueVal->getType() == FalseVal->getType() &&
4828 "Select must have same types for true/false ops");
4830 if (Cond->getType() == TrueVal->getType()) {
4893 if (TrueVal == FalseVal)
4896 if (Cond == TrueVal) {
4904 if (Cond == FalseVal) {
4918 if (isa<PoisonValue>(TrueVal) ||
4923 if (isa<PoisonValue>(FalseVal) ||
4929 if (isa<FixedVectorType>(TrueVal->getType()) &&
4933 cast<FixedVectorType>(TrueC->getType())->getNumElements();
4935 for (unsigned i = 0; i != NumElts; ++i) {
4939 if (!TEltC || !FEltC)
4946 else if (isa<PoisonValue>(TEltC) ||
4949 else if (isa<PoisonValue>(FEltC) ||
4955 if (NewC.size() == NumElts)
4968 return *Imp ? TrueVal : FalseVal;
4985 cast<PointerType>(Ptr->getType()->getScalarType())->getAddressSpace();
4988 if (Indices.empty())
4998 if (VectorType *VT = dyn_cast<VectorType>(Op->getType())) {
4999 GEPTy = VectorType::get(GEPTy, VT->getElementCount());
5006 if (Ptr->getType() == GEPTy &&
5012 if (isa<PoisonValue>(Ptr) ||
5013 any_of(Indices, [](const auto *V) { return isa<PoisonValue>(V); }))
5020 bool IsScalableVec =
5022 return isa<ScalableVectorType>(V->getType());
5025 if (Indices.size() == 1) {
5027 if (!IsScalableVec && Ty->isSized()) {
5032 if (TyAllocSize == 0 && Ptr->getType() == GEPTy)
5037 if (Indices[0]->getType()->getScalarSizeInBits() ==
5039 auto CanSimplify = [GEPTy, &P, Ptr]() -> bool {
5040 return P->getType() == GEPTy &&
5044 if (TyAllocSize == 1 &&
5055 TyAllocSize == 1ULL << C && CanSimplify())
5071 [](Value *Idx) { return match(Idx, m_Zero()); })) {
5075 APInt BasePtrOffset(IdxWidth, 0);
5076 Value *StrippedBasePtr =
5077 Ptr->stripAndAccumulateInBoundsConstantOffsets(Q.DL, BasePtrOffset);
5086 !BasePtrOffset.isZero()) {
5087 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset);
5093 !BasePtrOffset.isOne()) {
5094 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset - 1);
5101 if (!isa<Constant>(Ptr) ||
5102 !all_of(Indices, [](Value *V) { return isa<Constant>(V); }))
5124 if (Constant *CAgg = dyn_cast<Constant>(Agg))
5125 if (Constant *CVal = dyn_cast<Constant>(Val))
5130 if (isa<PoisonValue>(Val) ||
5136 if (EV->getAggregateOperand()->getType() == Agg->getType() &&
5137 EV->getIndices() == Idxs) {
5140 if (isa<PoisonValue>(Agg) ||
5143 return EV->getAggregateOperand();
5146 if (Agg == EV->getAggregateOperand())
5156 return ::simplifyInsertValueInst(Agg, Val, Idxs, Q, RecursionLimit);
5162 auto *VecC = dyn_cast<Constant>(Vec);
5163 auto *ValC = dyn_cast<Constant>(Val);
5164 auto *IdxC = dyn_cast<Constant>(Idx);
5165 if (VecC && ValC && IdxC)
5169 if (auto *CI = dyn_cast<ConstantInt>(Idx)) {
5170 if (isa<FixedVectorType>(Vec->getType()) &&
5171 CI->uge(cast<FixedVectorType>(Vec->getType())->getNumElements()))
5181 if (isa<PoisonValue>(Val) ||
5186 if (VecC && ValC && VecC->getSplatValue() == ValC)
5202 if (auto *CAgg = dyn_cast<Constant>(Agg))
5206 unsigned NumIdxs = Idxs.size();
5207 for (auto *IVI = dyn_cast<InsertValueInst>(Agg); IVI != nullptr;
5208 IVI = dyn_cast<InsertValueInst>(IVI->getAggregateOperand())) {
5210 unsigned NumInsertValueIdxs = InsertValueIdxs.size();
5211 unsigned NumCommonIdxs = std::min(NumInsertValueIdxs, NumIdxs);
5212 if (InsertValueIdxs.slice(0, NumCommonIdxs) ==
5213 Idxs.slice(0, NumCommonIdxs)) {
5214 if (NumIdxs == NumInsertValueIdxs)
5215 return IVI->getInsertedValueOperand();
5232 auto *VecVTy = cast<VectorType>(Vec->getType());
5233 if (auto *CVec = dyn_cast<Constant>(Vec)) {
5234 if (auto *CIdx = dyn_cast<Constant>(Idx))
5248 if (auto *IdxC = dyn_cast<ConstantInt>(Idx)) {
5250 unsigned MinNumElts = VecVTy->getElementCount().getKnownMinValue();
5251 if (isa<FixedVectorType>(VecVTy) && IdxC->getValue().uge(MinNumElts))
5254 if (IdxC->getValue().ult(MinNumElts))
5264 auto *IE = dyn_cast<InsertElementInst>(Vec);
5265 if (IE && IE->getOperand(2) == Idx)
5266 return IE->getOperand(1);
5289 Value *CommonValue = nullptr;
5290 bool HasPoisonInput = false;
5291 bool HasUndefInput = false;
5297 HasPoisonInput = true;
5302 HasUndefInput = true;
5305 if (CommonValue && Incoming != CommonValue)
5316 if (HasPoisonInput || HasUndefInput) {
5324 if (HasUndefInput &&
5335 if (auto *C = dyn_cast<Constant>(Op))
5338 if (auto *CI = dyn_cast<CastInst>(Op)) {
5339 auto *Src = CI->getOperand(0);
5340 Type *SrcTy = Src->getType();
5341 Type *MidTy = CI->getType();
5343 if (Src->getType() == Ty) {
5353 SrcIntPtrTy, MidIntPtrTy,
5354 DstIntPtrTy) == Instruction::BitCast)
5360 if (CastOpc == Instruction::BitCast)
5361 if (Op->getType() == Ty)
5366 if (CastOpc == Instruction::PtrToInt &&
5384 int MaskVal, Value *RootVec,
5385 unsigned MaxRecurse) {
5395 int InVecNumElts = cast<FixedVectorType>(Op0->getType())->getNumElements();
5396 int RootElt = MaskVal;
5397 Value *SourceOp = Op0;
5398 if (MaskVal >= InVecNumElts) {
5399 RootElt = MaskVal - InVecNumElts;
5405 if (auto *SourceShuf = dyn_cast<ShuffleVectorInst>(SourceOp)) {
5407 DestElt, SourceShuf->getOperand(0), SourceShuf->getOperand(1),
5408 SourceShuf->getMaskValue(RootElt), RootVec, MaxRecurse);
5417 if (RootVec != SourceOp)
5422 if (RootElt != DestElt)
5431 unsigned MaxRecurse) {
5435 auto *InVecTy = cast<VectorType>(Op0->getType());
5436 unsigned MaskNumElts = Mask.size();
5437 ElementCount InVecEltCount = InVecTy->getElementCount();
5442 Indices.assign(Mask.begin(), Mask.end());
5447 bool MaskSelects0 = false, MaskSelects1 = false;
5449 for (unsigned i = 0; i != MaskNumElts; ++i) {
5450 if (Indices[i] == -1)
5452 if ((unsigned)Indices[i] < InVecNumElts)
5453 MaskSelects0 = true;
5455 MaskSelects1 = true;
5463 auto *Op0Const = dyn_cast<Constant>(Op0);
5464 auto *Op1Const = dyn_cast<Constant>(Op1);
5469 if (Op0Const && Op1Const)
5475 if (!Scalable && Op0Const && !Op1Const) {
5493 if (all_of(Indices, [InsertIndex](int MaskElt) {
5494 return MaskElt == InsertIndex || MaskElt == -1;
5496 assert(isa<UndefValue>(Op1) && "Expected undef operand 1 for splat");
5500 for (unsigned i = 0; i != MaskNumElts; ++i)
5501 if (Indices[i] == -1)
5509 if (auto *OpShuf = dyn_cast<ShuffleVectorInst>(Op0))
5529 Value *RootVec = nullptr;
5530 for (unsigned i = 0; i != MaskNumElts; ++i) {
5552 if (auto *C = dyn_cast<Constant>(Op))
5580 Type *Ty = In->getType();
5581 if (auto *VecTy = dyn_cast<FixedVectorType>(Ty)) {
5582 unsigned NumElts = VecTy->getNumElements();
5584 for (unsigned i = 0; i != NumElts; ++i) {
5585 Constant *EltC = In->getAggregateElement(i);
5588 if (EltC && isa<PoisonValue>(EltC))
5590 else if (EltC && EltC->isNaN())
5591 NewC[i] = ConstantFP::get(
5592 EltC->getType(), cast<ConstantFP>(EltC)->getValue().makeQuiet());
5606 if (isa<ScalableVectorType>(Ty)) {
5607 auto *Splat = In->getSplatValue();
5609 "Found a scalable-vector NaN but not a splat");
5615 return ConstantFP::get(Ty, cast<ConstantFP>(In)->getValue().makeQuiet());
5630 for (Value *V : Ops) {
5638 if (FMF.noNaNs() && (IsNan || IsUndef))
5640 if (FMF.noInfs() && (IsInf || IsUndef))
5666 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5732 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5847 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5853 return simplifyFMAFMul(Op0, Op1, FMF, Q, MaxRecurse, ExBehavior, Rounding);
5860 return ::simplifyFAddInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5868 return ::simplifyFSubInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5876 return ::simplifyFMulInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5884 return ::simplifyFMAFMul(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5892 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5917 return ConstantFP::get(Op0->getType(), 1.0);
5929 return ConstantFP::get(Op0->getType(), -1.0);
5943 return ::simplifyFDivInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5951 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5981 return ::simplifyFRemInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5990 unsigned MaxRecurse) {
5992 case Instruction::FNeg:
6004 unsigned MaxRecurse) {
6006 case Instruction::FNeg:
6027 case Instruction::Add:
6030 case Instruction::Sub:
6033 case Instruction::Mul:
6036 case Instruction::SDiv:
6038 case Instruction::UDiv:
6040 case Instruction::SRem:
6042 case Instruction::URem:
6044 case Instruction::Shl:
6047 case Instruction::LShr:
6049 case Instruction::AShr:
6051 case Instruction::And:
6053 case Instruction::Or:
6055 case Instruction::Xor:
6057 case Instruction::FAdd:
6059 case Instruction::FSub:
6061 case Instruction::FMul:
6063 case Instruction::FDiv:
6065 case Instruction::FRem:
6077 unsigned MaxRecurse) {
6079 case Instruction::FAdd:
6081 case Instruction::FSub:
6083 case Instruction::FMul:
6085 case Instruction::FDiv:
6121 case Intrinsic::fabs:
6122 case Intrinsic::floor:
6123 case Intrinsic::ceil:
6124 case Intrinsic::trunc:
6125 case Intrinsic::rint:
6126 case Intrinsic::nearbyint:
6127 case Intrinsic::round:
6128 case Intrinsic::roundeven:
6129 case Intrinsic::canonicalize:
6130 case Intrinsic::arithmetic_fence:
6142 case Intrinsic::floor:
6143 case Intrinsic::ceil:
6144 case Intrinsic::trunc:
6145 case Intrinsic::rint:
6146 case Intrinsic::nearbyint:
6147 case Intrinsic::round:
6148 case Intrinsic::roundeven:
6162 auto *OffsetConstInt = dyn_cast<ConstantInt>(Offset);
6163 if (!OffsetConstInt || OffsetConstInt->getBitWidth() > 64)
6167 DL.getIndexTypeSizeInBits(Ptr->getType()));
6168 if (OffsetInt.srem(4) != 0)
6176 auto *LoadedCE = dyn_cast<ConstantExpr>(Loaded);
6180 if (LoadedCE->getOpcode() == Instruction::Trunc) {
6181 LoadedCE = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
6186 if (LoadedCE->getOpcode() != Instruction::Sub)
6189 auto *LoadedLHS = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
6190 if (!LoadedLHS || LoadedLHS->getOpcode() != Instruction::PtrToInt)
6192 auto *LoadedLHSPtr = LoadedLHS->getOperand(0);
6196 APInt LoadedRHSOffset;
6199 PtrSym != LoadedRHSSym || PtrOffset != LoadedRHSOffset)
6202 return LoadedLHSPtr;
6210 if (isa<PoisonValue>(Op0) || isa<PoisonValue>(Op1))
6233 if (C && (C->isZero() || C->isInfinity()))
6242 if (C && C->isNaN())
6243 return ConstantFP::get(Op0->getType(), C->makeQuiet());
6261 if (auto *II = dyn_cast<IntrinsicInst>(Op0))
6262 if (II->getIntrinsicID() == IID)
6271 auto *II = dyn_cast<IntrinsicInst>(Op0);
6279 case Intrinsic::fabs:
6283 case Intrinsic::bswap:
6288 case Intrinsic::bitreverse:
6293 case Intrinsic::ctpop: {
6297 return ConstantInt::get(Op0->getType(), 1);
6306 case Intrinsic::exp:
6308 if (Call->hasAllowReassoc() &&
6312 case Intrinsic::exp2:
6314 if (Call->hasAllowReassoc() &&
6318 case Intrinsic::exp10:
6320 if (Call->hasAllowReassoc() &&
6324 case Intrinsic::log:
6326 if (Call->hasAllowReassoc() &&
6330 case Intrinsic::log2:
6332 if (Call->hasAllowReassoc() &&
6338 case Intrinsic::log10:
6341 if (Call->hasAllowReassoc() &&
6347 case Intrinsic::vector_reverse:
6355 case Intrinsic::frexp: {
6379 auto *MM0 = dyn_cast<IntrinsicInst>(Op0);
6384 if (Op1 == X || Op1 == Y ||
6401 assert((IID == Intrinsic::maxnum || IID == Intrinsic::minnum ||
6402 IID == Intrinsic::maximum || IID == Intrinsic::minimum) &&
6403 "Unsupported intrinsic");
6405 auto *M0 = dyn_cast<IntrinsicInst>(Op0);
6409 if (!M0 || M0->getIntrinsicID() != IID)
6411 Value *X0 = M0->getOperand(0);
6412 Value *Y0 = M0->getOperand(1);
6419 if (X0 == Op1 || Y0 == Op1)
6422 auto *M1 = dyn_cast<IntrinsicInst>(Op1);
6425 Value *X1 = M1->getOperand(0);
6426 Value *Y1 = M1->getOperand(1);
6434 if ((X0 == X1 && Y0 == Y1) || (X0 == Y1 && Y0 == X1))
6445 unsigned BitWidth = ReturnType->getScalarSizeInBits();
6447 case Intrinsic::abs:
6455 case Intrinsic::cttz: {
6461 case Intrinsic::ctlz: {
6469 case Intrinsic::ptrmask: {
6470 if (isa<PoisonValue>(Op0) || isa<PoisonValue>(Op1))
6480 "Invalid mask width");
6497 APInt IrrelevantPtrBits =
6500 Instruction::Or, C, ConstantInt::get(C->getType(), IrrelevantPtrBits),
6502 if (C != nullptr && C->isAllOnesValue())
6507 case Intrinsic::smax:
6508 case Intrinsic::smin:
6509 case Intrinsic::umax:
6510 case Intrinsic::umin: {
6521 return ConstantInt::get(
6529 return ConstantInt::get(ReturnType, *C);
6540 auto *MinMax0 = dyn_cast<IntrinsicInst>(Op0);
6541 if (MinMax0 && MinMax0->getIntrinsicID() == IID) {
6543 Value *M00 = MinMax0->getOperand(0), *M01 = MinMax0->getOperand(1);
6544 const APInt *InnerC;
6547 ICmpInst::getNonStrictPredicate(
6567 case Intrinsic::scmp:
6568 case Intrinsic::ucmp: {
6575 IID == Intrinsic::scmp ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
6577 return ConstantInt::get(ReturnType, 1);
6580 IID == Intrinsic::scmp ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
6586 case Intrinsic::usub_with_overflow:
6587 case Intrinsic::ssub_with_overflow:
6594 case Intrinsic::uadd_with_overflow:
6595 case Intrinsic::sadd_with_overflow:
6600 cast<StructType>(ReturnType),
6605 case Intrinsic::umul_with_overflow:
6606 case Intrinsic::smul_with_overflow:
6616 case Intrinsic::uadd_sat:
6622 case Intrinsic::sadd_sat:
6637 case Intrinsic::usub_sat:
6642 case Intrinsic::ssub_sat:
6650 case Intrinsic::load_relative:
6651 if (auto *C0 = dyn_cast<Constant>(Op0))
6652 if (auto *C1 = dyn_cast<Constant>(Op1))
6655 case Intrinsic::powi:
6656 if (auto *Power = dyn_cast<ConstantInt>(Op1)) {
6658 if (Power->isZero())
6659 return ConstantFP::get(Op0->getType(), 1.0);
6665 case Intrinsic::ldexp:
6667 case Intrinsic::copysign:
6677 case Intrinsic::is_fpclass: {
6678 if (isa<PoisonValue>(Op0))
6681 uint64_t Mask = cast<ConstantInt>(Op1)->getZExtValue();
6684 return ConstantInt::get(ReturnType, true);
6686 return ConstantInt::get(ReturnType, false);
6691 case Intrinsic::maxnum:
6692 case Intrinsic::minnum:
6693 case Intrinsic::maximum:
6694 case Intrinsic::minimum: {
6700 if (isa<Constant>(Op0))
6707 bool PropagateNaN = IID == Intrinsic::minimum || IID == Intrinsic::maximum;
6708 bool IsMin = IID == Intrinsic::minimum || IID == Intrinsic::minnum;
6715 return PropagateNaN ? propagateNaN(cast<Constant>(Op1)) : Op0;
6721 (C->isInfinity() || (Call && Call->hasNoInfs() && C->isLargest()))) {
6726 if (C->isNegative() == IsMin &&
6727 (!PropagateNaN || (Call && Call->hasNoNaNs()))
6728 return ConstantFP::get(ReturnType, *C);
6734 if (C->isNegative() != IsMin &&
6735 (PropagateNaN || (Call && Call->hasNoNaNs()))
6748 case Intrinsic::vector_extract: {
6750 unsigned IdxN = cast<ConstantInt>(Op1)->getZExtValue();
6754 IdxN == 0 && X->getType() == ReturnType)
6770 assert(Call->arg_size() == Args.size());
6771 unsigned NumOperands = Args.size();
6779 case Intrinsic::vscale: {
6783 return ConstantInt::get(RetTy, C->getZExtValue());
6791 if (NumOperands == 1)
6794 if (NumOperands == 2)
6800 case Intrinsic::masked_load:
6801 case Intrinsic::masked_gather: {
6802 Value *MaskArg = Args[2];
6803 Value *PassthruArg = Args[3];
6809 case Intrinsic::fshl:
6810 case Intrinsic::fshr: {
6811 Value *Op0 = Args[0], *Op1 = Args[1], *ShAmtArg = Args[2];
6819 return Args[IID == Intrinsic::fshl ? 0 : 1];
6821 const APInt *ShAmtC;
6826 return Args[IID == Intrinsic::fshl ? 0 : 1];
6831 return ConstantInt::getNullValue(F->getReturnType());
6835 return ConstantInt::getAllOnesValue(F->getReturnType());
6839 case Intrinsic::experimental_constrained_fma: {
6840 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6842 *FPI->getRoundingMode()))
6846 case Intrinsic::fma:
6847 case Intrinsic::fmuladd: {
6849 RoundingMode::NearestTiesToEven))
6853 case Intrinsic::smul_fix:
6854 case Intrinsic::smul_fix_sat: {
6855 Value *Op0 = Args[0];
6856 Value *Op1 = Args[1];
6857 Value *Op2 = Args[2];
6858 Type *ReturnType = F->getReturnType();
6863 if (isa<Constant>(Op0))
6877 cast<ConstantInt>(Op2)->getZExtValue());
6883 case Intrinsic::vector_insert: {
6884 Value *Vec = Args[0];
6885 Value *SubVec = Args[1];
6887 Type *ReturnType = F->getReturnType();
6891 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6896 X->getType() == ReturnType)
6901 case Intrinsic::experimental_constrained_fadd: {
6902 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6904 *FPI->getExceptionBehavior(),
6905 *FPI->getRoundingMode());
6907 case Intrinsic::experimental_constrained_fsub: {
6908 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6910 *FPI->getExceptionBehavior(),
6911 *FPI->getRoundingMode());
6913 case Intrinsic::experimental_constrained_fmul: {
6914 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6916 *FPI->getExceptionBehavior(),
6917 *FPI->getRoundingMode());
6919 case Intrinsic::experimental_constrained_fdiv: {
6920 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6922 *FPI->getExceptionBehavior(),
6923 *FPI->getRoundingMode());
6925 case Intrinsic::experimental_constrained_frem: {
6926 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6928 *FPI->getExceptionBehavior(),
6929 *FPI->getRoundingMode());
6931 case Intrinsic::experimental_constrained_ldexp:
6933 case Intrinsic::experimental_gc_relocate: {
6939 if (isa<UndefValue>(DerivedPtr) || isa<UndefValue>(BasePtr)) {
6943 if (auto *PT = dyn_cast<PointerType>(GCR.getType())) {
6947 if (isa<ConstantPointerNull>(DerivedPtr)) {
6962 auto *F = dyn_cast<Function>(Callee);
6967 ConstantArgs.reserve(Args.size());
6968 for (Value *Arg : Args) {
6971 if (isa<MetadataAsValue>(Arg))
6984 assert(Call->arg_size() == Args.size());
6988 if (Call->isMustTailCall())
6993 if (isa<UndefValue>(Callee) || isa<ConstantPointerNull>(Callee))
6999 auto *F = dyn_cast<Function>(Callee);
7000 if (F && F->isIntrinsic())
7008 assert(isa<ConstrainedFPIntrinsic>(Call));
7027 return ::simplifyFreezeInst(Op0, Q);
7035 if (auto *PtrOpC = dyn_cast<Constant>(PtrOp))
7041 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
7072 unsigned MaxRecurse) {
7073 assert(I->getFunction() && "instruction should be inserted in a function");
7075 "context instruction should be in the same function");
7079 switch (I->getOpcode()) {
7084 [](Value *V) { return cast<Constant>(V); });
7088 case Instruction::FNeg:
7090 case Instruction::FAdd:
7093 case Instruction::Add:
7097 case Instruction::FSub:
7100 case Instruction::Sub:
7104 case Instruction::FMul:
7107 case Instruction::Mul:
7111 case Instruction::SDiv:
7115 case Instruction::UDiv:
7119 case Instruction::FDiv:
7122 case Instruction::SRem:
7124 case Instruction::URem:
7126 case Instruction::FRem:
7129 case Instruction::Shl:
7133 case Instruction::LShr:
7137 case Instruction::AShr:
7141 case Instruction::And:
7143 case Instruction::Or:
7145 case Instruction::Xor:
7147 case Instruction::ICmp:
7149 NewOps[1], Q, MaxRecurse);
7150 case Instruction::FCmp:
7152 NewOps[1], I->getFastMathFlags(), Q, MaxRecurse);
7153 case Instruction::Select:
7155 case Instruction::GetElementPtr: {
7156 auto *GEPI = cast<GetElementPtrInst>(I);
7158 ArrayRef(NewOps).slice(1), GEPI->getNoWrapFlags(), Q,
7161 case Instruction::InsertValue: {
7166 case Instruction::InsertElement:
7168 case Instruction::ExtractValue: {
7169 auto *EVI = cast<ExtractValueInst>(I);
7173 case Instruction::ExtractElement:
7175 case Instruction::ShuffleVector: {
7176 auto *SVI = cast<ShuffleVectorInst>(I);
7178 SVI->getShuffleMask(), SVI->getType(), Q,
7181 case Instruction::PHI:
7183 case Instruction::Call:
7185 cast<CallInst>(I), NewOps.back(),
7186 NewOps.drop_back(1 + cast<CallInst>(I)->getNumTotalBundleOperands()), Q);
7187 case Instruction::Freeze:
7189#define HANDLE_CAST_INST(num, opc, clas) case Instruction::opc:
7190#include "llvm/IR/Instruction.def"
7191#undef HANDLE_CAST_INST
7194 case Instruction::Alloca:
7197 case Instruction::Load:
7206 "Number of operands should match the instruction!");
7207 return ::simplifyInstructionWithOperands(I, NewOps, SQ, RecursionLimit);
7237 bool Simplified = false;
7244 for (User *U : I->users())
7246 Worklist.insert(cast<Instruction>(U));
7249 I->replaceAllUsesWith(SimpleV);
7251 if (!I->isEHPad() && !I->isTerminator() && !I->mayHaveSideEffects())
7252 I->eraseFromParent();
7264 if (UnsimplifiedUsers)
7265 UnsimplifiedUsers->insert(I);
7274 for (User *U : I->users())
7275 Worklist.insert(cast<Instruction>(U));
7278 I->replaceAllUsesWith(SimpleV);
7280 if (!I->isEHPad() && !I->isTerminator() && !I->mayHaveSideEffects())
7281 I->eraseFromParent();
7290 assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
7291 assert(SimpleV && "Must provide a simplified value.");
7299 auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
7301 auto *TLI = TLIWP ? &TLIWP->getTLI(F) : nullptr;
7304 return {F.getDataLayout(), TLI, DT, AC};
7312 template <class T, class... TArgs>
7315 auto *DT = AM.template getCachedResult<DominatorTreeAnalysis>(F);
7316 auto *TLI = AM.template getCachedResult<TargetLibraryAnalysis>(F);
7317 auto *AC = AM.template getCachedResult<AssumptionAnalysis>(F);
7318 return {F.getDataLayout(), TLI, DT, AC};
7332void InstSimplifyFolder::anchor() {}
static Value * simplifyFreezeInst(Value *Op0, const SimplifyQuery &Q)
Given operands for a Freeze, see if we can fold the result.
static Value * simplifyCmpSelFalseCase(CmpPredicate Pred, Value *LHS, Value *RHS, Value *Cond, const SimplifyQuery &Q, unsigned MaxRecurse)
Simplify comparison with false branch of select.
static Value * simplifyCmpSelCase(CmpPredicate Pred, Value *LHS, Value *RHS, Value *Cond, const SimplifyQuery &Q, unsigned MaxRecurse, Constant *TrueOrFalse)
Simplify comparison with true or false branch of select: sel = select i1 cond, i32 tv,...
static Value * simplifySelectWithFakeICmpEq(Value *CmpLHS, Value *CmpRHS, CmpPredicate Pred, Value *TrueVal, Value *FalseVal)
An alternative way to test if a bit is set or not uses sgt/slt instead of eq/ne.
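A minimal standalone C++ sketch of the underlying identity (illustrative only, not the LLVM implementation; the sample values are arbitrary): a sign-bit test written with a mask and one written with slt/sgt against 0/-1 agree.

#include <cassert>
#include <cstdint>
#include <limits>

int main() {
  const int32_t Samples[] = {0, 1, -1, 123, -123,
                             std::numeric_limits<int32_t>::min(),
                             std::numeric_limits<int32_t>::max()};
  for (int32_t X : Samples) {
    bool MaskSet = (static_cast<uint32_t>(X) & 0x80000000u) != 0; // icmp ne (and X, SignBit), 0
    bool Slt = X < 0;                                             // icmp slt X, 0
    bool NotSgt = !(X > -1);                                      // inverse of icmp sgt X, -1
    assert(MaskSet == Slt && MaskSet == NotSgt);
  }
}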
static Value * simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an LShr, see if we can fold the result.
static Value * simplifyUDivInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for a UDiv, see if we can fold the result.
static Value * simplifyShuffleVectorInst(Value *Op0, Value *Op1, ArrayRef< int > Mask, Type *RetTy, const SimplifyQuery &Q, unsigned MaxRecurse)
static Value * foldMinMaxSharedOp(Intrinsic::ID IID, Value *Op0, Value *Op1)
Given a min/max intrinsic, see if it can be removed based on having an operand that is another min/ma...
static Value * simplifySubInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for a Sub, see if we can fold the result.
static Value * simplifyFCmpInst(CmpPredicate Pred, Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an FCmpInst, see if we can fold the result.
static Value * expandCommutativeBinOp(Instruction::BinaryOps Opcode, Value *L, Value *R, Instruction::BinaryOps OpcodeToExpand, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify binops of form "A op (B op' C)" or the commuted variant by distributing op over op'.
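The distributive identity this expansion relies on, as a standalone C++ sketch (illustrative only; the operand values are arbitrary). The expansion only pays off when both distributed halves simplify, which is what the surrounding code checks before committing.

#include <cassert>
#include <cstdint>

int main() {
  uint32_t A = 0xF0F0F0F0u, B = 0x12345678u, C = 0x0FF00FF0u;
  assert((A & (B | C)) == ((A & B) | (A & C))); // and distributes over or
  assert((A * (B + C)) == (A * B + A * C));     // mul distributes over add (mod 2^32)
}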
static Constant * foldOrCommuteConstant(Instruction::BinaryOps Opcode, Value *&Op0, Value *&Op1, const SimplifyQuery &Q)
static bool haveNonOverlappingStorage(const Value *V1, const Value *V2)
Return true if V1 and V2 are each the base of some distinct storage region [V, object_size(V)] which d...
static Constant * foldConstant(Instruction::UnaryOps Opcode, Value *&Op, const SimplifyQuery &Q)
static Value * handleOtherCmpSelSimplifications(Value *TCmp, Value *FCmp, Value *Cond, const SimplifyQuery &Q, unsigned MaxRecurse)
We know comparison with both branches of select can be simplified, but they are not equal.
static Value * threadCmpOverPHI(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
In the case of a comparison with a PHI instruction, try to simplify the comparison by seeing whether ...
static Constant * propagateNaN(Constant *In)
Try to propagate existing NaN values when possible.
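The behaviour being modeled, as a sketch (illustrative only): any floating-point arithmetic with a NaN operand yields NaN, so a constant NaN operand can be propagated directly as the result.

#include <cassert>
#include <cmath>
#include <limits>

int main() {
  double NaN = std::numeric_limits<double>::quiet_NaN();
  assert(std::isnan(NaN + 1.0));
  assert(std::isnan(2.0 * NaN));
  assert(std::isnan(NaN / NaN));
}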
static Value * simplifyICmpOfBools(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Fold an icmp when its operands have i1 scalar type.
static Value * simplifyICmpWithBinOpOnLHS(CmpPredicate Pred, BinaryOperator *LBO, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
static Value * simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an AShr, see if we can fold the result.
static Value * simplifyRelativeLoad(Constant *Ptr, Constant *Offset, const DataLayout &DL)
static Value * simplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
These are simplifications common to SDiv and UDiv.
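A sketch of a few of the generic folds involved (illustrative only; the loop bound is arbitrary): X/1 == X, 0/X == 0, and X/X == 1 for non-zero X.

#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t X = 1; X < 100; ++X) {
    assert(X / 1u == X);  // X udiv 1 -> X
    assert(0u / X == 0u); // 0 udiv X -> 0
    assert(X / X == 1u);  // X udiv X -> 1
  }
}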
static Value * simplifyPHINode(PHINode *PN, ArrayRef< Value * > IncomingValues, const SimplifyQuery &Q)
See if we can fold the given phi. If not, returns null.
static Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &, unsigned)
Given operands for an ExtractValueInst, see if we can fold the result.
static Value * simplifySelectInst(Value *, Value *, Value *, const SimplifyQuery &, unsigned)
Given operands for a SelectInst, see if we can fold the result.
static Value * simplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an Add, see if we can fold the result.
static Value * simplifyUnOp(unsigned, Value *, const SimplifyQuery &, unsigned)
Given the operand for a UnaryOperator, see if we can fold the result.
static bool isSameCompare(Value *V, CmpPredicate Pred, Value *LHS, Value *RHS)
isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"?
static Value * simplifyAndCommutative(Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
static Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &SQ, unsigned MaxRecurse)
See if we can compute a simplified version of this instruction.
static bool isIdempotent(Intrinsic::ID ID)
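Idempotent here means applying the intrinsic twice is the same as applying it once (fabs, floor, trunc, and the other rounding intrinsics). A sketch of the property using the libm counterparts (illustrative only):

#include <cassert>
#include <cmath>

int main() {
  const double Samples[] = {-2.75, -0.0, 0.5, 3.25, 1e9};
  for (double X : Samples) {
    assert(std::fabs(std::fabs(X)) == std::fabs(X));
    assert(std::floor(std::floor(X)) == std::floor(X));
    assert(std::trunc(std::trunc(X)) == std::trunc(X));
  }
}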
static std::optional< ConstantRange > getRange(Value *V, const InstrInfoQuery &IIQ)
Helper method to get range from metadata or attribute.
static Value * simplifyAndOrOfICmpsWithCtpop(ICmpInst *Cmp0, ICmpInst *Cmp1, bool IsAnd)
Try to simplify and/or of icmp with ctpop intrinsic.
static Value * simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp, ICmpInst *UnsignedICmp, bool IsAnd, const SimplifyQuery &Q)
Commuted variants are assumed to be handled by calling this function again with the parameters swappe...
static Value * tryConstantFoldCall(CallBase *Call, Value *Callee, ArrayRef< Value * > Args, const SimplifyQuery &Q)
static Value * simplifyICmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an ICmpInst, see if we can fold the result.
static Value * simplifyExtractElementInst(Value *Vec, Value *Idx, const SimplifyQuery &Q, unsigned)
Given operands for an ExtractElementInst, see if we can fold the result.
static Value * simplifyAndOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1, const InstrInfoQuery &IIQ)
static Value * simplifyICmpWithMinMax(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
Simplify integer comparisons where at least one operand of the compare matches an integer min/max idi...
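A sketch of the kind of fold this enables (illustrative only, using std::max/std::min in place of the IR intrinsics): smax(X, Y) is always >= X, so comparisons like 'smax(X, Y) sge X' fold to true and 'smin(X, Y) sgt X' folds to false.

#include <algorithm>
#include <cassert>

int main() {
  for (int X = -3; X <= 3; ++X)
    for (int Y = -3; Y <= 3; ++Y) {
      assert(std::max(X, Y) >= X);   // icmp sge (smax X, Y), X -> true
      assert(!(std::min(X, Y) > X)); // icmp sgt (smin X, Y), X -> false
    }
}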
static Value * simplifyCmpSelTrueCase(CmpPredicate Pred, Value *LHS, Value *RHS, Value *Cond, const SimplifyQuery &Q, unsigned MaxRecurse)
Simplify comparison with true branch of select.
static Value * simplifyIntrinsic(CallBase *Call, Value *Callee, ArrayRef< Value * > Args, const SimplifyQuery &Q)
static void getUnsignedMonotonicValues(SmallPtrSetImpl< Value * > &Res, Value *V, MonotonicType Type, unsigned Depth=0)
Get values V_i such that V uge V_i (GreaterEq) or V ule V_i (LowerEq).
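A sketch of the unsigned-monotonic relations being collected (illustrative only; the sample values are arbitrary): and, lshr, and udiv never increase an unsigned value, while or never decreases it.

#include <cassert>
#include <cstdint>

int main() {
  uint32_t X = 0xDEADBEEFu, Y = 0x1234u;
  assert((X & Y) <= X);  // X uge (X & Y)
  assert((X >> 3) <= X); // X uge (X lshr 3)
  assert((X / 7u) <= X); // X uge (X udiv 7)
  assert(X <= (X | Y));  // X ule (X | Y)
}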
static bool isPoisonShift(Value *Amount, const SimplifyQuery &Q)
Returns true if a shift by Amount always yields poison.
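The C++-level counterpart of this rule, as a sketch (illustrative only; shlSafe is a made-up helper, not an LLVM API): shifting by the bit width or more is out of range (undefined in C++, poison in IR), so well-defined code guards the amount first.

#include <cassert>
#include <cstdint>

uint32_t shlSafe(uint32_t X, unsigned Amt) {
  return Amt >= 32 ? 0u : X << Amt; // avoid the out-of-range shift entirely
}

int main() {
  assert(shlSafe(1u, 4) == 16u);
  assert(shlSafe(1u, 32) == 0u); // a literal "1u << 32" would be undefined
}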
static APInt stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V, bool AllowNonInbounds=false)
Compute the base pointer and cumulative constant offsets for V.
static Value * simplifyCmpInst(CmpPredicate, Value *, Value *, const SimplifyQuery &, unsigned)
Given operands for a CmpInst, see if we can fold the result.
static Value * simplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF, const SimplifyQuery &Q, unsigned MaxRecurse, fp::ExceptionBehavior ExBehavior, RoundingMode Rounding)
static Value * simplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an LShr or AShr, see if we can fold the result.
static Value * simplifyICmpWithIntrinsicOnLHS(CmpPredicate Pred, Value *LHS, Value *RHS)
static Value * simplifySDivInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an SDiv, see if we can fold the result.
static Value * simplifySelectWithEquivalence(Value *CmpLHS, Value *CmpRHS, Value *TrueVal, Value *FalseVal, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify a select instruction when its condition operand is an integer equality or floating-po...
static Value * simplifyByDomEq(unsigned Opcode, Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
Test if there is a dominating equivalence condition for the two operands.
static Value * simplifyFPUnOp(unsigned, Value *, const FastMathFlags &, const SimplifyQuery &, unsigned)
Given the operand for a UnaryOperator, see if we can fold the result.
static Value * simplifyICmpWithBinOp(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
TODO: A large part of this logic is duplicated in InstCombine's foldICmpBinOp().
static Value * simplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF, const SimplifyQuery &Q, unsigned MaxRecurse, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FAdd, see if we can fold the result.
static Value * simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1, const SimplifyQuery &Q)
static Value * expandBinOp(Instruction::BinaryOps Opcode, Value *V, Value *OtherOp, Instruction::BinaryOps OpcodeToExpand, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify a binary operator of form "V op OtherOp" where V is "(B0 opex B1)" by distributing 'o...
static Value * simplifyICmpWithZero(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Try hard to fold icmp with zero RHS because this is a common case.
static Value * simplifyICmpWithConstant(CmpPredicate Pred, Value *LHS, Value *RHS, const InstrInfoQuery &IIQ)
static Value * simplifySelectWithFCmp(Value *Cond, Value *T, Value *F, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify a select instruction when its condition operand is a floating-point comparison.
static Constant * getFalse(Type *Ty)
For a boolean type or a vector of boolean type, return false or a vector with every element false.
static Value * simplifyDivRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
Check for common or similar folds of integer division or integer remainder.
static bool removesFPFraction(Intrinsic::ID ID)
Return true if the intrinsic rounds a floating-point value to an integral floating-point value (not a...
static Value * simplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF, const SimplifyQuery &Q, unsigned, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
static Value * simplifyOrOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1, const InstrInfoQuery &IIQ)
static Value * simplifyMulInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for a Mul, see if we can fold the result.
static Value * simplifyFNegInst(Value *Op, FastMathFlags FMF, const SimplifyQuery &Q, unsigned MaxRecurse)
Given the operand for an FNeg, see if we can fold the result.
static Value * simplifyOrInst(Value *, Value *, const SimplifyQuery &, unsigned)
Given operands for an Or, see if we can fold the result.
static bool trySimplifyICmpWithAdds(CmpPredicate Pred, Value *LHS, Value *RHS, const InstrInfoQuery &IIQ)
static Value * simplifySelectBitTest(Value *TrueVal, Value *FalseVal, Value *X, const APInt *Y, bool TrueWhenUnset)
Try to simplify a select instruction when its condition operand is an integer comparison where one op...
static Value * simplifyAssociativeBinOp(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
Generic simplifications for associative binary operations.
static Value * simplifyShlInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an Shl, see if we can fold the result.
static Value * threadBinOpOverPHI(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
In the case of a binary operation with an operand that is a PHI instruction, try to simplify the bino...
static Value * simplifyCmpSelOfMaxMin(Value *CmpLHS, Value *CmpRHS, CmpPredicate Pred, Value *TVal, Value *FVal)
static Value * simplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF, const SimplifyQuery &Q, unsigned, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
static Value * simplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF, const SimplifyQuery &Q, unsigned MaxRecurse, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FSub, see if we can fold the result.
static Value * simplifyXorInst(Value *, Value *, const SimplifyQuery &, unsigned)
Given operands for a Xor, see if we can fold the result.
static Value * simplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for a URem, see if we can fold the result.
static Constant * simplifyFPOp(ArrayRef< Value * > Ops, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior, RoundingMode Rounding)
Perform folds that are common to any floating-point operation.
static Value * threadCmpOverSelect(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
In the case of a comparison with a select instruction, try to simplify the comparison by seeing wheth...
static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI, const DominatorTree *DT, AssumptionCache *AC, SmallSetVector< Instruction *, 8 > *UnsimplifiedUsers=nullptr)
Implementation of recursive simplification through an instruction's uses.
static bool isAllocDisjoint(const Value *V)
Return true if the underlying object (storage) must be disjoint from storage returned by any noalias ...
static Constant * getTrue(Type *Ty)
For a boolean type or a vector of boolean type, return true or a vector with every element true.
static Value * simplifyGEPInst(Type *, Value *, ArrayRef< Value * >, GEPNoWrapFlags, const SimplifyQuery &, unsigned)
Given operands for a GetElementPtrInst, see if we can fold the result.
static bool isDivZero(Value *X, Value *Y, const SimplifyQuery &Q, unsigned MaxRecurse, bool IsSigned)
Return true if we can simplify X / Y to 0.
static Value * simplifyLdexp(Value *Op0, Value *Op1, const SimplifyQuery &Q, bool IsStrict)
static Value * simplifyLogicOfAddSub(Value *Op0, Value *Op1, Instruction::BinaryOps Opcode)
Given a bitwise logic op, check if the operands are add/sub with a common source value and inverted c...
static Value * simplifyOrLogic(Value *X, Value *Y)
static Type * getCompareTy(Value *Op)
static Value * simplifyCastInst(unsigned, Value *, Type *, const SimplifyQuery &, unsigned)
static Value * simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1, const SimplifyQuery &Q)
static Value * simplifyBinOp(unsigned, Value *, Value *, const SimplifyQuery &, unsigned)
Given operands for a BinaryOperator, see if we can fold the result.
static bool isICmpTrue(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
Given a predicate and two operands, return true if the comparison is true.
static Value * simplifyInsertValueInst(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q, unsigned)
Given operands for an InsertValueInst, see if we can fold the result.
static Value * simplifyAndInst(Value *, Value *, const SimplifyQuery &, unsigned)
Given operands for an And, see if we can fold the result.
static Value * foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1, int MaskVal, Value *RootVec, unsigned MaxRecurse)
For the given destination element of a shuffle, peek through shuffles to match a root vector source o...
static Value * simplifyAndOrOfFCmps(const SimplifyQuery &Q, FCmpInst *LHS, FCmpInst *RHS, bool IsAnd)
static Value * extractEquivalentCondition(Value *V, CmpPredicate Pred, Value *LHS, Value *RHS)
Rummage around inside V looking for something equivalent to the comparison "LHS Pred RHS".
static Value * simplifyAndOrOfCmps(const SimplifyQuery &Q, Value *Op0, Value *Op1, bool IsAnd)
static Value * simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp, const SimplifyQuery &Q, bool AllowRefinement, SmallVectorImpl< Instruction * > *DropFlags, unsigned MaxRecurse)
static Value * threadBinOpOverSelect(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
In the case of a binary operation with a select instruction as an operand, try to simplify the binop ...
static Value * simplifyICmpUsingMonotonicValues(CmpPredicate Pred, Value *LHS, Value *RHS)
static Constant * computePointerDifference(const DataLayout &DL, Value *LHS, Value *RHS)
Compute the constant difference between two pointer values.
static Value * simplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an SRem, see if we can fold the result.
static Value * simplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF, const SimplifyQuery &Q, unsigned MaxRecurse, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given the operands for an FMul, see if we can fold the result.
static Value * simplifyAndOrOfICmpsWithConstants(ICmpInst *Cmp0, ICmpInst *Cmp1, bool IsAnd)
Test if a pair of compares with a shared operand and 2 constants has an empty set intersection,...
static Value * simplifyAndOrWithICmpEq(unsigned Opcode, Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
static Value * simplifyICmpWithDominatingAssume(CmpPredicate Predicate, Value *LHS, Value *RHS, const SimplifyQuery &Q)
static Value * simplifyShift(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, bool IsNSW, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an Shl, LShr or AShr, see if we can fold the result.
static Constant * computePointerICmp(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
static Value * simplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
These are simplifications common to SRem and URem.
static bool valueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT)
Does the given value dominate the specified phi node?
static Value * simplifySelectWithICmpCond(Value *CondVal, Value *TrueVal, Value *FalseVal, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify a select instruction when its condition operand is an integer comparison.
static Value * foldMinimumMaximumSharedOp(Intrinsic::ID IID, Value *Op0, Value *Op1)
Given a min/max intrinsic, see if it can be removed based on having an operand that is another min/ma...
static Value * simplifyUnaryIntrinsic(Function *F, Value *Op0, const SimplifyQuery &Q, const CallBase *Call)
This header provides classes for managing per-loop analyses.
This file implements a set that has insertion order iteration characteristics.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Class for arbitrary precision integers.
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
unsigned getActiveBits() const
Compute the number of active bits in the value.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
APInt urem(const APInt &RHS) const
Unsigned remainder operation.
void setSignBit()
Set the sign bit to 1.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
bool intersects(const APInt &RHS) const
This operation tests if there are any pairs of corresponding bits between this APInt and RHS that are...
unsigned countr_zero() const
Count the number of trailing zero bits.
bool isNonPositive() const
Determine if this APInt Value is non-positive (<= 0).
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
bool isStrictlyPositive() const
Determine if this APInt Value is strictly positive (> 0).
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
bool getBoolValue() const
Convert APInt to a boolean value.
APInt srem(const APInt &RHS) const
Function for signed remainder operation.
bool isMask(unsigned numBits) const
bool isMaxSignedValue() const
Determine if this is the largest signed value.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
bool isSignBitSet() const
Determine if sign bit of this APInt is set.
bool slt(const APInt &RHS) const
Signed less than comparison.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
bool isOne() const
Determine if this is a value of 1.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
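As a quick, self-contained sketch of a few of the APInt helpers listed above (illustrative values only, assuming LLVM headers are on the include path):
// apint_sketch.cpp - exercises a handful of the APInt queries listed above.
#include "llvm/ADT/APInt.h"
#include <cassert>

int main() {
  using llvm::APInt;

  // Bottom four bits set: 0x0000000F.
  APInt Mask = APInt::getLowBitsSet(32, 4);
  assert(Mask.isMask(4) && Mask.getBitWidth() == 32);

  // Unsigned remainder: 42 % 16 == 10.
  APInt X(32, 42);
  assert(X.urem(APInt(32, 16)) == 10);

  // Single-bit constants are powers of two; 8 has three trailing zeros.
  assert(APInt::getOneBitSet(32, 5).isPowerOf2());
  assert(APInt(32, 8).countr_zero() == 3);
  return 0;
}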
An instruction to allocate memory on the stack.
A container for analyses that lazily runs them and caches their results.
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
const T & back() const
back - Get the last element.
size_t size() const
size - Get the array size.
ArrayRef< T > drop_back(size_t N=1) const
Drop the last N elements of the array.
bool empty() const
empty - Check if the array is empty.
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
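A small sketch of the ArrayRef slicing helpers above (the data is arbitrary):
#include "llvm/ADT/ArrayRef.h"
#include <cassert>

int main() {
  int Data[] = {1, 2, 3, 4, 5};
  llvm::ArrayRef<int> A(Data);      // non-owning view over Data
  assert(A.size() == 5 && A.back() == 5);
  // slice(1, 3) keeps {2, 3, 4}; drop_back() removes the trailing 5.
  assert(A.slice(1, 3).size() == 3);
  assert(A.drop_back().back() == 4);
  return 0;
}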
An immutable pass that tracks lazily created AssumptionCache objects.
AssumptionCache & getAssumptionCache(Function &F)
Get the cached assumptions for a function.
A cache of @llvm.assume calls within a function.
MutableArrayRef< ResultElem > assumptionsFor(const Value *V)
Access the list of assumptions which affect this value.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
BinaryOps getOpcode() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
This class represents a function call, abstracting a target machine's calling convention.
static unsigned isEliminableCastPair(Instruction::CastOps firstOpcode, Instruction::CastOps secondOpcode, Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy, Type *DstIntPtrTy)
Determine how a pair of casts can be eliminated, if they can be at all.
This class is the base class for the comparison instructions.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Predicate getStrictPredicate() const
For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
bool isFalseWhenEqual() const
This is just a convenience.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ ICMP_ULT
unsigned less than
@ ICMP_SGE
signed greater or equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
bool isTrueWhenEqual() const
This is just a convenience.
bool isFPPredicate() const
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Predicate getPredicate() const
Return the predicate for this instruction.
static bool isUnordered(Predicate predicate)
Determine if the predicate is an unordered operation.
bool isIntPredicate() const
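The predicate helpers above also exist as static members of CmpInst, so they can be sketched without building an instruction (a stand-alone example, not code from this file):
#include "llvm/IR/InstrTypes.h"
#include <cassert>

int main() {
  using llvm::CmpInst;
  // Inverting a predicate negates it: SLT -> SGE.
  assert(CmpInst::getInversePredicate(CmpInst::ICMP_SLT) == CmpInst::ICMP_SGE);
  // Swapping the operands mirrors the relation: ULT -> UGT.
  assert(CmpInst::getSwappedPredicate(CmpInst::ICMP_ULT) == CmpInst::ICMP_UGT);
  // ULE holds when the operands are equal, ULT does not.
  assert(CmpInst::isTrueWhenEqual(CmpInst::ICMP_ULE));
  assert(CmpInst::isFalseWhenEqual(CmpInst::ICMP_ULT));
  return 0;
}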
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getExtractElement(Constant *Vec, Constant *Idx, Type *OnlyIfReducedTy=nullptr)
static Constant * getBinOpAbsorber(unsigned Opcode, Type *Ty, bool AllowLHSConstant=false)
Return the absorbing element for the given binary operation, i.e.
static Constant * getNot(Constant *C)
static Constant * getInsertElement(Constant *Vec, Constant *Elt, Constant *Idx, Type *OnlyIfReducedTy=nullptr)
static Constant * getShuffleVector(Constant *V1, Constant *V2, ArrayRef< int > Mask, Type *OnlyIfReducedTy=nullptr)
static bool isSupportedGetElementPtr(const Type *SrcElemTy)
Whether creating a constant expression for this getelementptr type is supported.
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
static Constant * getBinOpIdentity(unsigned Opcode, Type *Ty, bool AllowRHSConstant=false, bool NSZ=false)
Return the identity constant for a binary opcode.
static Constant * getZero(Type *Ty, bool Negative=false)
static Constant * getNegativeZero(Type *Ty)
static Constant * getNaN(Type *Ty, bool Negative=false, uint64_t Payload=0)
This is the shared class of boolean and integer constants.
static ConstantInt * getTrue(LLVMContext &Context)
static ConstantInt * getSigned(IntegerType *Ty, int64_t V)
Return a ConstantInt with the specified value for the specified type.
static ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
static ConstantInt * getBool(LLVMContext &Context, bool V)
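A minimal sketch of the ConstantInt factories above; the context and values are arbitrary:
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include <cassert>

int main() {
  llvm::LLVMContext Ctx;
  // The uniqued i1 true/false constants.
  llvm::ConstantInt *T = llvm::ConstantInt::getTrue(Ctx);
  llvm::ConstantInt *F = llvm::ConstantInt::getFalse(Ctx);
  assert(T->getZExtValue() == 1 && F->isZero());
  // A signed i32 constant: -1, i.e. all bits set.
  llvm::ConstantInt *AllOnes =
      llvm::ConstantInt::getSigned(llvm::Type::getInt32Ty(Ctx), -1);
  assert(AllOnes->isMinusOne());
  return 0;
}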
static ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
This class represents a range of values.
const APInt * getSingleElement() const
If this set contains a single element, return it, otherwise return null.
bool isFullSet() const
Return true if this set contains all of the elements possible for this data-type.
bool isEmptySet() const
Return true if this set contains no members.
static ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
ConstantRange inverse() const
Return a new range that is the logical not of the current set.
bool contains(const APInt &Val) const
Return true if the specified value is in the set.
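makeExactICmpRegion and contains, listed above, are enough for a small stand-alone sketch (8-bit values chosen arbitrarily):
#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/InstrTypes.h"
#include <cassert>

int main() {
  using namespace llvm;
  // Exactly the 8-bit values X satisfying "icmp ult X, 10", i.e. [0, 10).
  ConstantRange R =
      ConstantRange::makeExactICmpRegion(CmpInst::ICMP_ULT, APInt(8, 10));
  assert(R.contains(APInt(8, 3)) && !R.contains(APInt(8, 10)));
  // The inverse set is everything >= 10.
  assert(R.inverse().contains(APInt(8, 200)));
  return 0;
}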
static Constant * get(StructType *T, ArrayRef< Constant * > V)
static Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
static Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
static Constant * getAllOnesValue(Type *Ty)
bool isAllOnesValue() const
Return true if this is the value that would be returned by getAllOnesValue.
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
bool isNaN() const
Return true if this is a floating-point NaN constant or a vector floating-point constant with all NaN...
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
unsigned getPointerSizeInBits(unsigned AS=0) const
Layout pointer size, in bits. FIXME: The defaults need to be removed once all of the backends/clients ...
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
unsigned getIndexTypeSizeInBits(Type *Ty) const
Layout size of the index used in GEP calculation.
IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of a GEP index in AddressSpace.
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
unsigned getIndexSizeInBits(unsigned AS) const
Size in bits of index used for address calculation in getelementptr.
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
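A sketch of the DataLayout queries above; the layout string here is a made-up little-endian layout with 64-bit pointers, not one taken from any real target:
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
#include <cassert>

int main() {
  llvm::LLVMContext Ctx;
  llvm::DataLayout DL("e-p:64:64");              // hypothetical layout string
  assert(DL.getPointerSizeInBits(0) == 64);
  // With no explicit index width, GEP indices default to the pointer width.
  assert(DL.getIndexSizeInBits(0) == 64);
  // An i64 occupies 8 bytes including alignment padding.
  assert(DL.getTypeAllocSize(llvm::Type::getInt64Ty(Ctx)).getFixedValue() == 8);
  return 0;
}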
Legacy analysis pass which computes a DominatorTree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
This instruction compares its operands according to the predicate given to the constructor.
Convenience struct for specifying and reasoning about fast-math flags.
bool noSignedZeros() const
bool allowReassoc() const
Flag queries.
Represents calls to the gc.relocate intrinsic.
Value * getBasePtr() const
Value * getDerivedPtr() const
Represents flags for the getelementptr instruction/expression.
static Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
This instruction compares its operands according to the predicate given to the constructor.
static bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
bool isEquality() const
Return true if this predicate is either EQ or NE.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
This instruction inserts a struct field of array element value into an aggregate value.
bool hasNoSignedZeros() const LLVM_READONLY
Determine whether the no-signed-zeros flag is set.
bool isAssociative() const LLVM_READONLY
Return true if the instruction is associative:
bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
const Function * getFunction() const
Return the function this instruction belongs to.
An instruction for reading from memory.
bool isVolatile() const
Return true if this is a load from a volatile memory location.
static APInt getSaturationPoint(Intrinsic::ID ID, unsigned numBits)
Min/max intrinsics are monotonic; they operate on fixed-bitwidth values, so there is a certain thre...
ICmpInst::Predicate getPredicate() const
Returns the comparison predicate underlying the intrinsic.
op_range incoming_values()
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Pass interface - Implemented by all 'passes'.
static PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
This class represents a cast from a pointer to an integer.
This class represents a sign extension of integer types.
This class represents the LLVM 'select' instruction.
size_type size() const
Determine the number of elements in the SetVector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
static void commuteShuffleMask(MutableArrayRef< int > Mask, unsigned InVecNumElts)
Change values in a shuffle permute mask assuming the two vector operands of length InVecNumElts have ...
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
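The insert/contains pair above in a tiny sketch:
#include "llvm/ADT/SmallPtrSet.h"
#include <cassert>

int main() {
  int A = 0, B = 0;
  llvm::SmallPtrSet<int *, 4> S;
  // insert() returns {iterator, bool}; the bool says whether the pointer was new.
  assert(S.insert(&A).second);
  assert(!S.insert(&A).second);   // second insertion of &A is a no-op
  assert(S.contains(&A) && !S.contains(&B));
  return 0;
}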
A SetVector that performs no allocations if smaller than a certain size.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void assign(size_type NumElts, ValueParamT Elt)
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
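And the SmallVector operations above, equally briefly:
#include "llvm/ADT/SmallVector.h"
#include <cassert>

int main() {
  // Up to four elements live in-line; growing past that heap-allocates.
  llvm::SmallVector<int, 4> V;
  V.reserve(8);
  for (int I = 0; I < 8; ++I)
    V.push_back(I);
  assert(V.size() == 8 && V.back() == 7);
  return 0;
}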
TargetLibraryInfo & getTLI(const Function &F)
Provides information about what library functions are available for the current target.
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
bool isPointerTy() const
True if this is an instance of PointerType.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const
Return true if this is a type whose size is a known multiple of vscale.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
static IntegerType * getInt32Ty(LLVMContext &C)
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
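A quick sketch of the Type queries above, using a fixed <4 x i32> vector as the example type:
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
#include <cassert>

int main() {
  llvm::LLVMContext Ctx;
  llvm::Type *I32 = llvm::Type::getInt32Ty(Ctx);
  llvm::Type *V4I32 = llvm::FixedVectorType::get(I32, 4);
  assert(V4I32->isVectorTy() && V4I32->isIntOrIntVectorTy());
  assert(V4I32->getScalarSizeInBits() == 32);         // element width
  assert(I32->getPrimitiveSizeInBits().getFixedValue() == 32);
  return 0;
}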
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr) const
Accumulate the constant offset this value has compared to a base pointer.
LLVMContext & getContext() const
All values hold a context through their type.
This class represents zero extension of integer types.
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
class_match< PoisonValue > m_Poison()
Match an arbitrary poison constant.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
PtrAdd_match< PointerOpTy, OffsetOpTy > m_PtrAdd(const PointerOpTy &PointerOp, const OffsetOpTy &OffsetOp)
Matches GEP with i8 source element type.
cst_pred_ty< is_negative > m_Negative()
Match an integer or vector of negative values.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
CmpClass_match< LHS, RHS, FCmpInst > m_FCmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::FMul, true > m_c_FMul(const LHS &L, const RHS &R)
Matches FMul with LHS and RHS in either order.
cst_pred_ty< is_sign_mask > m_SignMask()
Match an integer or vector with only the sign bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
cstfp_pred_ty< is_inf > m_Inf()
Match a positive or negative infinity FP constant.
m_Intrinsic_Ty< Opnd0 >::Ty m_BitReverse(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::FSub > m_FSub(const LHS &L, const RHS &R)
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
BinaryOp_match< cstfp_pred_ty< is_any_zero_fp >, RHS, Instruction::FSub > m_FNegNSZ(const RHS &X)
Match 'fneg X' as 'fsub +-0.0, X'.
BinaryOp_match< LHS, RHS, Instruction::URem > m_URem(const LHS &L, const RHS &R)
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
cstfp_pred_ty< is_any_zero_fp > m_AnyZeroFP()
Match a floating-point negative zero or positive zero.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
CmpClass_match< LHS, RHS, ICmpInst, true > m_c_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
Matches an ICmp with a predicate over LHS and RHS in either order.
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
cstfp_pred_ty< is_neg_zero_fp > m_NegZeroFP()
Match a floating-point negative zero.
specific_fpval m_SpecificFP(double V)
Match a specific floating point value or vector with all elements equal to the value.
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > m_SMin(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_Sqrt(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)
Matches an Xor with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
cst_pred_ty< is_zero_int > m_ZeroInt()
Match an integer 0 or a vector with all elements equal to 0.
apint_match m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
BinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub > m_Neg(const ValTy &V)
Matches a 'Neg' as 'sub 0, V'.
match_combine_and< class_match< Constant >, match_unless< constantexpr_match > > m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoSignedWrap > m_NSWShl(const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWShl(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWMul(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty > m_UMax(const LHS &L, const RHS &R)
cst_pred_ty< custom_checkfn< APInt > > m_CheckedInt(function_ref< bool(const APInt &)> CheckFn)
Match an integer or vector where CheckFn(ele) for each element is true.
specific_fpval m_FPOne()
Match a float 1.0 or vector with all elements equal to 1.0.
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches a Add with LHS and RHS in either order.
apfloat_match m_APFloatAllowPoison(const APFloat *&Res)
Match APFloat while allowing poison in splat vector constants.
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShl(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty, true > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty, true > > > m_c_MaxOrMin(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty > m_SMax(const LHS &L, const RHS &R)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap > m_NSWAdd(const LHS &L, const RHS &R)
CastInst_match< OpTy, SIToFPInst > m_SIToFP(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
Exact_match< T > m_Exact(const T &SubPattern)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
cstfp_pred_ty< is_pos_zero_fp > m_PosZeroFP()
Match a floating-point positive zero.
BinaryOp_match< LHS, RHS, Instruction::FAdd, true > m_c_FAdd(const LHS &L, const RHS &R)
Matches FAdd with LHS and RHS in either order.
LogicalOp_match< LHS, RHS, Instruction::And, true > m_c_LogicalAnd(const LHS &L, const RHS &R)
Matches L && R with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
apfloat_match m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShr(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
cstfp_pred_ty< is_nan > m_NaN()
Match an arbitrary NaN constant.
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_BSwap(const Opnd0 &Op0)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
LogicalOp_match< LHS, RHS, Instruction::Or, true > m_c_LogicalOr(const LHS &L, const RHS &R)
Matches L || R with LHS and RHS in either order.
ThreeOps_match< Val_t, Elt_t, Idx_t, Instruction::InsertElement > m_InsertElt(const Val_t &Val, const Elt_t &Elt, const Idx_t &Idx)
Matches InsertElementInst.
ElementWiseBitCast_match< OpTy > m_ElementWiseBitCast(const OpTy &Op)
m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Mul, true > m_c_Mul(const LHS &L, const RHS &R)
Matches a Mul with LHS and RHS in either order.
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoSignedWrap > m_NSWMul(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > m_UMin(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
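The m_* matchers above compose with match(); the following stand-alone sketch builds a throwaway function (module and function names are made up) and recognizes a shift-by-constant:
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include <cassert>

int main() {
  using namespace llvm;
  using namespace llvm::PatternMatch;

  LLVMContext Ctx;
  Module M("demo", Ctx);
  auto *I32 = Type::getInt32Ty(Ctx);
  Function *F = Function::Create(FunctionType::get(I32, {I32}, false),
                                 Function::ExternalLinkage, "f", &M);
  IRBuilder<> B(BasicBlock::Create(Ctx, "entry", F));

  // %v = shl i32 %x, 3
  Value *V = B.CreateShl(F->getArg(0), B.getInt32(3));

  // Match "shl X, C" and bind both pieces.
  Value *X = nullptr;
  const APInt *C = nullptr;
  assert(match(V, m_Shl(m_Value(X), m_APInt(C))));
  assert(X == F->getArg(0) && C->getZExtValue() == 3);
  return 0;
}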
ExceptionBehavior
Exception behavior used for floating point operations.
@ ebStrict
This corresponds to "fpexcept.strict".
@ ebIgnore
This corresponds to "fpexcept.ignore".
This is an optimization pass for GlobalISel generic memory operations.
Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
Value * simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q)
Given operands for an AShr, fold the result or return null.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Value * simplifyFMulInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FMul, fold the result or return null.
Value * simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef< Value * > Indices, GEPNoWrapFlags NW, const SimplifyQuery &Q)
Given operands for a GetElementPtrInst, fold the result or return null.
bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI, const DominatorTree *DT=nullptr, bool AllowEphemerals=false)
Return true if it is valid to use the assumptions provided by an assume intrinsic,...
bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
Constant * ConstantFoldSelectInstruction(Constant *Cond, Constant *V1, Constant *V2)
Attempt to constant fold a select instruction with the specified operands.
Value * simplifyFreezeInst(Value *Op, const SimplifyQuery &Q)
Given an operand for a Freeze, see if we can fold the result.
Constant * ConstantFoldFPInstOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL, const Instruction *I, bool AllowNonDeterministic=true)
Attempt to constant fold a floating point binary operation with the specified operands,...
bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS, bool &TrueIfSigned)
Given an exploded icmp instruction, return true if the comparison only checks the sign bit.
bool canConstantFoldCallTo(const CallBase *Call, const Function *F)
canConstantFoldCallTo - Return true if its even possible to fold a call to the specified function.
APInt getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth)
Return the minimum or maximum constant value for the specified integer min/max flavor and type.
Value * simplifySDivInst(Value *LHS, Value *RHS, bool IsExact, const SimplifyQuery &Q)
Given operands for an SDiv, fold the result or return null.
Value * simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q)
Given the operand for a UnaryOperator, fold the result or return null.
bool isDefaultFPEnvironment(fp::ExceptionBehavior EB, RoundingMode RM)
Returns true if the exception handling behavior and rounding mode match what is used in the default f...
Value * simplifyMulInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for a Mul, fold the result or return null.
bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV, APInt &Offset, const DataLayout &DL, DSOLocalEquivalent **DSOEquiv=nullptr)
If this constant is a constant offset from a global, return the global and the constant.
Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &Q)
Like simplifyInstruction but the operands of I are replaced with NewOps.
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
Value * simplifyCall(CallBase *Call, Value *Callee, ArrayRef< Value * > Args, const SimplifyQuery &Q)
Given a callsite, callee, and arguments, fold the result or return null.
Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given value is known to have exactly one bit set when defined.
bool canRoundingModeBe(RoundingMode RM, RoundingMode QRM)
Returns true if the rounding mode RM may be QRM at compile time or at run time.
bool isNoAliasCall(const Value *V)
Return true if this pointer is returned by a noalias function.
Value * simplifyFCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q)
Given operands for an FCmpInst, fold the result or return null.
Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
Constant * ConstantFoldGetElementPtr(Type *Ty, Constant *C, std::optional< ConstantRange > InRange, ArrayRef< Value * > Idxs)
CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF, bool Ordered=false)
Return the canonical comparison predicate for the specified minimum/maximum flavor.
Value * simplifyShuffleVectorInst(Value *Op0, Value *Op1, ArrayRef< int > Mask, Type *RetTy, const SimplifyQuery &Q)
Given operands for a ShuffleVectorInst, fold the result or return null.
Constant * ConstantFoldCall(const CallBase *Call, Function *F, ArrayRef< Constant * > Operands, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldCall - Attempt to constant fold a call to the specified function with the specified argum...
Value * simplifyOrInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an Or, fold the result or return null.
Value * simplifyXorInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an Xor, fold the result or return null.
ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
ConstantRange computeConstantRange(const Value *V, bool ForSigned, bool UseInstrInfo=true, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Determine the possible constant range of an integer or vector of integer value.
Constant * ConstantFoldExtractValueInstruction(Constant *Agg, ArrayRef< unsigned > Idxs)
Attempt to constant fold an extractvalue instruction with the specified operands and indices.
bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates memory (either malloc,...
bool MaskedValueIsZero(const Value *V, const APInt &Mask, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if 'V & Mask' is known to be zero.
Value * simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty, const SimplifyQuery &Q)
Given operands for a CastInst, fold the result or return null.
Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
Value * simplifySubInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for a Sub, fold the result or return null.
Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)
Wrapper function around std::transform to apply a function to a range and store the result elsewhere.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL, const TargetLibraryInfo *TLI, ObjectSizeOpts Opts={})
Compute the size of the object pointed by Ptr.
bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned el...
Constant * ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty, const DataLayout &DL)
If C is a uniform value where all bits are the same (either all zero, all ones, all undef or all pois...
SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF)
Return the inverse minimum/maximum flavor of the specified flavor.
bool replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, SmallSetVector< Instruction *, 8 > *UnsimplifiedUsers=nullptr)
Replace all uses of 'I' with 'SimpleV' and simplify the uses recursively.
Constant * ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op, const DataLayout &DL)
Attempt to constant fold a unary operation with the specified operand.
SelectPatternFlavor
Specific patterns of select instructions we can match.
Value * simplifyShlInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for a Shl, fold the result or return null.
Value * simplifyFNegInst(Value *Op, FastMathFlags FMF, const SimplifyQuery &Q)
Given the operand for an FNeg, fold the result or return null.
Value * simplifyFSubInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FSub, fold the result or return null.
bool impliesPoison(const Value *ValAssumedPoison, const Value *V)
Return true if V is poison given that ValAssumedPoison is already poison.
Value * simplifyFRemInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FRem, fold the result or return null.
Value * simplifyFAddInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FAdd, fold the result or return null.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
bool PointerMayBeCaptured(const Value *V, bool ReturnCaptures, bool StoreCaptures, unsigned MaxUsesToExplore=0)
PointerMayBeCaptured - Return true if this pointer value may be captured by the enclosing function (w...
Value * simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q)
Given operands for a LShr, fold the result or return null.
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
Value * simplifyICmpInst(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an ICmpInst, fold the result or return null.
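For instance, the fold that an unsigned value is never less than zero can be reached directly through simplifyICmpInst (a sketch; module and function names are made up):
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include <cassert>

int main() {
  using namespace llvm;
  LLVMContext Ctx;
  Module M("demo", Ctx);
  auto *I32 = Type::getInt32Ty(Ctx);
  Function *F = Function::Create(FunctionType::get(I32, {I32}, false),
                                 Function::ExternalLinkage, "f", &M);
  Value *X = F->getArg(0);

  SimplifyQuery Q(M.getDataLayout());
  // "icmp ult %x, 0" can never be true, so it folds to i1 false.
  Value *R =
      simplifyICmpInst(ICmpInst::ICMP_ULT, X, ConstantInt::get(I32, 0), Q);
  assert(R == ConstantInt::getFalse(Ctx));
  return 0;
}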
ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
Constant * ConstantFoldInstOperands(Instruction *I, ArrayRef< Constant * > Ops, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldInstOperands - Attempt to constant fold an instruction with the specified operands.
Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
Value * simplifyAndInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an And, fold the result or return null.
Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an ExtractValueInst, fold the result or return null.
bool isNotCrossLaneOperation(const Instruction *I)
Return true if the instruction doesn't potentially cross vector lanes.
Value * simplifyInsertValueInst(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an InsertValueInst, fold the result or return null.
Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
Value * simplifyFDivInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FDiv, fold the result or return null.
bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr int PoisonMaskElem
Value * simplifyLoadInst(LoadInst *LI, Value *PtrOp, const SimplifyQuery &Q)
Given a load instruction and its pointer operand, fold the result or return null.
Value * simplifyFMAFMul(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for the multiplication of an FMA, fold the result or return null.
Value * simplifyConstrainedFPCall(CallBase *Call, const SimplifyQuery &Q)
Given a constrained FP intrinsic call, tries to compute its simplified version.
Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given values are known to be non-equal when defined.
@ Or
Bitwise or logical OR of integers.
std::optional< DecomposedBitTest > decomposeBitTestICmp(Value *LHS, Value *RHS, CmpInst::Predicate Pred, bool LookThroughTrunc=true, bool AllowNonZeroC=false)
Decompose an icmp into the form ((X & Mask) pred C) if possible.
Value * findScalarElement(Value *V, unsigned EltNo)
Given a vector and an element number, see if the scalar value is already around as a register,...
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
Value * simplifyUDivInst(Value *LHS, Value *RHS, bool IsExact, const SimplifyQuery &Q)
Given operands for a UDiv, fold the result or return null.
Value * simplifyBinaryIntrinsic(Intrinsic::ID IID, Type *ReturnType, Value *Op0, Value *Op1, const SimplifyQuery &Q, const CallBase *Call)
Given operands for a BinaryIntrinsic, fold the result or return null.
RoundingMode
Rounding mode.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
Value * simplifyInsertElementInst(Value *Vec, Value *Elt, Value *Idx, const SimplifyQuery &Q)
Given operands for an InsertElement, fold the result or return null.
constexpr unsigned BitWidth
SelectPatternResult matchDecomposedSelectPattern(CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Determine the pattern that a select with the given compare as its predicate and given values as its t...
Value * simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp, const SimplifyQuery &Q, bool AllowRefinement, SmallVectorImpl< Instruction * > *DropFlags=nullptr)
See if V simplifies when its operand Op is replaced with RepOp.
bool maskIsAllZeroOrUndef(Value *Mask)
Given a mask vector of i1, Return true if all of the elements of this predicate mask are known to be ...
std::pair< Value *, FPClassTest > fcmpToClassTest(CmpInst::Predicate Pred, const Function &F, Value *LHS, Value *RHS, bool LookThroughSrc=true)
Returns a pair of values, which if passed to llvm.is.fpclass, returns the same result as an fcmp with...
Value * simplifySRemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an SRem, fold the result or return null.
void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
std::optional< bool > computeKnownFPSignBit(const Value *V, unsigned Depth, const SimplifyQuery &SQ)
Return false if we can prove that the specified FP value's sign bit is 0.
unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return the number of times the sign bit of the register is replicated into the other bits.
bool cannotBeNegativeZero(const Value *V, unsigned Depth, const SimplifyQuery &SQ)
Return true if we can prove that the specified FP value is never equal to -0.0.
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
Constant * ConstantFoldInsertValueInstruction(Constant *Agg, Constant *Val, ArrayRef< unsigned > Idxs)
ConstantFoldInsertValueInstruction - Attempt to constant fold an insertvalue instruction with the spe...
Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset, const DataLayout &DL)
Return the value that a load from C with offset Offset would produce if it is constant and determinab...
std::optional< bool > isImpliedByDomCondition(const Value *Cond, const Instruction *ContextI, const DataLayout &DL)
Return the boolean condition value in the context of the given instruction if it is known based on do...
Value * simplifyCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a CmpInst, fold the result or return null.
bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
KnownFPClass computeKnownFPClass(const Value *V, const APInt &DemandedElts, FPClassTest InterestedClasses, unsigned Depth, const SimplifyQuery &SQ)
Determine which floating-point classes are valid for V, and return them in KnownFPClass bit sets.
bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW=false, bool AllowPoison=true)
Return true if the two given values are negations of each other.
Constant * ConstantFoldIntegerCast(Constant *C, Type *DestTy, bool IsSigned, const DataLayout &DL)
Constant fold a zext, sext or trunc, depending on IsSigned and whether the DestTy is wider or narrower than the source type.
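A hedged example of the cast fold (the helper name is assumed):

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
using namespace llvm;

// Assumed helper: fold C to DestTy as a signed cast (sext when widening,
// trunc when narrowing, no-op when the widths match); returns nullptr if the
// cast cannot be folded.
static Constant *castConstSigned(Constant *C, Type *DestTy,
                                 const DataLayout &DL) {
  return ConstantFoldIntegerCast(C, DestTy, /*IsSigned=*/true, DL);
}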
const SimplifyQuery getBestSimplifyQuery(Pass &, Function &)
bool isCheckForZeroAndMulWithOverflow(Value *Op0, Value *Op1, bool IsAnd, Use *&Y)
Match one of the patterns up to the select/logic op: Op0 = icmp ne i4 X, 0 Agg = call { i4,...
bool canIgnoreSNaN(fp::ExceptionBehavior EB, FastMathFlags FMF)
Returns true if the possibility of a signaling NaN can be safely ignored.
Value * simplifyURemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a URem, fold the result or return null.
Value * simplifyExtractElementInst(Value *Vec, Value *Idx, const SimplifyQuery &Q)
Given operands for an ExtractElementInst, fold the result or return null.
Value * simplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal, const SimplifyQuery &Q)
Given operands for a SelectInst, fold the result or return null.
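A sketch of the usual call pattern for these simplify* entry points, using simplifySelectInst; the driver function below is assumed, not part of this file.

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Assumed driver: if the select folds (e.g. both arms are the same value, or
// the condition is constant), replace it and erase the instruction.
static bool tryFoldSelect(SelectInst &Sel, const SimplifyQuery &Q) {
  Value *V = simplifySelectInst(Sel.getCondition(), Sel.getTrueValue(),
                                Sel.getFalseValue(),
                                Q.getWithInstruction(&Sel));
  if (!V)
    return false;
  Sel.replaceAllUsesWith(V);
  Sel.eraseFromParent();
  return true;
}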
std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS, false if it is known to be implied false, and std::nullopt otherwise.
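An assumed usage sketch for isImpliedCondition (the helper name is hypothetical):

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include <optional>
using namespace llvm;

// Assumed helper: on the taken edge of "br i1 BrCond", does BrCond already
// decide Cond? A populated optional means Cond is known true/false there.
static std::optional<bool> decidedByTakenBranch(const Value *BrCond,
                                                const Value *Cond,
                                                const DataLayout &DL) {
  return isImpliedCondition(BrCond, Cond, DL, /*LHSIsTrue=*/true);
}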
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This callback is used in conjunction with PointerMayBeCaptured.
virtual void tooManyUses()=0
tooManyUses - The depth of traversal has breached a limit.
virtual bool captured(const Use *U)=0
captured - Information about the pointer was captured by the user of use U.
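For orientation, a minimal CaptureTracker against the interface shown above might look like the following sketch; the struct name is invented, and the callback signatures vary between LLVM versions.

#include "llvm/Analysis/CaptureTracking.h"
using namespace llvm;

// Assumed minimal tracker: treat any capturing use, or running out of budget,
// as "captured".
struct AnyCaptureTracker final : CaptureTracker {
  bool Captured = false;
  void tooManyUses() override { Captured = true; }
  bool captured(const Use *U) override {
    Captured = true;
    return true; // returning true stops the traversal early
  }
};

// Usage sketch: PointerMayBeCaptured(Ptr, &Tracker) walks Ptr's uses and
// invokes the callbacks above.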
Incoming value for a lane-mask phi as a machine instruction; the incoming register Reg and incoming block Block are...
InstrInfoQuery provides an interface to query additional information for instructions like metadata o...
bool isExact(const BinaryOperator *Op) const
MDNode * getMetadata(const Instruction *I, unsigned KindID) const
bool hasNoSignedWrap(const InstT *Op) const
bool hasNoUnsignedWrap(const InstT *Op) const
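A sketch of how the flag queries above are typically consulted (the helper name is assumed):

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// Assumed helper: trust nsw on both operators only when the query's
// InstrInfoQuery allows reading instruction flags (it reports false otherwise).
static bool bothHaveNSW(const BinaryOperator *A, const BinaryOperator *B,
                        const SimplifyQuery &Q) {
  return Q.IIQ.hasNoSignedWrap(A) && Q.IIQ.hasNoSignedWrap(B);
}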
bool isNonNegative() const
Returns true if this value is known to be non-negative.
bool isZero() const
Returns true if the value is known to be zero.
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
bool hasConflict() const
Returns true if there is conflicting information.
unsigned getBitWidth() const
Get the bit width of this value.
unsigned countMaxActiveBits() const
Returns the maximum number of bits needed to represent all possible unsigned values with these known bits.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
bool isNegative() const
Returns true if this value is known to be negative.
static KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
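To make the KnownBits queries above concrete, here is a small self-contained sketch; the 8-bit example pattern and the main() driver are purely illustrative assumptions.

#include "llvm/ADT/APInt.h"
#include "llvm/Support/KnownBits.h"
using namespace llvm;

int main() {
  // Assumed example: an 8-bit value of the form 0b000001?0 (only bit 1 is
  // unknown), i.e. the value is either 4 or 6.
  KnownBits K(8);
  K.Zero = APInt(8, 0b11111001); // bits known to be 0
  K.One  = APInt(8, 0b00000100); // bits known to be 1

  unsigned MinTZ = K.countMinTrailingZeros(); // 1: bit 0 is known zero
  unsigned MaxTZ = K.countMaxTrailingZeros(); // 2: bit 2 is the lowest known one
  APInt Min = K.getMinValue();                // 4 (all unknown bits zero)
  APInt Max = K.getMaxValue();                // 6 (all unknown bits one)
  bool NonNeg = K.isNonNegative();            // true: sign bit is known zero

  // Shifting left by a known constant 1 adds one more known trailing zero.
  KnownBits Shifted = KnownBits::shl(K, KnownBits::makeConstant(APInt(8, 1)));
  return (MinTZ == 1 && MaxTZ == 2 && NonNeg && Min == 4 && Max == 6 &&
          Shifted.countMinTrailingZeros() == 2) ? 0 : 1;
}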
bool isKnownAlwaysNaN() const
Return true if it's known this must always be a nan.
static constexpr FPClassTest OrderedLessThanZeroMask
std::optional< bool > SignBit
std::nullopt if the sign bit is unknown, true if the sign bit is definitely set, or false if the sign bit is definitely unset.
bool isKnownNeverNaN() const
Return true if it's known this can never be a nan.
bool isKnownNever(FPClassTest Mask) const
Return true if it's known this can never be one of the mask entries.
bool cannotBeOrderedLessThanZero() const
Return true if we can prove that the analyzed floating-point value is either NaN or never less than -0.0.
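A hedged sketch tying several of these FP-class queries together; the helper name and the scalar-value DemandedElts convention of APInt(1, 1) are assumptions.

#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/ValueTracking.h"
using namespace llvm;

// Assumed helper: decide whether a scalar FP value is safe to treat as
// "never NaN and never ordered-less-than -0.0".
static bool knownNonNegativeNonNaN(const Value *V, const SimplifyQuery &SQ) {
  KnownFPClass Known = computeKnownFPClass(V, APInt(1, 1),
                                           /*InterestedClasses=*/fcAllFlags,
                                           /*Depth=*/0, SQ);
  return Known.isKnownNeverNaN() && Known.cannotBeOrderedLessThanZero();
}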
The adaptor from a function pass to a loop pass computes these analyses and makes them available to t...
Various options to control the behavior of getObjectSize.
bool NullIsUnknownSize
If this is true, null pointers in address space 0 will be treated as though they can't be evaluated.
Mode EvalMode
How we want to evaluate this object's size.
SelectPatternFlavor Flavor
static bool isMinOrMax(SelectPatternFlavor SPF)
When implementing this min/max pattern as fcmp; select, does the fcmp have to be ordered?
bool CanUseUndef
Controls whether simplifications are allowed to constrain the range of possible values for uses of undef.
SimplifyQuery getWithInstruction(const Instruction *I) const
bool isUndefValue(Value *V) const
If CanUseUndef is true, returns whether V is undef.
const TargetLibraryInfo * TLI
SimplifyQuery getWithoutUndef() const
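A short assumed sketch of the CanUseUndef / getWithoutUndef interplay documented above:

#include "llvm/Analysis/InstructionSimplify.h"
using namespace llvm;

// With CanUseUndef cleared, isUndefValue reports false even for a literal
// undef, so folds that would have to "choose" a value for undef are skipped.
static bool undefSeenWithoutUndef(Value *V, const SimplifyQuery &Q) {
  return Q.getWithoutUndef().isUndefValue(V); // always false
}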