39 using namespace llvm::PatternMatch;
41 #define DEBUG_TYPE "instsimplify"
45 STATISTIC(NumExpand,
"Number of expansions");
46 STATISTIC(NumReassoc,
"Number of reassociations");
59 : DL(DL), TLI(tli), DT(dt), AC(ac), CxtI(cxti) {}
67 const Query &,
unsigned);
71 const Query &Q,
unsigned MaxRecurse);
75 const Query &,
unsigned);
81 "Expected i1 type or a vector of i1!");
89 "Expected i1 type or a vector of i1!");
101 if (CPred == Pred && CLHS == LHS && CRHS == RHS)
144 unsigned OpcToExpand,
const Query &Q,
145 unsigned MaxRecurse) {
153 if (Op0->getOpcode() == OpcodeToExpand) {
155 Value *
A = Op0->getOperand(0), *
B = Op0->getOperand(1), *
C = RHS;
162 &&
L ==
B && R == A)) {
176 if (Op1->getOpcode() == OpcodeToExpand) {
178 Value *
A = LHS, *
B = Op1->getOperand(0), *
C = Op1->getOperand(1);
185 &&
L ==
C && R == B)) {
203 const Query &Q,
unsigned MaxRecurse) {
224 if (V == B)
return LHS;
234 if (Op1 && Op1->getOpcode() == Opcode) {
236 Value *
B = Op1->getOperand(0);
237 Value *
C = Op1->getOperand(1);
243 if (V == B)
return RHS;
266 if (V == A)
return LHS;
276 if (Op1 && Op1->getOpcode() == Opcode) {
278 Value *
B = Op1->getOperand(0);
279 Value *
C = Op1->getOperand(1);
285 if (V == C)
return RHS;
302 const Query &Q,
unsigned MaxRecurse) {
308 if (isa<SelectInst>(LHS)) {
309 SI = cast<SelectInst>(LHS);
311 assert(isa<SelectInst>(RHS) &&
"No select instruction operand!");
312 SI = cast<SelectInst>(RHS);
332 if (TV && isa<UndefValue>(TV))
334 if (FV && isa<UndefValue>(FV))
345 if ((FV && !TV) || (TV && !FV)) {
349 if (Simplified && Simplified->
getOpcode() == Opcode) {
354 Value *UnsimplifiedLHS = SI == LHS ? UnsimplifiedBranch : LHS;
355 Value *UnsimplifiedRHS = SI == LHS ? RHS : UnsimplifiedBranch;
356 if (Simplified->
getOperand(0) == UnsimplifiedLHS &&
360 Simplified->
getOperand(1) == UnsimplifiedLHS &&
374 unsigned MaxRecurse) {
380 if (!isa<SelectInst>(LHS)) {
384 assert(isa<SelectInst>(LHS) &&
"Not comparing with a select instruction!");
396 TCmp =
getTrue(Cond->getType());
402 TCmp =
getTrue(Cond->getType());
455 const Query &Q,
unsigned MaxRecurse) {
461 if (isa<PHINode>(LHS)) {
462 PI = cast<PHINode>(LHS);
467 assert(isa<PHINode>(RHS) &&
"No PHI instruction operand!");
468 PI = cast<PHINode>(RHS);
475 Value *CommonValue =
nullptr;
478 if (Incoming == PI)
continue;
479 Value *V = PI == LHS ?
484 if (!V || (CommonValue && V != CommonValue))
497 const Query &Q,
unsigned MaxRecurse) {
503 if (!isa<PHINode>(LHS)) {
507 assert(isa<PHINode>(LHS) &&
"Not comparing with a phi instruction!");
508 PHINode *PI = cast<PHINode>(LHS);
515 Value *CommonValue =
nullptr;
518 if (Incoming == PI)
continue;
522 if (!V || (CommonValue && V != CommonValue))
533 const Query &Q,
unsigned MaxRecurse) {
534 if (
Constant *CLHS = dyn_cast<Constant>(Op0)) {
535 if (
Constant *CRHS = dyn_cast<Constant>(Op1))
604 bool AllowNonInbounds =
false) {
616 if ((!AllowNonInbounds && !
GEP->isInBounds()) ||
617 !
GEP->accumulateConstantOffset(DL, Offset))
619 V =
GEP->getPointerOperand();
621 V = cast<Operator>(V)->getOperand(0);
622 }
else if (
GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
623 if (GA->isInterposable())
625 V = GA->getAliasee();
628 if (
Value *RV = CS.getReturnedArgOperand()) {
635 "Unexpected operand type!");
636 }
while (Visited.insert(V).second);
667 const Query &Q,
unsigned MaxRecurse) {
668 if (
Constant *CLHS = dyn_cast<Constant>(Op0))
669 if (
Constant *CRHS = dyn_cast<Constant>(Op1))
692 APInt KnownZero(BitWidth, 0);
693 APInt KnownOne(BitWidth, 0);
708 Value *
X =
nullptr, *
Y =
nullptr, *Z = Op1;
809 const Query &Q,
unsigned MaxRecurse) {
810 if (
Constant *CLHS = dyn_cast<Constant>(Op0)) {
811 if (
Constant *CRHS = dyn_cast<Constant>(Op1))
830 Value *SubOp =
nullptr;
848 const Query &Q,
unsigned MaxRecurse) {
849 if (
Constant *CLHS = dyn_cast<Constant>(Op0)) {
850 if (
Constant *CRHS = dyn_cast<Constant>(Op1))
874 if (FMF.
noNaNs() && Op0 == Op1)
884 unsigned MaxRecurse) {
885 if (
Constant *CLHS = dyn_cast<Constant>(Op0)) {
886 if (
Constant *CRHS = dyn_cast<Constant>(Op1))
907 unsigned MaxRecurse) {
908 if (
Constant *CLHS = dyn_cast<Constant>(Op0)) {
909 if (
Constant *CRHS = dyn_cast<Constant>(Op1))
951 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
958 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1004 const Query &Q,
unsigned MaxRecurse) {
1005 if (
Constant *C0 = dyn_cast<Constant>(Op0))
1006 if (
Constant *C1 = dyn_cast<Constant>(Op1))
1009 bool isSigned = Opcode == Instruction::SDiv;
1040 Value *
X =
nullptr, *
Y =
nullptr;
1050 if (Div->getOpcode() == Opcode && Div->getOperand(1) ==
Y)
1071 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1077 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1087 unsigned MaxRecurse) {
1105 unsigned MaxRecurse) {
1113 if (
C->isAllOnesValue()) {
1131 const Query &Q,
unsigned) {
1180 const Query &Q,
unsigned MaxRecurse) {
1181 if (
Constant *C0 = dyn_cast<Constant>(Op0))
1182 if (
Constant *C1 = dyn_cast<Constant>(Op1))
1214 if ((Opcode == Instruction::SRem &&
1216 (Opcode == Instruction::URem &&
1222 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1228 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1238 unsigned MaxRecurse) {
1256 unsigned MaxRecurse) {
1264 if (
C->isAllOnesValue()) {
1282 const Query &,
unsigned) {
1316 if (isa<UndefValue>(C))
1321 if (CI->getValue().getLimitedValue() >=
1322 CI->getType()->getScalarSizeInBits())
1326 if (isa<ConstantVector>(C) || isa<ConstantDataVector>(
C)) {
1339 const Query &Q,
unsigned MaxRecurse) {
1340 if (
Constant *C0 = dyn_cast<Constant>(Op0))
1341 if (
Constant *C1 = dyn_cast<Constant>(Op1))
1358 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1364 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1371 APInt KnownZero(BitWidth, 0);
1372 APInt KnownOne(BitWidth, 0);
1381 if ((KnownZero & ShiftAmountMask) == ShiftAmountMask)
1390 bool isExact,
const Query &Q,
1391 unsigned MaxRecurse) {
1407 APInt Op0KnownZero(BitWidth, 0);
1408 APInt Op0KnownOne(BitWidth, 0);
1421 const Query &Q,
unsigned MaxRecurse) {
1448 const Query &Q,
unsigned MaxRecurse) {
1473 const Query &Q,
unsigned MaxRecurse) {
1505 ICmpInst *UnsignedICmp,
bool IsAnd) {
1517 else if (
match(UnsignedICmp,
1527 return IsAnd ? UnsignedICmp : ZeroICmp;
1534 return UnsignedICmp;
1582 const APInt *C0, *C1;
1590 if (Range0.intersectWith(Range1).isEmptySet())
1601 auto *AddInst = cast<BinaryOperator>(Op0->
getOperand(0));
1602 if (AddInst->getOperand(1) != Op1->
getOperand(1))
1605 bool isNSW = AddInst->hasNoSignedWrap();
1606 bool isNUW = AddInst->hasNoUnsignedWrap();
1608 const APInt Delta = *C1 - *C0;
1609 if (C0->isStrictlyPositive()) {
1623 if (C0->getBoolValue() && isNUW) {
1638 unsigned MaxRecurse) {
1639 if (
Constant *CLHS = dyn_cast<Constant>(Op0)) {
1640 if (
Constant *CRHS = dyn_cast<Constant>(Op1))
1669 Value *
A =
nullptr, *
B =
nullptr;
1671 (A == Op1 ||
B == Op1))
1676 (A == Op0 ||
B == Op0))
1690 if (
auto *ICILHS = dyn_cast<ICmpInst>(Op0)) {
1691 if (
auto *ICIRHS = dyn_cast<ICmpInst>(Op1)) {
1703 if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() &&
1704 Cast0->getSrcTy() == Cast1->getSrcTy()) {
1709 Type *ResultType = Cast0->getType();
1734 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1741 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1794 const APInt *C0, *C1;
1802 auto *AddInst = cast<BinaryOperator>(Op0->
getOperand(0));
1803 if (AddInst->getOperand(1) != Op1->
getOperand(1))
1807 bool isNSW = AddInst->hasNoSignedWrap();
1808 bool isNUW = AddInst->hasNoUnsignedWrap();
1810 const APInt Delta = *C1 - *C0;
1811 if (C0->isStrictlyPositive()) {
1825 if (C0->getBoolValue() && isNUW) {
1840 unsigned MaxRecurse) {
1841 if (
Constant *CLHS = dyn_cast<Constant>(Op0)) {
1842 if (
Constant *CRHS = dyn_cast<Constant>(Op1))
1871 Value *
A =
nullptr, *
B =
nullptr;
1873 (A == Op1 ||
B == Op1))
1878 (A == Op0 ||
B == Op0))
1883 (A == Op1 ||
B == Op1))
1888 (A == Op0 ||
B == Op0))
1891 if (
auto *ICILHS = dyn_cast<ICmpInst>(Op0)) {
1892 if (
auto *ICIRHS = dyn_cast<ICmpInst>(Op1)) {
1912 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1918 Value *
C =
nullptr, *
D =
nullptr;
1923 if (C1 && C2 && (C1->
getValue() == ~C2->getValue())) {
1929 if ((C2->getValue() & (C2->getValue() + 1)) == 0 &&
1955 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1973 unsigned MaxRecurse) {
1974 if (
Constant *CLHS = dyn_cast<Constant>(Op0)) {
1975 if (
Constant *CRHS = dyn_cast<Constant>(Op1))
2040 if (Pred == Cmp->
getPredicate() && LHS == CmpLHS && RHS == CmpRHS)
2043 LHS == CmpRHS && RHS == CmpLHS)
2157 if (isa<AllocaInst>(LHS) &&
2158 (isa<AllocaInst>(RHS) || isa<GlobalVariable>(RHS))) {
2161 uint64_t LHSSize, RHSSize;
2162 if (LHSOffsetCI && RHSOffsetCI &&
2166 const APInt &RHSOffsetValue = RHSOffsetCI->getValue();
2169 LHSOffsetValue.
ult(LHSSize) &&
2170 RHSOffsetValue.
ult(RHSSize)) {
2178 if (!cast<PointerType>(LHS->
getType())->isEmptyTy() &&
2179 !cast<PointerType>(RHS->
getType())->isEmptyTy() &&
2219 if (
const AllocaInst *AI = dyn_cast<AllocaInst>(V))
2220 return AI->getParent() && AI->getFunction() && AI->isStaticAlloca();
2221 if (
const GlobalValue *GV = dyn_cast<GlobalValue>(V))
2222 return (GV->hasLocalLinkage() || GV->hasHiddenVisibility() ||
2223 GV->hasProtectedVisibility() || GV->hasGlobalUnnamedAddr()) &&
2224 !GV->isThreadLocal();
2225 if (
const Argument *
A = dyn_cast<Argument>(V))
2226 return A->hasByValAttr();
2231 if ((IsNAC(LHSUObjs) && IsAllocDisjoint(RHSUObjs)) ||
2232 (IsNAC(RHSUObjs) && IsAllocDisjoint(LHSUObjs)))
2325 bool LHSKnownNonNegative, LHSKnownNegative;
2344 ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL, 0, Q.AC,
2346 if (LHSKnownNegative)
2348 if (LHSKnownNonNegative)
2352 ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL, 0, Q.AC,
2354 if (LHSKnownNegative)
2356 if (LHSKnownNonNegative &&
isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2360 ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL, 0, Q.AC,
2362 if (LHSKnownNegative)
2364 if (LHSKnownNonNegative)
2368 ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL, 0, Q.AC,
2370 if (LHSKnownNegative)
2372 if (LHSKnownNonNegative &&
isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2405 Lower = (-Upper) + 1;
2413 Upper = NegOne.
udiv(*C2) + 1;
2418 Upper = Lower.
lshr(1) + 1;
2421 Upper = C2->
abs() + 1;
2422 Lower = (-Upper) + 1;
2435 Lower = IntMin.
sdiv(*C2);
2436 Upper = IntMax.
sdiv(*C2);
2437 if (Lower.
sgt(Upper))
2440 assert(Upper != Lower &&
"Upper part of range has wrapped!");
2450 Lower = C2->
shl(ShiftAmount);
2456 Upper = C2->
shl(ShiftAmount) + 1;
2462 Upper = NegOne.
lshr(*C2) + 1;
2465 unsigned ShiftAmount = Width - 1;
2466 if (*C2 != 0 && cast<BinaryOperator>(LHS)->isExact())
2468 Lower = C2->
lshr(ShiftAmount);
2474 if (C2->
ult(Width)) {
2475 Lower = IntMin.
ashr(*C2);
2476 Upper = IntMax.
ashr(*C2) + 1;
2479 unsigned ShiftAmount = Width - 1;
2480 if (*C2 != 0 && cast<BinaryOperator>(LHS)->isExact())
2485 Upper = C2->
ashr(ShiftAmount) + 1;
2488 Lower = C2->
ashr(ShiftAmount);
2503 Lower != Upper ?
ConstantRange(Lower, Upper) : ConstantRange(Width,
true);
2505 if (
auto *
I = dyn_cast<Instruction>(LHS))
2521 unsigned MaxRecurse) {
2526 if (MaxRecurse && (LBO || RBO)) {
2528 Value *
A =
nullptr, *
B =
nullptr, *
C =
nullptr, *
D =
nullptr;
2530 bool NoLHSWrapProblem =
false, NoRHSWrapProblem =
false;
2540 C = RBO->getOperand(0);
2541 D = RBO->getOperand(1);
2549 if ((A == RHS ||
B == RHS) && NoLHSWrapProblem)
2556 if ((
C == LHS ||
D == LHS) && NoRHSWrapProblem)
2559 C == LHS ?
D :
C, Q, MaxRecurse - 1))
2563 if (A && C && (A == C || A ==
D ||
B == C ||
B ==
D) && NoLHSWrapProblem &&
2571 }
else if (A ==
D) {
2575 }
else if (
B == C) {
2600 bool RHSKnownNonNegative, RHSKnownNegative;
2601 bool YKnownNonNegative, YKnownNegative;
2602 ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, Q.DL, 0,
2603 Q.AC, Q.CxtI, Q.DT);
2604 ComputeSignBit(Y, YKnownNonNegative, YKnownNegative, Q.DL, 0, Q.AC,
2606 if (RHSKnownNonNegative && YKnownNegative)
2608 if (RHSKnownNegative || YKnownNonNegative)
2620 bool LHSKnownNonNegative, LHSKnownNegative;
2621 bool YKnownNonNegative, YKnownNegative;
2622 ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL, 0,
2623 Q.AC, Q.CxtI, Q.DT);
2624 ComputeSignBit(Y, YKnownNonNegative, YKnownNegative, Q.DL, 0, Q.AC,
2626 if (LHSKnownNonNegative && YKnownNegative)
2628 if (LHSKnownNegative || YKnownNonNegative)
2653 if (
ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS)) {
2654 if (RHSC->getValue().isStrictlyPositive()) {
2664 if (RHSC->getValue().isNonNegative()) {
2675 bool KnownNonNegative, KnownNegative;
2681 ComputeSignBit(RHS, KnownNonNegative, KnownNegative, Q.DL, 0, Q.AC,
2683 if (!KnownNonNegative)
2692 ComputeSignBit(RHS, KnownNonNegative, KnownNegative, Q.DL, 0, Q.AC,
2694 if (!KnownNonNegative)
2706 bool KnownNonNegative, KnownNegative;
2712 ComputeSignBit(LHS, KnownNonNegative, KnownNegative, Q.DL, 0, Q.AC,
2714 if (!KnownNonNegative)
2723 ComputeSignBit(LHS, KnownNonNegative, KnownNegative, Q.DL, 0, Q.AC,
2725 if (!KnownNonNegative)
2762 if (
auto *CI = dyn_cast<ConstantInt>(RHS)) {
2763 const APInt *CI2Val, *CIVal = &CI->getValue();
2776 *CI2Val == 1 || !CI->isZero()) {
2783 if (CIVal->
isSignBit() && *CI2Val == 1) {
2792 if (MaxRecurse && LBO && RBO && LBO->
getOpcode() == RBO->getOpcode() &&
2797 case Instruction::UDiv:
2798 case Instruction::LShr:
2802 case Instruction::SDiv:
2803 case Instruction::AShr:
2804 if (!LBO->
isExact() || !RBO->isExact())
2807 RBO->getOperand(0), Q, MaxRecurse - 1))
2810 case Instruction::Shl: {
2818 RBO->getOperand(0), Q, MaxRecurse - 1))
2831 unsigned MaxRecurse) {
2845 (A == LHS || B == LHS)) {
2852 (A == RHS || B == RHS)) {
2860 (A == LHS || B == LHS)) {
2919 (A == LHS || B == LHS)) {
2926 (A == RHS || B == RHS)) {
2934 (A == LHS || B == LHS)) {
2988 (A == C || A == D || B == C || B == D)) {
2998 (A == C || A == D || B == C || B == D)) {
3008 (A == C || A == D || B == C || B == D)) {
3018 (A == C || A == D || B == C || B == D)) {
3034 const Query &Q,
unsigned MaxRecurse) {
3038 if (
Constant *CLHS = dyn_cast<Constant>(LHS)) {
3039 if (
Constant *CRHS = dyn_cast<Constant>(RHS))
3052 if (LHS == RHS || isa<UndefValue>(RHS))
3066 if (isa<Instruction>(RHS) && isa<Instruction>(LHS)) {
3078 if (Satisfied_CR.contains(LHS_CR))
3083 if (InversedSatisfied_CR.contains(LHS_CR))
3089 if (isa<CastInst>(LHS) && (isa<Constant>(RHS) || isa<CastInst>(RHS))) {
3092 Type *SrcTy = SrcOp->getType();
3097 if (MaxRecurse && isa<PtrToIntInst>(LI) &&
3099 if (
Constant *RHSC = dyn_cast<Constant>(RHS)) {
3105 }
else if (
PtrToIntInst *RI = dyn_cast<PtrToIntInst>(RHS)) {
3106 if (RI->getOperand(0)->getType() == SrcTy)
3114 if (isa<ZExtInst>(LHS)) {
3117 if (
ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
3118 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3121 SrcOp, RI->getOperand(0), Q,
3127 else if (
ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
3135 if (RExt == CI && MaxRecurse)
3137 SrcOp, Trunc, Q, MaxRecurse-1))
3160 return CI->getValue().isNegative() ?
3166 return CI->getValue().isNegative() ?
3174 if (isa<SExtInst>(LHS)) {
3177 if (
SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
3178 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3186 else if (
ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
3194 if (RExt == CI && MaxRecurse)
3212 return CI->getValue().isNegative() ?
3217 return CI->getValue().isNegative() ?
3266 if (
auto *CLHS = dyn_cast<PtrToIntOperator>(LHS))
3267 if (
auto *CRHS = dyn_cast<PtrToIntOperator>(RHS))
3268 if (Q.DL.getTypeSizeInBits(CLHS->getPointerOperandType()) ==
3269 Q.DL.getTypeSizeInBits(CLHS->getType()) &&
3270 Q.DL.getTypeSizeInBits(CRHS->getPointerOperandType()) ==
3271 Q.DL.getTypeSizeInBits(CRHS->getType()))
3273 CLHS->getPointerOperand(),
3274 CRHS->getPointerOperand()))
3278 if (
GEPOperator *GRHS = dyn_cast<GEPOperator>(RHS)) {
3279 if (GLHS->getPointerOperand() == GRHS->getPointerOperand() &&
3280 GLHS->hasAllConstantIndices() && GRHS->hasAllConstantIndices() &&
3282 (GLHS->isInBounds() && GRHS->isInBounds() &&
3290 GLHS->getSourceElementType(), Null, IndicesLHS);
3294 GLHS->getSourceElementType(), Null, IndicesRHS);
3303 const APInt *RHSVal;
3306 APInt LHSKnownZero(BitWidth, 0);
3307 APInt LHSKnownOne(BitWidth, 0);
3310 if (((LHSKnownZero & *RHSVal) != 0) || ((LHSKnownOne & ~(*RHSVal)) != 0))
3318 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
3324 if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
3344 unsigned MaxRecurse) {
3348 if (
Constant *CLHS = dyn_cast<Constant>(LHS)) {
3349 if (
Constant *CRHS = dyn_cast<Constant>(RHS))
3374 if (isa<UndefValue>(LHS) || isa<UndefValue>(RHS)) {
3390 if (
const auto *RHSC = dyn_cast<Constant>(RHS)) {
3392 CFP = dyn_cast_or_null<ConstantFP>(RHSC->getSplatValue());
3402 "Comparison must be either ordered or unordered!");
3451 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
3457 if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
3476 unsigned MaxRecurse) {
3486 if (
auto *
B = dyn_cast<BinaryOperator>(
I)) {
3493 if (isa<OverflowingBinaryOperator>(
B))
3494 if (
B->hasNoSignedWrap() ||
B->hasNoUnsignedWrap())
3496 if (isa<PossiblyExactOperator>(
B))
3501 if (
B->getOperand(0) ==
Op)
3504 if (
B->getOperand(1) ==
Op)
3511 if (
CmpInst *
C = dyn_cast<CmpInst>(
I)) {
3513 if (
C->getOperand(0) ==
Op)
3516 if (
C->getOperand(1) ==
Op)
3526 if (
Constant *CRepOp = dyn_cast<Constant>(RepOp)) {
3529 for (
unsigned i = 0, e =
I->getNumOperands();
i != e; ++
i) {
3530 if (
I->getOperand(
i) ==
Op)
3532 else if (
Constant *COp = dyn_cast<Constant>(
I->getOperand(
i)))
3539 if (ConstOps.
size() ==
I->getNumOperands()) {
3542 ConstOps[1], Q.DL, Q.TLI);
3544 if (
LoadInst *LI = dyn_cast<LoadInst>(
I))
3545 if (!LI->isVolatile())
3558 const APInt *
Y,
bool TrueWhenUnset) {
3565 return TrueWhenUnset ? FalseVal : TrueVal;
3571 return TrueWhenUnset ? FalseVal : TrueVal;
3578 return TrueWhenUnset ? TrueVal : FalseVal;
3584 return TrueWhenUnset ? TrueVal : FalseVal;
3594 bool TrueWhenUnset) {
3599 APInt MinSignedValue;
3624 unsigned MaxRecurse) {
3626 Value *CmpLHS, *CmpRHS;
3703 unsigned MaxRecurse) {
3706 if (
Constant *CB = dyn_cast<Constant>(CondVal)) {
3707 if (CB->isAllOnesValue())
3709 if (CB->isNullValue())
3714 if (TrueVal == FalseVal)
3717 if (isa<UndefValue>(CondVal)) {
3718 if (isa<Constant>(TrueVal))
3722 if (isa<UndefValue>(TrueVal))
3724 if (isa<UndefValue>(FalseVal))
3746 const Query &Q,
unsigned) {
3749 cast<PointerType>(Ops[0]->getType()->getScalarType())->getAddressSpace();
3752 if (Ops.
size() == 1)
3761 if (isa<UndefValue>(Ops[0]))
3764 if (Ops.
size() == 2) {
3773 uint64_t TyAllocSize = Q.DL.getTypeAllocSize(Ty);
3775 if (TyAllocSize == 0)
3780 if (Ops[1]->
getType()->getScalarSizeInBits() ==
3781 Q.DL.getPointerSizeInBits(AS)) {
3782 auto PtrToIntOrZero = [GEPTy](
Value *
P) ->
Value * {
3793 if (TyAllocSize == 1 &&
3795 if (
Value *R = PtrToIntOrZero(P))
3803 TyAllocSize == 1ULL <<
C)
3804 if (
Value *R = PtrToIntOrZero(P))
3812 if (
Value *R = PtrToIntOrZero(P))
3818 if (Q.DL.getTypeAllocSize(LastType) == 1 &&
3822 Q.DL.getPointerSizeInBits(Ops[0]->
getType()->getPointerAddressSpace());
3823 if (Q.DL.getTypeSizeInBits(Ops.
back()->
getType()) == PtrWidth) {
3824 APInt BasePtrOffset(PtrWidth, 0);
3825 Value *StrippedBasePtr =
3826 Ops[0]->stripAndAccumulateInBoundsConstantOffsets(Q.DL,
3845 for (
unsigned i = 0, e = Ops.
size();
i != e; ++
i)
3846 if (!isa<Constant>(Ops[
i]))
3867 if (
Constant *CAgg = dyn_cast<Constant>(Agg))
3868 if (
Constant *CVal = dyn_cast<Constant>(Val))
3877 if (EV->getAggregateOperand()->getType() == Agg->
getType() &&
3878 EV->getIndices() == Idxs) {
3881 return EV->getAggregateOperand();
3884 if (Agg == EV->getAggregateOperand())
3902 const Query &,
unsigned) {
3903 if (
auto *CAgg = dyn_cast<Constant>(Agg))
3907 unsigned NumIdxs = Idxs.
size();
3908 for (
auto *IVI = dyn_cast<InsertValueInst>(Agg); IVI !=
nullptr;
3911 unsigned NumInsertValueIdxs = InsertValueIdxs.
size();
3912 unsigned NumCommonIdxs =
std::min(NumInsertValueIdxs, NumIdxs);
3913 if (InsertValueIdxs.
slice(0, NumCommonIdxs) ==
3914 Idxs.
slice(0, NumCommonIdxs)) {
3915 if (NumIdxs == NumInsertValueIdxs)
3916 return IVI->getInsertedValueOperand();
3938 if (
auto *CVec = dyn_cast<Constant>(Vec)) {
3939 if (
auto *CIdx = dyn_cast<Constant>(Idx))
3943 if (
auto *Splat = CVec->getSplatValue())
3946 if (isa<UndefValue>(Vec))
3952 if (
auto *IdxC = dyn_cast<ConstantInt>(Idx))
3970 Value *CommonValue =
nullptr;
3971 bool HasUndefInput =
false;
3974 if (Incoming == PN)
continue;
3975 if (isa<UndefValue>(Incoming)) {
3977 HasUndefInput =
true;
3980 if (CommonValue && Incoming != CommonValue)
3982 CommonValue = Incoming;
4000 Type *Ty,
const Query &Q,
unsigned MaxRecurse) {
4001 if (
auto *
C = dyn_cast<Constant>(Op))
4004 if (
auto *CI = dyn_cast<CastInst>(Op)) {
4005 auto *Src = CI->getOperand(0);
4006 Type *SrcTy = Src->getType();
4007 Type *MidTy = CI->getType();
4009 if (Src->getType() == Ty) {
4019 SrcIntPtrTy, MidIntPtrTy,
4020 DstIntPtrTy) == Instruction::BitCast)
4026 if (CastOpc == Instruction::BitCast)
4047 const Query &Q,
unsigned MaxRecurse) {
4052 case Instruction::FAdd:
4055 case Instruction::Sub:
4058 case Instruction::FSub:
4061 case Instruction::Mul:
return SimplifyMulInst (LHS, RHS, Q, MaxRecurse);
4062 case Instruction::FMul:
4066 case Instruction::FDiv:
4070 case Instruction::FRem:
4072 case Instruction::Shl:
4075 case Instruction::LShr:
4077 case Instruction::AShr:
4083 if (
Constant *CLHS = dyn_cast<Constant>(LHS))
4084 if (
Constant *CRHS = dyn_cast<Constant>(RHS))
4094 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
4100 if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
4114 unsigned MaxRecurse) {
4116 case Instruction::FAdd:
4118 case Instruction::FSub:
4120 case Instruction::FMul:
4122 case Instruction::FDiv:
4148 const Query &Q,
unsigned MaxRecurse) {
4164 default:
return false;
4167 case Intrinsic::fabs:
4168 case Intrinsic::floor:
4169 case Intrinsic::ceil:
4170 case Intrinsic::trunc:
4171 case Intrinsic::rint:
4172 case Intrinsic::nearbyint:
4191 if (!OffsetConstInt || OffsetConstInt->getType()->getBitWidth() > 64)
4195 if (OffsetInt % 4 != 0)
4209 if (LoadedCE->getOpcode() == Instruction::Trunc) {
4215 if (LoadedCE->getOpcode() != Instruction::Sub)
4219 if (!LoadedLHS || LoadedLHS->getOpcode() != Instruction::PtrToInt)
4221 auto *LoadedLHSPtr = LoadedLHS->
getOperand(0);
4225 APInt LoadedRHSOffset;
4228 PtrSym != LoadedRHSSym || PtrOffset != LoadedRHSOffset)
4238 if (ConstMask->isNullValue() || isa<UndefValue>(ConstMask))
4240 for (
unsigned I = 0,
E = ConstMask->getType()->getVectorNumElements();
I !=
E;
4242 if (
auto *MaskElt = ConstMask->getAggregateElement(
I))
4243 if (MaskElt->isNullValue() || isa<UndefValue>(MaskElt))
4250 template <
typename IterTy>
4252 const Query &Q,
unsigned MaxRecurse) {
4254 unsigned NumOperands = std::distance(ArgBegin, ArgEnd);
4257 if (NumOperands == 1) {
4260 if (
IntrinsicInst *II = dyn_cast<IntrinsicInst>(*ArgBegin)) {
4261 if (II->getIntrinsicID() == IID)
4267 case Intrinsic::fabs: {
4277 if (NumOperands == 2) {
4278 Value *LHS = *ArgBegin;
4279 Value *RHS = *(ArgBegin + 1);
4283 case Intrinsic::usub_with_overflow:
4284 case Intrinsic::ssub_with_overflow: {
4291 if (isa<UndefValue>(LHS) || isa<UndefValue>(RHS))
4296 case Intrinsic::uadd_with_overflow:
4297 case Intrinsic::sadd_with_overflow: {
4299 if (isa<UndefValue>(RHS))
4304 case Intrinsic::umul_with_overflow:
4305 case Intrinsic::smul_with_overflow: {
4316 case Intrinsic::load_relative: {
4330 case Intrinsic::masked_load: {
4331 Value *MaskArg = ArgBegin[2];
4332 Value *PassthruArg = ArgBegin[3];
4343 template <
typename IterTy>
4345 const Query &Q,
unsigned MaxRecurse) {
4348 Ty = PTy->getElementType();
4353 if (isa<UndefValue>(V) || isa<ConstantPointerNull>(V))
4360 if (F->isIntrinsic())
4368 ConstantArgs.
reserve(ArgEnd - ArgBegin);
4369 for (IterTy
I = ArgBegin,
E = ArgEnd;
I !=
E; ++
I) {
4406 case Instruction::FAdd:
4412 cast<BinaryOperator>(
I)->hasNoSignedWrap(),
4413 cast<BinaryOperator>(
I)->hasNoUnsignedWrap(), DL,
4416 case Instruction::FSub:
4420 case Instruction::Sub:
4422 cast<BinaryOperator>(
I)->hasNoSignedWrap(),
4423 cast<BinaryOperator>(
I)->hasNoUnsignedWrap(), DL,
4426 case Instruction::FMul:
4430 case Instruction::Mul:
4434 case Instruction::SDiv:
4438 case Instruction::UDiv:
4442 case Instruction::FDiv:
4446 case Instruction::SRem:
4450 case Instruction::URem:
4454 case Instruction::FRem:
4458 case Instruction::Shl:
4460 cast<BinaryOperator>(
I)->hasNoSignedWrap(),
4461 cast<BinaryOperator>(
I)->hasNoUnsignedWrap(), DL,
4464 case Instruction::LShr:
4466 cast<BinaryOperator>(
I)->isExact(), DL, TLI, DT,
4469 case Instruction::AShr:
4471 cast<BinaryOperator>(
I)->isExact(), DL, TLI, DT,
4486 case Instruction::ICmp:
4491 case Instruction::FCmp:
4500 case Instruction::GetElementPtr: {
4502 Result =
SimplifyGEPInst(cast<GetElementPtrInst>(I)->getSourceElementType(),
4503 Ops, DL, TLI, DT, AC, I);
4506 case Instruction::InsertValue: {
4513 case Instruction::ExtractValue: {
4514 auto *EVI = cast<ExtractValueInst>(
I);
4516 EVI->getIndices(), DL, TLI, DT, AC,
I);
4519 case Instruction::ExtractElement: {
4520 auto *EEI = cast<ExtractElementInst>(
I);
4522 EEI->getVectorOperand(), EEI->getIndexOperand(), DL, TLI, DT, AC,
I);
4525 case Instruction::PHI:
4534 #define HANDLE_CAST_INST(num, opc, clas) case Instruction::opc:
4535 #include "llvm/IR/Instruction.def"
4536 #undef HANDLE_CAST_INST
4538 DL, TLI, DT, AC,
I);
4546 APInt KnownZero(BitWidth, 0);
4547 APInt KnownOne(BitWidth, 0);
4549 if ((KnownZero | KnownOne).isAllOnesValue())
4583 Worklist.
insert(cast<Instruction>(U));
4598 for (
unsigned Idx = 0; Idx != Worklist.
size(); ++Idx) {
4612 Worklist.
insert(cast<Instruction>(U));
4637 assert(I != SimpleV &&
"replaceAndRecursivelySimplify(X,X) is not valid!");
4638 assert(SimpleV &&
"Must provide a simplified value.");
Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > m_UMin(const LHS &L, const RHS &R)
static Value * SimplifyBinOp(unsigned, Value *, Value *, const Query &, unsigned)
Given operands for a BinaryOperator, see if we can fold the result.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
APInt ashr(unsigned shiftAmt) const
Arithmetic right-shift function.
SymbolTableList< Instruction >::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
bool isImpliedTrueByMatchingCmp(Predicate Pred2)
Determine if Pred1 implies Pred2 is true when two compares have matching operands.
void computeKnownBits(const Value *V, APInt &KnownZero, APInt &KnownOne, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
void push_back(const T &Elt)
unsigned Log2_32_Ceil(uint32_t Value)
Log2_32_Ceil - This function returns the ceil log base 2 of the specified value, 32 if the value is z...
A parsed version of the target data layout string in and methods for querying it. ...
static ConstantInt * getFalse(LLVMContext &Context)
bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr)
Return true if the given value is known to have exactly one bit set when defined. ...
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
static Value * SimplifyCastInst(unsigned, Value *, Type *, const Query &, unsigned)
This class is the base class for the comparison instructions.
static APInt getSignBit(unsigned BitWidth)
Get the SignBit for a specific bit width.
class_match< UndefValue > m_Undef()
Match an arbitrary undef constant.
static const Value * SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp, const Query &Q, unsigned MaxRecurse)
See if V simplifies when its operand Op is replaced with RepOp.
Value * SimplifyCall(Value *V, User::op_iterator ArgBegin, User::op_iterator ArgEnd, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr)
Given a function and iterators over arguments, fold the result or return null.
static Value * SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW, const Query &Q, unsigned MaxRecurse)
Given operands for an Add, see if we can fold the result.
static APInt getAllOnesValue(unsigned numBits)
Get the all-ones value.
LLVM Argument representation.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
Value * getAggregateOperand()
static const Value * getFNegArgument(const Value *BinOp)
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Value * findScalarElement(Value *V, unsigned EltNo)
Given a vector and an element number, see if the scalar value is already around as a register...
STATISTIC(NumFunctions,"Total number of functions")
ArrayRef< unsigned > getIndices() const
bool canConstantFoldCallTo(const Function *F)
canConstantFoldCallTo - Return true if its even possible to fold a call to the specified function...
static Value * SimplifyCmpInst(unsigned, Value *, Value *, const Query &, unsigned)
Given operands for a CmpInst, see if we can fold the result.
static Value * SimplifyAndInst(Value *, Value *, const Query &, unsigned)
Given operands for an And, see if we can fold the result.
BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)
match_zero m_Zero()
Match an arbitrary zero/null constant.
This class represents zero extension of integer types.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property...
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
Constant * ConstantFoldExtractElementInstruction(Constant *Val, Constant *Idx)
Attempt to constant fold an extractelement instruction with the specified operands and indices...
constexpr T getValueOr(U &&value) const LLVM_LVALUE_FUNCTION
static Value * SimplifyAssociativeBinOp(unsigned Opc, Value *LHS, Value *RHS, const Query &Q, unsigned MaxRecurse)
Generic simplifications for associative binary operations.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Get a value with low bits set.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE, etc.
static Value * simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp, ICmpInst *UnsignedICmp, bool IsAnd)
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space...
static uint64_t round(uint64_t Acc, uint64_t Input)
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
bool mayHaveSideEffects() const
Return true if the instruction may have side effects.
Value * SimplifySDivInst(Value *LHS, Value *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr)
Given operands for an SDiv, fold the result or return null.
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, bool InBounds=false, Optional< unsigned > InRangeIndex=None, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
static Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
bool isSigned() const
Determine if this instruction is using a signed comparison.
A cache of .assume calls within a function.
0 1 0 0 True if ordered and less than
bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV, APInt &Offset, const DataLayout &DL)
If this constant is a constant offset from a global, return the global and the constant.
uint64_t getLimitedValue(uint64_t Limit=~0ULL) const
If this value is smaller than the specified limit, return it, otherwise return the limit value...
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Type * getReturnType() const
Returns the type of the ret val.
Value * SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS, FastMathFlags FMF, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr)
Given operands for an FCmpInst, fold the result or return null.
static bool isOrdered(Predicate predicate)
Determine if the predicate is an ordered operation.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly...
FastMathFlags getFastMathFlags() const
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
const Function * getParent() const
Return the enclosing method, or null if none.
This class represents a sign extension of integer types.
bool isNoAliasCall(const Value *V)
Return true if this pointer is returned by a noalias function.
BinaryOp_match< LHS, RHS, Instruction::FSub > m_FSub(const LHS &L, const RHS &R)
An instruction for reading from memory.
static IntegerType * getInt64Ty(LLVMContext &C)
FunctionType * getType(LLVMContext &Context, ID id, ArrayRef< Type * > Tys=None)
Return the function type for an intrinsic.
static Value * simplifyICmpOfBools(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const Query &Q)
Fold an icmp when its operands have i1 scalar type.
void GetUnderlyingObjects(Value *V, SmallVectorImpl< Value * > &Objects, const DataLayout &DL, LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to GetUnderlyingObject except that it can look through phi and select instruct...
static Value * SimplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1)
Commuted variants are assumed to be handled by calling this function again with the parameters swappe...
static Constant * getTrue(Type *Ty)
For a boolean type, or a vector of boolean type, return true, or a vector with every element true...
static Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
Value * SimplifyAddInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr)
Given operands for an Add, fold the result or return null.
static ConstantRange makeAllowedICmpRegion(CmpInst::Predicate Pred, const ConstantRange &Other)
Produce the smallest range such that all values that may satisfy the given predicate with any value c...
static Value * SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW, const Query &Q, unsigned MaxRecurse)
Given operands for a Sub, see if we can fold the result.
void reserve(size_type N)
static Value * SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact, const Query &Q, unsigned MaxRecurse)
Given operands for an LShr, see if we can fold the result.
size_type size() const
Determine the number of elements in the SetVector.
static Value * SimplifyXorInst(Value *, Value *, const Query &, unsigned)
Given operands for a Xor, see if we can fold the result.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
static Constant * computePointerICmp(const DataLayout &DL, const TargetLibraryInfo *TLI, const DominatorTree *DT, CmpInst::Predicate Pred, const Instruction *CxtI, Value *LHS, Value *RHS)
static Value * SimplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const Query &, unsigned)
Given operands for an ExtractValueInst, see if we can fold the result.
static Value * simplifyAndOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1)
Commuted variants are assumed to be handled by calling this function again with the parameters swappe...
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
static Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
bool isNegative() const
Determine sign of this APInt.
static Value * ThreadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const Query &Q, unsigned MaxRecurse)
In the case of a comparison with a PHI instruction, try to simplify the comparison by seeing whether ...
bool match(Val *V, const Pattern &P)
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array...
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Or >, BinaryOp_match< RHS, LHS, Instruction::Or > > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
static Value * simplifyICmpWithConstant(CmpInst::Predicate Pred, Value *LHS, Value *RHS)
static Value * SimplifySDivInst(Value *Op0, Value *Op1, const Query &Q, unsigned MaxRecurse)
Given operands for an SDiv, see if we can fold the result.
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
bool hasNoNaNs() const
Determine whether the no-NaNs flag is set.
static Constant * getIntegerCast(Constant *C, Type *Ty, bool isSigned)
Create a ZExt, Bitcast or Trunc for integer -> integer casts.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
This class represents the LLVM 'select' instruction.
bool noSignedZeros() const
This is the base class for all instructions that perform data casts.
const APInt & getValue() const
Return the constant as an APInt value reference.
static Value * SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF, const Query &Q, unsigned)
Exact_match< T > m_Exact(const T &SubPattern)
BinOp2_match< LHS, RHS, Instruction::LShr, Instruction::AShr > m_Shr(const LHS &L, const RHS &R)
Matches LShr or AShr.
A Use represents the edge between a Value definition and its users.
Value * SimplifyFSubInst(Value *LHS, Value *RHS, FastMathFlags FMF, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr)
Given operands for an FSub, fold the result or return null.
ValTy * getCalledValue() const
getCalledValue - Return the pointer to function that is being called.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
static GCRegistry::Add< StatepointGC > D("statepoint-example","an example strategy for statepoint")
CastClass_match< OpTy, Instruction::Trunc > m_Trunc(const OpTy &Op)
Matches Trunc.
Value * SimplifyFAddInst(Value *LHS, Value *RHS, FastMathFlags FMF, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr)
Given operands for an FAdd, fold the result or return null.
Type * getVectorElementType() const
static Value * SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact, const Query &Q, unsigned MaxRecurse)
Given operands for an AShr, see if we can fold the result.
not_match< LHS > m_Not(const LHS &L)
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
This class represents a cast from a pointer to an integer.
static Value * SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF, const Query &Q, unsigned MaxRecurse)
Given operands for an FAdd, see if we can fold the result.
APInt shl(unsigned shiftAmt) const
Left-shift function.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWAdd(const LHS &L, const RHS &R)
Value * SimplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr)
Given operands for a CastInst, fold the result or return null.
Class to represent function types.
A constant value that is initialized with an expression using other constant values.
bool insert(const value_type &X)
Insert a new element into the SetVector.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
bool contains(const APInt &Val) const
Return true if the specified value is in the set.
static Value * ExtractEquivalentCondition(Value *V, CmpInst::Predicate Pred, Value *LHS, Value *RHS)
Rummage around inside V looking for something equivalent to the comparison "LHS Pred RHS"...
bool isAssociative() const
Return true if the instruction is associative:
static Value * SimplifyPHINode(PHINode *PN, const Query &Q)
See if we can fold the given phi. If not, returns null.
CastClass_match< OpTy, Instruction::ZExt > m_ZExt(const OpTy &Op)
Matches ZExt.
bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
static Value * SimplifyRelativeLoad(Constant *Ptr, Constant *Offset, const DataLayout &DL)
bool sgt(const APInt &RHS) const
Signed greather than comparison.
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > m_SMin(const LHS &L, const RHS &R)
static bool maskIsAllZeroOrUndef(Value *Mask)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Value * getInsertedValueOperand()
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
Value * SimplifyFMulInst(Value *LHS, Value *RHS, FastMathFlags FMF, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr)
Given operands for an FMul, fold the result or return null.
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
static GCRegistry::Add< OcamlGC > B("ocaml","ocaml 3.10-compatible GC")
bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL, const TargetLibraryInfo *TLI, bool RoundToAlign=false, ObjSizeMode Mode=ObjSizeMode::Exact)
Compute the size of the object pointed by Ptr.
Constant * ConstantFoldInsertValueInstruction(Constant *Agg, Constant *Val, ArrayRef< unsigned > Idxs)
ConstantFoldInsertValueInstruction - Attempt to constant fold an insertvalue instruction with the spe...
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
static Value * ThreadBinOpOverSelect(unsigned Opcode, Value *LHS, Value *RHS, const Query &Q, unsigned MaxRecurse)
In the case of a binary operation with a select instruction as an operand, try to simplify the binop ...
static Value * SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF, const Query &Q, unsigned MaxRecurse)
Given operands for an FSub, see if we can fold the result.
APInt umul_ov(const APInt &RHS, bool &Overflow) const
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree...
Type * getScalarType() const LLVM_READONLY
If this is a vector type, return the element type, otherwise return 'this'.
static Value * SimplifyOrInst(Value *, Value *, const Query &, unsigned)
Given operands for an Or, see if we can fold the result.
size_t size() const
size - Get the array size.
static Value * SimplifyURemInst(Value *Op0, Value *Op1, const Query &Q, unsigned MaxRecurse)
Given operands for a URem, see if we can fold the result.
Maximum length of the test input libFuzzer tries to guess a good value based on the corpus and reports it always prefer smaller inputs during the corpus shuffle When libFuzzer itself reports a bug this exit code will be used If indicates the maximal total time in seconds to run the fuzzer minimizes the provided crash input Use with etc Experimental Use value profile to guide fuzzing Number of simultaneous worker processes to run the jobs If min(jobs, NumberOfCpuCores()/2)\" is used.") FUZZER_FLAG_INT(reload
Class to represent pointers.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
static ConstantRange makeSatisfyingICmpRegion(CmpInst::Predicate Pred, const ConstantRange &Other)
Produce the largest range such that all values in the returned range satisfy the given predicate with...
static Value * SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS, const Query &Q, unsigned MaxRecurse)
Given operands for an ICmpInst, see if we can fold the result.
bool CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI, unsigned Depth=0)
Return true if we can prove that the specified FP value is never equal to -0.0.
static GCRegistry::Add< CoreCLRGC > E("coreclr","CoreCLR-compatible GC")
Value * SimplifyOrInst(Value *LHS, Value *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr)
Given operands for an Or, fold the result or return null.
static Constant * getBitCast(Constant *C, Type *Ty, bool OnlyIfReduced=false)
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs ...
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
static Value * simplifySelectWithFakeICmpEq(Value *CmpLHS, Value *TrueVal, Value *FalseVal, bool TrueWhenUnset)
An alternative way to test if a bit is set or not uses sgt/slt instead of eq/ne.
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty > m_UMax(const LHS &L, const RHS &R)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt...
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
ConstantRange intersectWith(const ConstantRange &CR) const
Return the range that results from the intersection of this range with another range.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWShl(const LHS &L, const RHS &R)
static Value * SimplifySelectInst(Value *CondVal, Value *TrueVal, Value *FalseVal, const Query &Q, unsigned MaxRecurse)
Given operands for a SelectInst, see if we can fold the result.
bool isFullSet() const
Return true if this set contains all of the elements possible for this data-type. ...
ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
bool isExact() const
Determine whether the exact flag is set.
The instances of the Type class are immutable: once they are created, they are never changed...
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
This is an important class for using LLVM in a threaded context.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isEquality() const
Return true if this predicate is either EQ or NE.
static Constant * stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V, bool AllowNonInbounds=false)
Compute the base pointer and cumulative constant offsets for V.
This is an important base class in LLVM.
static Value * SimplifyCall(Value *V, IterTy ArgBegin, IterTy ArgEnd, const Query &Q, unsigned MaxRecurse)
bool isMaxSignedValue() const
Determine if this is the largest signed value.
const Value * getCondition() const
bool MaskedValueIsZero(const Value *V, const APInt &Mask, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr)
Return true if 'V & Mask' is known to be zero.
static Value * ThreadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const Query &Q, unsigned MaxRecurse)
In the case of a comparison with a select instruction, try to simplify the comparison by seeing wheth...
static Value * SimplifySRemInst(Value *Op0, Value *Op1, const Query &Q, unsigned MaxRecurse)
Given operands for an SRem, see if we can fold the result.
static Value * SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS, FastMathFlags FMF, const Query &Q, unsigned MaxRecurse)
Given operands for an FCmpInst, see if we can fold the result.
APInt Or(const APInt &LHS, const APInt &RHS)
Bitwise OR function for APInt.
bool hasNoSignedWrap() const
Determine whether the no signed wrap flag is set.
static Value * SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF, const Query &Q, unsigned MaxRecurse)
Given the operands for an FMul, see if we can fold the result.
ConstantFP - Floating Point Values [float, double].
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
APInt Xor(const APInt &LHS, const APInt &RHS)
Bitwise XOR function for APInt.
static Value * simplifySelectWithICmpCond(Value *CondVal, Value *TrueVal, Value *FalseVal, const Query &Q, unsigned MaxRecurse)
Try to simplify a select instruction when its condition operand is an integer comparison.
static Value * SimplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, const Query &Q, unsigned MaxRecurse)
Given operands for an SDiv or UDiv, see if we can fold the result.
Constant * ConstantFoldInstruction(Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstruction - Try to constant fold the specified instruction.
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set to true.
bool isIntPredicate() const
Value * SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr)
Given operands for a Shl, fold the result or return null.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
bool isFalseWhenEqual() const
This is just a convenience.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang","erlang-compatible garbage collector")
This instruction compares its operands according to the predicate given to the constructor.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Utility class for integer arithmetic operators which may exhibit overflow - Add, Sub, and Mul.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool isMaxValue() const
Determine if this is the largest unsigned value.
APInt sdiv(const APInt &RHS) const
Signed division function for APInt.
Value * getOperand(unsigned i) const
0 1 1 1 True if ordered (no nans)
Value * SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS, const FastMathFlags &FMF, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr)
Given operands for an FP BinaryOperator, fold the result or return null.
bool isCommutative() const
Return true if the instruction is commutative:
static Constant * getICmp(unsigned short pred, Constant *LHS, Constant *RHS, bool OnlyIfReduced=false)
get* - Return some common constants without having to specify the full Instruction::OPCODE identifier...
Predicate getPredicate() const
Return the predicate for this instruction.
static bool ValueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT)
Does the given value dominate the specified phi node?
Optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool InvertAPred=false, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr)
Return true if RHS is known to be implied true by LHS.
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, const DataLayout &DL)
ConstantFoldLoadFromConstPtr - Return the value that a load from C would produce if it is constant an...
Value * SimplifySRemInst(Value *LHS, Value *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr)
Given operands for an SRem, fold the result or return null.
static Constant * getAllOnesValue(Type *Ty)
Get the all ones value.
1 1 1 1 Always true (always folded)
static Value * SimplifyInsertValueInst(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Query &Q, unsigned)
Given operands for an InsertValueInst, see if we can fold the result.
match_combine_or< match_zero, match_neg_zero > m_AnyZero()
Match an arbitrary zero/null constant.
bool isEmptySet() const
Return true if this set contains no members.
bool isPointerTy() const
True if this is an instance of PointerType.
Value * SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr)
Given operands for a SelectInst, fold the result or return null.
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
static Value * SimplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1)
Commuted variants are assumed to be handled by calling this function again with the parameters swappe...
bool PointerMayBeCaptured(const Value *V, bool ReturnCaptures, bool StoreCaptures)
PointerMayBeCaptured - Return true if this pointer value may be captured by the enclosing function (w...
LLVMContext & getContext() const
All values hold a context through their type.
static PointerType * getInt8PtrTy(LLVMContext &C, unsigned AS=0)
static Value * simplifyICmpWithMinMax(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const Query &Q, unsigned MaxRecurse)
Simplify integer comparisons where at least one operand of the compare matches an integer min/max idi...
const Value * getTrueValue() const
Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldCompareInstOperands - Attempt to constant fold a compare instruction (icmp/fcmp) with the...
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
1 1 0 1 True if unordered, less than, or equal
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
const T & back() const
back - Get the last element.
static Value * ExpandBinOp(unsigned Opcode, Value *LHS, Value *RHS, unsigned OpcToExpand, const Query &Q, unsigned MaxRecurse)
Simplify "A op (B op' C)" by distributing op over op', turning it into "(A op B) op' (A op C)"...
bool recursivelySimplifyInstruction(Instruction *I, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr)
Recursively attempt to simplify an instruction.
neg_match< LHS > m_Neg(const LHS &L)
Match an integer negate.
bool dominates(const Instruction *Def, const Use &U) const
Return true if Def dominates a use in User.
static Value * SimplifyIntrinsic(Function *F, IterTy ArgBegin, IterTy ArgEnd, const Query &Q, unsigned MaxRecurse)
unsigned countTrailingZeros() const
Count the number of trailing zero bits.
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space...
static Value * SimplifyUDivInst(Value *Op0, Value *Op1, const Query &Q, unsigned MaxRecurse)
Given operands for a UDiv, see if we can fold the result.
0 0 1 0 True if ordered and greater than
BinaryOps getOpcode() const
static Constant * getSplat(unsigned NumElts, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
A SetVector that performs no allocations if smaller than a certain size.
static ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
static Value * SimplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, const Query &Q, unsigned MaxRecurse)
Given operands for an SRem or URem, see if we can fold the result.
This is the shared class of boolean and integer constants.
static Value * SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW, const Query &Q, unsigned MaxRecurse)
Given operands for an Shl, see if we can fold the result.
unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr)
Return the number of times the sign bit of the register is replicated into the other bits...
bool hasNoUnsignedWrap() const
Determine whether the no unsigned wrap flag is set.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type...
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
ConstantRange inverse() const
Return a new range that is the logical not of the current set.
Type * getType() const
All values are typed, get the type of this value.
Provides information about what library functions are available for the current target.
static Value * simplifySelectBitTest(Value *TrueVal, Value *FalseVal, Value *X, const APInt *Y, bool TrueWhenUnset)
Try to simplify a select instruction when its condition operand is an integer comparison where one op...
BinaryOp_match< LHS, RHS, Instruction::URem > m_URem(const LHS &L, const RHS &R)
This class represents a range of values.
Value * SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr)
Given operands for a CmpInst, fold the result or return null.
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
bool isTrueWhenEqual() const
This is just a convenience.
static unsigned isEliminableCastPair(Instruction::CastOps firstOpcode, Instruction::CastOps secondOpcode, Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy, Type *DstIntPtrTy)
Determine how a pair of casts can be eliminated, if they can be at all.
Value * stripPointerCasts()
Strip off pointer casts, all-zero GEPs, and aliases.
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
static Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Value * SimplifyInsertValueInst(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr)
Given operands for an InsertValueInst, fold the result or return null.
static Value * SimplifyShift(unsigned Opcode, Value *Op0, Value *Op1, const Query &Q, unsigned MaxRecurse)
Given operands for an Shl, LShr or AShr, see if we can fold the result.
const BasicBlock & getEntryBlock() const
static Constant * get(Type *Ty, double V)
This returns a ConstantFP, or a vector containing a splat of a ConstantFP, for the specified value in...
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
static ConstantInt * getTrue(LLVMContext &Context)
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
static GCRegistry::Add< ShadowStackGC > C("shadow-stack","Very portable GC for uncooperative code generators")
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Class to represent vector types.
Value * SimplifyExtractElementInst(Value *Vec, Value *Idx, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr)
Given operands for an ExtractElementInst, fold the result or return null.
Class for arbitrary precision integers.
static Constant * getFalse(Type *Ty)
For a boolean type, or a vector of boolean type, return false, or a vector with every element false...
static Value * SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF, const Query &, unsigned)
bool isIntegerTy() const
True if this is an instance of IntegerType.
CastClass_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
iterator_range< user_iterator > users()
Value * SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr)
Given operands for a BinaryOperator, fold the result or return null.
unsigned getVectorNumElements() const
static Constant * getCast(unsigned ops, Constant *C, Type *Ty, bool OnlyIfReduced=false)
Convenience function for getting a Cast operation.
Value * SimplifySubInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr)
Given operands for a Sub, fold the result or return null.
static Value * simplifyOrOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1)
Commuted variants are assumed to be handled by calling this function again with the parameters swappe...
bool isMinValue() const
Determine if this is the smallest unsigned value.
bool isFPPredicate() const
APInt And(const APInt &LHS, const APInt &RHS)
Bitwise AND function for APInt.
unsigned getOpcode() const
Return the opcode for this Instruction or ConstantExpr.
static Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the type of the element that would be loaded with a load instruction with the specified param...
bool isEHPad() const
Return true if the instruction is a variety of EH-block.
Value * SimplifyGEPInst(Type *SrcTy, ArrayRef< Value * > Ops, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr)
Given operands for a GetElementPtrInst, fold the result or return null.
specific_fpval m_FPOne()
Match a float 1.0 or vector with all elements equal to 1.0.
bool isAllOnesValue() const
Determine if all bits are set.
unsigned countLeadingOnes() const
Count the number of leading one bits.
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
static Constant * computePointerDifference(const DataLayout &DL, Value *LHS, Value *RHS)
Compute the constant difference between two pointer values.
bool isKnownNonNull(const Value *V)
Return true if this pointer couldn't possibly be null by its definition.
void ComputeSignBit(const Value *V, bool &KnownZero, bool &KnownOne, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr)
Determine whether the sign bit is known to be zero or one.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
Value * SimplifyAndInst(Value *LHS, Value *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr)
Given operands for an And, fold the result or return null.
APInt udiv(const APInt &RHS) const
Unsigned division operation.
static IntegerType * getInt32Ty(LLVMContext &C)
static Value * SimplifyGEPInst(Type *SrcTy, ArrayRef< Value * > Ops, const Query &Q, unsigned)
Given operands for an GetElementPtrInst, see if we can fold the result.
static Value * SimplifyFPBinOp(unsigned, Value *, Value *, const FastMathFlags &, const Query &, unsigned)
Given operands for a BinaryOperator, see if we can fold the result.
unsigned greater or equal
static bool isUnordered(Predicate predicate)
Determine if the predicate is an unordered operation.
static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI, const DominatorTree *DT, AssumptionCache *AC)
Implementation of recursive simplification through an instruction's uses.
static bool isFNeg(const Value *V, bool IgnoreZeroSign=false)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoSignedWrap > m_NSWShl(const LHS &L, const RHS &R)
bool SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI)
LLVM_ATTRIBUTE_ALWAYS_INLINE size_type size() const
bool hasOneUse() const
Return true if there is exactly one user of this value.
bool hasNoInfs() const
Determine whether the no-infs flag is set.
bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI, bool LookThroughBitCast=false)
Tests if a value is a call or invoke to a library function that allocates memory (either malloc, calloc, or strdup like).
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty > m_SMax(const LHS &L, const RHS &R)
bool isSignBit() const
Check if the APInt's value is returned by getSignBit.
Value * SimplifyURemInst(Value *LHS, Value *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr)
Given operands for a URem, fold the result or return null.
static Value * SimplifyMulInst(Value *Op0, Value *Op1, const Query &Q, unsigned MaxRecurse)
Given operands for a Mul, see if we can fold the result.
static void Query(const MachineInstr &MI, AliasAnalysis &AA, bool &Read, bool &Write, bool &Effects, bool &StackPointer)
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
bool isKnownNonNullAt(const Value *V, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr)
Return true if this pointer couldn't possibly be null.
static Value * simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const Query &Q, unsigned MaxRecurse)
static bool IsIdempotent(Intrinsic::ID ID)
Value * SimplifyMulInst(Value *LHS, Value *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr)
Given operands for a Mul, fold the result or return null.
Value * SimplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr)
Given operands for an ExtractValueInst, fold the result or return null.
bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr)
Return true if the given values are known to be non-equal when defined.
static Value * SimplifyExtractElementInst(Value *Vec, Value *Idx, const Query &, unsigned)
Given operands for an ExtractElementInst, see if we can fold the result.
Value * SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr)
Given operands for a LShr, fold the result or return null.
OtherOps getOpcode() const
Get the opcode casted to the right type.
Value * SimplifyUDivInst(Value *LHS, Value *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr)
Given operands for a UDiv, fold the result or return null.
const APFloat & getValueAPF() const
BinOp2_match< LHS, RHS, Instruction::SDiv, Instruction::UDiv > m_IDiv(const LHS &L, const RHS &R)
Matches UDiv and SDiv.
bool isUnsigned() const
Determine if this instruction is using an unsigned comparison.
Value * SimplifyXorInst(Value *LHS, Value *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr)
Given operands for an Xor, fold the result or return null.
match_neg_zero m_NegZero()
Match an arbitrary zero/null constant.
Type * getReturnType() const
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
Value * SimplifyFRemInst(Value *LHS, Value *RHS, FastMathFlags FMF, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr)
Given operands for an FRem, fold the result or return null.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
Constant * ConstantFoldInstOperands(Instruction *I, ArrayRef< Constant * > Ops, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstOperands - Attempt to constant fold an instruction with the specified operands.
unsigned getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
LLVM Value Representation.
Value * SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr)
Given operands for an AShr, fold the result or return null.
1 0 1 1 True if unordered, greater than, or equal
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
static VectorType * get(Type *ElementType, unsigned NumElements)
This static method is the primary way to construct an VectorType.
static Value * SimplifyRightShift(unsigned Opcode, Value *Op0, Value *Op1, bool isExact, const Query &Q, unsigned MaxRecurse)
Given operands for an Shl, LShr or AShr, see if we can fold the result.
#define LLVM_FALLTHROUGH
LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
static Value * ThreadBinOpOverPHI(unsigned Opcode, Value *LHS, Value *RHS, const Query &Q, unsigned MaxRecurse)
In the case of a binary operation with an operand that is a PHI instruction, try to simplify the bino...
unsigned countLeadingZeros() const
The APInt version of the countLeadingZeros functions in MathExtras.h.
bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr)
Return true if the given value is known to be non-zero when defined.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
PointerType * getPointerTo(unsigned AddrSpace=0) const
Return a pointer to the current type.
const Value * getFalseValue() const
Convenience struct for specifying and reasoning about fast-math flags.
APInt zext(unsigned width) const
Zero extend to a new width.
static bool isUndefShift(Value *Amount)
Returns true if a shift by Amount always yields undef.
Constant * ConstantFoldExtractValueInstruction(Constant *Agg, ArrayRef< unsigned > Idxs)
Attempt to constant fold an extractvalue instruction with the specified operands and indices...
Value * SimplifyInstruction(Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr)
See if we can compute a simplified version of this instruction.
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml","ocaml 3.10-compatible collector")
bool CannotBeOrderedLessThanZero(const Value *V, const TargetLibraryInfo *TLI)
Return true if we can prove that the specified FP value is either a NaN or never less than 0...
static APInt getNullValue(unsigned numBits)
Get the '0' value.
specific_intval m_SpecificInt(uint64_t V)
Match a specific integer value or vector with all elements equal to the value.
APInt abs() const
Get the absolute value;.
static Value * simplifyICmpWithZero(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const Query &Q)
Try hard to fold icmp with zero RHS because this is a common case.
static bool isSameCompare(Value *V, CmpInst::Predicate Pred, Value *LHS, Value *RHS)
isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"?
bool replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr)
Replace all uses of 'I' with 'SimpleV' and simplify the uses recursively.
op_range incoming_values()
int64_t getSExtValue() const
Return the constant as a 64-bit integer value after it has been sign extended as appropriate for the ...
static GCRegistry::Add< ErlangGC > A("erlang","erlang-compatible garbage collector")
Value * SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr)
Given operands for an ICmpInst, fold the result or return null.
const BasicBlock * getParent() const
static Type * GetCompareTy(Value *Op)
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property...
0 0 0 0 Always false (always folded)
bool noNaNs() const
Flag queries.
A wrapper class for inspecting calls to intrinsic functions.
Constant * ConstantFoldCall(Function *F, ArrayRef< Constant * > Operands, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldCall - Attempt to constant fold a call to the specified function with the specified arguments, returning null if unsuccessful.
Value * SimplifyFDivInst(Value *LHS, Value *RHS, FastMathFlags FMF, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr)
Given operands for an FDiv, fold the result or return null.
an instruction to allocate memory on the stack
This instruction inserts a struct field of array element value into an aggregate value.
CmpClass_match< LHS, RHS, ICmpInst, ICmpInst::Predicate > m_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R)
Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.