37using namespace PatternMatch;
39#define DEBUG_TYPE "instcombine"
56 FAddendCoef() =
default;
61 void operator=(
const FAddendCoef &
A);
63 void operator*=(
const FAddendCoef &S);
66 assert(!insaneIntVal(
C) &&
"Insane coefficient");
67 IsFp =
false; IntVal =
C;
74 bool isZero()
const {
return isInt() ? !IntVal : getFpVal().isZero(); }
77 bool isOne()
const {
return isInt() && IntVal == 1; }
78 bool isTwo()
const {
return isInt() && IntVal == 2; }
79 bool isMinusOne()
const {
return isInt() && IntVal == -1; }
80 bool isMinusTwo()
const {
return isInt() && IntVal == -2; }
83 bool insaneIntVal(
int V) {
return V > 4 || V < -4; }
85 APFloat *getFpValPtr() {
return reinterpret_cast<APFloat *
>(&FpValBuf); }
87 const APFloat *getFpValPtr()
const {
88 return reinterpret_cast<const APFloat *
>(&FpValBuf);
91 const APFloat &getFpVal()
const {
92 assert(IsFp && BufHasFpVal &&
"Incorret state");
93 return *getFpValPtr();
97 assert(IsFp && BufHasFpVal &&
"Incorret state");
98 return *getFpValPtr();
101 bool isInt()
const {
return !IsFp; }
115 bool BufHasFpVal =
false;
134 assert((Val ==
T.Val) &&
"Symbolic-values disagree");
138 Value *getSymVal()
const {
return Val; }
139 const FAddendCoef &getCoef()
const {
return Coeff; }
141 bool isConstant()
const {
return Val ==
nullptr; }
142 bool isZero()
const {
return Coeff.isZero(); }
144 void set(
short Coefficient,
Value *V) {
145 Coeff.set(Coefficient);
149 Coeff.set(Coefficient);
153 Coeff.set(Coefficient->getValueAPF());
157 void negate() { Coeff.negate(); }
161 static unsigned drillValueDownOneStep(
Value* V, FAddend &A0, FAddend &A1);
165 unsigned drillAddendDownOneStep(FAddend &Addend0, FAddend &Addend1)
const;
168 void Scale(
const FAddendCoef& ScaleAmt) { Coeff *= ScaleAmt; }
171 Value *Val =
nullptr;
187 Value *simplifyFAdd(AddendVect& V,
unsigned InstrQuota);
190 Value *createAddendVal(
const FAddend &
A,
bool& NeedNeg);
193 unsigned calcInstrNumber(
const AddendVect& Vect);
199 Value *createNaryFAdd(
const AddendVect& Opnds,
unsigned InstrQuota);
200 void createInstPostProc(
Instruction *NewInst,
bool NoNumber =
false);
204 unsigned CreateInstrNum;
205 void initCreateInstNum() { CreateInstrNum = 0; }
206 void incCreateInstNum() { CreateInstrNum++; }
208 void initCreateInstNum() {}
209 void incCreateInstNum() {}
224FAddendCoef::~FAddendCoef() {
226 getFpValPtr()->~APFloat();
229void FAddendCoef::set(
const APFloat&
C) {
239 IsFp = BufHasFpVal =
true;
242void FAddendCoef::convertToFpType(
const fltSemantics &Sem) {
253 IsFp = BufHasFpVal =
true;
266void FAddendCoef::operator=(
const FAddendCoef &That) {
270 set(That.getFpVal());
273void FAddendCoef::operator+=(
const FAddendCoef &That) {
275 if (
isInt() == That.isInt()) {
279 getFpVal().add(That.getFpVal(), RndMode);
285 convertToFpType(
T.getSemantics());
286 getFpVal().add(
T, RndMode);
291 T.add(createAPFloatFromInt(
T.getSemantics(), That.IntVal), RndMode);
294void FAddendCoef::operator*=(
const FAddendCoef &That) {
298 if (That.isMinusOne()) {
303 if (
isInt() && That.isInt()) {
304 int Res =
IntVal * (int)That.IntVal;
305 assert(!insaneIntVal(Res) &&
"Insane int value");
311 isInt() ? That.getFpVal().getSemantics() : getFpVal().getSemantics();
314 convertToFpType(Semantic);
318 F0.
multiply(createAPFloatFromInt(Semantic, That.IntVal),
319 APFloat::rmNearestTiesToEven);
321 F0.
multiply(That.getFpVal(), APFloat::rmNearestTiesToEven);
324void FAddendCoef::negate() {
328 getFpVal().changeSign();
331Value *FAddendCoef::getValue(
Type *Ty)
const {
347unsigned FAddend::drillValueDownOneStep
348 (
Value *Val, FAddend &Addend0, FAddend &Addend1) {
350 if (!Val || !(
I = dyn_cast<Instruction>(Val)))
353 unsigned Opcode =
I->getOpcode();
355 if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub) {
357 Value *Opnd0 =
I->getOperand(0);
358 Value *Opnd1 =
I->getOperand(1);
359 if ((C0 = dyn_cast<ConstantFP>(Opnd0)) && C0->
isZero())
362 if ((C1 = dyn_cast<ConstantFP>(Opnd1)) && C1->
isZero())
367 Addend0.set(1, Opnd0);
369 Addend0.set(C0,
nullptr);
373 FAddend &Addend = Opnd0 ? Addend1 : Addend0;
375 Addend.set(1, Opnd1);
377 Addend.set(C1,
nullptr);
378 if (Opcode == Instruction::FSub)
383 return Opnd0 && Opnd1 ? 2 : 1;
390 if (
I->getOpcode() == Instruction::FMul) {
391 Value *V0 =
I->getOperand(0);
392 Value *V1 =
I->getOperand(1);
410unsigned FAddend::drillAddendDownOneStep
411 (FAddend &Addend0, FAddend &Addend1)
const {
415 unsigned BreakNum = FAddend::drillValueDownOneStep(Val, Addend0, Addend1);
416 if (!BreakNum || Coeff.isOne())
419 Addend0.Scale(Coeff);
422 Addend1.Scale(Coeff);
428 assert(
I->hasAllowReassoc() &&
I->hasNoSignedZeros() &&
429 "Expected 'reassoc'+'nsz' instruction");
432 if (
I->getType()->isVectorTy())
435 assert((
I->getOpcode() == Instruction::FAdd ||
436 I->getOpcode() == Instruction::FSub) &&
"Expect add/sub");
441 FAddend Opnd0, Opnd1, Opnd0_0, Opnd0_1, Opnd1_0, Opnd1_1;
443 unsigned OpndNum = FAddend::drillValueDownOneStep(
I, Opnd0, Opnd1);
446 unsigned Opnd0_ExpNum = 0;
447 unsigned Opnd1_ExpNum = 0;
449 if (!Opnd0.isConstant())
450 Opnd0_ExpNum = Opnd0.drillAddendDownOneStep(Opnd0_0, Opnd0_1);
453 if (OpndNum == 2 && !Opnd1.isConstant())
454 Opnd1_ExpNum = Opnd1.drillAddendDownOneStep(Opnd1_0, Opnd1_1);
457 if (Opnd0_ExpNum && Opnd1_ExpNum) {
459 AllOpnds.push_back(&Opnd0_0);
460 AllOpnds.push_back(&Opnd1_0);
461 if (Opnd0_ExpNum == 2)
462 AllOpnds.push_back(&Opnd0_1);
463 if (Opnd1_ExpNum == 2)
464 AllOpnds.push_back(&Opnd1_1);
467 unsigned InstQuota = 0;
469 Value *V0 =
I->getOperand(0);
470 Value *V1 =
I->getOperand(1);
471 InstQuota = ((!isa<Constant>(V0) && V0->
hasOneUse()) &&
472 (!isa<Constant>(V1) && V1->
hasOneUse())) ? 2 : 1;
474 if (
Value *R = simplifyFAdd(AllOpnds, InstQuota))
483 const FAddendCoef &
CE = Opnd0.getCoef();
484 return CE.isOne() ? Opnd0.getSymVal() :
nullptr;
490 AllOpnds.push_back(&Opnd0);
491 AllOpnds.push_back(&Opnd1_0);
492 if (Opnd1_ExpNum == 2)
493 AllOpnds.push_back(&Opnd1_1);
495 if (
Value *R = simplifyFAdd(AllOpnds, 1))
502 AllOpnds.push_back(&Opnd1);
503 AllOpnds.push_back(&Opnd0_0);
504 if (Opnd0_ExpNum == 2)
505 AllOpnds.push_back(&Opnd0_1);
507 if (
Value *R = simplifyFAdd(AllOpnds, 1))
514Value *FAddCombine::simplifyFAdd(AddendVect& Addends,
unsigned InstrQuota) {
515 unsigned AddendNum = Addends.size();
516 assert(AddendNum <= 4 &&
"Too many addends");
519 unsigned NextTmpIdx = 0;
520 FAddend TmpResult[3];
528 for (
unsigned SymIdx = 0; SymIdx < AddendNum; SymIdx++) {
530 const FAddend *ThisAddend = Addends[SymIdx];
536 Value *Val = ThisAddend->getSymVal();
545 unsigned StartIdx = SimpVect.size();
546 SimpVect.push_back(ThisAddend);
553 for (
unsigned SameSymIdx = SymIdx + 1;
554 SameSymIdx < AddendNum; SameSymIdx++) {
555 const FAddend *
T = Addends[SameSymIdx];
556 if (
T &&
T->getSymVal() == Val) {
559 Addends[SameSymIdx] =
nullptr;
560 SimpVect.push_back(
T);
565 if (StartIdx + 1 != SimpVect.size()) {
566 FAddend &
R = TmpResult[NextTmpIdx ++];
567 R = *SimpVect[StartIdx];
568 for (
unsigned Idx = StartIdx + 1;
Idx < SimpVect.size();
Idx++)
572 SimpVect.resize(StartIdx);
574 SimpVect.push_back(&R);
579 assert((NextTmpIdx <= std::size(TmpResult) + 1) &&
"out-of-bound access");
582 if (!SimpVect.empty())
583 Result = createNaryFAdd(SimpVect, InstrQuota);
592Value *FAddCombine::createNaryFAdd
593 (
const AddendVect &Opnds,
unsigned InstrQuota) {
594 assert(!Opnds.empty() &&
"Expect at least one addend");
598 unsigned InstrNeeded = calcInstrNumber(Opnds);
599 if (InstrNeeded > InstrQuota)
612 Value *LastVal =
nullptr;
613 bool LastValNeedNeg =
false;
616 for (
const FAddend *Opnd : Opnds) {
618 Value *
V = createAddendVal(*Opnd, NeedNeg);
621 LastValNeedNeg = NeedNeg;
625 if (LastValNeedNeg == NeedNeg) {
626 LastVal = createFAdd(LastVal, V);
631 LastVal = createFSub(V, LastVal);
633 LastVal = createFSub(LastVal, V);
635 LastValNeedNeg =
false;
638 if (LastValNeedNeg) {
639 LastVal = createFNeg(LastVal);
643 assert(CreateInstrNum == InstrNeeded &&
644 "Inconsistent in instruction numbers");
653 createInstPostProc(
I);
660 createInstPostProc(
I,
true);
667 createInstPostProc(
I);
674 createInstPostProc(
I);
678void FAddCombine::createInstPostProc(
Instruction *NewInstr,
bool NoNumber) {
691unsigned FAddCombine::calcInstrNumber(
const AddendVect &Opnds) {
692 unsigned OpndNum = Opnds.size();
693 unsigned InstrNeeded = OpndNum - 1;
696 for (
const FAddend *Opnd : Opnds) {
697 if (Opnd->isConstant())
702 if (isa<UndefValue>(Opnd->getSymVal()))
705 const FAddendCoef &
CE = Opnd->getCoef();
709 if (!
CE.isMinusOne() && !
CE.isOne())
723Value *FAddCombine::createAddendVal(
const FAddend &Opnd,
bool &NeedNeg) {
724 const FAddendCoef &Coeff = Opnd.getCoef();
726 if (Opnd.isConstant()) {
728 return Coeff.getValue(
Instr->getType());
731 Value *OpndVal = Opnd.getSymVal();
733 if (Coeff.isMinusOne() || Coeff.isOne()) {
734 NeedNeg = Coeff.isMinusOne();
738 if (Coeff.isTwo() || Coeff.isMinusTwo()) {
739 NeedNeg = Coeff.isMinusTwo();
740 return createFAdd(OpndVal, OpndVal);
744 return createFMul(OpndVal, Coeff.getValue(
Instr->getType()));
761 Value *
X =
nullptr, *
Y =
nullptr, *Z =
nullptr;
762 const APInt *C1 =
nullptr, *C2 =
nullptr;
789 LHS =
I.getOperand(0);
790 RHS =
I.getOperand(1);
811 Value *Op0 =
Add.getOperand(0), *Op1 =
Add.getOperand(1);
820 const APInt *C1, *C2;
836 return BinaryOperator::CreateAdd(WideX, NewC);
843 return BinaryOperator::CreateAdd(WideX, NewC);
850 Value *Op0 =
Add.getOperand(0), *Op1 =
Add.getOperand(1);
875 X->getType()->getScalarSizeInBits() == 1)
879 X->getType()->getScalarSizeInBits() == 1)
886 bool WillNotSOV = willNotOverflowSignedSub(Op1C, COne,
Add);
915 if (
C->isSignMask()) {
918 if (
Add.hasNoSignedWrap() ||
Add.hasNoUnsignedWrap())
919 return BinaryOperator::CreateOr(Op0, Op1);
923 return BinaryOperator::CreateXor(Op0, Op1);
941 if ((*C2 | LHSKnown.
Zero).isAllOnes())
960 return BinaryOperator::CreateAShr(NewShl, ShAmtC);
972 X->getType()->getScalarSizeInBits() == 1)
1006template <
bool FP,
typename Mul2Rhs>
1009 constexpr unsigned MulOp =
FP ? Instruction::FMul : Instruction::Mul;
1010 constexpr unsigned AddOp =
FP ? Instruction::FAdd : Instruction::Add;
1011 constexpr unsigned Mul2Op =
FP ? Instruction::FMul : Instruction::Shl;
1044 return BinaryOperator::CreateMul(AB, AB);
1052 assert(
I.hasAllowReassoc() &&
I.hasNoSignedZeros() &&
"Assumption mismatch");
1128 (void)C0.
smul_ov(C1, overflow);
1130 (
void)C0.
umul_ov(C1, overflow);
1149 if (
MatchRem(MulOpV, RemOpV, C1, Rem2IsSigned) &&
1150 IsSigned == Rem2IsSigned) {
1154 if (
MatchDiv(RemOpV, DivOpV, DivOpC, IsSigned) &&
X == DivOpV &&
1179 Value *NotMask =
Builder.CreateShl(MinusOne, NBits,
"notmask");
1181 if (
auto *BOp = dyn_cast<BinaryOperator>(NotMask)) {
1183 BOp->setHasNoSignedWrap();
1184 BOp->setHasNoUnsignedWrap(
I.hasNoUnsignedWrap());
1191 assert(
I.getOpcode() == Instruction::Add &&
"Expecting add instruction");
1192 Type *Ty =
I.getType();
1193 auto getUAddSat = [&]() {
1222 return BinaryOperator::CreateSub(
A, NewShl);
1248 if (*MaskC != (
SMin | (*DivC - 1)))
1252 return BinaryOperator::CreateAShr(
1259 assert((
I.getOpcode() == Instruction::Add ||
1260 I.getOpcode() == Instruction::Or ||
1261 I.getOpcode() == Instruction::Sub) &&
1262 "Expecting add/or/sub instruction");
1275 if (
I.getOpcode() == Instruction::Sub &&
I.getOperand(1) !=
Select)
1278 Type *XTy =
X->getType();
1279 bool HadTrunc =
I.getType() != XTy;
1296 APInt(
C->getType()->getScalarSizeInBits(),
1297 X->getType()->getScalarSizeInBits()))))
1302 auto SkipExtInMagic = [&
I](
Value *&V) {
1303 if (
I.getOpcode() == Instruction::Sub)
1315 Value *SignExtendingValue, *Zero;
1335 SkipExtInMagic(SignExtendingValue);
1336 Constant *SignExtendingValueBaseConstant;
1337 if (!
match(SignExtendingValue,
1342 if (
I.getOpcode() == Instruction::Sub
1343 ? !
match(SignExtendingValueBaseConstant,
m_One())
1347 auto *NewAShr = BinaryOperator::CreateAShr(
X, LowBitsToSkip,
1348 Extract->
getName() +
".sext");
1349 NewAShr->copyIRFlags(Extract);
1363 assert((
I.getOpcode() == Instruction::Add ||
1364 I.getOpcode() == Instruction::Sub) &&
1365 "Expected add/sub");
1366 auto *Op0 = dyn_cast<BinaryOperator>(
I.getOperand(0));
1367 auto *Op1 = dyn_cast<BinaryOperator>(
I.getOperand(1));
1368 if (!Op0 || !Op1 || !(Op0->hasOneUse() || Op1->hasOneUse()))
1377 bool HasNSW =
I.hasNoSignedWrap() && Op0->hasNoSignedWrap() &&
1378 Op1->hasNoSignedWrap();
1379 bool HasNUW =
I.hasNoUnsignedWrap() && Op0->hasNoUnsignedWrap() &&
1380 Op1->hasNoUnsignedWrap();
1384 if (
auto *NewI = dyn_cast<BinaryOperator>(NewMath)) {
1385 NewI->setHasNoSignedWrap(HasNSW);
1386 NewI->setHasNoUnsignedWrap(HasNUW);
1388 auto *NewShl = BinaryOperator::CreateShl(NewMath, ShAmt);
1389 NewShl->setHasNoSignedWrap(HasNSW);
1390 NewShl->setHasNoUnsignedWrap(HasNUW);
1397 unsigned BitWidth =
I.getType()->getScalarSizeInBits();
1428 return BinaryOperator::CreateMul(
X,
Y);
1435 I.hasNoSignedWrap(),
I.hasNoUnsignedWrap(),
1471 Type *Ty =
I.getType();
1473 return BinaryOperator::CreateXor(
LHS,
RHS);
1478 Shl->setHasNoSignedWrap(
I.hasNoSignedWrap());
1479 Shl->setHasNoUnsignedWrap(
I.hasNoUnsignedWrap());
1490 return BinaryOperator::CreateSub(
RHS,
A);
1495 return BinaryOperator::CreateSub(
LHS,
B);
1506 return BinaryOperator::CreateSub(
A,
B);
1531 return BinaryOperator::CreateAdd(Sub, C1);
1539 const APInt *C1, *C2;
1542 APInt minusC1 = -(*C1);
1543 if (minusC1 == (one << *C2)) {
1545 return BinaryOperator::CreateSRem(
RHS, NewRHS);
1553 return BinaryOperator::CreateAnd(
A, NewMask);
1565 A->getType()->isIntOrIntVectorTy(1))
1570 return BinaryOperator::CreateOr(
LHS,
RHS);
1579 return BinaryOperator::CreateOr(
A,
B);
1599 I.hasNoUnsignedWrap(),
I.hasNoSignedWrap());
1600 return BinaryOperator::CreateAnd(
Add,
A);
1611 return BinaryOperator::CreateAnd(Dec, Not);
1622 Type *Ty =
I.getType();
1629 const APInt *NegPow2C;
1634 return BinaryOperator::CreateSub(
B, Shl);
1647 return BinaryOperator::CreateOr(
LHS, Zext);
1663 bool Changed =
false;
1664 if (!
I.hasNoSignedWrap() && willNotOverflowSignedAdd(
LHS,
RHS,
I)) {
1666 I.setHasNoSignedWrap(
true);
1668 if (!
I.hasNoUnsignedWrap() && willNotOverflowUnsignedAdd(
LHS,
RHS,
I)) {
1670 I.setHasNoUnsignedWrap(
true);
1697 {Builder.CreateOr(A, B)}));
1708 return Changed ? &
I :
nullptr;
1730 assert((
I.getOpcode() == Instruction::FAdd ||
1731 I.getOpcode() == Instruction::FSub) &&
"Expecting fadd/fsub");
1732 assert(
I.hasAllowReassoc() &&
I.hasNoSignedZeros() &&
1733 "FP factorization requires FMF");
1738 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
1739 if (!Op0->
hasOneUse() || !Op1->hasOneUse())
1759 bool IsFAdd =
I.getOpcode() == Instruction::FAdd;
1775 I.getFastMathFlags(),
1818 Value *LHSIntVal = LHSConv->getOperand(0);
1819 Type *FPType = LHSConv->getType();
1824 auto IsValidPromotion = [](
Type *FTy,
Type *ITy) {
1830 unsigned MaxRepresentableBits =
1841 if (IsValidPromotion(FPType, LHSIntVal->
getType())) {
1844 if (LHSConv->hasOneUse() &&
1846 willNotOverflowSignedAdd(LHSIntVal, CI,
I)) {
1855 Value *RHSIntVal = RHSConv->getOperand(0);
1858 if (IsValidPromotion(FPType, LHSIntVal->
getType())) {
1863 (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
1864 willNotOverflowSignedAdd(LHSIntVal, RHSIntVal,
I)) {
1877 if (
I.hasAllowReassoc() &&
I.hasNoSignedZeros()) {
1891 {X->getType()}, {Y, X}, &
I));
1901 {X->getType()}, {NewStartC, X}, &
I));
1931 if (!Result->hasNoNaNs())
1932 Result->setHasNoInfs(
false);
1943 Type *Ty,
bool IsNUW) {
1946 bool Swapped =
false;
1948 if (!isa<GEPOperator>(
LHS) && isa<GEPOperator>(
RHS)) {
1954 if (
auto *LHSGEP = dyn_cast<GEPOperator>(
LHS)) {
1956 if (LHSGEP->getOperand(0)->stripPointerCasts() ==
1959 }
else if (
auto *RHSGEP = dyn_cast<GEPOperator>(
RHS)) {
1961 if (LHSGEP->getOperand(0)->stripPointerCasts() ==
1962 RHSGEP->getOperand(0)->stripPointerCasts()) {
1985 unsigned NumNonConstantIndices2 = GEP2->countNonConstantIndices();
1986 if (NumNonConstantIndices1 + NumNonConstantIndices2 > 1 &&
1987 ((NumNonConstantIndices1 > 0 && !GEP1->
hasOneUse()) ||
1988 (NumNonConstantIndices2 > 0 && !GEP2->hasOneUse()))) {
1994 Value *Result = EmitGEPOffset(GEP1);
1998 if (
auto *
I = dyn_cast<Instruction>(Result))
1999 if (IsNUW && !GEP2 && !Swapped && GEP1->
isInBounds() &&
2000 I->getOpcode() == Instruction::Mul)
2001 I->setHasNoUnsignedWrap();
2020 Value *Op0 =
I.getOperand(0);
2021 Value *Op1 =
I.getOperand(1);
2022 Type *Ty =
I.getType();
2023 auto *
MinMax = dyn_cast<MinMaxIntrinsic>(Op1);
2043 Value *USub =
Builder.CreateIntrinsic(Intrinsic::usub_sat, Ty, {
Y, Z});
2044 return BinaryOperator::CreateAdd(
X, USub);
2047 Value *USub =
Builder.CreateIntrinsic(Intrinsic::usub_sat, Ty, {Z,
Y});
2048 return BinaryOperator::CreateAdd(
X, USub);
2066 I.hasNoSignedWrap(),
I.hasNoUnsignedWrap(),
2076 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
2080 if (
Value *V = dyn_castNegVal(Op1)) {
2083 if (
const auto *BO = dyn_cast<BinaryOperator>(Op1)) {
2084 assert(BO->getOpcode() == Instruction::Sub &&
2085 "Expected a subtraction operator!");
2086 if (BO->hasNoSignedWrap() &&
I.hasNoSignedWrap())
2089 if (cast<Constant>(Op1)->isNotMinSignedValue() &&
I.hasNoSignedWrap())
2109 bool WillNotSOV = willNotOverflowSignedSub(
C, C2,
I);
2112 auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
2119 auto TryToNarrowDeduceFlags = [
this, &
I, &Op0, &Op1]() ->
Instruction * {
2123 bool Changed =
false;
2124 if (!
I.hasNoSignedWrap() && willNotOverflowSignedSub(Op0, Op1,
I)) {
2126 I.setHasNoSignedWrap(
true);
2128 if (!
I.hasNoUnsignedWrap() && willNotOverflowUnsignedSub(Op0, Op1,
I)) {
2130 I.setHasNoUnsignedWrap(
true);
2133 return Changed ? &
I :
nullptr;
2140 if (!IsNegation ||
none_of(
I.users(), [&
I, Op1](
const User *U) {
2141 const Instruction *UI = dyn_cast<Instruction>(U);
2145 m_Select(m_Value(), m_Specific(Op1), m_Specific(&I))) ||
2146 match(UI, m_Select(m_Value(), m_Specific(&I), m_Specific(Op1)));
2149 I.hasNoSignedWrap(),
2151 return BinaryOperator::CreateAdd(NegOp1, Op0);
2154 return TryToNarrowDeduceFlags();
2160 if (
I.getType()->isIntOrIntVectorTy(1))
2161 return BinaryOperator::CreateXor(Op0, Op1);
2180 return BinaryOperator::CreateSub(XZ, YW);
2186 return BinaryOperator::CreateSub(
X,
Add);
2197 return BinaryOperator::CreateSub(NotOp1, NotOp0);
2200 auto m_AddRdx = [](
Value *&Vec) {
2201 return m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_add>(
m_Value(Vec)));
2204 if (
match(Op0, m_AddRdx(V0)) &&
match(Op1, m_AddRdx(V1)) &&
2214 if (
Constant *
C = dyn_cast<Constant>(Op0)) {
2228 if (
SelectInst *SI = dyn_cast<SelectInst>(Op1))
2233 if (
PHINode *PN = dyn_cast<PHINode>(Op1))
2250 if ((*Op0C | RHSKnown.
Zero).isAllOnes())
2251 return BinaryOperator::CreateXor(Op1, Op0);
2258 const APInt *C2, *C3;
2263 APInt C2AndC3 = *C2 & *C3;
2264 APInt C2AndC3Minus1 = C2AndC3 - 1;
2265 APInt C2AddC3 = *C2 + *C3;
2266 if ((*C3 - C2AndC3Minus1).isPowerOf2() &&
2269 return BinaryOperator::CreateAdd(
2291 return BinaryOperator::CreateXor(
A,
B);
2299 return BinaryOperator::CreateAnd(
A,
B);
2307 return BinaryOperator::CreateOr(
A,
B);
2324 return BinaryOperator::CreateAnd(
A,
B);
2340 return BinaryOperator::CreateAnd(
2377 auto SinkSubIntoSelect =
2384 if (OtherHandOfSub != TrueVal && OtherHandOfSub != FalseVal)
2389 bool OtherHandOfSubIsTrueVal = OtherHandOfSub == TrueVal;
2390 Value *NewSub = SubBuilder(OtherHandOfSubIsTrueVal ? FalseVal : TrueVal);
2394 OtherHandOfSubIsTrueVal ? NewSub : Zero);
2417 (Op1->hasOneUse() || isa<Constant>(
Y)))
2418 return BinaryOperator::CreateAnd(
2432 return BinaryOperator::CreateSub(Not,
X);
2438 return BinaryOperator::CreateSub(
X, Not);
2443 Value *LHSOp, *RHSOp;
2447 I.hasNoUnsignedWrap()))
2464 Type *Ty =
I.getType();
2467 Op1->hasNUses(2) && *ShAmt ==
BitWidth - 1 &&
2475 I.hasNoSignedWrap());
2483 const APInt *AddC, *AndC;
2488 if ((HighMask & *AndC).
isZero())
2531 {Builder.CreateNot(X)}));
2537 auto *OBO0 = cast<OverflowingBinaryOperator>(Op0);
2538 auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
2539 bool PropagateNSW =
I.hasNoSignedWrap() && OBO0->hasNoSignedWrap() &&
2540 OBO1->hasNoSignedWrap() &&
BitWidth > 2;
2541 bool PropagateNUW =
I.hasNoUnsignedWrap() && OBO0->hasNoUnsignedWrap() &&
2542 OBO1->hasNoUnsignedWrap() &&
BitWidth > 1;
2552 if (
I.hasNoUnsignedWrap() ||
I.hasNoSignedWrap()) {
2564 return TryToNarrowDeduceFlags();
2627 if (II->getIntrinsicID() == Intrinsic::ldexp) {
2633 II->getCalledFunction(),
2634 {Builder.CreateFNeg(II->getArgOperand(0)), II->getArgOperand(1)});
2635 New->copyMetadata(*II);
2656 if (
I.hasNoSignedZeros() &&
2664 if (
Instruction *R = hoistFNegAboveFMulFDiv(OneUse,
I))
2673 auto propagateSelectFMF = [&](
SelectInst *S,
bool CommonOperand) {
2675 if (
auto *OldSel = dyn_cast<SelectInst>(
Op)) {
2676 FastMathFlags FMF =
I.getFastMathFlags() | OldSel->getFastMathFlags();
2678 if (!OldSel->hasNoSignedZeros() && !CommonOperand &&
2688 propagateSelectFMF(NewSel,
P ==
Y);
2695 propagateSelectFMF(NewSel,
P ==
X);
2705 FMF &= cast<FPMathOperator>(OneUse)->getFastMathFlags();
2720 I.getFastMathFlags(),
2747 Value *Op0 =
I.getOperand(0), *Op1 =
I.getOperand(1);
2762 if (
I.hasNoSignedZeros() && !isa<ConstantExpr>(Op0) &&
2768 if (isa<Constant>(Op0))
2769 if (
SelectInst *SI = dyn_cast<SelectInst>(Op1))
2786 Type *Ty =
I.getType();
2813 if (
I.hasAllowReassoc() &&
I.hasNoSignedZeros()) {
2847 auto m_FaddRdx = [](
Value *&Sum,
Value *&Vec) {
2848 return m_OneUse(m_Intrinsic<Intrinsic::vector_reduce_fadd>(
m_Value(Sum),
2851 Value *A0, *A1, *V0, *V1;
2852 if (
match(Op0, m_FaddRdx(A0, V0)) &&
match(Op1, m_FaddRdx(A1, V1)) &&
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static bool isConstant(const MachineInstr &MI)
amdgpu AMDGPU Register Bank Select
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static Instruction * factorizeFAddFSub(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
Factor a common operand out of fadd/fsub of fmul/fdiv.
static Instruction * foldAddToAshr(BinaryOperator &Add)
Try to reduce signed division by power-of-2 to an arithmetic shift right.
static bool MatchMul(Value *E, Value *&Op, APInt &C)
static bool MatchDiv(Value *E, Value *&Op, APInt &C, bool IsSigned)
static Instruction * foldFNegIntoConstant(Instruction &I, const DataLayout &DL)
This eliminates floating-point negation in either 'fneg(X)' or 'fsub(-0.0, X)' form by combining into...
static Instruction * combineAddSubWithShlAddSub(InstCombiner::BuilderTy &Builder, const BinaryOperator &I)
static Instruction * factorizeLerp(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
Eliminate an op from a linear interpolation (lerp) pattern.
static Instruction * foldSubOfMinMax(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
static Instruction * foldBoxMultiply(BinaryOperator &I)
Reduce a sequence of masked half-width multiplies to a single multiply.
static Value * checkForNegativeOperand(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
static bool MulWillOverflow(APInt &C0, APInt &C1, bool IsSigned)
static Instruction * foldNoWrapAdd(BinaryOperator &Add, InstCombiner::BuilderTy &Builder)
Wrapping flags may allow combining constants separated by an extend.
static bool matchesSquareSum(BinaryOperator &I, Mul2Rhs M2Rhs, Value *&A, Value *&B)
static Instruction * factorizeMathWithShlOps(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
This is a specialization of a more general transform from foldUsingDistributiveLaws.
static Instruction * canonicalizeLowbitMask(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
Fold (1 << NBits) - 1 Into: ~(-(1 << NBits)) Because a 'not' is better for bit-tracking analysis and ...
static Instruction * foldToUnsignedSaturatedAdd(BinaryOperator &I)
static bool MatchRem(Value *E, Value *&Op, APInt &C, bool &IsSigned)
This file provides internal interfaces used to implement the InstCombine.
This file provides the interface for the instcombine pass implementation.
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallVector class.
const fltSemantics & getSemantics() const
opStatus multiply(const APFloat &RHS, roundingMode RM)
Class for arbitrary precision integers.
APInt umul_ov(const APInt &RHS, bool &Overflow) const
bool isNegatedPowerOf2() const
Check if this APInt's negated value is a power of two greater than zero.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
APInt trunc(unsigned width) const
Truncate to new width.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
bool isSignMask() const
Check if the APInt's value is returned by getSignMask.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool isNegative() const
Determine sign of this APInt.
int32_t exactLogBase2() const
unsigned countr_zero() const
Count the number of trailing zero bits.
unsigned countl_zero() const
The APInt version of std::countl_zero.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
unsigned logBase2() const
APInt smul_ov(const APInt &RHS, bool &Overflow) const
bool isMask(unsigned numBits) const
APInt sext(unsigned width) const
Sign extend to a new width.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
static BinaryOperator * CreateFDivFMF(Value *V1, Value *V2, Instruction *FMFSource, const Twine &Name="")
static BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", Instruction *InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
static BinaryOperator * CreateNot(Value *Op, const Twine &Name="", Instruction *InsertBefore=nullptr)
static BinaryOperator * CreateWithCopiedFlags(BinaryOps Opc, Value *V1, Value *V2, Value *CopyO, const Twine &Name="", Instruction *InsertBefore=nullptr)
static BinaryOperator * CreateFMulFMF(Value *V1, Value *V2, Instruction *FMFSource, const Twine &Name="")
static BinaryOperator * CreateFSubFMF(Value *V1, Value *V2, Instruction *FMFSource, const Twine &Name="")
static BinaryOperator * CreateFAddFMF(Value *V1, Value *V2, Instruction *FMFSource, const Twine &Name="")
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", Instruction *InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
static CastInst * CreateTruncOrBitCast(Value *S, Type *Ty, const Twine &Name="", Instruction *InsertBefore=nullptr)
Create a Trunc or BitCast cast instruction.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
static Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static Constant * getZExt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getSIToFP(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getSExt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getFPToSI(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
ConstantFP - Floating Point Values [float, double].
const APFloat & getValueAPF() const
static Constant * get(Type *Ty, double V)
This returns a ConstantFP, or a vector containing a splat of a ConstantFP, for the specified value in...
bool isZero() const
Return true if the value is positive or negative zero.
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
This is an important base class in LLVM.
static Constant * getAllOnesValue(Type *Ty)
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Convenience struct for specifying and reasoning about fast-math flags.
bool noSignedZeros() const
bool isInBounds() const
Test whether this is an inbounds GEP, as defined by LangRef.html.
unsigned countNonConstantIndices() const
Value * CreateFAddFMF(Value *L, Value *R, Instruction *FMFSource, const Twine &Name="")
Copy fast-math-flags from an instruction rather than using the builder's default FMF.
Value * CreateNeg(Value *V, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateSRem(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateFMulFMF(Value *L, Value *R, Instruction *FMFSource, const Twine &Name="")
Copy fast-math-flags from an instruction rather than using the builder's default FMF.
Value * CreateFSubFMF(Value *L, Value *R, Instruction *FMFSource, const Twine &Name="")
Copy fast-math-flags from an instruction rather than using the builder's default FMF.
Value * CreateFDivFMF(Value *L, Value *R, Instruction *FMFSource, const Twine &Name="")
Copy fast-math-flags from an instruction rather than using the builder's default FMF.
Value * CreateFPTrunc(Value *V, Type *DestTy, const Twine &Name="")
ConstantInt * getTrue()
Get the constant value for i1 true.
CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, Instruction *FMFSource=nullptr, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
Value * CreateFNegFMF(Value *V, Instruction *FMFSource, const Twine &Name="")
Copy fast-math-flags from an instruction rather than using the builder's default FMF.
Value * CreateIsNotNeg(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg > -1.
Value * CreateNSWAdd(Value *LHS, Value *RHS, const Twine &Name="")
void setFastMathFlags(FastMathFlags NewFMF)
Set the fast-math flags to be used with generated fp-math operators.
CallInst * CreateCopySign(Value *LHS, Value *RHS, Instruction *FMFSource=nullptr, const Twine &Name="")
Create call to the copysign intrinsic.
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateNot(Value *V, const Twine &Name="")
InstTy * Insert(InstTy *I, const Twine &Name="") const
Insert and return the specified instruction.
Value * CreateIsNeg(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg < 0.
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
CallInst * CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, Instruction *FMFSource=nullptr, const Twine &Name="")
Create a call to intrinsic ID with 2 operands which is mangled on the first type.
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateIsNotNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg != 0.
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=std::nullopt, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateFPExt(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateFNeg(Value *V, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateURem(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false)
Given an instruction with a select as one operand and a constant as the other operand,...
Instruction * foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I)
Tries to simplify binops of select and cast of the select condition.
Instruction * visitAdd(BinaryOperator &I)
Instruction * canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(BinaryOperator &I)
Instruction * foldBinOpIntoSelectOrPhi(BinaryOperator &I)
This is a convenience wrapper function for the above two functions.
bool SimplifyAssociativeOrCommutative(BinaryOperator &I)
Performs a few simplifications for operators which are associative or commutative.
Value * foldUsingDistributiveLaws(BinaryOperator &I)
Tries to simplify binary operations which some other binary operation distributes over.
Instruction * foldBinOpShiftWithShift(BinaryOperator &I)
Instruction * foldSquareSumInt(BinaryOperator &I)
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...
Instruction * foldSquareSumFP(BinaryOperator &I)
Instruction * visitSub(BinaryOperator &I)
Value * OptimizePointerDifference(Value *LHS, Value *RHS, Type *Ty, bool isNUW)
Optimize pointer differences into the same array into a size.
Instruction * visitFAdd(BinaryOperator &I)
Instruction * foldBinopWithPhiOperands(BinaryOperator &BO)
For a binary operator with 2 phi operands, try to hoist the binary operation before the phi.
Value * SimplifyAddWithRemainder(BinaryOperator &I)
Tries to simplify add operations using the definition of remainder.
Instruction * foldAddWithConstant(BinaryOperator &Add)
Instruction * foldVectorBinop(BinaryOperator &Inst)
Canonicalize the position of binops relative to shufflevector.
Value * SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS, Value *RHS)
Instruction * visitFNeg(UnaryOperator &I)
Instruction * visitFSub(BinaryOperator &I)
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
static bool isFreeToInvert(Value *V, bool WillInvertAllUses)
Return true if the specified value is free to invert (apply ~ to).
static Constant * SubOne(Constant *C)
Subtract one from a Constant.
unsigned ComputeNumSignBits(const Value *Op, unsigned Depth=0, const Instruction *CxtI=nullptr) const
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
static bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS, bool &TrueIfSigned)
Given an exploded icmp instruction, return true if the comparison only checks the sign bit.
void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth, const Instruction *CxtI) const
bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth=0, const Instruction *CxtI=nullptr) const
const SimplifyQuery & getSimplifyQuery() const
static Constant * AddOne(Constant *C)
Add one to a Constant.
bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
void copyFastMathFlags(FastMathFlags FMF)
Convenience function for transferring all fast-math flag values to this instruction,...
void setHasNoSignedZeros(bool B)
Set or clear the no-signed-zeros flag on this instruction, which must be an operator which supports this flag.
void setHasNoSignedWrap(bool b=true)
Set or clear the nsw flag on this instruction, which must be an operator which supports this flag.
void setFastMathFlags(FastMathFlags FMF)
Convenience function for setting multiple fast-math flags on this instruction, which must be an operator which supports these flags.
void setHasNoInfs(bool B)
Set or clear the no-infs flag on this instruction, which must be an operator which supports this flag.
FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports these flags.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
A wrapper class for inspecting calls to intrinsic functions.
static Value * Negate(bool LHSIsZero, bool IsNSW, Value *Root, InstCombinerImpl &IC)
Attempt to negate Root.
This class represents a cast from signed integer to floating point.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", Instruction *InsertBefore=nullptr, Instruction *MDFrom=nullptr)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
The instances of the Type class are immutable: once they are created, they are never changed.
unsigned getIntegerBitWidth() const
const fltSemantics & getFltSemantics() const
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
static UnaryOperator * CreateFNegFMF(Value *Op, Instruction *FMFSource, const Twine &Name="", Instruction *InsertBefore=nullptr)
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
bool hasNUsesOrMore(unsigned N) const
Return true if this value has N uses or more.
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
StringRef getName() const
Return a constant reference to the value's name.
This class represents zero extension of integer types.
@ C
The default llvm calling convention, compatible with C.
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=std::nullopt)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
specific_intval< false > m_SpecificInt(APInt V)
Match a specific integer value or vector with all elements equal to the value.
match_combine_or< CastClass_match< OpTy, Instruction::ZExt >, OpTy > m_ZExtOrSelf(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
BinaryOp_match< LHS, RHS, Instruction::FMul, true > m_c_FMul(const LHS &L, const RHS &R)
Matches FMul with LHS and RHS in either order.
cst_pred_ty< is_sign_mask > m_SignMask()
Match an integer or vector with only the sign bit(s) set.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWAdd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::FSub > m_FSub(const LHS &L, const RHS &R)
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
BinaryOp_match< LHS, RHS, Instruction::URem > m_URem(const LHS &L, const RHS &R)
CastClass_match< OpTy, Instruction::SExt > m_SExt(const OpTy &Op)
Matches SExt.
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
CastClass_match< OpTy, Instruction::ZExt > m_ZExt(const OpTy &Op)
Matches ZExt.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWSub(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::FMul > m_FMul(const LHS &L, const RHS &R)
bool match(Val *V, const Pattern &P)
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
cstfp_pred_ty< is_any_zero_fp > m_AnyZeroFP()
Match a floating-point negative zero or positive zero.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
specific_fpval m_SpecificFP(double V)
Match a specific floating point value or vector with all elements equal to the value.
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)
Matches an Xor with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::FAdd > m_FAdd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
cst_pred_ty< is_zero_int > m_ZeroInt()
Match an integer 0 or a vector with all elements equal to 0.
CastClass_match< OpTy, Instruction::FPTrunc > m_FPTrunc(const OpTy &Op)
CmpClass_match< LHS, RHS, ICmpInst, ICmpInst::Predicate > m_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R)
OneUse_match< T > m_OneUse(const T &SubPattern)
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty, true > m_c_SMin(const LHS &L, const RHS &R)
Matches an SMin with LHS and RHS in either order.
BinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub > m_Neg(const ValTy &V)
Matches a 'Neg' as 'sub 0, V'.
CastClass_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
match_combine_and< class_match< Constant >, match_unless< constantexpr_match > > m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty, true > m_c_UMax(const LHS &L, const RHS &R)
Matches a UMax with LHS and RHS in either order.
CastClass_match< OpTy, Instruction::Trunc > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
match_combine_or< CastClass_match< OpTy, Instruction::Trunc >, OpTy > m_TruncOrSelf(const OpTy &Op)
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty > m_UMax(const LHS &L, const RHS &R)
cst_pred_ty< is_negated_power2 > m_NegatedPower2()
Match an integer or vector negated power-of-2.
specific_fpval m_FPOne()
Match a float 1.0 or vector with all elements equal to 1.0.
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty, true > m_c_UMin(const LHS &L, const RHS &R)
Matches a UMin with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches a Add with LHS and RHS in either order.
CastClass_match< OpTy, Instruction::FPExt > m_FPExt(const OpTy &Op)
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty, true > m_c_SMax(const LHS &L, const RHS &R)
Matches an SMax with LHS and RHS in either order.
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty, true > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty, true > > > m_c_MaxOrMin(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
specific_intval< true > m_SpecificIntAllowUndef(APInt V)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWSub(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty > m_SMax(const LHS &L, const RHS &R)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap > m_NSWAdd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
BinaryOp_match< LHS, RHS, Instruction::FAdd, true > m_c_FAdd(const LHS &L, const RHS &R)
Matches FAdd with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::FDiv > m_FDiv(const LHS &L, const RHS &R)
apfloat_match m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat.
BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)
match_combine_or< CastClass_match< OpTy, Instruction::SExt >, OpTy > m_SExtOrSelf(const OpTy &Op)
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Mul, true > m_c_Mul(const LHS &L, const RHS &R)
Matches a Mul with LHS and RHS in either order.
m_Intrinsic_Ty< Opnd0, Opnd1 >::Ty m_CopySign(const Opnd0 &Op0, const Opnd1 &Op1)
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > m_UMin(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
cst_pred_ty< icmp_pred_with_threshold > m_SpecificInt_ICMP(ICmpInst::Predicate Predicate, const APInt &Threshold)
Match an integer or vector with every element comparing 'pred' (eq/ne/...) to Threshold.
@ CE
Windows NT (Windows on ARM)
NodeAddr< InstrNode * > Instr
This is an optimization pass for GlobalISel generic memory operations.
Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given value is known to be non-zero when defined.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
std::string & operator+=(std::string &buffer, StringRef string)
Value * simplifySubInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for a Sub, fold the result or return null.
Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
Constant * ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op, const DataLayout &DL)
Attempt to constant fold a unary operation with the specified operand.
bool cannotBeNegativeZero(const Value *V, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if we can prove that the specified FP value is never equal to -0.0.
Value * simplifyFNegInst(Value *Op, FastMathFlags FMF, const SimplifyQuery &Q)
Given operand for an FNeg, fold the result or return null.
Value * simplifyFSubInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FSub, fold the result or return null.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
Value * simplifyFAddInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FAdd, fold the result or return null.
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
bool haveNoCommonBitsSet(const Value *LHS, const Value *RHS, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if LHS and RHS have no common bits set.
@ Mul
Product of integers.
@ And
Bitwise or logical AND of integers.
@ SMin
Signed integer min implemented in terms of select(cmp()).
DWARFExpression::Operation Op
RoundingMode
Rounding mode.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
constexpr unsigned BitWidth
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static unsigned int semanticsPrecision(const fltSemantics &)
A suitably aligned and sized character array member which can hold elements of any type.
SimplifyQuery getWithInstruction(Instruction *I) const
const TargetLibraryInfo * TLI