38  #define DEBUG_TYPE "instcombine"
52    if (!V->hasOneUse()) return nullptr;
54    bool MadeChange = false;
58    Value *A = nullptr, *B = nullptr, *One = nullptr;
68    if (I && I->isLogicalShift() &&
77      if (I->getOpcode() == Instruction::LShr && !I->isExact()) {
82      if (I->getOpcode() == Instruction::Shl && !I->hasNoUnsignedWrap()) {
83        I->setHasNoUnsignedWrap();
92    return MadeChange ? V : nullptr;
108      bool HasAnyNoWrap = I.hasNoSignedWrap() || I.hasNoUnsignedWrap();
109      Value *Neg = Builder.CreateNeg(OtherOp, "", HasAnyNoWrap);
110      return Builder.CreateSelect(Cond, OtherOp, Neg);
116      bool HasAnyNoWrap = I.hasNoSignedWrap() || I.hasNoUnsignedWrap();
117      Value *Neg = Builder.CreateNeg(OtherOp, "", HasAnyNoWrap);
118      return Builder.CreateSelect(Cond, Neg, OtherOp);
126      return Builder.CreateSelectFMF(Cond, OtherOp,
127                                     Builder.CreateFNegFMF(OtherOp, &I), &I);
134      return Builder.CreateSelectFMF(Cond, Builder.CreateFNegFMF(OtherOp, &I),
148    const bool HasNSW = Mul.hasNoSignedWrap();
149    const bool HasNUW = Mul.hasNoUnsignedWrap();
155      return Builder.CreateShl(X, Z, Mul.getName(), HasNUW, PropagateNSW);
168      FrX = Builder.CreateFreeze(X, X->getName() + ".fr");
169    Value *Shl = Builder.CreateShl(FrX, Z, "mulshl", HasNUW, PropagateNSW);
170    return Builder.CreateAdd(Shl, FrX, Mul.getName(), HasNUW, PropagateNSW);
181      FrX = Builder.CreateFreeze(X, X->getName() + ".fr");
182    Value *Shl = Builder.CreateShl(FrX, Z, "mulshl");
183    return Builder.CreateSub(Shl, FrX, Mul.getName());
190    Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
193                                 SQ.getWithInstruction(&I)))
208    Type *Ty = I.getType();
209    const unsigned BitWidth = Ty->getScalarSizeInBits();
210    const bool HasNSW = I.hasNoSignedWrap();
211    const bool HasNUW = I.hasNoUnsignedWrap();
230      assert(Shl && "Constant folding of immediate constants failed");
233      if (HasNUW && Mul->hasNoUnsignedWrap())
249      if (match(NewCst, m_APInt(V)) && *V != V->getBitWidth() - 1)
266          (*MulAP - 1).isPowerOf2() && *ShiftC == MulAP->logBase2()) {
272          BinOp = Builder.CreateLShr(NewOp, ConstantInt::get(Ty, *ShiftC), "",
275        auto *NewAdd = BinaryOperator::CreateAdd(NewOp, BinOp);
276        if (HasNSW && (HasNUW || OpBO->getOpcode() == Instruction::LShr ||
278          NewAdd->setHasNoSignedWrap(true);
280        NewAdd->setHasNoUnsignedWrap(HasNUW);
294                                     HasNSW && Op1C->isNotMinSignedValue()));
303    const APInt *NegPow2C;
307      unsigned SrcWidth = X->getType()->getScalarSizeInBits();
309      if (ShiftAmt >= BitWidth - SrcWidth) {
312        return BinaryOperator::CreateShl(Z, ConstantInt::get(Ty, ShiftAmt));
338          (BOp0->getOpcode() == Instruction::Or || BOp0->hasNoUnsignedWrap());
340      auto *BO = BinaryOperator::CreateAdd(NewMul, NewC);
341      if (HasNUW && Op0NUW) {
344          NewMulBO->setHasNoUnsignedWrap();
345        BO->setHasNoUnsignedWrap();
354      return BinaryOperator::CreateMul(X, X);
359    if (I.hasNoSignedWrap() &&
364          I, Builder.CreateBinaryIntrinsic(Intrinsic::abs,
377      auto *NewMul = BinaryOperator::CreateMul(X, Y);
380        NewMul->setHasNoSignedWrap();
393      return BinaryOperator::CreateMul(NegOp0, X);
401    auto UDivCheck = [&C1](const APInt &C) { return C.urem(*C1).isZero(); };
402    auto SDivCheck = [&C1](const APInt &C) {
423    if (!Div || (Div->getOpcode() != Instruction::UDiv &&
424                 Div->getOpcode() != Instruction::SDiv)) {
428    Value *Neg = dyn_castNegVal(Y);
431        (Div->getOpcode() == Instruction::UDiv ||
432         Div->getOpcode() == Instruction::SDiv)) {
442    auto RemOpc = Div->getOpcode() == Instruction::UDiv ? Instruction::URem
447      XFreeze = Builder.CreateFreeze(X, X->getName() + ".fr");
448    Value *Rem = Builder.CreateBinOp(RemOpc, XFreeze, DivOp1);
450      return BinaryOperator::CreateSub(XFreeze, Rem);
451    return BinaryOperator::CreateSub(Rem, XFreeze);
460    if (Ty->isIntOrIntVectorTy(1) ||
463      return BinaryOperator::CreateAnd(Op0, Op1);
475        X->getType()->isIntOrIntVectorTy(1) && X->getType() == Y->getType() &&
476        (Op0->hasOneUse() || Op1->hasOneUse() || X == Y)) {
485        X->getType()->isIntOrIntVectorTy(1) && X->getType() == Y->getType() &&
486        (Op0->hasOneUse() || Op1->hasOneUse())) {
501        X->getType()->isIntOrIntVectorTy(1))
516        *C == C->getBitWidth() - 1) {
528        *C == C->getBitWidth() - 1) {
576    if (!HasNSW && willNotOverflowSignedMul(Op0, Op1, I)) {
578      I.setHasNoSignedWrap(true);
581    if (!HasNUW && willNotOverflowUnsignedMul(Op0, Op1, I, I.hasNoSignedWrap())) {
583      I.setHasNoUnsignedWrap(true);
591    assert((Opcode == Instruction::FMul || Opcode == Instruction::FDiv) &&
592           "Expected fmul or fdiv");
594    Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
610        (Op0->hasOneUse() || Op1->hasOneUse())) {
611      Value *XY = Builder.CreateBinOpFMF(Opcode, X, Y, &I);
613          Builder.CreateUnaryIntrinsic(Intrinsic::fabs, XY, &I, I.getName());
626        Intrinsic::powi, {X->getType(), YZ->getType()}, {X, YZ}, &I);
632    unsigned Opcode = I.getOpcode();
633    assert((Opcode == Instruction::FMul || Opcode == Instruction::FDiv) &&
634           "Unexpected opcode");
641      Constant *One = ConstantInt::get(Y->getType(), 1);
642      if (willNotOverflowSignedAdd(Y, One, I)) {
649    Value *Op0 = I.getOperand(0);
650    Value *Op1 = I.getOperand(1);
651    if (Opcode == Instruction::FMul && I.isOnlyUserOfAnyOperand() &&
656          Y->getType() == Z->getType()) {
661    if (Opcode == Instruction::FDiv && I.hasAllowReassoc() && I.hasNoNaNs()) {
668          willNotOverflowSignedSub(Y, ConstantInt::get(Y->getType(), 1), I)) {
670        Instruction *NewPow = createPowiExpr(I, *this, Op1, Y, NegOne);
681          willNotOverflowSignedSub(Y, ConstantInt::get(Y->getType(), 1), I)) {
683        auto *NewPow = createPowiExpr(I, *this, X, Y, NegOne);
715    return !R1.empty() && !R2.empty();
749    if (!X->hasAllowReassoc() || !X->hasAllowReciprocal() || !X->hasNoInfs())
756    if (BBx != BBr1 && BBx != BBr2)
765      return (I->getParent() != BBr1 || !I->hasAllowReassoc());
775      return (I->getParent() == BBr2 && I->hasAllowReassoc());
780    Value *Op0 = I.getOperand(0);
781    Value *Op1 = I.getOperand(1);
845      auto *NewFMul = Builder.CreateFMulFMF(X, Z, FMF);
856        Value *Sqrt = Builder.CreateUnaryIntrinsic(Intrinsic::sqrt, XY, &I);
866    if (I.hasNoSignedZeros() &&
870    if (I.hasNoSignedZeros() &&
877    if (I.hasNoNaNs() && I.hasNoSignedZeros() && Op0 == Op1 && Op0->hasNUses(2)) {
896      Value *Y1 = Builder.CreateFAddFMF(Y, ConstantFP::get(I.getType(), 1.0), &I);
897      Value *Pow = Builder.CreateBinaryIntrinsic(Intrinsic::pow, X, Y1, &I);
904    if (I.isOnlyUserOfAnyOperand()) {
908        auto *YZ = Builder.CreateFAddFMF(Y, Z, &I);
909        auto *NewPow = Builder.CreateBinaryIntrinsic(Intrinsic::pow, X, YZ, &I);
915        auto *XZ = Builder.CreateFMulFMF(X, Z, &I);
916        auto *NewPow = Builder.CreateBinaryIntrinsic(Intrinsic::pow, XZ, Y, &I);
924      Value *Exp = Builder.CreateUnaryIntrinsic(Intrinsic::exp, XY, &I);
932      Value *Exp2 = Builder.CreateUnaryIntrinsic(Intrinsic::exp2, XY, &I);
958                                   I.getFastMathFlags(),
959                                   SQ.getWithInstruction(&I)))
984    Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
996        Op0 = Builder.CreateFNegFMF(Op0, &I);
998          {I.getType()}, {Op1, Op0}, &I);
1009    if (I.hasNoNaNs() && I.hasNoSignedZeros()) {
1014          X->getType()->isIntOrIntVectorTy(1)) {
1016        SI->copyFastMathFlags(I.getFastMathFlags());
1020          X->getType()->isIntOrIntVectorTy(1)) {
1022        SI->copyFastMathFlags(I.getFastMathFlags());
1031    if (I.hasAllowReassoc())
1059    Value *Start = nullptr, *Step = nullptr;
1073      if (!Result->hasNoNaNs())
1074        Result->setHasNoInfs(false);
1079    if (I.hasAllowContract() &&
1083      auto *Sin = Builder.CreateUnaryIntrinsic(Intrinsic::sin, X, &I);
1084      if (auto *Metadata = I.getMetadata(LLVMContext::MD_fpmath)) {
1085        Sin->setMetadata(LLVMContext::MD_fpmath, Metadata);
1122    Value *SelectCond = SI->getCondition();
1129    while (BBI != BBFront) {
1137      for (Use &Op : BBI->operands()) {
1141        } else if (Op == SelectCond) {
1151      if (&*BBI == SelectCond)
1152        SelectCond = nullptr;
1155      if (!SelectCond && !SI)
1166    Product = IsSigned ? C1.smul_ov(C2, Overflow) : C1.umul_ov(C2, Overflow);
1193    assert((I.getOpcode() == Instruction::SDiv ||
1194            I.getOpcode() == Instruction::UDiv) &&
1195           "Expected integer divide");
1197    bool IsSigned = I.getOpcode() == Instruction::SDiv;
1198    Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1199    Type *Ty = I.getType();
1210      bool HasNUW = Mul->hasNoUnsignedWrap() && Shl->hasNoUnsignedWrap();
1211      bool HasNSW = Mul->hasNoSignedWrap() && Shl->hasNoSignedWrap();
1214      if (!IsSigned && HasNUW)
1215        return Builder.CreateLShr(Y, Z, "", I.isExact());
1218      if (IsSigned && HasNSW && (Op0->hasOneUse() || Op1->hasOneUse())) {
1219        Value *Shl = Builder.CreateShl(ConstantInt::get(Ty, 1), Z);
1220        return Builder.CreateSDiv(Y, Shl, "", I.isExact());
1235        ((Shl0->hasNoUnsignedWrap() && Shl1->hasNoUnsignedWrap()) ||
1236         (Shl0->hasNoUnsignedWrap() && Shl0->hasNoSignedWrap() &&
1237          Shl1->hasNoSignedWrap())))
1238      return Builder.CreateUDiv(X, Y, "", I.isExact());
1242    if (IsSigned && Shl0->hasNoSignedWrap() && Shl1->hasNoSignedWrap() &&
1243        Shl1->hasNoUnsignedWrap())
1244      return Builder.CreateSDiv(X, Y, "", I.isExact());
1254    if (IsSigned ? (Shl0->hasNoSignedWrap() && Shl1->hasNoSignedWrap())
1255                 : (Shl0->hasNoUnsignedWrap() && Shl1->hasNoUnsignedWrap())) {
1256      Constant *One = ConstantInt::get(X->getType(), 1);
1259      Value *Dividend = Builder.CreateShl(
1260          One, Y, "shl.dividend",
1263          IsSigned ? (Shl0->hasNoUnsignedWrap() || Shl1->hasNoUnsignedWrap())
1264                   : Shl0->hasNoSignedWrap());
1265      return Builder.CreateLShr(Dividend, Z, "", I.isExact());
1274    assert(I.isIntDivRem() && "Unexpected instruction");
1275    Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1280    Type *Ty = I.getType();
1283      unsigned NumElts = VTy->getNumElements();
1284      for (unsigned i = 0; i != NumElts; ++i) {
1324    Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1325    bool IsSigned = I.getOpcode() == Instruction::SDiv;
1326    Type *Ty = I.getType();
1339                                            ConstantInt::get(Ty, Product));
1347      if (isMultiple(*C2, *C1, Quotient, IsSigned)) {
1349                                                ConstantInt::get(Ty, Quotient));
1350        NewDiv->setIsExact(I.isExact());
1355      if (isMultiple(*C1, *C2, Quotient, IsSigned)) {
1357                                              ConstantInt::get(Ty, Quotient));
1359        Mul->setHasNoUnsignedWrap(!IsSigned && OBO->hasNoUnsignedWrap());
1360        Mul->setHasNoSignedWrap(OBO->hasNoSignedWrap());
1373      if (isMultiple(*C2, C1Shifted, Quotient, IsSigned)) {
1375                                            ConstantInt::get(Ty, Quotient));
1376        BO->setIsExact(I.isExact());
1381      if (isMultiple(C1Shifted, *C2, Quotient, IsSigned)) {
1383                                              ConstantInt::get(Ty, Quotient));
1385        Mul->setHasNoUnsignedWrap(!IsSigned && OBO->hasNoUnsignedWrap());
1386        Mul->setHasNoSignedWrap(OBO->hasNoSignedWrap());
1399        return BinaryOperator::CreateNSWAdd(X, ConstantInt::get(Ty, Quotient));
1404        return BinaryOperator::CreateNUWAdd(X,
1405            ConstantInt::get(Ty, C1->udiv(*C2)));
1414    assert(!Ty->isIntOrIntVectorTy(1) && "i1 divide not removed?");
1421      F1 = Builder.CreateFreeze(Op1, Op1->getName() + ".fr");
1423      Value *Cmp = Builder.CreateICmpULT(Inc, ConstantInt::get(Ty, 3));
1446      return BinaryOperator::CreateNSWShl(ConstantInt::get(Ty, 1), Y);
1448      return BinaryOperator::CreateNUWShl(ConstantInt::get(Ty, 1), Y);
1454    if ((IsSigned && HasNSW) || (!IsSigned && HasNUW)) {
1463    if (!IsSigned && Op1->hasOneUse() &&
1468          Builder.CreateShl(ConstantInt::get(Ty, 1), Z, "", true), Y);
1484      if (!IsSigned && Mul->hasNoUnsignedWrap())
1485        NewDiv = BinaryOperator::CreateUDiv(X, Y);
1486      else if (IsSigned && Mul->hasNoSignedWrap())
1487        NewDiv = BinaryOperator::CreateSDiv(X, Y);
1491        NewDiv->setIsExact(I.isExact() && InnerDiv->isExact());
1505    const APInt *C1, *C2;
1506    if (IsSigned && OB0HasNSW) {
1508        return BinaryOperator::CreateSDiv(A, B);
1510      if (!IsSigned && OB0HasNUW) {
1512          return BinaryOperator::CreateUDiv(A, B);
1514          return BinaryOperator::CreateUDiv(A, B);
1520    if (auto *Val = CreateDivOrNull(Y, Z))
1524    if (auto *Val = CreateDivOrNull(X, Z))
1535      return reinterpret_cast<Value *>(-1);
1543      return IfFold([&]() {
1559      return IfFold([&]() { return Builder.CreateZExt(LogX, Op->getType()); });
1565      if (AssumeNonZero || TI->hasNoUnsignedWrap())
1567        return IfFold([&]() {
1568          return Builder.CreateTrunc(LogX, Op->getType(), "",
1569                                     TI->hasNoUnsignedWrap());
1578      if (AssumeNonZero || BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap())
1580        return IfFold([&]() { return Builder.CreateAdd(LogX, Y); });
1587      if (AssumeNonZero || PEO->isExact())
1589        return IfFold([&]() { return Builder.CreateSub(LogX, Y); });
1596      return IfFold([&]() { return LogX; });
1598      return IfFold([&]() { return LogY; });
1607      return IfFold([&]() {
1608        return Builder.CreateSelect(SI->getOperand(0), LogX, LogY);
1621      return IfFold([&]() {
1622        return Builder.CreateBinaryIntrinsic(MinMax->getIntrinsicID(), LogX,
1637    Type *Ty = I.getType();
1640        X->getType() == Y->getType() && (N->hasOneUse() || D->hasOneUse())) {
1677                                  SQ.getWithInstruction(&I)))
1687    Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1689    const APInt *C1, *C2;
1697          X, ConstantInt::get(X->getType(), C2ShlC1));
1706    Type *Ty = I.getType();
1732    auto GetShiftableDenom = [&](Value *Denom) -> Value * {
1742      return Builder.CreateBinaryIntrinsic(Intrinsic::cttz, Denom,
1748    if (auto *Res = GetShiftableDenom(Op1))
1750          I, Builder.CreateLShr(Op0, Res, I.getName(), I.isExact()));
1757                                 SQ.getWithInstruction(&I)))
1767    Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1768    Type *Ty = I.getType();
1784        return BinaryOperator::CreateExactAShr(Op0, C);
1790        return BinaryOperator::CreateExactAShr(Op0, ShAmt);
1796        Value *Ashr = Builder.CreateAShr(Op0, C, I.getName() + ".neg", true);
1817      Value *NarrowOp = Builder.CreateSDiv(Op0Src, NarrowDivisor);
1825      Constant *NegC = ConstantInt::get(Ty, -(*Op1C));
1836          Builder.CreateSDiv(X, Y, I.getName(), I.isExact()));
1859        auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
1860        BO->setIsExact(I.isExact());
1869        Value *Shr = Builder.CreateLShr(Op0, CNegLog2, I.getName(), I.isExact());
1878        auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
1879        BO->setIsExact(I.isExact());
1909    if (I.hasNoNaNs() &&
1914          Intrinsic::copysign, {C->getType()},
1923    if (!(C->hasExactInverseFP() || (I.hasAllowReciprocal() && C->isNormalFP())))
1931        Instruction::FDiv, ConstantFP::get(I.getType(), 1.0), C, DL);
1932    if (!RecipC || !RecipC->isNormalFP())
1952    if (!I.hasAllowReassoc() || !I.hasAllowReciprocal())
1977    Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1979    if (!II || !II->hasOneUse() || !I.hasAllowReassoc() ||
1980        !I.hasAllowReciprocal())
1990    case Intrinsic::pow:
1991      Args.push_back(II->getArgOperand(0));
1992      Args.push_back(Builder.CreateFNegFMF(II->getArgOperand(1), &I));
1994    case Intrinsic::powi: {
2002      Args.push_back(II->getArgOperand(0));
2003      Args.push_back(Builder.CreateNeg(II->getArgOperand(1)));
2004      Type *Tys[] = {I.getType(), II->getArgOperand(1)->getType()};
2005      Value *Pow = Builder.CreateIntrinsic(IID, Tys, Args, &I);
2008    case Intrinsic::exp:
2009    case Intrinsic::exp2:
2010      Args.push_back(Builder.CreateFNegFMF(II->getArgOperand(0), &I));
2015    Value *Pow = Builder.CreateIntrinsic(IID, I.getType(), Args, &I);
2024    if (!I.hasAllowReassoc() || !I.hasAllowReciprocal())
2026    Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2028    if (!II || II->getIntrinsicID() != Intrinsic::sqrt || !II->hasOneUse() ||
2029        !II->hasAllowReassoc() || !II->hasAllowReciprocal())
2038    if (!DivOp->hasAllowReassoc() || !I.hasAllowReciprocal() ||
2039        !DivOp->hasOneUse())
2041    Value *SwapDiv = Builder.CreateFDivFMF(Z, Y, DivOp);
2043        Builder.CreateUnaryIntrinsic(II->getIntrinsicID(), SwapDiv, II);
2066    B.SetInsertPoint(X);
2072        B.CreateFDiv(ConstantFP::get(X->getType(), 1.0), SqrtOp));
2073    auto *R1FPMathMDNode = (*R1.begin())->getMetadata(LLVMContext::MD_fpmath);
2077        R1FPMathMDNode, I->getMetadata(LLVMContext::MD_fpmath));
2078      R1FMF &= I->getFastMathFlags();
2082    FDiv->setMetadata(LLVMContext::MD_fpmath, R1FPMathMDNode);
2083    FDiv->copyFastMathFlags(R1FMF);
2090    auto *R2FPMathMDNode = (*R2.begin())->getMetadata(LLVMContext::MD_fpmath);
2094        R2FPMathMDNode, I->getMetadata(LLVMContext::MD_fpmath));
2095      R2FMF &= I->getFastMathFlags();
2099    FSqrt->setMetadata(LLVMContext::MD_fpmath, R2FPMathMDNode);
2100    FSqrt->copyFastMathFlags(R2FMF);
2109    FMul->copyMetadata(*X);
2138    Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2167    if (I.hasAllowReassoc() && I.hasAllowReciprocal()) {
2191    if (I.hasAllowReassoc() && Op0->hasOneUse() && Op1->hasOneUse()) {
2201      if ((IsTan || IsCot) && hasFloatFn(M, &TLI, I.getType(), LibFunc_tan,
2202                                         LibFunc_tanf, LibFunc_tanl)) {
2205        B.setFastMathFlags(I.getFastMathFlags());
2206        AttributeList Attrs =
2209                                   LibFunc_tanl, B, Attrs);
2211          Res = B.CreateFDiv(ConstantFP::get(I.getType(), 1.0), Res);
2220    if (I.hasNoNaNs() && I.hasAllowReassoc() &&
2229    if (I.hasNoNaNs() && I.hasNoInfs() &&
2233          Intrinsic::copysign, ConstantFP::get(I.getType(), 1.0), X, &I);
2244    if (I.hasAllowReassoc() &&
2248          Builder.CreateFAddFMF(Y, ConstantFP::get(I.getType(), -1.0), &I);
2249      Value *Pow = Builder.CreateBinaryIntrinsic(Intrinsic::pow, Op1, Y1, &I);
2267    Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1), *X = nullptr;
2269    bool ShiftByX = false;
2273                                 bool &PreserveNSW) -> bool {
2274      const APInt *Tmp = nullptr;
2293      const APInt *Tmp = nullptr;
2305    bool Op0PreserveNSW = true, Op1PreserveNSW = true;
2306    if (MatchShiftOrMulXC(Op0, X, Y, Op0PreserveNSW) &&
2307        MatchShiftOrMulXC(Op1, X, Z, Op1PreserveNSW)) {
2309    } else if (MatchShiftCX(Op0, Y, X) && MatchShiftCX(Op1, Z, X)) {
2315    bool IsSRem = I.getOpcode() == Instruction::SRem;
2322    bool BO0NoWrap = IsSRem ? BO0HasNSW : BO0HasNUW;
2324    APInt RemYZ = IsSRem ? Y.srem(Z) : Y.urem(Z);
2328    if (RemYZ.isZero() && BO0NoWrap)
2334    auto CreateMulOrShift =
2336      Value *RemSimplification =
2337          ConstantInt::get(I.getType(), RemSimplificationC);
2338      return ShiftByX ? BinaryOperator::CreateShl(RemSimplification, X)
2339                      : BinaryOperator::CreateMul(X, RemSimplification);
2345    bool BO1NoWrap = IsSRem ? BO1HasNSW : BO1HasNUW;
2349    if (RemYZ == Y && BO1NoWrap) {
2360    if (Y.uge(Z) && (IsSRem ? (BO0HasNSW && BO1HasNSW) : BO0HasNUW)) {
2378    Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2386    const APInt *Op1Int;
2388        (I.getOpcode() == Instruction::URem ||
2412                                 SQ.getWithInstruction(&I)))
2425    Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2426    Type *Ty = I.getType();
2432      return BinaryOperator::CreateAnd(Op0, Add);
2437      Value *Cmp = Builder.CreateICmpNE(Op1, ConstantInt::get(Ty, 1));
2458      Value *FrozenOp0 = Op0;
2460        FrozenOp0 = Builder.CreateFreeze(Op0, Op0->getName() + ".frozen");
2471      Value *FrozenOp0 = Op0;
2473        FrozenOp0 = Builder.CreateFreeze(Op0, Op0->getName() + ".frozen");
2484                                 SQ.getWithInstruction(&I)))
2494    Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2512      return BinaryOperator::CreateURem(Op0, Op1, I.getName());
2520      bool hasNegative = false;
2521      bool hasMissing = false;
2522      for (unsigned i = 0; i != VWidth; ++i) {
2523        Constant *Elt = C->getAggregateElement(i);
2530          if (RHS->isNegative())
2534      if (hasNegative && !hasMissing) {
2536        for (unsigned i = 0; i != VWidth; ++i) {
2537          Elts[i] = C->getAggregateElement(i);
2539            if (RHS->isNegative())
2555                                  I.getFastMathFlags(),
2556                                  SQ.getWithInstruction(&I)))
This file implements a class to represent arbitrary precision integral constant values and operations...
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file provides internal interfaces used to implement the InstCombine pass.
static Instruction * convertFSqrtDivIntoFMul(CallInst *CI, Instruction *X, const SmallPtrSetImpl< Instruction * > &R1, const SmallPtrSetImpl< Instruction * > &R2, InstCombiner::BuilderTy &B, InstCombinerImpl *IC)
static Instruction * simplifyIRemMulShl(BinaryOperator &I, InstCombinerImpl &IC)
static Instruction * narrowUDivURem(BinaryOperator &I, InstCombinerImpl &IC)
If we have zero-extended operands of an unsigned div or rem, we may be able to narrow the operation (...
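
As a standalone illustration of why this narrowing is sound (plain C++, not code from this file): an unsigned divide of zero-extended u8 operands gives the same result as dividing in u8 and zero-extending the quotient.

    #include <cassert>
    #include <cstdint>

    int main() {
      for (unsigned a = 0; a < 256; ++a) {
        for (unsigned b = 1; b < 256; ++b) {
          // zext(a) udiv zext(b), computed in 32 bits
          uint32_t Wide = static_cast<uint32_t>(a) / static_cast<uint32_t>(b);
          // zext(a udiv b), computed in 8 bits and then widened
          uint32_t Narrow = static_cast<uint8_t>(a) / static_cast<uint8_t>(b);
          assert(Wide == Narrow);
        }
      }
      return 0;
    }
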
static Value * simplifyValueKnownNonZero(Value *V, InstCombinerImpl &IC, Instruction &CxtI)
The specific integer value is used in a context where it is known to be non-zero.
static bool getFSqrtDivOptPattern(Instruction *Div, SmallPtrSetImpl< Instruction * > &R1, SmallPtrSetImpl< Instruction * > &R2)
static Value * foldMulSelectToNegate(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
static bool isFSqrtDivToFMulLegal(Instruction *X, SmallPtrSetImpl< Instruction * > &R1, SmallPtrSetImpl< Instruction * > &R2)
static Instruction * foldFDivPowDivisor(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
Negate the exponent of pow/exp to fold division-by-pow() into multiply.
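
The identity behind this fold, shown with libm for concreteness (an illustrative sketch only; the pass performs it under the reassoc/arcp fast-math flags, and floating-point results may differ by rounding): X / pow(B, E) becomes X * pow(B, -E).

    #include <cmath>
    #include <cstdio>

    int main() {
      double X = 5.0, B = 3.0, E = 2.5;
      double Div = X / std::pow(B, E);
      double Mul = X * std::pow(B, -E); // negated exponent turns the divide into a multiply
      std::printf("%.17g vs %.17g\n", Div, Mul);
      return 0;
    }
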
static bool multiplyOverflows(const APInt &C1, const APInt &C2, APInt &Product, bool IsSigned)
True if the multiply cannot be expressed in an int of this size.
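
A minimal sketch of this check, assuming only the APInt overflow helpers declared further down (smul_ov/umul_ov); it mirrors the line 1166 excerpt in the listing above.

    #include "llvm/ADT/APInt.h"
    using llvm::APInt;

    // Returns true when C1 * C2 wraps in its bit width; Product receives the
    // (possibly wrapped) result.
    static bool multiplyOverflowsSketch(const APInt &C1, const APInt &C2,
                                        APInt &Product, bool IsSigned) {
      bool Overflow = false;
      Product = IsSigned ? C1.smul_ov(C2, Overflow) : C1.umul_ov(C2, Overflow);
      return Overflow;
    }
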
static Value * foldMulShl1(BinaryOperator &Mul, bool CommuteOperands, InstCombiner::BuilderTy &Builder)
Reduce integer multiplication patterns that contain a (+/-1 << Z) factor.
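
The arithmetic identity this fold relies on, checked in plain C++ (illustrative only): X * ((1 << Z) + 1) equals (X << Z) + X, and X * ((1 << Z) - 1) equals (X << Z) - X, matching the shl+add and shl+sub sequences built at lines 169-170 and 182-183 of the listing.

    #include <cassert>
    #include <cstdint>

    int main() {
      for (uint32_t X = 0; X < 1000; ++X) {
        for (uint32_t Z = 0; Z < 8; ++Z) {
          assert(X * ((1u << Z) + 1u) == (X << Z) + X); // mul by (1 << Z) + 1
          assert(X * ((1u << Z) - 1u) == (X << Z) - X); // mul by (1 << Z) - 1
        }
      }
      return 0;
    }
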
static bool isMultiple(const APInt &C1, const APInt &C2, APInt &Quotient, bool IsSigned)
True if C1 is a multiple of C2. Quotient contains C1/C2.
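
A minimal sketch of the divisibility test described above, using the APInt::sdivrem/udivrem interface declared below; the real helper also guards signed corner cases (e.g. the minimum signed value), which this sketch omits.

    #include "llvm/ADT/APInt.h"
    using llvm::APInt;

    static bool isMultipleSketch(const APInt &C1, const APInt &C2,
                                 APInt &Quotient, bool IsSigned) {
      APInt Remainder(C1.getBitWidth(), 0);
      if (IsSigned)
        APInt::sdivrem(C1, C2, Quotient, Remainder);
      else
        APInt::udivrem(C1, C2, Quotient, Remainder);
      return Remainder.isZero(); // a multiple iff the division is exact
    }
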
static Instruction * foldFDivSqrtDivisor(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
Convert a division into a multiply when the divisor is a sqrt call whose operand is itself an fdiv instruction.
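
Numerically, the rewrite is X / sqrt(Y / Z) -> X * sqrt(Z / Y). A quick libm spot check (illustrative; the pass requires the reassoc/arcp flags, and rounding may make the two results differ slightly):

    #include <cmath>
    #include <cstdio>

    int main() {
      double X = 7.0, Y = 3.0, Z = 11.0;
      double Div = X / std::sqrt(Y / Z);
      double Mul = X * std::sqrt(Z / Y); // fdiv operands swapped inside the sqrt, div becomes mul
      std::printf("%.17g vs %.17g\n", Div, Mul);
      return 0;
    }
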
static Instruction * foldFDivConstantDividend(BinaryOperator &I)
Remove negation and try to reassociate constant math.
static Value * foldIDivShl(BinaryOperator &I, InstCombiner::BuilderTy &Builder)
This file provides the interface for the instcombine pass implementation.
static bool hasNoSignedWrap(BinaryOperator &I)
static bool hasNoUnsignedWrap(BinaryOperator &I)
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
Class for arbitrary precision integers.
LLVM_ABI APInt umul_ov(const APInt &RHS, bool &Overflow) const
LLVM_ABI APInt udiv(const APInt &RHS) const
Unsigned division operation.
static LLVM_ABI void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Dual division/remainder interface.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
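
This predicate (and its isNotMinSignedValue counterpart used at line 294 of the listing) matters because negating the minimum signed value wraps back to itself, so no-signed-wrap cannot be preserved. A small APInt check, for illustration:

    #include "llvm/ADT/APInt.h"
    #include <cassert>
    using llvm::APInt;

    int main() {
      APInt Min = APInt::getSignedMinValue(8); // i8 -128
      assert((-Min) == Min);                   // -(-128) wraps back to -128 in 8 bits
      return 0;
    }
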
uint64_t getZExtValue() const
Get zero extended value.
static LLVM_ABI void sdivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
bool isMinValue() const
Determine if this is the smallest unsigned value.
unsigned countr_zero() const
Count the number of trailing zero bits.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
LLVM_ABI APInt ushl_ov(const APInt &Amt, bool &Overflow) const
unsigned getSignificantBits() const
Get the minimum bit size for this signed APInt.
unsigned logBase2() const
LLVM_ABI APInt smul_ov(const APInt &RHS, bool &Overflow) const
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
LLVM Basic Block Representation.
const Function * getParent() const
Return the enclosing method, or null if none.
InstListType::iterator iterator
Instruction iterators...
static BinaryOperator * CreateFAddFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
static LLVM_ABI BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
BinaryOps getOpcode() const
static BinaryOperator * CreateExact(BinaryOps Opc, Value *V1, Value *V2, const Twine &Name="")
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
static BinaryOperator * CreateFMulFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
static BinaryOperator * CreateFDivFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
static BinaryOperator * CreateFSubFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
static BinaryOperator * CreateWithCopiedFlags(BinaryOps Opc, Value *V1, Value *V2, Value *CopyO, const Twine &Name="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI BinaryOperator * CreateNSWNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Value * getArgOperand(unsigned i) const
This class represents a function call, abstracting a target machine's calling convention.
static LLVM_ABI CastInst * CreateZExtOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt or BitCast cast instruction.
static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
@ ICMP_ULT
unsigned less than
static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)
static LLVM_ABI Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getExactLogBase2(Constant *C)
If C is a scalar/fixed width vector of known powers of 2, then this function returns a new scalar/fix...
static LLVM_ABI Constant * getInfinity(Type *Ty, bool Negative=false)
This is the shared class of boolean and integer constants.
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
static LLVM_ABI Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
LLVM_ABI bool isNormalFP() const
Return true if this is a normal (as opposed to denormal, infinity, nan, or zero) floating-point scala...
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
LLVM_ABI bool isNotMinSignedValue() const
Return true if the value is not the smallest signed value, or, for vectors, does not contain smallest...
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
A parsed version of the target data layout string in and methods for querying it.
Convenience struct for specifying and reasoning about fast-math flags.
static FastMathFlags intersectRewrite(FastMathFlags LHS, FastMathFlags RHS)
Intersect rewrite-based flags.
static FastMathFlags unionValue(FastMathFlags LHS, FastMathFlags RHS)
Union value flags.
bool allowReassoc() const
Flag queries.
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Instruction * visitMul(BinaryOperator &I)
Instruction * foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I)
Tries to simplify binops of select and cast of the select condition.
Instruction * foldBinOpIntoSelectOrPhi(BinaryOperator &I)
This is a convenience wrapper function for the above two functions.
Instruction * visitUDiv(BinaryOperator &I)
bool SimplifyAssociativeOrCommutative(BinaryOperator &I)
Performs a few simplifications for operators which are associative or commutative.
Value * foldUsingDistributiveLaws(BinaryOperator &I)
Tries to simplify binary operations which some other binary operation distributes over.
Instruction * visitURem(BinaryOperator &I)
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...
InstCombinerImpl(InstructionWorklist &Worklist, BuilderTy &Builder, Function &F, AAResults *AA, AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI, DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI, BranchProbabilityInfo *BPI, ProfileSummaryInfo *PSI, const DataLayout &DL, ReversePostOrderTraversal< BasicBlock * > &RPOT)
Value * takeLog2(Value *Op, unsigned Depth, bool AssumeNonZero, bool DoFold)
Take the exact integer log2 of the value.
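
The payoff of recovering an exact log2: an unsigned divide by a power of two is a logical right shift by that log2. A plain C++ spot check (illustrative only):

    #include <cassert>
    #include <cstdint>

    int main() {
      for (uint32_t X = 0; X < 4096; ++X)
        for (uint32_t K = 0; K < 12; ++K)
          assert(X / (1u << K) == (X >> K)); // udiv by 2^K == lshr by K
      return 0;
    }
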
Instruction * visitSRem(BinaryOperator &I)
Instruction * foldBinOpSelectBinOp(BinaryOperator &Op)
In some cases it is beneficial to fold a select into a binary operator.
Instruction * visitFDiv(BinaryOperator &I)
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false, bool SimplifyBothArms=false)
Given an instruction with a select as one operand and a constant as the other operand,...
bool simplifyDivRemOfSelectWithZeroOp(BinaryOperator &I)
Fold a divide or remainder with a select instruction divisor when one of the select operands is zero.
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * commonIDivRemTransforms(BinaryOperator &I)
Common integer divide/remainder transforms.
Value * tryGetLog2(Value *Op, bool AssumeNonZero)
Instruction * commonIDivTransforms(BinaryOperator &I)
This function implements the transforms common to both integer division instructions (udiv and sdiv).
Instruction * foldBinopWithPhiOperands(BinaryOperator &BO)
For a binary operator with 2 phi operands, try to hoist the binary operation before the phi.
Instruction * visitFRem(BinaryOperator &I)
bool SimplifyDemandedInstructionBits(Instruction &Inst)
Tries to simplify operands to an integer instruction based on its demanded bits.
Instruction * visitFMul(BinaryOperator &I)
Instruction * foldFMulReassoc(BinaryOperator &I)
Instruction * foldVectorBinop(BinaryOperator &Inst)
Canonicalize the position of binops relative to shufflevector.
Value * SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS, Value *RHS)
Instruction * foldPowiReassoc(BinaryOperator &I)
Instruction * visitSDiv(BinaryOperator &I)
Instruction * commonIRemTransforms(BinaryOperator &I)
This function implements the transforms common to both integer remainder instructions (urem and srem)...
const DataLayout & getDataLayout() const
IRBuilder< TargetFolder, IRBuilderCallbackInserter > BuilderTy
An IRBuilder that automatically inserts new instructions into the worklist.
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
void replaceUse(Use &U, Value *NewValue)
Replace use and add the previously used value to the worklist.
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
void computeKnownBits(const Value *V, KnownBits &Known, const Instruction *CxtI, unsigned Depth=0) const
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
bool MaskedValueIsZero(const Value *V, const APInt &Mask, const Instruction *CxtI=nullptr, unsigned Depth=0) const
bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero=false, const Instruction *CxtI=nullptr, unsigned Depth=0)
LLVM_ABI Instruction * clone() const
Create a copy of 'this' instruction that is identical in all ways except the following:
LLVM_ABI void setHasNoUnsignedWrap(bool b=true)
Set or clear the nuw flag on this instruction, which must be an operator which supports this flag.
LLVM_ABI bool hasNoNaNs() const LLVM_READONLY
Determine whether the no-NaNs flag is set.
LLVM_ABI bool hasNoInfs() const LLVM_READONLY
Determine whether the no-infs flag is set.
LLVM_ABI bool hasNoSignedZeros() const LLVM_READONLY
Determine whether the no-signed-zeros flag is set.
LLVM_ABI bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
LLVM_ABI void setHasNoSignedWrap(bool b=true)
Set or clear the nsw flag on this instruction, which must be an operator which supports this flag.
LLVM_ABI bool isExact() const LLVM_READONLY
Determine whether the exact flag is set.
LLVM_ABI FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
LLVM_ABI void setIsExact(bool b=true)
Set or clear the exact flag on this instruction, which must be an operator which supports this flag.
LLVM_ABI bool hasAllowReassoc() const LLVM_READONLY
Determine whether the allow-reassociation flag is set.
A wrapper class for inspecting calls to intrinsic functions.
static LLVM_ABI MDNode * getMostGenericFPMath(MDNode *A, MDNode *B)
A Module instance is used to store all the information related to an LLVM module.
static Value * Negate(bool LHSIsZero, bool IsNSW, Value *Root, InstCombinerImpl &IC)
Attempt to negate Root.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
This class represents a sign extension of integer types.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, const Instruction *MDFrom=nullptr)
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
static UnaryOperator * CreateFNegFMF(Value *Op, Instruction *FMFSource, const Twine &Name="", InsertPosition InsertBefore=nullptr)
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
iterator_range< user_iterator > users()
LLVM_ABI bool hasNUses(unsigned N) const
Return true if this Value has exactly N uses.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
This class represents zero extension of integer types.
An efficient, type-erasing, non-owning reference to a callable.
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
cst_pred_ty< is_negative > m_Negative()
Match an integer or vector of negative values.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
BinaryOp_match< LHS, RHS, Instruction::FMul, true > m_c_FMul(const LHS &L, const RHS &R)
Matches FMul with LHS and RHS in either order.
cst_pred_ty< is_sign_mask > m_SignMask()
Match an integer or vector with only the sign bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::FSub > m_FSub(const LHS &L, const RHS &R)
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
BinaryOp_match< LHS, RHS, Instruction::URem > m_URem(const LHS &L, const RHS &R)
CommutativeBinaryIntrinsic_match< IntrID, T0, T1 > m_c_Intrinsic(const T0 &Op0, const T1 &Op1)
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
AllowReassoc_match< T > m_AllowReassoc(const T &SubPattern)
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
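
Typical use of the matchers listed here (a usage sketch with a hypothetical helper name, not code from this file): bind a multiply's constant operand to an APInt pointer and the other operand to a Value pointer.

    #include "llvm/ADT/APInt.h"
    #include "llvm/IR/PatternMatch.h"
    #include "llvm/IR/Value.h"
    using namespace llvm;
    using namespace llvm::PatternMatch;

    // Matches "mul X, C" where C is a ConstantInt or splat vector constant.
    static bool matchMulByConstant(Value *V, Value *&X, const APInt *&C) {
      return match(V, m_Mul(m_Value(X), m_APInt(C)));
    }
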
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
ap_match< APInt > m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
BinaryOp_match< LHS, RHS, Instruction::FMul > m_FMul(const LHS &L, const RHS &R)
bool match(Val *V, const Pattern &P)
cstfp_pred_ty< is_any_zero_fp > m_AnyZeroFP()
Match a floating-point negative zero or positive zero.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
specific_intval< true > m_SpecificIntAllowPoison(const APInt &V)
ap_match< APFloat > m_APFloatAllowPoison(const APFloat *&Res)
Match APFloat while allowing poison in splat vector constants.
OverflowingBinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWNeg(const ValTy &V)
Matches a 'Neg' as 'sub nsw 0, V'.
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
specific_fpval m_SpecificFP(double V)
Match a specific floating point value or vector with all elements equal to the value.
m_Intrinsic_Ty< Opnd0 >::Ty m_Sqrt(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::FAdd > m_FAdd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoSignedWrap > m_NSWShl(const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWShl(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWMul(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
cst_pred_ty< is_negated_power2 > m_NegatedPower2()
Match a integer or vector negated power-of-2.
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
cst_pred_ty< custom_checkfn< APInt > > m_CheckedInt(function_ref< bool(const APInt &)> CheckFn)
Match an integer or vector where CheckFn(ele) for each element is true.
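
A usage sketch in the spirit of the UDivCheck lambda at line 401 of the listing (hypothetical wrapper name): accept only constants that are an exact multiple of *C1.

    #include "llvm/ADT/APInt.h"
    #include "llvm/IR/PatternMatch.h"
    #include "llvm/IR/Value.h"
    using namespace llvm;
    using namespace llvm::PatternMatch;

    static bool matchesMultipleOf(Value *V, const APInt *C1) {
      // Each element (or the scalar) must satisfy the predicate.
      return match(V, m_CheckedInt(
                          [C1](const APInt &C) { return C.urem(*C1).isZero(); }));
    }
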
specific_fpval m_FPOne()
Match a float 1.0 or vector with all elements equal to 1.0.
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap >, DisjointOr_match< LHS, RHS > > m_NSWAddLike(const LHS &L, const RHS &R)
Match either "add nsw" or "or disjoint".
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
Exact_match< T > m_Exact(const T &SubPattern)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
cstfp_pred_ty< is_pos_zero_fp > m_PosZeroFP()
Match a floating-point positive zero.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::FDiv > m_FDiv(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap >, DisjointOr_match< LHS, RHS > > m_NUWAddLike(const LHS &L, const RHS &R)
Match either "add nuw" or "or disjoint".
m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Mul, true > m_c_Mul(const LHS &L, const RHS &R)
Matches a Mul with LHS and RHS in either order.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoSignedWrap > m_NSWMul(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI Value * emitUnaryFloatFnCall(Value *Op, const TargetLibraryInfo *TLI, StringRef Name, IRBuilderBase &B, const AttributeList &Attrs)
Emit a call to the unary function named 'Name' (e.g.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI Value * simplifyFMulInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FMul, fold the result or return null.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI Value * simplifySDivInst(Value *LHS, Value *RHS, bool IsExact, const SimplifyQuery &Q)
Given operands for an SDiv, fold the result or return null.
LLVM_ABI Value * simplifyMulInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for a Mul, fold the result or return null.
LLVM_ABI bool hasFloatFn(const Module *M, const TargetLibraryInfo *TLI, Type *Ty, LibFunc DoubleFn, LibFunc FloatFn, LibFunc LongDoubleFn)
Check whether the overloaded floating point function corresponding to Ty is available.
LLVM_ABI bool isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be undef, but may be poison.
LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
constexpr unsigned MaxAnalysisRecursionDepth
LLVM_ABI Constant * ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op, const DataLayout &DL)
Attempt to constant fold a unary operation with the specified operand.
LLVM_ABI Constant * getLosslessUnsignedTrunc(Constant *C, Type *DestTy, const DataLayout &DL, PreservedCastFlags *Flags=nullptr)
LLVM_ABI Value * simplifyFRemInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FRem, fold the result or return null.
LLVM_ABI Value * simplifyICmpInst(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an ICmpInst, fold the result or return null.
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
LLVM_ABI Value * simplifyFDivInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FDiv, fold the result or return null.
@ Mul
Product of integers.
@ Sub
Subtraction of integers.
LLVM_ABI Value * simplifyUDivInst(Value *LHS, Value *RHS, bool IsExact, const SimplifyQuery &Q)
Given operands for a UDiv, fold the result or return null.
DWARFExpression::Operation Op
constexpr unsigned BitWidth
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI bool isKnownNeverNaN(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point scalar value is not a NaN or if the floating-point vector value has...
LLVM_ABI Value * simplifySRemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an SRem, fold the result or return null.
unsigned Log2(Align A)
Returns the log2 of the alignment.
LLVM_ABI bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW=false, bool AllowPoison=true)
Return true if the two given values are negation.
LLVM_ABI bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the give value is known to be non-negative.
LLVM_ABI Value * simplifyURemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a URem, fold the result or return null.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
bool isNonNegative() const
Returns true if this value is known to be non-negative.
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.