46#include "llvm/IR/IntrinsicsAArch64.h"
47#include "llvm/IR/IntrinsicsAMDGPU.h"
48#include "llvm/IR/IntrinsicsARM.h"
49#include "llvm/IR/IntrinsicsHexagon.h"
78#define DEBUG_TYPE "instcombine"
82using namespace PatternMatch;
84STATISTIC(NumSimplified,
"Number of library calls simplified");
87 "instcombine-guard-widening-window",
89 cl::desc(
"How wide an instruction window to bypass looking for "
96 if (ITy->getBitWidth() < 32)
106 auto *Src =
MI->getRawSource();
107 while (isa<GetElementPtrInst>(Src)) {
108 if (!Src->hasOneUse())
110 Src = cast<Instruction>(Src)->getOperand(0);
112 return isa<AllocaInst>(Src) && Src->hasOneUse();
118 if (!CopyDstAlign || *CopyDstAlign < DstAlign) {
119 MI->setDestAlignment(DstAlign);
125 if (!CopySrcAlign || *CopySrcAlign < SrcAlign) {
126 MI->setSourceAlignment(SrcAlign);
149 ConstantInt *MemOpLength = dyn_cast<ConstantInt>(
MI->getLength());
150 if (!MemOpLength)
return nullptr;
157 assert(
Size &&
"0-sized memory transferring should be removed already.");
166 if (isa<AtomicMemTransferInst>(
MI))
167 if (*CopyDstAlign <
Size || *CopySrcAlign <
Size)
177 Value *Src =
MI->getArgOperand(1);
178 Value *Dest =
MI->getArgOperand(0);
181 L->setAlignment(*CopySrcAlign);
182 L->setAAMetadata(AACopyMD);
183 MDNode *LoopMemParallelMD =
184 MI->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
185 if (LoopMemParallelMD)
186 L->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
187 MDNode *AccessGroupMD =
MI->getMetadata(LLVMContext::MD_access_group);
189 L->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);
195 if (LoopMemParallelMD)
196 S->
setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
198 S->
setMetadata(LLVMContext::MD_access_group, AccessGroupMD);
201 if (
auto *MT = dyn_cast<MemTransferInst>(
MI)) {
203 L->setVolatile(MT->isVolatile());
206 if (isa<AtomicMemTransferInst>(
MI)) {
218 const Align KnownAlignment =
221 if (!MemSetAlign || *MemSetAlign < KnownAlignment) {
222 MI->setDestAlignment(KnownAlignment);
238 if (isa<UndefValue>(
MI->getValue())) {
250 assert(Len &&
"0-sized memory setting should be removed already.");
251 const Align Alignment =
MI->getDestAlign().valueOrOne();
257 if (isa<AtomicMemSetInst>(
MI))
266 Constant *FillVal = ConstantInt::get(
270 auto replaceOpForAssignmentMarkers = [FillC, FillVal](
auto *DbgAssign) {
272 DbgAssign->replaceVariableLocationOp(FillC, FillVal);
278 if (isa<AtomicMemSetInst>(
MI))
292 Value *LoadPtr =
II.getArgOperand(0);
293 const Align Alignment =
294 cast<ConstantInt>(
II.getArgOperand(1))->getAlignValue();
308 II.getDataLayout(), &
II, &
AC)) {
322 auto *ConstMask = dyn_cast<Constant>(
II.getArgOperand(3));
327 if (ConstMask->isNullValue())
331 if (ConstMask->isAllOnesValue()) {
332 Value *StorePtr =
II.getArgOperand(1);
333 Align Alignment = cast<ConstantInt>(
II.getArgOperand(2))->getAlignValue();
335 new StoreInst(
II.getArgOperand(0), StorePtr,
false, Alignment);
340 if (isa<ScalableVectorType>(ConstMask->getType()))
360 auto *ConstMask = dyn_cast<Constant>(
II.getArgOperand(2));
367 if (ConstMask->isAllOnesValue())
369 auto *VecTy = cast<VectorType>(
II.getType());
370 const Align Alignment =
371 cast<ConstantInt>(
II.getArgOperand(1))->getAlignValue();
373 Alignment,
"load.scalar");
388 auto *ConstMask = dyn_cast<Constant>(
II.getArgOperand(3));
393 if (ConstMask->isNullValue())
402 cast<ConstantInt>(
II.getArgOperand(2))->getAlignValue();
411 if (ConstMask->isAllOnesValue()) {
412 Align Alignment = cast<ConstantInt>(
II.getArgOperand(2))->getAlignValue();
413 VectorType *WideLoadTy = cast<VectorType>(
II.getArgOperand(1)->getType());
420 new StoreInst(Extract, SplatPtr,
false, Alignment);
425 if (isa<ScalableVectorType>(ConstMask->getType()))
451 auto *Arg =
II.getArgOperand(0);
452 auto *StrippedArg = Arg->stripPointerCasts();
453 auto *StrippedInvariantGroupsArg = StrippedArg;
454 while (
auto *
Intr = dyn_cast<IntrinsicInst>(StrippedInvariantGroupsArg)) {
455 if (
Intr->getIntrinsicID() != Intrinsic::launder_invariant_group &&
456 Intr->getIntrinsicID() != Intrinsic::strip_invariant_group)
458 StrippedInvariantGroupsArg =
Intr->getArgOperand(0)->stripPointerCasts();
460 if (StrippedArg == StrippedInvariantGroupsArg)
463 Value *Result =
nullptr;
465 if (
II.getIntrinsicID() == Intrinsic::launder_invariant_group)
467 else if (
II.getIntrinsicID() == Intrinsic::strip_invariant_group)
471 "simplifyInvariantGroupIntrinsic only handles launder and strip");
472 if (Result->getType()->getPointerAddressSpace() !=
473 II.getType()->getPointerAddressSpace())
476 return cast<Instruction>(Result);
480 assert((
II.getIntrinsicID() == Intrinsic::cttz ||
481 II.getIntrinsicID() == Intrinsic::ctlz) &&
482 "Expected cttz or ctlz intrinsic");
483 bool IsTZ =
II.getIntrinsicID() == Intrinsic::cttz;
484 Value *Op0 =
II.getArgOperand(0);
485 Value *Op1 =
II.getArgOperand(1);
496 if (
II.getType()->isIntOrIntVectorTy(1)) {
509 II.dropUBImplyingAttrsAndMetadata();
556 return BinaryOperator::CreateAdd(ConstCttz,
X);
564 return BinaryOperator::CreateSub(ConstCttz,
X);
570 ConstantInt::get(
II.getType(),
II.getType()->getScalarSizeInBits());
571 return BinaryOperator::CreateSub(Width,
X);
579 return BinaryOperator::CreateAdd(ConstCtlz,
X);
587 return BinaryOperator::CreateSub(ConstCtlz,
X);
597 ConstantInt::get(R->getType(), R->getType()->getScalarSizeInBits() - 1),
616 if (PossibleZeros == DefiniteZeros) {
617 auto *
C = ConstantInt::get(Op0->
getType(), DefiniteZeros);
632 if (
BitWidth != 1 && !
II.hasRetAttr(Attribute::Range) &&
633 !
II.getMetadata(LLVMContext::MD_range)) {
644 assert(
II.getIntrinsicID() == Intrinsic::ctpop &&
645 "Expected ctpop intrinsic");
648 Value *Op0 =
II.getArgOperand(0);
694 if ((~Known.
Zero).isPowerOf2())
695 return BinaryOperator::CreateLShr(
696 Op0, ConstantInt::get(Ty, (~Known.
Zero).exactLogBase2()));
710 II.getRange().value_or(ConstantRange::getFull(
BitWidth));
722 if (
Range != OldRange) {
738 auto *
C = dyn_cast<Constant>(
II.getArgOperand(1));
742 auto *VecTy = cast<FixedVectorType>(
II.getType());
743 unsigned NumElts = VecTy->getNumElements();
746 if (!VecTy->getElementType()->isIntegerTy(8) || NumElts != 8)
751 for (
unsigned I = 0;
I < NumElts; ++
I) {
754 if (!COp || !isa<ConstantInt>(COp))
757 Indexes[
I] = cast<ConstantInt>(COp)->getLimitedValue();
760 if ((
unsigned)Indexes[
I] >= NumElts)
764 auto *V1 =
II.getArgOperand(0);
772 unsigned NumOperands) {
773 assert(
I.arg_size() >= NumOperands &&
"Not enough operands");
775 for (
unsigned i = 0; i < NumOperands; i++)
797 for (; BI != BE; ++BI) {
798 if (
auto *
I = dyn_cast<IntrinsicInst>(&*BI)) {
799 if (
I->isDebugOrPseudoInst() ||
820 return I.getIntrinsicID() == Intrinsic::vastart ||
821 I.getIntrinsicID() == Intrinsic::vacopy;
827 assert(Call.arg_size() > 1 &&
"Need at least 2 args to swap");
828 Value *Arg0 = Call.getArgOperand(0), *Arg1 = Call.getArgOperand(1);
829 if (isa<Constant>(Arg0) && !isa<Constant>(Arg1)) {
830 Call.setArgOperand(0, Arg1);
831 Call.setArgOperand(1, Arg0);
850 Value *OperationResult =
nullptr;
873 if (
auto *Inst = dyn_cast<Instruction>(Result)) {
875 Inst->setHasNoSignedWrap();
877 Inst->setHasNoUnsignedWrap();
902 switch (
static_cast<unsigned>(Mask)) {
943 case ~fcZero & ~fcNan:
959 Value *Src0 =
II.getArgOperand(0);
960 Value *Src1 =
II.getArgOperand(1);
961 const ConstantInt *CMask = cast<ConstantInt>(Src1);
966 const FPClassTest OrderedInvertedMask = ~OrderedMask & ~fcNan;
968 const bool IsStrict =
969 II.getFunction()->getAttributes().hasFnAttr(Attribute::StrictFP);
975 II.setArgOperand(1, ConstantInt::get(Src1->
getType(),
fneg(Mask)));
985 if ((OrderedMask ==
fcInf || OrderedInvertedMask ==
fcInf) &&
986 (IsOrdered || IsUnordered) && !IsStrict) {
994 if (OrderedInvertedMask ==
fcInf)
1004 (IsOrdered || IsUnordered) && !IsStrict) {
1018 if ((OrderedInvertedMask ==
fcPosInf || OrderedInvertedMask ==
fcNegInf) &&
1019 (IsOrdered || IsUnordered) && !IsStrict) {
1032 if (Mask ==
fcNan && !IsStrict) {
1064 if (!IsStrict && (IsOrdered || IsUnordered) &&
1109 return std::nullopt;
1121 return std::nullopt;
1133 return *Known0 == *Known1;
1141 assert((MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin ||
1142 MinMaxID == Intrinsic::umax || MinMaxID == Intrinsic::umin) &&
1143 "Expected a min or max intrinsic");
1146 Value *Op0 =
II->getArgOperand(0), *Op1 =
II->getArgOperand(1);
1148 const APInt *C0, *C1;
1154 bool IsSigned = MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin;
1155 auto *
Add = cast<BinaryOperator>(Op0);
1156 if ((IsSigned && !
Add->hasNoSignedWrap()) ||
1157 (!IsSigned && !
Add->hasNoUnsignedWrap()))
1164 IsSigned ? C1->
ssub_ov(*C0, Overflow) : C1->
usub_ov(*C0, Overflow);
1165 assert(!Overflow &&
"Expected simplify of min/max");
1169 Constant *NewMinMaxC = ConstantInt::get(
II->getType(), CDiff);
1171 return IsSigned ? BinaryOperator::CreateNSWAdd(NewMinMax,
Add->getOperand(1))
1172 : BinaryOperator::CreateNUWAdd(NewMinMax,
Add->getOperand(1));
1183 const APInt *MinValue, *MaxValue;
1187 }
else if (
match(&MinMax1,
1196 if (!(*MaxValue + 1).isPowerOf2() || -*MinValue != *MaxValue + 1)
1199 unsigned NewBitWidth = (*MaxValue + 1).logBase2() + 1;
1213 if (
AddSub->getOpcode() == Instruction::Add)
1214 IntrinsicID = Intrinsic::sadd_sat;
1215 else if (
AddSub->getOpcode() == Instruction::Sub)
1216 IntrinsicID = Intrinsic::ssub_sat;
1240 Value *I0 =
II->getArgOperand(0), *I1 =
II->getArgOperand(1);
1242 const APInt *C0, *C1;
1247 switch (
II->getIntrinsicID()) {
1248 case Intrinsic::smax:
1252 case Intrinsic::smin:
1256 case Intrinsic::umax:
1260 case Intrinsic::umin:
1282 auto *
LHS = dyn_cast<MinMaxIntrinsic>(
II->getArgOperand(0));
1296 if (InnerMinMaxID != MinMaxID &&
1297 !(((MinMaxID == Intrinsic::umax && InnerMinMaxID == Intrinsic::smax) ||
1298 (MinMaxID == Intrinsic::smin && InnerMinMaxID == Intrinsic::umin)) &&
1306 {LHS->getArgOperand(0), NewC});
1326 auto *InnerMM = dyn_cast<IntrinsicInst>(Inner);
1327 if (!InnerMM || InnerMM->getIntrinsicID() != MinMaxID ||
1333 MinMaxID,
II->getType());
1342 auto *
LHS = dyn_cast<IntrinsicInst>(
II->getArgOperand(0));
1343 auto *
RHS = dyn_cast<IntrinsicInst>(
II->getArgOperand(1));
1345 if (!
LHS || !
RHS ||
LHS->getIntrinsicID() != MinMaxID ||
1346 RHS->getIntrinsicID() != MinMaxID ||
1356 Value *MinMaxOp =
nullptr;
1357 Value *ThirdOp =
nullptr;
1361 if (
D ==
A ||
C ==
A) {
1366 }
else if (
D ==
B ||
C ==
B) {
1375 if (
D ==
A ||
D ==
B) {
1380 }
else if (
C ==
A ||
C ==
B) {
1388 if (!MinMaxOp || !ThirdOp)
1405 switch (
II->getIntrinsicID()) {
1406 case Intrinsic::smax:
1407 case Intrinsic::smin:
1408 case Intrinsic::umax:
1409 case Intrinsic::umin:
1410 case Intrinsic::fma:
1411 case Intrinsic::fshl:
1412 case Intrinsic::fshr:
1420 if (!
match(
II->getArgOperand(0),
1425 if (
none_of(
II->args(), [](
Value *V) { return V->hasOneUse(); }))
1431 Type *SrcTy =
X->getType();
1432 for (
unsigned i = 1, e =
II->arg_size(); i != e; ++i) {
1433 if (!
match(
II->getArgOperand(i),
1435 X->getType() != SrcTy)
1442 Value *NewIntrinsic =
1450template <Intrinsic::ID IntrID>
1453 static_assert(IntrID == Intrinsic::bswap || IntrID == Intrinsic::bitreverse,
1454 "This helper only supports BSWAP and BITREVERSE intrinsics");
1460 isa<BinaryOperator>(V)) {
1461 Value *OldReorderX, *OldReorderY;
1487 if (!CanReorderLanes)
1495 if (!isa<FixedVectorType>(Arg->
getType()) ||
1497 !cast<ShuffleVectorInst>(Arg)->isSingleSource())
1500 int Sz = Mask.size();
1502 for (
int Idx : Mask) {
1510 return UsedIndices.
all() ? V :
nullptr;
1517template <Intrinsic::ID IntrID>
1522 static_assert(IntrID == Intrinsic::cttz || IntrID == Intrinsic::ctlz,
1523 "This helper only supports cttz and ctlz intrinsics");
1531 unsigned BitWidth = I1->getType()->getScalarSizeInBits();
1538 Type *Ty = I1->getType();
1540 IntrID == Intrinsic::cttz ? Instruction::Shl : Instruction::LShr,
1541 IntrID == Intrinsic::cttz
1542 ? ConstantInt::get(Ty, 1)
1544 cast<Constant>(I1),
DL);
1546 IntrID, Builder.
CreateOr(CtOp, NewConst),
1555 case Intrinsic::umax:
1556 case Intrinsic::umin:
1557 return HasNUW && LOp == Instruction::Add;
1558 case Intrinsic::smax:
1559 case Intrinsic::smin:
1560 return HasNSW && LOp == Instruction::Add;
1596 "Only inner and outer commutative op codes are supported.");
1604 if (
A !=
C &&
A !=
D)
1606 if (
A ==
C ||
A ==
D) {
1611 cast<BinaryOperator>(Builder.
CreateBinOp(InnerOpcode, NewIntrinsic,
A));
1644 if (!
II)
return visitCallBase(CI);
1648 if (
auto *AMI = dyn_cast<AtomicMemIntrinsic>(
II))
1649 if (
ConstantInt *NumBytes = dyn_cast<ConstantInt>(AMI->getLength()))
1650 if (NumBytes->isNegative() ||
1651 (NumBytes->getZExtValue() % AMI->getElementSizeInBytes() != 0)) {
1653 assert(AMI->getType()->isVoidTy() &&
1654 "non void atomic unordered mem intrinsic");
1660 if (
auto *
MI = dyn_cast<AnyMemIntrinsic>(
II)) {
1661 bool Changed =
false;
1664 if (
Constant *NumBytes = dyn_cast<Constant>(
MI->getLength())) {
1665 if (NumBytes->isNullValue())
1670 if (
auto *M = dyn_cast<MemIntrinsic>(
MI))
1671 if (M->isVolatile())
1677 if (
auto *MMI = dyn_cast<AnyMemMoveInst>(
MI)) {
1678 if (
GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
1679 if (GVSrc->isConstant()) {
1682 isa<AtomicMemMoveInst>(MMI)
1683 ? Intrinsic::memcpy_element_unordered_atomic
1684 : Intrinsic::memcpy;
1696 if (MTI->getSource() == MTI->getDest())
1701 return isa<ConstantPointerNull>(
Ptr) &&
1704 cast<PointerType>(
Ptr->getType())->getAddressSpace());
1706 bool SrcIsUndefined =
false;
1709 if (
auto *MTI = dyn_cast<AnyMemTransferInst>(
MI)) {
1712 SrcIsUndefined = IsPointerUndefined(MTI->getRawSource());
1713 }
else if (
auto *MSI = dyn_cast<AnyMemSetInst>(
MI)) {
1719 if (SrcIsUndefined || IsPointerUndefined(
MI->getRawDest())) {
1724 if (Changed)
return II;
1729 if (
auto *IIFVTy = dyn_cast<FixedVectorType>(
II->getType())) {
1730 auto VWidth = IIFVTy->getNumElements();
1731 APInt PoisonElts(VWidth, 0);
1740 if (
II->isCommutative()) {
1741 if (
auto Pair = matchSymmetricPair(
II->getOperand(0),
II->getOperand(1))) {
1755 if (CI.
use_empty() && isa<ConstrainedFPIntrinsic>(CI)) {
1762 case Intrinsic::objectsize: {
1765 &InsertedInstructions)) {
1766 for (
Instruction *Inserted : InsertedInstructions)
1772 case Intrinsic::abs: {
1773 Value *IIOperand =
II->getArgOperand(0);
1774 bool IntMinIsPoison = cast<Constant>(
II->getArgOperand(1))->isOneValue();
1786 if (
match(IIOperand,
1788 m_Intrinsic<Intrinsic::abs>(
m_Value(
Y)))))) {
1790 cast<Instruction>(IIOperand)->hasNoSignedWrap() && IntMinIsPoison;
1795 if (std::optional<bool> Known =
1821 return BinaryOperator::CreateAnd(
X, ConstantInt::get(
II->getType(), 1));
1825 case Intrinsic::umin: {
1826 Value *I0 =
II->getArgOperand(0), *I1 =
II->getArgOperand(1);
1829 assert(
II->getType()->getScalarSizeInBits() != 1 &&
1830 "Expected simplify of umin with max constant");
1836 if (
Value *FoldedCttz =
1837 foldMinimumOverTrailingOrLeadingZeroCount<Intrinsic::cttz>(
1841 if (
Value *FoldedCtlz =
1842 foldMinimumOverTrailingOrLeadingZeroCount<Intrinsic::ctlz>(
1847 case Intrinsic::umax: {
1848 Value *I0 =
II->getArgOperand(0), *I1 =
II->getArgOperand(1);
1851 (I0->
hasOneUse() || I1->hasOneUse()) &&
X->getType() ==
Y->getType()) {
1867 case Intrinsic::smax:
1868 case Intrinsic::smin: {
1869 Value *I0 =
II->getArgOperand(0), *I1 =
II->getArgOperand(1);
1872 (I0->
hasOneUse() || I1->hasOneUse()) &&
X->getType() ==
Y->getType()) {
1888 if ((IID == Intrinsic::umin || IID == Intrinsic::smax) &&
1889 II->getType()->isIntOrIntVectorTy(1)) {
1890 return BinaryOperator::CreateAnd(I0, I1);
1895 if ((IID == Intrinsic::umax || IID == Intrinsic::smin) &&
1896 II->getType()->isIntOrIntVectorTy(1)) {
1897 return BinaryOperator::CreateOr(I0, I1);
1900 if (IID == Intrinsic::smax || IID == Intrinsic::smin) {
1927 bool UseOr = IID == Intrinsic::smax || IID == Intrinsic::umax;
1928 bool UseAndN = IID == Intrinsic::smin || IID == Intrinsic::umin;
1930 if (IID == Intrinsic::smax || IID == Intrinsic::smin) {
1932 if (KnownSign == std::nullopt) {
1935 }
else if (*KnownSign ) {
1947 return BinaryOperator::CreateOr(I0,
X);
1985 ConstantInt::get(
II->getType(), *RHSC));
1995 if (I0->
hasOneUse() && !I1->hasOneUse())
2007 if (IID == Intrinsic::smin || IID == Intrinsic::umax)
2035 if (LHS_CR.
icmp(Pred, *RHSC))
2039 ConstantInt::get(
II->getType(), *RHSC));
2048 case Intrinsic::scmp: {
2049 Value *I0 =
II->getArgOperand(0), *I1 =
II->getArgOperand(1);
2057 case Intrinsic::bitreverse: {
2058 Value *IIOperand =
II->getArgOperand(0);
2062 X->getType()->isIntOrIntVectorTy(1)) {
2063 Type *Ty =
II->getType();
2070 foldBitOrderCrossLogicOp<Intrinsic::bitreverse>(IIOperand,
Builder))
2071 return crossLogicOpFold;
2075 case Intrinsic::bswap: {
2076 Value *IIOperand =
II->getArgOperand(0);
2088 cast<BinaryOperator>(IIOperand)->
getOpcode() == Instruction::Shl
2101 if (BW - LZ - TZ == 8) {
2102 assert(LZ != TZ &&
"active byte cannot be in the middle");
2104 return BinaryOperator::CreateNUWShl(
2105 IIOperand, ConstantInt::get(IIOperand->
getType(), LZ - TZ));
2107 return BinaryOperator::CreateExactLShr(
2108 IIOperand, ConstantInt::get(IIOperand->
getType(), TZ - LZ));
2113 unsigned C =
X->getType()->getScalarSizeInBits() - BW;
2114 Value *CV = ConstantInt::get(
X->getType(),
C);
2120 foldBitOrderCrossLogicOp<Intrinsic::bswap>(IIOperand,
Builder)) {
2121 return crossLogicOpFold;
2130 case Intrinsic::masked_load:
2131 if (
Value *SimplifiedMaskedOp = simplifyMaskedLoad(*
II))
2134 case Intrinsic::masked_store:
2135 return simplifyMaskedStore(*
II);
2136 case Intrinsic::masked_gather:
2137 return simplifyMaskedGather(*
II);
2138 case Intrinsic::masked_scatter:
2139 return simplifyMaskedScatter(*
II);
2140 case Intrinsic::launder_invariant_group:
2141 case Intrinsic::strip_invariant_group:
2145 case Intrinsic::powi:
2146 if (
ConstantInt *Power = dyn_cast<ConstantInt>(
II->getArgOperand(1))) {
2149 if (Power->isMinusOne())
2151 II->getArgOperand(0),
II);
2153 if (Power->equalsInt(2))
2155 II->getArgOperand(0),
II);
2157 if (!Power->getValue()[0]) {
2172 case Intrinsic::cttz:
2173 case Intrinsic::ctlz:
2178 case Intrinsic::ctpop:
2183 case Intrinsic::fshl:
2184 case Intrinsic::fshr: {
2185 Value *Op0 =
II->getArgOperand(0), *Op1 =
II->getArgOperand(1);
2186 Type *Ty =
II->getType();
2196 if (ModuloC != ShAmtC)
2202 "Shift amount expected to be modulo bitwidth");
2207 if (IID == Intrinsic::fshr) {
2218 assert(IID == Intrinsic::fshl &&
2219 "All funnel shifts by simple constants should go left");
2224 return BinaryOperator::CreateShl(Op0, ShAmtC);
2229 return BinaryOperator::CreateLShr(Op1,
2249 Value *Op2 =
II->getArgOperand(2);
2251 return BinaryOperator::CreateShl(Op0,
And);
2269 case Intrinsic::ptrmask: {
2275 Value *InnerPtr, *InnerMask;
2276 bool Changed =
false;
2280 if (
match(
II->getArgOperand(0),
2284 "Mask types must match");
2301 unsigned NewAlignmentLog =
2315 case Intrinsic::uadd_with_overflow:
2316 case Intrinsic::sadd_with_overflow: {
2324 const APInt *C0, *C1;
2325 Value *Arg0 =
II->getArgOperand(0);
2326 Value *Arg1 =
II->getArgOperand(1);
2327 bool IsSigned = IID == Intrinsic::sadd_with_overflow;
2328 bool HasNWAdd = IsSigned
2334 IsSigned ? C1->
sadd_ov(*C0, Overflow) : C1->
uadd_ov(*C0, Overflow);
2338 IID,
X, ConstantInt::get(Arg1->
getType(), NewC)));
2343 case Intrinsic::umul_with_overflow:
2344 case Intrinsic::smul_with_overflow:
2345 case Intrinsic::usub_with_overflow:
2350 case Intrinsic::ssub_with_overflow: {
2355 Value *Arg0 =
II->getArgOperand(0);
2356 Value *Arg1 =
II->getArgOperand(1);
2373 case Intrinsic::uadd_sat:
2374 case Intrinsic::sadd_sat:
2375 case Intrinsic::usub_sat:
2376 case Intrinsic::ssub_sat: {
2378 Type *Ty = SI->getType();
2379 Value *Arg0 = SI->getLHS();
2380 Value *Arg1 = SI->getRHS();
2411 if (IID == Intrinsic::usub_sat &&
2422 C->isNotMinSignedValue()) {
2426 Intrinsic::sadd_sat, Arg0, NegVal));
2432 if (
auto *
Other = dyn_cast<IntrinsicInst>(Arg0)) {
2434 const APInt *Val, *Val2;
2437 IID == Intrinsic::uadd_sat || IID == Intrinsic::usub_sat;
2438 if (
Other->getIntrinsicID() == IID &&
2446 NewVal = Val->
sadd_ov(*Val2, Overflow);
2459 IID,
X, ConstantInt::get(
II->getType(), NewVal)));
2465 case Intrinsic::minnum:
2466 case Intrinsic::maxnum:
2467 case Intrinsic::minimum:
2468 case Intrinsic::maximum: {
2469 Value *Arg0 =
II->getArgOperand(0);
2470 Value *Arg1 =
II->getArgOperand(1);
2479 case Intrinsic::maxnum:
2480 NewIID = Intrinsic::minnum;
2482 case Intrinsic::minnum:
2483 NewIID = Intrinsic::maxnum;
2485 case Intrinsic::maximum:
2486 NewIID = Intrinsic::minimum;
2488 case Intrinsic::minimum:
2489 NewIID = Intrinsic::maximum;
2495 Instruction *FNeg = UnaryOperator::CreateFNeg(NewCall);
2502 if (
auto *M = dyn_cast<IntrinsicInst>(Arg0)) {
2510 case Intrinsic::maxnum:
2513 case Intrinsic::minnum:
2516 case Intrinsic::maximum:
2519 case Intrinsic::minimum:
2526 IID,
X, ConstantFP::get(Arg0->
getType(), Res),
II);
2530 if (
auto *CI = dyn_cast<CallInst>(V))
2539 X->getType() ==
Y->getType()) {
2551 auto IsMinMaxOrXNegX = [IID, &
X](
Value *Op0,
Value *Op1) {
2553 return Op0->hasOneUse() ||
2554 (IID != Intrinsic::minimum && IID != Intrinsic::minnum);
2558 if (IsMinMaxOrXNegX(Arg0, Arg1) || IsMinMaxOrXNegX(Arg1, Arg0)) {
2560 if (IID == Intrinsic::minimum || IID == Intrinsic::minnum)
2567 case Intrinsic::matrix_multiply: {
2579 Value *Op0 =
II->getOperand(0);
2580 Value *Op1 =
II->getOperand(1);
2581 Value *OpNotNeg, *NegatedOp;
2582 unsigned NegatedOpArg, OtherOpArg;
2599 Value *OtherOp =
II->getOperand(OtherOpArg);
2617 NewArgs[NegatedOpArg] = OpNotNeg;
2624 case Intrinsic::fmuladd: {
2627 II->getFastMathFlags(),
2629 auto *
FAdd = BinaryOperator::CreateFAdd(V,
II->getArgOperand(2));
2630 FAdd->copyFastMathFlags(
II);
2636 case Intrinsic::fma: {
2638 Value *Src0 =
II->getArgOperand(0);
2639 Value *Src1 =
II->getArgOperand(1);
2640 Value *Src2 =
II->getArgOperand(2);
2660 auto *
FAdd = BinaryOperator::CreateFAdd(V, Src2);
2661 FAdd->copyFastMathFlags(
II);
2678 case Intrinsic::copysign: {
2679 Value *Mag =
II->getArgOperand(0), *Sign =
II->getArgOperand(1);
2682 if (*KnownSignBit) {
2722 case Intrinsic::fabs: {
2724 Value *Arg =
II->getArgOperand(0);
2734 if (isa<Constant>(TVal) || isa<Constant>(FVal)) {
2739 FastMathFlags FMF2 = cast<SelectInst>(Arg)->getFastMathFlags();
2741 SI->setFastMathFlags(FMF1 | FMF2);
2752 Value *Magnitude, *Sign;
2753 if (
match(
II->getArgOperand(0),
2764 case Intrinsic::ceil:
2765 case Intrinsic::floor:
2766 case Intrinsic::round:
2767 case Intrinsic::roundeven:
2768 case Intrinsic::nearbyint:
2769 case Intrinsic::rint:
2770 case Intrinsic::trunc: {
2779 case Intrinsic::cos:
2780 case Intrinsic::amdgcn_cos: {
2782 Value *Src =
II->getArgOperand(0);
2792 case Intrinsic::sin:
2793 case Intrinsic::amdgcn_sin: {
2802 case Intrinsic::ldexp: {
2815 Value *Src =
II->getArgOperand(0);
2816 Value *Exp =
II->getArgOperand(1);
2821 Exp->getType() == InnerExp->
getType()) {
2823 FastMathFlags InnerFlags = cast<FPMathOperator>(Src)->getFastMathFlags();
2830 II->setArgOperand(1, NewExp);
2831 II->setFastMathFlags(InnerFlags);
2843 ConstantFP::get(
II->getType(), 1.0));
2850 ConstantFP::get(
II->getType(), 1.0));
2858 Value *SelectCond, *SelectLHS, *SelectRHS;
2859 if (
match(
II->getArgOperand(1),
2862 Value *NewLdexp =
nullptr;
2874 cast<Instruction>(NewLdexp)->copyFastMathFlags(
II);
2881 case Intrinsic::ptrauth_auth:
2882 case Intrinsic::ptrauth_resign: {
2885 bool NeedSign =
II->getIntrinsicID() == Intrinsic::ptrauth_resign;
2887 Value *Key =
II->getArgOperand(1);
2888 Value *Disc =
II->getArgOperand(2);
2892 Value *AuthKey =
nullptr, *AuthDisc =
nullptr, *BasePtr;
2893 if (
const auto *CI = dyn_cast<CallBase>(
Ptr)) {
2905 }
else if (
const auto *PtrToInt = dyn_cast<PtrToIntOperator>(
Ptr)) {
2908 const auto *CPA = dyn_cast<ConstantPtrAuth>(PtrToInt->getOperand(0));
2909 if (!CPA || !CPA->isKnownCompatibleWith(Key, Disc,
DL))
2913 if (NeedSign && isa<ConstantInt>(
II->getArgOperand(4))) {
2914 auto *SignKey = cast<ConstantInt>(
II->getArgOperand(3));
2915 auto *SignDisc = cast<ConstantInt>(
II->getArgOperand(4));
2918 SignDisc, SignAddrDisc);
2930 if (AuthKey && NeedSign) {
2932 NewIntrin = Intrinsic::ptrauth_resign;
2933 }
else if (AuthKey) {
2935 NewIntrin = Intrinsic::ptrauth_auth;
2936 }
else if (NeedSign) {
2938 NewIntrin = Intrinsic::ptrauth_sign;
2961 case Intrinsic::arm_neon_vtbl1:
2962 case Intrinsic::aarch64_neon_tbl1:
2967 case Intrinsic::arm_neon_vmulls:
2968 case Intrinsic::arm_neon_vmullu:
2969 case Intrinsic::aarch64_neon_smull:
2970 case Intrinsic::aarch64_neon_umull: {
2971 Value *Arg0 =
II->getArgOperand(0);
2972 Value *Arg1 =
II->getArgOperand(1);
2975 if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {
2980 bool Zext = (IID == Intrinsic::arm_neon_vmullu ||
2981 IID == Intrinsic::aarch64_neon_umull);
2983 if (
Constant *CV0 = dyn_cast<Constant>(Arg0)) {
2984 if (
Constant *CV1 = dyn_cast<Constant>(Arg1)) {
2995 if (
Constant *CV1 = dyn_cast<Constant>(Arg1))
2997 dyn_cast_or_null<ConstantInt>(CV1->getSplatValue()))
3004 case Intrinsic::arm_neon_aesd:
3005 case Intrinsic::arm_neon_aese:
3006 case Intrinsic::aarch64_crypto_aesd:
3007 case Intrinsic::aarch64_crypto_aese: {
3008 Value *DataArg =
II->getArgOperand(0);
3009 Value *KeyArg =
II->getArgOperand(1);
3021 case Intrinsic::hexagon_V6_vandvrt:
3022 case Intrinsic::hexagon_V6_vandvrt_128B: {
3024 if (
auto Op0 = dyn_cast<IntrinsicInst>(
II->getArgOperand(0))) {
3026 if (ID0 != Intrinsic::hexagon_V6_vandqrt &&
3027 ID0 != Intrinsic::hexagon_V6_vandqrt_128B)
3029 Value *Bytes = Op0->getArgOperand(1), *Mask =
II->getArgOperand(1);
3034 if ((
C & 0xFF) && (
C & 0xFF00) && (
C & 0xFF0000) && (
C & 0xFF000000))
3039 case Intrinsic::stackrestore: {
3040 enum class ClassifyResult {
3044 CallWithSideEffects,
3047 if (isa<AllocaInst>(
I))
3048 return ClassifyResult::Alloca;
3050 if (
auto *CI = dyn_cast<CallInst>(
I)) {
3051 if (
auto *
II = dyn_cast<IntrinsicInst>(CI)) {
3052 if (
II->getIntrinsicID() == Intrinsic::stackrestore)
3053 return ClassifyResult::StackRestore;
3055 if (
II->mayHaveSideEffects())
3056 return ClassifyResult::CallWithSideEffects;
3059 return ClassifyResult::CallWithSideEffects;
3063 return ClassifyResult::None;
3069 if (
IntrinsicInst *SS = dyn_cast<IntrinsicInst>(
II->getArgOperand(0))) {
3070 if (SS->getIntrinsicID() == Intrinsic::stacksave &&
3071 SS->getParent() ==
II->getParent()) {
3073 bool CannotRemove =
false;
3074 for (++BI; &*BI !=
II; ++BI) {
3075 switch (Classify(&*BI)) {
3076 case ClassifyResult::None:
3080 case ClassifyResult::StackRestore:
3083 if (cast<IntrinsicInst>(*BI).getArgOperand(0) != SS)
3084 CannotRemove =
true;
3087 case ClassifyResult::Alloca:
3088 case ClassifyResult::CallWithSideEffects:
3091 CannotRemove =
true;
3107 bool CannotRemove =
false;
3108 for (++BI; &*BI != TI; ++BI) {
3109 switch (Classify(&*BI)) {
3110 case ClassifyResult::None:
3114 case ClassifyResult::StackRestore:
3118 case ClassifyResult::Alloca:
3119 case ClassifyResult::CallWithSideEffects:
3123 CannotRemove =
true;
3133 if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
3137 case Intrinsic::lifetime_end:
3140 if (
II->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
3141 II->getFunction()->hasFnAttribute(Attribute::SanitizeMemory) ||
3142 II->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))
3146 return I.getIntrinsicID() == Intrinsic::lifetime_start;
3150 case Intrinsic::assume: {
3151 Value *IIOperand =
II->getArgOperand(0);
3153 II->getOperandBundlesAsDefs(OpBundles);
3159 assert(isa<AssumeInst>(Assume));
3169 if (
match(Next, m_Intrinsic<Intrinsic::assume>(
m_Specific(IIOperand))))
3170 return RemoveConditionFromAssume(Next);
3176 Value *AssumeIntrinsic =
II->getCalledOperand();
3198 LHS->getOpcode() == Instruction::Load &&
3204 return RemoveConditionFromAssume(
II);
3214 for (
unsigned Idx = 0;
Idx <
II->getNumOperandBundles();
Idx++) {
3216 if (OBU.
getTagName() ==
"separate_storage") {
3218 auto MaybeSimplifyHint = [&](
const Use &U) {
3219 Value *Hint = U.get();
3226 MaybeSimplifyHint(OBU.
Inputs[0]);
3227 MaybeSimplifyHint(OBU.
Inputs[1]);
3239 A->getType()->isPointerTy()) {
3243 Replacement->insertBefore(Next);
3245 return RemoveConditionFromAssume(
II);
3272 if (
auto *Replacement =
3275 Replacement->insertAfter(
II);
3278 return RemoveConditionFromAssume(
II);
3285 for (
unsigned Idx = 0;
Idx <
II->getNumOperandBundles();
Idx++) {
3286 auto &BOI =
II->bundle_op_info_begin()[
Idx];
3289 if (BOI.End - BOI.Begin > 2)
3300 if (BOI.End - BOI.Begin > 0) {
3307 if (BOI.End - BOI.Begin > 0)
3308 II->op_begin()[BOI.Begin].set(CanonRK.
WasOn);
3309 if (BOI.End - BOI.Begin > 1)
3310 II->op_begin()[BOI.Begin + 1].set(ConstantInt::get(
3336 case Intrinsic::experimental_guard: {
3347 Value *NextCond =
nullptr;
3349 m_Intrinsic<Intrinsic::experimental_guard>(
m_Value(NextCond)))) {
3350 Value *CurrCond =
II->getArgOperand(0);
3354 if (CurrCond != NextCond) {
3356 while (MoveI != NextInst) {
3368 case Intrinsic::vector_insert: {
3369 Value *Vec =
II->getArgOperand(0);
3370 Value *SubVec =
II->getArgOperand(1);
3372 auto *DstTy = dyn_cast<FixedVectorType>(
II->getType());
3373 auto *VecTy = dyn_cast<FixedVectorType>(Vec->
getType());
3374 auto *SubVecTy = dyn_cast<FixedVectorType>(SubVec->
getType());
3378 if (DstTy && VecTy && SubVecTy) {
3379 unsigned DstNumElts = DstTy->getNumElements();
3380 unsigned VecNumElts = VecTy->getNumElements();
3381 unsigned SubVecNumElts = SubVecTy->getNumElements();
3382 unsigned IdxN = cast<ConstantInt>(
Idx)->getZExtValue();
3385 if (VecNumElts == SubVecNumElts)
3394 for (i = 0; i != SubVecNumElts; ++i)
3396 for (; i != VecNumElts; ++i)
3402 for (
unsigned i = 0; i != IdxN; ++i)
3404 for (
unsigned i = DstNumElts; i != DstNumElts + SubVecNumElts; ++i)
3406 for (
unsigned i = IdxN + SubVecNumElts; i != DstNumElts; ++i)
3414 case Intrinsic::vector_extract: {
3415 Value *Vec =
II->getArgOperand(0);
3418 Type *ReturnType =
II->getType();
3421 unsigned ExtractIdx = cast<ConstantInt>(
Idx)->getZExtValue();
3422 Value *InsertTuple, *InsertIdx, *InsertValue;
3423 if (
match(Vec, m_Intrinsic<Intrinsic::vector_insert>(
m_Value(InsertTuple),
3426 InsertValue->
getType() == ReturnType) {
3427 unsigned Index = cast<ConstantInt>(InsertIdx)->getZExtValue();
3431 if (ExtractIdx == Index)
3442 auto *DstTy = dyn_cast<VectorType>(ReturnType);
3443 auto *VecTy = dyn_cast<VectorType>(Vec->
getType());
3445 if (DstTy && VecTy) {
3446 auto DstEltCnt = DstTy->getElementCount();
3447 auto VecEltCnt = VecTy->getElementCount();
3448 unsigned IdxN = cast<ConstantInt>(
Idx)->getZExtValue();
3451 if (DstEltCnt == VecTy->getElementCount()) {
3458 if (VecEltCnt.isScalable() || DstEltCnt.isScalable())
3462 for (
unsigned i = 0; i != DstEltCnt.getKnownMinValue(); ++i)
3463 Mask.push_back(IdxN + i);
3470 case Intrinsic::vector_reverse: {
3472 Value *Vec =
II->getArgOperand(0);
3474 auto *OldBinOp = cast<BinaryOperator>(Vec);
3479 OldBinOp->getOpcode(),
X,
Y,
3480 OldBinOp, OldBinOp->getName(),
3481 II->getIterator()));
3485 OldBinOp->getOpcode(),
X, BO1,
3486 OldBinOp, OldBinOp->
getName(),
3487 II->getIterator()));
3493 OldBinOp->getOpcode(), BO0,
Y, OldBinOp,
3494 OldBinOp->getName(),
II->getIterator()));
3498 auto *OldUnOp = cast<UnaryOperator>(Vec);
3500 OldUnOp->getOpcode(),
X, OldUnOp, OldUnOp->getName(),
3506 case Intrinsic::vector_reduce_or:
3507 case Intrinsic::vector_reduce_and: {
3515 Value *Arg =
II->getArgOperand(0);
3525 if (
auto *FTy = dyn_cast<FixedVectorType>(Vect->
getType()))
3529 if (IID == Intrinsic::vector_reduce_and) {
3533 assert(IID == Intrinsic::vector_reduce_or &&
3534 "Expected or reduction.");
3545 case Intrinsic::vector_reduce_add: {
3546 if (IID == Intrinsic::vector_reduce_add) {
3553 Value *Arg =
II->getArgOperand(0);
3563 if (
auto *FTy = dyn_cast<FixedVectorType>(Vect->
getType()))
3571 cast<Instruction>(Arg)->
getOpcode() == Instruction::SExt)
3579 case Intrinsic::vector_reduce_xor: {
3580 if (IID == Intrinsic::vector_reduce_xor) {
3588 Value *Arg =
II->getArgOperand(0);
3598 if (
auto *VTy = dyn_cast<VectorType>(Vect->
getType()))
3610 case Intrinsic::vector_reduce_mul: {
3611 if (IID == Intrinsic::vector_reduce_mul) {
3618 Value *Arg =
II->getArgOperand(0);
3628 if (
auto *VTy = dyn_cast<VectorType>(Vect->
getType()))
3639 case Intrinsic::vector_reduce_umin:
3640 case Intrinsic::vector_reduce_umax: {
3641 if (IID == Intrinsic::vector_reduce_umin ||
3642 IID == Intrinsic::vector_reduce_umax) {
3649 Value *Arg =
II->getArgOperand(0);
3659 if (
auto *VTy = dyn_cast<VectorType>(Vect->
getType()))
3661 Value *Res = IID == Intrinsic::vector_reduce_umin
3673 case Intrinsic::vector_reduce_smin:
3674 case Intrinsic::vector_reduce_smax: {
3675 if (IID == Intrinsic::vector_reduce_smin ||
3676 IID == Intrinsic::vector_reduce_smax) {
3691 Value *Arg =
II->getArgOperand(0);
3701 if (
auto *VTy = dyn_cast<VectorType>(Vect->
getType()))
3705 ExtOpc = cast<CastInst>(Arg)->getOpcode();
3706 Value *Res = ((IID == Intrinsic::vector_reduce_smin) ==
3707 (ExtOpc == Instruction::CastOps::ZExt))
3718 case Intrinsic::vector_reduce_fmax:
3719 case Intrinsic::vector_reduce_fmin:
3720 case Intrinsic::vector_reduce_fadd:
3721 case Intrinsic::vector_reduce_fmul: {
3722 bool CanReorderLanes = (IID != Intrinsic::vector_reduce_fadd &&
3723 IID != Intrinsic::vector_reduce_fmul) ||
3724 II->hasAllowReassoc();
3725 const unsigned ArgIdx = (IID == Intrinsic::vector_reduce_fadd ||
3726 IID == Intrinsic::vector_reduce_fmul)
3729 Value *Arg =
II->getArgOperand(ArgIdx);
3736 case Intrinsic::is_fpclass: {
3741 case Intrinsic::threadlocal_address: {
3766 if (
auto *Sel = dyn_cast<SelectInst>(
Op))
3775 return visitCallBase(*
II);
3790 if (FI1SyncScope != FI2->getSyncScopeID() ||
3797 if (NFI && isIdenticalOrStrongerFence(NFI, &FI))
3801 if (isIdenticalOrStrongerFence(PFI, &FI))
3808 return visitCallBase(
II);
3813 return visitCallBase(CBI);
3832 InstCombineRAUW, InstCombineErase);
3833 if (
Value *With = Simplifier.optimizeCall(CI,
Builder)) {
3845 if (Underlying != TrampMem &&
3846 (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
3848 if (!isa<AllocaInst>(Underlying))
3856 if (
II->getIntrinsicID() == Intrinsic::init_trampoline) {
3860 InitTrampoline =
II;
3863 if (
II->getIntrinsicID() == Intrinsic::adjust_trampoline)
3870 if (!InitTrampoline)
3874 if (InitTrampoline->
getOperand(0) != TrampMem)
3877 return InitTrampoline;
3889 if (
II->getIntrinsicID() == Intrinsic::init_trampoline &&
3890 II->getOperand(0) == TrampMem)
3902 Callee = Callee->stripPointerCasts();
3903 IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
3917bool InstCombinerImpl::annotateAnyAllocSite(
CallBase &Call,
3923 bool Changed =
false;
3925 if (!
Call.getType()->isPointerTy())
3932 if (
Call.hasRetAttr(Attribute::NonNull)) {
3933 Changed = !
Call.hasRetAttr(Attribute::Dereferenceable);
3935 Call.getContext(),
Size->getLimitedValue()));
3937 Changed = !
Call.hasRetAttr(Attribute::DereferenceableOrNull);
3939 Call.getContext(),
Size->getLimitedValue()));
3948 ConstantInt *AlignOpC = dyn_cast<ConstantInt>(Alignment);
3952 Align ExistingAlign =
Call.getRetAlign().valueOrOne();
3954 if (NewAlign > ExistingAlign) {
3966 bool Changed = annotateAnyAllocSite(Call, &
TLI);
3975 if (
V->getType()->isPointerTy() &&
3976 !
Call.paramHasAttr(ArgNo, Attribute::NonNull) &&
3982 assert(ArgNo ==
Call.arg_size() &&
"Call arguments not processed correctly.");
3984 if (!ArgNos.
empty()) {
3989 Call.setAttributes(AS);
3996 Function *CalleeF = dyn_cast<Function>(Callee);
3998 transformConstExprCastCall(Call))
4005 LLVM_DEBUG(
dbgs() <<
"Removing convergent attr from instr " << Call
4007 Call.setNotConvergent();
4029 if (isa<CallInst>(OldCall))
4034 cast<CallBase>(OldCall)->setCalledFunction(
4043 if ((isa<ConstantPointerNull>(Callee) &&
4045 isa<UndefValue>(Callee)) {
4048 if (!
Call.getType()->isVoidTy())
4051 if (
Call.isTerminator()) {
4062 return transformCallThroughTrampoline(Call, *
II);
4064 if (isa<InlineAsm>(Callee) && !
Call.doesNotThrow()) {
4066 if (!
IA->canThrow()) {
4069 Call.setDoesNotThrow();
4077 if (
CallInst *CI = dyn_cast<CallInst>(&Call)) {
4084 if (!
Call.use_empty() && !
Call.isMustTailCall())
4085 if (
Value *ReturnedArg =
Call.getReturnedArgOperand()) {
4087 Type *RetArgTy = ReturnedArg->getType();
4096 if (Bundle && !
Call.isIndirectCall()) {
4100 ConstantInt *ExpectedType = cast<ConstantInt>(Bundle->Inputs[0]);
4103 FunctionType = mdconst::extract<ConstantInt>(MD->getOperand(0));
4107 dbgs() <<
Call.getModule()->getName()
4108 <<
": warning: kcfi: " <<
Call.getCaller()->getName()
4109 <<
": call to " << CalleeF->
getName()
4110 <<
" using a mismatching function pointer type\n";
4121 switch (
Call.getIntrinsicID()) {
4122 case Intrinsic::experimental_gc_statepoint: {
4138 if (isa<UndefValue>(DerivedPtr) || isa<UndefValue>(BasePtr)) {
4144 if (
auto *PT = dyn_cast<PointerType>(GCR.
getType())) {
4148 if (isa<ConstantPointerNull>(DerivedPtr)) {
4177 LiveGcValues.
insert(BasePtr);
4178 LiveGcValues.
insert(DerivedPtr);
4180 std::optional<OperandBundleUse> Bundle =
4182 unsigned NumOfGCLives = LiveGcValues.
size();
4183 if (!Bundle || NumOfGCLives == Bundle->Inputs.size())
4187 std::vector<Value *> NewLiveGc;
4188 for (
Value *V : Bundle->Inputs) {
4189 if (Val2Idx.
count(V))
4191 if (LiveGcValues.
count(V)) {
4192 Val2Idx[
V] = NewLiveGc.
size();
4193 NewLiveGc.push_back(V);
4195 Val2Idx[
V] = NumOfGCLives;
4201 assert(Val2Idx.
count(BasePtr) && Val2Idx[BasePtr] != NumOfGCLives &&
4202 "Missed live gc for base pointer");
4204 GCR.
setOperand(1, ConstantInt::get(OpIntTy1, Val2Idx[BasePtr]));
4206 assert(Val2Idx.
count(DerivedPtr) && Val2Idx[DerivedPtr] != NumOfGCLives &&
4207 "Missed live gc for derived pointer");
4209 GCR.
setOperand(2, ConstantInt::get(OpIntTy2, Val2Idx[DerivedPtr]));
4218 return Changed ? &
Call :
nullptr;
4224bool InstCombinerImpl::transformConstExprCastCall(
CallBase &Call) {
4226 dyn_cast<Function>(
Call.getCalledOperand()->stripPointerCasts());
4230 assert(!isa<CallBrInst>(Call) &&
4231 "CallBr's don't have a single point after a def to insert at");
4236 if (
Callee->isDeclaration())
4242 if (
Callee->hasFnAttribute(
"thunk"))
4248 if (
Callee->hasFnAttribute(Attribute::Naked))
4255 if (
Call.isMustTailCall())
4266 Type *NewRetTy = FT->getReturnType();
4269 if (OldRetTy != NewRetTy) {
4275 if (!
Caller->use_empty())
4290 if (!
Caller->use_empty()) {
4292 if (
auto *
II = dyn_cast<InvokeInst>(Caller))
4293 PhisNotSupportedBlock =
II->getNormalDest();
4294 if (PhisNotSupportedBlock)
4296 if (
PHINode *PN = dyn_cast<PHINode>(U))
4297 if (PN->getParent() == PhisNotSupportedBlock)
4302 unsigned NumActualArgs =
Call.arg_size();
4303 unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);
4313 if (
Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca) ||
4314 Callee->getAttributes().hasAttrSomewhere(Attribute::Preallocated))
4317 auto AI =
Call.arg_begin();
4318 for (
unsigned i = 0, e = NumCommonArgs; i !=
e; ++i, ++AI) {
4319 Type *ParamTy = FT->getParamType(i);
4320 Type *ActTy = (*AI)->getType();
4332 if (
Call.isInAllocaArgument(i) ||
4340 Callee->getAttributes().hasParamAttr(i, Attribute::ByVal))
4344 if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
4359 Args.reserve(NumActualArgs);
4360 ArgAttrs.
reserve(NumActualArgs);
4371 AI =
Call.arg_begin();
4372 for (
unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
4373 Type *ParamTy = FT->getParamType(i);
4375 Value *NewArg = *AI;
4376 if ((*AI)->getType() != ParamTy)
4378 Args.push_back(NewArg);
4390 for (
unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i) {
4396 if (FT->getNumParams() < NumActualArgs) {
4398 if (FT->isVarArg()) {
4400 for (
unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
4402 Value *NewArg = *AI;
4403 if (PTy != (*AI)->getType()) {
4409 Args.push_back(NewArg);
4422 assert((ArgAttrs.
size() == FT->getNumParams() || FT->isVarArg()) &&
4423 "missing argument attributes");
4428 Call.getOperandBundlesAsDefs(OpBundles);
4433 II->getUnwindDest(), Args, OpBundles);
4437 cast<CallInst>(Caller)->getTailCallKind());
4444 NewCall->
copyMetadata(*Caller, {LLVMContext::MD_prof});
4449 if (OldRetTy !=
NV->getType() && !
Caller->use_empty()) {
4450 assert(!
NV->getType()->isVoidTy());
4452 NC->setDebugLoc(
Caller->getDebugLoc());
4455 assert(OptInsertPt &&
"No place to insert cast");
4460 if (!
Caller->use_empty())
4462 else if (
Caller->hasValueHandle()) {
4463 if (OldRetTy ==
NV->getType())
4478InstCombinerImpl::transformCallThroughTrampoline(
CallBase &Call,
4485 if (
Attrs.hasAttrSomewhere(Attribute::Nest))
4493 unsigned NestArgNo = 0;
4494 Type *NestTy =
nullptr;
4499 E = NestFTy->param_end();
4500 I != E; ++NestArgNo, ++
I) {
4511 std::vector<Value*> NewArgs;
4512 std::vector<AttributeSet> NewArgAttrs;
4513 NewArgs.reserve(
Call.arg_size() + 1);
4514 NewArgAttrs.reserve(
Call.arg_size());
4521 auto I =
Call.arg_begin(), E =
Call.arg_end();
4523 if (ArgNo == NestArgNo) {
4526 if (NestVal->
getType() != NestTy)
4528 NewArgs.push_back(NestVal);
4529 NewArgAttrs.push_back(NestAttr);
4536 NewArgs.push_back(*
I);
4537 NewArgAttrs.push_back(
Attrs.getParamAttrs(ArgNo));
4548 std::vector<Type*> NewTypes;
4549 NewTypes.reserve(FTy->getNumParams()+1);
4556 E = FTy->param_end();
4559 if (ArgNo == NestArgNo)
4561 NewTypes.push_back(NestTy);
4567 NewTypes.push_back(*
I);
4580 Attrs.getRetAttrs(), NewArgAttrs);
4583 Call.getOperandBundlesAsDefs(OpBundles);
4588 II->getUnwindDest(), NewArgs, OpBundles);
4589 cast<InvokeInst>(NewCaller)->setCallingConv(
II->getCallingConv());
4590 cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
4591 }
else if (
CallBrInst *CBI = dyn_cast<CallBrInst>(&Call)) {
4594 CBI->getIndirectDests(), NewArgs, OpBundles);
4595 cast<CallBrInst>(NewCaller)->setCallingConv(CBI->getCallingConv());
4596 cast<CallBrInst>(NewCaller)->setAttributes(NewPAL);
4599 cast<CallInst>(NewCaller)->setTailCallKind(
4600 cast<CallInst>(Call).getTailCallKind());
4601 cast<CallInst>(NewCaller)->setCallingConv(
4602 cast<CallInst>(Call).getCallingConv());
4603 cast<CallInst>(NewCaller)->setAttributes(NewPAL);
4614 Call.setCalledFunction(FTy, NestF);
AMDGPU Register Bank Select
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
This file implements the APSInt class, which is a simple class that represents an arbitrary sized int...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static cl::opt< ITMode > IT(cl::desc("IT block support"), cl::Hidden, cl::init(DefaultIT), cl::values(clEnumValN(DefaultIT, "arm-default-it", "Generate any type of IT block"), clEnumValN(RestrictedIT, "arm-restrict-it", "Disallow complex IT blocks")))
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static SDValue foldBitOrderCrossLogicOp(SDNode *N, SelectionDAG &DAG)
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define DEBUG_WITH_TYPE(TYPE,...)
DEBUG_WITH_TYPE macro - This macro should be used by passes to emit debug information.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static Type * getPromotedType(Type *Ty)
Return the specified type promoted as it would be to pass though a va_arg area.
static Instruction * createOverflowTuple(IntrinsicInst *II, Value *Result, Constant *Overflow)
Creates a result tuple for an overflow intrinsic II with a given Result and a constant Overflow value...
static IntrinsicInst * findInitTrampolineFromAlloca(Value *TrampMem)
static bool removeTriviallyEmptyRange(IntrinsicInst &EndI, InstCombinerImpl &IC, std::function< bool(const IntrinsicInst &)> IsStart)
static bool inputDenormalIsDAZ(const Function &F, const Type *Ty)
static Instruction * reassociateMinMaxWithConstantInOperand(IntrinsicInst *II, InstCombiner::BuilderTy &Builder)
If this min/max has a matching min/max operand with a constant, try to push the constant operand into...
static bool signBitMustBeTheSame(Value *Op0, Value *Op1, const SimplifyQuery &SQ)
Return true if two values Op0 and Op1 are known to have the same sign.
static Instruction * moveAddAfterMinMax(IntrinsicInst *II, InstCombiner::BuilderTy &Builder)
Try to canonicalize min/max(X + C0, C1) as min/max(X, C1 - C0) + C0.
static Instruction * simplifyInvariantGroupIntrinsic(IntrinsicInst &II, InstCombinerImpl &IC)
This function transforms launder.invariant.group and strip.invariant.group like: launder(launder(x)) ...
static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E, unsigned NumOperands)
static std::optional< bool > getKnownSign(Value *Op, const SimplifyQuery &SQ)
static cl::opt< unsigned > GuardWideningWindow("instcombine-guard-widening-window", cl::init(3), cl::desc("How wide an instruction window to bypass looking for " "another guard"))
static bool hasUndefSource(AnyMemTransferInst *MI)
Recognize a memcpy/memmove from a trivially otherwise unused alloca.
static Instruction * foldShuffledIntrinsicOperands(IntrinsicInst *II, InstCombiner::BuilderTy &Builder)
If all arguments of the intrinsic are unary shuffles with the same mask, try to shuffle after the int...
static Instruction * factorizeMinMaxTree(IntrinsicInst *II)
Reduce a sequence of min/max intrinsics with a common operand.
static Value * simplifyNeonTbl1(const IntrinsicInst &II, InstCombiner::BuilderTy &Builder)
Convert a table lookup to shufflevector if the mask is constant.
static Instruction * foldClampRangeOfTwo(IntrinsicInst *II, InstCombiner::BuilderTy &Builder)
If we have a clamp pattern like max (min X, 42), 41 – where the output can only be one of two possibl...
static Value * simplifyReductionOperand(Value *Arg, bool CanReorderLanes)
static IntrinsicInst * findInitTrampolineFromBB(IntrinsicInst *AdjustTramp, Value *TrampMem)
static Value * foldIntrinsicUsingDistributiveLaws(IntrinsicInst *II, InstCombiner::BuilderTy &Builder)
static std::optional< bool > getKnownSignOrZero(Value *Op, const SimplifyQuery &SQ)
static Value * foldMinimumOverTrailingOrLeadingZeroCount(Value *I0, Value *I1, const DataLayout &DL, InstCombiner::BuilderTy &Builder)
Fold an unsigned minimum of trailing or leading zero bits counts: umin(cttz(CtOp, ZeroUndef),...
static Instruction * foldCtpop(IntrinsicInst &II, InstCombinerImpl &IC)
static Instruction * foldCttzCtlz(IntrinsicInst &II, InstCombinerImpl &IC)
static IntrinsicInst * findInitTrampoline(Value *Callee)
static FCmpInst::Predicate fpclassTestIsFCmp0(FPClassTest Mask, const Function &F, Type *Ty)
static bool leftDistributesOverRight(Instruction::BinaryOps LOp, bool HasNUW, bool HasNSW, Intrinsic::ID ROp)
Return whether "X LOp (Y ROp Z)" is always equal to "(X LOp Y) ROp (X LOp Z)".
static Value * reassociateMinMaxWithConstants(IntrinsicInst *II, IRBuilderBase &Builder, const SimplifyQuery &SQ)
If this min/max has a constant operand and an operand that is a matching min/max with a constant oper...
static CallInst * canonicalizeConstantArg0ToArg1(CallInst &Call)
This file provides internal interfaces used to implement the InstCombine.
This file provides the interface for the instcombine pass implementation.
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file implements the SmallBitVector class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
static bool inputDenormalIsIEEE(const Function &F, const Type *Ty)
Return true if it's possible to assume IEEE treatment of input denormals in F for Val.
ModRefInfo getModRefInfoMask(const MemoryLocation &Loc, bool IgnoreLocals=false)
Returns a bitmask that should be unconditionally applied to the ModRef info of a memory location.
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
APInt usub_ov(const APInt &RHS, bool &Overflow) const
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
APInt sadd_ov(const APInt &RHS, bool &Overflow) const
APInt uadd_ov(const APInt &RHS, bool &Overflow) const
static APInt getSplat(unsigned NewLen, const APInt &V)
Return a value containing V broadcasted over NewLen bits.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
APInt uadd_sat(const APInt &RHS) const
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
APInt ssub_ov(const APInt &RHS, bool &Overflow) const
static APSInt getMinValue(uint32_t numBits, bool Unsigned)
Return the APSInt representing the minimum integer value with the given bit width and signedness.
static APSInt getMaxValue(uint32_t numBits, bool Unsigned)
Return the APSInt representing the maximum integer value with the given bit width and signedness.
This class represents any memset intrinsic.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
void updateAffectedValues(AssumeInst *CI)
Update the cache of values being affected by this assumption (i.e.
MutableArrayRef< ResultElem > assumptionsFor(const Value *V)
Access the list of assumptions which affect this value.
bool overlaps(const AttributeMask &AM) const
Return true if the builder has any attribute that's in the specified builder.
AttributeSet getFnAttrs() const
The function attributes are returned.
static AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute > > Attrs)
Create an AttributeList with the specified parameters in it.
bool isEmpty() const
Return true if there are no attributes.
AttributeSet getRetAttrs() const
The attributes for the ret value are returned.
bool hasAttrSomewhere(Attribute::AttrKind Kind, unsigned *Index=nullptr) const
Return true if the specified attribute is set for at least one parameter or for the return value.
bool hasParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Return true if the attribute exists for the given argument.
AttributeSet getParamAttrs(unsigned ArgNo) const
The attributes for the argument or parameter at the given index are returned.
AttributeList addParamAttribute(LLVMContext &C, unsigned ArgNo, Attribute::AttrKind Kind) const
Add an argument attribute to the list.
bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
AttributeSet removeAttributes(LLVMContext &C, const AttributeMask &AttrsToRemove) const
Remove the specified attributes from this set.
static AttributeSet get(LLVMContext &C, const AttrBuilder &B)
static Attribute get(LLVMContext &Context, AttrKind Kind, uint64_t Val=0)
Return a uniquified Attribute object.
static Attribute getWithDereferenceableBytes(LLVMContext &Context, uint64_t Bytes)
static Attribute getWithDereferenceableOrNullBytes(LLVMContext &Context, uint64_t Bytes)
static Attribute getWithAlignment(LLVMContext &Context, Align Alignment)
Return a uniquified Attribute object that has the specific alignment set.
LLVM Basic Block Representation.
InstListType::reverse_iterator reverse_iterator
InstListType::iterator iterator
Instruction iterators...
bool isSigned() const
Whether the intrinsic is signed or unsigned.
Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
static BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
static BinaryOperator * CreateNSW(BinaryOps Opc, Value *V1, Value *V2, const Twine &Name="")
static BinaryOperator * CreateNot(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
static BinaryOperator * CreateNUW(BinaryOps Opc, Value *V1, Value *V2, const Twine &Name="")
static BinaryOperator * CreateFMulFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
static BinaryOperator * CreateFDivFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
static BinaryOperator * CreateFSubFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
static BinaryOperator * CreateWithCopiedFlags(BinaryOps Opc, Value *V1, Value *V2, Value *CopyO, const Twine &Name="", InsertPosition InsertBefore=nullptr)
static BinaryOperator * CreateNSWNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
void setCallingConv(CallingConv::ID CC)
MaybeAlign getRetAlign() const
Extract the alignment of the return value.
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
Value * getCalledOperand() const
void setAttributes(AttributeList A)
Set the attributes for this call.
bool doesNotThrow() const
Determine if the call cannot unwind.
void addRetAttr(Attribute::AttrKind Kind)
Adds the attribute to the return value.
Value * getArgOperand(unsigned i) const
Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
static CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, InsertPosition InsertPt=nullptr)
Create a clone of CB with a different set of operand bundles and insert it before InsertPt.
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
static CallBase * removeOperandBundle(CallBase *CB, uint32_t ID, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle ID removed.
unsigned arg_size() const
void setCalledFunction(Function *Fn)
Sets the function called, including updating the function type.
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
static CallBrInst * Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
This class represents a function call, abstracting a target machine's calling convention.
bool isNoTailCall() const
void setTailCallKind(TailCallKind TCK)
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
bool isMustTailCall() const
static Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
static CastInst * CreateIntegerCast(Value *S, Type *Ty, bool isSigned, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt, BitCast, or Trunc for int -> int casts.
static bool isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, const DataLayout &DL)
Check whether a bitcast, inttoptr, or ptrtoint cast between these types is valid and a no-op.
static CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, a PtrToInt, or an IntToPTr cast instruction.
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Predicate getNonStrictPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
Predicate getUnorderedPredicate() const
static ConstantAggregateZero * get(Type *Ty)
static Constant * getPointerCast(Constant *C, Type *Ty)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.
static Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static Constant * getNeg(Constant *C, bool HasNSW=false)
static Constant * getInfinity(Type *Ty, bool Negative=false)
static Constant * getZero(Type *Ty, bool Negative=false)
This is the shared class of boolean and integer constants.
uint64_t getLimitedValue(uint64_t Limit=~0ULL) const
getLimitedValue - If the value is smaller than the specified limit, return it, otherwise return the l...
static ConstantInt * getTrue(LLVMContext &Context)
static ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
static ConstantInt * getBool(LLVMContext &Context, bool V)
static ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
static ConstantPtrAuth * get(Constant *Ptr, ConstantInt *Key, ConstantInt *Disc, Constant *AddrDisc)
Return a pointer signed with the specified parameters.
This class represents a range of values.
bool isFullSet() const
Return true if this set contains all of the elements possible for this data-type.
bool icmp(CmpInst::Predicate Pred, const ConstantRange &Other) const
Does the predicate Pred hold between ranges this and Other? NOTE: false does not mean that inverse pr...
bool contains(const APInt &Val) const
Return true if the specified value is in the set.
ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
static Constant * get(StructType *T, ArrayRef< Constant * > V)
This is an important base class in LLVM.
static Constant * getIntegerValue(Type *Ty, const APInt &V)
Return the value for an integer or pointer constant, or a vector thereof, with the given scalar value...
static Constant * getAllOnesValue(Type *Ty)
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
unsigned getPointerTypeSizeInBits(Type *) const
Layout pointer size, in bits, based on the type.
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
static FMFSource intersect(Value *A, Value *B)
Intersect the FMF from two instructions.
This class represents an extension of floating point types.
Convenience struct for specifying and reasoning about fast-math flags.
void setNoSignedZeros(bool B=true)
bool allowReassoc() const
Flag queries.
An instruction for ordering other memory operations.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this fence instruction.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Class to represent function types.
Type::subtype_iterator param_iterator
static FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
bool isConvergent() const
Determine if the call is convergent.
FunctionType * getFunctionType() const
Returns the FunctionType for me.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
AttributeList getAttributes() const
Return the attribute list for this Function.
bool doesNotThrow() const
Determine if the function cannot unwind.
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
Represents calls to the gc.relocate intrinsic.
Value * getBasePtr() const
unsigned getBasePtrIndex() const
The index into the associate statepoint's argument list which contains the base pointer of the pointe...
Value * getDerivedPtr() const
unsigned getDerivedPtrIndex() const
The index into the associate statepoint's argument list which contains the pointer whose relocation t...
Represents a gc.statepoint intrinsic call.
std::vector< const GCRelocateInst * > getGCRelocates() const
Get list of all gc reloactes linked to this statepoint May contain several relocations for the same b...
MDNode * getMetadata(unsigned KindID) const
Get the current metadata attachments for the given kind, if any.
bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
PointerType * getType() const
Global values are always pointers.
Common base class shared among various IRBuilders.
Value * CreateFCmpONE(Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateLdexp(Value *Src, Value *Exp, FMFSource FMFSource={}, const Twine &Name="")
Create call to the ldexp intrinsic.
Value * CreateLaunderInvariantGroup(Value *Ptr)
Create a launder.invariant.group intrinsic call.
Value * CreateFCmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
Value * CreateExtractElement(Value *Vec, Value *Idx, const Twine &Name="")
IntegerType * getIntNTy(unsigned N)
Fetch the type representing an N-bit integer.
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
Value * CreateFCmpORD(Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateZExtOrTrunc(Value *V, Type *DestTy, const Twine &Name="")
Create a ZExt or Trunc from the integer value V to DestTy.
CallInst * CreateAndReduce(Value *Src)
Create a vector int AND reduction intrinsic of the source vector.
CallInst * CreateAssumption(Value *Cond, ArrayRef< OperandBundleDef > OpBundles={})
Create an assume intrinsic call that allows the optimizer to assume that the provided condition will ...
Value * CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name="")
Return a vector value that contains NumElts copies of V.
ConstantInt * getTrue()
Get the constant value for i1 true.
Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
InvokeInst * CreateInvoke(FunctionType *Ty, Value *Callee, BasicBlock *NormalDest, BasicBlock *UnwindDest, ArrayRef< Value * > Args, ArrayRef< OperandBundleDef > OpBundles, const Twine &Name="")
Create an invoke instruction.
Value * CreateFCmpUNE(Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
CallInst * CreateAddReduce(Value *Src)
Create a vector int add reduction intrinsic of the source vector.
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Value * CreateCast(Instruction::CastOps Op, Value *V, Type *DestTy, const Twine &Name="", MDNode *FPMathTag=nullptr, FMFSource FMFSource={})
Value * CreateNSWMul(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateNeg(Value *V, const Twine &Name="", bool HasNSW=false)
CallInst * CreateOrReduce(Value *Src)
Create a vector int OR reduction intrinsic of the source vector.
Value * CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with 2 operands which is mangled on the first type.
CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Value * CreateBitOrPointerCast(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateNot(Value *V, const Twine &Name="")
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateFCmpUEQ(Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateCopySign(Value *LHS, Value *RHS, FMFSource FMFSource={}, const Twine &Name="")
Create call to the copysign intrinsic.
CallInst * CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with 1 operand which is mangled on its type.
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
Value * CreateFCmpOEQ(Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
ConstantInt * getFalse()
Get the constant value for i1 false.
Value * CreateIsNotNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg != 0.
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
PointerType * getPtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer.
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateElementCount(Type *DstType, ElementCount EC)
Create an expression which evaluates to the number of elements in EC at runtime.
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Value * CreateFCmpUNO(Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateIsNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg == 0.
Value * CreateFNegFMF(Value *V, FMFSource FMFSource, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateFNeg(Value *V, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateAddrSpaceCast(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateStripInvariantGroup(Value *Ptr)
Create a strip.invariant.group intrinsic call.
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false)
Given an instruction with a select as one operand and a constant as the other operand,...
KnownFPClass computeKnownFPClass(Value *Val, FastMathFlags FMF, FPClassTest Interested=fcAllFlags, const Instruction *CtxI=nullptr, unsigned Depth=0) const
Value * SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &PoisonElts, unsigned Depth=0, bool AllowMultipleUsers=false) override
The specified value produces a vector with any number of elements.
Instruction * SimplifyAnyMemSet(AnyMemSetInst *MI)
Constant * getLosslessUnsignedTrunc(Constant *C, Type *TruncTy)
Instruction * visitFree(CallInst &FI, Value *FreedOp)
Instruction * visitCallBrInst(CallBrInst &CBI)
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
bool SimplifyDemandedBits(Instruction *I, unsigned Op, const APInt &DemandedMask, KnownBits &Known, unsigned Depth, const SimplifyQuery &Q) override
This form of SimplifyDemandedBits simplifies the specified instruction operand if possible,...
Value * tryGetLog2(Value *Op, bool AssumeNonZero)
Instruction * visitFenceInst(FenceInst &FI)
Instruction * visitInvokeInst(InvokeInst &II)
Constant * getLosslessSignedTrunc(Constant *C, Type *TruncTy)
bool SimplifyDemandedInstructionBits(Instruction &Inst)
Tries to simplify operands to an integer instruction based on its demanded bits.
void CreateNonTerminatorUnreachable(Instruction *InsertAt)
Create and insert the idiom we use to indicate a block is unreachable without having to rewrite the C...
Instruction * visitVAEndInst(VAEndInst &I)
Instruction * matchBSwapOrBitReverse(Instruction &I, bool MatchBSwaps, bool MatchBitReversals)
Given an initial instruction, check to see if it is the root of a bswap/bitreverse idiom.
Instruction * visitAllocSite(Instruction &FI)
Instruction * SimplifyAnyMemTransfer(AnyMemTransferInst *MI)
OverflowResult computeOverflow(Instruction::BinaryOps BinaryOp, bool IsSigned, Value *LHS, Value *RHS, Instruction *CxtI) const
Instruction * visitCallInst(CallInst &CI)
CallInst simplification.
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
DominatorTree & getDominatorTree() const
bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero=false, unsigned Depth=0, const Instruction *CxtI=nullptr)
Instruction * InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old)
Inserts an instruction New before instruction Old.
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
void replaceUse(Use &U, Value *NewValue)
Replace use and add the previously used value to the worklist.
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
std::optional< Instruction * > targetInstCombineIntrinsic(IntrinsicInst &II)
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth, const Instruction *CxtI) const
AssumptionCache & getAssumptionCache() const
bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth=0, const Instruction *CxtI=nullptr) const
OptimizationRemarkEmitter & ORE
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
const SimplifyQuery & getSimplifyQuery() const
unsigned ComputeMaxSignificantBits(const Value *Op, unsigned Depth=0, const Instruction *CxtI=nullptr) const
void pushUsersToWorkList(Instruction &I)
When an instruction is simplified, add all users of the instruction to the work lists because they mi...
void add(Instruction *I)
Add instruction to the worklist.
void setHasNoUnsignedWrap(bool b=true)
Set or clear the nuw flag on this instruction, which must be an operator which supports this flag.
void copyFastMathFlags(FastMathFlags FMF)
Convenience function for transferring all fast-math flag values to this instruction,...
bool mayWriteToMemory() const LLVM_READONLY
Return true if this instruction may modify memory.
void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
void setHasNoSignedWrap(bool b=true)
Set or clear the nsw flag on this instruction, which must be an operator which supports this flag.
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
void setAAMetadata(const AAMDNodes &N)
Sets the AA metadata on this instruction from the AAMDNodes structure.
void andIRFlags(const Value *V)
Logical 'and' of any supported wrapping, exact, and fast-math flags of V and this instruction.
const Instruction * getPrevNonDebugInstruction(bool SkipPseudoOp=false) const
Return a pointer to the previous non-debug instruction in the same basic block as 'this',...
const Function * getFunction() const
Return the function this instruction belongs to.
const Instruction * getNextNonDebugInstruction(bool SkipPseudoOp=false) const
Return a pointer to the next non-debug instruction in the same basic block as 'this',...
void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
std::optional< InstListType::iterator > getInsertionPointAfterDef()
Get the first insertion point at which the result of this instruction is defined.
bool isIdenticalTo(const Instruction *I) const LLVM_READONLY
Return true if the specified instruction is exactly identical to the current one.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
void moveBefore(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
Class to represent integer types.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A wrapper class for inspecting calls to intrinsic functions.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
This is an important class for using LLVM in a threaded context.
LibCallSimplifier - This class implements a collection of optimizations that replace well formed call...
An instruction for reading from memory.
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
ICmpInst::Predicate getPredicate() const
Returns the comparison predicate underlying the intrinsic.
bool isSigned() const
Whether the intrinsic is signed or unsigned.
A Module instance is used to store all the information related to an LLVM module.
A container for an operand bundle being viewed as a set of values rather than a set of uses.
unsigned getOpcode() const
Return the opcode for this Instruction or ConstantExpr.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
bool isCommutative() const
Return true if the instruction is commutative.
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Represents a saturating add/sub intrinsic.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)
This instruction constructs a fixed permutation of two input vectors.
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
bool test(unsigned Idx) const
bool all() const
Returns true if all bits are set.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
void setVolatile(bool V)
Specify whether this is a volatile store or not.
void setAlignment(Align Align)
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this store instruction.
Class to represent struct types.
static bool isCallingConvCCompatible(CallBase *CI)
Returns true if call site / callee has cdecl-compatible calling conventions.
Provides information about what library functions are available for the current target.
This class represents a truncation of integer types.
The instances of the Type class are immutable: once they are created, they are never changed.
unsigned getIntegerBitWidth() const
const fltSemantics & getFltSemantics() const
bool isPointerTy() const
True if this is an instance of PointerType.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isStructTy() const
True if this is an instance of StructType.
Type * getWithNewBitWidth(unsigned NewBitWidth) const
Given an integer or vector type, change the lane bitwidth to NewBitwidth, whilst keeping the old numb...
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
bool canLosslesslyBitCastTo(Type *Ty) const
Return true if this type could be converted with a lossless BitCast to type 'Ty'.
static IntegerType * getInt32Ty(LLVMContext &C)
static IntegerType * getInt64Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isVoidTy() const
Return true if this is 'void'.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
static UnaryOperator * CreateWithCopiedFlags(UnaryOps Opc, Value *V, Instruction *CopyO, const Twine &Name="", InsertPosition InsertBefore=nullptr)
static UnaryOperator * CreateFNegFMF(Value *Op, Instruction *FMFSource, const Twine &Name="", InsertPosition InsertBefore=nullptr)
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
void setOperand(unsigned i, Value *Val)
Value * getOperand(unsigned i) const
This represents the llvm.va_end intrinsic.
static void ValueIsDeleted(Value *V)
static void ValueIsRAUWd(Value *Old, Value *New)
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
static constexpr uint64_t MaximumAlignment
void setMetadata(unsigned KindID, MDNode *Node)
Set a particular kind of metadata attachment.
bool hasOneUse() const
Return true if there is exactly one use of this value.
iterator_range< user_iterator > users()
static void dropDroppableUse(Use &U)
Remove the droppable use U.
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
LLVMContext & getContext() const
All values hold a context through their type.
static constexpr unsigned MaxAlignmentExponent
The maximum alignment for instructions.
StringRef getName() const
Return a constant reference to the value's name.
void takeName(Value *V)
Transfer the name from V to this value.
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
Represents an op.with.overflow intrinsic.
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
const ParentTy * getParent() const
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
AttributeMask typeIncompatible(Type *Ty, AttributeSet AS, AttributeSafetyKind ASK=ASK_ALL)
Which attributes cannot be applied to a type.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ C
The default llvm calling convention, compatible with C.
Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
m_Intrinsic_Ty< Opnd0 >::Ty m_BitReverse(const Opnd0 &Op0)
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWSub(const LHS &L, const RHS &R)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
OverflowingBinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWNeg(const ValTy &V)
Matches a 'Neg' as 'sub nsw 0, V'.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
cstfp_pred_ty< is_neg_zero_fp > m_NegZeroFP()
Match a floating-point negative zero.
specific_fpval m_SpecificFP(double V)
Match a specific floating point value or vector with all elements equal to the value.
BinOpPred_match< LHS, RHS, is_logical_shift_op > m_LogicalShift(const LHS &L, const RHS &R)
Matches logical shift operations.
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > m_SMin(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)
Matches an Xor with LHS and RHS in either order.
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
cst_pred_ty< is_zero_int > m_ZeroInt()
Match an integer 0 or a vector with all elements equal to 0.
apint_match m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
match_combine_or< match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > >, OpTy > m_ZExtOrSExtOrSelf(const OpTy &Op)
OneUse_match< T > m_OneUse(const T &SubPattern)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
BinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub > m_Neg(const ValTy &V)
Matches a 'Neg' as 'sub 0, V'.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
match_combine_and< class_match< Constant >, match_unless< constantexpr_match > > m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
ThreeOps_match< decltype(m_Value()), LHS, RHS, Instruction::Select, true > m_c_Select(const LHS &L, const RHS &R)
Match Select(C, LHS, RHS) or Select(C, RHS, LHS)
CastInst_match< OpTy, FPExtInst > m_FPExt(const OpTy &Op)
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWShl(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty > m_UMax(const LHS &L, const RHS &R)
cst_pred_ty< is_negated_power2 > m_NegatedPower2()
Match an integer or vector negated power-of-2.
cst_pred_ty< custom_checkfn< APInt > > m_CheckedInt(function_ref< bool(const APInt &)> CheckFn)
Match an integer or vector where CheckFn(ele) for each element is true.
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShl(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty, true > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty, true > > > m_c_MaxOrMin(const LHS &L, const RHS &R)
class_match< UnaryOperator > m_UnOp()
Match an arbitrary unary operation and ignore it.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWSub(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty > m_SMax(const LHS &L, const RHS &R)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap >, DisjointOr_match< LHS, RHS > > m_NSWAddLike(const LHS &L, const RHS &R)
Match either "add nsw" or "or disjoint".
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
Exact_match< T > m_Exact(const T &SubPattern)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
cstfp_pred_ty< is_pos_zero_fp > m_PosZeroFP()
Match a floating-point positive zero.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
apfloat_match m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShr(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
m_Intrinsic_Ty< Opnd0 >::Ty m_BSwap(const Opnd0 &Op0)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap >, DisjointOr_match< LHS, RHS > > m_NUWAddLike(const LHS &L, const RHS &R)
Match either "add nuw" or "or disjoint".
BinOpPred_match< LHS, RHS, is_bitwiselogic_op > m_BitwiseLogic(const LHS &L, const RHS &R)
Matches bitwise logic operations.
m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Mul, true > m_c_Mul(const LHS &L, const RHS &R)
Matches a Mul with LHS and RHS in either order.
m_Intrinsic_Ty< Opnd0, Opnd1 >::Ty m_CopySign(const Opnd0 &Op0, const Opnd1 &Op1)
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > m_UMin(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
@ SingleThread
Synchronized with respect to signal handlers executing in the same thread.
@ System
Synchronized with respect to all concurrently executing threads.
AssignmentMarkerRange getAssignmentMarkers(DIAssignID *ID)
Return a range of dbg.assign intrinsics which use ID as an operand.
SmallVector< DbgVariableRecord * > getDVRAssignmentMarkers(const Instruction *Inst)
initializer< Ty > init(const Ty &Val)
DiagnosticInfoOptimizationBase::Argument NV
This is an optimization pass for GlobalISel generic memory operations.
cl::opt< bool > EnableKnowledgeRetention
Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
@ NeverOverflows
Never overflows.
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
@ MayOverflow
May or may not overflow.
UnaryFunction for_each(R &&Range, UnaryFunction F)
Provide wrappers to std::for_each which take ranges instead of having to pass begin/end explicitly.
Value * simplifyFMulInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FMul, fold the result or return null.
bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI, const DominatorTree *DT=nullptr, bool AllowEphemerals=false)
Return true if it is valid to use the assumptions provided by an assume intrinsic,...
bool isSafeToSpeculativelyExecuteWithVariableReplaced(const Instruction *I)
Don't use information from its non-constant operands.
APInt possiblyDemandedEltsInMask(Value *Mask)
Given a mask vector of the form <Y x i1>, return an APInt (of bitwidth Y) for each lane which may be ...
RetainedKnowledge simplifyRetainedKnowledge(AssumeInst *Assume, RetainedKnowledge RK, AssumptionCache *AC, DominatorTree *DT)
canonicalize the RetainedKnowledge RK.
bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI)
Return true if this is a call to an allocation function that does not have side effects that we are r...
Value * lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL, const TargetLibraryInfo *TLI, bool MustSucceed)
Try to turn a call to @llvm.objectsize into an integer value of the given Type.
Value * getAllocAlignment(const CallBase *V, const TargetLibraryInfo *TLI)
Gets the alignment argument for an aligned_alloc-like function, using either built-in knowledge based...
LLVM_READONLY APFloat maximum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 maximum semantics.
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
Value * simplifyCall(CallBase *Call, Value *Callee, ArrayRef< Value * > Args, const SimplifyQuery &Q)
Given a callsite, callee, and arguments, fold the result or return null.
Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value and is Skew mod Align.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
bool isAssumeWithEmptyBundle(const AssumeInst &Assume)
Return true iff the operand bundles of the provided llvm.assume doesn't contain any valuable informat...
Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
constexpr T MinAlign(U A, V B)
A and B are either alignments or offsets.
RetainedKnowledge getKnowledgeFromBundle(AssumeInst &Assume, const CallBase::BundleOpInfo &BOI)
This extracts the Knowledge from an element of an operand bundle.
Align getKnownAlignment(Value *V, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to infer an alignment for the specified pointer.
bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned el...
LLVM_READONLY APFloat maxnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2019 maximumNumber semantics.
FPClassTest fneg(FPClassTest Mask)
Return the test mask which returns true if the value's sign bit is flipped.
SelectPatternFlavor
Specific patterns of select instructions we can match.
@ SPF_ABS
Absolute value.
@ SPF_NABS
Negated absolute value.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
bool isModSet(const ModRefInfo MRI)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
bool isAtLeastOrStrongerThan(AtomicOrdering AO, AtomicOrdering Other)
AssumeInst * buildAssumeFromKnowledge(ArrayRef< RetainedKnowledge > Knowledge, Instruction *CtxI, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)
Build and return a new assume created from the provided knowledge if the knowledge in the assume is f...
bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true)
Return true if the instruction does not have any effects besides calculating the result and does not ...
FPClassTest inverse_fabs(FPClassTest Mask)
Return the test mask which returns true after fabs is applied to the value.
bool isNotCrossLaneOperation(const Instruction *I)
Return true if the instruction doesn't potentially cross vector lanes.
bool maskIsAllOneOrUndef(Value *Mask)
Given a mask vector of i1, Return true if all of the elements of this predicate mask are known to be ...
Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr int PoisonMaskElem
@ Mod
The access may modify the value stored in memory.
Value * simplifyFMAFMul(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for the multiplication of a FMA, fold the result or return null.
Value * simplifyConstrainedFPCall(CallBase *Call, const SimplifyQuery &Q)
Given a constrained FP intrinsic call, tries to compute its simplified version.
LLVM_READONLY APFloat minnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2019 minimumNumber semantics.
@ And
Bitwise or logical AND of integers.
ConstantRange computeConstantRangeIncludingKnownBits(const WithCache< const Value * > &V, bool ForSigned, const SimplifyQuery &SQ)
Combine constant ranges from computeConstantRange() and computeKnownBits().
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)
If this is a call to a free function, return the freed operand.
constexpr unsigned BitWidth
bool isDereferenceablePointer(const Value *V, Type *Ty, const DataLayout &DL, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Return true if this is always a dereferenceable pointer.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
std::optional< APInt > getAllocSize(const CallBase *CB, const TargetLibraryInfo *TLI, function_ref< const Value *(const Value *)> Mapper=[](const Value *V) { return V;})
Return the size of the requested allocation.
std::optional< bool > computeKnownFPSignBit(const Value *V, unsigned Depth, const SimplifyQuery &SQ)
Return false if we can prove that the specified FP value's sign bit is 0.
unsigned Log2(Align A)
Returns the log2 of the alignment.
bool maskContainsAllOneOrUndef(Value *Mask)
Given a mask vector of i1, Return true if any of the elements of this predicate mask are known to be ...
std::optional< bool > isImpliedByDomCondition(const Value *Cond, const Instruction *ContextI, const DataLayout &DL)
Return the boolean condition value in the context of the given instruction if it is known based on do...
LLVM_READONLY APFloat minimum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 minimum semantics.
bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW=false, bool AllowPoison=true)
Return true if the two given values are negation.
bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be non-negative.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
This struct is a compact representation of a valid (non-zero power of two) alignment.
@ IEEE
IEEE-754 denormal numbers preserved.
bool isNonNegative() const
Returns true if this value is known to be non-negative.
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
unsigned getBitWidth() const
Get the bit width of this value.
bool isNonZero() const
Returns true if this value is known to be non-zero.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
bool isNegative() const
Returns true if this value is known to be negative.
unsigned countMaxLeadingZeros() const
Returns the maximum number of leading zero bits possible.
unsigned countMinPopulation() const
Returns the number of bits known to be one.
bool isAllOnes() const
Returns true if value is all one bits.
FPClassTest KnownFPClasses
Floating-point classes the value could be one of.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
A lightweight accessor for an operand bundle meant to be passed around by value.
StringRef getTagName() const
Return the tag of this operand bundle as a string.
Represent one information held inside an operand bundle of an llvm.assume.
Attribute::AttrKind AttrKind
SelectPatternFlavor Flavor
SimplifyQuery getWithInstruction(const Instruction *I) const