#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsHexagon.h"
// ...
#define DEBUG_TYPE "instcombine"
// ...
STATISTIC(NumSimplified, "Number of library calls simplified");
// ...
static cl::opt<unsigned> GuardWideningWindow(
    "instcombine-guard-widening-window", cl::init(3),
    cl::desc("How wide an instruction window to bypass looking for "
             "another guard"));
if (ITy->getBitWidth() < 32)
  // ...

auto *Src = MI->getRawSource();
// ...
if (!Src->hasOneUse())
  // ...

if (!CopyDstAlign || *CopyDstAlign < DstAlign) {
  MI->setDestAlignment(DstAlign);
  // ...
if (!CopySrcAlign || *CopySrcAlign < SrcAlign) {
  MI->setSourceAlignment(SrcAlign);
  // ...
if (!MemOpLength)
  return nullptr;
// ...
assert(Size && "0-sized memory transferring should be removed already.");
// ...
if (*CopyDstAlign < Size || *CopySrcAlign < Size)
  // ...
Value *Src = MI->getArgOperand(1);
Value *Dest = MI->getArgOperand(0);
// ...
L->setAlignment(*CopySrcAlign);
L->setAAMetadata(AACopyMD);
MDNode *LoopMemParallelMD =
    MI->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
if (LoopMemParallelMD)
  L->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
MDNode *AccessGroupMD = MI->getMetadata(LLVMContext::MD_access_group);
// ...
L->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);
// ...
if (LoopMemParallelMD)
  S->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
// ...
S->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);
// ...
L->setVolatile(MT->isVolatile());
// ...
if (MI->isAtomic()) {
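// Editor's illustrative sketch (not in the original source): with a small
// constant length and adequate alignment, the transfer above becomes a single
// integer load/store pair, e.g.
//   call void @llvm.memcpy.p0.p0.i64(ptr %d, ptr %s, i64 8, i1 false)
// ==>
//   %v = load i64, ptr %s, align 8
//   store i64 %v, ptr %d, align 8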
const Align KnownAlignment = /* ... */;
// ...
if (!MemSetAlign || *MemSetAlign < KnownAlignment) {
  MI->setDestAlignment(KnownAlignment);
  // ...

assert(Len && "0-sized memory setting should be removed already.");
const Align Alignment = MI->getDestAlign().valueOrOne();
// ...
if (MI->isAtomic() && Alignment < Len)
  // ...

Constant *FillVal = ConstantInt::get(/* ... */);
// ...
DbgAssign->replaceVariableLocationOp(FillC, FillVal);
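// Editor's illustrative sketch: a constant-length memset of a constant byte
// becomes one store of a splatted integer, e.g.
//   call void @llvm.memset.p0.i64(ptr %d, i8 1, i64 4, i1 false)
// ==>
//   store i32 16843009, ptr %d, align 1   ; 16843009 == 0x01010101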
Value *LoadPtr = II.getArgOperand(0);
const Align Alignment = II.getParamAlign(0).valueOrOne();
// ...
LoadInst *L = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
                                        /* ... */);
// ...
                                 II.getDataLayout(), &II, &AC)) {
  LoadInst *LI = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
                                           /* ... */);
  // ...
  return Builder.CreateSelect(II.getArgOperand(1), LI, II.getArgOperand(2));
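// Editor's illustrative sketch: when the pointer is known dereferenceable,
// the masked load is rewritten as an unconditional load plus a select against
// the passthru, e.g.
//   %v = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %p, i32 4,
//                                                  <4 x i1> %m, <4 x i32> %pt)
// ==>
//   %l = load <4 x i32>, ptr %p, align 4
//   %v = select <4 x i1> %m, <4 x i32> %l, <4 x i32> %pt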
Value *StorePtr = II.getArgOperand(1);
Align Alignment = II.getParamAlign(1).valueOrOne();
// ...
    new StoreInst(II.getArgOperand(0), StorePtr, false, Alignment);
if (ConstMask->isAllOnesValue())
  // ...

const Align Alignment = II.getParamAlign(0).valueOrOne();
LoadInst *L = Builder.CreateAlignedLoad(VecTy->getElementType(), SplatPtr,
                                        Alignment, "load.scalar");
// ...
    Builder.CreateVectorSplat(VecTy->getElementCount(), L, "broadcast");
// ...

Align Alignment = II.getParamAlign(1).valueOrOne();
StoreInst *S = new StoreInst(SplatValue, SplatPtr, false,
                             Alignment);
// ...
if (ConstMask->isAllOnesValue()) {
  Align Alignment = II.getParamAlign(1).valueOrOne();
  // ...
  ElementCount VF = WideLoadTy->getElementCount();
  // ...
      Builder.CreateExtractElement(II.getArgOperand(0), LastLane);
  // ...
      new StoreInst(Extract, SplatPtr, false, Alignment);
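// Editor's illustrative sketch: a gather from a splatted pointer is one
// scalar load broadcast to all lanes, and dually a scatter of a splatted
// value to a splatted pointer with a known non-zero mask is one scalar store:
//   gather(splat(%p), all-ones mask)  ==>  %s = load %p ; splat %s
//   scatter(splat(%v), splat(%p), m)  ==>  store %v, %p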
auto *Arg = II.getArgOperand(0);
auto *StrippedArg = Arg->stripPointerCasts();
auto *StrippedInvariantGroupsArg = StrippedArg;
// ...
if (Intr->getIntrinsicID() != Intrinsic::launder_invariant_group &&
    Intr->getIntrinsicID() != Intrinsic::strip_invariant_group)
  // ...
StrippedInvariantGroupsArg = Intr->getArgOperand(0)->stripPointerCasts();
// ...
if (StrippedArg == StrippedInvariantGroupsArg)
  // ...

Value *Result = nullptr;
// ...
if (II.getIntrinsicID() == Intrinsic::launder_invariant_group)
  // ...
else if (II.getIntrinsicID() == Intrinsic::strip_invariant_group)
  // ...
       "simplifyInvariantGroupIntrinsic only handles launder and strip");
if (Result->getType()->getPointerAddressSpace() !=
    II.getType()->getPointerAddressSpace())
assert((II.getIntrinsicID() == Intrinsic::cttz ||
        II.getIntrinsicID() == Intrinsic::ctlz) &&
       "Expected cttz or ctlz intrinsic");
bool IsTZ = II.getIntrinsicID() == Intrinsic::cttz;
Value *Op0 = II.getArgOperand(0);
Value *Op1 = II.getArgOperand(1);
// ...
if (II.getType()->isIntOrIntVectorTy(1)) {
  // ...
II.dropUBImplyingAttrsAndMetadata();
// ...
return BinaryOperator::CreateAdd(ConstCttz, X);
// ...
return BinaryOperator::CreateSub(ConstCttz, X);
// ...
Constant *Width =
    ConstantInt::get(II.getType(), II.getType()->getScalarSizeInBits());
return BinaryOperator::CreateSub(Width, X);
// ...
return BinaryOperator::CreateAdd(ConstCtlz, X);
// ...
return BinaryOperator::CreateSub(ConstCtlz, X);
unsigned BitWidth = Ty->getScalarSizeInBits();
// ...
ConstantInt::get(R->getType(), R->getType()->getScalarSizeInBits() - 1),
// ...
if (PossibleZeros == DefiniteZeros) {
  auto *C = ConstantInt::get(Op0->getType(), DefiniteZeros);
  // ...
if (BitWidth != 1 && !II.hasRetAttr(Attribute::Range) &&
    !II.getMetadata(LLVMContext::MD_range)) {
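// Editor's illustrative sketch: shifting a constant moves its zero count by
// the shift amount, which is what the Add/Sub folds above produce, e.g.
//   cttz(shl(C, %x), false)  ==>  add(cttz(C, false), %x)
//   ctlz(lshr(C, %x), false) ==>  add(ctlz(C, false), %x)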
assert(II.getIntrinsicID() == Intrinsic::ctpop &&
       "Expected ctpop intrinsic");
// ...
unsigned BitWidth = Ty->getScalarSizeInBits();
Value *Op0 = II.getArgOperand(0);
// ...
if ((~Known.Zero).isPowerOf2())
  return BinaryOperator::CreateLShr(
      Op0, ConstantInt::get(Ty, (~Known.Zero).exactLogBase2()));
// ...
    II.getRange().value_or(ConstantRange::getFull(BitWidth));
// ...
if (Range != OldRange) {
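// Editor's illustrative sketch: if KnownBits proves at most one bit can be
// set (~Known.Zero is a power of two), the popcount is just that bit moved to
// bit zero, e.g. when only bit 3 may be set:
//   ctpop(%x)  ==>  lshr %x, 3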
unsigned NumElts = VecTy->getNumElements();
// ...
if (!VecTy->getElementType()->isIntegerTy(8) || NumElts != 8)
  // ...
for (unsigned I = 0; I < NumElts; ++I) {
  // ...
  if ((unsigned)Indexes[I] >= NumElts)
    // ...
auto *V1 = II.getArgOperand(0);
// ...
return Builder.CreateShuffleVector(V1, V2, ArrayRef(Indexes));
static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E,
                             unsigned NumOperands) {
  assert(I.arg_size() >= NumOperands && "Not enough operands");
  assert(E.arg_size() >= NumOperands && "Not enough operands");
  for (unsigned i = 0; i < NumOperands; i++)
    if (I.getArgOperand(i) != E.getArgOperand(i))
for (; BI != BE; ++BI) {
  if (I->isDebugOrPseudoInst() ||
      // ...

// ...
return II.getIntrinsicID() == Intrinsic::vastart ||
       (II.getIntrinsicID() == Intrinsic::vacopy &&
        I.getArgOperand(0) != II.getArgOperand(1));
assert(Call.arg_size() > 1 && "Need at least 2 args to swap");
Value *Arg0 = Call.getArgOperand(0), *Arg1 = Call.getArgOperand(1);
// ...
Call.setArgOperand(0, Arg1);
Call.setArgOperand(1, Arg0);
Value *OperationResult = nullptr;
// ...
for (User *U : WO->users()) {
  // ...
  for (auto &AssumeVH : AC.assumptionsFor(U)) {
    // ...
Inst->setHasNoSignedWrap();
// ...
Inst->setHasNoUnsignedWrap();
// ...
Ty = Ty->getScalarType();
// ...
Ty = Ty->getScalarType();
return F.getDenormalMode(Ty->getFltSemantics()).inputsAreZero();
switch (static_cast<unsigned>(Mask)) {
Value *Src0 = II.getArgOperand(0);
Value *Src1 = II.getArgOperand(1);
// ...
const FPClassTest OrderedInvertedMask = ~OrderedMask & ~fcNan;
// ...
const bool IsStrict =
    II.getFunction()->getAttributes().hasFnAttr(Attribute::StrictFP);
// ...
II.setArgOperand(1, ConstantInt::get(Src1->getType(), fneg(Mask)));
// ...
if ((OrderedMask == fcInf || OrderedInvertedMask == fcInf) &&
    (IsOrdered || IsUnordered) && !IsStrict) {
  // ...
  if (OrderedInvertedMask == fcInf)
    // ...
  Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Src0);
// ...
    (IsOrdered || IsUnordered) && !IsStrict) {
  // ...
  Value *EqInf = IsUnordered ? Builder.CreateFCmpUEQ(Src0, Inf)
                             : Builder.CreateFCmpOEQ(Src0, Inf);
// ...
if ((OrderedInvertedMask == fcPosInf || OrderedInvertedMask == fcNegInf) &&
    (IsOrdered || IsUnordered) && !IsStrict) {
  // ...
  Value *NeInf = IsUnordered ? Builder.CreateFCmpUNE(Src0, Inf)
                             : Builder.CreateFCmpONE(Src0, Inf);
// ...
if (Mask == fcNan && !IsStrict) {
  // ...
if (!IsStrict && (IsOrdered || IsUnordered) &&
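// Editor's illustrative sketch: class tests that are expressible as an IEEE
// comparison become a plain fcmp when FP exceptions don't matter, e.g.
//   is.fpclass(%x, fcNan)  ==>  fcmp uno %x, 0.0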
return std::nullopt;
// ...
return std::nullopt;
// ...
return *Known0 == *Known1;
assert((MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin ||
        MinMaxID == Intrinsic::umax || MinMaxID == Intrinsic::umin) &&
       "Expected a min or max intrinsic");
// ...
Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1);
// ...
const APInt *C0, *C1;
// ...
bool IsSigned = MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin;
// ...
if ((IsSigned && !Add->hasNoSignedWrap()) ||
    (!IsSigned && !Add->hasNoUnsignedWrap()))
  // ...
    IsSigned ? C1->ssub_ov(*C0, Overflow) : C1->usub_ov(*C0, Overflow);
assert(!Overflow && "Expected simplify of min/max");
// ...
Constant *NewMinMaxC = ConstantInt::get(II->getType(), CDiff);
Value *NewMinMax = Builder.CreateBinaryIntrinsic(MinMaxID, X, NewMinMaxC);
return IsSigned ? BinaryOperator::CreateNSWAdd(NewMinMax, Add->getOperand(1))
                : BinaryOperator::CreateNUWAdd(NewMinMax, Add->getOperand(1));
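// Editor's illustrative sketch: the constant is re-associated through the
// no-wrap add, e.g.
//   umin((add nuw %x, 4), 6)  ==>  add nuw (umin %x, 2), 4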
const APInt *MinValue, *MaxValue;
// ...
} else if (match(&MinMax1,
                 // ...
// ...
if (!(*MaxValue + 1).isPowerOf2() || -*MinValue != *MaxValue + 1)
  // ...
unsigned NewBitWidth = (*MaxValue + 1).logBase2() + 1;
// ...
if (AddSub->getOpcode() == Instruction::Add)
  IntrinsicID = Intrinsic::sadd_sat;
else if (AddSub->getOpcode() == Instruction::Sub)
  IntrinsicID = Intrinsic::ssub_sat;
// ...
Value *Sat = Builder.CreateIntrinsic(IntrinsicID, NewTy, {AT, BT});
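// Editor's illustrative sketch: a clamp of a wider add/sub to the range of a
// narrower type becomes a saturating intrinsic on the narrow type, e.g.
//   smin(smax((add nsw %a32, %b32), -128), 127), operands extended from i8
// ==>
//   sext(sadd.sat(%a8, %b8)) back to i32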
Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
// ...
const APInt *C0, *C1;
// ...
switch (II->getIntrinsicID()) {
case Intrinsic::smax:
  // ...
case Intrinsic::smin:
  // ...
case Intrinsic::umax:
  // ...
case Intrinsic::umin:
  // ...
Value *Cmp = Builder.CreateICmp(Pred, X, I1);
// ...
if (InnerMinMaxID != MinMaxID &&
    !(((MinMaxID == Intrinsic::umax && InnerMinMaxID == Intrinsic::smax) ||
       (MinMaxID == Intrinsic::smin && InnerMinMaxID == Intrinsic::umin)) &&
      // ...
Value *CondC = Builder.CreateICmp(Pred, C0, C1);
Value *NewC = Builder.CreateSelect(CondC, C0, C1);
return Builder.CreateIntrinsic(InnerMinMaxID, II->getType(),
                               {LHS->getArgOperand(0), NewC});
if (!InnerMM || InnerMM->getIntrinsicID() != MinMaxID ||
    // ...
// ...
    MinMaxID, II->getType());
Value *NewInner = Builder.CreateBinaryIntrinsic(MinMaxID, X, Y);
if (!LHS || !RHS || LHS->getIntrinsicID() != MinMaxID ||
    RHS->getIntrinsicID() != MinMaxID ||
    (!LHS->hasOneUse() && !RHS->hasOneUse()))
  // ...

Value *MinMaxOp = nullptr;
Value *ThirdOp = nullptr;
if (LHS->hasOneUse()) {
  // ...
  if (D == A || C == A) {
    // ...
  } else if (D == B || C == B) {
    // ...
// ...
assert(RHS->hasOneUse() && "Expected one-use operand");
// ...
if (D == A || D == B) {
  // ...
} else if (C == A || C == B) {
  // ...
// ...
if (!MinMaxOp || !ThirdOp)
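// Editor's illustrative sketch: with a common operand the min/max tree is
// refactored to use it once, e.g.
//   umin(umin(%a, %b), umin(%c, %b))  ==>  umin(umin(%a, %c), %b)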
    !II->getCalledFunction()->isSpeculatable())
  // ...
return isa<Constant>(Arg.get()) ||
       isVectorIntrinsicWithScalarOpAtArg(II->getIntrinsicID(),
                                          Arg.getOperandNo(), nullptr);
// ...
Type *SrcTy = X->getType();
for (Use &Arg : II->args()) {
  // ...
  else if (match(&Arg,
                 // ...
           X->getType() == SrcTy)
    // ...
Value *NewIntrinsic =
    Builder.CreateIntrinsic(ResTy, II->getIntrinsicID(), NewArgs, FPI);
// ...
return match(V, m_OneUse(m_VecReverse(m_Value())));
// ...
for (Use &Arg : II->args()) {
  // ...
      Arg.getOperandNo(), nullptr))
  // ...
    II->getType(), II->getIntrinsicID(), NewArgs, FPI);
return Builder.CreateVectorReverse(NewIntrinsic);
template <Intrinsic::ID IntrID>
// ...
  static_assert(IntrID == Intrinsic::bswap || IntrID == Intrinsic::bitreverse,
                "This helper only supports BSWAP and BITREVERSE intrinsics");
  // ...
  Value *OldReorderX, *OldReorderY;
  // ...
  Value *NewReorder = Builder.CreateUnaryIntrinsic(IntrID, Y);
  // ...
  Value *NewReorder = Builder.CreateUnaryIntrinsic(IntrID, X);
case Intrinsic::smax:
case Intrinsic::smin:
case Intrinsic::umax:
case Intrinsic::umin:
case Intrinsic::maximum:
case Intrinsic::minimum:
case Intrinsic::maximumnum:
case Intrinsic::minimumnum:
case Intrinsic::maxnum:
case Intrinsic::minnum:
// ...
auto IID = II->getIntrinsicID();
// ...
auto *InvariantBinaryInst =
    // ...
return InvariantBinaryInst;
// ...
if (!CanReorderLanes)
  // ...
int Sz = Mask.size();
// ...
for (int Idx : Mask) {
  // ...
  UsedIndices.set(Idx);
// ...
return UsedIndices.all() ? V : nullptr;
template <Intrinsic::ID IntrID>
// ...
  static_assert(IntrID == Intrinsic::cttz || IntrID == Intrinsic::ctlz,
                "This helper only supports cttz and ctlz intrinsics");
  // ...
  unsigned BitWidth = I1->getType()->getScalarSizeInBits();
  // ...
  Type *Ty = I1->getType();
  // ...
      IntrID == Intrinsic::cttz ? Instruction::Shl : Instruction::LShr,
      IntrID == Intrinsic::cttz
          ? ConstantInt::get(Ty, 1)
          // ...
  return Builder.CreateBinaryIntrinsic(
      IntrID, Builder.CreateOr(CtOp, NewConst),
      // ...
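// Editor's illustrative sketch: clamping a count at a constant C below the
// bit width is the same as planting a stop bit at position C, e.g.
//   umin(cttz(%x, i1 false), 4)  ==>  cttz(or(%x, 16), i1 true)   ; 16 == 1<<4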
case Intrinsic::umax:
case Intrinsic::umin:
  if (HasNUW && LOp == Instruction::Add)
    // ...
  if (HasNUW && LOp == Instruction::Shl)
    // ...
case Intrinsic::smax:
case Intrinsic::smin:
  return HasNSW && LOp == Instruction::Add;
if (A == D || B == C)
  // ...

// ...
  Value *NewIntrinsic = Builder.CreateBinaryIntrinsic(TopLevelOpcode, B, D);
  // ...
} else if (B == D) {
  Value *NewIntrinsic = Builder.CreateBinaryIntrinsic(TopLevelOpcode, A, C);
                        SQ.getWithInstruction(&CI)))
  // ...
return visitCallBase(CI);
if (auto NumBytes = MI->getLengthInBytes()) {
  // ...
  if (NumBytes->isZero())
    // ...
if (MI->isAtomic() &&
    (NumBytes->isNegative() ||
     (NumBytes->getZExtValue() % MI->getElementSizeInBytes() != 0))) {
  // ...
  assert(MI->getType()->isVoidTy() &&
         "non void atomic unordered mem intrinsic");
  // ...
if (MI->isVolatile())
  // ...
if (MTI->getSource() == MTI->getDest())
  // ...
bool SrcIsUndefined = false;
// ...
SrcIsUndefined = IsPointerUndefined(MTI->getRawSource());
// ...
if (SrcIsUndefined || IsPointerUndefined(MI->getRawDest())) {
  // ...
if (GVSrc->isConstant()) {
  // ...
      ? Intrinsic::memcpy_element_unordered_atomic
      : Intrinsic::memcpy;
auto VWidth = IIFVTy->getNumElements();
APInt PoisonElts(VWidth, 0);
// ...
if (II->isCommutative()) {
  if (auto Pair = matchSymmetricPair(II->getOperand(0), II->getOperand(1))) {
case Intrinsic::objectsize: {
  // ...
                           &InsertedInstructions)) {
    for (Instruction *Inserted : InsertedInstructions)
case Intrinsic::abs: {
  Value *IIOperand = II->getArgOperand(0);
  // ...
  if (match(IIOperand,
            // ...
  if (std::optional<bool> Known =
      // ...
    return BinaryOperator::CreateAnd(X, ConstantInt::get(II->getType(), 1));
case Intrinsic::umin: {
  Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
  // ...
  assert(II->getType()->getScalarSizeInBits() != 1 &&
         "Expected simplify of umin with max constant");
  // ...
  if (Value *FoldedCttz =
      // ...
  if (Value *FoldedCtlz =
      // ...
case Intrinsic::umax: {
  Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
  // ...
      (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) {
    // ...
    Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, NarrowC);
    // ...
  Value *Cmp = Builder.CreateICmpEQ(X, ConstantInt::get(X->getType(), 0));
  // ...
      Builder.CreateSelect(Cmp, ConstantInt::get(X->getType(), 1), A);
  // ...
  if (IID == Intrinsic::umax) {
case Intrinsic::smax:
case Intrinsic::smin: {
  Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
  // ...
      (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) {
    // ...
    Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, NarrowC);
    // ...
  const APInt *MinC, *MaxC;
  auto CreateCanonicalClampForm = [&](bool IsSigned) {
    auto MaxIID = IsSigned ? Intrinsic::smax : Intrinsic::umax;
    auto MinIID = IsSigned ? Intrinsic::smin : Intrinsic::umin;
    // ...
        MaxIID, X, ConstantInt::get(X->getType(), *MaxC));
    // ...
        MinIID, NewMax, ConstantInt::get(X->getType(), *MinC)));
  };
  if (IID == Intrinsic::smax &&
      // ...
    return CreateCanonicalClampForm(true);
  if (IID == Intrinsic::umax &&
      // ...
    return CreateCanonicalClampForm(false);
  // ...
  if ((IID == Intrinsic::umin || IID == Intrinsic::smax) &&
      II->getType()->isIntOrIntVectorTy(1)) {
    return BinaryOperator::CreateAnd(I0, I1);
  // ...
  if ((IID == Intrinsic::umax || IID == Intrinsic::smin) &&
      II->getType()->isIntOrIntVectorTy(1)) {
    return BinaryOperator::CreateOr(I0, I1);
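// Editor's illustrative sketch: on i1 the orderings collapse to logic, e.g.
//   umin(%a, %b) ==> and(%a, %b)    umax(%a, %b) ==> or(%a, %b)
// smin/smax swap roles because i1 true is -1, the smallest signed value.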
if (IID == Intrinsic::smin) {
  // ...
  Value *Zero = ConstantInt::get(X->getType(), 0);
  // ...
      Builder.CreateIntrinsic(II->getType(), Intrinsic::scmp, {X, Zero}));
// ...
if (IID == Intrinsic::smax || IID == Intrinsic::smin) {
  // ...
bool UseOr = IID == Intrinsic::smax || IID == Intrinsic::umax;
bool UseAndN = IID == Intrinsic::smin || IID == Intrinsic::umin;
// ...
if (IID == Intrinsic::smax || IID == Intrinsic::smin) {
  // ...
  if (KnownSign == std::nullopt) {
    // ...
  } else if (*KnownSign) {
    // ...
return BinaryOperator::CreateOr(I0, X);
// ...
return BinaryOperator::CreateAnd(I0, Builder.CreateNot(X));
// ...
Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, A, NotY);
// ...
return BinaryOperator::CreateAnd(Builder.CreateBinaryIntrinsic(IID, X, Y),
                                 ConstantInt::get(II->getType(), *RHSC));
// ...
if (I0->hasOneUse() && !I1->hasOneUse())
  // ...
if (IID == Intrinsic::smin || IID == Intrinsic::umax)
  Abs = Builder.CreateNeg(Abs, "nabs", IntMinIsPoison);
// ...
    I0, IsSigned, SQ.getWithInstruction(II));
// ...
if (LHS_CR.icmp(Pred, *RHSC))
  // ...
    ConstantInt::get(II->getType(), *RHSC));
case Intrinsic::scmp: {
  Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
  // ...
      Builder.CreateIntrinsic(II->getType(), Intrinsic::scmp, {LHS, RHS}));
case Intrinsic::bitreverse: {
  Value *IIOperand = II->getArgOperand(0);
  // ...
      X->getType()->isIntOrIntVectorTy(1)) {
    Type *Ty = II->getType();
    // ...
    return crossLogicOpFold;
  // ...
case Intrinsic::bswap: {
  Value *IIOperand = II->getArgOperand(0);
  // ...
  Value *NewSwap = Builder.CreateUnaryIntrinsic(Intrinsic::bswap, X);
  // ...
  if (BW - LZ - TZ == 8) {
    assert(LZ != TZ && "active byte cannot be in the middle");
    // ...
    return BinaryOperator::CreateNUWShl(
        IIOperand, ConstantInt::get(IIOperand->getType(), LZ - TZ));
    // ...
    return BinaryOperator::CreateExactLShr(
        IIOperand, ConstantInt::get(IIOperand->getType(), TZ - LZ));
  // ...
  unsigned C = X->getType()->getScalarSizeInBits() - BW;
  Value *CV = ConstantInt::get(X->getType(), C);
  // ...
    return crossLogicOpFold;
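// Editor's illustrative sketch: when only one byte of the operand can be
// non-zero, the byte swap is a plain shift to the mirrored byte position,
// e.g. for an i32 known zero outside the low byte:
//   bswap(%x)  ==>  shl nuw %x, 24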
case Intrinsic::masked_load:
  if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II))
    // ...
case Intrinsic::masked_store:
  return simplifyMaskedStore(*II);
case Intrinsic::masked_gather:
  return simplifyMaskedGather(*II);
case Intrinsic::masked_scatter:
  return simplifyMaskedScatter(*II);
case Intrinsic::launder_invariant_group:
case Intrinsic::strip_invariant_group:
  // ...
case Intrinsic::powi:
  // ...
  if (Power->isMinusOne())
    // ...
        II->getArgOperand(0), II);
  if (Power->equalsInt(2))
    // ...
        II->getArgOperand(0), II);
  if (!Power->getValue()[0]) {
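// Editor's illustrative sketch: small constant powers expand directly, with
// the intrinsic's fast-math flags carried over, e.g.
//   powi(%x, 2)   ==>  fmul %x, %x
//   powi(%x, -1)  ==>  fdiv 1.0, %x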
case Intrinsic::cttz:
case Intrinsic::ctlz:
  // ...
case Intrinsic::ctpop:
  // ...
case Intrinsic::fshl:
case Intrinsic::fshr: {
  Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1);
  Type *Ty = II->getType();
  unsigned BitWidth = Ty->getScalarSizeInBits();
  // ...
  if (ModuloC != ShAmtC)
    // ...
         "Shift amount expected to be modulo bitwidth");
  // ...
  if (IID == Intrinsic::fshr) {
    // ...
  assert(IID == Intrinsic::fshl &&
         "All funnel shifts by simple constants should go left");
  // ...
    return BinaryOperator::CreateShl(Op0, ShAmtC);
  // ...
    return BinaryOperator::CreateLShr(Op1,
                                      // ...
  const APInt *ShAmtInnerC, *ShAmtOuterC;
  // ...
  APInt Sum = *ShAmtOuterC + *ShAmtInnerC;
  // ...
  Constant *ModuloC = ConstantInt::get(Ty, Modulo);
  // ...
      {InnerOp, InnerOp, ModuloC});
  // ...
      Mod, IID == Intrinsic::fshl ? Intrinsic::fshr : Intrinsic::fshl, Ty);
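// Editor's illustrative sketch: funnel shifts by constants are canonicalized
// to fshl, and degenerate to plain shifts when one input is irrelevant, e.g.
// on i32:
//   fshr(%x, %y, 3)  ==>  fshl(%x, %y, 29)
//   fshl(%x, 0, 5)   ==>  shl %x, 5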
Value *Op2 = II->getArgOperand(2);
// ...
return BinaryOperator::CreateShl(Op0, And);
// ...
case Intrinsic::ptrmask: {
  unsigned BitWidth = DL.getPointerTypeSizeInBits(II->getType());
  // ...
  Value *InnerPtr, *InnerMask;
  // ...
  if (match(II->getArgOperand(0),
            // ...
         "Mask types must match");
  // ...
    Value *NewMask = Builder.CreateAnd(II->getArgOperand(1), InnerMask);
  // ...
  unsigned NewAlignmentLog =
      // ...
case Intrinsic::uadd_with_overflow:
case Intrinsic::sadd_with_overflow: {
  // ...
  const APInt *C0, *C1;
  Value *Arg0 = II->getArgOperand(0);
  Value *Arg1 = II->getArgOperand(1);
  bool IsSigned = IID == Intrinsic::sadd_with_overflow;
  bool HasNWAdd = IsSigned /* ... */;
  // ...
      IsSigned ? C1->sadd_ov(*C0, Overflow) : C1->uadd_ov(*C0, Overflow);
  // ...
      IID, X, ConstantInt::get(Arg1->getType(), NewC)));
  // ...
case Intrinsic::umul_with_overflow:
case Intrinsic::smul_with_overflow:
case Intrinsic::usub_with_overflow:
  // ...
case Intrinsic::ssub_with_overflow: {
  // ...
  Value *Arg0 = II->getArgOperand(0);
  Value *Arg1 = II->getArgOperand(1);
  // ...
  *II, Builder.CreateBinaryIntrinsic(Intrinsic::sadd_with_overflow,
                                     // ...
case Intrinsic::uadd_sat:
case Intrinsic::sadd_sat:
case Intrinsic::usub_sat:
case Intrinsic::ssub_sat: {
  // ...
  Type *Ty = SI->getType();
  // ...
  unsigned BitWidth = Ty->getScalarSizeInBits();
  // ...
  unsigned BitWidth = Ty->getScalarSizeInBits();
  // ...
  if (IID == Intrinsic::usub_sat &&
      // ...
    auto *NewC = Builder.CreateBinaryIntrinsic(Intrinsic::usub_sat, C, C1);
    // ...
        Builder.CreateBinaryIntrinsic(Intrinsic::usub_sat, NewC, A);
  // ...
      C->isNotMinSignedValue()) {
    // ...
        Intrinsic::sadd_sat, Arg0, NegVal));
  // ...
  const APInt *Val, *Val2;
  // ...
      IID == Intrinsic::uadd_sat || IID == Intrinsic::usub_sat;
  if (Other->getIntrinsicID() == IID &&
      // ...
    NewVal = Val->sadd_ov(*Val2, Overflow);
  // ...
      IID, X, ConstantInt::get(II->getType(), NewVal)));
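// Editor's illustrative sketch: saturating operations on constants combine,
// e.g.
//   usub.sat(usub.sat(%x, 3), 4)  ==>  usub.sat(%x, 7)
//   ssub.sat(%x, C)               ==>  sadd.sat(%x, -C)   ; C != INT_MIN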
case Intrinsic::minnum:
case Intrinsic::maxnum:
case Intrinsic::minimum:
case Intrinsic::maximum: {
  Value *Arg0 = II->getArgOperand(0);
  Value *Arg1 = II->getArgOperand(1);
  // ...
    case Intrinsic::maxnum:
      NewIID = Intrinsic::minnum;
      break;
    case Intrinsic::minnum:
      NewIID = Intrinsic::maxnum;
      break;
    case Intrinsic::maximum:
      NewIID = Intrinsic::minimum;
      break;
    case Intrinsic::minimum:
      NewIID = Intrinsic::maximum;
      break;
    // ...
  Instruction *FNeg = UnaryOperator::CreateFNeg(NewCall);
  // ...
    case Intrinsic::maxnum:
      // ...
    case Intrinsic::minnum:
      // ...
    case Intrinsic::maximum:
      // ...
    case Intrinsic::minimum:
      // ...
      IID, X, ConstantFP::get(Arg0->getType(), Res),
      // ...
      X->getType() == Y->getType()) {
    // ...
        Builder.CreateBinaryIntrinsic(IID, X, Y, II, II->getName());
  // ...
  auto IsMinMaxOrXNegX = [IID, &X](Value *Op0, Value *Op1) {
    // ...
    return Op0->hasOneUse() ||
           (IID != Intrinsic::minimum && IID != Intrinsic::minnum);
  };
  if (IsMinMaxOrXNegX(Arg0, Arg1) || IsMinMaxOrXNegX(Arg1, Arg0)) {
    // ...
    if (IID == Intrinsic::minimum || IID == Intrinsic::minnum)
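// Editor's illustrative sketch: a shared fneg moves out of the operands and
// flips the opcode, e.g.
//   maxnum(fneg %x, fneg %y)  ==>  fneg(minnum(%x, %y))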
case Intrinsic::matrix_multiply: {
  // ...
  Value *Op0 = II->getOperand(0);
  Value *Op1 = II->getOperand(1);
  Value *OpNotNeg, *NegatedOp;
  unsigned NegatedOpArg, OtherOpArg;
  // ...
  Value *OtherOp = II->getOperand(OtherOpArg);
  // ...
  NewArgs[NegatedOpArg] = OpNotNeg;
  // ...
      Builder.CreateIntrinsic(II->getType(), IID, NewArgs, II);
case Intrinsic::fmuladd: {
  // ...
      II->getFastMathFlags(), SQ.getWithInstruction(II)))
    // ...
      II->getFastMathFlags());
  // ...
case Intrinsic::fma: {
  Value *Src0 = II->getArgOperand(0);
  Value *Src1 = II->getArgOperand(1);
  Value *Src2 = II->getArgOperand(2);
  // ...
      SQ.getWithInstruction(II)))
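// Editor's illustrative sketch: paired sign flips on the multiplicands
// cancel, e.g.
//   fma(fneg %x, fneg %y, %z)  ==>  fma(%x, %y, %z)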
case Intrinsic::copysign: {
  Value *Mag = II->getArgOperand(0), *Sign = II->getArgOperand(1);
  // ...
  if (*KnownSignBit) {
    // ...
    Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Mag, II);
    // ...
  Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Mag, II);
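// Editor's illustrative sketch: once the sign operand's sign bit is known,
// copysign reduces to fabs (or its negation), e.g. with %s known positive:
//   copysign(%x, %s)  ==>  fabs(%x)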
case Intrinsic::fabs: {
  Value *Arg = II->getArgOperand(0);
  // ...
  SI->setFastMathFlags(FMF1 | FMF2);
  // ...
  Value *Magnitude, *Sign;
  if (match(II->getArgOperand(0),
            // ...
    Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Magnitude, II);
case Intrinsic::ceil:
case Intrinsic::floor:
case Intrinsic::round:
case Intrinsic::roundeven:
case Intrinsic::nearbyint:
case Intrinsic::rint:
case Intrinsic::trunc: {
  // ...
case Intrinsic::cos:
case Intrinsic::amdgcn_cos: {
  // ...
  Value *Src = II->getArgOperand(0);
  // ...
case Intrinsic::sin:
case Intrinsic::amdgcn_sin: {
case Intrinsic::ldexp: {
  // ...
  Value *Src = II->getArgOperand(0);
  Value *Exp = II->getArgOperand(1);
  // ...
      Exp->getType() == InnerExp->getType()) {
    // ...
    II->setArgOperand(1, NewExp);
    II->setFastMathFlags(InnerFlags);
    // ...
      Builder.CreateSelect(ExtSrc, ConstantFP::get(II->getType(), 2.0),
                           ConstantFP::get(II->getType(), 1.0));
  // ...
      Builder.CreateSelect(ExtSrc, ConstantFP::get(II->getType(), 0.5),
                           ConstantFP::get(II->getType(), 1.0));
  // ...
  Value *SelectCond, *SelectLHS, *SelectRHS;
  if (match(II->getArgOperand(1),
            // ...
    Value *NewLdexp = nullptr;
    // ...
      NewLdexp = Builder.CreateLdexp(Src, SelectLHS, II);
    // ...
      NewLdexp = Builder.CreateLdexp(Src, SelectRHS, II);
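// Editor's illustrative sketch: nested scalings combine through the exponent
// operand, e.g.
//   ldexp(ldexp(%x, %a), %b)  ==>  ldexp(%x, add(%a, %b))
// and a select feeding the exponent is hoisted into a select of two ldexps.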
case Intrinsic::ptrauth_auth:
case Intrinsic::ptrauth_resign: {
  // ...
  bool NeedSign = II->getIntrinsicID() == Intrinsic::ptrauth_resign;
  // ...
  Value *Disc = II->getArgOperand(2);
  // ...
  Value *AuthKey = nullptr, *AuthDisc = nullptr, *BasePtr;
  // ...
  if (!CPA || !CPA->isKnownCompatibleWith(Key, Disc, DL))
    // ...
      SignDisc, SignAddrDisc);
  // ...
    BasePtr = Builder.CreatePtrToInt(CPA->getPointer(), II->getType());
  // ...
  if (AuthKey && NeedSign) {
    // ...
    NewIntrin = Intrinsic::ptrauth_resign;
  } else if (AuthKey) {
    // ...
    NewIntrin = Intrinsic::ptrauth_auth;
  } else if (NeedSign) {
    // ...
    NewIntrin = Intrinsic::ptrauth_sign;
case Intrinsic::arm_neon_vtbl1:
case Intrinsic::aarch64_neon_tbl1:
  // ...
case Intrinsic::arm_neon_vmulls:
case Intrinsic::arm_neon_vmullu:
case Intrinsic::aarch64_neon_smull:
case Intrinsic::aarch64_neon_umull: {
  Value *Arg0 = II->getArgOperand(0);
  Value *Arg1 = II->getArgOperand(1);
  // ...
  bool Zext = (IID == Intrinsic::arm_neon_vmullu ||
               IID == Intrinsic::aarch64_neon_umull);
  // ...
case Intrinsic::arm_neon_aesd:
case Intrinsic::arm_neon_aese:
case Intrinsic::aarch64_crypto_aesd:
case Intrinsic::aarch64_crypto_aese:
case Intrinsic::aarch64_sve_aesd:
case Intrinsic::aarch64_sve_aese: {
  Value *DataArg = II->getArgOperand(0);
  Value *KeyArg = II->getArgOperand(1);
case Intrinsic::hexagon_V6_vandvrt:
case Intrinsic::hexagon_V6_vandvrt_128B: {
  // ...
  if (ID0 != Intrinsic::hexagon_V6_vandqrt &&
      ID0 != Intrinsic::hexagon_V6_vandqrt_128B)
    // ...
  Value *Bytes = Op0->getArgOperand(1), *Mask = II->getArgOperand(1);
  // ...
  if ((C & 0xFF) && (C & 0xFF00) && (C & 0xFF0000) && (C & 0xFF000000))
case Intrinsic::stackrestore: {
  enum class ClassifyResult {
    None,
    Alloca,
    StackRestore,
    CallWithSideEffects,
  };
  // ...
    return ClassifyResult::Alloca;
  // ...
    if (II->getIntrinsicID() == Intrinsic::stackrestore)
      return ClassifyResult::StackRestore;
    // ...
    if (II->mayHaveSideEffects())
      return ClassifyResult::CallWithSideEffects;
  // ...
    return ClassifyResult::CallWithSideEffects;
  // ...
  return ClassifyResult::None;
  // ...
  if (SS->getIntrinsicID() == Intrinsic::stacksave &&
      SS->getParent() == II->getParent()) {
    // ...
    bool CannotRemove = false;
    for (++BI; &*BI != II; ++BI) {
      switch (Classify(&*BI)) {
      case ClassifyResult::None:
        // ...
      case ClassifyResult::StackRestore:
        // ...
        CannotRemove = true;
        // ...
      case ClassifyResult::Alloca:
      case ClassifyResult::CallWithSideEffects:
        // ...
        CannotRemove = true;
  // ...
  bool CannotRemove = false;
  for (++BI; &*BI != TI; ++BI) {
    switch (Classify(&*BI)) {
    case ClassifyResult::None:
      // ...
    case ClassifyResult::StackRestore:
      // ...
    case ClassifyResult::Alloca:
    case ClassifyResult::CallWithSideEffects:
      // ...
      CannotRemove = true;
case Intrinsic::lifetime_end:
  // ...
  if (II->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
      II->getFunction()->hasFnAttribute(Attribute::SanitizeMemory) ||
      II->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))
    // ...
    return I.getIntrinsicID() == Intrinsic::lifetime_start;
case Intrinsic::assume: {
  Value *IIOperand = II->getArgOperand(0);
  // ...
  II->getOperandBundlesAsDefs(OpBundles);
  // ...
    return RemoveConditionFromAssume(Next);
  // ...
  Value *AssumeIntrinsic = II->getCalledOperand();
  // ...
    Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, A, OpBundles,
                       // ...
    Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, B, II->getName());
  // ...
    Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,
                       Builder.CreateNot(A), OpBundles, II->getName());
    Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,
                       // ...
  // ...
      LHS->getOpcode() == Instruction::Load &&
      LHS->getType()->isPointerTy() &&
      // ...
    LHS->setMetadata(LLVMContext::MD_nonnull, MD);
    LHS->setMetadata(LLVMContext::MD_noundef, MD);
    return RemoveConditionFromAssume(II);
  // ...
  for (unsigned Idx = 0; Idx < II->getNumOperandBundles(); Idx++) {
    // ...
    if (OBU.getTagName() == "separate_storage") {
      // ...
      auto MaybeSimplifyHint = [&](const Use &U) {
        Value *Hint = U.get();
        // ...
      };
      MaybeSimplifyHint(OBU.Inputs[0]);
      MaybeSimplifyHint(OBU.Inputs[1]);
      // ...
  if (!RK || RK.AttrKind != Attribute::Alignment ||
      // ...
      A->getType()->isPointerTy()) {
    // ...
    Replacement->insertBefore(Next->getIterator());
    AC.registerAssumption(Replacement);
    return RemoveConditionFromAssume(II);
  // ...
  if (auto *Replacement =
      // ...
    Replacement->insertAfter(II->getIterator());
    AC.registerAssumption(Replacement);
    // ...
    return RemoveConditionFromAssume(II);
  // ...
  for (unsigned Idx = 0; Idx < II->getNumOperandBundles(); Idx++) {
    auto &BOI = II->bundle_op_info_begin()[Idx];
    // ...
    if (BOI.End - BOI.Begin > 2)
      // ...
    if (BOI.End - BOI.Begin > 0) {
      Worklist.pushValue(II->op_begin()[BOI.Begin]);
      // ...
    if (BOI.End - BOI.Begin > 0)
      II->op_begin()[BOI.Begin].set(CanonRK.WasOn);
    if (BOI.End - BOI.Begin > 1)
      II->op_begin()[BOI.Begin + 1].set(ConstantInt::get(
          // ...
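// Editor's illustrative sketch: a conjunctive assumption is split so each
// conjunct can feed analyses independently, e.g.
//   call void @llvm.assume(i1 %and)   ; %and = and i1 %a, %b
// ==>
//   call void @llvm.assume(i1 %a)
//   call void @llvm.assume(i1 %b)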
case Intrinsic::experimental_guard: {
  // ...
  Value *NextCond = nullptr;
  // ...
  Value *CurrCond = II->getArgOperand(0);
  // ...
  if (CurrCond != NextCond) {
    // ...
    while (MoveI != NextInst) {
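// Editor's illustrative sketch: adjacent guards (within the
// instcombine-guard-widening-window instruction window) are widened into one,
// e.g.
//   guard(%a) ; guard(%b)  ==>  guard(and(%a, %b))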
case Intrinsic::vector_insert: {
  Value *Vec = II->getArgOperand(0);
  Value *SubVec = II->getArgOperand(1);
  Value *Idx = II->getArgOperand(2);
  // ...
  if (DstTy && VecTy && SubVecTy) {
    unsigned DstNumElts = DstTy->getNumElements();
    unsigned VecNumElts = VecTy->getNumElements();
    unsigned SubVecNumElts = SubVecTy->getNumElements();
    // ...
    if (VecNumElts == SubVecNumElts)
      // ...
    for (i = 0; i != SubVecNumElts; ++i)
      // ...
    for (; i != VecNumElts; ++i)
      // ...
    Value *WidenShuffle = Builder.CreateShuffleVector(SubVec, WidenMask);
    // ...
    for (unsigned i = 0; i != IdxN; ++i)
      // ...
    for (unsigned i = DstNumElts; i != DstNumElts + SubVecNumElts; ++i)
      // ...
    for (unsigned i = IdxN + SubVecNumElts; i != DstNumElts; ++i)
      // ...
    Value *Shuffle = Builder.CreateShuffleVector(Vec, WidenShuffle, Mask);
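// Editor's illustrative sketch: for fixed vectors the insert lowers to two
// shuffles, e.g. inserting <2 x i32> %sub at index 2 of <4 x i32> %vec:
//   %widen = shufflevector <2 x i32> %sub, poison, <0, 1, poison, poison>
//   %res   = shufflevector <4 x i32> %vec, %widen, <0, 1, 4, 5>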
case Intrinsic::vector_extract: {
  Value *Vec = II->getArgOperand(0);
  Value *Idx = II->getArgOperand(1);
  // ...
  Type *ReturnType = II->getType();
  // ...
  Value *InsertTuple, *InsertIdx, *InsertValue;
  // ...
      InsertValue->getType() == ReturnType) {
    // ...
    if (ExtractIdx == Index)
      // ...
  if (DstTy && VecTy) {
    auto DstEltCnt = DstTy->getElementCount();
    auto VecEltCnt = VecTy->getElementCount();
    // ...
    if (DstEltCnt == VecTy->getElementCount()) {
      // ...
    if (VecEltCnt.isScalable() || DstEltCnt.isScalable())
      // ...
    for (unsigned i = 0; i != DstEltCnt.getKnownMinValue(); ++i)
      Mask.push_back(IdxN + i);
    // ...
    Value *Shuffle = Builder.CreateShuffleVector(Vec, Mask);
case Intrinsic::experimental_vp_reverse: {
  // ...
  Value *Vec = II->getArgOperand(0);
  Value *Mask = II->getArgOperand(1);
  // ...
  Value *EVL = II->getArgOperand(2);
  // ...
      OldUnOp->getOpcode(), X, OldUnOp, OldUnOp->getName(),
      // ...
case Intrinsic::vector_reduce_or:
case Intrinsic::vector_reduce_and: {
  // ...
  Value *Arg = II->getArgOperand(0);
  // ...
  if (FTy->getElementType() == Builder.getInt1Ty()) {
    // ...
        Vect, Builder.getIntNTy(FTy->getNumElements()));
    if (IID == Intrinsic::vector_reduce_and) {
      // ...
    } else {
      assert(IID == Intrinsic::vector_reduce_or && "Expected or reduction.");
      Res = Builder.CreateIsNotNull(Res);
    // ...
case Intrinsic::vector_reduce_add: {
  if (IID == Intrinsic::vector_reduce_add) {
    // ...
    Value *Arg = II->getArgOperand(0);
    // ...
    if (FTy->getElementType() == Builder.getInt1Ty()) {
      // ...
          Vect, Builder.getIntNTy(FTy->getNumElements()));
      Value *Res = Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, V);
      // ...
      Res = Builder.CreateZExtOrTrunc(Res, II->getType());
    // ...
    if (VecToReduceCount.isFixed()) {
      // ...
      return BinaryOperator::CreateMul(
          Splat, ConstantInt::get(Splat->getType(), VectorSize));
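// Editor's illustrative sketch: an add-reduction of extended i1 lanes is a
// population count of the mask, e.g.
//   vector.reduce.add(zext <8 x i1> %m to <8 x i32>)
// ==>
//   %i = bitcast <8 x i1> %m to i8
//   %c = call i8 @llvm.ctpop.i8(i8 %i)
//   %r = zext i8 %c to i32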
case Intrinsic::vector_reduce_xor: {
  if (IID == Intrinsic::vector_reduce_xor) {
    // ...
    Value *Arg = II->getArgOperand(0);
    // ...
    if (VTy->getElementType() == Builder.getInt1Ty()) {
      // ...
case Intrinsic::vector_reduce_mul: {
  if (IID == Intrinsic::vector_reduce_mul) {
    // ...
    Value *Arg = II->getArgOperand(0);
    // ...
    if (VTy->getElementType() == Builder.getInt1Ty()) {
      // ...
      Res = Builder.CreateZExt(Res, II->getType());
    // ...
case Intrinsic::vector_reduce_umin:
case Intrinsic::vector_reduce_umax: {
  if (IID == Intrinsic::vector_reduce_umin ||
      IID == Intrinsic::vector_reduce_umax) {
    // ...
    Value *Arg = II->getArgOperand(0);
    // ...
    if (VTy->getElementType() == Builder.getInt1Ty()) {
      Value *Res = IID == Intrinsic::vector_reduce_umin
                       ? Builder.CreateAndReduce(Vect)
                       : Builder.CreateOrReduce(Vect);
      // ...
case Intrinsic::vector_reduce_smin:
case Intrinsic::vector_reduce_smax: {
  if (IID == Intrinsic::vector_reduce_smin ||
      IID == Intrinsic::vector_reduce_smax) {
    // ...
    Value *Arg = II->getArgOperand(0);
    // ...
    if (VTy->getElementType() == Builder.getInt1Ty()) {
      // ...
      Value *Res = ((IID == Intrinsic::vector_reduce_smin) ==
                    (ExtOpc == Instruction::CastOps::ZExt))
                       ? Builder.CreateAndReduce(Vect)
                       : Builder.CreateOrReduce(Vect);
      // ...
      Res = Builder.CreateCast(ExtOpc, Res, II->getType());
case Intrinsic::vector_reduce_fmax:
case Intrinsic::vector_reduce_fmin:
case Intrinsic::vector_reduce_fadd:
case Intrinsic::vector_reduce_fmul: {
  bool CanReorderLanes = (IID != Intrinsic::vector_reduce_fadd &&
                          IID != Intrinsic::vector_reduce_fmul) ||
                         II->hasAllowReassoc();
  const unsigned ArgIdx = (IID == Intrinsic::vector_reduce_fadd ||
                           IID == Intrinsic::vector_reduce_fmul)
                              // ...
  Value *Arg = II->getArgOperand(ArgIdx);
case Intrinsic::is_fpclass: {
  // ...
case Intrinsic::threadlocal_address: {
  // ...
case Intrinsic::frexp: {
  // ...
case Intrinsic::get_active_lane_mask: {
  const APInt *Op0, *Op1;
  // ...
  Type *OpTy = II->getOperand(0)->getType();
  // ...
      II->getType(), Intrinsic::get_active_lane_mask,
      {Constant::getNullValue(OpTy),
       ConstantInt::get(OpTy, Op1->usub_sat(*Op0))}));
bool IsVectorCond = Sel->getCondition()->getType()->isVectorTy();
// ...
bool SimplifyBothArms =
    !Op->getType()->isVectorTy() && II->getType()->isVectorTy();
// ...
    *II, Sel, false, SimplifyBothArms))
  // ...
return visitCallBase(*II);
if (FI1SyncScope != FI2->getSyncScopeID() ||
    // ...
if (NFI && isIdenticalOrStrongerFence(NFI, &FI))
  // ...
if (isIdenticalOrStrongerFence(PFI, &FI))
  // ...
return visitCallBase(II);
// ...
return visitCallBase(CBI);
// ...
                           InstCombineRAUW, InstCombineErase);
if (Value *With = Simplifier.optimizeCall(CI, Builder)) {
if (Underlying != TrampMem &&
    (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
  // ...
  if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
    // ...
    InitTrampoline = II;
  // ...
  if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
    // ...
if (!InitTrampoline)
  // ...
if (InitTrampoline->getOperand(0) != TrampMem)
  // ...
return InitTrampoline;
// ...
  if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
      II->getOperand(0) == TrampMem)
Callee = Callee->stripPointerCasts();
// ...
if (!IPC || !IPC->isNoopCast(DL))
  // ...
if (IIID != Intrinsic::ptrauth_resign && IIID != Intrinsic::ptrauth_sign)
  // ...
std::optional<OperandBundleUse> PtrAuthBundleOrNone;
// ...
  PtrAuthBundleOrNone = Bundle;
// ...
if (!PtrAuthBundleOrNone)
  // ...
Value *NewCallee = nullptr;
// ...
case Intrinsic::ptrauth_resign: {
  // ...
  if (II->getOperand(3) != PtrAuthBundleOrNone->Inputs[0])
    // ...
  if (II->getOperand(4) != PtrAuthBundleOrNone->Inputs[1])
    // ...
  if (II->getOperand(1) != PtrAuthBundleOrNone->Inputs[0])
    // ...
  Value *NewBundleOps[] = {II->getOperand(1), II->getOperand(2)};
  // ...
  NewCallee = II->getOperand(0);
  // ...
case Intrinsic::ptrauth_sign: {
  // ...
  if (II->getOperand(1) != PtrAuthBundleOrNone->Inputs[0])
    // ...
  if (II->getOperand(2) != PtrAuthBundleOrNone->Inputs[1])
    // ...
  NewCallee = II->getOperand(0);
// ...
NewCallee = Builder.CreateBitOrPointerCast(NewCallee, Callee->getType());
// ...
if (!CPA->isKnownCompatibleWith(Key, Discriminator, DL))
bool InstCombinerImpl::annotateAnyAllocSite(CallBase &Call, /* ... */) {
  // ...
  if (NewAlign > ExistingAlign) {
    // ...
  SmallVector<unsigned, 4> ArgNos;
  // ...
  if (V->getType()->isPointerTy()) {
    // ...
        (HasDereferenceable &&
         // ...
             V->getType()->getPointerAddressSpace()))) {
      if (Value *Res = simplifyNonNullOperand(V, HasDereferenceable)) {
  // ...
  if (!ArgNos.empty()) {
    // ...
    AS = AS.addParamAttribute(Ctx, ArgNos,
                              // ...
    transformConstExprCastCall(Call))
  // ...
  return transformCallThroughTrampoline(Call, *II);
// ...
if (Instruction *NewCall = foldPtrAuthIntrinsicCallee(Call))
  // ...
if (Instruction *NewCall = foldPtrAuthConstantCallee(Call))
  // ...
if (!IA->canThrow()) {
  // ...
Type *RetArgTy = ReturnedArg->getType();
// ...
    Call, Builder.CreateBitOrPointerCast(ReturnedArg, CallTy));
// ...
ConstantInt *FunctionType = nullptr;
// ...
if (MDNode *MD = CalleeF->getMetadata(LLVMContext::MD_kcfi_type))
  // ...
      << ": call to " << CalleeF->getName()
      << " using a mismatching function pointer type\n";
case Intrinsic::experimental_gc_statepoint: {
  // ...
  SmallPtrSet<Value *, 32> LiveGcValues;
  // ...
    GCRelocateInst &GCR = *const_cast<GCRelocateInst *>(Reloc);
    // ...
    LiveGcValues.insert(BasePtr);
    LiveGcValues.insert(DerivedPtr);
  // ...
  std::optional<OperandBundleUse> Bundle =
      // ...
  unsigned NumOfGCLives = LiveGcValues.size();
  if (!Bundle || NumOfGCLives == Bundle->Inputs.size())
    // ...
  DenseMap<Value *, unsigned> Val2Idx;
  std::vector<Value *> NewLiveGc;
  for (Value *V : Bundle->Inputs) {
    // ...
    if (LiveGcValues.count(V)) {
      It->second = NewLiveGc.size();
      NewLiveGc.push_back(V);
    // ...
      It->second = NumOfGCLives;
  // ...
    GCRelocateInst &GCR = *const_cast<GCRelocateInst *>(Reloc);
    // ...
    assert(Val2Idx.count(BasePtr) && Val2Idx[BasePtr] != NumOfGCLives &&
           "Missed live gc for base pointer");
    // ...
    GCR.setOperand(1, ConstantInt::get(OpIntTy1, Val2Idx[BasePtr]));
    // ...
    assert(Val2Idx.count(DerivedPtr) && Val2Idx[DerivedPtr] != NumOfGCLives &&
           "Missed live gc for derived pointer");
    // ...
    GCR.setOperand(2, ConstantInt::get(OpIntTy2, Val2Idx[DerivedPtr]));
bool InstCombinerImpl::transformConstExprCastCall(CallBase &Call) {
  // ...
         "CallBr's don't have a single point after a def to insert at");
  // ...
  if (Callee->isDeclaration())
    // ...
  if (Callee->hasFnAttribute("thunk"))
    // ...
  if (Callee->hasFnAttribute(Attribute::Naked))
    // ...
  FunctionType *FT = Callee->getFunctionType();
  // ...
  Type *NewRetTy = FT->getReturnType();
  // ...
  if (OldRetTy != NewRetTy) {
    // ...
    if (!Caller->use_empty())
      // ...
    if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
      AttrBuilder RAttrs(FT->getContext(), CallerPAL.getRetAttrs());
      if (RAttrs.overlaps(AttributeFuncs::typeIncompatible(
              NewRetTy, CallerPAL.getRetAttrs())))
        // ...
    if (!Caller->use_empty()) {
      // ...
        PhisNotSupportedBlock = II->getNormalDest();
      if (PhisNotSupportedBlock)
        for (User *U : Caller->users())
          // ...
            if (PN->getParent() == PhisNotSupportedBlock)
  // ...
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);
  // ...
  if (Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca) ||
      Callee->getAttributes().hasAttrSomewhere(Attribute::Preallocated))
    // ...
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);
    Type *ActTy = (*AI)->getType();
    // ...
    if (AttrBuilder(FT->getContext(), CallerPAL.getParamAttrs(i))
            .overlaps(AttributeFuncs::typeIncompatible(
                ParamTy, CallerPAL.getParamAttrs(i),
                AttributeFuncs::ASK_UNSAFE_TO_DROP)))
      // ...
        CallerPAL.hasParamAttr(i, Attribute::Preallocated))
      // ...
    if (CallerPAL.hasParamAttr(i, Attribute::SwiftError))
      // ...
    if (CallerPAL.hasParamAttr(i, Attribute::ByVal) !=
        Callee->getAttributes().hasParamAttr(i, Attribute::ByVal))
      // ...
  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
      !CallerPAL.isEmpty()) {
    // ...
  if (CallerPAL.hasAttrSomewhere(Attribute::StructRet, &SRetIdx) &&
      SRetIdx - AttributeList::FirstArgIndex >= FT->getNumParams())
    // ...
  SmallVector<Value *, 8> Args;
  // ...
  Args.reserve(NumActualArgs);
  ArgAttrs.reserve(NumActualArgs);
  // ...
  AttrBuilder RAttrs(FT->getContext(), CallerPAL.getRetAttrs());
  // ...
      AttributeFuncs::typeIncompatible(NewRetTy, CallerPAL.getRetAttrs()));
  // ...
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);
    // ...
    Value *NewArg = *AI;
    if ((*AI)->getType() != ParamTy)
      NewArg = Builder.CreateBitOrPointerCast(*AI, ParamTy);
    Args.push_back(NewArg);
    // ...
    AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(
        ParamTy, CallerPAL.getParamAttrs(i), AttributeFuncs::ASK_SAFE_TO_DROP);
    // ...
        CallerPAL.getParamAttrs(i).removeAttributes(Ctx, IncompatibleAttrs));
  // ...
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i) {
    // ...
  if (FT->getNumParams() < NumActualArgs) {
    // ...
    if (FT->isVarArg()) {
      // ...
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        // ...
        Value *NewArg = *AI;
        if (PTy != (*AI)->getType()) {
          // ...
          NewArg = Builder.CreateCast(opcode, *AI, PTy);
        }
        Args.push_back(NewArg);
        // ...
        ArgAttrs.push_back(CallerPAL.getParamAttrs(i));
  // ...
  AttributeSet FnAttrs = CallerPAL.getFnAttrs();
  // ...
  assert((ArgAttrs.size() == FT->getNumParams() || FT->isVarArg()) &&
         "missing argument attributes");
  AttributeList NewCallerPAL = AttributeList::get(
      // ...
    NewCall = Builder.CreateInvoke(Callee, II->getNormalDest(),
                                   II->getUnwindDest(), Args, OpBundles);
  // ...
    NewCall = Builder.CreateCall(Callee, Args, OpBundles);
  // ...
  NewCall->copyMetadata(*Caller, {LLVMContext::MD_prof});
  // ...
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    assert(!NV->getType()->isVoidTy());
    // ...
    NC->setDebugLoc(Caller->getDebugLoc());
    // ...
    assert(OptInsertPt && "No place to insert cast");
    // ...
  Worklist.pushUsersToWorkList(*Caller);
  // ...
  if (!Caller->use_empty())
    // ...
  else if (Caller->hasValueHandle()) {
    if (OldRetTy == NV->getType())
InstCombinerImpl::transformCallThroughTrampoline(CallBase &Call,
                                                 // ...
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    // ...
  if (!NestAttrs.isEmpty()) {
    unsigned NestArgNo = 0;
    Type *NestTy = nullptr;
    AttributeSet NestAttr;
    // ...
         E = NestFTy->param_end();
         I != E; ++NestArgNo, ++I) {
      AttributeSet AS = NestAttrs.getParamAttrs(NestArgNo);
      // ...
    std::vector<Value*> NewArgs;
    std::vector<AttributeSet> NewArgAttrs;
    // ...
      if (ArgNo == NestArgNo) {
        // ...
        if (NestVal->getType() != NestTy)
          NestVal = Builder.CreateBitCast(NestVal, NestTy, "nest");
        NewArgs.push_back(NestVal);
        NewArgAttrs.push_back(NestAttr);
      // ...
      NewArgs.push_back(*I);
      NewArgAttrs.push_back(Attrs.getParamAttrs(ArgNo));
    // ...
    std::vector<Type*> NewTypes;
    NewTypes.reserve(FTy->getNumParams()+1);
    // ...
         E = FTy->param_end();
         // ...
      if (ArgNo == NestArgNo)
        // ...
        NewTypes.push_back(NestTy);
      // ...
      NewTypes.push_back(*I);
    // ...
    FunctionType *NewFTy =
        // ...
    AttributeList NewPAL =
        AttributeList::get(FTy->getContext(), Attrs.getFnAttrs(),
                           Attrs.getRetAttrs(), NewArgAttrs);
    // ...
          II->getUnwindDest(), NewArgs, OpBundles);
    // ...
          CBI->getIndirectDests(), NewArgs, OpBundles);
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Register Bank Select
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
This file implements the APSInt class, which is a simple class that represents an arbitrary sized int...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static cl::opt< ITMode > IT(cl::desc("IT block support"), cl::Hidden, cl::init(DefaultIT), cl::values(clEnumValN(DefaultIT, "arm-default-it", "Generate any type of IT block"), clEnumValN(RestrictedIT, "arm-restrict-it", "Disallow complex IT blocks")))
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static SDValue foldBitOrderCrossLogicOp(SDNode *N, SelectionDAG &DAG)
static Type * getPromotedType(Type *Ty)
Return the specified type promoted as it would be to pass though a va_arg area.
static Instruction * createOverflowTuple(IntrinsicInst *II, Value *Result, Constant *Overflow)
Creates a result tuple for an overflow intrinsic II with a given Result and a constant Overflow value...
static IntrinsicInst * findInitTrampolineFromAlloca(Value *TrampMem)
static bool removeTriviallyEmptyRange(IntrinsicInst &EndI, InstCombinerImpl &IC, std::function< bool(const IntrinsicInst &)> IsStart)
static bool inputDenormalIsDAZ(const Function &F, const Type *Ty)
static Instruction * reassociateMinMaxWithConstantInOperand(IntrinsicInst *II, InstCombiner::BuilderTy &Builder)
If this min/max has a matching min/max operand with a constant, try to push the constant operand into...
static bool isIdempotentBinaryIntrinsic(Intrinsic::ID IID)
Helper to match idempotent binary intrinsics, namely, intrinsics where f(f(x, y), y) == f(x,...
static bool signBitMustBeTheSame(Value *Op0, Value *Op1, const SimplifyQuery &SQ)
Return true if two values Op0 and Op1 are known to have the same sign.
static Instruction * moveAddAfterMinMax(IntrinsicInst *II, InstCombiner::BuilderTy &Builder)
Try to canonicalize min/max(X + C0, C1) as min/max(X, C1 - C0) + C0.
static Instruction * simplifyInvariantGroupIntrinsic(IntrinsicInst &II, InstCombinerImpl &IC)
This function transforms launder.invariant.group and strip.invariant.group like: launder(launder(x)) ...
static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E, unsigned NumOperands)
static std::optional< bool > getKnownSign(Value *Op, const SimplifyQuery &SQ)
static cl::opt< unsigned > GuardWideningWindow("instcombine-guard-widening-window", cl::init(3), cl::desc("How wide an instruction window to bypass looking for " "another guard"))
static bool hasUndefSource(AnyMemTransferInst *MI)
Recognize a memcpy/memmove from a trivially otherwise unused alloca.
static Instruction * factorizeMinMaxTree(IntrinsicInst *II)
Reduce a sequence of min/max intrinsics with a common operand.
static Value * simplifyNeonTbl1(const IntrinsicInst &II, InstCombiner::BuilderTy &Builder)
Convert a table lookup to shufflevector if the mask is constant.
static Instruction * foldClampRangeOfTwo(IntrinsicInst *II, InstCombiner::BuilderTy &Builder)
If we have a clamp pattern like max (min X, 42), 41 – where the output can only be one of two possibl...
static Value * simplifyReductionOperand(Value *Arg, bool CanReorderLanes)
static IntrinsicInst * findInitTrampolineFromBB(IntrinsicInst *AdjustTramp, Value *TrampMem)
static Value * foldIntrinsicUsingDistributiveLaws(IntrinsicInst *II, InstCombiner::BuilderTy &Builder)
static std::optional< bool > getKnownSignOrZero(Value *Op, const SimplifyQuery &SQ)
static Value * foldMinimumOverTrailingOrLeadingZeroCount(Value *I0, Value *I1, const DataLayout &DL, InstCombiner::BuilderTy &Builder)
Fold an unsigned minimum of trailing or leading zero bits counts: umin(cttz(CtOp, ZeroUndef),...
static Value * foldIdempotentBinaryIntrinsicRecurrence(InstCombinerImpl &IC, IntrinsicInst *II)
Attempt to simplify value-accumulating recurrences of kind: umax.acc = phi i8 [ umax,...
static Instruction * foldCtpop(IntrinsicInst &II, InstCombinerImpl &IC)
static Instruction * foldCttzCtlz(IntrinsicInst &II, InstCombinerImpl &IC)
static IntrinsicInst * findInitTrampoline(Value *Callee)
static FCmpInst::Predicate fpclassTestIsFCmp0(FPClassTest Mask, const Function &F, Type *Ty)
static bool leftDistributesOverRight(Instruction::BinaryOps LOp, bool HasNUW, bool HasNSW, Intrinsic::ID ROp)
Return whether "X LOp (Y ROp Z)" is always equal to "(X LOp Y) ROp (X LOp Z)".
static Value * reassociateMinMaxWithConstants(IntrinsicInst *II, IRBuilderBase &Builder, const SimplifyQuery &SQ)
If this min/max has a constant operand and an operand that is a matching min/max with a constant oper...
static CallInst * canonicalizeConstantArg0ToArg1(CallInst &Call)
This file provides internal interfaces used to implement the InstCombine.
This file provides the interface for the instcombine pass implementation.
static bool hasNoSignedWrap(BinaryOperator &I)
static bool inputDenormalIsIEEE(DenormalMode Mode)
Return true if it's possible to assume IEEE treatment of input denormals in F for Val.
static const Function * getCalledFunction(const Value *V)
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
if(auto Err=PB.parsePassPipeline(MPM, Passes)) return wrap(std MPM run * Mod
const SmallVectorImpl< MachineOperand > & Cond
This file implements the SmallBitVector class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
#define DEBUG_WITH_TYPE(TYPE,...)
DEBUG_WITH_TYPE macro - This macro should be used by passes to emit debug information.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
bool sgt(const APInt &RHS) const
Signed greater than comparison.
LLVM_ABI APInt usub_ov(const APInt &RHS, bool &Overflow) const
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
LLVM_ABI APInt sadd_ov(const APInt &RHS, bool &Overflow) const
LLVM_ABI APInt uadd_ov(const APInt &RHS, bool &Overflow) const
static LLVM_ABI APInt getSplat(unsigned NewLen, const APInt &V)
Return a value containing V broadcasted over NewLen bits.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
LLVM_ABI APInt uadd_sat(const APInt &RHS) const
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
LLVM_ABI APInt ssub_ov(const APInt &RHS, bool &Overflow) const
static APSInt getMinValue(uint32_t numBits, bool Unsigned)
Return the APSInt representing the minimum integer value with the given bit width and signedness.
static APSInt getMaxValue(uint32_t numBits, bool Unsigned)
Return the APSInt representing the maximum integer value with the given bit width and signedness.
This class represents any memset intrinsic.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
static LLVM_ABI AttributeSet get(LLVMContext &C, const AttrBuilder &B)
static LLVM_ABI Attribute get(LLVMContext &Context, AttrKind Kind, uint64_t Val=0)
Return a uniquified Attribute object.
static LLVM_ABI Attribute getWithDereferenceableBytes(LLVMContext &Context, uint64_t Bytes)
static LLVM_ABI Attribute getWithDereferenceableOrNullBytes(LLVMContext &Context, uint64_t Bytes)
static LLVM_ABI Attribute getWithAlignment(LLVMContext &Context, Align Alignment)
Return a uniquified Attribute object that has the specific alignment set.
InstListType::reverse_iterator reverse_iterator
InstListType::iterator iterator
Instruction iterators...
LLVM_ABI bool isSigned() const
Whether the intrinsic is signed or unsigned.
LLVM_ABI Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
static BinaryOperator * CreateFAddFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
static LLVM_ABI BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
static BinaryOperator * CreateNSW(BinaryOps Opc, Value *V1, Value *V2, const Twine &Name="")
static LLVM_ABI BinaryOperator * CreateNot(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
static BinaryOperator * CreateNUW(BinaryOps Opc, Value *V1, Value *V2, const Twine &Name="")
static BinaryOperator * CreateFMulFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
static BinaryOperator * CreateFDivFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
static BinaryOperator * CreateFSubFMF(Value *V1, Value *V2, FastMathFlags FMF, const Twine &Name="")
static LLVM_ABI BinaryOperator * CreateNSWNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
void setCallingConv(CallingConv::ID CC)
MaybeAlign getRetAlign() const
Extract the alignment of the return value.
LLVM_ABI void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool isInAllocaArgument(unsigned ArgNo) const
Determine whether this argument is passed in an alloca.
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
uint64_t getParamDereferenceableBytes(unsigned i) const
Extract the number of dereferenceable bytes for a call or parameter (0=unknown).
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
Value * getCalledOperand() const
void setAttributes(AttributeList A)
Set the attributes for this call.
bool doesNotThrow() const
Determine if the call cannot unwind.
void addRetAttr(Attribute::AttrKind Kind)
Adds the attribute to the return value.
Value * getArgOperand(unsigned i) const
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
bool isConvergent() const
Determine if the invoke is convergent.
FunctionType * getFunctionType() const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
Value * getReturnedArgOperand() const
If one of the arguments has the 'returned' attribute, returns its operand value.
static LLVM_ABI CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, InsertPosition InsertPt=nullptr)
Create a clone of CB with a different set of operand bundles and insert it before InsertPt.
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
void setCalledOperand(Value *V)
static LLVM_ABI CallBase * removeOperandBundle(CallBase *CB, uint32_t ID, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle ID removed.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
void setCalledFunction(Function *Fn)
Sets the function called, including updating the function type.
LLVM_ABI Function * getCaller()
Helper to get the caller (the parent function).
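A sketch of the usual interrogation pattern over these CallBase accessors, assuming a CallBase &CB is in scope (illustrative names only):

#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
using namespace llvm;

void inspectCall(CallBase &CB) {
  // Direct callee, or nullptr for indirect calls and signature mismatches.
  if (Function *Callee = CB.getCalledFunction())
    (void)Callee->getName();
  // Walk the explicit arguments and query per-parameter attributes.
  for (unsigned I = 0, E = CB.arg_size(); I != E; ++I)
    if (CB.paramHasAttr(I, Attribute::NonNull))
      (void)CB.getArgOperand(I); // argument I is known non-null here
}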
CallBr instruction, tracking function calls that may not return control but instead transfer it to a third location.
static CallBrInst * Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
This class represents a function call, abstracting a target machine's calling convention.
bool isNoTailCall() const
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
bool isMustTailCall() const
static LLVM_ABI Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
static LLVM_ABI CastInst * CreateIntegerCast(Value *S, Type *Ty, bool isSigned, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt, BitCast, or Trunc for int -> int casts.
static LLVM_ABI bool isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, const DataLayout &DL)
Check whether a bitcast, inttoptr, or ptrtoint cast between these types is valid and a no-op.
static LLVM_ABI CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, a PtrToInt, or an IntToPtr cast instruction.
static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's constructor.
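For instance, getCastOpcode plus Create can replace hand-picked ZExt/Trunc logic; a sketch assuming V and DestTy are integer-typed (illustrative helper):

#include "llvm/IR/InstrTypes.h"
using namespace llvm;

Instruction *castTo(Value *V, Type *DestTy) {
  // Picks ZExt, Trunc, or BitCast as appropriate for an unsigned conversion.
  Instruction::CastOps Opc = CastInst::getCastOpcode(
      V, /*SrcIsSigned=*/false, DestTy, /*DstIsSigned=*/false);
  return CastInst::Create(Opc, V, DestTy, "cast");
}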
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Predicate getSwappedPredicate() const
Returns the predicate as if the operands had been swapped; for example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Predicate getNonStrictPredicate() const
Returns the non-strict (inclusive) form of the predicate; for example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
Predicate getUnorderedPredicate() const
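These predicate helpers underpin the usual compare canonicalizations; a small sketch (the static form of getSwappedPredicate is used here, and the commented results follow the definitions listed above):

#include "llvm/IR/InstrTypes.h"
using namespace llvm;

void predicateAlgebra() {
  CmpInst::Predicate P = CmpInst::ICMP_SLT;                 // signed less than
  CmpInst::Predicate Sw = CmpInst::getSwappedPredicate(P);  // ICMP_SGT
  (void)Sw;
}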
static LLVM_ABI ConstantAggregateZero * get(Type *Ty)
static LLVM_ABI Constant * getPointerCast(Constant *C, Type *Ty)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.
static LLVM_ABI Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static LLVM_ABI Constant * getNeg(Constant *C, bool HasNSW=false)
static LLVM_ABI Constant * getInfinity(Type *Ty, bool Negative=false)
static LLVM_ABI Constant * getZero(Type *Ty, bool Negative=false)
This is the shared class of boolean and integer constants.
uint64_t getLimitedValue(uint64_t Limit=~0ULL) const
getLimitedValue - If the value is smaller than the specified limit, return it, otherwise return the limit value.
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate for the type of this constant.
const APInt & getValue() const
Return the constant as an APInt value reference.
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
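A sketch of the ConstantInt accessors above (illustrative free function; Ctx is an LLVMContext):

#include "llvm/IR/Constants.h"
#include "llvm/IR/Type.h"
using namespace llvm;

void constantIntDemo(LLVMContext &Ctx) {
  ConstantInt *C = ConstantInt::get(Type::getInt32Ty(Ctx), 42);
  uint64_t Z = C->getZExtValue();             // 42, zero-extended to 64 bits
  uint64_t L = C->getLimitedValue(10);        // clamped to the limit: 10
  ConstantInt *T = ConstantInt::getTrue(Ctx); // the i1 constant 1
  (void)Z; (void)L; (void)T;
}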
static LLVM_ABI ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
static LLVM_ABI ConstantPtrAuth * get(Constant *Ptr, ConstantInt *Key, ConstantInt *Disc, Constant *AddrDisc)
Return a pointer signed with the specified parameters.
This class represents a range of values.
LLVM_ABI bool isFullSet() const
Return true if this set contains all of the elements possible for this data-type.
LLVM_ABI bool icmp(CmpInst::Predicate Pred, const ConstantRange &Other) const
Does the predicate Pred hold between ranges this and Other?
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
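A sketch of these ConstantRange queries over a half-open [10, 20) range of i8 values (illustrative):

#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/InstrTypes.h"
using namespace llvm;

void rangeDemo() {
  ConstantRange R(APInt(8, 10), APInt(8, 20)); // [10, 20)
  bool In = R.contains(APInt(8, 15));          // true
  bool Lt = R.icmp(CmpInst::ICMP_ULT,
                   ConstantRange(APInt(8, 30), APInt(8, 40))); // true
  bool Full = R.isFullSet();                   // false
  (void)In; (void)Lt; (void)Full;
}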
static LLVM_ABI Constant * get(StructType *T, ArrayRef< Constant * > V)
This is an important base class in LLVM.
static LLVM_ABI Constant * getIntegerValue(Type *Ty, const APInt &V)
Return the value for an integer or pointer constant, or a vector thereof, with the given scalar value.
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string, and methods for querying it.
Record of a variable value-assignment, aka a non-instruction representation of the dbg.value intrinsic.
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
static FMFSource intersect(Value *A, Value *B)
Intersect the FMF from two instructions.
This class represents an extension of floating point types.
Convenience struct for specifying and reasoning about fast-math flags.
void setNoSignedZeros(bool B=true)
bool allowReassoc() const
Flag queries.
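A sketch of setting and querying these flags on a fresh FastMathFlags value (illustrative):

#include "llvm/IR/FMF.h"
using namespace llvm;

void fmfDemo() {
  FastMathFlags FMF;
  FMF.setNoSignedZeros();       // allow ignoring the sign of zero
  bool Re = FMF.allowReassoc(); // still false: reassociation not enabled
  (void)Re;
}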
An instruction for ordering other memory operations.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this fence instruction.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Class to represent function types.
Type::subtype_iterator param_iterator
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
bool isConvergent() const
Determine if the call is convergent.
FunctionType * getFunctionType() const
Returns the FunctionType for me.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
AttributeList getAttributes() const
Return the attribute list for this Function.
bool doesNotThrow() const
Determine if the function cannot unwind.
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
LLVM_ABI Value * getBasePtr() const
unsigned getBasePtrIndex() const
The index into the associated statepoint's argument list which contains the base pointer of the pointer whose relocation this gc relocate describes.
LLVM_ABI Value * getDerivedPtr() const
unsigned getDerivedPtrIndex() const
The index into the associated statepoint's argument list which contains the pointer whose relocation this gc relocate describes.
std::vector< const GCRelocateInst * > getGCRelocates() const
Get the list of all gc relocates linked to this statepoint; may contain several relocations for the same base/derived pair.
MDNode * getMetadata(unsigned KindID) const
Get the current metadata attachments for the given kind, if any.
LLVM_ABI bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit.
PointerType * getType() const
Global values are always pointers.
Common base class shared among various IRBuilders.
LLVM_ABI Value * CreateLaunderInvariantGroup(Value *Ptr)
Create a launder.invariant.group intrinsic call.
ConstantInt * getTrue()
Get the constant value for i1 true.
LLVM_ABI Value * CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with 2 operands which is mangled on the first type.
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
LLVM_ABI CallInst * CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with 1 operand which is mangled on its type.
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
ConstantInt * getFalse()
Get the constant value for i1 false.
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateAddrSpaceCast(Value *V, Type *DestTy, const Twine &Name="")
LLVM_ABI Value * CreateStripInvariantGroup(Value *Ptr)
Create a strip.invariant.group intrinsic call.
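A sketch combining a couple of these builder calls: emit umin(X, Y) - 1 immediately before an instruction I (illustrative helper; X and Y are assumed to be same-typed integers):

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

Value *uminMinusOne(Instruction *I, Value *X, Value *Y) {
  IRBuilder<> B(I); // new instructions are inserted before I
  Value *Min = B.CreateBinaryIntrinsic(Intrinsic::umin, X, Y);
  return B.CreateSub(Min, ConstantInt::get(X->getType(), 1));
}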
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
KnownFPClass computeKnownFPClass(Value *Val, FastMathFlags FMF, FPClassTest Interested=fcAllFlags, const Instruction *CtxI=nullptr, unsigned Depth=0) const
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0, see if we can fold the instruction into the PHI (which is only possible if all operands to the PHI are constants).
Value * SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &PoisonElts, unsigned Depth=0, bool AllowMultipleUsers=false) override
The specified value produces a vector with any number of elements.
bool SimplifyDemandedBits(Instruction *I, unsigned Op, const APInt &DemandedMask, KnownBits &Known, const SimplifyQuery &Q, unsigned Depth=0) override
This form of SimplifyDemandedBits simplifies the specified instruction operand if possible, updating it in place.
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false, bool SimplifyBothArms=false)
Given an instruction with a select as one operand and a constant as the other operand, try to fold the binary operator into the select arguments.
Instruction * SimplifyAnyMemSet(AnyMemSetInst *MI)
Instruction * visitFree(CallInst &FI, Value *FreedOp)
Instruction * visitCallBrInst(CallBrInst &CBI)
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Value * foldReversedIntrinsicOperands(IntrinsicInst *II)
If all arguments of the intrinsic are reverses, try to pull the reverse after the intrinsic.
Value * tryGetLog2(Value *Op, bool AssumeNonZero)
Instruction * visitFenceInst(FenceInst &FI)
Instruction * foldShuffledIntrinsicOperands(IntrinsicInst *II)
If all arguments of the intrinsic are unary shuffles with the same mask, try to shuffle after the intrinsic.
Instruction * visitInvokeInst(InvokeInst &II)
bool SimplifyDemandedInstructionBits(Instruction &Inst)
Tries to simplify operands to an integer instruction based on its demanded bits.
void CreateNonTerminatorUnreachable(Instruction *InsertAt)
Create and insert the idiom we use to indicate a block is unreachable without having to rewrite the CFG.
Instruction * visitVAEndInst(VAEndInst &I)
Instruction * matchBSwapOrBitReverse(Instruction &I, bool MatchBSwaps, bool MatchBitReversals)
Given an initial instruction, check to see if it is the root of a bswap/bitreverse idiom.
Constant * unshuffleConstant(ArrayRef< int > ShMask, Constant *C, VectorType *NewCTy)
Find a constant NewC with the property shuffle(NewC, ShMask) = C; returns nullptr if no such constant exists.
Instruction * visitAllocSite(Instruction &FI)
Instruction * SimplifyAnyMemTransfer(AnyMemTransferInst *MI)
OverflowResult computeOverflow(Instruction::BinaryOps BinaryOp, bool IsSigned, Value *LHS, Value *RHS, Instruction *CxtI) const
Instruction * visitCallInst(CallInst &CI)
CallInst simplification.
unsigned ComputeMaxSignificantBits(const Value *Op, const Instruction *CxtI=nullptr, unsigned Depth=0) const
IRBuilder< TargetFolder, IRBuilderCallbackInserter > BuilderTy
An IRBuilder that automatically inserts new instructions into the worklist.
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
DominatorTree & getDominatorTree() const
Instruction * InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old)
Inserts an instruction New before instruction Old.
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
void replaceUse(Use &U, Value *NewValue)
Replace use and add the previously used value to the worklist.
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
void computeKnownBits(const Value *V, KnownBits &Known, const Instruction *CxtI, unsigned Depth=0) const
std::optional< Instruction * > targetInstCombineIntrinsic(IntrinsicInst &II)
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
bool MaskedValueIsZero(const Value *V, const APInt &Mask, const Instruction *CxtI=nullptr, unsigned Depth=0) const
AssumptionCache & getAssumptionCache() const
OptimizationRemarkEmitter & ORE
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
const SimplifyQuery & getSimplifyQuery() const
bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero=false, const Instruction *CxtI=nullptr, unsigned Depth=0)
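These members implement the InstCombine driver contract: a visit method either returns a replacement (often via replaceInstUsesWith, which queues users for revisiting) or nullptr when no fold applies. A schematic sketch, not actual pass code; InstCombinerImpl lives in the pass-internal InstCombineInternal.h header and the fold shown is illustrative:

// Schematic InstCombine-style fold: rewrite `sub X, X` to 0 and hand
// everything else back unchanged.
Instruction *visitSubSketch(InstCombinerImpl &IC, BinaryOperator &I) {
  if (I.getOpcode() == Instruction::Sub &&
      I.getOperand(0) == I.getOperand(1))
    // Combiner-aware RAUW keeps the worklist consistent.
    return IC.replaceInstUsesWith(I, Constant::getNullValue(I.getType()));
  return nullptr; // no fold applied
}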
LLVM_ABI void setHasNoUnsignedWrap(bool b=true)
Set or clear the nuw flag on this instruction, which must be an operator which supports this flag.
LLVM_ABI bool mayWriteToMemory() const LLVM_READONLY
Return true if this instruction may modify memory.
LLVM_ABI void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this instruction.
LLVM_ABI void setHasNoSignedWrap(bool b=true)
Set or clear the nsw flag on this instruction, which must be an operator which supports this flag.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not have a module.
LLVM_ABI void setAAMetadata(const AAMDNodes &N)
Sets the AA metadata on this instruction from the AAMDNodes structure.
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos lives in, right before MovePos.
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
bool isTerminator() const
LLVM_ABI void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
LLVM_ABI std::optional< InstListType::iterator > getInsertionPointAfterDef()
Get the first insertion point at which the result of this instruction is defined.
LLVM_ABI bool isIdenticalTo(const Instruction *I) const LLVM_READONLY
Return true if the specified instruction is exactly identical to the current one.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
LLVM_ABI void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
Class to represent integer types.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A wrapper class for inspecting calls to intrinsic functions.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
An instruction for reading from memory.
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
static ICmpInst::Predicate getPredicate(Intrinsic::ID ID)
Returns the comparison predicate underlying the intrinsic.
ICmpInst::Predicate getPredicate() const
Returns the comparison predicate underlying the intrinsic.
bool isSigned() const
Whether the intrinsic is signed or unsigned.
A Module instance is used to store all the information related to an LLVM module.
StringRef getName() const
Get a short "name" for the module.
unsigned getOpcode() const
Return the opcode for this Instruction or ConstantExpr.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
bool isCommutative() const
Return true if the instruction is commutative.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
Represents a saturating add/sub intrinsic.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, const Instruction *MDFrom=nullptr)
This instruction constructs a fixed permutation of two input vectors.
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is small.
bool test(unsigned Idx) const
bool all() const
Returns true if all bits are set.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
void setVolatile(bool V)
Specify whether this is a volatile store or not.
void setAlignment(Align Align)
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this store instruction.
Class to represent struct types.
static LLVM_ABI bool isCallingConvCCompatible(CallBase *CI)
Returns true if call site / callee has cdecl-compatible calling conventions.
Provides information about what library functions are available for the current target.
This class represents a truncation of integer types.
The instances of the Type class are immutable: once they are created, they are never changed.
static LLVM_ABI IntegerType * getInt64Ty(LLVMContext &C)
LLVM_ABI unsigned getIntegerBitWidth() const
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
bool isPointerTy() const
True if this is an instance of PointerType.
LLVM_ABI bool canLosslesslyBitCastTo(Type *Ty) const
Return true if this type could be converted with a lossless BitCast to type 'Ty'.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
bool isStructTy() const
True if this is an instance of StructType.
LLVM_ABI Type * getWithNewBitWidth(unsigned NewBitWidth) const
Given an integer or vector type, change the lane bitwidth to NewBitWidth, whilst keeping the old number of lanes.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isVoidTy() const
Return true if this is 'void'.
static UnaryOperator * CreateWithCopiedFlags(UnaryOps Opc, Value *V, Instruction *CopyO, const Twine &Name="", InsertPosition InsertBefore=nullptr)
static UnaryOperator * CreateFNegFMF(Value *Op, Instruction *FMFSource, const Twine &Name="", InsertPosition InsertBefore=nullptr)
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
LLVM_ABI unsigned getOperandNo() const
Return the operand # of this use in its User.
void setOperand(unsigned i, Value *Val)
Value * getOperand(unsigned i) const
This represents the llvm.va_end intrinsic.
static LLVM_ABI void ValueIsDeleted(Value *V)
static LLVM_ABI void ValueIsRAUWd(Value *Old, Value *New)
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
static constexpr uint64_t MaximumAlignment
bool hasOneUse() const
Return true if there is exactly one use of this value.
iterator_range< user_iterator > users()
static LLVM_ABI void dropDroppableUse(Use &U)
Remove the droppable use U.
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
static constexpr unsigned MaxAlignmentExponent
The maximum alignment for instructions.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector.
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct a VectorType.
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isFixed() const
Returns true if the quantity is not scaled by vscale.
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
const ParentTy * getParent() const
self_iterator getIterator()
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
@ BasicBlock
Various leaf nodes.
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
class_match< PoisonValue > m_Poison()
Match an arbitrary poison constant.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
auto m_PtrToIntOrAddr(const OpTy &Op)
Matches PtrToInt or PtrToAddr.
m_Intrinsic_Ty< Opnd0 >::Ty m_BitReverse(const Opnd0 &Op0)
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
ap_match< APInt > m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWSub(const LHS &L, const RHS &R)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
ap_match< APFloat > m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
OverflowingBinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWNeg(const ValTy &V)
Matches a 'Neg' as 'sub nsw 0, V'.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
cstfp_pred_ty< is_neg_zero_fp > m_NegZeroFP()
Match a floating-point negative zero.
specific_fpval m_SpecificFP(double V)
Match a specific floating point value or vector with all elements equal to the value.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
BinOpPred_match< LHS, RHS, is_logical_shift_op > m_LogicalShift(const LHS &L, const RHS &R)
Matches logical shift operations.
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > m_SMin(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)
Matches an Xor with LHS and RHS in either order.
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match() expression.
match_combine_or< match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > >, OpTy > m_ZExtOrSExtOrSelf(const OpTy &Op)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
cst_pred_ty< is_strictlypositive > m_StrictlyPositive()
Match an integer or vector of strictly positive values.
ThreeOps_match< decltype(m_Value()), LHS, RHS, Instruction::Select, true > m_c_Select(const LHS &L, const RHS &R)
Match Select(C, LHS, RHS) or Select(C, RHS, LHS)
CastInst_match< OpTy, FPExtInst > m_FPExt(const OpTy &Op)
SpecificCmpClass_match< LHS, RHS, ICmpInst > m_SpecificICmp(CmpPredicate MatchPred, const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWShl(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWMul(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty > m_UMax(const LHS &L, const RHS &R)
cst_pred_ty< is_negated_power2 > m_NegatedPower2()
Match an integer or vector negated power-of-2.
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
cst_pred_ty< custom_checkfn< APInt > > m_CheckedInt(function_ref< bool(const APInt &)> CheckFn)
Match an integer or vector where CheckFn(ele) for each element is true.
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShl(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty, true > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty, true > > > m_c_MaxOrMin(const LHS &L, const RHS &R)
class_match< UnaryOperator > m_UnOp()
Match an arbitrary unary operation and ignore it.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWSub(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty > m_SMax(const LHS &L, const RHS &R)
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap >, DisjointOr_match< LHS, RHS > > m_NSWAddLike(const LHS &L, const RHS &R)
Match either "add nsw" or "or disjoint".
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
Exact_match< T > m_Exact(const T &SubPattern)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
cstfp_pred_ty< is_pos_zero_fp > m_PosZeroFP()
Match a floating-point positive zero.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShr(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
m_Intrinsic_Ty< Opnd0 >::Ty m_BSwap(const Opnd0 &Op0)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap >, DisjointOr_match< LHS, RHS > > m_NUWAddLike(const LHS &L, const RHS &R)
Match either "add nuw" or "or disjoint".
BinOpPred_match< LHS, RHS, is_bitwiselogic_op > m_BitwiseLogic(const LHS &L, const RHS &R)
Matches bitwise logic operations.
m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Mul, true > m_c_Mul(const LHS &L, const RHS &R)
Matches a Mul with LHS and RHS in either order.
m_Intrinsic_Ty< Opnd0, Opnd1 >::Ty m_CopySign(const Opnd0 &Op0, const Opnd1 &Op1)
MatchFunctor< Val, Pattern > match_fn(const Pattern &P)
A match functor that can be used as a UnaryPredicate in functional algorithms like all_of.
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > m_UMin(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
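A sketch of how these combinators nest inside match(), binding sub-values on success (illustrative function; V is any Value*):

#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

bool matchNotOfShl(Value *V, Value *&X, const APInt *&C) {
  // Matches `xor (shl X, C), -1` where the shift has a single use,
  // binding X and the constant shift amount when the match succeeds.
  return match(V, m_Not(m_OneUse(m_Shl(m_Value(X), m_APInt(C)))));
}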
@ SingleThread
Synchronized with respect to signal handlers executing in the same thread.
@ System
Synchronized with respect to all concurrently executing threads.
SmallVector< DbgVariableRecord * > getDVRAssignmentMarkers(const Instruction *Inst)
Return a range of dbg_assign records for which Inst performs the assignment they encode.
initializer< Ty > init(const Ty &Val)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
DiagnosticInfoOptimizationBase::Argument NV
friend class Instruction
Iterator for Instructions in a BasicBlock.
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI cl::opt< bool > EnableKnowledgeRetention
LLVM_ABI Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
@ NeverOverflows
Never overflows.
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
@ MayOverflow
May or may not overflow.
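Folds consume this lattice conservatively; only NeverOverflows licenses attaching an nsw/nuw flag. A sketch (illustrative helper):

#include "llvm/Analysis/ValueTracking.h"
using namespace llvm;

bool canAddNoWrapFlag(OverflowResult OR) {
  // MayOverflow and both AlwaysOverflows* answers forbid the flag.
  return OR == OverflowResult::NeverOverflows;
}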
LLVM_ABI Value * simplifyFMulInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FMul, fold the result or return null.
LLVM_ABI bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI, const DominatorTree *DT=nullptr, bool AllowEphemerals=false)
Return true if it is valid to use the assumptions provided by an assume intrinsic,...
LLVM_ABI APInt possiblyDemandedEltsInMask(Value *Mask)
Given a mask vector of the form <Y x i1>, return an APInt (of bitwidth Y) for each lane which may be active.
LLVM_ABI RetainedKnowledge simplifyRetainedKnowledge(AssumeInst *Assume, RetainedKnowledge RK, AssumptionCache *AC, DominatorTree *DT)
Canonicalize the RetainedKnowledge RK.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI)
Return true if this is a call to an allocation function that does not have side effects that we are required to preserve beyond the effect of allocating a new object.
LLVM_ABI Value * lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL, const TargetLibraryInfo *TLI, bool MustSucceed)
Try to turn a call to @llvm.objectsize into an integer value of the given Type.
LLVM_ABI Value * getAllocAlignment(const CallBase *V, const TargetLibraryInfo *TLI)
Gets the alignment argument for an aligned_alloc-like function, using either built-in knowledge based...
LLVM_ABI RetainedKnowledge getKnowledgeFromOperandInAssume(AssumeInst &Assume, unsigned Idx)
Retrieve the information held by Assume on the operand at index Idx.
LLVM_READONLY APFloat maximum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 maximum semantics.
LLVM_ABI Value * simplifyCall(CallBase *Call, Value *Callee, ArrayRef< Value * > Args, const SimplifyQuery &Q)
Given a callsite, callee, and arguments, fold the result or return null.
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer that is less than or equal to Value and is congruent to Skew modulo Align.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
LLVM_ABI bool isAssumeWithEmptyBundle(const AssumeInst &Assume)
Return true iff the operand bundles of the provided llvm.assume do not contain any valuable information.
LLVM_ABI bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true, bool IgnoreUBImplyingAttrs=true)
Return true if the instruction does not have any effects besides calculating the result and does not have undefined behavior.
LLVM_ABI Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
constexpr T MinAlign(U A, V B)
A and B are either alignments or offsets.
LLVM_ABI RetainedKnowledge getKnowledgeFromBundle(AssumeInst &Assume, const CallBase::BundleOpInfo &BOI)
This extracts the Knowledge from an element of an operand bundle.
auto dyn_cast_or_null(const Y &Val)
Align getKnownAlignment(Value *V, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to infer an alignment for the specified pointer.
LLVM_ABI bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned element.
LLVM_READONLY APFloat maxnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2008 maxNum semantics.
LLVM_ABI FPClassTest fneg(FPClassTest Mask)
Return the test mask which returns true if the value's sign bit is flipped.
SelectPatternFlavor
Specific patterns of select instructions we can match.
@ SPF_ABS
Absolute value.
@ SPF_NABS
Negated absolute value.
LLVM_ABI Constant * getLosslessUnsignedTrunc(Constant *C, Type *DestTy, const DataLayout &DL, PreservedCastFlags *Flags=nullptr)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
bool isModSet(const ModRefInfo MRI)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
LLVM_ABI SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
LLVM_ABI bool matchSimpleBinaryIntrinsicRecurrence(const IntrinsicInst *I, PHINode *&P, Value *&Init, Value *&OtherOp)
Attempt to match a simple value-accumulating recurrence of the form: llvm.intrinsic....
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an address space.
auto find_if_not(R &&Range, UnaryPredicate P)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
bool isAtLeastOrStrongerThan(AtomicOrdering AO, AtomicOrdering Other)
LLVM_ABI Constant * getLosslessSignedTrunc(Constant *C, Type *DestTy, const DataLayout &DL, PreservedCastFlags *Flags=nullptr)
LLVM_ABI AssumeInst * buildAssumeFromKnowledge(ArrayRef< RetainedKnowledge > Knowledge, Instruction *CtxI, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)
Build and return a new assume created from the provided knowledge if the knowledge in the assume is f...
LLVM_ABI FPClassTest inverse_fabs(FPClassTest Mask)
Return the test mask which returns true after fabs is applied to the value.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
LLVM_ABI bool isNotCrossLaneOperation(const Instruction *I)
Return true if the instruction doesn't potentially cross vector lanes.
LLVM_ABI bool maskIsAllOneOrUndef(Value *Mask)
Given a mask vector of i1, return true if all of the elements of this predicate mask are known to be true or undef.
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr int PoisonMaskElem
@ Mod
The access may modify the value stored in memory.
LLVM_ABI Value * simplifyFMAFMul(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for the multiplication of a FMA, fold the result or return null.
LLVM_ABI Value * simplifyConstrainedFPCall(CallBase *Call, const SimplifyQuery &Q)
Given a constrained FP intrinsic call, tries to compute its simplified version.
LLVM_READONLY APFloat minnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2008 minNum semantics.
OperandBundleDefT< Value * > OperandBundleDef
LLVM_ABI bool isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, unsigned ScalarOpdIdx, const TargetTransformInfo *TTI)
Identifies if the vector form of the intrinsic has a scalar operand.
LLVM_ABI ConstantRange computeConstantRangeIncludingKnownBits(const WithCache< const Value * > &V, bool ForSigned, const SimplifyQuery &SQ)
Combine constant ranges from computeConstantRange() and computeKnownBits().
DWARFExpression::Operation Op
bool isSafeToSpeculativelyExecuteWithVariableReplaced(const Instruction *I, bool IgnoreUBImplyingAttrs=true)
Variant of isSafeToSpeculativelyExecute that does not use information from the instruction's non-constant operands.
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)
If this is a call to a free function, return the freed operand.
constexpr unsigned BitWidth
LLVM_ABI bool isDereferenceablePointer(const Value *V, Type *Ty, const DataLayout &DL, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Return true if this is always a dereferenceable pointer.
LLVM_ABI bool maskIsAllZeroOrUndef(Value *Mask)
Given a mask vector of i1, return true if all of the elements of this predicate mask are known to be false or undef.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
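The three casting idioms differ in their failure behavior; a sketch on an arbitrary Value *V (illustrative):

#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;

void castingIdioms(Value *V) {
  if (isa<LoadInst>(V))
    ; // pure type test, no cast performed
  if (auto *II = dyn_cast<IntrinsicInst>(V))
    (void)II->getIntrinsicID(); // checked cast: nullptr on mismatch
  // cast<LoadInst>(V) would assert rather than return nullptr, so it is
  // only safe once the dynamic type is already established.
}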
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
LLVM_ABI std::optional< APInt > getAllocSize(const CallBase *CB, const TargetLibraryInfo *TLI, function_ref< const Value *(const Value *)> Mapper=[](const Value *V) { return V;})
Return the size of the requested allocation.
unsigned Log2(Align A)
Returns the log2 of the alignment.
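Worked values for these arithmetic helpers (a sketch; the commented results follow directly from the definitions above):

#include "llvm/Support/Alignment.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;

void mathHelpers() {
  bool P = isPowerOf2_32(16);     // true
  unsigned Lg = Log2(Align(16));  // 4
  uint64_t Dn = alignDown(13, 4); // 12
  uint64_t Mn = MinAlign(8, 12);  // 4: largest power of 2 dividing both
  (void)P; (void)Lg; (void)Dn; (void)Mn;
}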
LLVM_ABI bool maskContainsAllOneOrUndef(Value *Mask)
Given a mask vector of i1, return true if any of the elements of this predicate mask are known to be true or undef.
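A sketch of how a masked-memory-op fold might branch on these mask predicates, assuming Mask is an <N x i1> value (illustrative):

#include "llvm/Analysis/VectorUtils.h"
using namespace llvm;

void classifyMask(Value *Mask) {
  if (maskIsAllOneOrUndef(Mask))
    ; // behaves like the unmasked operation
  else if (maskIsAllZeroOrUndef(Mask))
    ; // no lanes execute: a masked store becomes a no-op
  APInt Demanded = possiblyDemandedEltsInMask(Mask); // one bit per lane
  (void)Demanded;
}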
LLVM_ABI std::optional< bool > isImpliedByDomCondition(const Value *Cond, const Instruction *ContextI, const DataLayout &DL)
Return the boolean condition value in the context of the given instruction if it is known based on dominating conditions.
LLVM_READONLY APFloat minimum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 minimum semantics.
LLVM_ABI bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW=false, bool AllowPoison=true)
Return true if the two given values are negation.
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
LLVM_ABI bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be non-negative.
LLVM_ABI bool isTriviallyVectorizable(Intrinsic::ID ID)
Identify if the intrinsic is trivially vectorizable.
LLVM_ABI std::optional< bool > computeKnownFPSignBit(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return false if we can prove that the specified FP value's sign bit is 0.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
A collection of metadata nodes that might be associated with a memory access used by the alias-analysis infrastructure.
This struct is a compact representation of a valid (non-zero power of two) alignment.
@ IEEE
IEEE-754 denormal numbers preserved.
bool isNonNegative() const
Returns true if this value is known to be non-negative.
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
unsigned getBitWidth() const
Get the bit width of this value.
bool isNonZero() const
Returns true if this value is known to be non-zero.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
bool isNegative() const
Returns true if this value is known to be negative.
unsigned countMaxLeadingZeros() const
Returns the maximum number of leading zero bits possible.
unsigned countMinPopulation() const
Returns the number of bits known to be one.
bool isAllOnes() const
Returns true if value is all one bits.
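A sketch tying computeKnownBits to these KnownBits queries (illustrative; DL is the module's DataLayout):

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Support/KnownBits.h"
using namespace llvm;

void knownBitsDemo(const Value *V, const DataLayout &DL) {
  KnownBits Known(V->getType()->getScalarSizeInBits());
  computeKnownBits(V, Known, DL);
  if (Known.isNonNegative() && Known.countMinTrailingZeros() >= 2)
    ; // V is provably a non-negative multiple of 4
}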
FPClassTest KnownFPClasses
Floating-point classes the value could be one of.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
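A sketch of the MaybeAlign-to-Align convention used throughout the memory-intrinsic folds above (illustrative helper):

#include "llvm/Support/Alignment.h"
using namespace llvm;

unsigned logOfMaybeAlign(MaybeAlign MA) {
  Align A = MA.valueOrOne(); // unspecified alignment defaults to 1
  return Log2(A);            // e.g. Align(16) -> 4
}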
A lightweight accessor for an operand bundle meant to be passed around by value.
StringRef getTagName() const
Return the tag of this operand bundle as a string.
uint32_t getTagID() const
Return the tag of this operand bundle as an integer.
Represents one piece of information held inside an operand bundle of an llvm.assume.
Attribute::AttrKind AttrKind
SelectPatternFlavor Flavor
SimplifyQuery getWithInstruction(const Instruction *I) const