#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsHexagon.h"

#define DEBUG_TYPE "instcombine"

using namespace PatternMatch;

STATISTIC(NumSimplified, "Number of library calls simplified");
static cl::opt<unsigned> GuardWideningWindow(
    "instcombine-guard-widening-window", cl::init(3),
    cl::desc("How wide an instruction window to bypass looking for "
             "another guard"));
  if (IntegerType *ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
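// The fragment above is from getPromotedType(Type *Ty), which returns the
// type promoted as it would be to pass through a va_arg area (integer types
// narrower than 32 bits promote to i32). The fragment below is from
// hasUndefSource(AnyMemTransferInst *MI), which recognizes a memcpy/memmove
// whose source is a trivially otherwise-unused alloca.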
  auto *Src = MI->getRawSource();
  while (isa<GetElementPtrInst>(Src) || isa<BitCastInst>(Src)) {
    if (!Src->hasOneUse())
    Src = cast<Instruction>(Src)->getOperand(0);
  return isa<AllocaInst>(Src) && Src->hasOneUse();
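// The excerpts below appear to come from the memcpy/memmove simplification
// (SimplifyAnyMemTransfer): raising the known source/destination alignment,
// rejecting misaligned atomic element-wise transfers, propagating !tbaa,
// !tbaa.struct, loop-parallel and access-group metadata, and replacing a
// small constant-length transfer with a single load/store pair.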
  if (!CopyDstAlign || *CopyDstAlign < DstAlign) {
    MI->setDestAlignment(DstAlign);

  if (!CopySrcAlign || *CopySrcAlign < SrcAlign) {
    MI->setSourceAlignment(SrcAlign);

  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getLength());
  if (!MemOpLength) return nullptr;

  assert(Size && "0-sized memory transferring should be removed already.");

  if (isa<AtomicMemTransferInst>(MI))
    if (*CopyDstAlign < Size || *CopySrcAlign < Size)

  if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa)) {
  } else if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa_struct)) {
    if (M->getNumOperands() == 3 && M->getOperand(0) &&
        mdconst::hasa<ConstantInt>(M->getOperand(0)) &&
        mdconst::extract<ConstantInt>(M->getOperand(0))->isZero() &&
        mdconst::hasa<ConstantInt>(M->getOperand(1)) &&
        mdconst::extract<ConstantInt>(M->getOperand(1))->getValue() ==
        M->getOperand(2) && isa<MDNode>(M->getOperand(2)))
      CopyMD = cast<MDNode>(M->getOperand(2));

  Value *Src = MI->getArgOperand(1);
  Value *Dest = MI->getArgOperand(0);
  L->setAlignment(*CopySrcAlign);
  L->setMetadata(LLVMContext::MD_tbaa, CopyMD);
  MDNode *LoopMemParallelMD =
      MI->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
  if (LoopMemParallelMD)
    L->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
  MDNode *AccessGroupMD = MI->getMetadata(LLVMContext::MD_access_group);
  L->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);

  if (LoopMemParallelMD)
    S->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
  S->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);

  if (auto *MT = dyn_cast<MemTransferInst>(MI)) {
    L->setVolatile(MT->isVolatile());
  if (isa<AtomicMemTransferInst>(MI)) {
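// The next excerpts appear to be from the memset simplification
// (SimplifyAnyMemSet): raising the destination alignment to the known
// alignment, dropping a memset of undef, and lowering a small constant-length,
// constant-value memset to a single store (updating any attached dbg.assign
// records).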
  const Align KnownAlignment =
  if (!MemSetAlign || *MemSetAlign < KnownAlignment) {
    MI->setDestAlignment(KnownAlignment);

  if (isa<UndefValue>(MI->getValue())) {

  assert(Len && "0-sized memory setting should be removed already.");
  const Align Alignment = MI->getDestAlign().valueOrOne();

  if (isa<AtomicMemSetInst>(MI))

  DAI->replaceVariableLocationOp(FillC, FillVal);

  if (isa<AtomicMemSetInst>(MI))
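// The fragments below cover the masked memory intrinsics (simplifyMaskedLoad,
// simplifyMaskedStore, simplifyMaskedGather, simplifyMaskedScatter, which
// visitCallInst dispatches to further down): an all-ones mask becomes an
// ordinary load/store, an all-zero mask folds to the pass-through value or
// erases the operation, and a gather/scatter through a splat pointer becomes
// a scalar access plus a splat.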
  const Align Alignment =
  LI->copyMetadata(II);

  if (ConstMask->isNullValue())
  if (ConstMask->isAllOnesValue()) {
  if (isa<ScalableVectorType>(ConstMask->getType()))

  if (ConstMask->isAllOnesValue())
  auto *VecTy = cast<VectorType>(II.getType());
  const Align Alignment =
      Alignment, "load.scalar");

  if (ConstMask->isNullValue())
  new StoreInst(SplatValue, SplatPtr, false, Alignment);
  if (ConstMask->isAllOnesValue()) {
  new StoreInst(Extract, SplatPtr, false, Alignment);
  if (isa<ScalableVectorType>(ConstMask->getType()))
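// simplifyInvariantGroupIntrinsic: folds nested launder.invariant.group and
// strip.invariant.group calls, e.g. launder(launder(x)) --> launder(x).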
  auto *StrippedInvariantGroupsArg = StrippedArg;
  while (auto *Intr = dyn_cast<IntrinsicInst>(StrippedInvariantGroupsArg)) {
    if (Intr->getIntrinsicID() != Intrinsic::launder_invariant_group &&
        Intr->getIntrinsicID() != Intrinsic::strip_invariant_group)
    StrippedInvariantGroupsArg = Intr->getArgOperand(0)->stripPointerCasts();
  if (StrippedArg == StrippedInvariantGroupsArg)

  Value *Result = nullptr;
         "simplifyInvariantGroupIntrinsic only handles launder and strip");
  if (Result->getType()->getPointerAddressSpace() !=
  return cast<Instruction>(Result);
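// foldCttzCtlz: folds cttz/ctlz when the number of trailing/leading zero bits
// is known, and adds !range metadata when the result range can be narrowed
// and no range metadata is present yet.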
         "Expected cttz or ctlz intrinsic");

  if (PossibleZeros == DefiniteZeros) {

  if (IT && IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) {
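// foldCtpop: simplifies ctpop, e.g. when at most one bit can be set the
// population count becomes a logical shift right of that bit, and !range
// metadata is added when none is present.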
         "Expected ctpop intrinsic");

  if ((~Known.Zero).isPowerOf2())
    return BinaryOperator::CreateLShr(

  if (IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) {
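// simplifyNeonTbl1: converts an ARM/AArch64 NEON tbl1 table lookup into a
// shufflevector when the lookup mask is a constant <8 x i8> vector.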
  auto *VecTy = cast<FixedVectorType>(II.getType());
  unsigned NumElts = VecTy->getNumElements();

  if (!VecTy->getElementType()->isIntegerTy(8) || NumElts != 8)

  for (unsigned I = 0; I < NumElts; ++I) {
    if (!COp || !isa<ConstantInt>(COp))

    Indexes[I] = cast<ConstantInt>(COp)->getLimitedValue();

    if ((unsigned)Indexes[I] >= NumElts)
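// haveSameOperands: returns true if the first NumOperands operands of two
// intrinsic calls are identical. The loop that follows is from
// removeTriviallyEmptyRange, which erases a matched start/end intrinsic pair
// (here, vaend paired with vastart/vacopy) when nothing interesting lies
// between the two calls.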
static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E,
                             unsigned NumOperands) {
  assert(I.arg_size() >= NumOperands && "Not enough operands");
  assert(E.arg_size() >= NumOperands && "Not enough operands");
  for (unsigned i = 0; i < NumOperands; i++)
    if (I.getArgOperand(i) != E.getArgOperand(i))

  for (; BI != BE; ++BI) {
    if (auto *I = dyn_cast<IntrinsicInst>(&*BI)) {
      if (I->isDebugOrPseudoInst() ||

  return I.getIntrinsicID() == Intrinsic::vastart ||
         I.getIntrinsicID() == Intrinsic::vacopy;
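// canonicalizeConstantArg0ToArg1: for commutative calls, swap a constant
// first argument with a non-constant second argument so later folds only
// have to look for the constant on the right-hand side. The fragment after
// it is the start of InstCombinerImpl::foldIntrinsicWithOverflowCommon.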
  assert(Call.arg_size() > 1 && "Need at least 2 args to swap");
  Value *Arg0 = Call.getArgOperand(0), *Arg1 = Call.getArgOperand(1);
  if (isa<Constant>(Arg0) && !isa<Constant>(Arg1)) {
    Call.setArgOperand(0, Arg1);
    Call.setArgOperand(1, Arg0);

InstCombinerImpl::foldIntrinsicWithOverflowCommon(IntrinsicInst *II) {
  Value *OperationResult = nullptr;
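// fpclassTestIsFCmp0: maps an llvm.is.fpclass test mask onto an equivalent
// fcmp against zero where one exists (the switch below is over the mask
// bits). The remaining fragments handle is.fpclass masks such as fcInf and
// fcNan when the operation is not strictfp.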
  switch (static_cast<unsigned>(Mask)) {
  case ~fcZero & ~fcNan:

  const ConstantInt *CMask = cast<ConstantInt>(Src1);
  const FPClassTest OrderedInvertedMask = ~OrderedMask & ~fcNan;

  if ((OrderedMask == fcInf || OrderedInvertedMask == fcInf) &&
      (IsOrdered || IsUnordered) && !IsStrict) {
    if (OrderedInvertedMask == fcInf)

      (IsOrdered || IsUnordered) && !IsStrict) {
      (IsOrdered || IsUnordered) && !IsStrict) {

  if (Mask == fcNan && !IsStrict) {

  if (!IsStrict && (IsOrdered || IsUnordered) &&

  return std::nullopt;
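// getKnownSign / getKnownSignOrZero compute whether a value's sign bit is
// known; signBitMustBeTheSame (below) returns true if two values are known
// to have the same sign.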
  std::optional<bool> Known1 = getKnownSign(Op1, CxtI, DL, AC, DT);
  std::optional<bool> Known0 = getKnownSign(Op0, CxtI, DL, AC, DT);
  return *Known0 == *Known1;
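// moveAddAfterMinMax: canonicalizes min/max(X + C0, C1) into
// min/max(X, C1 - C0) + C0 when the add cannot wrap, e.g.
// smax(X + 10, 20) --> smax(X, 10) + 10.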
  assert((MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin ||
          MinMaxID == Intrinsic::umax || MinMaxID == Intrinsic::umin) &&
         "Expected a min or max intrinsic");

  const APInt *C0, *C1;

  bool IsSigned = MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin;
  auto *Add = cast<BinaryOperator>(Op0);
  if ((IsSigned && !Add->hasNoSignedWrap()) ||
      (!IsSigned && !Add->hasNoUnsignedWrap()))

      IsSigned ? C1->ssub_ov(*C0, Overflow) : C1->usub_ov(*C0, Overflow);
  assert(!Overflow && "Expected simplify of min/max");

  Value *NewMinMax = Builder.CreateBinaryIntrinsic(MinMaxID, X, NewMinMaxC);
  return IsSigned ? BinaryOperator::CreateNSWAdd(NewMinMax, Add->getOperand(1))
                  : BinaryOperator::CreateNUWAdd(NewMinMax, Add->getOperand(1));
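// The next fragment matches a signed clamp of an add/sub to the range of a
// narrower power-of-two type (smin/smax against -2^(N-1) and 2^(N-1)-1) and
// rewrites it as a saturating sadd_sat/ssub_sat in the narrower width.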
  const APInt *MinValue, *MaxValue;
  } else if (match(&MinMax1,

  if (!(*MaxValue + 1).isPowerOf2() || -*MinValue != *MaxValue + 1)

  unsigned NewBitWidth = (*MaxValue + 1).logBase2() + 1;

  if (AddSub->getOpcode() == Instruction::Add)
    IntrinsicID = Intrinsic::sadd_sat;
  else if (AddSub->getOpcode() == Instruction::Sub)
    IntrinsicID = Intrinsic::ssub_sat;
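// reassociateMinMaxWithConstants: if this min/max has a constant operand and
// a matching min/max operand that also has a constant operand, fold the two
// constants together, e.g. smax(smax(X, C0), C1) --> smax(X, smax(C0, C1)).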
  const APInt *C0, *C1;

  case Intrinsic::smax:
  case Intrinsic::smin:
  case Intrinsic::umax:
  case Intrinsic::umin:

  if (!LHS || LHS->getIntrinsicID() != MinMaxID)

      {LHS->getArgOperand(0), NewC});
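// reassociateMinMaxWithConstantInOperand: if this min/max has a matching
// min/max operand with a constant, push the constant operand into the inner
// call, e.g. max(max(X, C), Y) --> max(max(X, Y), C).
// factorizeMinMaxTree (below) reduces a sequence of min/max intrinsics that
// share a common operand to fewer calls.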
  auto *InnerMM = dyn_cast<IntrinsicInst>(Inner);
  if (!InnerMM || InnerMM->getIntrinsicID() != MinMaxID ||

  if (!LHS || !RHS || LHS->getIntrinsicID() != MinMaxID ||
      RHS->getIntrinsicID() != MinMaxID ||

  Value *MinMaxOp = nullptr;
  Value *ThirdOp = nullptr;

  if (D == A || C == A) {
  } else if (D == B || C == B) {

  if (D == A || D == B) {
  } else if (C == A || C == B) {

  if (!MinMaxOp || !ThirdOp)
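// foldShuffledIntrinsicOperands: if every argument of the intrinsic is a
// unary shuffle with the same mask, perform the intrinsic on the unshuffled
// operands and shuffle the result instead (the case labels list the
// intrinsics for which this is allowed).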
  case Intrinsic::smax:
  case Intrinsic::smin:
  case Intrinsic::umax:
  case Intrinsic::umin:
  case Intrinsic::fma:
  case Intrinsic::fshl:
  case Intrinsic::fshr:

  Type *SrcTy = X->getType();
  for (unsigned i = 1, e = II->arg_size(); i != e; ++i) {
      X->getType() != SrcTy)

  Instruction *FPI = isa<FPMathOperator>(II) ? II : nullptr;
  Value *NewIntrinsic =
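// foldBitOrderCrossLogicOp is a template helper shared by the bswap and
// bitreverse folds: it moves the byte/bit reordering intrinsic across a
// bitwise logic operation, e.g.
// bswap(logic_op(bswap(X), Y)) --> logic_op(X, bswap(Y)).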
template <Intrinsic::ID IntrID>
  static_assert(IntrID == Intrinsic::bswap || IntrID == Intrinsic::bitreverse,
                "This helper only supports BSWAP and BITREVERSE intrinsics");

      isa<BinaryOperator>(V)) {
    Value *OldReorderX, *OldReorderY;

    Value *NewReorder = Builder.CreateUnaryIntrinsic(IntrID, Y);
    Value *NewReorder = Builder.CreateUnaryIntrinsic(IntrID, X);
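// InstCombinerImpl::visitCallInst: the main entry point for call-site
// combines. It first normalizes memory intrinsics (degenerate atomic element
// sizes, zero-length and volatile mem* calls, memmove from a constant global
// becoming memcpy), then dispatches on the intrinsic ID in a large switch;
// anything unhandled falls through to visitCallBase.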
  if (!II) return visitCallBase(CI);

  if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(II))
    if (ConstantInt *NumBytes = dyn_cast<ConstantInt>(AMI->getLength()))
      if (NumBytes->isNegative() ||
          (NumBytes->getZExtValue() % AMI->getElementSizeInBytes() != 0)) {
        assert(AMI->getType()->isVoidTy() &&
               "non void atomic unordered mem intrinsic");

  if (auto *MI = dyn_cast<AnyMemIntrinsic>(II)) {
    bool Changed = false;

    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue())

    if (auto *M = dyn_cast<MemIntrinsic>(MI))
      if (M->isVolatile())

    if (auto *MMI = dyn_cast<AnyMemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
              isa<AtomicMemMoveInst>(MMI)
                  ? Intrinsic::memcpy_element_unordered_atomic
                  : Intrinsic::memcpy;

      if (MTI->getSource() == MTI->getDest())

    if (auto *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
    } else if (auto *MSI = dyn_cast<AnyMemSetInst>(MI)) {

    if (Changed) return II;

  if (auto *IIFVTy = dyn_cast<FixedVectorType>(II->getType())) {
    auto VWidth = IIFVTy->getNumElements();
    APInt UndefElts(VWidth, 0);

  if (CI.use_empty() && isa<ConstrainedFPIntrinsic>(CI)) {
  case Intrinsic::objectsize: {
                             &InsertedInstructions)) {
      for (Instruction *Inserted : InsertedInstructions)

  case Intrinsic::abs: {
    bool IntMinIsPoison = cast<Constant>(II->getArgOperand(1))->isOneValue();
    if (std::optional<bool> Known =

  case Intrinsic::umin: {
           "Expected simplify of umin with max constant");

  case Intrinsic::umax: {
        (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) {

  case Intrinsic::smax:
  case Intrinsic::smin: {
        (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) {

    if ((IID == Intrinsic::umin || IID == Intrinsic::smax) &&
      return BinaryOperator::CreateAnd(I0, I1);
    if ((IID == Intrinsic::umax || IID == Intrinsic::smin) &&
      return BinaryOperator::CreateOr(I0, I1);

    if (IID == Intrinsic::smax || IID == Intrinsic::smin) {

    bool UseOr = IID == Intrinsic::smax || IID == Intrinsic::umax;
    bool UseAndN = IID == Intrinsic::smin || IID == Intrinsic::umin;

    if (IID == Intrinsic::smax || IID == Intrinsic::smin) {
      if (KnownSign == std::nullopt) {
      } else if (*KnownSign) {

      return BinaryOperator::CreateOr(I0, X);

    if (I0->hasOneUse() && !I1->hasOneUse())

    if (IID == Intrinsic::smin || IID == Intrinsic::umax)

  case Intrinsic::bitreverse: {
        X->getType()->isIntOrIntVectorTy(1)) {
            foldBitOrderCrossLogicOp<Intrinsic::bitreverse>(IIOperand, Builder))
      return crossLogicOpFold;

  case Intrinsic::bswap: {
        cast<BinaryOperator>(IIOperand)->getOpcode() == Instruction::Shl

    if (BW - LZ - TZ == 8) {
      assert(LZ != TZ && "active byte cannot be in the middle");
        return BinaryOperator::CreateNUWShl(
      return BinaryOperator::CreateExactLShr(

    unsigned C = X->getType()->getScalarSizeInBits() - BW;

            foldBitOrderCrossLogicOp<Intrinsic::bswap>(IIOperand, Builder)) {
      return crossLogicOpFold;

  case Intrinsic::masked_load:
    if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II))
  case Intrinsic::masked_store:
    return simplifyMaskedStore(*II);
  case Intrinsic::masked_gather:
    return simplifyMaskedGather(*II);
  case Intrinsic::masked_scatter:
    return simplifyMaskedScatter(*II);
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:

  case Intrinsic::powi:
      if (Power->isMinusOne())
      if (Power->equalsInt(2))
      if (!Power->getValue()[0]) {

  case Intrinsic::cttz:
  case Intrinsic::ctlz:

  case Intrinsic::ctpop:

  case Intrinsic::fshl:
  case Intrinsic::fshr: {
      if (ModuloC != ShAmtC)
             "Shift amount expected to be modulo bitwidth");

      if (IID == Intrinsic::fshr) {
      assert(IID == Intrinsic::fshl &&
             "All funnel shifts by simple constants should go left");
        return BinaryOperator::CreateShl(Op0, ShAmtC);
        return BinaryOperator::CreateLShr(Op1,

  case Intrinsic::ptrmask: {
    Value *InnerPtr, *InnerMask;
                                  {InnerPtr, NewMask}));

  case Intrinsic::uadd_with_overflow:
  case Intrinsic::sadd_with_overflow: {
    if (Instruction *I = foldIntrinsicWithOverflowCommon(II))

    const APInt *C0, *C1;
    bool IsSigned = IID == Intrinsic::sadd_with_overflow;
        IsSigned ? C1->sadd_ov(*C0, Overflow) : C1->uadd_ov(*C0, Overflow);

  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::usub_with_overflow:
    if (Instruction *I = foldIntrinsicWithOverflowCommon(II))

  case Intrinsic::ssub_with_overflow: {
    if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
  case Intrinsic::uadd_sat:
  case Intrinsic::sadd_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::ssub_sat: {
    Type *Ty = SI->getType();
    Value *Arg0 = SI->getLHS();
    Value *Arg1 = SI->getRHS();

        C->isNotMinSignedValue()) {
          Intrinsic::sadd_sat, Arg0, NegVal));

    if (auto *Other = dyn_cast<IntrinsicInst>(Arg0)) {
      const APInt *Val, *Val2;
          IID == Intrinsic::uadd_sat || IID == Intrinsic::usub_sat;
      if (Other->getIntrinsicID() == IID &&
          NewVal = Val->sadd_ov(*Val2, Overflow);

  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum: {
      case Intrinsic::maxnum:
        NewIID = Intrinsic::minnum;
      case Intrinsic::minnum:
        NewIID = Intrinsic::maxnum;
      case Intrinsic::maximum:
        NewIID = Intrinsic::minimum;
      case Intrinsic::minimum:
        NewIID = Intrinsic::maximum;
      Instruction *FNeg = UnaryOperator::CreateFNeg(NewCall);

    if (auto *M = dyn_cast<IntrinsicInst>(Arg0)) {
      case Intrinsic::maxnum:
      case Intrinsic::minnum:
      case Intrinsic::maximum:
      case Intrinsic::minimum:

        X->getType() == Y->getType()) {
      if (IID == Intrinsic::minimum || IID == Intrinsic::minnum)

  case Intrinsic::matrix_multiply: {
    Value *OpNotNeg, *NegatedOp;
    unsigned NegatedOpArg, OtherOpArg;
      NewArgs[NegatedOpArg] = OpNotNeg;

  case Intrinsic::fmuladd: {
      FAdd->copyFastMathFlags(II);

  case Intrinsic::fma: {
      FAdd->copyFastMathFlags(II);
  case Intrinsic::copysign: {

  case Intrinsic::fabs: {
    if (isa<Constant>(TVal) && isa<Constant>(FVal)) {

    Value *Magnitude, *Sign;

  case Intrinsic::ceil:
  case Intrinsic::floor:
  case Intrinsic::round:
  case Intrinsic::roundeven:
  case Intrinsic::nearbyint:
  case Intrinsic::rint:
  case Intrinsic::trunc: {

  case Intrinsic::cos:
  case Intrinsic::amdgcn_cos: {

  case Intrinsic::sin: {
      Instruction *FNeg = UnaryOperator::CreateFNeg(NewSin);

  case Intrinsic::ldexp: {
        Exp->getType() == InnerExp->getType()) {
      FastMathFlags InnerFlags = cast<FPMathOperator>(Src)->getFastMathFlags();

  case Intrinsic::ptrauth_auth:
  case Intrinsic::ptrauth_resign: {
    bool NeedSign = II->getIntrinsicID() == Intrinsic::ptrauth_resign;
    Value *AuthKey = nullptr, *AuthDisc = nullptr, *BasePtr;

    if (AuthKey && NeedSign) {
      NewIntrin = Intrinsic::ptrauth_resign;
    } else if (AuthKey) {
      NewIntrin = Intrinsic::ptrauth_auth;
    } else if (NeedSign) {
      NewIntrin = Intrinsic::ptrauth_sign;
  case Intrinsic::arm_neon_vtbl1:
  case Intrinsic::aarch64_neon_tbl1:

  case Intrinsic::arm_neon_vmulls:
  case Intrinsic::arm_neon_vmullu:
  case Intrinsic::aarch64_neon_smull:
  case Intrinsic::aarch64_neon_umull: {
    if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {

    bool Zext = (IID == Intrinsic::arm_neon_vmullu ||
                 IID == Intrinsic::aarch64_neon_umull);

    if (Constant *CV0 = dyn_cast<Constant>(Arg0)) {
      if (Constant *CV1 = dyn_cast<Constant>(Arg1)) {

    if (Constant *CV1 = dyn_cast<Constant>(Arg1))
          dyn_cast_or_null<ConstantInt>(CV1->getSplatValue()))

  case Intrinsic::arm_neon_aesd:
  case Intrinsic::arm_neon_aese:
  case Intrinsic::aarch64_crypto_aesd:
  case Intrinsic::aarch64_crypto_aese: {

  case Intrinsic::hexagon_V6_vandvrt:
  case Intrinsic::hexagon_V6_vandvrt_128B: {
    if (auto Op0 = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
      if (ID0 != Intrinsic::hexagon_V6_vandqrt &&
          ID0 != Intrinsic::hexagon_V6_vandqrt_128B)
        if ((C & 0xFF) && (C & 0xFF00) && (C & 0xFF0000) && (C & 0xFF000000))
  case Intrinsic::stackrestore: {
    enum class ClassifyResult {
      CallWithSideEffects,

      if (isa<AllocaInst>(I))
        return ClassifyResult::Alloca;

      if (auto *CI = dyn_cast<CallInst>(I)) {
        if (auto *II = dyn_cast<IntrinsicInst>(CI)) {
            return ClassifyResult::StackRestore;
          return ClassifyResult::CallWithSideEffects;
        return ClassifyResult::CallWithSideEffects;

      return ClassifyResult::None;

    if (SS->getIntrinsicID() == Intrinsic::stacksave &&
      bool CannotRemove = false;
      for (++BI; &*BI != II; ++BI) {
        switch (Classify(&*BI)) {
        case ClassifyResult::None:
        case ClassifyResult::StackRestore:
          if (cast<IntrinsicInst>(*BI).getArgOperand(0) != SS)
            CannotRemove = true;
        case ClassifyResult::Alloca:
        case ClassifyResult::CallWithSideEffects:
          CannotRemove = true;

    bool CannotRemove = false;
    for (++BI; &*BI != TI; ++BI) {
      switch (Classify(&*BI)) {
      case ClassifyResult::None:
      case ClassifyResult::StackRestore:
      case ClassifyResult::Alloca:
      case ClassifyResult::CallWithSideEffects:
        CannotRemove = true;

    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))

  case Intrinsic::lifetime_end:
      return I.getIntrinsicID() == Intrinsic::lifetime_start;
  case Intrinsic::assume: {
    assert(isa<AssumeInst>(Assume));

    if (match(Next, m_Intrinsic<Intrinsic::assume>(m_Specific(IIOperand))))
      return RemoveConditionFromAssume(Next);

      return RemoveConditionFromAssume(II);

      if (OBU.getTagName() == "separate_storage") {
        auto MaybeSimplifyHint = [&](const Use &U) {
          Value *Hint = U.get();
        MaybeSimplifyHint(OBU.Inputs[0]);
        MaybeSimplifyHint(OBU.Inputs[1]);

        Replacement->insertBefore(Next);
        return RemoveConditionFromAssume(II);

      if (auto *Replacement =
          Replacement->insertAfter(II);
          return RemoveConditionFromAssume(II);

      if (BOI.End - BOI.Begin > 2)
      if (BOI.End - BOI.Begin > 0) {
      if (BOI.End - BOI.Begin > 0)
      if (BOI.End - BOI.Begin > 1)

  case Intrinsic::experimental_guard: {
    Value *NextCond = nullptr;
            m_Intrinsic<Intrinsic::experimental_guard>(m_Value(NextCond)))) {
      if (CurrCond != NextCond) {
        while (MoveI != NextInst) {
  case Intrinsic::vector_insert: {
    auto *DstTy = dyn_cast<FixedVectorType>(II->getType());
    auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType());
    auto *SubVecTy = dyn_cast<FixedVectorType>(SubVec->getType());

    if (DstTy && VecTy && SubVecTy) {
      unsigned DstNumElts = DstTy->getNumElements();
      unsigned VecNumElts = VecTy->getNumElements();
      unsigned SubVecNumElts = SubVecTy->getNumElements();
      unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();

      if (VecNumElts == SubVecNumElts)
        for (i = 0; i != SubVecNumElts; ++i)
        for (; i != VecNumElts; ++i)

      for (unsigned i = 0; i != IdxN; ++i)
      for (unsigned i = DstNumElts; i != DstNumElts + SubVecNumElts; ++i)
      for (unsigned i = IdxN + SubVecNumElts; i != DstNumElts; ++i)

  case Intrinsic::vector_extract: {
    unsigned ExtractIdx = cast<ConstantInt>(Idx)->getZExtValue();
    Value *InsertTuple, *InsertIdx, *InsertValue;
    if (match(Vec, m_Intrinsic<Intrinsic::vector_insert>(m_Value(InsertTuple),
        InsertValue->getType() == ReturnType) {
      unsigned Index = cast<ConstantInt>(InsertIdx)->getZExtValue();
      if (ExtractIdx == Index)

    auto *DstTy = dyn_cast<FixedVectorType>(ReturnType);
    auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType());

    if (DstTy && VecTy) {
      unsigned DstNumElts = DstTy->getNumElements();
      unsigned VecNumElts = VecTy->getNumElements();
      unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();

      if (VecNumElts == DstNumElts) {
      for (unsigned i = 0; i != DstNumElts; ++i)
        Mask.push_back(IdxN + i);

  case Intrinsic::experimental_vector_reverse: {
    auto *OldBinOp = cast<BinaryOperator>(Vec);
            OldBinOp->getOpcode(), X, Y, OldBinOp, OldBinOp->getName(), II));
            OldBinOp->getOpcode(), X, BO1, OldBinOp, OldBinOp->getName(), II));
            OldBinOp->getOpcode(), BO0, Y, OldBinOp, OldBinOp->getName(), II));

    auto *OldUnOp = cast<UnaryOperator>(Vec);
        OldUnOp->getOpcode(), X, OldUnOp, OldUnOp->getName(), II);
  case Intrinsic::vector_reduce_or:
  case Intrinsic::vector_reduce_and: {
      if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))

      if (IID == Intrinsic::vector_reduce_and) {
        assert(IID == Intrinsic::vector_reduce_or && "Expected or reduction.");

  case Intrinsic::vector_reduce_add: {
    if (IID == Intrinsic::vector_reduce_add) {
        if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))
            cast<Instruction>(Arg)->getOpcode() == Instruction::SExt)

  case Intrinsic::vector_reduce_xor: {
    if (IID == Intrinsic::vector_reduce_xor) {
        if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))

  case Intrinsic::vector_reduce_mul: {
    if (IID == Intrinsic::vector_reduce_mul) {
        if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))

  case Intrinsic::vector_reduce_umin:
  case Intrinsic::vector_reduce_umax: {
    if (IID == Intrinsic::vector_reduce_umin ||
        IID == Intrinsic::vector_reduce_umax) {
        if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))
        Value *Res = IID == Intrinsic::vector_reduce_umin

  case Intrinsic::vector_reduce_smin:
  case Intrinsic::vector_reduce_smax: {
    if (IID == Intrinsic::vector_reduce_smin ||
        IID == Intrinsic::vector_reduce_smax) {
        if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))
          ExtOpc = cast<CastInst>(Arg)->getOpcode();
        Value *Res = ((IID == Intrinsic::vector_reduce_smin) ==
                      (ExtOpc == Instruction::CastOps::ZExt))

  case Intrinsic::vector_reduce_fmax:
  case Intrinsic::vector_reduce_fmin:
  case Intrinsic::vector_reduce_fadd:
  case Intrinsic::vector_reduce_fmul: {
    bool CanBeReassociated = (IID != Intrinsic::vector_reduce_fadd &&
                              IID != Intrinsic::vector_reduce_fmul) ||
    const unsigned ArgIdx = (IID == Intrinsic::vector_reduce_fadd ||
                             IID == Intrinsic::vector_reduce_fmul)
    if (!isa<FixedVectorType>(Arg->getType()) || !CanBeReassociated ||
        !cast<ShuffleVectorInst>(Arg)->isSingleSource())
    int Sz = Mask.size();
    for (int Idx : Mask) {
    if (UsedIndices.all()) {

  case Intrinsic::is_fpclass: {

  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::ctpop:
  case Intrinsic::umin:
  case Intrinsic::umax:
  case Intrinsic::smin:
  case Intrinsic::smax:
  case Intrinsic::usub_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::sadd_sat:
    if (auto *Sel = dyn_cast<SelectInst>(Op))

  return visitCallBase(*II);
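// The next fragments are from visitFenceInst (dropping a fence that is
// identical to or weaker than an adjacent fence) and from visitInvokeInst /
// visitCallBrInst, which both defer to visitCallBase.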
  if (FI1SyncScope != FI2->getSyncScopeID() ||
  if (NFI && isIdenticalOrStrongerFence(NFI, &FI))
  if (isIdenticalOrStrongerFence(PFI, &FI))

  return visitCallBase(II);
  return visitCallBase(CBI);
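// tryOptimizeCall runs the library-call simplifier (Simplifier.optimizeCall)
// on a plain call. findInitTrampolineFromAlloca, findInitTrampolineFromBB and
// findInitTrampoline locate the llvm.init.trampoline call that fills the
// memory used by an llvm.adjust.trampoline operand.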
  if (Value *With = Simplifier.optimizeCall(CI, Builder)) {

  if (Underlying != TrampMem &&
      (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
  if (!isa<AllocaInst>(Underlying))

      InitTrampoline = II;

  if (!InitTrampoline)

  if (InitTrampoline->getOperand(0) != TrampMem)

  return InitTrampoline;

  Callee = Callee->stripPointerCasts();
  IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
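// InstCombinerImpl::annotateAnyAllocSite: annotates the return value of an
// allocation call with nonnull, dereferenceable/dereferenceable_or_null and
// alignment attributes derived from the allocation size and alignment
// arguments.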
bool InstCombinerImpl::annotateAnyAllocSite(CallBase &Call,
  bool Changed = false;

  if (!Call.getType()->isPointerTy())

  if (Call.hasRetAttr(Attribute::NonNull)) {
    Changed = !Call.hasRetAttr(Attribute::Dereferenceable);
        Call.getContext(), Size->getLimitedValue()));
    Changed = !Call.hasRetAttr(Attribute::DereferenceableOrNull);
        Call.getContext(), Size->getLimitedValue()));

  ConstantInt *AlignOpC = dyn_cast<ConstantInt>(Alignment);
    Align ExistingAlign = Call.getRetAlign().valueOrOne();
    if (NewAlign > ExistingAlign) {
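// InstCombinerImpl::visitCallBase: combines common to calls, invokes and
// callbrs: annotating allocation sites, adding nonnull argument attributes,
// dropping an unneeded convergent attribute, handling calls through
// null/undef callees and trampolines, propagating returned-argument
// information, warning about kcfi type mismatches, and shrinking the gc live
// set of statepoints.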
  bool Changed = annotateAnyAllocSite(Call, &TLI);

    if (V->getType()->isPointerTy() &&
        !Call.paramHasAttr(ArgNo, Attribute::NonNull) &&

  assert(ArgNo == Call.arg_size() && "Call arguments not processed correctly.");

  if (!ArgNos.empty()) {
    Call.setAttributes(AS);

  Function *CalleeF = dyn_cast<Function>(Callee);
      transformConstExprCastCall(Call))

      LLVM_DEBUG(dbgs() << "Removing convergent attr from instr " << Call
      Call.setNotConvergent();

    if (isa<CallInst>(OldCall))
    cast<CallBase>(OldCall)->setCalledFunction(

  if ((isa<ConstantPointerNull>(Callee) &&
      isa<UndefValue>(Callee)) {
    if (!Call.getType()->isVoidTy())

    if (Call.isTerminator()) {

      return transformCallThroughTrampoline(Call, *II);

  if (isa<InlineAsm>(Callee) && !Call.doesNotThrow()) {
    if (!IA->canThrow()) {
      Call.setDoesNotThrow();

  if (CallInst *CI = dyn_cast<CallInst>(&Call)) {

  if (!Call.use_empty() && !Call.isMustTailCall())
    if (Value *ReturnedArg = Call.getReturnedArgOperand()) {
      Type *RetArgTy = ReturnedArg->getType();

  if (Bundle && !Call.isIndirectCall()) {
    ConstantInt *ExpectedType = cast<ConstantInt>(Bundle->Inputs[0]);
      FunctionType = mdconst::extract<ConstantInt>(MD->getOperand(0));

        dbgs() << Call.getModule()->getName()
               << ": warning: kcfi: " << Call.getCaller()->getName()
               << ": call to " << CalleeF->getName()
               << " using a mismatching function pointer type\n";

  switch (Call.getIntrinsicID()) {
  case Intrinsic::experimental_gc_statepoint: {

      if (isa<UndefValue>(DerivedPtr) || isa<UndefValue>(BasePtr)) {

      if (auto *PT = dyn_cast<PointerType>(GCR.getType())) {
        if (isa<ConstantPointerNull>(DerivedPtr)) {

      LiveGcValues.insert(BasePtr);
      LiveGcValues.insert(DerivedPtr);

    std::optional<OperandBundleUse> Bundle =
    unsigned NumOfGCLives = LiveGcValues.size();
    if (!Bundle || NumOfGCLives == Bundle->Inputs.size())

    std::vector<Value *> NewLiveGc;
    for (Value *V : Bundle->Inputs) {
      if (Val2Idx.count(V))
      if (LiveGcValues.count(V)) {
        Val2Idx[V] = NewLiveGc.size();
        NewLiveGc.push_back(V);
        Val2Idx[V] = NumOfGCLives;

      assert(Val2Idx.count(BasePtr) && Val2Idx[BasePtr] != NumOfGCLives &&
             "Missed live gc for base pointer");
      assert(Val2Idx.count(DerivedPtr) && Val2Idx[DerivedPtr] != NumOfGCLives &&
             "Missed live gc for derived pointer");

  return Changed ? &Call : nullptr;
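// InstCombinerImpl::transformConstExprCastCall: if the callee is a cast of a
// function with a compatible signature, rewrite the call to use the callee
// directly, converting arguments and the return value as needed.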
bool InstCombinerImpl::transformConstExprCastCall(CallBase &Call) {
      dyn_cast<Function>(Call.getCalledOperand()->stripPointerCasts());

  assert(!isa<CallBrInst>(Call) &&
         "CallBr's don't have a single point after a def to insert at");

  if (Callee->hasFnAttribute("thunk"))

  if (Call.isMustTailCall())

  Type *NewRetTy = FT->getReturnType();

  if (OldRetTy != NewRetTy) {
    if (Callee->isDeclaration())

    if (!Caller->use_empty() &&

  if (!Caller->use_empty()) {
    if (auto *II = dyn_cast<InvokeInst>(Caller))
      PhisNotSupportedBlock = II->getNormalDest();
    if (PhisNotSupportedBlock)
      if (PHINode *PN = dyn_cast<PHINode>(U))
        if (PN->getParent() == PhisNotSupportedBlock)

  unsigned NumActualArgs = Call.arg_size();
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

  if (Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca) ||
      Callee->getAttributes().hasAttrSomewhere(Attribute::Preallocated))

  auto AI = Call.arg_begin();
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);
    Type *ActTy = (*AI)->getType();

    if (Call.isInAllocaArgument(i) ||
        Callee->getAttributes().hasParamAttr(i, Attribute::ByVal))

  if (Callee->isDeclaration()) {
    if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())

    if (FT->isVarArg() != Call.getFunctionType()->isVarArg())

    if (FT->isVarArg() && Call.getFunctionType()->isVarArg() &&
        FT->getNumParams() != Call.getFunctionType()->getNumParams())

  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&

  Args.reserve(NumActualArgs);
  ArgAttrs.reserve(NumActualArgs);

  AI = Call.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);

    Value *NewArg = *AI;
    if ((*AI)->getType() != ParamTy)
    Args.push_back(NewArg);

  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i) {

  if (FT->getNumParams() < NumActualArgs) {
    if (FT->isVarArg()) {
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        Value *NewArg = *AI;
        if (PTy != (*AI)->getType()) {
        Args.push_back(NewArg);

  assert((ArgAttrs.size() == FT->getNumParams() || FT->isVarArg()) &&
         "missing argument attributes");

  Call.getOperandBundlesAsDefs(OpBundles);

  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
                               II->getUnwindDest(), Args, OpBundles);
        cast<CallInst>(Caller)->getTailCallKind());

  NewCall->copyMetadata(*Caller, {LLVMContext::MD_prof});

  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {
      NC->setDebugLoc(Caller->getDebugLoc());

      assert(InsertPt && "No place to insert cast");

  if (!Caller->use_empty())
  else if (Caller->hasValueHandle()) {
    if (OldRetTy == NV->getType())
InstCombinerImpl::transformCallThroughTrampoline(CallBase &Call,

  if (Attrs.hasAttrSomewhere(Attribute::Nest))

  unsigned NestArgNo = 0;
  Type *NestTy = nullptr;

       E = NestFTy->param_end();
       I != E; ++NestArgNo, ++I) {

  std::vector<Value *> NewArgs;
  std::vector<AttributeSet> NewArgAttrs;
  NewArgs.reserve(Call.arg_size() + 1);
  NewArgAttrs.reserve(Call.arg_size());

  auto I = Call.arg_begin(), E = Call.arg_end();
    if (ArgNo == NestArgNo) {
      if (NestVal->getType() != NestTy)
      NewArgs.push_back(NestVal);
      NewArgAttrs.push_back(NestAttr);

    NewArgs.push_back(*I);
    NewArgAttrs.push_back(Attrs.getParamAttrs(ArgNo));

  std::vector<Type *> NewTypes;
  NewTypes.reserve(FTy->getNumParams() + 1);

       E = FTy->param_end();
    if (ArgNo == NestArgNo)
      NewTypes.push_back(NestTy);

    NewTypes.push_back(*I);

                        Attrs.getRetAttrs(), NewArgAttrs);

  Call.getOperandBundlesAsDefs(OpBundles);

  if (InvokeInst *II = dyn_cast<InvokeInst>(&Call)) {
                           II->getUnwindDest(), NewArgs, OpBundles);
    cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
    cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
  } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(&Call)) {
                           CBI->getIndirectDests(), NewArgs, OpBundles);
    cast<CallBrInst>(NewCaller)->setCallingConv(CBI->getCallingConv());
    cast<CallBrInst>(NewCaller)->setAttributes(NewPAL);
    cast<CallInst>(NewCaller)->setTailCallKind(
        cast<CallInst>(Call).getTailCallKind());
    cast<CallInst>(NewCaller)->setCallingConv(
        cast<CallInst>(Call).getCallingConv());
    cast<CallInst>(NewCaller)->setAttributes(NewPAL);

  Call.setCalledFunction(FTy, NestF);