25#include "llvm/IR/IntrinsicsAMDGPU.h"
32#include "AMDGPUGenCallingConv.inc"
35 "amdgpu-bypass-slow-div",
36 cl::desc(
"Skip 64-bit divide for dynamic 32-bit values"),
45 if (StoreSize % 32 == 0)
201 {MVT::v2i8, MVT::v4i8, MVT::v2i16, MVT::v3i16, MVT::v4i16})
407 {MVT::f16, MVT::f32}, Legal);
413 {MVT::f16, MVT::f32, MVT::f64}, Expand);
448 {MVT::v2f32, MVT::v3f32, MVT::v4f32, MVT::v5f32,
449 MVT::v6f32, MVT::v7f32, MVT::v8f32, MVT::v16f32,
450 MVT::v2f64, MVT::v3f64, MVT::v4f64, MVT::v8f64,
456 {MVT::v2f16, MVT::v3f16, MVT::v4f16, MVT::v16f16},
463 {MVT::v3i32, MVT::v3f32, MVT::v4i32, MVT::v4f32,
464 MVT::v5i32, MVT::v5f32, MVT::v6i32, MVT::v6f32,
465 MVT::v7i32, MVT::v7f32, MVT::v8i32, MVT::v8f32,
466 MVT::v9i32, MVT::v9f32, MVT::v10i32, MVT::v10f32,
467 MVT::v11i32, MVT::v11f32, MVT::v12i32, MVT::v12f32},
472 {MVT::v2f32, MVT::v2i32, MVT::v3f32, MVT::v3i32, MVT::v4f32,
473 MVT::v4i32, MVT::v5f32, MVT::v5i32, MVT::v6f32, MVT::v6i32,
474 MVT::v7f32, MVT::v7i32, MVT::v8f32, MVT::v8i32, MVT::v9f32,
475 MVT::v9i32, MVT::v10i32, MVT::v10f32, MVT::v11i32, MVT::v11f32,
476 MVT::v12i32, MVT::v12f32, MVT::v16i32, MVT::v32f32, MVT::v32i32,
477 MVT::v2f64, MVT::v2i64, MVT::v3f64, MVT::v3i64, MVT::v4f64,
478 MVT::v4i64, MVT::v8f64, MVT::v8i64, MVT::v16f64, MVT::v16i64},
484 const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
485 for (MVT VT : ScalarIntVTs) {
524 for (auto VT : {MVT::i8, MVT::i16})
528 MVT::v2i32, MVT::v3i32, MVT::v4i32, MVT::v5i32, MVT::v6i32, MVT::v7i32,
529 MVT::v9i32, MVT::v10i32, MVT::v11i32, MVT::v12i32};
531 for (MVT VT : VectorIntTypes) {
549 MVT::v2f32, MVT::v3f32, MVT::v4f32, MVT::v5f32, MVT::v6f32, MVT::v7f32,
550 MVT::v9f32, MVT::v10f32, MVT::v11f32, MVT::v12f32};
552 for (MVT VT : FloatVectorTypes) {
649 const auto Flags = Op.getNode()->getFlags();
650 if (Flags.hasNoSignedZeros())
701 unsigned Opc = N->getOpcode();
722 return (N->getNumOperands() > 2 && N->getOpcode() != ISD::SELECT) ||
731 return N->getValueType(0) == MVT::f32;
738 if (isa<MemSDNode>(N))
741 switch (N->getOpcode()) {
756 switch (N->getConstantOperandVal(0)) {
757 case Intrinsic::amdgcn_interp_p1:
758 case Intrinsic::amdgcn_interp_p2:
759 case Intrinsic::amdgcn_interp_mov:
760 case Intrinsic::amdgcn_interp_p1_f16:
761 case Intrinsic::amdgcn_interp_p2_f16:
781 unsigned NumMayIncreaseSize = 0;
782 MVT VT = N->getValueType(0).getScalarType().getSimpleVT();
787 for (const SDNode *U : N->users()) {
822 bool ForCodeSize) const {
824 return (ScalarVT == MVT::f32 || ScalarVT == MVT::f64 ||
831 return (ScalarVT != MVT::f32 && ScalarVT != MVT::f64);
836 std::optional<unsigned> ByteOffset) const {
848 EVT OldVT = N->getValueType(0);
856 if (OldSize >= 32 && NewSize < 32 && MN->getAlign() >= Align(4) &&
871 return (OldSize < 32);
886 if ((LScalarSize >= CastScalarSize) && (CastScalarSize < 32))
891 CastTy, MMO, &Fast) &&
907 switch (N->getOpcode()) {
912 unsigned IntrID = N->getConstantOperandVal(0);
916 unsigned IntrID = N->getConstantOperandVal(1);
920 if (cast<LoadSDNode>(N)->getMemOperand()->getAddrSpace() ==
934 switch (Op.getOpcode()) {
944 EVT VT = Op.getValueType();
969 return VT == MVT::f32 || VT == MVT::f64 ||
970 (Subtarget->has16BitInsts() && (VT == MVT::f16 || VT == MVT::bf16));
977 return VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f16 || VT == MVT::bf16;
1001 unsigned SrcSize = Source.getSizeInBits();
1004 return DestSize < SrcSize && DestSize % 32 == 0;
1010 unsigned SrcSize = Source->getScalarSizeInBits();
1014 return SrcSize >= 32;
1016 return DestSize < SrcSize && DestSize % 32 == 0;
1020 unsigned SrcSize = Src->getScalarSizeInBits();
1024 return DestSize >= 32;
1026 return SrcSize == 32 && DestSize == 64;
1035 if (Src == MVT::i16)
1036 return Dest == MVT::i32 || Dest == MVT::i64;
1038 return Src == MVT::i32 && Dest == MVT::i64;
1043 switch (N->getOpcode()) {
1062 if (!N->isDivergent() && DestVT.isInteger() &&
1080 if (isa<LoadSDNode>(N))
1090 "Expected shift op");
1092 SDValue ShiftLHS = N->getOperand(0);
1107 if (N->getValueType(0) == MVT::i32 && N->hasOneUse() &&
1108 (N->user_begin()->getOpcode() == ISD::SRA ||
1109 N->user_begin()->getOpcode() == ISD::SRL))
1116 auto *RHSLd = dyn_cast<LoadSDNode>(RHS);
1117 auto *LHS0 = dyn_cast<LoadSDNode>(LHS.getOperand(0));
1118 auto *LHS1 = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
1119 return LHS0 && LHS1 && RHSLd && LHS0->getExtensionType() == ISD::ZEXTLOAD &&
1120 LHS1->getAPIntValue() == LHS0->getMemoryVT().getScalarSizeInBits() &&
1125 return !(IsShiftAndLoad(LHS, RHS) || IsShiftAndLoad(RHS, LHS));
1145 return CC_AMDGPU_CS_CHAIN;
1149 return CC_AMDGPU_Func;
1175 return RetCC_SI_Shader;
1178 return RetCC_SI_Gfx;
1182 return RetCC_AMDGPU_Func;
1221 const unsigned ExplicitOffset = ST.getExplicitKernelArgOffset();
1228 unsigned InIndex = 0;
1231 const bool IsByRef = Arg.hasByRefAttr();
1232 Type *BaseArgTy = Arg.getType();
1233 Type *MemArgTy = IsByRef ? Arg.getParamByRefType() : BaseArgTy;
1234 Align Alignment = DL.getValueOrABITypeAlignment(
1235 IsByRef ? Arg.getParamAlign() : std::nullopt, MemArgTy);
1236 MaxAlign = std::max(Alignment, MaxAlign);
1237 uint64_t AllocSize = DL.getTypeAllocSize(MemArgTy);
1239 uint64_t ArgOffset = alignTo(ExplicitArgOffset, Alignment) + ExplicitOffset;
1240 ExplicitArgOffset = alignTo(ExplicitArgOffset, Alignment) + AllocSize;
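The two running offsets above implement the explicit kernel-argument layout: each argument starts at the current offset rounded up to its alignment, and the running offset then advances by the allocation size. A standalone sketch of that bookkeeping (hypothetical argument list and a local alignTo helper; illustrative only, not the LLVM code):

#include <cstdint>
#include <cstdio>

static uint64_t alignTo(uint64_t Value, uint64_t Alignment) {
  return (Value + Alignment - 1) / Alignment * Alignment;
}

int main() {
  struct { const char *Name; uint64_t Size, Align; } Args[] = {
      {"i32", 4, 4}, {"double", 8, 8}, {"i16", 2, 2}};
  uint64_t ExplicitArgOffset = 0; // ExplicitOffset (the fixed header) assumed 0 here
  for (const auto &A : Args) {
    uint64_t ArgOffset = alignTo(ExplicitArgOffset, A.Align);
    ExplicitArgOffset = alignTo(ExplicitArgOffset, A.Align) + A.Size;
    std::printf("%-6s at byte %llu\n", A.Name, (unsigned long long)ArgOffset);
  }
  // Prints: i32 at byte 0, double at byte 8 (bytes 4..7 are padding), i16 at byte 16.
}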
1253 for (unsigned Value = 0, NumValues = ValueVTs.size();
1291 } else if (RegisterVT.isVector()) {
1294 assert(MemoryBits % NumElements == 0);
1298 MemoryBits / NumElements);
1316 unsigned PartOffset = 0;
1317 for (unsigned i = 0; i != NumRegs; ++i) {
1319 BasePartOffset + PartOffset,
1358 int ClobberedFI) const {
1361 int64_t LastByte = FirstByte + MFI.getObjectSize(ClobberedFI) - 1;
1370 if (LoadSDNode *L = dyn_cast<LoadSDNode>(U)) {
1372 if (FI->getIndex() < 0) {
1374 int64_t InLastByte = InFirstByte;
1377 if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
1378 (FirstByte <= InFirstByte && InFirstByte <= LastByte))
1400 FuncName = G->getSymbol();
1402 FuncName = G->getGlobal()->getName();
1432 switch (Op.getOpcode()) {
1436 "instruction is not implemented yet!");
1481 switch (N->getOpcode()) {
1528 if (std::optional<uint32_t> Address =
1530 if (IsNamedBarrier) {
1535 } else if (IsNamedBarrier) {
1543 GV->getName() != "llvm.amdgcn.module.lds" &&
1548 Fn, "local memory global used by non-kernel function",
1565 "Do not know what to do with an non-zero offset");
1581 EVT VT = Op.getValueType();
1583 unsigned OpBitSize = Op.getOperand(0).getValueType().getSizeInBits();
1584 if (OpBitSize >= 32 && OpBitSize % 32 == 0) {
1585 unsigned NewNumElt = OpBitSize / 32;
1586 EVT NewEltVT = (NewNumElt == 1) ? MVT::i32
1588 MVT::i32, NewNumElt);
1589 for (const SDUse &U : Op->ops()) {
1595 Args.push_back(NewIn);
1605 for (const SDUse &U : Op->ops())
1615 unsigned Start = Op.getConstantOperandVal(1);
1616 EVT VT = Op.getValueType();
1617 EVT SrcVT = Op.getOperand(0).getValueType();
1622 assert(NumElt % 2 == 0 && NumSrcElt % 2 == 0 && "expect legal types");
1626 EVT NewVT = NumElt == 2
1738 if ((LHS == True && RHS == False) || (LHS == False && RHS == True))
1757 if (LHS == NegTrue && CFalse && CRHS) {
1771std::pair<SDValue, SDValue>
1783 return std::pair(Lo, Hi);
1812 HiVT = NumElts - LoNumElts == 1
1815 return std::pair(LoVT, HiVT);
1820std::pair<SDValue, SDValue>
1822 const EVT &LoVT, const EVT &HiVT,
1824 EVT VT = N.getValueType();
1828 "More vector elements requested than available!");
1859 EVT VT = Op.getValueType();
1871 SDValue BasePtr = Load->getBasePtr();
1872 EVT MemVT = Load->getMemoryVT();
1877 EVT LoMemVT, HiMemVT;
1885 Align BaseAlign = Load->getAlign();
1889 Load->getChain(), BasePtr, SrcValue, LoMemVT,
1890 BaseAlign, Load->getMemOperand()->getFlags());
1893 DAG.getExtLoad(Load->getExtensionType(), SL, HiVT, Load->getChain(),
1895 HiMemVT, HiAlign, Load->getMemOperand()->getFlags());
1919 EVT VT = Op.getValueType();
1920 SDValue BasePtr = Load->getBasePtr();
1921 EVT MemVT = Load->getMemoryVT();
1924 Align BaseAlign = Load->getAlign();
1929 if (NumElements != 3 ||
1930 (BaseAlign < Align(8) &&
1934 assert(NumElements == 3);
1941 Load->getExtensionType(), SL, WideVT, Load->getChain(), BasePtr, SrcValue,
1942 WideMemVT, BaseAlign, Load->getMemOperand()->getFlags());
1953 SDValue Val = Store->getValue();
1961 EVT MemVT = Store->getMemoryVT();
1962 SDValue Chain = Store->getChain();
1963 SDValue BasePtr = Store->getBasePtr();
1967 EVT LoMemVT, HiMemVT;
1977 Align BaseAlign = Store->getAlign();
1982 DAG.getTruncStore(Chain, SL, Lo, BasePtr, SrcValue, LoMemVT, BaseAlign,
1983 Store->getMemOperand()->getFlags());
1986 HiMemVT, HiAlign, Store->getMemOperand()->getFlags());
1997 EVT VT = Op.getValueType();
2000 MVT IntVT = MVT::i32;
2001 MVT FltVT = MVT::f32;
2004 if (LHSSignBits < 9)
2008 if (RHSSignBits < 9)
2012 unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
2013 unsigned DivBits = BitSize - SignBits;
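The 9-sign-bit checks above gate the 24-bit division path: when both operands carry at least 9 redundant sign bits, DivBits is at most 24 and the quotient fits the f32 significand. A scalar illustration of that test (numSignBits is my own stand-in for ComputeNumSignBits; illustrative only):

#include <algorithm>
#include <cstdint>
#include <cstdio>

static unsigned numSignBits(int32_t V) {
  // How many copies of the top bit sit at the top of V: 32 for 0 and -1.
  uint32_t U = static_cast<uint32_t>(V);
  unsigned N = 1;
  for (int Bit = 31; Bit > 0 && ((U >> Bit) & 1) == ((U >> (Bit - 1)) & 1); --Bit)
    ++N;
  return N;
}

int main() {
  int32_t LHS = 1000000, RHS = -3;
  unsigned SignBits = std::min(numSignBits(LHS), numSignBits(RHS));
  unsigned DivBits = 32 - SignBits; // mirrors "DivBits = BitSize - SignBits"
  std::printf("SignBits=%u DivBits=%u -> %s\n", SignBits, DivBits,
              SignBits >= 9 ? "24-bit f32 path" : "full 32-bit expansion");
}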
2057 bool UseFmadFtz = false;
2058 if (Subtarget->isGCN()) {
2113 EVT VT = Op.getValueType();
2115 assert(VT == MVT::i64 && "LowerUDIVREM64 expects an i64");
2188 std::tie(Mulhi1_Lo, Mulhi1_Hi) =
2201 std::tie(Mulhi2_Lo, Mulhi2_Hi) =
2215 std::tie(Mul3_Lo, Mul3_Hi) = DAG.SplitScalar(Mul3, DL, HalfVT, HalfVT);
2294 for (unsigned i = 0; i < halfBitWidth; ++i) {
2295 const unsigned bitPos = halfBitWidth - i - 1;
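The loop above performs restoring long division one bit at a time over the half-width words. A plain scalar model of the same idea on a full 64-bit value (illustrative; the DAG version works on 32-bit halves and carries):

#include <cstdint>
#include <cstdio>

static void udivrem64(uint64_t Num, uint64_t Den, uint64_t &Quot, uint64_t &Rem) {
  Quot = 0;
  Rem = 0;
  for (int BitPos = 63; BitPos >= 0; --BitPos) {
    Rem = (Rem << 1) | ((Num >> BitPos) & 1); // shift in the next numerator bit
    if (Rem >= Den) {                         // restoring step
      Rem -= Den;
      Quot |= UINT64_C(1) << BitPos;
    }
  }
}

int main() {
  uint64_t Q, R;
  udivrem64(1000003, 97, Q, R);
  std::printf("%llu %llu\n", (unsigned long long)Q, (unsigned long long)R); // 10309 30
}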
2326 EVT VT = Op.getValueType();
2328 if (VT == MVT::i64) {
2334 if (VT == MVT::i32) {
2381 EVT VT = Op.getValueType();
2389 if (VT == MVT::i32) {
2394 if (VT == MVT::i64 &&
2441 EVT VT = Op.getValueType();
2442 auto Flags = Op->getFlags();
2480 const unsigned FractBits = 52;
2481 const unsigned ExpBits = 11;
2497 assert(Op.getValueType() == MVT::f64);
2507 const unsigned FractBits = 52;
2519 = DAG.getConstant((UINT64_C(1) << FractBits) - 1, SL, MVT::i64);
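FractBits/ExpBits and the fraction mask above are the ingredients of a bit-pattern trunc for f64: the unbiased exponent says how many mantissa bits are fractional, and those bits are cleared. A scalar model (illustrative; it assumes the same special cases the DAG code tests for):

#include <bit>
#include <cstdint>
#include <cstdio>

static double truncViaBits(double X) {
  const unsigned FractBits = 52, ExpBits = 11;
  uint64_t Bits = std::bit_cast<uint64_t>(X);
  int Exp = int((Bits >> FractBits) & ((1u << ExpBits) - 1)) - 1023;
  if (Exp < 0)
    return std::bit_cast<double>(Bits & (UINT64_C(1) << 63)); // |X| < 1 -> signed zero
  if (Exp >= int(FractBits))
    return X;                                                  // already an integer
  uint64_t FractMask = (UINT64_C(1) << (FractBits - Exp)) - 1; // bits below the point
  return std::bit_cast<double>(Bits & ~FractMask);
}

int main() { std::printf("%g %g\n", truncViaBits(42.75), truncViaBits(-0.9)); } // 42 -0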
2544 assert(Op.getValueType() == MVT::f64);
2577 auto VT = Op.getValueType();
2578 auto Arg = Op.getOperand(0u);
2590 EVT VT = Op.getValueType();
2641 switch (Src.getOpcode()) {
2643 return Src.getOperand(0).getValueType() == MVT::f16;
2648 unsigned IntrinsicID = Src.getConstantOperandVal(0);
2649 switch (IntrinsicID) {
2650 case Intrinsic::amdgcn_frexp_mant:
2665 return Flags.hasApproximateFuncs();
2681 EVT VT = Src.getValueType();
2692 return IsLtSmallestNormal;
2698 EVT VT = Src.getValueType();
2711std::pair<SDValue, SDValue>
2732 return {ScaledInput, IsLtSmallestNormal};
2743 EVT VT = Op.getValueType();
2747 if (VT == MVT::f16) {
2756 auto [ScaledInput, IsLtSmallestNormal] =
2779 EVT VT = Op.getValueType();
2787 if (VT == MVT::f16 || Flags.hasApproximateFuncs()) {
2812 const float c_log10 = 0x1.344134p-2f;
2813 const float cc_log10 = 0x1.09f79ep-26f;
2816 const float c_log = 0x1.62e42ep-1f;
2817 const float cc_log = 0x1.efa39ep-25f;
2829 const float ch_log10 = 0x1.344000p-2f;
2830 const float ct_log10 = 0x1.3509f6p-18f;
2833 const float ch_log = 0x1.62e000p-1f;
2834 const float ct_log = 0x1.0bfbe8p-15f;
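The constant pairs above store 1/log2(10) and 1/log2(e) as a head plus a tiny tail so the final multiply keeps more than single precision (the ch/ct pair is the variant used when no fast FMA is available). A simplified model of applying such a split constant (not the exact DAG sequence):

#include <cmath>
#include <cstdio>

int main() {
  const float c_log10 = 0x1.344134p-2f;   // head of 1/log2(10)
  const float cc_log10 = 0x1.09f79ep-26f; // tail of 1/log2(10)
  float X = 1000.0f;
  float L2 = std::log2(X);
  float R = std::fma(L2, c_log10, L2 * cc_log10); // ~log10(X)
  std::printf("%f\n", R);                         // ~3.0
}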
2851 const bool IsFiniteOnly = (Flags.hasNoNaNs() || Options.NoNaNsFPMath) &&
2852 (Flags.hasNoInfs() || Options.NoInfsFPMath);
2855 if (!IsFiniteOnly) {
2881 EVT VT = Src.getValueType();
2885 double Log2BaseInverted =
2888 if (VT == MVT::f32) {
2898 ScaledResultOffset, Zero, Flags);
2913 return DAG.getNode(ISD::FMUL, SL, VT, Log2Operand, Log2BaseInvertedOperand,
2922 EVT VT = Op.getValueType();
2926 if (VT == MVT::f16) {
2971 EVT VT = X.getValueType();
2979 SL, VT, Mul, Flags);
3011 const EVT VT = X.getValueType();
3012 const unsigned Exp2Op = VT == MVT::f32 ? static_cast<unsigned>(AMDGPUISD::EXP)
3062 EVT VT = Op.getValueType();
3125 const float cc_exp = 0x1.4ae0bep-26f;
3126 const float c_exp10 = 0x1.a934f0p+1f;
3127 const float cc_exp10 = 0x1.2f346ep-24f;
3137 const float ch_exp = 0x1.714000p+0f;
3138 const float cl_exp = 0x1.47652ap-12f;
3140 const float ch_exp10 = 0x1.a92000p+1f;
3141 const float cl_exp10 = 0x1.4f0978p-11f;
3156 PL = getMad(DAG, SL, VT, XH, CL, Mad0, Flags);
3171 DAG.getConstantFP(IsExp10 ? -0x1.66d3e8p+5f : -0x1.9d1da0p+6f, SL, VT);
3181 if (!Flags.hasNoInfs() && !Options.NoInfsFPMath) {
3183 DAG.getConstantFP(IsExp10 ? 0x1.344136p+5f : 0x1.62e430p+6f, SL, VT);
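The clamp constants above are the f32 range limits: exp overflows just past 0x1.62e430p+6 (~88.72) and underflows below -0x1.9d1da0p+6 (~-103.28), with the exp10 pair playing the same role near ~38.53 and ~-44.85. A quick numeric check of the exp pair (illustrative):

#include <cmath>
#include <cstdio>

int main() {
  // ~3.4e38 (around FLT_MAX) and ~1.4e-45 (the smallest f32 denormal).
  std::printf("%g %g\n", std::exp(0x1.62e430p+6), std::exp(-0x1.9d1da0p+6));
}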
3205 auto Opc = Op.getOpcode();
3206 auto Arg = Op.getOperand(0u);
3207 auto ResultVT = Op.getValueType();
3209 if (ResultVT != MVT::i8 && ResultVT != MVT::i16)
3213 assert(ResultVT == Arg.getValueType());
3215 const uint64_t NumBits = ResultVT.getFixedSizeInBits();
3222 NewOp = DAG.getNode(Opc, SL, MVT::i32, NewOp);
3225 NewOp = DAG.getNode(Opc, SL, MVT::i32, NewOp);
3242 bool Is64BitScalar = !Src->isDivergent() && Src.getValueType() == MVT::i64;
3244 if (Src.getValueType() == MVT::i32 || Is64BitScalar) {
3258 Op.getValueType().getScalarSizeInBits(), SL, MVT::i32);
3278 OprLo = DAG.getNode(AddOpc, SL, MVT::i32, OprLo, Const32);
3280 OprHi = DAG.getNode(AddOpc, SL, MVT::i32, OprHi, Const32);
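The adds of 32 above come from assembling a 64-bit bit count out of two 32-bit counts: assuming the 32-bit count returns -1 for a zero input (as the hardware find-first-bit instructions do), the minimum of one half's count and the other half's count plus 32 picks the right word (ctlz shown here; cttz mirrors it with the halves swapped). A scalar model under that assumption, for a non-zero input:

#include <algorithm>
#include <cstdint>
#include <cstdio>

static uint32_t clz32OrAllOnes(uint32_t V) { // models a 32-bit count that yields ~0u for V == 0
  if (!V)
    return ~0u;
  uint32_t N = 0;
  while (!(V & 0x80000000u)) {
    V <<= 1;
    ++N;
  }
  return N;
}

int main() {
  uint64_t V = 0x0000000000345678ULL; // high word is zero
  uint32_t Hi = uint32_t(V >> 32), Lo = uint32_t(V);
  uint32_t Ctlz64 = std::min(clz32OrAllOnes(Hi), clz32OrAllOnes(Lo) + 32);
  std::printf("%u\n", Ctlz64); // 42
}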
3397 if (Subtarget->isGCN())
3440 EVT DestVT = Op.getValueType();
3442 EVT SrcVT = Src.getValueType();
3444 if (SrcVT == MVT::i16) {
3445 if (DestVT == MVT::f16)
3454 if (DestVT == MVT::bf16) {
3461 if (SrcVT != MVT::i64)
3476 if (DestVT == MVT::f32)
3479 assert(DestVT == MVT::f64);
3485 EVT DestVT = Op.getValueType();
3488 EVT SrcVT = Src.getValueType();
3490 if (SrcVT == MVT::i16) {
3491 if (DestVT == MVT::f16)
3500 if (DestVT == MVT::bf16) {
3507 if (SrcVT != MVT::i64)
3525 if (DestVT == MVT::f32)
3528 assert(DestVT == MVT::f64);
3537 EVT SrcVT = Src.getValueType();
3539 assert(SrcVT == MVT::f32 || SrcVT == MVT::f64);
3552 if (Signed && SrcVT == MVT::f32) {
3565 if (SrcVT == MVT::f64) {
3567 llvm::bit_cast<double>(UINT64_C(0x3df0000000000000)), SL,
3570 llvm::bit_cast<double>(UINT64_C(0xc1f0000000000000)), SL,
3574 llvm::bit_cast<float>(UINT32_C(0x2f800000)), SL, SrcVT);
3576 llvm::bit_cast<float>(UINT32_C(0xcf800000)), SL, SrcVT);
3587 SL, MVT::i32, FloorMul);
3593 if (Signed && SrcVT == MVT::f32) {
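The four bit patterns above are +/-2^-32 and +/-2^32 in f64 and f32 form: the value is split into a high part trunc(X * 2^-32) and an exact remainder X - Hi * 2^32, each of which fits a 32-bit conversion. A scalar model for the f64 flavor (illustrative; assumes an in-range non-negative input):

#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
  double X = 12345678901234.0;
  double K0 = 0x1.0p-32;             // bit pattern 0x3df0000000000000
  double K1 = -0x1.0p+32;            // bit pattern 0xc1f0000000000000
  double HiD = std::trunc(X * K0);   // value of the high 32 bits
  double LoD = std::fma(HiD, K1, X); // X - Hi * 2^32, exact
  uint64_t R = (uint64_t(uint32_t(HiD)) << 32) | uint64_t(uint32_t(LoD));
  std::printf("%llu\n", (unsigned long long)R); // 12345678901234
}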
3615 if (Op->getFlags().hasApproximateFuncs()) {
3626 assert(Src.getSimpleValueType() == MVT::f64);
3630 const unsigned ExpMask = 0x7ff;
3631 const unsigned ExpBiasf64 = 1023;
3632 const unsigned ExpBiasf16 = 15;
3715 unsigned OpOpcode = Op.getOpcode();
3716 EVT SrcVT = Src.getValueType();
3717 EVT DestVT = Op.getValueType();
3720 if (SrcVT == MVT::f16 && DestVT == MVT::i16)
3723 if (SrcVT == MVT::bf16) {
3726 return DAG.getNode(Op.getOpcode(), DL, DestVT, PromotedSrc);
3730 if (DestVT == MVT::i16 && (SrcVT == MVT::f32 || SrcVT == MVT::f64)) {
3737 if (DestVT != MVT::i64)
3740 if (SrcVT == MVT::f16 ||
3747 return DAG.getNode(Ext, DL, MVT::i64, FpToInt32);
3750 if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
3758 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
3759 MVT VT = Op.getSimpleValueType();
3773 for (unsigned I = 0; I < NElts; ++I)
3788 EVT VT = Op.getValueType();
3802 unsigned NewOpcode = Node24->getOpcode();
3806 case Intrinsic::amdgcn_mul_i24:
3809 case Intrinsic::amdgcn_mul_u24:
3812 case Intrinsic::amdgcn_mulhi_i24:
3815 case Intrinsic::amdgcn_mulhi_u24:
3830 if (DemandedLHS || DemandedRHS)
3832 DemandedLHS ? DemandedLHS : LHS,
3833 DemandedRHS ? DemandedRHS : RHS);
3845 template <typename IntTy>
3848 if (Width + Offset < 32) {
3850 IntTy Result = static_cast<IntTy>(Shl) >> (32 - Width);
3851 if constexpr (std::is_signed_v<IntTy>) {
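The template above constant-folds a bitfield extract by shifting the field up to bit 31 and back down, using an arithmetic right shift for the signed instantiation and a logical one for the unsigned. A standalone check of the same trick (my own helper; assumes Offset + Width <= 32 and Width > 0, as the caller guarantees):

#include <cstdint>
#include <cstdio>

template <typename IntTy>
static IntTy bfe(uint32_t Src, uint32_t Offset, uint32_t Width) {
  uint32_t Shl = Src << (32 - Offset - Width);    // move the field to the top
  return static_cast<IntTy>(Shl) >> (32 - Width); // arithmetic shift if IntTy is signed
}

int main() {
  uint32_t Src = 0x0000ff00u;
  std::printf("%d %u\n", bfe<int32_t>(Src, 8, 8), (unsigned)bfe<uint32_t>(Src, 8, 8)); // -1 255
}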
3863 if (MemSDNode *M = dyn_cast<MemSDNode>(U)) {
3864 if (M->isVolatile())
4012 EVT ExtVT = cast<VTSDNode>(N1)->getVT();
4016 EVT SrcVT = Src.getValueType();
4017 if (SrcVT.bitsGE(ExtVT)) {
4028 unsigned IID = N->getConstantOperandVal(0);
4030 case Intrinsic::amdgcn_mul_i24:
4031 case Intrinsic::amdgcn_mul_u24:
4032 case Intrinsic::amdgcn_mulhi_i24:
4033 case Intrinsic::amdgcn_mulhi_u24:
4035 case Intrinsic::amdgcn_fract:
4036 case Intrinsic::amdgcn_rsq:
4037 case Intrinsic::amdgcn_rcp_legacy:
4038 case Intrinsic::amdgcn_rsq_legacy:
4039 case Intrinsic::amdgcn_rsq_clamp:
4040 case Intrinsic::amdgcn_tanh:
4041 case Intrinsic::amdgcn_prng_b32: {
4044 return Src.isUndef() ? Src : SDValue();
4046 case Intrinsic::amdgcn_frexp_exp: {
4052 if (PeekSign == Src)
4089 EVT VT = N->getValueType(0);
4102 switch (LHS->getOpcode()) {
4110 if (VT == MVT::i32 && RHSVal == 16 && X.getValueType() == MVT::i16 &&
4127 EVT XVT = X.getValueType();
4145 EVT TargetScalarType = ElementType.getHalfSizedIntegerVT(*DAG.getContext());
4162 ShiftAmt = DAG.getNode(ISD::AND, SL, TargetType, TruncShiftAmt, ShiftMask);
4179 for (unsigned I = 0; I != NElts; ++I)
4180 HiAndLoOps[2 * I + 1] = HiOps[I];
4193 EVT VT = N->getValueType(0);
4210 EVT TargetScalarType = ElementType.getHalfSizedIntegerVT(*DAG.getContext());
4225 (ElementType.getSizeInBits() - 1)) {
4226 ShiftAmt = ShiftFullAmt;
4233 ShiftAmt = DAG.getNode(ISD::AND, SL, TargetType, truncShiftAmt, ShiftMask);
4248 for (unsigned I = 0; I != NElts; ++I) {
4249 HiOps[I] = HiAndLoOps[2 * I + 1];
4279 for (unsigned I = 0; I != NElts; ++I) {
4280 HiAndLoOps[2 * I + 1] = HiOps[I];
4281 HiAndLoOps[2 * I] = LoOps[I];
4294 EVT VT = N->getValueType(0);
4306 if (auto *Mask = dyn_cast<ConstantSDNode>(LHS.getOperand(1))) {
4307 unsigned MaskIdx, MaskLen;
4308 if (Mask->getAPIntValue().isShiftedMask(MaskIdx, MaskLen) &&
4309 MaskIdx == RHSVal) {
4332 EVT TargetScalarType = ElementType.getHalfSizedIntegerVT(*DAG.getContext());
4349 ShiftAmt = DAG.getNode(ISD::AND, SL, TargetType, TruncShiftAmt, ShiftMask);
4365 for (unsigned I = 0; I != NElts; ++I)
4366 HiOps[I] = HiAndLoOps[2 * I + 1];
4385 for (unsigned I = 0; I != NElts; ++I)
4386 HiAndLoOps[2 * I] = LoOps[I];
4398 EVT VT = N->getValueType(0);
4403 SDValue Vec = Src.getOperand(0);
4427 unsigned BitIndex = K->getZExtValue();
4428 unsigned PartIndex = BitIndex / SrcEltSize;
4430 if (PartIndex * SrcEltSize == BitIndex &&
4448 EVT SrcVT = Src.getValueType();
4453 SDValue Amt = Src.getOperand(1);
4460 const unsigned MaxCstSize =
4495 return DAG.getNode(MulOpc, SL, MVT::i32, N0, N1);
4519 EVT VT = N->getValueType(0);
4525 if (!N->isDivergent())
4547 if (V.hasOneUse() || all_of(V->users(), [](const SDNode *U) -> bool {
4548 return U->getOpcode() == ISD::MUL;
4557 if (SDValue MulOper = IsFoldableAdd(N0)) {
4562 if (SDValue MulOper = IsFoldableAdd(N1)) {
4603 if (N->getValueType(0) != MVT::i32)
4624 unsigned LoOpcode = 0;
4625 unsigned HiOpcode = 0;
4652 EVT VT = N->getValueType(0);
4663 if (Subtarget->hasSMulHi() && !N->isDivergent())
4685 EVT VT = N->getValueType(0);
4696 if (Subtarget->hasSMulHi() && !N->isDivergent())
4719 unsigned Opc) const {
4720 EVT VT = Op.getValueType();
4723 LegalVT != MVT::i16))
4760 return getFFBX_U32(DAG, CmpLHS, SL, Opc);
4771 return getFFBX_U32(DAG, CmpLHS, SL, Opc);
4789 return DAG.getNode(Op, SL, VT, NewSelect);
4807 EVT VT = N.getValueType();
4834 bool ShouldFoldNeg = true;
4839 ShouldFoldNeg = false;
4841 ShouldFoldNeg = false;
4844 if (ShouldFoldNeg) {
4868 Cond, NewLHS, NewRHS);
4870 return DAG.getNode(LHS.getOpcode(), SL, VT, NewSelect);
4886 EVT VT = N->getValueType(0);
4894 if (Cond.hasOneUse()) {
4904 getSetCCInverse(cast<CondCodeSDNode>(CC)->get(), LHS.getValueType());
5010 EVT VT = N->getValueType(0);
5131 for (unsigned I = 0; I < 3; ++I)
5197 EVT SrcVT = Src.getValueType();
5233 Ops.back() = CastBack;
5283 EVT SrcVT = Src.getValueType();
5297 const auto *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0));
5302 const APFloat &Val = CFP->getValueAPF();
5312 switch(N->getOpcode()) {
5316 EVT DestVT = N->getValueType(0);
5328 EVT SrcVT = Src.getValueType();
5364 const APInt &Val = C->getValueAPF().bitcastToAPInt();
5383 if (!(N->getValueType(0).isVector() &&
5421 assert(!N->getValueType(0).isVector() &&
5422 "Vector handling of BFE not implemented");
5435 SDValue BitsFrom = N->getOperand(0);
5440 if (OffsetVal == 0) {
5442 unsigned SignBits = Signed ? (32 - WidthVal + 1) : (32 - WidthVal);
5445 if (OpSignBits >= SignBits)
5465 return constantFoldBFE<int32_t>(DAG, CVal->getSExtValue(),
5472 return constantFoldBFE<uint32_t>(DAG, CVal->getZExtValue(),
5479 if ((OffsetVal + WidthVal) >= 32 &&
5480 !(Subtarget->hasSDWA() && OffsetVal == 16 && WidthVal == 16)) {
5483 BitsFrom, ShiftVal);
5489 OffsetVal + WidthVal);
5519 EVT VT = N->getValueType(0);
5526 if (N0CFP && N1CFP && N2CFP) {
5527 const auto FTZ = [](const APFloat &V) {
5528 if (V.isDenormal()) {
5529 APFloat Zero(V.getSemantics(), 0);
5530 return V.isNegative() ? -Zero : Zero;
5557 bool RawReg) const {
5562 if (!MRI.isLiveIn(Reg)) {
5563 VReg = MRI.createVirtualRegister(RC);
5564 MRI.addLiveIn(Reg, VReg);
5566 VReg = MRI.getLiveInVirtReg(Reg);
5628 assert(Arg && "Attempting to load missing argument");
5637 unsigned Mask = Arg.getMask();
5638 unsigned Shift = llvm::countr_zero<unsigned>(Mask);
5650 alignTo(ExplicitKernArgSize, Alignment) + ExplicitArgOffset;
5670#define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;
5834 int &RefinementSteps,
5835 bool &UseOneConstNR,
5836 bool Reciprocal) const {
5839 if (VT == MVT::f32) {
5840 RefinementSteps = 0;
5852 int &RefinementSteps) const {
5855 if (VT == MVT::f32) {
5861 RefinementSteps = 0;
5873 case Intrinsic::amdgcn_workitem_id_x:
5875 case Intrinsic::amdgcn_workitem_id_y:
5877 case Intrinsic::amdgcn_workitem_id_z:
5890 unsigned Opc = Op.getOpcode();
5933 LHSKnown = LHSKnown.trunc(24);
5934 RHSKnown = RHSKnown.trunc(24);
5939 unsigned MaxValBits = LHSValBits + RHSValBits;
5940 if (MaxValBits > 32)
5942 unsigned SignBits = 32 - MaxValBits + 1;
5950 if ((LHSNonNegative && RHSNonNegative) || (LHSNegative && RHSNegative))
5952 else if ((LHSNegative && RHSPositive) || (LHSPositive && RHSNegative))
5957 unsigned MaxValBits = LHSValBits + RHSValBits;
5958 if (MaxValBits >= 32)
5973 for (unsigned I = 0; I < 32; I += 8) {
5974 unsigned SelBits = Sel & 0xff;
5979 } else if (SelBits < 7) {
5980 SelBits = (SelBits & 3) * 8;
5983 } else if (SelBits == 0x0c) {
5984 Known.Zero |= 0xFFull << I;
5985 } else if (SelBits > 0x0c) {
5986 Known.One |= 0xFFull << I;
6001 auto *GA = cast<GlobalAddressSDNode>(Op.getOperand(0).getNode());
6032 unsigned IID = Op.getConstantOperandVal(0);
6034 case Intrinsic::amdgcn_workitem_id_x:
6035 case Intrinsic::amdgcn_workitem_id_y:
6036 case Intrinsic::amdgcn_workitem_id_z: {
6051 unsigned Depth) const {
6052 switch (Op.getOpcode()) {
6064 return std::max(SignBits, Op0SignBits);
6069 return Width ? 32 - (Width->getZExtValue() & 0x1f) : 1;
6103 return std::min({Tmp0, Tmp1, Tmp2});
6118 switch (MI->getOpcode()) {
6119 case AMDGPU::G_AMDGPU_BUFFER_LOAD_SBYTE:
6121 case AMDGPU::G_AMDGPU_BUFFER_LOAD_SSHORT:
6123 case AMDGPU::G_AMDGPU_BUFFER_LOAD_UBYTE:
6125 case AMDGPU::G_AMDGPU_BUFFER_LOAD_USHORT:
6127 case AMDGPU::G_AMDGPU_SMED3:
6128 case AMDGPU::G_AMDGPU_UMED3: {
6129 auto [Dst, Src0, Src1, Src2] = MI->getFirst4Regs();
6130 unsigned Tmp2 = Analysis.computeNumSignBits(Src2, DemandedElts, Depth + 1);
6133 unsigned Tmp1 = Analysis.computeNumSignBits(Src1, DemandedElts, Depth + 1);
6136 unsigned Tmp0 = Analysis.computeNumSignBits(Src0, DemandedElts, Depth + 1);
6139 return std::min({Tmp0, Tmp1, Tmp2});
6149 unsigned Opcode = Op.getOpcode();
6161 unsigned Depth) const {
6162 unsigned Opcode = Op.getOpcode();
6225 unsigned IntrinsicID = Op.getConstantOperandVal(0);
6227 switch (IntrinsicID) {
6228 case Intrinsic::amdgcn_cubeid:
6229 case Intrinsic::amdgcn_cvt_off_f32_i4:
6232 case Intrinsic::amdgcn_frexp_mant: {
6237 case Intrinsic::amdgcn_cvt_pkrtz: {
6243 case Intrinsic::amdgcn_rcp:
6244 case Intrinsic::amdgcn_rsq:
6245 case Intrinsic::amdgcn_rcp_legacy:
6246 case Intrinsic::amdgcn_rsq_legacy:
6247 case Intrinsic::amdgcn_rsq_clamp:
6248 case Intrinsic::amdgcn_tanh: {
6255 case Intrinsic::amdgcn_trig_preop:
6256 case Intrinsic::amdgcn_fdot2:
6259 case Intrinsic::amdgcn_fma_legacy:
6276 return MRI.hasOneNonDBGUse(N0);
static LLVM_READONLY bool hasSourceMods(const MachineInstr &MI)
static bool isInv2Pi(const APFloat &APF)
static LLVM_READONLY bool opMustUseVOP3Encoding(const MachineInstr &MI, const MachineRegisterInfo &MRI)
returns true if the operation will definitely need to use a 64-bit encoding, and thus will use a VOP3...
static unsigned inverseMinMax(unsigned Opc)
static SDValue extractF64Exponent(SDValue Hi, const SDLoc &SL, SelectionDAG &DAG)
static unsigned workitemIntrinsicDim(unsigned ID)
static int getOrCreateFixedStackObject(MachineFrameInfo &MFI, unsigned Size, int64_t Offset)
static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0, uint32_t Offset, uint32_t Width, const SDLoc &DL)
static SDValue getMad(SelectionDAG &DAG, const SDLoc &SL, EVT VT, SDValue X, SDValue Y, SDValue C, SDNodeFlags Flags=SDNodeFlags())
static SDValue getAddOneOp(const SDNode *V)
If V is an add of a constant 1, returns the other operand.
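This helper backs the IsFoldableAdd check in performMulCombine: when one multiplicand is (y + 1), the multiply can be re-associated as x * y + x, a shape that maps onto mad-style patterns. Trivial arithmetic check (illustrative):

#include <cstdio>

int main() {
  int x = 7, y = 11;
  std::printf("%d %d\n", x * (y + 1), x * y + x); // both 84
}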
#define NODE_NAME_CASE(node)
static LLVM_READONLY bool selectSupportsSourceMods(const SDNode *N)
Return true if v_cndmask_b32 will support fabs/fneg source modifiers for the type for ISD::SELECT.
static cl::opt< bool > AMDGPUBypassSlowDiv("amdgpu-bypass-slow-div", cl::desc("Skip 64-bit divide for dynamic 32-bit values"), cl::init(true))
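What this flag enables is a runtime bypass of the expensive 64-bit division expansion (the file registers addBypassSlowDiv(64, 32) when it is set): if both 64-bit operands happen to fit in 32 bits, a cheap 32-bit divide is used instead. Scalar picture of the idea (illustrative, not the emitted IR):

#include <cstdint>
#include <cstdio>

static uint64_t div64MaybeBypass(uint64_t A, uint64_t B) {
  if (((A | B) >> 32) == 0)           // both dynamically 32-bit?
    return uint32_t(A) / uint32_t(B); // fast 32-bit path
  return A / B;                       // full 64-bit path
}

int main() { std::printf("%llu\n", (unsigned long long)div64MaybeBypass(100000, 7)); } // 14285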
static SDValue getMul24(SelectionDAG &DAG, const SDLoc &SL, SDValue N0, SDValue N1, unsigned Size, bool Signed)
static bool fnegFoldsIntoOp(const SDNode *N)
static bool isI24(SDValue Op, SelectionDAG &DAG)
static bool isCttzOpc(unsigned Opc)
static bool isU24(SDValue Op, SelectionDAG &DAG)
static SDValue peekFPSignOps(SDValue Val)
static bool valueIsKnownNeverF32Denorm(SDValue Src)
Return true if it's known that Src can never be an f32 denormal value.
static SDValue distributeOpThroughSelect(TargetLowering::DAGCombinerInfo &DCI, unsigned Op, const SDLoc &SL, SDValue Cond, SDValue N1, SDValue N2)
static SDValue peekFNeg(SDValue Val)
static SDValue simplifyMul24(SDNode *Node24, TargetLowering::DAGCombinerInfo &DCI)
static bool isCtlzOpc(unsigned Opc)
static LLVM_READNONE bool fnegFoldsIntoOpcode(unsigned Opc)
static bool hasVolatileUser(SDNode *Val)
Interface definition of the TargetLowering class that is common to all AMD GPUs.
Contains the definition of a TargetInstrInfo class that is common to all AMD GPUs.
static CCAssignFn * CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg)
static CCAssignFn * CCAssignFnForReturn(CallingConv::ID CC, bool IsVarArg)
uint64_t getExplicitKernArgSize() const
static std::optional< uint32_t > getLDSAbsoluteAddress(const GlobalValue &GV)
void recordNumNamedBarriers(uint32_t GVAddr, unsigned BarCnt)
unsigned allocateLDSGlobal(const DataLayout &DL, const GlobalVariable &GV)
bool isModuleEntryFunction() const
bool hasFminFmaxLegacy() const
Align getAlignmentForImplicitArgPtr() const
bool hasMadMacF32Insts() const
unsigned getMaxWorkitemID(const Function &Kernel, unsigned Dimension) const
Return the maximum workitem ID value in the function, for the given (0, 1, 2) dimension.
bool has16BitInsts() const
bool hasFastFMAF32() const
unsigned getExplicitKernelArgOffset() const
Returns the offset in bytes from the start of the input buffer of the first explicit kernel argument.
static const AMDGPUSubtarget & get(const MachineFunction &MF)
bool hasInv2PiInlineImm() const
bool hasVOP3PInsts() const
static unsigned numBitsSigned(SDValue Op, SelectionDAG &DAG)
SDValue combineFMinMaxLegacy(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, SDValue True, SDValue False, SDValue CC, DAGCombinerInfo &DCI) const
Generate Min/Max node.
unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
This method can be implemented by targets that want to expose additional information about sign bits ...
SDValue performMulhuCombine(SDNode *N, DAGCombinerInfo &DCI) const
EVT getTypeForExtReturn(LLVMContext &Context, EVT VT, ISD::NodeType ExtendKind) const override
Return the type that should be used to zero or sign extend a zeroext/signext integer return value.
SDValue SplitVectorLoad(SDValue Op, SelectionDAG &DAG) const
Split a vector load into 2 loads of half the vector.
SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const
SDValue performLoadCombine(SDNode *N, DAGCombinerInfo &DCI) const
void analyzeFormalArgumentsCompute(CCState &State, const SmallVectorImpl< ISD::InputArg > &Ins) const
The SelectionDAGBuilder will automatically promote function arguments with illegal types.
SDValue LowerF64ToF16Safe(SDValue Src, const SDLoc &DL, SelectionDAG &DAG) const
SDValue LowerFROUND(SDValue Op, SelectionDAG &DAG) const
SDValue storeStackInputValue(SelectionDAG &DAG, const SDLoc &SL, SDValue Chain, SDValue ArgVal, int64_t Offset) const
bool storeOfVectorConstantIsCheap(bool IsZero, EVT MemVT, unsigned NumElem, unsigned AS) const override
Return true if it is expected to be cheaper to do a store of vector constant with the given size and ...
SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
bool shouldCombineMemoryType(EVT VT) const
SDValue splitBinaryBitConstantOpImpl(DAGCombinerInfo &DCI, const SDLoc &SL, unsigned Opc, SDValue LHS, uint32_t ValLo, uint32_t ValHi) const
Split the 64-bit value LHS into two 32-bit components, and perform the binary operation Opc to it wit...
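A scalar sketch of the split described here: a 64-bit bitwise operation with a constant becomes two independent 32-bit operations on the value's halves (illustrative):

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t X = 0x1122334455667788ULL, C = 0x00000000FFFFFFFFULL;
  uint32_t Lo = uint32_t(X) & uint32_t(C);
  uint32_t Hi = uint32_t(X >> 32) & uint32_t(C >> 32);
  uint64_t R = (uint64_t(Hi) << 32) | Lo;
  std::printf("%016llx %d\n", (unsigned long long)R, int(R == (X & C))); // ...55667788 1
}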
SDValue lowerUnhandledCall(CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals, StringRef Reason) const
SDValue performAssertSZExtCombine(SDNode *N, DAGCombinerInfo &DCI) const
bool isTruncateFree(EVT Src, EVT Dest) const override
bool aggressivelyPreferBuildVectorSources(EVT VecVT) const override
SDValue LowerFCEIL(SDValue Op, SelectionDAG &DAG) const
TargetLowering::NegatibleCost getConstantNegateCost(const ConstantFPSDNode *C) const
SDValue LowerFLOGUnsafe(SDValue Op, const SDLoc &SL, SelectionDAG &DAG, bool IsLog10, SDNodeFlags Flags) const
bool canCreateUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const override
Return true if Op can create undef or poison from non-undef & non-poison operands.
SDValue performMulhsCombine(SDNode *N, DAGCombinerInfo &DCI) const
bool isSDNodeAlwaysUniform(const SDNode *N) const override
bool isDesirableToCommuteWithShift(const SDNode *N, CombineLevel Level) const override
Return true if it is profitable to move this shift by a constant amount through its operand,...
SDValue LowerFREM(SDValue Op, SelectionDAG &DAG) const
Split a vector store into multiple scalar stores.
SDValue performShlCombine(SDNode *N, DAGCombinerInfo &DCI) const
bool isCheapToSpeculateCtlz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
SDValue LowerSDIVREM(SDValue Op, SelectionDAG &DAG) const
bool isFNegFree(EVT VT) const override
Return true if an fneg operation is free to the point where it is never worthwhile to replace it with...
SDValue LowerFLOG10(SDValue Op, SelectionDAG &DAG) const
SDValue LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG, bool Signed) const
unsigned computeNumSignBitsForTargetInstr(GISelValueTracking &Analysis, Register R, const APInt &DemandedElts, const MachineRegisterInfo &MRI, unsigned Depth=0) const override
This method can be implemented by targets that want to expose additional information about sign bits ...
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
SDValue LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) const
SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG, MachineFrameInfo &MFI, int ClobberedFI) const
bool isConstantCheaperToNegate(SDValue N) const
bool isReassocProfitable(MachineRegisterInfo &MRI, Register N0, Register N1) const override
bool isKnownNeverNaNForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool SNaN=false, unsigned Depth=0) const override
If SNaN is false,.
static bool needsDenormHandlingF32(const SelectionDAG &DAG, SDValue Src, SDNodeFlags Flags)
uint32_t getImplicitParameterOffset(const MachineFunction &MF, const ImplicitParameter Param) const
Helper function that returns the byte offset of the given type of implicit parameter.
SDValue LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const
SDValue performSelectCombine(SDNode *N, DAGCombinerInfo &DCI) const
SDValue performFNegCombine(SDNode *N, DAGCombinerInfo &DCI) const
SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const
virtual SDValue LowerGlobalAddress(AMDGPUMachineFunction *MFI, SDValue Op, SelectionDAG &DAG) const
bool isConstantCostlierToNegate(SDValue N) const
SDValue loadInputValue(SelectionDAG &DAG, const TargetRegisterClass *RC, EVT VT, const SDLoc &SL, const ArgDescriptor &Arg) const
SDValue LowerDIVREM24(SDValue Op, SelectionDAG &DAG, bool sign) const
SDValue lowerFEXP10Unsafe(SDValue Op, const SDLoc &SL, SelectionDAG &DAG, SDNodeFlags Flags) const
Emit approx-funcs appropriate lowering for exp10.
bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtType, EVT ExtVT, std::optional< unsigned > ByteOffset) const override
Return true if it is profitable to reduce a load to a smaller type.
SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const
bool isCheapToSpeculateCttz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic cttz.
SDValue performCtlz_CttzCombine(const SDLoc &SL, SDValue Cond, SDValue LHS, SDValue RHS, DAGCombinerInfo &DCI) const
SDValue performSraCombine(SDNode *N, DAGCombinerInfo &DCI) const
bool isSelectSupported(SelectSupportKind) const override
bool isZExtFree(Type *Src, Type *Dest) const override
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
SDValue lowerFEXP2(SDValue Op, SelectionDAG &DAG) const
SDValue LowerCall(CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
SDValue performSrlCombine(SDNode *N, DAGCombinerInfo &DCI) const
SDValue lowerFEXP(SDValue Op, SelectionDAG &DAG) const
SDValue getIsLtSmallestNormal(SelectionDAG &DAG, SDValue Op, SDNodeFlags Flags) const
bool mayIgnoreSignedZero(SDValue Op) const
SDValue getIsFinite(SelectionDAG &DAG, SDValue Op, SDNodeFlags Flags) const
bool isLoadBitCastBeneficial(EVT, EVT, const SelectionDAG &DAG, const MachineMemOperand &MMO) const final
Return true if the following transform is beneficial: fold (conv (load x)) -> (load (conv*)x) On arch...
std::pair< SDValue, SDValue > splitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HighVT, SelectionDAG &DAG) const
Split a vector value into two parts of types LoVT and HiVT.
SDValue LowerFLOGCommon(SDValue Op, SelectionDAG &DAG) const
SDValue foldFreeOpFromSelect(TargetLowering::DAGCombinerInfo &DCI, SDValue N) const
SDValue LowerINT_TO_FP32(SDValue Op, SelectionDAG &DAG, bool Signed) const
bool isFAbsFree(EVT VT) const override
Return true if an fabs operation is free to the point where it is never worthwhile to replace it with...
SDValue loadStackInputValue(SelectionDAG &DAG, EVT VT, const SDLoc &SL, int64_t Offset) const
Similar to CreateLiveInRegister, except value maybe loaded from a stack slot rather than passed in a ...
SDValue LowerFLOG2(SDValue Op, SelectionDAG &DAG) const
static EVT getEquivalentMemType(LLVMContext &Context, EVT VT)
SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled, int &RefinementSteps, bool &UseOneConstNR, bool Reciprocal) const override
Hooks for building estimates in place of slower divisions and square roots.
SDValue performTruncateCombine(SDNode *N, DAGCombinerInfo &DCI) const
SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const
static SDValue stripBitcast(SDValue Val)
SDValue CreateLiveInRegister(SelectionDAG &DAG, const TargetRegisterClass *RC, Register Reg, EVT VT, const SDLoc &SL, bool RawReg=false) const
Helper function that adds Reg to the LiveIn list of the DAG's MachineFunction.
SDValue SplitVectorStore(SDValue Op, SelectionDAG &DAG) const
Split a vector store into 2 stores of half the vector.
SDValue LowerCTLZ_CTTZ(SDValue Op, SelectionDAG &DAG) const
SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOperations, bool ForCodeSize, NegatibleCost &Cost, unsigned Depth) const override
Return the newly negated expression if the cost is not expensive and set the cost in Cost to indicate...
std::pair< SDValue, SDValue > split64BitValue(SDValue Op, SelectionDAG &DAG) const
Return 64-bit value Op as two 32-bit integers.
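Scalar picture of the split (illustrative): the 64-bit value is simply viewed as a low and a high 32-bit half.

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t V = 0x1122334455667788ULL;
  uint32_t Lo = uint32_t(V), Hi = uint32_t(V >> 32);
  std::printf("lo=%08x hi=%08x\n", Lo, Hi); // lo=55667788 hi=11223344
}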
SDValue performMulCombine(SDNode *N, DAGCombinerInfo &DCI) const
SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled, int &RefinementSteps) const override
Return a reciprocal estimate value for the input operand.
AMDGPUTargetLowering(const TargetMachine &TM, const AMDGPUSubtarget &STI)
SDValue LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const
static CCAssignFn * CCAssignFnForReturn(CallingConv::ID CC, bool IsVarArg)
std::pair< SDValue, SDValue > getScaledLogInput(SelectionDAG &DAG, const SDLoc SL, SDValue Op, SDNodeFlags Flags) const
If denormal handling is required return the scaled input to FLOG2, and the check for denormal range.
static CCAssignFn * CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg)
Selects the correct CCAssignFn for a given CallingConvention value.
static bool allUsesHaveSourceMods(const SDNode *N, unsigned CostThreshold=4)
SDValue LowerFROUNDEVEN(SDValue Op, SelectionDAG &DAG) const
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
Returns true if the target can instruction select the specified FP immediate natively.
static unsigned numBitsUnsigned(SDValue Op, SelectionDAG &DAG)
SDValue lowerFEXPUnsafe(SDValue Op, const SDLoc &SL, SelectionDAG &DAG, SDNodeFlags Flags) const
SDValue LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const
SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const
static bool allowApproxFunc(const SelectionDAG &DAG, SDNodeFlags Flags)
bool ShouldShrinkFPConstant(EVT VT) const override
If true, then instruction selection should seek to shrink the FP constant of the specified type to a ...
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array,...
SDValue performStoreCombine(SDNode *N, DAGCombinerInfo &DCI) const
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked when a node result type is illegal for the target, and the operation was reg...
SDValue performRcpCombine(SDNode *N, DAGCombinerInfo &DCI) const
SDValue getLoHalf64(SDValue Op, SelectionDAG &DAG) const
SDValue lowerCTLZResults(SDValue Op, SelectionDAG &DAG) const
SDValue performFAbsCombine(SDNode *N, DAGCombinerInfo &DCI) const
SDValue LowerFP_TO_INT64(SDValue Op, SelectionDAG &DAG, bool Signed) const
static bool shouldFoldFNegIntoSrc(SDNode *FNeg, SDValue FNegSrc)
bool isNarrowingProfitable(SDNode *N, EVT SrcVT, EVT DestVT) const override
Return true if it's profitable to narrow operations of type SrcVT to DestVT.
SDValue LowerFRINT(SDValue Op, SelectionDAG &DAG) const
SDValue performIntrinsicWOChainCombine(SDNode *N, DAGCombinerInfo &DCI) const
SDValue LowerUDIVREM(SDValue Op, SelectionDAG &DAG) const
SDValue performMulLoHiCombine(SDNode *N, DAGCombinerInfo &DCI) const
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
void LowerUDIVREM64(SDValue Op, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results) const
SDValue WidenOrSplitVectorLoad(SDValue Op, SelectionDAG &DAG) const
Widen a suitably aligned v3 load.
std::pair< EVT, EVT > getSplitDestVTs(const EVT &VT, SelectionDAG &DAG) const
Split a vector type into two parts.
SDValue getHiHalf64(SDValue Op, SelectionDAG &DAG) const
SDValue combineFMinMaxLegacyImpl(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, SDValue True, SDValue False, SDValue CC, DAGCombinerInfo &DCI) const
unsigned getVectorIdxWidth(const DataLayout &) const override
Returns the type to be used for the index operand vector operations.
bool bitwiseIsEqual(const APFloat &RHS) const
opStatus add(const APFloat &RHS, roundingMode RM)
const fltSemantics & getSemantics() const
opStatus multiply(const APFloat &RHS, roundingMode RM)
static APFloat getSmallestNormalized(const fltSemantics &Sem, bool Negative=false)
Returns the smallest (by magnitude) normalized finite number in the given semantics.
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
Class for arbitrary precision integers.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit)
Get a value with a block of bits set.
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
This class represents an incoming formal argument to a Function.
CCState - This class holds information needed while lowering arguments and return values.
MachineFunction & getMachineFunction() const
LLVMContext & getContext() const
void addLoc(const CCValAssign &V)
static CCValAssign getCustomMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP)
const APFloat & getValueAPF() const
bool isNegative() const
Return true if the value is negative.
uint64_t getZExtValue() const
This class represents an Operation in the Expression.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
Diagnostic information for unsupported feature in backend.
const DataLayout & getDataLayout() const
Get the data layout of the module this function belongs to.
iterator_range< arg_iterator > args()
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Module * getParent()
Get the module that this global value is contained inside of...
Type * getValueType() const
This is an important class for using LLVM in a threaded context.
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
static auto integer_fixedlen_vector_valuetypes()
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
static auto integer_valuetypes()
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
MVT getScalarType() const
If this is a vector, return the element type, otherwise return this.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
LLVM_ABI int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
int getObjectIndexBegin() const
Return the minimum frame object index.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Representation of each machine instruction.
A description of a memory reference used in the backend.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOInvariant
The memory access always returns the same value (or traps).
Flags getFlags() const
Return the raw flags of the source value,.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
This is an abstract virtual class for memory operations.
unsigned getAddressSpace() const
Return the address space for the associated pointer.
bool isSimple() const
Returns true if the memory operation is neither atomic or volatile.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
LLVMContext & getContext() const
Get the global data context.
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
const DebugLoc & getDebugLoc() const
Represents one node in the SelectionDAG.
ArrayRef< SDUse > ops() const
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool hasOneUse() const
Return true if there is exactly one use of this node.
SDNodeFlags getFlags() const
SDVTList getVTList() const
const SDValue & getOperand(unsigned Num) const
uint64_t getConstantOperandVal(unsigned Num) const
Helper method returns the integer value of a ConstantSDNode operand.
iterator_range< user_iterator > users()
Represents a use of a SDNode.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
unsigned getOpcode() const
unsigned getNumOperands() const
This class keeps track of the SPI_SP_INPUT_ADDR config register, which tells the hardware which inter...
SIModeRegisterDefaults getMode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
LLVM_ABI SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI unsigned ComputeMaxSignificantBits(SDValue Op, unsigned Depth=0) const
Get the upper bound on bit size for this Value Op as a signed integer.
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL)
LLVM_ABI SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
LLVM_ABI void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
LLVM_ABI SDValue getFreeze(SDValue V)
Return a freeze using the SDLoc of the value operand.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
LLVM_ABI SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
LLVM_ABI SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value.
const DataLayout & getDataLayout() const
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
bool isConstantValueOfAnyType(SDValue N) const
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...
LLVM_ABI SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVM_ABI bool isKnownNeverNaN(SDValue Op, const APInt &DemandedElts, bool SNaN=false, unsigned Depth=0) const
Test whether the given SDValue (or all elements of it, if it is a vector) is known to never be NaN in...
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
LLVM_ABI SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
SDValue getPOISON(EVT VT)
Return a POISON node. POISON does not have a useful SDLoc.
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
LLVM_ABI bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
LLVM_ABI SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
LLVM_ABI std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
const SDValue & getBasePtr() const
const SDValue & getValue() const
StringRef - Represent a constant reference to a string, i.e.
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
void setMaxDivRemBitWidthSupported(unsigned SizeInBits)
Set the size in bits of the maximum div/rem the backend supports.
bool PredictableSelectIsExpensive
Tells the code generator that select is more expensive than a branch if the branch is usually predict...
virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT, std::optional< unsigned > ByteOffset=std::nullopt) const
Return true if it is profitable to reduce a load to a smaller type.
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
unsigned MaxGluedStoresPerMemcpy
Specify max number of store instructions to glue in inlined memcpy.
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth)
Tells the code generator which bitwidths to bypass.
void setMaxLargeFPConvertBitWidthSupported(unsigned SizeInBits)
Set the size in bits of the maximum fp to/from int conversion the backend supports.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
SelectSupportKind
Enum that describes what type of support for selects the target has.
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
Returns the type for the shift amount of a shift opcode.
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
void setSupportsUnalignedAtomics(bool UnalignedSupported)
Sets whether unaligned atomic operations are supported.
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what to do about it.
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/fp until it can find one that works.
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom DAG combiner for by implementing the PerformDAGCombine virtual method.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate what to do about it.
unsigned GatherAllAliasesMaxDepth
Depth that GatherAllAliases should continue looking for chain dependencies when trying to find a more preferable base.
NegatibleCost
Enum that specifies when a float negation is beneficial.
bool allowsMemoryAccessForAlignment(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const
This function returns true if the memory access is aligned or if the target allows this specific unaligned memory access.
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
void setJumpIsExpensive(bool isExpensive=true)
Tells the code generator not to expand logic operations on comparison predicates into separate sequences that increase the amount of flow control.
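The legality hooks and cost knobs above are normally set once in a target's lowering constructor. The following is a sketch only: MyTargetLowering is a made-up class, and the particular types, actions, and limits are illustrative rather than taken from any real target.

#include "llvm/CodeGen/TargetLowering.h"
using namespace llvm;

// Sketch: a hypothetical target's lowering constructor configuring legality,
// promotion, limits, and DAG-combine interest.
MyTargetLowering::MyTargetLowering(const TargetMachine &TM)
    : TargetLowering(TM) {
  // Operations that need custom or expanded lowering for these types.
  setOperationAction(ISD::SDIV, MVT::i64, Custom);
  setOperationAction({ISD::FSIN, ISD::FCOS}, MVT::f32, Expand);

  // Memory operations the hardware lacks or provides natively (illustrative).
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i8, Legal);

  // Promote narrow integer multiplies to 32 bits.
  setOperationAction(ISD::MUL, MVT::i16, Promote);
  AddPromotedToType(ISD::MUL, MVT::i16, MVT::i32);

  // Limits and heuristics consulted by target-independent code.
  setMaxDivRemBitWidthSupported(64);
  setMaxAtomicSizeInBitsSupported(64);
  addBypassSlowDiv(64, 32);
  setJumpIsExpensive(true);
  setSchedulingPreference(Sched::RegPressure);
  MaxStoresPerMemcpy = 8;
  PredictableSelectIsExpensive = true;

  // Ask to see these generic nodes in PerformDAGCombine.
  setTargetDAGCombine({ISD::ADD, ISD::SHL});
}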
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const
SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth=0) const
More limited version of SimplifyDemandedBits that can be used to "look through" ops that don't contribute to the DemandedBits/DemandedElts of the result.
SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const
Expands an unaligned store to 2 half-size stores for integer values, and possibly more for vectors.
bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, TargetLoweringOpt &TLO) const
Check to see if the specified operand of the specified instruction is a constant integer.
std::pair< SDValue, SDValue > expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Expands an unaligned load to 2 half-size loads for an integer, and possibly more for vectors.
virtual SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, NegatibleCost &Cost, unsigned Depth=0) const
Return the newly negated expression if the cost is not expensive and set the cost in Cost to indicate...
std::pair< SDValue, SDValue > scalarizeVectorLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Turn load of vector type into a load of the individual elements.
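A sketch of how these expansion helpers typically back a custom store lowering (MyTargetLowering and LowerSTORE are placeholders; a real target would also handle truncating stores and the fully legal cases):

#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
using namespace llvm;

// Sketch: custom lowering for ISD::STORE that scalarizes vector stores the
// hardware cannot do directly and splits stores that are too misaligned.
SDValue MyTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op.getNode());
  EVT MemVT = Store->getMemoryVT();

  unsigned IsFast = 0;
  if (!allowsMisalignedMemoryAccesses(MemVT, Store->getAddressSpace(),
                                      Store->getAlign(),
                                      Store->getMemOperand()->getFlags(),
                                      &IsFast) ||
      !IsFast) {
    if (MemVT.isVector())
      return scalarizeVectorStore(Store, DAG); // one store per element
    return expandUnalignedStore(Store, DAG);   // two half-width stores
  }
  return SDValue(); // aligned and legal: nothing custom to do
}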
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Op. At this point, we know that only the DemandedBits bits of the result of Op are ever used downstream.
virtual bool canCreateUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const
Return true if Op can create undef or poison from non-undef & non-poison operands.
Primary interface to the complete machine description for the target machine.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
LLVM Value Representation.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ CONSTANT_ADDRESS_32BIT
Address space for 32-bit constant memory.
@ REGION_ADDRESS
Address space for region memory. (GDS)
@ LOCAL_ADDRESS
Address space for local memory.
@ CONSTANT_ADDRESS
Address space for constant memory (VTX2).
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
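For illustration, these enumerators are usually consulted through a memory operand's address space. The predicate below is a made-up example, and the header providing the AMDGPUAS enumerators may differ between LLVM versions.

#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/Support/AMDGPUAddrSpace.h" // AMDGPUAS enumerators (location varies by version)
using namespace llvm;

// Hypothetical predicate: does this load read from constant memory?
static bool isConstantAddressSpaceLoad(const LoadSDNode *Load) {
  unsigned AS = Load->getAddressSpace();
  return AS == AMDGPUAS::CONSTANT_ADDRESS ||
         AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT;
}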
bool isIntrinsicAlwaysUniform(unsigned IntrID)
TargetExtType * isNamedBarrier(const GlobalVariable &GV)
bool isUniformMMO(const MachineMemOperand *MMO)
@ AMDGPU_CS
Used for Mesa/AMDPAL compute shaders.
@ AMDGPU_VS
Used for Mesa vertex shaders, or AMDPAL last shader stage before rasterization (vertex shader if tessellation and geometry are not in use, or otherwise copy shader if one is needed).
@ AMDGPU_KERNEL
Used for AMDGPU code object kernels.
@ AMDGPU_Gfx
Used for AMD graphics targets.
@ AMDGPU_CS_ChainPreserve
Used on AMDGPUs to give the middle-end more control over argument placement.
@ AMDGPU_HS
Used for Mesa/AMDPAL hull shaders (= tessellation control shaders).
@ AMDGPU_GS
Used for Mesa/AMDPAL geometry shaders.
@ AMDGPU_CS_Chain
Used on AMDGPUs to give the middle-end more control over argument placement.
@ AMDGPU_PS
Used for Mesa/AMDPAL pixel shaders.
@ Cold
Attempts to make code in the caller as efficient as possible under the assumption that the call is not commonly executed.
@ SPIR_KERNEL
Used for SPIR kernel functions.
@ Fast
Attempts to make calls as fast as possible (e.g. by passing things in registers).
@ AMDGPU_ES
Used for AMDPAL shader stage before geometry shader if geometry is in use.
@ AMDGPU_LS
Used for AMDPAL vertex shader if tessellation is in use.
@ C
The default llvm calling convention, compatible with C.
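As a sketch (the helper is made up; LLVM's in-tree AMDGPU utilities provide equivalent classifiers), the shader calling conventions above are typically grouped with a small switch:

#include "llvm/IR/CallingConv.h"
using namespace llvm;

// Hypothetical classifier: true for graphics shader entry points, false for
// compute kernels and ordinary functions.
static bool isGraphicsShaderCC(CallingConv::ID CC) {
  switch (CC) {
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_LS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
    return true;
  default:
    return false;
  }
}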
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2*N], and return the full value as two results, each of type iN.
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, val, ptr) This corresponds to "store atomic" instruction.
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
@ FMAD
FMAD - Perform a * b + c, while getting the same result as the separately rounded operations.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store instruction, then an offset node that is added / subtracted from the base pointer to form the address (for indexed memory ops).
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter) to floating point.
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length and element type, this produces a concatenated vector result value, with length equal to the sum of the lengths of the input vectors.
@ FADD
Simple binary floating point operators.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-precision values.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to memory with one type and loaded from the same address with the other type (or equivalently for vector format conversions, etc.).
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ SIGN_EXTEND
Conversion operators.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BRIND
BRIND - Indirect branch.
@ BR_JT
BR_JT - Jumptable branch.
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant, which is required to be operand #1) half of the integer or float value specified as operand #0.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of type iN containing the high bits of the result.
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ EntryToken
EntryToken - This is the marker used to indicate the start of a region.
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially variable) element number IDX.
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value, and a value.
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) based on the boolean result of comparing the lhs and rhs (ops #0 and #1) of a conditional expression with the condition code in op #4, a CondCodeSDNode.
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values, following IEEE-754 definitions...
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in a large integer register (e.g. sign extending the low 8 bits of a 32-bit register to fill the top 24 bits with the sign bit of the low 8).
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector result.
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
@ INLINEASM_BR
INLINEASM_BR - Branching version of inline asm. Used by asm-goto.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0.0.
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic function with no side effects.
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ FFREXP
FFREXP - frexp, extract fractional and exponent component of a floating-point value.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the destination VT.
@ ADDRSPACECAST
ADDRSPACECAST - This operator converts between pointers of different address spaces.
@ INLINEASM
INLINEASM - Represents an inline asm block.
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero or sign extended from a narrower type.
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum that is same with FMINNUM_IEEE and FMAXNUM_IEEE besid...
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target intrinsic function with side effects that returns a result.
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified, possibly variable, elements.
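To make the opcode list concrete, here is a sketch of a few of these nodes composed into an expansion: a 64-bit BSWAP built from two 32-bit ones. The helper name is illustrative, and a real target would only emit this when i64 BSWAP is not legal.

#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

// Sketch: bswap(i64 x) is BUILD_PAIR(bswap(hi32(x)), bswap(lo32(x))); the
// swapped high half becomes the new low half and vice versa.
static SDValue expandBswap64(SDValue Src, const SDLoc &DL, SelectionDAG &DAG) {
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Src,
                           DAG.getConstant(0, DL, MVT::i32));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Src,
                           DAG.getConstant(1, DL, MVT::i32));
  SDValue NewLo = DAG.getNode(ISD::BSWAP, DL, MVT::i32, Hi);
  SDValue NewHi = DAG.getNode(ISD::BSWAP, DL, MVT::i32, Lo);
  return DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, NewLo, NewHi);
}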
bool isNormalStore(const SDNode *N)
Returns true if the specified node is a non-truncating and unindexed store.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
bool isNormalLoad(const SDNode *N)
Returns true if the specified node is a non-extending and unindexed load.
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
MaybeAlign getAlign(const CallInst &I, unsigned Index)
LLVM_ABI bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
LLVM_ABI ConstantFPSDNode * isConstOrConstSplatFP(SDValue N, bool AllowUndefs=false)
Returns the SDNode if it is a constant splat BuildVector or constant float.
uint64_t PowerOf2Ceil(uint64_t A)
Returns the power of two which is greater than or equal to the given value.
int countl_zero(T Val)
Count number of 0's from the most significant bit to the least, stopping at the first 1.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
constexpr uint32_t Hi_32(uint64_t Value)
Return the high 32 bits of a 64 bit value.
constexpr uint32_t Lo_32(uint64_t Value)
Return the low 32 bits of a 64 bit value.
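A small illustration of these two helpers with an arbitrary immediate:

#include <cstdint>
#include "llvm/Support/MathExtras.h"
using namespace llvm;

// Splitting a 64-bit immediate into the two 32-bit pieces a 32-bit register
// file can materialize separately.
constexpr uint64_t Imm = 0x1122334455667788ULL;
static_assert(Hi_32(Imm) == 0x11223344u, "upper half");
static_assert(Lo_32(Imm) == 0x55667788u, "lower half");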
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
@ Mul
Product of integers.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
DWARFExpression::Operation Op
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual EVTs that make up that type.
LLVM_ABI ConstantSDNode * isConstOrConstSplat(SDValue N, bool AllowUndefs=false, bool AllowTruncation=false)
Returns the SDNode if it is a constant splat BuildVector or constant int.
constexpr unsigned BitWidth
LLVM_ABI bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
static cl::opt< int > CostThreshold("sbvec-cost-threshold", cl::init(0), cl::Hidden, cl::desc("Vectorization cost threshold."))
APFloat neg(APFloat X)
Returns the negated value of the argument.
unsigned Log2(Align A)
Returns the log2 of the alignment.
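The alignment helpers above compose as in this short sketch (values chosen arbitrarily):

#include "llvm/Support/Alignment.h"
using namespace llvm;

void alignmentExamples() {
  Align BaseAlign(16);
  uint64_t Padded = alignTo(20, BaseAlign);       // 32: round 20 up to 16 bytes
  Align AtOffset = commonAlignment(BaseAlign, 4); // 4: alignment known at Base + 4
  unsigned Shift = Log2(BaseAlign);               // 4: 16 == 1 << 4
  (void)Padded; (void)AtOffset; (void)Shift;
}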
LLVM_ABI bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static LLVM_ABI const fltSemantics & IEEEsingle() LLVM_READNONE
static constexpr roundingMode rmNearestTiesToEven
static LLVM_ABI const fltSemantics & IEEEdouble() LLVM_READNONE
static LLVM_ABI const fltSemantics & IEEEhalf() LLVM_READNONE
This struct is a compact representation of a valid (non-zero power of two) alignment.
MCRegister getRegister() const
unsigned getStackOffset() const
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environment.
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
static constexpr DenormalMode getPreserveSign()
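A sketch of querying the denormal mode during lowering; the helper name is made up, and whether flushing matters is a target-specific policy decision.

#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

// Sketch: are f32 input denormals flushed in this function? PreserveSign and
// PositiveZero both mean inputs are flushed to zero before the operation.
static bool f32InputDenormalsFlushed(const SelectionDAG &DAG) {
  DenormalMode Mode =
      DAG.getMachineFunction().getDenormalMode(APFloat::IEEEsingle());
  return Mode.Input != DenormalMode::IEEE;
}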
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
EVT getPow2VectorType(LLVMContext &Context) const
Widens the length of the given vector EVT up to the nearest power of 2 and returns that type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
EVT changeTypeToInteger() const
Return the type converted to an equivalently sized integer or vector with integer element type.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
EVT getDoubleNumVectorElementsVT(LLVMContext &Context) const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
bool isByteSized() const
Return true if the bit size is a multiple of 8.
uint64_t getScalarSizeInBits() const
EVT getHalfSizedIntegerVT(LLVMContext &Context) const
Finds the smallest simple value type that is greater than or equal to half the width of this EVT.
bool isPow2VectorType() const
Returns true if the given vector is a power of 2.
TypeSize getStoreSizeInBits() const
Return the number of bits overwritten by a store of the specified value type.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
EVT getRoundIntegerType(LLVMContext &Context) const
Rounds the bit-width of the given integer EVT up to the nearest power of two (and at least to eight), and returns the integer EVT with that number of bits.
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsGE(EVT VT) const
Return true if this has no less bits than VT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isExtended() const
Test if the given EVT is extended (as opposed to being simple).
EVT changeVectorElementType(EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type, which is chosen by the caller.
LLVM_ABI const fltSemantics & getFltSemantics() const
Returns an APFloat semantics tag appropriate for the value type.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool bitsLE(EVT VT) const
Return true if this has no more bits than VT.
bool isInteger() const
Return true if this is an integer or a vector integer type.
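A sketch exercising the EVT helpers above on a 3 x f32 vector; the function is illustrative and only computes types.

#include "llvm/CodeGen/ValueTypes.h"
using namespace llvm;

void evtExamples(LLVMContext &Ctx) {
  EVT V3F32 = EVT::getVectorVT(Ctx, MVT::f32, 3);
  EVT Scalar  = V3F32.getScalarType();           // f32
  EVT AsInt   = V3F32.changeTypeToInteger();     // v3i32
  EVT Widened = V3F32.getPow2VectorType(Ctx);    // v4f32
  EVT I96     = EVT::getIntegerVT(Ctx, V3F32.getFixedSizeInBits()); // i96
  EVT Half    = I96.getHalfSizedIntegerVT(Ctx);  // i64: smallest simple VT >= 48 bits
  (void)Scalar; (void)AsInt; (void)Widened; (void)Half;
}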
bool isNonNegative() const
Returns true if this value is known to be non-negative.
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
bool isUnknown() const
Returns true if we don't know any bits.
KnownBits trunc(unsigned BitWidth) const
Return known bits for a truncation of the value we're tracking.
unsigned getBitWidth() const
Get the bit width of this value.
void resetAll()
Resets the known state of all bits.
unsigned countMaxActiveBits() const
Returns the maximum number of bits needed to represent all possible unsigned values with these known bits.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
bool isStrictlyPositive() const
Returns true if this value is known to be positive.
bool isNegative() const
Returns true if this value is known to be negative.
unsigned countMaxSignificantBits() const
Returns the maximum number of bits needed to represent all possible signed values with these known bits.
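For illustration (fitsIn32BitUnsigned is a made-up helper), these KnownBits queries back the kind of narrowing checks a combine performs before shrinking an operation:

#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Support/KnownBits.h"
using namespace llvm;

// Does this value provably fit in 32 unsigned bits? Equivalent to asking
// MaskedValueIsZero with a mask covering the high bits.
static bool fitsIn32BitUnsigned(SDValue V, const SelectionDAG &DAG) {
  KnownBits Known = DAG.computeKnownBits(V);
  return Known.countMaxActiveBits() <= 32;
}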
This class contains a discriminated union of information about pointers in memory operands, relating them back to LLVM IR or to virtual locations (such as frame indices) that are exposed during codegen.
LLVM_ABI bool isDereferenceable(unsigned Size, LLVMContext &C, const DataLayout &DL) const
Return true if memory region [V, V+Offset+Size) is known to be dereferenceable.
static LLVM_ABI MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)
Stack pointer relative access.
MachinePointerInfo getWithOffset(int64_t O) const
These are IR-level optimization flags that may be propagated to SDNodes.
void setAllowContract(bool b)
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
DenormalMode FP32Denormals
If this is set, neither input nor output denormals are flushed for most f32 instructions.
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
bool isBeforeLegalizeOps() const
CombineLevel getDAGCombineLevel()
LLVM_ABI void AddToWorklist(SDNode *N)
bool isCalledByLegalizer() const
bool isBeforeLegalize() const
LLVM_ABI SDValue CombineTo(SDNode *N, ArrayRef< SDValue > To, bool AddTo=true)
LLVM_ABI void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO)
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetLowering to its clients that want to combine.
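Putting the combiner-facing pieces together, a target's PerformDAGCombine override often follows the shape below. This is a sketch: MyTargetLowering is made up, and the combine assumes hardware that ignores all but the low 5 bits of a 32-bit shift amount, which is a target-specific property.

#include "llvm/ADT/APInt.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/Support/KnownBits.h"
using namespace llvm;

SDValue MyTargetLowering::PerformDAGCombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  if (N->getOpcode() != ISD::SHL || N->getValueType(0) != MVT::i32)
    return SDValue();

  // Assumed hardware property: only the low 5 bits of a 32-bit shift amount
  // are read, so the remaining bits of a single-use amount are not demanded.
  SDValue ShiftAmt = N->getOperand(1);
  if (ShiftAmt.getValueType() != MVT::i32 || !ShiftAmt.hasOneUse())
    return SDValue();

  KnownBits Known;
  TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                        !DCI.isBeforeLegalizeOps());
  if (SimplifyDemandedBits(ShiftAmt, APInt::getLowBitsSet(32, 5), Known, TLO)) {
    DCI.CommitTargetLoweringOpt(TLO); // apply the replacement in the combiner
    return SDValue(N, 0);
  }
  return SDValue();
}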