#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "AMDGPUGenCallingConv.inc"
static cl::opt<bool> AMDGPUBypassSlowDiv(
    "amdgpu-bypass-slow-div",
    cl::desc("Skip 64-bit divide for dynamic 32-bit values"), cl::init(true));
  if (StoreSize % 32 == 0)
      {MVT::v2i8, MVT::v4i8, MVT::v2i16, MVT::v3i16, MVT::v4i16})
      {MVT::f16, MVT::f32}, Legal);
      {MVT::f16, MVT::f32, MVT::f64}, Expand);
      {MVT::v2f32, MVT::v3f32, MVT::v4f32, MVT::v5f32,
       MVT::v6f32, MVT::v7f32, MVT::v8f32, MVT::v16f32,
       MVT::v2f64, MVT::v3f64, MVT::v4f64, MVT::v8f64,
      {MVT::v3i32, MVT::v3f32, MVT::v4i32, MVT::v4f32,
       MVT::v5i32, MVT::v5f32, MVT::v6i32, MVT::v6f32,
       MVT::v7i32, MVT::v7f32, MVT::v8i32, MVT::v8f32,
       MVT::v9i32, MVT::v9f32, MVT::v10i32, MVT::v10f32,
       MVT::v11i32, MVT::v11f32, MVT::v12i32, MVT::v12f32},
      {MVT::v2f32, MVT::v2i32, MVT::v3f32, MVT::v3i32, MVT::v4f32,
       MVT::v4i32, MVT::v5f32, MVT::v5i32, MVT::v6f32, MVT::v6i32,
       MVT::v7f32, MVT::v7i32, MVT::v8f32, MVT::v8i32, MVT::v9f32,
       MVT::v9i32, MVT::v10i32, MVT::v10f32, MVT::v11i32, MVT::v11f32,
       MVT::v12i32, MVT::v12f32, MVT::v16i32, MVT::v32f32, MVT::v32i32,
       MVT::v2f64, MVT::v2i64, MVT::v3f64, MVT::v3i64, MVT::v4f64,
       MVT::v4i64, MVT::v8f64, MVT::v8i64, MVT::v16f64, MVT::v16i64},
  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
  for (auto VT : {MVT::i8, MVT::i16})
      MVT::v2i32, MVT::v3i32, MVT::v4i32, MVT::v5i32, MVT::v6i32, MVT::v7i32,
      MVT::v9i32, MVT::v10i32, MVT::v11i32, MVT::v12i32};
  for (MVT VT : VectorIntTypes) {
      MVT::v2f32, MVT::v3f32, MVT::v4f32, MVT::v5f32, MVT::v6f32, MVT::v7f32,
      MVT::v9f32, MVT::v10f32, MVT::v11f32, MVT::v12f32};
  for (MVT VT : FloatVectorTypes) {
  const auto Flags = Op.getNode()->getFlags();
  if (Flags.hasNoSignedZeros())
  case AMDGPUISD::RCP_LEGACY:
  case AMDGPUISD::RCP_IFLAG:
  case AMDGPUISD::SIN_HW:
  case AMDGPUISD::FMUL_LEGACY:
  case AMDGPUISD::FMIN_LEGACY:
  case AMDGPUISD::FMAX_LEGACY:
  case AMDGPUISD::FMED3:
  unsigned Opc = N->getOpcode();
  return (N->getNumOperands() > 2 && N->getOpcode() != ISD::SELECT) ||
  return N->getValueType(0) == MVT::f32;
  switch (N->getOpcode()) {
  case AMDGPUISD::DIV_SCALE:
    switch (N->getConstantOperandVal(0)) {
    case Intrinsic::amdgcn_interp_p1:
    case Intrinsic::amdgcn_interp_p2:
    case Intrinsic::amdgcn_interp_mov:
    case Intrinsic::amdgcn_interp_p1_f16:
    case Intrinsic::amdgcn_interp_p2_f16:
  unsigned NumMayIncreaseSize = 0;
  MVT VT = N->getValueType(0).getScalarType().getSimpleVT();
  for (const SDNode *U : N->users()) {
                                        bool ForCodeSize) const {
  return (ScalarVT != MVT::f32 && ScalarVT != MVT::f64);
    std::optional<unsigned> ByteOffset) const {
  EVT OldVT = N->getValueType(0);
  if (OldSize >= 32 && NewSize < 32 && MN->getAlign() >= Align(4) &&
  return (OldSize < 32);
  if ((LScalarSize >= CastScalarSize) && (CastScalarSize < 32))
                                  CastTy, MMO, &Fast) &&
  switch (N->getOpcode()) {
    unsigned IntrID = N->getConstantOperandVal(0);
    unsigned IntrID = N->getConstantOperandVal(1);
  case AMDGPUISD::SETCC:
  switch (Op.getOpcode()) {
  case AMDGPUISD::RCP: {
    EVT VT = Op.getValueType();
    return DAG.getNode(AMDGPUISD::RCP, SL, VT, NegSrc, Op->getFlags());
  return VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f16 || VT == MVT::bf16;
  return VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f16 || VT == MVT::bf16;
  unsigned SrcSize = Source.getSizeInBits();
  return DestSize < SrcSize && DestSize % 32 == 0;
  unsigned SrcSize = Source->getScalarSizeInBits();
  if (DestSize == 16 && Subtarget->has16BitInsts())
    return SrcSize >= 32;
  return DestSize < SrcSize && DestSize % 32 == 0;
  unsigned SrcSize = Src->getScalarSizeInBits();
  if (SrcSize == 16 && Subtarget->has16BitInsts())
    return DestSize >= 32;
  return SrcSize == 32 && DestSize == 64;
  if (Src == MVT::i16)
    return Dest == MVT::i32 || Dest == MVT::i64;
  return Src == MVT::i32 && Dest == MVT::i64;
  switch (N->getOpcode()) {
  if (!N->isDivergent() && DestVT.isInteger() &&
         "Expected shift op");
  SDValue ShiftLHS = N->getOperand(0);
  if (N->getValueType(0) == MVT::i32 && N->hasOneUse() &&
      (N->user_begin()->getOpcode() == ISD::SRA ||
       N->user_begin()->getOpcode() == ISD::SRL))
  return LHS0 && LHS1 && RHSLd && LHS0->getExtensionType() == ISD::ZEXTLOAD &&
         LHS1->getAPIntValue() == LHS0->getMemoryVT().getScalarSizeInBits() &&
  SDValue LHS = N->getOperand(0).getOperand(0);
  SDValue RHS = N->getOperand(0).getOperand(1);
  return !(IsShiftAndLoad(LHS, RHS) || IsShiftAndLoad(RHS, LHS));
    return CC_AMDGPU_CS_CHAIN;
    return CC_AMDGPU_Func;
    return RetCC_SI_Shader;
    return RetCC_SI_Gfx;
    return RetCC_AMDGPU_Func;
  const unsigned ExplicitOffset = Subtarget->getExplicitKernelArgOffset();
  unsigned InIndex = 0;
    const bool IsByRef = Arg.hasByRefAttr();
    Type *BaseArgTy = Arg.getType();
    Type *MemArgTy = IsByRef ? Arg.getParamByRefType() : BaseArgTy;
    Align Alignment = DL.getValueOrABITypeAlignment(
        IsByRef ? Arg.getParamAlign() : std::nullopt, MemArgTy);
    MaxAlign = std::max(Alignment, MaxAlign);
    uint64_t AllocSize = DL.getTypeAllocSize(MemArgTy);
    uint64_t ArgOffset = alignTo(ExplicitArgOffset, Alignment) + ExplicitOffset;
    ExplicitArgOffset = alignTo(ExplicitArgOffset, Alignment) + AllocSize;
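// A minimal standalone sketch (not from this file) of the kernarg offset
// bookkeeping above in analyzeFormalArgumentsCompute: each argument lands at
// the next offset aligned to its ABI alignment, and the running offset then
// advances by the type's allocation size. Sizes/alignments are illustrative.
#include <cassert>
#include <cstdint>

static uint64_t alignTo(uint64_t Value, uint64_t Alignment) {
  return (Value + Alignment - 1) / Alignment * Alignment;
}

int main() {
  const uint64_t ExplicitOffset = 0; // kernarg segment base (assumed zero)
  struct { uint64_t Size, Align; } Args[] = {{4, 4}, {8, 8}, {2, 2}};
  uint64_t Offsets[3];
  uint64_t ExplicitArgOffset = 0;
  unsigned I = 0;
  for (const auto &A : Args) {
    Offsets[I++] = alignTo(ExplicitArgOffset, A.Align) + ExplicitOffset;
    ExplicitArgOffset = alignTo(ExplicitArgOffset, A.Align) + A.Size;
  }
  assert(Offsets[0] == 0 && Offsets[1] == 8 && Offsets[2] == 16);
  assert(ExplicitArgOffset == 18); // first free byte after the last argument
  return 0;
}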
                    &Offsets, ArgOffset);
    for (unsigned Value = 0, NumValues = ValueVTs.size();
      } else if (RegisterVT.isVector()) {
        assert(MemoryBits % NumElements == 0);
                                      MemoryBits / NumElements);
      unsigned PartOffset = 0;
      for (unsigned i = 0; i != NumRegs; ++i) {
                                 BasePartOffset + PartOffset,
  return DAG.getNode(AMDGPUISD::ENDPGM, DL, MVT::Other, Chain);
                                            int ClobberedFI) const {
  int64_t LastByte = FirstByte + MFI.getObjectSize(ClobberedFI) - 1;
      if (FI->getIndex() < 0) {
        int64_t InLastByte = InFirstByte;
        if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
            (FirstByte <= InFirstByte && InFirstByte <= LastByte))
    FuncName = G->getSymbol();
    FuncName = G->getGlobal()->getName();
  switch (Op.getOpcode()) {
                       "instruction is not implemented yet!");
  switch (N->getOpcode()) {
    if (std::optional<uint32_t> Address =
      if (IsNamedBarrier) {
    } else if (IsNamedBarrier) {
        GV->getName() != "llvm.amdgcn.module.lds" &&
          Fn, "local memory global used by non-kernel function",
1567 "Do not know what to do with an non-zero offset");
1583 EVT VT =
Op.getValueType();
1585 unsigned OpBitSize =
Op.getOperand(0).getValueType().getSizeInBits();
1586 if (OpBitSize >= 32 && OpBitSize % 32 == 0) {
1587 unsigned NewNumElt = OpBitSize / 32;
1588 EVT NewEltVT = (NewNumElt == 1) ? MVT::i32
1590 MVT::i32, NewNumElt);
1591 for (
const SDUse &U :
Op->ops()) {
1597 Args.push_back(NewIn);
1601 NewNumElt *
Op.getNumOperands());
1607 for (
const SDUse &U :
Op->ops())
1617 unsigned Start =
Op.getConstantOperandVal(1);
1618 EVT VT =
Op.getValueType();
1619 EVT SrcVT =
Op.getOperand(0).getValueType();
1624 assert(NumElt % 2 == 0 && NumSrcElt % 2 == 0 &&
"expect legal types");
1628 EVT NewVT = NumElt == 2
1688 return DAG.
getNode(AMDGPUISD::FMIN_LEGACY,
DL, VT, RHS, LHS);
1689 return DAG.
getNode(AMDGPUISD::FMAX_LEGACY,
DL, VT, LHS, RHS);
1707 return DAG.
getNode(AMDGPUISD::FMIN_LEGACY,
DL, VT, LHS, RHS);
1708 return DAG.
getNode(AMDGPUISD::FMAX_LEGACY,
DL, VT, RHS, LHS);
1713 return DAG.
getNode(AMDGPUISD::FMAX_LEGACY,
DL, VT, RHS, LHS);
1714 return DAG.
getNode(AMDGPUISD::FMIN_LEGACY,
DL, VT, LHS, RHS);
1725 return DAG.
getNode(AMDGPUISD::FMAX_LEGACY,
DL, VT, LHS, RHS);
1726 return DAG.
getNode(AMDGPUISD::FMIN_LEGACY,
DL, VT, RHS, LHS);
1740 if ((LHS == True && RHS == False) || (LHS == False && RHS == True))
1759 if (LHS == NegTrue && CFalse && CRHS) {
std::pair<SDValue, SDValue>
  return std::pair(Lo, Hi);
  HiVT = NumElts - LoNumElts == 1
  return std::pair(LoVT, HiVT);
std::pair<SDValue, SDValue>
                                  const EVT &LoVT, const EVT &HiVT,
  EVT VT = N.getValueType();
         "More vector elements requested than available!");
  EVT VT = Op.getValueType();
  SDValue BasePtr = Load->getBasePtr();
  EVT MemVT = Load->getMemoryVT();
  EVT LoMemVT, HiMemVT;
  Align BaseAlign = Load->getAlign();
      Load->getExtensionType(), SL, LoVT, Load->getChain(), BasePtr, SrcValue,
      LoMemVT, BaseAlign, Load->getMemOperand()->getFlags(), Load->getAAInfo());
      Load->getExtensionType(), SL, HiVT, Load->getChain(), HiPtr,
      Load->getMemOperand()->getFlags(), Load->getAAInfo());
  EVT VT = Op.getValueType();
  SDValue BasePtr = Load->getBasePtr();
  EVT MemVT = Load->getMemoryVT();
  Align BaseAlign = Load->getAlign();
  if (NumElements != 3 ||
      (BaseAlign < Align(8) &&
  assert(NumElements == 3);
      Load->getExtensionType(), SL, WideVT, Load->getChain(), BasePtr, SrcValue,
      WideMemVT, BaseAlign, Load->getMemOperand()->getFlags());
  SDValue Val = Store->getValue();
  EVT MemVT = Store->getMemoryVT();
  SDValue Chain = Store->getChain();
  SDValue BasePtr = Store->getBasePtr();
  EVT LoMemVT, HiMemVT;
  Align BaseAlign = Store->getAlign();
      DAG.getTruncStore(Chain, SL, Lo, BasePtr, SrcValue, LoMemVT, BaseAlign,
                        Store->getMemOperand()->getFlags(), Store->getAAInfo());
                        Store->getMemOperand()->getFlags(), Store->getAAInfo());
  EVT VT = Op.getValueType();
  MVT IntVT = MVT::i32;
  MVT FltVT = MVT::f32;
  if (LHSSignBits < 9)
  if (RHSSignBits < 9)
  unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
  unsigned DivBits = BitSize - SignBits;
                       fa, DAG.getNode(AMDGPUISD::RCP, DL, FltVT, fb));
  bool UseFmadFtz = false;
  if (Subtarget->isGCN()) {
                 : UseFmadFtz ? (unsigned)AMDGPUISD::FMAD_FTZ
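// A minimal sketch of the 24-bit divide expansion above: the lowering bails
// out unless both operands keep at least 9 sign bits (at most 23 significant
// bits), so the f32 conversions are exact and trunc(fa * rcp(fb)) is assumed
// to be at most one below the true quotient; one remainder check fixes it.
// Plain truncf/fmaf stand in for the RCP and FMAD_FTZ nodes here.
#include <cassert>
#include <cmath>
#include <cstdint>

static uint32_t udiv24(uint32_t a, uint32_t b) {
  float fa = (float)a, fb = (float)b;   // exact conversions: a, b < 2^23
  float fq = truncf(fa * (1.0f / fb));  // initial quotient estimate
  float fr = fmaf(-fq, fb, fa);         // remainder estimate: a - fq * b
  uint32_t iq = (uint32_t)fq;
  if (fabsf(fr) >= fabsf(fb))           // estimate was one too small
    ++iq;
  return iq;
}

int main() {
  assert(udiv24(100, 7) == 100 / 7);
  assert(udiv24(0x7FFFFF, 3) == 0x7FFFFF / 3);
  assert(udiv24(0x7FFFFD, 3) == 0x7FFFFD / 3);
  return 0;
}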
  EVT VT = Op.getValueType();
  assert(VT == MVT::i64 && "LowerUDIVREM64 expects an i64");
  std::tie(LHS_Lo, LHS_Hi) = DAG.SplitScalar(LHS, DL, HalfVT, HalfVT);
  std::tie(RHS_Lo, RHS_Hi) = DAG.SplitScalar(RHS, DL, HalfVT, HalfVT);
  std::tie(Mulhi1_Lo, Mulhi1_Hi) =
  std::tie(Mulhi2_Lo, Mulhi2_Hi) =
  std::tie(Mul3_Lo, Mul3_Hi) = DAG.SplitScalar(Mul3, DL, HalfVT, HalfVT);
  for (unsigned i = 0; i < halfBitWidth; ++i) {
    const unsigned bitPos = halfBitWidth - i - 1;
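// A plain-integer sketch of the shift/subtract loop that the bitPos code
// above drives: classic restoring division, one quotient bit per iteration
// from the highest bit position down. (This sketch ignores the corner where
// Den has its top bit set and Rem << 1 could overflow.)
#include <cassert>
#include <cstdint>

static void udivrem64(uint64_t Num, uint64_t Den, uint64_t &Quo,
                      uint64_t &Rem) {
  Quo = 0;
  Rem = 0;
  for (unsigned i = 0; i < 64; ++i) {
    const unsigned bitPos = 64 - i - 1;
    Rem = (Rem << 1) | ((Num >> bitPos) & 1); // bring down next dividend bit
    if (Rem >= Den) {                         // divisor fits: emit a 1 bit
      Rem -= Den;
      Quo |= UINT64_C(1) << bitPos;
    }
  }
}

int main() {
  uint64_t Q, R;
  udivrem64(1000003, 97, Q, R);
  assert(Q == 1000003 / 97 && R == 1000003 % 97);
  return 0;
}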
  EVT VT = Op.getValueType();
  if (VT == MVT::i64) {
  if (VT == MVT::i32) {
  EVT VT = Op.getValueType();
  if (VT == MVT::i32) {
  if (VT == MVT::i64 &&
  const unsigned FractBits = 52;
  const unsigned ExpBits = 11;
  assert(Op.getValueType() == MVT::f64);
  const unsigned FractBits = 52;
      = DAG.getConstant((UINT64_C(1) << FractBits) - 1, SL, MVT::i64);
  assert(Op.getValueType() == MVT::f64);
  auto VT = Op.getValueType();
  auto Arg = Op.getOperand(0u);
  EVT VT = Op.getValueType();
  switch (Src.getOpcode()) {
    return Src.getOperand(0).getValueType() == MVT::f16;
  case AMDGPUISD::LOG:
  case AMDGPUISD::EXP:
    unsigned IntrinsicID = Src.getConstantOperandVal(0);
    switch (IntrinsicID) {
    case Intrinsic::amdgcn_frexp_mant:
    case Intrinsic::amdgcn_log:
    case Intrinsic::amdgcn_log_clamp:
    case Intrinsic::amdgcn_exp2:
    case Intrinsic::amdgcn_sqrt:
  return Flags.hasApproximateFuncs();
  EVT VT = Src.getValueType();
  return IsLtSmallestNormal;
  EVT VT = Src.getValueType();
std::pair<SDValue, SDValue>
  return {ScaledInput, IsLtSmallestNormal};
  EVT VT = Op.getValueType();
  if (VT == MVT::f16) {
    SDValue Log = DAG.getNode(AMDGPUISD::LOG, SL, MVT::f32, Ext, Flags);
  auto [ScaledInput, IsLtSmallestNormal] =
    return DAG.getNode(AMDGPUISD::LOG, SL, VT, Src, Flags);
  EVT VT = Op.getValueType();
  if (VT == MVT::f16 || Flags.hasApproximateFuncs()) {
  if (Subtarget->hasFastFMAF32()) {
    const float c_log10 = 0x1.344134p-2f;
    const float cc_log10 = 0x1.09f79ep-26f;
    const float c_log = 0x1.62e42ep-1f;
    const float cc_log = 0x1.efa39ep-25f;
    Flags.setAllowContract(false);
    const float ch_log10 = 0x1.344000p-2f;
    const float ct_log10 = 0x1.3509f6p-18f;
    const float ch_log = 0x1.62e000p-1f;
    const float ct_log = 0x1.0bfbe8p-15f;
    Flags.setAllowContract(false);
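// A minimal sketch verifying the split constants above: each head+tail pair
// sums to log10(2) (respectively ln(2)) well beyond f32 precision, which is
// what lets y*log10(2) be evaluated as fma(y, c, y*cc) on the fast-FMA path,
// or as y*ch + y*ct on the path without it. Tolerances are assumptions.
#include <cassert>
#include <cmath>

int main() {
  // Fast-FMA pair: c + cc == log10(2) to more than 49 bits.
  double Sum10 = (double)0x1.344134p-2f + (double)0x1.09f79ep-26f;
  assert(std::fabs(Sum10 - std::log10(2.0)) < 1e-13);

  // Fast-FMA pair: c + cc == ln(2) to more than 49 bits.
  double SumE = (double)0x1.62e42ep-1f + (double)0x1.efa39ep-25f;
  assert(std::fabs(SumE - std::log(2.0)) < 1e-13);

  // No-FMA pair: a short-mantissa head (note the trailing zeros) plus tail.
  double SumH = (double)0x1.344000p-2f + (double)0x1.3509f6p-18f;
  assert(std::fabs(SumH - std::log10(2.0)) < 1e-9);
  return 0;
}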
  const bool IsFiniteOnly =
      (Flags.hasNoNaNs() || Options.NoNaNsFPMath) && Flags.hasNoInfs();
  if (!IsFiniteOnly) {
  EVT VT = Src.getValueType();
  double Log2BaseInverted =
  if (VT == MVT::f32) {
    SDValue LogSrc = DAG.getNode(AMDGPUISD::LOG, SL, VT, ScaledInput, Flags);
                            ScaledResultOffset, Zero, Flags);
  if (Subtarget->hasFastFMAF32())
  return DAG.getNode(ISD::FMUL, SL, VT, Log2Operand, Log2BaseInvertedOperand,
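// A minimal sketch of the getScaledLogInput trick used above: for inputs
// below the smallest normal f32, compute log2(x * 2^32) - 32 so that the
// hardware log, which flushes denormal inputs, only ever sees a normal
// value. Assumes a libm whose log2f is exact on powers of two.
#include <cassert>
#include <cmath>

int main() {
  float Tiny = 0x1.0p-140f;               // denormal f32 input
  float Scaled = Tiny * 0x1.0p+32f;       // exact: now a normal 2^-108
  float Log2 = std::log2(Scaled) - 32.0f; // undo the scaling in log domain
  assert(Log2 == -140.0f);
  return 0;
}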
  EVT VT = Op.getValueType();
  if (VT == MVT::f16) {
    SDValue Log = DAG.getNode(AMDGPUISD::EXP, SL, MVT::f32, Ext, Flags);
    return DAG.getNode(AMDGPUISD::EXP, SL, MVT::f32, Src, Flags);
  SDValue Exp2 = DAG.getNode(AMDGPUISD::EXP, SL, VT, AddInput, Flags);
                                              bool IsExp10) const {
  EVT VT = X.getValueType();
  return DAG.getNode(VT == MVT::f32 ? (unsigned)AMDGPUISD::EXP
                                    : (unsigned)ISD::FEXP2,
                     SL, VT, Mul, Flags);
  EVT VT = X.getValueType();
  SDValue Exp2 = DAG.getNode(AMDGPUISD::EXP, SL, VT, ExpInput, Flags);
  const EVT VT = X.getValueType();
  const unsigned Exp2Op = VT == MVT::f32 ? static_cast<unsigned>(AMDGPUISD::EXP)
  EVT VT = Op.getValueType();
  if (Subtarget->hasFastFMAF32()) {
    const float cc_exp = 0x1.4ae0bep-26f;
    const float c_exp10 = 0x1.a934f0p+1f;
    const float cc_exp10 = 0x1.2f346ep-24f;
    const float ch_exp = 0x1.714000p+0f;
    const float cl_exp = 0x1.47652ap-12f;
    const float ch_exp10 = 0x1.a92000p+1f;
    const float cl_exp10 = 0x1.4f0978p-11f;
  PL = getMad(DAG, SL, VT, XH, CL, Mad0, Flags);
      DAG.getConstantFP(IsExp10 ? -0x1.66d3e8p+5f : -0x1.9d1da0p+6f, SL, VT);
  if (!Flags.hasNoInfs()) {
      DAG.getConstantFP(IsExp10 ? 0x1.344136p+5f : 0x1.62e430p+6f, SL, VT);
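// A minimal sketch of where the exp/exp10 clamp constants above come from:
// results underflow to zero once x drops below log(smallest f32 denormal)
// = log(2^-149), and overflow to infinity once x reaches log(2^128), so the
// lowering clamps x against constants near those bounds. The loose tolerance
// is an assumption; the constants sit slightly inside the exact range.
#include <cassert>
#include <cmath>

int main() {
  const double Ln2 = std::log(2.0), Lg2 = std::log10(2.0);
  assert(std::fabs(-149 * Ln2 - (-0x1.9d1da0p+6)) < 1e-2);  // exp underflow
  assert(std::fabs(128 * Ln2 - 0x1.62e430p+6) < 1e-2);      // exp overflow
  assert(std::fabs(-149 * Lg2 - (-0x1.66d3e8p+5)) < 1e-2);  // exp10 underflow
  assert(std::fabs(128 * Lg2 - 0x1.344136p+5) < 1e-2);      // exp10 overflow
  return 0;
}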
  auto Opc = Op.getOpcode();
  auto Arg = Op.getOperand(0u);
  auto ResultVT = Op.getValueType();
  if (ResultVT != MVT::i8 && ResultVT != MVT::i16)
  assert(ResultVT == Arg.getValueType());
  const uint64_t NumBits = ResultVT.getFixedSizeInBits();
    NewOp = DAG.getNode(Opc, SL, MVT::i32, NewOp);
    NewOp = DAG.getNode(Opc, SL, MVT::i32, NewOp);
  unsigned NewOpc = Ctlz ? AMDGPUISD::FFBH_U32 : AMDGPUISD::FFBL_B32;
  bool Is64BitScalar = !Src->isDivergent() && Src.getValueType() == MVT::i64;
  if (Src.getValueType() == MVT::i32 || Is64BitScalar) {
        Op.getValueType().getScalarSizeInBits(), SL, MVT::i32);
    OprLo = DAG.getNode(AddOpc, SL, MVT::i32, OprLo, Const32);
    OprHi = DAG.getNode(AddOpc, SL, MVT::i32, OprHi, Const32);
  if (Signed && Subtarget->isGCN()) {
    ShAmt = DAG.getNode(AMDGPUISD::FFBH_I32, SL, MVT::i32, Hi);
  EVT DestVT = Op.getValueType();
  EVT SrcVT = Src.getValueType();
  if (SrcVT == MVT::i16) {
    if (DestVT == MVT::f16)
  if (DestVT == MVT::bf16) {
  if (SrcVT != MVT::i64)
  if (DestVT == MVT::f16 && isTypeLegal(MVT::f16)) {
  if (DestVT == MVT::f32)
  assert(DestVT == MVT::f64);
  EVT DestVT = Op.getValueType();
  EVT SrcVT = Src.getValueType();
  if (SrcVT == MVT::i16) {
    if (DestVT == MVT::f16)
  if (DestVT == MVT::bf16) {
  if (SrcVT != MVT::i64)
  if (DestVT == MVT::f16 && isTypeLegal(MVT::f16)) {
  if (DestVT == MVT::f32)
  assert(DestVT == MVT::f64);
  EVT SrcVT = Src.getValueType();
  assert(SrcVT == MVT::f32 || SrcVT == MVT::f64);
  if (Signed && SrcVT == MVT::f32) {
  if (SrcVT == MVT::f64) {
                             SL, MVT::i32, FloorMul);
  if (Signed && SrcVT == MVT::f32) {
  return DAG.getNode(AMDGPUISD::FP_TO_FP16, DL, Op.getValueType(), N0);
  if (Op->getFlags().hasApproximateFuncs()) {
  assert(Src.getSimpleValueType() == MVT::f64);
  const unsigned ExpMask = 0x7ff;
  const unsigned ExpBiasf64 = 1023;
  const unsigned ExpBiasf16 = 15;
  unsigned OpOpcode = Op.getOpcode();
  EVT SrcVT = Src.getValueType();
  EVT DestVT = Op.getValueType();
  if (SrcVT == MVT::f16 && DestVT == MVT::i16)
  if (SrcVT == MVT::bf16) {
    return DAG.getNode(Op.getOpcode(), DL, DestVT, PromotedSrc);
  if (DestVT == MVT::i16 && (SrcVT == MVT::f32 || SrcVT == MVT::f64)) {
  if (DestVT != MVT::i64)
  if (SrcVT == MVT::f16 ||
    return DAG.getNode(Ext, DL, MVT::i64, FpToInt32);
  if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
  unsigned OpOpcode = Op.getOpcode();
  EVT SrcVT = Src.getValueType();
  EVT DstVT = Op.getValueType();
  SDValue SatVTOp = Op.getNode()->getOperand(1);
  assert(SatWidth <= DstWidth &&
         "Saturation width cannot exceed result width");
  if (DstVT == MVT::i32 && SatWidth == DstWidth &&
      (SrcVT == MVT::f32 || SrcVT == MVT::f64))
  if (SatWidth < DstWidth) {
    if (DstWidth == Int32Width)
    if (DstWidth < Int32Width)
    const unsigned Ext =
    return DAG.getNode(Ext, DL, DstVT, FpToInt32);
  if (DstVT == MVT::i64 &&
      (SrcVT == MVT::f16 || SrcVT == MVT::bf16 ||
    return DAG.getNode(OpOpcode, DL, DstVT, Src, Int32VT);
  if (SrcVT == MVT::f16 || SrcVT == MVT::bf16) {
    return DAG.getNode(Op.getOpcode(), DL, DstVT, PromotedSrc, SatVTOp);
  if (DstWidth < 32) {
  MVT VT = Op.getSimpleValueType();
  for (unsigned I = 0; I < NElts; ++I)
  EVT VT = Op.getValueType();
  unsigned NewOpcode = Node24->getOpcode();
  case Intrinsic::amdgcn_mul_i24:
    NewOpcode = AMDGPUISD::MUL_I24;
  case Intrinsic::amdgcn_mul_u24:
    NewOpcode = AMDGPUISD::MUL_U24;
  case Intrinsic::amdgcn_mulhi_i24:
    NewOpcode = AMDGPUISD::MULHI_I24;
  case Intrinsic::amdgcn_mulhi_u24:
    NewOpcode = AMDGPUISD::MULHI_U24;
  if (DemandedLHS || DemandedRHS)
        DemandedLHS ? DemandedLHS : LHS,
        DemandedRHS ? DemandedRHS : RHS);
template <typename IntTy>
  if (Width + Offset < 32) {
    IntTy Result = static_cast<IntTy>(Shl) >> (32 - Width);
    if constexpr (std::is_signed_v<IntTy>) {
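// A standalone sketch of the constantFoldBFE shift trick above: moving the
// field to the top of the word and shifting back down extracts Width bits
// starting at Offset, with an arithmetic right shift providing the sign
// extension for the BFE_I32 case. Requires 0 < Width and Width + Offset < 32,
// matching the guard in the folder; signed >> of a negative value is assumed
// to be arithmetic, as on common host platforms.
#include <cassert>
#include <cstdint>

template <typename IntTy>
static IntTy bfe(uint32_t Src0, uint32_t Offset, uint32_t Width) {
  uint32_t Shl = Src0 << (32 - Offset - Width); // field now at the MSB
  return static_cast<IntTy>(Shl) >> (32 - Width);
}

int main() {
  // Unsigned extract of bits [8, 12): the nibble 0xA.
  assert(bfe<uint32_t>(0x00000A00u, 8, 4) == 0xA);
  // Signed extract of the same field sign-extends: 0b1010 -> -6.
  assert(bfe<int32_t>(0x00000A00u, 8, 4) == -6);
  return 0;
}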
  if (M->isVolatile())
  EVT SrcVT = Src.getValueType();
  if (SrcVT.bitsGE(ExtVT)) {
  unsigned IID = N->getConstantOperandVal(0);
  case Intrinsic::amdgcn_mul_i24:
  case Intrinsic::amdgcn_mul_u24:
  case Intrinsic::amdgcn_mulhi_i24:
  case Intrinsic::amdgcn_mulhi_u24:
  case Intrinsic::amdgcn_fract:
  case Intrinsic::amdgcn_rsq:
  case Intrinsic::amdgcn_rcp_legacy:
  case Intrinsic::amdgcn_rsq_legacy:
  case Intrinsic::amdgcn_rsq_clamp:
  case Intrinsic::amdgcn_tanh:
  case Intrinsic::amdgcn_prng_b32: {
    return Src.isUndef() ? Src : SDValue();
  case Intrinsic::amdgcn_frexp_exp: {
    if (PeekSign == Src)
  EVT VT = N->getValueType(0);
  switch (LHS->getOpcode()) {
  if (VT == MVT::i32 && RHSVal == 16 && X.getValueType() == MVT::i16 &&
  EVT XVT = X.getValueType();
    ShiftAmt = DAG.getNode(ISD::AND, SL, TargetType, TruncShiftAmt, ShiftMask);
    for (unsigned I = 0; I != NElts; ++I)
      HiAndLoOps[2 * I + 1] = HiOps[I];
  EVT VT = N->getValueType(0);
           (ElementType.getSizeInBits() - 1)) {
    ShiftAmt = ShiftFullAmt;
    ShiftAmt = DAG.getNode(ISD::AND, SL, TargetType, TruncShiftAmt, ShiftMask);
    for (unsigned I = 0; I != NElts; ++I) {
      HiOps[I] = HiAndLoOps[2 * I + 1];
    for (unsigned I = 0; I != NElts; ++I) {
      HiAndLoOps[2 * I + 1] = HiOps[I];
      HiAndLoOps[2 * I] = LoOps[I];
  EVT VT = N->getValueType(0);
  unsigned MaskIdx, MaskLen;
  if (Mask->getAPIntValue().isShiftedMask(MaskIdx, MaskLen) &&
      MaskIdx == RHSVal) {
    ShiftAmt = DAG.getNode(ISD::AND, SL, TargetType, TruncShiftAmt, ShiftMask);
    for (unsigned I = 0; I != NElts; ++I)
      HiOps[I] = HiAndLoOps[2 * I + 1];
    for (unsigned I = 0; I != NElts; ++I)
      HiAndLoOps[2 * I] = LoOps[I];
  EVT VT = N->getValueType(0);
  unsigned BitIndex = K->getZExtValue();
  unsigned PartIndex = BitIndex / SrcEltSize;
  if (PartIndex * SrcEltSize == BitIndex &&
  EVT SrcVT = Src.getValueType();
  const unsigned MaxCstSize =
  unsigned MulOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
  return DAG.getNode(MulOpc, SL, MVT::i32, N0, N1);
  unsigned MulLoOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
  unsigned MulHiOpc = Signed ? AMDGPUISD::MULHI_I24 : AMDGPUISD::MULHI_U24;
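// A minimal sketch of the mul24 criterion behind isU24/getMul24 above: when
// both operands are known to fit in 24 bits, the full 32-bit product equals
// the product of the low 24 bits, which is what the hardware 24-bit
// multiply computes.
#include <cassert>
#include <cstdint>

static uint32_t mul_u24(uint32_t a, uint32_t b) {
  return (a & 0xFFFFFF) * (b & 0xFFFFFF); // bits [24, 32) are ignored
}

int main() {
  uint32_t a = 0x123456, b = 0xFED; // both below 2^24
  assert(mul_u24(a, b) == a * b);   // matches the full 32-bit multiply
  assert(mul_u24(a | 0xFF000000, b) != (a | 0xFF000000) * b); // not otherwise
  return 0;
}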
  EVT VT = N->getValueType(0);
  if (!N->isDivergent())
    if (V.hasOneUse() || all_of(V->users(), [](const SDNode *U) -> bool {
          return U->getOpcode() == ISD::MUL;
  if (SDValue MulOper = IsFoldableAdd(N0)) {
  if (SDValue MulOper = IsFoldableAdd(N1)) {
  if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
  } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
  if (N->getValueType(0) != MVT::i32)
  unsigned LoOpcode = 0;
  unsigned HiOpcode = 0;
  if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
    LoOpcode = AMDGPUISD::MUL_I24;
    HiOpcode = AMDGPUISD::MULHI_I24;
  if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
    LoOpcode = AMDGPUISD::MUL_U24;
    HiOpcode = AMDGPUISD::MULHI_U24;
  EVT VT = N->getValueType(0);
  if (!Subtarget->hasMulI24() || VT.isVector())
  if (Subtarget->hasSMulHi() && !N->isDivergent())
  EVT VT = N->getValueType(0);
  if (!N->isDivergent() && Subtarget->hasSMulHi())
                                      unsigned Opc) const {
  EVT VT = Op.getValueType();
      isCttzOpc(RHS.getOpcode()) ? AMDGPUISD::FFBL_B32 : AMDGPUISD::FFBH_U32;
    return getFFBX_U32(DAG, CmpLHS, SL, Opc);
      isCttzOpc(LHS.getOpcode()) ? AMDGPUISD::FFBL_B32 : AMDGPUISD::FFBH_U32;
    return getFFBX_U32(DAG, CmpLHS, SL, Opc);
  return DAG.getNode(Op, SL, VT, NewSelect);
  EVT VT = N.getValueType();
  bool ShouldFoldNeg = true;
      ShouldFoldNeg = false;
      ShouldFoldNeg = false;
  if (ShouldFoldNeg) {
                                 Cond, NewLHS, NewRHS);
    return DAG.getNode(LHS.getOpcode(), SL, VT, NewSelect);
  EVT VT = N->getValueType(0);
  if (Cond.hasOneUse()) {
  if (VT == MVT::f32 && Subtarget->hasFminFmaxLegacy()) {
  if (Subtarget->hasInv2PiInlineImm() && isInv2Pi(C->getValueAPF()))
  case AMDGPUISD::FMAX_LEGACY:
    return AMDGPUISD::FMIN_LEGACY;
  case AMDGPUISD::FMIN_LEGACY:
    return AMDGPUISD::FMAX_LEGACY;
  EVT VT = N->getValueType(0);
  case AMDGPUISD::FMUL_LEGACY: {
  case AMDGPUISD::FMAX_LEGACY:
  case AMDGPUISD::FMIN_LEGACY: {
  case AMDGPUISD::FMED3: {
    for (unsigned I = 0; I < 3; ++I)
    if (Res.getOpcode() != AMDGPUISD::FMED3)
  case AMDGPUISD::RCP:
  case AMDGPUISD::RCP_LEGACY:
  case AMDGPUISD::RCP_IFLAG:
  case AMDGPUISD::SIN_HW: {
  EVT SrcVT = Src.getValueType();
  Ops.back() = CastBack;
  EVT SrcVT = Src.getValueType();
  const APFloat &Val = CFP->getValueAPF();
  switch (N->getOpcode()) {
    EVT DestVT = N->getValueType(0);
    EVT SrcVT = Src.getValueType();
    const APInt &Val = C->getValueAPF().bitcastToAPInt();
  if (!(N->getValueType(0).isVector() &&
  case AMDGPUISD::MUL_U24:
  case AMDGPUISD::MUL_I24: {
  case AMDGPUISD::MULHI_I24:
  case AMDGPUISD::MULHI_U24:
  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    assert(!N->getValueType(0).isVector() &&
           "Vector handling of BFE not implemented");
    SDValue BitsFrom = N->getOperand(0);
    bool Signed = N->getOpcode() == AMDGPUISD::BFE_I32;
    if (OffsetVal == 0) {
      unsigned SignBits = Signed ? (32 - WidthVal + 1) : (32 - WidthVal);
      if (OpSignBits >= SignBits)
                                          CVal->getSExtValue(),
                                          CVal->getZExtValue(),
    if ((OffsetVal + WidthVal) >= 32 &&
        !(OffsetVal == 16 && WidthVal == 16 && Subtarget->hasSDWA())) {
                     BitsFrom, ShiftVal);
                                OffsetVal + WidthVal);
  case AMDGPUISD::RCP:
  case AMDGPUISD::RCP_IFLAG:
  case AMDGPUISD::FMAD_FTZ: {
    EVT VT = N->getValueType(0);
    if (N0CFP && N1CFP && N2CFP) {
      const auto FTZ = [](const APFloat &V) {
        if (V.isDenormal()) {
          APFloat Zero(V.getSemantics(), 0);
          return V.isNegative() ? -Zero : Zero;
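// A minimal sketch of the flush-to-zero fold above: when all three FMAD_FTZ
// operands are constants, a denormal result is folded to a zero of the same
// sign, mirroring hardware MAD behavior with f32 denormals disabled.
// std::fpclassify/std::signbit stand in for APFloat::isDenormal/isNegative.
#include <cassert>
#include <cmath>

static float ftz(float V) {
  if (std::fpclassify(V) == FP_SUBNORMAL)
    return std::signbit(V) ? -0.0f : 0.0f;
  return V;
}

int main() {
  assert(ftz(0x1.0p-140f) == 0.0f && !std::signbit(ftz(0x1.0p-140f)));
  assert(ftz(-0x1.0p-140f) == 0.0f && std::signbit(ftz(-0x1.0p-140f)));
  assert(ftz(1.5f) == 1.5f); // normal values pass through unchanged
  return 0;
}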
                                   bool RawReg) const {
  if (!MRI.isLiveIn(Reg)) {
    VReg = MRI.createVirtualRegister(RC);
    MRI.addLiveIn(Reg, VReg);
    VReg = MRI.getLiveInVirtReg(Reg);
      DAG.getCopyFromReg(Chain, SL, Info->getStackPtrOffsetReg(), MVT::i32);
  assert(Arg && "Attempting to load missing argument");
  unsigned Mask = Arg.getMask();
  unsigned ExplicitArgOffset = Subtarget->getExplicitKernelArgOffset();
  const Align Alignment = Subtarget->getAlignmentForImplicitArgPtr();
      alignTo(ExplicitKernArgSize, Alignment) + ExplicitArgOffset;
                                       int &RefinementSteps,
                                       bool &UseOneConstNR,
                                       bool Reciprocal) const {
  if (VT == MVT::f32) {
    RefinementSteps = 0;
    return DAG.getNode(AMDGPUISD::RSQ, SDLoc(Operand), VT, Operand);
                                       int &RefinementSteps) const {
  if (VT == MVT::f32) {
    RefinementSteps = 0;
    return DAG.getNode(AMDGPUISD::RCP, SDLoc(Operand), VT, Operand);
  case Intrinsic::amdgcn_workitem_id_x:
  case Intrinsic::amdgcn_workitem_id_y:
  case Intrinsic::amdgcn_workitem_id_z:
  unsigned Opc = Op.getOpcode();
  case AMDGPUISD::CARRY:
  case AMDGPUISD::BORROW: {
  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    if (Opc == AMDGPUISD::BFE_U32)
  case AMDGPUISD::FP_TO_FP16: {
  case AMDGPUISD::MUL_U24:
  case AMDGPUISD::MUL_I24: {
    LHSKnown = LHSKnown.trunc(24);
    RHSKnown = RHSKnown.trunc(24);
    if (Opc == AMDGPUISD::MUL_I24) {
      unsigned MaxValBits = LHSValBits + RHSValBits;
      if (MaxValBits > 32)
      unsigned SignBits = 32 - MaxValBits + 1;
      if ((LHSNonNegative && RHSNonNegative) || (LHSNegative && RHSNegative))
      else if ((LHSNegative && RHSPositive) || (LHSPositive && RHSNegative))
      unsigned MaxValBits = LHSValBits + RHSValBits;
      if (MaxValBits >= 32)
  case AMDGPUISD::PERM: {
    for (unsigned I = 0; I < 32; I += 8) {
      unsigned SelBits = Sel & 0xff;
      } else if (SelBits < 7) {
        SelBits = (SelBits & 3) * 8;
      } else if (SelBits == 0x0c) {
        Known.Zero |= 0xFFull << I;
      } else if (SelBits > 0x0c) {
        Known.One |= 0xFFull << I;
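// A minimal sketch of the v_perm_b32 semantics that the PERM known-bits loop
// above models: each selector byte picks one byte of the 64-bit value
// {Src0, Src1}, selector 0x0c yields the constant 0x00 and selectors above
// 0x0c yield 0xff. The byte ordering is an assumption of this sketch, and
// the sign-replicating selectors (8 through 0x0b) are simplified to 0xff
// here, which differs from hardware; the known-bits code itself only trusts
// selectors below 7 plus the two constant cases.
#include <cassert>
#include <cstdint>

static uint32_t perm_b32(uint32_t Sel, uint32_t Src0, uint32_t Src1) {
  uint64_t Bytes = ((uint64_t)Src0 << 32) | Src1; // bytes 0-3: Src1, 4-7: Src0
  uint32_t Result = 0;
  for (unsigned I = 0; I < 32; I += 8) {
    uint32_t SelBits = (Sel >> I) & 0xff;
    uint32_t Byte;
    if (SelBits < 8)
      Byte = (Bytes >> (SelBits * 8)) & 0xff; // plain byte select
    else if (SelBits == 0x0c)
      Byte = 0x00;                            // known-zero lane
    else
      Byte = 0xff;                            // known-one lane (0x0d and up)
    Result |= Byte << I;
  }
  return Result;
}

int main() {
  // Lanes select bytes 0 and 1 of Src1, then bytes 5 and 6 of the pair.
  assert(perm_b32(0x06050100u, 0xAABBCCDDu, 0x11223344u) == 0xBBCC3344u);
  // Constant lanes: 0x0c gives 0x00, 0x0d gives 0xff.
  assert(perm_b32(0x0d0c0d0cu, 0, 0) == 0xFF00FF00u);
  return 0;
}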
  case AMDGPUISD::BUFFER_LOAD_UBYTE: {
  case AMDGPUISD::BUFFER_LOAD_USHORT: {
  case AMDGPUISD::LDS: {
  case AMDGPUISD::SMIN3:
  case AMDGPUISD::SMAX3:
  case AMDGPUISD::SMED3:
  case AMDGPUISD::UMIN3:
  case AMDGPUISD::UMAX3:
  case AMDGPUISD::UMED3: {
  unsigned IID = Op.getConstantOperandVal(0);
  case Intrinsic::amdgcn_workitem_id_x:
  case Intrinsic::amdgcn_workitem_id_y:
  case Intrinsic::amdgcn_workitem_id_z: {
    unsigned MaxValue = Subtarget->getMaxWorkitemID(
                                        unsigned Depth) const {
  switch (Op.getOpcode()) {
  case AMDGPUISD::BFE_I32: {
    return std::max(SignBits, Op0SignBits);
  case AMDGPUISD::BFE_U32: {
    return Width ? 32 - (Width->getZExtValue() & 0x1f) : 1;
  case AMDGPUISD::CARRY:
  case AMDGPUISD::BORROW:
  case AMDGPUISD::BUFFER_LOAD_BYTE:
  case AMDGPUISD::BUFFER_LOAD_SHORT:
  case AMDGPUISD::BUFFER_LOAD_UBYTE:
  case AMDGPUISD::BUFFER_LOAD_USHORT:
  case AMDGPUISD::FP_TO_FP16:
  case AMDGPUISD::SMIN3:
  case AMDGPUISD::SMAX3:
  case AMDGPUISD::SMED3:
  case AMDGPUISD::UMIN3:
  case AMDGPUISD::UMAX3:
  case AMDGPUISD::UMED3: {
    return std::min({Tmp0, Tmp1, Tmp2});
  switch (MI->getOpcode()) {
  case AMDGPU::G_AMDGPU_BUFFER_LOAD_SBYTE:
  case AMDGPU::G_AMDGPU_BUFFER_LOAD_SSHORT:
  case AMDGPU::G_AMDGPU_BUFFER_LOAD_UBYTE:
  case AMDGPU::G_AMDGPU_BUFFER_LOAD_USHORT:
  case AMDGPU::G_AMDGPU_SMED3:
  case AMDGPU::G_AMDGPU_UMED3: {
    auto [Dst, Src0, Src1, Src2] = MI->getFirst4Regs();
    unsigned Tmp2 = Analysis.computeNumSignBits(Src2, DemandedElts, Depth + 1);
    unsigned Tmp1 = Analysis.computeNumSignBits(Src1, DemandedElts, Depth + 1);
    unsigned Tmp0 = Analysis.computeNumSignBits(Src0, DemandedElts, Depth + 1);
    return std::min({Tmp0, Tmp1, Tmp2});
  unsigned Opcode = Op.getOpcode();
  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32:
                                        unsigned Depth) const {
  unsigned Opcode = Op.getOpcode();
  case AMDGPUISD::FMIN_LEGACY:
  case AMDGPUISD::FMAX_LEGACY: {
  case AMDGPUISD::FMUL_LEGACY:
  case AMDGPUISD::CVT_PKRTZ_F16_F32: {
  case AMDGPUISD::FMED3:
  case AMDGPUISD::FMIN3:
  case AMDGPUISD::FMAX3:
  case AMDGPUISD::FMINIMUM3:
  case AMDGPUISD::FMAXIMUM3:
  case AMDGPUISD::FMAD_FTZ: {
  case AMDGPUISD::CVT_F32_UBYTE0:
  case AMDGPUISD::CVT_F32_UBYTE1:
  case AMDGPUISD::CVT_F32_UBYTE2:
  case AMDGPUISD::CVT_F32_UBYTE3:
  case AMDGPUISD::RCP:
  case AMDGPUISD::RSQ:
  case AMDGPUISD::RCP_LEGACY:
  case AMDGPUISD::RSQ_CLAMP: {
  case AMDGPUISD::FRACT: {
  case AMDGPUISD::DIV_SCALE:
  case AMDGPUISD::DIV_FMAS:
  case AMDGPUISD::DIV_FIXUP:
  case AMDGPUISD::SIN_HW:
  case AMDGPUISD::COS_HW: {
    unsigned IntrinsicID = Op.getConstantOperandVal(0);
    switch (IntrinsicID) {
    case Intrinsic::amdgcn_cubeid:
    case Intrinsic::amdgcn_cvt_off_f32_i4:
    case Intrinsic::amdgcn_frexp_mant: {
    case Intrinsic::amdgcn_cvt_pkrtz: {
    case Intrinsic::amdgcn_rcp:
    case Intrinsic::amdgcn_rsq:
    case Intrinsic::amdgcn_rcp_legacy:
    case Intrinsic::amdgcn_rsq_legacy:
    case Intrinsic::amdgcn_rsq_clamp:
    case Intrinsic::amdgcn_tanh: {
    case Intrinsic::amdgcn_trig_preop:
    case Intrinsic::amdgcn_fdot2:
    case Intrinsic::amdgcn_fma_legacy:
  return MRI.hasOneNonDBGUse(N0);
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static LLVM_READONLY bool hasSourceMods(const MachineInstr &MI)
static bool isInv2Pi(const APFloat &APF)
static LLVM_READONLY bool opMustUseVOP3Encoding(const MachineInstr &MI, const MachineRegisterInfo &MRI)
returns true if the operation will definitely need to use a 64-bit encoding, and thus will use a VOP3...
static unsigned inverseMinMax(unsigned Opc)
static SDValue extractF64Exponent(SDValue Hi, const SDLoc &SL, SelectionDAG &DAG)
static unsigned workitemIntrinsicDim(unsigned ID)
static int getOrCreateFixedStackObject(MachineFrameInfo &MFI, unsigned Size, int64_t Offset)
static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0, uint32_t Offset, uint32_t Width, const SDLoc &DL)
static SDValue getMad(SelectionDAG &DAG, const SDLoc &SL, EVT VT, SDValue X, SDValue Y, SDValue C, SDNodeFlags Flags=SDNodeFlags())
static SDValue getAddOneOp(const SDNode *V)
If V is an add of a constant 1, returns the other operand.
static LLVM_READONLY bool selectSupportsSourceMods(const SDNode *N)
Return true if v_cndmask_b32 will support fabs/fneg source modifiers for the type for ISD::SELECT.
static cl::opt< bool > AMDGPUBypassSlowDiv("amdgpu-bypass-slow-div", cl::desc("Skip 64-bit divide for dynamic 32-bit values"), cl::init(true))
static SDValue getMul24(SelectionDAG &DAG, const SDLoc &SL, SDValue N0, SDValue N1, unsigned Size, bool Signed)
static bool fnegFoldsIntoOp(const SDNode *N)
static bool isI24(SDValue Op, SelectionDAG &DAG)
static bool isCttzOpc(unsigned Opc)
static bool isU24(SDValue Op, SelectionDAG &DAG)
static SDValue peekFPSignOps(SDValue Val)
static bool valueIsKnownNeverF32Denorm(SDValue Src)
Return true if it's known that Src can never be an f32 denormal value.
static SDValue distributeOpThroughSelect(TargetLowering::DAGCombinerInfo &DCI, unsigned Op, const SDLoc &SL, SDValue Cond, SDValue N1, SDValue N2)
static SDValue peekFNeg(SDValue Val)
static SDValue simplifyMul24(SDNode *Node24, TargetLowering::DAGCombinerInfo &DCI)
static bool isCtlzOpc(unsigned Opc)
static LLVM_READNONE bool fnegFoldsIntoOpcode(unsigned Opc)
static bool hasVolatileUser(SDNode *Val)
Interface definition of the TargetLowering class that is common to all AMD GPUs.
Contains the definition of a TargetInstrInfo class that is common to all AMD GPUs.
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
block Block Frequency Analysis
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Provides analysis for querying information about KnownBits during GISel passes.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
Return the first DebugLoc that has line number information, given a range of instructions.
const SmallVectorImpl< MachineOperand > & Cond
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static CCAssignFn * CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg)
static CCAssignFn * CCAssignFnForReturn(CallingConv::ID CC, bool IsVarArg)
uint64_t getExplicitKernArgSize() const
static std::optional< uint32_t > getLDSAbsoluteAddress(const GlobalValue &GV)
void recordNumNamedBarriers(uint32_t GVAddr, unsigned BarCnt)
unsigned allocateLDSGlobal(const DataLayout &DL, const GlobalVariable &GV)
bool isModuleEntryFunction() const
static unsigned numBitsSigned(SDValue Op, SelectionDAG &DAG)
SDValue combineFMinMaxLegacy(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, SDValue True, SDValue False, SDValue CC, DAGCombinerInfo &DCI) const
Generate Min/Max node.
unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
This method can be implemented by targets that want to expose additional information about sign bits ...
SDValue performMulhuCombine(SDNode *N, DAGCombinerInfo &DCI) const
EVT getTypeForExtReturn(LLVMContext &Context, EVT VT, ISD::NodeType ExtendKind) const override
Return the type that should be used to zero or sign extend a zeroext/signext integer return value.
SDValue SplitVectorLoad(SDValue Op, SelectionDAG &DAG) const
Split a vector load into 2 loads of half the vector.
SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const
SDValue performLoadCombine(SDNode *N, DAGCombinerInfo &DCI) const
void analyzeFormalArgumentsCompute(CCState &State, const SmallVectorImpl< ISD::InputArg > &Ins) const
The SelectionDAGBuilder will automatically promote function arguments with illegal types.
SDValue LowerF64ToF16Safe(SDValue Src, const SDLoc &DL, SelectionDAG &DAG) const
SDValue LowerFROUND(SDValue Op, SelectionDAG &DAG) const
SDValue storeStackInputValue(SelectionDAG &DAG, const SDLoc &SL, SDValue Chain, SDValue ArgVal, int64_t Offset) const
bool storeOfVectorConstantIsCheap(bool IsZero, EVT MemVT, unsigned NumElem, unsigned AS) const override
Return true if it is expected to be cheaper to do a store of vector constant with the given size and ...
SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
bool shouldCombineMemoryType(EVT VT) const
SDValue splitBinaryBitConstantOpImpl(DAGCombinerInfo &DCI, const SDLoc &SL, unsigned Opc, SDValue LHS, uint32_t ValLo, uint32_t ValHi) const
Split the 64-bit value LHS into two 32-bit components, and perform the binary operation Opc to it wit...
SDValue lowerUnhandledCall(CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals, StringRef Reason) const
SDValue performAssertSZExtCombine(SDNode *N, DAGCombinerInfo &DCI) const
bool isTruncateFree(EVT Src, EVT Dest) const override
bool aggressivelyPreferBuildVectorSources(EVT VecVT) const override
SDValue LowerFCEIL(SDValue Op, SelectionDAG &DAG) const
Split a vector store into multiple scalar stores.
TargetLowering::NegatibleCost getConstantNegateCost(const ConstantFPSDNode *C) const
SDValue LowerFLOGUnsafe(SDValue Op, const SDLoc &SL, SelectionDAG &DAG, bool IsLog10, SDNodeFlags Flags) const
bool canCreateUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const override
Return true if Op can create undef or poison from non-undef & non-poison operands.
SDValue performMulhsCombine(SDNode *N, DAGCombinerInfo &DCI) const
SDValue lowerFEXPUnsafeImpl(SDValue Op, const SDLoc &SL, SelectionDAG &DAG, SDNodeFlags Flags, bool IsExp10) const
bool isSDNodeAlwaysUniform(const SDNode *N) const override
bool isDesirableToCommuteWithShift(const SDNode *N, CombineLevel Level) const override
Return true if it is profitable to move this shift by a constant amount through its operand,...
SDValue performShlCombine(SDNode *N, DAGCombinerInfo &DCI) const
bool isCheapToSpeculateCtlz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
SDValue LowerSDIVREM(SDValue Op, SelectionDAG &DAG) const
bool isFNegFree(EVT VT) const override
Return true if an fneg operation is free to the point where it is never worthwhile to replace it with...
SDValue LowerFLOG10(SDValue Op, SelectionDAG &DAG) const
SDValue LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG, bool Signed) const
unsigned computeNumSignBitsForTargetInstr(GISelValueTracking &Analysis, Register R, const APInt &DemandedElts, const MachineRegisterInfo &MRI, unsigned Depth=0) const override
This method can be implemented by targets that want to expose additional information about sign bits ...
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
SDValue LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) const
SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG, MachineFrameInfo &MFI, int ClobberedFI) const
bool isConstantCheaperToNegate(SDValue N) const
bool isReassocProfitable(MachineRegisterInfo &MRI, Register N0, Register N1) const override
bool isKnownNeverNaNForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool SNaN=false, unsigned Depth=0) const override
If SNaN is false,.
static bool needsDenormHandlingF32(const SelectionDAG &DAG, SDValue Src, SDNodeFlags Flags)
uint32_t getImplicitParameterOffset(const MachineFunction &MF, const ImplicitParameter Param) const
Helper function that returns the byte offset of the given type of implicit parameter.
SDValue LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const
SDValue performSelectCombine(SDNode *N, DAGCombinerInfo &DCI) const
SDValue performFNegCombine(SDNode *N, DAGCombinerInfo &DCI) const
SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const
virtual SDValue LowerGlobalAddress(AMDGPUMachineFunction *MFI, SDValue Op, SelectionDAG &DAG) const
bool isConstantCostlierToNegate(SDValue N) const
SDValue loadInputValue(SelectionDAG &DAG, const TargetRegisterClass *RC, EVT VT, const SDLoc &SL, const ArgDescriptor &Arg) const
SDValue LowerDIVREM24(SDValue Op, SelectionDAG &DAG, bool sign) const
SDValue lowerFEXP10Unsafe(SDValue Op, const SDLoc &SL, SelectionDAG &DAG, SDNodeFlags Flags) const
Emit approx-funcs appropriate lowering for exp10.
bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtType, EVT ExtVT, std::optional< unsigned > ByteOffset) const override
Return true if it is profitable to reduce a load to a smaller type.
SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const
bool isCheapToSpeculateCttz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic cttz.
SDValue performCtlz_CttzCombine(const SDLoc &SL, SDValue Cond, SDValue LHS, SDValue RHS, DAGCombinerInfo &DCI) const
SDValue performSraCombine(SDNode *N, DAGCombinerInfo &DCI) const
bool isSelectSupported(SelectSupportKind) const override
bool isZExtFree(Type *Src, Type *Dest) const override
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
SDValue lowerFEXP2(SDValue Op, SelectionDAG &DAG) const
SDValue LowerCall(CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
SDValue performSrlCombine(SDNode *N, DAGCombinerInfo &DCI) const
SDValue lowerFEXP(SDValue Op, SelectionDAG &DAG) const
SDValue getIsLtSmallestNormal(SelectionDAG &DAG, SDValue Op, SDNodeFlags Flags) const
bool mayIgnoreSignedZero(SDValue Op) const
SDValue getIsFinite(SelectionDAG &DAG, SDValue Op, SDNodeFlags Flags) const
bool isLoadBitCastBeneficial(EVT, EVT, const SelectionDAG &DAG, const MachineMemOperand &MMO) const final
Return true if the following transform is beneficial: fold (conv (load x)) -> (load (conv*)x) On arch...
std::pair< SDValue, SDValue > splitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HighVT, SelectionDAG &DAG) const
Split a vector value into two parts of types LoVT and HiVT.
AMDGPUTargetLowering(const TargetMachine &TM, const TargetSubtargetInfo &STI, const AMDGPUSubtarget &AMDGPUSTI)
SDValue LowerFLOGCommon(SDValue Op, SelectionDAG &DAG) const
SDValue foldFreeOpFromSelect(TargetLowering::DAGCombinerInfo &DCI, SDValue N) const
SDValue LowerINT_TO_FP32(SDValue Op, SelectionDAG &DAG, bool Signed) const
bool isFAbsFree(EVT VT) const override
Return true if an fabs operation is free to the point where it is never worthwhile to replace it with...
SDValue loadStackInputValue(SelectionDAG &DAG, EVT VT, const SDLoc &SL, int64_t Offset) const
Similar to CreateLiveInRegister, except value maybe loaded from a stack slot rather than passed in a ...
SDValue LowerFLOG2(SDValue Op, SelectionDAG &DAG) const
static EVT getEquivalentMemType(LLVMContext &Context, EVT VT)
SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled, int &RefinementSteps, bool &UseOneConstNR, bool Reciprocal) const override
Hooks for building estimates in place of slower divisions and square roots.
SDValue performTruncateCombine(SDNode *N, DAGCombinerInfo &DCI) const
SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const
static SDValue stripBitcast(SDValue Val)
SDValue CreateLiveInRegister(SelectionDAG &DAG, const TargetRegisterClass *RC, Register Reg, EVT VT, const SDLoc &SL, bool RawReg=false) const
Helper function that adds Reg to the LiveIn list of the DAG's MachineFunction.
SDValue SplitVectorStore(SDValue Op, SelectionDAG &DAG) const
Split a vector store into 2 stores of half the vector.
SDValue LowerCTLZ_CTTZ(SDValue Op, SelectionDAG &DAG) const
SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOperations, bool ForCodeSize, NegatibleCost &Cost, unsigned Depth) const override
Return the newly negated expression if the cost is not expensive and set the cost in Cost to indicate...
std::pair< SDValue, SDValue > split64BitValue(SDValue Op, SelectionDAG &DAG) const
Return 64-bit value Op as two 32-bit integers.
SDValue performMulCombine(SDNode *N, DAGCombinerInfo &DCI) const
SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled, int &RefinementSteps) const override
Return a reciprocal estimate value for the input operand.
SDValue LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const
SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const
static CCAssignFn * CCAssignFnForReturn(CallingConv::ID CC, bool IsVarArg)
std::pair< SDValue, SDValue > getScaledLogInput(SelectionDAG &DAG, const SDLoc SL, SDValue Op, SDNodeFlags Flags) const
If denormal handling is required return the scaled input to FLOG2, and the check for denormal range.
static CCAssignFn * CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg)
Selects the correct CCAssignFn for a given CallingConvention value.
static bool allUsesHaveSourceMods(const SDNode *N, unsigned CostThreshold=4)
SDValue LowerFROUNDEVEN(SDValue Op, SelectionDAG &DAG) const
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
Returns true if the target can instruction select the specified FP immediate natively.
static unsigned numBitsUnsigned(SDValue Op, SelectionDAG &DAG)
SDValue lowerFEXPUnsafe(SDValue Op, const SDLoc &SL, SelectionDAG &DAG, SDNodeFlags Flags) const
SDValue LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const
SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const
static bool allowApproxFunc(const SelectionDAG &DAG, SDNodeFlags Flags)
bool ShouldShrinkFPConstant(EVT VT) const override
If true, then instruction selection should seek to shrink the FP constant of the specified type to a ...
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array,...
SDValue performStoreCombine(SDNode *N, DAGCombinerInfo &DCI) const
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked when a node result type is illegal for the target, and the operation was reg...
SDValue performRcpCombine(SDNode *N, DAGCombinerInfo &DCI) const
SDValue getLoHalf64(SDValue Op, SelectionDAG &DAG) const
SDValue lowerCTLZResults(SDValue Op, SelectionDAG &DAG) const
SDValue LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) const
SDValue performFAbsCombine(SDNode *N, DAGCombinerInfo &DCI) const
SDValue LowerFP_TO_INT64(SDValue Op, SelectionDAG &DAG, bool Signed) const
static bool shouldFoldFNegIntoSrc(SDNode *FNeg, SDValue FNegSrc)
bool isNarrowingProfitable(SDNode *N, EVT SrcVT, EVT DestVT) const override
Return true if it's profitable to narrow operations of type SrcVT to DestVT.
SDValue LowerFRINT(SDValue Op, SelectionDAG &DAG) const
SDValue performIntrinsicWOChainCombine(SDNode *N, DAGCombinerInfo &DCI) const
SDValue LowerUDIVREM(SDValue Op, SelectionDAG &DAG) const
SDValue performMulLoHiCombine(SDNode *N, DAGCombinerInfo &DCI) const
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
void LowerUDIVREM64(SDValue Op, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results) const
SDValue WidenOrSplitVectorLoad(SDValue Op, SelectionDAG &DAG) const
Widen a suitably aligned v3 load.
std::pair< EVT, EVT > getSplitDestVTs(const EVT &VT, SelectionDAG &DAG) const
Split a vector type into two parts.
SDValue getHiHalf64(SDValue Op, SelectionDAG &DAG) const
SDValue combineFMinMaxLegacyImpl(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, SDValue True, SDValue False, SDValue CC, DAGCombinerInfo &DCI) const
unsigned getVectorIdxWidth(const DataLayout &) const override
Returns the type to be used for the index operand vector operations.
static const fltSemantics & IEEEsingle()
static const fltSemantics & IEEEdouble()
static constexpr roundingMode rmNearestTiesToEven
static const fltSemantics & IEEEhalf()
bool bitwiseIsEqual(const APFloat &RHS) const
opStatus add(const APFloat &RHS, roundingMode RM)
const fltSemantics & getSemantics() const
opStatus multiply(const APFloat &RHS, roundingMode RM)
static APFloat getSmallestNormalized(const fltSemantics &Sem, bool Negative=false)
Returns the smallest (by magnitude) normalized finite number in the given semantics.
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
Class for arbitrary precision integers.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit)
Get a value with a block of bits set.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
This class represents an incoming formal argument to a Function.
CCState - This class holds information needed while lowering arguments and return values.
static CCValAssign getCustomMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP)
const APFloat & getValueAPF() const
bool isNegative() const
Return true if the value is negative.
uint64_t getZExtValue() const
A parsed version of the target data layout string in and methods for querying it.
Diagnostic information for unsupported feature in backend.
const DataLayout & getDataLayout() const
Get the data layout of the module this function belongs to.
iterator_range< arg_iterator > args()
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
This is an important class for using LLVM in a threaded context.
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
static auto integer_fixedlen_vector_valuetypes()
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
static auto integer_valuetypes()
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
MVT getScalarType() const
If this is a vector, return the element type, otherwise return this.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
LLVM_ABI int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
int getObjectIndexBegin() const
Return the minimum frame object index.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Representation of each machine instruction.
A description of a memory reference used in the backend.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOInvariant
The memory access always returns the same value (or traps).
Flags getFlags() const
Return the raw flags of the source value,.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
This is an abstract virtual class for memory operations.
unsigned getAddressSpace() const
Return the address space for the associated pointer.
bool isSimple() const
Returns true if the memory operation is neither atomic or volatile.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
const DebugLoc & getDebugLoc() const
Represents one node in the SelectionDAG.
ArrayRef< SDUse > ops() const
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool hasOneUse() const
Return true if there is exactly one use of this node.
SDNodeFlags getFlags() const
SDVTList getVTList() const
const SDValue & getOperand(unsigned Num) const
uint64_t getConstantOperandVal(unsigned Num) const
Helper method returns the integer value of a ConstantSDNode operand.
iterator_range< user_iterator > users()
Represents a use of a SDNode.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
unsigned getOpcode() const
unsigned getNumOperands() const
This class keeps track of the SPI_SP_INPUT_ADDR config register, which tells the hardware which inter...
SIModeRegisterDefaults getMode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
LLVM_ABI SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI unsigned ComputeMaxSignificantBits(SDValue Op, unsigned Depth=0) const
Get the upper bound on bit size for this Value Op as a signed integer.
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL)
LLVM_ABI SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
LLVM_ABI void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
LLVM_ABI SDValue getFreeze(SDValue V)
Return a freeze using the SDLoc of the value operand.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
LLVM_ABI SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check for vector.
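getSetCC and getSelect combine naturally. A sketch computing max(X, 0), assuming an in-scope SelectionDAG &DAG, an SDLoc DL, and an integer SDValue X:

EVT VT = X.getValueType();
// Ask the target what type a SETCC of VT produces (i1, vector of i1, ...).
EVT CCVT = DAG.getTargetLoweringInfo().getSetCCResultType(
    DAG.getDataLayout(), *DAG.getContext(), VT);
SDValue Zero = DAG.getConstant(0, DL, VT);
SDValue IsPos = DAG.getSetCC(DL, CCVT, X, Zero, ISD::SETGT);
SDValue Max = DAG.getSelect(DL, VT, IsPos, X, Zero);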
LLVM_ABI SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value.
const DataLayout & getDataLayout() const
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
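A few of the constant-building helpers side by side; a sketch assuming an in-scope SelectionDAG &DAG and SDLoc DL:

SDValue K42    = DAG.getConstant(42, DL, MVT::i32);        // plain immediate
SDValue NegOne = DAG.getSignedConstant(-1, DL, MVT::i64);  // sign-safe form
SDValue Ones   = DAG.getAllOnesConstant(DL, MVT::i32);     // all bits set
SDValue Half   = DAG.getConstantFP(0.5, DL, MVT::f32);     // FP immediate
SDValue Not42  = DAG.getNOT(DL, K42, MVT::i32);            // (xor K42, -1)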
LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
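A sketch of the load/store helpers around a stack slot, assuming an in-scope SelectionDAG &DAG, SDLoc DL, chain Chain, an i32 value Val, a frame index FI, and 64-bit pointers:

MachineFunction &MF = DAG.getMachineFunction();
SDValue Ptr = DAG.getFrameIndex(FI, MVT::i64);
MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
// Store the full i32 ...
SDValue St = DAG.getStore(Chain, DL, Val, Ptr, PtrInfo, Align(4));
// ... then reload only the low 16 bits, zero-extended back to i32.
SDValue Ld = DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, St, Ptr, PtrInfo,
                            MVT::i16, Align(4));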
LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside CALLSEQ_START..CALLSEQ_END sequence, and OutSize specifies part of the frame set up prior to the sequence.
bool isConstantValueOfAnyType(SDValue N) const
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an SDValue.
LLVM_ABI SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or truncating it.
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVM_ABI bool isKnownNeverNaN(SDValue Op, const APInt &DemandedElts, bool SNaN=false, unsigned Depth=0) const
Test whether the given SDValue (or all elements of it, if it is a vector) is known to never be NaN in...
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
LLVM_ABI SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
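The difference from ReplaceAllUsesWith matters for multi-result nodes such as loads. A hypothetical sketch that replaces only a load's value result while leaving its chain users intact:

static void replaceLoadValueWithZero(SelectionDAG &DAG, LoadSDNode *LD) {
  SDLoc DL(LD);
  SDValue Zero = DAG.getConstant(0, DL, LD->getValueType(0));
  // Rewires users of result 0 (the value); result 1 (the chain) is untouched.
  DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 0), Zero);
}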
MachineFunction & getMachineFunction() const
SDValue getPOISON(EVT VT)
Return a POISON node. POISON does not have a useful SDLoc.
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or truncating it.
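These two helpers normalize an integer to a fixed width without checking which direction the conversion goes; a sketch assuming an in-scope DAG, DL, and integer SDValue V:

// Becomes ZERO_EXTEND, TRUNCATE, or a no-op depending on V's width.
SDValue U32 = DAG.getZExtOrTrunc(V, DL, MVT::i32);
// Same idea, but sign-preserving (SIGN_EXTEND on the widening path).
SDValue S32 = DAG.getSExtOrTrunc(V, DL, MVT::i32);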
LLVM_ABI bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
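A typical combine-time guard built from these analysis queries; a sketch assuming an in-scope SelectionDAG &DAG and an i32 SDValue N:

// Prove the low two bits are zero before treating N as 4-byte aligned.
if (DAG.MaskedValueIsZero(N, APInt(32, 0x3))) {
  // ... fold that relies on alignment ...
}
// Equivalent information via the full known-bits query:
KnownBits Known = DAG.computeKnownBits(N);
bool Aligned4 = Known.countMinTrailingZeros() >= 2;
// Sign-bit replication check: N fits in i16 as a signed value.
bool FitsI16 = DAG.ComputeNumSignBits(N) > 16;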
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
LLVM_ABI SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
LLVM_ABI std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
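A sketch splitting an i64 into 32-bit halves, assuming an in-scope SelectionDAG &DAG, SDLoc DL, an i64 SDValue V, and a uint64_t immediate Imm:

// DAG values split via EXTRACT_ELEMENT under the hood:
auto [Lo, Hi] = DAG.SplitScalar(V, DL, MVT::i32, MVT::i32);
// Compile-time constants split with the Lo_32/Hi_32 helpers instead:
uint32_t LoImm = Lo_32(Imm);
uint32_t HiImm = Hi_32(Imm);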
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
const SDValue & getBasePtr() const
const SDValue & getValue() const
StringRef - Represent a constant reference to a string, i.e.
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do about it.
void setMaxDivRemBitWidthSupported(unsigned SizeInBits)
Set the size in bits of the maximum div/rem the backend supports.
bool PredictableSelectIsExpensive
Tells the code generator that select is more expensive than a branch if the branch is usually predicted right.
virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT, std::optional< unsigned > ByteOffset=std::nullopt) const
Return true if it is profitable to reduce a load to a smaller type.
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
unsigned MaxGluedStoresPerMemcpy
Specify max number of store instructions to glue in inlined memcpy.
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations and not for other operations.
void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth)
Tells the code generator which bitwidths to bypass.
void setMaxLargeFPConvertBitWidthSupported(unsigned SizeInBits)
Set the size in bits of the maximum fp to/from int conversion the backend supports.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
SelectSupportKind
Enum that describes what type of support for selects the target has.
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
Returns the type for the shift amount of a shift opcode.
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
void setSupportsUnalignedAtomics(bool UnalignedSupported)
Sets whether unaligned atomic operations are supported.
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what to do about it.
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/fp until it can find one that works.
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom DAG combiner for by implementing the PerformDAGCombine virtual method.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate what to do about it.
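These configuration hooks are all called from a target's TargetLowering constructor. A hypothetical fragment (MyTargetLowering and the chosen actions are illustrative, not any real target's settings):

MyTargetLowering::MyTargetLowering(const TargetMachine &TM)
    : TargetLowering(TM) {
  // Legalizer expands 64-bit division into a runtime sequence.
  setOperationAction(ISD::SDIV, MVT::i64, Expand);
  // f16 adds are promoted: computed in f32, then rounded back.
  setOperationAction(ISD::FADD, MVT::f16, Promote);
  AddPromotedToType(ISD::FADD, MVT::f16, MVT::f32);
  // No native sign-extending i8 loads or truncating i8 stores.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i8, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8, Expand);
  // Route these generic nodes through PerformDAGCombine.
  setTargetDAGCombine({ISD::ADD, ISD::SHL});
}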
unsigned GatherAllAliasesMaxDepth
Depth that GatherAllAliases should continue looking for chain dependencies when trying to find a more preferable base.
NegatibleCost
Enum that specifies when a float negation is beneficial.
bool allowsMemoryAccessForAlignment(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const
This function returns true if the memory access is aligned or if the target allows this specific unaligned memory access.
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
void setJumpIsExpensive(bool isExpensive=true)
Tells the code generator not to expand logic operations on comparison predicates into separate sequences that increase the amount of flow control.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the target instruction selector can accept natively.
SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const
SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth=0) const
More limited version of SimplifyDemandedBits that can be used to "lookthrough" ops that don't contribute to the DemandedBits/DemandedElts of the final scalar value.
SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const
Expands an unaligned store to 2 half-size stores for integer values, and possibly more for vectors.
bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, TargetLoweringOpt &TLO) const
Check to see if the specified operand of the specified instruction is a constant integer.
std::pair< SDValue, SDValue > expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Expands an unaligned load to 2 half-size loads for an integer, and possibly more for vectors.
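A hypothetical custom-lowering fragment inside a TargetLowering subclass, keeping a load intact when the target tolerates its alignment and splitting it otherwise (MyTargetLowering::lowerLoad is an illustrative name):

SDValue MyTargetLowering::lowerLoad(SDValue Op, SelectionDAG &DAG) const {
  LoadSDNode *LD = cast<LoadSDNode>(Op.getNode());
  if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
                                      LD->getMemoryVT(), LD->getAddressSpace(),
                                      LD->getAlign(),
                                      LD->getMemOperand()->getFlags())) {
    // Too strict for this target: split into two half-size loads.
    auto [Value, Chain] = expandUnalignedLoad(LD, DAG);
    return DAG.getMergeValues({Value, Chain}, SDLoc(LD));
  }
  return SDValue(); // aligned enough; keep the original node
}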
virtual SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, NegatibleCost &Cost, unsigned Depth=0) const
Return the newly negated expression if the cost is not expensive and set the cost in Cost to indicate...
std::pair< SDValue, SDValue > scalarizeVectorLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Turn load of vector type into a load of the individual elements.
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Op. At this point, we know that only the DemandedBits bits of the result of Op are ever used downstream.
TargetLowering(const TargetLowering &)=delete
virtual bool canCreateUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const
Return true if Op can create undef or poison from non-undef & non-poison operands.
Primary interface to the complete machine description for the target machine.
TargetSubtargetInfo - Generic base class for all target subtargets.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
LLVM Value Representation.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ CONSTANT_ADDRESS_32BIT
Address space for 32-bit constant memory.
@ REGION_ADDRESS
Address space for region memory. (GDS)
@ LOCAL_ADDRESS
Address space for local memory.
@ CONSTANT_ADDRESS
Address space for constant memory (VTX2).
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
bool isIntrinsicAlwaysUniform(unsigned IntrID)
TargetExtType * isNamedBarrier(const GlobalVariable &GV)
bool isUniformMMO(const MachineMemOperand *MMO)
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ AMDGPU_CS
Used for Mesa/AMDPAL compute shaders.
@ AMDGPU_VS
Used for Mesa vertex shaders, or AMDPAL last shader stage before rasterization (vertex shader if tessellation and geometry are not in use, or otherwise copy shader if one is needed).
@ AMDGPU_KERNEL
Used for AMDGPU code object kernels.
@ AMDGPU_Gfx
Used for AMD graphics targets.
@ AMDGPU_CS_ChainPreserve
Used on AMDGPUs to give the middle-end more control over argument placement.
@ AMDGPU_HS
Used for Mesa/AMDPAL hull shaders (= tessellation control shaders).
@ AMDGPU_GS
Used for Mesa/AMDPAL geometry shaders.
@ AMDGPU_CS_Chain
Used on AMDGPUs to give the middle-end more control over argument placement.
@ AMDGPU_PS
Used for Mesa/AMDPAL pixel shaders.
@ Cold
Attempts to make code in the caller as efficient as possible under the assumption that the call is not commonly executed.
@ SPIR_KERNEL
Used for SPIR kernel functions.
@ Fast
Attempts to make calls as fast as possible (e.g. by passing things in registers).
@ AMDGPU_ES
Used for AMDPAL shader stage before geometry shader if geometry is in use.
@ AMDGPU_LS
Used for AMDPAL vertex shader if tessellation is in use.
@ C
The default llvm calling convention, compatible with C.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2*N], and return the full value as two results, each of type iN.
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, val, ptr) This corresponds to "store atomic" instruction.
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
@ FMAD
FMAD - Perform a * b + c, while getting the same result as the separately rounded operations.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store instruction, then an offset node that is added/subtracted from the base pointer to form the address (for indexed memory ops).
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter) to floating point.
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length and element type, this produces a concatenated vector result value, with length equal to the sum of the lengths of the input vectors.
@ FADD
Simple binary floating point operators.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-precision (16 bit) floating numbers.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to memory with one type and loaded from the same address with the other type.
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ SIGN_EXTEND
Conversion operators.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BRIND
BRIND - Indirect branch.
@ BR_JT
BR_JT - Jumptable branch.
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant, which is required to be operand #1) half of the integer or float value specified as operand #0.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of type iN, containing the high half of the full 2*N-bit product.
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ EntryToken
EntryToken - This is the marker used to indicate the start of a region.
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially variable) element number IDX.
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) based on the boolean result of comparing the lhs and rhs (ops #0 and #1) of a conditional expression with the condition code in op #4, a CondCodeSDNode.
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values, following IEEE-754 definitions except for signed zero behavior.
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in a large integer register.
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector result.
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
@ INLINEASM_BR
INLINEASM_BR - Branching version of inline asm. Used by asm-goto.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0.0.
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic function with no side effects.
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ FFREXP
FFREXP - frexp, extract fractional and exponent component of a floating-point value.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the destination VT.
@ ADDRSPACECAST
ADDRSPACECAST - This operator converts between pointers of different address spaces.
@ INLINEASM
INLINEASM - Represents an inline asm block.
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer type given in operand 1.
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero or sign extended from a narrower type.
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum that is same with FMINNUM_IEEE and FMAXNUM_IEEE besid...
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target intrinsic function with side effects that returns a result.
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified, possibly variable, elements.
bool isNormalStore(const SDNode *N)
Returns true if the specified node is a non-truncating and unindexed store.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
bool isNormalLoad(const SDNode *N)
Returns true if the specified node is a non-extending and unindexed load.
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
MaybeAlign getAlign(const CallInst &I, unsigned Index)
LLVM_ABI bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs=nullptr, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
LLVM_ABI ConstantFPSDNode * isConstOrConstSplatFP(SDValue N, bool AllowUndefs=false)
Returns the SDNode if it is a constant splat BuildVector or constant float.
uint64_t PowerOf2Ceil(uint64_t A)
Returns the power of two which is greater than or equal to the given value.
int countr_zero(T Val)
Count the number of 0's from the least significant bit to the most, stopping at the first 1.
int countl_zero(T Val)
Count the number of 0's from the most significant bit to the least, stopping at the first 1.
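Small examples of these bit-math helpers (the values in comments assume 32-bit unsigned inputs):

uint64_t Cap = llvm::PowerOf2Ceil(40);   // 64: next power of two >= 40
unsigned TZ  = llvm::countr_zero(0x10u); // 4: trailing zeros of 0b10000
unsigned LZ  = llvm::countl_zero(0x10u); // 27: leading zeros in 32 bits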
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
constexpr uint32_t Hi_32(uint64_t Value)
Return the high 32 bits of a 64 bit value.
constexpr uint32_t Lo_32(uint64_t Value)
Return the low 32 bits of a 64 bit value.
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
To bit_cast(const From &from) noexcept
@ Mul
Product of integers.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
LLVM_ABI ConstantSDNode * isConstOrConstSplat(SDValue N, bool AllowUndefs=false, bool AllowTruncation=false)
Returns the SDNode if it is a constant splat BuildVector or constant int.
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
APFloat neg(APFloat X)
Returns the negated value of the argument.
unsigned Log2(Align A)
Returns the log2 of the alignment.
LLVM_ABI bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
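Typical combine-time matching with these predicates; a hypothetical sketch where N is a binary node such as ADD or MUL:

static SDValue trySimplify(SDNode *N) {
  SDValue Op0 = N->getOperand(0), Op1 = N->getOperand(1);
  if (isNullConstant(Op1))
    return Op0; // e.g. (add x, 0) --> x
  // isConstOrConstSplat also matches splat BUILD_VECTOR constants:
  if (ConstantSDNode *C = isConstOrConstSplat(Op1))
    if (C->getAPIntValue().isPowerOf2()) {
      unsigned ShAmt = C->getAPIntValue().logBase2();
      // ... could rewrite (mul x, C) as (shl x, ShAmt) here ...
      (void)ShAmt;
    }
  return SDValue(); // no simplification found
}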
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
MCRegister getRegister() const
unsigned getStackOffset() const
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environment.
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
static constexpr DenormalMode getPreserveSign()
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
EVT getPow2VectorType(LLVMContext &Context) const
Widens the length of the given vector EVT up to the nearest power of 2 and returns that type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
EVT changeTypeToInteger() const
Return the type converted to an equivalently sized integer or vector with integer element type.
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
EVT getDoubleNumVectorElementsVT(LLVMContext &Context) const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
bool isByteSized() const
Return true if the bit size is a multiple of 8.
uint64_t getScalarSizeInBits() const
EVT getHalfSizedIntegerVT(LLVMContext &Context) const
Finds the smallest simple value type that is greater than or equal to half the width of this EVT.
bool isPow2VectorType() const
Returns true if the given vector is a power of 2.
TypeSize getStoreSizeInBits() const
Return the number of bits overwritten by a store of the specified value type.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
EVT getRoundIntegerType(LLVMContext &Context) const
Rounds the bit-width of the given integer EVT up to the nearest power of two (and at least to eight), and returns the integer EVT with that number of bits.
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsGE(EVT VT) const
Return true if this has no less bits than VT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isExtended() const
Test if the given EVT is extended (as opposed to being simple).
EVT changeElementType(LLVMContext &Context, EVT EltVT) const
Return a VT for a type whose attributes match ourselves with the exception of the element type that is chosen by the caller.
LLVM_ABI const fltSemantics & getFltSemantics() const
Returns an APFloat semantics tag appropriate for the value type.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool bitsLE(EVT VT) const
Return true if this has no more bits than VT.
bool isInteger() const
Return true if this is an integer or a vector integer type.
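A few of the EVT helpers in one place; a sketch assuming an in-scope LLVMContext &Ctx:

EVT V4F32 = EVT::getVectorVT(Ctx, MVT::f32, 4);
EVT Elt   = V4F32.getVectorElementType();   // f32
EVT AsInt = V4F32.changeTypeToInteger();    // v4i32
EVT I64   = EVT::getIntegerVT(Ctx, 64);
EVT Half  = I64.getHalfSizedIntegerVT(Ctx); // i32
bool Big  = V4F32.getSizeInBits() == 128;   // true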
bool isNonNegative() const
Returns true if this value is known to be non-negative.
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
bool isUnknown() const
Returns true if we don't know any bits.
KnownBits trunc(unsigned BitWidth) const
Return known bits for a truncation of the value we're tracking.
unsigned getBitWidth() const
Get the bit width of this value.
void resetAll()
Resets the known state of all bits.
unsigned countMaxActiveBits() const
Returns the maximum number of bits needed to represent all possible unsigned values with these known bits.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
bool isStrictlyPositive() const
Returns true if this value is known to be positive.
bool isNegative() const
Returns true if this value is known to be negative.
unsigned countMaxSignificantBits() const
Returns the maximum number of bits needed to represent all possible signed values with these known bits.
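Interrogating a KnownBits result; a sketch assuming an in-scope SelectionDAG &DAG and SDValue V:

KnownBits Known = DAG.computeKnownBits(V);
if (!Known.isUnknown()) {
  unsigned Bits = Known.countMaxActiveBits(); // unsigned range fits here
  APInt Max = Known.getMaxValue();
  APInt Min = Known.getMinValue();
  if (Known.isNonNegative()) {
    // sign bit known clear: value is provably >= 0
  }
}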
This class contains a discriminated union of information about pointers in memory operands, relating them back to LLVM IR or to virtual locations (such as frame indices) that are exempt from aliasing.
LLVM_ABI bool isDereferenceable(unsigned Size, LLVMContext &C, const DataLayout &DL) const
Return true if memory region [V, V+Offset+Size) is known to be dereferenceable.
static LLVM_ABI MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)
Stack pointer relative access.
MachinePointerInfo getWithOffset(int64_t O) const
These are IR-level optimization flags that may be propagated to SDNodes.
void setAllowContract(bool b)
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
DenormalMode FP32Denormals
If this is set, neither input nor output denormals are flushed for most f32 instructions.
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
bool isBeforeLegalizeOps() const
CombineLevel getDAGCombineLevel()
LLVM_ABI void AddToWorklist(SDNode *N)
bool isCalledByLegalizer() const
bool isBeforeLegalize() const
LLVM_ABI SDValue CombineTo(SDNode *N, ArrayRef< SDValue > To, bool AddTo=true)
LLVM_ABI void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO)
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetLowering to its clients that want to combine.