#include "llvm/IR/IntrinsicsNVPTX.h"
#define DEBUG_TYPE "nvptx-lower"
    cl::desc("NVPTX Specific: FMA contraction (0: don't do it"
             " 1: do it  2: do it aggressively"),
    cl::desc("NVPTX Specific: 0 use div.approx, 1 use div.full, 2 use"
             " IEEE Compliant F32 div.rnd if available."),
    cl::desc("NVPTX Specific: 0 use sqrt.approx, 1 use sqrt.rn."),
98 "nvptx-force-min-byval-param-align",
cl::Hidden,
99 cl::desc(
"NVPTX Specific: force 4-byte minimal alignment for byval"
100 " params of device functions."),
static std::optional<std::pair<unsigned int, EVT>>
  return std::pair(NumElts, EltVT);
  return std::pair(NumElts / NPerWord,
      Offsets->push_back(StartingOffset + 0);
      Offsets->push_back(StartingOffset + 8);
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    auto const *SL = DL.getStructLayout(STy);
    for (auto *EI : STy->elements()) {
                         StartingOffset + SL->getElementOffset(ElementNum));
  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    for (int I : llvm::seq<int>(ATy->getNumElements()))
  for (unsigned i = 0, e = TempVTs.size(); i != e; ++i) {
      NumElts = (NumElts + 3) / 4;
    } else if (EltVT.getSimpleVT() == MVT::i8 && NumElts == 2) {
    for (unsigned j = 0; j != NumElts; ++j) {
        Offsets->push_back(Off);
         "Promotion is not suitable for scalars of size larger than 64-bits");
    *PromotedVT = MVT::i1;
    *PromotedVT = MVT::i8;
    *PromotedVT = MVT::i16;
    *PromotedVT = MVT::i32;
    *PromotedVT = MVT::i64;
  return EVT(*PromotedVT) != VT;
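// Worked example for the promotion above: an i3 is widened to i8, an i12
// to i16, an i24 to i32, and an i48 to i64, while i1 keeps its own slot
// since PTX models it with the predicate register class. The boolean
// result reports whether *PromotedVT differs from VT, i.e. whether the
// caller must wrap loads/stores in extend/truncate pairs.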
  if (ParamAlignment < AccessSize)
  if (Offsets[Idx] & (AccessSize - 1))
  EVT EltVT = ValueVTs[Idx];
  if (EltSize >= AccessSize)
  unsigned NumElts = AccessSize / EltSize;
  if (AccessSize != EltSize * NumElts)
  if (Idx + NumElts > ValueVTs.size())
  if (NumElts != 4 && NumElts != 2)
  for (unsigned j = Idx + 1; j < Idx + NumElts; ++j) {
    if (ValueVTs[j] != EltVT)
    if (Offsets[j] - Offsets[j - 1] != EltSize)
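// In short, a run of parameter pieces is merged into one vector access
// only if the parameter alignment covers the access size, the starting
// offset is access-size aligned, all pieces share one EVT, offsets are
// contiguous (each exactly EltSize past the previous), and the run has
// exactly 2 or 4 elements, matching PTX's v2/v4 parameter accesses.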
                          Align ParamAlignment, bool IsVAArg = false) {
  for (int I = 0, E = ValueVTs.size(); I != E; ++I) {
    for (unsigned AccessSize : {16, 8, 4, 2}) {
          I, AccessSize, ValueVTs, Offsets, ParamAlignment);
        assert(I + 1 < E && "Not enough elements.");
        assert(I + 3 < E && "Not enough elements.");
  if (Value->getValueType(0) == VT)
      Op, VT, IsOpSupported ? Action : NoBF16Action);
  bool IsOpSupported = false;
  for (MVT VT : {MVT::bf16, MVT::f16, MVT::v2bf16, MVT::v2f16, MVT::f32,
                 MVT::f64, MVT::i1, MVT::i8, MVT::i16, MVT::v2i16, MVT::v4i8,
                 MVT::i32, MVT::i64}) {
      {MVT::i8, MVT::i16, MVT::v2i16, MVT::i32, MVT::i64},
  for (const auto &Ty : {MVT::i16, MVT::i32, MVT::i64}) {
  for (const auto &VT : {MVT::bf16, MVT::v2bf16}) {
  const bool IsFP16FP16x2NegAvailable = STI.getSmVersion() >= 53 &&
  for (const auto &VT : {MVT::f16, MVT::v2f16})
  for (MVT VT : {MVT::bf16, MVT::f32, MVT::f64}) {
  for (MVT VT : {MVT::i1, MVT::i16, MVT::i32, MVT::i64}) {
  for (const auto &Op :
  bool SupportsF32MinMaxNaN =
#define MAKE_CASE(V)                                                           \
                                          bool Reciprocal) const {
  if (Reciprocal || ExtraSteps > 0) {
      return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_rsqrt_approx_ftz_f
                                   : Intrinsic::nvvm_rsqrt_approx_f);
    else if (VT == MVT::f64)
      return MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d);
      return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_sqrt_approx_ftz_f
                                   : Intrinsic::nvvm_sqrt_approx_f);
          DAG.getConstant(Intrinsic::nvvm_rcp_approx_ftz_d, DL, MVT::i32),
          MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d));
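// Note on the f64 path: PTX provides no sqrt.approx.f64, so an
// approximate double-precision square root is synthesized as
// sqrt(x) ~= rcp(rsqrt(x)), i.e. rcp.approx.ftz.f64 applied to the
// rsqrt.approx.f64 result, as the node built above shows.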
    std::optional<std::pair<unsigned, const APInt &>> VAInfo,
    const CallBase &CB, unsigned UniqueCallSite) const {
  assert(isABI && "Non-ABI compilation is not supported");
  std::string Prototype;
  O << "prototype_" << UniqueCallSite << " : .callprototype ";
    if (auto *ITy = dyn_cast<IntegerType>(retTy)) {
      size = ITy->getBitWidth();
             "Floating point type expected here");
      O << ".param .b" << size << " _";
    } else if (isa<PointerType>(retTy)) {
      O << ".param .b" << PtrVT.getSizeInBits() << " _";
      O << ".param .align " << (retAlignment ? retAlignment->value() : 0)
        << " .b8 _[" << DL.getTypeAllocSize(retTy) << "]";
  unsigned NumArgs = VAInfo ? VAInfo->first : Args.size();
  for (unsigned i = 0, OIdx = 0; i != NumArgs; ++i, ++OIdx) {
    Type *Ty = Args[i].Ty;
    if (!Outs[OIdx].Flags.isByVal()) {
        O << ".param .align " << ParamAlign.value() << " .b8 ";
        O << "[" << DL.getTypeAllocSize(Ty) << "]";
        if (unsigned len = vtparts.size())
              (getValueType(DL, Ty) == MVT::i8 && Outs[OIdx].VT == MVT::i16)) &&
             "type mismatch between callee prototype and arguments");
      if (isa<IntegerType>(Ty)) {
        sz = cast<IntegerType>(Ty)->getBitWidth();
      } else if (isa<PointerType>(Ty)) {
        sz = PtrVT.getSizeInBits();
      O << ".param .b" << sz << " ";
    Type *ETy = Args[i].IndirectType;
    Align InitialAlign = Outs[OIdx].Flags.getNonZeroByValAlign();
    Align ParamByValAlign =
    O << ".param .align " << ParamByValAlign.value() << " .b8 ";
    O << "[" << Outs[OIdx].Flags.getByValSize() << "]";
    O << (first ? "" : ",") << " .param .align " << VAInfo->second
    return DL.getABITypeAlign(Ty);
  if (!DirectCallee) {
    if (const auto *CI = dyn_cast<CallInst>(CB)) {
        return StackAlign.value();
  return DL.getABITypeAlign(Ty);
  switch (ElementType.getSimpleVT().SimpleTy) {
    ElementType = MVT::i16;
    ElementType = MVT::i32;
    ElementType = MVT::i64;
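// Hedged summary (the case labels themselves are elided in this excerpt):
// the switch above widens element types without a native PTX parameter
// access width to the next supported integer type (i16, i32, or i64);
// callers pair this with truncates/extends so no bits are lost.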
                                     unsigned ArgID, const SDLoc &dl) {
  for (unsigned i = 0, n = ElementType.getSizeInBits() / 8; i < n; i++) {
  EVT MergedType = ElementType;
  for (unsigned i = 0, n = ElementType.getSizeInBits() / 8; i < n; i++) {
  if (ElementType != MergedType)
  if (auto *CalleeFunc = dyn_cast<Function>(Func->getGlobal()))
        "Support for variadic functions (unsized array parameter) introduced "
        "in PTX ISA version 6.0 and requires target sm_30.");
  assert(isABI && "Non-ABI compilation is not supported");
  unsigned VAOffset = 0;
  unsigned ParamCount = 0;
  for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {
    EVT VT = Outs[OIdx].VT;
    Type *Ty = Args[i].Ty;
    bool IsByVal = Outs[OIdx].Flags.isByVal();
    assert((!IsByVal || Args[i].IndirectType) &&
           "byval arg must have indirect type");
    Type *ETy = (IsByVal ? Args[i].IndirectType : Ty);
    Align InitialAlign = Outs[OIdx].Flags.getNonZeroByValAlign();
      VAOffset = alignTo(VAOffset, ArgAlign);
      ArgAlign = getArgumentAlignment(CB, Ty, ParamCount + 1, DL);
        (IsByVal ? Outs[OIdx].Flags.getByValSize() : DL.getTypeAllocSize(Ty));
    if (ParamCount == FirstVAArg) {
                          DeclareParamVTs, DeclareParamOps);
      NeedAlign = PassAsArray;
    } else if (PassAsArray) {
      SDValue DeclareScalarParamOps[] = {
                          DeclareScalarParamOps);
    bool ExtendIntegerParam =
    for (unsigned j = 0, je = VTs.size(); j != je; ++j) {
      int CurOffset = Offsets[j];
      SDValue StVal = OutVals[OIdx];
        EltVT = EVT(PromotedVT);
        StVal = DAG.getNode(Ext, dl, PromotedVT, StVal);
      } else if (ExtendIntegerParam) {
        assert(VTs.size() == 1 && "Scalar can't have multiple parts.");
                            dl, MVT::i32, StVal);
      if (VectorInfo[j] == PVF_SCALAR && !IsVAArg && PartAlign.has_value() &&
        assert(StoreOperands.empty() && "Unfinished preceding store.");
            DAG, Chain, IsByVal ? CurOffset + VAOffset : CurOffset, EltVT,
            StVal, InGlue, ParamCount, dl);
        assert(StoreOperands.empty() && "Unfinished preceding store.");
            DAG.getConstant(IsVAArg ? FirstVAArg : ParamCount, dl, MVT::i32));
            IsByVal ? CurOffset + VAOffset : (IsVAArg ? VAOffset : CurOffset),
        unsigned NumElts = StoreOperands.size() - 3;
        EVT TheStoreType = ExtendIntegerParam ? MVT::i32 : EltVT;
            Op, dl, DAG.getVTList(MVT::Other, MVT::Glue), StoreOperands,
        StoreOperands.clear();
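// StoreOperands acts as an accumulator: its first three entries are the
// chain, the parameter index, and the byte offset, so when a run of
// parameter pieces is flushed the element count is size() - 3. Clearing
// the buffer afterwards lets the next run start a fresh store.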
        if (!IsByVal && IsVAArg) {
                 "Vectorization is expected to be disabled for variadics.");
          VAOffset += DL.getTypeAllocSize(
    assert(StoreOperands.empty() && "Unfinished parameter store.");
    if (!IsByVal && VTs.size() > 0)
    if (IsByVal && IsVAArg)
  if (Ins.size() > 0) {
    unsigned resultsz = DL.getTypeAllocSizeInBits(RetTy);
      retAlignment = getArgumentAlignment(CB, RetTy, 0, DL);
      assert(retAlignment && "retAlignment is guaranteed to be set");
          Chain, DAG.getConstant(retAlignment->value(), dl, MVT::i32),
                              VADeclareParam->getVTList(), DeclareParamOps);
  if (isa<ExternalSymbolSDNode>(Callee)) {
    assert(CalleeFunc != nullptr && "Libcall callee must be set.");
    CalleeFunc->addFnAttr("nvptx-libcall-callee", "true");
      DL, RetTy, Args, Outs, retAlignment,
          ? std::optional<std::pair<unsigned, const APInt &>>(std::make_pair(
      *CB, UniqueCallSite);
      Chain, DAG.getConstant((Ins.size() == 0) ? 0 : 1, dl, MVT::i32), InGlue
  Chain = DAG.getNode(Opcode, dl, PrintCallVTs, PrintCallOps);
  if (ConvertToIndirectCall) {
    EVT DestVT = Callee.getValueType();
  SDValue CallVoidOps[] = { Chain, Callee, InGlue };
  SDValue CallArgBeginOps[] = { Chain, InGlue };
  for (unsigned i = 0, e = std::min(CLI.NumFixedArgs + 1, ParamCount); i != e;
    Chain = DAG.getNode(opcode, dl, CallArgVTs, CallArgOps);
  SDValue CallArgEndOps[] = { Chain,
      Chain, DAG.getConstant(UniqueCallSite, dl, MVT::i32), InGlue};
  if (Ins.size() > 0) {
    assert(VTs.size() == Ins.size() && "Bad value decomposition");
    Align RetAlign = getArgumentAlignment(CB, RetTy, 0, DL);
    bool ExtendIntegerRetVal =
        RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;
    for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
      bool needTruncate = false;
      EVT TheLoadType = VTs[i];
      EVT EltType = Ins[i].VT;
        TheLoadType = EVT(PromotedVT);
        EltType = EVT(PromotedVT);
        needTruncate = true;
      if (ExtendIntegerRetVal) {
        TheLoadType = MVT::i32;
        needTruncate = true;
        if (VTs[i].isInteger())
          needTruncate = true;
          EltAlign < DL.getABITypeAlign(
        assert(VecIdx == -1 && LoadVTs.empty() && "Orphaned operand list.");
            DAG, Chain, Offsets[i], TheLoadType, InGlue, TempProxyRegOps, dl);
        ProxyRegTruncates.push_back(std::optional<MVT>());
        assert(VecIdx == -1 && LoadVTs.empty() && "Orphaned operand list.");
        unsigned NumElts = LoadVTs.size();
            DAG.getConstant(Offsets[VecIdx], dl, MVT::i32), InGlue};
            Op, dl, DAG.getVTList(LoadVTs), LoadOperands, TheLoadType,
        for (unsigned j = 0; j < NumElts; ++j) {
            ProxyRegTruncates.push_back(std::optional<MVT>(Ins[VecIdx + j].VT));
            ProxyRegTruncates.push_back(std::optional<MVT>());
        InGlue = RetVal.getValue(NumElts + 1);
      DAG.getCALLSEQ_END(Chain, UniqueCallSite, UniqueCallSite + 1, InGlue, dl);
  for (unsigned i = 0; i < ProxyRegOps.size(); ++i) {
    if (i < RetElts.size() && RetElts[i]) {
        DAG.getVTList(ProxyRegOps[i].getSimpleValueType(), MVT::Other, MVT::Glue),
        { Chain, ProxyRegOps[i], InGlue }
    Chain = Ret.getValue(1);
    InGlue = Ret.getValue(2);
    if (ProxyRegTruncates[i]) {
  for (SDValue &T : TempProxyRegOps) {
        DAG.getVTList(T.getSimpleValueType(), MVT::Other, MVT::Glue),
        {Chain, T.getOperand(0), InGlue});
1986 "Support for dynamic alloca introduced in PTX ISA version 7.3 and "
1987 "requires target sm_52.",
1997 uint64_t Align = cast<ConstantSDNode>(
Op.getOperand(2))->getZExtValue();
2005 EVT RetTypes[] = {ValueSizeTy, MVT::Other};
2017 "Support for stackrestore requires PTX ISA version >= 7.3 and target "
2021 return Op.getOperand(0);
2040 "Support for stacksave requires PTX ISA version >= 7.3 and target >= "
  unsigned NumOperands = Node->getNumOperands();
  for (unsigned i = 0; i < NumOperands; ++i) {
    SDValue SubOp = Node->getOperand(i);
    for (unsigned j = 0; j < NumSubElem; ++j) {
  EVT FromVT = Op->getOperand(0)->getValueType(0);
  if (FromVT != MVT::v2i8) {
  EVT ToVT = Op->getValueType(0);
  EVT VT = Op->getValueType(0);
  if (!(Isv2x16VT(VT) || VT == MVT::v4i8))
    return Operand->isUndef() || isa<ConstantSDNode>(Operand) ||
           isa<ConstantFPSDNode>(Operand);
  if (VT != MVT::v4i8)
    auto PRMT__10 = GetPRMT(Op->getOperand(0), Op->getOperand(1), true, 0x3340);
    auto PRMT__32 = GetPRMT(Op->getOperand(2), Op->getOperand(3), true, 0x3340);
    auto PRMT3210 = GetPRMT(PRMT__10, PRMT__32, false, 0x5410);
    EVT VT = Op->getValueType(0);
      return APInt(32, 0);
    if (VT == MVT::v2f16 || VT == MVT::v2bf16)
      Value = cast<ConstantFPSDNode>(Operand)->getValueAPF().bitcastToAPInt();
    else if (VT == MVT::v2i16 || VT == MVT::v4i8)
    if (VT == MVT::v4i8)
    return Value.zext(32);
    Value = GetOperand(Op, 0) | GetOperand(Op, 1).shl(16);
  } else if (VT == MVT::v4i8) {
    Value = GetOperand(Op, 0) | GetOperand(Op, 1).shl(8) |
            GetOperand(Op, 2).shl(16) | GetOperand(Op, 3).shl(24);
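// Worked example: a constant v4i8 <0x11, 0x22, 0x33, 0x44> packs as
// 0x11 | (0x22 << 8) | (0x33 << 16) | (0x44 << 24) == 0x44332211, so
// element 0 occupies the least significant byte of the i32.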
  if (VectorVT == MVT::v4i8) {
  if (isa<ConstantSDNode>(Index.getNode()))
  if (VectorVT != MVT::v4i8)
  if (Value->isUndef())
  if (VectorVT != MVT::v4i8 || Op.getValueType() != MVT::v4i8)
    if (I.value() != -1)
      Selector |= (I.value() << (I.index() * 4));
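// Each shuffle-mask entry contributes one 4-bit nibble to the PRMT
// selector; e.g. the identity mask {0, 1, 2, 3} yields Selector == 0x3210,
// which PRMT reads as "destination byte k takes source byte k". A -1
// (undef) mask entry simply leaves its nibble zero.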
  EVT VT = Op.getValueType();
  EVT VT = Op.getValueType();
  EVT VT = Op.getValueType();
  EVT VT = Op.getValueType();
    return LowerFROUND32(Op, DAG);
    return LowerFROUND64(Op, DAG);
  EVT VT = Op.getValueType();
  const unsigned SignBitMask = 0x80000000;
  const unsigned PointFiveInBits = 0x3F000000;
  SDValue PointFiveWithSignRaw =
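// Bit-level detail: 0x3F000000 is the IEEE-754 encoding of 0.5f and
// 0x80000000 isolates the sign bit, so OR-ing the operand's sign into
// PointFiveInBits produces +/-0.5 with matching sign for the
// round-half-away-from-zero sequence built here.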
  EVT VT = Op.getValueType();
  EVT VT = N->getValueType(0);
  if (Op.getValueType() == MVT::bf16) {
        DAG.getNode(Op.getOpcode(), Loc, MVT::f32, Op.getOperand(0)),
  if (Op.getOperand(0).getValueType() == MVT::bf16) {
        Op.getOpcode(), Loc, Op.getValueType(),
  EVT NarrowVT = Op.getValueType();
  EVT WideVT = Op.getValueType();
  if (Op.getValueType() != MVT::v2i16)
  EVT EltVT = Op.getValueType().getVectorElementType();
  for (int I = 0, E = Op.getValueType().getVectorNumElements(); I < E; I++) {
                   [&](const SDUse &O) {
                     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT,
                                        O.get(), DAG.getIntPtrConstant(I, DL));
  switch (Op.getOpcode()) {
    return LowerBUILD_VECTOR(Op, DAG);
    return LowerBITCAST(Op, DAG);
    return LowerEXTRACT_VECTOR_ELT(Op, DAG);
    return LowerINSERT_VECTOR_ELT(Op, DAG);
    return LowerVECTOR_SHUFFLE(Op, DAG);
    return LowerCONCAT_VECTORS(Op, DAG);
    return LowerSTORE(Op, DAG);
    return LowerLOAD(Op, DAG);
    return LowerShiftLeftParts(Op, DAG);
    return LowerShiftRightParts(Op, DAG);
    return LowerSelect(Op, DAG);
    return LowerFROUND(Op, DAG);
    return LowerFCOPYSIGN(Op, DAG);
    return LowerINT_TO_FP(Op, DAG);
    return LowerFP_TO_INT(Op, DAG);
    return LowerFP_ROUND(Op, DAG);
    return LowerFP_EXTEND(Op, DAG);
    return LowerBR_JT(Op, DAG);
    return LowerVAARG(Op, DAG);
    return LowerVASTART(Op, DAG);
    return LowerCopyToReg_128(Op, DAG);
    return PromoteBinOpIfF32FTZ(Op, DAG);
  const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1));
  unsigned JId = JT->getIndex();
  const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  EVT VT = Node->getValueType(0);
  SDValue Tmp1 = Node->getOperand(0);
  SDValue Tmp2 = Node->getOperand(1);
  const MaybeAlign MA(Node->getConstantOperandVal(3));
  SDValue Arg = getParamSymbol(DAG, -1, PtrVT);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), DL, VAReg, Op.getOperand(1),
  assert(Op.getValueType() == MVT::i1 && "Custom lowering enabled only for i1");
  if (Op.getValueType() == MVT::i1)
    return LowerLOADi1(Op, DAG);
  EVT VT = Op.getValueType();
  EVT MemVT = Load->getMemoryVT();
          MemVT, *Load->getMemOperand())) {
2846 "Custom lowering for i1 load only");
2848 LD->getBasePtr(),
LD->getPointerInfo(),
2849 MVT::i8,
LD->getAlign(),
2850 LD->getMemOperand()->getFlags());
2855 SDValue Ops[] = { result,
LD->getChain() };
    return LowerSTOREi1(Op, DAG);
  if ((Isv2x16VT(VT) || VT == MVT::v4i8) &&
          VT, *Store->getMemOperand()))
    return LowerSTOREVector(Op, DAG);
  if (!NumEltsAndEltVT)
  auto [NumElts, EltVT] = NumEltsAndEltVT.value();
  if (Alignment < PrefAlign) {
  bool NeedExt = false;
  unsigned Opcode = 0;
         "NumElts should not increase, only decrease or stay the same.");
    for (unsigned i = 0; i < NumElts; ++i) {
                                  NumEltsPerSubVector);
    for (unsigned i = 0; i < NumElts; ++i) {
  Ops.append(N->op_begin() + 2, N->op_end());
      DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), MVT::i8,
                        ST->getAlign(), ST->getMemOperand()->getFlags());
  assert(Op.getOperand(1).getValueType() == MVT::i128 &&
         "Custom lowering for 128-bit CopyToReg only");
  NewOps[0] = Op->getOperand(0);
  NewOps[1] = Op->getOperand(1);
  NewOps[4] = Op->getOperand(3);
unsigned NVPTXTargetLowering::getNumRegisters(
    std::optional<MVT> RegisterVT = std::nullopt) const {
  if (VT == MVT::i128 && RegisterVT == MVT::i128)
bool NVPTXTargetLowering::splitValueIntoRegisterParts(
    unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
  if (Val.getValueType() == MVT::i128 && NumParts == 1) {
  std::vector<SDValue> OutChains;
  assert(isABI && "Non-ABI compilation is not supported");
  std::vector<Type *> argTypes;
  std::vector<const Argument *> theArgs;
    theArgs.push_back(&I);
    argTypes.push_back(I.getType());
  unsigned InsIdx = 0;
  for (unsigned i = 0, e = theArgs.size(); i != e; ++i, ++InsIdx) {
    Type *Ty = argTypes[i];
    if (theArgs[i]->use_empty()) {
      if (vtparts.empty())
      for (unsigned parti = 0, parte = vtparts.size(); parti != parte;
      if (vtparts.size() > 0)
        for (unsigned parti = 0; parti < NumRegs; ++parti) {
      bool aggregateIsPacked = false;
      if (StructType *STy = dyn_cast<StructType>(Ty))
        aggregateIsPacked = STy->isPacked();
      SDValue Arg = getParamSymbol(DAG, i, PtrVT);
      for (unsigned parti = 0, parte = VTs.size(); parti != parte; ++parti) {
        assert(VecIdx == -1 && "Orphaned vector.");
        if (VectorInfo[parti] & PVF_LAST) {
          unsigned NumElts = parti - VecIdx + 1;
          EVT EltVT = VTs[parti];
          if (EltVT == MVT::i1)
          else if (Isv2x16VT(EltVT) || EltVT == MVT::v4i8)
            if (aggregateIsPacked)
              return std::nullopt;
          P.getNode()->setIROrder(i + 1);
          for (unsigned j = 0; j < NumElts; ++j) {
            if (EltVT == MVT::i1)
            else if (EltVT != LoadVT)
                Ins[InsIdx].VT.getFixedSizeInBits() >
              Elt = DAG.getNode(Extend, dl, Ins[InsIdx].VT, Elt);
    assert(ObjectVT == Ins[InsIdx].VT &&
           "Ins type did not match function type");
    SDValue Arg = getParamSymbol(DAG, i, PtrVT);
    p.getNode()->setIROrder(i + 1);
  if (!OutChains.empty())
  for (unsigned i = 0, n = ElementType.getSizeInBits() / 8; i < n; i++) {
                          DAG.getVTList(MVT::Other), StoreOperands,
  assert(isABI && "Non-ABI compilation is not supported");
  assert(VTs.size() == OutVals.size() && "Bad return value decomposition");
  for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
    SDValue PromotedOutVal = OutVals[i];
      VTs[i] = EVT(PromotedVT);
      PromotedOutVal = DAG.getNode(Ext, dl, PromotedVT, PromotedOutVal);
    PromotedOutVals.push_back(PromotedOutVal);
  bool ExtendIntegerRetVal =
      RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;
  for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
    SDValue RetVal = PromotedOutVals[i];
    if (ExtendIntegerRetVal) {
                           dl, MVT::i32, RetVal);
    EVT ElementType = ExtendIntegerRetVal ? MVT::i32 : VTs[i];
    Align ElementTypeAlign =
        DL.getABITypeAlign(ElementType.getTypeForEVT(RetTy->getContext()));
    Align ElementAlign =
    if (ElementAlign < ElementTypeAlign) {
      assert(StoreOperands.empty() && "Orphaned operand list.");
      assert(StoreOperands.empty() && "Orphaned operand list.");
      unsigned NumElts = StoreOperands.size() - 2;
      EVT TheStoreType = ExtendIntegerRetVal ? MVT::i32 : VTs[i];
          Op, dl, DAG.getVTList(MVT::Other), StoreOperands, TheStoreType,
      StoreOperands.clear();
  if (Constraint.size() > 1)
  switch (Intrinsic) {
  case Intrinsic::nvvm_match_all_sync_i32p:
  case Intrinsic::nvvm_match_all_sync_i64p:
    Info.memVT = MVT::i1;
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col:
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row:
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_col:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_row:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_row_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_col:
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_row:
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_row_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_col:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_row:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_col:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_row:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_col:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_row:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_row_stride: {
    Info.memVT = MVT::v8f16;
    Info.ptrVal = I.getArgOperand(0);
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_col:
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_col:
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_row:
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_row_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_row_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_row:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_col:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_row:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_row_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_col:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_col:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_row:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_row_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_row_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_row:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_col:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_row:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_row_stride: {
    Info.memVT = MVT::v2i32;
    Info.ptrVal = I.getArgOperand(0);
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_col:
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_col:
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_row:
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_row_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_row_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_row:
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_col:
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_row:
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_row_stride:
  case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_col:
  case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_row:
  case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_col:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_col:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_row:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_row:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_col:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_row:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_row_stride:
  case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_col:
  case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_row:
  case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_row_stride:
  case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x4_b16:
  case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x4_trans_b16: {
    Info.memVT = MVT::v4i32;
    Info.ptrVal = I.getArgOperand(0);
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_col:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_col:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_row:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_row_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_row_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_row:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_col:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_col:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_row:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_row:
  case Intrinsic::nvvm_wmma_m8n8k128_load_a_b1_row:
  case Intrinsic::nvvm_wmma_m8n8k128_load_a_b1_row_stride:
  case Intrinsic::nvvm_wmma_m8n8k128_load_b_b1_col:
  case Intrinsic::nvvm_wmma_m8n8k128_load_b_b1_col_stride:
  case Intrinsic::nvvm_wmma_m8n8k32_load_a_s4_row:
  case Intrinsic::nvvm_wmma_m8n8k32_load_a_s4_row_stride:
  case Intrinsic::nvvm_wmma_m8n8k32_load_a_u4_row_stride:
  case Intrinsic::nvvm_wmma_m8n8k32_load_a_u4_row:
  case Intrinsic::nvvm_wmma_m8n8k32_load_b_s4_col:
  case Intrinsic::nvvm_wmma_m8n8k32_load_b_s4_col_stride:
  case Intrinsic::nvvm_wmma_m8n8k32_load_b_u4_col_stride:
  case Intrinsic::nvvm_wmma_m8n8k32_load_b_u4_col:
  case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x1_b16:
  case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x1_trans_b16: {
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
  case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_col:
  case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_row:
  case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_row_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_col:
  case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_row:
  case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_col:
  case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_row:
  case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_row_stride: {
    Info.memVT = MVT::v4f16;
    Info.ptrVal = I.getArgOperand(0);
  case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_col:
  case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_row:
  case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_row_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_col:
  case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_row:
  case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_col:
  case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_row:
  case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_row_stride:
  case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_col:
  case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_row:
  case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_row_stride: {
    Info.memVT = MVT::v8f32;
    Info.ptrVal = I.getArgOperand(0);
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_col:
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_row:
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_col:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_row:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_row_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_col:
  case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_row:
  case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_row_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_col:
  case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_row:
  case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_col:
  case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_row:
  case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_row_stride: {
    Info.memVT = MVT::v8i32;
    Info.ptrVal = I.getArgOperand(0);
  case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_col:
  case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_col_stride:
  case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_row:
  case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_row_stride:
  case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_col:
  case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_col_stride:
  case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_row:
  case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_row_stride:
  case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x2_b16:
  case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x2_trans_b16: {
    Info.memVT = MVT::v2i32;
    Info.ptrVal = I.getArgOperand(0);
  case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_col:
  case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_col_stride:
  case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_row:
  case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_row_stride:
  case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_col:
  case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_col_stride:
  case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_row:
  case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_row_stride: {
    Info.memVT = MVT::f64;
    Info.ptrVal = I.getArgOperand(0);
  case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_col:
  case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_col_stride:
  case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_row:
  case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_row_stride: {
    Info.memVT = MVT::v2f64;
    Info.ptrVal = I.getArgOperand(0);
  case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_col:
  case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_row:
  case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_row_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_col:
  case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_row:
  case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_col:
  case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_row:
  case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_row_stride: {
    Info.memVT = MVT::v4f16;
    Info.ptrVal = I.getArgOperand(0);
  case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_col:
  case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_row:
  case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_row_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_col:
  case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_row:
  case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_col:
  case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_row:
  case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_row_stride:
  case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_col:
  case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_row:
  case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_row_stride: {
    Info.memVT = MVT::v8f32;
    Info.ptrVal = I.getArgOperand(0);
  case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_col:
  case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_row:
  case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_row_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_col:
  case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_row:
  case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_col:
  case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_row:
  case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_row_stride: {
    Info.memVT = MVT::v8i32;
    Info.ptrVal = I.getArgOperand(0);
  case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_col:
  case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_col_stride:
  case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_row:
  case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_row_stride:
  case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_col:
  case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_col_stride:
  case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_row:
  case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_row_stride: {
    Info.memVT = MVT::v2i32;
    Info.ptrVal = I.getArgOperand(0);
  case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_col:
  case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_col_stride:
  case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_row:
  case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_row_stride: {
    Info.memVT = MVT::v2f64;
    Info.ptrVal = I.getArgOperand(0);
  case Intrinsic::nvvm_atomic_load_inc_32:
  case Intrinsic::nvvm_atomic_load_dec_32:
  case Intrinsic::nvvm_atomic_add_gen_f_cta:
  case Intrinsic::nvvm_atomic_add_gen_f_sys:
  case Intrinsic::nvvm_atomic_add_gen_i_cta:
  case Intrinsic::nvvm_atomic_add_gen_i_sys:
  case Intrinsic::nvvm_atomic_and_gen_i_cta:
  case Intrinsic::nvvm_atomic_and_gen_i_sys:
  case Intrinsic::nvvm_atomic_cas_gen_i_cta:
  case Intrinsic::nvvm_atomic_cas_gen_i_sys:
  case Intrinsic::nvvm_atomic_dec_gen_i_cta:
  case Intrinsic::nvvm_atomic_dec_gen_i_sys:
  case Intrinsic::nvvm_atomic_inc_gen_i_cta:
  case Intrinsic::nvvm_atomic_inc_gen_i_sys:
  case Intrinsic::nvvm_atomic_max_gen_i_cta:
  case Intrinsic::nvvm_atomic_max_gen_i_sys:
  case Intrinsic::nvvm_atomic_min_gen_i_cta:
  case Intrinsic::nvvm_atomic_min_gen_i_sys:
  case Intrinsic::nvvm_atomic_or_gen_i_cta:
  case Intrinsic::nvvm_atomic_or_gen_i_sys:
  case Intrinsic::nvvm_atomic_exch_gen_i_cta:
  case Intrinsic::nvvm_atomic_exch_gen_i_sys:
  case Intrinsic::nvvm_atomic_xor_gen_i_cta:
  case Intrinsic::nvvm_atomic_xor_gen_i_sys: {
    auto &DL = I.getDataLayout();
    Info.ptrVal = I.getArgOperand(0);
  case Intrinsic::nvvm_ldu_global_i:
  case Intrinsic::nvvm_ldu_global_f:
  case Intrinsic::nvvm_ldu_global_p: {
    auto &DL = I.getDataLayout();
    if (Intrinsic == Intrinsic::nvvm_ldu_global_i)
    else if (Intrinsic == Intrinsic::nvvm_ldu_global_p)
    Info.ptrVal = I.getArgOperand(0);
    Info.align = cast<ConstantInt>(I.getArgOperand(1))->getMaybeAlignValue();
  case Intrinsic::nvvm_tex_1d_v4f32_s32:
  case Intrinsic::nvvm_tex_1d_v4f32_f32:
  case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
  case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
  case Intrinsic::nvvm_tex_1d_array_v4f32_s32:
  case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
  case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
  case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
  case Intrinsic::nvvm_tex_2d_v4f32_s32:
  case Intrinsic::nvvm_tex_2d_v4f32_f32:
  case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
  case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
  case Intrinsic::nvvm_tex_2d_array_v4f32_s32:
  case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
  case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
  case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
  case Intrinsic::nvvm_tex_3d_v4f32_s32:
  case Intrinsic::nvvm_tex_3d_v4f32_f32:
  case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
  case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:
  case Intrinsic::nvvm_tex_cube_v4f32_f32:
  case Intrinsic::nvvm_tex_cube_level_v4f32_f32:
  case Intrinsic::nvvm_tex_cube_array_v4f32_f32:
  case Intrinsic::nvvm_tex_cube_array_level_v4f32_f32:
  case Intrinsic::nvvm_tld4_r_2d_v4f32_f32:
  case Intrinsic::nvvm_tld4_g_2d_v4f32_f32:
  case Intrinsic::nvvm_tld4_b_2d_v4f32_f32:
  case Intrinsic::nvvm_tld4_a_2d_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_1d_v4f32_s32:
  case Intrinsic::nvvm_tex_unified_1d_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_1d_level_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_1d_grad_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_1d_array_v4f32_s32:
  case Intrinsic::nvvm_tex_unified_1d_array_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_1d_array_level_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_1d_array_grad_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_2d_v4f32_s32:
  case Intrinsic::nvvm_tex_unified_2d_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_2d_level_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_2d_grad_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_2d_array_v4f32_s32:
  case Intrinsic::nvvm_tex_unified_2d_array_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_2d_array_level_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_2d_array_grad_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_3d_v4f32_s32:
  case Intrinsic::nvvm_tex_unified_3d_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_3d_level_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_3d_grad_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_cube_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_cube_level_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_cube_array_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_cube_array_level_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_cube_grad_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_cube_array_grad_v4f32_f32:
  case Intrinsic::nvvm_tld4_unified_r_2d_v4f32_f32:
  case Intrinsic::nvvm_tld4_unified_g_2d_v4f32_f32:
  case Intrinsic::nvvm_tld4_unified_b_2d_v4f32_f32:
  case Intrinsic::nvvm_tld4_unified_a_2d_v4f32_f32:
    Info.memVT = MVT::v4f32;
    Info.ptrVal = nullptr;
  case Intrinsic::nvvm_tex_1d_v4s32_s32:
  case Intrinsic::nvvm_tex_1d_v4s32_f32:
  case Intrinsic::nvvm_tex_1d_level_v4s32_f32:
  case Intrinsic::nvvm_tex_1d_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_1d_array_v4s32_s32:
  case Intrinsic::nvvm_tex_1d_array_v4s32_f32:
  case Intrinsic::nvvm_tex_1d_array_level_v4s32_f32:
  case Intrinsic::nvvm_tex_1d_array_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_2d_v4s32_s32:
  case Intrinsic::nvvm_tex_2d_v4s32_f32:
  case Intrinsic::nvvm_tex_2d_level_v4s32_f32:
  case Intrinsic::nvvm_tex_2d_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_2d_array_v4s32_s32:
  case Intrinsic::nvvm_tex_2d_array_v4s32_f32:
  case Intrinsic::nvvm_tex_2d_array_level_v4s32_f32:
  case Intrinsic::nvvm_tex_2d_array_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_3d_v4s32_s32:
  case Intrinsic::nvvm_tex_3d_v4s32_f32:
  case Intrinsic::nvvm_tex_3d_level_v4s32_f32:
  case Intrinsic::nvvm_tex_3d_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_cube_v4s32_f32:
  case Intrinsic::nvvm_tex_cube_level_v4s32_f32:
  case Intrinsic::nvvm_tex_cube_array_v4s32_f32:
  case Intrinsic::nvvm_tex_cube_array_level_v4s32_f32:
  case Intrinsic::nvvm_tex_cube_v4u32_f32:
  case Intrinsic::nvvm_tex_cube_level_v4u32_f32:
  case Intrinsic::nvvm_tex_cube_array_v4u32_f32:
  case Intrinsic::nvvm_tex_cube_array_level_v4u32_f32:
  case Intrinsic::nvvm_tex_1d_v4u32_s32:
  case Intrinsic::nvvm_tex_1d_v4u32_f32:
  case Intrinsic::nvvm_tex_1d_level_v4u32_f32:
  case Intrinsic::nvvm_tex_1d_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_1d_array_v4u32_s32:
  case Intrinsic::nvvm_tex_1d_array_v4u32_f32:
  case Intrinsic::nvvm_tex_1d_array_level_v4u32_f32:
  case Intrinsic::nvvm_tex_1d_array_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_2d_v4u32_s32:
  case Intrinsic::nvvm_tex_2d_v4u32_f32:
  case Intrinsic::nvvm_tex_2d_level_v4u32_f32:
  case Intrinsic::nvvm_tex_2d_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_2d_array_v4u32_s32:
  case Intrinsic::nvvm_tex_2d_array_v4u32_f32:
  case Intrinsic::nvvm_tex_2d_array_level_v4u32_f32:
  case Intrinsic::nvvm_tex_2d_array_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_3d_v4u32_s32:
  case Intrinsic::nvvm_tex_3d_v4u32_f32:
  case Intrinsic::nvvm_tex_3d_level_v4u32_f32:
  case Intrinsic::nvvm_tex_3d_grad_v4u32_f32:
  case Intrinsic::nvvm_tld4_r_2d_v4s32_f32:
  case Intrinsic::nvvm_tld4_g_2d_v4s32_f32:
  case Intrinsic::nvvm_tld4_b_2d_v4s32_f32:
  case Intrinsic::nvvm_tld4_a_2d_v4s32_f32:
  case Intrinsic::nvvm_tld4_r_2d_v4u32_f32:
  case Intrinsic::nvvm_tld4_g_2d_v4u32_f32:
  case Intrinsic::nvvm_tld4_b_2d_v4u32_f32:
  case Intrinsic::nvvm_tld4_a_2d_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_1d_v4s32_s32:
  case Intrinsic::nvvm_tex_unified_1d_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_1d_level_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_1d_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_1d_array_v4s32_s32:
  case Intrinsic::nvvm_tex_unified_1d_array_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_1d_array_level_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_1d_array_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_2d_v4s32_s32:
  case Intrinsic::nvvm_tex_unified_2d_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_2d_level_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_2d_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_2d_array_v4s32_s32:
  case Intrinsic::nvvm_tex_unified_2d_array_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_2d_array_level_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_2d_array_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_3d_v4s32_s32:
  case Intrinsic::nvvm_tex_unified_3d_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_3d_level_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_3d_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_1d_v4u32_s32:
  case Intrinsic::nvvm_tex_unified_1d_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_1d_level_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_1d_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_1d_array_v4u32_s32:
  case Intrinsic::nvvm_tex_unified_1d_array_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_1d_array_level_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_1d_array_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_2d_v4u32_s32:
  case Intrinsic::nvvm_tex_unified_2d_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_2d_level_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_2d_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_2d_array_v4u32_s32:
  case Intrinsic::nvvm_tex_unified_2d_array_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_2d_array_level_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_2d_array_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_3d_v4u32_s32:
  case Intrinsic::nvvm_tex_unified_3d_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_3d_level_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_3d_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_cube_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_cube_level_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_cube_array_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_cube_array_level_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_cube_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_cube_level_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_cube_array_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_cube_array_level_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_cube_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_cube_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_cube_array_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_cube_array_grad_v4u32_f32:
  case Intrinsic::nvvm_tld4_unified_r_2d_v4s32_f32:
  case Intrinsic::nvvm_tld4_unified_g_2d_v4s32_f32:
  case Intrinsic::nvvm_tld4_unified_b_2d_v4s32_f32:
  case Intrinsic::nvvm_tld4_unified_a_2d_v4s32_f32:
  case Intrinsic::nvvm_tld4_unified_r_2d_v4u32_f32:
  case Intrinsic::nvvm_tld4_unified_g_2d_v4u32_f32:
  case Intrinsic::nvvm_tld4_unified_b_2d_v4u32_f32:
  case Intrinsic::nvvm_tld4_unified_a_2d_v4u32_f32:
    Info.memVT = MVT::v4i32;
    Info.ptrVal = nullptr;
  case Intrinsic::nvvm_suld_1d_i8_clamp:
  case Intrinsic::nvvm_suld_1d_v2i8_clamp:
  case Intrinsic::nvvm_suld_1d_v4i8_clamp:
  case Intrinsic::nvvm_suld_1d_array_i8_clamp:
  case Intrinsic::nvvm_suld_1d_array_v2i8_clamp:
  case Intrinsic::nvvm_suld_1d_array_v4i8_clamp:
  case Intrinsic::nvvm_suld_2d_i8_clamp:
  case Intrinsic::nvvm_suld_2d_v2i8_clamp:
  case Intrinsic::nvvm_suld_2d_v4i8_clamp:
  case Intrinsic::nvvm_suld_2d_array_i8_clamp:
  case Intrinsic::nvvm_suld_2d_array_v2i8_clamp:
  case Intrinsic::nvvm_suld_2d_array_v4i8_clamp:
  case Intrinsic::nvvm_suld_3d_i8_clamp:
  case Intrinsic::nvvm_suld_3d_v2i8_clamp:
  case Intrinsic::nvvm_suld_3d_v4i8_clamp:
  case Intrinsic::nvvm_suld_1d_i8_trap:
  case Intrinsic::nvvm_suld_1d_v2i8_trap:
  case Intrinsic::nvvm_suld_1d_v4i8_trap:
  case Intrinsic::nvvm_suld_1d_array_i8_trap:
  case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
  case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
  case Intrinsic::nvvm_suld_2d_i8_trap:
  case Intrinsic::nvvm_suld_2d_v2i8_trap:
  case Intrinsic::nvvm_suld_2d_v4i8_trap:
  case Intrinsic::nvvm_suld_2d_array_i8_trap:
  case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
  case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
  case Intrinsic::nvvm_suld_3d_i8_trap:
  case Intrinsic::nvvm_suld_3d_v2i8_trap:
  case Intrinsic::nvvm_suld_3d_v4i8_trap:
  case Intrinsic::nvvm_suld_1d_i8_zero:
  case Intrinsic::nvvm_suld_1d_v2i8_zero:
  case Intrinsic::nvvm_suld_1d_v4i8_zero:
  case Intrinsic::nvvm_suld_1d_array_i8_zero:
  case Intrinsic::nvvm_suld_1d_array_v2i8_zero:
  case Intrinsic::nvvm_suld_1d_array_v4i8_zero:
  case Intrinsic::nvvm_suld_2d_i8_zero:
  case Intrinsic::nvvm_suld_2d_v2i8_zero:
  case Intrinsic::nvvm_suld_2d_v4i8_zero:
  case Intrinsic::nvvm_suld_2d_array_i8_zero:
  case Intrinsic::nvvm_suld_2d_array_v2i8_zero:
  case Intrinsic::nvvm_suld_2d_array_v4i8_zero:
  case Intrinsic::nvvm_suld_3d_i8_zero:
  case Intrinsic::nvvm_suld_3d_v2i8_zero:
  case Intrinsic::nvvm_suld_3d_v4i8_zero:
    Info.memVT = MVT::i8;
    Info.ptrVal = nullptr;
  case Intrinsic::nvvm_suld_1d_i16_clamp:
  case Intrinsic::nvvm_suld_1d_v2i16_clamp:
  case Intrinsic::nvvm_suld_1d_v4i16_clamp:
  case Intrinsic::nvvm_suld_1d_array_i16_clamp:
  case Intrinsic::nvvm_suld_1d_array_v2i16_clamp:
  case Intrinsic::nvvm_suld_1d_array_v4i16_clamp:
  case Intrinsic::nvvm_suld_2d_i16_clamp:
  case Intrinsic::nvvm_suld_2d_v2i16_clamp:
  case Intrinsic::nvvm_suld_2d_v4i16_clamp:
  case Intrinsic::nvvm_suld_2d_array_i16_clamp:
  case Intrinsic::nvvm_suld_2d_array_v2i16_clamp:
  case Intrinsic::nvvm_suld_2d_array_v4i16_clamp:
  case Intrinsic::nvvm_suld_3d_i16_clamp:
  case Intrinsic::nvvm_suld_3d_v2i16_clamp:
  case Intrinsic::nvvm_suld_3d_v4i16_clamp:
  case Intrinsic::nvvm_suld_1d_i16_trap:
  case Intrinsic::nvvm_suld_1d_v2i16_trap:
  case Intrinsic::nvvm_suld_1d_v4i16_trap:
  case Intrinsic::nvvm_suld_1d_array_i16_trap:
  case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
  case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
  case Intrinsic::nvvm_suld_2d_i16_trap:
  case Intrinsic::nvvm_suld_2d_v2i16_trap:
  case Intrinsic::nvvm_suld_2d_v4i16_trap:
  case Intrinsic::nvvm_suld_2d_array_i16_trap:
  case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
  case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
  case Intrinsic::nvvm_suld_3d_i16_trap:
  case Intrinsic::nvvm_suld_3d_v2i16_trap:
  case Intrinsic::nvvm_suld_3d_v4i16_trap:
  case Intrinsic::nvvm_suld_1d_i16_zero:
  case Intrinsic::nvvm_suld_1d_v2i16_zero:
  case Intrinsic::nvvm_suld_1d_v4i16_zero:
  case Intrinsic::nvvm_suld_1d_array_i16_zero:
  case Intrinsic::nvvm_suld_1d_array_v2i16_zero:
  case Intrinsic::nvvm_suld_1d_array_v4i16_zero:
  case Intrinsic::nvvm_suld_2d_i16_zero:
  case Intrinsic::nvvm_suld_2d_v2i16_zero:
  case Intrinsic::nvvm_suld_2d_v4i16_zero:
  case Intrinsic::nvvm_suld_2d_array_i16_zero:
  case Intrinsic::nvvm_suld_2d_array_v2i16_zero:
  case Intrinsic::nvvm_suld_2d_array_v4i16_zero:
  case Intrinsic::nvvm_suld_3d_i16_zero:
  case Intrinsic::nvvm_suld_3d_v2i16_zero:
  case Intrinsic::nvvm_suld_3d_v4i16_zero:
    Info.memVT = MVT::i16;
    Info.ptrVal = nullptr;
  case Intrinsic::nvvm_suld_1d_i32_clamp:
  case Intrinsic::nvvm_suld_1d_v2i32_clamp:
  case Intrinsic::nvvm_suld_1d_v4i32_clamp:
  case Intrinsic::nvvm_suld_1d_array_i32_clamp:
  case Intrinsic::nvvm_suld_1d_array_v2i32_clamp:
  case Intrinsic::nvvm_suld_1d_array_v4i32_clamp:
  case Intrinsic::nvvm_suld_2d_i32_clamp:
  case Intrinsic::nvvm_suld_2d_v2i32_clamp:
  case Intrinsic::nvvm_suld_2d_v4i32_clamp:
  case Intrinsic::nvvm_suld_2d_array_i32_clamp:
  case Intrinsic::nvvm_suld_2d_array_v2i32_clamp:
  case Intrinsic::nvvm_suld_2d_array_v4i32_clamp:
  case Intrinsic::nvvm_suld_3d_i32_clamp:
  case Intrinsic::nvvm_suld_3d_v2i32_clamp:
  case Intrinsic::nvvm_suld_3d_v4i32_clamp:
  case Intrinsic::nvvm_suld_1d_i32_trap:
  case Intrinsic::nvvm_suld_1d_v2i32_trap:
  case Intrinsic::nvvm_suld_1d_v4i32_trap:
  case Intrinsic::nvvm_suld_1d_array_i32_trap:
  case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
  case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
  case Intrinsic::nvvm_suld_2d_i32_trap:
  case Intrinsic::nvvm_suld_2d_v2i32_trap:
  case Intrinsic::nvvm_suld_2d_v4i32_trap:
  case Intrinsic::nvvm_suld_2d_array_i32_trap:
  case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
  case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
  case Intrinsic::nvvm_suld_3d_i32_trap:
  case Intrinsic::nvvm_suld_3d_v2i32_trap:
  case Intrinsic::nvvm_suld_3d_v4i32_trap:
  case Intrinsic::nvvm_suld_1d_i32_zero:
  case Intrinsic::nvvm_suld_1d_v2i32_zero:
  case Intrinsic::nvvm_suld_1d_v4i32_zero:
  case Intrinsic::nvvm_suld_1d_array_i32_zero:
  case Intrinsic::nvvm_suld_1d_array_v2i32_zero:
  case Intrinsic::nvvm_suld_1d_array_v4i32_zero:
  case Intrinsic::nvvm_suld_2d_i32_zero:
  case Intrinsic::nvvm_suld_2d_v2i32_zero:
  case Intrinsic::nvvm_suld_2d_v4i32_zero:
  case Intrinsic::nvvm_suld_2d_array_i32_zero:
  case Intrinsic::nvvm_suld_2d_array_v2i32_zero:
  case Intrinsic::nvvm_suld_2d_array_v4i32_zero:
  case Intrinsic::nvvm_suld_3d_i32_zero:
  case Intrinsic::nvvm_suld_3d_v2i32_zero:
  case Intrinsic::nvvm_suld_3d_v4i32_zero:
    Info.memVT = MVT::i32;
    Info.ptrVal = nullptr;
  case Intrinsic::nvvm_suld_1d_i64_clamp:
  case Intrinsic::nvvm_suld_1d_v2i64_clamp:
  case Intrinsic::nvvm_suld_1d_array_i64_clamp:
  case Intrinsic::nvvm_suld_1d_array_v2i64_clamp:
  case Intrinsic::nvvm_suld_2d_i64_clamp:
  case Intrinsic::nvvm_suld_2d_v2i64_clamp:
  case Intrinsic::nvvm_suld_2d_array_i64_clamp:
  case Intrinsic::nvvm_suld_2d_array_v2i64_clamp:
  case Intrinsic::nvvm_suld_3d_i64_clamp:
  case Intrinsic::nvvm_suld_3d_v2i64_clamp:
  case Intrinsic::nvvm_suld_1d_i64_trap:
  case Intrinsic::nvvm_suld_1d_v2i64_trap:
  case Intrinsic::nvvm_suld_1d_array_i64_trap:
  case Intrinsic::nvvm_suld_1d_array_v2i64_trap:
  case Intrinsic::nvvm_suld_2d_i64_trap:
  case Intrinsic::nvvm_suld_2d_v2i64_trap:
  case Intrinsic::nvvm_suld_2d_array_i64_trap:
  case Intrinsic::nvvm_suld_2d_array_v2i64_trap:
  case Intrinsic::nvvm_suld_3d_i64_trap:
  case Intrinsic::nvvm_suld_3d_v2i64_trap:
  case Intrinsic::nvvm_suld_1d_i64_zero:
  case Intrinsic::nvvm_suld_1d_v2i64_zero:
  case Intrinsic::nvvm_suld_1d_array_i64_zero:
  case Intrinsic::nvvm_suld_1d_array_v2i64_zero:
  case Intrinsic::nvvm_suld_2d_i64_zero:
  case Intrinsic::nvvm_suld_2d_v2i64_zero:
  case Intrinsic::nvvm_suld_2d_array_i64_zero:
  case Intrinsic::nvvm_suld_2d_array_v2i64_zero:
  case Intrinsic::nvvm_suld_3d_i64_zero:
  case Intrinsic::nvvm_suld_3d_v2i64_zero:
    Info.memVT = MVT::i64;
    Info.ptrVal = nullptr;
  const Align ABITypeAlign = std::min(Align(128), DL.getABITypeAlign(ArgTy));
  if (!F || !F->hasLocalLinkage() ||
      F->hasAddressTaken(nullptr,
    return ABITypeAlign;
  return std::max(Align(16), ABITypeAlign);
  Align ArgAlign = InitialAlign;
    ArgAlign = std::max(ArgAlign, Align(4));
  std::string ParamName;
    ParamStr << "_vararg";
    ParamStr << "_param_" << Idx;
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
std::pair<unsigned, const TargetRegisterClass *>
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
      return std::make_pair(0U, &NVPTX::Int1RegsRegClass);
      return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
      return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
      return std::make_pair(0U, &NVPTX::Int32RegsRegClass);
      return std::make_pair(0U, &NVPTX::Int64RegsRegClass);
                         "supported for sm_70 and higher!");
      return std::make_pair(0U, &NVPTX::Int128RegsRegClass);
      return std::make_pair(0U, &NVPTX::Float32RegsRegClass);
      return std::make_pair(0U, &NVPTX::Float64RegsRegClass);
  return F.getFnAttribute("unsafe-fp-math").getValueAsBool();
  const auto *Const = dyn_cast<ConstantSDNode>(Operand);
  return Const && Const->getZExtValue() == 0;
  if (M->getOpcode() != ISD::MUL || !M.getNode()->hasOneUse())
                     ((ZeroOpNum == 1) ? N1 : MAD),
                     ((ZeroOpNum == 1) ? MAD : N1));
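// Hedged sketch of the transform above: when one add operand is a select
// between zero and a single-use multiply, the add folds into the multiply
// to form a MAD, and a new select picks between the untouched addend N1
// (for the arm where the multiply was zero) and the MAD result; ZeroOpNum
// records which select arm held the zero so operand order is preserved.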
  int nonAddCount = 0;
  int orderNo = N->getIROrder();
  if (orderNo - orderNo2 < 500)
  bool opIsLive = false;
  if (isa<ConstantSDNode>(left) || isa<ConstantSDNode>(right))
    int orderNo3 = User->getIROrder();
    if (orderNo3 > orderNo) {
    int orderNo3 = User->getIROrder();
    if (orderNo3 > orderNo) {
  if (all_of(N->ops().drop_front(Front).drop_back(Back),
             [](const SDUse &U) { return U.get()->isUndef(); }))
    // Operand 0 is the previous value in the chain. Cannot return EntryToken
    // as the previous value will become unused and eliminated later.
    return N->getOperand(0);
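// Concrete effect: store-param/store-retval nodes whose value operands (the
// ones left after dropping Front leading and Back trailing non-value
// operands such as the chain and glue) are all undef store nothing, so the
// node is folded away to its incoming chain.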
  if (VT.isVector() || VT != MVT::i32)
  // ...
  if (VT.isVector() || !(VT == MVT::f32 || VT == MVT::f64))
  // ...
  if (isa<ConstantSDNode>(Val)) {
  // ...
  ConstantSDNode *BFEBits = dyn_cast<ConstantSDNode>(BFE.getOperand(0));
  // ...
  if (MaskVal != (uint64_t(1) << BFEBitsVal) - 1)
  // ...
  if (MaskVal != 0xff) {
  // ...
  MemSDNode *Mem = dyn_cast<MemSDNode>(Val);
  // ...
  if (MemVT != MVT::v2i8 && MemVT != MVT::v4i8) {
  // ...
  if (AExt.getNode() != nullptr) {
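// Informal summary: the AND is dropped when the masked value is already
// known to be zero in the high bits -- e.g. a bit-field extract
// (NVPTXISD::BFE) of `bits` bits masked with (1 << bits) - 1, or a byte
// loaded out of a v2i8/v4i8 memory value masked with 0xff. In both cases
// the mask is a no-op and the operand is used directly.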
  EVT VT = N->getValueType(0);
  // ...
  const SDValue &Num = N->getOperand(0);
  const SDValue &Den = N->getOperand(1);
  // ...
  if (U->getOpcode() == DivOpc && U->getOperand(0) == Num &&
      U->getOperand(1) == Den) {
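// Worked example of the rewrite (assuming the matching division exists):
//   %q = sdiv i32 %a, %b
//   %r = srem i32 %a, %b
// can be combined to
//   %r = sub i32 %a, (mul i32 %q, %b)
// so the already-computed quotient is reused instead of emitting a second,
// expensive divide.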
  EVT OrigVT = Op.getOperand(0).getValueType();
  // ...
  EVT OrigVT = Op.getOperand(0).getValueType();
  // ...
  IsSigned = (LHSSign == Signed);
  // ...
  const APInt &Val = CI->getAPIntValue();
  // ...
  return Val.isIntN(OptSize);
  // ...
  return LHSSign == RHSSign;
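// Example classification (sketch) for OptSize == 16:
//   mul i32 (sext i16 %x), (sext i16 %y)  -> demotable, IsSigned = true
//   mul i32 (zext i16 %x), 100            -> demotable, IsSigned = false
// A constant operand qualifies via APInt::isSignedIntN / isIntN, with the
// required signedness dictated by the other (extended) operand.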
  EVT MulType = N->getValueType(0);
  if (MulType != MVT::i32 && MulType != MVT::i64) {
  // ...
  if (isa<ConstantSDNode>(LHS)) {
  // ...
  if (MulType == MVT::i32) {
    DemotedVT = MVT::i16;
  } else {
    DemotedVT = MVT::i32;
  }
  // ...
  return DCI.DAG.getNode(Opc, DL, MulType, TruncLHS, TruncRHS);
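// Resulting selection (informal): the demoted multiply maps onto PTX's
// widening multiply, e.g. an i32 product of two sign-extended i16 values
// becomes
//   mul.wide.s16 %r, %rs1, %rs2;   // 16 x 16 -> 32 bits
// and the i64 case analogously uses mul.wide.s32 / mul.wide.u32.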
static bool isConstOne(const SDValue &Operand) {
  const auto *Const = dyn_cast<ConstantSDNode>(Operand);
  return Const && Const->getZExtValue() == 1;
}
// ...
  if (isConstOne(Add->getOperand(0)))
    return Add->getOperand(1);

  if (isConstOne(Add->getOperand(1)))
    return Add->getOperand(0);
// ...
  return DCI.DAG.getSelect(DL, VT, Select->getOperand(0),
                           (ConstOpNo == 1) ? X : NewMul,
                           (ConstOpNo == 1) ? NewMul : X);
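// Pattern sketch: a multiply by (y + 1) is re-associated into a
// multiply-add so the +1 never reaches the multiplier,
//   mul x, (add y, 1)  -->  mad(x, y, x)
// and when that addend is hidden behind a select with a constant-1 arm,
// the select is pushed outward first:
//   mul x, (select c, 1, v)  -->  select c, x, (mul x, v)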
  if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
  // ...
  EVT CCType = N->getValueType(0);
  // ...
  EVT AType = A.getValueType();
  if (!(CCType == MVT::v2i1 && (AType == MVT::v2f16 || AType == MVT::v2bf16)))
    return SDValue();
  // ...
  if (A.getValueType() == MVT::v2bf16 && SmVersion < 90)
    return SDValue();
  // ...
      DL, DCI.DAG.getVTList(MVT::i1, MVT::i1), {A, B, N->getOperand(2)});
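// What this recognizes (sketch): a v2i1 setcc over packed v2f16/v2bf16
// operands, which can be selected as one paired compare, e.g.
//   setp.lt.f16x2 %p0|%p1, %rs1, %rs2;
// Each lane's predicate comes back as a separate i1 result (hence the
// getVTList(MVT::i1, MVT::i1)); bf16x2 compares require sm_90, so older
// targets keep the scalarized form.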
      VectorVT == MVT::v4i8 || VectorVT == MVT::v8i8)
    return SDValue();
  // ...
  if (!(VectorBits == 16 || VectorBits == 32 || VectorBits == 64))
    return SDValue();
  // ...
  if (!Index || Index->getZExtValue() == 0)
    return SDValue();
  // ...
  if (EltVT != EltIVT)
  // ...
  if (EltVT != N->getValueType(0))
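// Rationale (informal): a vector that fits in one 16/32/64-bit register is
// cheaper to index with integer arithmetic than with a real
// EXTRACT_VECTOR_ELT, so lane i becomes a shift right by i * EltBits of the
// bitcast scalar followed by a truncate, e.g. lane 1 of a v2i16 held in an
// i32 register is
//   trunc(shr(bitcast_to_i32(v), 16))
// Lane 0 needs no shift and is left to the generic combiner, which is why
// a zero index bails out above.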
  if (VectorVT != MVT::v4i8)
  // ...
  for (int I = 0; I < 4; ++I) {
  auto VT = N->getValueType(0);
  // ...
  auto Op0 = N->getOperand(0);
  auto Op1 = N->getOperand(1);
  // ...
  std::pair<SDValue *, uint64_t *> OpData[2] = {{&Op0, &Op0Bytes},
                                                {&Op1, &Op1Bytes}};
  // ...
  for (auto &[Op, OpBytes] : OpData) {
    // ...
    *Op = Op->getOperand(0);
    // ...
          Op->getOperand(0).getValueType() == MVT::i32))
    // ...
    if (!Op->hasOneUse())
    // ...
    *Op = Op->getOperand(0);
    // ...
    if (Op->getOpcode() == ISD::SRL && isa<ConstantSDNode>(Op->getOperand(1))) {
      if (cast<ConstantSDNode>(Op->getOperand(1))->getZExtValue() == 16) {
        // ...
        assert((*OpBytes == 0x10 || *OpBytes == 0x54) &&
               "PRMT selector values out of range");
        // ...
        *Op = Op->getOperand(0);
      }
    }
  }
  // ...
  auto &DAG = DCI.DAG;
  // ...
      {Op0, Op1, DAG.getConstant((Op1Bytes << 8) | Op0Bytes, DL, MVT::i32),
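// Byte-level view (sketch): NVPTXISD::PRMT selects to PTX's prmt.b32, which
// builds a 32-bit value out of the eight bytes of two source registers
// using a hex byte selector. The defaults 0x10 / 0x54 pick the low halves
// of Op0 and Op1 (combined selector 0x5410); folding in an srl-by-16 bumps
// a pair by 0x22 (0x10 -> 0x32, 0x54 -> 0x76) to pick the high halves, so
//   v2i16(trunc a, trunc (srl b, 16))  ==>  prmt.b32 %r, %ra, %rb, 0x7610;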
SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  // ...
  switch (N->getOpcode()) {
  // ...

  EVT ToVT = Op->getValueType(0);
  if (ToVT != MVT::v2i8) {
  EVT ResVT = N->getValueType(0);
  // ...
  if (!NumEltsAndEltVT)
  // ...
  auto [NumElts, EltVT] = NumEltsAndEltVT.value();
  // ...
  Align Alignment = LD->getAlign();
  // ...
  if (Alignment < PrefAlign) {
  // ...
  bool NeedTrunc = false;
  // ...
  unsigned Opcode = 0;
  // ...
  LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
  // ...
  EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
  // ...
                                  LD->getMemOperand());
  // ...
         "NumElts should not increase, only decrease or stay the same.");
  // ...
  for (unsigned i = 0; i < NumElts; ++i) {
  // ...
  for (unsigned i = 0; i < NumElts; ++i) {
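// End result (informal): a sufficiently aligned vector load such as
//   %v = load <4 x float>, ptr %p, align 16
// is replaced by a single multi-result node (e.g. NVPTXISD::LoadV4) that
// selects to
//   ld.v4.f32 {%f0, %f1, %f2, %f3}, [%rd0];
// while an under-aligned load bails out here and is scalarized instead,
// since PTX vector loads require the full natural alignment.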
  case Intrinsic::nvvm_ldu_global_i:
  case Intrinsic::nvvm_ldu_global_f:
  case Intrinsic::nvvm_ldu_global_p: {
    EVT ResVT = N->getValueType(0);
    // ...
    bool NeedTrunc = false;
    // ...
    unsigned Opcode = 0;
    // ...
    LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
    // ...
    EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
    // ...
    OtherOps.append(N->op_begin() + 2, N->op_end());
    // ...
    for (unsigned i = 0; i < NumElts; ++i) {
    // ...
           "Custom handling of non-i8 ldu/ldg?");
  assert(Reg.getValueType() == MVT::i128 &&
         "Custom lowering for CopyFromReg with 128-bit reg only");
  // ...
                     N->getValueType(2)};
  // ...

void NVPTXTargetLowering::ReplaceNodeResults(
    SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  // ...

  auto ITy = cast<llvm::IntegerType>(Ty);
  // ...
  switch (ITy->getBitWidth()) {
  // ...
  switch (ITy->getBitWidth()) {
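// Reading guide (informal): the bit-width switches implement PTX's atom.*
// coverage -- 32-bit RMW operations are natively supported; 64-bit
// bitwise/xchg and min/max additionally require hasAtomBitwise64() /
// hasAtomMinMax64(); 8- and 16-bit operations return
// AtomicExpansionKind::CmpXChg so AtomicExpand lowers them via a
// compare-exchange loop on the containing word.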