#include "llvm/IR/IntrinsicsNVPTX.h"
#define DEBUG_TYPE "nvptx-lower"
    cl::desc("NVPTX Specific: FMA contraction (0: don't do it"
             " 1: do it  2: do it aggressively"),
    cl::desc("NVPTX Specific: 0 use div.approx, 1 use div.full, 2 use"
             " IEEE Compliant F32 div.rnd if available."),
    cl::desc("NVPTX Specific: 0 use sqrt.approx, 1 use sqrt.rn."),
98 "nvptx-force-min-byval-param-align",
cl::Hidden,
99 cl::desc(
"NVPTX Specific: force 4-byte minimal alignment for byval"
100 " params of device functions."),
      Offsets->push_back(StartingOffset + 0);
      Offsets->push_back(StartingOffset + 8);
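  // Recurse into struct elements, adding each element's offset from the
  // DataLayout's struct layout to the running StartingOffset.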
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    auto const *SL = DL.getStructLayout(STy);
    for (auto *EI : STy->elements()) {
                         StartingOffset + SL->getElementOffset(ElementNum));
  for (unsigned i = 0, e = TempVTs.size(); i != e; ++i) {
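      // v*i8 vectors whose element count is a multiple of four (or exactly
      // three) are handled as packed v4i8 parts, so round the part count up.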
                 (NumElts % 4 == 0 || NumElts == 3)) {
        NumElts = (NumElts + 3) / 4;
      for (unsigned j = 0; j != NumElts; ++j) {
          Offsets->push_back(Off);
257 "Promotion is not suitable for scalars of size larger than 64-bits");
259 *PromotedVT = MVT::i1;
264 *PromotedVT = MVT::i8;
267 *PromotedVT = MVT::i16;
270 *PromotedVT = MVT::i32;
273 *PromotedVT = MVT::i64;
276 return EVT(*PromotedVT) != VT;
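// The checks below (CanMergeParamLoadStoresStartingAt in the upstream file)
// reject a vectorized parameter access when the alignment is too small, the
// starting offset is not naturally aligned, or the elements do not tile the
// requested access size contiguously.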
  if (ParamAlignment < AccessSize)
  if (Offsets[Idx] & (AccessSize - 1))
  EVT EltVT = ValueVTs[Idx];
  if (EltSize >= AccessSize)
  unsigned NumElts = AccessSize / EltSize;
  if (AccessSize != EltSize * NumElts)
  if (Idx + NumElts > ValueVTs.size())
  if (NumElts != 4 && NumElts != 2)
  for (unsigned j = Idx + 1; j < Idx + NumElts; ++j) {
    if (ValueVTs[j] != EltVT)
    if (Offsets[j] - Offsets[j - 1] != EltSize)
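// VectorizePTXValueVTs tries the widest access first (16 bytes) and falls
// back through 8/4/2, tagging each value as the first, an inner, or the last
// element of the vectorized access it lands in.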
                                 Align ParamAlignment, bool IsVAArg = false) {
  for (int I = 0, E = ValueVTs.size(); I != E; ++I) {
    for (unsigned AccessSize : {16, 8, 4, 2}) {
          I, AccessSize, ValueVTs, Offsets, ParamAlignment);
        assert(I + 1 < E && "Not enough elements.");
        assert(I + 3 < E && "Not enough elements.");
        Op, VT, IsOpSupported ? Action : NoBF16Action);
    bool IsOpSupported = false;
  for (MVT VT : {MVT::bf16, MVT::f16, MVT::v2bf16, MVT::v2f16, MVT::f32,
                 MVT::f64, MVT::i1, MVT::i8, MVT::i16, MVT::v2i16, MVT::v4i8,
                 MVT::i32, MVT::i64}) {
  for (const auto &Ty : {MVT::i16, MVT::i32, MVT::i64}) {
  const bool IsFP16FP16x2NegAvailable = STI.getSmVersion() >= 53 &&
  for (const auto &VT : {MVT::f16, MVT::v2f16})
  for (MVT VT : {MVT::bf16, MVT::f32, MVT::f64}) {
  for (MVT VT : {MVT::i1, MVT::i16, MVT::i32, MVT::i64}) {
  for (const auto &Op :
  bool SupportsF32MinMaxNaN =
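// MAKE_CASE expands to "case V: return #V;", keeping the NVPTXISD
// opcode-to-name table free of repetition.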
#define MAKE_CASE(V)                                                           \
                                           bool Reciprocal) const {
  if (Reciprocal || ExtraSteps > 0) {
      return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_rsqrt_approx_ftz_f
                                   : Intrinsic::nvvm_rsqrt_approx_f);
    else if (VT == MVT::f64)
      return MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d);
      return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_sqrt_approx_ftz_f
                                   : Intrinsic::nvvm_sqrt_approx_f);
          DAG.getConstant(Intrinsic::nvvm_rcp_approx_ftz_d, DL, MVT::i32),
          MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d));
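// getPrototype renders the PTX ".callprototype" string used for indirect
// calls: the return-value shape first, then one ".param" entry per argument.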
    std::optional<std::pair<unsigned, const APInt &>> VAInfo,
    const CallBase &CB, unsigned UniqueCallSite) const {
  assert(isABI && "Non-ABI compilation is not supported");
  std::string Prototype;
  O << "prototype_" << UniqueCallSite << " : .callprototype ";
    if (auto *ITy = dyn_cast<IntegerType>(retTy)) {
      size = ITy->getBitWidth();
             "Floating point type expected here");
      O << ".param .b" << size << " _";
    } else if (isa<PointerType>(retTy)) {
      O << ".param .b" << PtrVT.getSizeInBits() << " _";
      O << ".param .align " << (retAlignment ? retAlignment->value() : 0)
        << " .b8 _[" << DL.getTypeAllocSize(retTy) << "]";
  unsigned NumArgs = VAInfo ? VAInfo->first : Args.size();
  for (unsigned i = 0, OIdx = 0; i != NumArgs; ++i, ++OIdx) {
    Type *Ty = Args[i].Ty;
    if (!Outs[OIdx].Flags.isByVal()) {
        O << ".param .align " << ParamAlign.value() << " .b8 ";
        O << "[" << DL.getTypeAllocSize(Ty) << "]";
        if (unsigned len = vtparts.size())
                (getValueType(DL, Ty) == MVT::i8 && Outs[OIdx].VT == MVT::i16)) &&
               "type mismatch between callee prototype and arguments");
      if (isa<IntegerType>(Ty)) {
        sz = cast<IntegerType>(Ty)->getBitWidth();
      } else if (isa<PointerType>(Ty)) {
        sz = PtrVT.getSizeInBits();
      O << ".param .b" << sz << " ";
    Type *ETy = Args[i].IndirectType;
    Align InitialAlign = Outs[OIdx].Flags.getNonZeroByValAlign();
    Align ParamByValAlign =
    O << ".param .align " << ParamByValAlign.value() << " .b8 ";
    O << "[" << Outs[OIdx].Flags.getByValSize() << "]";
  O << (first ? "" : ",") << " .param .align " << VAInfo->second
    return DL.getABITypeAlign(Ty);
  if (!DirectCallee) {
    if (const auto *CI = dyn_cast<CallInst>(CB)) {
        return StackAlign.value();
  return DL.getABITypeAlign(Ty);
  switch (ElementType.getSimpleVT().SimpleTy) {
    ElementType = MVT::i16;
    ElementType = MVT::i32;
    ElementType = MVT::i64;
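// When a parameter's natural alignment exceeds what the param space offers,
// the value is spilled one byte at a time: each iteration shifts the value
// right and stores the low byte at the next offset.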
                                        unsigned ArgID, const SDLoc &dl) {
  for (unsigned i = 0, n = ElementType.getSizeInBits() / 8; i < n; i++) {
  EVT MergedType = ElementType;
  for (unsigned i = 0, n = ElementType.getSizeInBits() / 8; i < n; i++) {
  if (ElementType != MergedType)
1659 "Support for variadic functions (unsized array parameter) introduced "
1660 "in PTX ISA version 6.0 and requires target sm_30.");
1676 assert(isABI &&
"Non-ABI compilation is not supported");
  unsigned VAOffset = 0;
  unsigned ParamCount = 0;
  for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {
    EVT VT = Outs[OIdx].VT;
    Type *Ty = Args[i].Ty;
    bool IsByVal = Outs[OIdx].Flags.isByVal();
    assert((!IsByVal || Args[i].IndirectType) &&
           "byval arg must have indirect type");
    Type *ETy = (IsByVal ? Args[i].IndirectType : Ty);
      Align InitialAlign = Outs[OIdx].Flags.getNonZeroByValAlign();
        VAOffset = alignTo(VAOffset, ArgAlign);
      ArgAlign = getArgumentAlignment(CB, Ty, ParamCount + 1, DL);
        (IsByVal ? Outs[OIdx].Flags.getByValSize() : DL.getTypeAllocSize(Ty));
    if (ParamCount == FirstVAArg) {
                          DeclareParamVTs, DeclareParamOps);
      NeedAlign = PassAsArray;
    } else if (PassAsArray) {
      SDValue DeclareScalarParamOps[] = {
                          DeclareScalarParamOps);
    bool ExtendIntegerParam =
    for (unsigned j = 0, je = VTs.size(); j != je; ++j) {
      int CurOffset = Offsets[j];
      SDValue StVal = OutVals[OIdx];
        EltVT = EVT(PromotedVT);
        StVal = DAG.getNode(Ext, dl, PromotedVT, StVal);
      } else if (ExtendIntegerParam) {
        assert(VTs.size() == 1 && "Scalar can't have multiple parts.");
                            dl, MVT::i32, StVal);
      if (VectorInfo[j] == PVF_SCALAR && !IsVAArg && PartAlign.has_value() &&
        assert(StoreOperands.empty() && "Unfinished preceding store.");
            DAG, Chain, IsByVal ? CurOffset + VAOffset : CurOffset, EltVT,
            StVal, InGlue, ParamCount, dl);
        assert(StoreOperands.empty() && "Unfinished preceding store.");
            DAG.getConstant(IsVAArg ? FirstVAArg : ParamCount, dl, MVT::i32));
            IsByVal ? CurOffset + VAOffset : (IsVAArg ? VAOffset : CurOffset),
        unsigned NumElts = StoreOperands.size() - 3;
        EVT TheStoreType = ExtendIntegerParam ? MVT::i32 : EltVT;
            Op, dl, DAG.getVTList(MVT::Other, MVT::Glue), StoreOperands,
        StoreOperands.clear();
        if (!IsByVal && IsVAArg) {
                 "Vectorization is expected to be disabled for variadics.");
          VAOffset += DL.getTypeAllocSize(
    assert(StoreOperands.empty() && "Unfinished parameter store.");
    if (!IsByVal && VTs.size() > 0)
    if (IsByVal && IsVAArg)
  if (Ins.size() > 0) {
    unsigned resultsz = DL.getTypeAllocSizeInBits(RetTy);
      retAlignment = getArgumentAlignment(CB, RetTy, 0, DL);
      assert(retAlignment && "retAlignment is guaranteed to be set");
          Chain, DAG.getConstant(retAlignment->value(), dl, MVT::i32),
                         VADeclareParam->getVTList(), DeclareParamOps);
  if (isa<ExternalSymbolSDNode>(Callee)) {
    assert(CalleeFunc != nullptr && "Libcall callee must be set.");
    CalleeFunc->addFnAttr("nvptx-libcall-callee", "true");
      DL, RetTy, Args, Outs, retAlignment,
          ? std::optional<std::pair<unsigned, const APInt &>>(std::make_pair(
      *CB, UniqueCallSite);
      Chain, DAG.getConstant((Ins.size() == 0) ? 0 : 1, dl, MVT::i32), InGlue
  Chain = DAG.getNode(Opcode, dl, PrintCallVTs, PrintCallOps);
  SDValue CallVoidOps[] = { Chain, Callee, InGlue };
  SDValue CallArgBeginOps[] = { Chain, InGlue };
  for (unsigned i = 0, e = std::min(CLI.NumFixedArgs + 1, ParamCount); i != e;
    Chain = DAG.getNode(opcode, dl, CallArgVTs, CallArgOps);
  SDValue CallArgEndOps[] = { Chain,
      Chain, DAG.getConstant(UniqueCallSite, dl, MVT::i32), InGlue};
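  // After the call sequence proper, the return value is read back out of the
  // param space below, with loads vectorized where the layout allows.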
  if (Ins.size() > 0) {
    assert(VTs.size() == Ins.size() && "Bad value decomposition");
    Align RetAlign = getArgumentAlignment(CB, RetTy, 0, DL);
    bool ExtendIntegerRetVal =
        RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;
    for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
      bool needTruncate = false;
      EVT TheLoadType = VTs[i];
      EVT EltType = Ins[i].VT;
        TheLoadType = EVT(PromotedVT);
        EltType = EVT(PromotedVT);
        needTruncate = true;
      if (ExtendIntegerRetVal) {
        TheLoadType = MVT::i32;
        needTruncate = true;
        if (VTs[i].isInteger())
          needTruncate = true;
          EltAlign < DL.getABITypeAlign(
        assert(VecIdx == -1 && LoadVTs.empty() && "Orphaned operand list.");
            DAG, Chain, Offsets[i], TheLoadType, InGlue, TempProxyRegOps, dl);
        ProxyRegTruncates.push_back(std::optional<MVT>());
        assert(VecIdx == -1 && LoadVTs.empty() && "Orphaned operand list.");
        unsigned NumElts = LoadVTs.size();
            DAG.getConstant(Offsets[VecIdx], dl, MVT::i32), InGlue};
            Op, dl, DAG.getVTList(LoadVTs), LoadOperands, TheLoadType,
        for (unsigned j = 0; j < NumElts; ++j) {
            ProxyRegTruncates.push_back(std::optional<MVT>(Ins[VecIdx + j].VT));
            ProxyRegTruncates.push_back(std::optional<MVT>());
        InGlue = RetVal.getValue(NumElts + 1);
      DAG.getCALLSEQ_END(Chain, UniqueCallSite, UniqueCallSite + 1, InGlue, dl);
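  // ProxyReg nodes keep the loaded return values glued to the call sequence
  // so they cannot be reordered past CALLSEQ_END; the loops below unwrap
  // them, truncating where a narrower type was promoted for the load.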
  for (unsigned i = 0; i < ProxyRegOps.size(); ++i) {
    if (i < RetElts.size() && RetElts[i]) {
        DAG.getVTList(ProxyRegOps[i].getSimpleValueType(), MVT::Other,
                      MVT::Glue),
        { Chain, ProxyRegOps[i], InGlue }
    Chain = Ret.getValue(1);
    InGlue = Ret.getValue(2);
    if (ProxyRegTruncates[i]) {
  for (SDValue &T : TempProxyRegOps) {
        DAG.getVTList(T.getSimpleValueType(), MVT::Other, MVT::Glue),
        {Chain, T.getOperand(0), InGlue});
2242 "Support for dynamic alloca introduced in PTX ISA version 7.3 and "
2243 "requires target sm_52.",
2253 uint64_t Align = cast<ConstantSDNode>(
Op.getOperand(2))->getZExtValue();
2261 EVT RetTypes[] = {ValueSizeTy, MVT::Other};
  unsigned NumOperands = Node->getNumOperands();
  for (unsigned i = 0; i < NumOperands; ++i) {
    SDValue SubOp = Node->getOperand(i);
    for (unsigned j = 0; j < NumSubElem; ++j) {
  EVT VT = Op->getValueType(0);
  if (!(Isv2x16VT(VT) || VT == MVT::v4i8))
    return Operand->isUndef() || isa<ConstantSDNode>(Operand) ||
           isa<ConstantFPSDNode>(Operand);
  if (VT == MVT::v4i8) {
  EVT VT = Op->getValueType(0);
      return APInt(32, 0);
    if (VT == MVT::v2f16 || VT == MVT::v2bf16)
      Value = cast<ConstantFPSDNode>(Operand)->getValueAPF().bitcastToAPInt();
    else if (VT == MVT::v2i16 || VT == MVT::v4i8)
    if (VT == MVT::v4i8)
    return Value.zext(32);
    Value = GetOperand(Op, 0) | GetOperand(Op, 1).shl(16);
  } else if (VT == MVT::v4i8) {
    Value = GetOperand(Op, 0) | GetOperand(Op, 1).shl(8) |
            GetOperand(Op, 2).shl(16) | GetOperand(Op, 3).shl(24);
  if (VectorVT == MVT::v4i8) {
  if (isa<ConstantSDNode>(Index.getNode()))
  if (VectorVT != MVT::v4i8)
  if (Value->isUndef())
  if (VectorVT != MVT::v4i8 || Op.getValueType() != MVT::v4i8)
    if (I.value() != -1)
      Selector |= (I.value() << (I.index() * 4));
  EVT VT = Op.getValueType();
  EVT VT = Op.getValueType();
  EVT VT = Op.getValueType();
    return LowerFROUND32(Op, DAG);
    return LowerFROUND64(Op, DAG);
  EVT VT = Op.getValueType();
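  // IEEE-754 single-precision bit tricks: 0x80000000 isolates the sign bit
  // and 0x3F000000 is the encoding of +0.5f, so OR-ing in the sign yields a
  // 0.5 that carries the sign of the input.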
  const int SignBitMask = 0x80000000;
  const int PointFiveInBits = 0x3F000000;
  SDValue PointFiveWithSignRaw =
  EVT VT = Op.getValueType();
  if (Op.getValueType() == MVT::bf16) {
        DAG.getNode(Op.getOpcode(), Loc, MVT::f32, Op.getOperand(0)),
  if (Op.getOperand(0).getValueType() == MVT::bf16) {
        Op.getOpcode(), Loc, Op.getValueType(),
  EVT NarrowVT = Op.getValueType();
  EVT WideVT = Op.getValueType();
  if (Op.getValueType() != MVT::v2i16)
  EVT EltVT = Op.getValueType().getVectorElementType();
  for (int I = 0, E = Op.getValueType().getVectorNumElements(); I < E; I++) {
        [&](const SDUse &O) {
          return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, O.get(),
                             DAG.getIntPtrConstant(I, DL));
  switch (Op.getOpcode()) {
    return LowerBUILD_VECTOR(Op, DAG);
    return LowerEXTRACT_VECTOR_ELT(Op, DAG);
    return LowerINSERT_VECTOR_ELT(Op, DAG);
    return LowerVECTOR_SHUFFLE(Op, DAG);
    return LowerCONCAT_VECTORS(Op, DAG);
    return LowerSTORE(Op, DAG);
    return LowerLOAD(Op, DAG);
    return LowerShiftLeftParts(Op, DAG);
    return LowerShiftRightParts(Op, DAG);
    return LowerSelect(Op, DAG);
    return LowerFROUND(Op, DAG);
    return LowerINT_TO_FP(Op, DAG);
    return LowerFP_TO_INT(Op, DAG);
    return LowerFP_ROUND(Op, DAG);
    return LowerFP_EXTEND(Op, DAG);
    return LowerBR_JT(Op, DAG);
    return LowerVAARG(Op, DAG);
    return LowerVASTART(Op, DAG);
    return LowerCopyToReg_128(Op, DAG);
  const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1));
  unsigned JId = JT->getIndex();
  const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  EVT VT = Node->getValueType(0);
  SDValue Tmp1 = Node->getOperand(0);
  SDValue Tmp2 = Node->getOperand(1);
  const MaybeAlign MA(Node->getConstantOperandVal(3));
  SDValue Arg = getParamSymbol(DAG, -1, PtrVT);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), DL, VAReg, Op.getOperand(1),
  assert(Op.getValueType() == MVT::i1 &&
         "Custom lowering enabled only for i1");
  if (Op.getValueType() == MVT::i1)
    return LowerLOADi1(Op, DAG);
  EVT VT = Op.getValueType();
  EVT MemVT = Load->getMemoryVT();
          MemVT, *Load->getMemOperand())) {
         "Custom lowering for i1 load only");
                              LD->getBasePtr(), LD->getPointerInfo(), MVT::i8,
                              LD->getAlign(),
                              LD->getMemOperand()->getFlags());
  SDValue Ops[] = { result, LD->getChain() };
    return LowerSTOREi1(Op, DAG);
  if ((Isv2x16VT(VT) || VT == MVT::v4i8) &&
          VT, *Store->getMemOperand()))
    return LowerSTOREVector(Op, DAG);
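// LowerSTOREVector rewrites wide vector stores into the PTX st.v2/st.v4
// forms; half-precision elements may be kept packed in 32-bit units (tracked
// by StoreF16x2 below).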
  if (Alignment < PrefAlign) {
  unsigned Opcode = 0;
  bool NeedExt = false;
  bool StoreF16x2 = false;
    for (unsigned i = 0; i < NumElts; ++i) {
    for (unsigned i = 0; i < NumElts; ++i) {
  Ops.append(N->op_begin() + 2, N->op_end());
      DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), MVT::i8,
                        ST->getAlign(), ST->getMemOperand()->getFlags());
  assert(Op.getOperand(1).getValueType() == MVT::i128 &&
         "Custom lowering for 128-bit CopyToReg only");
  NewOps[0] = Op->getOperand(0);
  NewOps[1] = Op->getOperand(1);
  NewOps[4] = Op->getOperand(3);
unsigned NVPTXTargetLowering::getNumRegisters(
    std::optional<MVT> RegisterVT = std::nullopt) const {
  if (VT == MVT::i128 && RegisterVT == MVT::i128)
bool NVPTXTargetLowering::splitValueIntoRegisterParts(
    unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
  if (Val.getValueType() == MVT::i128 && NumParts == 1) {
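// LowerFormalArguments loads each argument from its .param symbol, reusing
// the same vectorization machinery as the call-lowering path above.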
  std::vector<SDValue> OutChains;
  assert(isABI && "Non-ABI compilation is not supported");
  std::vector<Type *> argTypes;
  std::vector<const Argument *> theArgs;
    theArgs.push_back(&I);
    argTypes.push_back(I.getType());
  unsigned InsIdx = 0;
  for (unsigned i = 0, e = theArgs.size(); i != e; ++i, ++InsIdx) {
    Type *Ty = argTypes[i];
    if (theArgs[i]->use_empty()) {
      if (vtparts.empty())
      for (unsigned parti = 0, parte = vtparts.size(); parti != parte;
      if (vtparts.size() > 0)
      for (unsigned parti = 0; parti < NumRegs; ++parti) {
      bool aggregateIsPacked = false;
      if (StructType *STy = dyn_cast<StructType>(Ty))
        aggregateIsPacked = STy->isPacked();
      SDValue Arg = getParamSymbol(DAG, i, PtrVT);
      for (unsigned parti = 0, parte = VTs.size(); parti != parte; ++parti) {
        assert(VecIdx == -1 && "Orphaned vector.");
        if (VectorInfo[parti] & PVF_LAST) {
          unsigned NumElts = parti - VecIdx + 1;
          EVT EltVT = VTs[parti];
          if (EltVT == MVT::i1)
          else if (Isv2x16VT(EltVT) || EltVT == MVT::v4i8)
            if (aggregateIsPacked)
              return std::nullopt;
          P.getNode()->setIROrder(i + 1);
          for (unsigned j = 0; j < NumElts; ++j) {
            if (EltVT == MVT::i1)
            else if (EltVT != LoadVT)
                Ins[InsIdx].VT.getFixedSizeInBits() >
              Elt = DAG.getNode(Extend, dl, Ins[InsIdx].VT, Elt);
    assert(ObjectVT == Ins[InsIdx].VT &&
           "Ins type did not match function type");
    SDValue Arg = getParamSymbol(DAG, i, PtrVT);
    p.getNode()->setIROrder(i + 1);
  if (!OutChains.empty())
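// The unaligned return-value path mirrors the parameter case above: the
// value is shifted and stored back one byte per iteration.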
  for (unsigned i = 0, n = ElementType.getSizeInBits() / 8; i < n; i++) {
                        DAG.getVTList(MVT::Other), StoreOperands,
  assert(isABI && "Non-ABI compilation is not supported");
  assert(VTs.size() == OutVals.size() && "Bad return value decomposition");
  for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
    SDValue PromotedOutVal = OutVals[i];
      VTs[i] = EVT(PromotedVT);
      PromotedOutVal = DAG.getNode(Ext, dl, PromotedVT, PromotedOutVal);
    PromotedOutVals.push_back(PromotedOutVal);
  bool ExtendIntegerRetVal =
      RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;
  for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
    SDValue RetVal = PromotedOutVals[i];
    if (ExtendIntegerRetVal) {
                           dl, MVT::i32, RetVal);
    EVT ElementType = ExtendIntegerRetVal ? MVT::i32 : VTs[i];
    Align ElementTypeAlign =
        DL.getABITypeAlign(ElementType.getTypeForEVT(RetTy->getContext()));
    Align ElementAlign =
    if (ElementAlign < ElementTypeAlign) {
      assert(StoreOperands.empty() && "Orphaned operand list.");
    assert(StoreOperands.empty() && "Orphaned operand list.");
      unsigned NumElts = StoreOperands.size() - 2;
      EVT TheStoreType = ExtendIntegerRetVal ? MVT::i32 : VTs[i];
          Op, dl, DAG.getVTList(MVT::Other), StoreOperands, TheStoreType,
      StoreOperands.clear();
  if (Constraint.size() > 1)
  switch (Intrinsic) {
  case Intrinsic::nvvm_tex_1d_v4f32_s32:
  case Intrinsic::nvvm_tex_1d_v4f32_f32:
  case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
  case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
  case Intrinsic::nvvm_tex_1d_v4s32_s32:
  case Intrinsic::nvvm_tex_1d_v4s32_f32:
  case Intrinsic::nvvm_tex_1d_level_v4s32_f32:
  case Intrinsic::nvvm_tex_1d_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_1d_v4u32_s32:
  case Intrinsic::nvvm_tex_1d_v4u32_f32:
  case Intrinsic::nvvm_tex_1d_level_v4u32_f32:
  case Intrinsic::nvvm_tex_1d_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_1d_array_v4f32_s32:
  case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
  case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
  case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
  case Intrinsic::nvvm_tex_1d_array_v4s32_s32:
  case Intrinsic::nvvm_tex_1d_array_v4s32_f32:
  case Intrinsic::nvvm_tex_1d_array_level_v4s32_f32:
  case Intrinsic::nvvm_tex_1d_array_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_1d_array_v4u32_s32:
  case Intrinsic::nvvm_tex_1d_array_v4u32_f32:
  case Intrinsic::nvvm_tex_1d_array_level_v4u32_f32:
  case Intrinsic::nvvm_tex_1d_array_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_2d_v4f32_s32:
  case Intrinsic::nvvm_tex_2d_v4f32_f32:
  case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
  case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
  case Intrinsic::nvvm_tex_2d_v4s32_s32:
  case Intrinsic::nvvm_tex_2d_v4s32_f32:
  case Intrinsic::nvvm_tex_2d_level_v4s32_f32:
  case Intrinsic::nvvm_tex_2d_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_2d_v4u32_s32:
  case Intrinsic::nvvm_tex_2d_v4u32_f32:
  case Intrinsic::nvvm_tex_2d_level_v4u32_f32:
  case Intrinsic::nvvm_tex_2d_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_2d_array_v4f32_s32:
  case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
  case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
  case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
  case Intrinsic::nvvm_tex_2d_array_v4s32_s32:
  case Intrinsic::nvvm_tex_2d_array_v4s32_f32:
  case Intrinsic::nvvm_tex_2d_array_level_v4s32_f32:
  case Intrinsic::nvvm_tex_2d_array_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_2d_array_v4u32_s32:
  case Intrinsic::nvvm_tex_2d_array_v4u32_f32:
  case Intrinsic::nvvm_tex_2d_array_level_v4u32_f32:
  case Intrinsic::nvvm_tex_2d_array_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_3d_v4f32_s32:
  case Intrinsic::nvvm_tex_3d_v4f32_f32:
  case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
  case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:
  case Intrinsic::nvvm_tex_3d_v4s32_s32:
  case Intrinsic::nvvm_tex_3d_v4s32_f32:
  case Intrinsic::nvvm_tex_3d_level_v4s32_f32:
  case Intrinsic::nvvm_tex_3d_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_3d_v4u32_s32:
  case Intrinsic::nvvm_tex_3d_v4u32_f32:
  case Intrinsic::nvvm_tex_3d_level_v4u32_f32:
  case Intrinsic::nvvm_tex_3d_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_cube_v4f32_f32:
  case Intrinsic::nvvm_tex_cube_level_v4f32_f32:
  case Intrinsic::nvvm_tex_cube_v4s32_f32:
  case Intrinsic::nvvm_tex_cube_level_v4s32_f32:
  case Intrinsic::nvvm_tex_cube_v4u32_f32:
  case Intrinsic::nvvm_tex_cube_level_v4u32_f32:
  case Intrinsic::nvvm_tex_cube_array_v4f32_f32:
  case Intrinsic::nvvm_tex_cube_array_level_v4f32_f32:
  case Intrinsic::nvvm_tex_cube_array_v4s32_f32:
  case Intrinsic::nvvm_tex_cube_array_level_v4s32_f32:
  case Intrinsic::nvvm_tex_cube_array_v4u32_f32:
  case Intrinsic::nvvm_tex_cube_array_level_v4u32_f32:
  case Intrinsic::nvvm_tld4_r_2d_v4f32_f32:
  case Intrinsic::nvvm_tld4_g_2d_v4f32_f32:
  case Intrinsic::nvvm_tld4_b_2d_v4f32_f32:
  case Intrinsic::nvvm_tld4_a_2d_v4f32_f32:
  case Intrinsic::nvvm_tld4_r_2d_v4s32_f32:
  case Intrinsic::nvvm_tld4_g_2d_v4s32_f32:
  case Intrinsic::nvvm_tld4_b_2d_v4s32_f32:
  case Intrinsic::nvvm_tld4_a_2d_v4s32_f32:
  case Intrinsic::nvvm_tld4_r_2d_v4u32_f32:
  case Intrinsic::nvvm_tld4_g_2d_v4u32_f32:
  case Intrinsic::nvvm_tld4_b_2d_v4u32_f32:
  case Intrinsic::nvvm_tld4_a_2d_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_1d_v4f32_s32:
  case Intrinsic::nvvm_tex_unified_1d_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_1d_level_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_1d_grad_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_1d_v4s32_s32:
  case Intrinsic::nvvm_tex_unified_1d_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_1d_level_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_1d_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_1d_v4u32_s32:
  case Intrinsic::nvvm_tex_unified_1d_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_1d_level_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_1d_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_1d_array_v4f32_s32:
  case Intrinsic::nvvm_tex_unified_1d_array_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_1d_array_level_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_1d_array_grad_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_1d_array_v4s32_s32:
  case Intrinsic::nvvm_tex_unified_1d_array_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_1d_array_level_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_1d_array_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_1d_array_v4u32_s32:
  case Intrinsic::nvvm_tex_unified_1d_array_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_1d_array_level_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_1d_array_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_2d_v4f32_s32:
  case Intrinsic::nvvm_tex_unified_2d_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_2d_level_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_2d_grad_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_2d_v4s32_s32:
  case Intrinsic::nvvm_tex_unified_2d_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_2d_level_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_2d_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_2d_v4u32_s32:
  case Intrinsic::nvvm_tex_unified_2d_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_2d_level_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_2d_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_2d_array_v4f32_s32:
  case Intrinsic::nvvm_tex_unified_2d_array_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_2d_array_level_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_2d_array_grad_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_2d_array_v4s32_s32:
  case Intrinsic::nvvm_tex_unified_2d_array_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_2d_array_level_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_2d_array_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_2d_array_v4u32_s32:
  case Intrinsic::nvvm_tex_unified_2d_array_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_2d_array_level_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_2d_array_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_3d_v4f32_s32:
  case Intrinsic::nvvm_tex_unified_3d_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_3d_level_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_3d_grad_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_3d_v4s32_s32:
  case Intrinsic::nvvm_tex_unified_3d_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_3d_level_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_3d_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_3d_v4u32_s32:
  case Intrinsic::nvvm_tex_unified_3d_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_3d_level_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_3d_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_cube_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_cube_level_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_cube_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_cube_level_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_cube_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_cube_level_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_cube_array_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_cube_array_level_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_cube_array_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_cube_array_level_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_cube_array_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_cube_array_level_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_cube_grad_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_cube_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_cube_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_cube_array_grad_v4f32_f32:
  case Intrinsic::nvvm_tex_unified_cube_array_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_cube_array_grad_v4u32_f32:
  case Intrinsic::nvvm_tld4_unified_r_2d_v4f32_f32:
  case Intrinsic::nvvm_tld4_unified_g_2d_v4f32_f32:
  case Intrinsic::nvvm_tld4_unified_b_2d_v4f32_f32:
  case Intrinsic::nvvm_tld4_unified_a_2d_v4f32_f32:
  case Intrinsic::nvvm_tld4_unified_r_2d_v4s32_f32:
  case Intrinsic::nvvm_tld4_unified_g_2d_v4s32_f32:
  case Intrinsic::nvvm_tld4_unified_b_2d_v4s32_f32:
  case Intrinsic::nvvm_tld4_unified_a_2d_v4s32_f32:
  case Intrinsic::nvvm_tld4_unified_r_2d_v4u32_f32:
  case Intrinsic::nvvm_tld4_unified_g_2d_v4u32_f32:
  case Intrinsic::nvvm_tld4_unified_b_2d_v4u32_f32:
  case Intrinsic::nvvm_tld4_unified_a_2d_v4u32_f32:
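// Surface loads (suld) get the same treatment: one case per dimensionality,
// element type, and out-of-bounds handling mode (clamp/trap/zero).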
  switch (Intrinsic) {
  case Intrinsic::nvvm_suld_1d_i8_clamp:
  case Intrinsic::nvvm_suld_1d_i16_clamp:
  case Intrinsic::nvvm_suld_1d_i32_clamp:
  case Intrinsic::nvvm_suld_1d_i64_clamp:
  case Intrinsic::nvvm_suld_1d_v2i8_clamp:
  case Intrinsic::nvvm_suld_1d_v2i16_clamp:
  case Intrinsic::nvvm_suld_1d_v2i32_clamp:
  case Intrinsic::nvvm_suld_1d_v2i64_clamp:
  case Intrinsic::nvvm_suld_1d_v4i8_clamp:
  case Intrinsic::nvvm_suld_1d_v4i16_clamp:
  case Intrinsic::nvvm_suld_1d_v4i32_clamp:
  case Intrinsic::nvvm_suld_1d_array_i8_clamp:
  case Intrinsic::nvvm_suld_1d_array_i16_clamp:
  case Intrinsic::nvvm_suld_1d_array_i32_clamp:
  case Intrinsic::nvvm_suld_1d_array_i64_clamp:
  case Intrinsic::nvvm_suld_1d_array_v2i8_clamp:
  case Intrinsic::nvvm_suld_1d_array_v2i16_clamp:
  case Intrinsic::nvvm_suld_1d_array_v2i32_clamp:
  case Intrinsic::nvvm_suld_1d_array_v2i64_clamp:
  case Intrinsic::nvvm_suld_1d_array_v4i8_clamp:
  case Intrinsic::nvvm_suld_1d_array_v4i16_clamp:
  case Intrinsic::nvvm_suld_1d_array_v4i32_clamp:
  case Intrinsic::nvvm_suld_2d_i8_clamp:
  case Intrinsic::nvvm_suld_2d_i16_clamp:
  case Intrinsic::nvvm_suld_2d_i32_clamp:
  case Intrinsic::nvvm_suld_2d_i64_clamp:
  case Intrinsic::nvvm_suld_2d_v2i8_clamp:
  case Intrinsic::nvvm_suld_2d_v2i16_clamp:
  case Intrinsic::nvvm_suld_2d_v2i32_clamp:
  case Intrinsic::nvvm_suld_2d_v2i64_clamp:
  case Intrinsic::nvvm_suld_2d_v4i8_clamp:
  case Intrinsic::nvvm_suld_2d_v4i16_clamp:
  case Intrinsic::nvvm_suld_2d_v4i32_clamp:
  case Intrinsic::nvvm_suld_2d_array_i8_clamp:
  case Intrinsic::nvvm_suld_2d_array_i16_clamp:
  case Intrinsic::nvvm_suld_2d_array_i32_clamp:
  case Intrinsic::nvvm_suld_2d_array_i64_clamp:
  case Intrinsic::nvvm_suld_2d_array_v2i8_clamp:
  case Intrinsic::nvvm_suld_2d_array_v2i16_clamp:
  case Intrinsic::nvvm_suld_2d_array_v2i32_clamp:
  case Intrinsic::nvvm_suld_2d_array_v2i64_clamp:
  case Intrinsic::nvvm_suld_2d_array_v4i8_clamp:
  case Intrinsic::nvvm_suld_2d_array_v4i16_clamp:
  case Intrinsic::nvvm_suld_2d_array_v4i32_clamp:
  case Intrinsic::nvvm_suld_3d_i8_clamp:
  case Intrinsic::nvvm_suld_3d_i16_clamp:
  case Intrinsic::nvvm_suld_3d_i32_clamp:
  case Intrinsic::nvvm_suld_3d_i64_clamp:
  case Intrinsic::nvvm_suld_3d_v2i8_clamp:
  case Intrinsic::nvvm_suld_3d_v2i16_clamp:
  case Intrinsic::nvvm_suld_3d_v2i32_clamp:
  case Intrinsic::nvvm_suld_3d_v2i64_clamp:
  case Intrinsic::nvvm_suld_3d_v4i8_clamp:
  case Intrinsic::nvvm_suld_3d_v4i16_clamp:
  case Intrinsic::nvvm_suld_3d_v4i32_clamp:
  case Intrinsic::nvvm_suld_1d_i8_trap:
  case Intrinsic::nvvm_suld_1d_i16_trap:
  case Intrinsic::nvvm_suld_1d_i32_trap:
  case Intrinsic::nvvm_suld_1d_i64_trap:
  case Intrinsic::nvvm_suld_1d_v2i8_trap:
  case Intrinsic::nvvm_suld_1d_v2i16_trap:
  case Intrinsic::nvvm_suld_1d_v2i32_trap:
  case Intrinsic::nvvm_suld_1d_v2i64_trap:
  case Intrinsic::nvvm_suld_1d_v4i8_trap:
  case Intrinsic::nvvm_suld_1d_v4i16_trap:
  case Intrinsic::nvvm_suld_1d_v4i32_trap:
  case Intrinsic::nvvm_suld_1d_array_i8_trap:
  case Intrinsic::nvvm_suld_1d_array_i16_trap:
  case Intrinsic::nvvm_suld_1d_array_i32_trap:
  case Intrinsic::nvvm_suld_1d_array_i64_trap:
  case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
  case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
  case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
  case Intrinsic::nvvm_suld_1d_array_v2i64_trap:
  case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
  case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
  case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
  case Intrinsic::nvvm_suld_2d_i8_trap:
  case Intrinsic::nvvm_suld_2d_i16_trap:
  case Intrinsic::nvvm_suld_2d_i32_trap:
  case Intrinsic::nvvm_suld_2d_i64_trap:
  case Intrinsic::nvvm_suld_2d_v2i8_trap:
  case Intrinsic::nvvm_suld_2d_v2i16_trap:
  case Intrinsic::nvvm_suld_2d_v2i32_trap:
  case Intrinsic::nvvm_suld_2d_v2i64_trap:
  case Intrinsic::nvvm_suld_2d_v4i8_trap:
  case Intrinsic::nvvm_suld_2d_v4i16_trap:
  case Intrinsic::nvvm_suld_2d_v4i32_trap:
  case Intrinsic::nvvm_suld_2d_array_i8_trap:
  case Intrinsic::nvvm_suld_2d_array_i16_trap:
  case Intrinsic::nvvm_suld_2d_array_i32_trap:
  case Intrinsic::nvvm_suld_2d_array_i64_trap:
  case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
  case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
  case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
  case Intrinsic::nvvm_suld_2d_array_v2i64_trap:
  case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
  case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
  case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
  case Intrinsic::nvvm_suld_3d_i8_trap:
  case Intrinsic::nvvm_suld_3d_i16_trap:
  case Intrinsic::nvvm_suld_3d_i32_trap:
  case Intrinsic::nvvm_suld_3d_i64_trap:
  case Intrinsic::nvvm_suld_3d_v2i8_trap:
  case Intrinsic::nvvm_suld_3d_v2i16_trap:
  case Intrinsic::nvvm_suld_3d_v2i32_trap:
  case Intrinsic::nvvm_suld_3d_v2i64_trap:
  case Intrinsic::nvvm_suld_3d_v4i8_trap:
  case Intrinsic::nvvm_suld_3d_v4i16_trap:
  case Intrinsic::nvvm_suld_3d_v4i32_trap:
  case Intrinsic::nvvm_suld_1d_i8_zero:
  case Intrinsic::nvvm_suld_1d_i16_zero:
  case Intrinsic::nvvm_suld_1d_i32_zero:
  case Intrinsic::nvvm_suld_1d_i64_zero:
  case Intrinsic::nvvm_suld_1d_v2i8_zero:
  case Intrinsic::nvvm_suld_1d_v2i16_zero:
  case Intrinsic::nvvm_suld_1d_v2i32_zero:
  case Intrinsic::nvvm_suld_1d_v2i64_zero:
  case Intrinsic::nvvm_suld_1d_v4i8_zero:
  case Intrinsic::nvvm_suld_1d_v4i16_zero:
  case Intrinsic::nvvm_suld_1d_v4i32_zero:
  case Intrinsic::nvvm_suld_1d_array_i8_zero:
  case Intrinsic::nvvm_suld_1d_array_i16_zero:
  case Intrinsic::nvvm_suld_1d_array_i32_zero:
  case Intrinsic::nvvm_suld_1d_array_i64_zero:
  case Intrinsic::nvvm_suld_1d_array_v2i8_zero:
  case Intrinsic::nvvm_suld_1d_array_v2i16_zero:
  case Intrinsic::nvvm_suld_1d_array_v2i32_zero:
  case Intrinsic::nvvm_suld_1d_array_v2i64_zero:
  case Intrinsic::nvvm_suld_1d_array_v4i8_zero:
  case Intrinsic::nvvm_suld_1d_array_v4i16_zero:
  case Intrinsic::nvvm_suld_1d_array_v4i32_zero:
  case Intrinsic::nvvm_suld_2d_i8_zero:
  case Intrinsic::nvvm_suld_2d_i16_zero:
  case Intrinsic::nvvm_suld_2d_i32_zero:
  case Intrinsic::nvvm_suld_2d_i64_zero:
  case Intrinsic::nvvm_suld_2d_v2i8_zero:
  case Intrinsic::nvvm_suld_2d_v2i16_zero:
  case Intrinsic::nvvm_suld_2d_v2i32_zero:
  case Intrinsic::nvvm_suld_2d_v2i64_zero:
  case Intrinsic::nvvm_suld_2d_v4i8_zero:
  case Intrinsic::nvvm_suld_2d_v4i16_zero:
  case Intrinsic::nvvm_suld_2d_v4i32_zero:
  case Intrinsic::nvvm_suld_2d_array_i8_zero:
  case Intrinsic::nvvm_suld_2d_array_i16_zero:
  case Intrinsic::nvvm_suld_2d_array_i32_zero:
  case Intrinsic::nvvm_suld_2d_array_i64_zero:
  case Intrinsic::nvvm_suld_2d_array_v2i8_zero:
  case Intrinsic::nvvm_suld_2d_array_v2i16_zero:
  case Intrinsic::nvvm_suld_2d_array_v2i32_zero:
  case Intrinsic::nvvm_suld_2d_array_v2i64_zero:
  case Intrinsic::nvvm_suld_2d_array_v4i8_zero:
  case Intrinsic::nvvm_suld_2d_array_v4i16_zero:
  case Intrinsic::nvvm_suld_2d_array_v4i32_zero:
  case Intrinsic::nvvm_suld_3d_i8_zero:
  case Intrinsic::nvvm_suld_3d_i16_zero:
  case Intrinsic::nvvm_suld_3d_i32_zero:
  case Intrinsic::nvvm_suld_3d_i64_zero:
  case Intrinsic::nvvm_suld_3d_v2i8_zero:
  case Intrinsic::nvvm_suld_3d_v2i16_zero:
  case Intrinsic::nvvm_suld_3d_v2i32_zero:
  case Intrinsic::nvvm_suld_3d_v2i64_zero:
  case Intrinsic::nvvm_suld_3d_v4i8_zero:
  case Intrinsic::nvvm_suld_3d_v4i16_zero:
  case Intrinsic::nvvm_suld_3d_v4i32_zero:
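// getTgtMemIntrinsic describes the memory behavior of NVVM intrinsics to the
// optimizer; each group below fills in the accessed type (memVT), the pointer
// operand, and the access flags.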
  switch (Intrinsic) {
  case Intrinsic::nvvm_match_all_sync_i32p:
  case Intrinsic::nvvm_match_all_sync_i64p:
    Info.memVT = MVT::i1;
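  // WMMA fragment loads: memVT reflects the per-thread fragment shape, e.g.
  // v8f16 (eight halves per thread) for the f16 A/B operands below.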
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col:
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row:
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_col:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_row:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_row_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_col:
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_row:
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_row_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_col:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_row:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_col:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_row:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_col:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_row:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_row_stride: {
    Info.memVT = MVT::v8f16;
    Info.ptrVal = I.getArgOperand(0);
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_col:
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_col:
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_row:
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_row_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_row_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_row:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_col:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_row:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_row_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_col:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_col:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_row:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_row_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_row_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_row:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_col:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_row:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_row_stride: {
    Info.memVT = MVT::v2i32;
    Info.ptrVal = I.getArgOperand(0);
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_col:
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_col:
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_row:
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_row_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_row_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_row:
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_col:
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_row:
  case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_row_stride:
  case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_col:
  case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_row:
  case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_col:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_col:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_row:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_row:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_col:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_row:
  case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_row_stride:
  case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_col:
  case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_row:
  case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_row_stride:
  case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x4_b16:
  case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x4_trans_b16: {
    Info.memVT = MVT::v4i32;
    Info.ptrVal = I.getArgOperand(0);
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_col:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_col:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_row:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_row_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_row_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_row:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_col:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_col:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_row:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_row:
  case Intrinsic::nvvm_wmma_m8n8k128_load_a_b1_row:
  case Intrinsic::nvvm_wmma_m8n8k128_load_a_b1_row_stride:
  case Intrinsic::nvvm_wmma_m8n8k128_load_b_b1_col:
  case Intrinsic::nvvm_wmma_m8n8k128_load_b_b1_col_stride:
  case Intrinsic::nvvm_wmma_m8n8k32_load_a_s4_row:
  case Intrinsic::nvvm_wmma_m8n8k32_load_a_s4_row_stride:
  case Intrinsic::nvvm_wmma_m8n8k32_load_a_u4_row_stride:
  case Intrinsic::nvvm_wmma_m8n8k32_load_a_u4_row:
  case Intrinsic::nvvm_wmma_m8n8k32_load_b_s4_col:
  case Intrinsic::nvvm_wmma_m8n8k32_load_b_s4_col_stride:
  case Intrinsic::nvvm_wmma_m8n8k32_load_b_u4_col_stride:
  case Intrinsic::nvvm_wmma_m8n8k32_load_b_u4_col:
  case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x1_b16:
  case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x1_trans_b16: {
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
  case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_col:
  case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_row:
  case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_row_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_col:
  case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_row:
  case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_col:
  case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_row:
  case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_row_stride: {
    Info.memVT = MVT::v4f16;
    Info.ptrVal = I.getArgOperand(0);
  case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_col:
  case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_row:
  case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_row_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_col:
  case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_row:
  case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_col:
  case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_row:
  case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_row_stride:
  case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_col:
  case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_row:
  case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_row_stride: {
    Info.memVT = MVT::v8f32;
    Info.ptrVal = I.getArgOperand(0);
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_col:
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_row:
  case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_col:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_row:
  case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_row_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_col:
  case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_row:
  case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_row_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_col:
  case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_row:
  case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_col:
  case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_row:
  case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_row_stride: {
    Info.memVT = MVT::v8i32;
    Info.ptrVal = I.getArgOperand(0);
  case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_col:
  case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_col_stride:
  case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_row:
  case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_row_stride:
  case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_col:
  case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_col_stride:
  case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_row:
  case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_row_stride:
  case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x2_b16:
  case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x2_trans_b16: {
    Info.memVT = MVT::v2i32;
    Info.ptrVal = I.getArgOperand(0);
  case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_col:
  case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_col_stride:
  case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_row:
  case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_row_stride:
  case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_col:
  case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_col_stride:
  case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_row:
  case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_row_stride: {
    Info.memVT = MVT::f64;
    Info.ptrVal = I.getArgOperand(0);
  case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_col:
  case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_col_stride:
  case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_row:
  case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_row_stride: {
    Info.memVT = MVT::v2f64;
    Info.ptrVal = I.getArgOperand(0);
  case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_col:
  case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_row:
  case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_row_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_col:
  case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_row:
  case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_col:
  case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_row:
  case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_row_stride: {
    Info.memVT = MVT::v4f16;
    Info.ptrVal = I.getArgOperand(0);
  case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_col:
  case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_row:
  case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_row_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_col:
  case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_row:
  case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_col:
  case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_row:
  case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_row_stride:
  case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_col:
  case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_row:
  case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_row_stride: {
    Info.memVT = MVT::v8f32;
    Info.ptrVal = I.getArgOperand(0);
  case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_col:
  case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_col_stride:
  case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_row:
  case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_row_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_col:
  case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_col_stride:
  case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_row:
  case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_row_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_col:
  case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_col_stride:
  case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_row:
  case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_row_stride: {
    Info.memVT = MVT::v8i32;
    Info.ptrVal = I.getArgOperand(0);
  case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_col:
  case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_col_stride:
  case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_row:
  case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_row_stride:
  case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_col:
  case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_col_stride:
  case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_row:
  case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_row_stride: {
    Info.memVT = MVT::v2i32;
    Info.ptrVal = I.getArgOperand(0);
  case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_col:
  case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_col_stride:
  case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_row:
  case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_row_stride: {
    Info.memVT = MVT::v2f64;
    Info.ptrVal = I.getArgOperand(0);
  case Intrinsic::nvvm_atomic_load_inc_32:
  case Intrinsic::nvvm_atomic_load_dec_32:
  case Intrinsic::nvvm_atomic_add_gen_f_cta:
  case Intrinsic::nvvm_atomic_add_gen_f_sys:
  case Intrinsic::nvvm_atomic_add_gen_i_cta:
  case Intrinsic::nvvm_atomic_add_gen_i_sys:
  case Intrinsic::nvvm_atomic_and_gen_i_cta:
  case Intrinsic::nvvm_atomic_and_gen_i_sys:
  case Intrinsic::nvvm_atomic_cas_gen_i_cta:
  case Intrinsic::nvvm_atomic_cas_gen_i_sys:
  case Intrinsic::nvvm_atomic_dec_gen_i_cta:
  case Intrinsic::nvvm_atomic_dec_gen_i_sys:
  case Intrinsic::nvvm_atomic_inc_gen_i_cta:
  case Intrinsic::nvvm_atomic_inc_gen_i_sys:
  case Intrinsic::nvvm_atomic_max_gen_i_cta:
  case Intrinsic::nvvm_atomic_max_gen_i_sys:
  case Intrinsic::nvvm_atomic_min_gen_i_cta:
  case Intrinsic::nvvm_atomic_min_gen_i_sys:
  case Intrinsic::nvvm_atomic_or_gen_i_cta:
  case Intrinsic::nvvm_atomic_or_gen_i_sys:
  case Intrinsic::nvvm_atomic_exch_gen_i_cta:
  case Intrinsic::nvvm_atomic_exch_gen_i_sys:
  case Intrinsic::nvvm_atomic_xor_gen_i_cta:
  case Intrinsic::nvvm_atomic_xor_gen_i_sys: {
    auto &DL = I.getDataLayout();
    Info.ptrVal = I.getArgOperand(0);
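  // ldu/ldg: the loaded type follows the intrinsic's result type and the
  // alignment comes from the constant second argument.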
  case Intrinsic::nvvm_ldu_global_i:
  case Intrinsic::nvvm_ldu_global_f:
  case Intrinsic::nvvm_ldu_global_p: {
    auto &DL = I.getDataLayout();
    if (Intrinsic == Intrinsic::nvvm_ldu_global_i)
    else if (Intrinsic == Intrinsic::nvvm_ldu_global_p)
    Info.ptrVal = I.getArgOperand(0);
    Info.align = cast<ConstantInt>(I.getArgOperand(1))->getMaybeAlignValue();
  case Intrinsic::nvvm_ldg_global_i:
  case Intrinsic::nvvm_ldg_global_f:
  case Intrinsic::nvvm_ldg_global_p: {
    auto &DL = I.getDataLayout();
    if (Intrinsic == Intrinsic::nvvm_ldg_global_i)
    else if (Intrinsic == Intrinsic::nvvm_ldg_global_p)
    Info.ptrVal = I.getArgOperand(0);
    Info.align = cast<ConstantInt>(I.getArgOperand(1))->getMaybeAlignValue();
4749 case Intrinsic::nvvm_tex_1d_v4f32_s32:
4750 case Intrinsic::nvvm_tex_1d_v4f32_f32:
4751 case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
4752 case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
4753 case Intrinsic::nvvm_tex_1d_array_v4f32_s32:
4754 case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
4755 case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
4756 case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
4757 case Intrinsic::nvvm_tex_2d_v4f32_s32:
4758 case Intrinsic::nvvm_tex_2d_v4f32_f32:
4759 case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
4760 case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
4761 case Intrinsic::nvvm_tex_2d_array_v4f32_s32:
4762 case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
4763 case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
4764 case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
4765 case Intrinsic::nvvm_tex_3d_v4f32_s32:
4766 case Intrinsic::nvvm_tex_3d_v4f32_f32:
4767 case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
4768 case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:
4769 case Intrinsic::nvvm_tex_cube_v4f32_f32:
4770 case Intrinsic::nvvm_tex_cube_level_v4f32_f32:
4771 case Intrinsic::nvvm_tex_cube_array_v4f32_f32:
4772 case Intrinsic::nvvm_tex_cube_array_level_v4f32_f32:
4773 case Intrinsic::nvvm_tld4_r_2d_v4f32_f32:
4774 case Intrinsic::nvvm_tld4_g_2d_v4f32_f32:
4775 case Intrinsic::nvvm_tld4_b_2d_v4f32_f32:
4776 case Intrinsic::nvvm_tld4_a_2d_v4f32_f32:
4777 case Intrinsic::nvvm_tex_unified_1d_v4f32_s32:
4778 case Intrinsic::nvvm_tex_unified_1d_v4f32_f32:
4779 case Intrinsic::nvvm_tex_unified_1d_level_v4f32_f32:
4780 case Intrinsic::nvvm_tex_unified_1d_grad_v4f32_f32:
4781 case Intrinsic::nvvm_tex_unified_1d_array_v4f32_s32:
4782 case Intrinsic::nvvm_tex_unified_1d_array_v4f32_f32:
4783 case Intrinsic::nvvm_tex_unified_1d_array_level_v4f32_f32:
4784 case Intrinsic::nvvm_tex_unified_1d_array_grad_v4f32_f32:
4785 case Intrinsic::nvvm_tex_unified_2d_v4f32_s32:
4786 case Intrinsic::nvvm_tex_unified_2d_v4f32_f32:
4787 case Intrinsic::nvvm_tex_unified_2d_level_v4f32_f32:
4788 case Intrinsic::nvvm_tex_unified_2d_grad_v4f32_f32:
4789 case Intrinsic::nvvm_tex_unified_2d_array_v4f32_s32:
4790 case Intrinsic::nvvm_tex_unified_2d_array_v4f32_f32:
4791 case Intrinsic::nvvm_tex_unified_2d_array_level_v4f32_f32:
4792 case Intrinsic::nvvm_tex_unified_2d_array_grad_v4f32_f32:
4793 case Intrinsic::nvvm_tex_unified_3d_v4f32_s32:
4794 case Intrinsic::nvvm_tex_unified_3d_v4f32_f32:
4795 case Intrinsic::nvvm_tex_unified_3d_level_v4f32_f32:
4796 case Intrinsic::nvvm_tex_unified_3d_grad_v4f32_f32:
4797 case Intrinsic::nvvm_tex_unified_cube_v4f32_f32:
4798 case Intrinsic::nvvm_tex_unified_cube_level_v4f32_f32:
4799 case Intrinsic::nvvm_tex_unified_cube_array_v4f32_f32:
4800 case Intrinsic::nvvm_tex_unified_cube_array_level_v4f32_f32:
4801 case Intrinsic::nvvm_tex_unified_cube_grad_v4f32_f32:
4802 case Intrinsic::nvvm_tex_unified_cube_array_grad_v4f32_f32:
4803 case Intrinsic::nvvm_tld4_unified_r_2d_v4f32_f32:
4804 case Intrinsic::nvvm_tld4_unified_g_2d_v4f32_f32:
4805 case Intrinsic::nvvm_tld4_unified_b_2d_v4f32_f32:
4806 case Intrinsic::nvvm_tld4_unified_a_2d_v4f32_f32:
4808 Info.memVT = MVT::v4f32;
4809 Info.ptrVal =
nullptr;
  case Intrinsic::nvvm_tex_1d_v4s32_s32:
  case Intrinsic::nvvm_tex_1d_v4s32_f32:
  case Intrinsic::nvvm_tex_1d_level_v4s32_f32:
  case Intrinsic::nvvm_tex_1d_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_1d_array_v4s32_s32:
  case Intrinsic::nvvm_tex_1d_array_v4s32_f32:
  case Intrinsic::nvvm_tex_1d_array_level_v4s32_f32:
  case Intrinsic::nvvm_tex_1d_array_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_2d_v4s32_s32:
  case Intrinsic::nvvm_tex_2d_v4s32_f32:
  case Intrinsic::nvvm_tex_2d_level_v4s32_f32:
  case Intrinsic::nvvm_tex_2d_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_2d_array_v4s32_s32:
  case Intrinsic::nvvm_tex_2d_array_v4s32_f32:
  case Intrinsic::nvvm_tex_2d_array_level_v4s32_f32:
  case Intrinsic::nvvm_tex_2d_array_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_3d_v4s32_s32:
  case Intrinsic::nvvm_tex_3d_v4s32_f32:
  case Intrinsic::nvvm_tex_3d_level_v4s32_f32:
  case Intrinsic::nvvm_tex_3d_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_cube_v4s32_f32:
  case Intrinsic::nvvm_tex_cube_level_v4s32_f32:
  case Intrinsic::nvvm_tex_cube_array_v4s32_f32:
  case Intrinsic::nvvm_tex_cube_array_level_v4s32_f32:
  case Intrinsic::nvvm_tex_cube_v4u32_f32:
  case Intrinsic::nvvm_tex_cube_level_v4u32_f32:
  case Intrinsic::nvvm_tex_cube_array_v4u32_f32:
  case Intrinsic::nvvm_tex_cube_array_level_v4u32_f32:
  case Intrinsic::nvvm_tex_1d_v4u32_s32:
  case Intrinsic::nvvm_tex_1d_v4u32_f32:
  case Intrinsic::nvvm_tex_1d_level_v4u32_f32:
  case Intrinsic::nvvm_tex_1d_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_1d_array_v4u32_s32:
  case Intrinsic::nvvm_tex_1d_array_v4u32_f32:
  case Intrinsic::nvvm_tex_1d_array_level_v4u32_f32:
  case Intrinsic::nvvm_tex_1d_array_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_2d_v4u32_s32:
  case Intrinsic::nvvm_tex_2d_v4u32_f32:
  case Intrinsic::nvvm_tex_2d_level_v4u32_f32:
  case Intrinsic::nvvm_tex_2d_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_2d_array_v4u32_s32:
  case Intrinsic::nvvm_tex_2d_array_v4u32_f32:
  case Intrinsic::nvvm_tex_2d_array_level_v4u32_f32:
  case Intrinsic::nvvm_tex_2d_array_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_3d_v4u32_s32:
  case Intrinsic::nvvm_tex_3d_v4u32_f32:
  case Intrinsic::nvvm_tex_3d_level_v4u32_f32:
  case Intrinsic::nvvm_tex_3d_grad_v4u32_f32:
  case Intrinsic::nvvm_tld4_r_2d_v4s32_f32:
  case Intrinsic::nvvm_tld4_g_2d_v4s32_f32:
  case Intrinsic::nvvm_tld4_b_2d_v4s32_f32:
  case Intrinsic::nvvm_tld4_a_2d_v4s32_f32:
  case Intrinsic::nvvm_tld4_r_2d_v4u32_f32:
  case Intrinsic::nvvm_tld4_g_2d_v4u32_f32:
  case Intrinsic::nvvm_tld4_b_2d_v4u32_f32:
  case Intrinsic::nvvm_tld4_a_2d_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_1d_v4s32_s32:
  case Intrinsic::nvvm_tex_unified_1d_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_1d_level_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_1d_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_1d_array_v4s32_s32:
  case Intrinsic::nvvm_tex_unified_1d_array_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_1d_array_level_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_1d_array_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_2d_v4s32_s32:
  case Intrinsic::nvvm_tex_unified_2d_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_2d_level_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_2d_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_2d_array_v4s32_s32:
  case Intrinsic::nvvm_tex_unified_2d_array_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_2d_array_level_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_2d_array_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_3d_v4s32_s32:
  case Intrinsic::nvvm_tex_unified_3d_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_3d_level_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_3d_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_1d_v4u32_s32:
  case Intrinsic::nvvm_tex_unified_1d_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_1d_level_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_1d_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_1d_array_v4u32_s32:
  case Intrinsic::nvvm_tex_unified_1d_array_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_1d_array_level_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_1d_array_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_2d_v4u32_s32:
  case Intrinsic::nvvm_tex_unified_2d_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_2d_level_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_2d_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_2d_array_v4u32_s32:
  case Intrinsic::nvvm_tex_unified_2d_array_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_2d_array_level_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_2d_array_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_3d_v4u32_s32:
  case Intrinsic::nvvm_tex_unified_3d_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_3d_level_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_3d_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_cube_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_cube_level_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_cube_array_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_cube_array_level_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_cube_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_cube_level_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_cube_array_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_cube_array_level_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_cube_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_cube_grad_v4u32_f32:
  case Intrinsic::nvvm_tex_unified_cube_array_grad_v4s32_f32:
  case Intrinsic::nvvm_tex_unified_cube_array_grad_v4u32_f32:
  case Intrinsic::nvvm_tld4_unified_r_2d_v4s32_f32:
  case Intrinsic::nvvm_tld4_unified_g_2d_v4s32_f32:
  case Intrinsic::nvvm_tld4_unified_b_2d_v4s32_f32:
  case Intrinsic::nvvm_tld4_unified_a_2d_v4s32_f32:
  case Intrinsic::nvvm_tld4_unified_r_2d_v4u32_f32:
  case Intrinsic::nvvm_tld4_unified_g_2d_v4u32_f32:
  case Intrinsic::nvvm_tld4_unified_b_2d_v4u32_f32:
  case Intrinsic::nvvm_tld4_unified_a_2d_v4u32_f32:
    Info.memVT = MVT::v4i32;
    Info.ptrVal = nullptr;
  case Intrinsic::nvvm_suld_1d_i8_clamp:
  case Intrinsic::nvvm_suld_1d_v2i8_clamp:
  case Intrinsic::nvvm_suld_1d_v4i8_clamp:
  case Intrinsic::nvvm_suld_1d_array_i8_clamp:
  case Intrinsic::nvvm_suld_1d_array_v2i8_clamp:
  case Intrinsic::nvvm_suld_1d_array_v4i8_clamp:
  case Intrinsic::nvvm_suld_2d_i8_clamp:
  case Intrinsic::nvvm_suld_2d_v2i8_clamp:
  case Intrinsic::nvvm_suld_2d_v4i8_clamp:
  case Intrinsic::nvvm_suld_2d_array_i8_clamp:
  case Intrinsic::nvvm_suld_2d_array_v2i8_clamp:
  case Intrinsic::nvvm_suld_2d_array_v4i8_clamp:
  case Intrinsic::nvvm_suld_3d_i8_clamp:
  case Intrinsic::nvvm_suld_3d_v2i8_clamp:
  case Intrinsic::nvvm_suld_3d_v4i8_clamp:
  case Intrinsic::nvvm_suld_1d_i8_trap:
  case Intrinsic::nvvm_suld_1d_v2i8_trap:
  case Intrinsic::nvvm_suld_1d_v4i8_trap:
  case Intrinsic::nvvm_suld_1d_array_i8_trap:
  case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
  case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
  case Intrinsic::nvvm_suld_2d_i8_trap:
  case Intrinsic::nvvm_suld_2d_v2i8_trap:
  case Intrinsic::nvvm_suld_2d_v4i8_trap:
  case Intrinsic::nvvm_suld_2d_array_i8_trap:
  case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
  case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
  case Intrinsic::nvvm_suld_3d_i8_trap:
  case Intrinsic::nvvm_suld_3d_v2i8_trap:
  case Intrinsic::nvvm_suld_3d_v4i8_trap:
  case Intrinsic::nvvm_suld_1d_i8_zero:
  case Intrinsic::nvvm_suld_1d_v2i8_zero:
  case Intrinsic::nvvm_suld_1d_v4i8_zero:
  case Intrinsic::nvvm_suld_1d_array_i8_zero:
  case Intrinsic::nvvm_suld_1d_array_v2i8_zero:
  case Intrinsic::nvvm_suld_1d_array_v4i8_zero:
  case Intrinsic::nvvm_suld_2d_i8_zero:
  case Intrinsic::nvvm_suld_2d_v2i8_zero:
  case Intrinsic::nvvm_suld_2d_v4i8_zero:
  case Intrinsic::nvvm_suld_2d_array_i8_zero:
  case Intrinsic::nvvm_suld_2d_array_v2i8_zero:
  case Intrinsic::nvvm_suld_2d_array_v4i8_zero:
  case Intrinsic::nvvm_suld_3d_i8_zero:
  case Intrinsic::nvvm_suld_3d_v2i8_zero:
  case Intrinsic::nvvm_suld_3d_v4i8_zero:
    Info.memVT = MVT::i8;
    Info.ptrVal = nullptr;
  case Intrinsic::nvvm_suld_1d_i16_clamp:
  case Intrinsic::nvvm_suld_1d_v2i16_clamp:
  case Intrinsic::nvvm_suld_1d_v4i16_clamp:
  case Intrinsic::nvvm_suld_1d_array_i16_clamp:
  case Intrinsic::nvvm_suld_1d_array_v2i16_clamp:
  case Intrinsic::nvvm_suld_1d_array_v4i16_clamp:
  case Intrinsic::nvvm_suld_2d_i16_clamp:
  case Intrinsic::nvvm_suld_2d_v2i16_clamp:
  case Intrinsic::nvvm_suld_2d_v4i16_clamp:
  case Intrinsic::nvvm_suld_2d_array_i16_clamp:
  case Intrinsic::nvvm_suld_2d_array_v2i16_clamp:
  case Intrinsic::nvvm_suld_2d_array_v4i16_clamp:
  case Intrinsic::nvvm_suld_3d_i16_clamp:
  case Intrinsic::nvvm_suld_3d_v2i16_clamp:
  case Intrinsic::nvvm_suld_3d_v4i16_clamp:
  case Intrinsic::nvvm_suld_1d_i16_trap:
  case Intrinsic::nvvm_suld_1d_v2i16_trap:
  case Intrinsic::nvvm_suld_1d_v4i16_trap:
  case Intrinsic::nvvm_suld_1d_array_i16_trap:
  case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
  case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
  case Intrinsic::nvvm_suld_2d_i16_trap:
  case Intrinsic::nvvm_suld_2d_v2i16_trap:
  case Intrinsic::nvvm_suld_2d_v4i16_trap:
  case Intrinsic::nvvm_suld_2d_array_i16_trap:
  case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
  case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
  case Intrinsic::nvvm_suld_3d_i16_trap:
  case Intrinsic::nvvm_suld_3d_v2i16_trap:
  case Intrinsic::nvvm_suld_3d_v4i16_trap:
  case Intrinsic::nvvm_suld_1d_i16_zero:
  case Intrinsic::nvvm_suld_1d_v2i16_zero:
  case Intrinsic::nvvm_suld_1d_v4i16_zero:
  case Intrinsic::nvvm_suld_1d_array_i16_zero:
  case Intrinsic::nvvm_suld_1d_array_v2i16_zero:
  case Intrinsic::nvvm_suld_1d_array_v4i16_zero:
  case Intrinsic::nvvm_suld_2d_i16_zero:
  case Intrinsic::nvvm_suld_2d_v2i16_zero:
  case Intrinsic::nvvm_suld_2d_v4i16_zero:
  case Intrinsic::nvvm_suld_2d_array_i16_zero:
  case Intrinsic::nvvm_suld_2d_array_v2i16_zero:
  case Intrinsic::nvvm_suld_2d_array_v4i16_zero:
  case Intrinsic::nvvm_suld_3d_i16_zero:
  case Intrinsic::nvvm_suld_3d_v2i16_zero:
  case Intrinsic::nvvm_suld_3d_v4i16_zero:
    Info.memVT = MVT::i16;
    Info.ptrVal = nullptr;
  case Intrinsic::nvvm_suld_1d_i32_clamp:
  case Intrinsic::nvvm_suld_1d_v2i32_clamp:
  case Intrinsic::nvvm_suld_1d_v4i32_clamp:
  case Intrinsic::nvvm_suld_1d_array_i32_clamp:
  case Intrinsic::nvvm_suld_1d_array_v2i32_clamp:
  case Intrinsic::nvvm_suld_1d_array_v4i32_clamp:
  case Intrinsic::nvvm_suld_2d_i32_clamp:
  case Intrinsic::nvvm_suld_2d_v2i32_clamp:
  case Intrinsic::nvvm_suld_2d_v4i32_clamp:
  case Intrinsic::nvvm_suld_2d_array_i32_clamp:
  case Intrinsic::nvvm_suld_2d_array_v2i32_clamp:
  case Intrinsic::nvvm_suld_2d_array_v4i32_clamp:
  case Intrinsic::nvvm_suld_3d_i32_clamp:
  case Intrinsic::nvvm_suld_3d_v2i32_clamp:
  case Intrinsic::nvvm_suld_3d_v4i32_clamp:
  case Intrinsic::nvvm_suld_1d_i32_trap:
  case Intrinsic::nvvm_suld_1d_v2i32_trap:
  case Intrinsic::nvvm_suld_1d_v4i32_trap:
  case Intrinsic::nvvm_suld_1d_array_i32_trap:
  case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
  case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
  case Intrinsic::nvvm_suld_2d_i32_trap:
  case Intrinsic::nvvm_suld_2d_v2i32_trap:
  case Intrinsic::nvvm_suld_2d_v4i32_trap:
  case Intrinsic::nvvm_suld_2d_array_i32_trap:
  case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
  case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
  case Intrinsic::nvvm_suld_3d_i32_trap:
  case Intrinsic::nvvm_suld_3d_v2i32_trap:
  case Intrinsic::nvvm_suld_3d_v4i32_trap:
  case Intrinsic::nvvm_suld_1d_i32_zero:
  case Intrinsic::nvvm_suld_1d_v2i32_zero:
  case Intrinsic::nvvm_suld_1d_v4i32_zero:
  case Intrinsic::nvvm_suld_1d_array_i32_zero:
  case Intrinsic::nvvm_suld_1d_array_v2i32_zero:
  case Intrinsic::nvvm_suld_1d_array_v4i32_zero:
  case Intrinsic::nvvm_suld_2d_i32_zero:
  case Intrinsic::nvvm_suld_2d_v2i32_zero:
  case Intrinsic::nvvm_suld_2d_v4i32_zero:
  case Intrinsic::nvvm_suld_2d_array_i32_zero:
  case Intrinsic::nvvm_suld_2d_array_v2i32_zero:
  case Intrinsic::nvvm_suld_2d_array_v4i32_zero:
  case Intrinsic::nvvm_suld_3d_i32_zero:
  case Intrinsic::nvvm_suld_3d_v2i32_zero:
  case Intrinsic::nvvm_suld_3d_v4i32_zero:
    Info.memVT = MVT::i32;
    Info.ptrVal = nullptr;
  case Intrinsic::nvvm_suld_1d_i64_clamp:
  case Intrinsic::nvvm_suld_1d_v2i64_clamp:
  case Intrinsic::nvvm_suld_1d_array_i64_clamp:
  case Intrinsic::nvvm_suld_1d_array_v2i64_clamp:
  case Intrinsic::nvvm_suld_2d_i64_clamp:
  case Intrinsic::nvvm_suld_2d_v2i64_clamp:
  case Intrinsic::nvvm_suld_2d_array_i64_clamp:
  case Intrinsic::nvvm_suld_2d_array_v2i64_clamp:
  case Intrinsic::nvvm_suld_3d_i64_clamp:
  case Intrinsic::nvvm_suld_3d_v2i64_clamp:
  case Intrinsic::nvvm_suld_1d_i64_trap:
  case Intrinsic::nvvm_suld_1d_v2i64_trap:
  case Intrinsic::nvvm_suld_1d_array_i64_trap:
  case Intrinsic::nvvm_suld_1d_array_v2i64_trap:
  case Intrinsic::nvvm_suld_2d_i64_trap:
  case Intrinsic::nvvm_suld_2d_v2i64_trap:
  case Intrinsic::nvvm_suld_2d_array_i64_trap:
  case Intrinsic::nvvm_suld_2d_array_v2i64_trap:
  case Intrinsic::nvvm_suld_3d_i64_trap:
  case Intrinsic::nvvm_suld_3d_v2i64_trap:
  case Intrinsic::nvvm_suld_1d_i64_zero:
  case Intrinsic::nvvm_suld_1d_v2i64_zero:
  case Intrinsic::nvvm_suld_1d_array_i64_zero:
  case Intrinsic::nvvm_suld_1d_array_v2i64_zero:
  case Intrinsic::nvvm_suld_2d_i64_zero:
  case Intrinsic::nvvm_suld_2d_v2i64_zero:
  case Intrinsic::nvvm_suld_2d_array_i64_zero:
  case Intrinsic::nvvm_suld_2d_array_v2i64_zero:
  case Intrinsic::nvvm_suld_3d_i64_zero:
  case Intrinsic::nvvm_suld_3d_v2i64_zero:
    Info.memVT = MVT::i64;
    Info.ptrVal = nullptr;
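    // [Editorial sketch] All suld.* cases share one fill pattern: memVT is the
    // element type read from the surface, ptrVal stays null because a surface
    // handle is not an addressable pointer, and the access is a plain load.
    // A hypothetical condensed form of the repeated tails (field names follow
    // TargetLowering::IntrinsicInfo; this is an assumption, not the file's code):
    //   Info.opc = ISD::INTRINSIC_W_CHAIN;
    //   Info.memVT = EltVT;            // i8/i16/i32/i64 per intrinsic family
    //   Info.ptrVal = nullptr;
    //   Info.flags = MachineMemOperand::MOLoad;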
  const Align ABITypeAlign = std::min(Align(128), DL.getABITypeAlign(ArgTy));
  if (!F || !F->hasLocalLinkage() ||
      F->hasAddressTaken(/*Users=*/nullptr,
                         /*IgnoreCallbackUses=*/false,
                         /*IgnoreAssumeLikeCalls=*/true,
                         /*IgnoreLLVMUsed=*/true))
    return ABITypeAlign;
  return std::max(Align(16), ABITypeAlign);
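// [Editorial sketch] Worked example of the rule above: for a byval struct
// whose ABI alignment is 4, a function with external linkage (or whose address
// escapes) keeps Align(4), while a purely local function gets the optimized
// std::max(Align(16), Align(4)) == Align(16); everything is capped at
// Align(128) by the std::min above.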
  Align ArgAlign = InitialAlign;
  ArgAlign = std::max(ArgAlign, Align(4));
  std::string ParamName;
  ParamStr << "_vararg";
  ParamStr << "_param_" << Idx;
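  // [Editorial sketch] For a function @foo, the naming scheme above yields
  // parameter symbols foo_param_0, foo_param_1, ... and foo_vararg for the
  // variadic tail, which is how the parameters appear in the emitted PTX, e.g.:
  //   .visible .func foo(.param .b32 foo_param_0, .param .b64 foo_param_1)
  // (the exact .param directives here are an illustrative assumption).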
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
std::pair<unsigned, const TargetRegisterClass *>
NVPTXTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                  StringRef Constraint,
                                                  MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'b':
      return std::make_pair(0U, &NVPTX::Int1RegsRegClass);
    case 'c':
      return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
    case 'h':
      return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
    case 'r':
      return std::make_pair(0U, &NVPTX::Int32RegsRegClass);
    case 'l':
    case 'N':
      return std::make_pair(0U, &NVPTX::Int64RegsRegClass);
    case 'q': {
      if (STI.getSmVersion() < 70)
        report_fatal_error("Inline asm with 128 bit operands is only "
                           "supported for sm_70 and higher!");
      return std::make_pair(0U, &NVPTX::Int128RegsRegClass);
    }
    case 'f':
      return std::make_pair(0U, &NVPTX::Float32RegsRegClass);
    case 'd':
      return std::make_pair(0U, &NVPTX::Float64RegsRegClass);
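    // [Editorial sketch] How these constraint letters surface in user code;
    // the 'r' constraint below binds x and y to 32-bit registers
    // (Int32RegsRegClass):
    //   int x, y = 42;
    //   asm("mov.u32 %0, %1;" : "=r"(x) : "r"(y));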
  return F.getFnAttribute("unsafe-fp-math").getValueAsBool();
  const auto *Const = dyn_cast<ConstantSDNode>(Operand);
  return Const && Const->getZExtValue() == 0;
  if (M->getOpcode() != ISD::MUL || !M.getNode()->hasOneUse())
                            M->getOperand(0), M->getOperand(1), N1);
                     ((ZeroOpNum == 1) ? N1 : MAD),
                     ((ZeroOpNum == 1) ? MAD : N1));
  int nonAddCount = 0;
  int orderNo = N->getIROrder();
  if (orderNo - orderNo2 < 500)
  bool opIsLive = false;
  if (isa<ConstantSDNode>(left) || isa<ConstantSDNode>(right))
    int orderNo3 = User->getIROrder();
    if (orderNo3 > orderNo) {
    int orderNo3 = User->getIROrder();
    if (orderNo3 > orderNo) {
  if (all_of(N->ops().drop_front(Front).drop_back(Back),
             [](const SDUse &U) { return U.get()->isUndef(); }))
    return N->getOperand(0);
  if (VT.isVector() || VT != MVT::i32)
  if (VT.isVector() || !(VT == MVT::f32 || VT == MVT::f64))
  if (isa<ConstantSDNode>(Val)) {
    ConstantSDNode *BFEBits = dyn_cast<ConstantSDNode>(BFE.getOperand(0));
    if (MaskVal != (uint64_t(1) << BFEBitsVal) - 1)
  if (MaskVal != 0xff) {
    MemSDNode *Mem = dyn_cast<MemSDNode>(Val);
    if (MemVT != MVT::v2i8 && MemVT != MVT::v4i8) {
  if (AExt.getNode() != nullptr) {
  EVT VT = N->getValueType(0);
  const SDValue &Num = N->getOperand(0);
  const SDValue &Den = N->getOperand(1);
  if (U->getOpcode() == DivOpc && U->getOperand(0) == Num &&
      U->getOperand(1) == Den) {
  EVT OrigVT = Op.getOperand(0).getValueType();
  EVT OrigVT = Op.getOperand(0).getValueType();
  IsSigned = (LHSSign == Signed);
  const APInt &Val = CI->getAPIntValue();
  return Val.isIntN(OptSize);
  return LHSSign == RHSSign;
  EVT MulType = N->getValueType(0);
  if (MulType != MVT::i32 && MulType != MVT::i64) {
  if (isa<ConstantSDNode>(LHS)) {
  if (MulType == MVT::i32) {
    DemotedVT = MVT::i16;
    DemotedVT = MVT::i32;
  return DCI.DAG.getNode(Opc, DL, MulType, TruncLHS, TruncRHS);
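// [Editorial sketch] The demotion above turns a full-width multiply whose
// operands provably fit in half the bits into PTX's widening multiply.
// Assuming i16 inputs, the IR pattern is:
//   %w = mul i32 (sext i16 %a to i32), (sext i16 %b to i32)
// which can select to a single "mul.wide.s16" instead of two extensions
// followed by a 32-bit "mul.lo.s32".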
  const auto *Const = dyn_cast<ConstantSDNode>(Operand);
  return Const && Const->getZExtValue() == 1;
  return Add->getOperand(1);
  return Add->getOperand(0);
                        (ConstOpNo == 1) ? X : NewMul,
                        (ConstOpNo == 1) ? NewMul : X);
  if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
  EVT CCType = N->getValueType(0);
  EVT AType = A.getValueType();
  if (!(CCType == MVT::v2i1 && (AType == MVT::v2f16 || AType == MVT::v2bf16)))
  if (A.getValueType() == MVT::v2bf16 && SmVersion < 90)
      DL, DCI.DAG.getVTList(MVT::i1, MVT::i1), {A, B, N->getOperand(2)});
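// [Editorial sketch] The combine above rewrites a packed compare so a single
// target node yields both lanes; for the guarded types, an IR input like:
//   %c = fcmp olt <2 x half> %a, %b   ; a v2i1 setcc on v2f16 operands
// becomes one two-result node (matching the {i1, i1} VT list built above)
// instead of two scalar compares; v2bf16 takes this path only on sm_90+.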
      VectorVT == MVT::v4i8 || VectorVT == MVT::v8i8)
  if (!(VectorBits == 16 || VectorBits == 32 || VectorBits == 64))
  if (EltVT != EltIVT)
  if (EltVT != N->getValueType(0))
  if (VectorVT != MVT::v4i8)
  for (int I = 0; I < 4; ++I) {
  EVT VT = N->getValueType(0);
  if (VT != MVT::v16i8)
  EVT NewVT = MVT::v4i32;
  EVT RetVTs[] = {EltVT, EltVT, EltVT, EltVT, MVT::Other};
      LD->getMemOperand());
  for (unsigned i = 0; i < NumElts; i++)
                                               DAGCombinerInfo &DCI) const {
  switch (N->getOpcode()) {
  EVT ResVT = N->getValueType(0);
  Align Alignment = LD->getAlign();
  if (Alignment < PrefAlign) {
  bool NeedTrunc = false;
  unsigned Opcode = 0;
  bool Load16x2 = false;
    LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
    EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
    EVT ListVTs[] = {VVT, VVT, VVT, VVT, MVT::Other};
      LD->getMemOperand());
  for (unsigned i = 0; i < NumElts; ++i) {
  for (unsigned i = 0; i < NumElts; ++i) {
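// [Editorial sketch] ReplaceLoadVector splits one wide vector load into a
// multi-result NVPTX load node; assuming a v4f32 load, the rewrite is roughly:
//   before: (v4f32, ch) = load<(load 16)> %ptr
//   after:  (f32, f32, f32, f32, ch) = NVPTXISD::LoadV4 %ptr
// followed by rebuilding the vector from the four scalar results, matching
// the {EltVT, EltVT, EltVT, EltVT, MVT::Other} VT lists built above.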
  case Intrinsic::nvvm_ldg_global_i:
  case Intrinsic::nvvm_ldg_global_f:
  case Intrinsic::nvvm_ldg_global_p:
  case Intrinsic::nvvm_ldu_global_i:
  case Intrinsic::nvvm_ldu_global_f:
  case Intrinsic::nvvm_ldu_global_p: {
    EVT ResVT = N->getValueType(0);
    bool NeedTrunc = false;
    unsigned Opcode = 0;
      case Intrinsic::nvvm_ldg_global_i:
      case Intrinsic::nvvm_ldg_global_f:
      case Intrinsic::nvvm_ldg_global_p:
      case Intrinsic::nvvm_ldu_global_i:
      case Intrinsic::nvvm_ldu_global_f:
      case Intrinsic::nvvm_ldu_global_p:
      LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
      case Intrinsic::nvvm_ldg_global_i:
      case Intrinsic::nvvm_ldg_global_f:
      case Intrinsic::nvvm_ldg_global_p:
      case Intrinsic::nvvm_ldu_global_i:
      case Intrinsic::nvvm_ldu_global_f:
      case Intrinsic::nvvm_ldu_global_p:
      EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
    OtherOps.append(N->op_begin() + 2, N->op_end());
    for (unsigned i = 0; i < NumElts; ++i) {
           "Custom handling of non-i8 ldu/ldg?");
  assert(Reg.getValueType() == MVT::i128 &&
         "Custom lowering for CopyFromReg with 128-bit reg only");
                    N->getValueType(2)};
void NVPTXTargetLowering::ReplaceNodeResults(
    SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  auto ITy = cast<llvm::IntegerType>(Ty);
  switch (ITy->getBitWidth()) {
  switch (ITy->getBitWidth()) {
amdgpu AMDGPU Register Bank Select
This file implements a class to represent arbitrary precision integral constant values and operations...
static SDValue PerformLOADCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformADDCombineWithOperands - Try DAG combinations for an ADD with operands N0 and N1.
static SDValue PerformADDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
static SDValue PerformVSELECTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static SDValue PerformMULCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
static SDValue PerformANDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
Analysis containing CSE Info
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
This file contains the declarations of entities that describe floating point environment and related ...
static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
Return the first found DebugLoc that has a DILocation, given a range of instructions.
unsigned const TargetRegisterInfo * TRI
Module.h This file contains the declarations for the Module class.
static cl::opt< bool > sched4reg("nvptx-sched4reg", cl::desc("NVPTX Specific: schedule for register pressue"), cl::init(false))
static SDValue PerformEXTRACTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static bool isConstOne(const SDValue &Operand)
static cl::opt< unsigned > FMAContractLevelOpt("nvptx-fma-level", cl::Hidden, cl::desc("NVPTX Specific: FMA contraction (0: don't do it" " 1: do it 2: do it aggressively"), cl::init(2))
static bool IsPTXVectorType(MVT VT)
static cl::opt< int > UsePrecDivF32("nvptx-prec-divf32", cl::Hidden, cl::desc("NVPTX Specifies: 0 use div.approx, 1 use div.full, 2 use" " IEEE Compliant F32 div.rnd if available."), cl::init(2))
static SDValue PerformStoreParamCombine(SDNode *N)
static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results)
ReplaceVectorLoad - Convert vector loads into multi-output scalar loads.
static unsigned getOpcForSurfaceInstr(unsigned Intrinsic)
static void ReplaceCopyFromReg_128(SDNode *N, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results)
static bool Is16bitsType(MVT VT)
static SDValue combineMADConstOne(SDValue X, SDValue Add, EVT VT, SDLoc DL, TargetLowering::DAGCombinerInfo &DCI)
static bool IsTypePassedAsArray(const Type *Ty)
static SmallVector< ParamVectorizationFlags, 16 > VectorizePTXValueVTs(const SmallVectorImpl< EVT > &ValueVTs, const SmallVectorImpl< uint64_t > &Offsets, Align ParamAlignment, bool IsVAArg=false)
static unsigned CanMergeParamLoadStoresStartingAt(unsigned Idx, uint32_t AccessSize, const SmallVectorImpl< EVT > &ValueVTs, const SmallVectorImpl< uint64_t > &Offsets, Align ParamAlignment)
static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results)
static SDValue PerformFADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
static unsigned getOpcForTextureInstr(unsigned Intrinsic)
static bool isConstZero(const SDValue &Operand)
static SDValue LowerVectorArith(SDValue Op, SelectionDAG &DAG)
static void ComputePTXValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< uint64_t > *Offsets=nullptr, uint64_t StartingOffset=0)
ComputePTXValueVTs - For the given Type Ty, returns the set of primitive EVTs that compose it.
static bool IsMulWideOperandDemotable(SDValue Op, unsigned OptSize, OperandSignedness &S)
IsMulWideOperandDemotable - Checks if the provided DAG node is an operand that can be demoted to OptS...
static SDValue LowerUnalignedStoreParam(SelectionDAG &DAG, SDValue Chain, uint64_t Offset, EVT ElementType, SDValue StVal, SDValue &InGlue, unsigned ArgID, const SDLoc &dl)
static SDValue PerformREMCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
static SDValue PerformMULCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI)
static SDValue PerformStoreRetvalCombine(SDNode *N)
static bool AreMulWideOperandsDemotable(SDValue LHS, SDValue RHS, unsigned OptSize, bool &IsSigned)
AreMulWideOperandsDemotable - Checks if the given LHS and RHS operands can be demoted to OptSize bits...
static SDValue PerformStoreCombineHelper(SDNode *N, std::size_t Front, std::size_t Back)
static bool adjustElementType(EVT &ElementType)
static SDValue TryMULWIDECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
TryMULWIDECombine - Attempt to replace a multiply of M bits with a multiply of M/2 bits that produces...
static SDValue combineMulSelectConstOne(SDValue X, SDValue Select, EVT VT, SDLoc DL, TargetLowering::DAGCombinerInfo &DCI)
static SDValue matchMADConstOnePattern(SDValue Add)
static cl::opt< bool > UsePrecSqrtF32("nvptx-prec-sqrtf32", cl::Hidden, cl::desc("NVPTX Specific: 0 use sqrt.approx, 1 use sqrt.rn."), cl::init(true))
static SDValue LowerUnalignedStoreRet(SelectionDAG &DAG, SDValue Chain, uint64_t Offset, EVT ElementType, SDValue RetVal, const SDLoc &dl)
static bool PromoteScalarIntegerPTX(const EVT &VT, MVT *PromotedVT)
PromoteScalarIntegerPTX Used to make sure the arguments/returns are suitable for passing and promote ...
static SDValue PerformSETCCCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, unsigned int SmVersion)
static SDValue LowerUnalignedLoadRetParam(SelectionDAG &DAG, SDValue &Chain, uint64_t Offset, EVT ElementType, SDValue &InGlue, SmallVectorImpl< SDValue > &TempProxyRegOps, const SDLoc &dl)
static std::atomic< unsigned > GlobalUniqueCallSite
static cl::opt< bool > ForceMinByValParamAlign("nvptx-force-min-byval-param-align", cl::Hidden, cl::desc("NVPTX Specific: force 4-byte minimal alignment for byval" " params of device functions."), cl::init(false))
static SDValue PerformSHLCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
PerformSHLCombine - Runs PTX-specific DAG combine patterns on SHL nodes.
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallVector class.
This file describes how to lower LLVM code to machine code.
Class for arbitrary precision integers.
bool isSignedIntN(unsigned N) const
Check if this APInt has an N-bits signed integer value.
bool slt(const APInt &RHS) const
Signed less than comparison.
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
const T & back() const
back - Get the last element.
ArrayRef< T > drop_back(size_t N=1) const
Drop the last N elements of the array.
bool empty() const
empty - Check if the array is empty.
an instruction that atomically reads a memory location, combines it with another value,...
@ Min
*p = old <signed v ? old : v
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ UMax
*p = old >unsigned v ? old : v
bool isFloatingPointOperation() const
BinOp getOperation() const
bool hasParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Return true if the attribute exists for the given argument.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
This class represents a function call, abstracting a target machine's calling convention.
uint64_t getZExtValue() const
const APInt & getAPIntValue() const
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
This class represents an Operation in the Expression.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
Diagnostic information for unsupported feature in backend.
void addFnAttr(Attribute::AttrKind Kind)
Add function attributes to this function.
Type * getReturnType() const
Returns the type of the ret val.
unsigned getAddressSpace() const
const GlobalValue * getGlobal() const
This is an important class for using LLVM in a threaded context.
void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
This class is used to represent ISD::LOAD nodes.
MCSection * getDataSection() const
Instances of this class represent a uniqued identifier for a section in the current translation unit.
StringRef getName() const
getName - Get the symbol name.
unsigned getVectorNumElements() const
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
static auto integer_valuetypes()
static auto fixedlen_vector_valuetypes()
static MVT getIntegerVT(unsigned BitWidth)
MVT getScalarType() const
If this is a vector, return the element type, otherwise return this.
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
Function & getFunction()
Return the LLVM function that this machine code represents.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineJumpTableInfo * getJumpTableInfo() const
getJumpTableInfo - Return the jump table info object for the current function.
@ EK_Inline
EK_Inline - Jump table entries are emitted inline at their point of use.
const std::vector< MachineJumpTableEntry > & getJumpTables() const
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand.
This is an abstract virtual class for memory operations.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
EVT getMemoryVT() const
Return the type of the in-memory value.
unsigned getMaxRequiredAlignment() const
bool hasAtomMinMax64() const
bool hasAtomAddF64() const
const NVPTXTargetLowering * getTargetLowering() const override
unsigned getPTXVersion() const
const NVPTXRegisterInfo * getRegisterInfo() const override
unsigned int getSmVersion() const
bool hasAtomBitwise64() const
bool allowFP16Math() const
ConstraintType getConstraintType(StringRef Constraint) const override
getConstraintType - Given a constraint letter, return the type of constraint it is for this target.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
const NVPTXTargetMachine * nvTM
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
NVPTXTargetLowering(const NVPTXTargetMachine &TM, const NVPTXSubtarget &STI)
bool useF32FTZ(const MachineFunction &MF) const
Align getFunctionArgumentAlignment(const Function *F, Type *Ty, unsigned Idx, const DataLayout &DL) const
SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled, int &ExtraSteps, bool &UseOneConst, bool Reciprocal) const override
Hooks for building estimates in place of slower divisions and square roots.
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &dl, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array,...
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
Lower the specified operand into the Ops vector.
std::string getParamName(const Function *F, int Idx) const
TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const override
Return the preferred vector type legalization action.
std::string getPrototype(const DataLayout &DL, Type *, const ArgListTy &, const SmallVectorImpl< ISD::OutputArg > &, MaybeAlign retAlignment, std::optional< std::pair< unsigned, const APInt & > > VAInfo, const CallBase &CB, unsigned UniqueCallSite) const
Align getFunctionParamOptimizedAlign(const Function *F, Type *ArgTy, const DataLayout &DL) const
getFunctionParamOptimizedAlign - since function arguments are passed via .param space,...
SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx, EVT VT) const override
Return the ValueType of the result of SETCC operations.
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...
AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
Align getFunctionByValParamAlign(const Function *F, Type *ArgTy, Align InitialAlign, const DataLayout &DL) const
Helper for computing alignment of a device function byval parameter.
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, MachineFunction &MF, unsigned Intrinsic) const override
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
bool allowFMA(MachineFunction &MF, CodeGenOptLevel OptLevel) const
bool usePrecSqrtF32() const
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
bool allowUnsafeFPMath(MachineFunction &MF) const
int getDivF32Level() const
SDValue LowerCall(CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
UniqueStringSaver & getStrPool() const
MCSection * SelectSectionForGlobal(const GlobalObject *GO, SectionKind Kind, const TargetMachine &TM) const override
~NVPTXTargetObjectFile() override
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
const APInt & getAsAPIntVal() const
Helper method returns the APInt value of a ConstantSDNode.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool hasOneUse() const
Return true if there is exactly one use of this node.
unsigned getIROrder() const
Return the node ordering.
iterator_range< use_iterator > uses()
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumOperands() const
Return the number of values used by this operation.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
SDVTList getVTList() const
const SDValue & getOperand(unsigned Num) const
uint64_t getConstantOperandVal(unsigned Num) const
Helper method returns the integer value of a ConstantSDNode operand.
const APInt & getConstantOperandAPInt(unsigned Num) const
Helper method returns the APInt of a ConstantSDNode operand.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
bool isUndef() const
Return true if the type of the node type undefined.
Represents a use of a SDNode.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
SectionKind - This is a simple POD value that classifies the properties of a section.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
SDValue getSymbolFunctionGlobalAddress(SDValue Op, Function **TargetFunction=nullptr)
Return a GlobalAddress of the function from the current module with name matching the given ExternalS...
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
const TargetLowering & getTargetLoweringInfo() const
SDNode * MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs, ArrayRef< SDValue > Ops)
This mutates the specified node to have the specified return type, opcode, and operands.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
const DataLayout & getDataLayout() const
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
void RemoveDeadNode(SDNode *N)
Remove the specified node from the system.
SDValue getBasicBlock(MachineBasicBlock *MBB)
SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond)
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
MachineFunction & getMachineFunction() const
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
LLVMContext * getContext() const
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
ArrayRef< int > getMask() const
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void assign(size_type NumElts, ValueParamT Elt)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e.
constexpr size_t size() const
size - Get the string size.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Class to represent struct types.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
void setMaxDivRemBitWidthSupported(unsigned SizeInBits)
Set the size in bits of the maximum div/rem the backend supports.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used...
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
const TargetMachine & getTargetMachine() const
LegalizeTypeAction
This enum indicates whether a types are legal for a target, and if not, what action should be used to...
void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth)
Tells the code generator which bitwidths to bypass.
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const
Return the preferred vector type legalization action.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
@ ZeroOrNegativeOneBooleanContent
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
Align getMinStackArgumentAlignment() const
Return the minimum stack alignment of an argument.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
std::vector< ArgListEntry > ArgListTy
bool allowsMemoryAccessForAlignment(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const
This function returns true if the memory access is aligned or if the target allows this specific unal...
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
void setJumpIsExpensive(bool isExpensive=true)
Tells the code generator not to expand logic operations on comparison predicates into separate sequen...
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger siz...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const
Expands an unaligned store to 2 half-size stores for integer values, and possibly more for vectors.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
std::pair< SDValue, SDValue > expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Expands an unaligned load to 2 half-size loads for an integer, and possibly more for vectors.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
SDValue expandRoundInexactToOdd(EVT ResultVT, SDValue Op, const SDLoc &DL, SelectionDAG &DAG) const
Truncate Op to ResultVT.
SDValue expandFP_ROUND(SDNode *Node, SelectionDAG &DAG) const
Expand round(fp) to fp conversion.
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
Primary interface to the complete machine description for the target machine.
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
MCSymbol * getSymbol(const GlobalValue *GV) const
unsigned UnsafeFPMath
UnsafeFPMath - This flag is enabled when the -enable-unsafe-fp-math flag is specified on the command ...
FPOpFusion::FPOpFusionMode AllowFPOpFusion
AllowFPOpFusion - This flag is set by the -fp-contract=xxx option.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
bool isBFloatTy() const
Return true if this is 'bfloat', a 16-bit bfloat type.
@ VoidTyID
type with no size
bool isAggregateType() const
Return true if the type is an aggregate type.
bool isHalfTy() const
Return true if this is 'half', a 16-bit IEEE fp type.
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
bool isIntegerTy() const
True if this is an instance of IntegerType.
TypeID getTypeID() const
Return the type id for the type.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
StringRef save(const char *S)
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ BSWAP
Byte Swap and Counting operators.
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ SIGN_EXTEND
Conversion operators.
@ READSTEADYCOUNTER
READSTEADYCOUNTER - This corresponds to the readfixedcounter intrinsic.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ SSUBO
Same for subtraction.
@ BRIND
BRIND - Indirect branch.
@ BR_JT
BR_JT - Jumptable branch.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ UNDEF
UNDEF - An undefined node.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
@ BF16_TO_FP
BF16_TO_FP, FP_TO_BF16 - These operators are used to perform promotions and truncation for bfloat16.
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0.0.
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic function with no side effects.
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF), and returns VAL otherwise.
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the destination VT.
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
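std::copysign has the same contract, which makes the semantics easy to demonstrate (illustrative only):

#include <cmath>
#include <cstdio>

int main() {
  std::printf("%g\n", std::copysign(3.0, -0.0)); // -3: magnitude of X, sign of Y
  return 0;
}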
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ SADDO_CARRY
Carry-using overflow-aware nodes for multiple precision addition and subtraction.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target intrinsic function with side effects that returns a result.
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified, possibly variable, elements.
bool allOperandsUndef(const SDNode *N)
Return true if the node has at least one operand and all operands of the specified node are ISD::UNDEF.
@ Bitcast
Perform the operation on a different, but equivalently sized type.
@ TexUnified1DS32FloatLevel
@ Tex1DArrayFloatFloatLevel
@ TexUnified2DU32FloatGrad
@ Tld4UnifiedG2DFloatFloat
@ TexUnifiedCubeArrayFloatFloatLevel
@ Tld4UnifiedR2DFloatFloat
@ Tex2DArrayS32FloatLevel
@ TexUnified1DArrayFloatFloatLevel
@ TexUnified2DFloatFloatLevel
@ TexUnified3DFloatFloatLevel
@ TexUnified1DFloatFloatLevel
@ TexUnified2DArrayU32Float
@ TexUnified1DArrayFloatFloat
@ Tex1DArrayFloatFloatGrad
@ TexUnifiedCubeArrayU32FloatGrad
@ TexUnified1DFloatFloatGrad
@ TexUnifiedCubeFloatFloatGrad
@ TexUnified2DArrayFloatFloat
@ TexUnified3DU32FloatLevel
@ TexUnified1DArrayU32Float
@ TexUnified2DArrayFloatFloatLevel
@ TexUnified2DFloatFloatGrad
@ TexUnified2DArrayU32S32
@ TexUnifiedCubeArrayS32FloatLevel
@ TexUnified1DArrayS32Float
@ TexUnified1DArrayS32FloatLevel
@ TexUnified2DS32FloatLevel
@ TexUnified3DU32FloatGrad
@ TexUnifiedCubeU32FloatLevel
@ TexUnified2DArrayU32FloatGrad
@ TexUnifiedCubeFloatFloatLevel
@ TexUnified1DArrayFloatS32
@ TexUnifiedCubeS32FloatLevel
@ TexUnified1DS32FloatGrad
@ Tex2DArrayFloatFloatLevel
@ TexUnifiedCubeArrayFloatFloat
@ TexUnifiedCubeArrayFloatFloatGrad
@ TexUnifiedCubeFloatFloat
@ TexUnified1DArrayU32S32
@ TexUnified3DFloatFloatGrad
@ Tld4UnifiedA2DFloatFloat
@ TexUnified3DS32FloatGrad
@ TexUnified2DU32FloatLevel
@ TexUnified1DArrayS32S32
@ TexCubeArrayFloatFloatLevel
@ TexUnified1DU32FloatGrad
@ TexCubeArrayS32FloatLevel
@ Tex2DArrayU32FloatLevel
@ Tex1DArrayU32FloatLevel
@ TexUnified2DArrayU32FloatLevel
@ TexUnified1DArrayFloatFloatGrad
@ TexUnifiedCubeS32FloatGrad
@ TexCubeArrayU32FloatLevel
@ TexUnified3DS32FloatLevel
@ TexUnifiedCubeArrayS32FloatGrad
@ TexUnified2DArrayS32Float
@ Tex2DArrayFloatFloatGrad
@ TexUnifiedCubeArrayS32Float
@ TexUnified2DArrayS32FloatLevel
@ Tex1DArrayS32FloatLevel
@ TexUnifiedCubeArrayU32FloatLevel
@ TexUnified2DArrayS32S32
@ TexUnified2DArrayFloatFloatGrad
@ TexUnifiedCubeU32FloatGrad
@ Tld4UnifiedB2DFloatFloat
@ TexUnified1DArrayU32FloatLevel
@ TexUnified1DArrayS32FloatGrad
@ TexUnified2DS32FloatGrad
@ TexUnified2DArrayS32FloatGrad
@ TexUnified1DU32FloatLevel
@ TexUnifiedCubeArrayU32Float
@ TexUnified2DArrayFloatS32
@ TexUnified1DArrayU32FloatGrad
initializer< Ty > init(const Ty &Val)
static bool isIndirectCall(const MachineInstr &MI)
bool shouldEmitPTXNoReturn(const Value *V, const TargetMachine &TM)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
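A typical call, assuming an LLVM source tree on the include path (the function and container names here are illustrative):

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"

static bool allPositive(const llvm::SmallVectorImpl<int> &Values) {
  return llvm::all_of(Values, [](int X) { return X > 0; }); // range, not begin/end
}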
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B, C, ...), where A is the 0-based index of the item and B, C, ... are values from the input ranges.
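Sketch of single-range usage (identifiers illustrative; assumes LLVM headers are available):

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/raw_ostream.h"

static void printIndexed(const llvm::SmallVectorImpl<int> &Values) {
  for (const auto &E : llvm::enumerate(Values))
    llvm::outs() << E.index() << ": " << E.value() << "\n";
}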
MaybeAlign getAlign(const Function &F, unsigned Index)
uint64_t PowerOf2Ceil(uint64_t A)
Returns the power of two which is greater than or equal to the given value.
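For example (assuming llvm/Support/MathExtras.h is available):

#include "llvm/Support/MathExtras.h"
#include <cassert>

static void demoPowerOf2Ceil() {
  assert(llvm::PowerOf2Ceil(5) == 8); // rounds up to the next power of two
  assert(llvm::PowerOf2Ceil(8) == 8); // already a power of two
}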
OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)
Wrapper function around std::transform to apply a function to a range and store the result elsewhere.
unsigned promoteScalarArgumentSize(unsigned size)
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
CodeGenOptLevel
Code generation optimization level.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
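For example (assuming llvm/Support/Alignment.h is available):

#include "llvm/Support/Alignment.h"
#include <cassert>

static void demoAlignTo() {
  assert(llvm::alignTo(10, llvm::Align(8)) == 16); // next multiple of 8
  assert(llvm::alignTo(16, llvm::Align(8)) == 16); // already aligned
}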
DWARFExpression::Operation Op
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual EVTs that make up that type.
constexpr unsigned BitWidth
bool isKernelFunction(const Function &F)
Function * getMaybeBitcastedCallee(const CallBase *CB)
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
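For example, stepping 4 bytes into a 16-byte-aligned object leaves only 4-byte alignment (assuming llvm/Support/Alignment.h is available):

#include "llvm/Support/Alignment.h"
#include <cassert>

static void demoCommonAlignment() {
  assert(llvm::commonAlignment(llvm::Align(16), 4) == llvm::Align(4));
}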
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static const fltSemantics & IEEEsingle() LLVM_READNONE
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating point environment.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
EVT changeTypeToInteger() const
Return the type converted to an equivalently sized integer or vector with integer element type.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
uint64_t getScalarSizeInBits() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
EVT changeVectorElementType(EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type that is chosen by the caller.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool isInteger() const
Return true if this is an integer or a vector integer type.
This class contains a discriminated union of information about pointers in memory operands, relating them back to the LLVM IR or to virtual locations (such as frame indices) that are exposed during codegen.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*vscale.
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
SDValue CombineTo(SDNode *N, ArrayRef< SDValue > To, bool AddTo=true)