#include "llvm/IR/IntrinsicsAMDGPU.h"
#ifdef EXPENSIVE_CHECKS
#define DEBUG_TYPE "amdgpu-isel"
  In = stripBitcast(In);
  Out = In.getOperand(0);
  if (ShiftAmt->getZExtValue() == 16) {
  return In.getOperand(0);
  if (Src.getValueType().getSizeInBits() == 32)
    return stripBitcast(Src);
                      "AMDGPU DAG->DAG Pattern Instruction Selection", false, false)
#ifdef EXPENSIVE_CHECKS
                    "AMDGPU DAG->DAG Pattern Instruction Selection", false, false)
#ifdef EXPENSIVE_CHECKS
  DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    assert(L->isLCSSAForm(DT));
bool AMDGPUDAGToDAGISel::fp16SrcZerosHighBits(unsigned Opc) const {
#ifdef EXPENSIVE_CHECKS
  MVT VT = N->getValueType(0).getSimpleVT();
  if (VT != MVT::v2i16 && VT != MVT::v2f16)
  LoadSDNode *LdHi = dyn_cast<LoadSDNode>(stripBitcast(Hi));
  LoadSDNode *LdLo = dyn_cast<LoadSDNode>(stripBitcast(Lo));
  if (LdLo && Lo.hasOneUse()) {
  bool MadeChange = false;
  switch (N->getOpcode()) {
bool AMDGPUDAGToDAGISel::isInlineImmediate(const SDNode *N,
                                           bool Negated) const {
      return TII->isInlineConstant(-C->getAPIntValue());
      return TII->isInlineConstant(-C->getValueAPF().bitcastToAPInt());
    return TII->isInlineConstant(C->getAPIntValue());
    return TII->isInlineConstant(C->getValueAPF().bitcastToAPInt());
                                                            unsigned OpNo) const {
  if (!N->isMachineOpcode()) {
      Register Reg = cast<RegisterSDNode>(N->getOperand(1))->getReg();
      if (Reg.isVirtual()) {
        return MRI.getRegClass(Reg);
          = static_cast<const GCNSubtarget *>(Subtarget)->getRegisterInfo();
      return TRI->getPhysRegBaseClass(Reg);
  switch (N->getMachineOpcode()) {
    unsigned OpIdx = Desc.getNumDefs() + OpNo;
    if (OpIdx >= Desc.getNumOperands())
    int RegClass = Desc.operands()[OpIdx].RegClass;
  case AMDGPU::REG_SEQUENCE: {
    unsigned RCID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    SDValue SubRegOp = N->getOperand(OpNo + 1);
    unsigned SubRegIdx = cast<ConstantSDNode>(SubRegOp)->getZExtValue();
  SmallVector<SDValue, 8> Ops;
  for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i)
  assert(N->getOperand(0).getValueType() == MVT::Other && "Expected chain");
  return glueCopyToOp(N, M0, M0.getValue(1));
SDNode *AMDGPUDAGToDAGISel::glueCopyToM0LDSInit(SDNode *N) const {
  unsigned AS = cast<MemSDNode>(N)->getAddressSpace();
                                          AMDGPU::S_MOV_B32, DL, MVT::i32,
  EVT VT = N->getValueType(0);
  if (NumVectorElts == 1) {
  assert(NumVectorElts <= 32 && "Vectors with more than 32 elements not "
  bool IsRegSeq = true;
  unsigned NOps = N->getNumOperands();
  for (unsigned i = 0; i < NOps; i++) {
    if (isa<RegisterSDNode>(N->getOperand(i))) {
    RegSeqArgs[1 + (2 * i)] = N->getOperand(i);
  if (NOps != NumVectorElts) {
    for (unsigned i = NOps; i < NumVectorElts; ++i) {
      RegSeqArgs[1 + (2 * i)] = SDValue(ImpDef, 0);
      RegSeqArgs[1 + (2 * i) + 1] =
  unsigned int Opc = N->getOpcode();
  if (N->isMachineOpcode()) {
  N = glueCopyToM0LDSInit(N);
    if (N->getValueType(0) != MVT::i64)
    SelectADD_SUB_I64(N);
    if (N->getValueType(0) != MVT::i32)
    SelectUADDO_USUBO(N);
    SelectFMUL_W_CHAIN(N);
    SelectFMA_W_CHAIN(N);
    EVT VT = N->getValueType(0);
    unsigned RegClassID =
    if (N->getValueType(0) == MVT::i128) {
    } else if (N->getValueType(0) == MVT::i64) {
    const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
                            N->getOperand(1), SubReg1 };
                                          N->getValueType(0), Ops));
    if (N->getValueType(0).getSizeInBits() != 64 || isInlineImmediate(N))
      Imm = FP->getValueAPF().bitcastToAPInt().getZExtValue();
      Imm = C->getZExtValue();
    uint32_t WidthVal = Width->getZExtValue();
      return SelectMUL_LOHI(N);
    if (N->getValueType(0) != MVT::i32)
    if (N->getValueType(0) == MVT::i32) {
                           { N->getOperand(0), N->getOperand(1) });
    SelectINTRINSIC_W_CHAIN(N);
    SelectINTRINSIC_WO_CHAIN(N);
    SelectINTRINSIC_VOID(N);
    SelectWAVE_ADDRESS(N);
    SelectSTACKRESTORE(N);
bool AMDGPUDAGToDAGISel::isUniformBr(const SDNode *N) const {
  return Term->getMetadata("amdgpu.uniform") ||
         Term->getMetadata("structurizecfg.uniform");
bool AMDGPUDAGToDAGISel::isUnneededShiftMask(const SDNode *N,
                                             unsigned ShAmtBits) const {
  const APInt &RHS = cast<ConstantSDNode>(N->getOperand(1))->getAPIntValue();
  if (RHS.countr_one() >= ShAmtBits)
  return (LHSKnownZeros | RHS).countr_one() >= ShAmtBits;
    N1 = Lo.getOperand(1);
  assert(LHS && RHS && isa<ConstantSDNode>(RHS));
  return "AMDGPU DAG->DAG Pattern Instruction Selection";
  if ((C = dyn_cast<ConstantSDNode>(Addr))) {
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(0)))) {
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
SDValue AMDGPUDAGToDAGISel::getMaterializedScalarImm32(int64_t Val,
                                          AMDGPU::S_MOV_B32, DL, MVT::i32,
void AMDGPUDAGToDAGISel::SelectADD_SUB_I64(SDNode *N) {
  unsigned Opcode = N->getOpcode();
                                       DL, MVT::i32, LHS, Sub0);
                                       DL, MVT::i32, LHS, Sub1);
                                       DL, MVT::i32, RHS, Sub0);
                                       DL, MVT::i32, RHS, Sub1);
  static const unsigned OpcMap[2][2][2] = {
      {{AMDGPU::S_SUB_U32, AMDGPU::S_ADD_U32},
       {AMDGPU::V_SUB_CO_U32_e32, AMDGPU::V_ADD_CO_U32_e32}},
      {{AMDGPU::S_SUBB_U32, AMDGPU::S_ADDC_U32},
       {AMDGPU::V_SUBB_U32_e32, AMDGPU::V_ADDC_U32_e32}}};
  unsigned Opc = OpcMap[0][N->isDivergent()][IsAdd];
  unsigned CarryOpc = OpcMap[1][N->isDivergent()][IsAdd];
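// [Editor's note - comment added for this listing, not in the upstream source]
// OpcMap is indexed as OpcMap[HasCarryIn][IsDivergent][IsAdd]: the first index
// picks the carry-producing low-half opcode (0) or the carry-consuming
// high-half opcode (1), the second chooses scalar S_* vs. vector V_* forms
// based on divergence, and the last picks subtract vs. add.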
                                       MVT::i64, RegSequenceArgs);
void AMDGPUDAGToDAGISel::SelectAddcSubb(SDNode *N) {
  if (N->isDivergent()) {
                       : AMDGPU::V_SUBB_U32_e64;
        N, Opc, N->getVTList(),
         CurDAG->getTargetConstant(0, {}, MVT::i1) });
                       : AMDGPU::S_SUB_CO_PSEUDO;
    CurDAG->SelectNodeTo(N, Opc, N->getVTList(), {LHS, RHS, CI});
void AMDGPUDAGToDAGISel::SelectUADDO_USUBO(SDNode *N) {
  bool IsVALU = N->isDivergent();
    if (UI.getUse().getResNo() == 1) {
    unsigned Opc = IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
        N, Opc, N->getVTList(),
        {N->getOperand(0), N->getOperand(1),
         CurDAG->getTargetConstant(0, {}, MVT::i1) });
    unsigned Opc = N->getOpcode() == ISD::UADDO ? AMDGPU::S_UADDO_PSEUDO
                                                : AMDGPU::S_USUBO_PSEUDO;
    CurDAG->SelectNodeTo(N, Opc, N->getVTList(),
                         {N->getOperand(0), N->getOperand(1)});
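// [Editor's note - comment added for this listing, not in the upstream source]
// For UADDO/USUBO the divergent path selects V_ADD_CO_U32_e64 / V_SUB_CO_U32_e64
// with an explicit zero clamp operand, while the uniform path uses the
// S_UADDO_PSEUDO / S_USUBO_PSEUDO scalar pseudos; both reuse the node's
// original value types (N->getVTList()), keeping the overflow result.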
void AMDGPUDAGToDAGISel::SelectFMA_W_CHAIN(SDNode *N) {
  SelectVOP3Mods0(N->getOperand(1), Ops[1], Ops[0], Ops[6], Ops[7]);
  SelectVOP3Mods(N->getOperand(2), Ops[3], Ops[2]);
  SelectVOP3Mods(N->getOperand(3), Ops[5], Ops[4]);
  Ops[8] = N->getOperand(0);
  Ops[9] = N->getOperand(4);
                 cast<ConstantSDNode>(Ops[0])->isZero() &&
                 cast<ConstantSDNode>(Ops[2])->isZero() &&
                 cast<ConstantSDNode>(Ops[4])->isZero();
  unsigned Opcode = UseFMAC ? AMDGPU::V_FMAC_F32_e64 : AMDGPU::V_FMA_F32_e64;
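// [Editor's note - comment added for this listing, not in the upstream source]
// Among the conditions visible here, UseFMAC requires that all three
// source-modifier operands (Ops[0], Ops[2], Ops[4]) be zero, so the FMAC form
// is only chosen for the modifier-free case; otherwise the full
// V_FMA_F32_e64 encoding is used.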
void AMDGPUDAGToDAGISel::SelectFMUL_W_CHAIN(SDNode *N) {
  SelectVOP3Mods0(N->getOperand(1), Ops[1], Ops[0], Ops[4], Ops[5]);
  SelectVOP3Mods(N->getOperand(2), Ops[3], Ops[2]);
  Ops[6] = N->getOperand(0);
  Ops[7] = N->getOperand(3);
void AMDGPUDAGToDAGISel::SelectDIV_SCALE(SDNode *N) {
  EVT VT = N->getValueType(0);
  assert(VT == MVT::f32 || VT == MVT::f64);
      = (VT == MVT::f64) ? AMDGPU::V_DIV_SCALE_F64_e64
                         : AMDGPU::V_DIV_SCALE_F32_e64;
  SelectVOP3BMods0(N->getOperand(0), Ops[1], Ops[0], Ops[6], Ops[7]);
  SelectVOP3BMods(N->getOperand(1), Ops[3], Ops[2]);
  SelectVOP3BMods(N->getOperand(2), Ops[5], Ops[4]);
void AMDGPUDAGToDAGISel::SelectMAD_64_32(SDNode *N) {
    Opc = Signed ? AMDGPU::V_MAD_I64_I32_gfx11_e64
                 : AMDGPU::V_MAD_U64_U32_gfx11_e64;
    Opc = Signed ? AMDGPU::V_MAD_I64_I32_e64 : AMDGPU::V_MAD_U64_U32_e64;
  SDValue Ops[] = {N->getOperand(0), N->getOperand(1), N->getOperand(2),
void AMDGPUDAGToDAGISel::SelectMUL_LOHI(SDNode *N) {
    Opc = Signed ? AMDGPU::V_MAD_I64_I32_gfx11_e64
                 : AMDGPU::V_MAD_U64_U32_gfx11_e64;
    Opc = Signed ? AMDGPU::V_MAD_I64_I32_e64 : AMDGPU::V_MAD_U64_U32_e64;
  SDValue Ops[] = {N->getOperand(0), N->getOperand(1), Zero, Clamp};
                                                MVT::i32, SDValue(Mad, 0), Sub0);
                                                MVT::i32, SDValue(Mad, 0), Sub1);
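// [Editor's note - comment added for this listing, not in the upstream source]
// SelectMUL_LOHI reuses the 64-bit MAD selection with a zero addend, then
// splits the 64-bit product of the MAD node back into its i32 halves with
// sub0/sub1 subregister extracts to feed the two results of the MUL_LOHI node.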
    int64_t ByteOffset = C->getSExtValue();
    if (isDSOffsetLegal(SDValue(), ByteOffset)) {
                                      Zero, Addr.getOperand(1));
      if (isDSOffsetLegal(Sub, ByteOffset)) {
        unsigned SubOp = AMDGPU::V_SUB_CO_U32_e32;
          SubOp = AMDGPU::V_SUB_U32_e64;
    if (isDSOffsetLegal(SDValue(), CAddr->getZExtValue())) {
                                 DL, MVT::i32, Zero);
bool AMDGPUDAGToDAGISel::isDSOffset2Legal(SDValue Base, unsigned Offset0,
                                          unsigned Offset1,
                                          unsigned Size) const {
  if (Offset0 % Size != 0 || Offset1 % Size != 0)
  if (!isUInt<8>(Offset0 / Size) || !isUInt<8>(Offset1 / Size))
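// [Editor's note - comment added for this listing, not in the upstream source]
// isDSOffset2Legal encodes the constraint of the two-offset DS instructions
// (read2/write2): each offset must be a multiple of the access size and, after
// dividing by that size, must fit in the 8-bit offset0/offset1 fields.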
bool AMDGPUDAGToDAGISel::isFlatScratchBaseLegal(SDValue Base,
  return SelectDSReadWrite2(Addr, Base, Offset0, Offset1, 4);
  return SelectDSReadWrite2(Addr, Base, Offset0, Offset1, 8);
                                            unsigned Size) const {
    unsigned OffsetValue1 = OffsetValue0 + Size;
    if (isDSOffset2Legal(N0, OffsetValue0, OffsetValue1, Size)) {
                 dyn_cast<ConstantSDNode>(Addr.getOperand(0))) {
    unsigned OffsetValue0 = C->getZExtValue();
    unsigned OffsetValue1 = OffsetValue0 + Size;
    if (isDSOffset2Legal(SDValue(), OffsetValue0, OffsetValue1, Size)) {
      if (isDSOffset2Legal(Sub, OffsetValue0, OffsetValue1, Size)) {
        unsigned SubOp = AMDGPU::V_SUB_CO_U32_e32;
          SubOp = AMDGPU::V_SUB_U32_e64;
    unsigned OffsetValue0 = CAddr->getZExtValue();
    unsigned OffsetValue1 = OffsetValue0 + Size;
    if (isDSOffset2Legal(SDValue(), OffsetValue0, OffsetValue1, Size)) {
    C1 = cast<ConstantSDNode>(Addr.getOperand(1));
      N0 = Addr.getOperand(0);
                                          AMDGPU::S_MOV_B32, DL, MVT::i32,
  if (!SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64))
    if (C->getSExtValue()) {
std::pair<SDValue, SDValue> AMDGPUDAGToDAGISel::foldFrameIndex(SDValue N) const {
  auto *FI = dyn_cast<FrameIndexSDNode>(N);
bool AMDGPUDAGToDAGISel::SelectMUBUFScratchOffen(SDNode *Parent,
    int64_t Imm = CAddr->getSExtValue();
    const int64_t NullPtr =
    if (Imm != NullPtr) {
          AMDGPU::V_MOV_B32_e32, DL, MVT::i32, HighBits);
      VAddr = SDValue(MovHighBits, 0);
      std::tie(VAddr, SOffset) = foldFrameIndex(N0);
  std::tie(VAddr, SOffset) = foldFrameIndex(Addr);
  auto Reg = cast<RegisterSDNode>(Val.getOperand(1))->getReg();
  if (!Reg.isPhysical())
  auto RC = TRI.getPhysRegBaseClass(Reg);
  return RC && TRI.isSGPRClass(RC);
bool AMDGPUDAGToDAGISel::SelectMUBUFScratchOffset(SDNode *Parent,
    CAddr = dyn_cast<ConstantSDNode>(Addr.getOperand(1));
    SOffset = Addr.getOperand(0);
  } else if ((CAddr = dyn_cast<ConstantSDNode>(Addr)) &&
  if (!SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64))
  if (!cast<ConstantSDNode>(Offen)->getSExtValue() &&
      !cast<ConstantSDNode>(Idxen)->getSExtValue() &&
      !cast<ConstantSDNode>(Addr64)->getSExtValue()) {
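// [Editor's note - comment added for this listing, not in the upstream source]
// The plain MUBUF offset form is only used when SelectMUBUF reports that the
// offen, idxen and addr64 fields are all zero, i.e. no VGPR address component
// is needed and the address folds entirely into the immediate offset.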
  assert(isa<BuildVectorSDNode>(N));
  int64_t OffsetVal = 0;
  bool CanHaveFlatSegmentOffsetBug =
  if (isBaseWithConstantOffset64(Addr, N0, N1) &&
      isFlatScratchBaseLegal(N0, FlatVariant)) {
    int64_t COffsetVal = cast<ConstantSDNode>(N1)->getSExtValue();
    if (TII->isLegalFLATOffset(COffsetVal, AS, FlatVariant)) {
      OffsetVal = COffsetVal;
      std::tie(OffsetVal, RemainderOffset) =
          TII->splitFlatOffset(COffsetVal, AS, FlatVariant);
          getMaterializedScalarImm32(Lo_32(RemainderOffset), DL);
      if (Addr.getValueType().getSizeInBits() == 32) {
        unsigned AddOp = AMDGPU::V_ADD_CO_U32_e32;
          AddOp = AMDGPU::V_ADD_U32_e64;
                                       DL, MVT::i32, N0, Sub0);
                                       DL, MVT::i32, N0, Sub1);
            getMaterializedScalarImm32(Hi_32(RemainderOffset), DL);
                                 {AddOffsetLo, SDValue(N0Lo, 0), Clamp});
            AMDGPU::V_ADDC_U32_e64, DL, VTs,
                                              MVT::i64, RegSequenceArgs),
  return SelectFlatOffsetImpl(N, Addr, VAddr, Offset,
bool AMDGPUDAGToDAGISel::SelectGlobalSAddr(SDNode *N,
  int64_t ImmOffset = 0;
  if (isBaseWithConstantOffset64(Addr, LHS, RHS)) {
    int64_t COffsetVal = cast<ConstantSDNode>(RHS)->getSExtValue();
      ImmOffset = COffsetVal;
    } else if (!LHS->isDivergent()) {
      if (COffsetVal > 0) {
        int64_t SplitImmOffset, RemainderOffset;
        std::tie(SplitImmOffset, RemainderOffset) = TII->splitFlatOffset(
        if (isUInt<32>(RemainderOffset)) {
              AMDGPU::V_MOV_B32_e32, SL, MVT::i32,
      unsigned NumLiterals =
          !TII->isInlineConstant(APInt(32, COffsetVal & 0xffffffff)) +
          !TII->isInlineConstant(APInt(32, COffsetVal >> 32));
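// [Editor's note - comment added for this listing, not in the upstream source]
// NumLiterals counts how many of the two 32-bit halves of the large offset
// would require literal encodings rather than inline constants; logic not
// captured in this fragment uses that count when deciding whether folding the
// offset is worthwhile.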
  if (!LHS->isDivergent()) {
  if (!SAddr && !RHS->isDivergent()) {
      isa<ConstantSDNode>(Addr))
  if (auto FI = dyn_cast<FrameIndexSDNode>(SAddr)) {
             isa<FrameIndexSDNode>(SAddr.getOperand(0))) {
    auto FI = cast<FrameIndexSDNode>(SAddr.getOperand(0));
                                              FI->getValueType(0));
  if (Addr->isDivergent())
  int64_t COffsetVal = 0;
      isFlatScratchBaseLegal(Addr.getOperand(0))) {
    COffsetVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
    SAddr = Addr.getOperand(0);
    int64_t SplitImmOffset, RemainderOffset;
    std::tie(SplitImmOffset, RemainderOffset) = TII->splitFlatOffset(
    COffsetVal = SplitImmOffset;
            ? getMaterializedScalarImm32(Lo_32(RemainderOffset), DL)
            : CurDAG->getTargetConstant(RemainderOffset, DL, MVT::i32);
bool AMDGPUDAGToDAGISel::checkFlatScratchSVSSwizzleBug(
  return (VMax & 3) + (SMax & 3) >= 4;
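// [Editor's note - comment added for this listing, not in the upstream source]
// The swizzle-bug check is conservative: it looks at the maximum possible
// values of the VGPR and SGPR addends and rejects the scratch SVS form
// whenever their low two bits could sum to 4 or more, i.e. whenever the
// addition might carry into bit 2.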
  int64_t ImmOffset = 0;
  if (isBaseWithConstantOffset64(Addr, LHS, RHS)) {
    int64_t COffsetVal = cast<ConstantSDNode>(RHS)->getSExtValue();
      ImmOffset = COffsetVal;
    } else if (!LHS->isDivergent() && COffsetVal > 0) {
      int64_t SplitImmOffset, RemainderOffset;
      std::tie(SplitImmOffset, RemainderOffset)
      if (isUInt<32>(RemainderOffset)) {
            AMDGPU::V_MOV_B32_e32, SL, MVT::i32,
        if (!isFlatScratchBaseLegal(SAddr) || !isFlatScratchBaseLegal(VAddr))
        if (checkFlatScratchSVSSwizzleBug(VAddr, SAddr, SplitImmOffset))
  if (!LHS->isDivergent() && RHS->isDivergent()) {
  } else if (!RHS->isDivergent() && LHS->isDivergent()) {
  if (!isFlatScratchBaseLegal(SAddr) || !isFlatScratchBaseLegal(VAddr))
  if (checkFlatScratchSVSSwizzleBug(VAddr, SAddr, ImmOffset))
bool AMDGPUDAGToDAGISel::SelectSMRDOffset(SDValue ByteOffsetNode,
                                          bool Imm32Only, bool IsBuffer) const {
         "Cannot match both soffset and offset at the same time!");
    *SOffset = ByteOffsetNode;
  SDLoc SL(ByteOffsetNode);
  int64_t ByteOffset = IsBuffer ? C->getZExtValue() : C->getSExtValue();
  std::optional<int64_t> EncodedOffset =
  if (EncodedOffset && Offset && !Imm32Only) {
  if (EncodedOffset && Offset && Imm32Only) {
  if (!isUInt<32>(ByteOffset) && !isInt<32>(ByteOffset))
  if (Addr.getValueType() != MVT::i32)
  unsigned AddrHiVal = Info->get32BitAddressHighBits();
                                              bool IsBuffer) const {
    assert(!Imm32Only && !IsBuffer);
    return SelectSMRDBaseOffset(Addr, B, nullptr, Offset) &&
           SelectSMRDBaseOffset(B, SBase, SOffset, nullptr);
      !Addr->getFlags().hasNoUnsignedWrap())
  N0 = Addr.getOperand(0);
  N1 = Addr.getOperand(1);
  assert(N0 && N1 && isa<ConstantSDNode>(N1));
  if (SelectSMRDOffset(N1, SOffset, Offset, Imm32Only, IsBuffer)) {
  if (SelectSMRDOffset(N0, SOffset, Offset, Imm32Only, IsBuffer)) {
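// [Editor's note - comment added for this listing, not in the upstream source]
// SelectSMRDBaseOffset tries to match the constant offset on either addend of
// the base+offset expression: first with N1 as the offset and N0 as the base,
// then with the roles swapped, so adds canonicalized in either direction can
// still fold into the SMRD encoding.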
                                    bool Imm32Only) const {
  if (SelectSMRDBaseOffset(Addr, SBase, SOffset, Offset, Imm32Only)) {
    SBase = Expand32BitAddress(SBase);
  if (Addr.getValueType() == MVT::i32 && Offset && !SOffset) {
  return SelectSMRD(Addr, SBase, nullptr, &Offset);
  return SelectSMRD(Addr, SBase, nullptr, &Offset,
  return SelectSMRD(Addr, SBase, &SOffset, nullptr);
  return SelectSMRD(Addr, SBase, &SOffset, &Offset);
  return SelectSMRDOffset(N, nullptr, &Offset,
bool AMDGPUDAGToDAGISel::SelectSMRDBufferImm32(SDValue N,
  return SelectSMRDOffset(N, nullptr, &Offset,
bool AMDGPUDAGToDAGISel::SelectSMRDBufferSgprImm(SDValue N, SDValue &SOffset,
  return N.getValueType() == MVT::i32 &&
         SelectSMRDBaseOffset(N, SOffset, nullptr,
bool AMDGPUDAGToDAGISel::SelectMOVRELOffset(SDValue Index,
  if (isa<ConstantSDNode>(Index))
SDNode *AMDGPUDAGToDAGISel::getBFE32(bool IsSigned, const SDLoc &DL,
    unsigned Opcode = IsSigned ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
  unsigned Opcode = IsSigned ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
void AMDGPUDAGToDAGISel::SelectS_BFEFromShifts(SDNode *N) {
  const SDValue &Shl = N->getOperand(0);
    if (0 < BVal && BVal <= CVal && CVal < 32) {
void AMDGPUDAGToDAGISel::SelectS_BFE(SDNode *N) {
  switch (N->getOpcode()) {
    if (N->getOperand(0).getOpcode() == ISD::SRL) {
      const SDValue &Srl = N->getOperand(0);
      if (Shift && Mask) {
    if (N->getOperand(0).getOpcode() == ISD::AND) {
      if (Shift && Mask) {
    } else if (N->getOperand(0).getOpcode() == ISD::SHL) {
      SelectS_BFEFromShifts(N);
    if (N->getOperand(0).getOpcode() == ISD::SHL) {
      SelectS_BFEFromShifts(N);
      const ConstantSDNode *Amt = dyn_cast<ConstantSDNode>(Src.getOperand(1));
      unsigned Width = cast<VTSDNode>(N->getOperand(1))->getVT().getSizeInBits();
bool AMDGPUDAGToDAGISel::isCBranchSCC(const SDNode *N) const {
  if (!N->hasOneUse())
  MVT VT = Cond.getOperand(0).getSimpleValueType();
  if (VT == MVT::i64) {
void AMDGPUDAGToDAGISel::SelectBRCOND(SDNode *N) {
  if (Cond.isUndef()) {
                         N->getOperand(2), N->getOperand(0));
  bool UseSCCBr = isCBranchSCC(N) && isUniformBr(N);
  unsigned BrOp = UseSCCBr ? AMDGPU::S_CBRANCH_SCC1 : AMDGPU::S_CBRANCH_VCCNZ;
  Register CondReg = UseSCCBr ? AMDGPU::SCC : TRI->getVCC();
                                       : AMDGPU::S_AND_B64,
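// [Editor's note - comment added for this listing, not in the upstream source]
// SelectBRCOND uses S_CBRANCH_SCC1 with SCC as the condition register for
// branches whose condition can live in SCC and that are marked uniform
// (isCBranchSCC && isUniformBr); otherwise it falls back to S_CBRANCH_VCCNZ
// on VCC. The S_AND_B64 fragment above appears to be the false arm of a
// wave-size-dependent S_AND_B32/S_AND_B64 choice used on the VCC path.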
void AMDGPUDAGToDAGISel::SelectDSAppendConsume(SDNode *N, unsigned IntrID) {
  unsigned Opc = IntrID == Intrinsic::amdgcn_ds_append ? AMDGPU::DS_APPEND
                                                       : AMDGPU::DS_CONSUME;
    const APInt &OffsetVal = cast<ConstantSDNode>(PtrOffset)->getAPIntValue();
    if (isDSOffsetLegal(PtrBase, OffsetVal.getZExtValue())) {
      N = glueCopyToM0(N, PtrBase);
    N = glueCopyToM0(N, Ptr);
    N->getOperand(N->getNumOperands() - 1)
void AMDGPUDAGToDAGISel::SelectDSBvhStackIntrinsic(SDNode *N) {
  unsigned Opc = AMDGPU::DS_BVH_STACK_RTN_B32;
  SDValue Ops[] = {N->getOperand(2), N->getOperand(3), N->getOperand(4),
                   N->getOperand(5), N->getOperand(0)};
  case Intrinsic::amdgcn_ds_gws_init:
    return AMDGPU::DS_GWS_INIT;
  case Intrinsic::amdgcn_ds_gws_barrier:
    return AMDGPU::DS_GWS_BARRIER;
  case Intrinsic::amdgcn_ds_gws_sema_v:
    return AMDGPU::DS_GWS_SEMA_V;
  case Intrinsic::amdgcn_ds_gws_sema_br:
    return AMDGPU::DS_GWS_SEMA_BR;
  case Intrinsic::amdgcn_ds_gws_sema_p:
    return AMDGPU::DS_GWS_SEMA_P;
  case Intrinsic::amdgcn_ds_gws_sema_release_all:
    return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
void AMDGPUDAGToDAGISel::SelectDS_GWS(SDNode *N, unsigned IntrID) {
  if (!Subtarget->hasGWS() ||
      (IntrID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
  const bool HasVSrc = N->getNumOperands() == 4;
  assert(HasVSrc || N->getNumOperands() == 3);
  SDValue BaseOffset = N->getOperand(HasVSrc ? 3 : 2);
  if (ConstantSDNode *ConstOffset = dyn_cast<ConstantSDNode>(BaseOffset)) {
    ImmOffset = ConstOffset->getZExtValue();
  glueCopyToM0(N, SDValue(M0Base, 0));
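// [Editor's note - comment added for this listing, not in the upstream source]
// The GWS (global wave sync) instructions take their offset through M0, so
// SelectDS_GWS materializes the base offset into M0 via glueCopyToM0 before
// emitting the DS_GWS_* opcode returned by gwsIntrinToOpcode.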
void AMDGPUDAGToDAGISel::SelectInterpP1F16(SDNode *N) {
void AMDGPUDAGToDAGISel::SelectINTRINSIC_W_CHAIN(SDNode *N) {
  unsigned IntrID = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
  case Intrinsic::amdgcn_ds_append:
  case Intrinsic::amdgcn_ds_consume: {
    if (N->getValueType(0) != MVT::i32)
    SelectDSAppendConsume(N, IntrID);
  case Intrinsic::amdgcn_ds_bvh_stack_rtn:
    SelectDSBvhStackIntrinsic(N);
void AMDGPUDAGToDAGISel::SelectINTRINSIC_WO_CHAIN(SDNode *N) {
  unsigned IntrID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
  case Intrinsic::amdgcn_wqm:
    Opcode = AMDGPU::WQM;
  case Intrinsic::amdgcn_softwqm:
    Opcode = AMDGPU::SOFT_WQM;
  case Intrinsic::amdgcn_wwm:
  case Intrinsic::amdgcn_strict_wwm:
    Opcode = AMDGPU::STRICT_WWM;
  case Intrinsic::amdgcn_strict_wqm:
    Opcode = AMDGPU::STRICT_WQM;
  case Intrinsic::amdgcn_interp_p1_f16:
    SelectInterpP1F16(N);
  case Intrinsic::amdgcn_inverse_ballot:
    switch (N->getOperand(1).getValueSizeInBits()) {
      Opcode = AMDGPU::S_INVERSE_BALLOT_U32;
      Opcode = AMDGPU::S_INVERSE_BALLOT_U64;
void AMDGPUDAGToDAGISel::SelectINTRINSIC_VOID(SDNode *N) {
  unsigned IntrID = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
  case Intrinsic::amdgcn_ds_gws_init:
  case Intrinsic::amdgcn_ds_gws_barrier:
  case Intrinsic::amdgcn_ds_gws_sema_v:
  case Intrinsic::amdgcn_ds_gws_sema_br:
  case Intrinsic::amdgcn_ds_gws_sema_p:
  case Intrinsic::amdgcn_ds_gws_sema_release_all:
    SelectDS_GWS(N, IntrID);
void AMDGPUDAGToDAGISel::SelectWAVE_ADDRESS(SDNode *N) {
                       {N->getOperand(0), Log2WaveSize});
void AMDGPUDAGToDAGISel::SelectSTACKRESTORE(SDNode *N) {
  if (N->isDivergent()) {
                                  {SrcVal, Log2WaveSize}),
bool AMDGPUDAGToDAGISel::SelectVOP3ModsImpl(SDValue In, SDValue &Src,
                                            bool IsCanonicalizing,
                                            bool AllowAbs) const {
    Src = Src.getOperand(0);
  } else if (Src.getOpcode() == ISD::FSUB && IsCanonicalizing) {
    auto *LHS = dyn_cast<ConstantFPSDNode>(Src.getOperand(0));
    if (LHS && LHS->isZero()) {
      Src = Src.getOperand(1);
  if (AllowAbs && Src.getOpcode() == ISD::FABS) {
    Src = Src.getOperand(0);
  if (SelectVOP3ModsImpl(In, Src, Mods, true,
bool AMDGPUDAGToDAGISel::SelectVOP3ModsNonCanonicalizing(
  if (SelectVOP3ModsImpl(In, Src, Mods, false,
bool AMDGPUDAGToDAGISel::SelectVOP3BMods(SDValue In, SDValue &Src,
  if (SelectVOP3ModsImpl(In, Src, Mods,
bool AMDGPUDAGToDAGISel::SelectVOP3NoMods(SDValue In, SDValue &Src) const {
bool AMDGPUDAGToDAGISel::SelectVINTERPModsImpl(SDValue In, SDValue &Src,
  if (SelectVOP3ModsImpl(In, Src, Mods,
bool AMDGPUDAGToDAGISel::SelectVINTERPMods(SDValue In, SDValue &Src,
  return SelectVINTERPModsImpl(In, Src, SrcMods, false);
bool AMDGPUDAGToDAGISel::SelectVINTERPModsHi(SDValue In, SDValue &Src,
  return SelectVINTERPModsImpl(In, Src, SrcMods, true);
bool AMDGPUDAGToDAGISel::SelectVOP3Mods0(SDValue In, SDValue &Src,
  return SelectVOP3Mods(In, Src, SrcMods);
bool AMDGPUDAGToDAGISel::SelectVOP3BMods0(SDValue In, SDValue &Src,
  return SelectVOP3BMods(In, Src, SrcMods);
bool AMDGPUDAGToDAGISel::SelectVOP3OMods(SDValue In, SDValue &Src,
bool AMDGPUDAGToDAGISel::SelectVOP3PMods(SDValue In, SDValue &Src,
                                         SDValue &SrcMods, bool IsDOT) const {
    Src = Src.getOperand(0);
  unsigned VecMods = Mods;
    SDValue Lo = stripBitcast(Src.getOperand(0));
    SDValue Hi = stripBitcast(Src.getOperand(1));
      Lo = stripBitcast(Lo.getOperand(0));
      Hi = stripBitcast(Hi.getOperand(0));
    unsigned VecSize = Src.getValueSizeInBits();
    Lo = stripExtractLoElt(Lo);
    Hi = stripExtractLoElt(Hi);
    if (Lo.getValueSizeInBits() > VecSize) {
          (VecSize > 32) ? AMDGPU::sub0_sub1 : AMDGPU::sub0, SDLoc(In),
    if (Hi.getValueSizeInBits() > VecSize) {
          (VecSize > 32) ? AMDGPU::sub0_sub1 : AMDGPU::sub0, SDLoc(In),
    assert(Lo.getValueSizeInBits() <= VecSize &&
           Hi.getValueSizeInBits() <= VecSize);
    if (Lo == Hi && !isInlineImmediate(Lo.getNode())) {
      if (VecSize == 32 || VecSize == Lo.getValueSizeInBits()) {
        assert(Lo.getValueSizeInBits() == 32 && VecSize == 64);
                                        Lo.getValueType()), 0);
        auto RC = Lo->isDivergent() ? AMDGPU::VReg_64RegClassID
                                    : AMDGPU::SReg_64RegClassID;
                                       Src.getValueType(), Ops), 0);
    if (VecSize == 64 && Lo == Hi && isa<ConstantFPSDNode>(Lo)) {
      uint64_t Lit = cast<ConstantFPSDNode>(Lo)->getValueAPF()
                         .bitcastToAPInt().getZExtValue();
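// [Editor's note - comment added for this listing, not in the upstream source]
// For packed (VOP3P) operands built from two identical halves: when Lo == Hi
// and the value is not an inline immediate, the selector reuses a single
// 32-bit source, widening it through a REG_SEQUENCE into a VReg_64/SReg_64
// register when the packed operand is 64 bits wide; a 64-bit vector whose two
// halves are the same FP constant is handled via the constant's raw bit
// pattern (Lit).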
bool AMDGPUDAGToDAGISel::SelectVOP3PModsDOT(SDValue In, SDValue &Src,
  return SelectVOP3PMods(In, Src, SrcMods, true);
bool AMDGPUDAGToDAGISel::SelectDotIUVOP3PMods(SDValue In, SDValue &Src) const {
  assert(C->getAPIntValue().getBitWidth() == 1 && "expected i1 value");
  unsigned SrcSign = C->getZExtValue();
bool AMDGPUDAGToDAGISel::SelectWMMAOpSelVOP3PMods(SDValue In,
  assert(C->getAPIntValue().getBitWidth() == 1 && "expected i1 value");
  unsigned SrcVal = C->getZExtValue();
bool AMDGPUDAGToDAGISel::SelectVOP3OpSel(SDValue In, SDValue &Src,
bool AMDGPUDAGToDAGISel::SelectVOP3OpSelMods(SDValue In, SDValue &Src,
  return SelectVOP3Mods(In, Src, SrcMods);
bool AMDGPUDAGToDAGISel::SelectVOP3PMadMixModsImpl(SDValue In, SDValue &Src,
                                                   unsigned &Mods) const {
  SelectVOP3ModsImpl(In, Src, Mods);
    Src = Src.getOperand(0);
    assert(Src.getValueType() == MVT::f16);
    Src = stripBitcast(Src);
      SelectVOP3ModsImpl(Src, Src, ModsTmp);
bool AMDGPUDAGToDAGISel::SelectVOP3PMadMixModsExt(SDValue In, SDValue &Src,
  if (!SelectVOP3PMadMixModsImpl(In, Src, Mods))
bool AMDGPUDAGToDAGISel::SelectVOP3PMadMixMods(SDValue In, SDValue &Src,
  SelectVOP3PMadMixModsImpl(In, Src, Mods);
        C->getValueAPF().bitcastToAPInt().getZExtValue() << 16, SL, MVT::i32);
bool AMDGPUDAGToDAGISel::isVGPRImm(const SDNode *N) const {
  bool AllUsesAcceptSReg = true;
       Limit < 10 && U != E; ++U, ++Limit) {
    if (RC != &AMDGPU::VS_32RegClass) {
      AllUsesAcceptSReg = false;
      if (User->isMachineOpcode()) {
        unsigned Opc = User->getMachineOpcode();
        if (Desc.isCommutable()) {
          unsigned OpIdx = Desc.getNumDefs() + U.getOperandNo();
            unsigned CommutedOpNo = CommuteIdx1 - Desc.getNumDefs();
            if (CommutedRC == &AMDGPU::VS_32RegClass)
              AllUsesAcceptSReg = true;
    if (!AllUsesAcceptSReg)
  return !AllUsesAcceptSReg && (Limit < 10);
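// [Editor's note - comment added for this listing, not in the upstream source]
// isVGPRImm scans at most 10 uses of the immediate (the Limit counter) and
// treats the node as a VGPR immediate only when that bounded scan completed
// (Limit < 10) and found at least one user that cannot accept an SGPR operand
// (!AllUsesAcceptSReg), taking commutable machine instructions into account.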
bool AMDGPUDAGToDAGISel::isUniformLoad(const SDNode *N) const {
  auto Ld = cast<LoadSDNode>(N);
  return Ld->getAlign() >= Align(4) &&
               ->isMemOpHasNoClobberedMemOperand(N)));
  bool IsModified = false;
      SDNode *Node = &*Position++;
      if (ResNode != Node) {
  } while (IsModified);