cl::desc("Restrict range of branch instructions (DEBUG)"));
  if (Op0Idx == -1 && Op1Idx == -1)
  if ((Op0Idx == -1 && Op1Idx != -1) ||
      (Op1Idx == -1 && Op0Idx != -1))
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO:
                                            int64_t &Offset1) const {
  if (!get(Opc0).mayLoad() || !get(Opc1).mayLoad())
    Offset0 = cast<ConstantSDNode>(Load0->getOperand(2))->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Load1->getOperand(2))->getZExtValue();
    if (!Load0Offset || !Load1Offset)
    Offset1 = Load1Offset->getZExtValue();
    if (OffIdx0 == -1 || OffIdx1 == -1)
    if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1))
    Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue();
  case AMDGPU::DS_READ2ST64_B32:
  case AMDGPU::DS_READ2ST64_B64:
  case AMDGPU::DS_WRITE2ST64_B32:
  case AMDGPU::DS_WRITE2ST64_B64:
      BaseReg = AddrReg->getReg();
      Offset = OffsetImm->getImm();
      uint8_t Offset0 = Offset0Imm->getImm();
      uint8_t Offset1 = Offset1Imm->getImm();
      if (Offset1 > Offset0 && Offset1 - Offset0 == 1) {
        BaseReg = AddrReg->getReg();
        Offset = EltSize * Offset0;
    if (SOffset && SOffset->isReg())
    BaseReg = AddrReg->getReg();
    Offset = OffsetImm->getImm();
      Offset += SOffset->getImm();
    BaseReg = SBaseReg->getReg();
    Offset = OffsetImm->getImm();
    BaseReg = AddrReg->getReg();
                                      unsigned NumLoads) const {
  } else if (isDS(FirstLdSt) && isDS(SecondLdSt)) {
  if (!FirstDst || !SecondDst)
  unsigned LoadClusterThreshold = 16;
  return (NumLoads * DstRC->getSize()) <= LoadClusterThreshold;
                              const DebugLoc &DL, unsigned DestReg,
                              unsigned SrcReg, bool KillSrc) const {
  if (RC == &AMDGPU::VGPR_32RegClass) {
           AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
  if (RC == &AMDGPU::SReg_32_XM0RegClass ||
      RC == &AMDGPU::SReg_32RegClass) {
    if (SrcReg == AMDGPU::SCC) {
      BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B32), DestReg)
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
  if (RC == &AMDGPU::SReg_64RegClass) {
    if (DestReg == AMDGPU::VCC) {
      if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
        BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC)
        BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32))
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
  if (DestReg == AMDGPU::SCC) {
    BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U32))
  unsigned EltSize = 4;
  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
    Opcode = AMDGPU::S_MOV_B64;
    Opcode = AMDGPU::S_MOV_B32;
  for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
      SubIdx = SubIndices[Idx];
      SubIdx = SubIndices[SubIndices.size() - Idx - 1];
      get(Opcode), RI.getSubReg(DestReg, SubIdx));
    Builder.addReg(RI.getSubReg(SrcReg, SubIdx));
    if (Idx == SubIndices.size() - 1)
  return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
  return AMDGPU::S_MOV_B64;
  return AMDGPU::V_MOV_B64_PSEUDO;
    return AMDGPU::SI_SPILL_S32_SAVE;
    return AMDGPU::SI_SPILL_S64_SAVE;
    return AMDGPU::SI_SPILL_S128_SAVE;
    return AMDGPU::SI_SPILL_S256_SAVE;
    return AMDGPU::SI_SPILL_S512_SAVE;

    return AMDGPU::SI_SPILL_V32_SAVE;
    return AMDGPU::SI_SPILL_V64_SAVE;
    return AMDGPU::SI_SPILL_V96_SAVE;
    return AMDGPU::SI_SPILL_V128_SAVE;
    return AMDGPU::SI_SPILL_V256_SAVE;
    return AMDGPU::SI_SPILL_V512_SAVE;
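
The spill opcodes above are selected by spill size (see getVGPRSpillSaveOpcode in the index below). A plausible shape for the size-to-opcode mapping, reconstructed as a sketch; the byte-size case labels are an assumption based on the register widths in the opcode names:

static unsigned getVGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:  return AMDGPU::SI_SPILL_V32_SAVE;   // one 32-bit VGPR
  case 8:  return AMDGPU::SI_SPILL_V64_SAVE;
  case 12: return AMDGPU::SI_SPILL_V96_SAVE;
  case 16: return AMDGPU::SI_SPILL_V128_SAVE;
  case 32: return AMDGPU::SI_SPILL_V256_SAVE;
  case 64: return AMDGPU::SI_SPILL_V512_SAVE;
  default: llvm_unreachable("unimplemented spill size");
  }
}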
                                      unsigned SrcReg, bool isKill,
  unsigned Size = FrameInfo.getObjectSize(FrameIndex);
  unsigned Align = FrameInfo.getObjectAlignment(FrameIndex);
  Ctx.emitError("SIInstrInfo::storeRegToStackSlot - Do not know how to"
                " spill register");
  BuildMI(MBB, MI, DL, get(Opcode))
    return AMDGPU::SI_SPILL_S32_RESTORE;
    return AMDGPU::SI_SPILL_S64_RESTORE;
    return AMDGPU::SI_SPILL_S128_RESTORE;
    return AMDGPU::SI_SPILL_S256_RESTORE;
    return AMDGPU::SI_SPILL_S512_RESTORE;

    return AMDGPU::SI_SPILL_V32_RESTORE;
    return AMDGPU::SI_SPILL_V64_RESTORE;
    return AMDGPU::SI_SPILL_V96_RESTORE;
    return AMDGPU::SI_SPILL_V128_RESTORE;
    return AMDGPU::SI_SPILL_V256_RESTORE;
    return AMDGPU::SI_SPILL_V512_RESTORE;
  unsigned Align = FrameInfo.getObjectAlignment(FrameIndex);
  unsigned Size = FrameInfo.getObjectSize(FrameIndex);
  Ctx.emitError("SIInstrInfo::loadRegFromStackSlot - Do not know how to"
                " restore register");
  BuildMI(MBB, MI, DL, get(AMDGPU::IMPLICIT_DEF), DestReg);
  BuildMI(MBB, MI, DL, get(Opcode), DestReg)
                                             unsigned FrameOffset,
                                             unsigned Size) const {
  unsigned WavefrontSize = ST.getWavefrontSize();
  DebugLoc DL = Insert->getDebugLoc();
  if (TIDReg == AMDGPU::NoRegister)
      WorkGroupSize > WavefrontSize) {
    unsigned InputPtrReg =
    for (unsigned Reg : {TIDIGXReg, TIDIGYReg, TIDIGZReg}) {
    BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp0)
    BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp1)
    BuildMI(Entry, Insert, DL, get(AMDGPU::S_MUL_I32), STmp1)
    BuildMI(Entry, Insert, DL, get(AMDGPU::V_MUL_U32_U24_e32), TIDReg)
    BuildMI(Entry, Insert, DL, get(AMDGPU::V_MAD_U32_U24), TIDReg)
    BuildMI(Entry, Insert, DL, get(AMDGPU::V_ADD_I32_e32), TIDReg)
    BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_LO_U32_B32_e64),
    BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_HI_U32_B32_e64),
    BuildMI(Entry, Insert, DL, get(AMDGPU::V_LSHLREV_B32_e32),
  unsigned LDSOffset = MFI->getLDSSize() + (FrameOffset * WorkGroupSize);
  BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), TmpReg)

  BuildMI(MBB, MI, DL, get(AMDGPU::S_NOP))
  default: return AMDGPUInstrInfo::expandPostRAPseudo(MI);
  case AMDGPU::S_MOV_B64_term: {
    MI.setDesc(get(AMDGPU::S_MOV_B64));
  case AMDGPU::S_XOR_B64_term: {
    MI.setDesc(get(AMDGPU::S_XOR_B64));
  case AMDGPU::S_ANDN2_B64_term: {
    MI.setDesc(get(AMDGPU::S_ANDN2_B64));
  case AMDGPU::V_MOV_B64_PSEUDO: {
    unsigned DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
    unsigned DstHi = RI.getSubReg(Dst, AMDGPU::sub1);
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
        .addImm(Imm.getLoBits(32).getZExtValue())
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
        .addImm(Imm.getHiBits(32).getZExtValue())
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
  case AMDGPU::V_MOVRELD_B32_V1:
  case AMDGPU::V_MOVRELD_B32_V2:
  case AMDGPU::V_MOVRELD_B32_V4:
  case AMDGPU::V_MOVRELD_B32_V8:
  case AMDGPU::V_MOVRELD_B32_V16: {
    const MCInstrDesc &MovRelDesc = get(AMDGPU::V_MOVRELD_B32_e32);
    BuildMI(MBB, MI, DL, MovRelDesc)
    const int ImpDefIdx =
    const int ImpUseIdx = ImpDefIdx + 1;
    MovRel->tieOperands(ImpDefIdx, ImpUseIdx);
  case AMDGPU::SI_PC_ADD_REL_OFFSET: {
    unsigned RegLo = RI.getSubReg(Reg, AMDGPU::sub0);
    unsigned RegHi = RI.getSubReg(Reg, AMDGPU::sub1);
    Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_GETPC_B64), Reg));
    Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADD_U32), RegLo)
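
For reference, the V_MOV_B64_PSEUDO expansion above materializes a 64-bit immediate as two 32-bit moves into the sub0/sub1 halves of the destination. A minimal standalone model of that split:

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t Imm = 0x123456789ABCDEF0ULL;
  uint32_t Lo = static_cast<uint32_t>(Imm);        // goes into sub0
  uint32_t Hi = static_cast<uint32_t>(Imm >> 32);  // goes into sub1
  printf("v_mov_b32 dst.lo, 0x%08X\n", Lo);        // prints 9ABCDEF0
  printf("v_mov_b32 dst.hi, 0x%08X\n", Hi);        // prints 12345678
}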
                                        unsigned Src1OpName) const {
         "All commutable instructions have both src0 and src1 modifiers");
  int Src0ModsVal = Src0Mods->getImm();
  int Src1ModsVal = Src1Mods->getImm();
  Src1Mods->setImm(Src0ModsVal);
  Src0Mods->setImm(Src1ModsVal);
  bool IsKill = RegOp.isKill();
  bool IsDead = RegOp.isDead();
  bool IsUndef = RegOp.isUndef();
  bool IsDebug = RegOp.isDebug();
  if (NonRegOp.isImm())
  else if (NonRegOp.isFI())
  NonRegOp.ChangeToRegister(Reg, false, false, IsKill, IsDead, IsUndef,
                            IsDebug);
                                                  unsigned Src1Idx) const {
  assert(!NewMI && "this should never be used");
  if (CommutedOpcode == -1)
             static_cast<int>(Src0Idx) &&
             static_cast<int>(Src1Idx) &&
         "inconsistency with findCommutedOpIndices");
                                        Src1, AMDGPU::OpName::src1_modifiers);
    CommutedMI->setDesc(get(CommutedOpcode));

                                            unsigned &SrcOpIdx1) const {
  return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx);
                                     int64_t BrOffset) const {
  assert(BranchOp != AMDGPU::S_SETPC_B64);
  if (MI.getOpcode() == AMDGPU::S_SETPC_B64) {
  assert(RS && "RegScavenger required for long branching");
         "new block should be inserted for expanding unconditional branch");
  if (BrOffset >= 0) {
    BuildMI(MBB, I, DL, get(AMDGPU::S_ADD_U32))
      .addReg(PCReg, 0, AMDGPU::sub0)
    BuildMI(MBB, I, DL, get(AMDGPU::S_ADDC_U32))
      .addReg(PCReg, 0, AMDGPU::sub1)
    BuildMI(MBB, I, DL, get(AMDGPU::S_SUB_U32))
      .addReg(PCReg, 0, AMDGPU::sub0)
    BuildMI(MBB, I, DL, get(AMDGPU::S_SUBB_U32))
      .addReg(PCReg, 0, AMDGPU::sub1)
  BuildMI(&MBB, DL, get(AMDGPU::S_SETPC_B64))
  return 4 + 8 + 4 + 4;
unsigned SIInstrInfo::getBranchOpcode(SIInstrInfo::BranchPredicate Cond) {
  case SIInstrInfo::SCC_TRUE:
    return AMDGPU::S_CBRANCH_SCC1;
  case SIInstrInfo::SCC_FALSE:
    return AMDGPU::S_CBRANCH_SCC0;
  case SIInstrInfo::VCCNZ:
    return AMDGPU::S_CBRANCH_VCCNZ;
  case SIInstrInfo::VCCZ:
    return AMDGPU::S_CBRANCH_VCCZ;
  case SIInstrInfo::EXECNZ:
    return AMDGPU::S_CBRANCH_EXECNZ;
  case SIInstrInfo::EXECZ:
    return AMDGPU::S_CBRANCH_EXECZ;

SIInstrInfo::BranchPredicate SIInstrInfo::getBranchPredicate(unsigned Opcode) {
  case AMDGPU::S_CBRANCH_SCC0:
  case AMDGPU::S_CBRANCH_SCC1:
  case AMDGPU::S_CBRANCH_VCCNZ:
  case AMDGPU::S_CBRANCH_VCCZ:
  case AMDGPU::S_CBRANCH_EXECNZ:
  case AMDGPU::S_CBRANCH_EXECZ:
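
The two switches above are inverse mappings between branch predicates and conditional-branch opcodes. A standalone sketch of that round-trip property, using invented stand-in enum names (the real code uses the SIInstrInfo::BranchPredicate enum and AMDGPU opcodes):

#include <cassert>

enum Pred { SCC_TRUE, SCC_FALSE, VCCNZ, VCCZ, EXECNZ, EXECZ };
enum Op   { CBR_SCC1, CBR_SCC0, CBR_VCCNZ, CBR_VCCZ, CBR_EXECNZ, CBR_EXECZ };

static Op toOpcode(Pred P) {
  switch (P) {
  case SCC_TRUE:  return CBR_SCC1;
  case SCC_FALSE: return CBR_SCC0;
  case VCCNZ:     return CBR_VCCNZ;
  case VCCZ:      return CBR_VCCZ;
  case EXECNZ:    return CBR_EXECNZ;
  case EXECZ:     return CBR_EXECZ;
  }
  return CBR_SCC1;
}

static Pred toPred(Op O) {
  switch (O) {
  case CBR_SCC1:   return SCC_TRUE;
  case CBR_SCC0:   return SCC_FALSE;
  case CBR_VCCNZ:  return VCCNZ;
  case CBR_VCCZ:   return VCCZ;
  case CBR_EXECNZ: return EXECNZ;
  case CBR_EXECZ:  return EXECZ;
  }
  return SCC_TRUE;
}

int main() {
  for (Pred P : {SCC_TRUE, SCC_FALSE, VCCNZ, VCCZ, EXECNZ, EXECZ})
    assert(toPred(toOpcode(P)) == P); // round-trips for every predicate
}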
                                bool AllowModify) const {
  if (I->getOpcode() == AMDGPU::S_BRANCH) {
    TBB = I->getOperand(0).getMBB();
  BranchPredicate Pred = getBranchPredicate(I->getOpcode());
  if (Pred == INVALID_BR)
  if (I == MBB.end()) {
  if (I->getOpcode() == AMDGPU::S_BRANCH) {
    FBB = I->getOperand(0).getMBB();

                                bool AllowModify) const {
  if (I->getOpcode() != AMDGPU::SI_MASK_BRANCH)
  if (TBB != MaskBrDest || Cond.empty())
  auto Pred = Cond[0].getImm();
  return (Pred != EXECZ && Pred != EXECNZ);
                                   int *BytesRemoved) const {
  unsigned RemovedSize = 0;
  while (I != MBB.end()) {
    if (I->getOpcode() == AMDGPU::SI_MASK_BRANCH) {
    I->eraseFromParent();
    *BytesRemoved = RemovedSize;

                                   int *BytesAdded) const {
  if (!FBB && Cond.empty()) {
    BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
  assert(TBB && Cond[0].isImm());
      = getBranchOpcode(static_cast<BranchPredicate>(Cond[0].getImm()));
    BuildMI(&MBB, DL, get(Opcode))
  BuildMI(&MBB, DL, get(Opcode))
  BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))

  Cond[0].setImm(-Cond[0].getImm());
                                      AMDGPU::OpName::src0_modifiers);
                                      AMDGPU::OpName::src1_modifiers);
                                      AMDGPU::OpName::src2_modifiers);
  if (Opc == AMDGPU::COPY) {
    case AMDGPU::S_MOV_B64:
    case AMDGPU::V_MOV_B32_e32:
    case AMDGPU::S_MOV_B32:
    unsigned NewOpc = isVGPRCopy ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
  if (!ImmOp->isImm()) {
  if (Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64 ||
      Opc == AMDGPU::V_MAD_F16 || Opc == AMDGPU::V_MAC_F16_e64) {
    bool IsF32 = Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64;
      unsigned Src1Reg = Src1->getReg();
      unsigned Src1SubReg = Src1->getSubReg();
      if (Opc == AMDGPU::V_MAC_F32_e64 ||
          Opc == AMDGPU::V_MAC_F16_e64)
      UseMI.setDesc(get(IsF32 ? AMDGPU::V_MADMK_F32 : AMDGPU::V_MADMK_F16));
    if (!Src0->isImm() &&
      if (Opc == AMDGPU::V_MAC_F32_e64 ||
          Opc == AMDGPU::V_MAC_F16_e64)
      UseMI.setDesc(get(IsF32 ? AMDGPU::V_MADAK_F32 : AMDGPU::V_MADAK_F16));
                                   int WidthB, int OffsetB) {
  int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
  int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
  int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
  return LowOffset + LowWidth <= HighOffset;

bool SIInstrInfo::checkInstOffsetsDoNotOverlap(MachineInstr &MIa,
  unsigned BaseReg0, BaseReg1;
  int64_t Offset0, Offset1;
  if (BaseReg0 == BaseReg1 &&
         "MIa must load from or modify a memory location");
         "MIb must load from or modify a memory location");
    if (!AA->alias(LocA, LocB))
    return checkInstOffsetsDoNotOverlap(MIa, MIb);
    return checkInstOffsetsDoNotOverlap(MIa, MIb);
    return checkInstOffsetsDoNotOverlap(MIa, MIb);
    return checkInstOffsetsDoNotOverlap(MIa, MIb);
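
The disjointness test above treats two accesses as the intervals [OffsetA, OffsetA+WidthA) and [OffsetB, OffsetB+WidthB): they do not overlap exactly when the lower interval ends at or before the higher one begins. A standalone copy with concrete values:

#include <cstdio>

static bool offsetsDoNotOverlap(int WidthA, int OffsetA,
                                int WidthB, int OffsetB) {
  int LowOffset  = OffsetA < OffsetB ? OffsetA : OffsetB;
  int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
  int LowWidth   = (LowOffset == OffsetA) ? WidthA : WidthB;
  return LowOffset + LowWidth <= HighOffset;
}

int main() {
  printf("%d\n", offsetsDoNotOverlap(4, 0, 4, 4)); // [0,4) vs [4,8) -> 1
  printf("%d\n", offsetsDoNotOverlap(8, 0, 4, 4)); // [0,8) vs [4,8) -> 0
}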
  case AMDGPU::V_MAC_F16_e64:
  case AMDGPU::V_MAC_F32_e64:
  case AMDGPU::V_MAC_F16_e32:
  case AMDGPU::V_MAC_F32_e32: {
                                           AMDGPU::OpName::src0);
                 get(IsF16 ? AMDGPU::V_MAD_F16 : AMDGPU::V_MAD_F32))
  case AMDGPU::S_SET_GPR_IDX_ON:
  case AMDGPU::S_SET_GPR_IDX_MODE:
  case AMDGPU::S_SET_GPR_IDX_OFF:
         MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32 ||
         MI.getOpcode() == AMDGPU::S_SETREG_B32 ||
    int64_t Imm = MO.getImm();
    int32_t Trunc = static_cast<int32_t>(Imm);
    return Trunc == Imm &&
    int16_t Trunc = static_cast<int16_t>(Imm);
                                   AMDGPU::OpName::src0_modifiers) != -1;
                                  unsigned OpName) const {
  return Mods && Mods->getImm();
    return (MO.getReg() == AMDGPU::VCC || MO.getReg() == AMDGPU::M0 ||
            (AMDGPU::SGPR_32RegClass.contains(MO.getReg()) ||
  switch (MO.getReg()) {
  case AMDGPU::FLAT_SCR:
  return AMDGPU::NoRegister;
  case AMDGPU::V_READLANE_B32:
  case AMDGPU::V_READLANE_B32_si:
  case AMDGPU::V_READLANE_B32_vi:
  case AMDGPU::V_WRITELANE_B32:
  case AMDGPU::V_WRITELANE_B32_si:
  case AMDGPU::V_WRITELANE_B32_vi:
  if (SIInstrInfo::isGenericOpcode(MI.getOpcode()) ||
    return TRI.isSubRegister(SuperVec.getReg(), SubReg.getReg());
  return SubReg.getSubReg() != AMDGPU::NoSubRegister &&
    ErrInfo = "Instruction has wrong number of operands.";
        ErrInfo = "inlineasm operand has incorrect register class.";
      ErrInfo = "FPImm Machine Operands are not supported. ISel should bitcast "
                "all fp values to integers.";
          ErrInfo = "Illegal immediate value for operand.";
        ErrInfo = "Illegal immediate value for operand.";
        ErrInfo = "Expected immediate, but got non-immediate";
    if (RegClass != -1) {
      if (Reg == AMDGPU::NoRegister ||
        ErrInfo = "Operand has incorrect register class.";
    const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx };
    unsigned ConstantBusCount = 0;
    if (SGPRUsed != AMDGPU::NoRegister)
    for (int OpIdx : OpIndices) {
          if (MO.getReg() != SGPRUsed)
    if (ConstantBusCount > 1) {
      ErrInfo = "VOP* instruction uses the constant bus more than once";
  if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32 ||
      Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64) {
      ErrInfo = "v_div_scale_{f32|f64} require src0 = src1 or src2";
        ErrInfo = "invalid immediate for SOPK instruction";
        ErrInfo = "invalid immediate for SOPK instruction";
  if (Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e32 ||
      Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e64 ||
      Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 ||
      Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64) {
    const bool IsDst = Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 ||
                       Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64;
    const unsigned NumImplicitOps = IsDst ? 2 : 1;
      ErrInfo = "missing implicit register operands";
      if (!Dst->isUse()) {
        ErrInfo = "v_movreld_b32 vdst should be a use operand";
          UseOpIdx != StaticNumOps + 1) {
        ErrInfo = "movrel implicit operands should be tied";
      = MI.getOperand(StaticNumOps + NumImplicitOps - 1);
        !isSubRegOf(RI, ImpUse, IsDst ? *Dst : Src0)) {
      ErrInfo = "src0 should be subreg of implicit vector use";
      ErrInfo = "VALU instruction does not implicitly read exec mask";
    if (Soff && Soff->getReg() != AMDGPU::M0) {
      ErrInfo = "scalar stores must use m0 as offset register";
  default: return AMDGPU::INSTRUCTION_LIST_END;
  case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE;
  case AMDGPU::COPY: return AMDGPU::COPY;
  case AMDGPU::PHI: return AMDGPU::PHI;
  case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG;
  case AMDGPU::S_MOV_B32:
    return MI.getOperand(1).isReg() ?
           AMDGPU::COPY : AMDGPU::V_MOV_B32_e32;
  case AMDGPU::S_ADD_I32:
  case AMDGPU::S_ADD_U32: return AMDGPU::V_ADD_I32_e32;
  case AMDGPU::S_ADDC_U32: return AMDGPU::V_ADDC_U32_e32;
  case AMDGPU::S_SUB_I32:
  case AMDGPU::S_SUB_U32: return AMDGPU::V_SUB_I32_e32;
  case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32;
  case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_I32;
  case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e64;
  case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e64;
  case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e64;
  case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e64;
  case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e64;
  case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e64;
  case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e64;
  case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32;
  case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64;
  case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32;
  case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64;
  case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32;
  case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64;
  case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32;
  case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32;
  case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32;
  case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32;
  case AMDGPU::S_BFM_B32: return AMDGPU::V_BFM_B32_e64;
  case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32;
  case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32;
  case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32;
  case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e32;
  case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e32;
  case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e32;
  case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e32;
  case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e32;
  case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e32;
  case AMDGPU::S_CMP_EQ_U32: return AMDGPU::V_CMP_EQ_U32_e32;
  case AMDGPU::S_CMP_LG_U32: return AMDGPU::V_CMP_NE_U32_e32;
  case AMDGPU::S_CMP_GT_U32: return AMDGPU::V_CMP_GT_U32_e32;
  case AMDGPU::S_CMP_GE_U32: return AMDGPU::V_CMP_GE_U32_e32;
  case AMDGPU::S_CMP_LT_U32: return AMDGPU::V_CMP_LT_U32_e32;
  case AMDGPU::S_CMP_LE_U32: return AMDGPU::V_CMP_LE_U32_e32;
  case AMDGPU::S_CMP_EQ_U64: return AMDGPU::V_CMP_EQ_U64_e32;
  case AMDGPU::S_CMP_LG_U64: return AMDGPU::V_CMP_NE_U64_e32;
  case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64;
  case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32;
  case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32;
  case AMDGPU::S_FLBIT_I32: return AMDGPU::V_FFBH_I32_e64;
  case AMDGPU::S_CBRANCH_SCC0: return AMDGPU::S_CBRANCH_VCCZ;
  case AMDGPU::S_CBRANCH_SCC1: return AMDGPU::S_CBRANCH_VCCNZ;
  return getVALUOp(MI) != AMDGPU::INSTRUCTION_LIST_END;

                                                       unsigned OpNo) const {
  return RI.getRegClass(RCID);
  case AMDGPU::REG_SEQUENCE:
  case AMDGPU::INSERT_SUBREG:
  unsigned RCID = get(MI.getOpcode()).OpInfo[OpIdx].RegClass;
  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
    Opcode = AMDGPU::COPY;
    Opcode = AMDGPU::S_MOV_B32;
  if (RI.getCommonSubClass(&AMDGPU::VReg_64RegClass, VRC))
    VRC = &AMDGPU::VReg_64RegClass;
    VRC = &AMDGPU::VGPR_32RegClass;
  if (SuperReg.getSubReg() == AMDGPU::NoSubRegister) {
    BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
  BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), NewSuperReg)
  BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
    .addReg(NewSuperReg, 0, SubIdx);
    if (SubIdx == AMDGPU::sub0)
    if (SubIdx == AMDGPU::sub1)
    unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC,

void SIInstrInfo::swapOperands(MachineInstr &Inst) const {
  return RI.getCommonSubClass(RC, RI.getRegClass(OpInfo.RegClass)) == RC;
  RegSubRegPair SGPRUsed;
      if ((Op.getReg() != SGPRUsed.Reg || Op.getSubReg() != SGPRUsed.SubReg) &&
  if (HasImplicitSGPR) {
  if (CommutedOpc == -1) {
  unsigned Src0Reg = Src0.getReg();
  bool Src0Kill = Src0.isKill();
  else if (Src1.isReg()) {
  unsigned SGPRReg = findUsedSGPR(MI, VOP3Idx);
  for (unsigned i = 0; i < 3; ++i) {
    int Idx = VOP3Idx[i];
    if (SGPRReg == AMDGPU::NoRegister || SGPRReg == MO.getReg()) {
  unsigned SubRegs = VRC->getSize() / 4;
  for (unsigned i = 0; i < SubRegs; ++i) {
            get(AMDGPU::V_READFIRSTLANE_B32), SGPR)
          get(AMDGPU::REG_SEQUENCE), DstReg);
  for (unsigned i = 0; i < SubRegs; ++i) {
    unsigned OpReg = Op.getReg();
  if (MI.getOpcode() == AMDGPU::REG_SEQUENCE) {
  if (MI.getOpcode() == AMDGPU::INSERT_SUBREG) {
    if (DstRC != Src0RC) {
  if (SRsrcIdx != -1) {
    unsigned SRsrcRC = get(MI.getOpcode()).OpInfo[SRsrcIdx].RegClass;
                             RI.getRegClass(SRsrcRC))) {
      unsigned SRsrcPtr = buildExtractSubReg(MI, MRI, *SRsrc,
        &AMDGPU::VReg_128RegClass, AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass);
        .addImm(RsrcDataFormat & 0xFFFFFFFF);
        .addImm(RsrcDataFormat >> 32);
        .addImm(AMDGPU::sub0_sub1)
        BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), NewVAddrLo)
          .addReg(SRsrcPtr, 0, AMDGPU::sub0)
        BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e32), NewVAddrHi)
          .addReg(SRsrcPtr, 0, AMDGPU::sub1)
               "FIXME: Need to emit flat atomics here");
          .addReg(AMDGPU::NoRegister)
          MIB.addImm(GLC->getImm());
        MIB.addImm(TFE->getImm());
          .addReg(AMDGPU::NoRegister)
          .addReg(SRsrcPtr, 0, AMDGPU::sub0)
          .addReg(SRsrcPtr, 0, AMDGPU::sub1)
  while (!Worklist.empty()) {
    case AMDGPU::S_AND_B64:
      splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_AND_B32_e64);
    case AMDGPU::S_OR_B64:
      splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_OR_B32_e64);
    case AMDGPU::S_XOR_B64:
      splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_XOR_B32_e64);
    case AMDGPU::S_NOT_B64:
      splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::V_NOT_B32_e32);
    case AMDGPU::S_BCNT1_I32_B64:
      splitScalar64BitBCNT(Worklist, Inst);
    case AMDGPU::S_BFE_I64: {
      splitScalar64BitBFE(Worklist, Inst);
    case AMDGPU::S_LSHL_B32:
        NewOpcode = AMDGPU::V_LSHLREV_B32_e64;
    case AMDGPU::S_ASHR_I32:
        NewOpcode = AMDGPU::V_ASHRREV_I32_e64;
    case AMDGPU::S_LSHR_B32:
        NewOpcode = AMDGPU::V_LSHRREV_B32_e64;
    case AMDGPU::S_LSHL_B64:
        NewOpcode = AMDGPU::V_LSHLREV_B64;
    case AMDGPU::S_ASHR_I64:
        NewOpcode = AMDGPU::V_ASHRREV_I64;
    case AMDGPU::S_LSHR_B64:
        NewOpcode = AMDGPU::V_LSHRREV_B64;
    case AMDGPU::S_ABS_I32:
      lowerScalarAbs(Worklist, Inst);
    case AMDGPU::S_CBRANCH_SCC0:
    case AMDGPU::S_CBRANCH_SCC1:
    case AMDGPU::S_BFE_U64:
    case AMDGPU::S_BFM_B64:
    if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) {
    addSCCDefUsersToVALUWorklist(Inst, Worklist);
    if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) {
      unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16;
    } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) {
    if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) {
             "Scalar BFE is only implemented for constant width and offset");
      uint32_t BitWidth = (Imm & 0x7f0000) >> 16;
    unsigned NewDstReg = AMDGPU::NoRegister;
      addUsersToMoveToVALUWorklist(DstReg, MRI, Worklist);
    addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);

  BuildMI(MBB, MII, DL, get(AMDGPU::V_SUB_I32_e32), TmpReg)
  BuildMI(MBB, MII, DL, get(AMDGPU::V_MAX_I32_e64), ResultReg)
  addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
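
The lowerScalarAbs fragment above implements |x| without a dedicated abs instruction: V_SUB computes 0 - x, then V_MAX_I32 selects max(x, -x). A scalar model of that lowering (note that, as with the hardware sequence, INT32_MIN has no positive counterpart):

#include <algorithm>
#include <cassert>
#include <cstdint>

static int32_t absViaSubMax(int32_t X) {
  int32_t Neg = 0 - X;          // V_SUB_I32_e32 0, x
  return std::max(X, Neg);      // V_MAX_I32_e64 x, 0 - x
}

int main() {
  assert(absViaSubMax(-7) == 7);
  assert(absViaSubMax(7) == 7);
  assert(absViaSubMax(0) == 0);
}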
void SIInstrInfo::splitScalar64BitUnaryOp(
                                          unsigned Opcode) const {
    &AMDGPU::SGPR_32RegClass;
  MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub0, Src0SubRC);
  BuildMI(MBB, MII, DL, InstDesc, DestSub0)
  MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub1, Src0SubRC);
  BuildMI(MBB, MII, DL, InstDesc, DestSub1)
  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
  addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);

void SIInstrInfo::splitScalar64BitBinaryOp(
                                           unsigned Opcode) const {
    &AMDGPU::SGPR_32RegClass;
    &AMDGPU::SGPR_32RegClass;
  MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub0, Src0SubRC);
  MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
                                                       AMDGPU::sub0, Src1SubRC);
  MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub1, Src0SubRC);
  MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
                                                       AMDGPU::sub1, Src1SubRC);
  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
  addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);

void SIInstrInfo::splitScalar64BitBCNT(
  const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64);
    &AMDGPU::SGPR_32RegClass;
  MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
                                                      AMDGPU::sub0, SrcSubRC);
  MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
                                                      AMDGPU::sub1, SrcSubRC);
  BuildMI(MBB, MII, DL, InstDesc, MidReg)
  BuildMI(MBB, MII, DL, InstDesc, ResultReg)
  addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);

  uint32_t BitWidth = (Imm & 0x7f0000) >> 16;
         Offset == 0 && "Not implemented");
  if (BitWidth < 32) {
    BuildMI(MBB, MII, DL, get(AMDGPU::V_BFE_I32), MidRegLo)
    BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e32), MidRegHi)
    BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
    addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
  BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e64), TmpReg)
  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
    .addImm(AMDGPU::sub0)
  addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
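
The S_BFE_I64 split above handles the width < 32 case by extracting the field from the low half with V_BFE_I32 and then filling the high half with the sign bit (arithmetic shift right by 31). A scalar model, assuming offset 0 (as the assert requires) and 1 <= width < 32; it relies on two's-complement narrowing and arithmetic right shift, which hold on all LLVM hosts:

#include <cassert>
#include <cstdint>

static int64_t bfeI64Width(uint64_t Src, unsigned Width) {
  uint32_t SrcLo = static_cast<uint32_t>(Src);
  // V_BFE_I32: extract Width bits at offset 0 and sign-extend to 32 bits.
  int32_t Lo = static_cast<int32_t>(SrcLo << (32 - Width)) >> (32 - Width);
  int32_t Hi = Lo >> 31;        // V_ASHRREV_I32 31, lo
  uint64_t Bits = (static_cast<uint64_t>(static_cast<uint32_t>(Hi)) << 32) |
                  static_cast<uint32_t>(Lo);
  return static_cast<int64_t>(Bits);
}

int main() {
  assert(bfeI64Width(0x80u, 8) == -128); // low byte 0x80, width 8
  assert(bfeI64Width(0x7Fu, 8) == 127);
}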
void SIInstrInfo::addUsersToMoveToVALUWorklist(
  } while (I != E && I->getParent() == &UseMI);

void SIInstrInfo::addSCCDefUsersToVALUWorklist(
    if (MI.findRegisterDefOperandIdx(AMDGPU::SCC) != -1)
    if (MI.findRegisterUseOperandIdx(AMDGPU::SCC) != -1)
  case AMDGPU::REG_SEQUENCE:
  case AMDGPU::INSERT_SUBREG:

unsigned SIInstrInfo::findUsedSGPR(const MachineInstr &MI,
                                   int OpIndices[3]) const {
  if (SGPRReg != AMDGPU::NoRegister)
  unsigned UsedSGPRs[3] = { AMDGPU::NoRegister };
  for (unsigned i = 0; i < 3; ++i) {
    int Idx = OpIndices[i];
  if (UsedSGPRs[0] != AMDGPU::NoRegister) {
    if (UsedSGPRs[0] == UsedSGPRs[1] || UsedSGPRs[0] == UsedSGPRs[2])
      SGPRReg = UsedSGPRs[0];
  if (SGPRReg == AMDGPU::NoRegister && UsedSGPRs[1] != AMDGPU::NoRegister) {
    if (UsedSGPRs[1] == UsedSGPRs[2])
      SGPRReg = UsedSGPRs[1];
                                                 unsigned OperandName) const {
    RsrcDataFormat |= (1ULL << 56);  // set ATC = 1
    RsrcDataFormat |= (2ULL << 59);  // set MTYPE = 2
  return RsrcDataFormat;

  if (!Addr || !Addr->isFI())
    return AMDGPU::NoRegister;
    return AMDGPU::NoRegister;
    return AMDGPU::NoRegister;
    return AMDGPU::NoRegister;
    return AMDGPU::NoRegister;

  unsigned DescSize = Desc.getSize();
  if (DescSize != 0 && DescSize != 4)
  if (Opc == AMDGPU::WAVE_BARRIER)
  case AMDGPU::SI_MASK_BRANCH:
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::DBG_VALUE:
  case TargetOpcode::BUNDLE:
  static const std::pair<int, const char *> TargetIndices[] = {
void legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const
Legalize the OpIndex operand of this instruction by inserting a MOV.
MachineBasicBlock::instr_iterator begin() const
Return an iterator to the first bundled instruction.
void push_back(const T &Elt)
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
static unsigned getVGPRSpillSaveOpcode(unsigned Size)
static bool isSGPRSpill(const MachineInstr &MI)
unsigned getNumImplicitUses() const
Return the number of implicit uses this instruction has.
ArrayRef< int16_t > getRegSplitParts(const TargetRegisterClass *RC, unsigned EltSize) const
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function. ...
bool opCanUseInlineConstant(unsigned OpType) const
bool isVGPR(const MachineRegisterInfo &MRI, unsigned Reg) const
bool isVGPRSpillingEnabled(const Function &F) const
static bool sopkIsZext(const MachineInstr &MI)
void ChangeToRegister(unsigned Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isDebug=false)
ChangeToRegister - Replace this operand with a new register operand of the specified value...
unsigned calculateLDSSpillAddress(MachineBasicBlock &MBB, MachineInstr &MI, RegScavenger *RS, unsigned TmpReg, unsigned Offset, unsigned Size) const
MachineBasicBlock * getMBB() const
bool isTargetIndex() const
isTargetIndex - Tests if this is a MO_TargetIndex operand.
unsigned createVirtualRegister(const TargetRegisterClass *RegClass)
createVirtualRegister - Create and return a new virtual register in the function with the specified register class.
static bool isStride64(unsigned Opc)
void legalizeOperandsVOP3(MachineRegisterInfo &MRI, MachineInstr &MI) const
Fix operands in MI to satisfy constant bus requirements.
iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
Describe properties that are true of each instruction in the target description file.
bool mayStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly modify memory.
void setIsUndef(bool Val=true)
bool hasRegisterImplicitUseOperand(unsigned Reg) const
Returns true if the MachineInstr has an implicit-use operand of exactly the given register (not considering sub/super-registers).
static bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
void moveToVALU(MachineInstr &MI) const
Replace this instruction's opcode with the equivalent VALU opcode.
static bool isSOPK(const MachineInstr &MI)
const TargetRegisterClass * getRegClassForReg(const MachineRegisterInfo &MRI, unsigned Reg) const
bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the informati...
bool isOperandLegal(const MachineInstr &MI, unsigned OpIdx, const MachineOperand *MO=nullptr) const
Check if MO is a legal operand if it was the OpIdx Operand for MI.
unsigned getScratchWaveOffsetReg() const
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
unsigned getNumOperands() const
Return the number of values used by this operation.
MachineBasicBlock reference.
const char * getSymbolName() const
constexpr bool isInt< 16 >(int64_t x)
const SDValue & getOperand(unsigned Num) const
bool isSGPRClass(const TargetRegisterClass *RC) const
unsigned getMovOpcode(const TargetRegisterClass *DstRC) const
const Function * getFunction() const
getFunction - Return the LLVM function that this machine code represents
iterator_range< mmo_iterator > memoperands()
static SDValue findChainOperand(SDNode *Load)
bool hasModifiers(unsigned Opcode) const
Return true if this instruction has any modifiers.
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, bool KillSrc) const override
bool isVariadic() const
Return true if this instruction can have a variable number of operands.
static bool isSMRD(const MachineInstr &MI)
void clearVirtRegs()
clearVirtRegs - Remove all virtual registers (after physreg assignment).
unsigned getScratchRSrcReg() const
Returns the physical register reserved for use as the resource descriptor for scratch accesses...
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
static int operandBitWidth(uint8_t OperandType)
static MachineInstr * swapRegAndNonRegOperand(MachineInstr &MI, MachineOperand &RegOp, MachineOperand &NonRegOp)
int pseudoToMCOpcode(int Opcode) const
Return a target-specific opcode if Opcode is a pseudo instruction.
const TargetRegisterClass * getSubRegClass(const TargetRegisterClass *RC, unsigned SubIdx) const
unsigned getHWRegIndex(unsigned Reg) const
bool isInlinableLiteral16(int16_t Literal, bool HasInv2Pi)
bool mayAccessFlatAddressSpace(const MachineInstr &MI) const
static bool isFixedSize(const MachineInstr &MI)
static MCDisassembler::DecodeStatus addOperand(MCInst &Inst, const MCOperand &Opnd)
unsigned getMaxFlatWorkGroupSize() const
LLVM_READONLY int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIdx)
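
A short usage sketch for getNamedOperandIdx, looking an operand up by name instead of hard-coding its index; MI is assumed to be a MachineInstr already in scope:

  int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                           AMDGPU::OpName::src0);
  if (Src0Idx != -1) {
    const MachineOperand &Src0 = MI.getOperand(Src0Idx);
    // ... inspect or rewrite Src0 ...
  }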
bool verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const override
AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB)
The main low level interface to the alias analysis implementation.
static bool isDS(const MachineInstr &MI)
unsigned getSize() const
Return the size of the register in bytes, which is also the size of a stack slot allocated to hold a spilled copy of this register.
A description of a memory reference used in the backend.
static use_iterator use_end()
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
static bool isFLAT(const MachineInstr &MI)
void insertWaitStates(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, int Count) const
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
const TargetRegisterInfo * getTargetRegisterInfo() const
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
static MachineOperand CreateReg(unsigned Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false)
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
static bool isMIMG(const MachineInstr &MI)
unsigned isSGPRStackAccess(const MachineInstr &MI, int &FrameIndex) const
unsigned getLDSSize() const
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
LLVM_READONLY MachineOperand * getNamedOperand(MachineInstr &MI, unsigned OperandName) const
Returns the operand named Op.
const TargetRegisterClass * getRegClass(unsigned Reg) const
Return the register class of the specified virtual register.
void setHasSpilledVGPRs(bool Spill=true)
void setTIDReg(unsigned Reg)
Name of external global symbol.
const MCInstrDesc & getMCOpcodeFromPseudo(unsigned Opcode) const
Return the descriptor of the target-specific machine instruction that corresponds to the specified pseudo or native opcode.
LLVM_READONLY int getCommuteOrig(uint16_t Opcode)
static bool changesVGPRIndexingMode(const MachineInstr &MI)
static unsigned getSGPRSpillSaveOpcode(unsigned Size)
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
static bool isVALU(const MachineInstr &MI)
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
INLINEASM - Represents an inline asm block.
bool opCanUseLiteralConstant(unsigned OpType) const
static bool isSubRegOf(const SIRegisterInfo &TRI, const MachineOperand &SuperVec, const MachineOperand &SubReg)
LLVM_NODISCARD bool empty() const
unsigned getNumOperands() const
Access to explicit operands of the instruction.
void emitError(unsigned LocCookie, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location information.
uint8_t OperandType
Information about the type of the operand.
void RemoveOperand(unsigned i)
Erase an operand from an instruction, leaving it with one fewer operand than it started with...
const MachineBasicBlock & front() const
LLVM_READONLY int commuteOpcode(unsigned Opc) const
static bool isMUBUF(const MachineInstr &MI)
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
bool canReadVGPR(const MachineInstr &MI, unsigned OpNo) const
static bool shouldReadExec(const MachineInstr &MI)
uint64_t getScratchRsrcWords23() const
const uint64_t RSRC_DATA_FORMAT
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx0, unsigned OpIdx1) const override
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
bool isLegalRegOperand(const MachineRegisterInfo &MRI, const MCOperandInfo &OpInfo, const MachineOperand &MO) const
Check if MO (a register operand) is a legal register for the given operand description.
unsigned isStackAccess(const MachineInstr &MI, int &FrameIndex) const
Itinerary data supplied by a subtarget to be used by a target.
void untieRegOperand(unsigned OpIdx)
Break any tie involving OpIdx.
static unsigned findImplicitSGPRRead(const MachineInstr &MI)
Generation getGeneration() const
size_t size() const
size - Get the array size.
const TargetRegisterClass * constrainRegClass(unsigned Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common subclass of RC and the current register class.
unsigned getKillRegState(bool B)
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
void ChangeToImmediate(int64_t ImmVal)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value...
const MachineBasicBlock * getParent() const
mmo_iterator memoperands_end() const
static bool nodesHaveSameOperandValue(SDNode *N0, SDNode *N1, unsigned OpName)
Returns true if both nodes have the same value for the given operand Op, or if both nodes do not have...
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
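
A sketch of the BuildMI interface described above, as it might be used from in-tree AMDGPU code (the helper name is hypothetical; the AMDGPU headers are assumed available):

static void emitZero(llvm::MachineBasicBlock &MBB,
                     llvm::MachineBasicBlock::iterator MI,
                     const llvm::DebugLoc &DL, const SIInstrInfo *TII,
                     unsigned DestReg) {
  // Insert "s_mov_b32 DestReg, 0" immediately before MI.
  llvm::BuildMI(MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), DestReg)
      .addImm(0);
}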
Address of a global value.
bool getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg, int64_t &Offset, const TargetRegisterInfo *TRI) const final
initializer< Ty > init(const Ty &Val)
unsigned getTargetFlags() const
bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi)
Is this literal inlinable.
const MachineInstrBuilder & setMemRefs(MachineInstr::mmo_iterator b, MachineInstr::mmo_iterator e) const
void addLiveIn(MCPhysReg PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
This file declares the machine register scavenger class.
const TargetRegisterClass * getEquivalentVGPRClass(const TargetRegisterClass *SRC) const
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const override
bool isShader(CallingConv::ID cc)
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
HazardRecognizer - This determines whether or not an instruction can be issued this cycle...
bool hasVGPRs(const TargetRegisterClass *RC) const
This is an important class for using LLVM in a threaded context.
AAMDNodes getAAInfo() const
Return the AA tags for the memory reference.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
bool isReallyTriviallyReMaterializable(const MachineInstr &MI, AliasAnalysis *AA) const override
Address space for private memory.
int64_t getSExtValue() const
Get sign extended value.
const MachineOperand & getOperand(unsigned i) const
static unsigned getSGPRSpillRestoreOpcode(unsigned Size)
const TargetRegisterClass * getEquivalentSGPRClass(const TargetRegisterClass *VRC) const
bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const override
unsigned getSubRegFromChannel(unsigned Channel) const
unsigned getSize() const
Return the number of bytes in the encoding of this instruction, or zero if the encoding size cannot be known from the opcode.
bool isRegTiedToUseOperand(unsigned DefOpIdx, unsigned *UseOpIdx=nullptr) const
Given the index of a register def operand, check if the register def is tied to a source operand...
bool expandPostRAPseudo(MachineInstr &MI) const override
static unsigned getNumOperandsNoGlue(SDNode *Node)
static void removeModOperands(MachineInstr &MI)
bool swapSourceModifiers(MachineInstr &MI, MachineOperand &Src0, unsigned Src0OpName, MachineOperand &Src1, unsigned Src1OpName) const
unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
void legalizeGenericOperand(MachineBasicBlock &InsertMBB, MachineBasicBlock::iterator I, const TargetRegisterClass *DstRC, MachineOperand &Op, MachineRegisterInfo &MRI, const DebugLoc &DL) const
bool isVariadic(QueryType Type=IgnoreBundle) const
Return true if this instruction can have a variable number of operands.
bool hasScalarStores() const
unsigned getBitWidth() const
Return the number of bits in the APInt.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do so.
void setImm(int64_t immVal)
bool hasOneMemOperand() const
Return true if this instruction has exactly one MachineMemOperand.
bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore.
uint64_t getDefaultRsrcDataFormat() const
ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II, const ScheduleDAG *DAG) const override
This is used by the post-RA scheduler (SchedulePostRAList.cpp).
unsigned insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock &NewDestBB, const DebugLoc &DL, int64_t BrOffset, RegScavenger *RS=nullptr) const override
bool empty() const
empty - Check if the array is empty.
Address space for flat memory.
The AMDGPU TargetMachine interface definition for hw codgen targets.
virtual bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const
Test if the given instruction should be considered a scheduling boundary.
static unsigned getVGPRSpillRestoreOpcode(unsigned Size)
static bool isVOP2(const MachineInstr &MI)
void legalizeOperandsSMRD(MachineRegisterInfo &MRI, MachineInstr &MI) const
unsigned getSubReg() const
bool isIntN(unsigned N, int64_t x)
isIntN - Checks if a signed integer fits into the given (dynamic) bit width.
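
A minimal re-implementation of the isIntN check for illustration (llvm::isIntN behaves the same way):

#include <cassert>
#include <cstdint>

static bool isIntN(unsigned N, int64_t x) {
  return N >= 64 || (-(INT64_C(1) << (N - 1)) <= x &&
                     x < (INT64_C(1) << (N - 1)));
}

int main() {
  assert(isIntN(16, 32767));   // fits in i16
  assert(!isIntN(16, 32768));  // one past INT16_MAX
  assert(isIntN(16, -32768));
}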
void setHasSpilledSGPRs(bool Spill=true)
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
bool isHighLatencyInstruction(const MachineInstr &MI) const
bool hasInv2PiInlineImm() const
This class contains a discriminated union of information about pointers in memory operands...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Operands with register or inline constant.
SIInstrInfo(const SISubtarget &)
EH_LABEL - Represents a label in mid basic block used to track locations needed for debug and excepti...
void setIsKill(bool Val=true)
const uint64_t RSRC_TID_ENABLE
DebugLoc findDebugLoc(instr_iterator MBBI)
Find the next valid DebugLoc starting at MBBI, skipping any DBG_VALUE instructions.
unsigned getOpcode() const
Return the opcode number for this descriptor.
bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, unsigned Reg, MachineRegisterInfo *MRI) const final
The memory access writes data.
bool isInlineConstant(const APInt &Imm) const
Representation for a specific memory location.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
bool memoperands_empty() const
Return true if we don't have any memory operands which describe the memory access done by this instruction.
Iterator for intrusive lists based on ilist_node.
void setDesc(const MCInstrDesc &tid)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one...
bool analyzeBranchImpl(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const
void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
LLVM_READONLY int getVOPe32(uint16_t Opcode)
bool hasVALU32BitEncoding(unsigned Opcode) const
Return true if this 64-bit VALU instruction has a 32-bit encoding.
static bool offsetsDoNotOverlap(int WidthA, int OffsetA, int WidthB, int OffsetB)
static bool isSALU(const MachineInstr &MI)
MachineOperand class - Representation of each machine instruction operand.
unsigned readlaneVGPRToSGPR(unsigned SrcReg, MachineInstr &UseMI, MachineRegisterInfo &MRI) const
Copy a value from a VGPR (SrcReg) to SGPR.
bool isImmOperandLegal(const MachineInstr &MI, unsigned OpNo, const MachineOperand &MO) const
bool areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb, AliasAnalysis *AA=nullptr) const override
LLVM_NODISCARD T pop_back_val()
const uint64_t RSRC_INDEX_STRIDE_SHIFT
bool isSGPRReg(const MachineRegisterInfo &MRI, unsigned Reg) const
Represents one node in the SelectionDAG.
bool usesConstantBus(const MachineRegisterInfo &MRI, const MachineOperand &MO, const MCOperandInfo &OpInfo) const
Returns true if this operand uses the constant bus.
const MachineInstrBuilder & addFrameIndex(int Idx) const
void enterBasicBlockEnd(MachineBasicBlock &MBB)
Start tracking liveness from the end of basic block MBB.
bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, int64_t &Offset1, int64_t &Offset2) const override
MCSymbol reference (for debug/eh info)
unsigned Log2_32(uint32_t Value)
Log2_32 - This function returns the floor log base 2 of the specified value, -1 if the value is zero...
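A worked model of the floor-log-2 behavior described above (a sketch using the GCC/Clang __builtin_clz intrinsic; llvm::Log2_32 uses countLeadingZeros internally):

#include <cassert>

static unsigned log2_32(unsigned Value) {
  // Returns (unsigned)-1 for zero, matching the description above.
  return Value == 0 ? static_cast<unsigned>(-1)
                    : 31 - __builtin_clz(Value);
}

int main() {
  assert(log2_32(1) == 0);
  assert(log2_32(16) == 4);
  assert(log2_32(17) == 4); // floor semantics
}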
bool isFPImm() const
isFPImm - Tests if this is a MO_FPImmediate operand.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, unsigned base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SynchronizationScope SynchScope=CrossThread, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
Class for arbitrary precision integers.
const Value * getValue() const
Return the base address of the memory access.
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const override
const TargetRegisterClass * getRegClassConstraint(unsigned OpIdx, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const
Compute the static register class constraint for operand OpIdx.
iterator_range< mop_iterator > implicit_operands()
void addImplicitDefUseOperands(MachineFunction &MF)
Add all implicit def and use operands to this instruction.
const TargetRegisterClass * getPhysRegClass(unsigned Reg) const
Return the 'base' register class for this register.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
void replaceRegWith(unsigned FromReg, unsigned ToReg)
replaceRegWith - Replace all instances of FromReg with ToReg in the machine function.
static bool isVOP3(const MachineInstr &MI)
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
The memory access reads data.
bool shouldClusterMemOps(MachineInstr &FirstLdSt, MachineInstr &SecondLdSt, unsigned NumLoads) const final
void legalizeOperandsVOP2(MachineRegisterInfo &MRI, MachineInstr &MI) const
Legalize operands in MI by either commuting it or inserting a copy of src1.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
static bool compareMachineOp(const MachineOperand &Op0, const MachineOperand &Op1)
Representation of each machine instruction.
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
static bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
unsigned findUnusedRegister(const MachineRegisterInfo &MRI, const TargetRegisterClass *RC, const MachineFunction &MF) const
Returns a register that is not used at any point in the function.
This class keeps track of the SPI_SP_INPUT_ADDR config register, which tells the hardware which inter...
Interface definition for SIInstrInfo.
bool isVGPRCopy(const MachineInstr &MI) const
void enterBasicBlock(MachineBasicBlock &MBB)
Start tracking liveness from the begin of basic block MBB.
static bool isMTBUF(const MachineInstr &MI)
OperandType
Types of operands to CF instructions.
bool isBranchOffsetInRange(unsigned BranchOpc, int64_t BrOffset) const override
unsigned getMaxPrivateElementSize() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
int64_t getNamedImmOperand(const MachineInstr &MI, unsigned OpName) const
Get required immediate operand.
static bool isVOPC(const MachineInstr &MI)
int16_t RegClass
This specifies the register class enumeration of the operand if the operand is a register.
use_iterator use_begin(unsigned RegNo) const
unsigned getNumWaitStates(const MachineInstr &MI) const
Return the number of wait states that result from executing this instruction.
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
bool hasOneNonDBGUse(unsigned RegNo) const
hasOneNonDBGUse - Return true if there is exactly one non-Debug instruction using the specified register.
void setReg(unsigned Reg)
Change the register this operand corresponds to.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const override
static MachineOperand CreateImm(int64_t Val)
LLVM_ATTRIBUTE_ALWAYS_INLINE size_type size() const
MachineBasicBlock * getBranchDestBlock(const MachineInstr &MI) const override
void setSubReg(unsigned subReg)
void clearKillFlags(unsigned Reg) const
clearKillFlags - Iterate over all the uses of the given register and clear the kill flag from the Mac...
bool isLowLatencyInstruction(const MachineInstr &MI) const
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Abstract Stack Frame Index.
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
constexpr bool isUInt< 16 >(uint64_t x)
ArrayRef< std::pair< int, const char * > > getSerializableTargetIndices() const override
MachineInstr * removeFromParent()
Unlink 'this' from the containing basic block, and return it without deleting it. ...
bool isLiveIn(MCPhysReg Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
EVT getValueType() const
Return the ValueType of the referenced return value.
static unsigned getVALUOp(const MachineInstr &MI)
MachineInstr * getVRegDef(unsigned Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
Operands with register or 32-bit immediate.
unsigned isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
bool isSALUOpSupportedOnVALU(const MachineInstr &MI) const
unsigned getReg() const
getReg - Returns the register number.
MIBundleBuilder & append(MachineInstr *MI)
Insert MI into MBB by appending it to the instructions in the bundle.
bool isCommutable(QueryType Type=IgnoreBundle) const
Return true if this may be a 2- or 3-address instruction (of the form "X = op Y, Z, ..."), which produces the same result if Y and Z are exchanged.
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
bool isLegalVSrcOperand(const MachineRegisterInfo &MRI, const MCOperandInfo &OpInfo, const MachineOperand &MO) const
Check if MO would be a valid operand for the given operand definition OpInfo.
void setRegUsed(unsigned Reg, LaneBitmask LaneMask=LaneBitmask::getAll())
Tell the scavenger a register is used.
const uint64_t RSRC_ELEMENT_SIZE_SHIFT
bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi)
bool hasModifiersSet(const MachineInstr &MI, unsigned OpName) const
MachineInstr * convertToThreeAddress(MachineFunction::iterator &MBB, MachineInstr &MI, LiveVariables *LV) const override
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned char TargetFlags=0) const
static bool isVGPRSpill(const MachineInstr &MI)
#define LLVM_FALLTHROUGH
LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Operand with 32-bit immediate that uses the constant bus.
const MCOperandInfo * OpInfo
void ChangeToFrameIndex(int Idx)
Replace this operand with a frame index.
const MachineInstrBuilder & addOperand(const MachineOperand &MO) const
uint64_t getSize() const
Return the size in bytes of the memory reference.
void insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const override
void legalizeOperands(MachineInstr &MI) const
Legalize all operands in this instruction.
const TargetRegisterClass * getOpRegClass(const MachineInstr &MI, unsigned OpNo) const
Return the correct register class for OpNo.
StringRef - Represent a constant reference to a string, i.e.
bool hasCalculatedTID() const
bool isLiteralConstantLike(const MachineOperand &MO, const MCOperandInfo &OpInfo) const
LLVM_READONLY int getAddr64Inst(uint16_t Opcode)
This holds information about one operand of a machine instruction, indicating the register class for ...
bool isMoveImmediate(QueryType Type=IgnoreBundle) const
Return true if this instruction is a move immediate (including conditional moves) instruction...
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation...
const MachineInstrBuilder & addReg(unsigned RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
unsigned getTIDReg() const
reg_begin/reg_end - Provide iteration support to walk over all definitions and uses of a register wit...
void finalizeBundle(MachineBasicBlock &MBB, MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
finalizeBundle - Finalize a machine instruction bundle which includes a sequence of instructions starting from FirstMI to LastMI (exclusive).
static bool isVOP1(const MachineInstr &MI)
unsigned pred_size() const
bool modifiesRegister(unsigned Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr modifies (fully define or partially define) the specified register...
bool findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const override
static cl::opt< unsigned > BranchOffsetBits("amdgpu-s-branch-bits", cl::ReallyHidden, cl::init(16), cl::desc("Restrict range of branch instructions (DEBUG)"))
LLVM_READONLY int getCommuteRev(uint16_t Opcode)
Helper class for constructing bundles of MachineInstrs.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode...
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
uint64_t getZExtValue() const
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
unsigned scavengeRegister(const TargetRegisterClass *RegClass, MachineBasicBlock::iterator I, int SPAdj)
Make a register of the specific register class available and do the appropriate bookkeeping.
bool contains(unsigned Reg) const
Return true if the specified register is included in this register class.