#define DEBUG_TYPE "reginfo"

#define GET_REGINFO_TARGET_DESC
#include "PPCGenRegisterInfo.inc"
STATISTIC(InflateGPRC, "Number of gprc inputs for getLargestLegalClass");
STATISTIC(InflateGP8RC, "Number of g8rc inputs for getLargestLegalClass");
static cl::opt<bool> EnableBasePointer(
    "ppc-use-base-pointer", cl::Hidden, cl::init(true),
    cl::desc("Enable use of a base pointer for complex stack frames"));

static cl::opt<bool> AlwaysBasePointer(
    "ppc-always-use-base-pointer", cl::Hidden, cl::init(false),
    cl::desc("Force the use of a base pointer in every function"));

static cl::opt<bool> EnableGPRToVecSpills(
    "ppc-enable-gpr-to-vsr-spills", cl::Hidden, cl::init(false),
    cl::desc("Enable spills from gpr to vsr rather than stack"));

static cl::opt<bool> StackPtrConst(
    "ppc-stack-ptr-caller-preserved",
    cl::desc("Consider R1 caller preserved so stack saves of "
             "caller preserved registers can be LICM candidates"),
    cl::init(true), cl::Hidden);

static cl::opt<unsigned> MaxCRBitSpillDist(
    "ppc-max-crbit-spill-dist",
    cl::desc("Maximum search distance for definition of CR bit "
             "spill on ppc"),
    cl::Hidden, cl::init(100));

static cl::opt<bool> ReportAccMoves(
    "ppc-report-acc-moves",
    cl::desc("Emit information about accumulator register spills "
             "and copies"),
    cl::Hidden, cl::init(false));
PPCRegisterInfo::PPCRegisterInfo(const PPCTargetMachine &TM)
    : PPCGenRegisterInfo(TM.isPPC64() ? PPC::LR8 : PPC::LR,
                         TM.isPPC64() ? 0 : 1,
                         TM.isPPC64() ? 0 : 1),
      TM(TM) {
  ImmToIdxMap[PPC::LD] = PPC::LDX;     ImmToIdxMap[PPC::STD] = PPC::STDX;
  ImmToIdxMap[PPC::LBZ] = PPC::LBZX;   ImmToIdxMap[PPC::STB] = PPC::STBX;
  ImmToIdxMap[PPC::LHZ] = PPC::LHZX;   ImmToIdxMap[PPC::LHA] = PPC::LHAX;
  ImmToIdxMap[PPC::LWZ] = PPC::LWZX;   ImmToIdxMap[PPC::LWA] = PPC::LWAX;
  ImmToIdxMap[PPC::LFS] = PPC::LFSX;   ImmToIdxMap[PPC::LFD] = PPC::LFDX;
  ImmToIdxMap[PPC::STH] = PPC::STHX;   ImmToIdxMap[PPC::STW] = PPC::STWX;
  ImmToIdxMap[PPC::STFS] = PPC::STFSX; ImmToIdxMap[PPC::STFD] = PPC::STFDX;
  ImmToIdxMap[PPC::ADDI] = PPC::ADD4;
  ImmToIdxMap[PPC::LWA_32] = PPC::LWAX_32;

  ImmToIdxMap[PPC::LHA8] = PPC::LHAX8; ImmToIdxMap[PPC::LBZ8] = PPC::LBZX8;
  ImmToIdxMap[PPC::LHZ8] = PPC::LHZX8; ImmToIdxMap[PPC::LWZ8] = PPC::LWZX8;
  ImmToIdxMap[PPC::STB8] = PPC::STBX8; ImmToIdxMap[PPC::STH8] = PPC::STHX8;
  ImmToIdxMap[PPC::STW8] = PPC::STWX8; ImmToIdxMap[PPC::STDU] = PPC::STDUX;
  ImmToIdxMap[PPC::ADDI8] = PPC::ADD8;
  ImmToIdxMap[PPC::LQ] = PPC::LQX_PSEUDO;
  ImmToIdxMap[PPC::STQ] = PPC::STQX_PSEUDO;

  ImmToIdxMap[PPC::DFLOADf32] = PPC::LXSSPX;
  ImmToIdxMap[PPC::DFLOADf64] = PPC::LXSDX;
  ImmToIdxMap[PPC::SPILLTOVSR_LD] = PPC::SPILLTOVSR_LDX;
  ImmToIdxMap[PPC::SPILLTOVSR_ST] = PPC::SPILLTOVSR_STX;
  ImmToIdxMap[PPC::DFSTOREf32] = PPC::STXSSPX;
  ImmToIdxMap[PPC::DFSTOREf64] = PPC::STXSDX;
  ImmToIdxMap[PPC::LXV] = PPC::LXVX;
  ImmToIdxMap[PPC::LXSD] = PPC::LXSDX;
  ImmToIdxMap[PPC::LXSSP] = PPC::LXSSPX;
  ImmToIdxMap[PPC::STXV] = PPC::STXVX;
  ImmToIdxMap[PPC::STXSD] = PPC::STXSDX;
  ImmToIdxMap[PPC::STXSSP] = PPC::STXSSPX;
  ImmToIdxMap[PPC::EVLDD] = PPC::EVLDDX;
  ImmToIdxMap[PPC::EVSTDD] = PPC::EVSTDDX;
  ImmToIdxMap[PPC::SPESTW] = PPC::SPESTWX;
  ImmToIdxMap[PPC::SPELWZ] = PPC::SPELWZX;

  ImmToIdxMap[PPC::PLBZ] = PPC::LBZX;   ImmToIdxMap[PPC::PLBZ8] = PPC::LBZX8;
  ImmToIdxMap[PPC::PLHZ] = PPC::LHZX;   ImmToIdxMap[PPC::PLHZ8] = PPC::LHZX8;
  ImmToIdxMap[PPC::PLHA] = PPC::LHAX;   ImmToIdxMap[PPC::PLHA8] = PPC::LHAX8;
  ImmToIdxMap[PPC::PLWZ] = PPC::LWZX;   ImmToIdxMap[PPC::PLWZ8] = PPC::LWZX8;
  ImmToIdxMap[PPC::PLWA] = PPC::LWAX;   ImmToIdxMap[PPC::PLWA8] = PPC::LWAX;
  ImmToIdxMap[PPC::PLD] = PPC::LDX;     ImmToIdxMap[PPC::PSTD] = PPC::STDX;
  ImmToIdxMap[PPC::PSTB] = PPC::STBX;   ImmToIdxMap[PPC::PSTB8] = PPC::STBX8;
  ImmToIdxMap[PPC::PSTH] = PPC::STHX;   ImmToIdxMap[PPC::PSTH8] = PPC::STHX8;
  ImmToIdxMap[PPC::PSTW] = PPC::STWX;   ImmToIdxMap[PPC::PSTW8] = PPC::STWX8;
  ImmToIdxMap[PPC::PLFS] = PPC::LFSX;   ImmToIdxMap[PPC::PSTFS] = PPC::STFSX;
  ImmToIdxMap[PPC::PLFD] = PPC::LFDX;   ImmToIdxMap[PPC::PSTFD] = PPC::STFDX;
  ImmToIdxMap[PPC::PLXSSP] = PPC::LXSSPX;
  ImmToIdxMap[PPC::PSTXSSP] = PPC::STXSSPX;
  ImmToIdxMap[PPC::PLXSD] = PPC::LXSDX; ImmToIdxMap[PPC::PSTXSD] = PPC::STXSDX;
  ImmToIdxMap[PPC::PLXV] = PPC::LXVX;   ImmToIdxMap[PPC::PSTXV] = PPC::STXVX;

  ImmToIdxMap[PPC::LXVP] = PPC::LXVPX;
  ImmToIdxMap[PPC::STXVP] = PPC::STXVPX;
  ImmToIdxMap[PPC::PLXVP] = PPC::LXVPX;
  ImmToIdxMap[PPC::PSTXVP] = PPC::STXVPX;
}
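// getPointerRegClass - Return the register class to use to hold pointers.
// The _NOX0/_NOR0 variants exclude X0/R0, which reads as a literal zero when
// used as the base register of a memory access.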
  if (Kind == 1) {
    if (TM.isPPC64())
      return &PPC::G8RC_NOX0RegClass;
    return &PPC::GPRC_NOR0RegClass;
  }

  if (TM.isPPC64())
    return &PPC::G8RCRegClass;
  return &PPC::GPRCRegClass;
}
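// getCalleeSavedRegs - Return the save list for the function's calling
// convention. The list chosen depends on 32- vs 64-bit, SVR4 vs AIX ABI, and
// whether Altivec, paired-vector (VSRP), or SPE registers must be preserved.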
  if (!TM.isPPC64() && Subtarget.isAIXABI())

  if (Subtarget.hasVSX()) {
    if (Subtarget.pairedVectorMemops())
      return CSR_64_AllRegs_VSRP_SaveList;
    if (Subtarget.isAIXABI() && !TM.getAIXExtendedAltivecABI())
      return CSR_64_AllRegs_AIX_Dflt_VSX_SaveList;
    return CSR_64_AllRegs_VSX_SaveList;
  }
  if (Subtarget.hasAltivec()) {
    if (Subtarget.isAIXABI() && !TM.getAIXExtendedAltivecABI())
      return CSR_64_AllRegs_AIX_Dflt_Altivec_SaveList;
    return CSR_64_AllRegs_Altivec_SaveList;
  }
  return CSR_64_AllRegs_SaveList;
    if (Subtarget.pairedVectorMemops())
      return SaveR2 ? CSR_SVR64_ColdCC_R2_VSRP_SaveList
                    : CSR_SVR64_ColdCC_VSRP_SaveList;
    if (Subtarget.hasAltivec())
      return SaveR2 ? CSR_SVR64_ColdCC_R2_Altivec_SaveList
                    : CSR_SVR64_ColdCC_Altivec_SaveList;
    return SaveR2 ? CSR_SVR64_ColdCC_R2_SaveList
                  : CSR_SVR64_ColdCC_SaveList;

    if (Subtarget.pairedVectorMemops())
      return CSR_SVR32_ColdCC_VSRP_SaveList;
    else if (Subtarget.hasAltivec())
      return CSR_SVR32_ColdCC_Altivec_SaveList;
    else if (Subtarget.hasSPE())
      return CSR_SVR32_ColdCC_SPE_SaveList;
    return CSR_SVR32_ColdCC_SaveList;
    if (Subtarget.pairedVectorMemops()) {
      if (Subtarget.isAIXABI()) {
        if (!TM.getAIXExtendedAltivecABI())
          return SaveR2 ? CSR_PPC64_R2_SaveList : CSR_PPC64_SaveList;
        return SaveR2 ? CSR_AIX64_R2_VSRP_SaveList : CSR_AIX64_VSRP_SaveList;
      }
      return SaveR2 ? CSR_SVR464_R2_VSRP_SaveList : CSR_SVR464_VSRP_SaveList;
    }
    if (Subtarget.hasAltivec() &&
        (!Subtarget.isAIXABI() || TM.getAIXExtendedAltivecABI())) {
      return SaveR2 ? CSR_PPC64_R2_Altivec_SaveList
                    : CSR_PPC64_Altivec_SaveList;
    }
    return SaveR2 ? CSR_PPC64_R2_SaveList : CSR_PPC64_SaveList;
  if (Subtarget.isAIXABI()) {
    if (Subtarget.pairedVectorMemops())
      return TM.getAIXExtendedAltivecABI() ? CSR_AIX32_VSRP_SaveList
                                           : CSR_AIX32_SaveList;
    if (Subtarget.hasAltivec())
      return TM.getAIXExtendedAltivecABI() ? CSR_AIX32_Altivec_SaveList
                                           : CSR_AIX32_SaveList;
    return CSR_AIX32_SaveList;
  }

  if (Subtarget.pairedVectorMemops())
    return CSR_SVR432_VSRP_SaveList;
  if (Subtarget.hasAltivec())
    return CSR_SVR432_Altivec_SaveList;
  else if (Subtarget.hasSPE()) {
    if (TM.isPositionIndependent() && !TM.isPPC64())
      return CSR_SVR432_SPE_NO_S30_31_SaveList;
    return CSR_SVR432_SPE_SaveList;
  }
  return CSR_SVR432_SaveList;
}
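// getCallPreservedMask - Return the register mask of preserved registers for
// the given calling convention, selected along the same ABI and feature axes
// as the save lists above.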
    if (Subtarget.hasVSX()) {
      if (Subtarget.pairedVectorMemops())
        return CSR_64_AllRegs_VSRP_RegMask;
      if (Subtarget.isAIXABI() && !TM.getAIXExtendedAltivecABI())
        return CSR_64_AllRegs_AIX_Dflt_VSX_RegMask;
      return CSR_64_AllRegs_VSX_RegMask;
    }
    if (Subtarget.hasAltivec()) {
      if (Subtarget.isAIXABI() && !TM.getAIXExtendedAltivecABI())
        return CSR_64_AllRegs_AIX_Dflt_Altivec_RegMask;
      return CSR_64_AllRegs_Altivec_RegMask;
    }
    return CSR_64_AllRegs_RegMask;
    if (Subtarget.pairedVectorMemops()) {
      if (!TM.getAIXExtendedAltivecABI())
        return TM.isPPC64() ? CSR_PPC64_RegMask : CSR_AIX32_RegMask;
      return TM.isPPC64() ? CSR_AIX64_VSRP_RegMask : CSR_AIX32_VSRP_RegMask;
    }
    return TM.isPPC64()
               ? ((Subtarget.hasAltivec() && TM.getAIXExtendedAltivecABI())
                      ? CSR_PPC64_Altivec_RegMask
                      : CSR_PPC64_RegMask)
               : ((Subtarget.hasAltivec() && TM.getAIXExtendedAltivecABI())
                      ? CSR_AIX32_Altivec_RegMask
                      : CSR_AIX32_RegMask);
    return Subtarget.pairedVectorMemops()
               ? CSR_SVR64_ColdCC_VSRP_RegMask
               : (Subtarget.hasAltivec() ? CSR_SVR64_ColdCC_Altivec_RegMask
                                         : CSR_SVR64_ColdCC_RegMask);

    return Subtarget.pairedVectorMemops()
               ? CSR_SVR32_ColdCC_VSRP_RegMask
               : (Subtarget.hasAltivec()
                      ? CSR_SVR32_ColdCC_Altivec_RegMask
                      : (Subtarget.hasSPE() ? CSR_SVR32_ColdCC_SPE_RegMask
                                            : CSR_SVR32_ColdCC_RegMask));
    return Subtarget.pairedVectorMemops()
               ? CSR_SVR464_VSRP_RegMask
               : (Subtarget.hasAltivec() ? CSR_PPC64_Altivec_RegMask
                                         : CSR_PPC64_RegMask);

  return Subtarget.pairedVectorMemops()
             ? CSR_SVR432_VSRP_RegMask
             : (Subtarget.hasAltivec()
                    ? CSR_SVR432_Altivec_RegMask
                    : (Subtarget.hasSPE()
                           ? (TM.isPositionIndependent()
                                  ? CSR_SVR432_SPE_NO_S30_31_RegMask
                                  : CSR_SVR432_SPE_RegMask)
                           : CSR_SVR432_RegMask));
const uint32_t *PPCRegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

void PPCRegisterInfo::adjustStackMapLiveOutMask(uint32_t *Mask) const {
  for (unsigned PseudoReg : {PPC::ZERO, PPC::ZERO8, PPC::RM})
    Mask[PseudoReg / 32] &= ~(1u << (PseudoReg % 32));
}
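// getReservedRegs - Build the set of registers that are never available to
// the allocator: VRSAVE, the TOC/frame/base pointer registers as required by
// the function, and the non-volatile Altivec registers under the default
// (non-extended) AIX Altivec ABI.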
  markSuperRegs(Reserved, PPC::VRSAVE);

  if (!TM.isPPC64() || UsesTOCBasePtr || MF.hasInlineAsm())

  if (TFI->needsFP(MF))

  bool IsPositionIndependent = TM.isPositionIndependent();

  if (!Subtarget.hasAltivec())

  if (Subtarget.isAIXABI() && Subtarget.hasAltivec() &&
      !TM.getAIXExtendedAltivecABI()) {
    for (auto Reg : CSR_Altivec_SaveList) {

  if (PhysReg == PPC::CTR || PhysReg == PPC::CTR8 || PhysReg == PPC::LR ||
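// requiresFrameIndexScavenging - Decide whether a scratch register may be
// needed when frame indices are eliminated, e.g. when the frame is too large
// for a 16-bit D-form offset or a callee-saved spill needs an X-form or
// quadword instruction.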
  if (FrameSize & ~0x7FFF) {
    LLVM_DEBUG(dbgs() << "TRUE - Frame size is too large for D-Form.\n");

    if (CSI.isSpilledToReg())

    int FrIdx = CSI.getFrameIdx();

    unsigned Opcode = InstrInfo->getStoreOpcodeForSpill(RC);

      LLVM_DEBUG(dbgs() << "Memory Operand: " << InstrInfo->getName(Opcode)
                        << " for register " << printReg(Reg, this) << ".\n");
      LLVM_DEBUG(dbgs() << "TRUE - Not fixed frame object that requires "

    if (InstrInfo->isXFormMemOp(Opcode)) {
      LLVM_DEBUG(dbgs() << "Memory Operand: " << InstrInfo->getName(Opcode)
                        << " for register " << printReg(Reg, this) << ".\n");

    if ((Opcode == PPC::RESTORE_QUADWORD) || (Opcode == PPC::SPILL_QUADWORD)) {
      LLVM_DEBUG(dbgs() << "Memory Operand: " << InstrInfo->getName(Opcode)
                        << " for register " << printReg(Reg, this) << ".\n");

  return !Subtarget.hasROPProtect();

      !MFI.hasVarSizedObjects() && !MFI.hasOpaqueSPAdjustment())
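// getRegAllocationHints - Add target-specific allocation hints: when a
// virtual UACC/ACC register is copied or built from the other class (COPY,
// BUILD_UACC), hint the physical register that corresponds to the
// already-assigned one (ACC <-> UACC, and the underlying VSRp sub-registers).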
      VirtReg, Order, Hints, MF, VRM, Matrix);

    return BaseImplRetVal;

    switch (Use.getOpcode()) {
    case TargetOpcode::COPY: {
      ResultOp = &Use.getOperand(0);
      ResultReg = ResultOp->getReg();

          MRI->getRegClass(ResultReg)->contains(PPC::UACC0) &&

        if (RegClass->contains(PPC::VSRp0)) {
          HintReg = getSubReg(UACCPhys, ResultOp->getSubReg());

          if (HintReg >= PPC::VSRp0 && HintReg <= PPC::VSRp31)

        } else if (RegClass->contains(PPC::ACC0)) {
          HintReg = PPC::ACC0 + (UACCPhys - PPC::UACC0);
          if (HintReg >= PPC::ACC0 && HintReg <= PPC::ACC7)

    case PPC::BUILD_UACC: {
      ResultOp = &Use.getOperand(0);
      ResultReg = ResultOp->getReg();
      if (MRI->getRegClass(ResultReg)->contains(PPC::ACC0) &&

        assert((ACCPhys >= PPC::ACC0 && ACCPhys <= PPC::ACC7) &&
               "Expecting an ACC register for BUILD_UACC.");
        Register HintReg = PPC::UACC0 + (ACCPhys - PPC::ACC0);

  return BaseImplRetVal;
  if (RC == &PPC::CARRYRCRegClass)
    return TM.isPPC64() ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
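// getRegPressureLimit - Estimate the number of allocatable registers in RC,
// accounting for the frame pointer, the vector registers reserved by the
// default (non-extended) AIX Altivec ABI, and a small safety margin.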
  const unsigned DefaultSafety = 1;

  switch (RC->getID()) {
  case PPC::G8RC_NOX0RegClassID:
  case PPC::GPRC_NOR0RegClassID:
  case PPC::SPERCRegClassID:
  case PPC::G8RCRegClassID:
  case PPC::GPRCRegClassID: {
    unsigned FP = TFI->hasFP(MF) ? 1 : 0;
    return 32 - FP - DefaultSafety;
  }
  case PPC::F4RCRegClassID:
  case PPC::F8RCRegClassID:
  case PPC::VSLRCRegClassID:
    return 32 - DefaultSafety;
  case PPC::VFRCRegClassID:
  case PPC::VRRCRegClassID: {
    if (!TM.getAIXExtendedAltivecABI() && Subtarget.isAIXABI())
      return 20 - DefaultSafety;
    return 32 - DefaultSafety;
  }
  case PPC::VSFRCRegClassID:
  case PPC::VSSRCRegClassID:
  case PPC::VSRCRegClassID: {
    if (!TM.getAIXExtendedAltivecABI() && Subtarget.isAIXABI())
      return 52 - DefaultSafety;
    return 64 - DefaultSafety;
  }
  case PPC::CRRCRegClassID:
    return 8 - DefaultSafety;
  }
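// getLargestLegalSuperClass - Return the largest super class of RC that is
// legal on this subtarget and has the same register size, e.g. widening GPRs
// to SPILLTOVSRRC when GPR-to-VSR spills are possible.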
  const auto *DefaultSuperclass =

  if (Subtarget.hasVSX()) {
    if (TM.isELFv2ABI() || Subtarget.isAIXABI()) {
          RC == &PPC::G8RCRegClass) {
        return &PPC::SPILLTOVSRRCRegClass;

      if (getRegSizeInBits(*getRegClass(SuperID)) != getRegSizeInBits(*RC))

      case PPC::VSSRCRegClassID:
        return Subtarget.hasP8Vector() ? getRegClass(SuperID)
                                       : DefaultSuperclass;
      case PPC::VSFRCRegClassID:
      case PPC::VSRCRegClassID:
      case PPC::VSRpRCRegClassID:
        return Subtarget.pairedVectorMemops() ? getRegClass(SuperID)
                                              : DefaultSuperclass;
      case PPC::ACCRCRegClassID:
      case PPC::UACCRCRegClassID:
        return Subtarget.hasMMA() ? getRegClass(SuperID) : DefaultSuperclass;

  return DefaultSuperclass;
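// lowerDynamicAlloc - Generate the code for allocating an object in the
// current frame.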
  bool LP64 = TM.isPPC64();

         "Maximum call-frame size not sufficiently aligned");

  bool KillNegSizeReg = MI.getOperand(1).isKill();
  Register NegSizeReg = MI.getOperand(1).getReg();

        .addImm(maxCallFrameSize);

        .addImm(maxCallFrameSize);
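// prepareDynamicAlloca - To accomplish dynamic stack allocation, calculate
// the exact size to subtract from the stack pointer, taking the larger of the
// target stack alignment and the allocation's MaxAlign into account.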
                                            bool &KillNegSizeReg,

  bool LP64 = TM.isPPC64();

  if (MaxAlign < TargetAlign && isInt<16>(FrameSize)) {

    if (MaxAlign > TargetAlign) {
      unsigned UnalNegSizeReg = NegSizeReg;

      unsigned NegSizeReg1 = NegSizeReg;

      KillNegSizeReg = true;

    if (MaxAlign > TargetAlign) {
      unsigned UnalNegSizeReg = NegSizeReg;

      unsigned NegSizeReg1 = NegSizeReg;

      KillNegSizeReg = true;
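// lowerPrepareProbedAlloca - Lower the PREPARE_PROBED_ALLOCA pseudos: compute
// the actual negative allocation size and the new frame pointer so the
// stack-probing sequence can consume them.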
  bool LP64 = TM.isPPC64();

  Register FramePointer = MI.getOperand(0).getReg();
  const Register ActualNegSizeReg = MI.getOperand(1).getReg();
  bool KillNegSizeReg = MI.getOperand(2).isKill();
  Register NegSizeReg = MI.getOperand(2).getReg();

  if (FramePointer == NegSizeReg) {
    assert(KillNegSizeReg && "FramePointer is a def and NegSizeReg is a use, "
                             "NegSizeReg should be killed");

    NegSizeReg = ActualNegSizeReg;
    KillNegSizeReg = false;
  }

  if (NegSizeReg != ActualNegSizeReg)

          MI.getOperand(0).getReg())
      .addImm(maxCallFrameSize);
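// lowerCRSpilling - Generate the code for spilling a CR register: move the CR
// field into a GPR (shifted into place for fields other than CR0) and store
// it to the frame slot.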
                                      unsigned FrameIndex) const {

  bool LP64 = TM.isPPC64();

  if (SrcReg != PPC::CR0) {

        .addImm(getEncodingValue(SrcReg) * 4)
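// lowerCRRestore - Reload the saved word from the frame slot, shift it into
// position if the target field is not CR0, and move it back with MTOCRF.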
                                     unsigned FrameIndex) const {

  bool LP64 = TM.isPPC64();

  Register DestReg = MI.getOperand(0).getReg();
  assert(MI.definesRegister(DestReg, nullptr) &&
         "RESTORE_CR does not define its destination");

  if (DestReg != PPC::CR0) {
    unsigned ShiftBits = getEncodingValue(DestReg) * 4;

  BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MTOCRF8 : PPC::MTOCRF), DestReg)
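// lowerCRBitSpilling - Spill a single CR bit. Search backwards up to
// MaxCRBitSpillDist instructions for the bit's definition; when the defining
// instruction leaves the bit with a known value, reloading the whole CR field
// can be avoided, and an otherwise unused definition is turned into a NOP.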
                                         unsigned FrameIndex) const {

  bool LP64 = TM.isPPC64();

  unsigned CRBitSpillDistance = 0;
  bool SeenUse = false;
  for (; Ins != Rend; ++Ins) {
    if (Ins->modifiesRegister(SrcReg, TRI))

    if (Ins->readsRegister(SrcReg, TRI))

    if (!Ins->isDebugInstr())
      CRBitSpillDistance++;

  if (Ins == MBB.rend())

  bool SpillsKnownBit = false;
  switch (Ins->getOpcode()) {

    SpillsKnownBit = true;

    SpillsKnownBit = true;

    if (Subtarget.isISA3_1()) {

    if (Subtarget.isISA3_0()) {
      if (SrcReg == PPC::CR0LT || SrcReg == PPC::CR1LT ||
          SrcReg == PPC::CR2LT || SrcReg == PPC::CR3LT ||
          SrcReg == PPC::CR4LT || SrcReg == PPC::CR5LT ||
          SrcReg == PPC::CR6LT || SrcReg == PPC::CR7LT) {

        .addImm(getEncodingValue(SrcReg))

  bool KillsCRBit = MI.killsRegister(SrcReg, TRI);

  if (SpillsKnownBit && KillsCRBit && !SeenUse) {
    Ins->setDesc(TII.get(PPC::UNENCODED_NOP));
    Ins->removeOperand(0);
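// lowerCRBitRestore - Restore a single CR bit: reload the saved word and use
// RLWIMI to rotate the bit into its position within the CR field (MFOCRF is
// used so the other bits of the field are preserved).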
                                        unsigned FrameIndex) const {

  bool LP64 = TM.isPPC64();

  Register DestReg = MI.getOperand(0).getReg();
  assert(MI.definesRegister(DestReg, nullptr) &&
         "RESTORE_CRBIT does not define its destination");

  BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MFOCRF8 : PPC::MFOCRF), RegO)

  unsigned ShiftBits = getEncodingValue(DestReg);

  BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::RLWIMI8 : PPC::RLWIMI), RegO)

      .addImm(ShiftBits ? 32 - ShiftBits : 0)
  std::string Dest = PPC::ACCRCRegClass.contains(DestReg) ? "acc" : "uacc";
  std::string Src = PPC::ACCRCRegClass.contains(SrcReg) ? "acc" : "uacc";
  dbgs() << "Emitting copy from " << Src << " to " << Dest << ":\n";

  dbgs() << "Emitting " << (IsPrimed ? "acc" : "uacc") << " register "
         << (IsRestore ? "restore" : "spill") << ":\n";
                         unsigned FrameIndex, bool IsLittleEndian,

         "Spilling register pairs does not support virtual registers.");

               FrameIndex, IsLittleEndian ? Offset - 16 : Offset + 16);
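// lowerOctWordSpilling - Remove an STXVP[X] spill and split it into a pair of
// STXV[X] instructions when -disable-auto-paired-vec-st is in effect.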
                                           unsigned FrameIndex) const {

         "Expecting to do this only if paired vector stores are disabled.");

  bool IsKilled = MI.getOperand(0).isKill();

  spillRegPair(MBB, II, DL, TII, FrameIndex, IsLittleEndian, IsKilled, SrcReg,
               IsLittleEndian ? 16 : 0);
  dbgs() << "Emitting wacc register " << (IsRestore ? "restore" : "spill")
                                      unsigned FrameIndex) const {

  bool IsKilled = MI.getOperand(0).isKill();

  bool IsPrimed = PPC::ACCRCRegClass.contains(SrcReg);

    spillRegPair(MBB, II, DL, TII, FrameIndex, IsLittleEndian, IsKilled,
                 IsLittleEndian ? 48 : 0);
    spillRegPair(MBB, II, DL, TII, FrameIndex, IsLittleEndian, IsKilled,
                 IsLittleEndian ? 16 : 32);

                 FrameIndex, IsLittleEndian ? 32 : 0);

                 FrameIndex, IsLittleEndian ? 0 : 32);

  if (IsPrimed && !IsKilled)
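// lowerACCRestore - Generate the code to restore the accumulator register:
// reload the two underlying VSRp halves and, for a primed (ACC) destination,
// prime the result.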
                                     unsigned FrameIndex) const {

  Register DestReg = MI.getOperand(0).getReg();
  assert(MI.definesRegister(DestReg, nullptr) &&
         "RESTORE_ACC does not define its destination");

  bool IsPrimed = PPC::ACCRCRegClass.contains(DestReg);
      PPC::VSRp0 + (DestReg - (IsPrimed ? PPC::ACC0 : PPC::UACC0)) * 2;

                 FrameIndex, IsLittleEndian ? 32 : 0);

                 FrameIndex, IsLittleEndian ? 0 : 32);
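// lowerWACCSpilling - Generate the code for spilling the wide accumulator
// register.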
                                       unsigned FrameIndex) const {

               FrameIndex, IsLittleEndian ? 32 : 0);

               FrameIndex, IsLittleEndian ? 0 : 32);
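// lowerWACCRestore - Generate the code to restore the wide accumulator
// register.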
                                      unsigned FrameIndex) const {

  Register DestReg = MI.getOperand(0).getReg();

               FrameIndex, IsLittleEndian ? 32 : 0);

               FrameIndex, IsLittleEndian ? 0 : 32);
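// lowerQuadwordSpilling - Generate code to spill a paired general register:
// the two underlying X registers are stored at endian-dependent offsets.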
                                           unsigned FrameIndex) const {

  bool IsKilled = MI.getOperand(0).isKill();

  Register Reg = PPC::X0 + (SrcReg - PPC::G8p0) * 2;

               FrameIndex, IsLittleEndian ? 8 : 0);

               FrameIndex, IsLittleEndian ? 0 : 8);
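// lowerQuadwordRestore - Generate code to restore a paired general register.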
                                          unsigned FrameIndex) const {

  Register DestReg = MI.getOperand(0).getReg();
  assert(MI.definesRegister(DestReg, nullptr) &&
         "RESTORE_QUADWORD does not define its destination");

  Register Reg = PPC::X0 + (DestReg - PPC::G8p0) * 2;

                    IsLittleEndian ? 8 : 0);

                    IsLittleEndian ? 0 : 8);
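// lowerDMRSpilling - Generate the code for spilling the DMR register (or a
// DMR pair): each half is extracted to vector-pair registers with
// DMXXEXTFDMR512/DMXXEXTFDMR512_HI and stored to the frame slot.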
                                      unsigned FrameIndex) const {

  auto spillDMR = [&](Register SrcReg, int BEIdx, int LEIdx) {
    auto spillWACC = [&](unsigned Opc, unsigned RegIdx, int IdxBE, int IdxLE) {

                     FrameIndex, IsLittleEndian ? IdxLE : IdxBE);

                     FrameIndex, IsLittleEndian ? IdxLE - 32 : IdxBE + 32);
    };
    spillWACC(PPC::DMXXEXTFDMR512, PPC::sub_wacc_lo, BEIdx, LEIdx);
    spillWACC(PPC::DMXXEXTFDMR512_HI, PPC::sub_wacc_hi, BEIdx + 64, LEIdx - 64);
  };

  if (MI.getOpcode() == PPC::SPILL_DMRP) {

    spillDMR(SrcReg, 0, 96);
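// lowerDMRRestore - Generate the code to restore the DMR register: reload the
// pieces from the frame slot and reassemble them with
// DMXXINSTDMR512/DMXXINSTDMR512_HI.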
                                     unsigned FrameIndex) const {

  auto restoreDMR = [&](Register DestReg, int BEIdx, int LEIdx) {
    auto restoreWACC = [&](unsigned Opc, unsigned RegIdx, int IdxBE,
                           int IdxLE) {

                       FrameIndex, IsLittleEndian ? IdxLE : IdxBE);

                       FrameIndex, IsLittleEndian ? IdxLE - 32 : IdxBE + 32);
    };
    restoreWACC(PPC::DMXXINSTDMR512, PPC::sub_wacc_lo, BEIdx, LEIdx);
    restoreWACC(PPC::DMXXINSTDMR512_HI, PPC::sub_wacc_hi, BEIdx + 64,
                LEIdx - 64);
  };

  Register DestReg = MI.getOperand(0).getReg();
  if (MI.getOpcode() == PPC::RESTORE_DMRP) {

    restoreDMR(DestReg, 0, 96);
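// hasReservedSpillSlot - Return true (and the pre-assigned frame index) for
// registers such as CR2-CR4 whose spill location is fixed by the ABI rather
// than allocated by the spiller.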
                                           Register Reg,
                                           int &FrameIdx) const {

  if (PPC::CR2 <= Reg && Reg <= PPC::CR4) {
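// Helpers for frame-index elimination: offsetMinAlignForOpcode/offsetMinAlign
// return the alignment required by an opcode's displacement field (e.g. the
// DS/DQ-form DFLOAD/DFSTORE cases below), and getOffsetONFromFION returns the
// operand number that holds the offset for a given frame-index operand.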
  case PPC::DFLOADf32:
  case PPC::DFLOADf64:
  case PPC::DFSTOREf32:
  case PPC::DFSTOREf64:

  unsigned OpC = MI.getOpcode();

                                    unsigned FIOperandNum) {
  unsigned OffsetOperandNo = (FIOperandNum == 2) ? 1 : 2;
  if (MI.isInlineAsm())
    OffsetOperandNo = FIOperandNum - 1;
  else if (MI.getOpcode() == TargetOpcode::STACKMAP ||
           MI.getOpcode() == TargetOpcode::PATCHPOINT)
    OffsetOperandNo = FIOperandNum + 1;

  return OffsetOperandNo;
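// eliminateFrameIndex - Replace an abstract frame index with a real stack
// reference: pseudos such as DYNALLOC, the SPILL_/RESTORE_ CR, CRBIT, ACC,
// WACC, DMR and QUADWORD forms are expanded via the lower* helpers above,
// and for ordinary memory accesses the offset is either folded into the
// immediate field or the instruction is switched to its X-form from
// ImmToIdxMap with the offset materialized in a scavenged register.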
                                          int SPAdj, unsigned FIOperandNum,

  assert(SPAdj == 0 && "Unexpected");

  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();

  unsigned OpC = MI.getOpcode();

  case PPC::DYNAREAOFFSET:
  case PPC::DYNAREAOFFSET8:

  case PPC::DYNALLOC8: {
    if (FPSI && FrameIndex == FPSI) {

  case PPC::PREPARE_PROBED_ALLOCA_64:
  case PPC::PREPARE_PROBED_ALLOCA_32:
  case PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_64:
  case PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_32: {
    if (FPSI && FrameIndex == FPSI) {

  case PPC::RESTORE_CR:

  case PPC::SPILL_CRBIT:

  case PPC::RESTORE_CRBIT:

  case PPC::SPILL_ACC:
  case PPC::SPILL_UACC:

  case PPC::RESTORE_ACC:
  case PPC::RESTORE_UACC:

  case PPC::SPILL_WACC:

  case PPC::RESTORE_WACC:

  case PPC::SPILL_DMRP:
  case PPC::SPILL_DMR:

  case PPC::RESTORE_DMRP:
  case PPC::RESTORE_DMR:

  case PPC::SPILL_QUADWORD:

  case PPC::RESTORE_QUADWORD:

  MI.getOperand(FIOperandNum).ChangeToRegister(

  bool noImmForm = !MI.isInlineAsm() && OpC != TargetOpcode::STACKMAP &&
                   OpC != TargetOpcode::PATCHPOINT && !ImmToIdxMap.count(OpC);

  Offset += MI.getOperand(OffsetOperandNo).getImm();
  if ((OpC == PPC::LXVP || OpC == PPC::STXVP) &&
      Subtarget.hasPrefixInstrs() && Subtarget.hasP10Vector()) {
    unsigned NewOpc = OpC == PPC::LXVP ? PPC::PLXVP : PPC::PSTXVP;
    MI.setDesc(TII.get(NewOpc));

  assert(OpC != PPC::DBG_VALUE &&
         "This should be handled in a target-independent way");

  bool OffsetFitsMnemonic = (OpC == PPC::EVSTDD || OpC == PPC::EVLDD) ?
                                isUInt<8>(Offset) :
                                isInt<16>(Offset);

  if (TII.isPrefixed(MI.getOpcode()))

  if (!noImmForm && ((OffsetFitsMnemonic &&
                      isInt<16>(Offset)) ||
                     OpC == TargetOpcode::STACKMAP ||
                     OpC == TargetOpcode::PATCHPOINT)) {
    MI.getOperand(OffsetOperandNo).ChangeToImmediate(Offset);

  unsigned NewOpcode = 0u;

  if (ScavengingFailed && Subtarget.hasDirectMove()) {

    SRegHi = SReg = is64Bit ? PPC::X4 : PPC::R4;
    if (MI.getOperand(0).getReg() == SReg)
      SRegHi = SReg = SReg + 1;
  unsigned OperandBase;

  else if (OpC != TargetOpcode::INLINEASM &&
           OpC != TargetOpcode::INLINEASM_BR) {
    assert(ImmToIdxMap.count(OpC) &&
           "No indexed form of load or store available!");
    NewOpcode = ImmToIdxMap.find(OpC)->second;
    MI.setDesc(TII.get(NewOpcode));
    OperandBase = OffsetOperandNo;
  }

  Register StackReg = MI.getOperand(FIOperandNum).getReg();
  MI.getOperand(OperandBase).ChangeToRegister(StackReg, false);
  MI.getOperand(OperandBase + 1).ChangeToRegister(SReg, false, false, true);

  if (ScavengingFailed && Subtarget.hasDirectMove())

  if (NewOpcode == PPC::LQX_PSEUDO || NewOpcode == PPC::STQX_PSEUDO) {
    assert(is64Bit && "Quadword loads/stores only supported in 64-bit mode");

    MI.setDesc(TII.get(NewOpcode == PPC::LQX_PSEUDO ? PPC::LQ : PPC::STQ));
    MI.getOperand(OperandBase + 1).ChangeToRegister(NewReg, false);
    MI.getOperand(OperandBase).ChangeToImmediate(0);
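// getFrameRegister - Return R31/X31 when a frame pointer is used, otherwise
// the stack pointer R1/X1.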
    return TFI->hasFP(MF) ? PPC::R31 : PPC::R1;

  return TFI->hasFP(MF) ? PPC::X31 : PPC::X1;

  if (Subtarget.isSVR4ABI() && TM.isPositionIndependent())

  return hasStackRealignment(MF);
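// needsFrameBaseReg - Returns true if the instruction's frame index reference
// would be better served by a base register other than FP or SP.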
  unsigned OpC = MI->getOpcode();
  if (!ImmToIdxMap.count(OpC))

  if ((OpC == PPC::ADDI || OpC == PPC::ADDI8) &&
      MI->getOperand(2).getImm() == 0)
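// materializeFrameBaseRegister - Insert defining instruction(s) for BaseReg
// to be a pointer to FrameIdx at the beginning of the basic block (an ADDI or
// ADDI8 from the frame index).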
  unsigned ADDriOpc = TM.isPPC64() ? PPC::ADDI8 : PPC::ADDI;

  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();

  MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this));
  unsigned FIOperandNum = 0;
  while (!MI.getOperand(FIOperandNum).isFI()) {
    FIOperandNum++;
    assert(FIOperandNum < MI.getNumOperands() &&
           "Instr doesn't have FrameIndex operand!");
  }

  MI.getOperand(FIOperandNum).ChangeToRegister(BaseReg, false);

  Offset += MI.getOperand(OffsetOperandNo).getImm();
  MI.getOperand(OffsetOperandNo).ChangeToImmediate(Offset);

  MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, FIOperandNum, this));
  unsigned FIOperandNum = 0;
  while (!MI->getOperand(FIOperandNum).isFI()) {
    FIOperandNum++;
    assert(FIOperandNum < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand!");
  }

  Offset += MI->getOperand(OffsetOperandNo).getImm();

  return MI->getOpcode() == PPC::DBG_VALUE ||
         MI->getOpcode() == TargetOpcode::STACKMAP ||
         MI->getOpcode() == TargetOpcode::PATCHPOINT ||