#define DEBUG_TYPE "frame-info"

static cl::opt<bool> EnableRedZone("aarch64-redzone",
                                   cl::desc("enable use of redzone on AArch64"),
                                   cl::init(false), cl::Hidden);

static cl::opt<bool>
    ReverseCSRRestoreSeq("reverse-csr-restore-seq",
                         cl::desc("reverse the CSR restore sequence"),
                         cl::init(false), cl::Hidden);

static cl::opt<bool> StackTaggingMergeSetTag(
    "stack-tagging-merge-settag",
    cl::desc("merge settag instruction in function epilog"), cl::init(true),
    cl::Hidden);

cl::opt<bool> EnableHomogeneousPrologEpilog(
    "homogeneous-prolog-epilog", cl::Hidden,
    cl::desc("Emit homogeneous prologue and epilogue for the size "
             "optimization (default = off)"));
STATISTIC(NumRedZoneFunctions, "Number of functions using red zone");
static int64_t getArgumentStackToRestore(MachineFunction &MF,
                                         MachineBasicBlock &MBB) {
  // ...
  bool IsTailCallReturn = false;
  unsigned RetOpcode = MBBI->getOpcode();
  IsTailCallReturn = RetOpcode == AArch64::TCRETURNdi ||
                     RetOpcode == AArch64::TCRETURNri ||
                     RetOpcode == AArch64::TCRETURNriBTI;
  // ...
  int64_t ArgumentPopSize = 0;
  if (IsTailCallReturn) {
    // ...
    ArgumentPopSize = StackAdjust.getImm();
  }
  // ...
  return ArgumentPopSize;
}
bool AArch64FrameLowering::homogeneousPrologEpilog(
    MachineFunction &MF, MachineBasicBlock *Exit) const {
  // ...
}

bool AArch64FrameLowering::producePairRegisters(MachineFunction &MF) const {
  // ...
}
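
// estimateRSStackSizeLimit: look at each instruction that references stack
// frames and return the stack size limit beyond which some of these
// instructions will require a scratch register during expansion.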
static unsigned estimateRSStackSizeLimit(MachineFunction &MF) {
  // ...
      if (MI.isDebugInstr() || MI.isPseudo() ||
          MI.getOpcode() == AArch64::ADDXri ||
          MI.getOpcode() == AArch64::ADDSXri)
        continue;
  // ...
}
static unsigned getFixedObjectSize(const MachineFunction &MF,
                                   const AArch64FunctionInfo *AFI, bool IsWin64,
                                   bool IsFunclet) {
  if (!IsWin64 || IsFunclet) {
    // ...
  } else {
    // ...
    const unsigned UnwindHelpObject = (MF.hasEHFunclets() ? 8 : 0);
    return alignTo(VarArgsArea + UnwindHelpObject, 16);
  }
}
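
// canUseRedZone: can this function use the red zone for local allocations?
// Only when there are no calls, no frame pointer, no SVE objects, and the
// locals fit in the target's red-zone size.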
bool AArch64FrameLowering::canUseRedZone(const MachineFunction &MF) const {
  // ...
  const unsigned RedZoneSize =
      MF.getSubtarget<AArch64Subtarget>().getTargetLowering()->getRedZoneSize(F);
  // ...
  return !(MFI.hasCalls() || hasFP(MF) || NumBytes > RedZoneSize ||
           getSVEStackSize(MF));
}
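
// eliminateCallFramePseudoInstr: called during prolog/epilog code insertion
// to eliminate call frame setup and destroy pseudo instructions.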
MachineBasicBlock::iterator AArch64FrameLowering::eliminateCallFramePseudoInstr(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator I) const {
  // ...
  unsigned Opc = I->getOpcode();
  bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode();
  uint64_t CalleePopAmount = IsDestroy ? I->getOperand(1).getImm() : 0;
  // ...
  int64_t Amount = I->getOperand(0).getImm();
  // ...
  if (CalleePopAmount == 0) {
    // ...
    assert(Amount > -0xffffff && Amount < 0xffffff && "call frame too large");
    // ...
  } else if (CalleePopAmount != 0) {
    // ...
    assert(CalleePopAmount < 0xffffff && "call frame too large");
    // ...
  }
  // ...
}
void AArch64FrameLowering::emitCalleeSavedGPRLocations(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
  // ...
  for (const auto &Info : CSI) {
    // ...
    assert(!Info.isSpilledToReg() && "Spilling to registers not implemented");
    unsigned DwarfReg = TRI.getDwarfRegNum(Info.getReg(), true);
    // ...
  }
}
void AArch64FrameLowering::emitCalleeSavedSVELocations(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
  // ...
  for (const auto &Info : CSI) {
    // ...
    assert(!Info.isSpilledToReg() && "Spilling to registers not implemented");
    // ...
  }
}
  const MCInstrDesc &CFIDesc = TII.get(TargetOpcode::CFI_INSTRUCTION);
  // ...
  unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::cfiDefCfa(
      nullptr, TRI.getDwarfRegNum(AArch64::SP, true), 0));
  // ...
  if (MFI.shouldSignReturnAddress(MF)) {
    // ...
  }
  // ...
  insertCFISameValue(CFIDesc, MF, MBB, InsertPt,
                     TRI.getDwarfRegNum(AArch64::X18, true));
  const std::vector<CalleeSavedInfo> &CSI =
      MF.getFrameInfo().getCalleeSavedInfo();
  for (const auto &Info : CSI) {
    unsigned Reg = Info.getReg();
    if (!TRI.regNeedsCFI(Reg, Reg))
      continue;
    insertCFISameValue(CFIDesc, MF, MBB, InsertPt,
                       TRI.getDwarfRegNum(Reg, true));
  }
}

static void emitCalleeSavedRestores(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator MBBI,
                                    bool SVE) {
  // ...
  for (const auto &Info : CSI) {
    // ...
    unsigned Reg = Info.getReg();
    // ...
    unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createRestore(
        nullptr, TRI.getDwarfRegNum(Info.getReg(), true)));
    // ...
  }
}
void AArch64FrameLowering::emitCalleeSavedGPRRestores(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
  emitCalleeSavedRestores(MBB, MBBI, /*SVE=*/false);
}

void AArch64FrameLowering::emitCalleeSavedSVERestores(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
  emitCalleeSavedRestores(MBB, MBBI, /*SVE=*/true);
}
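
// getRegisterOrZero: map a register to the widest aliasing register that must
// be zeroed (X for W; Q, or Z when SVE is available, for FP/SIMD). Registers
// the callee is required to preserve map to zero.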
#define CASE(n)                                                                \
  case AArch64::W##n:                                                          \
  case AArch64::X##n:                                                          \
    return AArch64::X##n
  // ...
#undef CASE

#define CASE(n)                                                                \
  case AArch64::B##n:                                                          \
  case AArch64::H##n:                                                          \
  case AArch64::S##n:                                                          \
  case AArch64::D##n:                                                          \
  case AArch64::Q##n:                                                          \
    return HasSVE ? AArch64::Z##n : AArch64::Q##n
void AArch64FrameLowering::emitZeroCallUsedRegs(BitVector RegsToZero,
                                                MachineBasicBlock &MBB) const {
  // ...
  bool HasSVE = STI.hasSVE();
  // ...
    if (TRI.isGeneralPurposeRegister(MF, Reg)) {
      GPRsToZero.set(XReg);
    } else if (AArch64::FPR128RegClass.contains(Reg) ||
               AArch64::FPR64RegClass.contains(Reg) ||
               AArch64::FPR32RegClass.contains(Reg) ||
               AArch64::FPR16RegClass.contains(Reg) ||
               AArch64::FPR8RegClass.contains(Reg)) {
      FPRsToZero.set(XReg);
    }
  // ...
  for (MCRegister PReg :
       {AArch64::P0, AArch64::P1, AArch64::P2, AArch64::P3, AArch64::P4,
        AArch64::P5, AArch64::P6, AArch64::P7, AArch64::P8, AArch64::P9,
        AArch64::P10, AArch64::P11, AArch64::P12, AArch64::P13, AArch64::P14,
        AArch64::P15}) {
    if (RegsToZero[PReg]) {
      // ...
    }
  }
}
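
// findScratchNonCalleeSaveRegister: find a GPR64 that is neither live-in nor
// a callee save, usable as a scratch register in the prologue.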
static unsigned findScratchNonCalleeSaveRegister(MachineBasicBlock *MBB) {
  // ...
  for (unsigned i = 0; CSRegs[i]; ++i)
    LiveRegs.addReg(CSRegs[i]);
  // ...
  for (unsigned Reg : AArch64::GPR64RegClass) {
    if (LiveRegs.available(MRI, Reg))
      return Reg;
  }
  return AArch64::NoRegister;
}
  // Don't need a scratch register if we aren't going to re-align the stack.
  if (!RegInfo->hasStackRealignment(*MF))
    return true;
  unsigned StackProbeSize =
      F.getFnAttributeAsParsedInteger("stack-probe-size", 4096);
  return (StackSizeInBytes >= StackProbeSize) &&
         !F.hasFnAttribute("no-stack-arg-probe");
}

static bool needsWinCFI(const MachineFunction &MF) {
  const Function &F = MF.getFunction();
  return MF.getTarget().getMCAsmInfo()->usesWindowsCFI() &&
         F.needsUnwindTableEntry();
}
bool AArch64FrameLowering::shouldCombineCSRLocalStackBump(
    MachineFunction &MF, uint64_t StackBumpBytes) const {
  // ...
  if (homogeneousPrologEpilog(MF))
    return false;
  // ...
  if (MFI.hasVarSizedObjects())
    return false;
  if (RegInfo->hasStackRealignment(MF))
    return false;
  // ...
}
bool AArch64FrameLowering::shouldCombineCSRLocalStackBumpInEpilogue(
    MachineBasicBlock &MBB, unsigned StackBumpBytes) const {
  if (!shouldCombineCSRLocalStackBump(*MBB.getParent(), StackBumpBytes))
    return false;
  // ...
  while (LastI != Begin) {
    --LastI;
    if (LastI->isTransient())
      continue;
    // ...
  }
  switch (LastI->getOpcode()) {
  case AArch64::STGloop:
  case AArch64::STZGloop:
  // ...
  case AArch64::STZ2Gi:
    return false;
  default:
    return true;
  }
}
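
// InsertSEH: after a callee-save store/load, emit the matching Windows SEH
// unwind pseudo (e.g. SEH_SaveFPLR_X, SEH_SaveRegP) so the Windows unwinder
// can describe the operation.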
static MachineBasicBlock::iterator InsertSEH(MachineBasicBlock::iterator MBBI,
                                             const TargetInstrInfo &TII,
                                             MachineInstr::MIFlag Flag) {
  unsigned Opc = MBBI->getOpcode();
  // ...
  unsigned ImmIdx = MBBI->getNumOperands() - 1;
  int Imm = MBBI->getOperand(ImmIdx).getImm();
  // ...
  switch (Opc) {
  // ...
  case AArch64::LDPDpost:
    Imm = -Imm;
    [[fallthrough]];
  case AArch64::STPDpre: {
    unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(2).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFRegP_X))
              .addImm(Reg0)
              .addImm(Reg1)
              .addImm(Imm * 8)
              .setMIFlag(Flag);
    break;
  }
  case AArch64::LDPXpost:
    Imm = -Imm;
    [[fallthrough]];
  case AArch64::STPXpre: {
    Register Reg0 = MBBI->getOperand(1).getReg();
    Register Reg1 = MBBI->getOperand(2).getReg();
    if (Reg0 == AArch64::FP && Reg1 == AArch64::LR)
      MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFPLR_X))
                .addImm(Imm * 8)
                .setMIFlag(Flag);
    else
      MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveRegP_X))
                .addImm(RegInfo->getSEHRegNum(Reg0))
                .addImm(RegInfo->getSEHRegNum(Reg1))
                .addImm(Imm * 8)
                .setMIFlag(Flag);
    break;
  }
  case AArch64::LDRDpost:
    Imm = -Imm;
    [[fallthrough]];
  case AArch64::STRDpre: {
    unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFReg_X))
              .addImm(Reg)
              .addImm(Imm)
              .setMIFlag(Flag);
    break;
  }
  case AArch64::LDRXpost:
    Imm = -Imm;
    [[fallthrough]];
  case AArch64::STRXpre: {
    unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    // ...
    break;
  }
  case AArch64::STPDi:
  case AArch64::LDPDi: {
    unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
    unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    // ...
    break;
  }
  case AArch64::STPXi:
  case AArch64::LDPXi: {
    // ...
    if (Reg0 == AArch64::FP && Reg1 == AArch64::LR) {
      // ...
    }
    break;
  }
  case AArch64::STRXui:
  case AArch64::LDRXui: {
    int Reg = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
    // ...
    break;
  }
  case AArch64::STRDui:
  case AArch64::LDRDui: {
    unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
    // ...
    break;
  }
  // ...
  }
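
// fixupSEHOpcode: patch the offset operand of an already-emitted SEH unwind
// pseudo after the local stack size has been folded into the callee-save
// offsets.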
static void fixupSEHOpcode(MachineBasicBlock::iterator MBBI,
                           unsigned LocalStackSize) {
  MachineOperand *ImmOpnd = nullptr;
  unsigned ImmIdx = MBBI->getNumOperands() - 1;
  switch (MBBI->getOpcode()) {
  default:
    llvm_unreachable("Fix the offset in the SEH instruction");
  case AArch64::SEH_SaveFPLR:
  case AArch64::SEH_SaveRegP:
  case AArch64::SEH_SaveReg:
  case AArch64::SEH_SaveFRegP:
  case AArch64::SEH_SaveFReg:
    ImmOpnd = &MBBI->getOperand(ImmIdx);
  }
  if (ImmOpnd)
    ImmOpnd->setImm(ImmOpnd->getImm() + LocalStackSize);
}
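
// convertCalleeSaveRestoreToSPPrePostIncDec: fold an SP adjustment of
// CSStackSizeInc bytes into the first/last callee-save store or load by
// rewriting it to the equivalent pre/post-indexed form.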
static MachineBasicBlock::iterator convertCalleeSaveRestoreToSPPrePostIncDec(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    const DebugLoc &DL, const TargetInstrInfo *TII, int CSStackSizeInc,
    bool NeedsWinCFI, bool *HasWinCFI, bool EmitCFI,
    MachineInstr::MIFlag FrameFlag = MachineInstr::FrameSetup,
    int CFAOffset = 0) {
  unsigned NewOpc;
  switch (MBBI->getOpcode()) {
  default:
    llvm_unreachable("Unexpected callee-save save/restore opcode!");
  case AArch64::STPXi:
    NewOpc = AArch64::STPXpre;
    break;
  case AArch64::STPDi:
    NewOpc = AArch64::STPDpre;
    break;
  case AArch64::STPQi:
    NewOpc = AArch64::STPQpre;
    break;
  case AArch64::STRXui:
    NewOpc = AArch64::STRXpre;
    break;
  case AArch64::STRDui:
    NewOpc = AArch64::STRDpre;
    break;
  case AArch64::STRQui:
    NewOpc = AArch64::STRQpre;
    break;
  case AArch64::LDPXi:
    NewOpc = AArch64::LDPXpost;
    break;
  case AArch64::LDPDi:
    NewOpc = AArch64::LDPDpost;
    break;
  case AArch64::LDPQi:
    NewOpc = AArch64::LDPQpost;
    break;
  case AArch64::LDRXui:
    NewOpc = AArch64::LDRXpost;
    break;
  case AArch64::LDRDui:
    NewOpc = AArch64::LDRDpost;
    break;
  case AArch64::LDRQui:
    NewOpc = AArch64::LDRQpost;
    break;
  }
  // ...
  auto SEH = std::next(MBBI);
  if (AArch64InstrInfo::isSEHInstruction(*SEH))
    SEH->eraseFromParent();
  // ...
  int64_t MinOffset, MaxOffset;
  bool Success = static_cast<const AArch64InstrInfo *>(TII)->getMemOpInfo(
      NewOpc, Scale, Width, MinOffset, MaxOffset);
  (void)Success;
  // ...
  // If the first store isn't right where we want SP, then we can't fold the
  // update in so create a normal arithmetic instruction instead.
  if (MBBI->getOperand(MBBI->getNumOperands() - 1).getImm() != 0 ||
      CSStackSizeInc < MinOffset || CSStackSizeInc > MaxOffset) {
    emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP,
                    StackOffset::getFixed(CSStackSizeInc), TII, FrameFlag,
                    false, false, nullptr, EmitCFI,
                    StackOffset::getFixed(CFAOffset));
    return std::prev(MBBI);
  }
  // ...
  // Copy all operands other than the immediate offset.
  unsigned OpndIdx = 0;
  for (unsigned OpndEnd = MBBI->getNumOperands() - 1; OpndIdx < OpndEnd;
       ++OpndIdx)
    MIB.add(MBBI->getOperand(OpndIdx));

  assert(MBBI->getOperand(OpndIdx).getImm() == 0 &&
         "Unexpected immediate offset in first/last callee-save save/restore "
         "instruction!");
  assert(MBBI->getOperand(OpndIdx - 1).getReg() == AArch64::SP &&
         "Unexpected base register in callee-save save/restore instruction!");
  assert(CSStackSizeInc % Scale == 0);
  MIB.addImm(CSStackSizeInc / (int)Scale);
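
// fixupCalleeSaveRestoreStackOffset: add LocalStackSize (scaled) to the
// offset operand of a callee-save store/load once the local stack allocation
// has been folded into the SP bump.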
static void fixupCalleeSaveRestoreStackOffset(MachineInstr &MI,
                                              uint64_t LocalStackSize,
                                              bool NeedsWinCFI,
                                              bool *HasWinCFI) {
  // ...
  unsigned Opc = MI.getOpcode();
  unsigned Scale;
  switch (Opc) {
  case AArch64::STPXi:
  case AArch64::STRXui:
  case AArch64::STPDi:
  case AArch64::STRDui:
  case AArch64::LDPXi:
  case AArch64::LDRXui:
  case AArch64::LDPDi:
  case AArch64::LDRDui:
    Scale = 8;
    break;
  case AArch64::STPQi:
  case AArch64::STRQui:
  case AArch64::LDPQi:
  case AArch64::LDRQui:
    Scale = 16;
    break;
  default:
    llvm_unreachable("Unexpected callee-save save/restore opcode!");
  }

  unsigned OffsetIdx = MI.getNumExplicitOperands() - 1;
  assert(MI.getOperand(OffsetIdx - 1).getReg() == AArch64::SP &&
         "Unexpected base register in callee-save save/restore instruction!");
  // Last operand is the immediate offset that needs fixing.
  MachineOperand &OffsetOpnd = MI.getOperand(OffsetIdx);
  // All generated opcodes have scaled offsets.
  assert(LocalStackSize % Scale == 0);
  OffsetOpnd.setImm(OffsetOpnd.getImm() + LocalStackSize / Scale);

  if (NeedsWinCFI) {
    *HasWinCFI = true;
    auto MBBI = std::next(MachineBasicBlock::iterator(MI));
    assert(MBBI != MI.getParent()->end() && "Expecting a valid instruction");
    assert(AArch64InstrInfo::isSEHInstruction(*MBBI) &&
           "Expecting a SEH instruction");
    fixupSEHOpcode(MBBI, LocalStackSize);
  }
}
static bool IsSVECalleeSave(MachineBasicBlock::iterator I) {
  switch (I->getOpcode()) {
  default:
    return false;
  case AArch64::STR_ZXI:
  case AArch64::STR_PXI:
  case AArch64::LDR_ZXI:
  case AArch64::LDR_PXI:
    return I->getFlag(MachineInstr::FrameSetup) ||
           I->getFlag(MachineInstr::FrameDestroy);
  }
}

static bool needsShadowCallStackPrologueEpilogue(MachineFunction &MF) {
  // ...
  if (llvm::any_of(
          MF.getFrameInfo().getCalleeSavedInfo(),
          [](const auto &Info) { return Info.getReg() == AArch64::LR; }) &&
      // ...
static void emitShadowCallStackPrologue(const TargetInstrInfo &TII,
                                        MachineFunction &MF,
                                        MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MBBI,
                                        const DebugLoc &DL, bool NeedsWinCFI,
                                        bool NeedsUnwindInfo) {
  // ...
  if (NeedsUnwindInfo) {
    // Emit a CFI escape describing the shadow call stack slot: the saved LR
    // lives at [x18, #-8].
    static const char CFIInst[] = {
        dwarf::DW_CFA_val_expression,
        18, // register
        2,  // length
        static_cast<char>(unsigned(dwarf::DW_OP_breg18)),
        static_cast<char>(-8) & 0x7f, // addend (sleb128)
    };
    unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createEscape(
        nullptr, StringRef(CFIInst, sizeof(CFIInst))));
    // ...
  }
}
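
// emitDefineCFAWithFP: once the frame pointer is established, redefine the
// CFA in terms of FP so unwinding stays correct while SP continues to move.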
  const int OffsetToFirstCalleeSaveFromFP =
      AFI->getCalleeSaveBaseToFrameRecordOffset() -
      AFI->getCalleeSavedStackSize();
  Register FramePtr = TRI->getFrameRegister(MF);
  unsigned Reg = TRI->getDwarfRegNum(FramePtr, true);
  unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::cfiDefCfa(
      nullptr, Reg, FixedObject - OffsetToFirstCalleeSaveFromFP));
void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
                                        MachineBasicBlock &MBB) const {
  // ...
  bool HasFP = hasFP(MF);
  // ...
  bool HasWinCFI = false;
  // ...
  if (needsShadowCallStackPrologueEpilogue(MF))
    emitShadowCallStackPrologue(*TII, MF, MBB, MBBI, DL, NeedsWinCFI,
                                MFnI.needsDwarfUnwindInfo(MF));

  if (MFnI.shouldSignReturnAddress(MF)) {
    // ...
  }

  if (EmitCFI && MFnI.isMTETagged()) {
    // ...
  }
  // ...
    assert(!HasFP && "unexpected function without stack frame but with FP");
    assert(!SVEStackSize &&
           "unexpected function without stack frame but with SVE objects");
    // ...
      ++NumRedZoneFunctions;
  bool CombineSPBump = shouldCombineCSRLocalStackBump(MF, NumBytes);
  bool HomPrologEpilog = homogeneousPrologEpilog(MF);
  if (CombineSPBump) {
    assert(!SVEStackSize && "Cannot combine SP bump with SVE");
    // ...
  } else if (HomPrologEpilog) {
    // Stack has already been adjusted.
    NumBytes -= PrologueSaveSize;
  } else if (PrologueSaveSize != 0) {
    MBBI = convertCalleeSaveRestoreToSPPrePostIncDec(
        MBB, MBBI, DL, TII, -PrologueSaveSize, NeedsWinCFI, &HasWinCFI,
        EmitAsyncCFI);
    NumBytes -= PrologueSaveSize;
  }
  assert(NumBytes >= 0 && "Negative stack allocation size!?");
  // ...
      fixupCalleeSaveRestoreStackOffset(*MBBI, AFI->getLocalStackSize(),
                                        NeedsWinCFI, &HasWinCFI);
  if (!IsFunclet && HasFP) {
    // ...
    bool HaveInitialContext = Attrs.hasAttrSomewhere(Attribute::SwiftAsync);
    if (HaveInitialContext)
      MBB.addLiveIn(AArch64::X22);
    // ...
        .addUse(HaveInitialContext ? AArch64::X22 : AArch64::XZR)
    // ...
    if (HomPrologEpilog) {
      // ...
    }
    // ...
  }

  if (NeedsWinCFI && HasWinCFI) {
    // ...
    NeedsWinCFI = false;
  }
  // ...
    emitCalleeSavedGPRLocations(MBB, MBBI);

  const bool NeedsRealignment =
      NumBytes && !IsFunclet && RegInfo->hasStackRealignment(MF);
  int64_t RealignmentPadding =
      (NeedsRealignment && MFI.getMaxAlign() > Align(16))
          ? MFI.getMaxAlign().value() - 16
          : 0;

  if (windowsRequiresStackProbe(MF, NumBytes + RealignmentPadding)) {
    uint64_t NumWords = (NumBytes + RealignmentPadding) >> 4;
    if (NeedsWinCFI) {
      // ...
      // The alloc_l SEH opcode can only encode up to 256MB.
      if (NumBytes >= (1 << 28))
        report_fatal_error("Stack size cannot exceed 256MB for stack "
                           "unwinding purposes");
      // ...
    }

    uint32_t LowNumWords = NumWords & 0xFFFF;
    // ...
    if ((NumWords & 0xFFFF0000) != 0) {
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVKXi), AArch64::X15)
          .addReg(AArch64::X15)
          .addImm((NumWords & 0xFFFF0000) >> 16)
          // ...
    }
    if (RealignmentPadding > 0) {
      if (RealignmentPadding >= 4096) {
        BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVi64imm))
            .addReg(AArch64::X16, RegState::Define)
            .addImm(RealignmentPadding)
            .setMIFlags(MachineInstr::FrameSetup);
        // ...
      } else {
        BuildMI(MBB, MBBI, DL, TII->get(AArch64::ADDXri), AArch64::X15)
            .addReg(AArch64::X15, RegState::Kill)
            .addImm(RealignmentPadding)
            .addImm(0)
            .setMIFlag(MachineInstr::FrameSetup);
      }
      // ...
    }
  StackOffset AllocateBefore = SVEStackSize, AllocateAfter = {};
  MachineBasicBlock::iterator CalleeSavesBegin = MBBI, CalleeSavesEnd = MBBI;
  if (int64_t CalleeSavedSize = AFI->getSVECalleeSavedStackSize()) {
    // Find the SVE callee-save instructions in the frame setup region.
    CalleeSavesBegin = MBBI;
    // ...
    while (IsSVECalleeSave(MBBI) && MBBI != MBB.getFirstTerminator())
      ++MBBI;
    CalleeSavesEnd = MBBI;

    AllocateBefore = StackOffset::getScalable(CalleeSavedSize);
    AllocateAfter = SVEStackSize - AllocateBefore;
  }

  // Allocate space for the SVE callee saves (if any).
  emitFrameOffset(
      MBB, CalleeSavesBegin, DL, AArch64::SP, AArch64::SP, -AllocateBefore, TII,
      MachineInstr::FrameSetup, false, false, nullptr,
      EmitAsyncCFI && !HasFP && AllocateBefore,
      StackOffset::getFixed((int64_t)MFI.getStackSize() - NumBytes));

  if (EmitAsyncCFI)
    emitCalleeSavedSVELocations(MBB, CalleeSavesEnd);

  // Finally allocate the remaining SVE stack space.
  emitFrameOffset(MBB, CalleeSavesEnd, DL, AArch64::SP, AArch64::SP,
                  -AllocateAfter, TII, MachineInstr::FrameSetup, false, false,
                  nullptr, EmitAsyncCFI && !HasFP && AllocateAfter,
                  SVEStackSize +
                      StackOffset::getFixed((int64_t)MFI.getStackSize() - NumBytes));
  unsigned scratchSPReg = AArch64::SP;

  if (NeedsRealignment) {
    scratchSPReg = findScratchNonCalleeSaveRegister(&MBB);
    assert(scratchSPReg != AArch64::NoRegister);
  }
  // ...
    emitFrameOffset(
        MBB, MBBI, DL, scratchSPReg, AArch64::SP,
        StackOffset::getFixed(-NumBytes), TII, MachineInstr::FrameSetup,
        false, NeedsWinCFI, &HasWinCFI, EmitAsyncCFI && !HasFP,
        SVEStackSize + StackOffset::getFixed((int64_t)MFI.getStackSize()));
  // ...
  if (NeedsRealignment) {
    assert(scratchSPReg != AArch64::SP);
    // ...
  }
  if (!IsFunclet && RegInfo->hasBasePointer(MF)) {
    // ...
  }

  if (NeedsWinCFI && HasWinCFI) {
    // ...
  }

  if (IsFunclet && F.hasPersonalityFn()) {
    // ...
  }

  if (EmitCFI && !EmitAsyncCFI) {
    // ...
      const StackOffset TotalSize =
          SVEStackSize + StackOffset::getFixed((int64_t)MFI.getStackSize());
      unsigned CFIIndex = MF.addFrameInst(createDefCFA(
          *RegInfo, AArch64::SP, AArch64::SP, TotalSize,
          /*LastAdjustmentWasScalable=*/false));
    // ...
    emitCalleeSavedGPRLocations(MBB, MBBI);
    emitCalleeSavedSVELocations(MBB, MBBI);
  }
}
static bool isFuncletReturnInstr(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    return false;
  case AArch64::CATCHRET:
  case AArch64::CLEANUPRET:
    return true;
  }
}
void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
                                        MachineBasicBlock &MBB) const {
  // ...
  bool HasWinCFI = false;
  bool IsFunclet = false;

  if (MBB.end() != MBBI) {
    DL = MBBI->getDebugLoc();
    // ...
  }
  // ...
    BuildMI(MBB, MBB.getFirstTerminator(), DL,
            TII->get(AArch64::PAUTH_EPILOGUE))
        .setMIFlag(MachineInstr::FrameDestroy);
  // ...
    BuildMI(MBB, MBB.getFirstTerminator(), DL,
            TII->get(AArch64::SEH_EpilogEnd))
        .setMIFlag(MachineInstr::FrameDestroy);
  int64_t AfterCSRPopSize = ArgumentStackToRestore;
  // ...
  if (homogeneousPrologEpilog(MF, &MBB)) {
    // ...
    auto HomogeneousEpilog = std::prev(LastPopI);
    if (HomogeneousEpilog->getOpcode() == AArch64::HOM_Epilog)
      LastPopI = HomogeneousEpilog;
    // ...
    // SP has already been adjusted while restoring callee-save registers.
    assert(AfterCSRPopSize == 0);
    return;
  }

  bool CombineSPBump = shouldCombineCSRLocalStackBumpInEpilogue(MBB, NumBytes);
  bool CombineAfterCSRBump = false;
  if (!CombineSPBump && PrologueSaveSize != 0) {
    MachineBasicBlock::iterator Pop = std::prev(MBB.getFirstTerminator());
    while (Pop->getOpcode() == TargetOpcode::CFI_INSTRUCTION ||
           AArch64InstrInfo::isSEHInstruction(*Pop))
      Pop = std::prev(Pop);
    // Converting the last ldp to a post-index ldp is valid only if the last
    // ldp's offset is 0.
    const MachineOperand &OffsetOp = Pop->getOperand(Pop->getNumOperands() - 1);
    if (OffsetOp.getImm() == 0 && AfterCSRPopSize >= 0) {
      convertCalleeSaveRestoreToSPPrePostIncDec(
          MBB, Pop, DL, TII, PrologueSaveSize, NeedsWinCFI, &HasWinCFI, EmitCFI,
          MachineInstr::FrameDestroy, PrologueSaveSize);
    } else {
      // ...
      AfterCSRPopSize += PrologueSaveSize;
      CombineAfterCSRBump = true;
    }
  }
  while (LastPopI != Begin) {
    --LastPopI;
    if (!LastPopI->getFlag(MachineInstr::FrameDestroy) ||
        IsSVECalleeSave(LastPopI)) {
      ++LastPopI;
      break;
    } else if (CombineSPBump)
      fixupCalleeSaveRestoreStackOffset(*LastPopI, AFI->getLocalStackSize(),
                                        NeedsWinCFI, &HasWinCFI);
  }
  // ...
    EpilogStartI = LastPopI;
  if (CombineSPBump) {
    assert(!SVEStackSize && "Cannot combine SP bump with SVE");
    // ...
    if (EmitCFI && hasFP(MF)) {
      // ...
      unsigned Reg = RegInfo.getDwarfRegNum(AArch64::SP, true);
      // ...
    }
    return;
  }

  NumBytes -= PrologueSaveSize;
  assert(NumBytes >= 0 && "Negative stack allocation size!?");
  StackOffset DeallocateBefore = {}, DeallocateAfter = SVEStackSize;
  MachineBasicBlock::iterator RestoreBegin = LastPopI, RestoreEnd = LastPopI;
  if (int64_t CalleeSavedSize = AFI->getSVECalleeSavedStackSize()) {
    RestoreBegin = std::prev(RestoreEnd);
    while (RestoreBegin != MBB.begin() &&
           IsSVECalleeSave(std::prev(RestoreBegin)))
      --RestoreBegin;
    // ...
    StackOffset CalleeSavedSizeAsOffset =
        StackOffset::getScalable(CalleeSavedSize);
    DeallocateBefore = SVEStackSize - CalleeSavedSizeAsOffset;
    DeallocateAfter = CalleeSavedSizeAsOffset;
  }
  // ...
    emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP,
                    DeallocateBefore, TII, MachineInstr::FrameDestroy, false,
                    false, nullptr, EmitCFI && !hasFP(MF), /*...*/);

    emitFrameOffset(MBB, RestoreEnd, DL, AArch64::SP, AArch64::SP,
                    DeallocateAfter, TII, MachineInstr::FrameDestroy, false,
                    false, nullptr, EmitCFI && !hasFP(MF), /*...*/);
  // ...
  if (EmitCFI)
    emitCalleeSavedSVERestores(MBB, RestoreEnd);
    if (RedZone && AfterCSRPopSize == 0)
      return;

    bool NoCalleeSaveRestore = PrologueSaveSize == 0;
    int64_t StackRestoreBytes = RedZone ? 0 : NumBytes;
    if (NoCalleeSaveRestore)
      StackRestoreBytes += AfterCSRPopSize;

    emitFrameOffset(
        MBB, LastPopI, DL, AArch64::SP, AArch64::SP,
        StackOffset::getFixed(StackRestoreBytes), TII,
        MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI, EmitCFI,
        /*...*/);

    // If we were able to combine the local stack pop with the argument pop,
    // we're done.
    if (NoCalleeSaveRestore || AfterCSRPopSize == 0) {
      // ...
      return;
    }
  // ...
    emitFrameOffset(
        MBB, LastPopI, DL, AArch64::SP, AArch64::FP,
        StackOffset::getFixed(-AFI->getCalleeSaveBaseToFrameRecordOffset()),
        TII, MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI);
  } else if (NumBytes)
    // ...

  if (EmitCFI && hasFP(MF)) {
    // ...
    unsigned Reg = RegInfo.getDwarfRegNum(AArch64::SP, true);
    // ...
  }
  if (AfterCSRPopSize) {
    assert(AfterCSRPopSize > 0 && "attempting to reallocate arg stack that an "
                                  "interrupt may have clobbered");
    emitFrameOffset(
        MBB, MBB.getFirstTerminator(), DL, AArch64::SP, AArch64::SP,
        StackOffset::getFixed(AfterCSRPopSize), TII, MachineInstr::FrameDestroy,
        false, NeedsWinCFI, &HasWinCFI, EmitCFI,
        StackOffset::getFixed(CombineAfterCSRBump ? PrologueSaveSize : 0));
  }
}
static StackOffset getFPOffset(const MachineFunction &MF,
                               int64_t ObjectOffset) {
  // ...
  unsigned FixedObject =
      getFixedObjectSize(MF, AFI, IsWin64, /*IsFunclet=*/false);
  // ...
}

static StackOffset getStackOffset(const MachineFunction &MF,
                                  int64_t ObjectOffset) {
  // ...
}
int AArch64FrameLowering::getSEHFrameIndexOffset(const MachineFunction &MF,
                                                 int FI) const {
  // ...
  return RegInfo->getLocalAddressRegister(MF) == AArch64::FP
             ? getFPOffset(MF, ObjectOffset).getFixed()
             : getStackOffset(MF, ObjectOffset).getFixed();
}
StackOffset AArch64FrameLowering::resolveFrameIndexReference(
    const MachineFunction &MF, int FI, Register &FrameReg, bool PreferFP,
    bool ForSimm) const {
  // ...
  bool isFixed = MFI.isFixedObjectIndex(FI);
  // ...
  return resolveFrameOffsetReference(MF, ObjectOffset, isFixed, isSVE, FrameReg,
                                     PreferFP, ForSimm);
}
StackOffset AArch64FrameLowering::resolveFrameOffsetReference(
    const MachineFunction &MF, int64_t ObjectOffset, bool isFixed, bool isSVE,
    Register &FrameReg, bool PreferFP, bool ForSimm) const {
  PreferFP &= !SVEStackSize;
  // ...
  bool UseFP = false;
  if (isFixed) {
    UseFP = hasFP(MF);
  } else if (isCSR && RegInfo->hasStackRealignment(MF)) {
    // References to the CSR area must use FP if we're re-aligning the stack
    // since the dynamically-sized alignment padding is between the SP/BP and
    // the CSR area.
    assert(hasFP(MF) && "Re-aligned stack must have frame pointer");
    UseFP = true;
  } else if (hasFP(MF) && !RegInfo->hasStackRealignment(MF)) {
    // ...
    bool FPOffsetFits = !ForSimm || FPOffset >= -256;
    PreferFP |= Offset > -FPOffset && !SVEStackSize;

    if (MFI.hasVarSizedObjects()) {
      // ...
      bool CanUseBP = RegInfo->hasBasePointer(MF);
      if (FPOffsetFits && CanUseBP)
        UseFP = PreferFP;
      else if (!CanUseBP)
        UseFP = true;
      // ...
    } else if (FPOffset >= 0) {
      // ...
      UseFP = true;
    } else if (MF.hasEHFunclets() && !RegInfo->hasBasePointer(MF)) {
      // ...
      //     "Funclets should only be present on Win64");
      UseFP = true;
    } else {
      if (FPOffsetFits && PreferFP)
        UseFP = true;
    }
  }

  assert(((isFixed || isCSR) || !RegInfo->hasStackRealignment(MF) || !UseFP) &&
         "In the presence of dynamic stack pointer realignment, "
         "non-argument/CSR objects cannot be accessed through the frame pointer");

  if (isSVE) {
    // ...
    if (hasFP(MF) && (SPOffset.getFixed() ||
                      FPOffset.getScalable() < SPOffset.getScalable() ||
                      RegInfo->hasStackRealignment(MF))) {
      FrameReg = RegInfo->getFrameRegister(MF);
      return FPOffset;
    }
    FrameReg = RegInfo->hasBasePointer(MF) ? RegInfo->getBaseRegister()
                                           : (unsigned)AArch64::SP;
    return SPOffset;
  }

  StackOffset ScalableOffset = {};
  if (UseFP && !(isFixed || isCSR))
    ScalableOffset = -SVEStackSize;
  if (!UseFP && (isFixed || isCSR))
    ScalableOffset = SVEStackSize;

  if (UseFP) {
    FrameReg = RegInfo->getFrameRegister(MF);
    return StackOffset::getFixed(FPOffset) + ScalableOffset;
  }
  // ...
  if (RegInfo->hasBasePointer(MF))
    FrameReg = RegInfo->getBaseRegister();
  else {
    assert(!MFI.hasVarSizedObjects() &&
           "Can't use SP when we have var sized objects.");
    FrameReg = AArch64::SP;
  }
  return Subtarget.isTargetMachO() &&
         !(Subtarget.getTargetLowering()->supportSwiftError() &&
           Attrs.hasAttrSomewhere(Attribute::SwiftError)) &&
         MF.getFunction().getCallingConv() != CallingConv::SwiftTail;
static bool invalidateWindowsRegisterPairing(unsigned Reg1, unsigned Reg2,
                                             bool NeedsWinCFI, bool IsFirst,
                                             const TargetRegisterInfo *TRI) {
  // ...
  if (Reg2 == AArch64::FP)
    return true;
  if (!NeedsWinCFI)
    return false;
  if (TRI->getEncodingValue(Reg2) == TRI->getEncodingValue(Reg1) + 1)
    return false;
  // If pairing a GPR with LR, the pair can be described by the save_lrpair
  // opcode, but only if the first register is an odd one in X19-X27 and this
  // is not the first register pair.
  if (Reg1 >= AArch64::X19 && Reg1 <= AArch64::X27 &&
      (Reg1 - AArch64::X19) % 2 == 0 && Reg2 == AArch64::LR && !IsFirst)
    return false;
  return true;
}
static bool invalidateRegisterPairing(unsigned Reg1, unsigned Reg2,
                                      bool UsesWinAAPCS, bool NeedsWinCFI,
                                      bool NeedsFrameRecord, bool IsFirst,
                                      const TargetRegisterInfo *TRI) {
  if (UsesWinAAPCS)
    return invalidateWindowsRegisterPairing(Reg1, Reg2, NeedsWinCFI, IsFirst,
                                            TRI);
  // If we need to store the frame record, don't pair any register
  // with LR other than FP.
  if (NeedsFrameRecord)
    return Reg2 == AArch64::LR;

  return false;
}
struct RegPairInfo {
  unsigned Reg1 = AArch64::NoRegister;
  unsigned Reg2 = AArch64::NoRegister;
  int FrameIdx;
  int Offset;
  enum RegType { GPR, FPR64, FPR128, PPR, ZPR } Type;

  RegPairInfo() = default;

  bool isPaired() const { return Reg2 != AArch64::NoRegister; }

  unsigned getScale() const {
    switch (Type) {
    case PPR:
      return 2;
    case GPR:
    case FPR64:
      return 8;
    case ZPR:
    case FPR128:
      return 16;
    }
    llvm_unreachable("Unsupported type");
  }

  bool isScalable() const { return Type == PPR || Type == ZPR; }
};
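
// computeCalleeSaveRegisterPairs: walk the callee-saved registers, pair them
// for STP/LDP where the ABI and unwind constraints allow, and assign each
// pair its frame index and scaled offset.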
static void computeCalleeSaveRegisterPairs(
    MachineFunction &MF, ArrayRef<CalleeSavedInfo> CSI,
    const TargetRegisterInfo *TRI, SmallVectorImpl<RegPairInfo> &RegPairs,
    bool NeedsFrameRecord) {
  // ...
  unsigned Count = CSI.size();
  // ...
  assert((Count & 1) == 0 && "Odd number of callee-saved regs to spill!");
  // ...
  int StackFillDir = -1;
  int RegInc = 1;
  unsigned FirstReg = 0;
  if (NeedsWinCFI) {
    // For WinCFI, fill the stack from the bottom up.
    StackFillDir = 1;
    RegInc = -1;
    FirstReg = Count - 1;
  }
  // ...
  for (unsigned i = FirstReg; i < Count; i += RegInc) {
    RegPairInfo RPI;
    RPI.Reg1 = CSI[i].getReg();

    if (AArch64::GPR64RegClass.contains(RPI.Reg1))
      RPI.Type = RegPairInfo::GPR;
    else if (AArch64::FPR64RegClass.contains(RPI.Reg1))
      RPI.Type = RegPairInfo::FPR64;
    else if (AArch64::FPR128RegClass.contains(RPI.Reg1))
      RPI.Type = RegPairInfo::FPR128;
    else if (AArch64::ZPRRegClass.contains(RPI.Reg1))
      RPI.Type = RegPairInfo::ZPR;
    else if (AArch64::PPRRegClass.contains(RPI.Reg1))
      RPI.Type = RegPairInfo::PPR;
    else
      llvm_unreachable("Unsupported register class.");

    // Add the next reg to the pair if it is in the same register class.
    if (unsigned(i + RegInc) < Count) {
      Register NextReg = CSI[i + RegInc].getReg();
      bool IsFirst = i == FirstReg;
      switch (RPI.Type) {
      case RegPairInfo::GPR:
        if (AArch64::GPR64RegClass.contains(NextReg) &&
            !invalidateRegisterPairing(RPI.Reg1, NextReg, IsWindows,
                                       NeedsWinCFI, NeedsFrameRecord, IsFirst,
                                       TRI))
          RPI.Reg2 = NextReg;
        break;
      case RegPairInfo::FPR64:
        if (AArch64::FPR64RegClass.contains(NextReg) &&
            !invalidateWindowsRegisterPairing(RPI.Reg1, NextReg, NeedsWinCFI,
                                              IsFirst, TRI))
          RPI.Reg2 = NextReg;
        break;
      case RegPairInfo::FPR128:
        if (AArch64::FPR128RegClass.contains(NextReg))
          RPI.Reg2 = NextReg;
        break;
      case RegPairInfo::PPR:
      case RegPairInfo::ZPR:
        break;
      }
    }

    assert((!RPI.isPaired() ||
            (CSI[i].getFrameIdx() + RegInc == CSI[i + RegInc].getFrameIdx())) &&
           "Out of order callee saved regs!");

    assert((!RPI.isPaired() || !NeedsFrameRecord || RPI.Reg2 != AArch64::FP ||
            RPI.Reg1 == AArch64::LR) &&
           "FrameRecord must be allocated together with LR");

    // Windows AAPCS has FP and LR reversed.
    assert((!RPI.isPaired() || !NeedsFrameRecord || RPI.Reg1 != AArch64::FP ||
            RPI.Reg2 == AArch64::LR) &&
           "FrameRecord must be allocated together with LR");

    // MachO's compact unwind format relies on all registers being stored in
    // adjacent register pairs.
    assert((!produceCompactUnwindFrame(MF) || !RPI.isPaired() ||
            ((RPI.Reg1 == AArch64::LR && RPI.Reg2 == AArch64::FP) ||
             RPI.Reg1 + 1 == RPI.Reg2)) &&
           "Callee-save registers not saved as adjacent register pair!");

    RPI.FrameIdx = CSI[i].getFrameIdx();
    if (NeedsWinCFI && RPI.isPaired()) // RPI.FrameIdx must be the lower index.
      RPI.FrameIdx = CSI[i + RegInc].getFrameIdx();

    int Scale = RPI.getScale();

    int OffsetPre = RPI.isScalable() ? ScalableByteOffset : ByteOffset;
    assert(OffsetPre % Scale == 0);

    if (RPI.isScalable())
      ScalableByteOffset += StackFillDir * Scale;
    else
      ByteOffset += StackFillDir * (RPI.isPaired() ? 2 * Scale : Scale);

    // Swift's async context is directly before FP, so allocate an extra
    // 8 bytes for it.
    if (AFI->hasSwiftAsyncContext() &&
        ((!IsWindows && RPI.Reg2 == AArch64::FP) ||
         (IsWindows && RPI.Reg2 == AArch64::LR)))
      ByteOffset += StackFillDir * 8;

    assert(!(RPI.isScalable() && RPI.isPaired()) &&
           "Paired spill/fill instructions don't exist for SVE vectors");

    // Insert an 8-byte gap if this non-paired slot would otherwise break the
    // 16-byte alignment of the callee-save area.
    if (NeedGapToAlignStack && !NeedsWinCFI &&
        !RPI.isScalable() && RPI.Type != RegPairInfo::FPR128 &&
        !RPI.isPaired() && ByteOffset % 16 != 0) {
      ByteOffset += 8 * StackFillDir;
      assert(MFI.getObjectAlign(RPI.FrameIdx) <= Align(16));
      MFI.setObjectAlignment(RPI.FrameIdx, Align(16));
      NeedGapToAlignStack = false;
    }

    int OffsetPost = RPI.isScalable() ? ScalableByteOffset : ByteOffset;
    assert(OffsetPost % Scale == 0);
    // If filling top down (default), we want the offset after incrementing it.
    // If filling bottom up (WinCFI) we need the original offset.
    int Offset = NeedsWinCFI ? OffsetPre : OffsetPost;
    if (AFI->hasSwiftAsyncContext() &&
        ((!IsWindows && RPI.Reg2 == AArch64::FP) ||
         (IsWindows && RPI.Reg2 == AArch64::LR)))
      Offset += 8;
    RPI.Offset = Offset / Scale;

    assert(((!RPI.isScalable() && RPI.Offset >= -64 && RPI.Offset <= 63) ||
            (RPI.isScalable() && RPI.Offset >= -256 && RPI.Offset <= 255)) &&
           "Offset out of bounds for LDP/STP immediate");

    // Save the offset to the frame record so that the FP register can point
    // to the innermost frame record (spilled FP and LR registers).
    if (NeedsFrameRecord && ((!IsWindows && RPI.Reg1 == AArch64::LR &&
                              RPI.Reg2 == AArch64::FP) ||
                             (IsWindows && RPI.Reg1 == AArch64::FP &&
                              RPI.Reg2 == AArch64::LR)))
      AFI->setCalleeSaveBaseToFrameRecordOffset(Offset);

    RegPairs.push_back(RPI);
    if (RPI.isPaired())
      i += RegInc;
  }
  if (NeedsWinCFI) {
    // ...
    MFI.setObjectAlignment(CSI[0].getFrameIdx(), Align(16));
    // ...
    std::reverse(RegPairs.begin(), RegPairs.end());
  }
}
  if (homogeneousPrologEpilog(MF)) {
    auto MIB = BuildMI(MBB, MI, DL, TII.get(AArch64::HOM_Prolog))
                   .setMIFlag(MachineInstr::FrameSetup);
    for (auto &RPI : RegPairs) {
      MIB.addReg(RPI.Reg1);
      MIB.addReg(RPI.Reg2);
      // Update register live-ins.
      if (!MRI.isReserved(RPI.Reg1))
        MBB.addLiveIn(RPI.Reg1);
      if (!MRI.isReserved(RPI.Reg2))
        MBB.addLiveIn(RPI.Reg2);
    }
    return true;
  }
  for (const RegPairInfo &RPI : llvm::reverse(RegPairs)) {
    unsigned Reg1 = RPI.Reg1;
    unsigned Reg2 = RPI.Reg2;
    unsigned StrOpc;
    unsigned Size;
    Align Alignment;
    switch (RPI.Type) {
    case RegPairInfo::GPR:
      StrOpc = RPI.isPaired() ? AArch64::STPXi : AArch64::STRXui;
      Size = 8;
      Alignment = Align(8);
      break;
    case RegPairInfo::FPR64:
      StrOpc = RPI.isPaired() ? AArch64::STPDi : AArch64::STRDui;
      Size = 8;
      Alignment = Align(8);
      break;
    case RegPairInfo::FPR128:
      StrOpc = RPI.isPaired() ? AArch64::STPQi : AArch64::STRQui;
      Size = 16;
      Alignment = Align(16);
      break;
    case RegPairInfo::ZPR:
      StrOpc = AArch64::STR_ZXI;
      Size = 16;
      Alignment = Align(16);
      break;
    case RegPairInfo::PPR:
      StrOpc = AArch64::STR_PXI;
      Size = 2;
      Alignment = Align(2);
      break;
    }
    LLVM_DEBUG(dbgs() << "CSR spill: (" << printReg(Reg1, TRI);
               if (RPI.isPaired()) dbgs() << ", " << printReg(Reg2, TRI);
               dbgs() << ") -> fi#(" << RPI.FrameIdx;
               if (RPI.isPaired()) dbgs() << ", " << RPI.FrameIdx + 1;
               dbgs() << ")\n");

    assert((!NeedsWinCFI || !(Reg1 == AArch64::LR && Reg2 == AArch64::FP)) &&
           "Windows unwinding requires a consecutive (FP,LR) pair");
    // Windows unwind codes require consecutive registers if registers are
    // paired. Make the switch here, so that the code below will save (x,x+1)
    // and not (x+1,x).
    unsigned FrameIdxReg1 = RPI.FrameIdx;
    unsigned FrameIdxReg2 = RPI.FrameIdx + 1;
    if (NeedsWinCFI && RPI.isPaired()) {
      std::swap(Reg1, Reg2);
      std::swap(FrameIdxReg1, FrameIdxReg2);
    }
    // ...
    if (!MRI.isReserved(Reg1))
      MBB.addLiveIn(Reg1);
    if (RPI.isPaired()) {
      if (!MRI.isReserved(Reg2))
        MBB.addLiveIn(Reg2);
      // ...
    }
    // Update the StackID of the SVE stack slots.
    if (RPI.Type == RegPairInfo::ZPR || RPI.Type == RegPairInfo::PPR)
      MFI.setStackID(RPI.FrameIdx, TargetStackID::ScalableVector);
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();

  auto EmitMI = [&](const RegPairInfo &RPI) {
    unsigned Reg1 = RPI.Reg1;
    unsigned Reg2 = RPI.Reg2;
    unsigned LdrOpc;
    unsigned Size;
    Align Alignment;
    switch (RPI.Type) {
    case RegPairInfo::GPR:
      LdrOpc = RPI.isPaired() ? AArch64::LDPXi : AArch64::LDRXui;
      Size = 8;
      Alignment = Align(8);
      break;
    case RegPairInfo::FPR64:
      LdrOpc = RPI.isPaired() ? AArch64::LDPDi : AArch64::LDRDui;
      Size = 8;
      Alignment = Align(8);
      break;
    case RegPairInfo::FPR128:
      LdrOpc = RPI.isPaired() ? AArch64::LDPQi : AArch64::LDRQui;
      Size = 16;
      Alignment = Align(16);
      break;
    case RegPairInfo::ZPR:
      LdrOpc = AArch64::LDR_ZXI;
      Size = 16;
      Alignment = Align(16);
      break;
    case RegPairInfo::PPR:
      LdrOpc = AArch64::LDR_PXI;
      Size = 2;
      Alignment = Align(2);
      break;
    }
    LLVM_DEBUG(dbgs() << "CSR restore: (" << printReg(Reg1, TRI);
               if (RPI.isPaired()) dbgs() << ", " << printReg(Reg2, TRI);
               dbgs() << ") -> fi#(" << RPI.FrameIdx;
               if (RPI.isPaired()) dbgs() << ", " << RPI.FrameIdx + 1;
               dbgs() << ")\n");

    // Windows unwind codes require consecutive registers if registers are
    // paired.
    unsigned FrameIdxReg1 = RPI.FrameIdx;
    unsigned FrameIdxReg2 = RPI.FrameIdx + 1;
    if (NeedsWinCFI && RPI.isPaired()) {
      std::swap(Reg1, Reg2);
      std::swap(FrameIdxReg1, FrameIdxReg2);
    }
    // ...
    if (RPI.isPaired()) {
      // ...
    }
    // ...
  };
  // SVE slots are restored first, in reverse order.
  for (const RegPairInfo &RPI : reverse(RegPairs))
    if (RPI.isScalable())
      EmitMI(RPI);

  if (homogeneousPrologEpilog(MF, &MBB)) {
    auto MIB = BuildMI(MBB, MBBI, DL, TII.get(AArch64::HOM_Epilog))
                   .setMIFlag(MachineInstr::FrameDestroy);
    for (auto &RPI : RegPairs) {
      MIB.addReg(RPI.Reg1, RegState::Define);
      MIB.addReg(RPI.Reg2, RegState::Define);
    }
    return true;
  }

  if (ReverseCSRRestoreSeq) {
    // ...
    for (const RegPairInfo &RPI : reverse(RegPairs)) {
      if (RPI.isScalable())
        continue;
      // ...
    }
  } else {
    for (const RegPairInfo &RPI : RegPairs) {
      if (RPI.isScalable())
        continue;
      // ...
    }
  }
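
// determineCalleeSaves: decide which of the registers reported by
// TargetRegisterInfo::getCalleeSavedRegs() actually need to be saved, and for
// large frames reserve an extra scratch register or emergency spill slot.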
void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF,
                                                BitVector &SavedRegs,
                                                RegScavenger *RS) const {
  // ...
  unsigned UnspilledCSGPR = AArch64::NoRegister;
  unsigned UnspilledCSGPRPaired = AArch64::NoRegister;
  // ...
  unsigned ExtraCSSpill = 0;
  // ...
  for (unsigned i = 0; CSRegs[i]; ++i) {
    const unsigned Reg = CSRegs[i];
    // ...
    if (Reg == BasePointerReg)
      SavedRegs.set(Reg);

    bool RegUsed = SavedRegs.test(Reg);
    unsigned PairedReg = AArch64::NoRegister;
    if (AArch64::GPR64RegClass.contains(Reg) ||
        AArch64::FPR64RegClass.contains(Reg) ||
        AArch64::FPR128RegClass.contains(Reg))
      PairedReg = CSRegs[i ^ 1];

    if (!RegUsed) {
      if (AArch64::GPR64RegClass.contains(Reg) &&
          !RegInfo->isReservedReg(MF, Reg)) {
        UnspilledCSGPR = Reg;
        UnspilledCSGPRPaired = PairedReg;
      }
      continue;
    }

    // MachO's compact unwind format relies on all registers being stored in
    // pairs.
    if (producePairRegisters(MF) && PairedReg != AArch64::NoRegister &&
        !SavedRegs.test(PairedReg)) {
      SavedRegs.set(PairedReg);
      if (AArch64::GPR64RegClass.contains(PairedReg) &&
          !RegInfo->isReservedReg(MF, PairedReg))
        ExtraCSSpill = PairedReg;
    }
  }
  // ...
    SavedRegs.set(AArch64::X18);
  // Calculate the callee-saved stack size.
  unsigned CSStackSize = 0;
  unsigned SVECSStackSize = 0;
  // ...
  for (unsigned Reg : SavedRegs.set_bits()) {
    auto RegSize = TRI->getRegSizeInBits(Reg, MRI) / 8;
    if (AArch64::PPRRegClass.contains(Reg) ||
        AArch64::ZPRRegClass.contains(Reg))
      SVECSStackSize += RegSize;
    else
      CSStackSize += RegSize;
  }

  // Save the number of saved regs, so we can easily update CSStackSize later.
  unsigned NumSavedRegs = SavedRegs.count();
  // ...
    SavedRegs.set(AArch64::FP);
    SavedRegs.set(AArch64::LR);
  // ...
  int64_t SVEStackSize =
      alignTo(SVECSStackSize + estimateSVEStackObjectOffsets(MFI), 16);
  bool CanEliminateFrame = (SavedRegs.count() == 0) && !SVEStackSize;
  // ...
  int64_t CalleeStackUsed = 0;
  for (int I = MFI.getObjectIndexBegin(); I != 0; ++I) {
    int64_t FixedOff = MFI.getObjectOffset(I);
    if (FixedOff > CalleeStackUsed)
      CalleeStackUsed = FixedOff;
  }
  // ...
  bool BigStack = SVEStackSize || (EstimatedStackSize + CSStackSize +
                                   CalleeStackUsed) > EstimatedStackSizeLimit;
  if (BigStack || !CanEliminateFrame || RegInfo->cannotEliminateFrame(MF))
    AFI->setHasStackFrame(true);
  if (BigStack) {
    if (!ExtraCSSpill && UnspilledCSGPR != AArch64::NoRegister) {
      LLVM_DEBUG(dbgs() << "Spilling " << printReg(UnspilledCSGPR, RegInfo)
                        << " to get a scratch register.\n");
      SavedRegs.set(UnspilledCSGPR);
      // MachO's compact unwind format relies on all registers being stored in
      // pairs, so if we need to spill one extra for BigStack, then we need to
      // store the pair.
      if (producePairRegisters(MF))
        SavedRegs.set(UnspilledCSGPRPaired);
      ExtraCSSpill = UnspilledCSGPR;
    }

    // If we didn't find an extra callee-save register to spill, create
    // an emergency spill slot.
    if (!ExtraCSSpill || MF.getRegInfo().isPhysRegUsed(ExtraCSSpill)) {
      // ...
      const TargetRegisterClass &RC = AArch64::GPR64RegClass;
      unsigned Size = TRI->getSpillSize(RC);
      Align Alignment = TRI->getSpillAlign(RC);
      int FI = MFI.CreateStackObject(Size, Alignment, false);
      RS->addScavengingFrameIndex(FI);
      LLVM_DEBUG(dbgs() << "No available CS registers, allocated fi#" << FI
                        << " as the emergency spill slot.\n");
    }
  }

  // Add the size of the additional 64-bit GPR saves.
  CSStackSize += 8 * (SavedRegs.count() - NumSavedRegs);

  // A Swift asynchronous context extends the frame record with a pointer.
  if (hasFP(MF) && AFI->hasSwiftAsyncContext())
    CSStackSize += 8;

  uint64_t AlignedCSStackSize = alignTo(CSStackSize, 16);
  LLVM_DEBUG(dbgs() << "Estimated stack frame size: "
                    << EstimatedStackSize + AlignedCSStackSize
                    << " bytes.\n");

  assert((!MFI.isCalleeSavedInfoValid() ||
          AFI->getCalleeSavedStackSize() == AlignedCSStackSize) &&
         "Should not invalidate callee saved info");

  // Round up to register-pair alignment to avoid additional SP adjustment
  // instructions.
  AFI->setCalleeSavedStackSize(AlignedCSStackSize);
  AFI->setCalleeSaveStackHasFreeSpace(AlignedCSStackSize != CSStackSize);
  AFI->setSVECalleeSavedStackSize(alignTo(SVECSStackSize, 16));
}
bool AArch64FrameLowering::assignCalleeSavedSpillSlots(
    MachineFunction &MF, const TargetRegisterInfo *TRI,
    std::vector<CalleeSavedInfo> &CSI, unsigned &MinCSFrameIndex,
    unsigned &MaxCSFrameIndex) const {
  // ...
    std::reverse(CSI.begin(), CSI.end());
  // ...
    if ((unsigned)FrameIdx < MinCSFrameIndex)
      MinCSFrameIndex = FrameIdx;
    if ((unsigned)FrameIdx > MaxCSFrameIndex)
      MaxCSFrameIndex = FrameIdx;
  // ...
  for (auto &CS : CSI) {
    // ...
    CS.setFrameIdx(FrameIdx);

    if ((unsigned)FrameIdx < MinCSFrameIndex)
      MinCSFrameIndex = FrameIdx;
    if ((unsigned)FrameIdx > MaxCSFrameIndex)
      MaxCSFrameIndex = FrameIdx;

    // Grab 8 bytes below FP for the extended asynchronous frame info.
    if (hasFP(MF) && AFI->hasSwiftAsyncContext() && !UsesWinAAPCS &&
        Reg == AArch64::FP) {
      // ...
      if ((unsigned)FrameIdx < MinCSFrameIndex)
        MinCSFrameIndex = FrameIdx;
      if ((unsigned)FrameIdx > MaxCSFrameIndex)
        MaxCSFrameIndex = FrameIdx;
    }
  }
  return true;
}
static bool getSVECalleeSaveSlotRange(const MachineFrameInfo &MFI, int &Min,
                                      int &Max) {
  Min = std::numeric_limits<int>::max();
  Max = std::numeric_limits<int>::min();
  // ...
  for (auto &CS : CSI) {
    if (AArch64::ZPRRegClass.contains(CS.getReg()) ||
        AArch64::PPRRegClass.contains(CS.getReg())) {
      assert((Max == std::numeric_limits<int>::min() ||
              Max + 1 == CS.getFrameIdx()) &&
             "SVE CalleeSaves are not consecutive");
      Min = std::min(Min, CS.getFrameIdx());
      Max = std::max(Max, CS.getFrameIdx());
    }
  }
  return Min != std::numeric_limits<int>::max();
}
static int64_t determineSVEStackObjectOffsets(MachineFrameInfo &MFI,
                                              int &MinCSFrameIndex,
                                              int &MaxCSFrameIndex,
                                              bool AssignOffsets) {
  // ...
         "SVE vectors should never be passed on the stack by value, only by "
         "reference.");

  auto Assign = [&MFI](int FI, int64_t Offset) {
    // ...
    MFI.setObjectOffset(FI, Offset);
  };
  // ...
  for (int I = MinCSFrameIndex; I <= MaxCSFrameIndex; ++I) {
    // ...
  }
  // ...
  int StackProtectorFI = -1;
  if (MFI.hasStackProtectorIndex()) {
    StackProtectorFI = MFI.getStackProtectorIndex();
    if (MFI.getStackID(StackProtectorFI) == TargetStackID::ScalableVector)
      ObjectsToAllocate.push_back(StackProtectorFI);
  }
  for (int I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I) {
    // ...
    if (I == StackProtectorFI)
      continue;
    if (MaxCSFrameIndex >= I && I >= MinCSFrameIndex)
      continue;
    // ...
  }
  // ...
  for (unsigned FI : ObjectsToAllocate) {
    Align Alignment = MFI.getObjectAlign(FI);
    if (Alignment > Align(16))
      report_fatal_error(
          "Alignment of scalable vectors > 16 bytes is not yet supported");
    // ...
  }
int64_t AArch64FrameLowering::estimateSVEStackObjectOffsets(
    MachineFrameInfo &MFI) const {
  int MinCSFrameIndex, MaxCSFrameIndex;
  return determineSVEStackObjectOffsets(MFI, MinCSFrameIndex, MaxCSFrameIndex,
                                        false);
}

int64_t AArch64FrameLowering::assignSVEStackObjectOffsets(
    MachineFrameInfo &MFI, int &MinCSFrameIndex, int &MaxCSFrameIndex) const {
  return determineSVEStackObjectOffsets(MFI, MinCSFrameIndex, MaxCSFrameIndex,
                                        true);
}

void AArch64FrameLowering::processFunctionBeforeFrameFinalized(
    MachineFunction &MF, RegScavenger *RS) const {
  // ...
  assert(getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown &&
         "Upwards growing stack unsupported");

  int MinCSFrameIndex, MaxCSFrameIndex;
  int64_t SVEStackSize =
      assignSVEStackObjectOffsets(MFI, MinCSFrameIndex, MaxCSFrameIndex);
  // ...
  int64_t FixedObject =
      getFixedObjectSize(MF, AFI, IsWin64, /*IsFunclet=*/false);
  // ...
    assert(DstReg && "There must be a free register after frame setup");
  // ...
}
struct TagStoreInstr {
  MachineInstr *MI;
  int64_t Offset, Size;
  explicit TagStoreInstr(MachineInstr *MI, int64_t Offset, int64_t Size)
      : MI(MI), Offset(Offset), Size(Size) {}
};

class TagStoreEdit {
  // ...
  // Desired update to the frame register; folded into the tag-store loop when
  // possible.
  std::optional<int64_t> FrameRegUpdate;
  // MIFlags for any frame-register update instruction.
  unsigned FrameRegUpdateFlags;
  // ...

public:
  TagStoreEdit(MachineBasicBlock *MBB, bool ZeroData)
      : MBB(MBB), ZeroData(ZeroData) {}

  // Add an instruction to be replaced. Instructions must be added in
  // ascending order of Offset, and have to be adjacent.
  void addInstruction(TagStoreInstr I) {
    assert((TagStores.empty() ||
            TagStores.back().Offset + TagStores.back().Size == I.Offset) &&
           "Non-adjacent tag store instructions.");
    TagStores.push_back(I);
  }
  const int64_t kMinOffset = -256 * 16;
  const int64_t kMaxOffset = 255 * 16;

  Register BaseReg = FrameReg;
  int64_t BaseRegOffsetBytes = FrameRegOffset.getFixed();
  if (BaseRegOffsetBytes < kMinOffset ||
      BaseRegOffsetBytes + (Size - Size % 32) > kMaxOffset ||
      // BaseReg can be FP, which is not necessarily aligned to 16 bytes. In
      // that case BaseRegOffsetBytes will not be aligned either, and ST2G
      // requires a 16-byte aligned offset.
      BaseRegOffsetBytes % 16 != 0) {
    Register ScratchReg = MRI->createVirtualRegister(&AArch64::GPR64RegClass);
    emitFrameOffset(*MBB, InsertI, DL, ScratchReg, BaseReg,
                    StackOffset::getFixed(BaseRegOffsetBytes), TII);
    BaseReg = ScratchReg;
    BaseRegOffsetBytes = 0;
  }

  MachineInstr *LastI = nullptr;
  while (Size) {
    int64_t InstrSize = (Size > 16) ? 32 : 16;
    unsigned Opcode =
        InstrSize == 16
            ? (ZeroData ? AArch64::STZGi : AArch64::STGi)
            : (ZeroData ? AArch64::STZ2Gi : AArch64::ST2Gi);
    assert(BaseRegOffsetBytes % 16 == 0);
    MachineInstr *I = BuildMI(*MBB, InsertI, DL, TII->get(Opcode))
                          .addReg(AArch64::SP)
                          .addReg(BaseReg)
                          .addImm(BaseRegOffsetBytes / 16)
                          .setMemRefs(CombinedMemRefs);
    // A store to [BaseReg, #0] should go last for an opportunity to fold the
    // final SP adjustment in the epilogue.
    if (BaseRegOffsetBytes == 0)
      LastI = I;
    BaseRegOffsetBytes += InstrSize;
    Size -= InstrSize;
  }
  Register BaseReg = FrameRegUpdate
                         ? MRI->createVirtualRegister(&AArch64::GPR64spRegClass)
                         : MRI->createVirtualRegister(&AArch64::GPR64RegClass);
  Register SizeReg = MRI->createVirtualRegister(&AArch64::GPR64RegClass);

  emitFrameOffset(*MBB, InsertI, DL, BaseReg, FrameReg, FrameRegOffset, TII);

  int64_t LoopSize = Size;
  // If the loop size is not a multiple of 32, split off one 16-byte store at
  // the end to fold the base register update into.
  if (FrameRegUpdate && *FrameRegUpdate)
    LoopSize -= LoopSize % 32;
  MachineInstr *LoopI = BuildMI(*MBB, InsertI, DL,
                                TII->get(ZeroData ? AArch64::STZGloop_wback
                                                  : AArch64::STGloop_wback))
                            .addDef(SizeReg)
                            .addDef(BaseReg)
                            .addImm(LoopSize)
                            .addReg(BaseReg)
                            .setMemRefs(CombinedMemRefs);
  if (FrameRegUpdate)
    LoopI->setFlags(FrameRegUpdateFlags);

  int64_t ExtraBaseRegUpdate =
      FrameRegUpdate ? (*FrameRegUpdate - FrameRegOffset.getFixed() - Size) : 0;
  if (LoopSize < Size) {
    assert(FrameRegUpdate);
    assert(Size - LoopSize == 16);
    // Tag 16 more bytes at BaseReg and update BaseReg.
    BuildMI(*MBB, InsertI, DL,
            TII->get(ZeroData ? AArch64::STZGPostIndex : AArch64::STGPostIndex))
        .addDef(BaseReg)
        .addReg(AArch64::SP)
        .addReg(BaseReg)
        .addImm(1 + ExtraBaseRegUpdate / 16)
        .setMemRefs(CombinedMemRefs)
        .setMIFlags(FrameRegUpdateFlags);
  } else if (ExtraBaseRegUpdate) {
    // Update BaseReg.
    BuildMI(*MBB, InsertI, DL,
            TII->get(ExtraBaseRegUpdate > 0 ? AArch64::ADDXri : AArch64::SUBXri))
        .addDef(BaseReg)
        .addReg(BaseReg)
        .addImm(std::abs(ExtraBaseRegUpdate))
        .addImm(0)
        .setMIFlags(FrameRegUpdateFlags);
  }
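
// canMergeRegUpdate: check whether the ADDXri/SUBXri at II adjusts Reg by an
// amount that can be folded into a post-indexed tag-store loop; on success
// the total adjustment is returned through TotalOffset.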
static bool canMergeRegUpdate(MachineBasicBlock::iterator II, unsigned Reg,
                              int64_t Size, int64_t *TotalOffset) {
  MachineInstr &MI = *II;
  if ((MI.getOpcode() == AArch64::ADDXri ||
       MI.getOpcode() == AArch64::SUBXri) &&
      MI.getOperand(0).getReg() == Reg && MI.getOperand(1).getReg() == Reg) {
    unsigned Shift = AArch64_AM::getShiftValue(MI.getOperand(3).getImm());
    int64_t Offset = MI.getOperand(2).getImm() << Shift;
    if (MI.getOpcode() == AArch64::SUBXri)
      Offset = -Offset;
    int64_t AbsPostOffset = std::abs(Offset - Size);
    const int64_t kMaxOffset =
        0xFFF; // Max encoding for unshifted ADDXri/SUBXri.
    if (AbsPostOffset <= kMaxOffset && AbsPostOffset % 16 == 0) {
      *TotalOffset = Offset;
      return true;
    }
  }
  return false;
}
static void mergeMemRefs(const SmallVectorImpl<TagStoreInstr> &TSE,
                         SmallVectorImpl<MachineMemOperand *> &MemRefs) {
  MemRefs.clear();
  for (auto &TS : TSE) {
    MachineInstr *MI = TS.MI;
    // An instruction without memory operands may access anything. Be
    // conservative and return an empty list.
    if (MI->memoperands_empty()) {
      MemRefs.clear();
      return;
    }
    MemRefs.append(MI->memoperands_begin(), MI->memoperands_end());
  }
}
void TagStoreEdit::emitCode(MachineBasicBlock::iterator &InsertI,
                            const AArch64FrameLowering *TFI,
                            bool TryMergeSPUpdate) {
  if (TagStores.empty())
    return;
  TagStoreInstr &FirstTagStore = TagStores[0];
  TagStoreInstr &LastTagStore = TagStores[TagStores.size() - 1];
  Size = LastTagStore.Offset - FirstTagStore.Offset + LastTagStore.Size;
  DL = TagStores[0].MI->getDebugLoc();

  Register Reg;
  FrameRegOffset = TFI->resolveFrameOffsetReference(
      *MF, FirstTagStore.Offset, false /*isFixed*/, false /*isSVE*/, Reg,
      /*PreferFP=*/false, /*ForSimm=*/true);
  FrameReg = Reg;
  FrameRegUpdate = std::nullopt;

  mergeMemRefs(TagStores, CombinedMemRefs);

  LLVM_DEBUG({
    dbgs() << "Replacing adjacent STG instructions:\n";
    for (const auto &Instr : TagStores)
      dbgs() << "  " << *Instr.MI;
  });

  if (Size < kSetTagLoopThreshold) {
    if (TagStores.size() < 2)
      return;
    emitUnrolled(InsertI);
  } else {
    MachineInstr *UpdateInstr = nullptr;
    int64_t TotalOffset = 0;
    if (TryMergeSPUpdate) {
      // See if we can merge the base register update into the STGloop.
      if (InsertI != MBB->end() &&
          canMergeRegUpdate(InsertI, FrameReg, FrameRegOffset.getFixed() + Size,
                            &TotalOffset)) {
        UpdateInstr = &*InsertI++;
      }
    }

    if (!UpdateInstr && TagStores.size() < 2)
      return;

    if (UpdateInstr) {
      FrameRegUpdate = TotalOffset;
      FrameRegUpdateFlags = UpdateInstr->getFlags();
    }
    emitLoop(InsertI);
    if (UpdateInstr)
      UpdateInstr->eraseFromParent();
  }

  for (auto &TS : TagStores)
    TS.MI->eraseFromParent();
}
static bool isMergeableStackTaggingInstruction(MachineInstr &MI,
                                               int64_t &Offset, int64_t &Size,
                                               bool &ZeroData) {
  // ...
  unsigned Opcode = MI.getOpcode();
  ZeroData = (Opcode == AArch64::STZGloop || Opcode == AArch64::STZGi ||
              Opcode == AArch64::STZ2Gi);

  if (Opcode == AArch64::STGloop || Opcode == AArch64::STZGloop) {
    if (!MI.getOperand(0).isDead() || !MI.getOperand(1).isDead())
      return false;
    if (!MI.getOperand(2).isImm() || !MI.getOperand(3).isFI())
      return false;
    Offset = MFI.getObjectOffset(MI.getOperand(3).getIndex());
    Size = MI.getOperand(2).getImm();
    return true;
  }

  if (Opcode == AArch64::STGi || Opcode == AArch64::STZGi)
    Size = 16;
  else if (Opcode == AArch64::ST2Gi || Opcode == AArch64::STZ2Gi)
    Size = 32;
  else
    return false;

  if (MI.getOperand(0).getReg() != AArch64::SP || !MI.getOperand(1).isFI())
    return false;

  Offset = MFI.getObjectOffset(MI.getOperand(1).getIndex()) +
           16 * MI.getOperand(2).getImm();
  return true;
}
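
// tryMergeAdjacentSTG: scan forward from II for mergeable stack-tagging
// stores over adjacent slots and rewrite each maximal run via TagStoreEdit.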
  if (!isMergeableStackTaggingInstruction(MI, Offset, Size, FirstZeroData))
    return II;
  // ...
  constexpr int kScanLimit = 10;
  int Count = 0;
  for (auto E = MBB->end(); NextI != E && Count < kScanLimit; ++NextI) {
    MachineInstr &MI = *NextI;
    // ...
    if (isMergeableStackTaggingInstruction(MI, Offset, Size, ZeroData)) {
      if (ZeroData != FirstZeroData)
        break;
      Instrs.emplace_back(&MI, Offset, Size);
      continue;
    }

    // Only count non-transient instructions toward the scan limit.
    if (!MI.isTransient())
      ++Count;
    // ...
    if (MI.mayLoadOrStore() || MI.hasUnmodeledSideEffects())
      break;
  }
  // ...
  llvm::stable_sort(Instrs,
                    [](const TagStoreInstr &Left, const TagStoreInstr &Right) {
                      return Left.Offset < Right.Offset;
                    });

  // Make sure that we don't have any overlapping stores.
  int64_t CurOffset = Instrs[0].Offset;
  for (auto &Instr : Instrs) {
    if (CurOffset > Instr.Offset)
      return NextI;
    CurOffset = Instr.Offset + Instr.Size;
  }
  // ...
  TagStoreEdit TSE(MBB, FirstZeroData);
  std::optional<int64_t> EndOffset;
  for (auto &Instr : Instrs) {
    if (EndOffset && *EndOffset != Instr.Offset) {
      // Found a gap.
      TSE.emitCode(InsertI, TFI, /*TryMergeSPUpdate=*/false);
      // ...
    }
    TSE.addInstruction(Instr);
    EndOffset = Instr.Offset + Instr.Size;
  }
  // ...
void AArch64FrameLowering::processFunctionBeforeFrameIndicesReplaced(
    MachineFunction &MF, RegScavenger *RS) const {
  if (StackTaggingMergeSetTag)
    for (auto &BB : MF)
      for (MachineBasicBlock::iterator II = BB.begin(); II != BB.end();)
        II = tryMergeAdjacentSTG(II, this, RS);
}
StackOffset AArch64FrameLowering::getFrameIndexReferencePreferSP(
    const MachineFunction &MF, int FI, Register &FrameReg,
    bool IgnoreSPUpdates) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (IgnoreSPUpdates) {
    LLVM_DEBUG(dbgs() << "Offset from the SP for " << FI << " is "
                      << MFI.getObjectOffset(FI) << "\n");
    FrameReg = AArch64::SP;
    return StackOffset::getFixed(MFI.getObjectOffset(FI));
  }
  // ...
  FrameReg = AArch64::SP;
  return getStackOffset(MF, MFI.getObjectOffset(FI));
}
struct FrameObject {
  bool IsValid = false;
  // Index of the object in MFI.
  int ObjectIndex = 0;
  // Group ID this object belongs to.
  int GroupIndex = -1;
  // This object should be placed first (closest to SP).
  bool ObjectFirst = false;
  // This object's group (which always contains the object with
  // ObjectFirst==true) should be placed first.
  bool GroupFirst = false;
};

class GroupBuilder {
  SmallVector<int, 8> CurrentMembers;
  int NextGroupIndex = 0;
  std::vector<FrameObject> &Objects;

public:
  GroupBuilder(std::vector<FrameObject> &Objects) : Objects(Objects) {}
  void AddMember(int Index) { CurrentMembers.push_back(Index); }
  void EndCurrentGroup() {
    if (CurrentMembers.size() > 1) {
      LLVM_DEBUG(dbgs() << "group:");
      for (int Index : CurrentMembers) {
        Objects[Index].GroupIndex = NextGroupIndex;
        LLVM_DEBUG(dbgs() << " " << Index);
      }
      LLVM_DEBUG(dbgs() << "\n");
      NextGroupIndex++;
    }
    CurrentMembers.clear();
  }
};
bool FrameObjectCompare(const FrameObject &A, const FrameObject &B) {
  return std::make_tuple(!A.IsValid, A.ObjectFirst, A.GroupFirst, A.GroupIndex,
                         A.ObjectIndex) <
         std::make_tuple(!B.IsValid, B.ObjectFirst, B.GroupFirst, B.GroupIndex,
                         B.ObjectIndex);
}
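
// orderFrameObjects: order the symbols in the local stack frame so that slots
// tagged by the same instruction group stay adjacent, and the tagged base
// pointer's slot (and its group) comes first.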
  for (auto &Obj : ObjectsToAllocate) {
    FrameObjects[Obj].IsValid = true;
    FrameObjects[Obj].ObjectIndex = Obj;
  }

  // Identify stack slots that are tagged at the same time.
  GroupBuilder GB(FrameObjects);
  for (auto &MBB : MF) {
    for (auto &MI : MBB) {
      if (MI.isDebugInstr())
        continue;
      int OpIndex;
      switch (MI.getOpcode()) {
      case AArch64::STGloop:
      case AArch64::STZGloop:
        OpIndex = 3;
        break;
      case AArch64::STGi:
      case AArch64::STZGi:
      case AArch64::ST2Gi:
      case AArch64::STZ2Gi:
        OpIndex = 1;
        break;
      default:
        OpIndex = -1;
      }

      int TaggedFI = -1;
      if (OpIndex >= 0) {
        const MachineOperand &MO = MI.getOperand(OpIndex);
        if (MO.isFI()) {
          int FI = MO.getIndex();
          if (FI >= 0 && FI < MFI.getObjectIndexEnd() &&
              FrameObjects[FI].IsValid)
            TaggedFI = FI;
        }
      }

      if (TaggedFI >= 0)
        GB.AddMember(TaggedFI);
      else
        GB.EndCurrentGroup();
    }
    // Groups should never span multiple basic blocks.
    GB.EndCurrentGroup();
  }
  // ...
  std::optional<int> TBPI = AFI.getTaggedBasePointerIndex();
  if (TBPI) {
    FrameObjects[*TBPI].ObjectFirst = true;
    FrameObjects[*TBPI].GroupFirst = true;
    int FirstGroupIndex = FrameObjects[*TBPI].GroupIndex;
    if (FirstGroupIndex >= 0)
      for (FrameObject &Object : FrameObjects)
        if (Object.GroupIndex == FirstGroupIndex)
          Object.GroupFirst = true;
  }

  llvm::stable_sort(FrameObjects, FrameObjectCompare);

  int i = 0;
  for (auto &Obj : FrameObjects) {
    // All invalid items are sorted at the end, so it's safe to stop.
    if (!Obj.IsValid)
      break;
    ObjectsToAllocate[i++] = Obj.ObjectIndex;
  }

  LLVM_DEBUG({
    dbgs() << "Final frame order:\n";
    for (auto &Obj : FrameObjects) {
      if (!Obj.IsValid)
        break;
      dbgs() << "  " << Obj.ObjectIndex << ": group " << Obj.GroupIndex;
      if (Obj.ObjectFirst)
        dbgs() << ", first";
      if (Obj.GroupFirst)
        dbgs() << ", group-first";
      dbgs() << "\n";
    }
  });
}