// Debug category tag consumed by LLVM_DEBUG/-debug-only filtering for this
// file's diagnostic output.
236#define DEBUG_TYPE "frame-info"
239 cl::desc(
"enable use of redzone on AArch64"),
244 cl::desc(
"reverse the CSR restore sequence"),
248 "stack-tagging-merge-settag",
258 cl::desc(
"Emit homogeneous prologue and epilogue for the size "
259 "optimization (default = off)"));
// Counter incremented (see the prologue-emission path below) once per
// function whose stack frame fits in the red zone, so no SP adjustment is
// emitted for it; reported via LLVM's -stats machinery.
261STATISTIC(NumRedZoneFunctions,
          "Number of functions using red zone");
272 bool IsTailCallReturn =
false;
274 unsigned RetOpcode =
MBBI->getOpcode();
275 IsTailCallReturn = RetOpcode == AArch64::TCRETURNdi ||
276 RetOpcode == AArch64::TCRETURNri ||
277 RetOpcode == AArch64::TCRETURNriBTI;
281 int64_t ArgumentPopSize = 0;
282 if (IsTailCallReturn) {
288 ArgumentPopSize = StackAdjust.
getImm();
297 return ArgumentPopSize;
308bool AArch64FrameLowering::homogeneousPrologEpilog(
338bool AArch64FrameLowering::producePairRegisters(
MachineFunction &MF)
const {
357 if (
MI.isDebugInstr() ||
MI.isPseudo() ||
358 MI.getOpcode() == AArch64::ADDXri ||
359 MI.getOpcode() == AArch64::ADDSXri)
386 if (!IsWin64 || IsFunclet) {
394 const unsigned UnwindHelpObject = (MF.
hasEHFunclets() ? 8 : 0);
395 return alignTo(VarArgsArea + UnwindHelpObject, 16);
412 const unsigned RedZoneSize =
421 return !(MFI.
hasCalls() ||
hasFP(MF) || NumBytes > RedZoneSize ||
473 unsigned Opc =
I->getOpcode();
474 bool IsDestroy = Opc ==
TII->getCallFrameDestroyOpcode();
475 uint64_t CalleePopAmount = IsDestroy ?
I->getOperand(1).getImm() : 0;
478 int64_t Amount =
I->getOperand(0).getImm();
486 if (CalleePopAmount == 0) {
497 assert(Amount > -0xffffff && Amount < 0xffffff &&
"call frame too large");
501 }
else if (CalleePopAmount != 0) {
504 assert(CalleePopAmount < 0xffffff &&
"call frame too large");
511void AArch64FrameLowering::emitCalleeSavedGPRLocations(
525 for (
const auto &Info : CSI) {
529 assert(!
Info.isSpilledToReg() &&
"Spilling to registers not implemented");
530 unsigned DwarfReg =
TRI.getDwarfRegNum(
Info.getReg(),
true);
542void AArch64FrameLowering::emitCalleeSavedSVELocations(
558 for (
const auto &Info : CSI) {
564 assert(!
Info.isSpilledToReg() &&
"Spilling to registers not implemented");
599 const MCInstrDesc &CFIDesc =
TII.get(TargetOpcode::CFI_INSTRUCTION);
605 nullptr,
TRI.getDwarfRegNum(AArch64::SP,
true), 0));
609 if (MFI.shouldSignReturnAddress(MF)) {
617 TRI.getDwarfRegNum(AArch64::X18,
true));
620 const std::vector<CalleeSavedInfo> &CSI =
622 for (
const auto &
Info : CSI) {
623 unsigned Reg =
Info.getReg();
624 if (!
TRI.regNeedsCFI(Reg, Reg))
627 TRI.getDwarfRegNum(Reg,
true));
646 for (
const auto &
Info : CSI) {
651 unsigned Reg =
Info.getReg();
657 nullptr,
TRI.getDwarfRegNum(
Info.getReg(),
true)));
664void AArch64FrameLowering::emitCalleeSavedGPRRestores(
669void AArch64FrameLowering::emitCalleeSavedSVERestores(
683 case AArch64::W##n: \
684 case AArch64::X##n: \
709 case AArch64::B##n: \
710 case AArch64::H##n: \
711 case AArch64::S##n: \
712 case AArch64::D##n: \
713 case AArch64::Q##n: \
714 return HasSVE ? AArch64::Z##n : AArch64::Q##n
751void AArch64FrameLowering::emitZeroCallUsedRegs(
BitVector RegsToZero,
767 bool HasSVE = STI.hasSVE();
769 if (
TRI.isGeneralPurposeRegister(MF, Reg)) {
772 GPRsToZero.set(XReg);
773 }
else if (AArch64::FPR128RegClass.
contains(Reg) ||
774 AArch64::FPR64RegClass.
contains(Reg) ||
775 AArch64::FPR32RegClass.
contains(Reg) ||
776 AArch64::FPR16RegClass.
contains(Reg) ||
777 AArch64::FPR8RegClass.
contains(Reg)) {
780 FPRsToZero.set(XReg);
801 {AArch64::P0, AArch64::P1, AArch64::P2, AArch64::P3, AArch64::P4,
802 AArch64::P5, AArch64::P6, AArch64::P7, AArch64::P8, AArch64::P9,
803 AArch64::P10, AArch64::P11, AArch64::P12, AArch64::P13, AArch64::P14,
805 if (RegsToZero[PReg])
836 for (
unsigned i = 0; CSRegs[i]; ++i)
837 LiveRegs.
addReg(CSRegs[i]);
844 for (
unsigned Reg : AArch64::GPR64RegClass) {
848 return AArch64::NoRegister;
859 if (!RegInfo->hasStackRealignment(*MF))
874 unsigned StackProbeSize =
875 F.getFnAttributeAsParsedInteger(
"stack-probe-size", 4096);
876 return (StackSizeInBytes >= StackProbeSize) &&
877 !
F.hasFnAttribute(
"no-stack-arg-probe");
883 F.needsUnwindTableEntry();
886bool AArch64FrameLowering::shouldCombineCSRLocalStackBump(
892 if (homogeneousPrologEpilog(MF))
915 if (MFI.hasVarSizedObjects())
918 if (
RegInfo->hasStackRealignment(MF))
935bool AArch64FrameLowering::shouldCombineCSRLocalStackBumpInEpilogue(
937 if (!shouldCombineCSRLocalStackBump(*
MBB.
getParent(), StackBumpBytes))
947 while (LastI != Begin) {
949 if (LastI->isTransient())
954 switch (LastI->getOpcode()) {
955 case AArch64::STGloop:
956 case AArch64::STZGloop:
960 case AArch64::STZ2Gi:
973 unsigned Opc =
MBBI->getOpcode();
977 unsigned ImmIdx =
MBBI->getNumOperands() - 1;
978 int Imm =
MBBI->getOperand(ImmIdx).getImm();
986 case AArch64::LDPDpost:
989 case AArch64::STPDpre: {
990 unsigned Reg0 =
RegInfo->getSEHRegNum(
MBBI->getOperand(1).getReg());
991 unsigned Reg1 =
RegInfo->getSEHRegNum(
MBBI->getOperand(2).getReg());
992 MIB =
BuildMI(MF,
DL,
TII.get(AArch64::SEH_SaveFRegP_X))
999 case AArch64::LDPXpost:
1002 case AArch64::STPXpre: {
1005 if (Reg0 == AArch64::FP && Reg1 == AArch64::LR)
1006 MIB =
BuildMI(MF,
DL,
TII.get(AArch64::SEH_SaveFPLR_X))
1010 MIB =
BuildMI(MF,
DL,
TII.get(AArch64::SEH_SaveRegP_X))
1017 case AArch64::LDRDpost:
1020 case AArch64::STRDpre: {
1021 unsigned Reg =
RegInfo->getSEHRegNum(
MBBI->getOperand(1).getReg());
1022 MIB =
BuildMI(MF,
DL,
TII.get(AArch64::SEH_SaveFReg_X))
1028 case AArch64::LDRXpost:
1031 case AArch64::STRXpre: {
1032 unsigned Reg =
RegInfo->getSEHRegNum(
MBBI->getOperand(1).getReg());
1039 case AArch64::STPDi:
1040 case AArch64::LDPDi: {
1041 unsigned Reg0 =
RegInfo->getSEHRegNum(
MBBI->getOperand(0).getReg());
1042 unsigned Reg1 =
RegInfo->getSEHRegNum(
MBBI->getOperand(1).getReg());
1050 case AArch64::STPXi:
1051 case AArch64::LDPXi: {
1054 if (Reg0 == AArch64::FP && Reg1 == AArch64::LR)
1066 case AArch64::STRXui:
1067 case AArch64::LDRXui: {
1068 int Reg =
RegInfo->getSEHRegNum(
MBBI->getOperand(0).getReg());
1075 case AArch64::STRDui:
1076 case AArch64::LDRDui: {
1077 unsigned Reg =
RegInfo->getSEHRegNum(
MBBI->getOperand(0).getReg());
1091 unsigned LocalStackSize) {
1093 unsigned ImmIdx =
MBBI->getNumOperands() - 1;
1094 switch (
MBBI->getOpcode()) {
1097 case AArch64::SEH_SaveFPLR:
1098 case AArch64::SEH_SaveRegP:
1099 case AArch64::SEH_SaveReg:
1100 case AArch64::SEH_SaveFRegP:
1101 case AArch64::SEH_SaveFReg:
1102 ImmOpnd = &
MBBI->getOperand(ImmIdx);
1115 bool NeedsWinCFI,
bool *HasWinCFI,
bool EmitCFI,
1117 int CFAOffset = 0) {
1119 switch (
MBBI->getOpcode()) {
1122 case AArch64::STPXi:
1123 NewOpc = AArch64::STPXpre;
1125 case AArch64::STPDi:
1126 NewOpc = AArch64::STPDpre;
1128 case AArch64::STPQi:
1129 NewOpc = AArch64::STPQpre;
1131 case AArch64::STRXui:
1132 NewOpc = AArch64::STRXpre;
1134 case AArch64::STRDui:
1135 NewOpc = AArch64::STRDpre;
1137 case AArch64::STRQui:
1138 NewOpc = AArch64::STRQpre;
1140 case AArch64::LDPXi:
1141 NewOpc = AArch64::LDPXpost;
1143 case AArch64::LDPDi:
1144 NewOpc = AArch64::LDPDpost;
1146 case AArch64::LDPQi:
1147 NewOpc = AArch64::LDPQpost;
1149 case AArch64::LDRXui:
1150 NewOpc = AArch64::LDRXpost;
1152 case AArch64::LDRDui:
1153 NewOpc = AArch64::LDRDpost;
1155 case AArch64::LDRQui:
1156 NewOpc = AArch64::LDRQpost;
1161 auto SEH = std::next(
MBBI);
1163 SEH->eraseFromParent();
1168 int64_t MinOffset, MaxOffset;
1170 NewOpc, Scale, Width, MinOffset, MaxOffset);
1177 if (
MBBI->getOperand(
MBBI->getNumOperands() - 1).getImm() != 0 ||
1178 CSStackSizeInc < MinOffset || CSStackSizeInc > MaxOffset) {
1181 false,
false,
nullptr, EmitCFI,
1184 return std::prev(
MBBI);
1191 unsigned OpndIdx = 0;
1192 for (
unsigned OpndEnd =
MBBI->getNumOperands() - 1; OpndIdx < OpndEnd;
1194 MIB.
add(
MBBI->getOperand(OpndIdx));
1196 assert(
MBBI->getOperand(OpndIdx).getImm() == 0 &&
1197 "Unexpected immediate offset in first/last callee-save save/restore "
1199 assert(
MBBI->getOperand(OpndIdx - 1).getReg() == AArch64::SP &&
1200 "Unexpected base register in callee-save save/restore instruction!");
1201 assert(CSStackSizeInc % Scale == 0);
1202 MIB.
addImm(CSStackSizeInc / (
int)Scale);
1233 unsigned Opc =
MI.getOpcode();
1236 case AArch64::STPXi:
1237 case AArch64::STRXui:
1238 case AArch64::STPDi:
1239 case AArch64::STRDui:
1240 case AArch64::LDPXi:
1241 case AArch64::LDRXui:
1242 case AArch64::LDPDi:
1243 case AArch64::LDRDui:
1246 case AArch64::STPQi:
1247 case AArch64::STRQui:
1248 case AArch64::LDPQi:
1249 case AArch64::LDRQui:
1256 unsigned OffsetIdx =
MI.getNumExplicitOperands() - 1;
1257 assert(
MI.getOperand(OffsetIdx - 1).getReg() == AArch64::SP &&
1258 "Unexpected base register in callee-save save/restore instruction!");
1262 assert(LocalStackSize % Scale == 0);
1263 OffsetOpnd.
setImm(OffsetOpnd.
getImm() + LocalStackSize / Scale);
1268 assert(
MBBI !=
MI.getParent()->end() &&
"Expecting a valid instruction");
1270 "Expecting a SEH instruction");
1281 switch (
I->getOpcode()) {
1284 case AArch64::STR_ZXI:
1285 case AArch64::STR_PXI:
1286 case AArch64::LDR_ZXI:
1287 case AArch64::LDR_PXI:
1296 [](
const auto &
Info) { return Info.getReg() == AArch64::LR; }) &&
1311 bool NeedsUnwindInfo) {
1327 if (NeedsUnwindInfo) {
1330 static const char CFIInst[] = {
1331 dwarf::DW_CFA_val_expression,
1334 static_cast<char>(
unsigned(dwarf::DW_OP_breg18)),
1335 static_cast<char>(-8) & 0x7f,
1338 nullptr,
StringRef(CFIInst,
sizeof(CFIInst))));
1378 bool HasFP =
hasFP(MF);
1380 bool HasWinCFI =
false;
1397 MFnI.needsDwarfUnwindInfo(MF));
1399 if (MFnI.shouldSignReturnAddress(MF)) {
1400 if (MFnI.shouldSignWithBKey()) {
1408 TII->get(MFnI.shouldSignWithBKey() ? AArch64::PACIBSP
1409 : AArch64::PACIASP))
1418 }
else if (NeedsWinCFI) {
1424 if (EmitCFI && MFnI.isMTETagged()) {
1487 assert(!HasFP &&
"unexpected function without stack frame but with FP");
1489 "unexpected function without stack frame but with SVE objects");
1498 ++NumRedZoneFunctions;
1531 bool CombineSPBump = shouldCombineCSRLocalStackBump(MF, NumBytes);
1532 bool HomPrologEpilog = homogeneousPrologEpilog(MF);
1533 if (CombineSPBump) {
1534 assert(!SVEStackSize &&
"Cannot combine SP bump with SVE");
1540 }
else if (HomPrologEpilog) {
1542 NumBytes -= PrologueSaveSize;
1543 }
else if (PrologueSaveSize != 0) {
1545 MBB,
MBBI,
DL,
TII, -PrologueSaveSize, NeedsWinCFI, &HasWinCFI,
1547 NumBytes -= PrologueSaveSize;
1549 assert(NumBytes >= 0 &&
"Negative stack allocation size!?");
1559 NeedsWinCFI, &HasWinCFI);
1564 if (!IsFunclet && HasFP) {
1576 bool HaveInitialContext = Attrs.hasAttrSomewhere(Attribute::SwiftAsync);
1577 if (HaveInitialContext)
1580 .
addUse(HaveInitialContext ? AArch64::X22 : AArch64::XZR)
1586 if (HomPrologEpilog) {
1599 if (NeedsWinCFI && HasWinCFI) {
1604 NeedsWinCFI =
false;
1609 const int OffsetToFirstCalleeSaveFromFP =
1613 unsigned Reg = RegInfo->getDwarfRegNum(
FramePtr,
true);
1615 nullptr, Reg, FixedObject - OffsetToFirstCalleeSaveFromFP));
1626 emitCalleeSavedGPRLocations(
MBB,
MBBI);
1629 const bool NeedsRealignment =
1630 NumBytes && !IsFunclet && RegInfo->hasStackRealignment(MF);
1631 int64_t RealignmentPadding =
1637 uint64_t NumWords = (NumBytes + RealignmentPadding) >> 4;
1645 if (NumBytes >= (1 << 28))
1647 "unwinding purposes");
1649 uint32_t LowNumWords = NumWords & 0xFFFF;
1656 if ((NumWords & 0xFFFF0000) != 0) {
1659 .
addImm((NumWords & 0xFFFF0000) >> 16)
1730 if (RealignmentPadding > 0) {
1733 .
addImm(RealignmentPadding)
1748 StackOffset AllocateBefore = SVEStackSize, AllocateAfter = {};
1755 CalleeSavesBegin =
MBBI;
1759 CalleeSavesEnd =
MBBI;
1762 AllocateAfter = SVEStackSize - AllocateBefore;
1767 MBB, CalleeSavesBegin,
DL, AArch64::SP, AArch64::SP, -AllocateBefore,
TII,
1769 EmitCFI && !HasFP && AllocateBefore,
1773 emitCalleeSavedSVELocations(
MBB, CalleeSavesEnd);
1778 nullptr, EmitCFI && !HasFP && AllocateAfter,
1784 unsigned scratchSPReg = AArch64::SP;
1786 if (NeedsRealignment) {
1788 assert(scratchSPReg != AArch64::NoRegister);
1797 MBB,
MBBI,
DL, scratchSPReg, AArch64::SP,
1799 false, NeedsWinCFI, &HasWinCFI, EmitCFI && !HasFP,
1803 if (NeedsRealignment) {
1805 assert(scratchSPReg != AArch64::SP);
1832 if (!IsFunclet && RegInfo->hasBasePointer(MF)) {
1844 if (NeedsWinCFI && HasWinCFI) {
1852 if (IsFunclet &&
F.hasPersonalityFn()) {
1864 bool NeedsWinCFI,
bool *HasWinCFI) {
1866 if (!MFI.shouldSignReturnAddress(MF))
1874 DL =
MBBI->getDebugLoc();
1881 if (Subtarget.hasPAuth() &&
1883 MBBI !=
MBB.
end() &&
MBBI->getOpcode() == AArch64::RET_ReallyLR &&
1886 TII->get(MFI.shouldSignWithBKey() ? AArch64::RETAB : AArch64::RETAA))
1892 TII->get(MFI.shouldSignWithBKey() ? AArch64::AUTIBSP : AArch64::AUTIASP))
1909 switch (
MI.getOpcode()) {
1912 case AArch64::CATCHRET:
1913 case AArch64::CLEANUPRET:
1928 bool HasWinCFI =
false;
1929 bool IsFunclet =
false;
1933 DL =
MBBI->getDebugLoc();
1945 TII->get(AArch64::SEH_EpilogEnd))
1965 int64_t AfterCSRPopSize = ArgumentStackToRestore;
1973 if (homogeneousPrologEpilog(MF, &
MBB)) {
1977 auto HomogeneousEpilog = std::prev(LastPopI);
1978 if (HomogeneousEpilog->getOpcode() == AArch64::HOM_Epilog)
1979 LastPopI = HomogeneousEpilog;
1989 assert(AfterCSRPopSize == 0);
1992 bool CombineSPBump = shouldCombineCSRLocalStackBumpInEpilogue(
MBB, NumBytes);
1995 bool CombineAfterCSRBump =
false;
1996 if (!CombineSPBump && PrologueSaveSize != 0) {
1998 while (Pop->getOpcode() == TargetOpcode::CFI_INSTRUCTION ||
2000 Pop = std::prev(Pop);
2003 const MachineOperand &OffsetOp = Pop->getOperand(Pop->getNumOperands() - 1);
2007 if (OffsetOp.
getImm() == 0 && AfterCSRPopSize >= 0) {
2009 MBB, Pop,
DL,
TII, PrologueSaveSize, NeedsWinCFI, &HasWinCFI, EmitCFI,
2016 AfterCSRPopSize += PrologueSaveSize;
2017 CombineAfterCSRBump =
true;
2026 while (LastPopI != Begin) {
2032 }
else if (CombineSPBump)
2034 NeedsWinCFI, &HasWinCFI);
2076 if (CombineSPBump) {
2077 assert(!SVEStackSize &&
"Cannot combine SP bump with SVE");
2080 if (EmitCFI &&
hasFP(MF)) {
2082 unsigned Reg = RegInfo.getDwarfRegNum(AArch64::SP,
true);
2097 NumBytes -= PrologueSaveSize;
2098 assert(NumBytes >= 0 &&
"Negative stack allocation size!?");
2102 StackOffset DeallocateBefore = {}, DeallocateAfter = SVEStackSize;
2105 RestoreBegin = std::prev(RestoreEnd);
2106 while (RestoreBegin !=
MBB.
begin() &&
2115 DeallocateBefore = SVEStackSize - CalleeSavedSizeAsOffset;
2116 DeallocateAfter = CalleeSavedSizeAsOffset;
2138 MBB, RestoreBegin,
DL, AArch64::SP, AArch64::SP,
2140 false,
false,
nullptr, EmitCFI && !
hasFP(MF),
2147 false,
nullptr, EmitCFI && !
hasFP(MF),
2153 false,
nullptr, EmitCFI && !
hasFP(MF),
2158 emitCalleeSavedSVERestores(
MBB, RestoreEnd);
2165 if (RedZone && AfterCSRPopSize == 0)
2172 bool NoCalleeSaveRestore = PrologueSaveSize == 0;
2173 int64_t StackRestoreBytes = RedZone ? 0 : NumBytes;
2174 if (NoCalleeSaveRestore)
2175 StackRestoreBytes += AfterCSRPopSize;
2178 MBB, LastPopI,
DL, AArch64::SP, AArch64::SP,
2185 if (NoCalleeSaveRestore || AfterCSRPopSize == 0) {
2198 MBB, LastPopI,
DL, AArch64::SP, AArch64::FP,
2201 }
else if (NumBytes)
2207 if (EmitCFI &&
hasFP(MF)) {
2209 unsigned Reg = RegInfo.getDwarfRegNum(AArch64::SP,
true);
2220 if (AfterCSRPopSize) {
2221 assert(AfterCSRPopSize > 0 &&
"attempting to reallocate arg stack that an "
2222 "interrupt may have clobbered");
2227 false, NeedsWinCFI, &HasWinCFI, EmitCFI,
2253 int64_t ObjectOffset) {
2258 unsigned FixedObject =
2267 int64_t ObjectOffset) {
2278 return RegInfo->getLocalAddressRegister(MF) == AArch64::FP
2285 bool ForSimm)
const {
2288 bool isFixed = MFI.isFixedObjectIndex(FI);
2295 const MachineFunction &MF, int64_t ObjectOffset,
bool isFixed,
bool isSVE,
2296 Register &FrameReg,
bool PreferFP,
bool ForSimm)
const {
2319 PreferFP &= !SVEStackSize;
2327 }
else if (isCSR && RegInfo->hasStackRealignment(MF)) {
2331 assert(
hasFP(MF) &&
"Re-aligned stack must have frame pointer");
2333 }
else if (
hasFP(MF) && !RegInfo->hasStackRealignment(MF)) {
2338 bool FPOffsetFits = !ForSimm || FPOffset >= -256;
2339 PreferFP |=
Offset > -FPOffset && !SVEStackSize;
2341 if (MFI.hasVarSizedObjects()) {
2345 bool CanUseBP = RegInfo->hasBasePointer(MF);
2346 if (FPOffsetFits && CanUseBP)
2353 }
else if (FPOffset >= 0) {
2358 }
else if (MF.
hasEHFunclets() && !RegInfo->hasBasePointer(MF)) {
2365 "Funclets should only be present on Win64");
2369 if (FPOffsetFits && PreferFP)
2376 ((isFixed || isCSR) || !RegInfo->hasStackRealignment(MF) || !UseFP) &&
2377 "In the presence of dynamic stack pointer realignment, "
2378 "non-argument/CSR objects cannot be accessed through the frame pointer");
2390 RegInfo->hasStackRealignment(MF))) {
2391 FrameReg = RegInfo->getFrameRegister(MF);
2395 FrameReg = RegInfo->hasBasePointer(MF) ? RegInfo->getBaseRegister()
2401 if (UseFP && !(isFixed || isCSR))
2402 ScalableOffset = -SVEStackSize;
2403 if (!UseFP && (isFixed || isCSR))
2404 ScalableOffset = SVEStackSize;
2407 FrameReg = RegInfo->getFrameRegister(MF);
2412 if (RegInfo->hasBasePointer(MF))
2413 FrameReg = RegInfo->getBaseRegister();
2415 assert(!MFI.hasVarSizedObjects() &&
2416 "Can't use SP when we have var sized objects.");
2417 FrameReg = AArch64::SP;
2443 Attrs.hasAttrSomewhere(Attribute::SwiftError)) &&
2448 bool NeedsWinCFI,
bool IsFirst,
2457 if (Reg2 == AArch64::FP)
2461 if (
TRI->getEncodingValue(Reg2) ==
TRI->getEncodingValue(Reg1) + 1)
2468 if (Reg1 >= AArch64::X19 && Reg1 <= AArch64::X27 &&
2469 (Reg1 - AArch64::X19) % 2 == 0 && Reg2 == AArch64::LR && !IsFirst)
2479 bool UsesWinAAPCS,
bool NeedsWinCFI,
2480 bool NeedsFrameRecord,
bool IsFirst,
2488 if (NeedsFrameRecord)
2489 return Reg2 == AArch64::LR;
2497 unsigned Reg1 = AArch64::NoRegister;
2498 unsigned Reg2 = AArch64::NoRegister;
2501 enum RegType { GPR, FPR64, FPR128, PPR, ZPR }
Type;
2503 RegPairInfo() =
default;
2505 bool isPaired()
const {
return Reg2 != AArch64::NoRegister; }
2507 unsigned getScale()
const {
2521 bool isScalable()
const {
return Type == PPR ||
Type == ZPR; }
2529 bool NeedsFrameRecord) {
2539 unsigned Count = CSI.
size();
2545 (Count & 1) == 0) &&
2546 "Odd number of callee-saved regs to spill!");
2548 int StackFillDir = -1;
2550 unsigned FirstReg = 0;
2558 FirstReg = Count - 1;
2564 for (
unsigned i = FirstReg; i < Count; i += RegInc) {
2566 RPI.Reg1 = CSI[i].getReg();
2568 if (AArch64::GPR64RegClass.
contains(RPI.Reg1))
2569 RPI.Type = RegPairInfo::GPR;
2570 else if (AArch64::FPR64RegClass.
contains(RPI.Reg1))
2571 RPI.Type = RegPairInfo::FPR64;
2572 else if (AArch64::FPR128RegClass.
contains(RPI.Reg1))
2573 RPI.Type = RegPairInfo::FPR128;
2574 else if (AArch64::ZPRRegClass.
contains(RPI.Reg1))
2575 RPI.Type = RegPairInfo::ZPR;
2576 else if (AArch64::PPRRegClass.
contains(RPI.Reg1))
2577 RPI.Type = RegPairInfo::PPR;
2582 if (
unsigned(i + RegInc) < Count) {
2583 Register NextReg = CSI[i + RegInc].getReg();
2584 bool IsFirst = i == FirstReg;
2586 case RegPairInfo::GPR:
2587 if (AArch64::GPR64RegClass.
contains(NextReg) &&
2589 NeedsWinCFI, NeedsFrameRecord, IsFirst,
2593 case RegPairInfo::FPR64:
2594 if (AArch64::FPR64RegClass.
contains(NextReg) &&
2599 case RegPairInfo::FPR128:
2600 if (AArch64::FPR128RegClass.
contains(NextReg))
2603 case RegPairInfo::PPR:
2604 case RegPairInfo::ZPR:
2615 assert((!RPI.isPaired() ||
2616 (CSI[i].getFrameIdx() + RegInc == CSI[i + RegInc].getFrameIdx())) &&
2617 "Out of order callee saved regs!");
2619 assert((!RPI.isPaired() || !NeedsFrameRecord || RPI.Reg2 != AArch64::FP ||
2620 RPI.Reg1 == AArch64::LR) &&
2621 "FrameRecord must be allocated together with LR");
2624 assert((!RPI.isPaired() || !NeedsFrameRecord || RPI.Reg1 != AArch64::FP ||
2625 RPI.Reg2 == AArch64::LR) &&
2626 "FrameRecord must be allocated together with LR");
2633 ((RPI.Reg1 == AArch64::LR && RPI.Reg2 == AArch64::FP) ||
2634 RPI.Reg1 + 1 == RPI.Reg2))) &&
2635 "Callee-save registers not saved as adjacent register pair!");
2637 RPI.FrameIdx = CSI[i].getFrameIdx();
2640 RPI.FrameIdx = CSI[i + RegInc].getFrameIdx();
2642 int Scale = RPI.getScale();
2644 int OffsetPre = RPI.isScalable() ? ScalableByteOffset : ByteOffset;
2645 assert(OffsetPre % Scale == 0);
2647 if (RPI.isScalable())
2648 ScalableByteOffset += StackFillDir * Scale;
2650 ByteOffset += StackFillDir * (RPI.isPaired() ? 2 * Scale : Scale);
2655 RPI.Reg2 == AArch64::FP)
2656 ByteOffset += StackFillDir * 8;
2658 assert(!(RPI.isScalable() && RPI.isPaired()) &&
2659 "Paired spill/fill instructions don't exist for SVE vectors");
2663 if (NeedGapToAlignStack && !NeedsWinCFI &&
2664 !RPI.isScalable() && RPI.Type != RegPairInfo::FPR128 &&
2665 !RPI.isPaired() && ByteOffset % 16 != 0) {
2666 ByteOffset += 8 * StackFillDir;
2667 assert(MFI.getObjectAlign(RPI.FrameIdx) <=
Align(16));
2671 MFI.setObjectAlignment(RPI.FrameIdx,
Align(16));
2672 NeedGapToAlignStack =
false;
2675 int OffsetPost = RPI.isScalable() ? ScalableByteOffset : ByteOffset;
2676 assert(OffsetPost % Scale == 0);
2679 int Offset = NeedsWinCFI ? OffsetPre : OffsetPost;
2684 RPI.Reg2 == AArch64::FP)
2686 RPI.Offset =
Offset / Scale;
2688 assert(((!RPI.isScalable() && RPI.Offset >= -64 && RPI.Offset <= 63) ||
2689 (RPI.isScalable() && RPI.Offset >= -256 && RPI.Offset <= 255)) &&
2690 "Offset out of bounds for LDP/STP immediate");
2694 if (NeedsFrameRecord && ((!IsWindows && RPI.Reg1 == AArch64::LR &&
2695 RPI.Reg2 == AArch64::FP) ||
2696 (IsWindows && RPI.Reg1 == AArch64::FP &&
2697 RPI.Reg2 == AArch64::LR)))
2711 MFI.setObjectAlignment(CSI[0].getFrameIdx(),
Align(16));
2714 std::reverse(RegPairs.
begin(), RegPairs.
end());
2730 if (homogeneousPrologEpilog(MF)) {
2734 for (
auto &RPI : RegPairs) {
2739 if (!
MRI.isReserved(RPI.Reg1))
2741 if (!
MRI.isReserved(RPI.Reg2))
2747 unsigned Reg1 = RPI.Reg1;
2748 unsigned Reg2 = RPI.Reg2;
2764 case RegPairInfo::GPR:
2765 StrOpc = RPI.isPaired() ? AArch64::STPXi : AArch64::STRXui;
2767 Alignment =
Align(8);
2769 case RegPairInfo::FPR64:
2770 StrOpc = RPI.isPaired() ? AArch64::STPDi : AArch64::STRDui;
2772 Alignment =
Align(8);
2774 case RegPairInfo::FPR128:
2775 StrOpc = RPI.isPaired() ? AArch64::STPQi : AArch64::STRQui;
2777 Alignment =
Align(16);
2779 case RegPairInfo::ZPR:
2780 StrOpc = AArch64::STR_ZXI;
2782 Alignment =
Align(16);
2784 case RegPairInfo::PPR:
2785 StrOpc = AArch64::STR_PXI;
2787 Alignment =
Align(2);
2792 dbgs() <<
") -> fi#(" << RPI.FrameIdx;
2793 if (RPI.isPaired())
dbgs() <<
", " << RPI.FrameIdx + 1;
2796 assert((!NeedsWinCFI || !(Reg1 == AArch64::LR && Reg2 == AArch64::FP)) &&
2797 "Windows unwdinding requires a consecutive (FP,LR) pair");
2801 unsigned FrameIdxReg1 = RPI.FrameIdx;
2802 unsigned FrameIdxReg2 = RPI.FrameIdx + 1;
2803 if (NeedsWinCFI && RPI.isPaired()) {
2808 if (!
MRI.isReserved(Reg1))
2810 if (RPI.isPaired()) {
2811 if (!
MRI.isReserved(Reg2))
2831 if (RPI.Type == RegPairInfo::ZPR || RPI.Type == RegPairInfo::PPR)
2848 DL =
MBBI->getDebugLoc();
2853 unsigned Reg1 = RPI.Reg1;
2854 unsigned Reg2 = RPI.Reg2;
2868 case RegPairInfo::GPR:
2869 LdrOpc = RPI.isPaired() ? AArch64::LDPXi : AArch64::LDRXui;
2871 Alignment =
Align(8);
2873 case RegPairInfo::FPR64:
2874 LdrOpc = RPI.isPaired() ? AArch64::LDPDi : AArch64::LDRDui;
2876 Alignment =
Align(8);
2878 case RegPairInfo::FPR128:
2879 LdrOpc = RPI.isPaired() ? AArch64::LDPQi : AArch64::LDRQui;
2881 Alignment =
Align(16);
2883 case RegPairInfo::ZPR:
2884 LdrOpc = AArch64::LDR_ZXI;
2886 Alignment =
Align(16);
2888 case RegPairInfo::PPR:
2889 LdrOpc = AArch64::LDR_PXI;
2891 Alignment =
Align(2);
2896 dbgs() <<
") -> fi#(" << RPI.FrameIdx;
2897 if (RPI.isPaired())
dbgs() <<
", " << RPI.FrameIdx + 1;
2903 unsigned FrameIdxReg1 = RPI.FrameIdx;
2904 unsigned FrameIdxReg2 = RPI.FrameIdx + 1;
2905 if (NeedsWinCFI && RPI.isPaired()) {
2910 if (RPI.isPaired()) {
2931 for (
const RegPairInfo &RPI :
reverse(RegPairs))
2932 if (RPI.isScalable())
2935 if (homogeneousPrologEpilog(MF, &
MBB)) {
2938 for (
auto &RPI : RegPairs) {
2947 for (
const RegPairInfo &RPI :
reverse(RegPairs)) {
2948 if (RPI.isScalable())
2957 for (
const RegPairInfo &RPI : RegPairs) {
2958 if (RPI.isScalable())
2980 unsigned UnspilledCSGPR = AArch64::NoRegister;
2981 unsigned UnspilledCSGPRPaired = AArch64::NoRegister;
2990 unsigned ExtraCSSpill = 0;
2992 for (
unsigned i = 0; CSRegs[i]; ++i) {
2993 const unsigned Reg = CSRegs[i];
2996 if (Reg == BasePointerReg)
2999 bool RegUsed = SavedRegs.
test(Reg);
3000 unsigned PairedReg = AArch64::NoRegister;
3001 if (AArch64::GPR64RegClass.
contains(Reg) ||
3002 AArch64::FPR64RegClass.contains(Reg) ||
3003 AArch64::FPR128RegClass.contains(Reg))
3004 PairedReg = CSRegs[i ^ 1];
3007 if (AArch64::GPR64RegClass.
contains(Reg) &&
3009 UnspilledCSGPR = Reg;
3010 UnspilledCSGPRPaired = PairedReg;
3018 if (producePairRegisters(MF) && PairedReg != AArch64::NoRegister &&
3019 !SavedRegs.
test(PairedReg)) {
3020 SavedRegs.
set(PairedReg);
3021 if (AArch64::GPR64RegClass.
contains(PairedReg) &&
3023 ExtraCSSpill = PairedReg;
3034 SavedRegs.
set(AArch64::X18);
3038 unsigned CSStackSize = 0;
3039 unsigned SVECSStackSize = 0;
3042 for (
unsigned Reg : SavedRegs.
set_bits()) {
3044 if (AArch64::PPRRegClass.
contains(Reg) ||
3045 AArch64::ZPRRegClass.contains(Reg))
3052 unsigned NumSavedRegs = SavedRegs.
count();
3058 SavedRegs.
set(AArch64::FP);
3059 SavedRegs.
set(AArch64::LR);
3069 int64_t SVEStackSize =
3070 alignTo(SVECSStackSize + estimateSVEStackObjectOffsets(MFI), 16);
3071 bool CanEliminateFrame = (SavedRegs.
count() == 0) && !SVEStackSize;
3080 int64_t CalleeStackUsed = 0;
3083 if (FixedOff > CalleeStackUsed) CalleeStackUsed = FixedOff;
3087 bool BigStack = SVEStackSize || (EstimatedStackSize + CSStackSize +
3088 CalleeStackUsed) > EstimatedStackSizeLimit;
3090 AFI->setHasStackFrame(
true);
3099 if (!ExtraCSSpill && UnspilledCSGPR != AArch64::NoRegister) {
3101 <<
" to get a scratch register.\n");
3102 SavedRegs.
set(UnspilledCSGPR);
3106 if (producePairRegisters(MF))
3107 SavedRegs.
set(UnspilledCSGPRPaired);
3108 ExtraCSSpill = UnspilledCSGPR;
3116 unsigned Size =
TRI->getSpillSize(RC);
3117 Align Alignment =
TRI->getSpillAlign(RC);
3120 LLVM_DEBUG(
dbgs() <<
"No available CS registers, allocated fi#" << FI
3121 <<
" as the emergency spill slot.\n");
3126 CSStackSize += 8 * (SavedRegs.
count() - NumSavedRegs);
3130 if (
hasFP(MF) && AFI->hasSwiftAsyncContext())
3135 << EstimatedStackSize + AlignedCSStackSize
3139 AFI->getCalleeSavedStackSize() == AlignedCSStackSize) &&
3140 "Should not invalidate callee saved info");
3144 AFI->setCalleeSavedStackSize(AlignedCSStackSize);
3145 AFI->setCalleeSaveStackHasFreeSpace(AlignedCSStackSize != CSStackSize);
3146 AFI->setSVECalleeSavedStackSize(
alignTo(SVECSStackSize, 16));
3151 std::vector<CalleeSavedInfo> &CSI,
unsigned &MinCSFrameIndex,
3152 unsigned &MaxCSFrameIndex)
const {
3160 std::reverse(CSI.begin(), CSI.end());
3174 if ((
unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
3175 if ((
unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
3178 for (
auto &CS : CSI) {
3185 CS.setFrameIdx(FrameIdx);
3187 if ((
unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
3188 if ((
unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
3192 Reg == AArch64::FP) {
3195 if ((
unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
3196 if ((
unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
3210 int &Min,
int &Max) {
3211 Min = std::numeric_limits<int>::max();
3212 Max = std::numeric_limits<int>::min();
3218 for (
auto &CS : CSI) {
3219 if (AArch64::ZPRRegClass.
contains(CS.getReg()) ||
3220 AArch64::PPRRegClass.contains(CS.getReg())) {
3221 assert((Max == std::numeric_limits<int>::min() ||
3222 Max + 1 == CS.getFrameIdx()) &&
3223 "SVE CalleeSaves are not consecutive");
3225 Min = std::min(Min, CS.getFrameIdx());
3226 Max = std::max(Max, CS.getFrameIdx());
3229 return Min != std::numeric_limits<int>::max();
3238 int &MinCSFrameIndex,
3239 int &MaxCSFrameIndex,
3240 bool AssignOffsets) {
3245 "SVE vectors should never be passed on the stack by value, only by "
3249 auto Assign = [&MFI](
int FI, int64_t
Offset) {
3259 for (
int I = MinCSFrameIndex;
I <= MaxCSFrameIndex; ++
I) {
3275 int StackProtectorFI = -1;
3279 ObjectsToAllocate.
push_back(StackProtectorFI);
3285 if (
I == StackProtectorFI)
3287 if (MaxCSFrameIndex >=
I &&
I >= MinCSFrameIndex)
3296 for (
unsigned FI : ObjectsToAllocate) {
3301 if (Alignment >
Align(16))
3303 "Alignment of scalable vectors > 16 bytes is not yet supported");
3313int64_t AArch64FrameLowering::estimateSVEStackObjectOffsets(
3315 int MinCSFrameIndex, MaxCSFrameIndex;
3319int64_t AArch64FrameLowering::assignSVEStackObjectOffsets(
3330 "Upwards growing stack unsupported");
3332 int MinCSFrameIndex, MaxCSFrameIndex;
3333 int64_t SVEStackSize =
3334 assignSVEStackObjectOffsets(MFI, MinCSFrameIndex, MaxCSFrameIndex);
3354 int64_t FixedObject =
3367 assert(DstReg &&
"There must be a free register after frame setup");
3376struct TagStoreInstr {
3398 std::optional<int64_t> FrameRegUpdate;
3400 unsigned FrameRegUpdateFlags;
3411 :
MBB(
MBB), ZeroData(ZeroData) {
3417 void addInstruction(TagStoreInstr
I) {
3419 TagStores.
back().Offset + TagStores.
back().Size ==
I.Offset) &&
3420 "Non-adjacent tag store instructions.");
3435 const int64_t kMinOffset = -256 * 16;
3436 const int64_t kMaxOffset = 255 * 16;
3439 int64_t BaseRegOffsetBytes = FrameRegOffset.
getFixed();
3440 if (BaseRegOffsetBytes < kMinOffset ||
3441 BaseRegOffsetBytes + (
Size -
Size % 32) > kMaxOffset ||
3445 BaseRegOffsetBytes % 16 != 0) {
3446 Register ScratchReg =
MRI->createVirtualRegister(&AArch64::GPR64RegClass);
3449 BaseReg = ScratchReg;
3450 BaseRegOffsetBytes = 0;
3455 int64_t InstrSize = (
Size > 16) ? 32 : 16;
3458 ? (ZeroData ? AArch64::STZGi : AArch64::STGi)
3459 : (ZeroData ? AArch64::STZ2Gi : AArch64::ST2Gi);
3460 assert(BaseRegOffsetBytes % 16 == 0);
3464 .
addImm(BaseRegOffsetBytes / 16)
3468 if (BaseRegOffsetBytes == 0)
3470 BaseRegOffsetBytes += InstrSize;
3484 :
MRI->createVirtualRegister(&AArch64::GPR64RegClass);
3485 Register SizeReg =
MRI->createVirtualRegister(&AArch64::GPR64RegClass);
3489 int64_t LoopSize =
Size;
3492 if (FrameRegUpdate && *FrameRegUpdate)
3493 LoopSize -= LoopSize % 32;
3495 TII->get(ZeroData ? AArch64::STZGloop_wback
3496 : AArch64::STGloop_wback))
3503 LoopI->
setFlags(FrameRegUpdateFlags);
3505 int64_t ExtraBaseRegUpdate =
3506 FrameRegUpdate ? (*FrameRegUpdate - FrameRegOffset.
getFixed() -
Size) : 0;
3507 if (LoopSize <
Size) {
3512 TII->get(ZeroData ? AArch64::STZGPostIndex : AArch64::STGPostIndex))
3516 .
addImm(1 + ExtraBaseRegUpdate / 16)
3519 }
else if (ExtraBaseRegUpdate) {
3523 TII->get(ExtraBaseRegUpdate > 0 ? AArch64::ADDXri : AArch64::SUBXri))
3526 .
addImm(std::abs(ExtraBaseRegUpdate))
3536 int64_t
Size, int64_t *TotalOffset) {
3538 if ((
MI.getOpcode() == AArch64::ADDXri ||
3539 MI.getOpcode() == AArch64::SUBXri) &&
3540 MI.getOperand(0).getReg() == Reg &&
MI.getOperand(1).getReg() == Reg) {
3542 int64_t
Offset =
MI.getOperand(2).getImm() << Shift;
3543 if (
MI.getOpcode() == AArch64::SUBXri)
3545 int64_t AbsPostOffset = std::abs(
Offset -
Size);
3546 const int64_t kMaxOffset =
3548 if (AbsPostOffset <= kMaxOffset && AbsPostOffset % 16 == 0) {
3559 for (
auto &TS : TSE) {
3563 if (
MI->memoperands_empty()) {
3567 MemRefs.
append(
MI->memoperands_begin(),
MI->memoperands_end());
3573 bool TryMergeSPUpdate) {
3574 if (TagStores.
empty())
3576 TagStoreInstr &FirstTagStore = TagStores[0];
3577 TagStoreInstr &LastTagStore = TagStores[TagStores.
size() - 1];
3578 Size = LastTagStore.Offset - FirstTagStore.Offset + LastTagStore.Size;
3579 DL = TagStores[0].MI->getDebugLoc();
3583 *MF, FirstTagStore.Offset,
false ,
false , Reg,
3586 FrameRegUpdate = std::nullopt;
3588 mergeMemRefs(TagStores, CombinedMemRefs);
3591 for (
const auto &Instr
3592 : TagStores) {
dbgs() <<
" " << *Instr.MI; });
3598 if (TagStores.size() < 2)
3600 emitUnrolled(InsertI);
3603 int64_t TotalOffset = 0;
3604 if (TryMergeSPUpdate) {
3610 if (InsertI !=
MBB->
end() &&
3611 canMergeRegUpdate(InsertI, FrameReg, FrameRegOffset.
getFixed() +
Size,
3613 UpdateInstr = &*InsertI++;
3619 if (!UpdateInstr && TagStores.size() < 2)
3623 FrameRegUpdate = TotalOffset;
3624 FrameRegUpdateFlags = UpdateInstr->
getFlags();
3631 for (
auto &TS : TagStores)
3632 TS.MI->eraseFromParent();
3636 int64_t &
Size,
bool &ZeroData) {
3640 unsigned Opcode =
MI.getOpcode();
3641 ZeroData = (Opcode == AArch64::STZGloop || Opcode == AArch64::STZGi ||
3642 Opcode == AArch64::STZ2Gi);
3644 if (Opcode == AArch64::STGloop || Opcode == AArch64::STZGloop) {
3645 if (!
MI.getOperand(0).isDead() || !
MI.getOperand(1).isDead())
3647 if (!
MI.getOperand(2).isImm() || !
MI.getOperand(3).isFI())
3650 Size =
MI.getOperand(2).getImm();
3654 if (Opcode == AArch64::STGi || Opcode == AArch64::STZGi)
3656 else if (Opcode == AArch64::ST2Gi || Opcode == AArch64::STZ2Gi)
3661 if (
MI.getOperand(0).getReg() != AArch64::SP || !
MI.getOperand(1).isFI())
3665 16 *
MI.getOperand(2).getImm();
3685 if (!isMergeableStackTaggingInstruction(
MI,
Offset,
Size, FirstZeroData))
3691 constexpr int kScanLimit = 10;
3694 NextI !=
E && Count < kScanLimit; ++NextI) {
3703 if (isMergeableStackTaggingInstruction(
MI,
Offset,
Size, ZeroData)) {
3704 if (ZeroData != FirstZeroData)
3712 if (!
MI.isTransient())
3721 if (
MI.mayLoadOrStore() ||
MI.hasUnmodeledSideEffects())
3730 [](
const TagStoreInstr &
Left,
const TagStoreInstr &
Right) {
3735 int64_t CurOffset = Instrs[0].Offset;
3736 for (
auto &Instr : Instrs) {
3737 if (CurOffset > Instr.Offset)
3739 CurOffset = Instr.Offset + Instr.Size;
3744 TagStoreEdit TSE(
MBB, FirstZeroData);
3745 std::optional<int64_t> EndOffset;
3746 for (
auto &Instr : Instrs) {
3747 if (EndOffset && *EndOffset != Instr.Offset) {
3749 TSE.emitCode(InsertI, TFI,
false);
3753 TSE.addInstruction(Instr);
3754 EndOffset = Instr.Offset + Instr.Size;
3772 II = tryMergeAdjacentSTG(II,
this, RS);
3780 bool IgnoreSPUpdates)
const {
3782 if (IgnoreSPUpdates) {
3785 FrameReg = AArch64::SP;
3795 FrameReg = AArch64::SP;
3820 bool IsValid =
false;
3822 int ObjectIndex = 0;
3824 int GroupIndex = -1;
3826 bool ObjectFirst =
false;
3829 bool GroupFirst =
false;
3834 int NextGroupIndex = 0;
3835 std::vector<FrameObject> &Objects;
3838 GroupBuilder(std::vector<FrameObject> &Objects) : Objects(Objects) {}
3840 void EndCurrentGroup() {
3841 if (CurrentMembers.
size() > 1) {
3846 for (
int Index : CurrentMembers) {
3847 Objects[
Index].GroupIndex = NextGroupIndex;
3853 CurrentMembers.clear();
3857bool FrameObjectCompare(
const FrameObject &
A,
const FrameObject &
B) {
3875 return std::make_tuple(!
A.IsValid,
A.ObjectFirst,
A.GroupFirst,
A.GroupIndex,
3877 std::make_tuple(!
B.IsValid,
B.ObjectFirst,
B.GroupFirst,
B.GroupIndex,
3889 for (
auto &Obj : ObjectsToAllocate) {
3890 FrameObjects[Obj].IsValid =
true;
3891 FrameObjects[Obj].ObjectIndex = Obj;
3895 GroupBuilder GB(FrameObjects);
3896 for (
auto &
MBB : MF) {
3897 for (
auto &
MI :
MBB) {
3898 if (
MI.isDebugInstr())
3901 switch (
MI.getOpcode()) {
3902 case AArch64::STGloop:
3903 case AArch64::STZGloop:
3907 case AArch64::STZGi:
3908 case AArch64::ST2Gi:
3909 case AArch64::STZ2Gi:
3922 FrameObjects[FI].IsValid)
3930 GB.AddMember(TaggedFI);
3932 GB.EndCurrentGroup();
3935 GB.EndCurrentGroup();
3945 FrameObjects[*TBPI].ObjectFirst =
true;
3946 FrameObjects[*TBPI].GroupFirst =
true;
3947 int FirstGroupIndex = FrameObjects[*TBPI].GroupIndex;
3948 if (FirstGroupIndex >= 0)
3949 for (FrameObject &
Object : FrameObjects)
3950 if (
Object.GroupIndex == FirstGroupIndex)
3951 Object.GroupFirst =
true;
3957 for (
auto &Obj : FrameObjects) {
3961 ObjectsToAllocate[i++] = Obj.ObjectIndex;
3968 dbgs() <<
" " << Obj.ObjectIndex <<
": group " << Obj.GroupIndex;
3969 if (Obj.ObjectFirst)
3970 dbgs() <<
", first";
3972 dbgs() <<
", group-first";
unsigned const MachineRegisterInfo * MRI
static int64_t getArgumentStackToRestore(MachineFunction &MF, MachineBasicBlock &MBB)
Returns how much of the incoming argument stack area (in bytes) we should clean up in an epilogue.
static void emitShadowCallStackEpilogue(const TargetInstrInfo &TII, MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL)
static void emitCalleeSavedRestores(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, bool SVE)
static void computeCalleeSaveRegisterPairs(MachineFunction &MF, ArrayRef< CalleeSavedInfo > CSI, const TargetRegisterInfo *TRI, SmallVectorImpl< RegPairInfo > &RegPairs, bool NeedsFrameRecord)
static void InsertReturnAddressAuth(MachineFunction &MF, MachineBasicBlock &MBB, bool NeedsWinCFI, bool *HasWinCFI)
static const unsigned DefaultSafeSPDisplacement
This is the biggest offset to the stack pointer we can encode in aarch64 instructions (without using ...
static bool needsWinCFI(const MachineFunction &MF)
static cl::opt< bool > ReverseCSRRestoreSeq("reverse-csr-restore-seq", cl::desc("reverse the CSR restore sequence"), cl::init(false), cl::Hidden)
static void insertCFISameValue(const MCInstrDesc &Desc, MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt, unsigned DwarfReg)
static cl::opt< bool > StackTaggingMergeSetTag("stack-tagging-merge-settag", cl::desc("merge settag instruction in function epilog"), cl::init(true), cl::Hidden)
static bool produceCompactUnwindFrame(MachineFunction &MF)
static int64_t determineSVEStackObjectOffsets(MachineFrameInfo &MFI, int &MinCSFrameIndex, int &MaxCSFrameIndex, bool AssignOffsets)
static cl::opt< bool > OrderFrameObjects("aarch64-order-frame-objects", cl::desc("sort stack allocations"), cl::init(true), cl::Hidden)
static bool windowsRequiresStackProbe(MachineFunction &MF, uint64_t StackSizeInBytes)
static void fixupCalleeSaveRestoreStackOffset(MachineInstr &MI, uint64_t LocalStackSize, bool NeedsWinCFI, bool *HasWinCFI)
static bool invalidateWindowsRegisterPairing(unsigned Reg1, unsigned Reg2, bool NeedsWinCFI, bool IsFirst, const TargetRegisterInfo *TRI)
static MachineBasicBlock::iterator convertCalleeSaveRestoreToSPPrePostIncDec(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, const TargetInstrInfo *TII, int CSStackSizeInc, bool NeedsWinCFI, bool *HasWinCFI, bool EmitCFI, MachineInstr::MIFlag FrameFlag=MachineInstr::FrameSetup, int CFAOffset=0)
static void fixupSEHOpcode(MachineBasicBlock::iterator MBBI, unsigned LocalStackSize)
static StackOffset getSVEStackSize(const MachineFunction &MF)
Returns the size of the entire SVE stackframe (calleesaves + spills).
static cl::opt< bool > EnableRedZone("aarch64-redzone", cl::desc("enable use of redzone on AArch64"), cl::init(false), cl::Hidden)
static MachineBasicBlock::iterator InsertSEH(MachineBasicBlock::iterator MBBI, const TargetInstrInfo &TII, MachineInstr::MIFlag Flag)
static unsigned findScratchNonCalleeSaveRegister(MachineBasicBlock *MBB)
cl::opt< bool > EnableHomogeneousPrologEpilog("homogeneous-prolog-epilog", cl::Hidden, cl::desc("Emit homogeneous prologue and epilogue for the size " "optimization (default = off)"))
static bool IsSVECalleeSave(MachineBasicBlock::iterator I)
static bool invalidateRegisterPairing(unsigned Reg1, unsigned Reg2, bool UsesWinAAPCS, bool NeedsWinCFI, bool NeedsFrameRecord, bool IsFirst, const TargetRegisterInfo *TRI)
Returns true if Reg1 and Reg2 cannot be paired using a ldp/stp instruction.
static unsigned getPrologueDeath(MachineFunction &MF, unsigned Reg)
static StackOffset getFPOffset(const MachineFunction &MF, int64_t ObjectOffset)
static bool isTargetWindows(const MachineFunction &MF)
static StackOffset getStackOffset(const MachineFunction &MF, int64_t ObjectOffset)
static unsigned estimateRSStackSizeLimit(MachineFunction &MF)
Look at each instruction that references stack frames and return the stack size limit beyond which so...
static bool getSVECalleeSaveSlotRange(const MachineFrameInfo &MFI, int &Min, int &Max)
returns true if there are any SVE callee saves.
static MCRegister getRegisterOrZero(MCRegister Reg, bool HasSVE)
static bool isFuncletReturnInstr(const MachineInstr &MI)
static void emitShadowCallStackPrologue(const TargetInstrInfo &TII, MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool NeedsWinCFI, bool NeedsUnwindInfo)
static unsigned getFixedObjectSize(const MachineFunction &MF, const AArch64FunctionInfo *AFI, bool IsWin64, bool IsFunclet)
Returns the size of the fixed object area (allocated next to sp on entry) On Win64 this may include a...
static bool needsShadowCallStackPrologueEpilogue(MachineFunction &MF)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
static const int kSetTagLoopThreshold
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
static void clear(coro::Shape &Shape)
static const HTTPClientCleanup Cleanup
const HexagonInstrInfo * TII
This file implements the LivePhysRegs utility for tracking liveness of physical registers.
unsigned const TargetRegisterInfo * TRI
This file declares the machine register scavenger class.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
static const unsigned FramePtr
void processFunctionBeforeFrameIndicesReplaced(MachineFunction &MF, RegScavenger *RS) const override
processFunctionBeforeFrameIndicesReplaced - This method is called immediately before MO_FrameIndex op...
MachineBasicBlock::iterator eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator I) const override
This method is called during prolog/epilog code insertion to eliminate call frame setup and destroy p...
bool canUseAsPrologue(const MachineBasicBlock &MBB) const override
Check whether or not the given MBB can be used as a prologue for the target.
bool enableStackSlotScavenging(const MachineFunction &MF) const override
Returns true if the stack slot holes in the fixed and callee-save stack area should be used when allo...
bool spillCalleeSavedRegisters(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, ArrayRef< CalleeSavedInfo > CSI, const TargetRegisterInfo *TRI) const override
spillCalleeSavedRegisters - Issues instruction(s) to spill all callee saved registers and returns tru...
bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, MutableArrayRef< CalleeSavedInfo > CSI, const TargetRegisterInfo *TRI) const override
restoreCalleeSavedRegisters - Issues instruction(s) to restore all callee saved registers and returns...
StackOffset getNonLocalFrameIndexReference(const MachineFunction &MF, int FI) const override
getNonLocalFrameIndexReference - This method returns the offset used to reference a frame index locat...
TargetStackID::Value getStackIDForScalableVectors() const override
Returns the StackID that scalable vectors should be associated with.
bool hasFP(const MachineFunction &MF) const override
hasFP - Return true if the specified function should have a dedicated frame pointer register.
void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override
emitProlog/emitEpilog - These methods insert prolog and epilog code into the function.
void resetCFIToInitialState(MachineBasicBlock &MBB) const override
Emit CFI instructions that recreate the state of the unwind information upon function entry.
bool hasReservedCallFrame(const MachineFunction &MF) const override
hasReservedCallFrame - Under normal circumstances, when a frame pointer is not required,...
bool canUseRedZone(const MachineFunction &MF) const
Can this function use the red zone for local allocations.
void processFunctionBeforeFrameFinalized(MachineFunction &MF, RegScavenger *RS) const override
processFunctionBeforeFrameFinalized - This method is called immediately before the specified function...
int getSEHFrameIndexOffset(const MachineFunction &MF, int FI) const
unsigned getWinEHFuncletFrameSize(const MachineFunction &MF) const
Funclets only need to account for space for the callee saved registers, as the locals are accounted f...
void orderFrameObjects(const MachineFunction &MF, SmallVectorImpl< int > &ObjectsToAllocate) const override
Order the symbols in the local stack frame.
void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override
void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, RegScavenger *RS) const override
This method determines which of the registers reported by TargetRegisterInfo::getCalleeSavedRegs() sh...
StackOffset getFrameIndexReference(const MachineFunction &MF, int FI, Register &FrameReg) const override
getFrameIndexReference - Provide a base+offset reference to an FI slot for debug info.
StackOffset resolveFrameOffsetReference(const MachineFunction &MF, int64_t ObjectOffset, bool isFixed, bool isSVE, Register &FrameReg, bool PreferFP, bool ForSimm) const
bool assignCalleeSavedSpillSlots(MachineFunction &MF, const TargetRegisterInfo *TRI, std::vector< CalleeSavedInfo > &CSI, unsigned &MinCSFrameIndex, unsigned &MaxCSFrameIndex) const override
assignCalleeSavedSpillSlots - Allows target to override spill slot assignment logic.
StackOffset getFrameIndexReferencePreferSP(const MachineFunction &MF, int FI, Register &FrameReg, bool IgnoreSPUpdates) const override
For Win64 AArch64 EH, the offset to the Unwind object is from the SP before the update.
StackOffset resolveFrameIndexReference(const MachineFunction &MF, int FI, Register &FrameReg, bool PreferFP, bool ForSimm) const
unsigned getWinEHParentFrameOffset(const MachineFunction &MF) const override
The parent frame offset (aka dispFrame) is only used on X86_64 to retrieve the parent's frame pointer...
AArch64FunctionInfo - This class is derived from MachineFunctionInfo and contains private AArch64-spe...
void setSwiftAsyncContextFrameIdx(int FI)
unsigned getTailCallReservedStack() const
unsigned getCalleeSavedStackSize(const MachineFrameInfo &MFI) const
void setCalleeSaveBaseToFrameRecordOffset(int Offset)
unsigned getArgumentStackToRestore() const
void setLocalStackSize(uint64_t Size)
int getCalleeSaveBaseToFrameRecordOffset() const
uint64_t getStackSizeSVE() const
void setHasRedZone(bool s)
bool hasStackFrame() const
std::optional< int > getTaggedBasePointerIndex() const
uint64_t getLocalStackSize() const
void setStackRealigned(bool s)
bool needsDwarfUnwindInfo(const MachineFunction &MF) const
unsigned getVarArgsGPRSize() const
void setStackSizeSVE(uint64_t S)
bool isStackRealigned() const
bool hasSwiftAsyncContext() const
void setTaggedBasePointerOffset(unsigned Offset)
unsigned getSVECalleeSavedStackSize() const
bool needsAsyncDwarfUnwindInfo(const MachineFunction &MF) const
void setMinMaxSVECSFrameIndex(int Min, int Max)
bool hasCalleeSaveStackFreeSpace() const
static bool isSEHInstruction(const MachineInstr &MI)
Return true if the instruction is a SEH instruction used for unwinding on Windows.
bool isReservedReg(const MachineFunction &MF, MCRegister Reg) const
bool hasBasePointer(const MachineFunction &MF) const
bool cannotEliminateFrame(const MachineFunction &MF) const
unsigned getBaseRegister() const
bool isTargetWindows() const
const AArch64RegisterInfo * getRegisterInfo() const override
const AArch64InstrInfo * getInstrInfo() const override
bool isTargetILP32() const
const AArch64TargetLowering * getTargetLowering() const override
bool isTargetMachO() const
bool isCallingConvWin64(CallingConv::ID CC) const
const char * getChkStkName() const
bool isXRegisterReserved(size_t i) const
bool swiftAsyncContextIsDynamicallySet() const
Return whether FrameLowering should always set the "extended frame present" bit in FP,...
unsigned getRedZoneSize(const Function &F) const
bool supportSwiftError() const override
Return true if the target supports swifterror attribute.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
bool test(unsigned Idx) const
size_type count() const
count - Returns the number of bits which are set.
iterator_range< const_set_bits_iterator > set_bits() const
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
AttributeList getAttributes() const
Return the attribute list for this Function.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc) const override
Emit instructions to copy a pair of physical registers.
A set of physical registers with utility functions to track liveness when walking backward/forward th...
bool available(const MachineRegisterInfo &MRI, MCPhysReg Reg) const
Returns true if register Reg and no aliasing register is in the set.
void addLiveIns(const MachineBasicBlock &MBB)
Adds all live-in registers of basic block MBB.
void addReg(MCPhysReg Reg)
Adds a physical register and all its sub-registers to the set.
bool usesWindowsCFI() const
static MCCFIInstruction cfiDefCfaOffset(MCSymbol *L, int Offset)
.cfi_def_cfa_offset modifies a rule for computing CFA.
static MCCFIInstruction createRestore(MCSymbol *L, unsigned Register)
.cfi_restore says that the rule for Register is now the same as it was at the beginning of the functi...
static MCCFIInstruction createSameValue(MCSymbol *L, unsigned Register)
.cfi_same_value Current value of Register is the same as in the previous frame.
static MCCFIInstruction cfiDefCfa(MCSymbol *L, unsigned Register, int Offset)
.cfi_def_cfa defines a rule for computing CFA as: take address from Register and add Offset to it.
static MCCFIInstruction createNegateRAState(MCSymbol *L)
.cfi_negate_ra_state AArch64 negate RA state.
static MCCFIInstruction createEscape(MCSymbol *L, StringRef Vals, StringRef Comment="")
.cfi_escape Allows the user to add arbitrary bytes to the unwind info.
static MCCFIInstruction createOffset(MCSymbol *L, unsigned Register, int Offset)
.cfi_offset Previous value of Register is saved at offset Offset from CFA.
MCSymbol * createTempSymbol()
Create a temporary symbol with a unique name.
Describe properties that are true of each instruction in the target description file.
Wrapper class representing physical registers. Should be passed by value.
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
bool isEHFuncletEntry() const
Returns true if this is the entry block of an EH funclet.
iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
MachineInstr & instr_back()
DebugLoc findDebugLoc(instr_iterator MBBI)
Find the next valid DebugLoc starting at MBBI, skipping any DBG_VALUE and DBG_LABEL instructions.
iterator getLastNonDebugInstr(bool SkipPseudoOp=true)
Returns an iterator to the last non-debug instruction in the basic block, or end().
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
iterator insertAfter(iterator I, MachineInstr *MI)
Insert MI into the instruction list after I.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack ...
uint64_t getStackSize() const
Return the number of bytes that must be allocated to hold all of the fixed size frame objects.
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
bool hasCalls() const
Return true if the current function has any function calls.
bool isFrameAddressTaken() const
This method may be called any time after instruction selection is complete to determine if there is a...
Align getMaxAlign() const
Return the alignment in bytes that this function must be aligned to, which is greater than the defaul...
void setObjectOffset(int ObjectIdx, int64_t SPOffset)
Set the stack frame offset of the specified object.
bool hasPatchPoint() const
This method may be called any time after instruction selection is complete to determine if there is a...
int getStackProtectorIndex() const
Return the index for the stack protector object.
uint64_t estimateStackSize(const MachineFunction &MF) const
Estimate and return the size of the stack frame.
void setStackID(int ObjectIdx, uint8_t ID)
bool isCalleeSavedInfoValid() const
Has the callee saved info been calculated yet?
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
bool isMaxCallFrameSizeComputed() const
bool hasStackMap() const
This method may be called any time after instruction selection is complete to determine if there is a...
const std::vector< CalleeSavedInfo > & getCalleeSavedInfo() const
Returns a reference to call saved info vector for the current function.
unsigned getMaxCallFrameSize() const
Return the maximum size of a call frame that must be allocated for an outgoing function call.
int getObjectIndexEnd() const
Return one past the maximum frame object index.
bool hasStackProtectorIndex() const
uint8_t getStackID(int ObjectIdx) const
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
int getObjectIndexBegin() const
Return the minimum frame object index.
bool isDeadObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a dead object.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
const WinEHFuncInfo * getWinEHFuncInfo() const
getWinEHFuncInfo - Return information about how the current function uses Windows exception handling.
unsigned addFrameInst(const MCCFIInstruction &Inst)
void setHasWinCFI(bool v)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
MachineModuleInfo & getMMI() const
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineBasicBlock & front() const
bool hasEHFunclets() const
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
const MachineInstrBuilder & addCFIIndex(unsigned CFIIndex) const
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & copyImplicitOps(const MachineInstr &OtherMI) const
Copy all the implicit operands from OtherMI onto this one.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
void setFlags(unsigned flags)
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
uint16_t getFlags() const
Return the MI flags bitvector.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
This class contains meta information specific to a module.
const MCContext & getContext() const
MachineOperand class - Representation of each machine instruction operand.
void setImm(int64_t immVal)
static MachineOperand CreateImm(int64_t Val)
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
bool isLiveIn(Register Reg) const
const MCPhysReg * getCalleeSavedRegs() const
Returns list of callee saved registers.
bool isPhysRegUsed(MCRegister PhysReg, bool SkipRegMaskTest=false) const
Return true if the specified register is modified or read in this function.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
void enterBasicBlockEnd(MachineBasicBlock &MBB)
Start tracking liveness from the end of basic block MBB.
Register FindUnusedReg(const TargetRegisterClass *RC) const
Find an unused register of the specified register class.
void backward()
Update internal register state and move MBB iterator backwards.
void addScavengingFrameIndex(int FI)
Add a scavenging frame index.
Wrapper class representing virtual and physical registers.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
int64_t getFixed() const
Returns the fixed component of the stack.
int64_t getScalable() const
Returns the scalable component of the stack.
static StackOffset get(int64_t Fixed, int64_t Scalable)
static StackOffset getScalable(int64_t Scalable)
static StackOffset getFixed(int64_t Fixed)
StringRef - Represent a constant reference to a string, i.e.
virtual void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, RegScavenger *RS=nullptr) const
This method determines which of the registers reported by TargetRegisterInfo::getCalleeSavedRegs() sh...
int getOffsetOfLocalArea() const
getOffsetOfLocalArea - This method returns the offset of the local area from the stack pointer on ent...
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
StackDirection getStackGrowthDirection() const
getStackGrowthDirection - Return the direction the stack grows
TargetInstrInfo - Interface to description of machine instruction set.
CodeModel::Model getCodeModel() const
Returns the code model.
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
SwiftAsyncFramePointerMode SwiftAsyncFramePointer
Control when and how the Swift async frame pointer bit should be set.
bool DisableFramePointerElim(const MachineFunction &MF) const
DisableFramePointerElim - This returns true if frame pointer elimination optimization should be disab...
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
const TargetRegisterClass * getMinimalPhysRegClass(MCRegister Reg, MVT VT=MVT::Other) const
Returns the Register Class of a physical register of the given type, picking the most sub register cl...
Align getSpillAlign(const TargetRegisterClass &RC) const
Return the minimum required alignment in bytes for a spill slot for a register of this class.
bool hasStackRealignment(const MachineFunction &MF) const
True if stack realignment is required and still possible.
unsigned getSpillSize(const TargetRegisterClass &RC) const
Return the size in bytes of the stack slot allocated to hold a spilled copy of a register from class ...
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const TargetInstrInfo * getInstrInfo() const
static constexpr TypeSize Fixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.