#define DEBUG_TYPE "frame-info"

static cl::opt<bool> EnableRedZone("aarch64-redzone",
                                   cl::desc("enable use of redzone on AArch64"),
                                   cl::init(false), cl::Hidden);

static cl::opt<bool> StackTaggingMergeSetTag(
    "stack-tagging-merge-settag",
    cl::desc("merge settag instruction in function epilog"), cl::init(true),
    cl::Hidden);

cl::opt<bool> EnableHomogeneousPrologEpilog(
    "homogeneous-prolog-epilog", cl::Hidden,
    cl::desc("Emit homogeneous prologue and epilogue for the size "
             "optimization (default = off)"));

STATISTIC(NumRedZoneFunctions, "Number of functions using red zone");
// Returns how much of the incoming argument stack area (in bytes) we should
// clean up in an epilogue.
static int64_t getArgumentStackToRestore(MachineFunction &MF,
                                         MachineBasicBlock &MBB) {
  int64_t ArgumentPopSize = 0;
  if (IsTailCallReturn) {
    ArgumentPopSize = StackAdjust.getImm();
  }

  return ArgumentPopSize;
}
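// Illustration (not from this file): under a callee-pops-arguments convention,
// the tail-call return pseudo carries the number of argument-area bytes the
// callee consumes; if that is 16, the epilogue must re-bump SP by those 16
// bytes (the AfterCSRPopSize path below) instead of restoring them itself.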
bool AArch64FrameLowering::homogeneousPrologEpilog(
    MachineFunction &MF, MachineBasicBlock *Exit) const {
  if (AFI->hasSwiftAsyncContext() || AFI->hasStreamingModeChanges())
    return false;

  unsigned NumGPRs = 0;
  for (unsigned I = 0; CSRegs[I]; ++I) {
    unsigned Reg = CSRegs[I];
    if (Reg == AArch64::LR) {
      assert(CSRegs[I + 1] == AArch64::FP);
      if (NumGPRs % 2 != 0)
        return false;
      break;
    }
    if (AArch64::GPR64RegClass.contains(Reg))
      ++NumGPRs;
  }
bool AArch64FrameLowering::producePairRegisters(MachineFunction &MF) const {
  return produceCompactUnwindFrame(MF) || homogeneousPrologEpilog(MF);
}

// Look at each instruction that references stack frames and return the stack
// size limit beyond which some of these instructions will require a scratch
// register during their expansion later.
static unsigned estimateRSStackSizeLimit(MachineFunction &MF) {
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      if (MI.isDebugInstr() || MI.isPseudo() ||
          MI.getOpcode() == AArch64::ADDXri ||
          MI.getOpcode() == AArch64::ADDSXri)
        continue;
  if (!IsWin64 || IsFunclet) {
    return AFI->getTailCallReservedStack();
  } else {
    if (AFI->getTailCallReservedStack() != 0 &&
        !MF.getFunction().getAttributes().hasAttrSomewhere(
            Attribute::SwiftAsync))
      report_fatal_error("cannot generate ABI-changing tail call for Win64");
    // Var args are stored here in the primary function.
    const unsigned VarArgsArea = AFI->getVarArgsGPRSize();
    // To support EH funclets we allocate an UnwindHelp object.
    const unsigned UnwindHelpObject = (MF.hasEHFunclets() ? 8 : 0);
    return alignTo(VarArgsArea + UnwindHelpObject, 16);
  }
}

bool AArch64FrameLowering::canUseRedZone(const MachineFunction &MF) const {
  if (!EnableRedZone)
    return false;

  const unsigned RedZoneSize =
      Subtarget.getTargetLowering()->getRedZoneSize(MF.getFunction());

  // Without NEON, a copy between Q-registers is lowered through memory below
  // SP, which the red zone would overlap.
  bool LowerQRegCopyThroughMem = Subtarget.hasFPARMv8() &&
                                 !Subtarget.isNeonAvailable();

  return !(MFI.hasCalls() || hasFP(MF) || NumBytes > RedZoneSize ||
           getSVEStackSize(MF) || LowerQRegCopyThroughMem);
}
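// Note (illustrative): the red zone lets a leaf function address up to
// RedZoneSize bytes of locals below SP without ever adjusting SP, so a small
// call-free function can get an empty prologue and epilogue;
// NumRedZoneFunctions (see emitPrologue) counts how often this fires.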
  unsigned Opc = I->getOpcode();
  bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode();
  uint64_t CalleePopAmount = IsDestroy ? I->getOperand(1).getImm() : 0;

  if (!hasReservedCallFrame(MF)) {
    int64_t Amount = I->getOperand(0).getImm();

    if (CalleePopAmount == 0) {
      // In-function stack adjustments for calls are limited to 24 bits
      // because there is no guaranteed temporary register available.
      assert(Amount > -0xffffff && Amount < 0xffffff &&
             "call frame too large");
    } else {
      assert(MFI.hasVarSizedObjects() &&
             "non-reserved call frame without var sized objects?");
    }
  } else if (CalleePopAmount != 0) {
    // If the calling convention demands that the callee pops arguments from
    // the stack, we want to add it back if we have a reserved call frame.
    assert(CalleePopAmount < 0xffffff && "call frame too large");
  }
void AArch64FrameLowering::emitCalleeSavedGPRLocations(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
  bool LocallyStreaming =
      Attrs.hasStreamingBody() && !Attrs.hasStreamingInterface();

  for (const auto &Info : CSI) {
    unsigned FrameIdx = Info.getFrameIdx();

    assert(!Info.isSpilledToReg() && "Spilling to registers not implemented");
    int64_t DwarfReg = TRI.getDwarfRegNum(Info.getReg(), true);

    // The location of VG is emitted before each streaming-mode change; only
    // locally-streaming functions need the non-streaming VG location here.
    if ((LocallyStreaming && FrameIdx == AFI->getStreamingVGIdx()) ||
        (!LocallyStreaming &&
         DwarfReg == TRI.getDwarfRegNum(AArch64::VG, true)))
      continue;
void AArch64FrameLowering::emitCalleeSavedSVELocations(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
  for (const auto &Info : CSI) {
    assert(!Info.isSpilledToReg() && "Spilling to registers not implemented");

// Emit CFI instructions that recreate the state of the unwind information
// upon function entry.
void AArch64FrameLowering::resetCFIToInitialState(
    MachineBasicBlock &MBB) const {
  const MCInstrDesc &CFIDesc = TII.get(TargetOpcode::CFI_INSTRUCTION);

  // Reset the CFA to `SP + 0`.
  unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::cfiDefCfa(
      nullptr, TRI.getDwarfRegNum(AArch64::SP, true), 0));
  BuildMI(MBB, InsertPt, DL, CFIDesc).addCFIIndex(CFIIndex);

  // Flip the RA sign state.
  if (MFI.shouldSignReturnAddress(MF)) {
    CFIIndex = MF.addFrameInst(MCCFIInstruction::createNegateRAState(nullptr));
    BuildMI(MBB, InsertPt, DL, CFIDesc).addCFIIndex(CFIIndex);
  }

  // The shadow call stack uses X18; reset it.
  if (MFI.needsShadowCallStackPrologueEpilogue(MF))
    insertCFISameValue(CFIDesc, MF, MBB, InsertPt,
                       TRI.getDwarfRegNum(AArch64::X18, true));
  // Reset the callee-saved registers to "same value".
  const std::vector<CalleeSavedInfo> &CSI =
      MF.getFrameInfo().getCalleeSavedInfo();
  for (const auto &Info : CSI) {
    unsigned Reg = Info.getReg();
    if (!TRI.regNeedsCFI(Reg, Reg))
      continue;
    insertCFISameValue(CFIDesc, MF, MBB, InsertPt,
                       TRI.getDwarfRegNum(Reg, true));
  }
}

static void emitCalleeSavedRestores(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator MBBI,
                                    bool SVE) {
  for (const auto &Info : CSI) {
    unsigned Reg = Info.getReg();

    if (!Info.isRestored())
      continue;

    unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createRestore(
        nullptr, TRI.getDwarfRegNum(Info.getReg(), true)));
void AArch64FrameLowering::emitCalleeSavedGPRRestores(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
  emitCalleeSavedRestores(MBB, MBBI, /*SVE=*/false);
}

void AArch64FrameLowering::emitCalleeSavedSVERestores(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
  emitCalleeSavedRestores(MBB, MBBI, /*SVE=*/true);
}

// Return the maximum possible number of bytes for `Size`, given the
// architectural limit on the size of an SVE register.
static int64_t upperBound(StackOffset Size) {
  static const int64_t MAX_BYTES_PER_SCALABLE_BYTE = 16;
  return Size.getScalable() * MAX_BYTES_PER_SCALABLE_BYTE + Size.getFixed();
}
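// Illustration: a StackOffset of {Fixed = 32, Scalable = 16} bounds to
// 16 * 16 + 32 == 288 bytes, since the architectural maximum SVE vector
// length of 2048 bits makes one "scalable byte" worth at most 16 real bytes.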
void AArch64FrameLowering::allocateStackSpace(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    int64_t RealignmentPadding, StackOffset AllocSize, bool NeedsWinCFI,
    bool *HasWinCFI, bool EmitCFI, StackOffset InitialOffset,
    bool FollowupAllocs) const {

  const uint64_t AndMask = ~(MaxAlign - 1);

  // Without inline stack probing a single SP decrement suffices.
  Register TargetReg = RealignmentPadding
                           ? findScratchNonCalleeSaveRegister(&MBB)
                           : AArch64::SP;
  emitFrameOffset(MBB, MBBI, DL, TargetReg, AArch64::SP, -AllocSize, &TII,
                  MachineInstr::FrameSetup, false, NeedsWinCFI, HasWinCFI,
                  EmitCFI, InitialOffset);
  if (RealignmentPadding) {
    // AND SP, TargetReg, 0b11111...0000 -- re-align after the decrement.
  }

  // Fixed-size allocation with probing: with no SVE objects and no
  // realignment, a compact probing sequence can be used.
  if (AllocSize.getScalable() == 0 && RealignmentPadding == 0) {
    Register ScratchReg = findScratchNonCalleeSaveRegister(&MBB);
    assert(ScratchReg != AArch64::NoRegister);
    if (FollowupAllocs) {
      // STR XZR, [SP] -- probe the new SP before further allocations.
    }
  }

  // If the whole (worst-case) allocation fits below one probe interval,
  // allocate in one step and probe only when required.
  if (upperBound(AllocSize) + RealignmentPadding <= ProbeSize) {
    Register ScratchReg = RealignmentPadding
                              ? findScratchNonCalleeSaveRegister(&MBB)
                              : AArch64::SP;
    assert(ScratchReg != AArch64::NoRegister);
    emitFrameOffset(MBB, MBBI, DL, ScratchReg, AArch64::SP, -AllocSize, &TII,
                    MachineInstr::FrameSetup, false, NeedsWinCFI, HasWinCFI,
                    EmitCFI, InitialOffset);
    if (RealignmentPadding) {
      // AND SP, ScratchReg, 0b11111...0000
    }
    if (FollowupAllocs || upperBound(AllocSize) + RealignmentPadding >
                              AArch64::StackProbeMaxUnprobedStack) {
      // STR XZR, [SP]
    }
  }

  // Otherwise, emit a probing loop that decrements SP one interval at a time.
  assert(TargetReg != AArch64::NoRegister);
  emitFrameOffset(MBB, MBBI, DL, TargetReg, AArch64::SP, -AllocSize, &TII,
                  MachineInstr::FrameSetup, false, NeedsWinCFI, HasWinCFI,
                  EmitCFI, InitialOffset);
  if (RealignmentPadding) {
    // AND SP, TargetReg, 0b11111...0000
  }
  if (RealignmentPadding)
    AFI.setStackRealigned(true);
#define CASE(n)                                                                \
  case AArch64::W##n:                                                          \
  case AArch64::X##n:                                                          \
    return AArch64::X##n
#undef CASE

#define CASE(n)                                                                \
  case AArch64::B##n:                                                          \
  case AArch64::H##n:                                                          \
  case AArch64::S##n:                                                          \
  case AArch64::D##n:                                                          \
  case AArch64::Q##n:                                                          \
    return HasSVE ? AArch64::Z##n : AArch64::Q##n
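// getRegisterOrZero maps any alias of a register to the widest register that
// must be cleared: W3 or X3 -> X3, and B3/H3/S3/D3/Q3 -> Q3 (or Z3 when SVE
// is available), so zeroing call-used registers wipes the full architectural
// state rather than just the referenced sub-register.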
void AArch64FrameLowering::emitZeroCallUsedRegs(BitVector RegsToZero,
                                                MachineBasicBlock &MBB) const {
  bool HasSVE = STI.hasSVE();

  for (MCRegister Reg : RegsToZero.set_bits()) {
    if (TRI.isGeneralPurposeRegister(MF, Reg)) {
      MCRegister XReg = getRegisterOrZero(Reg, HasSVE);
      GPRsToZero.set(XReg);
    } else {
      MCRegister XReg = getRegisterOrZero(Reg, HasSVE);
      FPRsToZero.set(XReg);
    }
  }

  // Zero out SVE predicate registers that are call-used.
  for (MCRegister PReg :
       {AArch64::P0, AArch64::P1, AArch64::P2, AArch64::P3, AArch64::P4,
        AArch64::P5, AArch64::P6, AArch64::P7, AArch64::P8, AArch64::P9,
        AArch64::P10, AArch64::P11, AArch64::P12, AArch64::P13, AArch64::P14,
        AArch64::P15}) {
    if (RegsToZero[PReg])
      BuildMI(MBB, MBBI, DL, TII.get(AArch64::PFALSE), PReg);
  }
static void getLiveRegsForEntryMBB(LivePhysRegs &LiveRegs,
                                   const MachineBasicBlock &MBB) {
  for (unsigned i = 0; CSRegs[i]; ++i)
    LiveRegs.addReg(CSRegs[i]);
}

// Find a scratch register that we can use at the start of the prologue to
// re-align the stack pointer.
static Register findScratchNonCalleeSaveRegister(MachineBasicBlock *MBB) {
  for (unsigned Reg : AArch64::GPR64RegClass) {
    if (LiveRegs.available(MRI, Reg))
      return Reg;
  }
  return AArch64::NoRegister;
}

static bool windowsRequiresStackProbe(MachineFunction &MF,
                                      uint64_t StackSizeInBytes) {
  return Subtarget.isTargetWindows() && MFI.hasStackProbing() &&
         StackSizeInBytes >= uint64_t(MFI.getStackProbeSize());
}

static bool needsWinCFI(const MachineFunction &MF) {
  const Function &F = MF.getFunction();
  return MF.getTarget().getMCAsmInfo()->usesWindowsCFI() &&
         F.needsUnwindTableEntry();
}
bool AArch64FrameLowering::shouldCombineCSRLocalStackBump(
    MachineFunction &MF, uint64_t StackBumpBytes) const {
  if (homogeneousPrologEpilog(MF))
    return false;

  if (MFI.hasVarSizedObjects())
    return false;

  if (RegInfo->hasStackRealignment(MF))
    return false;

bool AArch64FrameLowering::shouldCombineCSRLocalStackBumpInEpilogue(
    MachineBasicBlock &MBB, uint64_t StackBumpBytes) const {
  if (!shouldCombineCSRLocalStackBump(*MBB.getParent(), StackBumpBytes))
    return false;

  // Disable combining the SP bump when the epilogue ends in an MTE tag store:
  // it is almost always better to merge the SP adjustment into those
  // instructions instead.
  MachineBasicBlock::iterator LastI = MBB.getFirstTerminator();
  MachineBasicBlock::iterator Begin = MBB.begin();
  while (LastI != Begin) {
    --LastI;
    if (LastI->isTransient())
      continue;
    break;
  }
  switch (LastI->getOpcode()) {
  case AArch64::STGloop:
  case AArch64::STZGloop:
  case AArch64::STGi:
  case AArch64::STZGi:
  case AArch64::ST2Gi:
  case AArch64::STZ2Gi:
    return false;
  default:
    return true;
  }
}
static MachineBasicBlock::iterator InsertSEH(MachineBasicBlock::iterator MBBI,
                                             const TargetInstrInfo &TII,
                                             MachineInstr::MIFlag Flag) {
  unsigned Opc = MBBI->getOpcode();
  MachineInstrBuilder MIB;
  unsigned ImmIdx = MBBI->getNumOperands() - 1;
  int Imm = MBBI->getOperand(ImmIdx).getImm();

  switch (Opc) {
  default:
    llvm_unreachable("No SEH Opcode for this instruction");
  case AArch64::LDPDpost:
    Imm = -Imm;
    [[fallthrough]];
  case AArch64::STPDpre: {
    unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(2).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFRegP_X))
              .addImm(Reg0)
              .addImm(Reg1)
              .addImm(Imm * 8)
              .setMIFlag(Flag);
    break;
  }
  case AArch64::LDPXpost:
    Imm = -Imm;
    [[fallthrough]];
  case AArch64::STPXpre: {
    Register Reg0 = MBBI->getOperand(1).getReg();
    Register Reg1 = MBBI->getOperand(2).getReg();
    if (Reg0 == AArch64::FP && Reg1 == AArch64::LR)
      MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFPLR_X))
                .addImm(Imm * 8)
                .setMIFlag(Flag);
    else
      MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveRegP_X))
                .addImm(RegInfo->getSEHRegNum(Reg0))
                .addImm(RegInfo->getSEHRegNum(Reg1))
                .addImm(Imm * 8)
                .setMIFlag(Flag);
    break;
  }
  case AArch64::LDRDpost:
    Imm = -Imm;
    [[fallthrough]];
  case AArch64::STRDpre: {
    unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFReg_X))
              .addImm(Reg)
              .addImm(Imm)
              .setMIFlag(Flag);
    break;
  }
  case AArch64::LDRXpost:
    Imm = -Imm;
    [[fallthrough]];
  case AArch64::STRXpre: {
    unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveReg_X))
              .addImm(Reg)
              .addImm(Imm)
              .setMIFlag(Flag);
    break;
  }
  case AArch64::STPDi:
  case AArch64::LDPDi: {
    unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
    unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFRegP))
              .addImm(Reg0)
              .addImm(Reg1)
              .addImm(Imm * 8)
              .setMIFlag(Flag);
    break;
  }
  case AArch64::STPXi:
  case AArch64::LDPXi: {
    Register Reg0 = MBBI->getOperand(0).getReg();
    Register Reg1 = MBBI->getOperand(1).getReg();
    if (Reg0 == AArch64::FP && Reg1 == AArch64::LR)
      MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFPLR))
                .addImm(Imm * 8)
                .setMIFlag(Flag);
    else
      MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveRegP))
                .addImm(RegInfo->getSEHRegNum(Reg0))
                .addImm(RegInfo->getSEHRegNum(Reg1))
                .addImm(Imm * 8)
                .setMIFlag(Flag);
    break;
  }
  case AArch64::STRXui:
  case AArch64::LDRXui: {
    int Reg = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveReg))
              .addImm(Reg)
              .addImm(Imm * 8)
              .setMIFlag(Flag);
    break;
  }
  case AArch64::STRDui:
  case AArch64::LDRDui: {
    unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFReg))
              .addImm(Reg)
              .addImm(Imm * 8)
              .setMIFlag(Flag);
    break;
  }
  case AArch64::STPQi:
  case AArch64::LDPQi: {
    unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
    unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveAnyRegQP))
              .addImm(Reg0)
              .addImm(Reg1)
              .addImm(Imm * 16)
              .setMIFlag(Flag);
    break;
  }
  case AArch64::LDPQpost:
    Imm = -Imm;
    [[fallthrough]];
  case AArch64::STPQpre: {
    unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(2).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveAnyRegQPX))
              .addImm(Reg0)
              .addImm(Reg1)
              .addImm(Imm * 16)
              .setMIFlag(Flag);
    break;
  }
  }
static void fixupSEHOpcode(MachineBasicBlock::iterator MBBI,
                           unsigned LocalStackSize) {
  MachineOperand *ImmOpnd = nullptr;
  unsigned ImmIdx = MBBI->getNumOperands() - 1;
  switch (MBBI->getOpcode()) {
  default:
    llvm_unreachable("Fix the offset in the SEH instruction");
  case AArch64::SEH_SaveFPLR:
  case AArch64::SEH_SaveRegP:
  case AArch64::SEH_SaveReg:
  case AArch64::SEH_SaveFRegP:
  case AArch64::SEH_SaveFReg:
  case AArch64::SEH_SaveAnyRegQP:
  case AArch64::SEH_SaveAnyRegQPX:
    ImmOpnd = &MBBI->getOperand(ImmIdx);
    break;
  }
  if (ImmOpnd)
    ImmOpnd->setImm(ImmOpnd->getImm() + LocalStackSize);
}
static bool requiresSaveVG(MachineFunction &MF) {
  // On Darwin, VG is only saved when SVE is available, as its value is only
  // needed there for unwinding.
  if (ST.isTargetDarwin())
    return ST.hasSVE();
  return true;
}

bool isVGInstruction(MachineBasicBlock::iterator MBBI) {
  unsigned Opc = MBBI->getOpcode();
  if (Opc == AArch64::CNTD_XPiI || Opc == AArch64::RDSVLI_XI ||
      Opc == AArch64::UBFMXri)
    return true;

  if (requiresGetVGCall(*MBBI->getMF())) {
    if (Opc == AArch64::ORRXrr)
      return true;

    if (Opc == AArch64::BL) {
      auto Op1 = MBBI->getOperand(0);
      return Op1.isSymbol() &&
             (StringRef(Op1.getSymbolName()) == "__arm_get_current_vg");
    }
  }
  return false;
}
static MachineBasicBlock::iterator convertCalleeSaveRestoreToSPPrePostIncDec(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    const DebugLoc &DL, const TargetInstrInfo *TII, int CSStackSizeInc,
    bool NeedsWinCFI, bool *HasWinCFI, bool EmitCFI,
    MachineInstr::MIFlag FrameFlag = MachineInstr::FrameSetup,
    int CFAOffset = 0) {
  unsigned NewOpc;
  switch (MBBI->getOpcode()) {
  default:
    llvm_unreachable("Unexpected callee-save save/restore opcode!");
  case AArch64::STPXi:
    NewOpc = AArch64::STPXpre;
    break;
  case AArch64::STPDi:
    NewOpc = AArch64::STPDpre;
    break;
  case AArch64::STPQi:
    NewOpc = AArch64::STPQpre;
    break;
  case AArch64::STRXui:
    NewOpc = AArch64::STRXpre;
    break;
  case AArch64::STRDui:
    NewOpc = AArch64::STRDpre;
    break;
  case AArch64::STRQui:
    NewOpc = AArch64::STRQpre;
    break;
  case AArch64::LDPXi:
    NewOpc = AArch64::LDPXpost;
    break;
  case AArch64::LDPDi:
    NewOpc = AArch64::LDPDpost;
    break;
  case AArch64::LDPQi:
    NewOpc = AArch64::LDPQpost;
    break;
  case AArch64::LDRXui:
    NewOpc = AArch64::LDRXpost;
    break;
  case AArch64::LDRDui:
    NewOpc = AArch64::LDRDpost;
    break;
  case AArch64::LDRQui:
    NewOpc = AArch64::LDRQpost;
    break;
  }

  // If the instruction is followed by an SEH pseudo, it will be regenerated.
  auto SEH = std::next(MBBI);
  if (AArch64InstrInfo::isSEHInstruction(*SEH))
    SEH->eraseFromParent();

  TypeSize Scale = TypeSize::getFixed(1), Width = TypeSize::getFixed(0);
  int64_t MinOffset, MaxOffset;
  bool Success = static_cast<const AArch64InstrInfo *>(TII)->getMemOpInfo(
      NewOpc, Scale, Width, MinOffset, MaxOffset);
  (void)Success;
  assert(Success && "unknown load/store opcode");

  // If the first store isn't right where we want SP, then we can't fold the
  // update in, so create a normal arithmetic instruction instead.
  if (MBBI->getOperand(MBBI->getNumOperands() - 1).getImm() != 0 ||
      CSStackSizeInc < MinOffset * (int64_t)Scale.getFixedValue() ||
      CSStackSizeInc > MaxOffset * (int64_t)Scale.getFixedValue()) {
    emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP,
                    StackOffset::getFixed(CSStackSizeInc), TII, FrameFlag,
                    false, false, nullptr, EmitCFI,
                    StackOffset::getFixed(CFAOffset));
    return std::prev(MBBI);
  }

  MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(NewOpc));
  MIB.addReg(AArch64::SP, RegState::Define);

  // Copy all operands other than the immediate offset.
  unsigned OpndIdx = 0;
  for (unsigned OpndEnd = MBBI->getNumOperands() - 1; OpndIdx < OpndEnd;
       ++OpndIdx)
    MIB.add(MBBI->getOperand(OpndIdx));

  assert(MBBI->getOperand(OpndIdx).getImm() == 0 &&
         "Unexpected immediate offset in first/last callee-save save/restore "
         "instruction!");
  assert(MBBI->getOperand(OpndIdx - 1).getReg() == AArch64::SP &&
         "Unexpected base register in callee-save save/restore instruction!");
  assert(CSStackSizeInc % Scale == 0);
  MIB.addImm(CSStackSizeInc / (int)Scale);
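// Example of the conversion: the prologue's first callee-save store
// "stp x29, x30, [sp]" with CSStackSizeInc == -16 becomes the pre-indexed
// "stp x29, x30, [sp, #-16]!" (STPXpre with scaled immediate -16/8 == -2),
// folding the SP decrement into the store itself.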
static void fixupCalleeSaveRestoreStackOffset(MachineInstr &MI,
                                              uint64_t LocalStackSize,
                                              bool NeedsWinCFI,
                                              bool *HasWinCFI) {
  unsigned Opc = MI.getOpcode();
  unsigned Scale;
  switch (Opc) {
  case AArch64::STPXi:
  case AArch64::STRXui:
  case AArch64::STPDi:
  case AArch64::STRDui:
  case AArch64::LDPXi:
  case AArch64::LDRXui:
  case AArch64::LDPDi:
  case AArch64::LDRDui:
    Scale = 8;
    break;
  case AArch64::STPQi:
  case AArch64::STRQui:
  case AArch64::LDPQi:
  case AArch64::LDRQui:
    Scale = 16;
    break;
  default:
    llvm_unreachable("Unexpected callee-save save/restore opcode!");
  }

  unsigned OffsetIdx = MI.getNumExplicitOperands() - 1;
  assert(MI.getOperand(OffsetIdx - 1).getReg() == AArch64::SP &&
         "Unexpected base register in callee-save save/restore instruction!");
  // The last operand is the immediate offset that needs fixing.
  MachineOperand &OffsetOpnd = MI.getOperand(OffsetIdx);
  // All generated opcodes above have scaled offsets.
  assert(LocalStackSize % Scale == 0);
  OffsetOpnd.setImm(OffsetOpnd.getImm() + LocalStackSize / Scale);

  if (NeedsWinCFI) {
    *HasWinCFI = true;
    auto MBBI = std::next(MachineBasicBlock::iterator(MI));
    assert(MBBI != MI.getParent()->end() && "Expecting a valid instruction");
    assert(AArch64InstrInfo::isSEHInstruction(*MBBI) &&
           "Expecting a SEH instruction");
    fixupSEHOpcode(MBBI, LocalStackSize);
  }
}
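// Worked example: after a combined prologue allocates 48 extra bytes of local
// stack, a callee-save store "stp x19, x20, [sp, #16]" (imm 2, Scale 8) is
// rewritten to imm 2 + 48/8 == 8, i.e. "stp x19, x20, [sp, #64]".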
static bool IsSVECalleeSave(MachineBasicBlock::iterator I) {
  switch (I->getOpcode()) {
  default:
    return false;
  case AArch64::PTRUE_C_B:
  case AArch64::LD1B_2Z_IMM:
  case AArch64::ST1B_2Z_IMM:
  case AArch64::STR_ZXI:
  case AArch64::STR_PXI:
  case AArch64::LDR_ZXI:
  case AArch64::LDR_PXI:
    return I->getFlag(MachineInstr::FrameSetup) ||
           I->getFlag(MachineInstr::FrameDestroy);
  }
}
static void emitShadowCallStackPrologue(const TargetInstrInfo &TII,
                                        MachineFunction &MF,
                                        MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MBBI,
                                        const DebugLoc &DL, bool NeedsWinCFI,
                                        bool NeedsUnwindInfo) {
  // Shadow call stack prologue: str x30, [x18], #8

  if (NeedsUnwindInfo) {
    // Emit a CFI instruction that causes 8 to be subtracted from the value of
    // x18 when unwinding past this frame.
    static const char CFIInst[] = {
        dwarf::DW_CFA_val_expression,
        18, // register
        2,  // length
        static_cast<char>(unsigned(dwarf::DW_OP_breg18)),
        static_cast<char>(-8) & 0x7f, // addend (sleb128)
    };
    unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createEscape(
        nullptr, StringRef(CFIInst, sizeof(CFIInst))));
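// Decoding the escape bytes: DW_CFA_val_expression for register 18 with the
// 2-byte expression {DW_OP_breg18, SLEB128(-8)} tells the unwinder that the
// caller's x18 equals the current x18 minus 8, undoing the post-indexed
// "str x30, [x18], #8" push onto the shadow call stack (note that -8 encodes
// as the single SLEB128 byte 0x78, i.e. static_cast<char>(-8) & 0x7f).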
static void emitDefineCFAWithFP(MachineFunction &MF, MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MBBI,
                                const DebugLoc &DL, unsigned FixedObject) {
  const int OffsetToFirstCalleeSaveFromFP =
      AFI->getCalleeSaveBaseToFrameRecordOffset();
  unsigned Reg = TRI->getDwarfRegNum(FramePtr, true);
  unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::cfiDefCfa(
      nullptr, Reg, FixedObject - OffsetToFirstCalleeSaveFromFP));
void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
                                        MachineBasicBlock &MBB) const {
  bool HasFP = hasFP(MF);
  bool HasWinCFI = false;

  // Find the first instruction past the prologue's frame-setup sequence.
  auto NonFrameStart = MBB.begin();
  while (NonFrameStart != End &&
         NonFrameStart->getFlag(MachineInstr::FrameSetup))
    ++NonFrameStart;

  LivePhysRegs LiveRegs(*TRI);
  if (NonFrameStart != MBB.end()) {
    getLivePhysRegsUpTo(*NonFrameStart, *TRI, LiveRegs);
  }

#ifndef NDEBUG
  auto VerifyClobberOnExit = make_scope_exit([&]() {
    if (NonFrameStart == MBB.end())
      return;
    // Check that the inserted prologue does not clobber any live register.
    for (auto &MI : make_range(MBB.instr_begin(), NonFrameStart->getIterator()))
      for (auto &Op : MI.operands())
        if (Op.isReg() && Op.isDef())
          assert(!LiveRegs.contains(Op.getReg()) &&
                 "live register clobbered by inserted prologue instructions");
  });
#endif

  if (MFnI.needsShadowCallStackPrologueEpilogue(MF))
    emitShadowCallStackPrologue(*TII, MF, MBB, MBBI, DL, NeedsWinCFI,
                                MFnI.needsDwarfUnwindInfo(MF));

  if (MFnI.shouldSignReturnAddress(MF)) {
    BuildMI(MBB, MBBI, DL, TII->get(AArch64::PAUTH_PROLOGUE))
        .setMIFlag(MachineInstr::FrameSetup);
  }

  if (EmitCFI && MFnI.isMTETagged()) {
    BuildMI(MBB, MBBI, DL, TII->get(AArch64::EMITMTETAGGED))
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // A leaf function that fits in the red zone needs no stack adjustment.
  if (!AFI->hasStackFrame() && !windowsRequiresStackProbe(MF, NumBytes)) {
    assert(!HasFP && "unexpected function without stack frame but with FP");
    assert(!SVEStackSize &&
           "unexpected function without stack frame but with SVE objects");
    if (canUseRedZone(MF))
      ++NumRedZoneFunctions;
  bool CombineSPBump = shouldCombineCSRLocalStackBump(MF, NumBytes);
  bool HomPrologEpilog = homogeneousPrologEpilog(MF);
  if (CombineSPBump) {
    assert(!SVEStackSize && "Cannot combine SP bump with SVE");
    NumBytes = 0;
  } else if (HomPrologEpilog) {
    // The homogeneous prologue has already adjusted the stack.
    NumBytes -= PrologueSaveSize;
  } else if (PrologueSaveSize != 0) {
    MBBI = convertCalleeSaveRestoreToSPPrePostIncDec(
        MBB, MBBI, DL, TII, -PrologueSaveSize, NeedsWinCFI, &HasWinCFI,
        EmitAsyncCFI);
    NumBytes -= PrologueSaveSize;
  }
  assert(NumBytes >= 0 && "Negative stack allocation size!?");

  // Move past the saves of the callee-saved registers, fixing up the offsets
  // if the callee-save and local stack bump were combined above.
  while (MBBI != End && MBBI->getFlag(MachineInstr::FrameSetup) &&
         !IsSVECalleeSave(MBBI)) {
    if (CombineSPBump)
      fixupCalleeSaveRestoreStackOffset(*MBBI, AFI->getLocalStackSize(),
                                        NeedsWinCFI, &HasWinCFI);
    ++MBBI;
  }

  // Set up the frame record (x29, x30) and, if present, the Swift
  // asynchronous context.
  if (!IsFunclet && HasFP) {
    bool HaveInitialContext = Attrs.hasAttrSomewhere(Attribute::SwiftAsync);
    if (HaveInitialContext)
      MBB.addLiveIn(AArch64::X22);
    Register Reg = HaveInitialContext ? AArch64::X22 : AArch64::XZR;

    if (HomPrologEpilog) {
      emitDefineCFAWithFP(MF, MBB, MBBI, DL, FixedObject);
    }
  }

  if (NeedsWinCFI && HasWinCFI) {
    // SEH opcodes describe only the prologue; no further SP updates can be
    // expressed, so stop emitting them here.
    NeedsWinCFI = false;
  }

  if (EmitAsyncCFI)
    emitCalleeSavedGPRLocations(MBB, MBBI);
  const bool NeedsRealignment =
      NumBytes && !IsFunclet && RegInfo->hasStackRealignment(MF);
  const int64_t RealignmentPadding =
      (NeedsRealignment && MFI.getMaxAlign() > Align(16))
          ? MFI.getMaxAlign().value() - 16
          : 0;

  if (windowsRequiresStackProbe(MF, NumBytes + RealignmentPadding)) {
    uint64_t NumWords = (NumBytes + RealignmentPadding) >> 4;

    // Windows unwind codes cannot describe larger stacks.
    if (NumBytes >= (1 << 28))
      report_fatal_error("Stack size cannot exceed 256MB for stack "
                         "unwinding purposes");

    // Materialize the 16-byte-block count into X15 for __chkstk.
    uint32_t LowNumWords = NumWords & 0xFFFF;
    BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVZXi), AArch64::X15)
        .addImm(LowNumWords)
        .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
        .setMIFlag(MachineInstr::FrameSetup);
    if ((NumWords & 0xFFFF0000) != 0) {
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVKXi), AArch64::X15)
          .addReg(AArch64::X15)
          .addImm((NumWords & 0xFFFF0000) >> 16)
          .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 16))
          .setMIFlag(MachineInstr::FrameSetup);
    }
  }

  if (RealignmentPadding > 0) {
    if (RealignmentPadding >= 4096) {
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVi64imm), AArch64::X16)
          .addImm(RealignmentPadding)
          .setMIFlags(MachineInstr::FrameSetup);
    } else {
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::ADDXri), AArch64::X15)
          .addReg(AArch64::SP)
          .addImm(RealignmentPadding)
          .addImm(0)
          .setMIFlag(MachineInstr::FrameSetup);
    }
  StackOffset SVECalleeSavesSize = {}, SVELocalsSize = SVEStackSize;
  MachineBasicBlock::iterator CalleeSavesBegin = MBBI, CalleeSavesEnd = MBBI;

  // Process the SVE callee-saves to determine what space needs allocating.
  if (int64_t CalleeSavedSize = AFI->getSVECalleeSavedStackSize()) {
    LLVM_DEBUG(dbgs() << "SVECalleeSavedStackSize = " << CalleeSavedSize
                      << "\n");
    SVECalleeSavesSize = StackOffset::getScalable(CalleeSavedSize);
    CalleeSavesBegin = MBBI;
    while (IsSVECalleeSave(MBBI) && MBBI != MBB.getFirstTerminator())
      ++MBBI;
    CalleeSavesEnd = MBBI;

    SVELocalsSize = SVEStackSize - SVECalleeSavesSize;
  }

  // Allocate space for the SVE callee saves (if any).
  allocateStackSpace(MBB, CalleeSavesBegin, 0, SVECalleeSavesSize, false,
                     nullptr, EmitAsyncCFI && !HasFP, CFAOffset,
                     MFI.hasVarSizedObjects() || LocalsSize);
  CFAOffset += SVECalleeSavesSize;

  if (EmitAsyncCFI)
    emitCalleeSavedSVELocations(MBB, CalleeSavesEnd);

  // Allocate the rest of the frame, including SVE locals, aligning the stack
  // as necessary.
  assert(!(canUseRedZone(MF) && NeedsRealignment) &&
         "Cannot use redzone with stack realignment");
  if (!canUseRedZone(MF)) {
    allocateStackSpace(MBB, CalleeSavesEnd, RealignmentPadding,
                       SVELocalsSize + StackOffset::getFixed(NumBytes),
                       NeedsWinCFI, &HasWinCFI, EmitAsyncCFI && !HasFP,
                       CFAOffset, MFI.hasVarSizedObjects());
  }
  // If we need a base pointer, set it up here.
  if (!IsFunclet && RegInfo->hasBasePointer(MF)) {
    TII->copyPhysReg(MBB, MBBI, DL, AArch64::X19, AArch64::SP, false);
  }

  if (NeedsWinCFI && HasWinCFI) {
    BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_PrologEnd))
        .setMIFlag(MachineInstr::FrameSetup);
  }

  if (IsFunclet && F.hasPersonalityFn()) {
    // Funclets locate their parent's frame through the establisher frame.
  }

  if (EmitCFI && !EmitAsyncCFI) {
    // With synchronous CFI, describe the whole frame once at the end of the
    // prologue.
    unsigned CFIIndex = MF.addFrameInst(createDefCFA(
        *RegInfo, AArch64::SP, AArch64::SP, TotalSize,
        /*LastAdjustmentWasScalable=*/false));
    emitCalleeSavedGPRLocations(MBB, MBBI);
    emitCalleeSavedSVELocations(MBB, MBBI);
  }
}
static bool isFuncletReturnInstr(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    return false;
  case AArch64::CATCHRET:
  case AArch64::CLEANUPRET:
    return true;
  }
}

void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
                                        MachineBasicBlock &MBB) const {
  bool HasWinCFI = false;
  bool IsFunclet = false;

  if (MBB.end() != MBBI) {
    DL = MBBI->getDebugLoc();
    IsFunclet = isFuncletReturnInstr(*MBBI);
  }

  auto FinishingTouches = make_scope_exit([&]() {
    if (AFI->shouldSignReturnAddress(MF))
      BuildMI(MBB, MBB.getFirstTerminator(), DL,
              TII->get(AArch64::PAUTH_EPILOGUE))
          .setMIFlag(MachineInstr::FrameDestroy);
    if (NeedsWinCFI && HasWinCFI)
      BuildMI(MBB, MBB.getFirstTerminator(), DL,
              TII->get(AArch64::SEH_EpilogEnd))
          .setMIFlag(MachineInstr::FrameDestroy);
  });
  int64_t AfterCSRPopSize = ArgumentStackToRestore;

  if (homogeneousPrologEpilog(MF, &MBB)) {
    assert(!NeedsWinCFI);
    auto LastPopI = MBB.getFirstTerminator();
    if (LastPopI != MBB.begin()) {
      auto HomogeneousEpilog = std::prev(LastPopI);
      if (HomogeneousEpilog->getOpcode() == AArch64::HOM_Epilog)
        LastPopI = HomogeneousEpilog;
    }

    // SP has already been adjusted while restoring the callee-saved
    // registers; adjusting SP for the arguments was bailed out of above.
    assert(AfterCSRPopSize == 0);
    return;
  }

  bool CombineSPBump = shouldCombineCSRLocalStackBumpInEpilogue(MBB, NumBytes);
  // Assume we can't combine the last pop with the SP restore.
  bool CombineAfterCSRBump = false;
  if (!CombineSPBump && PrologueSaveSize != 0) {
    MachineBasicBlock::iterator Pop = std::prev(MBB.getFirstTerminator());
    while (Pop->getOpcode() == TargetOpcode::CFI_INSTRUCTION ||
           AArch64InstrInfo::isSEHInstruction(*Pop))
      Pop = std::prev(Pop);
    // Converting the last ldp to a post-index ldp is valid only if its offset
    // is 0.
    const MachineOperand &OffsetOp = Pop->getOperand(Pop->getNumOperands() - 1);
    // If the offset is 0 and the AfterCSR pop is not actually trying to
    // allocate more stack for arguments (in space that an untimely interrupt
    // may clobber), convert it to a post-index ldp.
    if (OffsetOp.getImm() == 0 && AfterCSRPopSize >= 0) {
      convertCalleeSaveRestoreToSPPrePostIncDec(
          MBB, Pop, DL, TII, PrologueSaveSize, NeedsWinCFI, &HasWinCFI, EmitCFI,
          MachineInstr::FrameDestroy, PrologueSaveSize);
    } else {
      // Otherwise transfer the size to be restored to the adjustment *after*
      // the CSR pops.
      AfterCSRPopSize += PrologueSaveSize;
      CombineAfterCSRBump = true;
    }
  }

  // Move past the restores of the callee-saved registers.
  MachineBasicBlock::iterator LastPopI = MBB.getFirstTerminator();
  MachineBasicBlock::iterator Begin = MBB.begin();
  while (LastPopI != Begin) {
    --LastPopI;
    if (!LastPopI->getFlag(MachineInstr::FrameDestroy) ||
        IsSVECalleeSave(LastPopI)) {
      ++LastPopI;
      break;
    } else if (CombineSPBump)
      fixupCalleeSaveRestoreStackOffset(*LastPopI, AFI->getLocalStackSize(),
                                        NeedsWinCFI, &HasWinCFI);
  }

  if (NeedsWinCFI) {
    EpilogStartI = LastPopI;
  }
  if (CombineSPBump) {
    assert(!SVEStackSize && "Cannot combine SP bump with SVE");

    // When we are about to restore the CSRs, the CFA register is SP again.
    if (EmitCFI && hasFP(MF)) {
      unsigned Reg = RegInfo.getDwarfRegNum(AArch64::SP, true);
      unsigned CFIIndex =
          MF.addFrameInst(MCCFIInstruction::cfiDefCfa(nullptr, Reg, NumBytes));
      BuildMI(MBB, LastPopI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex)
          .setMIFlags(MachineInstr::FrameDestroy);
    }
    return;
  }

  NumBytes -= PrologueSaveSize;
  assert(NumBytes >= 0 && "Negative stack allocation size!?");

  // Process the SVE callee-saves to find what space needs deallocating.
  StackOffset DeallocateBefore = {}, DeallocateAfter = SVEStackSize;
  MachineBasicBlock::iterator RestoreBegin = LastPopI, RestoreEnd = LastPopI;
  if (int64_t CalleeSavedSize = AFI->getSVECalleeSavedStackSize()) {
    RestoreBegin = std::prev(RestoreEnd);
    while (RestoreBegin != MBB.begin() &&
           IsSVECalleeSave(std::prev(RestoreBegin)))
      --RestoreBegin;

    StackOffset CalleeSavedSizeAsOffset =
        StackOffset::getScalable(CalleeSavedSize);
    DeallocateBefore = SVEStackSize - CalleeSavedSizeAsOffset;
    DeallocateAfter = CalleeSavedSizeAsOffset;
  }

  // Deallocate the SVE area.
  if (SVEStackSize) {
    emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP,
                    DeallocateBefore, TII, MachineInstr::FrameDestroy, false,
                    false, nullptr, EmitCFI && !hasFP(MF),
                    SVEStackSize +
                        StackOffset::getFixed(NumBytes + PrologueSaveSize));
    emitFrameOffset(MBB, RestoreEnd, DL, AArch64::SP, AArch64::SP,
                    DeallocateAfter, TII, MachineInstr::FrameDestroy, false,
                    false, nullptr, EmitCFI && !hasFP(MF),
                    DeallocateAfter +
                        StackOffset::getFixed(NumBytes + PrologueSaveSize));
    if (EmitCFI)
      emitCalleeSavedSVERestores(MBB, RestoreEnd);
  }
  if (!hasFP(MF)) {
    bool RedZone = canUseRedZone(MF);
    // A red-zone leaf function needs no SP restore, unless stack arguments
    // must be popped.
    if (RedZone && AfterCSRPopSize == 0)
      return;

    // Pop the locals; with no callee-save restores we are positioned at the
    // terminator and can fold the local and argument pops into one bump.
    bool NoCalleeSaveRestore = PrologueSaveSize == 0;
    int64_t StackRestoreBytes = RedZone ? 0 : NumBytes;
    if (NoCalleeSaveRestore)
      StackRestoreBytes += AfterCSRPopSize;

    emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP,
                    StackOffset::getFixed(StackRestoreBytes), TII,
                    MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI);

    if (NoCalleeSaveRestore || AfterCSRPopSize == 0) {
      return;
    }

    NumBytes = 0;
  }

  // Restore the original stack pointer from the frame pointer when the stack
  // was realigned or contains variable-sized objects.
  if (!IsFunclet && (MFI.hasVarSizedObjects() || AFI->isStackRealigned())) {
    emitFrameOffset(
        MBB, LastPopI, DL, AArch64::SP, AArch64::FP,
        StackOffset::getFixed(-AFI->getCalleeSaveBaseToFrameRecordOffset()),
        TII, MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI);
  } else if (NumBytes)
    emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP,
                    StackOffset::getFixed(NumBytes), TII,
                    MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI);

  // When we are about to restore the CSRs, the CFA register is SP again.
  if (EmitCFI && hasFP(MF)) {
    unsigned Reg = RegInfo.getDwarfRegNum(AArch64::SP, true);
    unsigned CFIIndex = MF.addFrameInst(
        MCCFIInstruction::cfiDefCfa(nullptr, Reg, PrologueSaveSize));
    BuildMI(MBB, LastPopI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex)
        .setMIFlags(MachineInstr::FrameDestroy);
  }

  // This must come after the callee-save restore code, which assumes SP is
  // where the prologue's callee-save save code left it.
  if (AfterCSRPopSize) {
    assert(AfterCSRPopSize > 0 && "attempting to reallocate arg stack that an "
                                  "interrupt may have clobbered");

    emitFrameOffset(MBB, MBB.getFirstTerminator(), DL, AArch64::SP, AArch64::SP,
                    StackOffset::getFixed(AfterCSRPopSize), TII,
                    MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI,
                    EmitCFI);
  }
}
  if (MFI.isVariableSizedObjectIndex(FI)) {
    return StackOffset::getFixed(ObjectOffset) - getOffsetOfLocalArea();
  }

  bool IsFixed = MFI.isFixedObjectIndex(FI);

  StackOffset ScalableOffset = {};
  if (!IsFixed && !IsCSR)
    ScalableOffset = -SVEStackSize;

static StackOffset getFPOffset(const MachineFunction &MF,
                               int64_t ObjectOffset) {
  bool IsWin64 = Subtarget.isCallingConvWin64(F.getCallingConv(), F.isVarArg());
  unsigned FixedObject =
      getFixedObjectSize(MF, AFI, IsWin64, /*IsFunclet=*/false);

static StackOffset getStackOffset(const MachineFunction &MF,
                                  int64_t ObjectOffset) {

int AArch64FrameLowering::getSEHFrameIndexOffset(const MachineFunction &MF,
                                                 int FI) const {
  return RegInfo->getLocalAddressRegister(MF) == AArch64::FP
             ? getFPOffset(MF, ObjectOffset).getFixed()
             : getStackOffset(MF, ObjectOffset).getFixed();
}

StackOffset AArch64FrameLowering::resolveFrameIndexReference(
    const MachineFunction &MF, int FI, Register &FrameReg, bool PreferFP,
    bool ForSimm) const {
  bool isFixed = MFI.isFixedObjectIndex(FI);
  bool isSVE = MFI.getStackID(FI) == TargetStackID::ScalableVector;
  return resolveFrameOffsetReference(MF, ObjectOffset, isFixed, isSVE, FrameReg,
                                     PreferFP, ForSimm);
}

StackOffset AArch64FrameLowering::resolveFrameOffsetReference(
    const MachineFunction &MF, int64_t ObjectOffset, bool isFixed, bool isSVE,
    Register &FrameReg, bool PreferFP, bool ForSimm) const {
  PreferFP &= !SVEStackSize;

  // Decide which register to address the object with.
  bool UseFP = false;
  if (AFI->hasStackFrame() && !isSVE) {
    if (isFixed) {
      UseFP = hasFP(MF);
    } else if (isCSR && RegInfo->hasStackRealignment(MF)) {
      // References to the CSR area must use FP when re-aligning the stack,
      // because the dynamically-sized alignment padding sits between the
      // SP/BP and the CSR area.
      assert(hasFP(MF) && "Re-aligned stack must have frame pointer");
      UseFP = true;
    } else if (hasFP(MF) && !RegInfo->hasStackRealignment(MF)) {
      // If the FP offset is negative and we're producing a signed immediate,
      // the available range for negative offsets is smaller than for positive
      // ones.
      bool FPOffsetFits = !ForSimm || FPOffset >= -256;
      PreferFP |= Offset > -FPOffset && !SVEStackSize;

      if (MFI.hasVarSizedObjects()) {
        // With variable sized objects the SP offset is unknown, so use FP or
        // BP. Use BP if available and FP is not preferred.
        bool CanUseBP = RegInfo->hasBasePointer(MF);
        if (FPOffsetFits && CanUseBP) // Both are ok. Pick the best.
          UseFP = PreferFP;
        else if (!CanUseBP) // Can't use BP. Forced to use FP.
          UseFP = true;
      } else if (FPOffset >= 0) {
        // Prefer FP over SP+offset when the FP offset is non-negative.
        UseFP = true;
      } else if (MF.hasEHFunclets() && !RegInfo->hasBasePointer(MF)) {
        // Funclets access the locals of the parent frame via FP.
        assert(Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv(),
                                            MF.getFunction().isVarArg()) &&
               "Funclets should only be present on Win64");
        UseFP = true;
      } else {
        if (FPOffsetFits && PreferFP)
          UseFP = true;
      }
    }
  }

  assert(((isFixed || isCSR) || !RegInfo->hasStackRealignment(MF) || !UseFP) &&
         "In the presence of dynamic stack pointer realignment, "
         "non-argument/CSR objects cannot be accessed through the frame pointer");

  if (isSVE) {
    if (hasFP(MF) &&
        (SPOffset.getFixed() || RegInfo->hasStackRealignment(MF))) {
      FrameReg = RegInfo->getFrameRegister(MF);
      return FPOffset;
    }
    FrameReg = RegInfo->hasBasePointer(MF) ? RegInfo->getBaseRegister()
                                           : (unsigned)AArch64::SP;
    return SPOffset;
  }

  StackOffset ScalableOffset = {};
  if (UseFP && !(isFixed || isCSR))
    ScalableOffset = -SVEStackSize;
  if (!UseFP && (isFixed || isCSR))
    ScalableOffset = SVEStackSize;

  if (UseFP) {
    FrameReg = RegInfo->getFrameRegister(MF);
    return StackOffset::getFixed(FPOffset) + ScalableOffset;
  }

  // Use the base pointer if we have one.
  if (RegInfo->hasBasePointer(MF))
    FrameReg = RegInfo->getBaseRegister();
  else {
    assert(!MFI.hasVarSizedObjects() &&
           "Can't use SP when we have var sized objects.");
    FrameReg = AArch64::SP;
  }
static bool produceCompactUnwindFrame(MachineFunction &MF) {
  return Subtarget.isTargetMachO() &&
         !(Subtarget.getTargetLowering()->supportSwiftError() &&
           Attrs.hasAttrSomewhere(Attribute::SwiftError)) &&
         MF.getFunction().getCallingConv() != CallingConv::SwiftTail;
}

static bool invalidateWindowsRegisterPairing(unsigned Reg1, unsigned Reg2,
                                             bool NeedsWinCFI, bool IsFirst,
                                             const TargetRegisterInfo *TRI) {
  if (Reg2 == AArch64::FP)
    return true;
  if (!NeedsWinCFI)
    return false;
  if (TRI->getEncodingValue(Reg2) == TRI->getEncodingValue(Reg1) + 1)
    return false;
  // If pairing a GPR with LR, the pair can be described by the save_lrpair
  // unwind code.
  if (Reg1 >= AArch64::X19 && Reg1 <= AArch64::X27 &&
      (Reg1 - AArch64::X19) % 2 == 0 && Reg2 == AArch64::LR && !IsFirst)
    return false;
  return true;
}

// Returns true if Reg1 and Reg2 cannot be paired using an ldp/stp instruction.
static bool invalidateRegisterPairing(unsigned Reg1, unsigned Reg2,
                                      bool UsesWinAAPCS, bool NeedsWinCFI,
                                      bool NeedsFrameRecord, bool IsFirst,
                                      const TargetRegisterInfo *TRI) {
  if (UsesWinAAPCS)
    return invalidateWindowsRegisterPairing(Reg1, Reg2, NeedsWinCFI, IsFirst,
                                            TRI);

  // If we need to store the frame record, don't pair any register with LR
  // other than FP.
  if (NeedsFrameRecord)
    return Reg2 == AArch64::LR;

  return false;
}
struct RegPairInfo {
  unsigned Reg1 = AArch64::NoRegister;
  unsigned Reg2 = AArch64::NoRegister;
  int FrameIdx;
  int Offset;
  enum RegType { GPR, FPR64, FPR128, PPR, ZPR, VG } Type;

  RegPairInfo() = default;

  bool isPaired() const { return Reg2 != AArch64::NoRegister; }

  unsigned getScale() const {
    switch (Type) {
    case PPR:
      return 2;
    case GPR:
    case FPR64:
    case VG:
      return 8;
    case ZPR:
    case FPR128:
      return 16;
    }
    llvm_unreachable("Unsupported type");
  }

  bool isScalable() const { return Type == PPR || Type == ZPR; }
};
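// The scale is the granule of the spill/restore immediate: for example, a
// paired GPR save at byte offset 32 uses an STP immediate of 32/8 == 4, while
// a ZPR save at scalable offset -32 uses an STR_ZXI immediate of -32/16 == -2
// (in multiples of one vector register).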
unsigned findFreePredicateReg(BitVector &SavedRegs) {
  for (unsigned PReg = AArch64::P8; PReg <= AArch64::P15; ++PReg) {
    if (SavedRegs.test(PReg)) {
      unsigned PNReg = PReg - AArch64::P0 + AArch64::PN0;
      return PNReg;
    }
  }
  return AArch64::NoRegister;
}
static void computeCalleeSaveRegisterPairs(MachineFunction &MF,
                                           ArrayRef<CalleeSavedInfo> CSI,
                                           const TargetRegisterInfo *TRI,
                                           SmallVectorImpl<RegPairInfo> &RegPairs,
                                           bool NeedsFrameRecord) {
  if (CSI.empty())
    return;

  unsigned Count = CSI.size();
  assert((!produceCompactUnwindFrame(MF) || (Count & 1) == 0) &&
         "Odd number of callee-saved regs to spill!");

  int ByteOffset = AFI->getCalleeSavedStackSize();
  int StackFillDir = -1;
  int RegInc = 1;
  unsigned FirstReg = 0;
  if (NeedsWinCFI) {
    // For WinCFI, fill the stack from the bottom up.
    ByteOffset = 0;
    StackFillDir = 1;
    // As the CSI array is reversed to match PrologEpilogInserter, iterate
    // backwards to pair up registers starting from lower numbered ones.
    RegInc = -1;
    FirstReg = Count - 1;
  }

  // When iterating backwards, the loop condition relies on unsigned wraparound.
  for (unsigned i = FirstReg; i < Count; i += RegInc) {
    RegPairInfo RPI;
    RPI.Reg1 = CSI[i].getReg();

    if (AArch64::GPR64RegClass.contains(RPI.Reg1))
      RPI.Type = RegPairInfo::GPR;
    else if (AArch64::FPR64RegClass.contains(RPI.Reg1))
      RPI.Type = RegPairInfo::FPR64;
    else if (AArch64::FPR128RegClass.contains(RPI.Reg1))
      RPI.Type = RegPairInfo::FPR128;
    else if (AArch64::ZPRRegClass.contains(RPI.Reg1))
      RPI.Type = RegPairInfo::ZPR;
    else if (AArch64::PPRRegClass.contains(RPI.Reg1))
      RPI.Type = RegPairInfo::PPR;
    else if (RPI.Reg1 == AArch64::VG)
      RPI.Type = RegPairInfo::VG;
    else
      llvm_unreachable("Unsupported register class.");

    // Add the next reg to the pair if it is in the same register class.
    if (unsigned(i + RegInc) < Count) {
      Register NextReg = CSI[i + RegInc].getReg();
      bool IsFirst = i == FirstReg;
      switch (RPI.Type) {
      case RegPairInfo::GPR:
        if (AArch64::GPR64RegClass.contains(NextReg) &&
            !invalidateRegisterPairing(RPI.Reg1, NextReg, IsWindows,
                                       NeedsWinCFI, NeedsFrameRecord, IsFirst,
                                       TRI))
          RPI.Reg2 = NextReg;
        break;
      case RegPairInfo::FPR64:
        if (AArch64::FPR64RegClass.contains(NextReg) &&
            !invalidateWindowsRegisterPairing(RPI.Reg1, NextReg, NeedsWinCFI,
                                              IsFirst, TRI))
          RPI.Reg2 = NextReg;
        break;
      case RegPairInfo::FPR128:
        if (AArch64::FPR128RegClass.contains(NextReg))
          RPI.Reg2 = NextReg;
        break;
      case RegPairInfo::PPR:
        break;
      case RegPairInfo::ZPR:
        if (((RPI.Reg1 - AArch64::Z0) & 1) == 0 && (NextReg == RPI.Reg1 + 1))
          RPI.Reg2 = NextReg;
        break;
      case RegPairInfo::VG:
        break;
      }
    }

    assert((!RPI.isPaired() ||
            (CSI[i].getFrameIdx() + RegInc == CSI[i + RegInc].getFrameIdx())) &&
           "Out of order callee saved regs!");

    assert((!RPI.isPaired() || !NeedsFrameRecord || RPI.Reg2 != AArch64::FP ||
            RPI.Reg1 == AArch64::LR) &&
           "FrameRecord must be allocated together with LR");

    // Windows AAPCS has FP and LR reversed.
    assert((!RPI.isPaired() || !NeedsFrameRecord || RPI.Reg1 != AArch64::FP ||
            RPI.Reg2 == AArch64::LR) &&
           "FrameRecord must be allocated together with LR");

    // MachO's compact unwind format relies on all registers being stored in
    // adjacent register pairs.
    assert((!produceCompactUnwindFrame(MF) ||
            (RPI.isPaired() &&
             ((RPI.Reg1 == AArch64::LR && RPI.Reg2 == AArch64::FP) ||
              RPI.Reg1 + 1 == RPI.Reg2))) &&
           "Callee-save registers not saved as adjacent register pair!");

    RPI.FrameIdx = CSI[i].getFrameIdx();
    if (NeedsWinCFI && RPI.isPaired())
      // Windows unwind codes record the pair at the lower address.
      RPI.FrameIdx = CSI[i + RegInc].getFrameIdx();
    int Scale = RPI.getScale();

    int OffsetPre = RPI.isScalable() ? ScalableByteOffset : ByteOffset;
    assert(OffsetPre % Scale == 0);

    if (RPI.isScalable())
      ScalableByteOffset += StackFillDir * (RPI.isPaired() ? 2 * Scale : Scale);
    else
      ByteOffset += StackFillDir * (RPI.isPaired() ? 2 * Scale : Scale);

    // Swift's async context is directly before FP, so allocate an extra
    // 8 bytes for it.
    if (NeedsFrameRecord && AFI->hasSwiftAsyncContext() &&
        ((!IsWindows && RPI.Reg2 == AArch64::FP) ||
         (IsWindows && RPI.Reg2 == AArch64::LR)))
      ByteOffset += StackFillDir * 8;

    // Round up the size of a single-register save to the pair size if a gap
    // is needed to keep the callee-save area 16-byte aligned.
    if (NeedGapToAlignStack && !NeedsWinCFI && !RPI.isScalable() &&
        RPI.Type != RegPairInfo::FPR128 && !RPI.isPaired() &&
        ByteOffset % 16 != 0) {
      ByteOffset += 8 * StackFillDir;
      assert(MFI.getObjectAlign(RPI.FrameIdx) <= Align(16));
      // Mark the gap'd slot as 16-byte aligned; only one gap is ever needed.
      MFI.setObjectAlignment(RPI.FrameIdx, Align(16));
      NeedGapToAlignStack = false;
    }

    int OffsetPost = RPI.isScalable() ? ScalableByteOffset : ByteOffset;
    assert(OffsetPost % Scale == 0);
    // If filling top down (default), we want the offset after incrementing.
    // If filling bottom up (WinCFI), we need the original offset.
    int Offset = NeedsWinCFI ? OffsetPre : OffsetPost;

    // The FP, LR pair goes 8 bytes into our expanded 24-byte slot so that the
    // Swift context comes directly before FP.
    if (NeedsFrameRecord && AFI->hasSwiftAsyncContext() &&
        ((!IsWindows && RPI.Reg2 == AArch64::FP) ||
         (IsWindows && RPI.Reg2 == AArch64::LR)))
      Offset += 8;
    RPI.Offset = Offset / Scale;

    assert((!RPI.isPaired() ||
            (!RPI.isScalable() && RPI.Offset >= -64 && RPI.Offset <= 63) ||
            (RPI.isScalable() && RPI.Offset >= -256 && RPI.Offset <= 255)) &&
           "Offset out of bounds for LDP/STP immediate");

    // Save the offset to the frame record so that FP can point at the
    // innermost frame record (spilled FP and LR registers).
    if (NeedsFrameRecord &&
        ((!IsWindows && RPI.Reg1 == AArch64::LR && RPI.Reg2 == AArch64::FP) ||
         (IsWindows && RPI.Reg1 == AArch64::FP && RPI.Reg2 == AArch64::LR)))
      AFI->setCalleeSaveBaseToFrameRecordOffset(Offset);

    RegPairs.push_back(RPI);
    if (RPI.isPaired())
      i += RegInc;
  }
  if (NeedsWinCFI) {
    // Ensure the CSR area is 16-byte aligned, and restore allocation order.
    MFI.setObjectAlignment(CSI[0].getFrameIdx(), Align(16));
    std::reverse(RegPairs.begin(), RegPairs.end());
  }
}
  MRI.freezeReservedRegs();

  if (homogeneousPrologEpilog(MF)) {
    auto MIB = BuildMI(MBB, MI, DL, TII.get(AArch64::HOM_Prolog))
                   .setMIFlag(MachineInstr::FrameSetup);

    for (auto &RPI : RegPairs) {
      MIB.addReg(RPI.Reg1);
      MIB.addReg(RPI.Reg2);

      // Update register live-ins.
      if (!MRI.isReserved(RPI.Reg1))
        MBB.addLiveIn(RPI.Reg1);
      if (RPI.isPaired() && !MRI.isReserved(RPI.Reg2))
        MBB.addLiveIn(RPI.Reg2);
    }
    return true;
  }
  bool PTrueCreated = false;
  for (const RegPairInfo &RPI : llvm::reverse(RegPairs)) {
    unsigned Reg1 = RPI.Reg1;
    unsigned Reg2 = RPI.Reg2;
    unsigned StrOpc;
    switch (RPI.Type) {
    case RegPairInfo::GPR:
      StrOpc = RPI.isPaired() ? AArch64::STPXi : AArch64::STRXui;
      Size = 8;
      Alignment = Align(8);
      break;
    case RegPairInfo::FPR64:
      StrOpc = RPI.isPaired() ? AArch64::STPDi : AArch64::STRDui;
      Size = 8;
      Alignment = Align(8);
      break;
    case RegPairInfo::FPR128:
      StrOpc = RPI.isPaired() ? AArch64::STPQi : AArch64::STRQui;
      Size = 16;
      Alignment = Align(16);
      break;
    case RegPairInfo::ZPR:
      StrOpc = RPI.isPaired() ? AArch64::ST1B_2Z_IMM : AArch64::STR_ZXI;
      Size = 16;
      Alignment = Align(16);
      break;
    case RegPairInfo::PPR:
      StrOpc = AArch64::STR_PXI;
      Size = 2;
      Alignment = Align(2);
      break;
    case RegPairInfo::VG:
      StrOpc = AArch64::STRXui;
      Size = 8;
      Alignment = Align(8);
      break;
    }

    unsigned X0Scratch = AArch64::NoRegister;
    if (Reg1 == AArch64::VG) {
      // Find an available register to store the value of VG to.
      Reg1 = findScratchNonCalleeSaveRegister(&MBB);
      assert(Reg1 != AArch64::NoRegister);

      if (Attrs.hasStreamingBody() && !Attrs.hasStreamingInterface() &&
          AFI->getStreamingVGIdx() == std::numeric_limits<int>::max()) {
        // Locally-streaming functions save both the streaming and the
        // non-streaming value of VG.
      }

      // If any alias of X0 is live across the __arm_get_current_vg call, it
      // must be preserved in the scratch register.
      if (llvm::any_of(MBB.liveins(), [&STI](const auto &LiveIn) {
            return STI.getRegisterInfo()->isSuperOrSubRegisterEq(
                AArch64::X0, LiveIn.PhysReg);
          }))
        X0Scratch = Reg1;

      if (X0Scratch != AArch64::NoRegister) {
        // Stash X0 in the scratch register for the duration of the call.
      }

      const uint32_t *RegMask = TRI->getCallPreservedMask(
          MF, CallingConv::
                  AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1);
      BuildMI(MBB, MI, DL, TII.get(AArch64::BL))
          .addExternalSymbol("__arm_get_current_vg")
          .addRegMask(RegMask)
          .addReg(AArch64::X0, RegState::ImplicitDefine)
          .setMIFlag(MachineInstr::FrameSetup);
      Reg1 = AArch64::X0;
    }

    LLVM_DEBUG(dbgs() << "CSR spill: (" << printReg(Reg1, TRI);
               if (RPI.isPaired()) dbgs() << ", " << printReg(Reg2, TRI);
               dbgs() << ") -> fi#(" << RPI.FrameIdx;
               if (RPI.isPaired()) dbgs() << ", " << RPI.FrameIdx + 1;
               dbgs() << ")\n");

    assert((!NeedsWinCFI || !(Reg1 == AArch64::LR && Reg2 == AArch64::FP)) &&
           "Windows unwdinding requires a consecutive (FP,LR) pair");
    // Windows unwind codes require consecutive registers if registers are
    // paired. Make the switch here, so that the code below will save (x,x+1)
    // and not (x+1,x).
    unsigned FrameIdxReg1 = RPI.FrameIdx;
    unsigned FrameIdxReg2 = RPI.FrameIdx + 1;
    if (NeedsWinCFI && RPI.isPaired()) {
      std::swap(Reg1, Reg2);
      std::swap(FrameIdxReg1, FrameIdxReg2);
    }

    if (RPI.isPaired() && RPI.isScalable()) {
      unsigned PnReg = AFI->getPredicateRegForFillSpill();
      assert(((Subtarget.hasSVE2p1() || Subtarget.hasSME2()) && PnReg != 0) &&
             "Expects SVE2.1 or SME2 target and a predicate register");
#ifdef EXPENSIVE_CHECKS
      auto IsPPR = [](const RegPairInfo &c) {
        return c.Reg1 == RegPairInfo::PPR;
      };
      auto PPRBegin = std::find_if(RegPairs.begin(), RegPairs.end(), IsPPR);
      auto IsZPR = [](const RegPairInfo &c) {
        return c.Type == RegPairInfo::ZPR;
      };
      auto ZPRBegin = std::find_if(RegPairs.begin(), RegPairs.end(), IsZPR);
      assert(!(PPRBegin < ZPRBegin) &&
             "Expected callee save predicate to be handled first");
#endif
      if (!PTrueCreated) {
        PTrueCreated = true;
        BuildMI(MBB, MI, DL, TII.get(AArch64::PTRUE_C_B), PnReg)
            .setMIFlags(MachineInstr::FrameSetup);
      }
      MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(StrOpc));
      if (!MRI.isReserved(Reg1))
        MBB.addLiveIn(Reg1);
      if (!MRI.isReserved(Reg2))
        MBB.addLiveIn(Reg2);
      MIB.addReg(AArch64::Z0_Z1 + (RPI.Reg1 - AArch64::Z0));
    } else { // The code when the pair of ZRegs is not present.
      MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(StrOpc));
      if (!MRI.isReserved(Reg1))
        MBB.addLiveIn(Reg1);
      if (RPI.isPaired()) {
        if (!MRI.isReserved(Reg2))
          MBB.addLiveIn(Reg2);
      }
    }

    // Update the StackIDs of the SVE stack slots.
    if (RPI.Type == RegPairInfo::ZPR || RPI.Type == RegPairInfo::PPR) {
      MFI.setStackID(FrameIdxReg1, TargetStackID::ScalableVector);
    }

    if (X0Scratch != AArch64::NoRegister) {
      // Restore X0 from the scratch register after the call.
    }
  }
  return true;
}
bool AArch64FrameLowering::restoreCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    MutableArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();

  if (homogeneousPrologEpilog(MF, &MBB)) {
    auto MIB = BuildMI(MBB, MBBI, DL, TII.get(AArch64::HOM_Epilog))
                   .setMIFlag(MachineInstr::FrameDestroy);
    for (auto &RPI : RegPairs) {
      MIB.addReg(RPI.Reg1, RegState::Define);
      MIB.addReg(RPI.Reg2, RegState::Define);
    }
    return true;
  }

  // For performance reasons restore SVE registers in increasing order.
  auto IsPPR = [](const RegPairInfo &c) { return c.Type == RegPairInfo::PPR; };
  auto PPRBegin = std::find_if(RegPairs.begin(), RegPairs.end(), IsPPR);
  auto PPREnd = std::find_if_not(PPRBegin, RegPairs.end(), IsPPR);
  std::reverse(PPRBegin, PPREnd);
  auto IsZPR = [](const RegPairInfo &c) { return c.Type == RegPairInfo::ZPR; };
  auto ZPRBegin = std::find_if(RegPairs.begin(), RegPairs.end(), IsZPR);
  auto ZPREnd = std::find_if_not(ZPRBegin, RegPairs.end(), IsZPR);
  std::reverse(ZPRBegin, ZPREnd);
  bool PTrueCreated = false;
  for (const RegPairInfo &RPI : RegPairs) {
    unsigned Reg1 = RPI.Reg1;
    unsigned Reg2 = RPI.Reg2;
    unsigned LdrOpc;
    switch (RPI.Type) {
    case RegPairInfo::GPR:
      LdrOpc = RPI.isPaired() ? AArch64::LDPXi : AArch64::LDRXui;
      Size = 8;
      Alignment = Align(8);
      break;
    case RegPairInfo::FPR64:
      LdrOpc = RPI.isPaired() ? AArch64::LDPDi : AArch64::LDRDui;
      Size = 8;
      Alignment = Align(8);
      break;
    case RegPairInfo::FPR128:
      LdrOpc = RPI.isPaired() ? AArch64::LDPQi : AArch64::LDRQui;
      Size = 16;
      Alignment = Align(16);
      break;
    case RegPairInfo::ZPR:
      LdrOpc = RPI.isPaired() ? AArch64::LD1B_2Z_IMM : AArch64::LDR_ZXI;
      Size = 16;
      Alignment = Align(16);
      break;
    case RegPairInfo::PPR:
      LdrOpc = AArch64::LDR_PXI;
      Size = 2;
      Alignment = Align(2);
      break;
    case RegPairInfo::VG:
      continue;
    }
    LLVM_DEBUG(dbgs() << "CSR restore: (" << printReg(Reg1, TRI);
               if (RPI.isPaired()) dbgs() << ", " << printReg(Reg2, TRI);
               dbgs() << ") -> fi#(" << RPI.FrameIdx;
               if (RPI.isPaired()) dbgs() << ", " << RPI.FrameIdx + 1;
               dbgs() << ")\n");

    // Windows unwind codes require consecutive registers if registers are
    // paired.
    unsigned FrameIdxReg1 = RPI.FrameIdx;
    unsigned FrameIdxReg2 = RPI.FrameIdx + 1;
    if (NeedsWinCFI && RPI.isPaired()) {
      std::swap(Reg1, Reg2);
      std::swap(FrameIdxReg1, FrameIdxReg2);
    }

    if (RPI.isPaired() && RPI.isScalable()) {
      unsigned PnReg = AFI->getPredicateRegForFillSpill();
      assert(((Subtarget.hasSVE2p1() || Subtarget.hasSME2()) && PnReg != 0) &&
             "Expects SVE2.1 or SME2 target and a predicate register");
#ifdef EXPENSIVE_CHECKS
      assert(!(PPRBegin < ZPRBegin) &&
             "Expected callee save predicate to be handled first");
#endif
      if (!PTrueCreated) {
        PTrueCreated = true;
        BuildMI(MBB, MBBI, DL, TII.get(AArch64::PTRUE_C_B), PnReg)
            .setMIFlags(MachineInstr::FrameDestroy);
      }
      MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII.get(LdrOpc));
      MIB.addReg(AArch64::Z0_Z1 + (RPI.Reg1 - AArch64::Z0),
                 RegState::Define);
    } else {
      MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII.get(LdrOpc));
      if (RPI.isPaired()) {
        MIB.addReg(Reg2, getDefRegState(true));
      }
    }
  }
  return true;
}
static std::optional<int> getMMOFrameID(MachineMemOperand *MMO,
                                        const MachineFrameInfo &MFI) {
  auto *PSV =
      dyn_cast_or_null<FixedStackPseudoSourceValue>(MMO->getPseudoValue());
  if (PSV)
    return std::optional<int>(PSV->getFrameIndex());

  return std::nullopt;
}

static std::optional<int> getLdStFrameID(const MachineInstr &MI,
                                         const MachineFrameInfo &MFI) {
  if (!MI.mayLoadOrStore() || MI.getNumMemOperands() < 1)
    return std::nullopt;

  return getMMOFrameID(*MI.memoperands_begin(), MFI);
}
void AArch64FrameLowering::determineStackHazardSlot(
    MachineFunction &MF, BitVector &SavedRegs) const {
  bool HasFPRCSRs = any_of(SavedRegs.set_bits(), [](unsigned Reg) {
    return AArch64::FPR64RegClass.contains(Reg) ||
           AArch64::FPR128RegClass.contains(Reg) ||
           AArch64::ZPRRegClass.contains(Reg) ||
           AArch64::PPRRegClass.contains(Reg);
  });
  bool HasFPRStackObjects = false;
  if (!HasFPRCSRs) {
    std::vector<unsigned> FrameObjects(MFI.getObjectIndexEnd());
    for (auto &MBB : MF) {
      for (auto &MI : MBB) {
        std::optional<int> FI = getLdStFrameID(MI, MFI);
        if (FI && *FI >= 0 && *FI < (int)FrameObjects.size()) {
          if (MFI.getStackID(*FI) == TargetStackID::ScalableVector ||
              AArch64InstrInfo::isFpOrNEON(MI))
            FrameObjects[*FI] |= 2;
          else
            FrameObjects[*FI] |= 1;
        }
      }
    }
    HasFPRStackObjects =
        any_of(FrameObjects, [](unsigned B) { return (B & 3) == 2; });
  }

  if (HasFPRCSRs || HasFPRStackObjects) {
    // Reserve a stack hazard slot to separate GPR and FPR accesses.
    int ID = MFI.CreateStackObject(StackHazardSize, Align(16), false);
    MF.getInfo<AArch64FunctionInfo>()->setStackHazardSlotIndex(ID);
  }
}
  unsigned UnspilledCSGPR = AArch64::NoRegister;
  unsigned UnspilledCSGPRPaired = AArch64::NoRegister;

  // Figure out which callee-saved registers to save/restore.
  unsigned ExtraCSSpill = 0;
  bool HasUnpairedGPR64 = false;
  bool HasPairZReg = false;

  for (unsigned i = 0; CSRegs[i]; ++i) {
    const unsigned Reg = CSRegs[i];

    // Add the base pointer register to SavedRegs if it is callee-save.
    if (Reg == BasePointerReg)
      SavedRegs.set(Reg);

    bool RegUsed = SavedRegs.test(Reg);
    unsigned PairedReg = AArch64::NoRegister;
    const bool RegIsGPR64 = AArch64::GPR64RegClass.contains(Reg);
    if (RegIsGPR64 || AArch64::FPR64RegClass.contains(Reg) ||
        AArch64::FPR128RegClass.contains(Reg)) {
      // Compute the potentially paired register; an unpaired GPR64 shifts the
      // pairing by one.
      if (HasUnpairedGPR64)
        PairedReg = CSRegs[i % 2 == 0 ? i - 1 : i + 1];
      else
        PairedReg = CSRegs[i ^ 1];
    }

    // If the pairing is broken for GPR64s, mark it so from here on.
    if (RegIsGPR64 && !AArch64::GPR64RegClass.contains(PairedReg)) {
      PairedReg = AArch64::NoRegister;
      HasUnpairedGPR64 = true;
    }
    assert(PairedReg == AArch64::NoRegister ||
           AArch64::GPR64RegClass.contains(Reg, PairedReg) ||
           AArch64::FPR64RegClass.contains(Reg, PairedReg) ||
           AArch64::FPR128RegClass.contains(Reg, PairedReg));

    if (!RegUsed) {
      if (AArch64::GPR64RegClass.contains(Reg) &&
          !RegInfo->isReservedReg(MF, Reg)) {
        UnspilledCSGPR = Reg;
        UnspilledCSGPRPaired = PairedReg;
      }
      continue;
    }

    // MachO's compact unwind format relies on all registers being stored in
    // pairs, so if we need to spill one of a pair, we also spill the other.
    if (producePairRegisters(MF) && PairedReg != AArch64::NoRegister &&
        !SavedRegs.test(PairedReg)) {
      SavedRegs.set(PairedReg);
      if (AArch64::GPR64RegClass.contains(PairedReg) &&
          !RegInfo->isReservedReg(MF, PairedReg))
        ExtraCSSpill = PairedReg;
    }
    // Check if there is a pair of ZRegs, so a PReg can be selected for
    // multi-vector spill/fill.
    HasPairZReg |= (AArch64::ZPRRegClass.contains(Reg, CSRegs[i ^ 1]) &&
                    SavedRegs.test(CSRegs[i ^ 1]));
  }

  if (HasPairZReg && (Subtarget.hasSVE2p1() || Subtarget.hasSME2())) {
    // Find a suitable predicate register for the multi-vector spill/fill
    // instructions.
    unsigned PnReg = findFreePredicateReg(SavedRegs);
    if (PnReg != AArch64::NoRegister)
      AFI->setPredicateRegForFillSpill(PnReg);
    // If no free callee-save predicate was found, assign P8.
    if (!AFI->getPredicateRegForFillSpill()) {
      AFI->setPredicateRegForFillSpill(AArch64::PN8);
      SavedRegs.set(AArch64::P8);
    }
    assert(!RegInfo->isReservedReg(MF, AFI->getPredicateRegForFillSpill()) &&
           "Predicate cannot be a reserved register");
  }

  // For the Win64 calling convention on a non-Windows OS, back up X18: callees
  // compiled for Windows may clobber it.
  if (MF.getFunction().getCallingConv() == CallingConv::Win64 &&
      !Subtarget.isTargetWindows())
    SavedRegs.set(AArch64::X18);
  // Calculate the callee-saved stack size.
  unsigned CSStackSize = 0;
  unsigned SVECSStackSize = 0;
  for (unsigned Reg : SavedRegs.set_bits()) {
    auto SpillSize = TRI->getSpillSize(*TRI->getMinimalPhysRegClass(Reg));
    if (AArch64::PPRRegClass.contains(Reg) ||
        AArch64::ZPRRegClass.contains(Reg))
      SVECSStackSize += SpillSize;
    else
      CSStackSize += SpillSize;
  }

  // Increase the size if the function has streaming-mode changes, as VG must
  // be spilled; locally-streaming functions spill both VG values.
  if (requiresSaveVG(MF)) {
    if (Attrs.hasStreamingBody() && !Attrs.hasStreamingInterface())
      CSStackSize += 16;
    else
      CSStackSize += 8;
  }

  // Determine whether a hazard slot should be used and account for its size.
  determineStackHazardSlot(MF, SavedRegs);
  if (AFI->hasStackHazardSlotIndex())
    CSStackSize += StackHazardSize;

  // Save number of saved regs, so we can easily update CSStackSize later.
  unsigned NumSavedRegs = SavedRegs.count();

  // The frame record needs to be created by saving the appropriate registers.
  if (hasFP(MF) ||
      windowsRequiresStackProbe(MF, EstimatedStackSize + CSStackSize + 16)) {
    SavedRegs.set(AArch64::FP);
    SavedRegs.set(AArch64::LR);
  }

  LLVM_DEBUG({
    dbgs() << "*** determineCalleeSaves\nSaved CSRs:";
    for (unsigned Reg : SavedRegs.set_bits())
      dbgs() << ' ' << printReg(Reg, RegInfo);
    dbgs() << "\n";
  });

  // If any callee-saved registers are used, the frame cannot be eliminated.
  int64_t SVEStackSize =
      alignTo(SVECSStackSize + estimateSVEStackObjectOffsets(MFI), 16);
  bool CanEliminateFrame = (SavedRegs.count() == 0) && !SVEStackSize;

  // We may address some of the stack above the canonical frame address,
  // either for our own arguments or during a call; include that in the
  // estimate.
  int64_t CalleeStackUsed = 0;
  for (int I = MFI.getObjectIndexBegin(); I != 0; ++I) {
    int64_t FixedOff = MFI.getObjectOffset(I) + MFI.getObjectSize(I);
    if (FixedOff > CalleeStackUsed)
      CalleeStackUsed = FixedOff;
  }

  // Conservatively always assume BigStack when there are SVE spills.
  bool BigStack = SVEStackSize || (EstimatedStackSize + CSStackSize +
                                   CalleeStackUsed) > EstimatedStackSizeLimit;
  if (BigStack || !CanEliminateFrame || RegInfo->cannotEliminateFrame(MF))
    AFI->setHasStackFrame(true);

  // Estimate if we might need to scavenge a register at some point in order
  // to materialize a stack offset. If so, either spill one additional
  // callee-saved register or reserve a special spill slot to facilitate
  // register scavenging.
  if (BigStack) {
    if (!ExtraCSSpill && UnspilledCSGPR != AArch64::NoRegister) {
      LLVM_DEBUG(dbgs() << "Spilling " << printReg(UnspilledCSGPR, RegInfo)
                        << " to get a scratch register.\n");
      SavedRegs.set(UnspilledCSGPR);
      ExtraCSSpill = UnspilledCSGPR;

      // MachO's compact unwind format relies on all registers being stored in
      // pairs, so spill the pair as well, or revert if no pair exists.
      if (producePairRegisters(MF)) {
        if (UnspilledCSGPRPaired == AArch64::NoRegister) {
          SavedRegs.reset(UnspilledCSGPR);
          ExtraCSSpill = AArch64::NoRegister;
        } else {
          SavedRegs.set(UnspilledCSGPRPaired);
        }
      }
    }

    // If no extra callee-saved register is available, create an emergency
    // spill slot.
    if (!ExtraCSSpill || MF.getRegInfo().isPhysRegUsed(ExtraCSSpill)) {
      const TargetRegisterClass &RC = AArch64::GPR64RegClass;
      unsigned Size = TRI->getSpillSize(RC);
      Align Alignment = TRI->getSpillAlign(RC);
      int FI = MFI.CreateStackObject(Size, Alignment, false);
      RS->addScavengingFrameIndex(FI);
      LLVM_DEBUG(dbgs() << "No available CS registers, allocated fi#" << FI
                        << " as the emergency spill slot.\n");
    }
  }

  // Add the size of the additional 64-bit GPR saves.
  CSStackSize += 8 * (SavedRegs.count() - NumSavedRegs);

  // A Swift asynchronous context extends the frame record with a pointer
  // directly before FP.
  if (hasFP(MF) && AFI->hasSwiftAsyncContext())
    CSStackSize += 8;

  uint64_t AlignedCSStackSize = alignTo(CSStackSize, 16);
  LLVM_DEBUG(dbgs() << "Estimated stack frame size: "
                    << EstimatedStackSize + AlignedCSStackSize << " bytes.\n");

  assert((!MFI.isCalleeSavedInfoValid() ||
          AFI->getCalleeSavedStackSize() == AlignedCSStackSize) &&
         "Should not invalidate callee saved info");

  // Round up to register-pair alignment to avoid additional SP adjustment
  // instructions.
  AFI->setCalleeSavedStackSize(AlignedCSStackSize);
  AFI->setCalleeSaveStackHasFreeSpace(AlignedCSStackSize != CSStackSize);
  AFI->setSVECalleeSavedStackSize(alignTo(SVECSStackSize, 16));
}
bool AArch64FrameLowering::assignCalleeSavedSpillSlots(
    MachineFunction &MF, const TargetRegisterInfo *TRI,
    std::vector<CalleeSavedInfo> &CSI, unsigned &MinCSFrameIndex,
    unsigned &MaxCSFrameIndex) const {
  // To match the canonical Windows frame layout, reverse the list of callee
  // saved registers so PrologEpilogInserter lays them out top down.
  if (NeedsWinCFI)
    std::reverse(CSI.begin(), CSI.end());

  for (auto &CS : CSI) {
    int FrameIdx = MFI.CreateStackObject(Size, Alignment, true);
    CS.setFrameIdx(FrameIdx);
    if ((unsigned)FrameIdx < MinCSFrameIndex)
      MinCSFrameIndex = FrameIdx;
    if ((unsigned)FrameIdx > MaxCSFrameIndex)
      MaxCSFrameIndex = FrameIdx;
  }

  // Insert VG into the list of CSRs, immediately before LR if it is saved.
  if (requiresSaveVG(MF)) {
    std::vector<CalleeSavedInfo> VGSaves;

    auto VGInfo = CalleeSavedInfo(AArch64::VG);
    VGInfo.setRestored(false);
    VGSaves.push_back(VGInfo);

    // Add VG again if the function is locally-streaming, as both the
    // streaming and non-streaming VG values are saved.
    if (Attrs.hasStreamingBody() && !Attrs.hasStreamingInterface())
      VGSaves.push_back(VGInfo);

    bool InsertBeforeLR = false;

    for (unsigned I = 0; I < CSI.size(); I++)
      if (CSI[I].getReg() == AArch64::LR) {
        InsertBeforeLR = true;
        CSI.insert(CSI.begin() + I, VGSaves.begin(), VGSaves.end());
        break;
      }

    if (!InsertBeforeLR)
      CSI.insert(CSI.end(), VGSaves.begin(), VGSaves.end());
  }
  int HazardSlotIndex = std::numeric_limits<int>::max();
  for (auto &CS : CSI) {
    Register Reg = CS.getReg();

    // Create a hazard slot as we switch between GPR and FPR CSRs.
    if (AFI->hasStackHazardSlotIndex() &&
        (!LastReg || !AArch64InstrInfo::isFpOrNEON(LastReg)) &&
        AArch64InstrInfo::isFpOrNEON(Reg)) {
      assert(HazardSlotIndex == std::numeric_limits<int>::max() &&
             "Unexpected register order for hazard slot");
      HazardSlotIndex = MFI.CreateStackObject(StackHazardSize, Align(8), true);
      LLVM_DEBUG(dbgs() << "Created CSR Hazard at slot " << HazardSlotIndex
                        << "\n");
      AFI->setStackHazardCSRSlotIndex(HazardSlotIndex);
      if ((unsigned)HazardSlotIndex < MinCSFrameIndex)
        MinCSFrameIndex = HazardSlotIndex;
      if ((unsigned)HazardSlotIndex > MaxCSFrameIndex)
        MaxCSFrameIndex = HazardSlotIndex;
    }

    int FrameIdx = MFI.CreateStackObject(Size, Alignment, true);
    CS.setFrameIdx(FrameIdx);

    if ((unsigned)FrameIdx < MinCSFrameIndex)
      MinCSFrameIndex = FrameIdx;
    if ((unsigned)FrameIdx > MaxCSFrameIndex)
      MaxCSFrameIndex = FrameIdx;

    // Grab 8 bytes below FP for the extended asynchronous frame info.
    if (hasFP(MF) && AFI->hasSwiftAsyncContext() && !UsesWinAAPCS &&
        Reg == AArch64::FP) {
      FrameIdx = MFI.CreateStackObject(8, Alignment, true);
      AFI->setSwiftAsyncContextFrameIdx(FrameIdx);
      if ((unsigned)FrameIdx < MinCSFrameIndex)
        MinCSFrameIndex = FrameIdx;
      if ((unsigned)FrameIdx > MaxCSFrameIndex)
        MaxCSFrameIndex = FrameIdx;
    }
    LastReg = Reg;
  }

  // Add the hazard slot in the case where no FPR CSRs are present.
  if (AFI->hasStackHazardSlotIndex() &&
      HazardSlotIndex == std::numeric_limits<int>::max()) {
    HazardSlotIndex = MFI.CreateStackObject(StackHazardSize, Align(8), true);
    LLVM_DEBUG(dbgs() << "Created CSR Hazard at slot " << HazardSlotIndex
                      << "\n");
    AFI->setStackHazardCSRSlotIndex(HazardSlotIndex);
    if ((unsigned)HazardSlotIndex < MinCSFrameIndex)
      MinCSFrameIndex = HazardSlotIndex;
    if ((unsigned)HazardSlotIndex > MaxCSFrameIndex)
      MaxCSFrameIndex = HazardSlotIndex;
  }
// Returns true if there are any SVE callee saves.
static bool getSVECalleeSaveSlotRange(const MachineFrameInfo &MFI, int &Min,
                                      int &Max) {
  Min = std::numeric_limits<int>::max();
  Max = std::numeric_limits<int>::min();

  ArrayRef<CalleeSavedInfo> CSI = MFI.getCalleeSavedInfo();
  for (auto &CS : CSI) {
    if (AArch64::ZPRRegClass.contains(CS.getReg()) ||
        AArch64::PPRRegClass.contains(CS.getReg())) {
      assert((Max == std::numeric_limits<int>::min() ||
              Max + 1 == CS.getFrameIdx()) &&
             "SVE CalleeSaves are not consecutive");

      Min = std::min(Min, CS.getFrameIdx());
      Max = std::max(Max, CS.getFrameIdx());
    }
  }
  return Min != std::numeric_limits<int>::max();
}
// Process all the SVE stack objects and determine offsets for each object. If
// AssignOffsets is true, the offsets get assigned.
static int64_t determineSVEStackObjectOffsets(MachineFrameInfo &MFI,
                                              int &MinCSFrameIndex,
                                              int &MaxCSFrameIndex,
                                              bool AssignOffsets) {
#ifndef NDEBUG
  // First process all fixed stack objects.
  for (int I = MFI.getObjectIndexBegin(); I != 0; ++I)
    assert(MFI.getStackID(I) != TargetStackID::ScalableVector &&
           "SVE vectors should never be passed on the stack by value, only by "
           "reference.");
#endif

  auto Assign = [&MFI](int FI, int64_t Offset) {
    MFI.setObjectOffset(FI, Offset);
  };

  int64_t Offset = 0;

  // Then process all callee saved slots.
  if (getSVECalleeSaveSlotRange(MFI, MinCSFrameIndex, MaxCSFrameIndex)) {
    for (int I = MinCSFrameIndex; I <= MaxCSFrameIndex; ++I) {
      Offset += MFI.getObjectSize(I);
      Offset = alignTo(Offset, MFI.getObjectAlign(I));
      if (AssignOffsets)
        Assign(I, -Offset);
    }
  }

  // Create a buffer of SVE objects to allocate. If we have a stack protector
  // that must live in the SVE area, it goes first.
  SmallVector<int, 8> ObjectsToAllocate;
  int StackProtectorFI = -1;
  if (MFI.hasStackProtectorIndex()) {
    StackProtectorFI = MFI.getStackProtectorIndex();
    if (MFI.getStackID(StackProtectorFI) == TargetStackID::ScalableVector)
      ObjectsToAllocate.push_back(StackProtectorFI);
  }
  for (int I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I) {
    if (I == StackProtectorFI)
      continue;
    if (MaxCSFrameIndex >= I && I >= MinCSFrameIndex)
      continue;
    if (MFI.getStackID(I) != TargetStackID::ScalableVector)
      continue;
    ObjectsToAllocate.push_back(I);
  }

  // Allocate all SVE locals and spills.
  for (unsigned FI : ObjectsToAllocate) {
    Align Alignment = MFI.getObjectAlign(FI);
    // The length of an SVE vector is not necessarily a power of two, so
    // objects would need dynamic runtime alignment beyond 16 bytes.
    if (Alignment > Align(16))
      report_fatal_error(
          "Alignment of scalable vectors > 16 bytes is not yet supported");

    Offset = alignTo(Offset + MFI.getObjectSize(FI), Alignment);
    if (AssignOffsets)
      Assign(FI, -Offset);
  }

  return Offset;
}

int64_t AArch64FrameLowering::estimateSVEStackObjectOffsets(
    MachineFrameInfo &MFI) const {
  int MinCSFrameIndex, MaxCSFrameIndex;
  return determineSVEStackObjectOffsets(MFI, MinCSFrameIndex, MaxCSFrameIndex,
                                        false);
}

int64_t AArch64FrameLowering::assignSVEStackObjectOffsets(
    MachineFrameInfo &MFI, int &MinCSFrameIndex, int &MaxCSFrameIndex) const {
  return determineSVEStackObjectOffsets(MFI, MinCSFrameIndex, MaxCSFrameIndex,
                                        true);
}

void AArch64FrameLowering::processFunctionBeforeFrameFinalized(
    MachineFunction &MF, RegScavenger *RS) const {
  assert(getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown &&
         "Upwards growing stack unsupported");

  int MinCSFrameIndex, MaxCSFrameIndex;
  int64_t SVEStackSize =
      assignSVEStackObjectOffsets(MFI, MinCSFrameIndex, MaxCSFrameIndex);

  int64_t FixedObject =
      getFixedObjectSize(MF, AFI, IsWin64, /*IsFunclet=*/false);

  // Win64 EH requires initializing the UnwindHelp slot; find a scratch
  // register for the store.
  Register DstReg = RS->FindUnusedReg(&AArch64::GPR64commonRegClass);
  assert(DstReg && "There must be a free register after frame setup");
struct TagStoreInstr {
  MachineInstr *MI;
  int64_t Offset, Size;
};

class TagStoreEdit {
  // Frame-register update, if any, folded into the tag store sequence.
  std::optional<int64_t> FrameRegUpdate;
  // MIFlags of the frame-register update instruction.
  unsigned FrameRegUpdateFlags;

public:
  TagStoreEdit(MachineBasicBlock *MBB, bool ZeroData)
      : MBB(MBB), ZeroData(ZeroData) {}

  // Add an instruction to be replaced. Instructions must be added in
  // ascending order of Offset and must be adjacent.
  void addInstruction(TagStoreInstr I) {
    assert((TagStores.empty() ||
            TagStores.back().Offset + TagStores.back().Size == I.Offset) &&
           "Non-adjacent tag store instructions.");
    TagStores.push_back(I);
  }
  const int64_t kMinOffset = -256 * 16;
  const int64_t kMaxOffset = 255 * 16;

  Register BaseReg = FrameReg;
  int64_t BaseRegOffsetBytes = FrameRegOffset.getFixed();
  if (BaseRegOffsetBytes < kMinOffset ||
      BaseRegOffsetBytes + (Size - Size % 32) > kMaxOffset ||
      // BaseReg can be FP, which is not necessarily 16-byte aligned; in that
      // case BaseRegOffsetBytes will not be aligned to 16 bytes, which is
      // required for the offset of ST2G.
      BaseRegOffsetBytes % 16 != 0) {
    Register ScratchReg = MRI->createVirtualRegister(&AArch64::GPR64RegClass);
    BaseReg = ScratchReg;
    BaseRegOffsetBytes = 0;
  }

  MachineInstr *LastI = nullptr;
  while (Size) {
    int64_t InstrSize = (Size > 16) ? 32 : 16;
    unsigned Opcode =
        InstrSize == 16
            ? (ZeroData ? AArch64::STZGi : AArch64::STGi)
            : (ZeroData ? AArch64::STZ2Gi : AArch64::ST2Gi);
    assert(BaseRegOffsetBytes % 16 == 0);
    MachineInstr *I = BuildMI(*MBB, InsertI, DL, TII->get(Opcode))
                          .addReg(AArch64::SP)
                          .addReg(BaseReg)
                          .addImm(BaseRegOffsetBytes / 16)
                          .setMIFlags(FrameRegUpdateFlags);
    // A store to [BaseReg, #0] should go last for an opportunity to fold the
    // base register update.
    if (BaseRegOffsetBytes == 0)
      LastI = I;
    BaseRegOffsetBytes += InstrSize;
    Size -= InstrSize;
  }
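// Unrolling example: tagging a 48-byte object at [SP, #0] emits
// "st2g sp, [sp, #0]" (32 bytes) followed by "stg sp, [sp, #32]" (16 bytes);
// with ZeroData set, the stz2g/stzg forms are used instead.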
  Register BaseReg = FrameRegUpdate
                         ? FrameReg
                         : MRI->createVirtualRegister(&AArch64::GPR64RegClass);
  Register SizeReg = MRI->createVirtualRegister(&AArch64::GPR64RegClass);

  int64_t LoopSize = Size;
  // If the loop size is not a multiple of 32, split off one 16-byte store at
  // the end to fold the BaseReg update into.
  if (FrameRegUpdate && *FrameRegUpdate)
    LoopSize -= LoopSize % 32;
  MachineInstr *LoopI = BuildMI(*MBB, InsertI, DL,
                                TII->get(ZeroData ? AArch64::STZGloop_wback
                                                  : AArch64::STGloop_wback))
                            .addDef(SizeReg)
                            .addDef(BaseReg)
                            .addImm(LoopSize)
                            .addReg(BaseReg)
                            .setMemRefs(CombinedMemRefs);
  if (FrameRegUpdate)
    LoopI->setFlags(FrameRegUpdateFlags);

  int64_t ExtraBaseRegUpdate =
      FrameRegUpdate ? (*FrameRegUpdate - FrameRegOffset.getFixed() - Size) : 0;
  if (LoopSize < Size) {
    // Tag 16 more bytes at BaseReg and update BaseReg.
    BuildMI(*MBB, InsertI, DL,
            TII->get(ZeroData ? AArch64::STZGPostIndex : AArch64::STGPostIndex))
        .addDef(BaseReg)
        .addReg(BaseReg)
        .addReg(BaseReg)
        .addImm(1 + ExtraBaseRegUpdate / 16)
        .setMemRefs(CombinedMemRefs)
        .setMIFlags(FrameRegUpdateFlags);
  } else if (ExtraBaseRegUpdate) {
    // Update BaseReg.
    BuildMI(*MBB, InsertI, DL,
            TII->get(ExtraBaseRegUpdate > 0 ? AArch64::ADDXri : AArch64::SUBXri))
        .addDef(BaseReg)
        .addReg(BaseReg)
        .addImm(std::abs(ExtraBaseRegUpdate))
        .addImm(0)
        .setMIFlags(FrameRegUpdateFlags);
  }
// Check if *II is a register update that can be merged into STGloop that ends
// at (Reg + Size), and store the merged register update in *TotalOffset.
static bool canMergeRegUpdate(MachineBasicBlock::iterator II, unsigned Reg,
                              int64_t Size, int64_t *TotalOffset) {
  MachineInstr &MI = *II;
  if ((MI.getOpcode() == AArch64::ADDXri ||
       MI.getOpcode() == AArch64::SUBXri) &&
      MI.getOperand(0).getReg() == Reg && MI.getOperand(1).getReg() == Reg) {
    unsigned Shift = AArch64_AM::getShiftValue(MI.getOperand(3).getImm());
    int64_t Offset = MI.getOperand(2).getImm() << Shift;
    if (MI.getOpcode() == AArch64::SUBXri)
      Offset = -Offset;
    int64_t AbsPostOffset = std::abs(Offset - Size);
    const int64_t kMaxOffset =
        0xFFF; // Max encoding for unshifted ADDXri/SUBXri.
    if (AbsPostOffset <= kMaxOffset && AbsPostOffset % 16 == 0) {
      *TotalOffset = Offset;
      return true;
    }
  }
  return false;
}
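// Example of the folding this enables: an epilogue "add sp, sp, #N" that
// immediately follows the tag stores can be absorbed into their post-index
// writeback, provided the leftover adjustment is 16-byte aligned and fits the
// unshifted ADD/SUB immediate (<= 0xFFF), saving a separate SP update.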
static void mergeMemRefs(const SmallVectorImpl<TagStoreInstr> &TSE,
                         SmallVectorImpl<MachineMemOperand *> &MemRefs) {
  MemRefs.clear();
  for (auto &TS : TSE) {
    MachineInstr *MI = TS.MI;
    // An instruction without memory operands may access anything; be
    // conservative and return an empty list.
    if (MI->memoperands_empty()) {
      MemRefs.clear();
      return;
    }
    MemRefs.append(MI->memoperands_begin(), MI->memoperands_end());
  }
}
void TagStoreEdit::emitCode(MachineBasicBlock::iterator &InsertI,
                            const AArch64FrameLowering *TFI,
                            bool TryMergeSPUpdate) {
  if (TagStores.empty())
    return;
  TagStoreInstr &FirstTagStore = TagStores[0];
  TagStoreInstr &LastTagStore = TagStores[TagStores.size() - 1];
  Size = LastTagStore.Offset - FirstTagStore.Offset + LastTagStore.Size;
  DL = TagStores[0].MI->getDebugLoc();

  Register Reg;
  FrameRegOffset = TFI->resolveFrameOffsetReference(
      *MF, FirstTagStore.Offset, false /*isFixed*/, false /*isSVE*/, Reg,
      /*PreferFP=*/false, /*ForSimm=*/true);
  FrameReg = Reg;
  FrameRegUpdate = std::nullopt;

  mergeMemRefs(TagStores, CombinedMemRefs);

  LLVM_DEBUG({
    dbgs() << "Replacing adjacent STG instructions:\n";
    for (const auto &Instr : TagStores) {
      dbgs() << "  " << *Instr.MI;
    }
  });

  if (Size < kSetTagLoopThreshold) {
    if (TagStores.size() < 2)
      return;
    emitUnrolled(InsertI);
  } else {
    MachineInstr *UpdateInstr = nullptr;
    int64_t TotalOffset = 0;
    if (TryMergeSPUpdate) {
      // See if we can merge the base register update into the STGloop. This is
      // done in AArch64LoadStoreOptimizer for "normal" stores, but STGloop is
      // too unusual for that and only realistically appears in the epilogue.
      if (InsertI != MBB->end() &&
          canMergeRegUpdate(InsertI, FrameReg, FrameRegOffset.getFixed() + Size,
                            &TotalOffset)) {
        UpdateInstr = &*InsertI++;
      }
    }

    if (!UpdateInstr && TagStores.size() < 2)
      return;

    if (UpdateInstr) {
      FrameRegUpdate = TotalOffset;
      FrameRegUpdateFlags = UpdateInstr->getFlags();
    }
    emitLoop(InsertI);
    if (UpdateInstr)
      UpdateInstr->eraseFromParent();
  }

  for (auto &TS : TagStores)
    TS.MI->eraseFromParent();
}
static bool isMergeableStackTaggingInstruction(MachineInstr &MI,
                                               int64_t &Offset, int64_t &Size,
                                               bool &ZeroData) {
  unsigned Opcode = MI.getOpcode();
  ZeroData = (Opcode == AArch64::STZGloop || Opcode == AArch64::STZGi ||
              Opcode == AArch64::STZ2Gi);

  if (Opcode == AArch64::STGloop || Opcode == AArch64::STZGloop) {
    if (!MI.getOperand(0).isDead() || !MI.getOperand(1).isDead())
      return false;
    if (!MI.getOperand(2).isImm() || !MI.getOperand(3).isFI())
      return false;
    Offset = MFI.getObjectOffset(MI.getOperand(3).getIndex());
    Size = MI.getOperand(2).getImm();
    return true;
  }

  if (Opcode == AArch64::STGi || Opcode == AArch64::STZGi)
    Size = 16;
  else if (Opcode == AArch64::ST2Gi || Opcode == AArch64::STZ2Gi)
    Size = 32;
  else
    return false;

  if (MI.getOperand(0).getReg() != AArch64::SP || !MI.getOperand(1).isFI())
    return false;

  Offset = MFI.getObjectOffset(MI.getOperand(1).getIndex()) +
           16 * MI.getOperand(2).getImm();
  return true;
}
MachineBasicBlock::iterator tryMergeAdjacentSTG(MachineBasicBlock::iterator II,
                                                const AArch64FrameLowering *TFI,
                                                RegScavenger *RS) {
  if (!isMergeableStackTaggingInstruction(MI, Offset, Size, FirstZeroData))
    return II;

  SmallVector<TagStoreInstr, 8> Instrs;
  Instrs.emplace_back(&MI, Offset, Size);

  constexpr int kScanLimit = 10;
  int Count = 0;
  for (MachineBasicBlock::iterator E = MBB->end();
       NextI != E && Count < kScanLimit; ++NextI) {
    MachineInstr &MI = *NextI;
    bool ZeroData;
    int64_t Size, Offset;
    // Collect instructions that update memory tags with a FrameIndex operand
    // and (when applicable) constant size, and whose output registers are
    // dead. These instructions effectively have no inputs or outputs, so we
    // are free to skip any non-aliasing instructions in between.
    if (isMergeableStackTaggingInstruction(MI, Offset, Size, ZeroData)) {
      if (ZeroData != FirstZeroData)
        break;
      Instrs.emplace_back(&MI, Offset, Size);
      continue;
    }

    // Only count non-transient instructions toward the scan limit.
    if (!MI.isTransient())
      ++Count;

    // Reject anything that may alias the collected instructions.
    if (MI.mayLoadOrStore() || MI.hasUnmodeledSideEffects())
      break;
  }

  // The merged code is inserted after the last tagging instruction found.
  // Check whether NZCV is live there: the stg loops emitted below would
  // clobber it.
  LivePhysRegs LiveRegs(*TRI);
  LiveRegs.addLiveOuts(*MBB);
  for (auto I = MBB->rbegin();; ++I) {
    MachineInstr &MI = *I;
    if (MI == InsertI)
      break;
    LiveRegs.stepBackward(*I);
  }
  InsertI++;
  if (LiveRegs.contains(AArch64::NZCV))
    return InsertI;

  llvm::stable_sort(Instrs,
                    [](const TagStoreInstr &Left, const TagStoreInstr &Right) {
                      return Left.Offset < Right.Offset;
                    });

  // Make sure that we don't have any overlapping stores.
  int64_t CurOffset = Instrs[0].Offset;
  for (auto &Instr : Instrs) {
    if (CurOffset > Instr.Offset)
      return NextI;
    CurOffset = Instr.Offset + Instr.Size;
  }

  // Find contiguous runs of tagged memory and emit shorter instruction
  // sequences for them when possible.
  TagStoreEdit TSE(MBB, FirstZeroData);
  std::optional<int64_t> EndOffset;
  for (auto &Instr : Instrs) {
    if (EndOffset && *EndOffset != Instr.Offset) {
      // Found a gap.
      TSE.emitCode(InsertI, TFI, /*TryMergeSPUpdate=*/false);
      TSE.clear();
    }

    TSE.addInstruction(Instr);
    EndOffset = Instr.Offset + Instr.Size;
  }

  return InsertI;
}
MachineBasicBlock::iterator emitVGSaveRestore(MachineBasicBlock::iterator II,
                                              const AArch64FrameLowering *TFI) {
  MachineInstr &MI = *II;
  if (MI.getOpcode() != AArch64::VGSavePseudo &&
      MI.getOpcode() != AArch64::VGRestorePseudo)
    return II;

  bool LocallyStreaming =
      FuncAttrs.hasStreamingBody() && !FuncAttrs.hasStreamingInterface();

  int64_t VGFrameIdx =
      LocallyStreaming ? AFI->getStreamingVGIdx() : AFI->getVGIdx();
  assert(VGFrameIdx != std::numeric_limits<int>::max() &&
         "Expected FrameIdx for VG");

  unsigned CFIIndex;
  if (MI.getOpcode() == AArch64::VGSavePseudo) {
    CFIIndex = MF->addFrameInst(MCCFIInstruction::createOffset(
        nullptr, TRI->getDwarfRegNum(AArch64::VG, true), Offset));
  } else {
    CFIIndex = MF->addFrameInst(MCCFIInstruction::createRestore(
        nullptr, TRI->getDwarfRegNum(AArch64::VG, true)));
  }

  MachineInstr *UnwindInst = BuildMI(*MBB, II, II->getDebugLoc(),
                                     TII->get(TargetOpcode::CFI_INSTRUCTION))
                                 .addCFIIndex(CFIIndex);

  MI.eraseFromParent();
  return UnwindInst->getIterator();
}

void AArch64FrameLowering::processFunctionBeforeFrameIndicesReplaced(
    MachineFunction &MF, RegScavenger *RS) const {
  for (auto &BB : MF)
    for (MachineBasicBlock::iterator II = BB.begin(); II != BB.end();) {
      if (requiresSaveVG(MF))
        II = emitVGSaveRestore(II, this);
      if (StackTaggingMergeSetTag)
        II = tryMergeAdjacentSTG(II, this, RS);
    }
}
StackOffset AArch64FrameLowering::getFrameIndexReferencePreferSP(
    const MachineFunction &MF, int FI, Register &FrameReg,
    bool IgnoreSPUpdates) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (IgnoreSPUpdates) {
    LLVM_DEBUG(dbgs() << "Offset from the SP for " << FI << " is "
                      << MFI.getObjectOffset(FI) << "\n");
    FrameReg = AArch64::SP;
    return StackOffset::getFixed(MFI.getObjectOffset(FI));
  }

  FrameReg = AArch64::SP;
  return StackOffset::getFixed(MFI.getObjectOffset(FI));
}
struct FrameObject {
  bool IsValid = false;
  // Index of the object in MFI.
  int ObjectIndex = 0;
  // Group ID this object belongs to.
  int GroupIndex = -1;
  // This object should be placed first (closest to SP).
  bool ObjectFirst = false;
  // This object's group (which always contains the object with
  // ObjectFirst==true) should be placed first.
  bool GroupFirst = false;

  // Used to distinguish between FP and GPR accesses. The values were chosen
  // so that they sort FPR < Hazard < GPR and can be or'd together.
  unsigned Accesses = 0;
  enum { AccessFPR = 1, AccessHazard = 2, AccessGPR = 4 };
};

class GroupBuilder {
  SmallVector<int, 8> CurrentMembers;
  int NextGroupIndex = 0;
  std::vector<FrameObject> &Objects;

public:
  GroupBuilder(std::vector<FrameObject> &Objects) : Objects(Objects) {}

  void AddMember(int Index) { CurrentMembers.push_back(Index); }

  void EndCurrentGroup() {
    if (CurrentMembers.size() > 1) {
      // Create a new group with the current member list.
      for (int Index : CurrentMembers) {
        Objects[Index].GroupIndex = NextGroupIndex;
      }
      ++NextGroupIndex;
    }
    CurrentMembers.clear();
  }
};
bool FrameObjectCompare(const FrameObject &A, const FrameObject &B) {
  return std::make_tuple(!A.IsValid, A.Accesses, A.ObjectFirst, A.GroupFirst,
                         A.GroupIndex, A.ObjectIndex) <
         std::make_tuple(!B.IsValid, B.Accesses, B.ObjectFirst, B.GroupFirst,
                         B.GroupIndex, B.ObjectIndex);
}
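// The tuple comparison sorts valid objects first, then groups them by access
// kind (the Accesses values above were chosen so FPR(1) < Hazard(2) < GPR(4)),
// then places "first" objects and groups ahead, and finally falls back to
// group and object index so the order is deterministic.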
  std::vector<FrameObject> FrameObjects(MFI.getObjectIndexEnd());
  for (auto &Obj : ObjectsToAllocate) {
    FrameObjects[Obj].IsValid = true;
    FrameObjects[Obj].ObjectIndex = Obj;
  }

  // Identify FPR vs GPR slots for hazards, and group stack slots that are
  // tagged together.
  GroupBuilder GB(FrameObjects);
  for (auto &MBB : MF) {
    for (auto &MI : MBB) {
      if (MI.isDebugInstr())
        continue;

      if (AFI.hasStackHazardSlotIndex()) {
        std::optional<int> FI = getLdStFrameID(MI, MFI);
        if (FI && *FI >= 0 && *FI < (int)FrameObjects.size()) {
          if (MFI.getStackID(*FI) == TargetStackID::ScalableVector ||
              AArch64InstrInfo::isFpOrNEON(MI))
            FrameObjects[*FI].Accesses |= FrameObject::AccessFPR;
          else
            FrameObjects[*FI].Accesses |= FrameObject::AccessGPR;
        }
      }

      int OpIndex;
      switch (MI.getOpcode()) {
      case AArch64::STGloop:
      case AArch64::STZGloop:
        OpIndex = 3;
        break;
      case AArch64::STGi:
      case AArch64::STZGi:
      case AArch64::ST2Gi:
      case AArch64::STZ2Gi:
        OpIndex = 1;
        break;
      default:
        OpIndex = -1;
      }

      int TaggedFI = -1;
      if (OpIndex >= 0) {
        const MachineOperand &MO = MI.getOperand(OpIndex);
        if (MO.isFI()) {
          int FI = MO.getIndex();
          if (FI >= 0 && FI < MFI.getObjectIndexEnd() &&
              FrameObjects[FI].IsValid)
            TaggedFI = FI;
        }
      }

      // If this is a stack tagging instruction for a slot that is not part of
      // a group yet, start a new group or add it to the current one.
      if (TaggedFI >= 0)
        GB.AddMember(TaggedFI);
      else
        GB.EndCurrentGroup();
    }
    // Groups should never span multiple basic blocks.
    GB.EndCurrentGroup();
  }

  if (AFI.hasStackHazardSlotIndex()) {
    FrameObjects[AFI.getStackHazardSlotIndex()].Accesses =
        FrameObject::AccessHazard;
    // If a stack object is unknown, or both GPR and FPR, sort it into GPR.
    for (auto &Obj : FrameObjects)
      if (!Obj.Accesses ||
          Obj.Accesses == (FrameObject::AccessGPR | FrameObject::AccessFPR))
        Obj.Accesses = FrameObject::AccessGPR;
  }

  // If the function's tagged base pointer is pinned to a stack slot, we want
  // that slot to go first, likely at SP + 0, saving an instruction when
  // generating the base pointer because IRG does not take an immediate offset.
  if (std::optional<int> TBPI = AFI.getTaggedBasePointerIndex()) {
    FrameObjects[*TBPI].ObjectFirst = true;
    FrameObjects[*TBPI].GroupFirst = true;
    int FirstGroupIndex = FrameObjects[*TBPI].GroupIndex;
    if (FirstGroupIndex >= 0)
      for (FrameObject &Object : FrameObjects)
        if (Object.GroupIndex == FirstGroupIndex)
          Object.GroupFirst = true;
  }

  llvm::stable_sort(FrameObjects, FrameObjectCompare);

  int i = 0;
  for (auto &Obj : FrameObjects) {
    // All invalid items are sorted at the end, so it's safe to stop.
    if (!Obj.IsValid)
      break;
    ObjectsToAllocate[i++] = Obj.ObjectIndex;
  }

  LLVM_DEBUG({
    dbgs() << "Final frame order:\n";
    for (auto &Obj : FrameObjects) {
      if (!Obj.IsValid)
        break;
      dbgs() << "  " << Obj.ObjectIndex << ": group " << Obj.GroupIndex;
      if (Obj.ObjectFirst)
        dbgs() << ", first";
      if (Obj.GroupFirst)
        dbgs() << ", group-first";
      dbgs() << "\n";
    }
  });
MachineBasicBlock::iterator
AArch64FrameLowering::inlineStackProbeLoopExactMultiple(
    MachineBasicBlock::iterator MBBI, int64_t ProbeSize,
    Register TargetReg) const {
  MachineBasicBlock &MBB = *MBBI->getParent();
  MachineFunction &MF = *MBB.getParent();

  MachineFunction::iterator MBBInsertPoint = std::next(MBB.getIterator());
  MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(MBB.getBasicBlock());
  MF.insert(MBBInsertPoint, LoopMBB);
  MachineBasicBlock *ExitMBB = MF.CreateMachineBasicBlock(MBB.getBasicBlock());
  MF.insert(MBBInsertPoint, ExitMBB);

  // The loop decrements SP by ProbeSize, probes at [SP, #0], and branches
  // back while SP != TargetReg.

  return ExitMBB->begin();
}
void AArch64FrameLowering::inlineStackProbeFixed(
    MachineBasicBlock::iterator MBBI, Register ScratchReg, int64_t FrameSize,
    StackOffset CFAOffset) const {
  int64_t ProbeSize = AFI->getStackProbeSize();
  int64_t NumBlocks = FrameSize / ProbeSize;
  int64_t ResidualSize = FrameSize % ProbeSize;

  LLVM_DEBUG(dbgs() << "Stack probing: total " << FrameSize << " bytes, "
                    << NumBlocks << " blocks of " << ProbeSize
                    << " bytes, plus " << ResidualSize << " bytes\n");

  // Decrement SP one probe interval at a time, with a probe (store of XZR) at
  // each step; a small block count is emitted straight-line.
  if (NumBlocks <= AArch64::StackProbeMaxLoopUnroll) {
    for (int i = 0; i < NumBlocks; ++i) {
      emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP,
                      StackOffset::getFixed(-ProbeSize), TII,
                      MachineInstr::FrameSetup, false, false, nullptr,
                      EmitAsyncCFI && !HasFP, CFAOffset);
      CFAOffset += StackOffset::getFixed(ProbeSize);
      // STR XZR, [SP]
    }
  } else if (NumBlocks != 0) {
    // Compute the loop bound in ScratchReg, then probe in a loop of exact
    // ProbeSize multiples.
    emitFrameOffset(MBB, MBBI, DL, ScratchReg, AArch64::SP,
                    StackOffset::getFixed(-ProbeSize * NumBlocks), TII,
                    MachineInstr::FrameSetup, false, false, nullptr,
                    EmitAsyncCFI && !HasFP, CFAOffset);
    CFAOffset += StackOffset::getFixed(ProbeSize * NumBlocks);
    MBBI = inlineStackProbeLoopExactMultiple(MBBI, ProbeSize, ScratchReg);

    if (EmitAsyncCFI && !HasFP) {
      // Set the CFA register back to SP.
      unsigned Reg = RegInfo.getDwarfRegNum(AArch64::SP, true);
      unsigned CFIIndex =
          MF.addFrameInst(MCCFIInstruction::createDefCfaRegister(nullptr, Reg));
      BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
    }
  }

  if (ResidualSize != 0) {
    // Allocate the residual and probe it if it may contain a live value.
    emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP,
                    StackOffset::getFixed(-ResidualSize), TII,
                    MachineInstr::FrameSetup, false, false, nullptr,
                    EmitAsyncCFI && !HasFP, CFAOffset);
  }
}
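// Sizing example: FrameSize == 40960 with a 4096-byte probe size yields
// NumBlocks == 10 and ResidualSize == 0, so the allocation becomes a probing
// loop of ten 4096-byte steps rather than a single unprobed 40 KiB drop.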
  if (MI.getOpcode() == AArch64::PROBED_STACKALLOC ||
      MI.getOpcode() == AArch64::PROBED_STACKALLOC_VAR)
    ToReplace.push_back(&MI);

  for (MachineInstr *MI : ToReplace) {
    if (MI->getOpcode() == AArch64::PROBED_STACKALLOC) {
      Register ScratchReg = MI->getOperand(0).getReg();
      int64_t FrameSize = MI->getOperand(1).getImm();
      StackOffset CFAOffset = StackOffset::get(MI->getOperand(2).getImm(),
                                               MI->getOperand(3).getImm());
      inlineStackProbeFixed(MI->getIterator(), ScratchReg, FrameSize,
                            CFAOffset);
    } else {
      assert(MI->getOpcode() == AArch64::PROBED_STACKALLOC_VAR &&
             "Stack probe pseudo-instruction expected");
      Register TargetReg = MI->getOperand(0).getReg();
      (void)TII->probedStackAlloc(MI->getIterator(), TargetReg, true);
    }
    MI->eraseFromParent();
  }
  bool operator<(const StackAccess &Rhs) const {
    return std::make_tuple(start(), Idx) <
           std::make_tuple(Rhs.start(), Rhs.Idx);
  }

  bool isCPU() const {
    // Predicate register load and store instructions execute on the CPU.
    return AccessTypes & (AccessType::GPR | AccessType::PPR);
  }
  bool isSME() const { return AccessTypes & AccessType::FPR; }
  bool isMixed() const { return isCPU() && isSME(); }

  std::string getTypeString() const {
    switch (AccessTypes) {
    case AccessType::FPR:
      return "FPR";
    case AccessType::PPR:
      return "PPR";
    case AccessType::GPR:
      return "GPR";
    case AccessType::NotAccessed:
      return "NA";
    default:
      return "Mixed";
    }
  }

  void print(raw_ostream &OS) const {
    OS << getTypeString() << " stack object at [SP"
       << (Offset.getFixed() < 0 ? "" : "+") << Offset.getFixed();
    if (Offset.getScalable())
      OS << (Offset.getScalable() < 0 ? "" : "+") << Offset.getScalable()
         << " * vscale";
    OS << "]";
  }
void AArch64FrameLowering::emitRemarks(
    const MachineFunction &MF, MachineOptimizationRemarkEmitter *ORE) const {
  if (Attrs.hasNonStreamingInterfaceAndBody())
    return;

  const uint64_t HazardSize =
      (StackHazardSize) ? StackHazardSize : StackHazardRemarkSize;

  if (HazardSize == 0)
    return;

  std::vector<StackAccess> StackAccesses(MFI.getNumObjects());

  size_t NumFPLdSt = 0;
  size_t NumNonFPLdSt = 0;

  // Collect stack accesses via load/store instructions.
  for (const MachineBasicBlock &MBB : MF) {
    for (const MachineInstr &MI : MBB) {
      if (!MI.mayLoadOrStore() || MI.getNumMemOperands() < 1)
        continue;
      std::optional<int> FI = getLdStFrameID(MI, MFI);
      if (FI && *FI >= 0 && *FI < (int)MFI.getNumObjects()) {
        int FrameIdx = *FI;

        size_t ArrIdx = FrameIdx + MFI.getNumFixedObjects();
        if (StackAccesses[ArrIdx].AccessTypes == StackAccess::NotAccessed) {
          StackAccesses[ArrIdx].Idx = FrameIdx;
          StackAccesses[ArrIdx].Offset =
              getFrameIndexReferenceFromSP(MF, FrameIdx);
          StackAccesses[ArrIdx].Size = MFI.getObjectSize(FrameIdx);
        }

        unsigned RegTy = StackAccess::AccessType::GPR;
        if (MFI.getStackID(FrameIdx) == TargetStackID::ScalableVector) {
          if (AArch64::PPRRegClass.contains(MI.getOperand(0).getReg()))
            RegTy = StackAccess::PPR;
          else
            RegTy = StackAccess::FPR;
        } else if (AArch64InstrInfo::isFpOrNEON(MI)) {
          RegTy = StackAccess::FPR;
        }

        StackAccesses[ArrIdx].AccessTypes |= RegTy;

        if (RegTy == StackAccess::FPR)
          ++NumFPLdSt;
        else
          ++NumNonFPLdSt;
      }
    }
  }

  if (NumFPLdSt == 0 || NumNonFPLdSt == 0)
    return;

  llvm::sort(StackAccesses);
  StackAccesses.erase(llvm::remove_if(StackAccesses,
                                      [](const StackAccess &S) {
                                        return S.AccessTypes ==
                                               StackAccess::NotAccessed;
                                      }),
                      StackAccesses.end());

  SmallVector<const StackAccess *> MixedObjects;
  SmallVector<std::pair<const StackAccess *, const StackAccess *>> HazardPairs;

  if (StackAccesses.front().isMixed())
    MixedObjects.push_back(&StackAccesses.front());

  for (auto It = StackAccesses.begin(), End = std::prev(StackAccesses.end());
       It != End; ++It) {
    const auto &First = *It;
    const auto &Second = *(It + 1);

    if (Second.isMixed())
      MixedObjects.push_back(&Second);

    if ((First.isSME() && Second.isCPU()) ||
        (First.isCPU() && Second.isSME())) {
      uint64_t Distance = static_cast<uint64_t>(Second.start() - First.end());
      if (Distance < HazardSize)
        HazardPairs.emplace_back(&First, &Second);
    }
  }

  auto EmitRemark = [&](llvm::StringRef Str) {
    ORE->emit([&]() {
      auto R = MachineOptimizationRemarkAnalysis(
          "sme", "StackHazard", MF.getFunction().getSubprogram(), &MF.front());
      return R << formatv("stack hazard in '{0}': ", MF.getName()).str() << Str;
    });
  };

  for (const auto &P : HazardPairs)
    EmitRemark(formatv("{0} is too close to {1}", *P.first, *P.second).str());

  for (const auto *Obj : MixedObjects)
    EmitRemark(
        formatv("{0} accessed by both GP and FP instructions", *Obj).str());
}
unsigned const MachineRegisterInfo * MRI
for(const MachineOperand &MO :llvm::drop_begin(OldMI.operands(), Desc.getNumOperands()))
static int64_t getArgumentStackToRestore(MachineFunction &MF, MachineBasicBlock &MBB)
Returns how much of the incoming argument stack area (in bytes) we should clean up in an epilogue.
static void emitShadowCallStackEpilogue(const TargetInstrInfo &TII, MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL)
static void getLiveRegsForEntryMBB(LivePhysRegs &LiveRegs, const MachineBasicBlock &MBB)
static void emitCalleeSavedRestores(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, bool SVE)
static void computeCalleeSaveRegisterPairs(MachineFunction &MF, ArrayRef< CalleeSavedInfo > CSI, const TargetRegisterInfo *TRI, SmallVectorImpl< RegPairInfo > &RegPairs, bool NeedsFrameRecord)
static const unsigned DefaultSafeSPDisplacement
This is the biggest offset to the stack pointer we can encode in aarch64 instructions (without using ...
static void emitDefineCFAWithFP(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, unsigned FixedObject)
static bool needsWinCFI(const MachineFunction &MF)
static void insertCFISameValue(const MCInstrDesc &Desc, MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt, unsigned DwarfReg)
static cl::opt< bool > StackTaggingMergeSetTag("stack-tagging-merge-settag", cl::desc("merge settag instruction in function epilog"), cl::init(true), cl::Hidden)
static cl::opt< unsigned > StackHazardSize("aarch64-stack-hazard-size", cl::init(0), cl::Hidden)
bool requiresGetVGCall(MachineFunction &MF)
bool isVGInstruction(MachineBasicBlock::iterator MBBI)
static std::optional< int > getLdStFrameID(const MachineInstr &MI, const MachineFrameInfo &MFI)
static bool produceCompactUnwindFrame(MachineFunction &MF)
static cl::opt< bool > StackHazardInNonStreaming("aarch64-stack-hazard-in-non-streaming", cl::init(false), cl::Hidden)
static int64_t determineSVEStackObjectOffsets(MachineFrameInfo &MFI, int &MinCSFrameIndex, int &MaxCSFrameIndex, bool AssignOffsets)
static cl::opt< bool > OrderFrameObjects("aarch64-order-frame-objects", cl::desc("sort stack allocations"), cl::init(true), cl::Hidden)
static bool windowsRequiresStackProbe(MachineFunction &MF, uint64_t StackSizeInBytes)
static void fixupCalleeSaveRestoreStackOffset(MachineInstr &MI, uint64_t LocalStackSize, bool NeedsWinCFI, bool *HasWinCFI)
static bool invalidateWindowsRegisterPairing(unsigned Reg1, unsigned Reg2, bool NeedsWinCFI, bool IsFirst, const TargetRegisterInfo *TRI)
static MachineBasicBlock::iterator convertCalleeSaveRestoreToSPPrePostIncDec(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, const TargetInstrInfo *TII, int CSStackSizeInc, bool NeedsWinCFI, bool *HasWinCFI, bool EmitCFI, MachineInstr::MIFlag FrameFlag=MachineInstr::FrameSetup, int CFAOffset=0)
static void fixupSEHOpcode(MachineBasicBlock::iterator MBBI, unsigned LocalStackSize)
static StackOffset getSVEStackSize(const MachineFunction &MF)
Returns the size of the entire SVE stackframe (calleesaves + spills).
static cl::opt< bool > EnableRedZone("aarch64-redzone", cl::desc("enable use of redzone on AArch64"), cl::init(false), cl::Hidden)
static MachineBasicBlock::iterator InsertSEH(MachineBasicBlock::iterator MBBI, const TargetInstrInfo &TII, MachineInstr::MIFlag Flag)
static Register findScratchNonCalleeSaveRegister(MachineBasicBlock *MBB)
static void getLivePhysRegsUpTo(MachineInstr &MI, const TargetRegisterInfo &TRI, LivePhysRegs &LiveRegs)
Collect live registers from the end of MI's parent up to (including) MI in LiveRegs.
cl::opt< bool > EnableHomogeneousPrologEpilog("homogeneous-prolog-epilog", cl::Hidden, cl::desc("Emit homogeneous prologue and epilogue for the size " "optimization (default = off)"))
MachineBasicBlock::iterator emitVGSaveRestore(MachineBasicBlock::iterator II, const AArch64FrameLowering *TFI)
static bool IsSVECalleeSave(MachineBasicBlock::iterator I)
static bool invalidateRegisterPairing(unsigned Reg1, unsigned Reg2, bool UsesWinAAPCS, bool NeedsWinCFI, bool NeedsFrameRecord, bool IsFirst, const TargetRegisterInfo *TRI)
Returns true if Reg1 and Reg2 cannot be paired using a ldp/stp instruction.
unsigned findFreePredicateReg(BitVector &SavedRegs)
static unsigned getPrologueDeath(MachineFunction &MF, unsigned Reg)
static StackOffset getFPOffset(const MachineFunction &MF, int64_t ObjectOffset)
static bool isTargetWindows(const MachineFunction &MF)
static StackOffset getStackOffset(const MachineFunction &MF, int64_t ObjectOffset)
static int64_t upperBound(StackOffset Size)
static unsigned estimateRSStackSizeLimit(MachineFunction &MF)
Look at each instruction that references stack frames and return the stack size limit beyond which so...
static bool getSVECalleeSaveSlotRange(const MachineFrameInfo &MFI, int &Min, int &Max)
returns true if there are any SVE callee saves.
static cl::opt< unsigned > StackHazardRemarkSize("aarch64-stack-hazard-remark-size", cl::init(0), cl::Hidden)
static MCRegister getRegisterOrZero(MCRegister Reg, bool HasSVE)
static bool isFuncletReturnInstr(const MachineInstr &MI)
static void emitShadowCallStackPrologue(const TargetInstrInfo &TII, MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool NeedsWinCFI, bool NeedsUnwindInfo)
static std::optional< int > getMMOFrameID(MachineMemOperand *MMO, const MachineFrameInfo &MFI)
static bool requiresSaveVG(MachineFunction &MF)
static unsigned getFixedObjectSize(const MachineFunction &MF, const AArch64FunctionInfo *AFI, bool IsWin64, bool IsFunclet)
Returns the size of the fixed object area (allocated next to sp on entry) On Win64 this may include a...
static const int kSetTagLoopThreshold
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
This file contains the simple types necessary to represent the attributes associated with functions a...
#define CASE(ATTRNAME, AANAME,...)
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
Analysis containing CSE Info
static void clear(coro::Shape &Shape)
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static const HTTPClientCleanup Cleanup
const HexagonInstrInfo * TII
static std::string getTypeString(Type *T)
This file implements the LivePhysRegs utility for tracking liveness of physical registers.
unsigned const TargetRegisterInfo * TRI
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
uint64_t IntrinsicInst * II
static const MCPhysReg FPR[]
FPR - The set of FP registers that should be allocated for arguments on Darwin and AIX.
This file declares the machine register scavenger class.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
void processFunctionBeforeFrameIndicesReplaced(MachineFunction &MF, RegScavenger *RS) const override
processFunctionBeforeFrameIndicesReplaced - This method is called immediately before MO_FrameIndex operands are eliminated, but after the frame is finalized.
MachineBasicBlock::iterator eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator I) const override
This method is called during prolog/epilog code insertion to eliminate call frame setup and destroy pseudo instructions (but only if the Target is using them).
bool canUseAsPrologue(const MachineBasicBlock &MBB) const override
Check whether or not the given MBB can be used as a prologue for the target.
bool enableStackSlotScavenging(const MachineFunction &MF) const override
Returns true if the stack slot holes in the fixed and callee-save stack area should be used when allocating other stack locations to reduce stack size.
bool spillCalleeSavedRegisters(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, ArrayRef< CalleeSavedInfo > CSI, const TargetRegisterInfo *TRI) const override
spillCalleeSavedRegisters - Issues instruction(s) to spill all callee saved registers and returns true if it isn't possible / profitable to do so by issuing a series of store instructions.
bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, MutableArrayRef< CalleeSavedInfo > CSI, const TargetRegisterInfo *TRI) const override
restoreCalleeSavedRegisters - Issues instruction(s) to restore all callee saved registers and returns true if it isn't possible / profitable to do so by issuing a series of load instructions.
StackOffset getFrameIndexReferenceFromSP(const MachineFunction &MF, int FI) const override
getFrameIndexReferenceFromSP - This method returns the offset from the stack pointer to the slot of the specified index.
StackOffset getNonLocalFrameIndexReference(const MachineFunction &MF, int FI) const override
getNonLocalFrameIndexReference - This method returns the offset used to reference a frame index location. The offset can be from either FP/BP/SP based on which base register is returned by llvm.localaddress.
TargetStackID::Value getStackIDForScalableVectors() const override
Returns the StackID that scalable vectors should be associated with.
bool hasFP(const MachineFunction &MF) const override
hasFP - Return true if the specified function should have a dedicated frame pointer register.
void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override
emitProlog/emitEpilog - These methods insert prolog and epilog code into the function.
bool enableCFIFixup(MachineFunction &MF) const override
Returns true if we may need to fix the unwind information for the function.
void resetCFIToInitialState(MachineBasicBlock &MBB) const override
Emit CFI instructions that recreate the state of the unwind information upon function entry.
bool hasReservedCallFrame(const MachineFunction &MF) const override
hasReservedCallFrame - Under normal circumstances, when a frame pointer is not required, we reserve argument space for call sites in the function immediately on entry to the current function.
bool canUseRedZone(const MachineFunction &MF) const
Can this function use the red zone for local allocations.
void processFunctionBeforeFrameFinalized(MachineFunction &MF, RegScavenger *RS) const override
processFunctionBeforeFrameFinalized - This method is called immediately before the specified function's frame layout (MF.getFrameInfo()) is finalized.
int getSEHFrameIndexOffset(const MachineFunction &MF, int FI) const
unsigned getWinEHFuncletFrameSize(const MachineFunction &MF) const
Funclets only need to account for space for the callee saved registers, as the locals are accounted for in the parent's stack frame.
void orderFrameObjects(const MachineFunction &MF, SmallVectorImpl< int > &ObjectsToAllocate) const override
Order the symbols in the local stack frame.
void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override
void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, RegScavenger *RS) const override
This method determines which of the registers reported by TargetRegisterInfo::getCalleeSavedRegs() should actually get saved.
StackOffset getFrameIndexReference(const MachineFunction &MF, int FI, Register &FrameReg) const override
getFrameIndexReference - Provide a base+offset reference to an FI slot for debug info.
StackOffset resolveFrameOffsetReference(const MachineFunction &MF, int64_t ObjectOffset, bool isFixed, bool isSVE, Register &FrameReg, bool PreferFP, bool ForSimm) const
bool assignCalleeSavedSpillSlots(MachineFunction &MF, const TargetRegisterInfo *TRI, std::vector< CalleeSavedInfo > &CSI, unsigned &MinCSFrameIndex, unsigned &MaxCSFrameIndex) const override
assignCalleeSavedSpillSlots - Allows target to override spill slot assignment logic.
StackOffset getFrameIndexReferencePreferSP(const MachineFunction &MF, int FI, Register &FrameReg, bool IgnoreSPUpdates) const override
For Win64 AArch64 EH, the offset to the Unwind object is from the SP before the update.
StackOffset resolveFrameIndexReference(const MachineFunction &MF, int FI, Register &FrameReg, bool PreferFP, bool ForSimm) const
unsigned getWinEHParentFrameOffset(const MachineFunction &MF) const override
The parent frame offset (aka dispFrame) is only used on X86_64 to retrieve the parent's frame pointer.
AArch64FunctionInfo - This class is derived from MachineFunctionInfo and contains private AArch64-specific information for each MachineFunction.
bool needsShadowCallStackPrologueEpilogue(MachineFunction &MF) const
void setSwiftAsyncContextFrameIdx(int FI)
unsigned getTailCallReservedStack() const
unsigned getCalleeSavedStackSize(const MachineFrameInfo &MFI) const
void setCalleeSaveBaseToFrameRecordOffset(int Offset)
bool hasStackProbing() const
unsigned getArgumentStackToRestore() const
void setLocalStackSize(uint64_t Size)
void setVGIdx(unsigned Idx)
int getCalleeSaveBaseToFrameRecordOffset() const
bool hasStreamingModeChanges() const
bool shouldSignReturnAddress(const MachineFunction &MF) const
void setPredicateRegForFillSpill(unsigned Reg)
int getStackHazardSlotIndex() const
void setStreamingVGIdx(unsigned FrameIdx)
int64_t getStackProbeSize() const
uint64_t getStackSizeSVE() const
void setHasRedZone(bool s)
bool hasStackFrame() const
std::optional< int > getTaggedBasePointerIndex() const
uint64_t getLocalStackSize() const
void setStackRealigned(bool s)
bool needsDwarfUnwindInfo(const MachineFunction &MF) const
unsigned getVarArgsGPRSize() const
void setStackSizeSVE(uint64_t S)
bool isStackRealigned() const
bool hasSwiftAsyncContext() const
bool hasStackHazardSlotIndex() const
void setTaggedBasePointerOffset(unsigned Offset)
void setStackHazardCSRSlotIndex(int Index)
unsigned getPredicateRegForFillSpill() const
unsigned getSVECalleeSavedStackSize() const
bool needsAsyncDwarfUnwindInfo(const MachineFunction &MF) const
int64_t getStreamingVGIdx() const
void setMinMaxSVECSFrameIndex(int Min, int Max)
bool hasCalleeSaveStackFreeSpace() const
static bool isTailCallReturnInst(const MachineInstr &MI)
Returns true if MI is one of the TCRETURN* instructions.
static bool isSEHInstruction(const MachineInstr &MI)
Return true if the instruction is a SEH instruction used for unwinding on Windows.
static bool isFpOrNEON(Register Reg)
Returns whether the physical register is FP or NEON.
bool isReservedReg(const MachineFunction &MF, MCRegister Reg) const
bool hasBasePointer(const MachineFunction &MF) const
bool cannotEliminateFrame(const MachineFunction &MF) const
unsigned getBaseRegister() const
bool isTargetWindows() const
const AArch64RegisterInfo * getRegisterInfo() const override
bool isNeonAvailable() const
Returns true if the target has NEON and the function at runtime is known to have NEON enabled (e....
const AArch64InstrInfo * getInstrInfo() const override
bool isTargetILP32() const
const AArch64TargetLowering * getTargetLowering() const override
bool isTargetMachO() const
const Triple & getTargetTriple() const
const char * getChkStkName() const
bool isCallingConvWin64(CallingConv::ID CC, bool IsVarArg) const
bool swiftAsyncContextIsDynamicallySet() const
Return whether FrameLowering should always set the "extended frame present" bit in FP,...
bool hasInlineStackProbe(const MachineFunction &MF) const override
True if stack clash protection is enabled for this function.
unsigned getRedZoneSize(const Function &F) const
bool supportSwiftError() const override
Return true if the target supports swifterror attribute.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory), i.e. a start pointer and a length.
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
bool hasAttrSomewhere(Attribute::AttrKind Kind, unsigned *Index=nullptr) const
Return true if the specified attribute is set for at least one parameter or for the return value.
bool test(unsigned Idx) const
size_type count() const
count - Returns the number of bits which are set.
iterator_range< const_set_bits_iterator > set_bits() const
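As a minimal sketch (not code from this file), the queries above combine naturally when reporting which callee-saved registers were chosen; set_bits() visits only the indices whose bit is set:

#include "llvm/ADT/BitVector.h"
#include "llvm/Support/raw_ostream.h"

// Hypothetical helper: print every register index marked in SavedRegs.
static void dumpSavedRegs(const llvm::BitVector &SavedRegs) {
  llvm::errs() << SavedRegs.count() << " registers marked:"; // count() = number of set bits
  for (unsigned Reg : SavedRegs.set_bits())                  // iterates set bits only
    llvm::errs() << ' ' << Reg;
  llvm::errs() << '\n';
}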
The CalleeSavedInfo class tracks the information needed to locate where a callee saved register is in the current frame.
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These methods get and set the calling convention of this function.
AttributeList getAttributes() const
Return the attribute list for this Function.
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc) const override
Emit instructions to copy a pair of physical registers.
A set of physical registers with utility functions to track liveness when walking backward/forward through a basic block.
bool available(const MachineRegisterInfo &MRI, MCPhysReg Reg) const
Returns true if register Reg and no aliasing register is in the set.
void stepBackward(const MachineInstr &MI)
Simulates liveness when stepping backwards over an instruction (bundle).
void removeReg(MCPhysReg Reg)
Removes a physical register, all its sub-registers, and all its super-registers from the set.
void addLiveIns(const MachineBasicBlock &MBB)
Adds all live-in registers of basic block MBB.
void addLiveOuts(const MachineBasicBlock &MBB)
Adds all live-out registers of basic block MBB.
void addReg(MCPhysReg Reg)
Adds a physical register and all its sub-registers to the set.
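A plausible use of this interface (a sketch, not this file's code): seed the set with a block's live-outs, step backwards over every instruction, then ask whether a scratch register such as X16 is free at the block entry. The helper name and the choice of X16 are illustrative; the AArch64 register enum is assumed to come from the in-tree target headers.

#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "AArch64Subtarget.h" // assumed in-tree include providing llvm::AArch64::X16

static bool isX16FreeAtEntry(llvm::MachineBasicBlock &MBB) {
  const llvm::MachineFunction &MF = *MBB.getParent();
  llvm::LivePhysRegs LiveRegs(*MF.getSubtarget().getRegisterInfo());
  LiveRegs.addLiveOuts(MBB);   // start from the block's live-out registers
  for (llvm::MachineInstr &MI : llvm::reverse(MBB))
    LiveRegs.stepBackward(MI); // undo each instruction's defs and uses in turn
  // available() also rejects registers whose aliases are live.
  return LiveRegs.available(MF.getRegInfo(), llvm::AArch64::X16);
}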
bool usesWindowsCFI() const
static MCCFIInstruction createDefCfaRegister(MCSymbol *L, unsigned Register, SMLoc Loc={})
.cfi_def_cfa_register modifies a rule for computing CFA.
static MCCFIInstruction createRestore(MCSymbol *L, unsigned Register, SMLoc Loc={})
.cfi_restore says that the rule for Register is now the same as it was at the beginning of the function.
static MCCFIInstruction cfiDefCfa(MCSymbol *L, unsigned Register, int64_t Offset, SMLoc Loc={})
.cfi_def_cfa defines a rule for computing CFA as: take address from Register and add Offset to it.
static MCCFIInstruction createOffset(MCSymbol *L, unsigned Register, int64_t Offset, SMLoc Loc={})
.cfi_offset Previous value of Register is saved at offset Offset from CFA.
static MCCFIInstruction createNegateRAState(MCSymbol *L, SMLoc Loc={})
.cfi_negate_ra_state AArch64 negate RA state.
static MCCFIInstruction cfiDefCfaOffset(MCSymbol *L, int64_t Offset, SMLoc Loc={})
.cfi_def_cfa_offset modifies a rule for computing CFA.
static MCCFIInstruction createEscape(MCSymbol *L, StringRef Vals, SMLoc Loc={}, StringRef Comment="")
.cfi_escape Allows the user to add arbitrary bytes to the unwind info.
static MCCFIInstruction createSameValue(MCSymbol *L, unsigned Register, SMLoc Loc={})
.cfi_same_value Current value of Register is the same as in the previous frame.
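These factories only build a description of the directive; to take effect, the instruction must be registered with MachineFunction::addFrameInst and anchored in the instruction stream by a CFI_INSTRUCTION pseudo, as in this sketch (helper name illustrative):

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/MC/MCDwarf.h"

static void emitDefCfaOffset(llvm::MachineFunction &MF, llvm::MachineBasicBlock &MBB,
                             llvm::MachineBasicBlock::iterator MBBI,
                             const llvm::TargetInstrInfo &TII,
                             const llvm::DebugLoc &DL, int64_t Offset) {
  // Record ".cfi_def_cfa_offset <Offset>" with the function...
  unsigned CFIIndex =
      MF.addFrameInst(llvm::MCCFIInstruction::cfiDefCfaOffset(nullptr, Offset));
  // ...and emit the pseudo that pins it to this program point.
  llvm::BuildMI(MBB, MBBI, DL, TII.get(llvm::TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);
}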
MCSymbol * createTempSymbol()
Create a temporary symbol with a unique name.
Describe properties that are true of each instruction in the target description file.
Wrapper class representing physical registers. Should be passed by value.
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created and uniqued by the MCContext class.
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and updates PHI operands in the successor blocks to refer to this basic block.
instr_iterator instr_begin()
iterator_range< livein_iterator > liveins() const
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
bool isLiveIn(MCPhysReg Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
bool isEHFuncletEntry() const
Returns true if this is the entry block of an EH funclet.
iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
MachineInstr & instr_back()
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
DebugLoc findDebugLoc(instr_iterator MBBI)
Find the next valid DebugLoc starting at MBBI, skipping any debug instructions.
iterator getLastNonDebugInstr(bool SkipPseudoOp=true)
Returns an iterator to the last non-debug instruction in the basic block, or end().
instr_iterator instr_end()
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
reverse_iterator rbegin()
iterator insertAfter(iterator I, MachineInstr *MI)
Insert MI into the instruction list after I.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before 'Where'.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack frame for this function contains any variable sized objects.
uint64_t getStackSize() const
Return the number of bytes that must be allocated to hold all of the fixed size frame objects.
const AllocaInst * getObjectAllocation(int ObjectIdx) const
Return the underlying Alloca of the specified stack object if it exists.
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
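For instance (a sketch, not this file's code), a backend can allocate a spill slot and tag it for the scalable-vector stack region in one step; setStackID is documented further below:

#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/TargetFrameLowering.h" // llvm::TargetStackID

// Hypothetical helper: create a 16-byte spill slot on the SVE stack.
static int createScalableSpillSlot(llvm::MachineFunction &MF) {
  llvm::MachineFrameInfo &MFI = MF.getFrameInfo();
  int FI = MFI.CreateStackObject(/*Size=*/16, llvm::Align(16),
                                 /*isSpillSlot=*/true);
  MFI.setStackID(FI, llvm::TargetStackID::ScalableVector);
  return FI; // nonnegative identifier for the new object
}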
bool hasCalls() const
Return true if the current function has any function calls.
bool isFrameAddressTaken() const
This method may be called any time after instruction selection is complete to determine if there is a call to @llvm.frameaddress in this function.
Align getMaxAlign() const
Return the alignment in bytes that this function must be aligned to, which is greater than the defaul...
void setObjectOffset(int ObjectIdx, int64_t SPOffset)
Set the stack frame offset of the specified object.
uint64_t getMaxCallFrameSize() const
Return the maximum size of a call frame that must be allocated for an outgoing function call.
bool hasPatchPoint() const
This method may be called any time after instruction selection is complete to determine if there is a...
int getStackProtectorIndex() const
Return the index for the stack protector object.
uint64_t estimateStackSize(const MachineFunction &MF) const
Estimate and return the size of the stack frame.
void setStackID(int ObjectIdx, uint8_t ID)
bool isCalleeSavedInfoValid() const
Has the callee saved info been calculated yet?
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
bool isMaxCallFrameSizeComputed() const
bool hasStackMap() const
This method may be called any time after instruction selection is complete to determine if there is a...
const std::vector< CalleeSavedInfo > & getCalleeSavedInfo() const
Returns a reference to the callee saved info vector for the current function.
unsigned getNumObjects() const
Return the number of objects.
int getObjectIndexEnd() const
Return one past the maximum frame object index.
bool hasStackProtectorIndex() const
bool hasStackObjects() const
Return true if there are any stack objects in this function.
uint8_t getStackID(int ObjectIdx) const
unsigned getNumFixedObjects() const
Return the number of fixed objects.
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
int getObjectIndexBegin() const
Return the minimum frame object index.
bool isDeadObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a dead object.
const WinEHFuncInfo * getWinEHFuncInfo() const
getWinEHFuncInfo - Return information about how the current function uses Windows exception handling.
unsigned addFrameInst(const MCCFIInstruction &Inst)
void setHasWinCFI(bool v)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do so.
const MachineBasicBlock & front() const
bool hasEHFunclets() const
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
const MachineInstrBuilder & addCFIIndex(unsigned CFIIndex) const
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
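These builder methods chain off BuildMI (documented further below). As a hedged sketch, forming "add sp, sp, #16" on AArch64 looks roughly like this; ADDXri takes a destination, a source, a 12-bit immediate, and a shift operand:

#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "AArch64InstrInfo.h" // assumed in-tree include for llvm::AArch64::ADDXri

static void emitAddSp16(llvm::MachineBasicBlock &MBB,
                        llvm::MachineBasicBlock::iterator MBBI,
                        const llvm::TargetInstrInfo &TII, const llvm::DebugLoc &DL) {
  llvm::BuildMI(MBB, MBBI, DL, TII.get(llvm::AArch64::ADDXri), llvm::AArch64::SP)
      .addReg(llvm::AArch64::SP) // source register operand
      .addImm(16)                // unsigned 12-bit immediate
      .addImm(0)                 // shift amount (LSL #0)
      .setMIFlag(llvm::MachineInstr::FrameDestroy);
}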
Representation of each machine instruction.
void setFlags(unsigned flags)
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
uint32_t getFlags() const
Return the MI flags bitvector.
A description of a memory reference used in the backend.
const PseudoSourceValue * getPseudoValue() const
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
const Value * getValue() const
Return the base address of the memory access.
MachineOperand class - Representation of each machine instruction operand.
void setImm(int64_t immVal)
static MachineOperand CreateImm(int64_t Val)
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified register class.
bool isLiveIn(Register Reg) const
const MCPhysReg * getCalleeSavedRegs() const
Returns list of callee saved registers.
bool isPhysRegUsed(MCRegister PhysReg, bool SkipRegMaskTest=false) const
Return true if the specified register is modified or read in this function.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memory), i.e. a start pointer and a length.
void enterBasicBlockEnd(MachineBasicBlock &MBB)
Start tracking liveness from the end of basic block MBB.
Register FindUnusedReg(const TargetRegisterClass *RC) const
Find an unused register of the specified register class.
void backward()
Update internal register state and move MBB iterator backwards.
void addScavengingFrameIndex(int FI)
Add a scavenging frame index.
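Put together (a sketch; the helper name is illustrative), the scavenger can answer "which register of this class is free at I?" by entering the block at its end and rewinding:

#include "llvm/CodeGen/RegisterScavenging.h"

static llvm::Register findSpareRegAt(llvm::MachineBasicBlock &MBB,
                                     llvm::MachineBasicBlock::iterator I,
                                     const llvm::TargetRegisterClass *RC) {
  llvm::RegScavenger RS;
  RS.enterBasicBlockEnd(MBB);  // track liveness starting at the block end
  RS.backward(I);              // step backwards until the iterator reaches I
  return RS.FindUnusedReg(RC); // 0 if nothing in RC is free at I
}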
Wrapper class representing virtual and physical registers.
SMEAttrs is a utility class to parse the SME ACLE attributes on functions.
bool hasStreamingInterface() const
bool hasStreamingBody() const
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
int64_t getFixed() const
Returns the fixed component of the stack.
int64_t getScalable() const
Returns the scalable component of the stack.
static StackOffset get(int64_t Fixed, int64_t Scalable)
static StackOffset getScalable(int64_t Scalable)
static StackOffset getFixed(int64_t Fixed)
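The fixed and scalable parts stay independent under arithmetic, which is how a single StackOffset can describe a frame holding both ordinary and SVE objects. A small self-contained illustration:

#include <cassert>
#include "llvm/Support/TypeSize.h" // defines llvm::StackOffset

static void stackOffsetDemo() {
  llvm::StackOffset Fixed = llvm::StackOffset::getFixed(16);  // 16 bytes
  llvm::StackOffset SVE = llvm::StackOffset::getScalable(32); // 32 * vscale bytes
  llvm::StackOffset Total = Fixed + SVE; // components add independently
  assert(Total.getFixed() == 16 && Total.getScalable() == 32);
}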
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
virtual void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, RegScavenger *RS=nullptr) const
This method determines which of the registers reported by TargetRegisterInfo::getCalleeSavedRegs() should actually get saved.
int getOffsetOfLocalArea() const
getOffsetOfLocalArea - This method returns the offset of the local area from the stack pointer on entrance to a function.
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligned on entry to a function.
StackDirection getStackGrowthDirection() const
getStackGrowthDirection - Return the direction the stack grows.
virtual bool enableCFIFixup(MachineFunction &MF) const
Returns true if we may need to fix the unwind information for the function.
TargetInstrInfo - Interface to description of machine instruction set.
CodeModel::Model getCodeModel() const
Returns the code model.
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
SwiftAsyncFramePointerMode SwiftAsyncFramePointer
Control when and how the Swift async frame pointer bit should be set.
bool DisableFramePointerElim(const MachineFunction &MF) const
DisableFramePointerElim - This returns true if frame pointer elimination optimization should be disabled for the given machine function.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.
const TargetRegisterClass * getMinimalPhysRegClass(MCRegister Reg, MVT VT=MVT::Other) const
Returns the Register Class of a physical register of the given type, picking the most sub register class of the right type that contains this physreg.
Align getSpillAlign(const TargetRegisterClass &RC) const
Return the minimum required alignment in bytes for a spill slot for a register of this class.
bool hasStackRealignment(const MachineFunction &MF) const
True if stack realignment is required and still possible.
unsigned getSpillSize(const TargetRegisterClass &RC) const
Return the size in bytes of the stack slot allocated to hold a spilled copy of a register from class RC.
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const TargetInstrInfo * getInstrInfo() const
StringRef getArchName() const
Get the architecture (first) component of the triple.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
constexpr ScalarTy getFixedValue() const
self_iterator getIterator()
This class implements an extremely fast bulk output stream that can only output to a stream.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ MO_GOT
MO_GOT - This flag indicates that a symbol operand represents the address of the GOT entry for the symbol, rather than the address of the symbol itself.
static unsigned getShiftValue(unsigned Imm)
getShiftValue - Extract the shift value.
static unsigned getArithExtendImm(AArch64_AM::ShiftExtendType ET, unsigned Imm)
getArithExtendImm - Encode the extend type and shift amount for an arithmetic instruction: imm: 3-bit...
static uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize)
encodeLogicalImmediate - Return the encoded immediate value for a logical immediate instruction of the given register size.
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm: 6-bit shift amount shifter: 000 ==> lsl 001 ==...
const unsigned StackProbeMaxLoopUnroll
Maximum number of iterations to unroll for a constant size probing loop.
const unsigned StackProbeMaxUnprobedStack
Maximum allowed number of unprobed bytes above SP at an ABI boundary.
@ AArch64_SVE_VectorCall
Used between AArch64 SVE functions.
@ PreserveMost
Used for runtime calls that preserve most registers.
@ CXX_FAST_TLS
Used for access functions.
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
@ AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1
Preserve X1-X15, X19-X29, SP, Z0-Z31, P0-P15.
@ PreserveAll
Used for runtime calls that preserve (almost) all registers.
@ PreserveNone
Used for runtime calls that preserve no general registers.
@ Win64
The C convention as implemented on Windows/x86-64 and AArch64.
@ SwiftTail
This follows the Swift calling convention in how arguments are passed but guarantees tail calls will be made by making the callee clean up their stack.
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Define
Register definition.
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
void stable_sort(R &&Range)
MCCFIInstruction createDefCFA(const TargetRegisterInfo &TRI, unsigned FrameReg, unsigned Reg, const StackOffset &Offset, bool LastAdjustmentWasScalable=true)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
int isAArch64FrameOffsetLegal(const MachineInstr &MI, StackOffset &Offset, bool *OutUseUnscaledOp=nullptr, unsigned *OutUnscaledOp=nullptr, int64_t *EmittableOffset=nullptr)
Check if the Offset is a valid frame offset for MI.
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
MCCFIInstruction createCFAOffset(const TargetRegisterInfo &MRI, unsigned Reg, const StackOffset &OffsetFromDefCFA)
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
auto formatv(const char *Fmt, Ts &&...Vals) -> formatv_object< decltype(std::make_tuple(support::detail::build_format_adapter(std::forward< Ts >(Vals))...))>
unsigned getBLRCallOpcode(const MachineFunction &MF)
Return opcode to be used for indirect calls.
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal.address from the specified value, returning the original object being addressed.
@ AArch64FrameOffsetCannotUpdate
Offset cannot apply.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
auto reverse(ContainerTy &&C)
void sort(IteratorTy Start, IteratorTy End)
@ Always
Always set the bit.
@ Never
Never set the bit.
@ DeploymentBased
Determine whether to set the bit statically or dynamically based on the deployment target.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, StackOffset Offset, const TargetInstrInfo *TII, MachineInstr::MIFlag=MachineInstr::NoFlags, bool SetNZCV=false, bool NeedsWinCFI=false, bool *HasWinCFI=nullptr, bool EmitCFAOffset=false, StackOffset InitialOffset={}, unsigned FrameReg=AArch64::SP)
emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg plus Offset.
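A hedged usage sketch: lowering SP by 32 fixed bytes plus two Z-register spill slots (16 scalable bytes each). The helper name is illustrative; emitFrameOffset expands this into however many ADD/SUB (and, for the scalable part, ADDVL-style) instructions are needed:

#include "AArch64InstrInfo.h" // assumed in-tree include declaring emitFrameOffset

static void allocateMixedStack(llvm::MachineBasicBlock &MBB,
                               llvm::MachineBasicBlock::iterator MBBI,
                               const llvm::DebugLoc &DL,
                               const llvm::TargetInstrInfo *TII) {
  // SP -= 32 fixed bytes and 32 scalable bytes (2 x 16-byte Z-register slots).
  llvm::emitFrameOffset(MBB, MBBI, DL, llvm::AArch64::SP, llvm::AArch64::SP,
                        llvm::StackOffset::get(-32, -32), TII,
                        llvm::MachineInstr::FrameSetup);
}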
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
auto remove_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::remove_if which take ranges instead of having to pass begin/end explicitly.
unsigned getDefRegState(bool B)
unsigned getKillRegState(bool B)
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
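For example, rounding a 13-byte area up to an 8-byte boundary yields 16:

#include <cstdint>
#include "llvm/Support/Alignment.h"

static uint64_t paddedSize() {
  return llvm::alignTo(13, llvm::Align(8)); // == 16
}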
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
void fullyRecomputeLiveIns(ArrayRef< MachineBasicBlock * > MBBs)
Convenience function for recomputing live-in's for a set of MBBs until the computation converges.
Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
bool operator<(const StackAccess &Rhs) const
void print(raw_ostream &OS) const
std::string getTypeString() const
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
Description of the encoding of one expression Op.
Pair of physical register and lane mask.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
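Combined with getMachineMemOperand above, this is enough to describe a frame-index store (a sketch; the 8-byte scalar type and alignment are arbitrary examples, and the LLT header path varies across LLVM versions):

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGenTypes/LowLevelType.h" // llvm::LLT

static llvm::MachineMemOperand *makeFrameStoreMMO(llvm::MachineFunction &MF,
                                                  int FI) {
  llvm::MachinePointerInfo PtrInfo =
      llvm::MachinePointerInfo::getFixedStack(MF, FI);
  return MF.getMachineMemOperand(PtrInfo, llvm::MachineMemOperand::MOStore,
                                 llvm::LLT::scalar(64), llvm::Align(8));
}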