#define DEBUG_TYPE "frame-info"
static cl::opt<bool> EnableRedZone("aarch64-redzone",
                                   cl::desc("enable use of redzone on AArch64"),
                                   cl::init(false), cl::Hidden);

static cl::opt<bool> StackTaggingMergeSetTag(
    "stack-tagging-merge-settag",
    cl::desc("merge settag instruction in function epilog"), cl::init(true),
    cl::Hidden);

static cl::opt<bool>
    SplitSVEObjects("aarch64-split-sve-objects",
                    cl::desc("Split allocation of ZPR & PPR objects"),
                    cl::init(true), cl::Hidden);

cl::opt<bool> EnableHomogeneousPrologEpilog(
    "homogeneous-prolog-epilog", cl::Hidden,
    cl::desc("Emit homogeneous prologue and epilogue for the size "
             "optimization (default = off)"));

static cl::opt<bool> DisableMultiVectorSpillFill(
    "aarch64-disable-multivector-spill-fill",
    cl::desc("Disable use of LD/ST pairs for SME2 or SVE2p1"), cl::init(false),
    cl::Hidden);
  bool IsTailCallReturn =
      (MBB.end() != MBBI) ? AArch64InstrInfo::isTailCallReturnInst(*MBBI)
                          : false;

  int64_t ArgumentPopSize = 0;
  if (IsTailCallReturn) {
    // ...
    ArgumentPopSize = StackAdjust.getImm();
    // ...
  }
  return ArgumentPopSize;
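  // Note: a TCRETURN* instruction carries the callee's argument-stack
  // adjustment as an immediate operand (StackAdjust above), so the epilogue
  // must restore that many bytes of argument stack on top of the local frame.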
bool AArch64FrameLowering::homogeneousPrologEpilog(
    MachineFunction &MF, MachineBasicBlock *Exit) const {
  // ...
  if (Exit && getArgumentStackToRestore(MF, *Exit))
    return false;
  // ...
  auto *AFI = MF.getInfo<AArch64FunctionInfo>();
  // ...
  unsigned NumGPRs = 0;
  for (unsigned I = 0; CSRegs[I]; ++I) {
    // ...
    if (Reg == AArch64::LR) {
      assert(CSRegs[I + 1] == AArch64::FP);
      if (NumGPRs % 2 != 0)
        return false;
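      // The homogeneous prologue/epilogue helpers save and restore callee
      // saves strictly in pairs; an odd number of GPRs ahead of the LR/FP
      // frame record cannot be paired up, so the outlining is abandoned.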
bool AArch64FrameLowering::producePairRegisters(MachineFunction &MF) const {

// ...
    if (MI.isDebugInstr() || MI.isPseudo() ||
        MI.getOpcode() == AArch64::ADDXri ||
        MI.getOpcode() == AArch64::ADDSXri)
                                       bool IsWin64, bool IsFunclet) const {
  assert(AFI->getTailCallReservedStack() % 16 == 0 &&
         "Tail call reserved stack must be aligned to 16 bytes");
  if (!IsWin64 || IsFunclet) {
    // ...
                           Attribute::SwiftAsync))
  // ...
      int FrameIndex = H.CatchObj.FrameIndex;
      if ((FrameIndex != INT_MAX) && CatchObjFrameIndices.insert(FrameIndex)) {
        FixedObjectSize = alignTo(FixedObjectSize, /*...*/
  // ...
    FixedObjectSize += 8;
  // ...
  return alignTo(FixedObjectSize, 16);
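// Roughly: the fixed object area covers the tail-call reserved stack and, on
// Win64, the varargs GPR home area plus any WinEH catch objects and the 8-byte
// UnwindHelp slot, all rounded up to a 16-byte multiple.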
  const unsigned RedZoneSize =
      Subtarget.getRedZoneSize(MF.getFunction());
  // ...
  bool LowerQRegCopyThroughMem = Subtarget.hasFPARMv8() && /*...*/;
  // ...
  return !(MFI.hasCalls() || hasFP(MF) || NumBytes > RedZoneSize ||
           /*...*/);

// ...
      RegInfo->hasStackRealignment(MF))
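// Red zone: a leaf function whose locals fit in RedZoneSize bytes may address
// them below SP without ever adjusting SP; any calls, a frame pointer, or
// larger frames disqualify it, as the conditions above spell out.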
  const Triple &TT = TM.getTargetTriple();
  // ...
  if (TT.isOSDarwin() || TT.isOSWindows())
    return true;
  // ...
  if (TM.Options.FramePointerIsReserved(MF))
    return true;

// ...
  unsigned Opc = I->getOpcode();
  bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode();
  uint64_t CalleePopAmount = IsDestroy ? I->getOperand(1).getImm() : 0;
  // ...
    int64_t Amount = I->getOperand(0).getImm();
    // ...
    if (CalleePopAmount == 0) {
      // ...
      assert(Amount > -0xffffff && Amount < 0xffffff &&
             "call frame too large");
      // ...
             "non-reserved call frame without var sized objects?");
      // ...
    }
  } else if (CalleePopAmount != 0) {
    // ...
    assert(CalleePopAmount < 0xffffff && "call frame too large");
  const auto &TRI = *Subtarget.getRegisterInfo();
  // ...
  CFIBuilder.buildDefCFA(AArch64::SP, 0);

  // ...
  if (MFI.shouldSignReturnAddress(MF))
    MFI.branchProtectionPAuthLR() ? CFIBuilder.buildNegateRAStateWithPC()
                                  : CFIBuilder.buildNegateRAState();

  // ...
  if (MFI.needsShadowCallStackPrologueEpilogue(MF))
    CFIBuilder.buildSameValue(AArch64::X18);

  // ...
  const std::vector<CalleeSavedInfo> &CSI = /*...*/;
  for (const auto &Info : CSI) {
    // ...
    if (!TRI.regNeedsCFI(Reg, Reg))
      continue;
    CFIBuilder.buildSameValue(Reg);
  }
  case AArch64::W##n:                                                          \
  case AArch64::X##n:                                                          \
// ...
  case AArch64::B##n:                                                          \
  case AArch64::H##n:                                                          \
  case AArch64::S##n:                                                          \
  case AArch64::D##n:                                                          \
  case AArch64::Q##n:                                                          \
    return HasSVE ? AArch64::Z##n : AArch64::Q##n
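// The CASE macro folds every architectural view of register n (W/X for GPRs,
// B/H/S/D/Q for vector registers) onto the widest register that must actually
// be cleared: the X register, or the Z register when SVE is present (else Q).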
void AArch64FrameLowering::emitZeroCallUsedRegs(BitVector RegsToZero,
                                                MachineBasicBlock &MBB) const {
  // ...
  const AArch64Subtarget &STI = MF.getSubtarget<AArch64Subtarget>();
  // ...
  BitVector GPRsToZero(TRI.getNumRegs());
  BitVector FPRsToZero(TRI.getNumRegs());
  // ...
    if (TRI.isGeneralPurposeRegister(MF, Reg)) {
      // ...
        GPRsToZero.set(XReg);
    // ...
        FPRsToZero.set(XReg);
  for (MCRegister Reg : GPRsToZero.set_bits())
    // ...
  for (MCRegister Reg : FPRsToZero.set_bits())
    // ...
  for (MCRegister PReg :
       {AArch64::P0, AArch64::P1, AArch64::P2, AArch64::P3, AArch64::P4,
        AArch64::P5, AArch64::P6, AArch64::P7, AArch64::P8, AArch64::P9,
        AArch64::P10, AArch64::P11, AArch64::P12, AArch64::P13, AArch64::P14,
        AArch64::P15}) {
    if (RegsToZero[PReg])
      // ...
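  // SVE predicate registers are not covered by the GPR/FPR bit sets above, so
  // any of P0-P15 requested in RegsToZero is cleared individually here.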
bool AArch64FrameLowering::windowsRequiresStackProbe(
    const MachineFunction &MF, uint64_t StackSizeInBytes) const {
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  const AArch64FunctionInfo &MFI = *MF.getInfo<AArch64FunctionInfo>();
  // ...
         StackSizeInBytes >= uint64_t(MFI.getStackProbeSize());
  for (unsigned i = 0; CSRegs[i]; ++i)

// ...
                                                        bool HasCall) const {
  // ...
  const AArch64Subtarget &Subtarget = MF->getSubtarget<AArch64Subtarget>();
  // ...
  LivePhysRegs LiveRegs(TRI);
  // ...
    LiveRegs.addReg(AArch64::X16);
    LiveRegs.addReg(AArch64::X17);
    LiveRegs.addReg(AArch64::X18);
  // ...
  if (LiveRegs.available(MRI, AArch64::X9))
    return AArch64::X9;

  for (unsigned Reg : AArch64::GPR64RegClass) {
    if (LiveRegs.available(MRI, Reg))
      return Reg;
  }
  return AArch64::NoRegister;
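// Scratch register search order: X9 (the usual prologue scratch) first, then
// any free GPR64. When the prologue contains a call (HasCall), X16-X18 are
// marked live above so the call-clobbered intra-procedure-call registers and
// the platform register are never handed out.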
      MBB.isLiveIn(AArch64::NZCV))
    return false;
  // ...
  if (findScratchNonCalleeSaveRegister(TmpMBB) == AArch64::NoRegister)
    return false;
  // ...
      windowsRequiresStackProbe(*MF, std::numeric_limits<uint64_t>::max()))
    if (findScratchNonCalleeSaveRegister(TmpMBB, true) == AArch64::NoRegister)
      return false;

// ...
         F.needsUnwindTableEntry();

bool AArch64FrameLowering::shouldSignReturnAddressEverywhere(
    const MachineFunction &MF) const {
  // ...
  return SignReturnAddressAll;
  unsigned Opc = MBBI->getOpcode();
  // ...
  unsigned ImmIdx = MBBI->getNumOperands() - 1;
  int Imm = MBBI->getOperand(ImmIdx).getImm();
  switch (Opc) {
  // ...
  case AArch64::STR_ZXI:
  case AArch64::LDR_ZXI: {
    unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
    // ...
  }
  case AArch64::STR_PXI:
  case AArch64::LDR_PXI: {
    unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
    // ...
  }
  case AArch64::LDPDpost:
    // ...
  case AArch64::STPDpre: {
    unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(2).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFRegP_X))
    // ...
  }
  case AArch64::LDPXpost:
    // ...
  case AArch64::STPXpre: {
    // ...
    if (Reg0 == AArch64::FP && Reg1 == AArch64::LR)
      MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFPLR_X))
    // ...
      MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveRegP_X))
                .addImm(RegInfo->getSEHRegNum(Reg0))
                .addImm(RegInfo->getSEHRegNum(Reg1))
    // ...
  }
  case AArch64::LDRDpost:
    // ...
  case AArch64::STRDpre: {
    unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFReg_X))
    // ...
  }
  case AArch64::LDRXpost:
    // ...
  case AArch64::STRXpre: {
    unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    // ...
  }
  case AArch64::STPDi:
  case AArch64::LDPDi: {
    unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
    unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    // ...
  }
  case AArch64::STPXi:
  case AArch64::LDPXi: {
    // ...
    if (Reg0 == AArch64::FP && Reg1 == AArch64::LR)
      // ...
          .addImm(RegInfo->getSEHRegNum(Reg0))
          .addImm(RegInfo->getSEHRegNum(Reg1))
    // ...
  }
  case AArch64::STRXui:
  case AArch64::LDRXui: {
    int Reg = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
    // ...
  }
  case AArch64::STRDui:
  case AArch64::LDRDui: {
    unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
    // ...
  }
  case AArch64::STPQi:
  case AArch64::LDPQi: {
    unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
    unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveAnyRegQP))
    // ...
  }
  case AArch64::LDPQpost:
    // ...
  case AArch64::STPQpre: {
    unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(2).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveAnyRegQPX))
    // ...
  }
  if (ST.isTargetDarwin())
// ...
    DL = MBBI->getDebugLoc();
// ...
  EmitSignRA(MF.front());
// ...
    if (MBB.isEHFuncletEntry())
// ...
    if (MBB.isReturnBlock())
  StackOffset SVEStackSize = ZPRStackSize + PPRStackSize;
  // ...
  if (MFI.isVariableSizedObjectIndex(FI)) {
  // ...
  bool FPAfterSVECalleeSaves = /*...*/;
  if (MFI.hasScalableStackID(FI)) {
    if (FPAfterSVECalleeSaves &&
        /*...*/
          "split-sve-objects not supported with FPAfterSVECalleeSaves");
    // ...
      AccessOffset = -PPRStackSize;
    return AccessOffset +
           /*...*/;
  }
  // ...
  bool IsFixed = MFI.isFixedObjectIndex(FI);
  // ...
  if (!IsFixed && !IsCSR) {
    ScalableOffset = -SVEStackSize;
  } else if (FPAfterSVECalleeSaves && IsCSR) {
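    // A plain (non-fixed, non-CSR) object lives below the SVE area, so when it
    // is addressed relative to a base above that area the entire scalable SVE
    // stack size has to be subtracted first, hence the negative offset.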
                                        int64_t ObjectOffset) const {
  // ...
  bool IsWin64 =
      Subtarget.isCallingConvWin64(F.getCallingConv(), F.isVarArg());
  unsigned FixedObject =
      getFixedObjectSize(MF, AFI, IsWin64, /*IsFunclet=*/false);

// ...
                                        int64_t ObjectOffset) const {

// ...
  return RegInfo->getLocalAddressRegister(MF) == AArch64::FP
             ? getFPOffset(MF, ObjectOffset).getFixed()
             : getStackOffset(MF, ObjectOffset).getFixed();
                                        bool ForSimm) const {
  // ...
  int64_t ObjectOffset = MFI.getObjectOffset(FI);
  bool isFixed = MFI.isFixedObjectIndex(FI);
  // ...
                                      FrameReg, PreferFP, ForSimm);
                                        bool ForSimm) const {
  // ...
  int64_t FPOffset = getFPOffset(MF, ObjectOffset).getFixed();
  int64_t Offset = getStackOffset(MF, ObjectOffset).getFixed();
  // ...
  bool isSVE = MFI.isScalableStackID(StackID);
  // ...
  StackOffset SVEStackSize = ZPRStackSize + PPRStackSize;
  // ...
  PreferFP &= !SVEStackSize;
  // ...
  } else if (isCSR && RegInfo->hasStackRealignment(MF)) {
    // ...
    assert(hasFP(MF) && "Re-aligned stack must have frame pointer");
  } else if (hasFP(MF) && !RegInfo->hasStackRealignment(MF)) {
    // ...
    bool FPOffsetFits = !ForSimm || FPOffset >= -256;
    PreferFP |= Offset > -FPOffset && !SVEStackSize;
    if (FPOffset >= 0) {
      // ...
    } else if (MFI.hasVarSizedObjects()) {
      // ...
      bool CanUseBP = RegInfo->hasBasePointer(MF);
      if (FPOffsetFits && CanUseBP)
        // ...
    } else if (MF.hasEHFunclets() && !RegInfo->hasBasePointer(MF)) {
      // ...
             "Funclets should only be present on Win64");
      // ...
      if (FPOffsetFits && PreferFP)
        // ...
    }
  }

  assert(((isFixed || isCSR) || !RegInfo->hasStackRealignment(MF) || !UseFP) &&
         "In the presence of dynamic stack pointer realignment, "
         "non-argument/CSR objects cannot be accessed through the frame pointer");
  bool FPAfterSVECalleeSaves = /*...*/;
  // ...
      FPOffset -= PPRStackSize;
    // ...
      SPOffset -= PPRStackSize;
  // ...
    if (FPAfterSVECalleeSaves) {
      // ...
    }
    // ...
        RegInfo->hasStackRealignment(MF))) {
      FrameReg = RegInfo->getFrameRegister(MF);
      // ...
      FrameReg = RegInfo->hasBasePointer(MF) ? RegInfo->getBaseRegister()
                                             : /*...*/;
    // ...
    if (FPAfterSVECalleeSaves) {
      // ...
        SVEAreaOffset = SVECalleeSavedStack;
      // ...
        SVEAreaOffset = SVECalleeSavedStack - SVEStackSize;
      // ...
        SVEAreaOffset = SVEStackSize;
      // ...
        SVEAreaOffset = SVEStackSize - SVECalleeSavedStack;
    } else {
      if (UseFP && !(isFixed || isCSR))
        SVEAreaOffset = -SVEStackSize;
      if (!UseFP && (isFixed || isCSR))
        SVEAreaOffset = SVEStackSize;
    }
    // ...
    FrameReg = RegInfo->getFrameRegister(MF);
    // ...
    if (RegInfo->hasBasePointer(MF))
      FrameReg = RegInfo->getBaseRegister();
    else {
      assert(!MFI.hasVarSizedObjects() &&
             "Can't use SP when we have var sized objects.");
      FrameReg = AArch64::SP;
    }
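  // Base register selection, in order: the frame pointer when the offset is
  // encodable or correctness requires it, then the base pointer (X19) when
  // variable-sized objects make SP-relative offsets unknown, and finally SP,
  // which is only sound when no variable-sized objects exist.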
          Attrs.hasAttrSomewhere(Attribute::SwiftError)) &&
                                              bool NeedsWinCFI, bool IsFirst,
                                              const TargetRegisterInfo *TRI) {
  // ...
  if (Reg2 == AArch64::FP)
    return true;
  // ...
  if (TRI->getEncodingValue(Reg2) == TRI->getEncodingValue(Reg1) + 1)
    return false;
  // ...
  if (Reg1 >= AArch64::X19 && Reg1 <= AArch64::X27 &&
      (Reg1 - AArch64::X19) % 2 == 0 && Reg2 == AArch64::LR && !IsFirst)
    return false;
                                       bool UsesWinAAPCS, bool NeedsWinCFI,
                                       bool NeedsFrameRecord, bool IsFirst,
                                       const TargetRegisterInfo *TRI) {
  // ...
  if (NeedsFrameRecord)
    return Reg2 == AArch64::LR;
  enum RegType { GPR, FPR64, FPR128, PPR, ZPR, VG } Type;
  const TargetRegisterClass *RC;
  // ...
  RegPairInfo() = default;
  // ...
  bool isPaired() const { return Reg2.isValid(); }
  // ...
  bool isScalable() const { return Type == PPR || Type == ZPR; }
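  // A RegPairInfo describes one callee-save slot: either a single STR/LDR or
  // a paired STP/LDP (Reg2 valid). PPR/ZPR entries are scalable and are sized
  // in vector-length units rather than bytes.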
  for (unsigned PReg = AArch64::P8; PReg <= AArch64::P15; ++PReg) {
    if (SavedRegs.test(PReg)) {
      unsigned PNReg = PReg - AArch64::P0 + AArch64::PN0;

// ...
  bool IsLocallyStreaming = /*...*/;
  // ...
  return Subtarget.hasSVE2p1() ||
         (Subtarget.hasSME2() &&
          (!IsLocallyStreaming && Subtarget.isStreaming()));
                                      bool NeedsFrameRecord) {
  // ...
  assert((/*...*/ (Count & 1) == 0) &&
         "Odd number of callee-saved regs to spill!");
  // ...
  int StackFillDir = -1;
  // ...
  unsigned FirstReg = 0;
  // ...
    FirstReg = Count - 1;
  // ...
  int ZPRByteOffset = 0;
  int PPRByteOffset = 0;
  // ...
  } else if (!FPAfterSVECalleeSaves) {
  // ...
  for (unsigned i = FirstReg; i < Count; i += RegInc) {
    RPI.Reg1 = CSI[i].getReg();
    // ...
    if (AArch64::GPR64RegClass.contains(RPI.Reg1)) {
      RPI.Type = RegPairInfo::GPR;
      RPI.RC = &AArch64::GPR64RegClass;
    } else if (AArch64::FPR64RegClass.contains(RPI.Reg1)) {
      RPI.Type = RegPairInfo::FPR64;
      RPI.RC = &AArch64::FPR64RegClass;
    } else if (AArch64::FPR128RegClass.contains(RPI.Reg1)) {
      RPI.Type = RegPairInfo::FPR128;
      RPI.RC = &AArch64::FPR128RegClass;
    } else if (AArch64::ZPRRegClass.contains(RPI.Reg1)) {
      RPI.Type = RegPairInfo::ZPR;
      RPI.RC = &AArch64::ZPRRegClass;
    } else if (AArch64::PPRRegClass.contains(RPI.Reg1)) {
      RPI.Type = RegPairInfo::PPR;
      RPI.RC = &AArch64::PPRRegClass;
    } else if (RPI.Reg1 == AArch64::VG) {
      RPI.Type = RegPairInfo::VG;
      RPI.RC = &AArch64::FIXED_REGSRegClass;
    }
    int &ScalableByteOffset = RPI.Type == RegPairInfo::PPR && SplitPPRs
                                  ? PPRByteOffset
                                  : ZPRByteOffset;
    // ...
    if (HasCSHazardPadding &&
        /*...*/)
      ByteOffset += StackFillDir * StackHazardSize;
    // ...
    int Scale = TRI->getSpillSize(*RPI.RC);
    // ...
    if (unsigned(i + RegInc) < Count && !HasCSHazardPadding) {
      MCRegister NextReg = CSI[i + RegInc].getReg();
      bool IsFirst = i == FirstReg;
      switch (RPI.Type) {
      case RegPairInfo::GPR:
        if (AArch64::GPR64RegClass.contains(NextReg) &&
            !invalidateRegisterPairing(RPI.Reg1, NextReg, IsWindows,
                                       NeedsWinCFI, NeedsFrameRecord, IsFirst,
                                       TRI))
          RPI.Reg2 = NextReg;
        break;
      case RegPairInfo::FPR64:
        if (AArch64::FPR64RegClass.contains(NextReg) &&
            /*...*/)
          RPI.Reg2 = NextReg;
        break;
      case RegPairInfo::FPR128:
        if (AArch64::FPR128RegClass.contains(NextReg))
          RPI.Reg2 = NextReg;
        break;
      case RegPairInfo::PPR:
        break;
      case RegPairInfo::ZPR:
        if (/*...*/
            ((RPI.Reg1 - AArch64::Z0) & 1) == 0 && (NextReg == RPI.Reg1 + 1)) {
          // ...
          int Offset = (ScalableByteOffset + StackFillDir * 2 * Scale) / Scale;
          // ...
        }
        break;
      case RegPairInfo::VG:
        break;
      }
    }
    assert((!RPI.isPaired() ||
            (CSI[i].getFrameIdx() + RegInc == CSI[i + RegInc].getFrameIdx())) &&
           "Out of order callee saved regs!");
    // ...
    assert((!RPI.isPaired() || !NeedsFrameRecord || RPI.Reg2 != AArch64::FP ||
            RPI.Reg1 == AArch64::LR) &&
           "FrameRecord must be allocated together with LR");
    // ...
    assert((!RPI.isPaired() || !NeedsFrameRecord || RPI.Reg1 != AArch64::FP ||
            RPI.Reg2 == AArch64::LR) &&
           "FrameRecord must be allocated together with LR");
    // ...
            ((RPI.Reg1 == AArch64::LR && RPI.Reg2 == AArch64::FP) ||
             RPI.Reg1 + 1 == RPI.Reg2))) &&
           "Callee-save registers not saved as adjacent register pair!");

    RPI.FrameIdx = CSI[i].getFrameIdx();
    // ...
      RPI.FrameIdx = CSI[i + RegInc].getFrameIdx();
    if (RPI.isScalable() && ScalableByteOffset % Scale != 0) {
      ScalableByteOffset = alignTo(ScalableByteOffset, Scale);
      // ...
    }

    int OffsetPre = RPI.isScalable() ? ScalableByteOffset : ByteOffset;
    assert(OffsetPre % Scale == 0);
    // ...
    if (RPI.isScalable())
      ScalableByteOffset += StackFillDir * (RPI.isPaired() ? 2 * Scale : Scale);
    else
      ByteOffset += StackFillDir * (RPI.isPaired() ? 2 * Scale : Scale);
    // ...
        ((!IsWindows && RPI.Reg2 == AArch64::FP) ||
         (IsWindows && RPI.Reg2 == AArch64::LR)))
      ByteOffset += StackFillDir * 8;
    // ...
    if (NeedGapToAlignStack && !NeedsWinCFI && !RPI.isScalable() &&
        RPI.Type != RegPairInfo::FPR128 && !RPI.isPaired() &&
        ByteOffset % 16 != 0) {
      ByteOffset += 8 * StackFillDir;
      // ...
      NeedGapToAlignStack = false;
    }

    int OffsetPost = RPI.isScalable() ? ScalableByteOffset : ByteOffset;
    assert(OffsetPost % Scale == 0);
    // ...
    int Offset = NeedsWinCFI ? OffsetPre : OffsetPost;
    // ...
        ((!IsWindows && RPI.Reg2 == AArch64::FP) ||
         (IsWindows && RPI.Reg2 == AArch64::LR)))
      // ...
    RPI.Offset = Offset / Scale;
    // ...
    assert((!RPI.isPaired() ||
            (!RPI.isScalable() && RPI.Offset >= -64 && RPI.Offset <= 63) ||
            (RPI.isScalable() && RPI.Offset >= -256 && RPI.Offset <= 255)) &&
           "Offset out of bounds for LDP/STP immediate");
    auto isFrameRecord = [&] {
      // ...
        return IsWindows ? RPI.Reg1 == AArch64::FP && RPI.Reg2 == AArch64::LR
                         : RPI.Reg1 == AArch64::LR && RPI.Reg2 == AArch64::FP;
      // ...
      return i > 0 && RPI.Reg1 == AArch64::FP &&
             CSI[i - 1].getReg() == AArch64::LR;
    };
    // ...
    if (NeedsFrameRecord && isFrameRecord())
    std::reverse(RegPairs.begin(), RegPairs.end());
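    // The pair list was built following StackFillDir; when the CSR area was
    // filled top-down the pairs come out in reverse, so (as the surrounding
    // logic suggests) they are flipped back into allocation order here.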
  MRI.freezeReservedRegs();
  // ...
  if (homogeneousPrologEpilog(MF)) {
    // ...
    for (auto &RPI : RegPairs) {
      // ...
      if (!MRI.isReserved(RPI.Reg1))
        MBB.addLiveIn(RPI.Reg1);
      if (RPI.isPaired() && !MRI.isReserved(RPI.Reg2))
        MBB.addLiveIn(RPI.Reg2);
    }
  bool PTrueCreated = false;
  // ...
    unsigned Size = TRI->getSpillSize(*RPI.RC);
    Align Alignment = TRI->getSpillAlign(*RPI.RC);
    switch (RPI.Type) {
    case RegPairInfo::GPR:
      StrOpc = RPI.isPaired() ? AArch64::STPXi : AArch64::STRXui;
      break;
    case RegPairInfo::FPR64:
      StrOpc = RPI.isPaired() ? AArch64::STPDi : AArch64::STRDui;
      break;
    case RegPairInfo::FPR128:
      StrOpc = RPI.isPaired() ? AArch64::STPQi : AArch64::STRQui;
      break;
    case RegPairInfo::ZPR:
      StrOpc = RPI.isPaired() ? AArch64::ST1B_2Z_IMM : AArch64::STR_ZXI;
      break;
    case RegPairInfo::PPR:
      StrOpc = AArch64::STR_PXI;
      break;
    case RegPairInfo::VG:
      StrOpc = AArch64::STRXui;
      break;
    }
    if (X0Scratch != AArch64::NoRegister)
      // ...

    if (Reg1 == AArch64::VG) {
      // ...
      Reg1 = findScratchNonCalleeSaveRegister(&MBB, true);
      assert(Reg1 != AArch64::NoRegister);
      // ...
        return STI.getRegisterInfo()->isSuperOrSubRegisterEq(
            AArch64::X0, LiveIn.PhysReg);
      // ...
      RTLIB::Libcall LC = RTLIB::SMEABI_GET_CURRENT_VG;
      // ...
          TRI->getCallPreservedMask(MF, TLI.getLibcallCallingConv(LC));
               dbgs() << ") -> fi#(" << RPI.FrameIdx;
               // ...
                 dbgs() << ", " << RPI.FrameIdx + 1;
    // ...
    assert((!NeedsWinCFI || !(Reg1 == AArch64::LR && Reg2 == AArch64::FP)) &&
           "Windows unwinding requires a consecutive (FP,LR) pair");
    // ...
    unsigned FrameIdxReg1 = RPI.FrameIdx;
    unsigned FrameIdxReg2 = RPI.FrameIdx + 1;
    if (NeedsWinCFI && RPI.isPaired()) {
      // ...
    }
    // ...
    if (RPI.isPaired() && RPI.isScalable()) {
      // ...
             "Expects SVE2.1 or SME2 target and a predicate register");
#ifdef EXPENSIVE_CHECKS
      auto IsPPR = [](const RegPairInfo &c) {
        return c.Type == RegPairInfo::PPR;
      };
      auto PPRBegin = std::find_if(RegPairs.begin(), RegPairs.end(), IsPPR);
      auto IsZPR = [](const RegPairInfo &c) {
        return c.Type == RegPairInfo::ZPR;
      };
      auto ZPRBegin = std::find_if(RegPairs.begin(), RegPairs.end(), IsZPR);
      assert(!(PPRBegin < ZPRBegin) &&
             "Expected callee save predicate to be handled first");
#endif
      if (!PTrueCreated) {
        PTrueCreated = true;
        // ...
      }
      // ...
      if (!MRI.isReserved(Reg1))
        MBB.addLiveIn(Reg1);
      if (!MRI.isReserved(Reg2))
        MBB.addLiveIn(Reg2);
      MIB.addReg(AArch64::Z0_Z1 + (RPI.Reg1 - AArch64::Z0));
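      // ST1B_2Z_IMM stores a consecutive Z-register tuple; the operand is the
      // ZZ tuple Zn_Zn+1, derived by indexing from Z0_Z1, and the store is
      // governed by the single ptrue created once above.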
      if (!MRI.isReserved(Reg1))
        MBB.addLiveIn(Reg1);
      if (RPI.isPaired()) {
        if (!MRI.isReserved(Reg2))
          MBB.addLiveIn(Reg2);
      }
    // ...
    if (RPI.Type == RegPairInfo::ZPR) {
      // ...
    } else if (RPI.Type == RegPairInfo::PPR) {
    DL = MBBI->getDebugLoc();
  // ...
  if (homogeneousPrologEpilog(MF, &MBB)) {
    // ...
    for (auto &RPI : RegPairs) {
      // ...
    }
  }
  // ...
  auto IsPPR = [](const RegPairInfo &c) { return c.Type == RegPairInfo::PPR; };
  // ...
  auto PPREnd = std::find_if_not(PPRBegin, RegPairs.end(), IsPPR);
  std::reverse(PPRBegin, PPREnd);
  auto IsZPR = [](const RegPairInfo &c) { return c.Type == RegPairInfo::ZPR; };
  // ...
  auto ZPREnd = std::find_if_not(ZPRBegin, RegPairs.end(), IsZPR);
  std::reverse(ZPRBegin, ZPREnd);

  bool PTrueCreated = false;
  for (const RegPairInfo &RPI : RegPairs) {
    unsigned Size = TRI->getSpillSize(*RPI.RC);
    Align Alignment = TRI->getSpillAlign(*RPI.RC);
    switch (RPI.Type) {
    case RegPairInfo::GPR:
      LdrOpc = RPI.isPaired() ? AArch64::LDPXi : AArch64::LDRXui;
      break;
    case RegPairInfo::FPR64:
      LdrOpc = RPI.isPaired() ? AArch64::LDPDi : AArch64::LDRDui;
      break;
    case RegPairInfo::FPR128:
      LdrOpc = RPI.isPaired() ? AArch64::LDPQi : AArch64::LDRQui;
      break;
    case RegPairInfo::ZPR:
      LdrOpc = RPI.isPaired() ? AArch64::LD1B_2Z_IMM : AArch64::LDR_ZXI;
      break;
    case RegPairInfo::PPR:
      LdrOpc = AArch64::LDR_PXI;
      break;
    case RegPairInfo::VG:
      // ...
    }
    // ...
               dbgs() << ") -> fi#(" << RPI.FrameIdx;
               // ...
                 dbgs() << ", " << RPI.FrameIdx + 1;
    // ...
    unsigned FrameIdxReg1 = RPI.FrameIdx;
    unsigned FrameIdxReg2 = RPI.FrameIdx + 1;
    if (NeedsWinCFI && RPI.isPaired()) {
      // ...
    }
    // ...
    if (RPI.isPaired() && RPI.isScalable()) {
      // ...
             "Expects SVE2.1 or SME2 target and a predicate register");
#ifdef EXPENSIVE_CHECKS
      assert(!(PPRBegin < ZPRBegin) &&
             "Expected callee save predicate to be handled first");
#endif
      if (!PTrueCreated) {
        PTrueCreated = true;
        // ...
      }
      // ...
      MIB.addReg(AArch64::Z0_Z1 + (RPI.Reg1 - AArch64::Z0),
                 /*...*/);
      // ...
    }
    // ...
    if (RPI.isPaired()) {
      return std::optional<int>(PSV->getFrameIndex());
  // ...
  return std::nullopt;

// ...
  if (!MI.mayLoadOrStore() || MI.getNumMemOperands() < 1)
    return std::nullopt;

// ...
  return AArch64::PPRRegClass.contains(MI.getOperand(0).getReg());
void AArch64FrameLowering::determineStackHazardSlot(
    MachineFunction &MF, BitVector &SavedRegs) const {
  auto *AFI = MF.getInfo<AArch64FunctionInfo>();
  if (StackHazardSize == 0 || StackHazardSize % 16 != 0 ||
      // ...
  // ...
    return AArch64::FPR64RegClass.contains(Reg) ||
           AArch64::FPR128RegClass.contains(Reg) ||
           AArch64::ZPRRegClass.contains(Reg);
  // ...
    return AArch64::PPRRegClass.contains(Reg);

  bool HasFPRStackObjects = false;
  bool HasPPRStackObjects = false;
  enum SlotType : uint8_t {
    // ...
  };

  // ...
  for (auto &MBB : MF) {
    for (auto &MI : MBB) {
      // ...
      if (!FI || FI < 0 || FI > int(SlotTypes.size()))
        // ...
      // ...
                         ? SlotType::ZPRorFPR
                         // ...
    }
  }

  for (int FI = 0; FI < int(SlotTypes.size()); ++FI) {
    HasFPRStackObjects |= SlotTypes[FI] == SlotType::ZPRorFPR;
    // ...
    if (SlotTypes[FI] == SlotType::PPR) {
      // ...
      HasPPRStackObjects = true;
    }
  }
  if (HasFPRCSRs || HasFPRStackObjects) {
    // ...
                      << StackHazardSize << "\n");
    // ...
  }

  // ...
  if (!HasPPRCSRs && !HasPPRStackObjects) {
    LLVM_DEBUG(
        dbgs() << "Not using SplitSVEObjects as no PPRs are on the stack\n");
    return;
  }

  if (!HasFPRCSRs && !HasFPRStackObjects) {
    LLVM_DEBUG(
        dbgs()
        << "Not using SplitSVEObjects as no FPRs or ZPRs are on the stack\n");
    return;
  }

  // ...
          MF.getFunction().getCallingConv())) {
    LLVM_DEBUG(
        dbgs() << "Calling convention is not supported with SplitSVEObjects");
    return;
  }

  [[maybe_unused]] const AArch64Subtarget &Subtarget =
      MF.getSubtarget<AArch64Subtarget>();
  assert(Subtarget.isSVEorStreamingSVEAvailable() &&
         "Expected SVE to be available for PPRs");
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  // ...
  BitVector FPRZRegs(SavedRegs.size());
  for (size_t Reg = 0, E = SavedRegs.size(); HasFPRCSRs && Reg < E; ++Reg) {
    BitVector::reference RegBit = SavedRegs[Reg];
    // ...
    unsigned SubRegIdx = 0;
    // ...
      SubRegIdx = AArch64::dsub;
    // ...
      SubRegIdx = AArch64::zsub;
    // ...
        TRI->getMatchingSuperReg(Reg, SubRegIdx, &AArch64::ZPRRegClass);
  }
  SavedRegs |= FPRZRegs;
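  // With split SVE objects, D/Q callee saves are promoted here to their
  // containing Z register (via the dsub/zsub indices) so that they are spilled
  // in the ZPR area, on the far side of the hazard padding from the GPRs.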
  unsigned UnspilledCSGPR = AArch64::NoRegister;
  unsigned UnspilledCSGPRPaired = AArch64::NoRegister;
  // ...
      RegInfo->hasBasePointer(MF) ? RegInfo->getBaseRegister() : MCRegister();

  unsigned ExtraCSSpill = 0;
  bool HasUnpairedGPR64 = false;
  bool HasPairZReg = false;
  BitVector UserReservedRegs = RegInfo->getUserReservedRegs(MF);
  BitVector ReservedRegs = RegInfo->getReservedRegs(MF);
  // ...
  for (unsigned i = 0; CSRegs[i]; ++i) {
    if (Reg == BasePointerReg)
      // ...
    if (UserReservedRegs[Reg]) {
      SavedRegs.reset(Reg);
      continue;
    }

    bool RegUsed = SavedRegs.test(Reg);
    // ...
    const bool RegIsGPR64 = AArch64::GPR64RegClass.contains(Reg);
    if (RegIsGPR64 || AArch64::FPR64RegClass.contains(Reg) ||
        AArch64::FPR128RegClass.contains(Reg)) {
      // ...
      if (HasUnpairedGPR64)
        PairedReg = CSRegs[i % 2 == 0 ? i - 1 : i + 1];
      else
        PairedReg = CSRegs[i ^ 1];
      // ...
      if (RegIsGPR64 && !AArch64::GPR64RegClass.contains(PairedReg)) {
        PairedReg = AArch64::NoRegister;
        HasUnpairedGPR64 = true;
      }
      assert(PairedReg == AArch64::NoRegister ||
             AArch64::GPR64RegClass.contains(Reg, PairedReg) ||
             AArch64::FPR64RegClass.contains(Reg, PairedReg) ||
             AArch64::FPR128RegClass.contains(Reg, PairedReg));
      if (AArch64::GPR64RegClass.contains(Reg) && !ReservedRegs[Reg]) {
        UnspilledCSGPR = Reg;
        UnspilledCSGPRPaired = PairedReg;
      }
    // ...
    if (producePairRegisters(MF) && PairedReg != AArch64::NoRegister &&
        !SavedRegs.test(PairedReg)) {
      SavedRegs.set(PairedReg);
      if (AArch64::GPR64RegClass.contains(PairedReg) &&
          !ReservedRegs[PairedReg])
        ExtraCSSpill = PairedReg;
    }
    // ...
    HasPairZReg |= (AArch64::ZPRRegClass.contains(Reg, CSRegs[i ^ 1]) &&
                    SavedRegs.test(CSRegs[i ^ 1]));
  }
  // ...
    if (PnReg.isValid())
      // ...
      SavedRegs.set(AArch64::P8);
    // ...
           "Predicate cannot be a reserved register");
    SavedRegs.set(AArch64::X18);
  // ...
  determineStackHazardSlot(MF, SavedRegs);
  // ...
  unsigned CSStackSize = 0;
  unsigned ZPRCSStackSize = 0;
  unsigned PPRCSStackSize = 0;
  // ...
  for (unsigned Reg : SavedRegs.set_bits()) {
    // ...
    assert(RC && "expected register class!");
    auto SpillSize = TRI->getSpillSize(*RC);
    bool IsZPR = AArch64::ZPRRegClass.contains(Reg);
    bool IsPPR = !IsZPR && AArch64::PPRRegClass.contains(Reg);
    if (IsZPR /*...*/)
      ZPRCSStackSize += SpillSize;
    else if (IsPPR /*...*/)
      PPRCSStackSize += SpillSize;
    else
      CSStackSize += SpillSize;
  }
  unsigned NumSavedRegs = SavedRegs.count();
  // ...
    SavedRegs.set(AArch64::LR);
  // ...
      windowsRequiresStackProbe(MF, EstimatedStackSize + CSStackSize + 16)) {
    SavedRegs.set(AArch64::FP);
    SavedRegs.set(AArch64::LR);
  }

  LLVM_DEBUG({
    dbgs() << "*** determineCalleeSaves\nSaved CSRs:";
    for (unsigned Reg : SavedRegs.set_bits())
      // ...
  });
  auto [ZPRLocalStackSize, PPRLocalStackSize] =
      /*...*/;
  uint64_t SVELocals = ZPRLocalStackSize + PPRLocalStackSize;
  uint64_t SVEStackSize =
      alignTo(ZPRCSStackSize + PPRCSStackSize + SVELocals, 16);
  bool CanEliminateFrame = (SavedRegs.count() == 0) && !SVEStackSize;

  // ...
  int64_t CalleeStackUsed = 0;
  // ...
    if (FixedOff > CalleeStackUsed)
      CalleeStackUsed = FixedOff;
  // ...
  bool BigStack = SVEStackSize || (EstimatedStackSize + CSStackSize +
                                   CalleeStackUsed) > EstimatedStackSizeLimit;
  if (BigStack || !CanEliminateFrame || RegInfo->cannotEliminateFrame(MF))
    // ...

  // ...
    if (!ExtraCSSpill && UnspilledCSGPR != AArch64::NoRegister) {
      LLVM_DEBUG(dbgs() << "Spilling " << /*...*/
                        << " to get a scratch register.\n");
      SavedRegs.set(UnspilledCSGPR);
      ExtraCSSpill = UnspilledCSGPR;
      // ...
      if (producePairRegisters(MF)) {
        if (UnspilledCSGPRPaired == AArch64::NoRegister) {
          // ...
          SavedRegs.reset(UnspilledCSGPR);
          ExtraCSSpill = AArch64::NoRegister;
        } else {
          SavedRegs.set(UnspilledCSGPRPaired);
        }
      }
    }
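    // Big frames may need a scratch register during frame-index elimination
    // (offsets can exceed what one instruction encodes). Prefer spilling an
    // as-yet-unspilled callee-saved GPR; failing that, the scavenging frame
    // index allocated below serves as the emergency spill slot.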
    unsigned Size = TRI->getSpillSize(RC);
    Align Alignment = TRI->getSpillAlign(RC);
    // ...
    RS->addScavengingFrameIndex(FI);
    LLVM_DEBUG(dbgs() << "No available CS registers, allocated fi#" << FI
                      << " as the emergency spill slot.\n");
  }

  // ...
  CSStackSize += 8 * (SavedRegs.count() - NumSavedRegs);
  // ...
                    << EstimatedStackSize + AlignedCSStackSize << " bytes.\n");
  // ...
         "Should not invalidate callee saved info");
    MachineFunction &MF, const TargetRegisterInfo *TRI,
    std::vector<CalleeSavedInfo> &CSI, unsigned &MinCSFrameIndex,
    unsigned &MaxCSFrameIndex) const {
  // ...
    std::reverse(CSI.begin(), CSI.end());
  // ...
      if ((unsigned)FrameIdx < MinCSFrameIndex)
        MinCSFrameIndex = FrameIdx;
      if ((unsigned)FrameIdx > MaxCSFrameIndex)
        MaxCSFrameIndex = FrameIdx;
  // ...
    auto It =
        find_if(CSI, [](auto &Info) { return Info.getReg() == AArch64::LR; });
    if (It != CSI.end())
      CSI.insert(It, VGInfo);
    else
      CSI.push_back(VGInfo);
  int HazardSlotIndex = std::numeric_limits<int>::max();
  for (auto &CS : CSI) {
    // ...
      assert(HazardSlotIndex == std::numeric_limits<int>::max() &&
             "Unexpected register order for hazard slot");
      // ...
      LLVM_DEBUG(dbgs() << "Created CSR Hazard at slot " << HazardSlotIndex
                        << /*...*/);
      // ...
      if ((unsigned)HazardSlotIndex < MinCSFrameIndex)
        MinCSFrameIndex = HazardSlotIndex;
      if ((unsigned)HazardSlotIndex > MaxCSFrameIndex)
        MaxCSFrameIndex = HazardSlotIndex;
    // ...
    unsigned Size = RegInfo->getSpillSize(*RC);
    Align Alignment(RegInfo->getSpillAlign(*RC));
    // ...
    CS.setFrameIdx(FrameIdx);
    // ...
    if ((unsigned)FrameIdx < MinCSFrameIndex)
      MinCSFrameIndex = FrameIdx;
    if ((unsigned)FrameIdx > MaxCSFrameIndex)
      MaxCSFrameIndex = FrameIdx;

    // ...
        Reg == AArch64::FP) {
      // ...
      if ((unsigned)FrameIdx < MinCSFrameIndex)
        MinCSFrameIndex = FrameIdx;
      if ((unsigned)FrameIdx > MaxCSFrameIndex)
        MaxCSFrameIndex = FrameIdx;
    }
  }

  // ...
      HazardSlotIndex == std::numeric_limits<int>::max()) {
    // ...
    LLVM_DEBUG(dbgs() << "Created CSR Hazard at slot " << HazardSlotIndex
                      << /*...*/);
    // ...
    if ((unsigned)HazardSlotIndex < MinCSFrameIndex)
      MinCSFrameIndex = HazardSlotIndex;
    if ((unsigned)HazardSlotIndex > MaxCSFrameIndex)
      MaxCSFrameIndex = HazardSlotIndex;
  }
                                      int &Min, int &Max) {
  Min = std::numeric_limits<int>::max();
  Max = std::numeric_limits<int>::min();
  // ...
  for (auto &CS : CSI) {
    if (AArch64::ZPRRegClass.contains(CS.getReg()) ||
        AArch64::PPRRegClass.contains(CS.getReg())) {
      assert((Max == std::numeric_limits<int>::min() ||
              Max + 1 == CS.getFrameIdx()) &&
             "SVE CalleeSaves are not consecutive");
      Min = std::min(Min, CS.getFrameIdx());
      Max = std::max(Max, CS.getFrameIdx());
    }
  }
  return Min != std::numeric_limits<int>::max();
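// Because SVE callee saves are assigned consecutive frame indices (asserted
// above), the closed interval [Min, Max] fully describes the SVE CSR area.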
  uint64_t &ZPRStackTop = SVEStack.ZPRStackSize;
  // ...
         "SVE vectors should never be passed on the stack by value, only by "
         "reference.");
  // ...
  auto AllocateObject = [&](int FI) {
    // ...
    if (Alignment > Align(16))
      report_fatal_error(
          "Alignment of scalable vectors > 16 bytes is not yet supported");
    // ...
    StackTop = alignTo(StackTop, Alignment);
    assert(StackTop < (uint64_t)std::numeric_limits<int64_t>::max() &&
           "SVE StackTop far too large?!");
    int64_t Offset = -int64_t(StackTop);
  int MinCSFrameIndex, MaxCSFrameIndex;
  // ...
    for (int FI = MinCSFrameIndex; FI <= MaxCSFrameIndex; ++FI)
      // ...
  // ...
  int StackProtectorFI = -1;
  // ...
      ObjectsToAllocate.push_back(StackProtectorFI);
  // ...
    if (MaxCSFrameIndex >= FI && FI >= MinCSFrameIndex)
      // ...
  for (unsigned FI : ObjectsToAllocate)
    // ...

// ...
         "Upwards growing stack unsupported");
  int64_t CurrentOffset =
      /*...*/;
  // ...
    int FrameIndex = H.CatchObj.FrameIndex;
    if ((FrameIndex != INT_MAX) && MFI.getObjectOffset(FrameIndex) == 0) {
      // ...
    }
  // ...
  int64_t UnwindHelpOffset = alignTo(CurrentOffset + 8, Align(16));
  assert(UnwindHelpOffset == getFixedObjectSize(MF, AFI, /*IsWin64=*/true,
                                                /*IsFunclet=*/false) &&
         "UnwindHelpOffset must be at the start of the fixed object area");
  // ...
  EHInfo.UnwindHelpFrameIdx = UnwindHelpFI;

// ...
  RS->enterBasicBlockEnd(MBB);
  // ...
  Register DstReg = RS->FindUnusedReg(&AArch64::GPR64commonRegClass);
  assert(DstReg && "There must be a free register after frame setup");
struct TagStoreInstr {
  // ...
};

// ...
  MachineFunction *MF;
  MachineBasicBlock *MBB;
  MachineRegisterInfo *MRI;
  // ...
  StackOffset FrameRegOffset;
  // ...
  std::optional<int64_t> FrameRegUpdate;
  // ...
  unsigned FrameRegUpdateFlags;
  // ...
  TagStoreEdit(MachineBasicBlock *MBB, bool ZeroData)
      : MBB(MBB), ZeroData(ZeroData) {}
  // ...
  void addInstruction(TagStoreInstr I) {
    assert((TagStores.empty() ||
            TagStores.back().Offset + TagStores.back().Size == I.Offset) &&
           "Non-adjacent tag store instructions.");
    // ...
  }
  void clear() { TagStores.clear(); }
  // ...
  void emitCode(MachineBasicBlock::iterator &InsertI,
                const AArch64FrameLowering *TFI, bool TryMergeSPUpdate);
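// TagStoreEdit collects a run of adjacent memory-tagging stores (STG/STZG and
// friends) and re-emits them either unrolled or as an STGloop, optionally
// folding the surrounding SP adjustment into the final store.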
  const int64_t kMinOffset = -256 * 16;
  const int64_t kMaxOffset = 255 * 16;
  // ...
  int64_t BaseRegOffsetBytes = FrameRegOffset.getFixed();
  if (BaseRegOffsetBytes < kMinOffset ||
      BaseRegOffsetBytes + (Size - Size % 32) > kMaxOffset ||
      // ...
      BaseRegOffsetBytes % 16 != 0) {
    Register ScratchReg = MRI->createVirtualRegister(&AArch64::GPR64RegClass);
    // ...
    BaseRegOffsetBytes = 0;
  }
  // ...
    int64_t InstrSize = (Size > 16) ? 32 : 16;
    // ...
        ? (ZeroData ? AArch64::STZGi : AArch64::STGi)
        // ...
    assert(BaseRegOffsetBytes % 16 == 0);
    // ...
        .addImm(BaseRegOffsetBytes / 16)
    // ...
    if (BaseRegOffsetBytes == 0)
      // ...
    BaseRegOffsetBytes += InstrSize;
          : MRI->createVirtualRegister(&AArch64::GPR64RegClass);
  Register SizeReg = MRI->createVirtualRegister(&AArch64::GPR64RegClass);
  // ...
  int64_t LoopSize = Size;
  // ...
  if (FrameRegUpdate && *FrameRegUpdate)
    LoopSize -= LoopSize % 32;
  // ...
                    TII->get(ZeroData ? AArch64::STZGloop_wback
                                      : AArch64::STGloop_wback))
  // ...
  LoopI->setFlags(FrameRegUpdateFlags);

  int64_t ExtraBaseRegUpdate =
      FrameRegUpdate ? (*FrameRegUpdate - FrameRegOffset.getFixed() - Size) : 0;
  LLVM_DEBUG(dbgs() << "TagStoreEdit::emitLoop: LoopSize=" << LoopSize
                    << ", Size=" << Size
                    << ", ExtraBaseRegUpdate=" << ExtraBaseRegUpdate
                    << ", FrameRegUpdate=" << FrameRegUpdate
                    << ", FrameRegOffset.getFixed()="
                    << FrameRegOffset.getFixed() << "\n");
  if (LoopSize < Size) {
    // ...
    int64_t STGOffset = ExtraBaseRegUpdate + 16;
    assert(STGOffset % 16 == 0 && STGOffset >= -4096 && STGOffset <= 4080 &&
           "STG immediate out of range");
    // ...
                  TII->get(ZeroData ? AArch64::STZGPostIndex
                                    : AArch64::STGPostIndex))
  } else if (ExtraBaseRegUpdate) {
    // ...
    int64_t AddSubOffset = std::abs(ExtraBaseRegUpdate);
    assert(AddSubOffset <= 4095 && "ADD/SUB immediate out of range");
    // ...
                  TII->get(ExtraBaseRegUpdate > 0 ? AArch64::ADDXri
                                                  : AArch64::SUBXri))
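  // The loop tags LoopSize bytes, 32 (two tag granules) at a time. When Size
  // is not a multiple of 32, the last 16-byte granule is emitted as a single
  // post-indexed STG whose writeback also absorbs ExtraBaseRegUpdate, roughly:
  //   STGloop  #LoopSize, [Reg]!
  //   STG      Reg, [Reg], #(16 + ExtraBaseRegUpdate)
  // Otherwise any leftover update becomes a plain ADD/SUB of SP.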
                               int64_t Size, int64_t *TotalOffset) {
  // ...
  if ((MI.getOpcode() == AArch64::ADDXri ||
       MI.getOpcode() == AArch64::SUBXri) &&
      MI.getOperand(0).getReg() == Reg && MI.getOperand(1).getReg() == Reg) {
    // ...
    int64_t Offset = MI.getOperand(2).getImm() << Shift;
    if (MI.getOpcode() == AArch64::SUBXri)
      // ...
    // ...
    const int64_t kMaxOffset = 4080 - 16;
    // ...
    const int64_t kMinOffset = -4095;
    if (PostOffset <= kMaxOffset && PostOffset >= kMinOffset &&
        PostOffset % 16 == 0) {
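      // The SP update is only folded if the resulting writeback immediates
      // still encode: the final post-indexed STG allows offsets up to 4080 and
      // 16 is already added to it (hence kMaxOffset = 4080 - 16), while a
      // plain ADD/SUB covers the negative side down to -4095.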
  for (auto &TS : TSE) {
    // ...
    if (MI->memoperands_empty()) {
      // ...
    }
    MemRefs.append(MI->memoperands_begin(), MI->memoperands_end());
  }
                            bool TryMergeSPUpdate) {
  if (TagStores.empty())
    return;
  TagStoreInstr &FirstTagStore = TagStores[0];
  TagStoreInstr &LastTagStore = TagStores[TagStores.size() - 1];
  Size = LastTagStore.Offset - FirstTagStore.Offset + LastTagStore.Size;
  DL = TagStores[0].MI->getDebugLoc();
  // ...
      *MF, FirstTagStore.Offset, false,
  // ...
  FrameRegUpdate = std::nullopt;
  // ...
  mergeMemRefs(TagStores, CombinedMemRefs);

  LLVM_DEBUG({
    dbgs() << "Replacing adjacent STG instructions:\n";
    for (const auto &Instr : TagStores) {
      // ...
    }
  });
  // ...
  if (TagStores.size() < 2)
    // ...
    emitUnrolled(InsertI);
  // ...
  int64_t TotalOffset = 0;
  if (TryMergeSPUpdate) {
    // ...
    if (InsertI != MBB->end() &&
        canMergeRegUpdate(InsertI, FrameReg, FrameRegOffset.getFixed() + Size,
                          &TotalOffset)) {
      UpdateInstr = &*InsertI++;
      // ...
    }
  }
  // ...
  if (!UpdateInstr && TagStores.size() < 2)
    return;
  // ...
  FrameRegUpdate = TotalOffset;
  FrameRegUpdateFlags = UpdateInstr->getFlags();
  // ...
  for (auto &TS : TagStores)
    TS.MI->eraseFromParent();
                                                int64_t &Size,
                                                bool &ZeroData) {
  // ...
  unsigned Opcode = MI.getOpcode();
  ZeroData = (Opcode == AArch64::STZGloop || Opcode == AArch64::STZGi ||
              Opcode == AArch64::STZ2Gi);

  if (Opcode == AArch64::STGloop || Opcode == AArch64::STZGloop) {
    if (!MI.getOperand(0).isDead() || !MI.getOperand(1).isDead())
      return false;
    if (!MI.getOperand(2).isImm() || !MI.getOperand(3).isFI())
      return false;
    // ...
    Size = MI.getOperand(2).getImm();
    // ...
  }

  if (Opcode == AArch64::STGi || Opcode == AArch64::STZGi)
    Size = 16;
  else if (Opcode == AArch64::ST2Gi || Opcode == AArch64::STZ2Gi)
    Size = 32;
  else
    return false;

  if (MI.getOperand(0).getReg() != AArch64::SP || !MI.getOperand(1).isFI())
    return false;
  // ...
           16 * MI.getOperand(2).getImm();
  if (!isMergeableStackTaggingInstruction(MI, Offset, Size, FirstZeroData))
    // ...

  // ...
  constexpr int kScanLimit = 10;
  // ...
       NextI != E && Count < kScanLimit; ++NextI) {
    // ...
    if (isMergeableStackTaggingInstruction(MI, Offset, Size, ZeroData)) {
      if (ZeroData != FirstZeroData)
        break;
      // ...
    }
    // ...
    if (!MI.isTransient())
      // ...
    // ...
    if (MI.mayLoadOrStore() || MI.hasUnmodeledSideEffects() || MI.isCall())
      break;
  }
  // ...
  LiveRegs.addLiveOuts(*MBB);
  // ...
    LiveRegs.stepBackward(*I);
  // ...
  if (LiveRegs.contains(AArch64::NZCV))
    // ...
            [](const TagStoreInstr &Left, const TagStoreInstr &Right) {
              // ...
            });
  // ...
  int64_t CurOffset = Instrs[0].Offset;
  for (auto &Instr : Instrs) {
    if (CurOffset > Instr.Offset)
      // ...
  }

  // ...
  TagStoreEdit TSE(MBB, FirstZeroData);
  std::optional<int64_t> EndOffset;
  for (auto &Instr : Instrs) {
    if (EndOffset && *EndOffset != Instr.Offset) {
      // ...
      TSE.emitCode(InsertI, TFI, false);
      // ...
    }
    TSE.addInstruction(Instr);
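    // Candidate tag stores are sorted by offset; each contiguous run is
    // merged, and a gap in the offsets flushes the current run with
    // TryMergeSPUpdate disabled, since only the final run can fold the
    // epilogue's SP adjustment.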
      II = tryMergeAdjacentSTG(II, this, RS);

// ...
      shouldSignReturnAddressEverywhere(MF))

// ...
                                            bool IgnoreSPUpdates) const {
  // ...
  if (IgnoreSPUpdates) {
    // ...
    FrameReg = AArch64::SP;
    // ...
  }
  // ...
  FrameReg = AArch64::SP;
  bool IsValid = false;
  // ...
  int ObjectIndex = 0;
  // ...
  int GroupIndex = -1;
  // ...
  bool ObjectFirst = false;
  // ...
  bool GroupFirst = false;

  // ...
  enum { AccessFPR = 1, AccessHazard = 2, AccessGPR = 4 };
  // ...
};

// ...
  SmallVector<int, 8> CurrentMembers;
  int NextGroupIndex = 0;
  std::vector<FrameObject> &Objects;
  // ...
  GroupBuilder(std::vector<FrameObject> &Objects) : Objects(Objects) {}
  void AddMember(int Index) { CurrentMembers.push_back(Index); }
  void EndCurrentGroup() {
    if (CurrentMembers.size() > 1) {
      // ...
      for (int Index : CurrentMembers) {
        Objects[Index].GroupIndex = NextGroupIndex;
        // ...
      }
      // ...
    }
    CurrentMembers.clear();
  }
bool FrameObjectCompare(const FrameObject &A, const FrameObject &B) {
  // ...
  return std::make_tuple(!A.IsValid, A.Accesses, A.ObjectFirst, A.GroupFirst,
                         A.GroupIndex, A.ObjectIndex) <
         std::make_tuple(!B.IsValid, B.Accesses, B.ObjectFirst, B.GroupFirst,
                         B.GroupIndex, B.ObjectIndex);
}
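// The comparison key orders valid objects first, then by access class
// (AccessFPR=1 < AccessHazard=2 < AccessGPR=4), so FPR-accessed objects land
// on the opposite side of the hazard slot from GPR-accessed ones; the
// ObjectFirst/GroupFirst pins and the group/object indices break ties.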
      ObjectsToAllocate.empty())
    return;
  // ...
  for (auto &Obj : ObjectsToAllocate) {
    FrameObjects[Obj].IsValid = true;
    FrameObjects[Obj].ObjectIndex = Obj;
  }

  // ...
  GroupBuilder GB(FrameObjects);
  for (auto &MBB : MF) {
    for (auto &MI : MBB) {
      if (MI.isDebugInstr())
        continue;
      // ...
      if (FI && *FI >= 0 && *FI < (int)FrameObjects.size()) {
        // ...
          FrameObjects[*FI].Accesses |= FrameObject::AccessFPR;
        // ...
          FrameObjects[*FI].Accesses |= FrameObject::AccessGPR;
      }
      // ...
      switch (MI.getOpcode()) {
      case AArch64::STGloop:
      case AArch64::STZGloop:
        // ...
      case AArch64::STZGi:
      case AArch64::ST2Gi:
      case AArch64::STZ2Gi:
        // ...
      }
      // ...
          FrameObjects[FI].IsValid)
        // ...
        GB.AddMember(TaggedFI);
      else
        GB.EndCurrentGroup();
    }
    GB.EndCurrentGroup();
  }
3639 GB.EndCurrentGroup();
3644 FrameObject::AccessHazard;
3646 for (
auto &Obj : FrameObjects)
3647 if (!Obj.Accesses ||
3648 Obj.Accesses == (FrameObject::AccessGPR | FrameObject::AccessFPR))
3649 Obj.Accesses = FrameObject::AccessGPR;
3658 FrameObjects[*TBPI].ObjectFirst =
true;
3659 FrameObjects[*TBPI].GroupFirst =
true;
3660 int FirstGroupIndex = FrameObjects[*TBPI].GroupIndex;
3661 if (FirstGroupIndex >= 0)
3662 for (FrameObject &Object : FrameObjects)
3663 if (Object.GroupIndex == FirstGroupIndex)
3664 Object.GroupFirst =
true;
3670 for (
auto &Obj : FrameObjects) {
3674 ObjectsToAllocate[i++] = Obj.ObjectIndex;
3678 dbgs() <<
"Final frame order:\n";
3679 for (
auto &Obj : FrameObjects) {
3682 dbgs() <<
" " << Obj.ObjectIndex <<
": group " << Obj.GroupIndex;
3683 if (Obj.ObjectFirst)
3684 dbgs() <<
", first";
3686 dbgs() <<
", group-first";
MachineBasicBlock::iterator
AArch64FrameLowering::inlineStackProbeLoopExactMultiple(
    /*...*/) const {
  // ...
  MF.insert(MBBInsertPoint, LoopMBB);
  // ...
  MF.insert(MBBInsertPoint, ExitMBB);
  // ...
  MBB.addSuccessor(LoopMBB);
  // ...
  return ExitMBB->begin();
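// The probe loop decrements the scratch register one probe-size block at a
// time, touching each page and branching back until the final SP is reached;
// it is used when the allocation is an exact multiple of the probe size.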
void AArch64FrameLowering::inlineStackProbeFixed(
    MachineBasicBlock::iterator MBBI, Register ScratchReg, int64_t FrameSize,
    StackOffset CFAOffset) const {
  // ...
  const AArch64InstrInfo *TII =
      /*...*/;
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  // ...
  int64_t ProbeSize = MF.getInfo<AArch64FunctionInfo>()->getStackProbeSize();
  int64_t NumBlocks = FrameSize / ProbeSize;
  int64_t ResidualSize = FrameSize % ProbeSize;

  LLVM_DEBUG(dbgs() << "Stack probing: total " << FrameSize << " bytes, "
                    << NumBlocks << " blocks of " << ProbeSize
                    << " bytes, plus " << ResidualSize << " bytes\n");
  // ...
    for (int i = 0; i < NumBlocks; ++i) {
      // ...
                      EmitAsyncCFI && !HasFP, CFAOffset);
      // ...
    }
  } else if (NumBlocks != 0) {
    // ...
                    EmitAsyncCFI && !HasFP, CFAOffset);
    // ...
    MBBI = inlineStackProbeLoopExactMultiple(MBBI, ProbeSize, ScratchReg);
    // ...
    if (EmitAsyncCFI && !HasFP) {
      // ...
          .buildDefCFARegister(AArch64::SP);
    }
  }

  if (ResidualSize != 0) {
    // ...
                    EmitAsyncCFI && !HasFP, CFAOffset);
    // ...
  }
  SmallVector<MachineInstr *, 4> ToReplace;
  for (MachineInstr &MI : MBB)
    if (MI.getOpcode() == AArch64::PROBED_STACKALLOC ||
        MI.getOpcode() == AArch64::PROBED_STACKALLOC_VAR)
      ToReplace.push_back(&MI);

  for (MachineInstr *MI : ToReplace) {
    if (MI->getOpcode() == AArch64::PROBED_STACKALLOC) {
      Register ScratchReg = MI->getOperand(0).getReg();
      int64_t FrameSize = MI->getOperand(1).getImm();
      // ...
                            MI->getOperand(3).getImm());
      inlineStackProbeFixed(MI->getIterator(), ScratchReg, FrameSize,
                            /*...*/);
    } else {
      assert(MI->getOpcode() == AArch64::PROBED_STACKALLOC_VAR &&
             "Stack probe pseudo-instruction expected");
      const AArch64InstrInfo *TII =
          MI->getMF()->getSubtarget<AArch64Subtarget>().getInstrInfo();
      Register TargetReg = MI->getOperand(0).getReg();
      (void)TII->probedStackAlloc(MI->getIterator(), TargetReg, true);
    }
    MI->eraseFromParent();
  }
    return std::make_tuple(start(), Idx) <
           std::make_tuple(Rhs.start(), Rhs.Idx);

// ...
       << (Offset.getFixed() < 0 ? "" : "+") << Offset.getFixed();
    if (Offset.getScalable())
      OS << (Offset.getScalable() < 0 ? "" : "+") << Offset.getScalable()
void AArch64FrameLowering::emitRemarks(
    const MachineFunction &MF, MachineOptimizationRemarkEmitter *ORE) const {
  auto *AFI = MF.getInfo<AArch64FunctionInfo>();
  // ...
  const uint64_t HazardSize =
      /*...*/;
  if (HazardSize == 0)
    return;
  // ...
  std::vector<StackAccess> StackAccesses(MFI.getNumObjects());

  size_t NumFPLdSt = 0;
  size_t NumNonFPLdSt = 0;
  // ...
  for (const MachineBasicBlock &MBB : MF) {
    for (const MachineInstr &MI : MBB) {
      if (!MI.mayLoadOrStore() || MI.getNumMemOperands() < 1)
        continue;
      for (MachineMemOperand *MMO : MI.memoperands()) {
        // ...
        StackAccesses[ArrIdx].Idx = FrameIdx;
        StackAccesses[ArrIdx].Offset =
            /*...*/;
        // ...
        StackAccesses[ArrIdx].AccessTypes |= RegTy;
        // ...
      }
    }
  }

  if (NumFPLdSt == 0 || NumNonFPLdSt == 0)
    return;
  if (StackAccesses.front().isMixed())
    MixedObjects.push_back(&StackAccesses.front());

  for (auto It = StackAccesses.begin(), End = std::prev(StackAccesses.end());
       It != End; ++It) {
    const auto &First = *It;
    const auto &Second = *(It + 1);

    if (Second.isMixed())
      MixedObjects.push_back(&Second);

    if ((First.isSME() && Second.isCPU()) ||
        (First.isCPU() && Second.isSME())) {
      uint64_t Distance = static_cast<uint64_t>(Second.start() - First.end());
      if (Distance < HazardSize)
        // ...
    }
  }

  auto EmitRemark = [&](llvm::StringRef Str) {
    // ...
      auto R = MachineOptimizationRemarkAnalysis(
          "sme", "StackHazard", MF.getFunction().getSubprogram(), &MF.front());
      return R << formatv("stack hazard in '{0}': ", MF.getName()).str() << Str;
    // ...
  };

  for (const auto &P : HazardPairs)
    EmitRemark(formatv("{0} is too close to {1}", *P.first, *P.second).str());

  for (const auto *Obj : MixedObjects)
    EmitRemark(
        formatv("{0} accessed by both GP and FP instructions", *Obj).str());
unsigned const MachineRegisterInfo * MRI
static void getLiveRegsForEntryMBB(LivePhysRegs &LiveRegs, const MachineBasicBlock &MBB)
static const unsigned DefaultSafeSPDisplacement
This is the biggest offset to the stack pointer we can encode in aarch64 instructions (without using ...
static bool produceCompactUnwindFrame(const AArch64FrameLowering &, MachineFunction &MF)
static cl::opt< bool > StackTaggingMergeSetTag("stack-tagging-merge-settag", cl::desc("merge settag instruction in function epilog"), cl::init(true), cl::Hidden)
bool enableMultiVectorSpillFill(const AArch64Subtarget &Subtarget, MachineFunction &MF)
static std::optional< int > getLdStFrameID(const MachineInstr &MI, const MachineFrameInfo &MFI)
static cl::opt< bool > SplitSVEObjects("aarch64-split-sve-objects", cl::desc("Split allocation of ZPR & PPR objects"), cl::init(true), cl::Hidden)
static cl::opt< bool > StackHazardInNonStreaming("aarch64-stack-hazard-in-non-streaming", cl::init(false), cl::Hidden)
void computeCalleeSaveRegisterPairs(const AArch64FrameLowering &AFL, MachineFunction &MF, ArrayRef< CalleeSavedInfo > CSI, const TargetRegisterInfo *TRI, SmallVectorImpl< RegPairInfo > &RegPairs, bool NeedsFrameRecord)
static cl::opt< bool > OrderFrameObjects("aarch64-order-frame-objects", cl::desc("sort stack allocations"), cl::init(true), cl::Hidden)
static bool invalidateWindowsRegisterPairing(unsigned Reg1, unsigned Reg2, bool NeedsWinCFI, bool IsFirst, const TargetRegisterInfo *TRI)
static cl::opt< bool > DisableMultiVectorSpillFill("aarch64-disable-multivector-spill-fill", cl::desc("Disable use of LD/ST pairs for SME2 or SVE2p1"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableRedZone("aarch64-redzone", cl::desc("enable use of redzone on AArch64"), cl::init(false), cl::Hidden)
cl::opt< bool > EnableHomogeneousPrologEpilog("homogeneous-prolog-epilog", cl::Hidden, cl::desc("Emit homogeneous prologue and epilogue for the size " "optimization (default = off)"))
static bool isLikelyToHaveSVEStack(const AArch64FrameLowering &AFL, const MachineFunction &MF)
static bool invalidateRegisterPairing(unsigned Reg1, unsigned Reg2, bool UsesWinAAPCS, bool NeedsWinCFI, bool NeedsFrameRecord, bool IsFirst, const TargetRegisterInfo *TRI)
Returns true if Reg1 and Reg2 cannot be paired using a ldp/stp instruction.
static unsigned getPrologueDeath(MachineFunction &MF, unsigned Reg)
static SVEStackSizes determineSVEStackSizes(MachineFunction &MF, AssignObjectOffsets AssignOffsets)
Process all the SVE stack objects and the SVE stack size and offsets for each object.
static bool isTargetWindows(const MachineFunction &MF)
static unsigned estimateRSStackSizeLimit(MachineFunction &MF)
Look at each instruction that references stack frames and return the stack size limit beyond which so...
static bool getSVECalleeSaveSlotRange(const MachineFrameInfo &MFI, int &Min, int &Max)
returns true if there are any SVE callee saves.
static cl::opt< unsigned > StackHazardRemarkSize("aarch64-stack-hazard-remark-size", cl::init(0), cl::Hidden)
static MCRegister getRegisterOrZero(MCRegister Reg, bool HasSVE)
static unsigned getStackHazardSize(const MachineFunction &MF)
MCRegister findFreePredicateReg(BitVector &SavedRegs)
static bool isPPRAccess(const MachineInstr &MI)
static std::optional< int > getMMOFrameID(MachineMemOperand *MMO, const MachineFrameInfo &MFI)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file contains the declaration of the AArch64PrologueEmitter and AArch64EpilogueEmitter classes,...
static const int kSetTagLoopThreshold
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
This file contains the simple types necessary to represent the attributes associated with functions a...
#define CASE(ATTRNAME, AANAME,...)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
DXIL Forward Handle Accesses
const HexagonInstrInfo * TII
static std::string getTypeString(Type *T)
This file implements the LivePhysRegs utility for tracking liveness of physical registers.
Register const TargetRegisterInfo * TRI
Promote Memory to Register
uint64_t IntrinsicInst * II
This file declares the machine register scavenger class.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
This file defines the SmallVector class.
void emitEpilogue()
Emit the epilogue.
StackOffset getSVEStackSize(const MachineFunction &MF) const
Returns the size of the entire SVE stackframe (PPRs + ZPRs).
StackOffset getZPRStackSize(const MachineFunction &MF) const
Returns the size of the entire ZPR stackframe (calleesaves + spills).
void processFunctionBeforeFrameIndicesReplaced(MachineFunction &MF, RegScavenger *RS) const override
processFunctionBeforeFrameIndicesReplaced - This method is called immediately before MO_FrameIndex op...
MachineBasicBlock::iterator eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator I) const override
This method is called during prolog/epilog code insertion to eliminate call frame setup and destroy p...
bool canUseAsPrologue(const MachineBasicBlock &MBB) const override
Check whether or not the given MBB can be used as a prologue for the target.
bool enableStackSlotScavenging(const MachineFunction &MF) const override
Returns true if the stack slot holes in the fixed and callee-save stack area should be used when allo...
bool spillCalleeSavedRegisters(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, ArrayRef< CalleeSavedInfo > CSI, const TargetRegisterInfo *TRI) const override
spillCalleeSavedRegisters - Issues instruction(s) to spill all callee saved registers and returns tru...
bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, MutableArrayRef< CalleeSavedInfo > CSI, const TargetRegisterInfo *TRI) const override
restoreCalleeSavedRegisters - Issues instruction(s) to restore all callee saved registers and returns...
bool enableFullCFIFixup(const MachineFunction &MF) const override
enableFullCFIFixup - Returns true if we may need to fix the unwind information such that it is accura...
StackOffset getFrameIndexReferenceFromSP(const MachineFunction &MF, int FI) const override
getFrameIndexReferenceFromSP - This method returns the offset from the stack pointer to the slot of t...
bool enableCFIFixup(const MachineFunction &MF) const override
Returns true if we may need to fix the unwind information for the function.
StackOffset getNonLocalFrameIndexReference(const MachineFunction &MF, int FI) const override
getNonLocalFrameIndexReference - This method returns the offset used to reference a frame index locat...
TargetStackID::Value getStackIDForScalableVectors() const override
Returns the StackID that scalable vectors should be associated with.
friend class AArch64PrologueEmitter
bool hasFPImpl(const MachineFunction &MF) const override
hasFPImpl - Return true if the specified function should have a dedicated frame pointer register.
void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override
emitProlog/emitEpilog - These methods insert prolog and epilog code into the function.
friend class AArch64EpilogueEmitter
void resetCFIToInitialState(MachineBasicBlock &MBB) const override
Emit CFI instructions that recreate the state of the unwind information upon function entry.
bool hasReservedCallFrame(const MachineFunction &MF) const override
hasReservedCallFrame - Under normal circumstances, when a frame pointer is not required,...
StackOffset resolveFrameOffsetReference(const MachineFunction &MF, int64_t ObjectOffset, bool isFixed, TargetStackID::Value StackID, Register &FrameReg, bool PreferFP, bool ForSimm) const
bool canUseRedZone(const MachineFunction &MF) const
Can this function use the red zone for local allocations.
bool needsWinCFI(const MachineFunction &MF) const
bool isFPReserved(const MachineFunction &MF) const
Should the Frame Pointer be reserved for the current function?
void processFunctionBeforeFrameFinalized(MachineFunction &MF, RegScavenger *RS) const override
processFunctionBeforeFrameFinalized - This method is called immediately before the specified function...
int getSEHFrameIndexOffset(const MachineFunction &MF, int FI) const
unsigned getWinEHFuncletFrameSize(const MachineFunction &MF) const
Funclets only need to account for space for the callee saved registers, as the locals are accounted f...
void orderFrameObjects(const MachineFunction &MF, SmallVectorImpl< int > &ObjectsToAllocate) const override
Order the symbols in the local stack frame.
void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override
StackOffset getPPRStackSize(const MachineFunction &MF) const
Returns the size of the entire PPR stackframe (calleesaves + spills + hazard padding).
void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, RegScavenger *RS) const override
This method determines which of the registers reported by TargetRegisterInfo::getCalleeSavedRegs() sh...
StackOffset getFrameIndexReference(const MachineFunction &MF, int FI, Register &FrameReg) const override
getFrameIndexReference - Provide a base+offset reference to an FI slot for debug info.
bool assignCalleeSavedSpillSlots(MachineFunction &MF, const TargetRegisterInfo *TRI, std::vector< CalleeSavedInfo > &CSI, unsigned &MinCSFrameIndex, unsigned &MaxCSFrameIndex) const override
assignCalleeSavedSpillSlots - Allows target to override spill slot assignment logic.
StackOffset getFrameIndexReferencePreferSP(const MachineFunction &MF, int FI, Register &FrameReg, bool IgnoreSPUpdates) const override
For Win64 AArch64 EH, the offset to the Unwind object is from the SP before the update.
StackOffset resolveFrameIndexReference(const MachineFunction &MF, int FI, Register &FrameReg, bool PreferFP, bool ForSimm) const
unsigned getWinEHParentFrameOffset(const MachineFunction &MF) const override
The parent frame offset (aka dispFrame) is only used on X86_64 to retrieve the parent's frame pointer...
bool requiresSaveVG(const MachineFunction &MF) const
void emitPacRetPlusLeafHardening(MachineFunction &MF) const
Harden the entire function with pac-ret.
AArch64FunctionInfo - This class is derived from MachineFunctionInfo and contains private AArch64-spe...
unsigned getPPRCalleeSavedStackSize() const
void setHasStackFrame(bool s)
void setSwiftAsyncContextFrameIdx(int FI)
unsigned getTailCallReservedStack() const
unsigned getCalleeSavedStackSize(const MachineFrameInfo &MFI) const
void setCalleeSaveBaseToFrameRecordOffset(int Offset)
bool hasStackProbing() const
unsigned getArgumentStackToRestore() const
void setCalleeSaveStackHasFreeSpace(bool s)
int getCalleeSaveBaseToFrameRecordOffset() const
bool hasStreamingModeChanges() const
bool shouldSignReturnAddress(const MachineFunction &MF) const
void setPredicateRegForFillSpill(unsigned Reg)
int getStackHazardSlotIndex() const
void setCalleeSavedStackSize(unsigned Size)
void setSplitSVEObjects(bool s)
bool hasStackFrame() const
void setStackSizeSVE(uint64_t ZPR, uint64_t PPR)
std::optional< int > getTaggedBasePointerIndex() const
SMEAttrs getSMEFnAttrs() const
uint64_t getLocalStackSize() const
bool needsDwarfUnwindInfo(const MachineFunction &MF) const
unsigned getVarArgsGPRSize() const
uint64_t getStackSizePPR() const
bool hasSwiftAsyncContext() const
bool hasStackHazardSlotIndex() const
void setStackHazardSlotIndex(int Index)
unsigned getZPRCalleeSavedStackSize() const
void setStackHazardCSRSlotIndex(int Index)
unsigned getPredicateRegForFillSpill() const
void setSVECalleeSavedStackSize(unsigned ZPR, unsigned PPR)
bool hasCalculatedStackSizeSVE() const
uint64_t getStackSizeZPR() const
bool hasSVEStackSize() const
bool isStackHazardIncludedInCalleeSaveArea() const
unsigned getSVECalleeSavedStackSize() const
bool hasSplitSVEObjects() const
bool needsAsyncDwarfUnwindInfo(const MachineFunction &MF) const
bool hasCalleeSaveStackFreeSpace() const
static bool isTailCallReturnInst(const MachineInstr &MI)
Returns true if MI is one of the TCRETURN* instructions.
static bool isFpOrNEON(Register Reg)
Returns whether the physical register is FP or NEON.
void emitPrologue()
Emit the prologue.
bool isTargetWindows() const
const AArch64RegisterInfo * getRegisterInfo() const override
bool isNeonAvailable() const
Returns true if the target has NEON and the function at runtime is known to have NEON enabled (e....
const AArch64InstrInfo * getInstrInfo() const override
const AArch64TargetLowering * getTargetLowering() const override
bool isTargetMachO() const
bool isSVEorStreamingSVEAvailable() const
Returns true if the target has access to either the full range of SVE instructions,...
bool isStreaming() const
Returns true if the function has a streaming body.
bool hasInlineStackProbe(const MachineFunction &MF) const override
True if stack clash protection is enabled for this functions.
unsigned getRedZoneSize(const Function &F) const
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
bool test(unsigned Idx) const
size_type count() const
count - Returns the number of bits which are set.
iterator_range< const_set_bits_iterator > set_bits() const
size_type size() const
size - Returns the number of bits in this bitvector.
Helper class for creating CFI instructions and inserting them into MIR.
The CalleeSavedInfo class tracks the information need to locate where a callee saved register is in t...
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
AttributeList getAttributes() const
Return the attribute list for this Function.
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
A set of physical registers with utility functions to track liveness when walking backward/forward th...
bool usesWindowsCFI() const
Wrapper class representing physical registers. Should be passed by value.
LLVM_ABI void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and updates PHI operands in the successor bloc...
LLVM_ABI iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
MachineInstr & instr_back()
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
reverse_iterator rbegin()
iterator insertAfter(iterator I, MachineInstr *MI)
Insert MI into the instruction list after I.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineInstrBundleIterator< MachineInstr > iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
LLVM_ABI int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack ...
const AllocaInst * getObjectAllocation(int ObjectIdx) const
Return the underlying Alloca of the specified stack object if it exists.
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
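A sketch of how the two creation APIs differ in practice: a fixed object lives at a known offset from the incoming stack pointer, while a statically sized object gets its offset assigned later by prologue/epilogue insertion. Sizes, offsets, and alignment here are illustrative, not what AArch64 actually reserves.

  #include "llvm/CodeGen/MachineFrameInfo.h"
  #include "llvm/Support/Alignment.h"
  using namespace llvm;

  static int reserveSlots(MachineFrameInfo &MFI) {
    // Fixed slot at SP-16 on entry, e.g. for a spilled argument.
    MFI.CreateFixedObject(/*Size=*/8, /*SPOffset=*/-16, /*IsImmutable=*/false);
    // Local whose offset is assigned during frame finalization.
    return MFI.CreateStackObject(/*Size=*/16, Align(16), /*isSpillSlot=*/false);
  }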
bool hasCalls() const
Return true if the current function has any function calls.
bool isFrameAddressTaken() const
This method may be called any time after instruction selection is complete to determine if there is a...
void setObjectOffset(int ObjectIdx, int64_t SPOffset)
Set the stack frame offset of the specified object.
uint64_t getMaxCallFrameSize() const
Return the maximum size of a call frame that must be allocated for an outgoing function call.
bool hasPatchPoint() const
This method may be called any time after instruction selection is complete to determine if there is a...
bool hasScalableStackID(int ObjectIdx) const
int getStackProtectorIndex() const
Return the index for the stack protector object.
LLVM_ABI int CreateSpillStackObject(uint64_t Size, Align Alignment)
Create a new statically sized stack object that represents a spill slot, returning a nonnegative iden...
LLVM_ABI uint64_t estimateStackSize(const MachineFunction &MF) const
Estimate and return the size of the stack frame.
void setStackID(int ObjectIdx, uint8_t ID)
bool isCalleeSavedInfoValid() const
Has the callee saved info been calculated yet?
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
bool isMaxCallFrameSizeComputed() const
bool hasStackMap() const
This method may be called any time after instruction selection is complete to determine if there is a...
const std::vector< CalleeSavedInfo > & getCalleeSavedInfo() const
Returns a reference to the callee saved info vector for the current function.
unsigned getNumObjects() const
Return the number of objects.
int getObjectIndexEnd() const
Return one past the maximum frame object index.
bool hasStackProtectorIndex() const
bool hasStackObjects() const
Return true if there are any stack objects in this function.
uint8_t getStackID(int ObjectIdx) const
unsigned getNumFixedObjects() const
Return the number of fixed objects.
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
int getObjectIndexBegin() const
Return the minimum frame object index.
void setObjectAlignment(int ObjectIdx, Align Alignment)
setObjectAlignment - Change the alignment of the specified stack object.
bool isDeadObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a dead object.
const WinEHFuncInfo * getWinEHFuncInfo() const
getWinEHFuncInfo - Return information about how the current function uses Windows exception handling.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
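getInfo is how the frame lowering reaches the AArch64FunctionInfo members listed at the top of this section. A sketch, assuming the in-tree AArch64MachineFunctionInfo.h header:

  #include "AArch64MachineFunctionInfo.h"
  #include "llvm/CodeGen/MachineFunction.h"
  using namespace llvm;

  static unsigned sveCalleeSaveBytes(MachineFunction &MF) {
    auto *AFI = MF.getInfo<AArch64FunctionInfo>();
    return AFI->getZPRCalleeSavedStackSize() + AFI->getPPRCalleeSavedStackSize();
  }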
const MachineBasicBlock & front() const
bool hasEHFunclets() const
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineInstr - Allocate a new MachineInstr.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with.
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
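These chained operand methods are used through the BuildMI interface listed further below; a sketch of the idiom as it appears in prologue emission, with the instruction description and registers passed in rather than hard-coded:

  #include "llvm/CodeGen/MachineBasicBlock.h"
  #include "llvm/CodeGen/MachineInstrBuilder.h"
  using namespace llvm;

  static void emitRegImmOp(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
                           const MCInstrDesc &Desc, Register DstReg,
                           Register SrcReg, int64_t Imm) {
    BuildMI(MBB, MBBI, DL, Desc, DstReg)      // creates "DstReg = Desc ..."
        .addReg(SrcReg)                       // register operand
        .addImm(Imm)                          // immediate operand
        .setMIFlag(MachineInstr::FrameSetup); // tag as prologue code
  }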
Representation of each machine instruction.
void setFlags(unsigned flags)
LLVM_ABI void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
uint32_t getFlags() const
Return the MI flags bitvector.
A description of a memory reference used in the backend.
const PseudoSourceValue * getPseudoValue() const
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
const Value * getValue() const
Return the base address of the memory access.
MachineOperand class - Representation of each machine instruction operand.
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
LLVM_ABI bool isLiveIn(Register Reg) const
LLVM_ABI const MCPhysReg * getCalleeSavedRegs() const
Returns list of callee saved registers.
LLVM_ABI bool isPhysRegUsed(MCRegister PhysReg, bool SkipRegMaskTest=false) const
Return true if the specified register is modified or read in this function.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
SMEAttrs is a utility class to parse the SME ACLE attributes on functions.
bool hasStreamingInterface() const
bool hasNonStreamingInterfaceAndBody() const
bool hasStreamingBody() const
bool insert(const value_type &X)
Insert a new element into the SetVector.
A SetVector that performs no allocations if smaller than a certain size.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
int64_t getFixed() const
Returns the fixed component of the stack.
int64_t getScalable() const
Returns the scalable component of the stack.
static StackOffset get(int64_t Fixed, int64_t Scalable)
static StackOffset getScalable(int64_t Scalable)
static StackOffset getFixed(int64_t Fixed)
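StackOffset is what lets SVE frame layout defer the unknown vector length: the fixed component is plain bytes, while the scalable component is scaled by the runtime vscale. A small sketch of composing the two; the values are illustrative.

  #include "llvm/Support/TypeSize.h"
  using namespace llvm;

  static StackOffset exampleOffset() {
    // 16 fixed bytes below the base, plus two 16-byte-per-vscale slots.
    StackOffset Off =
        StackOffset::getFixed(-16) + StackOffset::getScalable(-32);
    // Off.getFixed() == -16, Off.getScalable() == -32
    return Off;
  }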
bool hasFP(const MachineFunction &MF) const
hasFP - Return true if the specified function should have a dedicated frame pointer register.
virtual void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, RegScavenger *RS=nullptr) const
This method determines which of the registers reported by TargetRegisterInfo::getCalleeSavedRegs() sh...
int getOffsetOfLocalArea() const
getOffsetOfLocalArea - This method returns the offset of the local area from the stack pointer on ent...
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
StackDirection getStackGrowthDirection() const
getStackGrowthDirection - Return the direction the stack grows.
virtual bool enableCFIFixup(const MachineFunction &MF) const
Returns true if we may need to fix the unwind information for the function.
TargetInstrInfo - Interface to description of machine instruction set.
Primary interface to the complete machine description for the target machine.
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
LLVM_ABI bool DisableFramePointerElim(const MachineFunction &MF) const
DisableFramePointerElim - This returns true if frame pointer elimination optimization should be disab...
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
bool hasStackRealignment(const MachineFunction &MF) const
True if stack realignment is required and still possible.
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
Triple - Helper class for working with autoconf configuration names.
This class implements an extremely fast bulk output stream that can only output to a stream.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
static unsigned getShiftValue(unsigned Imm)
getShiftValue - Extract the shift value.
static unsigned getArithExtendImm(AArch64_AM::ShiftExtendType ET, unsigned Imm)
getArithExtendImm - Encode the extend type and shift amount for an arithmetic instruction: imm: 3-bit...
const unsigned StackProbeMaxLoopUnroll
Maximum number of iterations to unroll for a constant size probing loop.
const unsigned StackProbeMaxUnprobedStack
Maximum allowed number of unprobed bytes above SP at an ABI boundary.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
@ AArch64_SVE_VectorCall
Used between AArch64 SVE functions.
@ PreserveMost
Used for runtime calls that preserve most registers.
@ CXX_FAST_TLS
Used for access functions.
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
@ PreserveAll
Used for runtime calls that preserve (almost) all registers.
@ Fast
Attempts to make calls as fast as possible (e.g. by passing things in registers).
@ PreserveNone
Used for runtime calls that preserve no general registers.
@ Win64
The C convention as implemented on Windows/x86-64 and AArch64.
@ SwiftTail
This follows the Swift calling convention in how arguments are passed but guarantees tail calls will ...
@ C
The default llvm calling convention, compatible with C.
@ Define
Register definition.
@ ScalablePredicateVector
initializer< Ty > init(const Ty &Val)
NodeAddr< InstrNode * > Instr
BaseReg
Stack frame base register. Bit 0 of FREInfo.Info.
This is an optimization pass for GlobalISel generic memory operations.
void stable_sort(R &&Range)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
int isAArch64FrameOffsetLegal(const MachineInstr &MI, StackOffset &Offset, bool *OutUseUnscaledOp=nullptr, unsigned *OutUnscaledOp=nullptr, int64_t *EmittableOffset=nullptr)
Check if the Offset is a valid frame offset for MI.
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
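make_scope_exit runs a callable on every path out of a scope, the idiom this file relies on to guarantee trailing fixups happen even on early returns. A standalone sketch:

  #include "llvm/ADT/ScopeExit.h"
  #include <cstdio>

  int main() {
    auto Cleanup = llvm::make_scope_exit([] { std::puts("cleanup runs last"); });
    std::puts("body runs first");
  } // Cleanup's lambda fires as the scope unwinds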
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
@ AArch64FrameOffsetCannotUpdate
Offset cannot apply.
auto dyn_cast_or_null(const Y &Val)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
auto formatv(bool Validate, const char *Fmt, Ts &&...Vals)
auto reverse(ContainerTy &&C)
void sort(IteratorTy Start, IteratorTy End)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, StackOffset Offset, const TargetInstrInfo *TII, MachineInstr::MIFlag=MachineInstr::NoFlags, bool SetNZCV=false, bool NeedsWinCFI=false, bool *HasWinCFI=nullptr, bool EmitCFAOffset=false, StackOffset InitialOffset={}, unsigned FrameReg=AArch64::SP)
emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg plus Offset.
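A sketch of the typical prologue use: materialize "SP = SP - NumBytes" and let the helper split an out-of-range immediate into legal ADD/SUB sequences. MBB, MBBI, DL, TII, and NumBytes are assumed to be in scope:

  emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP,
                  StackOffset::getFixed(-NumBytes), TII,
                  MachineInstr::FrameSetup);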
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
@ LLVM_MARK_AS_BITMASK_ENUM
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
unsigned getDefRegState(bool B)
unsigned getKillRegState(bool B)
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
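alignTo is the rounding primitive behind the 16-byte padding of the fixed-object area computed earlier; a standalone sketch:

  #include "llvm/Support/Alignment.h"
  #include <cassert>
  #include <cstdint>

  int main() {
    uint64_t FixedObjectSize = 24;
    FixedObjectSize = llvm::alignTo(FixedObjectSize, llvm::Align(16));
    assert(FixedObjectSize == 32); // rounded up to the next multiple of 16
  }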
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
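A quick standalone sketch of these range helpers, which replace explicit begin()/end() pairs throughout the file:

  #include "llvm/ADT/STLExtras.h"
  #include <vector>

  static bool demo() {
    std::vector<int> Regs = {1, 4, 9};
    bool HasFour = llvm::is_contained(Regs, 4);                    // true
    bool AnyBig = llvm::any_of(Regs, [](int R) { return R > 8; }); // true
    llvm::erase_if(Regs, [](int R) { return R % 2 == 0; });        // drops 4
    return HasFour && AnyBig && Regs.size() == 2;
  }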
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
void fullyRecomputeLiveIns(ArrayRef< MachineBasicBlock * > MBBs)
Convenience function for recomputing live-in's for a set of MBBs until the computation converges.
LLVM_ABI Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
bool operator<(const StackAccess &Rhs) const
void print(raw_ostream &OS) const
std::string getTypeString() const
This struct is a compact representation of a valid (non-zero power of two) alignment.
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Pair of physical register and lane mask.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
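getFixedStack pairs with getMachineMemOperand above when a spill or reload needs a memory operand tied to a frame index; a sketch, with the 64-bit memory type and 8-byte alignment purely illustrative (the LLT header path shown is the one used in recent trees):

  #include "llvm/CodeGen/MachineFunction.h"
  #include "llvm/CodeGen/MachineMemOperand.h"
  #include "llvm/CodeGenTypes/LowLevelType.h"
  using namespace llvm;

  static MachineMemOperand *spillMMO(MachineFunction &MF, int FI) {
    MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
    return MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
                                   LLT::scalar(64), Align(8));
  }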
SmallVector< WinEHTryBlockMapEntry, 4 > TryBlockMap
SmallVector< WinEHHandlerType, 1 > HandlerArray