236#define DEBUG_TYPE "frame-info"
239 cl::desc(
"enable use of redzone on AArch64"),
243 "stack-tagging-merge-settag",
253 cl::desc(
"Emit homogeneous prologue and epilogue for the size "
254 "optimization (default = off)"));
256STATISTIC(NumRedZoneFunctions,
"Number of functions using red zone");
272 int64_t ArgumentPopSize = 0;
273 if (IsTailCallReturn) {
279 ArgumentPopSize = StackAdjust.
getImm();
288 return ArgumentPopSize;
299bool AArch64FrameLowering::homogeneousPrologEpilog(
324 if (AFI->hasSwiftAsyncContext() || AFI->hasStreamingModeChanges())
331 unsigned NumGPRs = 0;
332 for (
unsigned I = 0; CSRegs[
I]; ++
I) {
334 if (Reg == AArch64::LR) {
335 assert(CSRegs[
I + 1] == AArch64::FP);
336 if (NumGPRs % 2 != 0)
340 if (AArch64::GPR64RegClass.
contains(Reg))
348bool AArch64FrameLowering::producePairRegisters(
MachineFunction &MF)
const {
367 if (
MI.isDebugInstr() ||
MI.isPseudo() ||
368 MI.getOpcode() == AArch64::ADDXri ||
369 MI.getOpcode() == AArch64::ADDSXri)
396 if (!IsWin64 || IsFunclet) {
401 Attribute::SwiftAsync))
406 const unsigned UnwindHelpObject = (MF.
hasEHFunclets() ? 8 : 0);
408 alignTo(VarArgsArea + UnwindHelpObject, 16);
425 const unsigned RedZoneSize =
438 bool LowerQRegCopyThroughMem = Subtarget.hasFPARMv8() &&
442 return !(MFI.
hasCalls() ||
hasFP(MF) || NumBytes > RedZoneSize ||
503 unsigned Opc =
I->getOpcode();
504 bool IsDestroy = Opc ==
TII->getCallFrameDestroyOpcode();
505 uint64_t CalleePopAmount = IsDestroy ?
I->getOperand(1).getImm() : 0;
508 int64_t Amount =
I->getOperand(0).getImm();
516 if (CalleePopAmount == 0) {
527 assert(Amount > -0xffffff && Amount < 0xffffff &&
"call frame too large");
538 "non-reserved call frame without var sized objects?");
547 }
else if (CalleePopAmount != 0) {
550 assert(CalleePopAmount < 0xffffff &&
"call frame too large");
557void AArch64FrameLowering::emitCalleeSavedGPRLocations(
563 bool LocallyStreaming =
564 Attrs.hasStreamingBody() && !Attrs.hasStreamingInterface();
575 for (
const auto &Info : CSI) {
576 unsigned FrameIdx =
Info.getFrameIdx();
580 assert(!
Info.isSpilledToReg() &&
"Spilling to registers not implemented");
581 int64_t DwarfReg =
TRI.getDwarfRegNum(
Info.getReg(),
true);
588 (!LocallyStreaming &&
589 DwarfReg ==
TRI.getDwarfRegNum(AArch64::VG,
true)))
600void AArch64FrameLowering::emitCalleeSavedSVELocations(
616 for (
const auto &Info : CSI) {
622 assert(!
Info.isSpilledToReg() &&
"Spilling to registers not implemented");
657 const MCInstrDesc &CFIDesc =
TII.get(TargetOpcode::CFI_INSTRUCTION);
663 nullptr,
TRI.getDwarfRegNum(AArch64::SP,
true), 0));
667 if (MFI.shouldSignReturnAddress(MF)) {
673 if (MFI.needsShadowCallStackPrologueEpilogue(MF))
675 TRI.getDwarfRegNum(AArch64::X18,
true));
678 const std::vector<CalleeSavedInfo> &CSI =
680 for (
const auto &
Info : CSI) {
681 unsigned Reg =
Info.getReg();
682 if (!
TRI.regNeedsCFI(Reg, Reg))
685 TRI.getDwarfRegNum(Reg,
true));
704 for (
const auto &
Info : CSI) {
709 unsigned Reg =
Info.getReg();
714 if (!
Info.isRestored())
718 nullptr,
TRI.getDwarfRegNum(
Info.getReg(),
true)));
725void AArch64FrameLowering::emitCalleeSavedGPRRestores(
730void AArch64FrameLowering::emitCalleeSavedSVERestores(
738 static const int64_t MAX_BYTES_PER_SCALABLE_BYTE = 16;
739 return Size.getScalable() * MAX_BYTES_PER_SCALABLE_BYTE +
Size.getFixed();
742void AArch64FrameLowering::allocateStackSpace(
744 int64_t RealignmentPadding,
StackOffset AllocSize,
bool NeedsWinCFI,
745 bool *HasWinCFI,
bool EmitCFI,
StackOffset InitialOffset,
746 bool FollowupAllocs)
const {
759 const uint64_t AndMask = ~(MaxAlign - 1);
762 Register TargetReg = RealignmentPadding
768 EmitCFI, InitialOffset);
770 if (RealignmentPadding) {
791 if (AllocSize.
getScalable() == 0 && RealignmentPadding == 0) {
793 assert(ScratchReg != AArch64::NoRegister);
803 if (FollowupAllocs) {
820 if (
upperBound(AllocSize) + RealignmentPadding <= ProbeSize) {
821 Register ScratchReg = RealignmentPadding
824 assert(ScratchReg != AArch64::NoRegister);
828 EmitCFI, InitialOffset);
829 if (RealignmentPadding) {
837 if (FollowupAllocs ||
upperBound(AllocSize) + RealignmentPadding >
853 assert(TargetReg != AArch64::NoRegister);
857 EmitCFI, InitialOffset);
858 if (RealignmentPadding) {
878 if (RealignmentPadding)
891 case AArch64::W##n: \
892 case AArch64::X##n: \
917 case AArch64::B##n: \
918 case AArch64::H##n: \
919 case AArch64::S##n: \
920 case AArch64::D##n: \
921 case AArch64::Q##n: \
922 return HasSVE ? AArch64::Z##n : AArch64::Q##n
959void AArch64FrameLowering::emitZeroCallUsedRegs(
BitVector RegsToZero,
975 bool HasSVE = STI.hasSVE();
977 if (
TRI.isGeneralPurposeRegister(MF, Reg)) {
980 GPRsToZero.set(XReg);
981 }
else if (AArch64::FPR128RegClass.
contains(Reg) ||
982 AArch64::FPR64RegClass.
contains(Reg) ||
983 AArch64::FPR32RegClass.
contains(Reg) ||
984 AArch64::FPR16RegClass.
contains(Reg) ||
985 AArch64::FPR8RegClass.
contains(Reg)) {
988 FPRsToZero.set(XReg);
1004 {AArch64::P0, AArch64::P1, AArch64::P2, AArch64::P3, AArch64::P4,
1005 AArch64::P5, AArch64::P6, AArch64::P7, AArch64::P8, AArch64::P9,
1006 AArch64::P10, AArch64::P11, AArch64::P12, AArch64::P13, AArch64::P14,
1008 if (RegsToZero[PReg])
1020 for (
unsigned i = 0; CSRegs[i]; ++i)
1021 LiveRegs.
addReg(CSRegs[i]);
1055 for (
unsigned Reg : AArch64::GPR64RegClass) {
1059 return AArch64::NoRegister;
1105 StackSizeInBytes >=
uint64_t(MFI.getStackProbeSize());
1111 F.needsUnwindTableEntry();
1114bool AArch64FrameLowering::shouldCombineCSRLocalStackBump(
1120 if (homogeneousPrologEpilog(MF))
1143 if (MFI.hasVarSizedObjects())
1146 if (
RegInfo->hasStackRealignment(MF))
1163bool AArch64FrameLowering::shouldCombineCSRLocalStackBumpInEpilogue(
1165 if (!shouldCombineCSRLocalStackBump(*
MBB.
getParent(), StackBumpBytes))
1175 while (LastI != Begin) {
1177 if (LastI->isTransient())
1182 switch (LastI->getOpcode()) {
1183 case AArch64::STGloop:
1184 case AArch64::STZGloop:
1186 case AArch64::STZGi:
1187 case AArch64::ST2Gi:
1188 case AArch64::STZ2Gi:
1201 unsigned Opc =
MBBI->getOpcode();
1205 unsigned ImmIdx =
MBBI->getNumOperands() - 1;
1206 int Imm =
MBBI->getOperand(ImmIdx).getImm();
1214 case AArch64::LDPDpost:
1217 case AArch64::STPDpre: {
1218 unsigned Reg0 =
RegInfo->getSEHRegNum(
MBBI->getOperand(1).getReg());
1219 unsigned Reg1 =
RegInfo->getSEHRegNum(
MBBI->getOperand(2).getReg());
1220 MIB =
BuildMI(MF,
DL,
TII.get(AArch64::SEH_SaveFRegP_X))
1227 case AArch64::LDPXpost:
1230 case AArch64::STPXpre: {
1233 if (Reg0 == AArch64::FP && Reg1 == AArch64::LR)
1234 MIB =
BuildMI(MF,
DL,
TII.get(AArch64::SEH_SaveFPLR_X))
1238 MIB =
BuildMI(MF,
DL,
TII.get(AArch64::SEH_SaveRegP_X))
1245 case AArch64::LDRDpost:
1248 case AArch64::STRDpre: {
1249 unsigned Reg =
RegInfo->getSEHRegNum(
MBBI->getOperand(1).getReg());
1250 MIB =
BuildMI(MF,
DL,
TII.get(AArch64::SEH_SaveFReg_X))
1256 case AArch64::LDRXpost:
1259 case AArch64::STRXpre: {
1260 unsigned Reg =
RegInfo->getSEHRegNum(
MBBI->getOperand(1).getReg());
1267 case AArch64::STPDi:
1268 case AArch64::LDPDi: {
1269 unsigned Reg0 =
RegInfo->getSEHRegNum(
MBBI->getOperand(0).getReg());
1270 unsigned Reg1 =
RegInfo->getSEHRegNum(
MBBI->getOperand(1).getReg());
1278 case AArch64::STPXi:
1279 case AArch64::LDPXi: {
1282 if (Reg0 == AArch64::FP && Reg1 == AArch64::LR)
1294 case AArch64::STRXui:
1295 case AArch64::LDRXui: {
1296 int Reg =
RegInfo->getSEHRegNum(
MBBI->getOperand(0).getReg());
1303 case AArch64::STRDui:
1304 case AArch64::LDRDui: {
1305 unsigned Reg =
RegInfo->getSEHRegNum(
MBBI->getOperand(0).getReg());
1312 case AArch64::STPQi:
1313 case AArch64::LDPQi: {
1314 unsigned Reg0 =
RegInfo->getSEHRegNum(
MBBI->getOperand(0).getReg());
1315 unsigned Reg1 =
RegInfo->getSEHRegNum(
MBBI->getOperand(1).getReg());
1316 MIB =
BuildMI(MF,
DL,
TII.get(AArch64::SEH_SaveAnyRegQP))
1323 case AArch64::LDPQpost:
1326 case AArch64::STPQpre: {
1327 unsigned Reg0 =
RegInfo->getSEHRegNum(
MBBI->getOperand(1).getReg());
1328 unsigned Reg1 =
RegInfo->getSEHRegNum(
MBBI->getOperand(2).getReg());
1329 MIB =
BuildMI(MF,
DL,
TII.get(AArch64::SEH_SaveAnyRegQPX))
1343 unsigned LocalStackSize) {
1345 unsigned ImmIdx =
MBBI->getNumOperands() - 1;
1346 switch (
MBBI->getOpcode()) {
1349 case AArch64::SEH_SaveFPLR:
1350 case AArch64::SEH_SaveRegP:
1351 case AArch64::SEH_SaveReg:
1352 case AArch64::SEH_SaveFRegP:
1353 case AArch64::SEH_SaveFReg:
1354 case AArch64::SEH_SaveAnyRegQP:
1355 case AArch64::SEH_SaveAnyRegQPX:
1356 ImmOpnd = &
MBBI->getOperand(ImmIdx);
1370 unsigned Opc =
MBBI->getOpcode();
1371 if (Opc == AArch64::CNTD_XPiI || Opc == AArch64::RDSVLI_XI ||
1372 Opc == AArch64::UBFMXri)
1376 if (Opc == AArch64::ORRXrr)
1379 if (Opc == AArch64::BL) {
1380 auto Op1 =
MBBI->getOperand(0);
1381 return Op1.isSymbol() &&
1382 (
StringRef(Op1.getSymbolName()) ==
"__arm_get_current_vg");
1395 bool NeedsWinCFI,
bool *HasWinCFI,
bool EmitCFI,
1397 int CFAOffset = 0) {
1410 switch (
MBBI->getOpcode()) {
1413 case AArch64::STPXi:
1414 NewOpc = AArch64::STPXpre;
1416 case AArch64::STPDi:
1417 NewOpc = AArch64::STPDpre;
1419 case AArch64::STPQi:
1420 NewOpc = AArch64::STPQpre;
1422 case AArch64::STRXui:
1423 NewOpc = AArch64::STRXpre;
1425 case AArch64::STRDui:
1426 NewOpc = AArch64::STRDpre;
1428 case AArch64::STRQui:
1429 NewOpc = AArch64::STRQpre;
1431 case AArch64::LDPXi:
1432 NewOpc = AArch64::LDPXpost;
1434 case AArch64::LDPDi:
1435 NewOpc = AArch64::LDPDpost;
1437 case AArch64::LDPQi:
1438 NewOpc = AArch64::LDPQpost;
1440 case AArch64::LDRXui:
1441 NewOpc = AArch64::LDRXpost;
1443 case AArch64::LDRDui:
1444 NewOpc = AArch64::LDRDpost;
1446 case AArch64::LDRQui:
1447 NewOpc = AArch64::LDRQpost;
1452 auto SEH = std::next(
MBBI);
1454 SEH->eraseFromParent();
1458 int64_t MinOffset, MaxOffset;
1460 NewOpc, Scale, Width, MinOffset, MaxOffset);
1466 if (
MBBI->getOperand(
MBBI->getNumOperands() - 1).getImm() != 0 ||
1467 CSStackSizeInc < MinOffset || CSStackSizeInc > MaxOffset) {
1470 false,
false,
nullptr, EmitCFI,
1473 return std::prev(
MBBI);
1480 unsigned OpndIdx = 0;
1481 for (
unsigned OpndEnd =
MBBI->getNumOperands() - 1; OpndIdx < OpndEnd;
1483 MIB.
add(
MBBI->getOperand(OpndIdx));
1485 assert(
MBBI->getOperand(OpndIdx).getImm() == 0 &&
1486 "Unexpected immediate offset in first/last callee-save save/restore "
1488 assert(
MBBI->getOperand(OpndIdx - 1).getReg() == AArch64::SP &&
1489 "Unexpected base register in callee-save save/restore instruction!");
1490 assert(CSStackSizeInc % Scale == 0);
1491 MIB.
addImm(CSStackSizeInc / (
int)Scale);
1522 unsigned Opc =
MI.getOpcode();
1525 case AArch64::STPXi:
1526 case AArch64::STRXui:
1527 case AArch64::STPDi:
1528 case AArch64::STRDui:
1529 case AArch64::LDPXi:
1530 case AArch64::LDRXui:
1531 case AArch64::LDPDi:
1532 case AArch64::LDRDui:
1535 case AArch64::STPQi:
1536 case AArch64::STRQui:
1537 case AArch64::LDPQi:
1538 case AArch64::LDRQui:
1545 unsigned OffsetIdx =
MI.getNumExplicitOperands() - 1;
1546 assert(
MI.getOperand(OffsetIdx - 1).getReg() == AArch64::SP &&
1547 "Unexpected base register in callee-save save/restore instruction!");
1551 assert(LocalStackSize % Scale == 0);
1552 OffsetOpnd.
setImm(OffsetOpnd.
getImm() + LocalStackSize / Scale);
1557 assert(
MBBI !=
MI.getParent()->end() &&
"Expecting a valid instruction");
1559 "Expecting a SEH instruction");
1570 switch (
I->getOpcode()) {
1573 case AArch64::PTRUE_C_B:
1574 case AArch64::LD1B_2Z_IMM:
1575 case AArch64::ST1B_2Z_IMM:
1576 case AArch64::STR_ZXI:
1577 case AArch64::STR_PXI:
1578 case AArch64::LDR_ZXI:
1579 case AArch64::LDR_PXI:
1590 bool NeedsUnwindInfo) {
1606 if (NeedsUnwindInfo) {
1609 static const char CFIInst[] = {
1610 dwarf::DW_CFA_val_expression,
1613 static_cast<char>(
unsigned(dwarf::DW_OP_breg18)),
1614 static_cast<char>(-8) & 0x7f,
1617 nullptr,
StringRef(CFIInst,
sizeof(CFIInst))));
1655 const int OffsetToFirstCalleeSaveFromFP =
1659 unsigned Reg =
TRI->getDwarfRegNum(
FramePtr,
true);
1661 nullptr, Reg, FixedObject - OffsetToFirstCalleeSaveFromFP));
1694 bool HasFP =
hasFP(MF);
1696 bool HasWinCFI =
false;
1705 while (NonFrameStart !=
End &&
1710 if (NonFrameStart !=
MBB.
end()) {
1726 if (NonFrameStart ==
MBB.
end())
1731 for (auto &Op : MI.operands())
1732 if (Op.isReg() && Op.isDef())
1733 assert(!LiveRegs.contains(Op.getReg()) &&
1734 "live register clobbered by inserted prologue instructions");
1751 if (MFnI.needsShadowCallStackPrologueEpilogue(MF))
1753 MFnI.needsDwarfUnwindInfo(MF));
1755 if (MFnI.shouldSignReturnAddress(MF)) {
1762 if (EmitCFI && MFnI.isMTETagged()) {
1840 assert(!HasFP &&
"unexpected function without stack frame but with FP");
1842 "unexpected function without stack frame but with SVE objects");
1851 ++NumRedZoneFunctions;
1884 bool CombineSPBump = shouldCombineCSRLocalStackBump(MF, NumBytes);
1885 bool HomPrologEpilog = homogeneousPrologEpilog(MF);
1886 if (CombineSPBump) {
1887 assert(!SVEStackSize &&
"Cannot combine SP bump with SVE");
1893 }
else if (HomPrologEpilog) {
1895 NumBytes -= PrologueSaveSize;
1896 }
else if (PrologueSaveSize != 0) {
1898 MBB,
MBBI,
DL,
TII, -PrologueSaveSize, NeedsWinCFI, &HasWinCFI,
1900 NumBytes -= PrologueSaveSize;
1902 assert(NumBytes >= 0 &&
"Negative stack allocation size!?");
1916 NeedsWinCFI, &HasWinCFI);
1921 if (!IsFunclet && HasFP) {
1933 bool HaveInitialContext = Attrs.hasAttrSomewhere(Attribute::SwiftAsync);
1934 if (HaveInitialContext)
1936 Register Reg = HaveInitialContext ? AArch64::X22 : AArch64::XZR;
1952 if (HomPrologEpilog) {
1965 if (NeedsWinCFI && HasWinCFI) {
1970 NeedsWinCFI =
false;
1981 emitCalleeSavedGPRLocations(
MBB,
MBBI);
1984 const bool NeedsRealignment =
1985 NumBytes && !IsFunclet && RegInfo->hasStackRealignment(MF);
1986 const int64_t RealignmentPadding =
1992 uint64_t NumWords = (NumBytes + RealignmentPadding) >> 4;
2000 if (NumBytes >= (1 << 28))
2002 "unwinding purposes");
2004 uint32_t LowNumWords = NumWords & 0xFFFF;
2011 if ((NumWords & 0xFFFF0000) != 0) {
2014 .
addImm((NumWords & 0xFFFF0000) >> 16)
2085 if (RealignmentPadding > 0) {
2086 if (RealignmentPadding >= 4096) {
2089 .
addImm(RealignmentPadding)
2099 .
addImm(RealignmentPadding)
2116 StackOffset SVECalleeSavesSize = {}, SVELocalsSize = SVEStackSize;
2122 LLVM_DEBUG(
dbgs() <<
"SVECalleeSavedStackSize = " << CalleeSavedSize
2125 CalleeSavesBegin =
MBBI;
2129 CalleeSavesEnd =
MBBI;
2132 SVELocalsSize = SVEStackSize - SVECalleeSavesSize;
2139 allocateStackSpace(
MBB, CalleeSavesBegin, 0, SVECalleeSavesSize,
false,
2140 nullptr, EmitAsyncCFI && !HasFP, CFAOffset,
2142 CFAOffset += SVECalleeSavesSize;
2145 emitCalleeSavedSVELocations(
MBB, CalleeSavesEnd);
2150 "Cannot use redzone with stack realignment");
2155 allocateStackSpace(
MBB, CalleeSavesEnd, RealignmentPadding,
2157 NeedsWinCFI, &HasWinCFI, EmitAsyncCFI && !HasFP,
2169 if (!IsFunclet && RegInfo->hasBasePointer(MF)) {
2181 if (NeedsWinCFI && HasWinCFI) {
2189 if (IsFunclet &&
F.hasPersonalityFn()) {
2199 if (EmitCFI && !EmitAsyncCFI) {
2206 *RegInfo, AArch64::SP, AArch64::SP, TotalSize,
2212 emitCalleeSavedGPRLocations(
MBB,
MBBI);
2213 emitCalleeSavedSVELocations(
MBB,
MBBI);
2218 switch (
MI.getOpcode()) {
2221 case AArch64::CATCHRET:
2222 case AArch64::CLEANUPRET:
2237 bool HasWinCFI =
false;
2238 bool IsFunclet =
false;
2241 DL =
MBBI->getDebugLoc();
2249 BuildMI(MBB, MBB.getFirstTerminator(), DL,
2250 TII->get(AArch64::PAUTH_EPILOGUE))
2251 .setMIFlag(MachineInstr::FrameDestroy);
2261 TII->get(AArch64::SEH_EpilogEnd))
2288 int64_t AfterCSRPopSize = ArgumentStackToRestore;
2296 if (homogeneousPrologEpilog(MF, &
MBB)) {
2300 auto HomogeneousEpilog = std::prev(LastPopI);
2301 if (HomogeneousEpilog->getOpcode() == AArch64::HOM_Epilog)
2302 LastPopI = HomogeneousEpilog;
2312 assert(AfterCSRPopSize == 0);
2315 bool CombineSPBump = shouldCombineCSRLocalStackBumpInEpilogue(
MBB, NumBytes);
2318 bool CombineAfterCSRBump =
false;
2319 if (!CombineSPBump && PrologueSaveSize != 0) {
2321 while (Pop->getOpcode() == TargetOpcode::CFI_INSTRUCTION ||
2323 Pop = std::prev(Pop);
2326 const MachineOperand &OffsetOp = Pop->getOperand(Pop->getNumOperands() - 1);
2330 if (OffsetOp.
getImm() == 0 && AfterCSRPopSize >= 0) {
2332 MBB, Pop,
DL,
TII, PrologueSaveSize, NeedsWinCFI, &HasWinCFI, EmitCFI,
2339 AfterCSRPopSize += PrologueSaveSize;
2340 CombineAfterCSRBump =
true;
2349 while (LastPopI != Begin) {
2355 }
else if (CombineSPBump)
2357 NeedsWinCFI, &HasWinCFI);
2369 EpilogStartI = LastPopI;
2405 if (CombineSPBump) {
2406 assert(!SVEStackSize &&
"Cannot combine SP bump with SVE");
2409 if (EmitCFI &&
hasFP(MF)) {
2411 unsigned Reg = RegInfo.getDwarfRegNum(AArch64::SP,
true);
2426 NumBytes -= PrologueSaveSize;
2427 assert(NumBytes >= 0 &&
"Negative stack allocation size!?");
2431 StackOffset DeallocateBefore = {}, DeallocateAfter = SVEStackSize;
2434 RestoreBegin = std::prev(RestoreEnd);
2435 while (RestoreBegin !=
MBB.
begin() &&
2444 DeallocateBefore = SVEStackSize - CalleeSavedSizeAsOffset;
2445 DeallocateAfter = CalleeSavedSizeAsOffset;
2467 MBB, RestoreBegin,
DL, AArch64::SP, AArch64::SP,
2469 false,
false,
nullptr, EmitCFI && !
hasFP(MF),
2476 false,
nullptr, EmitCFI && !
hasFP(MF),
2482 false,
nullptr, EmitCFI && !
hasFP(MF),
2487 emitCalleeSavedSVERestores(
MBB, RestoreEnd);
2494 if (RedZone && AfterCSRPopSize == 0)
2501 bool NoCalleeSaveRestore = PrologueSaveSize == 0;
2502 int64_t StackRestoreBytes = RedZone ? 0 : NumBytes;
2503 if (NoCalleeSaveRestore)
2504 StackRestoreBytes += AfterCSRPopSize;
2507 MBB, LastPopI,
DL, AArch64::SP, AArch64::SP,
2514 if (NoCalleeSaveRestore || AfterCSRPopSize == 0) {
2527 MBB, LastPopI,
DL, AArch64::SP, AArch64::FP,
2530 }
else if (NumBytes)
2536 if (EmitCFI &&
hasFP(MF)) {
2538 unsigned Reg = RegInfo.getDwarfRegNum(AArch64::SP,
true);
2549 if (AfterCSRPopSize) {
2550 assert(AfterCSRPopSize > 0 &&
"attempting to reallocate arg stack that an "
2551 "interrupt may have clobbered");
2556 false, NeedsWinCFI, &HasWinCFI, EmitCFI,
2588 int64_t ObjectOffset) {
2593 unsigned FixedObject =
2602 int64_t ObjectOffset) {
2613 return RegInfo->getLocalAddressRegister(MF) == AArch64::FP
2620 bool ForSimm)
const {
2623 bool isFixed = MFI.isFixedObjectIndex(FI);
2630 const MachineFunction &MF, int64_t ObjectOffset,
bool isFixed,
bool isSVE,
2631 Register &FrameReg,
bool PreferFP,
bool ForSimm)
const {
2654 PreferFP &= !SVEStackSize;
2662 }
else if (isCSR && RegInfo->hasStackRealignment(MF)) {
2666 assert(
hasFP(MF) &&
"Re-aligned stack must have frame pointer");
2668 }
else if (
hasFP(MF) && !RegInfo->hasStackRealignment(MF)) {
2673 bool FPOffsetFits = !ForSimm || FPOffset >= -256;
2674 PreferFP |=
Offset > -FPOffset && !SVEStackSize;
2676 if (MFI.hasVarSizedObjects()) {
2680 bool CanUseBP = RegInfo->hasBasePointer(MF);
2681 if (FPOffsetFits && CanUseBP)
2688 }
else if (FPOffset >= 0) {
2693 }
else if (MF.
hasEHFunclets() && !RegInfo->hasBasePointer(MF)) {
2700 "Funclets should only be present on Win64");
2704 if (FPOffsetFits && PreferFP)
2711 ((isFixed || isCSR) || !RegInfo->hasStackRealignment(MF) || !UseFP) &&
2712 "In the presence of dynamic stack pointer realignment, "
2713 "non-argument/CSR objects cannot be accessed through the frame pointer");
2725 RegInfo->hasStackRealignment(MF))) {
2726 FrameReg = RegInfo->getFrameRegister(MF);
2730 FrameReg = RegInfo->hasBasePointer(MF) ? RegInfo->getBaseRegister()
2736 if (UseFP && !(isFixed || isCSR))
2737 ScalableOffset = -SVEStackSize;
2738 if (!UseFP && (isFixed || isCSR))
2739 ScalableOffset = SVEStackSize;
2742 FrameReg = RegInfo->getFrameRegister(MF);
2747 if (RegInfo->hasBasePointer(MF))
2748 FrameReg = RegInfo->getBaseRegister();
2750 assert(!MFI.hasVarSizedObjects() &&
2751 "Can't use SP when we have var sized objects.");
2752 FrameReg = AArch64::SP;
2778 Attrs.hasAttrSomewhere(Attribute::SwiftError)) &&
2783 bool NeedsWinCFI,
bool IsFirst,
2792 if (Reg2 == AArch64::FP)
2796 if (
TRI->getEncodingValue(Reg2) ==
TRI->getEncodingValue(Reg1) + 1)
2803 if (Reg1 >= AArch64::X19 && Reg1 <= AArch64::X27 &&
2804 (Reg1 - AArch64::X19) % 2 == 0 && Reg2 == AArch64::LR && !IsFirst)
2814 bool UsesWinAAPCS,
bool NeedsWinCFI,
2815 bool NeedsFrameRecord,
bool IsFirst,
2823 if (NeedsFrameRecord)
2824 return Reg2 == AArch64::LR;
2832 unsigned Reg1 = AArch64::NoRegister;
2833 unsigned Reg2 = AArch64::NoRegister;
2836 enum RegType { GPR, FPR64, FPR128, PPR, ZPR, VG }
Type;
2838 RegPairInfo() =
default;
2840 bool isPaired()
const {
return Reg2 != AArch64::NoRegister; }
2842 unsigned getScale()
const {
2857 bool isScalable()
const {
return Type == PPR ||
Type == ZPR; }
2863 for (
unsigned PReg = AArch64::P8; PReg <= AArch64::P15; ++PReg) {
2864 if (SavedRegs.
test(PReg)) {
2865 unsigned PNReg = PReg - AArch64::P0 + AArch64::PN0;
2869 return AArch64::NoRegister;
2875 bool NeedsFrameRecord) {
2885 unsigned Count = CSI.
size();
2892 "Odd number of callee-saved regs to spill!");
2894 int StackFillDir = -1;
2896 unsigned FirstReg = 0;
2904 FirstReg = Count - 1;
2910 for (
unsigned i = FirstReg; i < Count; i += RegInc) {
2912 RPI.Reg1 = CSI[i].getReg();
2914 if (AArch64::GPR64RegClass.
contains(RPI.Reg1))
2915 RPI.Type = RegPairInfo::GPR;
2916 else if (AArch64::FPR64RegClass.
contains(RPI.Reg1))
2917 RPI.Type = RegPairInfo::FPR64;
2918 else if (AArch64::FPR128RegClass.
contains(RPI.Reg1))
2919 RPI.Type = RegPairInfo::FPR128;
2920 else if (AArch64::ZPRRegClass.
contains(RPI.Reg1))
2921 RPI.Type = RegPairInfo::ZPR;
2922 else if (AArch64::PPRRegClass.
contains(RPI.Reg1))
2923 RPI.Type = RegPairInfo::PPR;
2924 else if (RPI.Reg1 == AArch64::VG)
2925 RPI.Type = RegPairInfo::VG;
2930 if (
unsigned(i + RegInc) < Count) {
2931 Register NextReg = CSI[i + RegInc].getReg();
2932 bool IsFirst = i == FirstReg;
2934 case RegPairInfo::GPR:
2935 if (AArch64::GPR64RegClass.
contains(NextReg) &&
2937 NeedsWinCFI, NeedsFrameRecord, IsFirst,
2941 case RegPairInfo::FPR64:
2942 if (AArch64::FPR64RegClass.
contains(NextReg) &&
2947 case RegPairInfo::FPR128:
2948 if (AArch64::FPR128RegClass.
contains(NextReg))
2951 case RegPairInfo::PPR:
2953 case RegPairInfo::ZPR:
2955 if (((RPI.Reg1 - AArch64::Z0) & 1) == 0 && (NextReg == RPI.Reg1 + 1))
2958 case RegPairInfo::VG:
2969 assert((!RPI.isPaired() ||
2970 (CSI[i].getFrameIdx() + RegInc == CSI[i + RegInc].getFrameIdx())) &&
2971 "Out of order callee saved regs!");
2973 assert((!RPI.isPaired() || !NeedsFrameRecord || RPI.Reg2 != AArch64::FP ||
2974 RPI.Reg1 == AArch64::LR) &&
2975 "FrameRecord must be allocated together with LR");
2978 assert((!RPI.isPaired() || !NeedsFrameRecord || RPI.Reg1 != AArch64::FP ||
2979 RPI.Reg2 == AArch64::LR) &&
2980 "FrameRecord must be allocated together with LR");
2988 ((RPI.Reg1 == AArch64::LR && RPI.Reg2 == AArch64::FP) ||
2989 RPI.Reg1 + 1 == RPI.Reg2))) &&
2990 "Callee-save registers not saved as adjacent register pair!");
2992 RPI.FrameIdx = CSI[i].getFrameIdx();
2995 RPI.FrameIdx = CSI[i + RegInc].getFrameIdx();
2996 int Scale = RPI.getScale();
2998 int OffsetPre = RPI.isScalable() ? ScalableByteOffset : ByteOffset;
2999 assert(OffsetPre % Scale == 0);
3001 if (RPI.isScalable())
3002 ScalableByteOffset += StackFillDir * (RPI.isPaired() ? 2 * Scale : Scale);
3004 ByteOffset += StackFillDir * (RPI.isPaired() ? 2 * Scale : Scale);
3009 ((!IsWindows && RPI.Reg2 == AArch64::FP) ||
3010 (IsWindows && RPI.Reg2 == AArch64::LR)))
3011 ByteOffset += StackFillDir * 8;
3015 if (NeedGapToAlignStack && !NeedsWinCFI &&
3016 !RPI.isScalable() && RPI.Type != RegPairInfo::FPR128 &&
3017 !RPI.isPaired() && ByteOffset % 16 != 0) {
3018 ByteOffset += 8 * StackFillDir;
3019 assert(MFI.getObjectAlign(RPI.FrameIdx) <=
Align(16));
3023 MFI.setObjectAlignment(RPI.FrameIdx,
Align(16));
3024 NeedGapToAlignStack =
false;
3027 int OffsetPost = RPI.isScalable() ? ScalableByteOffset : ByteOffset;
3028 assert(OffsetPost % Scale == 0);
3031 int Offset = NeedsWinCFI ? OffsetPre : OffsetPost;
3036 ((!IsWindows && RPI.Reg2 == AArch64::FP) ||
3037 (IsWindows && RPI.Reg2 == AArch64::LR)))
3039 RPI.Offset =
Offset / Scale;
3041 assert(((!RPI.isScalable() && RPI.Offset >= -64 && RPI.Offset <= 63) ||
3042 (RPI.isScalable() && RPI.Offset >= -256 && RPI.Offset <= 255)) &&
3043 "Offset out of bounds for LDP/STP immediate");
3047 if (NeedsFrameRecord && ((!IsWindows && RPI.Reg1 == AArch64::LR &&
3048 RPI.Reg2 == AArch64::FP) ||
3049 (IsWindows && RPI.Reg1 == AArch64::FP &&
3050 RPI.Reg2 == AArch64::LR)))
3064 MFI.setObjectAlignment(CSI[0].getFrameIdx(),
Align(16));
3067 std::reverse(RegPairs.
begin(), RegPairs.
end());
3084 if (homogeneousPrologEpilog(MF)) {
3088 for (
auto &RPI : RegPairs) {
3093 if (!
MRI.isReserved(RPI.Reg1))
3095 if (RPI.isPaired() && !
MRI.isReserved(RPI.Reg2))
3100 bool PTrueCreated =
false;
3102 unsigned Reg1 = RPI.Reg1;
3103 unsigned Reg2 = RPI.Reg2;
3119 case RegPairInfo::GPR:
3120 StrOpc = RPI.isPaired() ? AArch64::STPXi : AArch64::STRXui;
3122 Alignment =
Align(8);
3124 case RegPairInfo::FPR64:
3125 StrOpc = RPI.isPaired() ? AArch64::STPDi : AArch64::STRDui;
3127 Alignment =
Align(8);
3129 case RegPairInfo::FPR128:
3130 StrOpc = RPI.isPaired() ? AArch64::STPQi : AArch64::STRQui;
3132 Alignment =
Align(16);
3134 case RegPairInfo::ZPR:
3135 StrOpc = RPI.isPaired() ? AArch64::ST1B_2Z_IMM : AArch64::STR_ZXI;
3137 Alignment =
Align(16);
3139 case RegPairInfo::PPR:
3140 StrOpc = AArch64::STR_PXI;
3142 Alignment =
Align(2);
3144 case RegPairInfo::VG:
3145 StrOpc = AArch64::STRXui;
3147 Alignment =
Align(8);
3151 unsigned X0Scratch = AArch64::NoRegister;
3152 if (Reg1 == AArch64::VG) {
3155 assert(Reg1 != AArch64::NoRegister);
3158 if (Attrs.hasStreamingBody() && !Attrs.hasStreamingInterface() &&
3183 return STI.getRegisterInfo()->isSuperOrSubRegisterEq(
3184 AArch64::X0, LiveIn.PhysReg);
3188 if (X0Scratch != AArch64::NoRegister)
3195 const uint32_t *RegMask =
TRI->getCallPreservedMask(
3210 dbgs() <<
") -> fi#(" << RPI.FrameIdx;
3211 if (RPI.isPaired())
dbgs() <<
", " << RPI.FrameIdx + 1;
3214 assert((!NeedsWinCFI || !(Reg1 == AArch64::LR && Reg2 == AArch64::FP)) &&
3215 "Windows unwdinding requires a consecutive (FP,LR) pair");
3219 unsigned FrameIdxReg1 = RPI.FrameIdx;
3220 unsigned FrameIdxReg2 = RPI.FrameIdx + 1;
3221 if (NeedsWinCFI && RPI.isPaired()) {
3226 if (RPI.isPaired() && RPI.isScalable()) {
3231 assert(((Subtarget.hasSVE2p1() || Subtarget.hasSME2()) && PnReg != 0) &&
3232 "Expects SVE2.1 or SME2 target and a predicate register");
3233#ifdef EXPENSIVE_CHECKS
3234 auto IsPPR = [](
const RegPairInfo &c) {
3235 return c.Reg1 == RegPairInfo::PPR;
3237 auto PPRBegin = std::find_if(RegPairs.
begin(), RegPairs.
end(), IsPPR);
3238 auto IsZPR = [](
const RegPairInfo &c) {
3239 return c.Type == RegPairInfo::ZPR;
3241 auto ZPRBegin = std::find_if(RegPairs.
begin(), RegPairs.
end(), IsZPR);
3242 assert(!(PPRBegin < ZPRBegin) &&
3243 "Expected callee save predicate to be handled first");
3245 if (!PTrueCreated) {
3246 PTrueCreated =
true;
3251 if (!
MRI.isReserved(Reg1))
3253 if (!
MRI.isReserved(Reg2))
3255 MIB.
addReg( AArch64::Z0_Z1 + (RPI.Reg1 - AArch64::Z0));
3271 if (!
MRI.isReserved(Reg1))
3273 if (RPI.isPaired()) {
3274 if (!
MRI.isReserved(Reg2))
3294 if (RPI.Type == RegPairInfo::ZPR || RPI.Type == RegPairInfo::PPR) {
3300 if (X0Scratch != AArch64::NoRegister)
3320 DL =
MBBI->getDebugLoc();
3323 if (homogeneousPrologEpilog(MF, &
MBB)) {
3326 for (
auto &RPI : RegPairs) {
3334 auto IsPPR = [](
const RegPairInfo &c) {
return c.Type == RegPairInfo::PPR; };
3335 auto PPRBegin = std::find_if(RegPairs.
begin(), RegPairs.
end(), IsPPR);
3336 auto PPREnd = std::find_if_not(PPRBegin, RegPairs.
end(), IsPPR);
3337 std::reverse(PPRBegin, PPREnd);
3338 auto IsZPR = [](
const RegPairInfo &c) {
return c.Type == RegPairInfo::ZPR; };
3339 auto ZPRBegin = std::find_if(RegPairs.
begin(), RegPairs.
end(), IsZPR);
3340 auto ZPREnd = std::find_if_not(ZPRBegin, RegPairs.
end(), IsZPR);
3341 std::reverse(ZPRBegin, ZPREnd);
3343 bool PTrueCreated =
false;
3344 for (
const RegPairInfo &RPI : RegPairs) {
3345 unsigned Reg1 = RPI.Reg1;
3346 unsigned Reg2 = RPI.Reg2;
3360 case RegPairInfo::GPR:
3361 LdrOpc = RPI.isPaired() ? AArch64::LDPXi : AArch64::LDRXui;
3363 Alignment =
Align(8);
3365 case RegPairInfo::FPR64:
3366 LdrOpc = RPI.isPaired() ? AArch64::LDPDi : AArch64::LDRDui;
3368 Alignment =
Align(8);
3370 case RegPairInfo::FPR128:
3371 LdrOpc = RPI.isPaired() ? AArch64::LDPQi : AArch64::LDRQui;
3373 Alignment =
Align(16);
3375 case RegPairInfo::ZPR:
3376 LdrOpc = RPI.isPaired() ? AArch64::LD1B_2Z_IMM : AArch64::LDR_ZXI;
3378 Alignment =
Align(16);
3380 case RegPairInfo::PPR:
3381 LdrOpc = AArch64::LDR_PXI;
3383 Alignment =
Align(2);
3385 case RegPairInfo::VG:
3390 dbgs() <<
") -> fi#(" << RPI.FrameIdx;
3391 if (RPI.isPaired())
dbgs() <<
", " << RPI.FrameIdx + 1;
3397 unsigned FrameIdxReg1 = RPI.FrameIdx;
3398 unsigned FrameIdxReg2 = RPI.FrameIdx + 1;
3399 if (NeedsWinCFI && RPI.isPaired()) {
3405 if (RPI.isPaired() && RPI.isScalable()) {
3409 assert(((Subtarget.hasSVE2p1() || Subtarget.hasSME2()) && PnReg != 0) &&
3410 "Expects SVE2.1 or SME2 target and a predicate register");
3411#ifdef EXPENSIVE_CHECKS
3412 assert(!(PPRBegin < ZPRBegin) &&
3413 "Expected callee save predicate to be handled first");
3415 if (!PTrueCreated) {
3416 PTrueCreated =
true;
3421 MIB.
addReg( AArch64::Z0_Z1 + (RPI.Reg1 - AArch64::Z0),
3438 if (RPI.isPaired()) {
3472 unsigned UnspilledCSGPR = AArch64::NoRegister;
3473 unsigned UnspilledCSGPRPaired = AArch64::NoRegister;
3482 unsigned ExtraCSSpill = 0;
3483 bool HasUnpairedGPR64 =
false;
3484 bool HasPairZReg =
false;
3486 for (
unsigned i = 0; CSRegs[i]; ++i) {
3487 const unsigned Reg = CSRegs[i];
3490 if (Reg == BasePointerReg)
3493 bool RegUsed = SavedRegs.
test(Reg);
3494 unsigned PairedReg = AArch64::NoRegister;
3495 const bool RegIsGPR64 = AArch64::GPR64RegClass.contains(Reg);
3496 if (RegIsGPR64 || AArch64::FPR64RegClass.
contains(Reg) ||
3497 AArch64::FPR128RegClass.contains(Reg)) {
3500 if (HasUnpairedGPR64)
3501 PairedReg = CSRegs[i % 2 == 0 ? i - 1 : i + 1];
3503 PairedReg = CSRegs[i ^ 1];
3510 if (RegIsGPR64 && !AArch64::GPR64RegClass.
contains(PairedReg)) {
3511 PairedReg = AArch64::NoRegister;
3512 HasUnpairedGPR64 =
true;
3514 assert(PairedReg == AArch64::NoRegister ||
3515 AArch64::GPR64RegClass.
contains(Reg, PairedReg) ||
3516 AArch64::FPR64RegClass.
contains(Reg, PairedReg) ||
3517 AArch64::FPR128RegClass.
contains(Reg, PairedReg));
3520 if (AArch64::GPR64RegClass.
contains(Reg) &&
3522 UnspilledCSGPR = Reg;
3523 UnspilledCSGPRPaired = PairedReg;
3531 if (producePairRegisters(MF) && PairedReg != AArch64::NoRegister &&
3532 !SavedRegs.
test(PairedReg)) {
3533 SavedRegs.
set(PairedReg);
3534 if (AArch64::GPR64RegClass.
contains(PairedReg) &&
3536 ExtraCSSpill = PairedReg;
3539 HasPairZReg |= (AArch64::ZPRRegClass.contains(Reg, CSRegs[i ^ 1]) &&
3540 SavedRegs.
test(CSRegs[i ^ 1]));
3543 if (HasPairZReg && (Subtarget.hasSVE2p1() || Subtarget.hasSME2())) {
3548 if (PnReg != AArch64::NoRegister)
3554 SavedRegs.
set(AArch64::P8);
3559 "Predicate cannot be a reserved register");
3569 SavedRegs.
set(AArch64::X18);
3573 unsigned CSStackSize = 0;
3574 unsigned SVECSStackSize = 0;
3577 for (
unsigned Reg : SavedRegs.
set_bits()) {
3579 if (AArch64::PPRRegClass.
contains(Reg) ||
3580 AArch64::ZPRRegClass.
contains(Reg))
3592 if (AFI->hasStreamingModeChanges()) {
3593 if (Attrs.hasStreamingBody() && !Attrs.hasStreamingInterface())
3600 unsigned NumSavedRegs = SavedRegs.
count();
3606 SavedRegs.
set(AArch64::FP);
3607 SavedRegs.
set(AArch64::LR);
3617 int64_t SVEStackSize =
3618 alignTo(SVECSStackSize + estimateSVEStackObjectOffsets(MFI), 16);
3619 bool CanEliminateFrame = (SavedRegs.
count() == 0) && !SVEStackSize;
3628 int64_t CalleeStackUsed = 0;
3631 if (FixedOff > CalleeStackUsed) CalleeStackUsed = FixedOff;
3635 bool BigStack = SVEStackSize || (EstimatedStackSize + CSStackSize +
3636 CalleeStackUsed) > EstimatedStackSizeLimit;
3638 AFI->setHasStackFrame(
true);
3647 if (!ExtraCSSpill && UnspilledCSGPR != AArch64::NoRegister) {
3649 <<
" to get a scratch register.\n");
3650 SavedRegs.
set(UnspilledCSGPR);
3651 ExtraCSSpill = UnspilledCSGPR;
3656 if (producePairRegisters(MF)) {
3657 if (UnspilledCSGPRPaired == AArch64::NoRegister) {
3660 SavedRegs.
reset(UnspilledCSGPR);
3661 ExtraCSSpill = AArch64::NoRegister;
3664 SavedRegs.
set(UnspilledCSGPRPaired);
3673 unsigned Size =
TRI->getSpillSize(RC);
3674 Align Alignment =
TRI->getSpillAlign(RC);
3677 LLVM_DEBUG(
dbgs() <<
"No available CS registers, allocated fi#" << FI
3678 <<
" as the emergency spill slot.\n");
3683 CSStackSize += 8 * (SavedRegs.
count() - NumSavedRegs);
3687 if (
hasFP(MF) && AFI->hasSwiftAsyncContext())
3692 << EstimatedStackSize + AlignedCSStackSize
3696 AFI->getCalleeSavedStackSize() == AlignedCSStackSize) &&
3697 "Should not invalidate callee saved info");
3701 AFI->setCalleeSavedStackSize(AlignedCSStackSize);
3702 AFI->setCalleeSaveStackHasFreeSpace(AlignedCSStackSize != CSStackSize);
3703 AFI->setSVECalleeSavedStackSize(
alignTo(SVECSStackSize, 16));
3708 std::vector<CalleeSavedInfo> &CSI,
unsigned &MinCSFrameIndex,
3709 unsigned &MaxCSFrameIndex)
const {
3717 std::reverse(CSI.begin(), CSI.end());
3731 if ((
unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
3732 if ((
unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
3737 std::vector<CalleeSavedInfo> VGSaves;
3741 VGInfo.setRestored(
false);
3742 VGSaves.push_back(VGInfo);
3746 if (Attrs.hasStreamingBody() && !Attrs.hasStreamingInterface())
3747 VGSaves.push_back(VGInfo);
3749 bool InsertBeforeLR =
false;
3751 for (
unsigned I = 0;
I < CSI.size();
I++)
3752 if (CSI[
I].
getReg() == AArch64::LR) {
3753 InsertBeforeLR =
true;
3754 CSI.insert(CSI.begin() +
I, VGSaves.begin(), VGSaves.end());
3758 if (!InsertBeforeLR)
3759 CSI.insert(CSI.end(), VGSaves.begin(), VGSaves.end());
3762 for (
auto &CS : CSI) {
3769 CS.setFrameIdx(FrameIdx);
3771 if ((
unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
3772 if ((
unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
3776 Reg == AArch64::FP) {
3779 if ((
unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
3780 if ((
unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
3800 int &Min,
int &Max) {
3801 Min = std::numeric_limits<int>::max();
3802 Max = std::numeric_limits<int>::min();
3808 for (
auto &CS : CSI) {
3809 if (AArch64::ZPRRegClass.
contains(CS.getReg()) ||
3810 AArch64::PPRRegClass.contains(CS.getReg())) {
3811 assert((Max == std::numeric_limits<int>::min() ||
3812 Max + 1 == CS.getFrameIdx()) &&
3813 "SVE CalleeSaves are not consecutive");
3815 Min = std::min(Min, CS.getFrameIdx());
3816 Max = std::max(Max, CS.getFrameIdx());
3819 return Min != std::numeric_limits<int>::max();
3828 int &MinCSFrameIndex,
3829 int &MaxCSFrameIndex,
3830 bool AssignOffsets) {
3835 "SVE vectors should never be passed on the stack by value, only by "
3839 auto Assign = [&MFI](
int FI, int64_t
Offset) {
3849 for (
int I = MinCSFrameIndex;
I <= MaxCSFrameIndex; ++
I) {
3865 int StackProtectorFI = -1;
3869 ObjectsToAllocate.
push_back(StackProtectorFI);
3875 if (
I == StackProtectorFI)
3877 if (MaxCSFrameIndex >=
I &&
I >= MinCSFrameIndex)
3886 for (
unsigned FI : ObjectsToAllocate) {
3891 if (Alignment >
Align(16))
3893 "Alignment of scalable vectors > 16 bytes is not yet supported");
3903int64_t AArch64FrameLowering::estimateSVEStackObjectOffsets(
3905 int MinCSFrameIndex, MaxCSFrameIndex;
3909int64_t AArch64FrameLowering::assignSVEStackObjectOffsets(
3920 "Upwards growing stack unsupported");
3922 int MinCSFrameIndex, MaxCSFrameIndex;
3923 int64_t SVEStackSize =
3924 assignSVEStackObjectOffsets(MFI, MinCSFrameIndex, MaxCSFrameIndex);
3944 int64_t FixedObject =
3957 assert(DstReg &&
"There must be a free register after frame setup");
3966struct TagStoreInstr {
3989 std::optional<int64_t> FrameRegUpdate;
3991 unsigned FrameRegUpdateFlags;
4002 :
MBB(
MBB), ZeroData(ZeroData) {
4008 void addInstruction(TagStoreInstr
I) {
4010 TagStores.
back().Offset + TagStores.
back().Size ==
I.Offset) &&
4011 "Non-adjacent tag store instructions.");
4026 const int64_t kMinOffset = -256 * 16;
4027 const int64_t kMaxOffset = 255 * 16;
4030 int64_t BaseRegOffsetBytes = FrameRegOffset.
getFixed();
4031 if (BaseRegOffsetBytes < kMinOffset ||
4032 BaseRegOffsetBytes + (
Size -
Size % 32) > kMaxOffset ||
4036 BaseRegOffsetBytes % 16 != 0) {
4037 Register ScratchReg =
MRI->createVirtualRegister(&AArch64::GPR64RegClass);
4040 BaseReg = ScratchReg;
4041 BaseRegOffsetBytes = 0;
4046 int64_t InstrSize = (
Size > 16) ? 32 : 16;
4049 ? (ZeroData ? AArch64::STZGi : AArch64::STGi)
4050 : (ZeroData ? AArch64::STZ2Gi : AArch64::ST2Gi);
4051 assert(BaseRegOffsetBytes % 16 == 0);
4055 .
addImm(BaseRegOffsetBytes / 16)
4059 if (BaseRegOffsetBytes == 0)
4061 BaseRegOffsetBytes += InstrSize;
4075 :
MRI->createVirtualRegister(&AArch64::GPR64RegClass);
4076 Register SizeReg =
MRI->createVirtualRegister(&AArch64::GPR64RegClass);
4080 int64_t LoopSize =
Size;
4083 if (FrameRegUpdate && *FrameRegUpdate)
4084 LoopSize -= LoopSize % 32;
4086 TII->get(ZeroData ? AArch64::STZGloop_wback
4087 : AArch64::STGloop_wback))
4094 LoopI->
setFlags(FrameRegUpdateFlags);
4096 int64_t ExtraBaseRegUpdate =
4097 FrameRegUpdate ? (*FrameRegUpdate - FrameRegOffset.
getFixed() -
Size) : 0;
4098 if (LoopSize <
Size) {
4103 TII->get(ZeroData ? AArch64::STZGPostIndex : AArch64::STGPostIndex))
4107 .
addImm(1 + ExtraBaseRegUpdate / 16)
4110 }
else if (ExtraBaseRegUpdate) {
4114 TII->get(ExtraBaseRegUpdate > 0 ? AArch64::ADDXri : AArch64::SUBXri))
4117 .
addImm(std::abs(ExtraBaseRegUpdate))
4127 int64_t
Size, int64_t *TotalOffset) {
4129 if ((
MI.getOpcode() == AArch64::ADDXri ||
4130 MI.getOpcode() == AArch64::SUBXri) &&
4131 MI.getOperand(0).getReg() == Reg &&
MI.getOperand(1).getReg() == Reg) {
4133 int64_t
Offset =
MI.getOperand(2).getImm() << Shift;
4134 if (
MI.getOpcode() == AArch64::SUBXri)
4136 int64_t AbsPostOffset = std::abs(
Offset -
Size);
4137 const int64_t kMaxOffset =
4139 if (AbsPostOffset <= kMaxOffset && AbsPostOffset % 16 == 0) {
4150 for (
auto &TS : TSE) {
4154 if (
MI->memoperands_empty()) {
4158 MemRefs.
append(
MI->memoperands_begin(),
MI->memoperands_end());
4164 bool TryMergeSPUpdate) {
4165 if (TagStores.
empty())
4167 TagStoreInstr &FirstTagStore = TagStores[0];
4168 TagStoreInstr &LastTagStore = TagStores[TagStores.
size() - 1];
4169 Size = LastTagStore.Offset - FirstTagStore.Offset + LastTagStore.Size;
4170 DL = TagStores[0].MI->getDebugLoc();
4174 *MF, FirstTagStore.Offset,
false ,
false , Reg,
4177 FrameRegUpdate = std::nullopt;
4179 mergeMemRefs(TagStores, CombinedMemRefs);
4182 for (
const auto &Instr
4183 : TagStores) {
dbgs() <<
" " << *
Instr.MI; });
4189 if (TagStores.size() < 2)
4191 emitUnrolled(InsertI);
4194 int64_t TotalOffset = 0;
4195 if (TryMergeSPUpdate) {
4201 if (InsertI !=
MBB->
end() &&
4202 canMergeRegUpdate(InsertI, FrameReg, FrameRegOffset.
getFixed() +
Size,
4204 UpdateInstr = &*InsertI++;
4210 if (!UpdateInstr && TagStores.size() < 2)
4214 FrameRegUpdate = TotalOffset;
4215 FrameRegUpdateFlags = UpdateInstr->
getFlags();
4222 for (
auto &TS : TagStores)
4223 TS.MI->eraseFromParent();
4227 int64_t &
Size,
bool &ZeroData) {
4231 unsigned Opcode =
MI.getOpcode();
4232 ZeroData = (Opcode == AArch64::STZGloop || Opcode == AArch64::STZGi ||
4233 Opcode == AArch64::STZ2Gi);
4235 if (Opcode == AArch64::STGloop || Opcode == AArch64::STZGloop) {
4236 if (!
MI.getOperand(0).isDead() || !
MI.getOperand(1).isDead())
4238 if (!
MI.getOperand(2).isImm() || !
MI.getOperand(3).isFI())
4241 Size =
MI.getOperand(2).getImm();
4245 if (Opcode == AArch64::STGi || Opcode == AArch64::STZGi)
4247 else if (Opcode == AArch64::ST2Gi || Opcode == AArch64::STZ2Gi)
4252 if (
MI.getOperand(0).getReg() != AArch64::SP || !
MI.getOperand(1).isFI())
4256 16 *
MI.getOperand(2).getImm();
4276 if (!isMergeableStackTaggingInstruction(
MI,
Offset,
Size, FirstZeroData))
4282 constexpr int kScanLimit = 10;
4285 NextI != E && Count < kScanLimit; ++NextI) {
4294 if (isMergeableStackTaggingInstruction(
MI,
Offset,
Size, ZeroData)) {
4295 if (ZeroData != FirstZeroData)
4303 if (!
MI.isTransient())
4312 if (
MI.mayLoadOrStore() ||
MI.hasUnmodeledSideEffects())
4328 LiveRegs.addLiveOuts(*
MBB);
4333 LiveRegs.stepBackward(*
I);
4336 if (LiveRegs.contains(AArch64::NZCV))
4340 [](
const TagStoreInstr &
Left,
const TagStoreInstr &
Right) {
4345 int64_t CurOffset = Instrs[0].Offset;
4346 for (
auto &Instr : Instrs) {
4347 if (CurOffset >
Instr.Offset)
4354 TagStoreEdit TSE(
MBB, FirstZeroData);
4355 std::optional<int64_t> EndOffset;
4356 for (
auto &Instr : Instrs) {
4357 if (EndOffset && *EndOffset !=
Instr.Offset) {
4359 TSE.emitCode(InsertI, TFI,
false);
4363 TSE.addInstruction(Instr);
4383 if (
MI.getOpcode() != AArch64::VGSavePseudo &&
4384 MI.getOpcode() != AArch64::VGRestorePseudo)
4388 bool LocallyStreaming =
4395 int64_t VGFrameIdx =
4397 assert(VGFrameIdx != std::numeric_limits<int>::max() &&
4398 "Expected FrameIdx for VG");
4401 if (
MI.getOpcode() == AArch64::VGSavePseudo) {
4406 nullptr,
TRI->getDwarfRegNum(AArch64::VG,
true),
Offset));
4409 nullptr,
TRI->getDwarfRegNum(AArch64::VG,
true)));
4412 TII->get(TargetOpcode::CFI_INSTRUCTION))
4415 MI.eraseFromParent();
4427 II = tryMergeAdjacentSTG(
II,
this, RS);
4436 bool IgnoreSPUpdates)
const {
4438 if (IgnoreSPUpdates) {
4441 FrameReg = AArch64::SP;
4451 FrameReg = AArch64::SP;
4476 bool IsValid =
false;
4478 int ObjectIndex = 0;
4480 int GroupIndex = -1;
4482 bool ObjectFirst =
false;
4485 bool GroupFirst =
false;
4490 int NextGroupIndex = 0;
4491 std::vector<FrameObject> &Objects;
4494 GroupBuilder(std::vector<FrameObject> &Objects) : Objects(Objects) {}
4496 void EndCurrentGroup() {
4497 if (CurrentMembers.
size() > 1) {
4502 for (
int Index : CurrentMembers) {
4503 Objects[
Index].GroupIndex = NextGroupIndex;
4509 CurrentMembers.clear();
4513bool FrameObjectCompare(
const FrameObject &
A,
const FrameObject &
B) {
4531 return std::make_tuple(!
A.IsValid,
A.ObjectFirst,
A.GroupFirst,
A.GroupIndex,
4533 std::make_tuple(!
B.IsValid,
B.ObjectFirst,
B.GroupFirst,
B.GroupIndex,
4545 for (
auto &Obj : ObjectsToAllocate) {
4546 FrameObjects[Obj].IsValid =
true;
4547 FrameObjects[Obj].ObjectIndex = Obj;
4551 GroupBuilder GB(FrameObjects);
4552 for (
auto &
MBB : MF) {
4553 for (
auto &
MI :
MBB) {
4554 if (
MI.isDebugInstr())
4557 switch (
MI.getOpcode()) {
4558 case AArch64::STGloop:
4559 case AArch64::STZGloop:
4563 case AArch64::STZGi:
4564 case AArch64::ST2Gi:
4565 case AArch64::STZ2Gi:
4578 FrameObjects[FI].IsValid)
4586 GB.AddMember(TaggedFI);
4588 GB.EndCurrentGroup();
4591 GB.EndCurrentGroup();
4601 FrameObjects[*TBPI].ObjectFirst =
true;
4602 FrameObjects[*TBPI].GroupFirst =
true;
4603 int FirstGroupIndex = FrameObjects[*TBPI].GroupIndex;
4604 if (FirstGroupIndex >= 0)
4605 for (FrameObject &Object : FrameObjects)
4606 if (Object.GroupIndex == FirstGroupIndex)
4607 Object.GroupFirst =
true;
4613 for (
auto &Obj : FrameObjects) {
4617 ObjectsToAllocate[i++] = Obj.ObjectIndex;
4624 dbgs() <<
" " << Obj.ObjectIndex <<
": group " << Obj.GroupIndex;
4625 if (Obj.ObjectFirst)
4626 dbgs() <<
", first";
4628 dbgs() <<
", group-first";
4638AArch64FrameLowering::inlineStackProbeLoopExactMultiple(
4649 MF.
insert(MBBInsertPoint, LoopMBB);
4651 MF.
insert(MBBInsertPoint, ExitMBB);
4686 return ExitMBB->
begin();
4689void AArch64FrameLowering::inlineStackProbeFixed(
4702 int64_t NumBlocks = FrameSize / ProbeSize;
4703 int64_t ResidualSize = FrameSize % ProbeSize;
4705 LLVM_DEBUG(
dbgs() <<
"Stack probing: total " << FrameSize <<
" bytes, "
4706 << NumBlocks <<
" blocks of " << ProbeSize
4707 <<
" bytes, plus " << ResidualSize <<
" bytes\n");
4712 for (
int i = 0; i < NumBlocks; ++i) {
4718 EmitAsyncCFI && !HasFP, CFAOffset);
4727 }
else if (NumBlocks != 0) {
4733 EmitAsyncCFI && !HasFP, CFAOffset);
4735 MBBI = inlineStackProbeLoopExactMultiple(
MBBI, ProbeSize, ScratchReg);
4737 if (EmitAsyncCFI && !HasFP) {
4741 unsigned Reg =
RegInfo.getDwarfRegNum(AArch64::SP,
true);
4750 if (ResidualSize != 0) {
4756 EmitAsyncCFI && !HasFP, CFAOffset);
4775 if (
MI.getOpcode() == AArch64::PROBED_STACKALLOC ||
4776 MI.getOpcode() == AArch64::PROBED_STACKALLOC_VAR)
4780 if (
MI->getOpcode() == AArch64::PROBED_STACKALLOC) {
4781 Register ScratchReg =
MI->getOperand(0).getReg();
4782 int64_t FrameSize =
MI->getOperand(1).getImm();
4784 MI->getOperand(3).getImm());
4785 inlineStackProbeFixed(
MI->getIterator(), ScratchReg, FrameSize,
4788 assert(
MI->getOpcode() == AArch64::PROBED_STACKALLOC_VAR &&
4789 "Stack probe pseudo-instruction expected");
4792 Register TargetReg =
MI->getOperand(0).getReg();
4793 (void)
TII->probedStackAlloc(
MI->getIterator(), TargetReg,
true);
4795 MI->eraseFromParent();
unsigned const MachineRegisterInfo * MRI
for(const MachineOperand &MO :llvm::drop_begin(OldMI.operands(), Desc.getNumOperands()))
static int64_t getArgumentStackToRestore(MachineFunction &MF, MachineBasicBlock &MBB)
Returns how much of the incoming argument stack area (in bytes) we should clean up in an epilogue.
static void emitShadowCallStackEpilogue(const TargetInstrInfo &TII, MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL)
static void getLiveRegsForEntryMBB(LivePhysRegs &LiveRegs, const MachineBasicBlock &MBB)
static void emitCalleeSavedRestores(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, bool SVE)
static void computeCalleeSaveRegisterPairs(MachineFunction &MF, ArrayRef< CalleeSavedInfo > CSI, const TargetRegisterInfo *TRI, SmallVectorImpl< RegPairInfo > &RegPairs, bool NeedsFrameRecord)
static const unsigned DefaultSafeSPDisplacement
This is the biggest offset to the stack pointer we can encode in aarch64 instructions (without using ...
static void emitDefineCFAWithFP(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, unsigned FixedObject)
static bool needsWinCFI(const MachineFunction &MF)
static void insertCFISameValue(const MCInstrDesc &Desc, MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt, unsigned DwarfReg)
static cl::opt< bool > StackTaggingMergeSetTag("stack-tagging-merge-settag", cl::desc("merge settag instruction in function epilog"), cl::init(true), cl::Hidden)
bool requiresGetVGCall(MachineFunction &MF)
bool isVGInstruction(MachineBasicBlock::iterator MBBI)
static bool produceCompactUnwindFrame(MachineFunction &MF)
static int64_t determineSVEStackObjectOffsets(MachineFrameInfo &MFI, int &MinCSFrameIndex, int &MaxCSFrameIndex, bool AssignOffsets)
static cl::opt< bool > OrderFrameObjects("aarch64-order-frame-objects", cl::desc("sort stack allocations"), cl::init(true), cl::Hidden)
static bool windowsRequiresStackProbe(MachineFunction &MF, uint64_t StackSizeInBytes)
static void fixupCalleeSaveRestoreStackOffset(MachineInstr &MI, uint64_t LocalStackSize, bool NeedsWinCFI, bool *HasWinCFI)
static bool invalidateWindowsRegisterPairing(unsigned Reg1, unsigned Reg2, bool NeedsWinCFI, bool IsFirst, const TargetRegisterInfo *TRI)
static MachineBasicBlock::iterator convertCalleeSaveRestoreToSPPrePostIncDec(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, const TargetInstrInfo *TII, int CSStackSizeInc, bool NeedsWinCFI, bool *HasWinCFI, bool EmitCFI, MachineInstr::MIFlag FrameFlag=MachineInstr::FrameSetup, int CFAOffset=0)
static void fixupSEHOpcode(MachineBasicBlock::iterator MBBI, unsigned LocalStackSize)
static StackOffset getSVEStackSize(const MachineFunction &MF)
Returns the size of the entire SVE stackframe (calleesaves + spills).
static cl::opt< bool > EnableRedZone("aarch64-redzone", cl::desc("enable use of redzone on AArch64"), cl::init(false), cl::Hidden)
static MachineBasicBlock::iterator InsertSEH(MachineBasicBlock::iterator MBBI, const TargetInstrInfo &TII, MachineInstr::MIFlag Flag)
static Register findScratchNonCalleeSaveRegister(MachineBasicBlock *MBB)
static void getLivePhysRegsUpTo(MachineInstr &MI, const TargetRegisterInfo &TRI, LivePhysRegs &LiveRegs)
Collect live registers from the end of MI's parent up to (including) MI in LiveRegs.
cl::opt< bool > EnableHomogeneousPrologEpilog("homogeneous-prolog-epilog", cl::Hidden, cl::desc("Emit homogeneous prologue and epilogue for the size " "optimization (default = off)"))
MachineBasicBlock::iterator emitVGSaveRestore(MachineBasicBlock::iterator II, const AArch64FrameLowering *TFI)
static bool IsSVECalleeSave(MachineBasicBlock::iterator I)
static bool invalidateRegisterPairing(unsigned Reg1, unsigned Reg2, bool UsesWinAAPCS, bool NeedsWinCFI, bool NeedsFrameRecord, bool IsFirst, const TargetRegisterInfo *TRI)
Returns true if Reg1 and Reg2 cannot be paired using a ldp/stp instruction.
unsigned findFreePredicateReg(BitVector &SavedRegs)
static unsigned getPrologueDeath(MachineFunction &MF, unsigned Reg)
static StackOffset getFPOffset(const MachineFunction &MF, int64_t ObjectOffset)
static bool isTargetWindows(const MachineFunction &MF)
static StackOffset getStackOffset(const MachineFunction &MF, int64_t ObjectOffset)
static int64_t upperBound(StackOffset Size)
static unsigned estimateRSStackSizeLimit(MachineFunction &MF)
Look at each instruction that references stack frames and return the stack size limit beyond which so...
static bool getSVECalleeSaveSlotRange(const MachineFrameInfo &MFI, int &Min, int &Max)
returns true if there are any SVE callee saves.
static MCRegister getRegisterOrZero(MCRegister Reg, bool HasSVE)
static bool isFuncletReturnInstr(const MachineInstr &MI)
static void emitShadowCallStackPrologue(const TargetInstrInfo &TII, MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool NeedsWinCFI, bool NeedsUnwindInfo)
static unsigned getFixedObjectSize(const MachineFunction &MF, const AArch64FunctionInfo *AFI, bool IsWin64, bool IsFunclet)
Returns the size of the fixed object area (allocated next to sp on entry) On Win64 this may include a...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
static const int kSetTagLoopThreshold
This file contains the simple types necessary to represent the attributes associated with functions a...
#define CASE(ATTRNAME, AANAME,...)
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
Analysis containing CSE Info
static void clear(coro::Shape &Shape)
static const HTTPClientCleanup Cleanup
const HexagonInstrInfo * TII
This file implements the LivePhysRegs utility for tracking liveness of physical registers.
unsigned const TargetRegisterInfo * TRI
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
uint64_t IntrinsicInst * II
This file declares the machine register scavenger class.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
static const unsigned FramePtr
void processFunctionBeforeFrameIndicesReplaced(MachineFunction &MF, RegScavenger *RS) const override
processFunctionBeforeFrameIndicesReplaced - This method is called immediately before MO_FrameIndex op...
MachineBasicBlock::iterator eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator I) const override
This method is called during prolog/epilog code insertion to eliminate call frame setup and destroy p...
bool canUseAsPrologue(const MachineBasicBlock &MBB) const override
Check whether or not the given MBB can be used as a prologue for the target.
bool enableStackSlotScavenging(const MachineFunction &MF) const override
Returns true if the stack slot holes in the fixed and callee-save stack area should be used when allo...
bool spillCalleeSavedRegisters(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, ArrayRef< CalleeSavedInfo > CSI, const TargetRegisterInfo *TRI) const override
spillCalleeSavedRegisters - Issues instruction(s) to spill all callee saved registers and returns tru...
bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, MutableArrayRef< CalleeSavedInfo > CSI, const TargetRegisterInfo *TRI) const override
restoreCalleeSavedRegisters - Issues instruction(s) to restore all callee saved registers and returns...
StackOffset getNonLocalFrameIndexReference(const MachineFunction &MF, int FI) const override
getNonLocalFrameIndexReference - This method returns the offset used to reference a frame index locat...
TargetStackID::Value getStackIDForScalableVectors() const override
Returns the StackID that scalable vectors should be associated with.
bool hasFP(const MachineFunction &MF) const override
hasFP - Return true if the specified function should have a dedicated frame pointer register.
void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override
emitProlog/emitEpilog - These methods insert prolog and epilog code into the function.
bool enableCFIFixup(MachineFunction &MF) const override
Returns true if we may need to fix the unwind information for the function.
void resetCFIToInitialState(MachineBasicBlock &MBB) const override
Emit CFI instructions that recreate the state of the unwind information upon fucntion entry.
bool hasReservedCallFrame(const MachineFunction &MF) const override
hasReservedCallFrame - Under normal circumstances, when a frame pointer is not required,...
bool canUseRedZone(const MachineFunction &MF) const
Can this function use the red zone for local allocations.
void processFunctionBeforeFrameFinalized(MachineFunction &MF, RegScavenger *RS) const override
processFunctionBeforeFrameFinalized - This method is called immediately before the specified function...
int getSEHFrameIndexOffset(const MachineFunction &MF, int FI) const
unsigned getWinEHFuncletFrameSize(const MachineFunction &MF) const
Funclets only need to account for space for the callee saved registers, as the locals are accounted f...
void orderFrameObjects(const MachineFunction &MF, SmallVectorImpl< int > &ObjectsToAllocate) const override
Order the symbols in the local stack frame.
void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override
void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, RegScavenger *RS) const override
This method determines which of the registers reported by TargetRegisterInfo::getCalleeSavedRegs() sh...
StackOffset getFrameIndexReference(const MachineFunction &MF, int FI, Register &FrameReg) const override
getFrameIndexReference - Provide a base+offset reference to an FI slot for debug info.
StackOffset resolveFrameOffsetReference(const MachineFunction &MF, int64_t ObjectOffset, bool isFixed, bool isSVE, Register &FrameReg, bool PreferFP, bool ForSimm) const
bool assignCalleeSavedSpillSlots(MachineFunction &MF, const TargetRegisterInfo *TRI, std::vector< CalleeSavedInfo > &CSI, unsigned &MinCSFrameIndex, unsigned &MaxCSFrameIndex) const override
assignCalleeSavedSpillSlots - Allows target to override spill slot assignment logic.
StackOffset getFrameIndexReferencePreferSP(const MachineFunction &MF, int FI, Register &FrameReg, bool IgnoreSPUpdates) const override
For Win64 AArch64 EH, the offset to the Unwind object is from the SP before the update.
StackOffset resolveFrameIndexReference(const MachineFunction &MF, int FI, Register &FrameReg, bool PreferFP, bool ForSimm) const
unsigned getWinEHParentFrameOffset(const MachineFunction &MF) const override
The parent frame offset (aka dispFrame) is only used on X86_64 to retrieve the parent's frame pointer...
AArch64FunctionInfo - This class is derived from MachineFunctionInfo and contains private AArch64-spe...
bool needsShadowCallStackPrologueEpilogue(MachineFunction &MF) const
void setSwiftAsyncContextFrameIdx(int FI)
unsigned getTailCallReservedStack() const
unsigned getCalleeSavedStackSize(const MachineFrameInfo &MFI) const
void setCalleeSaveBaseToFrameRecordOffset(int Offset)
bool hasStackProbing() const
unsigned getArgumentStackToRestore() const
void setLocalStackSize(uint64_t Size)
void setVGIdx(unsigned Idx)
int getCalleeSaveBaseToFrameRecordOffset() const
bool hasStreamingModeChanges() const
bool shouldSignReturnAddress(const MachineFunction &MF) const
void setPredicateRegForFillSpill(unsigned Reg)
void setStreamingVGIdx(unsigned FrameIdx)
int64_t getStackProbeSize() const
uint64_t getStackSizeSVE() const
void setHasRedZone(bool s)
bool hasStackFrame() const
std::optional< int > getTaggedBasePointerIndex() const
uint64_t getLocalStackSize() const
void setStackRealigned(bool s)
bool needsDwarfUnwindInfo(const MachineFunction &MF) const
unsigned getVarArgsGPRSize() const
void setStackSizeSVE(uint64_t S)
bool isStackRealigned() const
bool hasSwiftAsyncContext() const
void setTaggedBasePointerOffset(unsigned Offset)
unsigned getPredicateRegForFillSpill() const
unsigned getSVECalleeSavedStackSize() const
bool needsAsyncDwarfUnwindInfo(const MachineFunction &MF) const
int64_t getStreamingVGIdx() const
void setMinMaxSVECSFrameIndex(int Min, int Max)
bool hasCalleeSaveStackFreeSpace() const
static bool isTailCallReturnInst(const MachineInstr &MI)
Returns true if MI is one of the TCRETURN* instructions.
static bool isSEHInstruction(const MachineInstr &MI)
Return true if the instructions is a SEH instruciton used for unwinding on Windows.
bool isReservedReg(const MachineFunction &MF, MCRegister Reg) const
bool hasBasePointer(const MachineFunction &MF) const
bool cannotEliminateFrame(const MachineFunction &MF) const
unsigned getBaseRegister() const
bool isTargetWindows() const
const AArch64RegisterInfo * getRegisterInfo() const override
bool isNeonAvailable() const
Returns true if the target has NEON and the function at runtime is known to have NEON enabled (e....
const AArch64InstrInfo * getInstrInfo() const override
bool isTargetILP32() const
const AArch64TargetLowering * getTargetLowering() const override
bool isTargetMachO() const
const Triple & getTargetTriple() const
bool isCallingConvWin64(CallingConv::ID CC) const
const char * getChkStkName() const
bool swiftAsyncContextIsDynamicallySet() const
Return whether FrameLowering should always set the "extended frame present" bit in FP,...
bool hasInlineStackProbe(const MachineFunction &MF) const override
True if stack clash protection is enabled for this functions.
unsigned getRedZoneSize(const Function &F) const
bool supportSwiftError() const override
Return true if the target supports swifterror attribute.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
bool hasAttrSomewhere(Attribute::AttrKind Kind, unsigned *Index=nullptr) const
Return true if the specified attribute is set for at least one parameter or for the return value.
bool test(unsigned Idx) const
size_type count() const
count - Returns the number of bits which are set.
iterator_range< const_set_bits_iterator > set_bits() const
The CalleeSavedInfo class tracks the information need to locate where a callee saved register is in t...
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
AttributeList getAttributes() const
Return the attribute list for this Function.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc) const override
Emit instructions to copy a pair of physical registers.
A set of physical registers with utility functions to track liveness when walking backward/forward th...
bool available(const MachineRegisterInfo &MRI, MCPhysReg Reg) const
Returns true if register Reg and no aliasing register is in the set.
void stepBackward(const MachineInstr &MI)
Simulates liveness when stepping backwards over an instruction(bundle).
void removeReg(MCPhysReg Reg)
Removes a physical register, all its sub-registers, and all its super-registers from the set.
void addLiveIns(const MachineBasicBlock &MBB)
Adds all live-in registers of basic block MBB.
void addLiveOuts(const MachineBasicBlock &MBB)
Adds all live-out registers of basic block MBB.
void addReg(MCPhysReg Reg)
Adds a physical register and all its sub-registers to the set.
bool usesWindowsCFI() const
static MCCFIInstruction createDefCfaRegister(MCSymbol *L, unsigned Register, SMLoc Loc={})
.cfi_def_cfa_register modifies a rule for computing CFA.
static MCCFIInstruction createOffset(MCSymbol *L, unsigned Register, int Offset, SMLoc Loc={})
.cfi_offset Previous value of Register is saved at offset Offset from CFA.
static MCCFIInstruction cfiDefCfaOffset(MCSymbol *L, int Offset, SMLoc Loc={})
.cfi_def_cfa_offset modifies a rule for computing CFA.
static MCCFIInstruction createRestore(MCSymbol *L, unsigned Register, SMLoc Loc={})
.cfi_restore says that the rule for Register is now the same as it was at the beginning of the functi...
static MCCFIInstruction createNegateRAState(MCSymbol *L, SMLoc Loc={})
.cfi_negate_ra_state AArch64 negate RA state.
static MCCFIInstruction cfiDefCfa(MCSymbol *L, unsigned Register, int Offset, SMLoc Loc={})
.cfi_def_cfa defines a rule for computing CFA as: take address from Register and add Offset to it.
static MCCFIInstruction createEscape(MCSymbol *L, StringRef Vals, SMLoc Loc={}, StringRef Comment="")
.cfi_escape Allows the user to add arbitrary bytes to the unwind info.
static MCCFIInstruction createSameValue(MCSymbol *L, unsigned Register, SMLoc Loc={})
.cfi_same_value Current value of Register is the same as in the previous frame.
MCSymbol * createTempSymbol()
Create a temporary symbol with a unique name.
Describe properties that are true of each instruction in the target description file.
Wrapper class representing physical registers. Should be passed by value.
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
instr_iterator instr_begin()
iterator_range< livein_iterator > liveins() const
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
bool isLiveIn(MCPhysReg Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
bool isEHFuncletEntry() const
Returns true if this is the entry block of an EH funclet.
iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
MachineInstr & instr_back()
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
DebugLoc findDebugLoc(instr_iterator MBBI)
Find the next valid DebugLoc starting at MBBI, skipping any debug instructions.
iterator getLastNonDebugInstr(bool SkipPseudoOp=true)
Returns an iterator to the last non-debug instruction in the basic block, or end().
instr_iterator instr_end()
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
reverse_iterator rbegin()
iterator insertAfter(iterator I, MachineInstr *MI)
Insert MI into the instruction list after I.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before 'Where'.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack frame for this function contains any variable sized objects.
uint64_t getStackSize() const
Return the number of bytes that must be allocated to hold all of the fixed size frame objects.
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
bool hasCalls() const
Return true if the current function has any function calls.
bool isFrameAddressTaken() const
This method may be called any time after instruction selection is complete to determine if there is a call to "frameaddress" in this function.
Align getMaxAlign() const
Return the alignment in bytes that this function must be aligned to, which is greater than the default stack alignment if required.
void setObjectOffset(int ObjectIdx, int64_t SPOffset)
Set the stack frame offset of the specified object.
bool hasPatchPoint() const
This method may be called any time after instruction selection is complete to determine if there is a call to a patchpoint intrinsic in this function.
int getStackProtectorIndex() const
Return the index for the stack protector object.
uint64_t estimateStackSize(const MachineFunction &MF) const
Estimate and return the size of the stack frame.
void setStackID(int ObjectIdx, uint8_t ID)
bool isCalleeSavedInfoValid() const
Has the callee saved info been calculated yet?
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
bool isMaxCallFrameSizeComputed() const
bool hasStackMap() const
This method may be called any time after instruction selection is complete to determine if there is a call to a stackmap intrinsic in this function.
const std::vector< CalleeSavedInfo > & getCalleeSavedInfo() const
Returns a reference to call saved info vector for the current function.
unsigned getMaxCallFrameSize() const
Return the maximum size of a call frame that must be allocated for an outgoing function call.
int getObjectIndexEnd() const
Return one past the maximum frame object index.
bool hasStackProtectorIndex() const
uint8_t getStackID(int ObjectIdx) const
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
int getObjectIndexBegin() const
Return the minimum frame object index.
bool isDeadObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a dead object.
const WinEHFuncInfo * getWinEHFuncInfo() const
getWinEHFuncInfo - Return information about how the current function uses Windows exception handling.
unsigned addFrameInst(const MCCFIInstruction &Inst)
void setHasWinCFI(bool v)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
MachineModuleInfo & getMMI() const
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineBasicBlock & front() const
bool hasEHFunclets() const
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
const MachineInstrBuilder & addCFIIndex(unsigned CFIIndex) const
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
void setFlags(unsigned flags)
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
uint32_t getFlags() const
Return the MI flags bitvector.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
This class contains meta information specific to a module.
const MCContext & getContext() const
MachineOperand class - Representation of each machine instruction operand.
void setImm(int64_t immVal)
static MachineOperand CreateImm(int64_t Val)
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes and use/def chains for registers.
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified register class.
bool isLiveIn(Register Reg) const
const MCPhysReg * getCalleeSavedRegs() const
Returns list of callee saved registers.
bool isPhysRegUsed(MCRegister PhysReg, bool SkipRegMaskTest=false) const
Return true if the specified register is modified or read in this function.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memory), i.e. a start pointer and a length.
void enterBasicBlockEnd(MachineBasicBlock &MBB)
Start tracking liveness from the end of basic block MBB.
Register FindUnusedReg(const TargetRegisterClass *RC) const
Find an unused register of the specified register class.
void backward()
Update internal register state and move MBB iterator backwards.
void addScavengingFrameIndex(int FI)
Add a scavenging frame index.
Wrapper class representing virtual and physical registers.
SMEAttrs is a utility class to parse the SME ACLE attributes on functions.
bool hasStreamingInterface() const
bool hasStreamingBody() const
This class consists of common code factored out of the SmallVector class to reduce code duplication based on element size.
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
int64_t getFixed() const
Returns the fixed component of the stack.
int64_t getScalable() const
Returns the scalable component of the stack.
static StackOffset get(int64_t Fixed, int64_t Scalable)
static StackOffset getScalable(int64_t Scalable)
static StackOffset getFixed(int64_t Fixed)
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
virtual void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, RegScavenger *RS=nullptr) const
This method determines which of the registers reported by TargetRegisterInfo::getCalleeSavedRegs() should actually get saved.
int getOffsetOfLocalArea() const
getOffsetOfLocalArea - This method returns the offset of the local area from the stack pointer on entrance to a function.
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligned on entry to a function.
StackDirection getStackGrowthDirection() const
getStackGrowthDirection - Return the direction the stack grows
virtual bool enableCFIFixup(MachineFunction &MF) const
Returns true if we may need to fix the unwind information for the function.
TargetInstrInfo - Interface to description of machine instruction set.
CodeModel::Model getCodeModel() const
Returns the code model.
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
SwiftAsyncFramePointerMode SwiftAsyncFramePointer
Control when and how the Swift async frame pointer bit should be set.
bool DisableFramePointerElim(const MachineFunction &MF) const
DisableFramePointerElim - This returns true if frame pointer elimination optimization should be disabled for the given machine function.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.
const TargetRegisterClass * getMinimalPhysRegClass(MCRegister Reg, MVT VT=MVT::Other) const
Returns the Register Class of a physical register of the given type, picking the most sub register class of the right type that contains this physreg.
Align getSpillAlign(const TargetRegisterClass &RC) const
Return the minimum required alignment in bytes for a spill slot for a register of this class.
bool hasStackRealignment(const MachineFunction &MF) const
True if stack realignment is required and still possible.
unsigned getSpillSize(const TargetRegisterClass &RC) const
Return the size in bytes of the stack slot allocated to hold a spilled copy of a register from class RC.
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const TargetInstrInfo * getInstrInfo() const
StringRef getArchName() const
Get the architecture (first) component of the triple.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ MO_GOT
MO_GOT - This flag indicates that a symbol operand represents the address of the GOT entry for the symbol.
static unsigned getShiftValue(unsigned Imm)
getShiftValue - Extract the shift value.
static unsigned getArithExtendImm(AArch64_AM::ShiftExtendType ET, unsigned Imm)
getArithExtendImm - Encode the extend type and shift amount for an arithmetic instruction: imm: 3-bit...
static uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize)
encodeLogicalImmediate - Return the encoded immediate value for a logical immediate instruction of the given register size.
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm: 6-bit shift amount shifter: 000 ==> lsl 001 ==...
const unsigned StackProbeMaxLoopUnroll
Maximum number of iterations to unroll for a constant size probing loop.
const unsigned StackProbeMaxUnprobedStack
Maximum allowed number of unprobed bytes above SP at an ABI boundary.
@ AArch64_SVE_VectorCall
Used between AArch64 SVE functions.
@ PreserveMost
Used for runtime calls that preserves most registers.
@ CXX_FAST_TLS
Used for access functions.
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
@ AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1
Preserve X1-X15, X19-X29, SP, Z0-Z31, P0-P15.
@ PreserveAll
Used for runtime calls that preserves (almost) all registers.
@ PreserveNone
Used for runtime calls that preserves none general registers.
@ Win64
The C convention as implemented on Windows/x86-64 and AArch64.
@ SwiftTail
This follows the Swift calling convention in how arguments are passed but guarantees tail calls will be honored.
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Define
Register definition.
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
Reg
All possible values of the reg field in the ModR/M byte.
initializer< Ty > init(const Ty &Val)
NodeAddr< InstrNode * > Instr
This is an optimization pass for GlobalISel generic memory operations.
void stable_sort(R &&Range)
MCCFIInstruction createDefCFA(const TargetRegisterInfo &TRI, unsigned FrameReg, unsigned Reg, const StackOffset &Offset, bool LastAdjustmentWasScalable=true)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
int isAArch64FrameOffsetLegal(const MachineInstr &MI, StackOffset &Offset, bool *OutUseUnscaledOp=nullptr, unsigned *OutUnscaledOp=nullptr, int64_t *EmittableOffset=nullptr)
Check if the Offset is a valid frame offset for MI.
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
MCCFIInstruction createCFAOffset(const TargetRegisterInfo &MRI, unsigned Reg, const StackOffset &OffsetFromDefCFA)
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
unsigned getBLRCallOpcode(const MachineFunction &MF)
Return opcode to be used for indirect calls.
@ AArch64FrameOffsetCannotUpdate
Offset cannot apply.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
auto reverse(ContainerTy &&C)
@ Always
Always set the bit.
@ Never
Never set the bit.
@ DeploymentBased
Determine whether to set the bit statically or dynamically based on the deployment target.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, StackOffset Offset, const TargetInstrInfo *TII, MachineInstr::MIFlag=MachineInstr::NoFlags, bool SetNZCV=false, bool NeedsWinCFI=false, bool *HasWinCFI=nullptr, bool EmitCFAOffset=false, StackOffset InitialOffset={}, unsigned FrameReg=AArch64::SP)
emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg plus Offset.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
unsigned getDefRegState(bool B)
unsigned getKillRegState(bool B)
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
void fullyRecomputeLiveIns(ArrayRef< MachineBasicBlock * > MBBs)
Convenience function for recomputing live-in's for a set of MBBs until the computation converges.
Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
Description of the encoding of one expression Op.
Pair of physical register and lane mask.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.