#define DEBUG_TYPE "frame-info"

static cl::opt<bool> EnableRedZone("aarch64-redzone",
                                   cl::desc("enable use of redzone on AArch64"),
                                   cl::init(false), cl::Hidden);

static cl::opt<bool> StackTaggingMergeSetTag(
    "stack-tagging-merge-settag",
    cl::desc("merge settag instruction in function epilog"), cl::init(true),
    cl::Hidden);

cl::opt<bool> EnableHomogeneousPrologEpilog(
    "homogeneous-prolog-epilog", cl::Hidden,
    cl::desc("Emit homogeneous prologue and epilogue for the size "
             "optimization (default = off)"));

STATISTIC(NumRedZoneFunctions, "Number of functions using red zone");
  int64_t ArgumentPopSize = 0;
  if (IsTailCallReturn) {
    // ...
    ArgumentPopSize = StackAdjust.getImm();
  }
  // ...
  return ArgumentPopSize;
bool AArch64FrameLowering::homogeneousPrologEpilog(
    MachineFunction &MF, MachineBasicBlock *Exit) const {
  // ...
  if (AFI->hasSwiftAsyncContext() || AFI->hasStreamingModeChanges())
    return false;

  // ...
  unsigned NumGPRs = 0;
  for (unsigned I = 0; CSRegs[I]; ++I) {
    Register Reg = CSRegs[I];
    if (Reg == AArch64::LR) {
      assert(CSRegs[I + 1] == AArch64::FP);
      if (NumGPRs % 2 != 0)
        return false;
      // ...
    }
    if (AArch64::GPR64RegClass.contains(Reg))
      ++NumGPRs;
  }
  // ...
}
bool AArch64FrameLowering::producePairRegisters(MachineFunction &MF) const {
    if (MI.isDebugInstr() || MI.isPseudo() ||
        MI.getOpcode() == AArch64::ADDXri ||
        MI.getOpcode() == AArch64::ADDSXri)
      continue;
static unsigned getFixedObjectSize(const MachineFunction &MF,
                                   const AArch64FunctionInfo *AFI,
                                   bool IsWin64, bool IsFunclet) {
  if (!IsWin64 || IsFunclet) {
    // ...
    if (MF.getFunction().getAttributes().hasAttrSomewhere(
            Attribute::SwiftAsync))
      // ...
  } else {
    // ...
    const unsigned UnwindHelpObject = (MF.hasEHFunclets() ? 8 : 0);
    return alignTo(VarArgsArea + UnwindHelpObject, 16);
  }
}

bool AArch64FrameLowering::canUseRedZone(const MachineFunction &MF) const {
  // ...
  const unsigned RedZoneSize = /* ... */;
  // ...
  bool LowerQRegCopyThroughMem = Subtarget.hasFPARMv8() &&
                                 /* ... */;
  // ...
  return !(MFI.hasCalls() || hasFP(MF) || NumBytes > RedZoneSize ||
           /* ... */);
}
MachineBasicBlock::iterator AArch64FrameLowering::eliminateCallFramePseudoInstr(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator I) const {
  // ...
  unsigned Opc = I->getOpcode();
  bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode();
  uint64_t CalleePopAmount = IsDestroy ? I->getOperand(1).getImm() : 0;

  if (!hasReservedCallFrame(MF)) {
    int64_t Amount = I->getOperand(0).getImm();
    // ...
    if (CalleePopAmount == 0) {
      // ...
      assert(Amount > -0xffffff && Amount < 0xffffff && "call frame too large");
      // ...
    } else {
      // ...
      assert(/* ... */ &&
             "non-reserved call frame without var sized objects?");
    }
  } else if (CalleePopAmount != 0) {
    // ...
    assert(CalleePopAmount < 0xffffff && "call frame too large");
void AArch64FrameLowering::emitCalleeSavedGPRLocations(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
  // ...
  bool LocallyStreaming =
      Attrs.hasStreamingBody() && !Attrs.hasStreamingInterface();
  // ...
  for (const auto &Info : CSI) {
    unsigned FrameIdx = Info.getFrameIdx();
    // ...
    assert(!Info.isSpilledToReg() && "Spilling to registers not implemented");
    int64_t DwarfReg = TRI.getDwarfRegNum(Info.getReg(), true);
    // ...
    if (/* ... */ ||
        (!LocallyStreaming &&
         DwarfReg == TRI.getDwarfRegNum(AArch64::VG, true)))
      continue;
void AArch64FrameLowering::emitCalleeSavedSVELocations(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
  // ...
  for (const auto &Info : CSI) {
    // ...
    assert(!Info.isSpilledToReg() && "Spilling to registers not implemented");
  const MCInstrDesc &CFIDesc = TII.get(TargetOpcode::CFI_INSTRUCTION);
  // ...
  // Reset the CFA to `SP + 0`.
  unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::cfiDefCfa(
      nullptr, TRI.getDwarfRegNum(AArch64::SP, true), 0));
  // ...
  if (MFI.shouldSignReturnAddress(MF)) {
    // ...
  }

  // Shadow call stack uses X18; reset it.
  if (MFI.needsShadowCallStackPrologueEpilogue(MF))
    insertCFISameValue(CFIDesc, MF, MBB, InsertPt,
                       TRI.getDwarfRegNum(AArch64::X18, true));

  // ...
  const std::vector<CalleeSavedInfo> &CSI =
      MF.getFrameInfo().getCalleeSavedInfo();
  for (const auto &Info : CSI) {
    unsigned Reg = Info.getReg();
    if (!TRI.regNeedsCFI(Reg, Reg))
      continue;
    insertCFISameValue(CFIDesc, MF, MBB, InsertPt,
                       TRI.getDwarfRegNum(Reg, true));
  }
static void emitCalleeSavedRestores(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator MBBI,
                                    bool SVE) {
  // ...
  for (const auto &Info : CSI) {
    // ...
    unsigned Reg = Info.getReg();
    // ...
    if (!Info.isRestored())
      continue;

    unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createRestore(
        nullptr, TRI.getDwarfRegNum(Info.getReg(), true)));
void AArch64FrameLowering::emitCalleeSavedGPRRestores(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
  emitCalleeSavedRestores(MBB, MBBI, /*SVE=*/false);
}

void AArch64FrameLowering::emitCalleeSavedSVERestores(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
  emitCalleeSavedRestores(MBB, MBBI, /*SVE=*/true);
}
static int64_t upperBound(StackOffset Size) {
  static const int64_t MAX_BYTES_PER_SCALABLE_BYTE = 16;
  return Size.getScalable() * MAX_BYTES_PER_SCALABLE_BYTE + Size.getFixed();
}
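// Worst case: SVE registers are at most 2048 bits, i.e. 16x the 128-bit
// "scalable byte" granule, so a StackOffset of (S scalable, F fixed) bytes can
// occupy at most 16*S + F real bytes.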
void AArch64FrameLowering::allocateStackSpace(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    int64_t RealignmentPadding, StackOffset AllocSize, bool NeedsWinCFI,
    bool *HasWinCFI, bool EmitCFI, StackOffset InitialOffset,
    bool FollowupAllocs) const {
  // ...
  const uint64_t AndMask = ~(MaxAlign - 1);
  // ...
  Register TargetReg = RealignmentPadding
                           ? findScratchNonCalleeSaveRegister(&MBB)
                           : AArch64::SP;
  // ... (emitFrameOffset ending with:)
  //                 EmitCFI, InitialOffset);

  if (RealignmentPadding) {
    // ...
  }

  // ...
  if (AllocSize.getScalable() == 0 && RealignmentPadding == 0) {
    Register ScratchReg = findScratchNonCalleeSaveRegister(&MBB);
    assert(ScratchReg != AArch64::NoRegister);
    // ...
    if (FollowupAllocs) {
      // ...
    }
  }

  // ...
  if (upperBound(AllocSize) + RealignmentPadding <= ProbeSize) {
    Register ScratchReg = RealignmentPadding
                              ? findScratchNonCalleeSaveRegister(&MBB)
                              : AArch64::SP;
    assert(ScratchReg != AArch64::NoRegister);
    // ... (emitFrameOffset ending with:)
    //                 EmitCFI, InitialOffset);
    if (RealignmentPadding) {
      // ...
    }
    if (FollowupAllocs || upperBound(AllocSize) + RealignmentPadding >
                              AArch64::StackProbeMaxUnprobedStack) {
      // ...
    }
  }

  // ...
  assert(TargetReg != AArch64::NoRegister);
  // ... (emitFrameOffset ending with:)
  //                 EmitCFI, InitialOffset);
  if (RealignmentPadding) {
    // ...
  }
  // ...
  if (RealignmentPadding)
    // ...
}
#define CASE(n)                                                                \
  case AArch64::W##n:                                                          \
  case AArch64::X##n:                                                          \
    return AArch64::X##n
// ...
#define CASE(n)                                                                \
  case AArch64::B##n:                                                          \
  case AArch64::H##n:                                                          \
  case AArch64::S##n:                                                          \
  case AArch64::D##n:                                                          \
  case AArch64::Q##n:                                                          \
    return HasSVE ? AArch64::Z##n : AArch64::Q##n
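// The first CASE maps either name of a GPR (Wn/Xn) to the full 64-bit Xn; the
// second maps any view of a vector register (Bn..Qn) to the widest register
// that must be zeroed: Zn when SVE is available, otherwise Qn.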
void AArch64FrameLowering::emitZeroCallUsedRegs(BitVector RegsToZero,
                                                MachineBasicBlock &MBB) const {
  // ...
  bool HasSVE = STI.hasSVE();
  // ...
    if (TRI.isGeneralPurposeRegister(MF, Reg)) {
      // ...
      GPRsToZero.set(XReg);
    } else /* FP/NEON register */ {
      // ...
      FPRsToZero.set(XReg);
    }
  // ...
  for (MCPhysReg PReg :
       {AArch64::P0, AArch64::P1, AArch64::P2, AArch64::P3, AArch64::P4,
        AArch64::P5, AArch64::P6, AArch64::P7, AArch64::P8, AArch64::P9,
        AArch64::P10, AArch64::P11, AArch64::P12, AArch64::P13, AArch64::P14,
        AArch64::P15}) {
    if (RegsToZero[PReg])
      // ...
  }
static void getLiveRegsForEntryMBB(LivePhysRegs &LiveRegs,
                                   const MachineBasicBlock &MBB) {
  // ...
  for (unsigned i = 0; CSRegs[i]; ++i)
    LiveRegs.addReg(CSRegs[i]);
}

// ...
static Register findScratchNonCalleeSaveRegister(MachineBasicBlock *MBB) {
  // ...
  for (unsigned Reg : AArch64::GPR64RegClass) {
    // ...
  }
  return AArch64::NoRegister;
}
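// Scans the allocatable GPR64s and returns the first register that is free at
// the entry of MBB, for use as a prologue/epilogue scratch register; callers
// assert on AArch64::NoRegister when a scratch register is mandatory.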
static bool windowsRequiresStackProbe(MachineFunction &MF,
                                      uint64_t StackSizeInBytes) {
  // ...
  return /* ... */ &&
         StackSizeInBytes >= uint64_t(MFI.getStackProbeSize());
}

static bool needsWinCFI(const MachineFunction &MF) {
  // ...
  return /* ... */ &&
         F.needsUnwindTableEntry();
}
bool AArch64FrameLowering::shouldCombineCSRLocalStackBump(
    MachineFunction &MF, uint64_t StackBumpBytes) const {
  // ...
  if (homogeneousPrologEpilog(MF))
    return false;
  // ...
  if (MFI.hasVarSizedObjects())
    return false;

  if (RegInfo->hasStackRealignment(MF))
    return false;
bool AArch64FrameLowering::shouldCombineCSRLocalStackBumpInEpilogue(
    MachineBasicBlock &MBB, unsigned StackBumpBytes) const {
  if (!shouldCombineCSRLocalStackBump(*MBB.getParent(), StackBumpBytes))
    return false;
  // ...
  while (LastI != Begin) {
    --LastI;
    if (LastI->isTransient())
      continue;
    // ...
  }
  switch (LastI->getOpcode()) {
  case AArch64::STGloop:
  case AArch64::STZGloop:
  case AArch64::STGi:
  case AArch64::STZGi:
  case AArch64::ST2Gi:
  case AArch64::STZ2Gi:
    return false;
  default:
    return true;
  }
static MachineBasicBlock::iterator InsertSEH(MachineBasicBlock::iterator MBBI,
                                             const TargetInstrInfo &TII,
                                             MachineInstr::MIFlag Flag) {
  unsigned Opc = MBBI->getOpcode();
  // ...
  unsigned ImmIdx = MBBI->getNumOperands() - 1;
  int Imm = MBBI->getOperand(ImmIdx).getImm();
  // ...
  switch (Opc) {
  // ...
  case AArch64::LDPDpost:
    Imm = -Imm;
    [[fallthrough]];
  case AArch64::STPDpre: {
    unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(2).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFRegP_X))
              /* ... */;
    break;
  }
  case AArch64::LDPXpost:
    Imm = -Imm;
    [[fallthrough]];
  case AArch64::STPXpre: {
    // ...
    if (Reg0 == AArch64::FP && Reg1 == AArch64::LR)
      MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFPLR_X))
                /* ... */;
    else
      MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveRegP_X))
                /* ... */;
    break;
  }
  case AArch64::LDRDpost:
    Imm = -Imm;
    [[fallthrough]];
  case AArch64::STRDpre: {
    unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFReg_X))
              /* ... */;
    break;
  }
  case AArch64::LDRXpost:
    Imm = -Imm;
    [[fallthrough]];
  case AArch64::STRXpre: {
    unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    // ...
    break;
  }
  case AArch64::STPDi:
  case AArch64::LDPDi: {
    unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
    unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    // ...
    break;
  }
  case AArch64::STPXi:
  case AArch64::LDPXi: {
    // ...
    if (Reg0 == AArch64::FP && Reg1 == AArch64::LR)
      // ...
    break;
  }
  case AArch64::STRXui:
  case AArch64::LDRXui: {
    int Reg = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
    // ...
    break;
  }
  case AArch64::STRDui:
  case AArch64::LDRDui: {
    unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
    // ...
    break;
  }
  case AArch64::STPQi:
  case AArch64::LDPQi: {
    unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
    unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveAnyRegQP))
              /* ... */;
    break;
  }
  case AArch64::LDPQpost:
    Imm = -Imm;
    [[fallthrough]];
  case AArch64::STPQpre: {
    unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(2).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveAnyRegQPX))
              /* ... */;
    break;
  }
  }
static void fixupSEHOpcode(MachineBasicBlock::iterator MBBI,
                           unsigned LocalStackSize) {
  MachineOperand *ImmOpnd = nullptr;
  unsigned ImmIdx = MBBI->getNumOperands() - 1;
  switch (MBBI->getOpcode()) {
  default:
    llvm_unreachable("Fix the offset in the SEH instruction");
  case AArch64::SEH_SaveFPLR:
  case AArch64::SEH_SaveRegP:
  case AArch64::SEH_SaveReg:
  case AArch64::SEH_SaveFRegP:
  case AArch64::SEH_SaveFReg:
  case AArch64::SEH_SaveAnyRegQP:
  case AArch64::SEH_SaveAnyRegQPX:
    ImmOpnd = &MBBI->getOperand(ImmIdx);
    break;
  }
  if (ImmOpnd)
    ImmOpnd->setImm(ImmOpnd->getImm() + LocalStackSize);
}
bool isVGInstruction(MachineBasicBlock::iterator MBBI) {
  unsigned Opc = MBBI->getOpcode();
  if (Opc == AArch64::CNTD_XPiI || Opc == AArch64::RDSVLI_XI ||
      Opc == AArch64::UBFMXri)
    return true;

  if (Opc == AArch64::ORRXrr)
    return true;

  if (Opc == AArch64::BL) {
    auto Op1 = MBBI->getOperand(0);
    return Op1.isSymbol() &&
           (StringRef(Op1.getSymbolName()) == "__arm_get_current_vg");
  }

  return false;
}
static MachineBasicBlock::iterator convertCalleeSaveRestoreToSPPrePostIncDec(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    const DebugLoc &DL, const TargetInstrInfo *TII, int CSStackSizeInc,
    bool NeedsWinCFI, bool *HasWinCFI, bool EmitCFI,
    MachineInstr::MIFlag FrameFlag = MachineInstr::FrameSetup,
    int CFAOffset = 0) {
  unsigned NewOpc;
  switch (MBBI->getOpcode()) {
  default:
    llvm_unreachable("Unexpected callee-save save/restore opcode!");
  case AArch64::STPXi:
    NewOpc = AArch64::STPXpre;
    break;
  case AArch64::STPDi:
    NewOpc = AArch64::STPDpre;
    break;
  case AArch64::STPQi:
    NewOpc = AArch64::STPQpre;
    break;
  case AArch64::STRXui:
    NewOpc = AArch64::STRXpre;
    break;
  case AArch64::STRDui:
    NewOpc = AArch64::STRDpre;
    break;
  case AArch64::STRQui:
    NewOpc = AArch64::STRQpre;
    break;
  case AArch64::LDPXi:
    NewOpc = AArch64::LDPXpost;
    break;
  case AArch64::LDPDi:
    NewOpc = AArch64::LDPDpost;
    break;
  case AArch64::LDPQi:
    NewOpc = AArch64::LDPQpost;
    break;
  case AArch64::LDRXui:
    NewOpc = AArch64::LDRXpost;
    break;
  case AArch64::LDRDui:
    NewOpc = AArch64::LDRDpost;
    break;
  case AArch64::LDRQui:
    NewOpc = AArch64::LDRQpost;
    break;
  }
  // ...
    auto SEH = std::next(MBBI);
    // ...
      SEH->eraseFromParent();
  // ...
  int64_t MinOffset, MaxOffset;
  bool Success = static_cast<const AArch64InstrInfo *>(TII)->getMemOpInfo(
      NewOpc, Scale, Width, MinOffset, MaxOffset);
  (void)Success;
  // ...
  if (MBBI->getOperand(MBBI->getNumOperands() - 1).getImm() != 0 ||
      CSStackSizeInc < MinOffset || CSStackSizeInc > MaxOffset) {
    // ...
    emitFrameOffset(/* ... */,
                    false, false, nullptr, EmitCFI,
                    /* ... */);
    return std::prev(MBBI);
  }
  // ...
  unsigned OpndIdx = 0;
  for (unsigned OpndEnd = MBBI->getNumOperands() - 1; OpndIdx < OpndEnd;
       ++OpndIdx)
    MIB.add(MBBI->getOperand(OpndIdx));

  assert(MBBI->getOperand(OpndIdx).getImm() == 0 &&
         "Unexpected immediate offset in first/last callee-save save/restore "
         "instruction!");
  assert(MBBI->getOperand(OpndIdx - 1).getReg() == AArch64::SP &&
         "Unexpected base register in callee-save save/restore instruction!");
  assert(CSStackSizeInc % Scale == 0);
  MIB.addImm(CSStackSizeInc / (int)Scale);
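// This rewrites the first callee-save store (or last restore) so that the SP
// adjustment is folded into it as a pre/post-index writeback, e.g.
// `stp x29, x30, [sp, #-16]!` instead of a separate `sub sp, sp, #16`,
// falling back to a discrete SP update when the offset does not fit.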
static void fixupCalleeSaveRestoreStackOffset(MachineInstr &MI,
                                              uint64_t LocalStackSize,
                                              bool NeedsWinCFI,
                                              bool *HasWinCFI) {
  // ...
  unsigned Opc = MI.getOpcode();
  unsigned Scale;
  switch (Opc) {
  case AArch64::STPXi:
  case AArch64::STRXui:
  case AArch64::STPDi:
  case AArch64::STRDui:
  case AArch64::LDPXi:
  case AArch64::LDRXui:
  case AArch64::LDPDi:
  case AArch64::LDRDui:
    Scale = 8;
    break;
  case AArch64::STPQi:
  case AArch64::STRQui:
  case AArch64::LDPQi:
  case AArch64::LDRQui:
    Scale = 16;
    break;
  default:
    llvm_unreachable("Unexpected callee-save save/restore opcode!");
  }

  unsigned OffsetIdx = MI.getNumExplicitOperands() - 1;
  assert(MI.getOperand(OffsetIdx - 1).getReg() == AArch64::SP &&
         "Unexpected base register in callee-save save/restore instruction!");
  // ...
  assert(LocalStackSize % Scale == 0);
  MachineOperand &OffsetOpnd = MI.getOperand(OffsetIdx);
  OffsetOpnd.setImm(OffsetOpnd.getImm() + LocalStackSize / Scale);

  if (NeedsWinCFI) {
    // ...
    auto MBBI = std::next(MachineBasicBlock::iterator(MI));
    assert(MBBI != MI.getParent()->end() && "Expecting a valid instruction");
    assert(AArch64InstrInfo::isSEHInstruction(*MBBI) &&
           "Expecting a SEH instruction");
    fixupSEHOpcode(MBBI, LocalStackSize);
  }
}
static bool IsSVECalleeSave(MachineBasicBlock::iterator I) {
  switch (I->getOpcode()) {
  default:
    return false;
  case AArch64::PTRUE_C_B:
  case AArch64::LD1B_2Z_IMM:
  case AArch64::ST1B_2Z_IMM:
  case AArch64::STR_ZXI:
  case AArch64::STR_PXI:
  case AArch64::LDR_ZXI:
  case AArch64::LDR_PXI:
    return true;
  }
}
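// Used when scanning the prologue/epilogue to delimit the run of SVE
// callee-save spills/fills, including the PTRUE_C_B + LD1B/ST1B pair form
// used for SVE2.1/SME2 multi-vector saves.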
static void emitShadowCallStackPrologue(const TargetInstrInfo &TII,
                                        MachineFunction &MF,
                                        MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MBBI,
                                        const DebugLoc &DL, bool NeedsWinCFI,
                                        bool NeedsUnwindInfo) {
  // ...
  if (NeedsUnwindInfo) {
    // Emit a CFI instruction that causes 8 to be subtracted from the value of
    // x18 when unwinding past this frame.
    static const char CFIInst[] = {
        dwarf::DW_CFA_val_expression,
        18, // register
        2,  // length
        static_cast<char>(unsigned(dwarf::DW_OP_breg18)),
        static_cast<char>(-8) & 0x7f, // addend (sleb128)
    };
    unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createEscape(
        nullptr, StringRef(CFIInst, sizeof(CFIInst))));
static void emitDefineCFAWithFP(MachineFunction &MF, MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MBBI,
                                const DebugLoc &DL, unsigned FixedObject) {
  // ...
  const int OffsetToFirstCalleeSaveFromFP =
      AFI->getCalleeSaveBaseToFrameRecordOffset();
  unsigned Reg = TRI->getDwarfRegNum(FramePtr, true);
  unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::cfiDefCfa(
      nullptr, Reg, FixedObject - OffsetToFirstCalleeSaveFromFP));
  bool HasFP = hasFP(MF);
  // ...
  bool HasWinCFI = false;
  // ...
  auto NonFrameStart = MBB.begin();
  while (NonFrameStart != End &&
         NonFrameStart->getFlag(MachineInstr::FrameSetup))
    ++NonFrameStart;

  // ...
  if (NonFrameStart != MBB.end()) {
    // ...
  }
  // ...
    if (NonFrameStart == MBB.end())
      return;
    // Check that the inserted prologue does not clobber live registers.
    // ...
      for (auto &Op : MI.operands())
        if (Op.isReg() && Op.isDef())
          assert(!LiveRegs.contains(Op.getReg()) &&
                 "live register clobbered by inserted prologue instructions");
  // ...
  if (MFnI.needsShadowCallStackPrologueEpilogue(MF))
    emitShadowCallStackPrologue(*TII, MF, MBB, MBBI, DL, NeedsWinCFI,
                                MFnI.needsDwarfUnwindInfo(MF));

  if (MFnI.shouldSignReturnAddress(MF)) {
    // ...
  }

  if (EmitCFI && MFnI.isMTETagged()) {
    // ...
  }
  // ...
    assert(!HasFP && "unexpected function without stack frame but with FP");
    assert(!SVEStackSize &&
           "unexpected function without stack frame but with SVE objects");
    // ...
      ++NumRedZoneFunctions;
  bool CombineSPBump = shouldCombineCSRLocalStackBump(MF, NumBytes);
  bool HomPrologEpilog = homogeneousPrologEpilog(MF);
  if (CombineSPBump) {
    assert(!SVEStackSize && "Cannot combine SP bump with SVE");
    // ...
  } else if (HomPrologEpilog) {
    // ...
    NumBytes -= PrologueSaveSize;
  } else if (PrologueSaveSize != 0) {
    MBBI = convertCalleeSaveRestoreToSPPrePostIncDec(
        MBB, MBBI, DL, TII, -PrologueSaveSize, NeedsWinCFI, &HasWinCFI,
        EmitAsyncCFI);
    NumBytes -= PrologueSaveSize;
  }
  assert(NumBytes >= 0 && "Negative stack allocation size!?");
  // ... (fixupCalleeSaveRestoreStackOffset(*MBBI, AFI->getLocalStackSize(),
  //                                        NeedsWinCFI, &HasWinCFI);)
  // ...
  if (!IsFunclet && HasFP) {
    // ...
      bool HaveInitialContext = Attrs.hasAttrSomewhere(Attribute::SwiftAsync);
      if (HaveInitialContext)
        MBB.addLiveIn(AArch64::X22);
      Register Reg = HaveInitialContext ? AArch64::X22 : AArch64::XZR;
    // ...
    if (HomPrologEpilog) {
      // ...
    }
    // ...
    if (NeedsWinCFI && HasWinCFI) {
      // ... (after SEH_PrologEnd, no further WinCFI opcodes may be emitted)
      NeedsWinCFI = false;
    }
  }
  // ...
    emitCalleeSavedGPRLocations(MBB, MBBI);
  const bool NeedsRealignment =
      NumBytes && !IsFunclet && RegInfo->hasStackRealignment(MF);
  const int64_t RealignmentPadding =
      (NeedsRealignment && MFI.getMaxAlign() > Align(16))
          ? MFI.getMaxAlign().value() - 16
          : 0;

  if (windowsRequiresStackProbe(MF, NumBytes + RealignmentPadding)) {
    uint64_t NumWords = (NumBytes + RealignmentPadding) >> 4;
    // ...
    if (NumBytes >= (1 << 28))
      report_fatal_error("Stack size cannot exceed 256MB for stack "
                         "unwinding purposes");

    uint32_t LowNumWords = NumWords & 0xFFFF;
    // ...
    if ((NumWords & 0xFFFF0000) != 0) {
      // ... (MOVK the high half:)
          .addImm((NumWords & 0xFFFF0000) >> 16)
      // ...
    }
    // ...
    if (RealignmentPadding > 0) {
      if (RealignmentPadding >= 4096) {
        // ...
            .addImm(RealignmentPadding)
        // ...
      } else {
        // ...
            .addImm(RealignmentPadding)
        // ...
      }
    }
  }
  StackOffset SVECalleeSavesSize = {}, SVELocalsSize = SVEStackSize;
  // ...
    LLVM_DEBUG(dbgs() << "SVECalleeSavedStackSize = " << CalleeSavedSize
                      << "\n");
    CalleeSavesBegin = MBBI;
    // ...
    CalleeSavesEnd = MBBI;
    // ...
    SVELocalsSize = SVEStackSize - SVECalleeSavesSize;
  // ...
  // Allocate space for the SVE callee saves.
  allocateStackSpace(MBB, CalleeSavesBegin, 0, SVECalleeSavesSize, false,
                     nullptr, EmitAsyncCFI && !HasFP, CFAOffset,
                     MFI.hasVarSizedObjects() || LocalsSize);
  CFAOffset += SVECalleeSavesSize;

  if (EmitAsyncCFI)
    emitCalleeSavedSVELocations(MBB, CalleeSavesEnd);

  // ...
  assert(!(canUseRedZone(MF) && NeedsRealignment) &&
         "Cannot use redzone with stack realignment");
  // ...
  allocateStackSpace(MBB, CalleeSavesEnd, RealignmentPadding,
                     SVELocalsSize + StackOffset::getFixed(NumBytes),
                     NeedsWinCFI, &HasWinCFI, EmitAsyncCFI && !HasFP,
                     CFAOffset, MFI.hasVarSizedObjects());
  if (!IsFunclet && RegInfo->hasBasePointer(MF)) {
    // ...
    if (NeedsWinCFI && HasWinCFI) {
      // ...
    }
  }

  // ...
  if (IsFunclet && F.hasPersonalityFn()) {
    // ...
  }

  // Emit the (deferred) CFI for the whole prologue at once.
  if (EmitCFI && !EmitAsyncCFI) {
    // ... (createDefCFA over:)
    //     *RegInfo, AArch64::SP, AArch64::SP, TotalSize,
    // ...
    emitCalleeSavedGPRLocations(MBB, MBBI);
    emitCalleeSavedSVELocations(MBB, MBBI);
  }
}

static bool isFuncletReturnInstr(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    return false;
  case AArch64::CATCHRET:
  case AArch64::CLEANUPRET:
    return true;
  }
}
  bool HasWinCFI = false;
  bool IsFunclet = false;

  if (MBB.end() != MBBI) {
    DL = MBBI->getDebugLoc();
    IsFunclet = isFuncletReturnInstr(*MBBI);
  }
  // ...
    BuildMI(MBB, MBB.getFirstTerminator(), DL,
            TII->get(AArch64::PAUTH_EPILOGUE))
        .setMIFlag(MachineInstr::FrameDestroy);
  // ...
    BuildMI(/* ... */, TII->get(AArch64::SEH_EpilogEnd))
  // ...
  int64_t AfterCSRPopSize = ArgumentStackToRestore;
  // ...
  if (homogeneousPrologEpilog(MF, &MBB)) {
    // ...
    auto HomogeneousEpilog = std::prev(LastPopI);
    if (HomogeneousEpilog->getOpcode() == AArch64::HOM_Epilog)
      LastPopI = HomogeneousEpilog;
    assert(AfterCSRPopSize == 0);
    return;
  }
  // ...
  bool CombineSPBump = shouldCombineCSRLocalStackBumpInEpilogue(MBB, NumBytes);
  // Assume we can't combine the last pop with the sp restore.
  bool CombineAfterCSRBump = false;
  if (!CombineSPBump && PrologueSaveSize != 0) {
    // ...
    while (Pop->getOpcode() == TargetOpcode::CFI_INSTRUCTION ||
           AArch64InstrInfo::isSEHInstruction(*Pop))
      Pop = std::prev(Pop);
    // ...
    const MachineOperand &OffsetOp = Pop->getOperand(Pop->getNumOperands() - 1);
    // ...
    if (OffsetOp.getImm() == 0 && AfterCSRPopSize >= 0) {
      convertCalleeSaveRestoreToSPPrePostIncDec(
          MBB, Pop, DL, TII, PrologueSaveSize, NeedsWinCFI, &HasWinCFI, EmitCFI,
          MachineInstr::FrameDestroy, PrologueSaveSize);
    } else {
      // ...
      AfterCSRPopSize += PrologueSaveSize;
      CombineAfterCSRBump = true;
    }
  }
  // ...
  while (LastPopI != Begin) {
    // ...
    } else if (CombineSPBump)
      fixupCalleeSaveRestoreStackOffset(*LastPopI, AFI->getLocalStackSize(),
                                        NeedsWinCFI, &HasWinCFI);
  }
  // ...
    EpilogStartI = LastPopI;
  if (CombineSPBump) {
    assert(!SVEStackSize && "Cannot combine SP bump with SVE");
    // ...
    if (EmitCFI && hasFP(MF)) {
      // ...
      unsigned Reg = RegInfo.getDwarfRegNum(AArch64::SP, true);
      // ...
    }
    // ...
  }

  NumBytes -= PrologueSaveSize;
  assert(NumBytes >= 0 && "Negative stack allocation size!?");
  StackOffset DeallocateBefore = {}, DeallocateAfter = SVEStackSize;
  MachineBasicBlock::iterator RestoreBegin = LastPopI, RestoreEnd = LastPopI;
  if (int64_t CalleeSavedSize = AFI->getSVECalleeSavedStackSize()) {
    RestoreBegin = std::prev(RestoreEnd);
    while (RestoreBegin != MBB.begin() &&
           IsSVECalleeSave(std::prev(RestoreBegin)))
      --RestoreBegin;
    // ...
    DeallocateBefore = SVEStackSize - CalleeSavedSizeAsOffset;
    DeallocateAfter = CalleeSavedSizeAsOffset;
  }
  // ...
      emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP,
                      /* ... */,
                      false, false, nullptr, EmitCFI && !hasFP(MF),
                      /* ... */);
      // ... (emitFrameOffset with:)
      //                false, nullptr, EmitCFI && !hasFP(MF),
      // ... (emitFrameOffset with:)
      //                false, nullptr, EmitCFI && !hasFP(MF),
    // ...
    emitCalleeSavedSVERestores(MBB, RestoreEnd);
  // ...
  bool RedZone = canUseRedZone(MF);
  // If this was a redzone leaf function, we don't need to restore the
  // stack pointer (but we may need to pop stack args for fastcc).
  if (RedZone && AfterCSRPopSize == 0)
    return;
  // ...
  bool NoCalleeSaveRestore = PrologueSaveSize == 0;
  int64_t StackRestoreBytes = RedZone ? 0 : NumBytes;
  if (NoCalleeSaveRestore)
    StackRestoreBytes += AfterCSRPopSize;
  // ...
    emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP,
                    /* ... */);
    // ...
    if (NoCalleeSaveRestore || AfterCSRPopSize == 0) {
      // ...
    }
  // ...
    emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::FP,
                    /* ... */);
  else if (NumBytes)
    // ...
  // ...
  if (EmitCFI && hasFP(MF)) {
    // ...
    unsigned Reg = RegInfo.getDwarfRegNum(AArch64::SP, true);
    // ...
  }

  // ...
  if (AfterCSRPopSize) {
    assert(AfterCSRPopSize > 0 && "attempting to reallocate arg stack that an "
                                  "interrupt may have clobbered");
    // ... (emitFrameOffset with:)
    //     false, NeedsWinCFI, &HasWinCFI, EmitCFI,
  }
2583 false, NeedsWinCFI, &HasWinCFI, EmitCFI,
2615 int64_t ObjectOffset) {
2620 unsigned FixedObject =
2629 int64_t ObjectOffset) {
2640 return RegInfo->getLocalAddressRegister(MF) == AArch64::FP
2647 bool ForSimm)
const {
2650 bool isFixed = MFI.isFixedObjectIndex(FI);
StackOffset AArch64FrameLowering::resolveFrameOffsetReference(
    const MachineFunction &MF, int64_t ObjectOffset, bool isFixed, bool isSVE,
    Register &FrameReg, bool PreferFP, bool ForSimm) const {
  // ...
  PreferFP &= !SVEStackSize;
  // ...
  } else if (isCSR && RegInfo->hasStackRealignment(MF)) {
    // The CSR area is accessed via the frame pointer when realigning.
    assert(hasFP(MF) && "Re-aligned stack must have frame pointer");
    UseFP = true;
  } else if (hasFP(MF) && !RegInfo->hasStackRealignment(MF)) {
    // ...
    bool FPOffsetFits = !ForSimm || FPOffset >= -256;
    PreferFP |= Offset > -FPOffset && !SVEStackSize;

    if (MFI.hasVarSizedObjects()) {
      // ...
      bool CanUseBP = RegInfo->hasBasePointer(MF);
      if (FPOffsetFits && CanUseBP) // Both are ok. Pick the best.
        UseFP = PreferFP;
      else if (!CanUseBP) // Can't use BP. Forced to use FP.
        UseFP = true;
      // ...
    } else if (FPOffset >= 0) {
      // ...
    } else if (MF.hasEHFunclets() && !RegInfo->hasBasePointer(MF)) {
      // ...
      assert(/* ... */ &&
             "Funclets should only be present on Win64");
      UseFP = true;
    } else {
      // ...
      if (FPOffsetFits && PreferFP)
        UseFP = true;
      // ...
    }
  }

  assert(((isFixed || isCSR) || !RegInfo->hasStackRealignment(MF) || !UseFP) &&
         "In the presence of dynamic stack pointer realignment, "
         "non-argument/CSR objects cannot be accessed through the frame pointer");

  if (isSVE) {
    // ...
    if (hasFP(MF) && (/* ... */ ||
                      RegInfo->hasStackRealignment(MF))) {
      FrameReg = RegInfo->getFrameRegister(MF);
      return FPOffset;
    }
    FrameReg = RegInfo->hasBasePointer(MF) ? RegInfo->getBaseRegister()
                                           : (unsigned)AArch64::SP;
    // ...
  }

  StackOffset ScalableOffset = {};
  if (UseFP && !(isFixed || isCSR))
    ScalableOffset = -SVEStackSize;
  if (!UseFP && (isFixed || isCSR))
    ScalableOffset = SVEStackSize;

  if (UseFP) {
    FrameReg = RegInfo->getFrameRegister(MF);
    // ...
  }

  // Use the base pointer if we have one.
  if (RegInfo->hasBasePointer(MF))
    FrameReg = RegInfo->getBaseRegister();
  else {
    assert(!MFI.hasVarSizedObjects() &&
           "Can't use SP when we have var sized objects.");
    FrameReg = AArch64::SP;
    // ...
  }
static bool produceCompactUnwindFrame(MachineFunction &MF) {
  // ...
           Attrs.hasAttrSomewhere(Attribute::SwiftError)) &&
  // ...
}
static bool invalidateWindowsRegisterPairing(unsigned Reg1, unsigned Reg2,
                                             bool NeedsWinCFI, bool IsFirst,
                                             const TargetRegisterInfo *TRI) {
  // ...
  if (Reg2 == AArch64::FP)
    return true;
  // ...
  if (TRI->getEncodingValue(Reg2) == TRI->getEncodingValue(Reg1) + 1)
    return false;
  // ...
  if (Reg1 >= AArch64::X19 && Reg1 <= AArch64::X27 &&
      (Reg1 - AArch64::X19) % 2 == 0 && Reg2 == AArch64::LR && !IsFirst)
    return false;
  return true;
}
static bool invalidateRegisterPairing(unsigned Reg1, unsigned Reg2,
                                      bool UsesWinAAPCS, bool NeedsWinCFI,
                                      bool NeedsFrameRecord, bool IsFirst,
                                      const TargetRegisterInfo *TRI) {
  // ...
  // If we need to store the frame record, don't pair any register
  // with LR other than FP.
  if (NeedsFrameRecord)
    return Reg2 == AArch64::LR;
  // ...
}
struct RegPairInfo {
  unsigned Reg1 = AArch64::NoRegister;
  unsigned Reg2 = AArch64::NoRegister;
  int FrameIdx;
  int Offset;
  enum RegType { GPR, FPR64, FPR128, PPR, ZPR, VG } Type;

  RegPairInfo() = default;

  bool isPaired() const { return Reg2 != AArch64::NoRegister; }

  unsigned getScale() const {
    switch (Type) {
    case PPR:
      return 2;
    case GPR:
    case FPR64:
    case VG:
      return 8;
    case ZPR:
    case FPR128:
      return 16;
    }
    llvm_unreachable("Unsupported type");
  }

  bool isScalable() const { return Type == PPR || Type == ZPR; }
};
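// getScale() is the byte size of one spill slot for the pair's register type
// (8 for an X/D register, 16 for a Q/Z register, 2 for a predicate); the
// RPI.Offset computed below is expressed in these scaled units.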
unsigned findFreePredicateReg(BitVector &SavedRegs) {
  for (unsigned PReg = AArch64::P8; PReg <= AArch64::P15; ++PReg) {
    if (SavedRegs.test(PReg)) {
      unsigned PNReg = PReg - AArch64::P0 + AArch64::PN0;
      return PNReg;
    }
  }
  return AArch64::NoRegister;
}
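// Picks the first callee-saved predicate in p8-p15 and returns its PN alias,
// for use as the governing predicate of SVE2.1/SME2 multi-vector spills.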
static void computeCalleeSaveRegisterPairs(MachineFunction &MF,
                                           ArrayRef<CalleeSavedInfo> CSI,
                                           const TargetRegisterInfo *TRI,
                                           SmallVectorImpl<RegPairInfo> &RegPairs,
                                           bool NeedsFrameRecord) {
  // ...
  unsigned Count = CSI.size();
  // ...
  assert((Count & 1) == 0 && "Odd number of callee-saved regs to spill!");
  // ...
  int StackFillDir = -1;
  // ...
  unsigned FirstReg = 0;
  // ... (for WinCFI, fill the stack bottom-up instead:)
    FirstReg = Count - 1;
  // ...
  for (unsigned i = FirstReg; i < Count; i += RegInc) {
    RegPairInfo RPI;
    RPI.Reg1 = CSI[i].getReg();

    if (AArch64::GPR64RegClass.contains(RPI.Reg1))
      RPI.Type = RegPairInfo::GPR;
    else if (AArch64::FPR64RegClass.contains(RPI.Reg1))
      RPI.Type = RegPairInfo::FPR64;
    else if (AArch64::FPR128RegClass.contains(RPI.Reg1))
      RPI.Type = RegPairInfo::FPR128;
    else if (AArch64::ZPRRegClass.contains(RPI.Reg1))
      RPI.Type = RegPairInfo::ZPR;
    else if (AArch64::PPRRegClass.contains(RPI.Reg1))
      RPI.Type = RegPairInfo::PPR;
    else if (RPI.Reg1 == AArch64::VG)
      RPI.Type = RegPairInfo::VG;
    // ...

    // Add the next reg to the pair if it is in the same register class.
    if (unsigned(i + RegInc) < Count) {
      Register NextReg = CSI[i + RegInc].getReg();
      bool IsFirst = i == FirstReg;
      switch (RPI.Type) {
      case RegPairInfo::GPR:
        if (AArch64::GPR64RegClass.contains(NextReg) &&
            !invalidateRegisterPairing(RPI.Reg1, NextReg, /* ... */,
                                       NeedsWinCFI, NeedsFrameRecord, IsFirst,
                                       TRI))
          RPI.Reg2 = NextReg;
        break;
      case RegPairInfo::FPR64:
        if (AArch64::FPR64RegClass.contains(NextReg) &&
            /* ... */)
          RPI.Reg2 = NextReg;
        break;
      case RegPairInfo::FPR128:
        if (AArch64::FPR128RegClass.contains(NextReg))
          RPI.Reg2 = NextReg;
        break;
      case RegPairInfo::PPR:
        break;
      case RegPairInfo::ZPR:
        if (((RPI.Reg1 - AArch64::Z0) & 1) == 0 && (NextReg == RPI.Reg1 + 1))
          RPI.Reg2 = NextReg;
        break;
      case RegPairInfo::VG:
        break;
      }
    }
    // ...
    assert((!RPI.isPaired() ||
            (CSI[i].getFrameIdx() + RegInc == CSI[i + RegInc].getFrameIdx())) &&
           "Out of order callee saved regs!");

    assert((!RPI.isPaired() || !NeedsFrameRecord || RPI.Reg2 != AArch64::FP ||
            RPI.Reg1 == AArch64::LR) &&
           "FrameRecord must be allocated together with LR");

    // Windows AAPCS has FP and LR reversed.
    assert((!RPI.isPaired() || !NeedsFrameRecord || RPI.Reg1 != AArch64::FP ||
            RPI.Reg2 == AArch64::LR) &&
           "FrameRecord must be allocated together with LR");

    // ... (MachO compact unwind requires adjacent pairs:)
    assert((/* ... */ ||
            ((RPI.Reg1 == AArch64::LR && RPI.Reg2 == AArch64::FP) ||
             RPI.Reg1 + 1 == RPI.Reg2)) &&
           "Callee-save registers not saved as adjacent register pair!");

    RPI.FrameIdx = CSI[i].getFrameIdx();
    // ...
      RPI.FrameIdx = CSI[i + RegInc].getFrameIdx();
    int Scale = RPI.getScale();

    int OffsetPre = RPI.isScalable() ? ScalableByteOffset : ByteOffset;
    assert(OffsetPre % Scale == 0);

    if (RPI.isScalable())
      ScalableByteOffset += StackFillDir * (RPI.isPaired() ? 2 * Scale : Scale);
    else
      ByteOffset += StackFillDir * (RPI.isPaired() ? 2 * Scale : Scale);

    // Swift's async context is directly before FP, so allocate an extra
    // 8 bytes for it.
    if (/* NeedsFrameRecord && AFI->hasSwiftAsyncContext() && */
        ((!IsWindows && RPI.Reg2 == AArch64::FP) ||
         (IsWindows && RPI.Reg2 == AArch64::LR)))
      ByteOffset += StackFillDir * 8;

    // ...
    if (NeedGapToAlignStack && !NeedsWinCFI && !RPI.isScalable() &&
        RPI.Type != RegPairInfo::FPR128 && !RPI.isPaired() &&
        ByteOffset % 16 != 0) {
      ByteOffset += 8 * StackFillDir;
      assert(MFI.getObjectAlign(RPI.FrameIdx) <= Align(16));
      // A stack gap of 8 bytes: mark the slot as 16-aligned so that the gap
      // is inserted in a consistent place.
      MFI.setObjectAlignment(RPI.FrameIdx, Align(16));
      NeedGapToAlignStack = false;
    }

    int OffsetPost = RPI.isScalable() ? ScalableByteOffset : ByteOffset;
    assert(OffsetPost % Scale == 0);
    // If filling top down (default), we want the offset after allocating the
    // spill slot; if filling bottom up (WinCFI), the offset before.
    int Offset = NeedsWinCFI ? OffsetPre : OffsetPost;

    // The FP and LR slots swap position when a Swift async context is present.
    if (/* NeedsFrameRecord && AFI->hasSwiftAsyncContext() && */
        ((!IsWindows && RPI.Reg2 == AArch64::FP) ||
         (IsWindows && RPI.Reg2 == AArch64::LR)))
      Offset += 8;
    RPI.Offset = Offset / Scale;

    assert((!RPI.isPaired() ||
            (!RPI.isScalable() && RPI.Offset >= -64 && RPI.Offset <= 63) ||
            (RPI.isScalable() && RPI.Offset >= -256 && RPI.Offset <= 255)) &&
           "Offset out of bounds for LDP/STP immediate");
    // Save the offset to frame record so that the FP register can point to
    // the innermost frame record (spilled FP and LR registers).
    if (NeedsFrameRecord &&
        ((!IsWindows && RPI.Reg1 == AArch64::LR && RPI.Reg2 == AArch64::FP) ||
         (IsWindows && RPI.Reg1 == AArch64::FP && RPI.Reg2 == AArch64::LR)))
      AFI->setCalleeSaveBaseToFrameRecordOffset(Offset);
    // ...
  }
  // ...
    MFI.setObjectAlignment(CSI[0].getFrameIdx(), Align(16));
  // ... (for WinCFI, undo the bottom-up fill order:)
    std::reverse(RegPairs.begin(), RegPairs.end());
}
  MRI.freezeReservedRegs();

  if (homogeneousPrologEpilog(MF)) {
    // ...
    for (auto &RPI : RegPairs) {
      // ...
      if (!MRI.isReserved(RPI.Reg1))
        MBB.addLiveIn(RPI.Reg1);
      if (RPI.isPaired() && !MRI.isReserved(RPI.Reg2))
        MBB.addLiveIn(RPI.Reg2);
    }
    // ...
  }

  bool PTrueCreated = false;
  for (const RegPairInfo &RPI : llvm::reverse(RegPairs)) {
    unsigned Reg1 = RPI.Reg1;
    unsigned Reg2 = RPI.Reg2;
    // ...
    switch (RPI.Type) {
    case RegPairInfo::GPR:
      StrOpc = RPI.isPaired() ? AArch64::STPXi : AArch64::STRXui;
      // ...
      Alignment = Align(8);
      break;
    case RegPairInfo::FPR64:
      StrOpc = RPI.isPaired() ? AArch64::STPDi : AArch64::STRDui;
      // ...
      Alignment = Align(8);
      break;
    case RegPairInfo::FPR128:
      StrOpc = RPI.isPaired() ? AArch64::STPQi : AArch64::STRQui;
      // ...
      Alignment = Align(16);
      break;
    case RegPairInfo::ZPR:
      StrOpc = RPI.isPaired() ? AArch64::ST1B_2Z_IMM : AArch64::STR_ZXI;
      // ...
      Alignment = Align(16);
      break;
    case RegPairInfo::PPR:
      StrOpc = AArch64::STR_PXI;
      // ...
      Alignment = Align(2);
      break;
    case RegPairInfo::VG:
      StrOpc = AArch64::STRXui;
      // ...
      Alignment = Align(8);
      break;
    }

    unsigned X0Scratch = AArch64::NoRegister;
    if (Reg1 == AArch64::VG) {
      // ...
      assert(Reg1 != AArch64::NoRegister);
      // ...
      if (Attrs.hasStreamingBody() && !Attrs.hasStreamingInterface() &&
          /* ... */) {
        // ... (any_of over MBB.liveins(), testing:)
        //     return STI.getRegisterInfo()->isSuperOrSubRegisterEq(
        //         AArch64::X0, LiveIn.PhysReg);
        // ...
        if (X0Scratch != AArch64::NoRegister)
          // ...
        const uint32_t *RegMask = TRI->getCallPreservedMask(
            /* ... */);
        // ...
      }
    }

    LLVM_DEBUG(dbgs() << "CSR spill: (" << printReg(Reg1, TRI);
               if (RPI.isPaired()) dbgs() << ", " << printReg(Reg2, TRI);
               dbgs() << ") -> fi#(" << RPI.FrameIdx;
               if (RPI.isPaired()) dbgs() << ", " << RPI.FrameIdx + 1;
               dbgs() << ")\n");

    assert((!NeedsWinCFI || !(Reg1 == AArch64::LR && Reg2 == AArch64::FP)) &&
           "Windows unwinding requires a consecutive (FP,LR) pair");
    // ...
    unsigned FrameIdxReg1 = RPI.FrameIdx;
    unsigned FrameIdxReg2 = RPI.FrameIdx + 1;
    if (NeedsWinCFI && RPI.isPaired()) {
      std::swap(Reg1, Reg2);
      std::swap(FrameIdxReg1, FrameIdxReg2);
    }
    // ...
    if (RPI.isPaired() && RPI.isScalable()) {
      // ...
      assert(((Subtarget.hasSVE2p1() || Subtarget.hasSME2()) && PnReg != 0) &&
             "Expects SVE2.1 or SME2 target and a predicate register");
#ifdef EXPENSIVE_CHECKS
      auto IsPPR = [](const RegPairInfo &c) {
        return c.Type == RegPairInfo::PPR;
      };
      auto PPRBegin = std::find_if(RegPairs.begin(), RegPairs.end(), IsPPR);
      auto IsZPR = [](const RegPairInfo &c) {
        return c.Type == RegPairInfo::ZPR;
      };
      auto ZPRBegin = std::find_if(RegPairs.begin(), RegPairs.end(), IsZPR);
      assert(!(PPRBegin < ZPRBegin) &&
             "Expected callee save predicate to be handled first");
#endif
      if (!PTrueCreated) {
        PTrueCreated = true;
        // ... (emit PTRUE_C_B to PnReg)
      }
      // ...
      if (!MRI.isReserved(Reg1))
        MBB.addLiveIn(Reg1);
      if (!MRI.isReserved(Reg2))
        MBB.addLiveIn(Reg2);
      MIB.addReg(AArch64::Z0_Z1 + (RPI.Reg1 - AArch64::Z0));
      // ...
    } else {
      // ...
      if (!MRI.isReserved(Reg1))
        MBB.addLiveIn(Reg1);
      if (RPI.isPaired()) {
        if (!MRI.isReserved(Reg2))
          MBB.addLiveIn(Reg2);
        // ...
      }
      // ...
    }
    // Update the StackIDs of the SVE stack slots.
    if (RPI.Type == RegPairInfo::ZPR || RPI.Type == RegPairInfo::PPR) {
      // ...
    }
    // ...
    if (X0Scratch != AArch64::NoRegister)
      // ...
  }
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  // ...
  if (homogeneousPrologEpilog(MF, &MBB)) {
    // ...
    for (auto &RPI : RegPairs) {
      // ...
    }
    // ...
  }

  // For performance reasons, restore SVE registers in increasing order.
  auto IsPPR = [](const RegPairInfo &c) { return c.Type == RegPairInfo::PPR; };
  auto PPRBegin = std::find_if(RegPairs.begin(), RegPairs.end(), IsPPR);
  auto PPREnd = std::find_if_not(PPRBegin, RegPairs.end(), IsPPR);
  std::reverse(PPRBegin, PPREnd);
  auto IsZPR = [](const RegPairInfo &c) { return c.Type == RegPairInfo::ZPR; };
  auto ZPRBegin = std::find_if(RegPairs.begin(), RegPairs.end(), IsZPR);
  auto ZPREnd = std::find_if_not(ZPRBegin, RegPairs.end(), IsZPR);
  std::reverse(ZPRBegin, ZPREnd);

  bool PTrueCreated = false;
  for (const RegPairInfo &RPI : RegPairs) {
    unsigned Reg1 = RPI.Reg1;
    unsigned Reg2 = RPI.Reg2;
    // ...
    switch (RPI.Type) {
    case RegPairInfo::GPR:
      LdrOpc = RPI.isPaired() ? AArch64::LDPXi : AArch64::LDRXui;
      // ...
      Alignment = Align(8);
      break;
    case RegPairInfo::FPR64:
      LdrOpc = RPI.isPaired() ? AArch64::LDPDi : AArch64::LDRDui;
      // ...
      Alignment = Align(8);
      break;
    case RegPairInfo::FPR128:
      LdrOpc = RPI.isPaired() ? AArch64::LDPQi : AArch64::LDRQui;
      // ...
      Alignment = Align(16);
      break;
    case RegPairInfo::ZPR:
      LdrOpc = RPI.isPaired() ? AArch64::LD1B_2Z_IMM : AArch64::LDR_ZXI;
      // ...
      Alignment = Align(16);
      break;
    case RegPairInfo::PPR:
      LdrOpc = AArch64::LDR_PXI;
      // ...
      Alignment = Align(2);
      break;
    case RegPairInfo::VG:
      continue;
    }
    LLVM_DEBUG(dbgs() << "CSR restore: (" << printReg(Reg1, TRI);
               if (RPI.isPaired()) dbgs() << ", " << printReg(Reg2, TRI);
               dbgs() << ") -> fi#(" << RPI.FrameIdx;
               if (RPI.isPaired()) dbgs() << ", " << RPI.FrameIdx + 1;
               dbgs() << ")\n");
    // ...
    unsigned FrameIdxReg1 = RPI.FrameIdx;
    unsigned FrameIdxReg2 = RPI.FrameIdx + 1;
    if (NeedsWinCFI && RPI.isPaired()) {
      std::swap(Reg1, Reg2);
      std::swap(FrameIdxReg1, FrameIdxReg2);
    }
    // ...
    if (RPI.isPaired() && RPI.isScalable()) {
      // ...
      assert(((Subtarget.hasSVE2p1() || Subtarget.hasSME2()) && PnReg != 0) &&
             "Expects SVE2.1 or SME2 target and a predicate register");
#ifdef EXPENSIVE_CHECKS
      assert(!(PPRBegin < ZPRBegin) &&
             "Expected callee save predicate to be handled first");
#endif
      if (!PTrueCreated) {
        PTrueCreated = true;
        // ... (emit PTRUE_C_B to PnReg)
      }
      // ...
      MIB.addReg(AArch64::Z0_Z1 + (RPI.Reg1 - AArch64::Z0),
                 /* ... */);
      // ...
    } else {
      // ...
      if (RPI.isPaired()) {
        // ...
      }
      // ...
    }
  }
static std::optional<int> getLdStFrameID(const MachineInstr &MI,
                                         const MachineFrameInfo &MFI) {
  if (!MI.mayLoadOrStore() || MI.getNumMemOperands() < 1)
    return std::nullopt;
  // ...
  auto *PSV =
      dyn_cast_or_null<FixedStackPseudoSourceValue>(MMO->getPseudoValue());
  if (PSV)
    return std::optional<int>(PSV->getFrameIndex());
  // ...
  return std::nullopt;
}
void AArch64FrameLowering::determineStackHazardSlot(
    MachineFunction &MF, BitVector &SavedRegs) const {
  // ...
  bool HasFPRCSRs = any_of(SavedRegs.set_bits(), [](unsigned Reg) {
    return AArch64::FPR64RegClass.contains(Reg) ||
           AArch64::FPR128RegClass.contains(Reg) ||
           AArch64::ZPRRegClass.contains(Reg) ||
           AArch64::PPRRegClass.contains(Reg);
  });
  bool HasFPRStackObjects = false;
  // ...
    for (auto &MBB : MF) {
      for (auto &MI : MBB) {
        std::optional<int> FI = getLdStFrameID(MI, MFI);
        if (FI && *FI >= 0 && *FI < (int)FrameObjects.size()) {
          // ... (mark FPR/SVE accesses with bit 2, others with bit 1:)
            FrameObjects[*FI] |= 2;
          // ...
            FrameObjects[*FI] |= 1;
        }
      }
    }
    HasFPRStackObjects =
        any_of(FrameObjects, [](unsigned B) { return (B & 3) == 2; });
  // ...
  if (HasFPRCSRs || HasFPRStackObjects) {
    // ...
  }
}
  unsigned UnspilledCSGPR = AArch64::NoRegister;
  unsigned UnspilledCSGPRPaired = AArch64::NoRegister;
  // ...
  unsigned ExtraCSSpill = 0;
  bool HasUnpairedGPR64 = false;
  bool HasPairZReg = false;
  // Figure out which callee-saved registers to save/restore.
  for (unsigned i = 0; CSRegs[i]; ++i) {
    const unsigned Reg = CSRegs[i];
    // ...
    if (Reg == BasePointerReg)
      SavedRegs.set(Reg);

    bool RegUsed = SavedRegs.test(Reg);
    unsigned PairedReg = AArch64::NoRegister;
    const bool RegIsGPR64 = AArch64::GPR64RegClass.contains(Reg);
    if (RegIsGPR64 || AArch64::FPR64RegClass.contains(Reg) ||
        AArch64::FPR128RegClass.contains(Reg)) {
      // Compute the candidate paired register.
      if (HasUnpairedGPR64)
        PairedReg = CSRegs[i % 2 == 0 ? i - 1 : i + 1];
      else
        PairedReg = CSRegs[i ^ 1];

      // Once pairing is broken (e.g. by an odd number of GPR64 CSRs), all
      // subsequent GPR64 registers stay unpaired.
      if (RegIsGPR64 && !AArch64::GPR64RegClass.contains(PairedReg)) {
        PairedReg = AArch64::NoRegister;
        HasUnpairedGPR64 = true;
      }
      assert(PairedReg == AArch64::NoRegister ||
             AArch64::GPR64RegClass.contains(Reg, PairedReg) ||
             AArch64::FPR64RegClass.contains(Reg, PairedReg) ||
             AArch64::FPR128RegClass.contains(Reg, PairedReg));
    }

    if (!RegUsed) {
      if (AArch64::GPR64RegClass.contains(Reg) /* && ... */) {
        UnspilledCSGPR = Reg;
        UnspilledCSGPRPaired = PairedReg;
      }
      continue;
    }

    // MachO's compact unwind format relies on all registers being stored in
    // pairs: if one register of a pair is used, spill the other one too.
    if (producePairRegisters(MF) && PairedReg != AArch64::NoRegister &&
        !SavedRegs.test(PairedReg)) {
      SavedRegs.set(PairedReg);
      if (AArch64::GPR64RegClass.contains(PairedReg) /* && ... */)
        ExtraCSSpill = PairedReg;
    }
    // Check if there is a pair of ZRegs, so a PReg can be selected for
    // multi-vector spill/fill.
    HasPairZReg |= (AArch64::ZPRRegClass.contains(Reg, CSRegs[i ^ 1]) &&
                    SavedRegs.test(CSRegs[i ^ 1]));
  }

  if (HasPairZReg && (Subtarget.hasSVE2p1() || Subtarget.hasSME2())) {
    // Find a suitable predicate register for multi-vector spill/fill.
    unsigned PnReg = findFreePredicateReg(SavedRegs);
    if (PnReg != AArch64::NoRegister)
      AFI->setPredicateRegForFillSpill(PnReg);
    // If no free callee-save predicate was found, assign one.
    if (/* ... */)
      SavedRegs.set(AArch64::P8);
    // ...
    assert(!RegInfo->isReservedReg(MF, AFI->getPredicateRegForFillSpill()) &&
           "Predicate cannot be a reserved register");
  }
  // ... (shadow call stack:)
    SavedRegs.set(AArch64::X18);
  // Calculate the callee-saved stack size.
  unsigned CSStackSize = 0;
  unsigned SVECSStackSize = 0;
  // ...
  for (unsigned Reg : SavedRegs.set_bits()) {
    // ...
    if (AArch64::PPRRegClass.contains(Reg) ||
        AArch64::ZPRRegClass.contains(Reg))
      SVECSStackSize += /* ... */;
    else
      CSStackSize += /* ... */;
  }

  // Account for the VG spill(s) needed for streaming-mode changes; a
  // locally-streaming function spills both VG values.
  if (AFI->hasStreamingModeChanges()) {
    if (Attrs.hasStreamingBody() && !Attrs.hasStreamingInterface())
      CSStackSize += 16;
    else
      CSStackSize += 8;
  }

  // Determine if a hazard slot should be used and where it should go.
  determineStackHazardSlot(MF, SavedRegs);
  if (AFI->hasStackHazardSlotIndex())
    CSStackSize += StackHazardSize;

  // Save the number of saved regs so CSStackSize can be updated later.
  unsigned NumSavedRegs = SavedRegs.count();
  // ...
    SavedRegs.set(AArch64::FP);
    SavedRegs.set(AArch64::LR);
  // ...
  LLVM_DEBUG({
    dbgs() << "*** determineCalleeSaves\nSaved CSRs:";
    for (unsigned Reg : SavedRegs.set_bits())
      dbgs() << ' ' << printReg(Reg, RegInfo);
    dbgs() << "\n";
  });

  // If any callee-saved registers are used, the frame cannot be eliminated.
  int64_t SVEStackSize =
      alignTo(SVECSStackSize + estimateSVEStackObjectOffsets(MFI), 16);
  bool CanEliminateFrame = (SavedRegs.count() == 0) && !SVEStackSize;
  // ...
  int64_t CalleeStackUsed = 0;
  for (int I = MFI.getObjectIndexBegin(); I != 0; ++I) {
    int64_t FixedOff = MFI.getObjectOffset(I);
    if (FixedOff > CalleeStackUsed)
      CalleeStackUsed = FixedOff;
  }

  // Conservatively always assume BigStack when there are SVE spills.
  bool BigStack = SVEStackSize || (EstimatedStackSize + CSStackSize +
                                   CalleeStackUsed) > EstimatedStackSizeLimit;
  if (BigStack || !CanEliminateFrame /* || ... */)
    AFI->setHasStackFrame(true);

  // ...
    if (!ExtraCSSpill && UnspilledCSGPR != AArch64::NoRegister) {
      LLVM_DEBUG(dbgs() << "Spilling " << printReg(UnspilledCSGPR, RegInfo)
                        << " to get a scratch register.\n");
      SavedRegs.set(UnspilledCSGPR);
      ExtraCSSpill = UnspilledCSGPR;

      // MachO's compact unwind format relies on all registers being stored
      // in pairs, so if one extra register is spilled for BigStack, its pair
      // must be spilled too.
      if (producePairRegisters(MF)) {
        if (UnspilledCSGPRPaired == AArch64::NoRegister) {
          // Failed to make a pair for compact unwind format; revert spilling.
          SavedRegs.reset(UnspilledCSGPR);
          ExtraCSSpill = AArch64::NoRegister;
        } else
          SavedRegs.set(UnspilledCSGPRPaired);
      }
    }
    // ... (otherwise create an emergency spill slot:)
      unsigned Size = TRI->getSpillSize(RC);
      Align Alignment = TRI->getSpillAlign(RC);
      // ...
      LLVM_DEBUG(dbgs() << "No available CS registers, allocated fi#" << FI
                        << " as the emergency spill slot.\n");

  // Add the size of the additional 64-bit GPR saves.
  CSStackSize += 8 * (SavedRegs.count() - NumSavedRegs);

  // A Swift asynchronous context extends the frame record with a pointer
  // directly before FP.
  if (hasFP(MF) && AFI->hasSwiftAsyncContext())
    CSStackSize += 8;

  uint64_t AlignedCSStackSize = alignTo(CSStackSize, 16);
  LLVM_DEBUG(dbgs() << "Estimated stack frame size: "
                    << EstimatedStackSize + AlignedCSStackSize << " bytes.\n");

  assert((!MFI.isCalleeSavedInfoValid() ||
          AFI->getCalleeSavedStackSize() == AlignedCSStackSize) &&
         "Should not invalidate callee saved info");

  // Round up to register-pair alignment to avoid additional SP adjustment
  // instructions.
  AFI->setCalleeSavedStackSize(AlignedCSStackSize);
  AFI->setCalleeSaveStackHasFreeSpace(AlignedCSStackSize != CSStackSize);
  AFI->setSVECalleeSavedStackSize(alignTo(SVECSStackSize, 16));
bool AArch64FrameLowering::assignCalleeSavedSpillSlots(
    MachineFunction &MF, const TargetRegisterInfo *TRI,
    std::vector<CalleeSavedInfo> &CSI, unsigned &MinCSFrameIndex,
    unsigned &MaxCSFrameIndex) const {
  // ...
  std::reverse(CSI.begin(), CSI.end());
  // ...
    if ((unsigned)FrameIdx < MinCSFrameIndex)
      MinCSFrameIndex = FrameIdx;
    if ((unsigned)FrameIdx > MaxCSFrameIndex)
      MaxCSFrameIndex = FrameIdx;
  // ...
  // Insert VG into the list of CSRs, immediately before LR if it is saved.
  // ...
    std::vector<CalleeSavedInfo> VGSaves;
    // ...
    VGInfo.setRestored(false);
    VGSaves.push_back(VGInfo);

    // Add VG again for locally-streaming functions, as the streaming and
    // non-streaming VG values may differ.
    if (Attrs.hasStreamingBody() && !Attrs.hasStreamingInterface())
      VGSaves.push_back(VGInfo);

    bool InsertBeforeLR = false;
    for (unsigned I = 0; I < CSI.size(); I++)
      if (CSI[I].getReg() == AArch64::LR) {
        InsertBeforeLR = true;
        CSI.insert(CSI.begin() + I, VGSaves.begin(), VGSaves.end());
        break;
      }

    if (!InsertBeforeLR)
      CSI.insert(CSI.end(), VGSaves.begin(), VGSaves.end());
  // ...
  int HazardSlotIndex = std::numeric_limits<int>::max();
  for (auto &CS : CSI) {
    // ...
    // Create a hazard slot as we switch between GPR and FPR CSRs.
    // ...
      assert(HazardSlotIndex == std::numeric_limits<int>::max() &&
             "Unexpected register order for hazard slot");
      // ...
      LLVM_DEBUG(dbgs() << "Created CSR Hazard at slot " << HazardSlotIndex
                        << "\n");
      // ...
      if ((unsigned)HazardSlotIndex < MinCSFrameIndex)
        MinCSFrameIndex = HazardSlotIndex;
      if ((unsigned)HazardSlotIndex > MaxCSFrameIndex)
        MaxCSFrameIndex = HazardSlotIndex;
    // ...
    CS.setFrameIdx(FrameIdx);

    if ((unsigned)FrameIdx < MinCSFrameIndex)
      MinCSFrameIndex = FrameIdx;
    if ((unsigned)FrameIdx > MaxCSFrameIndex)
      MaxCSFrameIndex = FrameIdx;

    // Grab 8 bytes below FP for the extended asynchronous frame info.
    if (/* ... */ &&
        Reg == AArch64::FP) {
      // ...
      if ((unsigned)FrameIdx < MinCSFrameIndex)
        MinCSFrameIndex = FrameIdx;
      if ((unsigned)FrameIdx > MaxCSFrameIndex)
        MaxCSFrameIndex = FrameIdx;
    }
  }

  // Add a hazard slot in the case where no FPR CSRs are present.
  if (/* ... */ &&
      HazardSlotIndex == std::numeric_limits<int>::max()) {
    // ...
    LLVM_DEBUG(dbgs() << "Created CSR Hazard at slot " << HazardSlotIndex
                      << "\n");
    // ...
    if ((unsigned)HazardSlotIndex < MinCSFrameIndex)
      MinCSFrameIndex = HazardSlotIndex;
    if ((unsigned)HazardSlotIndex > MaxCSFrameIndex)
      MaxCSFrameIndex = HazardSlotIndex;
  }
static bool getSVECalleeSaveSlotRange(const MachineFrameInfo &MFI,
                                      int &Min, int &Max) {
  Min = std::numeric_limits<int>::max();
  Max = std::numeric_limits<int>::min();
  // ...
  for (auto &CS : CSI) {
    if (AArch64::ZPRRegClass.contains(CS.getReg()) ||
        AArch64::PPRRegClass.contains(CS.getReg())) {
      assert((Max == std::numeric_limits<int>::min() ||
              Max + 1 == CS.getFrameIdx()) &&
             "SVE CalleeSaves are not consecutive");
      Min = std::min(Min, CS.getFrameIdx());
      Max = std::max(Max, CS.getFrameIdx());
    }
  }
  return Min != std::numeric_limits<int>::max();
}
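// Returns the (necessarily consecutive) frame-index range occupied by SVE
// callee-save slots, or false if the function saves no Z/P registers.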
static int64_t determineSVEStackObjectOffsets(MachineFrameInfo &MFI,
                                              int &MinCSFrameIndex,
                                              int &MaxCSFrameIndex,
                                              bool AssignOffsets) {
  // ... (no fixed SVE objects may exist:)
  //     "SVE vectors should never be passed on the stack by value, only by "
  //     "reference.");

  auto Assign = [&MFI](int FI, int64_t Offset) {
    // ...
  };

  // ...
  for (int I = MinCSFrameIndex; I <= MaxCSFrameIndex; ++I) {
    // ...
  }

  // ...
  int StackProtectorFI = -1;
  // ... (an SVE stack protector slot is allocated first:)
      ObjectsToAllocate.push_back(StackProtectorFI);
  // ...
    if (I == StackProtectorFI)
      continue;
    if (MaxCSFrameIndex >= I && I >= MinCSFrameIndex)
      continue;
  // ...
  for (unsigned FI : ObjectsToAllocate) {
    // ...
    if (Alignment > Align(16))
      report_fatal_error(
          "Alignment of scalable vectors > 16 bytes is not yet supported");
    // ...
  }
  // ...
}

int64_t AArch64FrameLowering::estimateSVEStackObjectOffsets(
    MachineFrameInfo &MFI) const {
  int MinCSFrameIndex, MaxCSFrameIndex;
  return determineSVEStackObjectOffsets(MFI, MinCSFrameIndex, MaxCSFrameIndex,
                                        /*AssignOffsets=*/false);
}

int64_t AArch64FrameLowering::assignSVEStackObjectOffsets(
    /* ... */) const {
  // ...
}

// ... (processFunctionBeforeFrameFinalized:)
  assert(getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown &&
         "Upwards growing stack unsupported");

  int MinCSFrameIndex, MaxCSFrameIndex;
  int64_t SVEStackSize =
      assignSVEStackObjectOffsets(MFI, MinCSFrameIndex, MaxCSFrameIndex);
  int64_t FixedObject =
      getFixedObjectSize(MF, AFI, IsWin64, /*IsFunclet=*/false);
  // ...
  assert(DstReg && "There must be a free register after frame setup");
struct TagStoreInstr {
  MachineInstr *MI;
  int64_t Offset, Size;
  // ...
};

class TagStoreEdit {
  // ...
  // If set, move the SP update instruction here and fold the SP update into
  // the tag store sequence.
  std::optional<int64_t> FrameRegUpdate;
  // MIFlags of the frame register update instruction.
  unsigned FrameRegUpdateFlags;
  // ...
public:
  TagStoreEdit(MachineBasicBlock *MBB, bool ZeroData)
      : MBB(MBB), ZeroData(ZeroData) {}
  // ...
  void addInstruction(TagStoreInstr I) {
    assert((TagStores.empty() ||
            TagStores.back().Offset + TagStores.back().Size == I.Offset) &&
           "Non-adjacent tag store instructions.");
    TagStores.push_back(I);
  }
  const int64_t kMinOffset = -256 * 16;
  const int64_t kMaxOffset = 255 * 16;
  // ...
  int64_t BaseRegOffsetBytes = FrameRegOffset.getFixed();
  if (BaseRegOffsetBytes < kMinOffset ||
      BaseRegOffsetBytes + (Size - Size % 32) > kMaxOffset ||
      // BaseReg can be FP, which is not necessarily aligned to 16 bytes.
      BaseRegOffsetBytes % 16 != 0) {
    Register ScratchReg = MRI->createVirtualRegister(&AArch64::GPR64RegClass);
    // ...
    BaseReg = ScratchReg;
    BaseRegOffsetBytes = 0;
  }
  // ...
    int64_t InstrSize = (Size > 16) ? 32 : 16;
    unsigned Opcode =
        InstrSize == 16
            ? (ZeroData ? AArch64::STZGi : AArch64::STGi)
            : (ZeroData ? AArch64::STZ2Gi : AArch64::ST2Gi);
    assert(BaseRegOffsetBytes % 16 == 0);
    // ... (BuildMI with:)
    //     .addImm(BaseRegOffsetBytes / 16)
    // ...
    if (BaseRegOffsetBytes == 0)
      LastI = I;
    BaseRegOffsetBytes += InstrSize;
  // ...

// ... (TagStoreEdit::emitLoop:)
  Register BaseReg = FrameRegUpdate
                         ? /* ... */
                         : MRI->createVirtualRegister(&AArch64::GPR64RegClass);
  Register SizeReg = MRI->createVirtualRegister(&AArch64::GPR64RegClass);
  // ...
  int64_t LoopSize = Size;
  // If the loop size is not a multiple of 32, split off one 16-byte store at
  // the end to fold the BaseReg update into.
  if (FrameRegUpdate && *FrameRegUpdate)
    LoopSize -= LoopSize % 32;
  MachineInstr *LoopI = BuildMI(*MBB, InsertI, DL,
                                TII->get(ZeroData ? AArch64::STZGloop_wback
                                                  : AArch64::STGloop_wback))
                            /* ... */;
  LoopI->setFlags(FrameRegUpdateFlags);

  int64_t ExtraBaseRegUpdate =
      FrameRegUpdate ? (*FrameRegUpdate - FrameRegOffset.getFixed() - Size) : 0;
  if (LoopSize < Size) {
    // ... (tag 16 more bytes and update BaseReg:)
    BuildMI(*MBB, InsertI, DL,
            TII->get(ZeroData ? AArch64::STZGPostIndex : AArch64::STGPostIndex))
        /* ... */
        .addImm(1 + ExtraBaseRegUpdate / 16)
        /* ... */;
  } else if (ExtraBaseRegUpdate) {
    // Update BaseReg.
    BuildMI(*MBB, InsertI, DL,
            TII->get(ExtraBaseRegUpdate > 0 ? AArch64::ADDXri : AArch64::SUBXri))
        /* ... */
        .addImm(std::abs(ExtraBaseRegUpdate))
        /* ... */;
  }
static bool canMergeRegUpdate(MachineBasicBlock::iterator II, unsigned Reg,
                              int64_t Size, int64_t *TotalOffset) {
  MachineInstr &MI = *II;
  if ((MI.getOpcode() == AArch64::ADDXri ||
       MI.getOpcode() == AArch64::SUBXri) &&
      MI.getOperand(0).getReg() == Reg && MI.getOperand(1).getReg() == Reg) {
    // ...
    int64_t Offset = MI.getOperand(2).getImm() << Shift;
    if (MI.getOpcode() == AArch64::SUBXri)
      Offset = -Offset;
    int64_t AbsPostOffset = std::abs(Offset - Size);
    const int64_t kMaxOffset =
        0xFFF; // Max encoding for unshifted ADDXri/SUBXri
    if (AbsPostOffset <= kMaxOffset && AbsPostOffset % 16 == 0) {
      *TotalOffset = Offset;
      return true;
    }
  }
  return false;
}
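// Checks whether a following ADDXri/SUBXri on the same register (typically
// the epilogue's SP adjustment) can be folded into the tag-store sequence as
// a post-index update, returning the combined adjustment via *TotalOffset.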
static void mergeMemRefs(const SmallVectorImpl<TagStoreInstr> &TSE,
                         SmallVectorImpl<MachineMemOperand *> &MemRefs) {
  MemRefs.clear();
  for (auto &TS : TSE) {
    MachineInstr *MI = TS.MI;
    // An instruction without memory operands may access anything. Be
    // conservative and return an empty list.
    if (MI->memoperands_empty()) {
      MemRefs.clear();
      return;
    }
    MemRefs.append(MI->memoperands_begin(), MI->memoperands_end());
  }
}
void TagStoreEdit::emitCode(MachineBasicBlock::iterator &InsertI,
                            const AArch64FrameLowering *TFI,
                            bool TryMergeSPUpdate) {
  if (TagStores.empty())
    return;
  TagStoreInstr &FirstTagStore = TagStores[0];
  TagStoreInstr &LastTagStore = TagStores[TagStores.size() - 1];
  Size = LastTagStore.Offset - FirstTagStore.Offset + LastTagStore.Size;
  DL = TagStores[0].MI->getDebugLoc();

  // ...
  FrameRegOffset = TFI->resolveFrameOffsetReference(
      *MF, FirstTagStore.Offset, false /*isFixed*/, false /*isSVE*/, Reg,
      /*PreferFP=*/false, /*ForSimm=*/true);
  // ...
  FrameRegUpdate = std::nullopt;

  mergeMemRefs(TagStores, CombinedMemRefs);

  LLVM_DEBUG({
    dbgs() << "Replacing adjacent STG instructions:\n";
    for (const auto &Instr : TagStores) {
      dbgs() << "  " << *Instr.MI;
    }
  });

  // ...
    if (TagStores.size() < 2)
      return;
    emitUnrolled(InsertI);
  // ...
    MachineInstr *UpdateInstr = nullptr;
    int64_t TotalOffset = 0;
    if (TryMergeSPUpdate) {
      // ...
      if (InsertI != MBB->end() &&
          canMergeRegUpdate(InsertI, FrameReg, FrameRegOffset.getFixed() + Size,
                            &TotalOffset)) {
        UpdateInstr = &*InsertI++;
        // ...
      }
    }

    if (!UpdateInstr && TagStores.size() < 2)
      return;

    if (UpdateInstr) {
      FrameRegUpdate = TotalOffset;
      FrameRegUpdateFlags = UpdateInstr->getFlags();
    }
    // ...

  for (auto &TS : TagStores)
    TS.MI->eraseFromParent();
static bool isMergeableStackTaggingInstruction(MachineInstr &MI,
                                               int64_t &Offset, int64_t &Size,
                                               bool &ZeroData) {
  // ...
  unsigned Opcode = MI.getOpcode();
  ZeroData = (Opcode == AArch64::STZGloop || Opcode == AArch64::STZGi ||
              Opcode == AArch64::STZ2Gi);

  if (Opcode == AArch64::STGloop || Opcode == AArch64::STZGloop) {
    if (!MI.getOperand(0).isDead() || !MI.getOperand(1).isDead())
      return false;
    if (!MI.getOperand(2).isImm() || !MI.getOperand(3).isFI())
      return false;
    // ...
    Size = MI.getOperand(2).getImm();
    return true;
  }

  if (Opcode == AArch64::STGi || Opcode == AArch64::STZGi)
    Size = 16;
  else if (Opcode == AArch64::ST2Gi || Opcode == AArch64::STZ2Gi)
    Size = 32;
  else
    return false;

  if (MI.getOperand(0).getReg() != AArch64::SP || !MI.getOperand(1).isFI())
    return false;

  Offset = /* ... */ +
           16 * MI.getOperand(2).getImm();
  return true;
}
  if (!isMergeableStackTaggingInstruction(MI, Offset, Size, FirstZeroData))
    return II;
  // ...
  constexpr int kScanLimit = 10;
  int Count = 0;
  for (auto E = MBB->end();
       NextI != E && Count < kScanLimit; ++NextI) {
    // ...
    // Collect instructions that update memory.
    if (isMergeableStackTaggingInstruction(MI, Offset, Size, ZeroData)) {
      if (ZeroData != FirstZeroData)
        break;
      // ...
    }
    // Only count non-transient instructions toward the scan limit.
    if (!MI.isTransient())
      ++Count;
    // ...
    if (MI.mayLoadOrStore() || MI.hasUnmodeledSideEffects())
      break;
    // ...
  }
  // ...
  LiveRegs.addLiveOuts(*MBB);
  // ...
    LiveRegs.stepBackward(*I);
  // ...
  if (LiveRegs.contains(AArch64::NZCV))
    return InsertI;
  // ...
  llvm::stable_sort(Instrs,
                    [](const TagStoreInstr &Left, const TagStoreInstr &Right) {
                      return Left.Offset < Right.Offset;
                    });
  // ... (make sure there are no memory holes between the collected stores:)
  int64_t CurOffset = Instrs[0].Offset;
  for (auto &Instr : Instrs) {
    if (CurOffset > Instr.Offset)
      return NextI;
    // ...
  }
  // ...
  TagStoreEdit TSE(MBB, FirstZeroData);
  std::optional<int64_t> EndOffset;
  for (auto &Instr : Instrs) {
    if (EndOffset && *EndOffset != Instr.Offset) {
      // Found a gap.
      TSE.emitCode(InsertI, TFI, /*TryMergeSPUpdate=*/false);
      // ...
    }
    TSE.addInstruction(Instr);
    EndOffset = Instr.Offset + Instr.Size;
  }
  if (MI.getOpcode() != AArch64::VGSavePseudo &&
      MI.getOpcode() != AArch64::VGRestorePseudo)
    return II;
  // ...
  bool LocallyStreaming =
      FuncAttrs.hasStreamingBody() && !FuncAttrs.hasStreamingInterface();
  // ...
  int64_t VGFrameIdx =
      LocallyStreaming ? AFI->getStreamingVGIdx() : AFI->getVGIdx();
  assert(VGFrameIdx != std::numeric_limits<int>::max() &&
         "Expected FrameIdx for VG");

  unsigned CFIIndex;
  if (MI.getOpcode() == AArch64::VGSavePseudo) {
    // ...
    CFIIndex = MF->addFrameInst(MCCFIInstruction::createOffset(
        nullptr, TRI->getDwarfRegNum(AArch64::VG, true), Offset));
  } else
    CFIIndex = MF->addFrameInst(MCCFIInstruction::createRestore(
        nullptr, TRI->getDwarfRegNum(AArch64::VG, true)));

  MachineInstr *UnwindInst = BuildMI(/* ... */,
                                     TII->get(TargetOpcode::CFI_INSTRUCTION))
                                 .addCFIIndex(CFIIndex);

  MI.eraseFromParent();
  return UnwindInst->getIterator();
}

void AArch64FrameLowering::processFunctionBeforeFrameIndicesReplaced(
    MachineFunction &MF, RegScavenger *RS) const {
  // ...
        II = tryMergeAdjacentSTG(II, this, RS);
StackOffset AArch64FrameLowering::getFrameIndexReferencePreferSP(
    const MachineFunction &MF, int FI, Register &FrameReg,
    bool IgnoreSPUpdates) const {
  // ...
  if (IgnoreSPUpdates) {
    // ...
    FrameReg = AArch64::SP;
    return StackOffset::getFixed(MFI.getObjectOffset(FI));
  }
  // ...
    FrameReg = AArch64::SP;
  // ...
struct FrameObject {
  bool IsValid = false;
  // Index of the object in MFI.
  int ObjectIndex = 0;
  // Group ID this object belongs to.
  int GroupIndex = -1;
  // This object should be placed first (closest to SP).
  bool ObjectFirst = false;
  // This object's group (which always contains the object with
  // ObjectFirst==true) should be placed first.
  bool GroupFirst = false;

  // Used to distinguish between FP and GPR accesses. The values are chosen so
  // they can be or'd together.
  unsigned Accesses = 0;
  enum { AccessFPR = 1, AccessHazard = 2, AccessGPR = 4 };
};

class GroupBuilder {
  SmallVector<int, 8> CurrentMembers;
  int NextGroupIndex = 0;
  std::vector<FrameObject> &Objects;

public:
  GroupBuilder(std::vector<FrameObject> &Objects) : Objects(Objects) {}
  // ...
  void EndCurrentGroup() {
    if (CurrentMembers.size() > 1) {
      // Create a new group with the current member list. This might remove
      // them from their pre-existing groups; that's OK.
      // ...
      for (int Index : CurrentMembers) {
        Objects[Index].GroupIndex = NextGroupIndex;
      }
      ++NextGroupIndex;
    }
    CurrentMembers.clear();
  }
};
bool FrameObjectCompare(const FrameObject &A, const FrameObject &B) {
  // ...
  return std::make_tuple(!A.IsValid, A.Accesses, A.ObjectFirst, A.GroupFirst,
                         A.GroupIndex, A.ObjectIndex) <
         std::make_tuple(!B.IsValid, B.Accesses, B.ObjectFirst, B.GroupFirst,
                         B.GroupIndex, B.ObjectIndex);
}
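// Lexicographic sort key: invalid objects sort last; the rest are ordered by
// access kind (AccessFPR=1 < AccessHazard=2 < AccessGPR=4, separating FPR and
// GPR accesses across the hazard slot), then by the first-object/first-group
// flags, with group and object indices as stable tie-breakers.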
  std::vector<FrameObject> FrameObjects(MFI.getObjectIndexEnd());
  for (auto &Obj : ObjectsToAllocate) {
    FrameObjects[Obj].IsValid = true;
    FrameObjects[Obj].ObjectIndex = Obj;
  }

  // ...
  GroupBuilder GB(FrameObjects);
  for (auto &MBB : MF) {
    for (auto &MI : MBB) {
      if (MI.isDebugInstr())
        continue;
      // ...
      if (FI && *FI >= 0 && *FI < (int)FrameObjects.size()) {
        // ... (FPR/SVE accesses vs. GPR accesses:)
          FrameObjects[*FI].Accesses |= FrameObject::AccessFPR;
        // ...
          FrameObjects[*FI].Accesses |= FrameObject::AccessGPR;
      }

      int TaggedFI = -1;
      switch (MI.getOpcode()) {
      case AArch64::STGloop:
      case AArch64::STZGloop:
        // ...
      case AArch64::STGi:
      case AArch64::STZGi:
      case AArch64::ST2Gi:
      case AArch64::STZ2Gi:
        // ...
      }
      // ...
        if (FI >= 0 && FI < MFI.getObjectIndexEnd() &&
            FrameObjects[FI].IsValid)
          // ...
      // If this is a stack tagging instruction for a slot that is not part of
      // a group yet, either start a new group or add it to the current one.
      if (TaggedFI >= 0)
        GB.AddMember(TaggedFI);
      else
        GB.EndCurrentGroup();
    }
    // Groups should never span multiple basic blocks.
    GB.EndCurrentGroup();
  }

  if (AFI.hasStackHazardSlotIndex()) {
    FrameObjects[AFI.getStackHazardSlotIndex()].Accesses =
        FrameObject::AccessHazard;
    // If a stack object is unknown or both GPR and FPR, sort it into GPR.
    for (auto &Obj : FrameObjects)
      if (!Obj.Accesses ||
          Obj.Accesses == (FrameObject::AccessGPR | FrameObject::AccessFPR))
        Obj.Accesses = FrameObject::AccessGPR;
  }

  // ...
    FrameObjects[*TBPI].ObjectFirst = true;
    FrameObjects[*TBPI].GroupFirst = true;
    int FirstGroupIndex = FrameObjects[*TBPI].GroupIndex;
    if (FirstGroupIndex >= 0)
      for (FrameObject &Object : FrameObjects)
        if (Object.GroupIndex == FirstGroupIndex)
          Object.GroupFirst = true;
  // ...
  int i = 0;
  for (auto &Obj : FrameObjects) {
    // All invalid items are sorted at the end, so it's safe to stop.
    if (!Obj.IsValid)
      break;
    ObjectsToAllocate[i++] = Obj.ObjectIndex;
  }

  LLVM_DEBUG({
    dbgs() << "Final frame order:\n";
    for (auto &Obj : FrameObjects) {
      if (!Obj.IsValid)
        break;
      dbgs() << "  " << Obj.ObjectIndex << ": group " << Obj.GroupIndex;
      if (Obj.ObjectFirst)
        dbgs() << ", first";
      if (Obj.GroupFirst)
        dbgs() << ", group-first";
      dbgs() << "\n";
    }
  });
MachineBasicBlock::iterator
AArch64FrameLowering::inlineStackProbeLoopExactMultiple(
    MachineBasicBlock::iterator MBBI, int64_t ProbeSize,
    Register TargetReg) const {
  // ...
  MF.insert(MBBInsertPoint, LoopMBB);
  // ...
  MF.insert(MBBInsertPoint, ExitMBB);
  // ...
  return ExitMBB->begin();
}
void AArch64FrameLowering::inlineStackProbeFixed(
    MachineBasicBlock::iterator MBBI, Register ScratchReg, int64_t FrameSize,
    StackOffset CFAOffset) const {
  // ...
  int64_t NumBlocks = FrameSize / ProbeSize;
  int64_t ResidualSize = FrameSize % ProbeSize;

  LLVM_DEBUG(dbgs() << "Stack probing: total " << FrameSize << " bytes, "
                    << NumBlocks << " blocks of " << ProbeSize
                    << " bytes, plus " << ResidualSize << " bytes\n");

  // ...
    for (int i = 0; i < NumBlocks; ++i) {
      // ... (emitFrameOffset ending with:)
      //     EmitAsyncCFI && !HasFP, CFAOffset);
      // ... (then probe: str xzr, [sp])
    }
  // ...
  } else if (NumBlocks != 0) {
    // ... (emitFrameOffset ending with:)
    //     EmitAsyncCFI && !HasFP, CFAOffset);
    // ...
    MBBI = inlineStackProbeLoopExactMultiple(MBBI, ProbeSize, ScratchReg);
    // ...
    if (EmitAsyncCFI && !HasFP) {
      // ...
      unsigned Reg = RegInfo.getDwarfRegNum(AArch64::SP, true);
      // ...
    }
  }

  if (ResidualSize != 0) {
    // ... (emitFrameOffset ending with:)
    //     EmitAsyncCFI && !HasFP, CFAOffset);
    // ...
  }
}
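// A fixed-size allocation is probed either as a fully unrolled sequence of
// `sub sp` + `str xzr, [sp]` block probes or, past the unroll limit, as a
// probing loop over ProbeSize-sized blocks, followed by the residual
// allocation (which must itself stay under the unprobed-stack limit).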
    if (MI.getOpcode() == AArch64::PROBED_STACKALLOC ||
        MI.getOpcode() == AArch64::PROBED_STACKALLOC_VAR)
      ToReplace.push_back(&MI);

  for (MachineInstr *MI : ToReplace) {
    if (MI->getOpcode() == AArch64::PROBED_STACKALLOC) {
      Register ScratchReg = MI->getOperand(0).getReg();
      int64_t FrameSize = MI->getOperand(1).getImm();
      StackOffset CFAOffset = StackOffset::get(MI->getOperand(2).getImm(),
                                               MI->getOperand(3).getImm());
      inlineStackProbeFixed(MI->getIterator(), ScratchReg, FrameSize,
                            CFAOffset);
    } else {
      assert(MI->getOpcode() == AArch64::PROBED_STACKALLOC_VAR &&
             "Stack probe pseudo-instruction expected");
      // ...
      Register TargetReg = MI->getOperand(0).getReg();
      (void)TII->probedStackAlloc(MI->getIterator(), TargetReg, true);
    }
    MI->eraseFromParent();
  }
Return true if the instruction is an SEH instruction used for unwinding on Windows.
static bool isFpOrNEON(Register Reg)
Returns whether the physical register is FP or NEON.
bool isReservedReg(const MachineFunction &MF, MCRegister Reg) const
bool hasBasePointer(const MachineFunction &MF) const
bool cannotEliminateFrame(const MachineFunction &MF) const
unsigned getBaseRegister() const
bool isTargetWindows() const
const AArch64RegisterInfo * getRegisterInfo() const override
bool isNeonAvailable() const
Returns true if the target has NEON and the function at runtime is known to have NEON enabled (e....
const AArch64InstrInfo * getInstrInfo() const override
bool isTargetILP32() const
const AArch64TargetLowering * getTargetLowering() const override
bool isTargetMachO() const
const Triple & getTargetTriple() const
bool isCallingConvWin64(CallingConv::ID CC) const
const char * getChkStkName() const
bool swiftAsyncContextIsDynamicallySet() const
Return whether FrameLowering should always set the "extended frame present" bit in FP,...
bool hasInlineStackProbe(const MachineFunction &MF) const override
True if stack clash protection is enabled for this function.
unsigned getRedZoneSize(const Function &F) const
bool supportSwiftError() const override
Return true if the target supports swifterror attribute.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
bool hasAttrSomewhere(Attribute::AttrKind Kind, unsigned *Index=nullptr) const
Return true if the specified attribute is set for at least one parameter or for the return value.
bool test(unsigned Idx) const
size_type count() const
count - Returns the number of bits which are set.
iterator_range< const_set_bits_iterator > set_bits() const
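Where it helps, a minimal usage sketch (assuming an in-scope llvm::BitVector SavedRegs in the style of determineCalleeSaves; dumpSavedRegs is a hypothetical helper, not code from this file):

  #include "llvm/ADT/BitVector.h"
  #include "llvm/Support/Debug.h"

  static void dumpSavedRegs(const llvm::BitVector &SavedRegs) {
    // count() returns the number of set bits; set_bits() visits only the
    // indices whose bit is set.
    llvm::dbgs() << SavedRegs.count() << " callee-saved registers chosen:";
    for (unsigned Reg : SavedRegs.set_bits())
      llvm::dbgs() << ' ' << Reg;
    llvm::dbgs() << '\n';
  }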
The CalleeSavedInfo class tracks the information need to locate where a callee saved register is in t...
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
AttributeList getAttributes() const
Return the attribute list for this Function.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc) const override
Emit instructions to copy a pair of physical registers.
A set of physical registers with utility functions to track liveness when walking backward/forward th...
bool available(const MachineRegisterInfo &MRI, MCPhysReg Reg) const
Returns true if register Reg and no aliasing register is in the set.
void stepBackward(const MachineInstr &MI)
Simulates liveness when stepping backwards over an instruction (bundle).
void removeReg(MCPhysReg Reg)
Removes a physical register, all its sub-registers, and all its super-registers from the set.
void addLiveIns(const MachineBasicBlock &MBB)
Adds all live-in registers of basic block MBB.
void addLiveOuts(const MachineBasicBlock &MBB)
Adds all live-out registers of basic block MBB.
void addReg(MCPhysReg Reg)
Adds a physical register and all its sub-registers to the set.
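A hedged sketch of the backward-walk idiom these utilities support (isFreeAt is a hypothetical helper; it assumes an in-tree backend context and is not this file's code):

  #include "llvm/ADT/STLExtras.h"
  #include "llvm/CodeGen/LivePhysRegs.h"

  static bool isFreeAt(const llvm::MachineBasicBlock &MBB,
                       const llvm::MachineInstr &At, llvm::MCPhysReg Reg,
                       const llvm::TargetRegisterInfo &TRI,
                       const llvm::MachineRegisterInfo &MRI) {
    llvm::LivePhysRegs LiveRegs(TRI);
    LiveRegs.addLiveOuts(MBB);        // seed with the block's live-outs
    for (const llvm::MachineInstr &MI : llvm::reverse(MBB)) {
      if (&MI == &At)
        break;
      LiveRegs.stepBackward(MI);      // apply defs/uses in reverse
    }
    // available() also rejects aliasing sub-/super-registers.
    return LiveRegs.available(MRI, Reg);
  }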
bool usesWindowsCFI() const
static MCCFIInstruction createDefCfaRegister(MCSymbol *L, unsigned Register, SMLoc Loc={})
.cfi_def_cfa_register modifies a rule for computing CFA.
static MCCFIInstruction createOffset(MCSymbol *L, unsigned Register, int Offset, SMLoc Loc={})
.cfi_offset Previous value of Register is saved at offset Offset from CFA.
static MCCFIInstruction cfiDefCfaOffset(MCSymbol *L, int Offset, SMLoc Loc={})
.cfi_def_cfa_offset modifies a rule for computing CFA.
static MCCFIInstruction createRestore(MCSymbol *L, unsigned Register, SMLoc Loc={})
.cfi_restore says that the rule for Register is now the same as it was at the beginning of the functi...
static MCCFIInstruction createNegateRAState(MCSymbol *L, SMLoc Loc={})
.cfi_negate_ra_state toggles the AArch64 return-address (RA) signing state.
static MCCFIInstruction cfiDefCfa(MCSymbol *L, unsigned Register, int Offset, SMLoc Loc={})
.cfi_def_cfa defines a rule for computing CFA as: take address from Register and add Offset to it.
static MCCFIInstruction createEscape(MCSymbol *L, StringRef Vals, SMLoc Loc={}, StringRef Comment="")
.cfi_escape Allows the user to add arbitrary bytes to the unwind info.
static MCCFIInstruction createSameValue(MCSymbol *L, unsigned Register, SMLoc Loc={})
.cfi_same_value Current value of Register is the same as in the previous frame.
MCSymbol * createTempSymbol()
Create a temporary symbol with a unique name.
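A minimal sketch of the usual CFI emission pattern these factories feed (assumed context: a frame-lowering method with MF, MBB, MBBI, DL, and TII in scope; the offset value is illustrative, not this file's code):

  unsigned CFIIndex = MF.addFrameInst(
      MCCFIInstruction::cfiDefCfaOffset(/*L=*/nullptr, /*Offset=*/16));
  BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex)                 // .cfi_def_cfa_offset 16
      .setMIFlags(MachineInstr::FrameSetup); // part of the prologue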
Describe properties that are true of each instruction in the target description file.
Wrapper class representing physical registers. Should be passed by value.
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
instr_iterator instr_begin()
iterator_range< livein_iterator > liveins() const
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
bool isLiveIn(MCPhysReg Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
bool isEHFuncletEntry() const
Returns true if this is the entry block of an EH funclet.
iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
MachineInstr & instr_back()
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
DebugLoc findDebugLoc(instr_iterator MBBI)
Find the next valid DebugLoc starting at MBBI, skipping any debug instructions.
iterator getLastNonDebugInstr(bool SkipPseudoOp=true)
Returns an iterator to the last non-debug instruction in the basic block, or end().
instr_iterator instr_end()
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
reverse_iterator rbegin()
iterator insertAfter(iterator I, MachineInstr *MI)
Insert MI into the instruction list after I.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack ...
uint64_t getStackSize() const
Return the number of bytes that must be allocated to hold all of the fixed size frame objects.
const AllocaInst * getObjectAllocation(int ObjectIdx) const
Return the underlying Alloca of the specified stack object if it exists.
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
bool hasCalls() const
Return true if the current function has any function calls.
bool isFrameAddressTaken() const
This method may be called any time after instruction selection is complete to determine if there is a...
Align getMaxAlign() const
Return the alignment in bytes that this function must be aligned to, which is greater than the defaul...
void setObjectOffset(int ObjectIdx, int64_t SPOffset)
Set the stack frame offset of the specified object.
bool hasPatchPoint() const
This method may be called any time after instruction selection is complete to determine if there is a...
int getStackProtectorIndex() const
Return the index for the stack protector object.
uint64_t estimateStackSize(const MachineFunction &MF) const
Estimate and return the size of the stack frame.
void setStackID(int ObjectIdx, uint8_t ID)
bool isCalleeSavedInfoValid() const
Has the callee saved info been calculated yet?
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
bool isMaxCallFrameSizeComputed() const
bool hasStackMap() const
This method may be called any time after instruction selection is complete to determine if there is a...
const std::vector< CalleeSavedInfo > & getCalleeSavedInfo() const
Returns a reference to the callee saved info vector for the current function.
unsigned getMaxCallFrameSize() const
Return the maximum size of a call frame that must be allocated for an outgoing function call.
int getObjectIndexEnd() const
Return one past the maximum frame object index.
bool hasStackProtectorIndex() const
uint8_t getStackID(int ObjectIdx) const
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
int getObjectIndexBegin() const
Return the minimum frame object index.
bool isDeadObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a dead object.
const WinEHFuncInfo * getWinEHFuncInfo() const
getWinEHFuncInfo - Return information about how the current function uses Windows exception handling.
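An illustrative sketch tying these queries together (assumes an in-scope MachineFunction &MF; the size and alignment values are hypothetical):

  llvm::MachineFrameInfo &MFI = MF.getFrameInfo();
  int FI = MFI.CreateStackObject(/*Size=*/16, llvm::Align(16),
                                 /*isSpillSlot=*/true);
  int64_t Size = MFI.getObjectSize(FI);    // 16
  llvm::Align A = MFI.getObjectAlign(FI);  // Align(16)
  // Only meaningful once prologue/epilogue insertion has assigned offsets:
  int64_t Off = MFI.getObjectOffset(FI);   // offset from incoming SP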
unsigned addFrameInst(const MCCFIInstruction &Inst)
void setHasWinCFI(bool v)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with.
MachineModuleInfo & getMMI() const
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineBasicBlock & front() const
bool hasEHFunclets() const
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
const MachineInstrBuilder & addCFIIndex(unsigned CFIIndex) const
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
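A sketch of the fluent builder pattern (hypothetical operands; assumes MBB, MBBI, DL, TII, and a frame index FI in scope). For AArch64, STRXui stores a 64-bit GPR at a scaled immediate offset:

  BuildMI(MBB, MBBI, DL, TII->get(AArch64::STRXui))
      .addReg(AArch64::LR, getKillRegState(true)) // source, last use
      .addFrameIndex(FI)                          // spill-slot base
      .addImm(0)                                  // scaled offset
      .setMIFlag(MachineInstr::FrameSetup);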
Representation of each machine instruction.
void setFlags(unsigned flags)
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
uint32_t getFlags() const
Return the MI flags bitvector.
A description of a memory reference used in the backend.
const PseudoSourceValue * getPseudoValue() const
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
const Value * getValue() const
Return the base address of the memory access.
This class contains meta information specific to a module.
const MCContext & getContext() const
MachineOperand class - Representation of each machine instruction operand.
void setImm(int64_t immVal)
static MachineOperand CreateImm(int64_t Val)
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
bool isLiveIn(Register Reg) const
const MCPhysReg * getCalleeSavedRegs() const
Returns list of callee saved registers.
bool isPhysRegUsed(MCRegister PhysReg, bool SkipRegMaskTest=false) const
Return true if the specified register is modified or read in this function.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
void enterBasicBlockEnd(MachineBasicBlock &MBB)
Start tracking liveness from the end of basic block MBB.
Register FindUnusedReg(const TargetRegisterClass *RC) const
Find an unused register of the specified register class.
void backward()
Update internal register state and move MBB iterator backwards.
void addScavengingFrameIndex(int FI)
Add a scavenging frame index.
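A hedged sketch of the scavenging calls above (assumes an in-scope RegScavenger *RS and MachineBasicBlock &MBB; this mirrors the generic usage, not this file's exact code):

  RS->enterBasicBlockEnd(MBB);  // start tracking liveness at the block end
  RS->backward();               // step back over the last instruction
  llvm::Register Scratch =
      RS->FindUnusedReg(&AArch64::GPR64RegClass); // 0 if nothing is free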
Wrapper class representing virtual and physical registers.
SMEAttrs is a utility class to parse the SME ACLE attributes on functions.
bool hasStreamingInterface() const
bool hasStreamingBody() const
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
int64_t getFixed() const
Returns the fixed component of the stack.
int64_t getScalable() const
Returns the scalable component of the stack.
static StackOffset get(int64_t Fixed, int64_t Scalable)
static StackOffset getScalable(int64_t Scalable)
static StackOffset getFixed(int64_t Fixed)
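A small arithmetic sketch (illustrative values only): the fixed and scalable components stay separate under addition, which is how SVE frame sizes are tracked:

  llvm::StackOffset CSArea  = llvm::StackOffset::getFixed(64);
  llvm::StackOffset SVEArea = llvm::StackOffset::getScalable(32);
  llvm::StackOffset Total   = CSArea + SVEArea;
  int64_t F = Total.getFixed();     // 64 bytes
  int64_t S = Total.getScalable();  // 32, multiplied by vscale at runtime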
StringRef - Represent a constant reference to a string, i.e.
virtual void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, RegScavenger *RS=nullptr) const
This method determines which of the registers reported by TargetRegisterInfo::getCalleeSavedRegs() sh...
int getOffsetOfLocalArea() const
getOffsetOfLocalArea - This method returns the offset of the local area from the stack pointer on ent...
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
StackDirection getStackGrowthDirection() const
getStackGrowthDirection - Return the direction the stack grows.
virtual bool enableCFIFixup(MachineFunction &MF) const
Returns true if we may need to fix the unwind information for the function.
TargetInstrInfo - Interface to description of machine instruction set.
CodeModel::Model getCodeModel() const
Returns the code model.
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
SwiftAsyncFramePointerMode SwiftAsyncFramePointer
Control when and how the Swift async frame pointer bit should be set.
bool DisableFramePointerElim(const MachineFunction &MF) const
DisableFramePointerElim - This returns true if frame pointer elimination optimization should be disab...
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
const TargetRegisterClass * getMinimalPhysRegClass(MCRegister Reg, MVT VT=MVT::Other) const
Returns the Register Class of a physical register of the given type, picking the most sub register cl...
Align getSpillAlign(const TargetRegisterClass &RC) const
Return the minimum required alignment in bytes for a spill slot for a register of this class.
bool hasStackRealignment(const MachineFunction &MF) const
True if stack realignment is required and still possible.
unsigned getSpillSize(const TargetRegisterClass &RC) const
Return the size in bytes of the stack slot allocated to hold a spilled copy of a register from class ...
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const TargetInstrInfo * getInstrInfo() const
StringRef getArchName() const
Get the architecture (first) component of the triple.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ MO_GOT
MO_GOT - This flag indicates that a symbol operand represents the address of the GOT entry for the sy...
static unsigned getShiftValue(unsigned Imm)
getShiftValue - Extract the shift value.
static unsigned getArithExtendImm(AArch64_AM::ShiftExtendType ET, unsigned Imm)
getArithExtendImm - Encode the extend type and shift amount for an arithmetic instruction: imm: 3-bit...
static uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize)
encodeLogicalImmediate - Return the encoded immediate value for a logical immediate instruction of th...
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm: 6-bit shift amount shifter: 000 ==> lsl 001 ==...
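Illustratively (assumes MBB, MBBI, DL, and TII in scope; this mirrors how large immediates are split, not a verbatim excerpt): adding 4096 to SP uses a 12-bit immediate with an LSL #12 shifter:

  unsigned Shift = AArch64_AM::getShifterImm(AArch64_AM::LSL, 12);
  BuildMI(MBB, MBBI, DL, TII->get(AArch64::ADDXri), AArch64::SP)
      .addReg(AArch64::SP)
      .addImm(1)       // 1 << 12 == 4096 bytes
      .addImm(Shift);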
const unsigned StackProbeMaxLoopUnroll
Maximum number of iterations to unroll for a constant size probing loop.
const unsigned StackProbeMaxUnprobedStack
Maximum allowed number of unprobed bytes above SP at an ABI boundary.
@ AArch64_SVE_VectorCall
Used between AArch64 SVE functions.
@ PreserveMost
Used for runtime calls that preserve most registers.
@ CXX_FAST_TLS
Used for access functions.
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
@ AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1
Preserve X1-X15, X19-X29, SP, Z0-Z31, P0-P15.
@ PreserveAll
Used for runtime calls that preserve (almost) all registers.
@ PreserveNone
Used for runtime calls that preserve no general registers.
@ Win64
The C convention as implemented on Windows/x86-64 and AArch64.
@ SwiftTail
This follows the Swift calling convention in how arguments are passed but guarantees tail calls will ...
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Define
Register definition.
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
void stable_sort(R &&Range)
MCCFIInstruction createDefCFA(const TargetRegisterInfo &TRI, unsigned FrameReg, unsigned Reg, const StackOffset &Offset, bool LastAdjustmentWasScalable=true)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
int isAArch64FrameOffsetLegal(const MachineInstr &MI, StackOffset &Offset, bool *OutUseUnscaledOp=nullptr, unsigned *OutUnscaledOp=nullptr, int64_t *EmittableOffset=nullptr)
Check if the Offset is a valid frame offset for MI.
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
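A minimal usage sketch of make_scope_exit (the lambda body and the surrounding function are hypothetical):

  #include "llvm/ADT/ScopeExit.h"

  void example() {
    auto Cleanup = llvm::make_scope_exit([] {
      // Runs when Cleanup goes out of scope, on every exit path.
    });
    // ... code that may return early ...
  }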
MCCFIInstruction createCFAOffset(const TargetRegisterInfo &MRI, unsigned Reg, const StackOffset &OffsetFromDefCFA)
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
unsigned getBLRCallOpcode(const MachineFunction &MF)
Return opcode to be used for indirect calls.
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
@ AArch64FrameOffsetCannotUpdate
Offset cannot apply.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
auto reverse(ContainerTy &&C)
@ Always
Always set the bit.
@ Never
Never set the bit.
@ DeploymentBased
Determine whether to set the bit statically or dynamically based on the deployment target.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, StackOffset Offset, const TargetInstrInfo *TII, MachineInstr::MIFlag=MachineInstr::NoFlags, bool SetNZCV=false, bool NeedsWinCFI=false, bool *HasWinCFI=nullptr, bool EmitCFAOffset=false, StackOffset InitialOffset={}, unsigned FrameReg=AArch64::SP)
emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg plus Offset.
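A sketch of a typical call (assumes MBB, MBBI, DL, and TII in scope; NumBytes is a hypothetical local): allocate NumBytes of stack during frame setup:

  emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP,
                  llvm::StackOffset::getFixed(-(int64_t)NumBytes), TII,
                  MachineInstr::FrameSetup);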
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
unsigned getDefRegState(bool B)
unsigned getKillRegState(bool B)
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
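A one-line arithmetic sketch (illustrative values): rounding a 40-byte frame up to 16-byte stack alignment yields 48:

  uint64_t Padded = llvm::alignTo(40, llvm::Align(16)); // == 48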
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
void fullyRecomputeLiveIns(ArrayRef< MachineBasicBlock * > MBBs)
Convenience function for recomputing live-ins for a set of MBBs until the computation converges.
Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
Description of the encoding of one expression Op.
Pair of physical register and lane mask.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.