#define DEBUG_TYPE "frame-info"

STATISTIC(NumRedZoneFunctions, "Number of functions using red zone");
bool AArch64PrologueEpilogueCommon::requiresGetVGCall() const {
  // Streaming-mode changes need the current value of VG; without SVE it must
  // be obtained with a call rather than a CNTD instruction.
  return AFI->hasStreamingModeChanges() && !Subtarget.hasSVE();
}
bool AArch64PrologueEpilogueCommon::isVGInstruction(
    MachineBasicBlock::iterator MBBI, const TargetLowering &TLI) const {
  unsigned Opc = MBBI->getOpcode();
  if (Opc == AArch64::CNTD_XPiI)
    return true;

  if (!requiresGetVGCall())
    return false;

  if (Opc == AArch64::BL)
    return matchLibcall(TLI, MBBI->getOperand(0), RTLIB::SMEABI_GET_CURRENT_VG);

  return Opc == TargetOpcode::COPY;
}
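// Why this matters: when a function changes streaming mode, the prologue
// materializes VG (a CNTD_XPiI when SVE is available, otherwise a call to the
// SME ABI's get-current-VG routine plus a COPY of its result) immediately
// before spilling VG for unwind info. Callers use isVGInstruction() to step
// over that sequence so it stays glued to the VG spill.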
static bool isPartOfZPRCalleeSaves(MachineBasicBlock::iterator I) {
  switch (I->getOpcode()) {
  default:
    return false;
  case AArch64::LD1B_2Z_IMM:
  case AArch64::ST1B_2Z_IMM:
  case AArch64::STR_ZXI:
  case AArch64::LDR_ZXI:
  case AArch64::PTRUE_C_B:
  case AArch64::SEH_SaveZReg:
    return true;
  }
}
static bool isPartOfPPRCalleeSaves(MachineBasicBlock::iterator I) {
  switch (I->getOpcode()) {
  default:
    return false;
  case AArch64::STR_PXI:
  case AArch64::LDR_PXI:
  case AArch64::SEH_SavePReg:
    return true;
  }
}

static bool isPartOfSVECalleeSaves(MachineBasicBlock::iterator I) {
  return isPartOfZPRCalleeSaves(I) || isPartOfPPRCalleeSaves(I);
}
AArch64PrologueEpilogueCommon::AArch64PrologueEpilogueCommon(
    MachineFunction &MF, MachineBasicBlock &MBB,
    const AArch64FrameLowering &AFL)
    : MF(MF), MBB(MBB), AFL(AFL) {
  // ... (remaining member initialization elided)

  // Pick the SVE stack layout for this function.
  if (AFL.hasSVECalleeSavesAboveFrameRecord(MF)) {
    if (AFI->hasStackHazardSlotIndex())
      reportFatalUsageError("SME hazard padding is not supported on Windows");
    SVELayout = SVEStackLayout::CalleeSavesAboveFrameRecord;
  } else if (AFI->hasSplitSVEObjects()) {
    SVELayout = SVEStackLayout::Split;
  }
}
MachineBasicBlock::iterator
AArch64PrologueEpilogueCommon::convertCalleeSaveRestoreToSPPrePostIncDec(
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL, int CSStackSizeInc,
    bool EmitCFI, MachineInstr::MIFlag FrameFlag, int CFAOffset) const {
  // If the function contains streaming mode changes, we expect instructions
  // that compute the value of VG before the callee-save spills; move past
  // them so they stay with the spill of VG.
  if (AFL.requiresSaveVG(MF)) {
    auto &TLI = *Subtarget.getTargetLowering();
    while (isVGInstruction(MBBI, TLI))
      ++MBBI;
  }
  unsigned NewOpc;
  switch (MBBI->getOpcode()) {
  default:
    llvm_unreachable("Unexpected callee-save save/restore opcode!");
  case AArch64::STPXi:
    NewOpc = AArch64::STPXpre;
    break;
  case AArch64::STPDi:
    NewOpc = AArch64::STPDpre;
    break;
  case AArch64::STPQi:
    NewOpc = AArch64::STPQpre;
    break;
  case AArch64::STRXui:
    NewOpc = AArch64::STRXpre;
    break;
  case AArch64::STRDui:
    NewOpc = AArch64::STRDpre;
    break;
  case AArch64::STRQui:
    NewOpc = AArch64::STRQpre;
    break;
  case AArch64::LDPXi:
    NewOpc = AArch64::LDPXpost;
    break;
  case AArch64::LDPDi:
    NewOpc = AArch64::LDPDpost;
    break;
  case AArch64::LDPQi:
    NewOpc = AArch64::LDPQpost;
    break;
  case AArch64::LDRXui:
    NewOpc = AArch64::LDRXpost;
    break;
  case AArch64::LDRDui:
    NewOpc = AArch64::LDRDpost;
    break;
  case AArch64::LDRQui:
    NewOpc = AArch64::LDRQpost;
    break;
  }
  TypeSize Scale = TypeSize::getFixed(1), Width = TypeSize::getFixed(0);
  int64_t MinOffset, MaxOffset;
  bool Success = TII->getMemOpInfo(NewOpc, Scale, Width, MinOffset, MaxOffset);
  assert(Success && "unknown load/store opcode");
  (void)Success;
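  // For example (a sketch based on the pre/post-indexed encodings): for
  // STPXpre, getMemOpInfo reports Scale = 8 and MinOffset/MaxOffset = -64/63,
  // so the folded SP decrement must lie within [-512, 504] bytes and be
  // 8-byte aligned; anything outside that falls back to a separate SP update.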
  // If the first store isn't right where we want SP then we can't fold the
  // update in so create a normal arithmetic instruction instead.
  if (MBBI->getOperand(MBBI->getNumOperands() - 1).getImm() != 0 ||
      CSStackSizeInc < MinOffset * (int64_t)Scale.getFixedValue() ||
      CSStackSizeInc > MaxOffset * (int64_t)Scale.getFixedValue() ||
      // The converted X-register pair form is only used when the two
      // registers have consecutive encodings (e.g. the x29/x30 frame record).
      ((NewOpc == AArch64::LDPXpost || NewOpc == AArch64::STPXpre) &&
       RegInfo.getEncodingValue(MBBI->getOperand(0).getReg()) + 1 !=
           RegInfo.getEncodingValue(MBBI->getOperand(1).getReg()))) {
    // Fall back: emit the SP adjustment separately, leaving the save/restore
    // untouched.
    emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP,
                    StackOffset::getFixed(CSStackSizeInc), TII, FrameFlag,
                    false, NeedsWinCFI, &HasWinCFI, EmitCFI,
                    StackOffset::getFixed(CFAOffset));
    return std::prev(MBBI);
  }
  // Get rid of the SEH code associated with the old instruction.
  if (NeedsWinCFI) {
    auto SEH = std::next(MBBI);
    if (AArch64InstrInfo::isSEHInstruction(*SEH))
      SEH->eraseFromParent();
  }
  MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(NewOpc));
  MIB.addReg(AArch64::SP, RegState::Define);

  // Copy all operands other than the immediate offset.
  unsigned OpndIdx = 0;
  for (unsigned OpndEnd = MBBI->getNumOperands() - 1; OpndIdx < OpndEnd;
       ++OpndIdx)
    MIB.add(MBBI->getOperand(OpndIdx));

  assert(MBBI->getOperand(OpndIdx).getImm() == 0 &&
         "Unexpected immediate offset in first/last callee-save save/restore "
         "instruction!");
  assert(MBBI->getOperand(OpndIdx - 1).getReg() == AArch64::SP &&
         "Unexpected base register in callee-save save/restore instruction!");
  assert(CSStackSizeInc % Scale == 0);
  MIB.addImm(CSStackSizeInc / (int)Scale);

  MIB.setMIFlags(MBBI->getFlags());
  MIB.setMemRefs(MBBI->memoperands());
  // Generate a new SEH code that corresponds to the new instruction.
  if (NeedsWinCFI) {
    HasWinCFI = true;
    AFL.insertSEH(*MIB, *TII, FrameFlag);
  }
  return std::prev(MBB.erase(MBBI));
}
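// A sketch of the transformation this performs (register choice illustrative):
//   before:  sub sp, sp, #96            ; separate SP decrement
//            stp x29, x30, [sp]
//   after:   stp x29, x30, [sp, #-96]!  ; STPXi -> STPXpre, SP update folded
// and symmetrically LDPXi -> LDPXpost in the epilogue:
//            ldp x29, x30, [sp], #96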
static void fixupSEHOpcode(MachineBasicBlock::iterator MBBI,
                           unsigned LocalStackSize) {
  MachineOperand *ImmOpnd = nullptr;
  unsigned ImmIdx = MBBI->getNumOperands() - 1;
  switch (MBBI->getOpcode()) {
  default:
    llvm_unreachable("Fix the offset in the SEH instruction");
  case AArch64::SEH_SaveFPLR:
  case AArch64::SEH_SaveRegP:
  case AArch64::SEH_SaveReg:
  case AArch64::SEH_SaveFRegP:
  case AArch64::SEH_SaveFReg:
  case AArch64::SEH_SaveAnyRegI:
  case AArch64::SEH_SaveAnyRegIP:
  case AArch64::SEH_SaveAnyRegQP:
  case AArch64::SEH_SaveAnyRegQPX:
    ImmOpnd = &MBBI->getOperand(ImmIdx);
    break;
  }
  if (ImmOpnd)
    ImmOpnd->setImm(ImmOpnd->getImm() + LocalStackSize);
}
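// For example (offsets illustrative): if 48 bytes of locals are folded into
// the callee-save SP bump, a Windows unwind code such as
//   .seh_save_reg x19, 16
// must become
//   .seh_save_reg x19, 64
// so the recorded offset stays relative to the final SP.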
void AArch64PrologueEpilogueCommon::fixupCalleeSaveRestoreStackOffset(
    MachineInstr &MI, uint64_t LocalStackSize) const {
  if (AArch64InstrInfo::isSEHInstruction(MI))
    return fixupSEHOpcode(&MI, LocalStackSize);

  unsigned Opc = MI.getOpcode();
  unsigned Scale;
  switch (Opc) {
  case AArch64::STPXi:
  case AArch64::STRXui:
  case AArch64::STPDi:
  case AArch64::STRDui:
  case AArch64::LDPXi:
  case AArch64::LDRXui:
  case AArch64::LDPDi:
  case AArch64::LDRDui:
    Scale = 8;
    break;
  case AArch64::STPQi:
  case AArch64::STRQui:
  case AArch64::LDPQi:
  case AArch64::LDRQui:
    Scale = 16;
    break;
  default:
    llvm_unreachable("Unexpected callee-save save/restore opcode!");
  }

  unsigned OffsetIdx = MI.getNumExplicitOperands() - 1;
  assert(MI.getOperand(OffsetIdx - 1).getReg() == AArch64::SP &&
         "Unexpected base register in callee-save save/restore instruction!");
  // Last operand is the immediate offset that needs fixing.
  MachineOperand &OffsetOpnd = MI.getOperand(OffsetIdx);
  // All generated opcodes have scaled offsets.
  assert(LocalStackSize % Scale == 0);
  OffsetOpnd.setImm(OffsetOpnd.getImm() + LocalStackSize / Scale);
}
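// Worked example: with LocalStackSize = 32 and Scale = 8, the spill
//   stp x19, x20, [sp, #16]   ; immediate operand = 2 (i.e. 16/8)
// is rewritten so its immediate operand becomes 2 + 32/8 = 6, i.e.
//   stp x19, x20, [sp, #48]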
// Step from a frame instruction to its paired SEH opcode. (The enclosing
// helper's signature did not survive; `getSEHFollowing` is a hypothetical
// name used to keep this excerpt well-formed.)
static MachineBasicBlock::iterator getSEHFollowing(MachineInstr &MI) {
  MachineBasicBlock::iterator MBBI = std::next(MI.getIterator());
  assert(MBBI != MI.getParent()->end() && "Expecting a valid instruction");
  assert(AArch64InstrInfo::isSEHInstruction(*MBBI) &&
         "Expecting a SEH instruction");
  return MBBI;
}
bool AArch64PrologueEpilogueCommon::shouldCombineCSRLocalStackBump(
    uint64_t StackBumpBytes) const {
  if (AFL.homogeneousPrologEpilog(MF))
    return false;

  if (AFI->getLocalStackSize() == 0)
    return false;

  // For WinCFI, when optimizing for size, prefer not to combine the stack
  // bump with the callee-save stores.
  if (AFL.needsWinCFI(MF) && AFI->getCalleeSavedStackSize() > 0 &&
      MF.getFunction().hasOptSize())
    return false;

  // 512 is the maximum immediate for stp/ldp that will be used for
  // callee-save save/restores.
  if (StackBumpBytes >= 512 ||
      AFL.windowsRequiresStackProbe(MF, StackBumpBytes))
    return false;

  if (MFI.hasVarSizedObjects())
    return false;

  if (RegInfo.hasStackRealignment(MF))
    return false;

  // This isn't strictly necessary, but it simplifies things a bit since the
  // current RedZone handling code assumes the SP is adjusted by the
  // callee-save save/restore code.
  if (AFL.canUseRedZone(MF))
    return false;

  // When there is an SVE area on the stack, always allocate the callee saves
  // and spills/locals separately.
  if (AFI->hasSVEStackSize())
    return false;

  return true;
}
SVEFrameSizes AArch64PrologueEpilogueCommon::getSVEStackFrameSizes() const {
  StackOffset PPRCalleeSavesSize =
      StackOffset::getScalable(AFI->getPPRCalleeSavedStackSize());
  StackOffset ZPRCalleeSavesSize =
      StackOffset::getScalable(AFI->getZPRCalleeSavedStackSize());
  StackOffset PPRLocalsSize = AFL.getPPRStackSize(MF) - PPRCalleeSavesSize;
  StackOffset ZPRLocalsSize = AFL.getZPRStackSize(MF) - ZPRCalleeSavesSize;
  if (SVELayout == SVEStackLayout::Split)
    return {{PPRCalleeSavesSize, PPRLocalsSize},
            {ZPRCalleeSavesSize, ZPRLocalsSize}};
  // Otherwise the PPR and ZPR locals are allocated together.
  return {{PPRCalleeSavesSize, StackOffset{}},
          {ZPRCalleeSavesSize, PPRLocalsSize + ZPRLocalsSize}};
}
SVEStackAllocations AArch64PrologueEpilogueCommon::getSVEStackAllocations(
    SVEFrameSizes const &SVE) {
  StackOffset BeforePPRs = SVE.PPR.CalleeSavesSize;
  StackOffset AfterPPRs = {};
  StackOffset AfterZPRs = SVE.ZPR.LocalsSize;
  if (SVE.ZPR.CalleeSavesSize)
    AfterPPRs += SVE.PPR.LocalsSize + SVE.ZPR.CalleeSavesSize;
  else
    // With no ZPR callee saves, group the PPR locals with the ZPR area.
    AfterZPRs += SVE.PPR.LocalsSize;
  return {BeforePPRs, AfterPPRs, AfterZPRs};
}
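// The three allocations bracket the SVE callee-save stores: BeforePPRs covers
// the predicate callee saves, AfterPPRs everything between the PPR and ZPR
// stores (PPR locals and/or ZPR callee saves), and AfterZPRs the remaining
// SVE locals. E.g. with the split layout (stack grows down):
//   [GPR CS][PPR CS][PPR locals][ZPR CS][ZPR locals ...]
//           ^BeforePPRs         ^AfterPPRs      ^AfterZPRs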
static SVEPartitions partitionSVECS(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator MBBI,
                                    StackOffset PPRCalleeSavesSize,
                                    StackOffset ZPRCalleeSavesSize,
                                    bool IsEpilogue) {
  MachineBasicBlock::iterator PPRsI = MBBI;
  MachineBasicBlock::iterator End =
      IsEpilogue ? MBB.begin() : MBB.getFirstTerminator();
  auto AdjustI = [&](auto MBBI) { return IsEpilogue ? std::prev(MBBI) : MBBI; };
  // Process the SVE callee saves.
  if (PPRCalleeSavesSize) {
    PPRsI = AdjustI(PPRsI);
    assert(isPartOfPPRCalleeSaves(PPRsI) && "Unexpected instruction");
    while (PPRsI != End && isPartOfPPRCalleeSaves(AdjustI(PPRsI)))
      IsEpilogue ? (--PPRsI) : (++PPRsI);
  }
  MachineBasicBlock::iterator ZPRsI = PPRsI;
  if (ZPRCalleeSavesSize) {
    ZPRsI = AdjustI(ZPRsI);
    assert(isPartOfZPRCalleeSaves(ZPRsI) && "Unexpected instruction");
    while (ZPRsI != End && isPartOfZPRCalleeSaves(AdjustI(ZPRsI)))
      IsEpilogue ? (--ZPRsI) : (++ZPRsI);
  }
  if (IsEpilogue)
    return {{PPRsI, MBBI}, {ZPRsI, PPRsI}};
  return {{MBBI, PPRsI}, {PPRsI, ZPRsI}};
}
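// In the prologue the partition walks forward from MBBI, so PPR.Begin..End
// covers the predicate saves and ZPR.Begin..End the vector saves, matching a
// sequence like (illustrative):
//   str p4, [sp, #...]   ; PPR callee save
//   str z8, [sp, #...]   ; ZPR callee save
// In the epilogue the same walk runs backwards from the terminator, because
// the restores appear in reverse order (ZPR restores before PPR restores).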
AArch64PrologueEmitter::AArch64PrologueEmitter(MachineFunction &MF,
                                               MachineBasicBlock &MBB,
                                               const AArch64FrameLowering &AFL)
    : AArch64PrologueEpilogueCommon(MF, MBB, AFL), F(MF.getFunction()) {
  EmitCFI = AFI->needsDwarfUnwindInfo(MF);
  EmitAsyncCFI = AFI->needsAsyncDwarfUnwindInfo(MF);
#ifndef NDEBUG
  collectBlockLiveins();
#endif
}
#ifndef NDEBUG
void AArch64PrologueEmitter::collectBlockLiveins() {
  // Collect liveness past the end of the existing frame-setup instructions.
  PrologueEndI = MBB.begin();
  while (PrologueEndI != MBB.end() &&
         PrologueEndI->getFlag(MachineInstr::FrameSetup))
    ++PrologueEndI;

  if (PrologueEndI != MBB.end()) {
    getLivePhysRegsUpTo(*PrologueEndI, RegInfo, LiveRegs);
    // Ignore registers used for stack management (SP, FP, LR, ...).
    // ...
  }
}
void AArch64PrologueEmitter::verifyPrologueClobbers() const {
  if (PrologueEndI == MBB.end())
    return; // Nothing to verify.

  for (MachineInstr &MI :
       make_range(MBB.instr_begin(), PrologueEndI->getIterator()))
    for (auto &Op : MI.operands())
      if (Op.isReg() && Op.isDef())
        assert(!LiveRegs.contains(Op.getReg()) &&
               "live register clobbered by inserted prologue instructions");
}
#endif
void AArch64PrologueEmitter::determineLocalsStackSize(
    uint64_t StackSize, uint64_t PrologueSaveSize) {
  AFI->setLocalStackSize(StackSize - PrologueSaveSize);
  CombineSPBump = shouldCombineCSRLocalStackBump(StackSize);
}
// Return the maximum possible number of bytes for `Size` due to the
// architectural limit on the size of a SVE register.
static int64_t upperBound(StackOffset Size) {
  static const int64_t MAX_BYTES_PER_SCALABLE_BYTE = 16;
  return Size.getScalable() * MAX_BYTES_PER_SCALABLE_BYTE + Size.getFixed();
}
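// Worked example: one scalable byte corresponds to vscale bytes at run time,
// and vscale is at most 16 (a 2048-bit SVE vector). So an allocation of
// StackOffset{fixed = 64, scalable = 32} can occupy at most
// 32 * 16 + 64 = 576 bytes, which is what the probing code below budgets for.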
void AArch64PrologueEmitter::allocateStackSpace(
    MachineBasicBlock::iterator MBBI, int64_t RealignmentPadding,
    StackOffset AllocSize, bool EmitCFI, StackOffset InitialOffset,
    bool FollowupAllocs) {
  if (!AllocSize)
    return;

  DebugLoc DL;
  const int64_t MaxAlign = MFI.getMaxAlign().value();
  const uint64_t AndMask = ~(MaxAlign - 1);

  if (!Subtarget.getTargetLowering()->hasInlineStackProbe(MF)) {
    Register TargetReg = RealignmentPadding
                             ? AFL.findScratchNonCalleeSaveRegister(&MBB)
                             : AArch64::SP;
    // SUB Xd/SP, SP, AllocSize
    emitFrameOffset(MBB, MBBI, DL, TargetReg, AArch64::SP, -AllocSize, TII,
                    MachineInstr::FrameSetup, false, NeedsWinCFI, &HasWinCFI,
                    EmitCFI, InitialOffset);

    if (RealignmentPadding) {
      // AND SP, Xd, ~(MaxAlign - 1)
      // ...
      AFI->setStackRealigned(true);
    }
    return;
  }
  // Fixed-length allocation with stack probing: a single pseudo that later
  // expands to a probing loop.
  if (AllocSize.getScalable() == 0 && RealignmentPadding == 0) {
    Register ScratchReg = AFL.findScratchNonCalleeSaveRegister(&MBB);
    assert(ScratchReg != AArch64::NoRegister);
    BuildMI(MBB, MBBI, DL, TII->get(AArch64::PROBED_STACKALLOC))
        .addDef(ScratchReg)
        .addImm(AllocSize.getFixed())
        .addImm(InitialOffset.getFixed())
        .addImm(InitialOffset.getScalable());
    // The fixed allocation may leave unprobed bytes at the top of the stack.
    // If more allocations follow, probe the top so they start in a known
    // state: STR XZR, [SP].
    if (FollowupAllocs) {
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::STRXui))
          .addReg(AArch64::XZR)
          .addReg(AArch64::SP)
          .addImm(0)
          .setMIFlags(MachineInstr::FrameSetup);
    }
    return;
  }
  // Variable-length allocation: if the worst-case size stays within one probe
  // interval, bump SP directly and probe the new top if needed.
  int64_t ProbeSize = AFI->getStackProbeSize();
  if (upperBound(AllocSize) + RealignmentPadding <= ProbeSize) {
    Register ScratchReg = RealignmentPadding
                              ? AFL.findScratchNonCalleeSaveRegister(&MBB)
                              : AArch64::SP;
    assert(ScratchReg != AArch64::NoRegister);
    // SUB Xd/SP, SP, AllocSize ...
    if (RealignmentPadding) {
      // AND SP, Xd, ~(MaxAlign - 1) ...
      AFI->setStackRealigned(true);
    }
    if (FollowupAllocs || upperBound(AllocSize) + RealignmentPadding >
                              AArch64::StackProbeMaxUnprobedStack) {
      // STR XZR, [SP] -- probe the top of the new allocation.
      // ...
    }
    return;
  }

  // Otherwise emit a variable-length probing loop (PROBED_STACKALLOC_VAR).
  Register TargetReg = AFL.findScratchNonCalleeSaveRegister(&MBB);
  assert(TargetReg != AArch64::NoRegister);
  // SUB TargetReg, SP, AllocSize, realigning into TargetReg if required,
  // then expand the probing loop down to TargetReg.
  // ...
  if (RealignmentPadding) {
    // ...
  }
  if (EmitCFI) {
    // Set the CFA register back to SP.
    CFIInstBuilder(MBB, MBBI, MachineInstr::FrameSetup)
        .buildDefCFARegister(AArch64::SP);
  }
  if (RealignmentPadding)
    AFI->setStackRealigned(true);
}
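// Probing rationale (a sketch): code must never move SP more than one guard
// page below the last probed address, so any allocation whose worst case
// (upperBound(AllocSize) + RealignmentPadding) exceeds the probe interval
// needs a loop, and any unprobed tail larger than
// AArch64::StackProbeMaxUnprobedStack gets a final "str xzr, [sp]" touch.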
void AArch64PrologueEmitter::emitPrologue() {
  const MachineBasicBlock::iterator PrologueBeginI = MBB.begin();

  // At this point, we're going to decide whether or not the function uses a
  // redzone. In most cases, the function doesn't have a redzone so let's
  // assume that's false and set it to true in the later case.
  AFI->setHasRedZone(false);

  // Debug location must be unknown since the first debug location is used
  // to determine the end of the prologue.
  DebugLoc DL;

  if (AFI->getArgumentStackToRestore()) {
    // ...
  }

  if (AFI->shouldSignReturnAddress(MF)) {
    // If pac-ret+leaf is in effect, the PAUTH_PROLOGUE pseudo is inserted by
    // a separate pass; only emit it here otherwise.
    if (!AFL.shouldSignReturnAddressEverywhere(MF)) {
      BuildMI(MBB, PrologueBeginI, DL, TII->get(AArch64::PAUTH_PROLOGUE))
          .setMIFlag(MachineInstr::FrameSetup);
    }
    // ...
  }

  if (AFI->needsShadowCallStackPrologueEpilogue(MF)) {
    emitShadowCallStackPrologue(PrologueBeginI, DL);
    // ...
  }

  if (HasFP && AFI->hasSwiftAsyncContext())
    emitSwiftAsyncContextFramePointer(PrologueBeginI, DL);

  // Set tagged base pointer to the requested stack slot. Ideally it should
  // match SP value after prologue.
  if (std::optional<int> TBPI = AFI->getTaggedBasePointerIndex())
    AFI->setTaggedBasePointerOffset(-MFI.getObjectOffset(*TBPI));
  else
    AFI->setTaggedBasePointerOffset(MFI.getStackSize());

  int64_t NumBytes = IsFunclet ? AFL.getWinEHFuncletFrameSize(MF)
                               : (int64_t)MFI.getStackSize();
  if (!AFI->hasStackFrame() && !AFL.windowsRequiresStackProbe(MF, NumBytes))
    return emitEmptyStackFramePrologue(NumBytes, PrologueBeginI, DL);

  bool IsWin64 = Subtarget.isCallingConvWin64(F.getCallingConv(), F.isVarArg());
  unsigned FixedObject = AFL.getFixedObjectSize(MF, AFI, IsWin64, IsFunclet);
  auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject;

  determineLocalsStackSize(NumBytes, PrologueSaveSize);
721 "unexpected SVE allocs after PPRs with CalleeSavesAboveFrameRecord");
728 allocateStackSpace(PrologueBeginI, 0, SaveSize,
false,
StackOffset{},
730 NumBytes -= FixedObject;
737 MBBI,
DL, -
AFI->getCalleeSavedStackSize(), EmitAsyncCFI);
738 NumBytes -=
AFI->getCalleeSavedStackSize();
739 }
else if (CombineSPBump) {
740 assert(!
AFL.getSVEStackSize(
MF) &&
"Cannot combine SP bump with SVE");
748 NumBytes -= PrologueSaveSize;
749 }
else if (PrologueSaveSize != 0) {
751 PrologueBeginI,
DL, -PrologueSaveSize, EmitAsyncCFI);
752 NumBytes -= PrologueSaveSize;
754 assert(NumBytes >= 0 &&
"Negative stack allocation size!?");
  // Move past the saves of the callee-saved registers, fixing up the offsets
  // and pre-inc if we decided to combine the callee-save and local stack
  // pointer bump above.
  auto &TLI = *Subtarget.getTargetLowering();
  MachineBasicBlock::iterator EndI = MBB.end();
  MachineBasicBlock::iterator AfterGPRSavesI = FirstGPRSaveI;
  while (AfterGPRSavesI != EndI &&
         AfterGPRSavesI->getFlag(MachineInstr::FrameSetup) &&
         !isPartOfSVECalleeSaves(AfterGPRSavesI)) {
    if (CombineSPBump &&
        // Only fix up frame-setup loads/stores, not VG materialization.
        (!AFL.requiresSaveVG(MF) || !isVGInstruction(AfterGPRSavesI, TLI)))
      fixupCalleeSaveRestoreStackOffset(*AfterGPRSavesI,
                                        AFI->getLocalStackSize());
    ++AfterGPRSavesI;
  }

  if (HasFP)
    emitFramePointerSetup(AfterGPRSavesI, DL, FixedObject);

  if (EmitAsyncCFI)
    emitCalleeSavedGPRLocations(AfterGPRSavesI);
  // Alignment is required for the parent frame, not the funclet.
  const bool NeedsRealignment =
      NumBytes && !IsFunclet && RegInfo.hasStackRealignment(MF);
  const int64_t RealignmentPadding =
      (NeedsRealignment && MFI.getMaxAlign() > Align(16))
          ? MFI.getMaxAlign().value() - 16
          : 0;

  if (AFL.windowsRequiresStackProbe(MF, NumBytes + RealignmentPadding))
    emitWindowsStackProbe(AfterGPRSavesI, DL, NumBytes, RealignmentPadding);
  StackOffset CFAOffset =
      StackOffset::getFixed((int64_t)MFI.getStackSize() - NumBytes);
  MachineBasicBlock::iterator AfterSVESavesI = AfterGPRSavesI;
  if (SVELayout != SVEStackLayout::CalleeSavesAboveFrameRecord) { // (guard reconstructed)
    auto [PPRRange, ZPRRange] = partitionSVECS(
        MBB, AfterGPRSavesI, PPR.CalleeSavesSize, ZPR.CalleeSavesSize,
        /*IsEpilogue=*/false);
    AfterSVESavesI = ZPRRange.End;
    if (EmitAsyncCFI)
      emitCalleeSavedSVELocations(AfterSVESavesI);

    allocateStackSpace(PPRRange.Begin, 0, SVEAllocs.BeforePPRs,
                       EmitAsyncCFI && !HasFP, CFAOffset,
                       MFI.hasVarSizedObjects() || SVEAllocs.AfterPPRs ||
                           SVEAllocs.AfterZPRs);
    CFAOffset += SVEAllocs.BeforePPRs;
    assert(PPRRange.End == ZPRRange.Begin &&
           "Expected ZPR callee saves after PPR locals");
    allocateStackSpace(PPRRange.End, 0, SVEAllocs.AfterPPRs,
                       EmitAsyncCFI && !HasFP, CFAOffset,
                       MFI.hasVarSizedObjects() || SVEAllocs.AfterZPRs);
    CFAOffset += SVEAllocs.AfterPPRs;
  }
  // Allocate the rest of the frame, including ZPR locals and realignment
  // padding; the remaining fixed-size locals are folded in here.
  SVEAllocs.AfterZPRs += StackOffset::getFixed(NumBytes); // (reconstructed)
  assert(!(AFL.canUseRedZone(MF) && NeedsRealignment) &&
         "Cannot use redzone with stack realignment");
  if (!AFL.canUseRedZone(MF)) {
    allocateStackSpace(AfterSVESavesI, RealignmentPadding, SVEAllocs.AfterZPRs,
                       EmitAsyncCFI && !HasFP, CFAOffset,
                       MFI.hasVarSizedObjects());
  }
  if (IsFunclet && F.hasPersonalityFn()) {
    EHPersonality Per = classifyEHPersonality(F.getPersonalityFn());
    if (isAsynchronousEHPersonality(Per)) {
      // The funclet's parent frame pointer arrives in X1; keep it live.
      MBB.addLiveIn(AArch64::X1);
      // ...
    }
  }

  if (EmitCFI && !EmitAsyncCFI) {
    // When unwinding past this point the whole prologue has executed, so
    // describe the final frame state in one batch here.
    if (HasFP) {
      emitDefineCFAWithFP(AfterSVESavesI, FixedObject);
    } else {
      StackOffset TotalSize =
          AFL.getSVEStackSize(MF) +
          StackOffset::getFixed((int64_t)MFI.getStackSize());
      CFIInstBuilder(MBB, AfterSVESavesI, MachineInstr::FrameSetup)
          .insertCFIInst(createDefCFA(RegInfo, AArch64::SP, AArch64::SP,
                                      TotalSize,
                                      /*LastAdjustmentWasScalable=*/false));
    }
    emitCalleeSavedGPRLocations(AfterSVESavesI);
    emitCalleeSavedSVELocations(AfterSVESavesI);
  }
}
void AArch64PrologueEmitter::emitShadowCallStackPrologue(
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL) const {
  // Shadow call stack prolog: str x30, [x18], #8
  BuildMI(MBB, MBBI, DL, TII->get(AArch64::STRXpost))
      .addReg(AArch64::X18, RegState::Define)
      .addReg(AArch64::LR)
      .addReg(AArch64::X18)
      .addImm(8)
      .setMIFlag(MachineInstr::FrameSetup);
  // This instruction also makes x18 live-in to the entry block.
  MBB.addLiveIn(AArch64::X18);

  if (EmitCFI) {
    // Emit a CFI instruction that causes 8 to be subtracted from the value of
    // x18 when unwinding past this frame.
    static const char CFIInst[] = {
        dwarf::DW_CFA_val_expression,
        18, // register
        2,  // length
        static_cast<char>(unsigned(dwarf::DW_OP_breg18)),
        static_cast<char>(-8) & 0x7f, // addend (SLEB128)
    };
    CFIInstBuilder(MBB, MBBI, MachineInstr::FrameSetup)
        .buildEscape(StringRef(CFIInst, sizeof(CFIInst)));
  }
}
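// Decoding the escape above: DW_CFA_val_expression says "the caller's value
// of register 18 is the result of this expression", the expression being the
// 2-byte sequence {DW_OP_breg18, -8}, i.e. current x18 minus 8. The addend
// -8 encodes as the single SLEB128 byte 0x78, which is exactly what
// `static_cast<char>(-8) & 0x7f` produces.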
void AArch64PrologueEmitter::emitSwiftAsyncContextFramePointer(
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL) const {
  switch (MF.getTarget().Options.SwiftAsyncFramePointer) {
  case SwiftAsyncFramePointerMode::DeploymentBased:
    if (Subtarget.swiftAsyncContextIsDynamicallySet()) {
      // The special symbol below is absolute and has a *value* that can be
      // combined with the frame pointer to signal an extended frame.
      // ...
      break;
    }
    [[fallthrough]];
  case SwiftAsyncFramePointerMode::Always:
    // Unconditionally tag the frame pointer (set bit 60).
    // ...
    break;
  case SwiftAsyncFramePointerMode::Never:
    break;
  }
}
void AArch64PrologueEmitter::emitEmptyStackFramePrologue(
    int64_t NumBytes, MachineBasicBlock::iterator PrologueBeginI,
    const DebugLoc &DL) const {
  assert(!HasFP && "unexpected function without stack frame but with FP");
  assert(!AFL.getSVEStackSize(MF) &&
         "unexpected function without stack frame but with SVE objects");
  // All of the stack allocation is for locals.
  AFI->setLocalStackSize(NumBytes);
  if (!NumBytes)
    return;
  // REDZONE: If the stack size is less than 128 bytes, we don't need
  // to actually allocate.
  if (AFL.canUseRedZone(MF)) {
    AFI->setHasRedZone(true);
    ++NumRedZoneFunctions;
  } else {
    emitFrameOffset(MBB, PrologueBeginI, DL, AArch64::SP, AArch64::SP,
                    StackOffset::getFixed(-NumBytes), TII,
                    MachineInstr::FrameSetup, false, NeedsWinCFI, &HasWinCFI);
    if (EmitCFI) {
      // Label used to tie together the PROLOG_LABEL and the MachineMoves.
      MCSymbol *FrameLabel = MF.getContext().createTempSymbol();
      // Encode the stack size of the leaf function.
      CFIInstBuilder(MBB, PrologueBeginI, MachineInstr::FrameSetup)
          .buildDefCFAOffset(NumBytes, FrameLabel);
    }
  }
  // ...
}
void AArch64PrologueEmitter::emitFramePointerSetup(
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
    unsigned FixedObject) {
  int64_t FPOffset = AFI->getCalleeSaveBaseToFrameRecordOffset();
  if (CombineSPBump)
    FPOffset += AFI->getLocalStackSize();

  if (AFI->hasSwiftAsyncContext()) {
    // Before we update the live FP we have to ensure there's a valid (or
    // null) asynchronous context in its slot just before FP in the frame
    // record, so store it now.
    const auto &Attrs = MF.getFunction().getAttributes();
    bool HaveInitialContext = Attrs.hasAttrSomewhere(Attribute::SwiftAsync);
    if (HaveInitialContext)
      MBB.addLiveIn(AArch64::X22);
    Register Reg = HaveInitialContext ? AArch64::X22 : AArch64::XZR;
    // str Reg, [sp, #(FPOffset - 8)]
    // ...
  }

  // Issue sub fp, sp, FPOffset (or mov fp, sp when FPOffset is zero).
  // ...
  if (EmitAsyncCFI)
    emitDefineCFAWithFP(MBBI, FixedObject);
}
void AArch64PrologueEmitter::emitDefineCFAWithFP(
    MachineBasicBlock::iterator MBBI, unsigned FixedObject) const {
  const int OffsetToFirstCalleeSaveFromFP =
      AFI->getCalleeSaveBaseToFrameRecordOffset() -
      AFI->getCalleeSavedStackSize();
  CFIInstBuilder(MBB, MBBI, MachineInstr::FrameSetup)
      .buildDefCFA(FramePtr, FixedObject - OffsetToFirstCalleeSaveFromFP);
}
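// Worked example (values illustrative): with a 32-byte callee-save area, the
// frame record at its base (CalleeSaveBaseToFrameRecordOffset = 0) and no
// fixed object, OffsetToFirstCalleeSaveFromFP = -32, so this emits
// .cfi_def_cfa w29, 32 -- the CFA sits 32 bytes above the frame pointer.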
void AArch64PrologueEmitter::emitWindowsStackProbe(
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL, int64_t NumBytes,
    int64_t RealignmentPadding) const {
  if (AFI->getSVECalleeSavedStackSize())
    report_fatal_error("SVE callee saves not yet supported with stack probing");

  // Find an available register to spill the value of X15 to, if X15 is being
  // used already for nest.
  unsigned X15Scratch = AArch64::NoRegister;
  if (llvm::any_of(MBB.liveins(),
                   [this](const MachineBasicBlock::RegisterMaskPair &LiveIn) {
                     return RegInfo.isSuperOrSubRegisterEq(AArch64::X15,
                                                           LiveIn.PhysReg);
                   })) {
    X15Scratch = AFL.findScratchNonCalleeSaveRegister(&MBB, true);
    assert(X15Scratch != AArch64::NoRegister &&
           (X15Scratch < AArch64::X15 || X15Scratch > AArch64::X17));
#ifndef NDEBUG
    LiveRegs.removeReg(AArch64::X15); // ignore X15 since we restore it
#endif
    // mov X15Scratch, x15
    // ...
  }
  uint64_t NumWords = (NumBytes + RealignmentPadding) >> 4;

  // The unwinder can only restore a bounded amount of stack.
  if (NumBytes >= (1 << 28))
    report_fatal_error("Stack size cannot exceed 256MB for stack "
                       "unwinding purposes");

  uint32_t LowNumWords = NumWords & 0xFFFF;
  BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVZXi), AArch64::X15)
      .addImm(LowNumWords)
      .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
      .setMIFlag(MachineInstr::FrameSetup);
  if ((NumWords & 0xFFFF0000) != 0) {
    BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVKXi), AArch64::X15)
        .addReg(AArch64::X15)
        .addImm((NumWords & 0xFFFF0000) >> 16)
        .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 16))
        .setMIFlag(MachineInstr::FrameSetup);
  }

  const AArch64TargetLowering *TLI = Subtarget.getTargetLowering();
  RTLIB::LibcallImpl ChkStkLibcall = TLI->getLibcallImpl(RTLIB::STACK_PROBE);
  if (ChkStkLibcall == RTLIB::Unsupported) {
    // ...
  }
  const char *ChkStk = TLI->getLibcallImplName(ChkStkLibcall).data();
  switch (MF.getTarget().getCodeModel()) {
  case CodeModel::Tiny:
  case CodeModel::Small:
  case CodeModel::Medium:
  case CodeModel::Kernel:
    BuildMI(MBB, MBBI, DL, TII->get(AArch64::BL))
        .addExternalSymbol(ChkStk)
        .addReg(AArch64::X15, RegState::Implicit)
        .setMIFlags(MachineInstr::FrameSetup);
    // ...
    break;
  case CodeModel::Large:
    // Materialize the address of the probe function, then BLR it.
    // ...
    break;
  }
  // sub sp, sp, x15, lsl #4 -- the actual allocation.
  // ...

  if (RealignmentPadding > 0) {
    if (RealignmentPadding >= 4096) {
      // mov x16, #RealignmentPadding; add x15, sp, x16, uxtx
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVi64imm), AArch64::X16)
          .addImm(RealignmentPadding)
          .setMIFlags(MachineInstr::FrameSetup);
      // ...
    } else {
      // add x15, sp, #RealignmentPadding
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::ADDXri), AArch64::X15)
          .addReg(AArch64::SP)
          .addImm(RealignmentPadding)
          .addImm(0)
          .setMIFlag(MachineInstr::FrameSetup);
    }

    uint64_t AndMask = ~(MFI.getMaxAlign().value() - 1);
    // and sp, x15, #AndMask
    // ...
    AFI->setStackRealigned(true);
  }

  if (X15Scratch != AArch64::NoRegister) {
    // mov x15, X15Scratch -- restore x15's original value
    // ...
  }
}
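// Protocol sketch for the Windows AArch64 probe: the requested size is passed
// to the probe function (__chkstk) in x15 in units of 16 bytes -- hence
// NumWords = bytes >> 4 and the MOVZ/MOVK pair building the word count. Per
// the Windows ABI the probe preserves everything except x16/x17 and NZCV,
// after which the prologue performs the real allocation with
//   sub sp, sp, x15, lsl #4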
void AArch64PrologueEmitter::emitCalleeSavedGPRLocations(
    MachineBasicBlock::iterator MBBI) const {
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
  if (CSI.empty())
    return;

  CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameSetup);
  for (const auto &Info : CSI) {
    unsigned FrameIdx = Info.getFrameIdx();
    if (MFI.hasScalableStackID(FrameIdx))
      continue;

    assert(!Info.isSpilledToReg() && "Spilling to registers not implemented");
    int64_t Offset = MFI.getObjectOffset(FrameIdx) - AFL.getOffsetOfLocalArea();
    CFIBuilder.buildOffset(Info.getReg(), Offset);
  }
}
void AArch64PrologueEmitter::emitCalleeSavedSVELocations(
    MachineBasicBlock::iterator MBBI) const {
  // Add callee saved registers to move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
  if (CSI.empty())
    return;

  CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameSetup);

  std::optional<int64_t> IncomingVGOffsetFromDefCFA;
  if (AFL.requiresSaveVG(MF)) {
    auto IncomingVG = *find_if(
        reverse(CSI), [](auto &Info) { return Info.getReg() == AArch64::VG; });
    IncomingVGOffsetFromDefCFA = MFI.getObjectOffset(IncomingVG.getFrameIdx()) -
                                 AFL.getOffsetOfLocalArea();
  }

  StackOffset PPRStackSize = AFL.getPPRStackSize(MF);
  for (const auto &Info : CSI) {
    int FI = Info.getFrameIdx();
    if (!MFI.hasScalableStackID(FI))
      continue;

    // Not all unwinders may know about SVE registers, so assume the lowest
    // common denominator.
    assert(!Info.isSpilledToReg() && "Spilling to registers not implemented");
    MCRegister Reg = Info.getReg();
    // ... (skip registers that need no CFI; compute the CFA-relative offset)
    StackOffset Offset; // (offset computation elided)
    CFIBuilder.insertCFIInst(
        createCFAOffset(RegInfo, Reg, Offset, IncomingVGOffsetFromDefCFA));
  }
}
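// Why the VG offset is threaded through: DWARF locations for SVE spills are
// CFA-relative offsets scaled by VG (the vector granule). An asynchronous
// unwinder cannot assume the current VG, so createCFAOffset can emit an
// expression that reloads the *incoming* VG from its save slot (found via
// IncomingVGOffsetFromDefCFA) when the function may change streaming mode.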
static bool isFuncletReturnInstr(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    return false;
  case AArch64::CATCHRET:
  case AArch64::CLEANUPRET:
    return true;
  }
}

AArch64EpilogueEmitter::AArch64EpilogueEmitter(MachineFunction &MF,
                                               MachineBasicBlock &MBB,
                                               const AArch64FrameLowering &AFL)
    : AArch64PrologueEpilogueCommon(MF, MBB, AFL) {
  EmitCFI = AFI->needsAsyncDwarfUnwindInfo(MF);
  SEHEpilogueStartI = MBB.end();
}
1339 "expected negative offset (with optional fixed portion)");
1341 if (int64_t FixedOffset =
Offset.getFixed()) {
void AArch64EpilogueEmitter::emitEpilogue() {
  MachineBasicBlock::iterator EpilogueEndI = MBB.getLastNonDebugInstr();
  if (MBB.end() != EpilogueEndI) {
    DL = EpilogueEndI->getDebugLoc();
    // ...
  }

  int64_t NumBytes = IsFunclet ? AFL.getWinEHFuncletFrameSize(MF)
                               : (int64_t)MFI.getStackSize();
  int64_t ArgumentStackToRestore = AFL.getArgumentStackToRestore(MF, MBB);
  bool IsWin64 = Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv(),
                                              MF.getFunction().isVarArg());
  unsigned FixedObject = AFL.getFixedObjectSize(MF, AFI, IsWin64, IsFunclet);

  int64_t AfterCSRPopSize = ArgumentStackToRestore;
  auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject;
  // We cannot rely on the local stack size set in emitPrologue if the
  // function has funclets, as funclets have different local stack sizes.
  if (MF.hasEHFunclets())
    AFI->setLocalStackSize(NumBytes - PrologueSaveSize);
  // Treat a homogeneous epilogue (HOM_Epilog) as the start of the restores.
  auto FirstHomogenousEpilogI = MBB.getFirstTerminator();
  if (FirstHomogenousEpilogI != MBB.begin()) {
    auto HomogeneousEpilog = std::prev(FirstHomogenousEpilogI);
    if (HomogeneousEpilog->getOpcode() == AArch64::HOM_Epilog)
      FirstHomogenousEpilogI = HomogeneousEpilog;
  }
  if (AFL.homogeneousPrologEpilog(MF, &MBB)) {
    assert(AfterCSRPopSize == 0);
    // ...
  }

  bool CombineSPBump = shouldCombineCSRLocalStackBump(NumBytes);
  unsigned ProloguePopSize = PrologueSaveSize;
  if (SVELayout == SVEStackLayout::CalleeSavesAboveFrameRecord) {
    // With this layout the fixed object is popped after the CSR restores, so
    // it is not part of the prologue pop.
    ProloguePopSize -= FixedObject;
    AfterCSRPopSize += FixedObject;
  }

  if (!CombineSPBump && ProloguePopSize != 0) {
    MachineBasicBlock::iterator Pop = std::prev(MBB.getFirstTerminator());
    while (Pop->getOpcode() == TargetOpcode::CFI_INSTRUCTION ||
           AArch64InstrInfo::isSEHInstruction(*Pop))
      Pop = std::prev(Pop);
    // Converting the last ldp to a post-index ldp is valid only if the last
    // ldp's offset is 0.
    const MachineOperand &OffsetOp = Pop->getOperand(Pop->getNumOperands() - 1);
    // If the offset is 0 and the AfterCSR pop is not actually trying to
    // allocate more stack for arguments (in space that an untimely interrupt
    // may clobber), convert it to a post-index ldp.
    if (OffsetOp.getImm() == 0 && AfterCSRPopSize >= 0) {
      convertCalleeSaveRestoreToSPPrePostIncDec(
          Pop, DL, ProloguePopSize, EmitCFI, MachineInstr::FrameDestroy,
          ProloguePopSize);
      auto AfterLastPop = std::next(Pop);
      if (AArch64InstrInfo::isSEHInstruction(*AfterLastPop)) {
        // ...
      }
    } else {
      // If not, emit an add after the last ldp by transferring the size to be
      // restored from the adjustment *before* the CSR pops to the adjustment
      // *after* them.
      AfterCSRPopSize += ProloguePopSize;
    }
  }
  // Move past the GPR restores of the callee-saved registers, fixing up the
  // offsets if the SP bump was combined with the local stack size.
  MachineBasicBlock::iterator FirstGPRRestoreI = MBB.getFirstTerminator();
  MachineBasicBlock::iterator Begin = MBB.begin();
  while (FirstGPRRestoreI != Begin) {
    --FirstGPRRestoreI;
    if (!FirstGPRRestoreI->getFlag(MachineInstr::FrameDestroy) ||
        isPartOfSVECalleeSaves(FirstGPRRestoreI)) {
      ++FirstGPRRestoreI;
      break;
    } else if (CombineSPBump)
      fixupCalleeSaveRestoreStackOffset(*FirstGPRRestoreI,
                                        AFI->getLocalStackSize());
  }

  if (NeedsWinCFI) {
    // Note that there are cases where we insert SEH opcodes in the epilogue
    // when we had none in the prologue.
    BuildMI(MBB, FirstGPRRestoreI, DL, TII->get(AArch64::SEH_EpilogStart))
        .setMIFlag(MachineInstr::FrameDestroy);
    SEHEpilogueStartI = FirstGPRRestoreI;
    --SEHEpilogueStartI;
  }
  auto [PPR, ZPR] = getSVEStackFrameSizes();
  auto [PPRRange, ZPRRange] = partitionSVECS(
      MBB,
      SVELayout == SVEStackLayout::CalleeSavesAboveFrameRecord
          ? MBB.getFirstTerminator()
          : FirstGPRRestoreI,
      PPR.CalleeSavesSize, ZPR.CalleeSavesSize, /*IsEpilogue=*/true);

  if (HasFP && AFI->hasSwiftAsyncContext())
    emitSwiftAsyncContextFramePointer(EpilogueEndI, DL);

  // If there is a single SP update, insert it before the ret and we're done.
  if (CombineSPBump) {
    assert(!AFI->hasSVEStackSize() && "Cannot combine SP bump with SVE");
    // One SP bump restores locals, callee saves and any argument stack.
    emitFrameOffset(MBB, MBB.getFirstTerminator(), DL, AArch64::SP, AArch64::SP,
                    StackOffset::getFixed(NumBytes + AfterCSRPopSize), TII,
                    MachineInstr::FrameDestroy);
    return finalizeEpilogue();
  }

  NumBytes -= PrologueSaveSize;
  assert(NumBytes >= 0 && "Negative stack allocation size!?");
  StackOffset SVECalleeSavesSize = ZPR.CalleeSavesSize + PPR.CalleeSavesSize;
  SVEStackAllocations SVEAllocs = getSVEStackAllocations({PPR, ZPR});

  if (SVELayout == SVEStackLayout::CalleeSavesAboveFrameRecord) {
    assert(!SVEAllocs.AfterPPRs &&
           "unexpected SVE allocs after PPRs with CalleeSavesAboveFrameRecord");
    if (!AFI->isStackRealigned() && !MFI.hasVarSizedObjects()) {
      // Deallocate the SVE area together with the rest of the frame.
      // ...
    }
    // ...
  } else if (AFI->hasSVEStackSize()) {
    // Deallocate the SVE area.
    Register BaseForSVEDealloc =
        (AFI->isStackRealigned() || MFI.hasVarSizedObjects()) ? AArch64::FP
                                                              : AArch64::SP;
    if (SVECalleeSavesSize && BaseForSVEDealloc == AArch64::FP) {
      // With a variable-sized frame, restore the SVE callee saves via FP:
      // first position SP for the ZPR restores...
      StackOffset FPOffsetZPR =
          -SVECalleeSavesSize - PPR.LocalsSize -
          StackOffset::getFixed(AFI->getCalleeSaveBaseToFrameRecordOffset());
      moveSPBelowFP(ZPRRange.Begin, FPOffsetZPR);
      // ...then for the PPR restores.
      StackOffset FPOffsetPPR = -PPR.CalleeSavesSize;
      assert(!FPOffsetPPR.getFixed() && "expected only scalable offset");
      // ...
    } else if (BaseForSVEDealloc == AArch64::SP) {
      if (SVECalleeSavesSize) {
        // Deallocate the non-SVE locals first.
        StackOffset NonSVELocals = StackOffset::getFixed(NumBytes);
        // ...
        NumBytes -= NonSVELocals.getFixed();
      }
      // ...
    }
    assert(PPRRange.Begin == ZPRRange.End &&
           "Expected PPR restores after ZPR");
  }

  if (EmitCFI)
    emitCalleeSavedSVERestores(PPRRange.End); // (insertion point reconstructed)
  bool RedZone = AFL.canUseRedZone(MF);
  // If this was a redzone leaf function, we don't need to restore the
  // stack pointer (but we may need to pop stack args for fastcc).
  if (RedZone && AfterCSRPopSize == 0)
    return finalizeEpilogue();

  // Pop the local variables off the stack. If there are no callee-saved
  // registers, it means we are actually positioned at the terminator and not
  // at FirstGPRRestoreI, which allows combining the local stack pop with the
  // argument stack pop.
  bool NoCalleeSaveRestore = PrologueSaveSize == 0;
  int64_t StackRestoreBytes = RedZone ? 0 : NumBytes;
  if (NoCalleeSaveRestore)
    StackRestoreBytes += AfterCSRPopSize;

  emitFrameOffset(MBB, FirstGPRRestoreI, DL, AArch64::SP, AArch64::SP,
                  StackOffset::getFixed(StackRestoreBytes), TII,
                  MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI);

  // If the local pop was combined with the argument pop, we're done.
  if (NoCalleeSaveRestore || AfterCSRPopSize == 0)
    return finalizeEpilogue();
  // Restore the original stack pointer via FP if the frame is variable-sized
  // or realigned, since SP can no longer be recovered arithmetically.
  if (!IsFunclet && (MFI.hasVarSizedObjects() || AFI->isStackRealigned())) {
    emitFrameOffset(
        MBB, FirstGPRRestoreI, DL, AArch64::SP, AArch64::FP,
        StackOffset::getFixed(-AFI->getCalleeSaveBaseToFrameRecordOffset()),
        TII, MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI);
  } else if (NumBytes)
    emitFrameOffset(MBB, FirstGPRRestoreI, DL, AArch64::SP, AArch64::SP,
                    StackOffset::getFixed(NumBytes), TII,
                    MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI);

  // This must be placed after the callee-save restore code, because that
  // code assumes the SP is at the same location as it was after the
  // callee-save save code in the prologue.
  if (AfterCSRPopSize) {
    assert(AfterCSRPopSize > 0 && "attempting to reallocate arg stack that an "
                                  "interrupt may have clobbered");
    emitFrameOffset(MBB, MBB.getFirstTerminator(), DL, AArch64::SP, AArch64::SP,
                    StackOffset::getFixed(AfterCSRPopSize), TII,
                    MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI);
  }
  finalizeEpilogue();
}
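// Ordering note: the argument-stack reallocation is emitted at the
// terminator, after every callee-save restore, because an interrupt arriving
// between the SP bump and the restores could otherwise clobber the space the
// restores still read from -- the same hazard behind the AfterCSRPopSize >= 0
// check that refuses to fold a negative adjustment into the CSR pop.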
bool AArch64EpilogueEmitter::shouldCombineCSRLocalStackBump(
    uint64_t StackBumpBytes) const {
  if (!AArch64PrologueEpilogueCommon::shouldCombineCSRLocalStackBump(
          StackBumpBytes))
    return false;
  if (MBB.empty())
    return true;

  // Disable combined SP bump if the last instruction is an MTE tag store; it
  // is almost always better to merge the SP adjustment into those.
  MachineBasicBlock::iterator LastI = MBB.getFirstTerminator();
  MachineBasicBlock::iterator Begin = MBB.begin();
  while (LastI != Begin) {
    --LastI;
    if (LastI->isTransient())
      continue;
    if (!LastI->getFlag(MachineInstr::FrameDestroy))
      break;
  }
  switch (LastI->getOpcode()) {
  case AArch64::STGloop:
  case AArch64::STZGloop:
  case AArch64::STGi:
  case AArch64::STZGi:
  case AArch64::ST2Gi:
  case AArch64::STZ2Gi:
    return false;
  default:
    return true;
  }
}
void AArch64EpilogueEmitter::emitSwiftAsyncContextFramePointer(
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL) const {
  switch (MF.getTarget().Options.SwiftAsyncFramePointer) {
  case SwiftAsyncFramePointerMode::DeploymentBased:
    // ...
    [[fallthrough]];
  case SwiftAsyncFramePointerMode::Always:
    // Clear the extended-frame bit of FP before returning, mirroring the
    // prologue.
    // ...
    break;
  case SwiftAsyncFramePointerMode::Never:
    break;
  }
}
void AArch64EpilogueEmitter::emitShadowCallStackEpilogue(
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL) const {
  // Shadow call stack epilog: ldr x30, [x18, #-8]!
  BuildMI(MBB, MBBI, DL, TII->get(AArch64::LDRXpre))
      .addReg(AArch64::X18, RegState::Define)
      .addReg(AArch64::LR, RegState::Define)
      .addReg(AArch64::X18)
      .addImm(-8)
      .setMIFlag(MachineInstr::FrameDestroy);

  if (AFI->needsAsyncDwarfUnwindInfo(MF))
    CFIInstBuilder(MBB, MBBI, MachineInstr::FrameDestroy)
        .buildRestore(AArch64::X18);
}
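// The pair of operations kept in sync here (illustrative):
//   prologue:  str x30, [x18], #8    ; push LR onto the shadow call stack
//   epilogue:  ldr x30, [x18, #-8]!  ; pop it back, pre-decrementing x18
// so x18 always points one slot past the top of the shadow stack.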
void AArch64EpilogueEmitter::emitCalleeSavedRestores(
    MachineBasicBlock::iterator MBBI, bool SVE) const {
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
  if (CSI.empty())
    return;

  CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameDestroy);
  for (const auto &Info : CSI) {
    if (SVE != MFI.hasScalableStackID(Info.getFrameIdx()))
      continue;

    MCRegister Reg = Info.getReg();
    // ... (SVE registers unknown to the unwinder are skipped here)
    CFIBuilder.buildRestore(Info.getReg());
  }
}
void AArch64EpilogueEmitter::finalizeEpilogue() const {
  if (AFI->needsShadowCallStackPrologueEpilogue(MF)) {
    emitShadowCallStackEpilogue(MBB.getFirstTerminator(), DL);
    // ...
  }
  if (EmitCFI)
    emitCalleeSavedGPRRestores(MBB.getFirstTerminator());
  if (AFI->shouldSignReturnAddress(MF)) {
    // If pac-ret+leaf is in effect, the PAUTH_EPILOGUE pseudo is inserted by
    // a separate pass; only emit it here otherwise.
    if (!AFL.shouldSignReturnAddressEverywhere(MF)) {
      BuildMI(MBB, MBB.getFirstTerminator(), DL,
              TII->get(AArch64::PAUTH_EPILOGUE))
          .setMIFlag(MachineInstr::FrameDestroy);
    }
    // ...
  }
  if (HasWinCFI) {
    BuildMI(MBB, MBB.getFirstTerminator(), DL,
            TII->get(AArch64::SEH_EpilogEnd))
        .setMIFlag(MachineInstr::FrameDestroy);
    if (!MF.hasWinCFI())
      MF.setHasWinCFI(true);
  }
  if (NeedsWinCFI) {
    assert(SEHEpilogueStartI != MBB.end());
    if (!HasWinCFI)
      MBB.erase(SEHEpilogueStartI);
  }
}