#define DEBUG_TYPE "hexagon-pei"
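// The cl::opt flags below tune frame lowering: the scavenger slot count, the
// O2/Os thresholds above which the out-of-line save/restore routines are
// preferred, shrink-wrapping and its debug limit, long calls for the
// save/restore stubs, and the spill-slot optimization cap.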
static cl::opt<unsigned> NumberScavengerSlots("number-scavenger-slots",
    cl::Hidden, cl::desc("Set the number of scavenger slots"), cl::init(2));

static cl::opt<int> SpillFuncThreshold("spill-func-threshold", cl::Hidden,
    cl::desc("Specify O2(not Os) spill func threshold"), cl::init(6));

static cl::opt<int> SpillFuncThresholdOs("spill-func-threshold-Os", cl::Hidden,
    cl::desc("Specify Os spill func threshold"), cl::init(1));

static cl::opt<bool> EnableShrinkWrapping("hexagon-shrink-frame",
    cl::init(true), cl::Hidden, cl::desc("Enable stack frame shrink wrapping"));

static cl::opt<unsigned> ShrinkLimit("shrink-frame-limit",
    cl::init(std::numeric_limits<unsigned>::max()), cl::Hidden,
    cl::desc("Max count of stack frame shrink-wraps"));

static cl::opt<bool> EnableSaveRestoreLong("enable-save-restore-long",
    cl::Hidden, cl::desc("Enable long calls for save-restore stubs."),
    cl::init(false));

static cl::opt<unsigned> SpillOptMax("spill-opt-max", cl::Hidden,
    cl::init(std::numeric_limits<unsigned>::max()));
          MachineFunctionProperties::Property::NoVRegs);

char HexagonCallFrameInformation::ID = 0;

bool HexagonCallFrameInformation::runOnMachineFunction(MachineFunction &MF) {
  HFI.insertCFIInstructions(MF);

                "Hexagon call frame information", false, false)

  return new HexagonCallFrameInformation();

  if (Reg < Hexagon::D0 || Reg > Hexagon::D15)
    if (!RegNo || SubReg < RegNo)

  static_assert(Hexagon::R1 > 0,
                "Assume physical registers are encoded as positive integers");

  for (unsigned I = 1, E = CSI.size(); I < E; ++I) {

    unsigned Opc = MI.getOpcode();
      case Hexagon::PS_alloca:
      case Hexagon::PS_aligna:

      for (MCPhysReg S : HRI.subregs_inclusive(R))
    if (MO.isRegMask()) {
      const uint32_t *BM = MO.getRegMask();
        if (!(BM[R/32] & (1u << (R%32))))

  unsigned RetOpc = I->getOpcode();
  return RetOpc == Hexagon::PS_tailcall_i || RetOpc == Hexagon::PS_tailcall_r;

    case Hexagon::RESTORE_DEALLOC_RET_JMP_V4:
    case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC:
    case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT:
    case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC:
    case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT:
    case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT_PIC:
    case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4:
    case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC:

  return F.hasOptSize() && !F.hasMinSize();

  static unsigned ShrinkCounter = 0;

    RPO[I->getNumber()] = RPON++;
    unsigned BN = RPO[I.getNumber()];
      if (RPO[Succ->getNumber()] <= BN)
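      // A successor whose reverse-post-order number is not greater than the
      // current block's indicates a back edge, i.e. the CFG has a loop;
      // shrink-wrapping is presumably abandoned in that case so the
      // prologue/epilogue cannot end up inside a loop.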
  for (const MCPhysReg *P = HRI.getCalleeSavedRegs(&MF); *P; ++P)

    dbgs() << "Blocks needing SF: {";
    for (auto &B : SFBlocks)

  if (SFBlocks.empty())

  for (unsigned i = 1, n = SFBlocks.size(); i < n; ++i) {
  for (unsigned i = 1, n = SFBlocks.size(); i < n; ++i) {

    dbgs() << "Computed dom block: ";
    dbgs() << ", computed pdom block: ";

    LLVM_DEBUG(dbgs() << "PDom block does not post-dominate dom block\n");

    findShrunkPrologEpilog(MF, PrologB, EpilogB);

  bool PrologueStubs = false;
  insertCSRSpillsInBlock(*PrologB, CSI, HRI, PrologueStubs);
  insertPrologueInBlock(*PrologB, PrologueStubs);
  updateEntryPaths(MF, *PrologB);

    insertCSRRestoresInBlock(*EpilogB, CSI, HRI);
    insertEpilogueInBlock(*EpilogB);

      if (B.isReturnBlock())
        insertCSRRestoresInBlock(B, CSI, HRI);
      if (B.isReturnBlock())
        insertEpilogueInBlock(B);

    BitVector DoneT(MaxBN+1), DoneF(MaxBN+1), Path(MaxBN+1);
    updateExitPaths(*EpilogB, *EpilogB, DoneT, DoneF, Path);

  assert(F.hasFnAttribute(Attribute::NoReturn) &&
         F.getFunction().hasFnAttribute(Attribute::NoUnwind) &&
         !F.getFunction().hasFnAttribute(Attribute::UWTable));

  assert(!MFI.hasVarSizedObjects() &&
         !HST.getRegisterInfo()->hasStackRealignment(MF));
  return F.hasFnAttribute(Attribute::NoReturn) &&
         F.hasFnAttribute(Attribute::NoUnwind) &&
         !F.hasFnAttribute(Attribute::UWTable) && HST.noreturnStackElim() &&
         MFI.getStackSize() == 0;
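// The noreturn/nounwind checks above allow skipping callee-saved spills or
// the allocframe only when the function cannot return or unwind, has no
// unwind table, the subtarget enables noreturnStackElim(), and the frame is
// otherwise empty.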
      bool PrologueStubs) const {
  auto &HRI = *HST.getRegisterInfo();

    FrameSize = MaxCFA + alignTo(FrameSize, MaxAlign);

  Register SP = HRI.getStackRegister();

      if (MI.getOpcode() == Hexagon::PS_alloca)

  for (auto *MI : AdjustRegs) {
    assert((MI->getOpcode() == Hexagon::PS_alloca) && "Expected alloca");
    expandAlloca(MI, HII, SP, MaxCF);
    MI->eraseFromParent();

  if (MF.getFunction().isVarArg() &&
    int RegisterSavedAreaSizePlusPadding = (NumVarArgRegs % 2 == 0)
                                               ? NumVarArgRegs * 4
                                               : NumVarArgRegs * 4 + 4;
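    // Each unnamed variadic argument register takes 4 bytes in the
    // register-saved area; an odd register count gets 4 extra bytes of
    // padding, which keeps the area size a multiple of 8.  A standalone
    // sketch of that rounding (not part of the original file):
    //
    //   unsigned regSavedAreaSize(unsigned NumVarArgRegs) {
    //     unsigned Bytes = NumVarArgRegs * 4;
    //     return (NumVarArgRegs % 2 == 0) ? Bytes : Bytes + 4;  // e.g. 3 -> 16
    //   }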
    if (RegisterSavedAreaSizePlusPadding > 0) {
      BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)
          .addImm(-RegisterSavedAreaSizePlusPadding)

      for (int i = HMFI.getFirstNamedArgFrameIndex(),
               e = HMFI.getLastNamedArgFrameIndex(); i >= e; --i) {
        unsigned LDOpc, STOpc;

        if (ObjAlign > ObjSize) {
          else if (ObjSize <= 2)
          else if (ObjSize <= 4)
          else if (ObjSize > 4)

        switch (OpcodeChecker) {
            LDOpc = Hexagon::L2_loadrb_io;
            STOpc = Hexagon::S2_storerb_io;
            LDOpc = Hexagon::L2_loadrh_io;
            STOpc = Hexagon::S2_storerh_io;
            LDOpc = Hexagon::L2_loadri_io;
            STOpc = Hexagon::S2_storeri_io;
            LDOpc = Hexagon::L2_loadrd_io;
            STOpc = Hexagon::S2_storerd_io;

        Register RegUsed = LDOpc == Hexagon::L2_loadrd_io ? Hexagon::D3
        int LoadStoreCount = ObjSize / OpcodeChecker;
        if (ObjSize % OpcodeChecker)

        NumBytes = alignTo(NumBytes, ObjAlign);

        while (Count < LoadStoreCount) {
          BuildMI(MBB, InsertPt, dl, HII.get(LDOpc), RegUsed)
              .addImm(RegisterSavedAreaSizePlusPadding +
                      ObjAlign.value() * Count + NumBytes)

      NumBytes = alignTo(NumBytes, 8);
      NumBytes = (NumVarArgRegs % 2 == 0) ? NumBytes : NumBytes + 4;

        BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::S2_storeri_io))

    insertAllocframe(MBB, InsertPt, NumBytes);

      BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_andir), SP)
      BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::PS_call_stk))
  } else if (NumBytes > 0) {
    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)

  auto &HRI = *HST.getRegisterInfo();
  Register SP = HRI.getStackRegister();

    int RegisterSavedAreaSizePlusPadding = (NumVarArgRegs % 2 == 0) ?
      (NumVarArgRegs * 4) : (NumVarArgRegs * 4 + 4);
    NumBytes += RegisterSavedAreaSizePlusPadding;
    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)

  unsigned RetOpc = RetI ? RetI->getOpcode() : 0;

  if (RetOpc == Hexagon::EH_RETURN_JMPR) {
    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::L2_deallocframe))
    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_add), SP)

  if (RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4 ||
      RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC ||
      RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT ||
      RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC) {

  bool NeedsDeallocframe = true;
      unsigned COpc = PrevIt->getOpcode();
      if (COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4 ||
          COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC ||
          COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT ||
          COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT_PIC ||
          COpc == Hexagon::PS_call_nr || COpc == Hexagon::PS_callr_nr)
        NeedsDeallocframe = false;

    if (!NeedsDeallocframe)
    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::L2_deallocframe))
  unsigned NewOpc = Hexagon::L4_return;

    int RegisterSavedAreaSizePlusPadding = (NumVarArgRegs % 2 == 0) ?
      (NumVarArgRegs * 4) : (NumVarArgRegs * 4 + 4);

        (I->getOpcode() != Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT &&
         I->getOpcode() != Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT_PIC &&
         I->getOpcode() != Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4 &&
         I->getOpcode() != Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC))
      BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::L2_deallocframe))

    if (RegisterSavedAreaSizePlusPadding != 0)
      BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)
          .addImm(RegisterSavedAreaSizePlusPadding);

  auto &HRI = *HST.getRegisterInfo();
  const unsigned int ALLOCFRAME_MAX = 16384;
  Register SP = HRI.getStackRegister();

  if (NumBytes >= ALLOCFRAME_MAX) {
    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::S2_allocframe))

    Register SP = HRI.getStackRegister();
    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)

    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::S2_allocframe))
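  // S2_allocframe encodes the frame size as a limited unsigned immediate, so
  // for frames of ALLOCFRAME_MAX (16384) bytes or more the code above appears
  // to emit allocframe with a zero immediate and subtract the full size from
  // SP with a separate A2_addi; smaller frames use a single allocframe.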
  for (unsigned i = 0; i < Worklist.size(); ++i) {
    unsigned BN = Worklist[i];
        Worklist.insert(SB->getNumber());

  if (Path[BN] || DoneF[BN])

  bool ReachedExit = false;
    ReachedExit |= updateExitPaths(*SB, RestoreB, DoneT, DoneF, Path);

  if (ReachedExit && &MBB != &RestoreB) {

static std::optional<MachineBasicBlock::iterator>
findCFILocation(MachineBasicBlock &B) {
  auto End = B.instr_end();

    if (!I.isBundle()) {
      if (I.getOpcode() == Hexagon::S2_allocframe)
        return std::next(It);

    bool HasCall = false, HasAllocFrame = false;
    while (++T != End && T->isBundled()) {
      if (T->getOpcode() == Hexagon::S2_allocframe)
        HasAllocFrame = true;
      else if (T->isCall())

      return HasCall ? It : std::next(It);

  return std::nullopt;
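// findCFILocation: the fragments above suggest it returns the point right
// after an S2_allocframe (or the bundle itself when the bundle also contains
// a call) where CFI instructions should be inserted, and std::nullopt when no
// allocframe is found in the block.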
    insertCFIInstructionsAt(B, *At);

  auto &HRI = *HST.getRegisterInfo();
  const MCInstrDesc &CFID = HII.get(TargetOpcode::CFI_INSTRUCTION);

  bool HasFP = hasFP(MF);
  unsigned DwFPReg = HRI.getDwarfRegNum(HRI.getFrameRegister(), true);
  unsigned DwRAReg = HRI.getDwarfRegNum(HRI.getRARegister(), true);

    Hexagon::R1,  Hexagon::R0,  Hexagon::R3,  Hexagon::R2,
    Hexagon::R17, Hexagon::R16, Hexagon::R19, Hexagon::R18,
    Hexagon::R21, Hexagon::R20, Hexagon::R23, Hexagon::R22,
    Hexagon::R25, Hexagon::R24, Hexagon::R27, Hexagon::R26,
    Hexagon::D0,  Hexagon::D1,  Hexagon::D8,  Hexagon::D9,
    Hexagon::D10, Hexagon::D11, Hexagon::D12, Hexagon::D13,

  for (unsigned i = 0; RegsToMove[i] != Hexagon::NoRegister; ++i) {
      return C.getReg() == Reg;

    if (Reg < Hexagon::D0 || Reg > Hexagon::D15) {
      unsigned DwarfReg = HRI.getDwarfRegNum(Reg, true);

      Register HiReg = HRI.getSubReg(Reg, Hexagon::isub_hi);
      Register LoReg = HRI.getSubReg(Reg, Hexagon::isub_lo);
      unsigned HiDwarfReg = HRI.getDwarfRegNum(HiReg, true);
      unsigned LoDwarfReg = HRI.getDwarfRegNum(LoReg, true);

  bool HasExtraAlign = HRI.hasStackRealignment(MF);
    if (HasAlloca || HasExtraAlign)

      bool Stkchk = false) {
  const char * V4SpillToMemoryFunctions[] = {
    "__save_r16_through_r17",
    "__save_r16_through_r19",
    "__save_r16_through_r21",
    "__save_r16_through_r23",
    "__save_r16_through_r25",
    "__save_r16_through_r27" };

  const char * V4SpillToMemoryStkchkFunctions[] = {
    "__save_r16_through_r17_stkchk",
    "__save_r16_through_r19_stkchk",
    "__save_r16_through_r21_stkchk",
    "__save_r16_through_r23_stkchk",
    "__save_r16_through_r25_stkchk",
    "__save_r16_through_r27_stkchk" };

  const char * V4SpillFromMemoryFunctions[] = {
    "__restore_r16_through_r17_and_deallocframe",
    "__restore_r16_through_r19_and_deallocframe",
    "__restore_r16_through_r21_and_deallocframe",
    "__restore_r16_through_r23_and_deallocframe",
    "__restore_r16_through_r25_and_deallocframe",
    "__restore_r16_through_r27_and_deallocframe" };
  const char * V4SpillFromMemoryTailcallFunctions[] = {
    "__restore_r16_through_r17_and_deallocframe_before_tailcall",
    "__restore_r16_through_r19_and_deallocframe_before_tailcall",
    "__restore_r16_through_r21_and_deallocframe_before_tailcall",
    "__restore_r16_through_r23_and_deallocframe_before_tailcall",
    "__restore_r16_through_r25_and_deallocframe_before_tailcall",
    "__restore_r16_through_r27_and_deallocframe_before_tailcall"
  };

  const char **SpillFunc = nullptr;

    SpillFunc = Stkchk ? V4SpillToMemoryStkchkFunctions
                       : V4SpillToMemoryFunctions;
    SpillFunc = V4SpillFromMemoryFunctions;
    SpillFunc = V4SpillFromMemoryTailcallFunctions;

  assert(SpillFunc && "Unknown spill kind");

      return SpillFunc[0];
      return SpillFunc[1];
      return SpillFunc[2];
      return SpillFunc[3];
      return SpillFunc[4];
      return SpillFunc[5];
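// getSpillFunctionFor picks one of the runtime save/restore routines listed
// above based on the highest callee-saved register (r17 through r27 selects
// index 0 through 5) and on the spill kind: save to memory (optionally with a
// stack check) versus restore with deallocframe, plain or before a tail call.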
  bool HasExtraAlign = HRI.hasStackRealignment(MF);
  Register SP = HRI.getStackRegister();
  Register AP = HMFI.getStackAlignBaseReg();

  bool UseFP = false, UseAP = false;
  if (NoOpt && !HasExtraAlign)
    UseFP |= (HasAlloca || HasExtraAlign);

  bool HasFP = hasFP(MF);
  assert((HasFP || !UseFP) && "This function must have frame pointer");

    if (Offset > 0 && !HasFP)

  if (!UseFP && !UseAP)
    RealOffset = FrameSize + Offset;

      bool &PrologueStubs) const {
  PrologueStubs = false;

  if (useSpillFunction(MF, CSI)) {
    PrologueStubs = true;

    if (StkOvrFlowEnabled) {
        SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4STK_EXT_PIC
                         : Hexagon::SAVE_REGISTERS_CALL_V4STK_EXT;
        SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4STK_PIC
                         : Hexagon::SAVE_REGISTERS_CALL_V4STK;
        SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4_EXT_PIC
                         : Hexagon::SAVE_REGISTERS_CALL_V4_EXT;
        SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4_PIC
                         : Hexagon::SAVE_REGISTERS_CALL_V4;

    addCalleeSaveRegistersAsImpOperand(SaveRegsCall, CSI, false, true);

    int FI = I.getFrameIdx();
    HII.storeRegToStackSlot(MBB, MI, Reg, IsKill, FI, RC, &HRI, Register());

  if (useRestoreFunction(MF, CSI)) {
    bool IsPIC = HTM.isPositionIndependent();

      RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT_PIC
                     : Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT;
      RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC
                     : Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4;
      RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC
                     : Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT;
      RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC
                     : Hexagon::RESTORE_DEALLOC_RET_JMP_V4;

    addCalleeSaveRegistersAsImpOperand(DeallocCall, CSI, true, false);

    int FI = I.getFrameIdx();
    HII.loadRegFromStackSlot(MBB, MI, Reg, FI, RC, &HRI, Register());

  unsigned Opc = MI.getOpcode();
  assert((Opc == Hexagon::ADJCALLSTACKDOWN || Opc == Hexagon::ADJCALLSTACKUP) &&
         "Cannot handle this call frame pseudo instruction");

  if (!HasAlloca || !NeedsAlign)
    AP = AI->getOperand(0).getReg();
  HMFI.setStackAlignBaseReg(AP);

  auto IsUsed = [&HRI, &MRI] (Register Reg) -> bool {
      if (MRI.isPhysRegUsed(*AI))

  BitVector SRegs(Hexagon::NUM_TARGET_REGS);

  bool HasResSub = false;

  BitVector TmpSup(Hexagon::NUM_TARGET_REGS);
  for (int x = TmpSup.find_first(); x >= 0; x = TmpSup.find_next(x)) {

  for (const SpillSlot *S = FixedSlots; S != FixedSlots+NumFixed; ++S) {
    MinOffset = std::min(MinOffset, S->Offset);
    SRegs[S->Reg] = false;

    unsigned Size = TRI->getSpillSize(*RC);
    int Off = MinOffset - Size;
    Off &= -Alignment.value();
    MinOffset = std::min(MinOffset, Off);
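  // Callee-saved registers without a fixed slot get spill slots laid out
  // downward from the lowest fixed offset: each new slot is Size bytes below
  // MinOffset and then rounded down to its alignment (Off &= -Alignment),
  // after which MinOffset is updated.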
    dbgs() << "CS information: {";
      int FI = I.getFrameIdx();

  bool MissedReg = false;

    if (!Hexagon::ModRegsRegClass.contains(DstR) ||
        !Hexagon::ModRegsRegClass.contains(SrcR))

    Register TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
    BuildMI(B, It, DL, HII.get(TargetOpcode::COPY), TmpR).add(MI->getOperand(1));
    BuildMI(B, It, DL, HII.get(TargetOpcode::COPY), DstR)

  if (!MI->getOperand(0).isFI())

  unsigned Opc = MI->getOpcode();
  bool IsKill = MI->getOperand(2).isKill();
  int FI = MI->getOperand(0).getIndex();

    Register TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
    unsigned TfrOpc = (Opc == Hexagon::STriw_pred) ? Hexagon::C2_tfrpr
                                                   : Hexagon::A2_tfrcrr;
    BuildMI(B, It, DL, HII.get(Hexagon::S2_storeri_io))

  if (!MI->getOperand(1).isFI())

  unsigned Opc = MI->getOpcode();
  int FI = MI->getOperand(1).getIndex();

    Register TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
    BuildMI(B, It, DL, HII.get(Hexagon::L2_loadri_io), TmpR)

    unsigned TfrOpc = (Opc == Hexagon::LDriw_pred) ? Hexagon::C2_tfrrp
                                                   : Hexagon::A2_tfrrcr;

  if (!MI->getOperand(0).isFI())

  bool IsKill = MI->getOperand(2).isKill();
  int FI = MI->getOperand(0).getIndex();
  auto *RC = &Hexagon::HvxVRRegClass;

  Register TmpR0 = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);

  BuildMI(B, It, DL, HII.get(Hexagon::A2_tfrsi), TmpR0)
  BuildMI(B, It, DL, HII.get(Hexagon::V6_vandqrt), TmpR1)
  expandStoreVec(B, std::prev(It), MRI, HII, NewRegs);

  if (!MI->getOperand(1).isFI())

  int FI = MI->getOperand(1).getIndex();
  auto *RC = &Hexagon::HvxVRRegClass;

  Register TmpR0 = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);

  BuildMI(B, It, DL, HII.get(Hexagon::A2_tfrsi), TmpR0)
  expandLoadVec(B, std::prev(It), MRI, HII, NewRegs);
  BuildMI(B, It, DL, HII.get(Hexagon::V6_vandvrt), DstR)

  if (!MI->getOperand(0).isFI())

    for (auto R = B.begin(); R != It; ++R) {
      LPR.stepForward(*R, Clobbers);

  Register SrcLo = HRI.getSubReg(SrcR, Hexagon::vsub_lo);
  Register SrcHi = HRI.getSubReg(SrcR, Hexagon::vsub_hi);
  bool IsKill = MI->getOperand(2).isKill();
  int FI = MI->getOperand(0).getIndex();

  unsigned Size = HRI.getSpillSize(Hexagon::HvxVRRegClass);
  Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);

  if (LPR.contains(SrcLo)) {
    StoreOpc = NeedAlign <= HasAlign ? Hexagon::V6_vS32b_ai
                                     : Hexagon::V6_vS32Ub_ai;
  if (LPR.contains(SrcHi)) {
    StoreOpc = NeedAlign <= HasAlign ? Hexagon::V6_vS32b_ai
                                     : Hexagon::V6_vS32Ub_ai;

  if (!MI->getOperand(1).isFI())

  Register DstHi = HRI.getSubReg(DstR, Hexagon::vsub_hi);
  Register DstLo = HRI.getSubReg(DstR, Hexagon::vsub_lo);
  int FI = MI->getOperand(1).getIndex();

  unsigned Size = HRI.getSpillSize(Hexagon::HvxVRRegClass);
  Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);

    LoadOpc = NeedAlign <= HasAlign ? Hexagon::V6_vL32b_ai
                                    : Hexagon::V6_vL32Ub_ai;
    LoadOpc = NeedAlign <= HasAlign ? Hexagon::V6_vL32b_ai
                                    : Hexagon::V6_vL32Ub_ai;

  if (!MI->getOperand(0).isFI())

  bool IsKill = MI->getOperand(2).isKill();
  int FI = MI->getOperand(0).getIndex();

  Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
  unsigned StoreOpc = NeedAlign <= HasAlign ? Hexagon::V6_vS32b_ai
                                            : Hexagon::V6_vS32Ub_ai;

  if (!MI->getOperand(1).isFI())

  int FI = MI->getOperand(1).getIndex();
  Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
  unsigned LoadOpc = NeedAlign <= HasAlign ? Hexagon::V6_vL32b_ai
                                           : Hexagon::V6_vL32Ub_ai;
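// For the HVX spill expansions above, the aligned vector memory forms
// (V6_vS32b_ai / V6_vL32b_ai) are chosen when the slot's known alignment
// (HasAlign) satisfies the class's required spill alignment (NeedAlign);
// otherwise the unaligned forms (V6_vS32Ub_ai / V6_vL32Ub_ai) are used.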
  bool Changed = false;

  for (auto &B : MF) {
    for (auto I = B.begin(), E = B.end(); I != E; I = NextI) {
      NextI = std::next(I);
      unsigned Opc = MI->getOpcode();

        case TargetOpcode::COPY:
          Changed |= expandCopy(B, I, MRI, HII, NewRegs);
        case Hexagon::STriw_pred:
        case Hexagon::STriw_ctr:
          Changed |= expandStoreInt(B, I, MRI, HII, NewRegs);
        case Hexagon::LDriw_pred:
        case Hexagon::LDriw_ctr:
          Changed |= expandLoadInt(B, I, MRI, HII, NewRegs);
        case Hexagon::PS_vstorerq_ai:
          Changed |= expandStoreVecPred(B, I, MRI, HII, NewRegs);
        case Hexagon::PS_vloadrq_ai:
          Changed |= expandLoadVecPred(B, I, MRI, HII, NewRegs);
        case Hexagon::PS_vloadrw_ai:
          Changed |= expandLoadVec2(B, I, MRI, HII, NewRegs);
        case Hexagon::PS_vstorerw_ai:
          Changed |= expandStoreVec2(B, I, MRI, HII, NewRegs);

  SavedRegs.resize(HRI.getNumRegs());

  expandSpillMacros(MF, NewRegs);
    optimizeSpillSlots(MF, NewRegs);

  if (!NewRegs.empty() || mayOverflowFrameOffset(MF)) {
    SpillRCs.insert(&Hexagon::IntRegsRegClass);

    for (const auto *RC : SpillRCs) {
      switch (RC->getID()) {
        case Hexagon::IntRegsRegClassID:
        case Hexagon::HvxQRRegClassID:

      unsigned S = HRI.getSpillSize(*RC);
      Align A = HRI.getSpillAlign(*RC);
      for (unsigned i = 0; i < Num; i++) {
  auto isDead = [&FIR, &DeadMap] (Register Reg) -> bool {
    auto F = DeadMap.find({Reg,0});
    if (F == DeadMap.end())
    for (auto &DR : F->second)
      if (DR.contains(FIR))

  auto &HII = *HST.getInstrInfo();
  auto &HRI = *HST.getRegisterInfo();

  using BlockIndexMap =
      std::map<MachineBasicBlock *, HexagonBlockRanges::InstrIndexMap>;
  using BlockRangeMap =
      std::map<MachineBasicBlock *, HexagonBlockRanges::RangeList>;

    SlotInfo() = default;

  BlockIndexMap BlockIndexes;
  std::map<int,SlotInfo> FIRangeMap;

    if (HaveRC == nullptr || HaveRC == NewRC)
    if (NewRC->hasSubClassEq(HaveRC))

  for (auto &B : MF) {
    std::map<int,IndexType> LastStore, LastLoad;
    auto P = BlockIndexes.insert(
    auto &IndexMap = P.first->second;
                          << IndexMap << '\n');

    for (auto &In : B) {
      bool Load = HII.isLoadFromStackSlot(In, LFI) && !HII.isPredicated(In);
      bool Store = HII.isStoreToStackSlot(In, SFI) && !HII.isPredicated(In);
      if (Load && Store) {
      if (Load || Store) {
        int TFI = Load ? LFI : SFI;
        unsigned AM = HII.getAddrMode(In);
        SlotInfo &SI = FIRangeMap[TFI];

          unsigned OpNum = Load ? 0 : 2;
          auto *RC = HII.getRegClass(In.getDesc(), OpNum, &HRI, MF);
          RC = getCommonRC(SI.RC, RC);

        unsigned S = HII.getMemAccessSize(In);
        if (SI.Size != 0 && SI.Size != S)

        for (auto *Mo : In.memoperands()) {
          if (!Mo->isVolatile() && !Mo->isAtomic())

      for (unsigned i = 0, n = In.getNumOperands(); i < n; ++i) {
        int FI = Op.getIndex();
        if (i+1 >= n || !In.getOperand(i+1).isImm() ||
            In.getOperand(i+1).getImm() != 0)
        if (BadFIs.count(FI))

          if (LastStore[FI] == IndexType::None)
            LastStore[FI] = IndexType::Entry;
          LastLoad[FI] = Index;

          if (LastStore[FI] != IndexType::None)
            RL.add(LastStore[FI], LastLoad[FI], false, false);
          else if (LastLoad[FI] != IndexType::None)
            RL.add(IndexType::Entry, LastLoad[FI], false, false);
          LastLoad[FI] = IndexType::None;
          LastStore[FI] = Index;

    for (auto &I : LastLoad) {
      IndexType LL = I.second;
      if (LL == IndexType::None)
      auto &RL = FIRangeMap[I.first].Map[&B];
      IndexType &LS = LastStore[I.first];
      if (LS != IndexType::None)
        RL.add(LS, LL, false, false);
        RL.add(IndexType::Entry, LL, false, false);
      LS = IndexType::None;
    for (auto &I : LastStore) {
      IndexType LS = I.second;
      if (LS == IndexType::None)
      auto &RL = FIRangeMap[I.first].Map[&B];
      RL.add(LS, IndexType::None, false, false);

    for (auto &P : FIRangeMap) {
      dbgs() << "fi#" << P.first;
      if (BadFIs.count(P.first))
      if (P.second.RC != nullptr)
        dbgs() << HRI.getRegClassName(P.second.RC) << '\n';
        dbgs() << "<null>\n";
      for (auto &R : P.second.Map)

  std::map<MachineBasicBlock*,std::vector<int>> BlockFIMap;
  for (auto &P : FIRangeMap) {
    if (BadFIs.count(P.first))
    for (auto &B : MF) {
      auto F = P.second.Map.find(&B);
      if (F == P.second.Map.end() || F->second.empty())
      if (IR.start() == IndexType::Entry)
        LoxFIs.insert(P.first);
      BlockFIMap[&B].push_back(P.first);

    dbgs() << "Block-to-FI map (* -- live-on-exit):\n";
    for (auto &P : BlockFIMap) {
      auto &FIs = P.second;
      for (auto I : FIs) {
        dbgs() << " fi#" << I;
        if (LoxFIs.count(I))

  for (auto &B : MF) {
    auto F = BlockIndexes.find(&B);
    assert(F != BlockIndexes.end());

    for (auto FI : BlockFIMap[&B]) {
      if (BadFIs.count(FI))
      for (auto &Range : RL) {
        if (!IndexType::isInstr(Range.start()) ||
            !IndexType::isInstr(Range.end()))
        assert(SI.mayStore() && "Unexpected start instruction");

                                  SrcOp.getSubReg() };
        auto *RC = HII.getRegClass(SI.getDesc(), 2, &HRI, MF);
        Register FoundR = this->findPhysReg(MF, Range, IM, DM, RC);

        if (SrcRR.Reg != FoundR || SrcRR.Sub != 0) {
          CopyIn = BuildMI(B, StartIt, DL, HII.get(TargetOpcode::COPY), FoundR)

        if (LoxFIs.count(FI) && (&Range == &RL.back())) {
          if (unsigned SR = SrcOp.getSubReg())
            SrcOp.setReg(HRI.getSubReg(FoundR, SR));
            SrcOp.setReg(FoundR);
          SrcOp.setIsKill(false);

        for (auto It = StartIt; It != EndIt; It = NextIt) {
          NextIt = std::next(It);
          if (!HII.isLoadFromStackSlot(MI, TFI) || TFI != FI)
          assert(MI.getOperand(0).getSubReg() == 0);
          if (DstR != FoundR) {
            unsigned MemSize = HII.getMemAccessSize(MI);
            unsigned CopyOpc = TargetOpcode::COPY;
            if (HII.isSignExtendingLoad(MI))
              CopyOpc = (MemSize == 1) ? Hexagon::A2_sxtb : Hexagon::A2_sxth;
            else if (HII.isZeroExtendingLoad(MI))
              CopyOpc = (MemSize == 1) ? Hexagon::A2_zxtb : Hexagon::A2_zxth;
            CopyOut = BuildMI(B, It, DL, HII.get(CopyOpc), DstR)

        DM[RR].subtract(Range);
void HexagonFrameLowering::expandAlloca(MachineInstr *AI,

      BuildMI(MB, AI, DL, HII.get(Hexagon::A2_sub), Rd)
      BuildMI(MB, AI, DL, HII.get(Hexagon::A2_sub), SP)
      BuildMI(MB, AI, DL, HII.get(Hexagon::A2_andir), Rd)
      BuildMI(MB, AI, DL, HII.get(Hexagon::A2_andir), SP)
    BuildMI(MB, AI, DL, HII.get(TargetOpcode::COPY), SP)
    BuildMI(MB, AI, DL, HII.get(Hexagon::A2_addi), Rd)

    if (I.getOpcode() == Hexagon::PS_aligna)

void HexagonFrameLowering::addCalleeSaveRegistersAsImpOperand(MachineInstr *MI,
      const CSIVect &CSI, bool IsDef, bool IsKill) const {

      const CSIVect &CSI) const {
  BitVector Regs(Hexagon::NUM_TARGET_REGS);
    if (!Hexagon::DoubleRegsRegClass.contains(R))

  int F = Regs.find_first();
  if (F != Hexagon::D8)
  int N = Regs.find_next(F);
  if (N >= 0 && N != F+1)

      const CSIVect &CSI) const {
  if (shouldInlineCSR(MF, CSI))
  unsigned NumCSI = CSI.size();
  return Threshold < NumCSI;

bool HexagonFrameLowering::useRestoreFunction(const MachineFunction &MF,
      const CSIVect &CSI) const {
  if (shouldInlineCSR(MF, CSI))
  unsigned NumCSI = CSI.size();
  return Threshold < NumCSI;
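// useSpillFunction/useRestoreFunction: the out-of-line save/restore routines
// are used only when shouldInlineCSR does not force inline spills and the
// number of callee-saved registers exceeds the -spill-func-threshold value
// (or its -Os variant when optimizing for size).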
bool HexagonFrameLowering::mayOverflowFrameOffset(MachineFunction &MF) const {
  if (HST.useHVXOps() && StackSize > 256)

  bool HasImmStack = false;
  unsigned MinLS = ~0u;

      switch (MI.getOpcode()) {
        case Hexagon::S4_storeirit_io:
        case Hexagon::S4_storeirif_io:
        case Hexagon::S4_storeiri_io:
        case Hexagon::S4_storeirht_io:
        case Hexagon::S4_storeirhf_io:
        case Hexagon::S4_storeirh_io:
        case Hexagon::S4_storeirbt_io:
        case Hexagon::S4_storeirbf_io:
        case Hexagon::S4_storeirb_io:
          if (MI.getOperand(0).isFI())
          MinLS = std::min(MinLS, LS);

  return !isUInt<6>(StackSize >> MinLS);
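// mayOverflowFrameOffset conservatively reports that frame offsets may not be
// encodable: with HVX a stack larger than 256 bytes, or a stack whose size
// does not fit the scaled 6-bit unsigned offset of the store-immediate
// instructions scanned above (MinLS being the smallest access-size shift
// seen).  A standalone sketch of that last check (not part of the original
// file):
//
//   bool fitsScaledU6(uint64_t StackSize, unsigned MinLS) {
//     return (StackSize >> MinLS) <= 63;   // isUInt<6>(StackSize >> MinLS)
//   }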