65#define DEBUG_TYPE "hexagon-pei"
156 cl::desc(
"Set the number of scavenger slots"),
161 cl::desc(
"Specify O2(not Os) spill func threshold"),
166 cl::desc(
"Specify Os spill func threshold"),
175 cl::desc(
"Enable stack frame shrink wrapping"));
180 cl::desc(
"Max count of stack frame shrink-wraps"));
184 cl::desc(
"Enable long calls for save-restore stubs."),
195 cl::init(std::numeric_limits<unsigned>::max()));
214 char HexagonCallFrameInformation::ID = 0;
218bool HexagonCallFrameInformation::runOnMachineFunction(
MachineFunction &MF) {
219 auto &HFI = *MF.
getSubtarget<HexagonSubtarget>().getFrameLowering();
224 HFI.insertCFIInstructions(MF);
229 "Hexagon call frame information",
false,
false)
232 return new HexagonCallFrameInformation();
249 if (!RegNo ||
SubReg < RegNo)
259 static_assert(Hexagon::R1 > 0,
260 "Assume physical registers are encoded as positive integers");
265 for (
unsigned I = 1,
E = CSI.
size();
I <
E; ++
I) {
280 unsigned Opc =
MI.getOpcode();
282 case Hexagon::PS_alloca:
283 case Hexagon::PS_aligna:
306 for (
MCPhysReg S : HRI.subregs_inclusive(R))
311 if (MO.isRegMask()) {
316 const uint32_t *BM = MO.getRegMask();
320 if (!(BM[R/32] & (1u << (R%32))))
335 unsigned RetOpc =
I->getOpcode();
336 return RetOpc == Hexagon::PS_tailcall_i || RetOpc == Hexagon::PS_tailcall_r;
358 case Hexagon::RESTORE_DEALLOC_RET_JMP_V4:
359 case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC:
360 case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT:
361 case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC:
362 case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT:
363 case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT_PIC:
364 case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4:
365 case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC:
378 return F.hasOptSize() && !
F.hasMinSize();
389void HexagonFrameLowering::findShrunkPrologEpilog(MachineFunction &MF,
390 MachineBasicBlock *&PrologB, MachineBasicBlock *&EpilogB)
const {
391 static unsigned ShrinkCounter = 0;
393 if (MF.
getSubtarget<HexagonSubtarget>().isEnvironmentMusl() &&
402 auto &HRI = *MF.
getSubtarget<HexagonSubtarget>().getRegisterInfo();
404 MachineDominatorTree MDT;
406 MachinePostDominatorTree MPT;
409 using UnsignedMap = DenseMap<unsigned, unsigned>;
410 using RPOTType = ReversePostOrderTraversal<const MachineFunction *>;
416 RPO[
I->getNumber()] = RPON++;
422 unsigned BN = RPO[
I.getNumber()];
423 for (MachineBasicBlock *Succ :
I.successors())
425 if (RPO[Succ->getNumber()] <= BN)
432 BitVector CSR(Hexagon::NUM_TARGET_REGS);
433 for (
const MCPhysReg *
P = HRI.getCalleeSavedRegs(&MF); *
P; ++
P)
442 dbgs() <<
"Blocks needing SF: {";
443 for (
auto &
B : SFBlocks)
448 if (SFBlocks.
empty())
452 MachineBasicBlock *DomB = SFBlocks[0];
453 for (
unsigned i = 1, n = SFBlocks.
size(); i < n; ++i) {
458 MachineBasicBlock *PDomB = SFBlocks[0];
459 for (
unsigned i = 1, n = SFBlocks.
size(); i < n; ++i) {
465 dbgs() <<
"Computed dom block: ";
470 dbgs() <<
", computed pdom block: ";
486 LLVM_DEBUG(
dbgs() <<
"PDom block does not post-dominate dom block\n");
509 findShrunkPrologEpilog(MF, PrologB, EpilogB);
511 bool PrologueStubs =
false;
512 insertCSRSpillsInBlock(*PrologB, CSI, HRI, PrologueStubs);
513 insertPrologueInBlock(*PrologB, PrologueStubs);
514 updateEntryPaths(MF, *PrologB);
517 insertCSRRestoresInBlock(*EpilogB, CSI, HRI);
518 insertEpilogueInBlock(*EpilogB);
521 if (
B.isReturnBlock())
522 insertCSRRestoresInBlock(
B, CSI, HRI);
525 if (
B.isReturnBlock())
526 insertEpilogueInBlock(
B);
544 BitVector DoneT(MaxBN+1), DoneF(MaxBN+1), Path(MaxBN+1);
545 updateExitPaths(*EpilogB, *EpilogB, DoneT, DoneF, Path);
554 assert(
F.hasFnAttribute(Attribute::NoReturn) &&
555 F.getFunction().hasFnAttribute(Attribute::NoUnwind) &&
556 !
F.getFunction().hasFnAttribute(Attribute::UWTable));
572 assert(!MFI.hasVarSizedObjects() &&
573 !HST.getRegisterInfo()->hasStackRealignment(MF));
574 return F.hasFnAttribute(Attribute::NoReturn) &&
575 F.hasFnAttribute(Attribute::NoUnwind) &&
576 !
F.hasFnAttribute(Attribute::UWTable) && HST.noreturnStackElim() &&
577 MFI.getStackSize() == 0;
580void HexagonFrameLowering::insertPrologueInBlock(MachineBasicBlock &
MBB,
581 bool PrologueStubs)
const {
597 FrameSize = MaxCFA +
alignTo(FrameSize, MaxAlign);
600 bool AlignStack = (MaxAlign > getStackAlign());
608 SmallVector<MachineInstr *, 4> AdjustRegs;
611 if (
MI.getOpcode() == Hexagon::PS_alloca)
614 for (
auto *
MI : AdjustRegs) {
615 assert((
MI->getOpcode() == Hexagon::PS_alloca) &&
"Expected alloca");
616 expandAlloca(
MI, HII, SP, MaxCF);
617 MI->eraseFromParent();
622 if (MF.getFunction().isVarArg() &&
623 MF.getSubtarget<HexagonSubtarget>().isEnvironmentMusl()) {
625 int NumVarArgRegs = 6 - FirstVarArgSavedReg;
626 int RegisterSavedAreaSizePlusPadding = (NumVarArgRegs % 2 == 0)
628 : NumVarArgRegs * 4 + 4;
629 if (RegisterSavedAreaSizePlusPadding > 0) {
632 BuildMI(
MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)
634 .
addImm(-RegisterSavedAreaSizePlusPadding)
639 auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>();
640 for (
int i = HMFI.getFirstNamedArgFrameIndex(),
641 e = HMFI.getLastNamedArgFrameIndex(); i >= e; --i) {
646 unsigned LDOpc, STOpc;
647 uint64_t OpcodeChecker = ObjAlign.
value();
650 if (ObjAlign > ObjSize) {
653 else if (ObjSize <= 2)
655 else if (ObjSize <= 4)
657 else if (ObjSize > 4)
661 switch (OpcodeChecker) {
663 LDOpc = Hexagon::L2_loadrb_io;
664 STOpc = Hexagon::S2_storerb_io;
667 LDOpc = Hexagon::L2_loadrh_io;
668 STOpc = Hexagon::S2_storerh_io;
671 LDOpc = Hexagon::L2_loadri_io;
672 STOpc = Hexagon::S2_storeri_io;
676 LDOpc = Hexagon::L2_loadrd_io;
677 STOpc = Hexagon::S2_storerd_io;
681 Register RegUsed = LDOpc == Hexagon::L2_loadrd_io ? Hexagon::D3
683 int LoadStoreCount = ObjSize / OpcodeChecker;
685 if (ObjSize % OpcodeChecker)
693 NumBytes =
alignTo(NumBytes, ObjAlign);
696 while (
Count < LoadStoreCount) {
698 BuildMI(
MBB, InsertPt, dl, HII.get(LDOpc), RegUsed)
700 .
addImm(RegisterSavedAreaSizePlusPadding +
717 NumBytes =
alignTo(NumBytes, 8);
722 NumBytes = (NumVarArgRegs % 2 == 0) ? NumBytes : NumBytes + 4;
724 for (
int j = FirstVarArgSavedReg, i = 0;
j < 6; ++
j, ++i) {
725 BuildMI(
MBB, InsertPt, dl, HII.get(Hexagon::S2_storeri_io))
735 insertAllocframe(
MBB, InsertPt, NumBytes);
737 BuildMI(
MBB, InsertPt, dl, HII.get(Hexagon::A2_andir), SP)
745 BuildMI(
MBB, InsertPt, dl, HII.get(Hexagon::PS_call_stk))
747 }
else if (NumBytes > 0) {
749 BuildMI(
MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)
755void HexagonFrameLowering::insertEpilogueInBlock(MachineBasicBlock &
MBB)
const {
769 MF.
getSubtarget<HexagonSubtarget>().isEnvironmentMusl()) {
771 int NumVarArgRegs = 6 - FirstVarArgSavedReg;
772 int RegisterSavedAreaSizePlusPadding = (NumVarArgRegs % 2 == 0) ?
773 (NumVarArgRegs * 4) : (NumVarArgRegs * 4 + 4);
774 NumBytes += RegisterSavedAreaSizePlusPadding;
777 BuildMI(
MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)
785 unsigned RetOpc = RetI ? RetI->
getOpcode() : 0;
788 if (RetOpc == Hexagon::EH_RETURN_JMPR) {
789 BuildMI(
MBB, InsertPt, dl, HII.get(Hexagon::L2_deallocframe))
792 BuildMI(
MBB, InsertPt, dl, HII.get(Hexagon::A2_add), SP)
800 if (RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4 ||
801 RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC ||
802 RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT ||
803 RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC) {
819 bool NeedsDeallocframe =
true;
822 unsigned COpc = PrevIt->getOpcode();
823 if (COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4 ||
824 COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC ||
825 COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT ||
826 COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT_PIC ||
827 COpc == Hexagon::PS_call_nr || COpc == Hexagon::PS_callr_nr)
828 NeedsDeallocframe =
false;
831 if (!MF.
getSubtarget<HexagonSubtarget>().isEnvironmentMusl() ||
833 if (!NeedsDeallocframe)
839 BuildMI(
MBB, InsertPt, dl, HII.get(Hexagon::L2_deallocframe))
844 unsigned NewOpc = Hexagon::L4_return;
845 MachineInstr *NewI =
BuildMI(
MBB, RetI, dl, HII.get(NewOpc))
854 int NumVarArgRegs = 6 - FirstVarArgSavedReg;
855 int RegisterSavedAreaSizePlusPadding = (NumVarArgRegs % 2 == 0) ?
856 (NumVarArgRegs * 4) : (NumVarArgRegs * 4 + 4);
862 (
I->getOpcode() != Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT &&
863 I->getOpcode() != Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT_PIC &&
864 I->getOpcode() != Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4 &&
865 I->getOpcode() != Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC))
866 BuildMI(
MBB, InsertPt, dl, HII.get(Hexagon::L2_deallocframe))
869 if (RegisterSavedAreaSizePlusPadding != 0)
870 BuildMI(
MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)
872 .
addImm(RegisterSavedAreaSizePlusPadding);
876void HexagonFrameLowering::insertAllocframe(MachineBasicBlock &
MBB,
885 const unsigned int ALLOCFRAME_MAX = 16384;
895 if (NumBytes >= ALLOCFRAME_MAX) {
897 BuildMI(
MBB, InsertPt, dl, HII.get(Hexagon::S2_allocframe))
906 BuildMI(
MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)
911 BuildMI(
MBB, InsertPt, dl, HII.get(Hexagon::S2_allocframe))
920void HexagonFrameLowering::updateEntryPaths(MachineFunction &MF,
921 MachineBasicBlock &SaveB)
const {
922 SetVector<unsigned> Worklist;
924 MachineBasicBlock &EntryB = MF.
front();
930 for (
unsigned i = 0; i < Worklist.
size(); ++i) {
931 unsigned BN = Worklist[i];
938 Worklist.
insert(SB->getNumber());
942bool HexagonFrameLowering::updateExitPaths(MachineBasicBlock &
MBB,
943 MachineBasicBlock &RestoreB, BitVector &DoneT, BitVector &DoneF,
944 BitVector &Path)
const {
947 if (Path[BN] || DoneF[BN])
955 bool ReachedExit =
false;
957 ReachedExit |= updateExitPaths(*SB, RestoreB, DoneT, DoneF, Path);
963 MachineInstr &RetI =
MBB.
back();
973 if (ReachedExit && &
MBB != &RestoreB) {
986static std::optional<MachineBasicBlock::iterator>
993 auto End =
B.instr_end();
998 if (
I.getOpcode() == Hexagon::S2_allocframe)
999 return std::next(It);
1003 bool HasCall =
false, HasAllocFrame =
false;
1005 while (++
T != End &&
T->isBundled()) {
1006 if (
T->getOpcode() == Hexagon::S2_allocframe)
1007 HasAllocFrame =
true;
1008 else if (
T->isCall())
1012 return HasCall ? It : std::next(It);
1014 return std::nullopt;
1020 insertCFIInstructionsAt(
B, *At);
1035 const MCInstrDesc &CFID = HII.get(TargetOpcode::CFI_INSTRUCTION);
1038 bool HasFP = hasFP(MF);
1041 unsigned DwFPReg = HRI.getDwarfRegNum(HRI.getFrameRegister(),
true);
1042 unsigned DwRAReg = HRI.getDwarfRegNum(HRI.getRARegister(),
true);
1069 Hexagon::R1, Hexagon::R0, Hexagon::R3, Hexagon::R2,
1070 Hexagon::R17, Hexagon::R16, Hexagon::R19, Hexagon::R18,
1071 Hexagon::R21, Hexagon::R20, Hexagon::R23, Hexagon::R22,
1072 Hexagon::R25, Hexagon::R24, Hexagon::R27, Hexagon::R26,
1073 Hexagon::D0, Hexagon::D1, Hexagon::D8, Hexagon::D9,
1074 Hexagon::D10, Hexagon::D11, Hexagon::D12, Hexagon::D13
1080 auto IfR = [
Reg] (
const CalleeSavedInfo &
C) ->
bool {
1081 return C.getReg() ==
Reg;
1100 getFrameIndexReference(MF,
F->getFrameIdx(), FrameReg).getFixed();
1106 unsigned DwarfReg = HRI.getDwarfRegNum(
Reg,
true);
1118 Register HiReg = HRI.getSubReg(
Reg, Hexagon::isub_hi);
1119 Register LoReg = HRI.getSubReg(
Reg, Hexagon::isub_lo);
1120 unsigned HiDwarfReg = HRI.getDwarfRegNum(HiReg,
true);
1121 unsigned LoDwarfReg = HRI.getDwarfRegNum(LoReg,
true);
1137 bool HasExtraAlign = HRI.hasStackRealignment(MF);
1153 if (HasAlloca || HasExtraAlign)
1183 bool Stkchk =
false) {
1184 const char * V4SpillToMemoryFunctions[] = {
1185 "__save_r16_through_r17",
1186 "__save_r16_through_r19",
1187 "__save_r16_through_r21",
1188 "__save_r16_through_r23",
1189 "__save_r16_through_r25",
1190 "__save_r16_through_r27" };
1192 const char * V4SpillToMemoryStkchkFunctions[] = {
1193 "__save_r16_through_r17_stkchk",
1194 "__save_r16_through_r19_stkchk",
1195 "__save_r16_through_r21_stkchk",
1196 "__save_r16_through_r23_stkchk",
1197 "__save_r16_through_r25_stkchk",
1198 "__save_r16_through_r27_stkchk" };
1200 const char * V4SpillFromMemoryFunctions[] = {
1201 "__restore_r16_through_r17_and_deallocframe",
1202 "__restore_r16_through_r19_and_deallocframe",
1203 "__restore_r16_through_r21_and_deallocframe",
1204 "__restore_r16_through_r23_and_deallocframe",
1205 "__restore_r16_through_r25_and_deallocframe",
1206 "__restore_r16_through_r27_and_deallocframe" };
1208 const char * V4SpillFromMemoryTailcallFunctions[] = {
1209 "__restore_r16_through_r17_and_deallocframe_before_tailcall",
1210 "__restore_r16_through_r19_and_deallocframe_before_tailcall",
1211 "__restore_r16_through_r21_and_deallocframe_before_tailcall",
1212 "__restore_r16_through_r23_and_deallocframe_before_tailcall",
1213 "__restore_r16_through_r25_and_deallocframe_before_tailcall",
1214 "__restore_r16_through_r27_and_deallocframe_before_tailcall"
1217 const char **SpillFunc =
nullptr;
1221 SpillFunc = Stkchk ? V4SpillToMemoryStkchkFunctions
1222 : V4SpillToMemoryFunctions;
1225 SpillFunc = V4SpillFromMemoryFunctions;
1228 SpillFunc = V4SpillFromMemoryTailcallFunctions;
1231 assert(SpillFunc &&
"Unknown spill kind");
1236 return SpillFunc[0];
1238 return SpillFunc[1];
1240 return SpillFunc[2];
1242 return SpillFunc[3];
1244 return SpillFunc[4];
1246 return SpillFunc[5];
1261 bool HasExtraAlign = HRI.hasStackRealignment(MF);
1266 Register SP = HRI.getStackRegister();
1268 Register AP = HMFI.getStackAlignBaseReg();
1283 bool UseFP =
false, UseAP =
false;
1288 if (NoOpt && !HasExtraAlign)
1293 UseFP |= (HasAlloca || HasExtraAlign);
1304 bool HasFP =
hasFP(MF);
1305 assert((HasFP || !UseFP) &&
"This function must have frame pointer");
1331 if (
Offset > 0 && !HasFP)
1346 if (!UseFP && !UseAP)
1347 RealOffset = FrameSize+
Offset;
1353 bool &PrologueStubs)
const {
1358 PrologueStubs =
false;
1363 if (useSpillFunction(MF, CSI)) {
1364 PrologueStubs =
true;
1376 if (StkOvrFlowEnabled) {
1378 SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4STK_EXT_PIC
1379 : Hexagon::SAVE_REGISTERS_CALL_V4STK_EXT;
1381 SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4STK_PIC
1382 : Hexagon::SAVE_REGISTERS_CALL_V4STK;
1385 SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4_EXT_PIC
1386 : Hexagon::SAVE_REGISTERS_CALL_V4_EXT;
1388 SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4_PIC
1389 : Hexagon::SAVE_REGISTERS_CALL_V4;
1392 MachineInstr *SaveRegsCall =
1397 addCalleeSaveRegistersAsImpOperand(SaveRegsCall, CSI,
false,
true);
1399 for (
const CalleeSavedInfo &
I : CSI)
1404 for (
const CalleeSavedInfo &
I : CSI) {
1405 MCRegister
Reg =
I.getReg();
1410 int FI =
I.getFrameIdx();
1411 const TargetRegisterClass *RC = HRI.getMinimalPhysRegClass(
Reg);
1419bool HexagonFrameLowering::insertCSRRestoresInBlock(MachineBasicBlock &
MBB,
1420 const CSIVect &CSI,
const HexagonRegisterInfo &HRI)
const {
1429 if (useRestoreFunction(MF, CSI)) {
1434 auto &HTM =
static_cast<const HexagonTargetMachine&
>(MF.
getTarget());
1435 bool IsPIC = HTM.isPositionIndependent();
1441 MachineInstr *DeallocCall =
nullptr;
1446 RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT_PIC
1447 : Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT;
1449 RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC
1450 : Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4;
1459 RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC
1460 : Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT;
1462 RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC
1463 : Hexagon::RESTORE_DEALLOC_RET_JMP_V4;
1469 addCalleeSaveRegistersAsImpOperand(DeallocCall, CSI,
true,
false);
1473 for (
const CalleeSavedInfo &
I : CSI) {
1474 MCRegister
Reg =
I.getReg();
1475 const TargetRegisterClass *RC = HRI.getMinimalPhysRegClass(
Reg);
1476 int FI =
I.getFrameIdx();
1487 unsigned Opc =
MI.getOpcode();
1489 assert((
Opc == Hexagon::ADJCALLSTACKDOWN ||
Opc == Hexagon::ADJCALLSTACKUP) &&
1490 "Cannot handle this call frame pseudo instruction");
1491 return MBB.erase(
I);
1504 if (!HasAlloca || !NeedsAlign)
1510 AP = AI->getOperand(0).getReg();
1513 HMFI.setStackAlignBaseReg(AP);
1523 if (
MRI.isPhysRegUsed(*AI))
1553 BitVector SRegs(Hexagon::NUM_TARGET_REGS);
1583 bool HasResSub =
false;
1609 BitVector TmpSup(Hexagon::NUM_TARGET_REGS);
1615 for (
int x = TmpSup.find_first(); x >= 0; x = TmpSup.find_next(x)) {
1653 int64_t MinOffset = 0;
1655 for (
const SpillSlot *S = FixedSlots; S != FixedSlots+NumFixed; ++S) {
1660 MinOffset = std::min(MinOffset, S->Offset);
1662 SRegs[S->Reg] =
false;
1671 unsigned Size =
TRI->getSpillSize(*RC);
1672 int64_t Off = MinOffset -
Size;
1674 Off &= -Alignment.
value();
1676 MinOffset = std::min(MinOffset, Off);
1682 dbgs() <<
"CS information: {";
1684 int FI =
I.getFrameIdx();
1696 bool MissedReg =
false;
1716 if (!Hexagon::ModRegsRegClass.
contains(DstR) ||
1717 !Hexagon::ModRegsRegClass.
contains(SrcR))
1720 Register TmpR =
MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
1721 BuildMI(
B, It,
DL, HII.get(TargetOpcode::COPY), TmpR).
add(
MI->getOperand(1));
1722 BuildMI(
B, It,
DL, HII.get(TargetOpcode::COPY), DstR)
1734 if (!
MI->getOperand(0).isFI())
1738 unsigned Opc =
MI->getOpcode();
1740 bool IsKill =
MI->getOperand(2).isKill();
1741 int FI =
MI->getOperand(0).getIndex();
1745 Register TmpR =
MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
1746 unsigned TfrOpc = (
Opc == Hexagon::STriw_pred) ? Hexagon::C2_tfrpr
1752 BuildMI(
B, It,
DL, HII.get(Hexagon::S2_storeri_io))
1763bool HexagonFrameLowering::expandLoadInt(MachineBasicBlock &
B,
1765 const HexagonInstrInfo &HII, SmallVectorImpl<Register> &NewRegs)
const {
1766 MachineInstr *
MI = &*It;
1767 if (!
MI->getOperand(1).isFI())
1771 unsigned Opc =
MI->getOpcode();
1773 int FI =
MI->getOperand(1).getIndex();
1776 Register TmpR =
MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
1777 BuildMI(
B, It,
DL, HII.get(Hexagon::L2_loadri_io), TmpR)
1784 unsigned TfrOpc = (
Opc == Hexagon::LDriw_pred) ? Hexagon::C2_tfrrp
1785 : Hexagon::A2_tfrrcr;
1787 .
addReg(TmpR, RegState::Kill);
1794bool HexagonFrameLowering::expandStoreVecPred(MachineBasicBlock &
B,
1796 const HexagonInstrInfo &HII, SmallVectorImpl<Register> &NewRegs)
const {
1797 MachineInstr *
MI = &*It;
1798 if (!
MI->getOperand(0).isFI())
1803 bool IsKill =
MI->getOperand(2).isKill();
1804 int FI =
MI->getOperand(0).getIndex();
1805 auto *RC = &Hexagon::HvxVRRegClass;
1811 Register TmpR0 =
MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
1814 BuildMI(
B, It,
DL, HII.get(Hexagon::A2_tfrsi), TmpR0)
1817 BuildMI(
B, It,
DL, HII.get(Hexagon::V6_vandqrt), TmpR1)
1819 .
addReg(TmpR0, RegState::Kill);
1822 expandStoreVec(
B, std::prev(It),
MRI, HII, NewRegs);
1830bool HexagonFrameLowering::expandLoadVecPred(MachineBasicBlock &
B,
1832 const HexagonInstrInfo &HII, SmallVectorImpl<Register> &NewRegs)
const {
1833 MachineInstr *
MI = &*It;
1834 if (!
MI->getOperand(1).isFI())
1839 int FI =
MI->getOperand(1).getIndex();
1840 auto *RC = &Hexagon::HvxVRRegClass;
1845 Register TmpR0 =
MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
1848 BuildMI(
B, It,
DL, HII.get(Hexagon::A2_tfrsi), TmpR0)
1851 expandLoadVec(
B, std::prev(It),
MRI, HII, NewRegs);
1853 BuildMI(
B, It,
DL, HII.get(Hexagon::V6_vandvrt), DstR)
1854 .
addReg(TmpR1, RegState::Kill)
1855 .
addReg(TmpR0, RegState::Kill);
1863bool HexagonFrameLowering::expandStoreVec2(MachineBasicBlock &
B,
1865 const HexagonInstrInfo &HII, SmallVectorImpl<Register> &NewRegs)
const {
1866 MachineFunction &MF = *
B.getParent();
1868 auto &HRI = *MF.
getSubtarget<HexagonSubtarget>().getRegisterInfo();
1869 MachineInstr *
MI = &*It;
1870 if (!
MI->getOperand(0).isFI())
1877 LivePhysRegs LPR(HRI);
1880 for (
auto R =
B.begin(); R != It; ++R) {
1882 LPR.stepForward(*R, Clobbers);
1887 Register SrcLo = HRI.getSubReg(SrcR, Hexagon::vsub_lo);
1888 Register SrcHi = HRI.getSubReg(SrcR, Hexagon::vsub_hi);
1889 bool IsKill =
MI->getOperand(2).isKill();
1890 int FI =
MI->getOperand(0).getIndex();
1892 unsigned Size = HRI.getSpillSize(Hexagon::HvxVRRegClass);
1893 Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
1898 if (LPR.contains(SrcLo)) {
1899 StoreOpc = NeedAlign <= HasAlign ? Hexagon::V6_vS32b_ai
1900 : Hexagon::V6_vS32Ub_ai;
1909 if (LPR.contains(SrcHi)) {
1910 StoreOpc = NeedAlign <= HasAlign ? Hexagon::V6_vS32b_ai
1911 : Hexagon::V6_vS32Ub_ai;
1923bool HexagonFrameLowering::expandLoadVec2(MachineBasicBlock &
B,
1925 const HexagonInstrInfo &HII, SmallVectorImpl<Register> &NewRegs)
const {
1926 MachineFunction &MF = *
B.getParent();
1928 auto &HRI = *MF.
getSubtarget<HexagonSubtarget>().getRegisterInfo();
1929 MachineInstr *
MI = &*It;
1930 if (!
MI->getOperand(1).isFI())
1935 Register DstHi = HRI.getSubReg(DstR, Hexagon::vsub_hi);
1936 Register DstLo = HRI.getSubReg(DstR, Hexagon::vsub_lo);
1937 int FI =
MI->getOperand(1).getIndex();
1939 unsigned Size = HRI.getSpillSize(Hexagon::HvxVRRegClass);
1940 Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
1945 LoadOpc = NeedAlign <= HasAlign ? Hexagon::V6_vL32b_ai
1946 : Hexagon::V6_vL32Ub_ai;
1953 LoadOpc = NeedAlign <= HasAlign ? Hexagon::V6_vL32b_ai
1954 : Hexagon::V6_vL32Ub_ai;
1964bool HexagonFrameLowering::expandStoreVec(MachineBasicBlock &
B,
1966 const HexagonInstrInfo &HII, SmallVectorImpl<Register> &NewRegs)
const {
1967 MachineFunction &MF = *
B.getParent();
1969 MachineInstr *
MI = &*It;
1970 if (!
MI->getOperand(0).isFI())
1973 auto &HRI = *MF.
getSubtarget<HexagonSubtarget>().getRegisterInfo();
1976 bool IsKill =
MI->getOperand(2).isKill();
1977 int FI =
MI->getOperand(0).getIndex();
1979 Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
1981 unsigned StoreOpc = NeedAlign <= HasAlign ? Hexagon::V6_vS32b_ai
1982 : Hexagon::V6_vS32Ub_ai;
1993bool HexagonFrameLowering::expandLoadVec(MachineBasicBlock &
B,
1995 const HexagonInstrInfo &HII, SmallVectorImpl<Register> &NewRegs)
const {
1996 MachineFunction &MF = *
B.getParent();
1998 MachineInstr *
MI = &*It;
1999 if (!
MI->getOperand(1).isFI())
2002 auto &HRI = *MF.
getSubtarget<HexagonSubtarget>().getRegisterInfo();
2005 int FI =
MI->getOperand(1).getIndex();
2007 Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
2009 unsigned LoadOpc = NeedAlign <= HasAlign ? Hexagon::V6_vL32b_ai
2010 : Hexagon::V6_vL32Ub_ai;
2020bool HexagonFrameLowering::expandSpillMacros(MachineFunction &MF,
2021 SmallVectorImpl<Register> &NewRegs)
const {
2022 auto &HII = *MF.
getSubtarget<HexagonSubtarget>().getInstrInfo();
2026 for (
auto &
B : MF) {
2029 for (
auto I =
B.begin(),
E =
B.end();
I !=
E;
I = NextI) {
2030 MachineInstr *
MI = &*
I;
2031 NextI = std::next(
I);
2032 unsigned Opc =
MI->getOpcode();
2035 case TargetOpcode::COPY:
2038 case Hexagon::STriw_pred:
2039 case Hexagon::STriw_ctr:
2042 case Hexagon::LDriw_pred:
2043 case Hexagon::LDriw_ctr:
2046 case Hexagon::PS_vstorerq_ai:
2047 Changed |= expandStoreVecPred(
B,
I,
MRI, HII, NewRegs);
2049 case Hexagon::PS_vloadrq_ai:
2052 case Hexagon::PS_vloadrw_ai:
2055 case Hexagon::PS_vstorerw_ai:
2070 SavedRegs.
resize(HRI.getNumRegs());
2080 expandSpillMacros(MF, NewRegs);
2082 optimizeSpillSlots(MF, NewRegs);
2086 if (!NewRegs.
empty() || mayOverflowFrameOffset(MF)) {
2092 SpillRCs.
insert(&Hexagon::IntRegsRegClass);
2097 for (
const auto *RC : SpillRCs) {
2101 switch (RC->
getID()) {
2102 case Hexagon::IntRegsRegClassID:
2105 case Hexagon::HvxQRRegClassID:
2109 unsigned S = HRI.getSpillSize(*RC);
2110 Align A = HRI.getSpillAlign(*RC);
2111 for (
unsigned i = 0; i < Num; i++) {
2113 RS->addScavengingFrameIndex(NewFI);
2130 auto F = DeadMap.find({Reg,0});
2131 if (
F == DeadMap.end())
2133 for (
auto &DR :
F->second)
2134 if (DR.contains(FIR))
2153void HexagonFrameLowering::optimizeSpillSlots(MachineFunction &MF,
2154 SmallVectorImpl<Register> &VRegs)
const {
2156 auto &HII = *HST.getInstrInfo();
2157 auto &HRI = *HST.getRegisterInfo();
2159 HexagonBlockRanges HBR(MF);
2161 using BlockIndexMap =
2162 std::map<MachineBasicBlock *, HexagonBlockRanges::InstrIndexMap>;
2163 using BlockRangeMap =
2164 std::map<MachineBasicBlock *, HexagonBlockRanges::RangeList>;
2165 using IndexType = HexagonBlockRanges::IndexType;
2170 const TargetRegisterClass *RC =
nullptr;
2172 SlotInfo() =
default;
2175 BlockIndexMap BlockIndexes;
2176 SmallSet<int,4> BadFIs;
2177 std::map<int,SlotInfo> FIRangeMap;
2184 [](
const TargetRegisterClass *HaveRC,
2185 const TargetRegisterClass *NewRC) ->
const TargetRegisterClass * {
2186 if (HaveRC ==
nullptr || HaveRC == NewRC)
2191 if (NewRC->hasSubClassEq(HaveRC))
2198 for (
auto &
B : MF) {
2199 std::map<int,IndexType> LastStore, LastLoad;
2200 auto P = BlockIndexes.emplace(&
B, HexagonBlockRanges::InstrIndexMap(
B));
2201 auto &IndexMap =
P.first->second;
2203 << IndexMap <<
'\n');
2205 for (
auto &In :
B) {
2207 bool Load = HII.isLoadFromStackSlot(In, LFI) && !HII.isPredicated(In);
2208 bool Store = HII.isStoreToStackSlot(In, SFI) && !HII.isPredicated(In);
2209 if (Load && Store) {
2221 if (Load || Store) {
2222 int TFI =
Load ? LFI : SFI;
2223 unsigned AM = HII.getAddrMode(In);
2224 SlotInfo &
SI = FIRangeMap[TFI];
2228 unsigned OpNum =
Load ? 0 : 2;
2229 auto *RC = HII.getRegClass(
In.getDesc(), OpNum);
2230 RC = getCommonRC(
SI.RC, RC);
2238 unsigned S = HII.getMemAccessSize(In);
2239 if (
SI.Size != 0 &&
SI.Size != S)
2245 for (
auto *Mo :
In.memoperands()) {
2246 if (!Mo->isVolatile() && !Mo->isAtomic())
2257 for (
unsigned i = 0, n =
In.getNumOperands(); i < n; ++i) {
2258 const MachineOperand &
Op =
In.getOperand(i);
2261 int FI =
Op.getIndex();
2264 if (i+1 >= n || !
In.getOperand(i+1).isImm() ||
2265 In.getOperand(i+1).getImm() != 0)
2267 if (BadFIs.
count(FI))
2271 auto &
LS = LastStore[FI];
2272 auto &LL = LastLoad[FI];
2274 if (LS == IndexType::None)
2275 LS = IndexType::Entry;
2278 HexagonBlockRanges::RangeList &RL = FIRangeMap[FI].Map[&
B];
2279 if (LS != IndexType::None)
2280 RL.
add(LS, LL,
false,
false);
2281 else if (LL != IndexType::None)
2282 RL.
add(IndexType::Entry, LL,
false,
false);
2283 LL = IndexType::None;
2291 for (
auto &
I : LastLoad) {
2292 IndexType LL =
I.second;
2293 if (LL == IndexType::None)
2295 auto &RL = FIRangeMap[
I.first].Map[&
B];
2296 IndexType &
LS = LastStore[
I.first];
2297 if (LS != IndexType::None)
2298 RL.
add(LS, LL,
false,
false);
2300 RL.
add(IndexType::Entry, LL,
false,
false);
2301 LS = IndexType::None;
2303 for (
auto &
I : LastStore) {
2304 IndexType
LS =
I.second;
2305 if (LS == IndexType::None)
2307 auto &RL = FIRangeMap[
I.first].Map[&
B];
2308 RL.
add(LS, IndexType::None,
false,
false);
2313 for (
auto &
P : FIRangeMap) {
2314 dbgs() <<
"fi#" <<
P.first;
2315 if (BadFIs.
count(
P.first))
2318 if (
P.second.RC !=
nullptr)
2319 dbgs() << HRI.getRegClassName(
P.second.RC) <<
'\n';
2321 dbgs() <<
"<null>\n";
2322 for (
auto &R :
P.second.Map)
2331 SmallSet<int,4> LoxFIs;
2333 std::map<MachineBasicBlock*,std::vector<int>> BlockFIMap;
2335 for (
auto &
P : FIRangeMap) {
2337 if (BadFIs.
count(
P.first))
2339 for (
auto &
B : MF) {
2340 auto F =
P.second.Map.find(&
B);
2342 if (
F ==
P.second.Map.end() ||
F->second.empty())
2344 HexagonBlockRanges::IndexRange &
IR =
F->second.front();
2345 if (
IR.start() == IndexType::Entry)
2346 LoxFIs.insert(
P.first);
2347 BlockFIMap[&
B].push_back(
P.first);
2352 dbgs() <<
"Block-to-FI map (* -- live-on-exit):\n";
2353 for (
auto &
P : BlockFIMap) {
2354 auto &FIs =
P.second;
2358 for (
auto I : FIs) {
2359 dbgs() <<
" fi#" <<
I;
2360 if (LoxFIs.count(
I))
2372 for (
auto &
B : MF) {
2373 auto F = BlockIndexes.find(&
B);
2374 assert(
F != BlockIndexes.end());
2375 HexagonBlockRanges::InstrIndexMap &IM =
F->second;
2379 << HexagonBlockRanges::PrintRangeMap(
DM, HRI));
2381 for (
auto FI : BlockFIMap[&
B]) {
2382 if (BadFIs.
count(FI))
2385 HexagonBlockRanges::RangeList &RL = FIRangeMap[FI].Map[&
B];
2386 for (
auto &
Range : RL) {
2388 if (!IndexType::isInstr(
Range.start()) ||
2389 !IndexType::isInstr(
Range.end()))
2393 assert(
SI.mayStore() &&
"Unexpected start instruction");
2395 MachineOperand &SrcOp =
SI.getOperand(2);
2397 HexagonBlockRanges::RegisterRef SrcRR = { SrcOp.
getReg(),
2399 auto *RC = HII.getRegClass(
SI.getDesc(), 2);
2416 MachineInstr *CopyIn =
nullptr;
2417 if (SrcRR.
Reg != FoundR || SrcRR.
Sub != 0) {
2419 CopyIn =
BuildMI(
B, StartIt,
DL, HII.get(TargetOpcode::COPY), FoundR)
2425 if (LoxFIs.count(FI) && (&
Range == &RL.back())) {
2428 SrcOp.
setReg(HRI.getSubReg(FoundR, SR));
2440 for (
auto It = StartIt; It != EndIt; It = NextIt) {
2441 MachineInstr &
MI = *It;
2442 NextIt = std::next(It);
2444 if (!HII.isLoadFromStackSlot(
MI, TFI) || TFI != FI)
2447 assert(
MI.getOperand(0).getSubReg() == 0);
2448 MachineInstr *CopyOut =
nullptr;
2449 if (DstR != FoundR) {
2451 unsigned MemSize = HII.getMemAccessSize(
MI);
2453 unsigned CopyOpc = TargetOpcode::COPY;
2454 if (HII.isSignExtendingLoad(
MI))
2455 CopyOpc = (MemSize == 1) ? Hexagon::A2_sxtb : Hexagon::A2_sxth;
2456 else if (HII.isZeroExtendingLoad(
MI))
2457 CopyOpc = (MemSize == 1) ? Hexagon::A2_zxtb : Hexagon::A2_zxth;
2458 CopyOut =
BuildMI(
B, It,
DL, HII.get(CopyOpc), DstR)
2466 HexagonBlockRanges::RegisterRef FoundRR = { FoundR, 0 };
2474void HexagonFrameLowering::expandAlloca(MachineInstr *AI,
2475 const HexagonInstrInfo &HII,
Register SP,
unsigned CF)
const {
2476 MachineBasicBlock &MB = *AI->
getParent();
2500 BuildMI(MB, AI,
DL, HII.get(Hexagon::A2_sub), Rd)
2505 BuildMI(MB, AI,
DL, HII.get(Hexagon::A2_sub), SP)
2511 BuildMI(MB, AI,
DL, HII.get(Hexagon::A2_andir), Rd)
2515 BuildMI(MB, AI,
DL, HII.get(Hexagon::A2_andir), SP)
2521 BuildMI(MB, AI,
DL, HII.get(TargetOpcode::COPY), SP)
2526 BuildMI(MB, AI,
DL, HII.get(Hexagon::A2_addi), Rd)
2546 if (
I.getOpcode() == Hexagon::PS_aligna)
2553void HexagonFrameLowering::addCalleeSaveRegistersAsImpOperand(
MachineInstr *
MI,
2554 const CSIVect &CSI,
bool IsDef,
bool IsKill)
const {
2565 const CSIVect &CSI)
const {
2578 BitVector Regs(Hexagon::NUM_TARGET_REGS);
2581 if (!Hexagon::DoubleRegsRegClass.
contains(R))
2585 int F = Regs.find_first();
2586 if (
F != Hexagon::D8)
2589 int N = Regs.find_next(
F);
2590 if (
N >= 0 &&
N !=
F+1)
2598bool HexagonFrameLowering::useSpillFunction(
const MachineFunction &MF,
2599 const CSIVect &CSI)
const {
2600 if (shouldInlineCSR(MF, CSI))
2602 unsigned NumCSI = CSI.size();
2608 return Threshold < NumCSI;
2611bool HexagonFrameLowering::useRestoreFunction(
const MachineFunction &MF,
2612 const CSIVect &CSI)
const {
2613 if (shouldInlineCSR(MF, CSI))
2623 unsigned NumCSI = CSI.size();
2629 return Threshold < NumCSI;
2632bool HexagonFrameLowering::mayOverflowFrameOffset(MachineFunction &MF)
const {
2637 if (HST.useHVXOps() && StackSize > 256)
2644 bool HasImmStack =
false;
2645 unsigned MinLS = ~0
u;
2647 for (
const MachineBasicBlock &
B : MF) {
2648 for (
const MachineInstr &
MI :
B) {
2650 switch (
MI.getOpcode()) {
2651 case Hexagon::S4_storeirit_io:
2652 case Hexagon::S4_storeirif_io:
2653 case Hexagon::S4_storeiri_io:
2656 case Hexagon::S4_storeirht_io:
2657 case Hexagon::S4_storeirhf_io:
2658 case Hexagon::S4_storeirh_io:
2661 case Hexagon::S4_storeirbt_io:
2662 case Hexagon::S4_storeirbf_io:
2663 case Hexagon::S4_storeirb_io:
2664 if (
MI.getOperand(0).isFI())
2666 MinLS = std::min(MinLS, LS);
2680struct HexagonFrameSortingObject {
2681 bool IsValid =
false;
2687struct HexagonFrameSortingComparator {
2688 inline bool operator()(
const HexagonFrameSortingObject &
A,
2689 const HexagonFrameSortingObject &
B)
const {
2690 return std::make_tuple(!
A.IsValid,
A.ObjectAlignment,
A.Size) <
2691 std::make_tuple(!
B.IsValid,
B.ObjectAlignment,
B.Size);
2701 if (ObjectsToAllocate.
empty())
2705 int NObjects = ObjectsToAllocate.
size();
2713 if (i != ObjectsToAllocate[j])
2724 SortingObjects[i].IsValid =
true;
2725 SortingObjects[i].Index = i;
2735 for (
auto &Obj : SortingObjects) {
2738 ObjectsToAllocate[--i] = Obj.Index;
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file contains the simple types necessary to represent the attributes associated with functions a...
This file implements the BitVector class.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static RegisterPass< DebugifyModulePass > DM("debugify", "Attach debug info to everything")
This file defines the DenseMap class.
static MachineInstr * getReturn(MachineBasicBlock &MBB)
Returns the "return" instruction from this block, or nullptr if there isn't any.
static cl::opt< unsigned > ShrinkLimit("shrink-frame-limit", cl::init(std::numeric_limits< unsigned >::max()), cl::Hidden, cl::desc("Max count of stack frame shrink-wraps"))
static bool isOptNone(const MachineFunction &MF)
static cl::opt< int > SpillFuncThreshold("spill-func-threshold", cl::Hidden, cl::desc("Specify O2(not Os) spill func threshold"), cl::init(6))
static std::optional< MachineBasicBlock::iterator > findCFILocation(MachineBasicBlock &B)
static cl::opt< bool > EliminateFramePointer("hexagon-fp-elim", cl::init(true), cl::Hidden, cl::desc("Refrain from using FP whenever possible"))
static bool enableAllocFrameElim(const MachineFunction &MF)
static const char * getSpillFunctionFor(Register MaxReg, SpillKind SpillType, bool Stkchk=false)
static bool hasReturn(const MachineBasicBlock &MBB)
Returns true if MBB contains an instruction that returns.
static cl::opt< bool > EnableSaveRestoreLong("enable-save-restore-long", cl::Hidden, cl::desc("Enable long calls for save-restore stubs."), cl::init(false))
static bool needToReserveScavengingSpillSlots(MachineFunction &MF, const HexagonRegisterInfo &HRI, const TargetRegisterClass *RC)
Returns true if there are no caller-saved registers available in class RC.
static bool isOptSize(const MachineFunction &MF)
static Register getMax32BitSubRegister(Register Reg, const TargetRegisterInfo &TRI, bool hireg=true)
Map a register pair Reg to the subregister that has the greater "number", i.e.
static cl::opt< int > SpillFuncThresholdOs("spill-func-threshold-Os", cl::Hidden, cl::desc("Specify Os spill func threshold"), cl::init(1))
static bool needsStackFrame(const MachineBasicBlock &MBB, const BitVector &CSR, const HexagonRegisterInfo &HRI)
Checks if the basic block contains any instruction that needs a stack frame to be already in place.
static cl::opt< bool > DisableDeallocRet("disable-hexagon-dealloc-ret", cl::Hidden, cl::desc("Disable Dealloc Return for Hexagon target"))
static cl::opt< bool > EnableShrinkWrapping("hexagon-shrink-frame", cl::init(true), cl::Hidden, cl::desc("Enable stack frame shrink wrapping"))
static bool hasTailCall(const MachineBasicBlock &MBB)
Returns true if MBB has a machine instructions that indicates a tail call in the block.
static cl::opt< unsigned > NumberScavengerSlots("number-scavenger-slots", cl::Hidden, cl::desc("Set the number of scavenger slots"), cl::init(2))
static Register getMaxCalleeSavedReg(ArrayRef< CalleeSavedInfo > CSI, const TargetRegisterInfo &TRI)
Returns the callee saved register with the largest id in the vector.
static bool isMinSize(const MachineFunction &MF)
static cl::opt< unsigned > SpillOptMax("spill-opt-max", cl::Hidden, cl::init(std::numeric_limits< unsigned >::max()))
static unsigned SpillOptCount
static void dump_registers(BitVector &Regs, const TargetRegisterInfo &TRI)
static bool isRestoreCall(unsigned Opc)
static cl::opt< bool > OptimizeSpillSlots("hexagon-opt-spill", cl::Hidden, cl::init(true), cl::desc("Optimize spill slots"))
static cl::opt< bool > EnableStackOVFSanitizer("enable-stackovf-sanitizer", cl::Hidden, cl::desc("Enable runtime checks for stack overflow."), cl::init(false))
Legalize the Machine IR: a function's Machine IR
This file implements the LivePhysRegs utility for tracking liveness of physical registers.
Register const TargetRegisterInfo * TRI
Promote Memory to Register
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
This file builds on the ADT/GraphTraits.h file to build a generic graph post order iterator.
This file declares the machine register scavenger class.
bool isDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallSet class.
This file defines the SmallVector class.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory), i.e. a start pointer and a length.
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
int find_first() const
find_first - Returns the index of the first set bit, -1 if none of the bits are set.
void resize(unsigned N, bool t=false)
resize - Grow or shrink the bitvector.
int find_next(unsigned Prev) const
find_next - Returns the index of the next set bit following the "Prev" bit.
The CalleeSavedInfo class tracks the information needed to locate where a callee saved register is in the current frame.
NodeT * findNearestCommonDominator(NodeT *A, NodeT *B) const
Find nearest common dominator basic block for basic block A and B.
bool dominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
dominates - Returns true iff A dominates B.
void recalculate(ParentType &Func)
recalculate - compute a dominator tree for the given function
FunctionPass class - This class is used to implement most global optimizations.
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
bool hasOptNone() const
Do not optimize this function (-O0).
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
void replaceInstr(MachineInstr *OldMI, MachineInstr *NewMI)
IndexType getIndex(MachineInstr *MI) const
MachineInstr * getInstr(IndexType Idx) const
void add(IndexType Start, IndexType End, bool Fixed, bool TiedEnd)
const MachineInstr * getAlignaInstr(const MachineFunction &MF) const
void insertCFIInstructions(MachineFunction &MF) const
bool hasFPImpl(const MachineFunction &MF) const override
bool enableCalleeSaveSkip(const MachineFunction &MF) const override
Returns true if the target can safely skip saving callee-saved registers for noreturn nounwind functions.
MachineBasicBlock::iterator eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator I) const override
This method is called during prolog/epilog code insertion to eliminate call frame setup and destroy pseudo instructions.
StackOffset getFrameIndexReference(const MachineFunction &MF, int FI, Register &FrameReg) const override
getFrameIndexReference - This method should return the base register and offset used to reference a f...
void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override
Perform most of the PEI work here:
void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, RegScavenger *RS) const override
This method determines which of the registers reported by TargetRegisterInfo::getCalleeSavedRegs() sh...
void orderFrameObjects(const MachineFunction &MF, SmallVectorImpl< int > &ObjectsToAllocate) const override
Order the symbols in the local stack frame.
void processFunctionBeforeFrameFinalized(MachineFunction &MF, RegScavenger *RS=nullptr) const override
processFunctionBeforeFrameFinalized - This method is called immediately before the specified function...
const SpillSlot * getCalleeSavedSpillSlots(unsigned &NumEntries) const override
getCalleeSavedSpillSlots - This method returns a pointer to an array of pairs, that contains an entry...
bool needsAligna(const MachineFunction &MF) const
bool assignCalleeSavedSpillSlots(MachineFunction &MF, const TargetRegisterInfo *TRI, std::vector< CalleeSavedInfo > &CSI) const override
assignCalleeSavedSpillSlots - Allows target to override spill slot assignment logic.
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
Store the specified register of the given register class to the specified stack frame index.
const HexagonRegisterInfo & getRegisterInfo() const
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, Register VReg, unsigned SubReg=0, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
Load the specified register of the given register class from the specified stack frame index.
Hexagon target-specific information for each MachineFunction.
bool isEHReturnCalleeSaveReg(Register Reg) const
const MCPhysReg * getCalleeSavedRegs(const MachineFunction *MF) const override
Code Generation virtual methods...
const MCPhysReg * getCallerSavedRegs(const MachineFunction *MF, const TargetRegisterClass *RC) const
const HexagonInstrInfo * getInstrInfo() const override
bool isEnvironmentMusl() const
static MCCFIInstruction cfiDefCfa(MCSymbol *L, unsigned Register, int64_t Offset, SMLoc Loc={})
.cfi_def_cfa defines a rule for computing CFA as: take address from Register and add Offset to it.
static MCCFIInstruction createOffset(MCSymbol *L, unsigned Register, int64_t Offset, SMLoc Loc={})
.cfi_offset Previous value of Register is saved at offset Offset from CFA.
LLVM_ABI MCSymbol * createTempSymbol()
Create a temporary symbol with a unique name.
Describe properties that are true of each instruction in the target description file.
MCRegAliasIterator enumerates all registers aliasing Reg.
Wrapper class representing physical registers. Should be passed by value.
constexpr bool isValid() const
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
MachineInstrBundleIterator< const MachineInstr > const_iterator
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFunction yet, in which case this returns -1.
LLVM_ABI iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
LLVM_ABI DebugLoc findDebugLoc(instr_iterator MBBI)
Find the next valid DebugLoc starting at MBBI, skipping any debug instructions.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
LLVM_ABI instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
iterator_range< succ_iterator > successors()
MachineInstrBundleIterator< MachineInstr > iterator
LLVM_ABI bool isLiveIn(MCRegister Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
bool dominates(const MachineInstr *A, const MachineInstr *B) const
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setMaxCallFrameSize(uint64_t S)
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack ...
bool isObjectPreAllocated(int ObjectIdx) const
Return true if the object was pre-allocated into the local block.
uint64_t getStackSize() const
Return the number of bytes that must be allocated to hold all of the fixed size frame objects.
bool hasCalls() const
Return true if the current function has any function calls.
Align getMaxAlign() const
Return the alignment in bytes that this function must be aligned to, which is greater than the defaul...
uint64_t getMaxCallFrameSize() const
Return the maximum size of a call frame that must be allocated for an outgoing function call.
LLVM_ABI int CreateSpillStackObject(uint64_t Size, Align Alignment)
Create a new statically sized stack object that represents a spill slot, returning a nonnegative iden...
LLVM_ABI uint64_t estimateStackSize(const MachineFunction &MF) const
Estimate and return the size of the stack frame.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
const std::vector< CalleeSavedInfo > & getCalleeSavedInfo() const
Returns a reference to call saved info vector for the current function.
int getObjectIndexEnd() const
Return one past the maximum frame object index.
LLVM_ABI int CreateFixedSpillStackObject(uint64_t Size, int64_t SPOffset, bool IsImmutable=false)
Create a spill slot at a fixed location on the stack.
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
void setStackSize(uint64_t Size)
Set the size of the stack.
bool isFixedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a fixed stack object.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
Properties which a MachineFunction may have at a given point in time.
unsigned addFrameInst(const MCCFIInstruction &Inst)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
bool needsFrameMoves() const
True if this function needs frame moves for debug or exceptions.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
MachineBasicBlock * getBlockNumbered(unsigned N) const
getBlockNumbered - MachineBasicBlocks are automatically numbered when they are inserted into the mach...
Function & getFunction()
Return the LLVM function that this machine code represents.
unsigned getNumBlockIDs() const
getNumBlockIDs - Return the number of MBB ID's allocated.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineBasicBlock & front() const
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
const MachineInstrBuilder & addCFIIndex(unsigned CFIIndex) const
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addDef(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register definition operand.
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
instr_iterator getInstrIterator() const
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool isReturn(QueryType Type=AnyInBundle) const
const MachineBasicBlock * getParent() const
LLVM_ABI void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
LLVM_ABI void copyImplicitOps(MachineFunction &MF, const MachineInstr &MI)
Copy implicit register operands from specified instruction to this instruction.
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
void setSubReg(unsigned subReg)
unsigned getSubReg() const
LLVM_ABI void setReg(Register Reg)
Change the register this operand corresponds to.
void setIsKill(bool Val=true)
Register getReg() const
getReg - Returns the register number.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
LLVM_ABI MachineBasicBlock * findNearestCommonDominator(ArrayRef< MachineBasicBlock * > Blocks) const
Returns the nearest common dominator of the given blocks.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
A vector that has set insertion semantics.
size_type size() const
Determine the number of elements in the SetVector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
int64_t getFixed() const
Returns the fixed component of the stack.
bool hasFP(const MachineFunction &MF) const
hasFP - Return true if the specified function should have a dedicated frame pointer register.
virtual void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, RegScavenger *RS=nullptr) const
This method determines which of the registers reported by TargetRegisterInfo::getCalleeSavedRegs() sh...
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
const TargetRegisterInfo & getRegisterInfo() const
Primary interface to the complete machine description for the target machine.
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
bool isPositionIndependent() const
LLVM_ABI bool DisableFramePointerElim(const MachineFunction &MF) const
DisableFramePointerElim - This returns true if frame pointer elimination optimization should be disab...
unsigned getID() const
Return the register class ID number.
ArrayRef< MCPhysReg > getRawAllocationOrder(const MachineFunction &MF, bool Rev=false) const
Returns the preferred order for allocating registers from this register class in MF.
bool hasSubClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a sub-class of or equal to this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetInstrInfo * getInstrInfo() const
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
void stable_sort(R &&Range)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
@ Kill
The last use of a register.
constexpr RegState getKillRegState(bool B)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
FunctionPass * createHexagonCallFrameInformation()
FunctionAddr VTableAddr Count
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
DWARFExpression::Operation Op
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI Printable printReg(Register Reg, const TargetRegisterInfo *TRI=nullptr, unsigned SubIdx=0, const MachineRegisterInfo *MRI=nullptr)
Prints virtual and physical registers with or without a TRI instance.
LLVM_ABI Printable printMBBReference(const MachineBasicBlock &MBB)
Prints a machine basic block reference.
This struct is a compact representation of a valid (non-zero power of two) alignment.
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
static RegisterSet expandToSubRegs(RegisterRef R, const MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI)
std::map< RegisterRef, RangeList > RegToRangeMap
static LLVM_ABI MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)
Stack pointer relative access.