11 #define DEBUG_TYPE "hexagon-pei"
148 using namespace llvm;
171     cl::desc("Enable stack frame shrink wrapping"));
175     cl::desc("Max count of stack frame shrink-wraps"));
189 cl::init(std::numeric_limits<unsigned>::max()));
223 bool HexagonCallFrameInformation::runOnMachineFunction(MachineFunction &MF) {
230 HFI.insertCFIInstructions(MF);
235 "Hexagon call frame information",
false,
false)
238 return new HexagonCallFrameInformation();
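// getMax32BitSubRegister: map a double register (D0..D15) to its 32-bit subregister
// with the larger encoding (or the smaller one when the low half is requested).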
246 if (Reg < Hexagon::D0 || Reg > Hexagon::D15)
252 if (*SubRegs > RegNo)
255 if (!RegNo || *SubRegs < RegNo)
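// getMaxCalleeSavedReg: return the callee-saved register in CSI with the largest encoding.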
265 static_assert(Hexagon::R1 > 0,
266 "Assume physical registers are encoded as positive integers");
271 for (unsigned I = 1, E = CSI.size(); I < E; ++I) {
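// needsStackFrame: scan the block for anything that requires the stack frame to be set up
// already, such as PS_alloca/PS_aligna or references to callee-saved registers.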
283 for (auto &I : MBB) {
289 case Hexagon::PS_alloca:
290 case Hexagon::PS_aligna:
306 unsigned R = MO.getReg();
323 unsigned RetOpc = I->getOpcode();
324 return RetOpc == Hexagon::PS_tailcall_i || RetOpc == Hexagon::PS_tailcall_r;
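// isRestoreCall: true for the pseudo opcodes that call one of the library restore routines.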
346 case Hexagon::RESTORE_DEALLOC_RET_JMP_V4:
347 case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC:
348 case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT:
349 case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC:
350 case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT:
351 case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT_PIC:
352 case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4:
353 case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC:
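// findShrunkPrologEpilog: shrink-wrapping. Collect the blocks that need a stack frame and try
// to move the prolog/epilog to their nearest common dominator and post-dominator, provided
// those two blocks still dominate/post-dominate each other.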
379 static unsigned ShrinkCounter = 0;
400 for (RPOTType::rpo_iterator I = RPOT.begin(), E = RPOT.end(); I != E; ++I)
401 RPO[(*I)->getNumber()] = RPON++;
407 unsigned BN = RPO[I.getNumber()];
408 for (auto SI = I.succ_begin(), SE = I.succ_end(); SI != SE; ++SI) {
410 if (RPO[(*SI)->getNumber()] <= BN)
419 for (const MCPhysReg *P = HRI.getCalleeSavedRegs(&MF); *P; ++P)
428 dbgs() << "Blocks needing SF: {";
429 for (auto &B : SFBlocks)
430 dbgs() << " BB#" << B->getNumber();
434 if (SFBlocks.empty())
439 for (unsigned i = 1, n = SFBlocks.size(); i < n; ++i) {
445 for (unsigned i = 1, n = SFBlocks.size(); i < n; ++i) {
451 dbgs() << "Computed dom block: BB#";
453 else dbgs() << "<null>";
454 dbgs() << ", computed pdom block: BB#";
456 else dbgs() << "<null>";
464 DEBUG(dbgs() << "Dom block does not dominate pdom block\n");
468 DEBUG(dbgs() << "PDom block does not post-dominate dom block\n");
492 findShrunkPrologEpilog(MF, PrologB, EpilogB);
494 bool PrologueStubs = false;
495 insertCSRSpillsInBlock(*PrologB, CSI, HRI, PrologueStubs);
496 insertPrologueInBlock(*PrologB, PrologueStubs);
497 updateEntryPaths(MF, *PrologB);
500 insertCSRRestoresInBlock(*EpilogB, CSI, HRI);
501 insertEpilogueInBlock(*EpilogB);
504 if (B.isReturnBlock())
505 insertCSRRestoresInBlock(B, CSI, HRI);
508 if (B.isReturnBlock())
509 insertEpilogueInBlock(B);
527 BitVector DoneT(MaxBN+1), DoneF(MaxBN+1), Path(MaxBN+1);
528 updateExitPaths(*EpilogB, *EpilogB, DoneT, DoneF, Path);
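// insertPrologueInBlock: align the frame size, emit S2_allocframe (splitting the SP adjustment
// with A2_sub when it exceeds the allocframe immediate range), re-align SP with A2_andir when
// needed, and call __runtime_stack_check when the stack-overflow sanitizer is enabled.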
533 bool PrologueStubs) const {
550 FrameSize = MaxCFA + alignTo(FrameSize, MaxAlign);
557 unsigned SP = HRI.getStackRegister();
564 if (MI.getOpcode() == Hexagon::PS_alloca)
567 for (auto MI : AdjustRegs) {
568 assert((MI->getOpcode() == Hexagon::PS_alloca) && "Expected alloca");
569 expandAlloca(MI, HII, SP, MaxCF);
570 MI->eraseFromParent();
578 const unsigned int ALLOCFRAME_MAX = 16384;
586 if (NumBytes >= ALLOCFRAME_MAX) {
588 BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::S2_allocframe))
594 unsigned CallerSavedReg = HRI.getFirstCallerSavedNonParamReg();
596 CallerSavedReg).addImm(NumBytes);
597 BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_sub), SP)
601 BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::S2_allocframe))
607 BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_andir), SP)
609 .addImm(-int64_t(MaxAlign));
616 BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::PS_call_stk))
617 .addExternalSymbol("__runtime_stack_check");
628 unsigned SP = HRI.getStackRegister();
631 unsigned RetOpc = RetI ? RetI->getOpcode() : 0;
635 if (InsertPt != MBB.end())
636 DL = InsertPt->getDebugLoc();
637 else if (!MBB.empty())
638 DL = std::prev(MBB.end())->getDebugLoc();
641 if (RetOpc == Hexagon::EH_RETURN_JMPR) {
642 BuildMI(MBB, InsertPt, DL, HII.get(Hexagon::L2_deallocframe));
643 BuildMI(MBB, InsertPt, DL, HII.get(Hexagon::A2_add), SP)
651 if (RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4 ||
652 RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC ||
653 RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT ||
654 RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC) {
658 while (It != MBB.end()) {
670 bool NeedsDeallocframe = true;
671 if (!MBB.empty() && InsertPt != MBB.begin()) {
673 unsigned COpc = PrevIt->getOpcode();
674 if (COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4 ||
675 COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC ||
676 COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT ||
677 COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT_PIC ||
678 COpc == Hexagon::PS_call_nr || COpc == Hexagon::PS_callr_nr)
679 NeedsDeallocframe = false;
682 if (!NeedsDeallocframe)
688 BuildMI(MBB, InsertPt, DL, HII.get(Hexagon::L2_deallocframe));
691 unsigned NewOpc = Hexagon::L4_return;
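// updateEntryPaths / updateExitPaths: propagate through the CFG which blocks are reachable from
// the entry via the prolog block, and which paths to the exits pass through the restore block.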
708 for (unsigned i = 0; i < Worklist.size(); ++i) {
709 unsigned BN = Worklist[i];
725 if (Path[BN] || DoneF[BN])
733 bool ReachedExit = false;
735 ReachedExit |= updateExitPaths(*SB, RestoreB, DoneT, DoneF, Path);
751 if (ReachedExit && &MBB != &RestoreB) {
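// findCFILocation: place CFI instructions right after S2_allocframe, or after a bundle
// containing it unless that bundle also contains a call.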
776 if (I.getOpcode() == Hexagon::S2_allocframe)
777 return std::next(It);
781 bool HasCall = false, HasAllocFrame = false;
783 while (++T != End && T->isBundled()) {
784 if (T->getOpcode() == Hexagon::S2_allocframe)
785 HasAllocFrame = true;
786 else if (T->isCall())
790 return HasCall ? It : std::next(It);
799 insertCFIInstructionsAt(B, At.getValue());
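// insertCFIInstructionsAt: emit .cfi_def_cfa for FP/SP and .cfi_offset for the return address
// and each saved register, describing double registers as their two 32-bit halves.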
816 const MCInstrDesc &CFID = HII.get(TargetOpcode::CFI_INSTRUCTION);
819 bool HasFP = hasFP(MF);
822 unsigned DwFPReg = HRI.getDwarfRegNum(HRI.getFrameRegister(), true);
823 unsigned DwRAReg = HRI.getDwarfRegNum(HRI.getRARegister(), true);
849 static unsigned int RegsToMove[] = {
850 Hexagon::R1, Hexagon::R0, Hexagon::R3, Hexagon::R2,
851 Hexagon::R17, Hexagon::R16, Hexagon::R19, Hexagon::R18,
852 Hexagon::R21, Hexagon::R20, Hexagon::R23, Hexagon::R22,
853 Hexagon::R25, Hexagon::R24, Hexagon::R27, Hexagon::R26,
854 Hexagon::D0, Hexagon::D1, Hexagon::D8, Hexagon::D9,
855 Hexagon::D10, Hexagon::D11, Hexagon::D12, Hexagon::D13,
861 for (unsigned i = 0; RegsToMove[i] != Hexagon::NoRegister; ++i) {
862 unsigned Reg = RegsToMove[i];
864 return C.getReg() == Reg;
887 if (Reg < Hexagon::D0 || Reg > Hexagon::D15) {
888 unsigned DwarfReg = HRI.getDwarfRegNum(Reg, true);
900 unsigned HiReg = HRI.getSubReg(Reg, Hexagon::isub_hi);
901 unsigned LoReg = HRI.getSubReg(Reg, Hexagon::isub_lo);
902 unsigned HiDwarfReg = HRI.getDwarfRegNum(HiReg, true);
903 unsigned LoDwarfReg = HRI.getDwarfRegNum(LoReg, true);
922 .getLocalFrameObjectCount();
923 bool HasExtraAlign = HRI.needsStackRealignment(MF);
938 if ((HasFixed || HasPrealloc) && (HasAlloca || HasExtraAlign))
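// getSpillFunctionFor: pick the __save_r16_through_rNN / __restore_r16_through_rNN library
// routine that covers the highest callee-saved register, optionally the _stkchk variant.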
960 bool Stkchk = false) {
961 const char * V4SpillToMemoryFunctions[] = {
962 "__save_r16_through_r17",
963 "__save_r16_through_r19",
964 "__save_r16_through_r21",
965 "__save_r16_through_r23",
966 "__save_r16_through_r25",
967 "__save_r16_through_r27" };
969 const char * V4SpillToMemoryStkchkFunctions[] = {
970 "__save_r16_through_r17_stkchk",
971 "__save_r16_through_r19_stkchk",
972 "__save_r16_through_r21_stkchk",
973 "__save_r16_through_r23_stkchk",
974 "__save_r16_through_r25_stkchk",
975 "__save_r16_through_r27_stkchk" };
977 const char * V4SpillFromMemoryFunctions[] = {
978 "__restore_r16_through_r17_and_deallocframe",
979 "__restore_r16_through_r19_and_deallocframe",
980 "__restore_r16_through_r21_and_deallocframe",
981 "__restore_r16_through_r23_and_deallocframe",
982 "__restore_r16_through_r25_and_deallocframe",
983 "__restore_r16_through_r27_and_deallocframe" };
985 const char * V4SpillFromMemoryTailcallFunctions[] = {
986 "__restore_r16_through_r17_and_deallocframe_before_tailcall",
987 "__restore_r16_through_r19_and_deallocframe_before_tailcall",
988 "__restore_r16_through_r21_and_deallocframe_before_tailcall",
989 "__restore_r16_through_r23_and_deallocframe_before_tailcall",
990 "__restore_r16_through_r25_and_deallocframe_before_tailcall",
991 "__restore_r16_through_r27_and_deallocframe_before_tailcall"
994 const char **SpillFunc = nullptr;
998 SpillFunc = Stkchk ? V4SpillToMemoryStkchkFunctions
999 : V4SpillToMemoryFunctions;
1002 SpillFunc = V4SpillFromMemoryFunctions;
1005 SpillFunc = V4SpillFromMemoryTailcallFunctions;
1008 assert(SpillFunc && "Unknown spill kind");
1013 return SpillFunc[0];
1015 return SpillFunc[1];
1017 return SpillFunc[2];
1019 return SpillFunc[3];
1021 return SpillFunc[4];
1023 return SpillFunc[5];
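// getFrameIndexReference: decide whether a frame object is addressed relative to SP, FP, or the
// aligned-base pointer (AP), and compute the corresponding offset.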
1031 int FI, unsigned &FrameReg) const {
1037 bool HasExtraAlign = HRI.needsStackRealignment(MF);
1040 unsigned SP = HRI.getStackRegister(), FP = HRI.getFrameRegister();
1045 bool UseFP = false, UseAP = false;
1050 if (NoOpt && !HasExtraAlign)
1055 UseFP |= (HasAlloca || HasExtraAlign);
1066 bool HasFP = hasFP(MF);
1067 assert((HasFP || !UseFP) && "This function must have frame pointer");
1093 if (Offset > 0 && !HasFP)
1108 if (!UseFP && !UseAP && HasFP)
1109 RealOffset = FrameSize + Offset;
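// insertCSRSpillsInBlock: either emit a single SAVE_REGISTERS_CALL_V4* call to a common spill
// routine, adding the saved registers as implicit operands, or spill each CSR individually with
// storeRegToStackSlot.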
1115 bool &PrologueStubs) const {
1120 PrologueStubs = false;
1125 if (useSpillFunction(MF, CSI)) {
1126 PrologueStubs = true;
1138 if (StkOvrFlowEnabled) {
1140 SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4STK_EXT_PIC
1141 : Hexagon::SAVE_REGISTERS_CALL_V4STK_EXT;
1143 SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4STK_PIC
1144 : Hexagon::SAVE_REGISTERS_CALL_V4STK;
1147 SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4_EXT_PIC
1148 : Hexagon::SAVE_REGISTERS_CALL_V4_EXT;
1150 SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4_PIC
1151 : Hexagon::SAVE_REGISTERS_CALL_V4;
1155 BuildMI(MBB, MI, DL, HII.get(SpillOpc))
1156 .addExternalSymbol(SpillFun);
1159 addCalleeSaveRegistersAsImpOperand(SaveRegsCall, CSI, false, true);
1161 for (unsigned I = 0; I < CSI.size(); ++I)
1166 for (unsigned i = 0, n = CSI.size(); i < n; ++i) {
1167 unsigned Reg = CSI[i].getReg();
1172 int FI = CSI[i].getFrameIdx();
1174 HII.storeRegToStackSlot(MBB, MI, Reg, IsKill, FI, RC, &HRI);
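// insertCSRRestoresInBlock: the mirror of the spill path; call a __restore_* routine (which also
// deallocates the frame and, in return blocks, returns) or reload each CSR with loadRegFromStackSlot.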
1191 if (useRestoreFunction(MF, CSI)) {
1197 bool IsPIC = HTM.isPositionIndependent();
1208 RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT_PIC
1209 : Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT;
1211 RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC
1212 : Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4;
1213 DeallocCall = BuildMI(MBB, MI, DL, HII.get(RetOpc))
1214 .addExternalSymbol(RestoreFn);
1218 assert(It->isReturn() && std::next(It) == MBB.end());
1221 RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC
1222 : Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT;
1224 RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC
1225 : Hexagon::RESTORE_DEALLOC_RET_JMP_V4;
1226 DeallocCall = BuildMI(MBB, It, DL, HII.get(RetOpc))
1227 .addExternalSymbol(RestoreFn);
1231 addCalleeSaveRegistersAsImpOperand(DeallocCall, CSI, true, false);
1235 for (unsigned i = 0; i < CSI.size(); ++i) {
1236 unsigned Reg = CSI[i].getReg();
1238 int FI = CSI[i].getFrameIdx();
1239 HII.loadRegFromStackSlot(MBB, MI, Reg, FI, RC, &HRI);
1251 assert((Opc == Hexagon::ADJCALLSTACKDOWN || Opc == Hexagon::ADJCALLSTACKUP) &&
1252 "Cannot handle this call frame pseudo instruction");
1253 return MBB.erase(I);
1266 if (!HasAlloca || !NeedsAlign)
1284 assert(A <= 8 && "Unexpected local frame alignment");
1292 AP = AI->getOperand(0).getReg();
1302 auto IsUsed = [&HRI, &MRI] (unsigned Reg) -> bool {
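// assignCalleeSavedSpillSlots: build the set of registers to save (promoting to super-registers
// and dropping reserved ones), assign the fixed spill slots first, then create slots below them
// for the remaining registers.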
1335 BitVector SRegs(Hexagon::NUM_TARGET_REGS);
1343 DEBUG(dbgs() << "Initial CS registers: {");
1344 for (unsigned i = 0, n = CSI.size(); i < n; ++i) {
1345 unsigned R = CSI[i].getReg();
1356 for (int x = Reserved.find_first(); x >= 0; x = Reserved.find_next(x)) {
1368 BitVector TmpSup(Hexagon::NUM_TARGET_REGS);
1374 for (int x = TmpSup.find_first(); x >= 0; x = TmpSup.find_next(x)) {
1410 for (const SpillSlot *S = FixedSlots; S != FixedSlots+NumFixed; ++S) {
1414 int FI = MFI.CreateFixedSpillStackObject(RC->getSize(), S->Offset);
1415 MinOffset = std::min(MinOffset, S->Offset);
1417 SRegs[S->Reg] = false;
1426 int Off = MinOffset - RC->getSize();
1430 int FI = MFI.CreateFixedSpillStackObject(RC->getSize(), Off);
1431 MinOffset = std::min(MinOffset, Off);
1437 dbgs() << "CS information: {";
1438 for (unsigned i = 0, n = CSI.size(); i < n; ++i) {
1439 int FI = CSI[i].getFrameIdx();
1440 int Off = MFI.getObjectOffset(FI);
1451 bool MissedReg = false;
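// expandCopy / expandStoreInt / expandLoadInt: copies and spills of predicate (STriw_pred) and
// modifier (STriw_mod) registers go through an integer scratch register using C2_tfrpr/C2_tfrrp
// and A2_tfrcrr/A2_tfrrcr.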
1471 if (!Hexagon::ModRegsRegClass.contains(DstR) ||
1472 !Hexagon::ModRegsRegClass.contains(SrcR))
1476 BuildMI(B, It, DL, HII.get(TargetOpcode::COPY), TmpR)
1478 BuildMI(B, It, DL, HII.get(TargetOpcode::COPY), DstR)
1502 unsigned TfrOpc = (Opc == Hexagon::STriw_pred) ? Hexagon::C2_tfrpr
1503 : Hexagon::A2_tfrcrr;
1504 BuildMI(B, It, DL, HII.get(TfrOpc), TmpR)
1508 BuildMI(B, It, DL, HII.get(Hexagon::S2_storeri_io))
1533 BuildMI(B, It, DL, HII.get(Hexagon::L2_loadri_io), TmpR)
1540 unsigned TfrOpc = (Opc == Hexagon::LDriw_pred) ? Hexagon::C2_tfrrp
1541 : Hexagon::A2_tfrrcr;
1542 BuildMI(B, It, DL, HII.get(TfrOpc), DstR)
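// expandStoreVecPred / expandLoadVecPred: vector-predicate spills materialize the 0x01010101
// pattern and convert between predicate and vector with V6_vandqrt / V6_vandvrt through a
// temporary vector register, then reuse the plain vector spill/reload expansion.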
1555 if (!MI->getOperand(0).isFI())
1559 unsigned SrcR = MI->getOperand(2).getReg();
1560 bool IsKill = MI->getOperand(2).isKill();
1561 int FI = MI->getOperand(0).getIndex();
1563 bool Is128B = HST.useHVXDblOps();
1564 auto *RC = !Is128B ? &Hexagon::VectorRegsRegClass
1565 : &Hexagon::VectorRegs128BRegClass;
1574 BuildMI(B, It, DL, HII.get(Hexagon::A2_tfrsi), TmpR0)
1575 .addImm(0x01010101);
1577 unsigned VandOpc = !Is128B ? Hexagon::V6_vandqrt : Hexagon::V6_vandqrt_128B;
1578 BuildMI(B, It, DL, HII.get(VandOpc), TmpR1)
1584 expandStoreVec(B, std::prev(It), MRI, HII, NewRegs);
1597 if (!MI->getOperand(1).isFI())
1601 unsigned DstR = MI->getOperand(0).getReg();
1602 int FI = MI->getOperand(1).getIndex();
1604 bool Is128B = HST.useHVXDblOps();
1605 auto *RC = !Is128B ? &Hexagon::VectorRegsRegClass
1606 : &Hexagon::VectorRegs128BRegClass;
1614 BuildMI(B, It, DL, HII.get(Hexagon::A2_tfrsi), TmpR0)
1615 .addImm(0x01010101);
1618 expandLoadVec(B, std::prev(It), MRI, HII, NewRegs);
1620 unsigned VandOpc = !Is128B ? Hexagon::V6_vandvrt : Hexagon::V6_vandvrt_128B;
1621 BuildMI(B, It, DL, HII.get(VandOpc), DstR)
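// expandStoreVec2 / expandLoadVec2: double vector registers are split into their vsub_lo/vsub_hi
// halves; each half uses the aligned (V6_vS32b/V6_vL32b) or unaligned (V6_vS32Ub/V6_vL32Ub) form
// depending on the slot alignment, and dead source halves are skipped using liveness.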
1649 for (auto R = B.begin(); R != It; ++R)
1650 LPR.stepForward(*R, Clobbers);
1654 unsigned SrcLo = HRI.getSubReg(SrcR, Hexagon::vsub_lo);
1655 unsigned SrcHi = HRI.getSubReg(SrcR, Hexagon::vsub_hi);
1659 bool Is128B = HST.useHVXDblOps();
1660 auto *RC = !Is128B ? &Hexagon::VectorRegsRegClass
1661 : &Hexagon::VectorRegs128BRegClass;
1662 unsigned Size = RC->getSize();
1664 unsigned HasAlign = MFI.getObjectAlignment(FI);
1668 if (LPR.contains(SrcLo)) {
1669 if (NeedAlign <= HasAlign)
1670 StoreOpc = !Is128B ? Hexagon::V6_vS32b_ai : Hexagon::V6_vS32b_ai_128B;
1672 StoreOpc = !Is128B ? Hexagon::V6_vS32Ub_ai : Hexagon::V6_vS32Ub_ai_128B;
1674 BuildMI(B, It, DL, HII.get(StoreOpc))
1682 if (LPR.contains(SrcHi)) {
1683 if (NeedAlign <= MinAlign(HasAlign, Size))
1684 StoreOpc = !Is128B ? Hexagon::V6_vS32b_ai : Hexagon::V6_vS32b_ai_128B;
1686 StoreOpc = !Is128B ? Hexagon::V6_vS32Ub_ai : Hexagon::V6_vS32Ub_ai_128B;
1688 BuildMI(B, It, DL, HII.get(StoreOpc))
1712 unsigned DstHi = HRI.getSubReg(DstR, Hexagon::vsub_hi);
1713 unsigned DstLo = HRI.getSubReg(DstR, Hexagon::vsub_lo);
1716 bool Is128B = HST.useHVXDblOps();
1717 auto *RC = !Is128B ? &Hexagon::VectorRegsRegClass
1718 : &Hexagon::VectorRegs128BRegClass;
1719 unsigned Size = RC->getSize();
1721 unsigned HasAlign = MFI.getObjectAlignment(FI);
1725 if (NeedAlign <= HasAlign)
1726 LoadOpc = !Is128B ? Hexagon::V6_vL32b_ai : Hexagon::V6_vL32b_ai_128B;
1728 LoadOpc = !Is128B ? Hexagon::V6_vL32Ub_ai : Hexagon::V6_vL32Ub_ai_128B;
1730 BuildMI(B, It, DL, HII.get(LoadOpc), DstLo)
1736 if (NeedAlign <= MinAlign(HasAlign, Size))
1737 LoadOpc = !Is128B ? Hexagon::V6_vL32b_ai : Hexagon::V6_vL32b_ai_128B;
1739 LoadOpc = !Is128B ? Hexagon::V6_vL32Ub_ai : Hexagon::V6_vL32Ub_ai_128B;
1741 BuildMI(B, It, DL, HII.get(LoadOpc), DstHi)
1765 bool Is128B = HST.useHVXDblOps();
1766 auto *RC = !Is128B ? &Hexagon::VectorRegsRegClass
1767 : &Hexagon::VectorRegs128BRegClass;
1770 unsigned HasAlign = MFI.getObjectAlignment(FI);
1773 if (NeedAlign <= HasAlign)
1774 StoreOpc = !Is128B ? Hexagon::V6_vS32b_ai : Hexagon::V6_vS32b_ai_128B;
1776 StoreOpc = !Is128B ? Hexagon::V6_vS32Ub_ai : Hexagon::V6_vS32Ub_ai_128B;
1778 BuildMI(B, It, DL, HII.get(StoreOpc))
1802 bool Is128B = HST.useHVXDblOps();
1803 auto *RC = !Is128B ? &Hexagon::VectorRegsRegClass
1804 : &Hexagon::VectorRegs128BRegClass;
1807 unsigned HasAlign = MFI.getObjectAlignment(FI);
1810 if (NeedAlign <= HasAlign)
1811 LoadOpc = !Is128B ? Hexagon::V6_vL32b_ai : Hexagon::V6_vL32b_ai_128B;
1813 LoadOpc = !Is128B ? Hexagon::V6_vL32Ub_ai : Hexagon::V6_vL32Ub_ai_128B;
1815 BuildMI(B, It, DL, HII.get(LoadOpc), DstR)
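// expandSpillMacros: walk every instruction in the function and dispatch the spill/reload/copy
// pseudos to the expansion helpers above.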
1827 auto &HII = *HST.getInstrInfo();
1829 bool Changed = false;
1831 for (auto &B : MF) {
1836 NextI = std::next(I);
1840 case TargetOpcode::COPY:
1841 Changed |= expandCopy(B, I, MRI, HII, NewRegs);
1843 case Hexagon::STriw_pred:
1844 case Hexagon::STriw_mod:
1845 Changed |= expandStoreInt(B, I, MRI, HII, NewRegs);
1847 case Hexagon::LDriw_pred:
1848 case Hexagon::LDriw_mod:
1849 Changed |= expandLoadInt(B, I, MRI, HII, NewRegs);
1851 case Hexagon::PS_vstorerq_ai:
1852 case Hexagon::PS_vstorerq_ai_128B:
1853 Changed |= expandStoreVecPred(B, I, MRI, HII, NewRegs);
1855 case Hexagon::PS_vloadrq_ai:
1856 case Hexagon::PS_vloadrq_ai_128B:
1857 Changed |= expandLoadVecPred(B, I, MRI, HII, NewRegs);
1859 case Hexagon::PS_vloadrw_ai:
1860 case Hexagon::PS_vloadrwu_ai:
1861 case Hexagon::PS_vloadrw_ai_128B:
1862 case Hexagon::PS_vloadrwu_ai_128B:
1863 Changed |= expandLoadVec2(B, I, MRI, HII, NewRegs);
1865 case Hexagon::PS_vstorerw_ai:
1866 case Hexagon::PS_vstorerwu_ai:
1867 case Hexagon::PS_vstorerw_ai_128B:
1868 case Hexagon::PS_vstorerwu_ai_128B:
1869 Changed |= expandStoreVec2(B, I, MRI, HII, NewRegs);
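// determineCalleeSaves: mark the callee-saved registers that are used, expand the spill pseudos,
// optionally optimize spill slots, and reserve scavenging spill slots for each register class
// that may need one.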
1882 auto &HRI = *HST.getRegisterInfo();
1884 SavedRegs.resize(HRI.getNumRegs());
1889 for (const MCPhysReg *R = HRI.getCalleeSavedRegs(&MF); *R; ++R)
1894 expandSpillMacros(MF, NewRegs);
1896 optimizeSpillSlots(MF, NewRegs);
1900 if (!NewRegs.empty() || mayOverflowFrameOffset(MF)) {
1906 SpillRCs.insert(&Hexagon::IntRegsRegClass);
1908 for (unsigned VR : NewRegs)
1911 for (auto *RC : SpillRCs) {
1916 for (unsigned i = 0; i < Num; i++) {
1934 auto isDead = [&FIR, &DeadMap] (unsigned Reg) -> bool {
1935 auto F = DeadMap.find({Reg, 0});
1936 if (F == DeadMap.end())
1938 for (auto &DR : F->second)
1939 if (DR.contains(FIR))
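// optimizeSpillSlots: compute per-block live ranges for each spill slot; when the spilled value
// can live in an otherwise unused physical register over the whole range, rewrite the store as a
// COPY into that register and the reloads as copies (or sxtb/sxth/zxtb/zxth for extending loads).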
1962 auto &HRI = *HST.getRegisterInfo();
1966 typedef std::map<MachineBasicBlock*,HexagonBlockRanges::InstrIndexMap>
1968 typedef std::map<MachineBasicBlock*,HexagonBlockRanges::RangeList>
1977 SlotInfo() = default;
1980 BlockIndexMap BlockIndexes;
1982 std::map<int,SlotInfo> FIRangeMap;
1991 if (HaveRC == nullptr || HaveRC == NewRC)
1996 if (NewRC->hasSubClassEq(HaveRC))
2003 for (auto &B : MF) {
2004 std::map<int,IndexType> LastStore, LastLoad;
2007 auto P = BlockIndexes.insert(
2009 auto &IndexMap = P.first->second;
2011 << IndexMap << '\n');
2013 for (auto &In : B) {
2015 bool Load = HII.isLoadFromStackSlot(In, LFI) && !HII.isPredicated(In);
2016 bool Store = HII.isStoreToStackSlot(In, SFI) && !HII.isPredicated(In);
2017 if (Load && Store) {
2029 if (Load || Store) {
2030 int TFI = Load ? LFI : SFI;
2031 unsigned AM = HII.getAddrMode(In);
2032 SlotInfo &SI = FIRangeMap[TFI];
2036 unsigned OpNum = Load ? 0 : 2;
2037 auto *RC = HII.getRegClass(In.getDesc(), OpNum, &HRI, MF);
2038 RC = getCommonRC(SI.RC, RC);
2046 unsigned S = (1U << (HII.getMemAccessSize(In) - 1));
2047 if (SI.Size != 0 && SI.Size != S)
2053 for (auto *Mo : In.memoperands()) {
2054 if (!Mo->isVolatile())
2065 for (unsigned i = 0, n = In.getNumOperands(); i < n; ++i) {
2072 if (i+1 >= n || !In.getOperand(i+1).isImm() ||
2073 In.getOperand(i+1).getImm() != 0)
2075 if (BadFIs.count(FI))
2081 LastStore[FI] = IndexType::Entry;
2082 LastLoad[FI] = Index;
2086 RL.add(LastStore[FI], LastLoad[FI], false, false);
2088 RL.add(IndexType::Entry, LastLoad[FI], false, false);
2090 LastStore[FI] = Index;
2097 for (auto &I : LastLoad) {
2098 IndexType LL = I.second;
2101 auto &RL = FIRangeMap[I.first].Map[&B];
2102 IndexType &LS = LastStore[I.first];
2104 RL.add(LS, LL, false, false);
2106 RL.add(IndexType::Entry, LL, false, false);
2109 for (auto &I : LastStore) {
2110 IndexType LS = I.second;
2113 auto &RL = FIRangeMap[I.first].Map[&B];
2119 for (auto &P : FIRangeMap) {
2120 dbgs() << "fi#" << P.first;
2121 if (BadFIs.count(P.first))
2124 if (P.second.RC != nullptr)
2125 dbgs() << HRI.getRegClassName(P.second.RC) << '\n';
2127 dbgs() << "<null>\n";
2128 for (auto &R : P.second.Map)
2129 dbgs() << " BB#" << R.first->getNumber() << " { " << R.second << "}\n";
2138 std::map<MachineBasicBlock*,std::vector<int>> BlockFIMap;
2140 for (auto &P : FIRangeMap) {
2142 if (BadFIs.count(P.first))
2144 for (auto &B : MF) {
2145 auto F = P.second.Map.find(&B);
2147 if (F == P.second.Map.end() || F->second.empty())
2150 if (IR.start() == IndexType::Entry)
2151 LoxFIs.insert(P.first);
2152 BlockFIMap[&B].push_back(P.first);
2157 dbgs() << "Block-to-FI map (* -- live-on-exit):\n";
2158 for (auto &P : BlockFIMap) {
2159 auto &FIs = P.second;
2162 dbgs() << " BB#" << P.first->getNumber() << ": {";
2163 for (auto I : FIs) {
2164 dbgs() << " fi#" << I;
2165 if (LoxFIs.count(I))
2177 for (auto &B : MF) {
2178 auto F = BlockIndexes.find(&B);
2179 assert(F != BlockIndexes.end());
2183 DEBUG(dbgs() << "BB#" << B.getNumber() << " dead map\n"
2186 for (auto FI : BlockFIMap[&B]) {
2187 if (BadFIs.count(FI))
2189 DEBUG(dbgs() << "Working on fi#" << FI << '\n');
2191 for (auto &Range : RL) {
2192 DEBUG(dbgs() << "--Examining range:" << RL << '\n');
2193 if (!IndexType::isInstr(Range.start()) ||
2194 !IndexType::isInstr(Range.end()))
2203 SrcOp.getSubReg() };
2204 auto *RC = HII.getRegClass(SI.getDesc(), 2, &HRI, MF);
2206 unsigned FoundR = this->findPhysReg(MF, Range, IM, DM, RC);
2221 if (SrcRR.Reg != FoundR || SrcRR.Sub != 0) {
2223 CopyIn = BuildMI(B, StartIt, DL, HII.get(TargetOpcode::COPY), FoundR)
2229 if (LoxFIs.count(FI) && (&Range == &RL.back())) {
2231 if (unsigned SR = SrcOp.getSubReg())
2232 SrcOp.setReg(HRI.getSubReg(FoundR, SR));
2234 SrcOp.setReg(FoundR);
2237 SrcOp.setIsKill(false);
2240 IM.replaceInstr(&SI, CopyIn);
2244 for (auto It = StartIt; It != EndIt; It = NextIt) {
2246 NextIt = std::next(It);
2248 if (!HII.isLoadFromStackSlot(MI, TFI) || TFI != FI)
2253 if (DstR != FoundR) {
2255 unsigned MemSize = (1U << (HII.getMemAccessSize(MI) - 1));
2257 unsigned CopyOpc = TargetOpcode::COPY;
2258 if (HII.isSignExtendingLoad(MI))
2259 CopyOpc = (MemSize == 1) ? Hexagon::A2_sxtb : Hexagon::A2_sxth;
2260 else if (HII.isZeroExtendingLoad(MI))
2261 CopyOpc = (MemSize == 1) ? Hexagon::A2_zxtb : Hexagon::A2_zxth;
2262 CopyOut = BuildMI(B, It, DL, HII.get(CopyOpc), DstR)
2265 IM.replaceInstr(&MI, CopyOut);
2272 DM[RR].subtract(Range);
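// expandAlloca: lower PS_alloca into explicit SP arithmetic (A2_sub, A2_andir for alignment,
// A2_addi/COPY) producing both the new SP and the result register.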
2278 void HexagonFrameLowering::expandAlloca(MachineInstr *AI,
2304 BuildMI(MB, AI, DL, HII.get(Hexagon::A2_sub), Rd)
2309 BuildMI(MB, AI, DL, HII.get(Hexagon::A2_sub), SP)
2315 BuildMI(MB, AI, DL, HII.get(Hexagon::A2_andir), Rd)
2319 BuildMI(MB, AI, DL, HII.get(Hexagon::A2_andir), SP)
2325 BuildMI(MB, AI, DL, HII.get(TargetOpcode::COPY), SP)
2330 BuildMI(MB, AI, DL, HII.get(Hexagon::A2_addi), Rd)
2350 if (I.getOpcode() == Hexagon::PS_aligna)
2357 void HexagonFrameLowering::addCalleeSaveRegistersAsImpOperand(MachineInstr *MI,
2358 const CSIVect &CSI, bool IsDef, bool IsKill) const {
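// shouldInlineCSR / useSpillFunction / useRestoreFunction: use the common save/restore routines
// only when the saved double registers form a contiguous run starting at D8 and the CSR count
// exceeds the -Os/-O2 spill-function thresholds.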
2369 const CSIVect &CSI) const {
2379 for (unsigned i = 0, n = CSI.size(); i < n; ++i) {
2380 unsigned R = CSI[i].getReg();
2381 if (!Hexagon::DoubleRegsRegClass.contains(R))
2385 int F = Regs.find_first();
2386 if (F != Hexagon::D8)
2389 int N = Regs.find_next(F);
2390 if (N >= 0 && N != F+1)
2399 const CSIVect &CSI) const {
2400 if (shouldInlineCSR(MF, CSI))
2402 unsigned NumCSI = CSI.size();
2408 return Threshold < NumCSI;
2412 const CSIVect &CSI) const {
2413 if (shouldInlineCSR(MF, CSI))
2423 unsigned NumCSI = CSI.size();
2429 return Threshold < NumCSI;
2432 bool HexagonFrameLowering::mayOverflowFrameOffset(MachineFunction &MF) const {
2438 if (HST.useHVXOps())
2439 return StackSize > 256;