#define GET_REGINFO_TARGET_DESC
#include "RISCVGenRegisterInfo.inc"
static cl::opt<bool> DisableRegAllocHints(
    "riscv-disable-regalloc-hints", cl::Hidden, cl::init(false),
    cl::desc("Disable two address hints for register "
             "allocation"));
static_assert(RISCV::X1 == RISCV::X0 + 1, "Register list not consecutive");
static_assert(RISCV::X31 == RISCV::X0 + 31, "Register list not consecutive");
static_assert(RISCV::F1_H == RISCV::F0_H + 1, "Register list not consecutive");
static_assert(RISCV::F31_H == RISCV::F0_H + 31, "Register list not consecutive");
static_assert(RISCV::F1_F == RISCV::F0_F + 1, "Register list not consecutive");
static_assert(RISCV::F31_F == RISCV::F0_F + 31, "Register list not consecutive");
static_assert(RISCV::F1_D == RISCV::F0_D + 1, "Register list not consecutive");
static_assert(RISCV::F31_D == RISCV::F0_D + 31, "Register list not consecutive");
static_assert(RISCV::F1_Q == RISCV::F0_Q + 1, "Register list not consecutive");
static_assert(RISCV::F31_Q == RISCV::F0_Q + 31, "Register list not consecutive");
static_assert(RISCV::V1 == RISCV::V0 + 1, "Register list not consecutive");
static_assert(RISCV::V31 == RISCV::V0 + 31, "Register list not consecutive");
  return CSR_IPRA_SaveList;

  return CSR_NoRegs_SaveList;

  return Subtarget.hasStdExtE() ? CSR_RT_MostRegs_RVE_SaveList
                                : CSR_RT_MostRegs_SaveList;

  if (Subtarget.hasVInstructions()) {
    if (Subtarget.hasStdExtD())
      return Subtarget.hasStdExtE() ? CSR_XLEN_F64_V_Interrupt_RVE_SaveList
                                    : CSR_XLEN_F64_V_Interrupt_SaveList;
    if (Subtarget.hasStdExtF())
      return Subtarget.hasStdExtE() ? CSR_XLEN_F32_V_Interrupt_RVE_SaveList
                                    : CSR_XLEN_F32_V_Interrupt_SaveList;
    return Subtarget.hasStdExtE() ? CSR_XLEN_V_Interrupt_RVE_SaveList
                                  : CSR_XLEN_V_Interrupt_SaveList;
  }
  if (Subtarget.hasStdExtD())
    return Subtarget.hasStdExtE() ? CSR_XLEN_F64_Interrupt_RVE_SaveList
                                  : CSR_XLEN_F64_Interrupt_SaveList;
  if (Subtarget.hasStdExtF())
    return Subtarget.hasStdExtE() ? CSR_XLEN_F32_Interrupt_RVE_SaveList
                                  : CSR_XLEN_F32_Interrupt_SaveList;
  return Subtarget.hasStdExtE() ? CSR_Interrupt_RVE_SaveList
                                : CSR_Interrupt_SaveList;

      Subtarget.hasVInstructions();

  switch (Subtarget.getTargetABI()) {
    return CSR_ILP32E_LP64E_SaveList;
    return CSR_ILP32_LP64_V_SaveList;
    return CSR_ILP32_LP64_SaveList;
    return CSR_ILP32F_LP64F_V_SaveList;
    return CSR_ILP32F_LP64F_SaveList;
    return CSR_ILP32D_LP64D_V_SaveList;
    return CSR_ILP32D_LP64D_SaveList;
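// Fragments from getReservedRegs(): registers the allocator must never use
// are collected here, e.g. sp (x2), gp (x3), tp (x4), fp (x8) when a frame
// pointer is in use, user-reserved registers, constant physical registers,
// the vector configuration CSRs (vtype, vxsat, vxrm), the FP CSRs (frm,
// fflags), and x16-x31 when only the RVE base ISA is available.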
  for (size_t Reg = 0; Reg < getNumRegs(); Reg++) {
    if (Subtarget.isRegisterReservedByUser(Reg))

    if (isConstantPhysReg(Reg))

  markSuperRegs(Reserved, RISCV::X2_H);
  markSuperRegs(Reserved, RISCV::X3_H);
  markSuperRegs(Reserved, RISCV::X4_H);

  markSuperRegs(Reserved, RISCV::X8_H);

  markSuperRegs(Reserved, RISCV::DUMMY_REG_PAIR_WITH_X0);

  if (Subtarget.hasStdExtE())
    for (MCPhysReg Reg = RISCV::X16_H; Reg <= RISCV::X31_H; Reg++)

  markSuperRegs(Reserved, RISCV::VTYPE);
  markSuperRegs(Reserved, RISCV::VXSAT);
  markSuperRegs(Reserved, RISCV::VXRM);

  markSuperRegs(Reserved, RISCV::FRM);
  markSuperRegs(Reserved, RISCV::FFLAGS);

  markSuperRegs(Reserved, RISCV::SF_VCIX_STATE);

  if (Subtarget.hasStdExtE())

  markSuperRegs(Reserved, RISCV::X23_H);
  markSuperRegs(Reserved, RISCV::X27_H);

  markSuperRegs(Reserved, RISCV::SSP);

  for (MCPhysReg Reg = RISCV::T0; Reg <= RISCV::T15; Reg++)
  return CSR_NoRegs_RegMask;
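// Fragments from adjustReg(): materialize DestReg = SrcReg + Offset, where
// Offset carries a fixed byte component and a scalable component. The
// scalable part is always a multiple of 8 bytes, so
// NumOfVReg = Offset.getScalable() / 8 is the offset in whole vector
// registers, and with a known VLEN it folds into NumOfVReg * VLENB fixed
// bytes (VLENB = VLEN / 8).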
  if (DestReg == SrcReg && !Offset.getFixed() && !Offset.getScalable())

  if (Offset.getScalable()) {
    if (auto VLEN = ST.getRealVLen()) {

      const int64_t VLENB = *VLEN / 8;

             "Reserve the stack by the multiple of one vector size.");
      const int64_t NumOfVReg = Offset.getScalable() / 8;
      const int64_t FixedOffset = NumOfVReg * VLENB;

             "Frame size outside of the signed 32-bit range not supported");
  bool KillSrcReg = false;

  if (Offset.getScalable()) {
    unsigned ScalableAdjOpc = RISCV::ADD;
    int64_t ScalableValue = Offset.getScalable();
    if (ScalableValue < 0) {
      ScalableValue = -ScalableValue;
      ScalableAdjOpc = RISCV::SUB;

    if (DestReg == SrcReg)
      ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);

    assert(ScalableValue > 0 && "There is no need to get VLEN scaled value.");
           "Reserve the stack by the multiple of one vector size.");
           "Expect the number of vector registers within 32-bits.");

    bool IsPrologueOrEpilogue =

    bool UseVsetvliRatherThanVlenb =
        IsPrologueOrEpilogue && ST.preferVsetvliOverReadVLENB();
    if (UseVsetvliRatherThanVlenb && (NumOfVReg == 1 || NumOfVReg == 2 ||
                                      NumOfVReg == 4 || NumOfVReg == 8)) {

    if (UseVsetvliRatherThanVlenb)

    if (ScalableAdjOpc == RISCV::ADD && ST.hasStdExtZba() &&
        (NumOfVReg == 2 || NumOfVReg == 4 || NumOfVReg == 8)) {
      unsigned Opc = NumOfVReg == 2
                         ? RISCV::SH1ADD
                         : (NumOfVReg == 4 ? RISCV::SH2ADD : RISCV::SH3ADD);

      TII->mulImm(MF, MBB, II, DL, ScratchReg, NumOfVReg, Flag);
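// Illustration (values chosen here, not taken from the source): with
// NumOfVReg == 4 the Zba path above can emit a single
// "sh2add DestReg, vlenb_scratch, SrcReg", which computes
// SrcReg + (vlenb << 2), i.e. SrcReg + 4 * VLENB, avoiding a separate
// multiply. The code below handles the fixed byte component of the offset.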
  int64_t Val = Offset.getFixed();
  if (DestReg == SrcReg && Val == 0)

  if (ST.hasVendorXqcilia() && isInt<26>(Val)) {

    int Hi20 = (Val & 0xFFFFF000) >> 12;
    bool IsCompressLUI =
        ((Val & 0xFFF) == 0) && (Hi20 != 0) &&
        (isUInt<5>(Hi20) || (Hi20 >= 0xfffe0 && Hi20 <= 0xfffff));
    bool IsCompressAddSub =
        (SrcReg == DestReg) &&
        ((Val > 0 && RISCV::GPRNoX0RegClass.contains(SrcReg)) ||
         (Val < 0 && RISCV::GPRCRegClass.contains(SrcReg)));

    if (!(IsCompressLUI && IsCompressAddSub)) {

  assert(Align < 2048 && "Required alignment too large");
  int64_t MaxPosAdjStep = 2048 - Align;
  if (Val > -4096 && Val <= (2 * MaxPosAdjStep)) {
    int64_t FirstAdj = Val < 0 ? -2048 : MaxPosAdjStep;
  if (ST.hasStdExtZba() && (Val & 0xFFF) != 0) {

    Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    TII->movImm(MBB, II, DL, ScratchReg, Val, Flag);

  unsigned Opc = RISCV::ADD;

  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  TII->movImm(MBB, II, DL, ScratchReg, Val, Flag);
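// When the fixed offset does not fit an ADDI immediate, the fallback above
// materializes it into a scratch register with movImm() and applies it with a
// register-register ADD.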
static std::tuple<RISCVVType::VLMUL, const TargetRegisterClass &, unsigned>
getSpillReloadInfo(unsigned NumRemaining, uint16_t RegEncoding, bool IsSpill) {
  if (NumRemaining >= 8 && RegEncoding % 8 == 0)
    return {RISCVVType::LMUL_8, RISCV::VRM8RegClass,
            IsSpill ? RISCV::VS8R_V : RISCV::VL8RE8_V};
  if (NumRemaining >= 4 && RegEncoding % 4 == 0)
    return {RISCVVType::LMUL_4, RISCV::VRM4RegClass,
            IsSpill ? RISCV::VS4R_V : RISCV::VL4RE8_V};
  if (NumRemaining >= 2 && RegEncoding % 2 == 0)
    return {RISCVVType::LMUL_2, RISCV::VRM2RegClass,
            IsSpill ? RISCV::VS2R_V : RISCV::VL2RE8_V};
  return {RISCVVType::LMUL_1, RISCV::VRRegClass,
          IsSpill ? RISCV::VS1R_V : RISCV::VL1RE8_V};
}
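// Example of the greedy selection above (illustrative values): spilling
// NF * LMUL == 6 vector registers starting at an encoding divisible by 4
// first emits VS4R_V for four registers, then VS2R_V for the remaining two.
// The fragments below are from lowerSegmentSpillReload(), which expands the
// segment spill/reload pseudos into such whole-register stores and loads,
// advancing the base pointer by PrevHandledNum * VLENB between parts.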
void RISCVRegisterInfo::lowerSegmentSpillReload(MachineBasicBlock::iterator II,
                                                bool IsSpill) const {

  unsigned NF = ZvlssegInfo->first;
  unsigned LMUL = ZvlssegInfo->second;
  unsigned NumRegs = NF * LMUL;
  assert(NumRegs <= 8 && "Invalid NF/LMUL combinations.");

  bool IsBaseKill = II->getOperand(1).isKill();
  Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);

  auto *OldMMO = *(II->memoperands_begin());

  unsigned VLENBShift = 0;
  unsigned PrevHandledNum = 0;

  while (I != NumRegs) {
    auto [LMulHandled, RegClass, Opcode] =

    bool IsLast = I + RegNumHandled == NumRegs;
    if (PrevHandledNum) {

      int64_t Offset = *VLEN / 8 * PrevHandledNum;
      Step = MRI.createVirtualRegister(&RISCV::GPRRegClass);

      VLENB = MRI.createVirtualRegister(&RISCV::GPRRegClass);

      if (VLENBShift > ShiftAmount) {

            .addImm(VLENBShift - ShiftAmount);
      } else if (VLENBShift < ShiftAmount) {

            .addImm(ShiftAmount - VLENBShift);

      VLENBShift = ShiftAmount;

                                VRegSize * RegNumHandled));

    PrevHandledNum = RegNumHandled;
    RegEncoding += RegNumHandled;

  II->eraseFromParent();
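// Fragments from eliminateFrameIndex(): a frame-index operand is rewritten
// into FrameReg plus a byte offset. For instructions whose immediate field
// cannot hold the offset, or which have extra encoding constraints (the
// prefetch forms below require the low five bits of the offset to be zero),
// the immediate is set to 0 and the full offset is folded into the address
// register instead.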
                                          int SPAdj, unsigned FIOperandNum,

  assert(SPAdj == 0 && "Unexpected non-zero SPAdj value");

  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();

      getFrameLowering(MF)->getFrameIndexReference(MF, FrameIndex, FrameReg);

         "Frame offsets outside of the signed 32-bit range not supported");

  int64_t Val = Offset.getFixed();

  unsigned Opc = MI.getOpcode();

    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
  } else if ((Opc == RISCV::PREFETCH_I || Opc == RISCV::PREFETCH_R ||
              Opc == RISCV::PREFETCH_W) &&
             (Lo12 & 0b11111) != 0) {
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
  } else if (Opc == RISCV::MIPS_PREF && !isUInt<9>(Val)) {
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
  } else if ((Opc == RISCV::PseudoRV32ZdinxLD ||
              Opc == RISCV::PseudoRV32ZdinxSD) &&

    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);

    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Lo12);

  if (MI.getOpcode() == RISCV::ADDI)
    DestReg = MI.getOperand(0).getReg();

    DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);

  MI.getOperand(FIOperandNum).ChangeToRegister(DestReg, false,

  MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false,

  if (MI.getOpcode() == RISCV::ADDI &&
      MI.getOperand(0).getReg() == MI.getOperand(1).getReg() &&
      MI.getOperand(2).getImm() == 0) {
    MI.eraseFromParent();
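// The segment spill/reload pseudos below are the cases handled by
// lowerSegmentSpillReload() (IsSpill = true for the VSPILL pseudos, false for
// the VRELOAD pseudos).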
  switch (MI.getOpcode()) {
  case RISCV::PseudoVSPILL2_M1:
  case RISCV::PseudoVSPILL2_M2:
  case RISCV::PseudoVSPILL2_M4:
  case RISCV::PseudoVSPILL3_M1:
  case RISCV::PseudoVSPILL3_M2:
  case RISCV::PseudoVSPILL4_M1:
  case RISCV::PseudoVSPILL4_M2:
  case RISCV::PseudoVSPILL5_M1:
  case RISCV::PseudoVSPILL6_M1:
  case RISCV::PseudoVSPILL7_M1:
  case RISCV::PseudoVSPILL8_M1:

  case RISCV::PseudoVRELOAD2_M1:
  case RISCV::PseudoVRELOAD2_M2:
  case RISCV::PseudoVRELOAD2_M4:
  case RISCV::PseudoVRELOAD3_M1:
  case RISCV::PseudoVRELOAD3_M2:
  case RISCV::PseudoVRELOAD4_M1:
  case RISCV::PseudoVRELOAD4_M2:
  case RISCV::PseudoVRELOAD5_M1:
  case RISCV::PseudoVRELOAD6_M1:
  case RISCV::PseudoVRELOAD7_M1:
  case RISCV::PseudoVRELOAD8_M1:
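// Fragments from needsFrameBaseReg(): estimate whether a frame-index access
// is likely to stay within a 12-bit signed immediate of the frame pointer or
// stack pointer; if not, it is worth materializing a virtual base register.
// The callee-saved spill size is subtracted for the FP estimate, and a
// 128-byte allowance is added for the SP estimate (the "+ 128" below).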
  unsigned FIOperandNum = 0;
  for (; !MI->getOperand(FIOperandNum).isFI(); FIOperandNum++)
    assert(FIOperandNum < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand");

  if (!MI->mayLoad() && !MI->mayStore())

  if (TFI->hasFP(MF) && !shouldRealignStack(MF)) {

  unsigned CalleeSavedSize = 0;

    if (Subtarget.isRegisterReservedByUser(Reg))

    if (RISCV::GPRRegClass.contains(Reg))
      CalleeSavedSize += getSpillSize(RISCV::GPRRegClass);
    else if (RISCV::FPR64RegClass.contains(Reg))
      CalleeSavedSize += getSpillSize(RISCV::FPR64RegClass);
    else if (RISCV::FPR32RegClass.contains(Reg))
      CalleeSavedSize += getSpillSize(RISCV::FPR32RegClass);

  int64_t MaxFPOffset = Offset - CalleeSavedSize;

  int64_t MaxSPOffset = Offset + 128;
  unsigned FIOperandNum = 0;
  while (!MI->getOperand(FIOperandNum).isFI()) {

    assert(FIOperandNum < MI->getNumOperands() &&
           "Instr does not have a FrameIndex operand!");

  unsigned FIOperandNum = 0;
  while (!MI.getOperand(FIOperandNum).isFI()) {

    assert(FIOperandNum < MI.getNumOperands() &&
           "Instr does not have a FrameIndex operand!");

  MI.getOperand(FIOperandNum).ChangeToRegister(BaseReg, false);
  MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
749 "The MI must be I or S format.");
750 assert(
MI->getOperand(Idx).isFI() &&
"The Idx'th operand of MI is not a "
751 "FrameIndex operand");
752 return MI->getOperand(Idx + 1).getImm();
757 return TFI->
hasFP(MF) ? RISCV::X8 : RISCV::X2;
  if (Reg == RISCV::SF_VCIX_STATE)
    return "sf.vcix_state";

  return CSR_NoRegs_RegMask;

  return CSR_RT_MostRegs_RVE_RegMask;
  return CSR_RT_MostRegs_RegMask;

  return CSR_ILP32E_LP64E_RegMask;

  return CSR_ILP32_LP64_V_RegMask;
  return CSR_ILP32_LP64_RegMask;

  return CSR_ILP32F_LP64F_V_RegMask;
  return CSR_ILP32F_LP64F_RegMask;

  return CSR_ILP32D_LP64D_V_RegMask;
  return CSR_ILP32D_LP64D_RegMask;

  if (RC == &RISCV::VMV0RegClass)
    return &RISCV::VRRegClass;
  if (RC == &RISCV::VRNoV0RegClass)
    return &RISCV::VRRegClass;
  if (RC == &RISCV::VRM2NoV0RegClass)
    return &RISCV::VRM2RegClass;
  if (RC == &RISCV::VRM4NoV0RegClass)
    return &RISCV::VRM4RegClass;
  if (RC == &RISCV::VRM8NoV0RegClass)
    return &RISCV::VRM8RegClass;
  assert(Offset.getScalable() % 8 == 0 && "Invalid frame offset");

  int64_t VLENBSized = Offset.getScalable() / 8;
  if (VLENBSized > 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(VLENBSized);
    Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_plus);
  } else if (VLENBSized < 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(-VLENBSized);
    Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_minus);
  return getRegClassWeight(RC).RegWeight;

  std::pair<unsigned, Register> Hint = MRI->getRegAllocationHint(VirtReg);
  unsigned HintType = Hint.first;

  MCRegister TargetReg = PartnerPhys.id() + (WantOdd ? 1 : -1);

  if (RISCV::GPRRegClass.contains(TargetReg) &&

  unsigned RegNum = getEncodingValue(PhysReg);

  bool IsOdd = (RegNum % 2 != 0);

  if ((WantOdd && IsOdd) || (!WantOdd && !IsOdd))

      VirtReg, Order, Hints, MF, VRM, Matrix);

  return BaseImplRetVal;
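// Fragments from getRegAllocationHints(): besides honoring register-pair
// hints (the even/odd encoding logic above), this hook adds hints for the
// tied operand of compressible instructions, restricting them to the GPRC
// class where the compressed form requires it, so the allocator's choices
// keep more instructions eligible for the compressed encoding.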
                        bool NeedGPRC) -> void {

    if (PhysReg && (!NeedGPRC || RISCV::GPRCRegClass.contains(PhysReg)) &&
        !MO.getSubReg() && !VRRegMO.getSubReg()) {

      TwoAddrHints.insert(PhysReg);

  auto isCompressible = [&Subtarget](const MachineInstr &MI, bool &NeedGPRC) {

    switch (MI.getOpcode()) {

      if (!MI.getOperand(2).isImm())

      int64_t Imm = MI.getOperand(2).getImm();

      return Subtarget.hasStdExtZcb() && Imm == 255;

      return MI.getOperand(2).isImm() && isInt<6>(MI.getOperand(2).getImm());

    case RISCV::ZEXT_H_RV32:
    case RISCV::ZEXT_H_RV64:

      return Subtarget.hasStdExtZcb();

      return Subtarget.hasStdExtZcb() && MI.getOperand(2).isReg() &&
             MI.getOperand(2).getReg() == RISCV::X0;

      return Subtarget.hasStdExtZcb() && MI.getOperand(2).isImm() &&
             MI.getOperand(2).getImm() == -1;
    return PhysReg && RISCV::GPRCRegClass.contains(PhysReg);

  for (auto &MO : MRI->reg_nodbg_operands(VirtReg)) {

    unsigned OpIdx = MO.getOperandNo();

    if (isCompressible(MI, NeedGPRC)) {
      if (OpIdx == 0 && MI.getOperand(1).isReg()) {
        if (!NeedGPRC || MI.getNumExplicitOperands() < 3 ||
            MI.getOpcode() == RISCV::ADD_UW ||
            isCompressibleOpnd(MI.getOperand(2)))
          tryAddHint(MO, MI.getOperand(1), NeedGPRC);
        if (MI.isCommutable() && MI.getOperand(2).isReg() &&
            (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1))))
          tryAddHint(MO, MI.getOperand(2), NeedGPRC);
      } else if (OpIdx == 1 && (!NeedGPRC || MI.getNumExplicitOperands() < 3 ||
                                isCompressibleOpnd(MI.getOperand(2)))) {
        tryAddHint(MO, MI.getOperand(0), NeedGPRC);
      } else if (MI.isCommutable() && OpIdx == 2 &&
                 (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1)))) {
        tryAddHint(MO, MI.getOperand(0), NeedGPRC);

    if ((MI.getOpcode() == RISCV::ADDIW || MI.getOpcode() == RISCV::ADDI) &&
        MI.getOperand(1).isReg()) {

      if (I != MBB.begin()) {

        if ((I->getOpcode() == RISCV::LUI || I->getOpcode() == RISCV::AUIPC) &&
            I->getOperand(0).getReg() == MI.getOperand(1).getReg()) {

          tryAddHint(MO, MI.getOperand(1), false);

          tryAddHint(MO, MI.getOperand(0), false);

  if (TwoAddrHints.count(OrderReg))

  return BaseImplRetVal;
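// Fragments from updateRegAllocHint(): when a virtual register carrying a
// pairing hint is rewritten to NewReg, the hint on its partner register is
// updated too, so the two halves of a register pair keep pointing at each
// other.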
  std::pair<unsigned, Register> Hint = MRI->getRegAllocationHint(Reg);

      Hint.second.isVirtual()) {

    std::pair<unsigned, Register> PartnerHint =
        MRI->getRegAllocationHint(Partner);

    if (PartnerHint.second == Reg) {

      MRI->setRegAllocationHint(Partner, PartnerHint.first, NewReg);

  MRI->setRegAllocationHint(NewReg, Hint.first, Partner);

  return getMatchingSuperReg(Reg, RISCV::sub_vrm1_0, &RegClass);