#define GET_REGINFO_TARGET_DESC
#include "RISCVGenRegisterInfo.inc"
static cl::opt<bool> DisableRegAllocHints(
    "riscv-disable-regalloc-hints", cl::Hidden, cl::init(false),
    cl::desc("Disable two address hints for register "
             "allocation"));
// The generated register enums are expected to be numbered consecutively
// (X0..X31, F0..F31 in each width, V0..V31); code below relies on simple
// "base + index" arithmetic over these enums.
static_assert(RISCV::X1 == RISCV::X0 + 1, "Register list not consecutive");
static_assert(RISCV::X31 == RISCV::X0 + 31, "Register list not consecutive");
static_assert(RISCV::F1_H == RISCV::F0_H + 1, "Register list not consecutive");
static_assert(RISCV::F31_H == RISCV::F0_H + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_F == RISCV::F0_F + 1, "Register list not consecutive");
static_assert(RISCV::F31_F == RISCV::F0_F + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_D == RISCV::F0_D + 1, "Register list not consecutive");
static_assert(RISCV::F31_D == RISCV::F0_D + 31,
              "Register list not consecutive");
static_assert(RISCV::V1 == RISCV::V0 + 1, "Register list not consecutive");
static_assert(RISCV::V31 == RISCV::V0 + 31, "Register list not consecutive");
// getCalleeSavedRegs: the *_Interrupt_* save lists below are used for
// interrupt handlers, which must preserve every register they may clobber;
// the choice depends on the FP extension (D or F) and on whether the base
// ISA is RVE.
    return CSR_NoRegs_SaveList;

  if (Subtarget.hasStdExtD())
    return CSR_XLEN_F64_Interrupt_SaveList;
  if (Subtarget.hasStdExtF())
    return Subtarget.hasStdExtE() ? CSR_XLEN_F32_Interrupt_RVE_SaveList
                                  : CSR_XLEN_F32_Interrupt_SaveList;
  return Subtarget.hasStdExtE() ? CSR_Interrupt_RVE_SaveList
                                : CSR_Interrupt_SaveList;
      Subtarget.hasVInstructions();

  switch (Subtarget.getTargetABI()) {
    return CSR_ILP32E_LP64E_SaveList;

    return CSR_ILP32_LP64_V_SaveList;
    return CSR_ILP32_LP64_SaveList;

    return CSR_ILP32F_LP64F_V_SaveList;
    return CSR_ILP32F_LP64F_SaveList;

    return CSR_ILP32D_LP64D_V_SaveList;
    return CSR_ILP32D_LP64D_SaveList;
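
// getReservedRegs: build the set of registers the allocator must never use:
// user-reserved GPRs and constant registers, the stack/global/thread pointers
// (and the frame pointer when used), x16-x31 under RVE, and the vector/FP
// configuration CSRs (vtype, vxsat, vxrm, frm, fflags).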
  for (size_t Reg = 0; Reg < getNumRegs(); Reg++) {
    // Mark any GPRs requested to be reserved as such.
    if (Subtarget.isRegisterReservedByUser(Reg))
      markSuperRegs(Reserved, Reg);

    // Mark all the registers defined as constant in TableGen as reserved.
    if (isConstantPhysReg(Reg))
      markSuperRegs(Reserved, Reg);
  }

  markSuperRegs(Reserved, RISCV::X2_H); // sp
  markSuperRegs(Reserved, RISCV::X3_H); // gp
  markSuperRegs(Reserved, RISCV::X4_H); // tp

  markSuperRegs(Reserved, RISCV::X8_H); // fp

  markSuperRegs(Reserved, RISCV::DUMMY_REG_PAIR_WITH_X0);

  // RVE only has 16 GPRs; x16-x31 are never available.
  if (Subtarget.hasStdExtE())
    for (MCPhysReg Reg = RISCV::X16_H; Reg <= RISCV::X31_H; Reg++)
      markSuperRegs(Reserved, Reg);

  // V extension configuration registers.
  markSuperRegs(Reserved, RISCV::VTYPE);
  markSuperRegs(Reserved, RISCV::VXSAT);
  markSuperRegs(Reserved, RISCV::VXRM);

  // Floating point environment registers.
  markSuperRegs(Reserved, RISCV::FRM);
  markSuperRegs(Reserved, RISCV::FFLAGS);

  // SiFive VCIX state register.
  markSuperRegs(Reserved, RISCV::SF_VCIX_STATE);

  // Under CallingConv::GRAAL two additional registers are reserved; x23 and
  // x27 do not exist in RVE.
  if (Subtarget.hasStdExtE())

  markSuperRegs(Reserved, RISCV::X23_H);
  markSuperRegs(Reserved, RISCV::X27_H);

  // Shadow stack pointer.
  markSuperRegs(Reserved, RISCV::SSP);
  return CSR_NoRegs_RegMask; // getNoPreservedMask: no registers are preserved.
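
// adjustReg: emit code computing DestReg = SrcReg + Offset, where Offset may
// carry both a fixed byte component and a scalable (VLENB-multiple) component.
// With a known VLEN the scalable part is folded into the fixed part; otherwise
// it is scaled from vlenb, using Zba sh1add/sh2add/sh3add when profitable.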
  if (DestReg == SrcReg && !Offset.getFixed() && !Offset.getScalable())
    return;

  // If the exact VLEN is known, fold the scalable part into the fixed part.
  if (Offset.getScalable()) {
    if (auto VLEN = ST.getRealVLen()) {
      const int64_t VLENB = *VLEN / 8;
      assert(Offset.getScalable() % 8 == 0 &&
             "Reserve the stack by the multiple of one vector size.");
      const int64_t NumOfVReg = Offset.getScalable() / 8;
      const int64_t FixedOffset = NumOfVReg * VLENB;
      if (!isInt<32>(FixedOffset)) {
        report_fatal_error(
            "Frame size outside of the signed 32-bit range not supported");

  bool KillSrcReg = false;

  if (Offset.getScalable()) {
    unsigned ScalableAdjOpc = RISCV::ADD;
    int64_t ScalableValue = Offset.getScalable();
    if (ScalableValue < 0) {
      ScalableValue = -ScalableValue;
      ScalableAdjOpc = RISCV::SUB;
    }

    Register ScratchReg = DestReg;
    if (DestReg == SrcReg)
      ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);

    assert(ScalableValue > 0 && "There is no need to get VLEN scaled value.");
    assert(ScalableValue % 8 == 0 &&
           "Reserve the stack by the multiple of one vector size.");
    assert(isInt<32>(ScalableValue / 8) &&
           "Expect the number of vector registers within 32-bits.");

    // With Zba, multiplying vlenb by 2, 4 or 8 folds into a single shNadd.
    if (ScalableAdjOpc == RISCV::ADD && ST.hasStdExtZba() &&
        (NumOfVReg == 2 || NumOfVReg == 4 || NumOfVReg == 8)) {
      unsigned Opc = NumOfVReg == 2
                         ? RISCV::SH1ADD
                         : (NumOfVReg == 4 ? RISCV::SH2ADD : RISCV::SH3ADD);

      TII->mulImm(MF, MBB, II, DL, ScratchReg, NumOfVReg, Flag);
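
  // Handle the fixed byte component of the offset.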
  int64_t Val = Offset.getFixed();
  if (DestReg == SrcReg && Val == 0)
    return;

  // A 12-bit immediate fits directly in a single ADDI.
  if (isInt<12>(Val)) {

  // Otherwise try to split the offset across two ADDIs while keeping the
  // intermediate result suitably aligned.
  assert(Align < 2048 && "Required alignment too large");
  int64_t MaxPosAdjStep = 2048 - Align;
  if (Val > -4096 && Val <= (2 * MaxPosAdjStep)) {
    int64_t FirstAdj = Val < 0 ? -2048 : MaxPosAdjStep;

  // With Zba, a value of the form (simm12 << 2) or (simm12 << 3) can be loaded
  // with a short immediate sequence and combined via sh2add/sh3add.
  if (ST.hasStdExtZba() && (Val & 0xFFF) != 0) {
    if (isShiftedInt<12, 3>(Val)) {
    } else if (isShiftedInt<12, 2>(Val)) {
      Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
      TII->movImm(MBB, II, DL, ScratchReg, Val, Flag);

  // Fall back to loading the full immediate into a scratch register and
  // adding it to the base.
  unsigned Opc = RISCV::ADD;

  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  TII->movImm(MBB, II, DL, ScratchReg, Val, Flag);
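
// lowerVSPILL: expand a Zvlsseg spill pseudo (PseudoVSPILLn_Mm) into NF whole
// register stores (vs1r/vs2r/vs4r), stepping the base address by LMUL * VLENB
// between segment fields.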
  unsigned NF = ZvlssegInfo->first;
  unsigned LMUL = ZvlssegInfo->second;
  assert(NF * LMUL <= 8 && "Invalid NF/LMUL combinations.");
  unsigned Opcode, SubRegIdx;
  switch (LMUL) {
  case 1:
    Opcode = RISCV::VS1R_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    break;
  case 2:
    Opcode = RISCV::VS2R_V;
    SubRegIdx = RISCV::sub_vrm2_0;
    break;
  case 4:
    Opcode = RISCV::VS4R_V;
    SubRegIdx = RISCV::sub_vrm4_0;
    break;
  }
  static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
                "Unexpected subreg numbering");

  Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);

  // Each field is LMUL vector registers apart; compute the byte stride.
  const int64_t VLENB = *VLEN / 8;
  int64_t Offset = VLENB * LMUL;

  if (ShiftAmount != 0)

  bool IsBaseKill = II->getOperand(1).isKill();
  Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  for (unsigned I = 0; I < NF; ++I) {

        .addReg(TRI->getSubReg(SrcReg, SubRegIdx + I))

  II->eraseFromParent();
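
// lowerVRELOAD: the reload counterpart of lowerVSPILL, expanding
// PseudoVRELOADn_Mm into NF whole register loads (vl1re8/vl2re8/vl4re8) with
// the same address stride.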
  unsigned NF = ZvlssegInfo->first;
  unsigned LMUL = ZvlssegInfo->second;
  assert(NF * LMUL <= 8 && "Invalid NF/LMUL combinations.");
  unsigned Opcode, SubRegIdx;
  switch (LMUL) {
  case 1:
    Opcode = RISCV::VL1RE8_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    break;
  case 2:
    Opcode = RISCV::VL2RE8_V;
    SubRegIdx = RISCV::sub_vrm2_0;
    break;
  case 4:
    Opcode = RISCV::VL4RE8_V;
    SubRegIdx = RISCV::sub_vrm4_0;
    break;
  }
  static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
                "Unexpected subreg numbering");

  Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);

  const int64_t VLENB = *VLEN / 8;
  int64_t Offset = VLENB * LMUL;

  if (ShiftAmount != 0)

  Register DestReg = II->getOperand(0).getReg();

  bool IsBaseKill = II->getOperand(1).isKill();
  Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  for (unsigned I = 0; I < NF; ++I) {

                 TRI->getSubReg(DestReg, SubRegIdx + I))

  II->eraseFromParent();
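
// eliminateFrameIndex: rewrite a frame-index operand into FrameReg plus an
// offset. Offsets that fit the instruction's immediate are folded in place;
// otherwise (large ADDI immediates, prefetch alignment, Zdinx pairs) the
// immediate is cleared and the offset is materialized into a register.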
                                            int SPAdj, unsigned FIOperandNum,

  assert(SPAdj == 0 && "Unexpected non-zero SPAdj value");

  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();

      getFrameLowering(MF)->getFrameIndexReference(MF, FrameIndex, FrameReg);

  if (!isInt<32>(Offset.getFixed())) {
    report_fatal_error(
        "Frame offsets outside of the signed 32-bit range not supported");

  int64_t Val = Offset.getFixed();
  int64_t Lo12 = SignExtend64<12>(Val);
  unsigned Opc = MI.getOpcode();

  if (Opc == RISCV::ADDI && !isInt<12>(Val)) {
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
  } else if ((Opc == RISCV::PREFETCH_I || Opc == RISCV::PREFETCH_R ||
              Opc == RISCV::PREFETCH_W) &&
             (Lo12 & 0b11111) != 0) {
    // Prefetch offsets must keep their low five bits clear (32-byte aligned).
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
  } else if ((Opc == RISCV::PseudoRV32ZdinxLD ||
              Opc == RISCV::PseudoRV32ZdinxSD) &&

    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);

    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Lo12);

  if (MI.getOpcode() == RISCV::ADDI)
    DestReg = MI.getOperand(0).getReg();
  else
    DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);

    MI.getOperand(FIOperandNum).ChangeToRegister(DestReg, false,

    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false,

  // If the rewrite left a pointless `addi rd, rd, 0`, drop it.
  if (MI.getOpcode() == RISCV::ADDI &&
      MI.getOperand(0).getReg() == MI.getOperand(1).getReg() &&
      MI.getOperand(2).getImm() == 0) {
    MI.eraseFromParent();
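
  // Zvlsseg spill/reload pseudos cannot be expressed as a single memory
  // access; hand them to lowerVSPILL / lowerVRELOAD instead.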
  switch (MI.getOpcode()) {
  case RISCV::PseudoVSPILL2_M1:
  case RISCV::PseudoVSPILL2_M2:
  case RISCV::PseudoVSPILL2_M4:
  case RISCV::PseudoVSPILL3_M1:
  case RISCV::PseudoVSPILL3_M2:
  case RISCV::PseudoVSPILL4_M1:
  case RISCV::PseudoVSPILL4_M2:
  case RISCV::PseudoVSPILL5_M1:
  case RISCV::PseudoVSPILL6_M1:
  case RISCV::PseudoVSPILL7_M1:
  case RISCV::PseudoVSPILL8_M1:
    lowerVSPILL(II);
    return true;
  case RISCV::PseudoVRELOAD2_M1:
  case RISCV::PseudoVRELOAD2_M2:
  case RISCV::PseudoVRELOAD2_M4:
  case RISCV::PseudoVRELOAD3_M1:
  case RISCV::PseudoVRELOAD3_M2:
  case RISCV::PseudoVRELOAD4_M1:
  case RISCV::PseudoVRELOAD4_M2:
  case RISCV::PseudoVRELOAD5_M1:
  case RISCV::PseudoVRELOAD6_M1:
  case RISCV::PseudoVRELOAD7_M1:
  case RISCV::PseudoVRELOAD8_M1:
    lowerVRELOAD(II);
    return true;
  }
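
// needsFrameBaseReg: used by the LocalStackSlotAllocation pass to decide
// whether a frame-index reference would be better served by a locally
// materialized base register. It estimates the eventual FP- and SP-relative
// offsets and checks whether they are still encodable.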
  unsigned FIOperandNum = 0;
  for (; !MI->getOperand(FIOperandNum).isFI(); FIOperandNum++)
    assert(FIOperandNum < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand");

  if (!MI->mayLoad() && !MI->mayStore())
    return false;

  if (TFI->hasFP(MF) && !shouldRealignStack(MF)) {
    // Estimate the size of the callee-saved area so the FP-relative offset can
    // be approximated before frame lowering has run.
    unsigned CalleeSavedSize = 0;

      if (Subtarget.isRegisterReservedByUser(Reg))
        continue;

      if (RISCV::GPRRegClass.contains(Reg))
        CalleeSavedSize += getSpillSize(RISCV::GPRRegClass);
      else if (RISCV::FPR64RegClass.contains(Reg))
        CalleeSavedSize += getSpillSize(RISCV::FPR64RegClass);
      else if (RISCV::FPR32RegClass.contains(Reg))
        CalleeSavedSize += getSpillSize(RISCV::FPR32RegClass);

    int64_t MaxFPOffset = Offset - CalleeSavedSize;

  int64_t MaxSPOffset = Offset + 128;
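
// isFrameOffsetLegal: a base register plus offset is usable only if the final
// offset still fits the instruction's signed 12-bit immediate.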
  unsigned FIOperandNum = 0;
  while (!MI->getOperand(FIOperandNum).isFI()) {
    FIOperandNum++;
    assert(FIOperandNum < MI->getNumOperands() &&
           "Instr does not have a FrameIndex operand!");
  }
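
// resolveFrameIndex: rewrite the frame-index operand to use the materialized
// base register and fold the remaining offset into the immediate operand.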
  unsigned FIOperandNum = 0;
  while (!MI.getOperand(FIOperandNum).isFI()) {
    FIOperandNum++;
    assert(FIOperandNum < MI.getNumOperands() &&
           "Instr does not have a FrameIndex operand!");
  }

  MI.getOperand(FIOperandNum).ChangeToRegister(BaseReg, false);
  MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
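
// getFrameIndexInstrOffset: for I- and S-format instructions the immediate
// operand directly follows the frame-index operand.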
  assert((RISCVII::getFormat(MI->getDesc().TSFlags) == RISCVII::InstFormatI ||
          RISCVII::getFormat(MI->getDesc().TSFlags) == RISCVII::InstFormatS) &&
         "The MI must be I or S format.");
  assert(MI->getOperand(Idx).isFI() && "The Idx'th operand of MI is not a "
                                       "FrameIndex operand");
  return MI->getOperand(Idx + 1).getImm();
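
// getFrameRegister: x8 (fp) when the function needs a frame pointer,
// otherwise x2 (sp).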
  return TFI->hasFP(MF) ? RISCV::X8 : RISCV::X2;
  if (Reg == RISCV::SF_VCIX_STATE)
    return "sf.vcix_state";
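
// getCallPreservedMask: same ABI-driven selection as the save lists above,
// returning the register masks applied at call sites.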
    return CSR_NoRegs_RegMask;

  switch (Subtarget.getTargetABI()) {
    return CSR_ILP32E_LP64E_RegMask;

    return CSR_ILP32_LP64_V_RegMask;
    return CSR_ILP32_LP64_RegMask;

    return CSR_ILP32F_LP64F_V_RegMask;
    return CSR_ILP32F_LP64F_RegMask;

    return CSR_ILP32D_LP64D_V_RegMask;
    return CSR_ILP32D_LP64D_RegMask;
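
// getLargestLegalSuperClass: the v0-constrained and v0-excluding vector
// classes widen back to the plain VR/VRM2/VRM4/VRM8 classes.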
  if (RC == &RISCV::VMV0RegClass)
    return &RISCV::VRRegClass;
  if (RC == &RISCV::VRNoV0RegClass)
    return &RISCV::VRRegClass;
  if (RC == &RISCV::VRM2NoV0RegClass)
    return &RISCV::VRM2RegClass;
  if (RC == &RISCV::VRM4NoV0RegClass)
    return &RISCV::VRM4RegClass;
  if (RC == &RISCV::VRM8NoV0RegClass)
    return &RISCV::VRM8RegClass;
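
// getOffsetOpcodes: express a StackOffset as DWARF operations. The scalable
// component is emitted as (scalable bytes / 8) scaled by the VLENB register,
// read via DW_OP_bregx.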
  assert(Offset.getScalable() % 8 == 0 && "Invalid frame offset");

  int64_t VLENBSized = Offset.getScalable() / 8;
  if (VLENBSized > 0) {
    Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
  } else if (VLENBSized < 0) {
    Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
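
// getRegAllocationHints: extend the base implementation with target hints that
// (a) reuse the register of a tied/commutable operand and (b) prefer GPRC
// registers (x8-x15) when that allows the instruction to be compressed.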
  bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
      VirtReg, Order, Hints, MF, VRM, Matrix);

    return BaseImplRetVal;

  // Add PhysReg as an allocation hint when it satisfies the (optional) GPRC
  // constraint and neither operand uses a subregister.
  auto tryAddHint = [&](const MachineOperand &VRRegMO, const MachineOperand &MO,
                        bool NeedGPRC) -> void {

    if (PhysReg && (!NeedGPRC || RISCV::GPRCRegClass.contains(PhysReg)) &&
        !MO.getSubReg() && !VRRegMO.getSubReg()) {

      TwoAddrHints.insert(PhysReg);

  // Returns true if MI is one of the compressible instructions; NeedGPRC is
  // set when the compressed form additionally requires GPRC operands.
  auto isCompressible = [&Subtarget](const MachineInstr &MI, bool &NeedGPRC) {

    switch (MI.getOpcode()) {

      if (!MI.getOperand(2).isImm())
        return false;
      int64_t Imm = MI.getOperand(2).getImm();

      // c.zext.b (Zcb) covers andi rd, rd, 255.
      return Subtarget.hasStdExtZcb() && Imm == 255;

      return MI.getOperand(2).isImm() && isInt<6>(MI.getOperand(2).getImm());

    case RISCV::ZEXT_H_RV32:
    case RISCV::ZEXT_H_RV64:

      return Subtarget.hasStdExtZcb();

      return Subtarget.hasStdExtZcb() && MI.getOperand(2).isReg() &&
             MI.getOperand(2).getReg() == RISCV::X0;

      return Subtarget.hasStdExtZcb() && MI.getOperand(2).isImm() &&
             MI.getOperand(2).getImm() == -1;

    // An operand is compressible when it maps to a GPRC physical register.
    return PhysReg && RISCV::GPRCRegClass.contains(PhysReg);

  for (auto &MO : MRI->reg_nodbg_operands(VirtReg)) {
    const MachineInstr &MI = *MO.getParent();
    unsigned OpIdx = MO.getOperandNo();
    bool NeedGPRC;
    if (isCompressible(MI, NeedGPRC)) {
      if (OpIdx == 0 && MI.getOperand(1).isReg()) {
        if (!NeedGPRC || MI.getNumExplicitOperands() < 3 ||
            MI.getOpcode() == RISCV::ADD_UW ||
            isCompressibleOpnd(MI.getOperand(2)))
          tryAddHint(MO, MI.getOperand(1), NeedGPRC);
        if (MI.isCommutable() && MI.getOperand(2).isReg() &&
            (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1))))
          tryAddHint(MO, MI.getOperand(2), NeedGPRC);
      } else if (OpIdx == 1 &&
                 (!NeedGPRC || MI.getNumExplicitOperands() < 3 ||
                  isCompressibleOpnd(MI.getOperand(2)))) {
        tryAddHint(MO, MI.getOperand(0), NeedGPRC);
      } else if (MI.isCommutable() && OpIdx == 2 &&
                 (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1)))) {
        tryAddHint(MO, MI.getOperand(0), NeedGPRC);

  // Prefer allocation-order registers that were collected as two-address hints.
  if (TwoAddrHints.count(OrderReg))

  return BaseImplRetVal;