36 #define GET_CC_REGISTER_LISTS
37 #include "AArch64GenCallingConv.inc"
38 #define GET_REGINFO_TARGET_DESC
39 #include "AArch64GenRegisterInfo.inc"
52 unsigned &RegToUseForCFI) const {
53 if (AArch64::PPRRegClass.contains(Reg))
56 if (AArch64::ZPRRegClass.contains(Reg)) {
57 RegToUseForCFI = getSubReg(Reg, AArch64::dsub);
58 for (int I = 0; CSR_AArch64_AAPCS_SaveList[I]; ++I) {
59 if (CSR_AArch64_AAPCS_SaveList[I] == RegToUseForCFI)
71 assert(MF && "Invalid MachineFunction pointer.");
76 return CSR_AArch64_NoRegs_SaveList;
78 return CSR_AArch64_NoneRegs_SaveList;
80 return CSR_AArch64_AllRegs_SaveList;
83 return CSR_Win_AArch64_Arm64EC_Thunk_SaveList;
91 return CSR_Win_AArch64_CFGuard_Check_SaveList;
96 Attribute::SwiftError))
97 return CSR_Win_AArch64_AAPCS_SwiftError_SaveList;
99 return CSR_Win_AArch64_AAPCS_SwiftTail_SaveList;
100 return CSR_Win_AArch64_AAPCS_SaveList;
103 return CSR_AArch64_AAVPCS_SaveList;
105 return CSR_AArch64_SVE_AAPCS_SaveList;
109 "Calling convention "
110 "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0 is only "
111 "supported to improve calls to SME ACLE save/restore/disable-za "
112 "functions, and is not intended to be used beyond that scope.");
116 "Calling convention "
117 "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1 is "
118 "only supported to improve calls to SME ACLE __arm_get_current_vg "
119 "function, and is not intended to be used beyond that scope.");
123 "Calling convention "
124 "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2 is "
125 "only supported to improve calls to SME ACLE __arm_sme_state "
126 "and is not intended to be used beyond that scope.");
130 Attribute::SwiftError))
131 return CSR_AArch64_AAPCS_SwiftError_SaveList;
133 return CSR_AArch64_AAPCS_SwiftTail_SaveList;
135 return CSR_AArch64_RT_MostRegs_SaveList;
137 return CSR_AArch64_RT_AllRegs_SaveList;
141 return CSR_AArch64_AAPCS_X18_SaveList;
143 return CSR_AArch64_SVE_AAPCS_SaveList;
144 return CSR_AArch64_AAPCS_SaveList;
149 assert(MF && "Invalid MachineFunction pointer.");
151 "Invalid subtarget for getDarwinCalleeSavedRegs");
155 "Calling convention CFGuard_Check is unsupported on Darwin.");
157 return CSR_Darwin_AArch64_AAVPCS_SaveList;
160 "Calling convention SVE_VectorCall is unsupported on Darwin.");
164 "Calling convention "
165 "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0 is "
166 "only supported to improve calls to SME ACLE save/restore/disable-za "
167 "functions, and is not intended to be used beyond that scope.");
171 "Calling convention "
172 "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1 is "
173 "only supported to improve calls to SME ACLE __arm_get_current_vg "
174 "function, and is not intended to be used beyond that scope.");
178 "Calling convention "
179 "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2 is "
180 "only supported to improve calls to SME ACLE __arm_sme_state "
181 "and is not intended to be used beyond that scope.");
184 ? CSR_Darwin_AArch64_CXX_TLS_PE_SaveList
185 : CSR_Darwin_AArch64_CXX_TLS_SaveList;
189 Attribute::SwiftError))
190 return CSR_Darwin_AArch64_AAPCS_SwiftError_SaveList;
192 return CSR_Darwin_AArch64_AAPCS_SwiftTail_SaveList;
194 return CSR_Darwin_AArch64_RT_MostRegs_SaveList;
196 return CSR_Darwin_AArch64_RT_AllRegs_SaveList;
198 return CSR_Darwin_AArch64_AAPCS_Win64_SaveList;
200 return CSR_Darwin_AArch64_SVE_AAPCS_SaveList;
201 return CSR_Darwin_AArch64_AAPCS_SaveList;
206 assert(MF && "Invalid MachineFunction pointer.");
209 return CSR_Darwin_AArch64_CXX_TLS_ViaCopy_SaveList;
220 for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
222 UpdatedCSRs.push_back(AArch64::GPR64commonRegClass.getRegister(i));
232 unsigned Idx) const {
234 if (RC == &AArch64::GPR32allRegClass && Idx == AArch64::hsub)
235 return &AArch64::FPR32RegClass;
236 else if (RC == &AArch64::GPR64allRegClass && Idx == AArch64::hsub)
237 return &AArch64::FPR64RegClass;
240 return AArch64GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
247 "Invalid subtarget for getDarwinCallPreservedMask");
250 return CSR_Darwin_AArch64_CXX_TLS_RegMask;
252 return CSR_Darwin_AArch64_AAVPCS_RegMask;
254 return CSR_Darwin_AArch64_SVE_AAPCS_RegMask;
256 return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0_RegMask;
258 return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1_RegMask;
260 return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2_RegMask;
263 "Calling convention CFGuard_Check is unsupported on Darwin.");
268 return CSR_Darwin_AArch64_AAPCS_SwiftError_RegMask;
270 return CSR_Darwin_AArch64_AAPCS_SwiftTail_RegMask;
272 return CSR_Darwin_AArch64_RT_MostRegs_RegMask;
274 return CSR_Darwin_AArch64_RT_AllRegs_RegMask;
275 return CSR_Darwin_AArch64_AAPCS_RegMask;
284 return SCS ? CSR_AArch64_NoRegs_SCS_RegMask : CSR_AArch64_NoRegs_RegMask;
286 return SCS ? CSR_AArch64_NoneRegs_SCS_RegMask
287 : CSR_AArch64_NoneRegs_RegMask;
289 return SCS ? CSR_AArch64_AllRegs_SCS_RegMask : CSR_AArch64_AllRegs_RegMask;
299 return SCS ? CSR_AArch64_AAVPCS_SCS_RegMask : CSR_AArch64_AAVPCS_RegMask;
301 return SCS ? CSR_AArch64_SVE_AAPCS_SCS_RegMask
302 : CSR_AArch64_SVE_AAPCS_RegMask;
304 return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0_RegMask;
306 return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1_RegMask;
308 return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2_RegMask;
310 return CSR_Win_AArch64_CFGuard_Check_RegMask;
314 return SCS ? CSR_AArch64_AAPCS_SwiftError_SCS_RegMask
315 : CSR_AArch64_AAPCS_SwiftError_RegMask;
319 return CSR_AArch64_AAPCS_SwiftTail_RegMask;
322 return SCS ? CSR_AArch64_RT_MostRegs_SCS_RegMask
323 : CSR_AArch64_RT_MostRegs_RegMask;
325 return SCS ? CSR_AArch64_RT_AllRegs_SCS_RegMask
326 : CSR_AArch64_RT_AllRegs_RegMask;
328 return SCS ? CSR_AArch64_AAPCS_SCS_RegMask : CSR_AArch64_AAPCS_RegMask;
334 return CSR_AArch64_AAPCS_RegMask;
341 return CSR_Darwin_AArch64_TLS_RegMask;
344 return CSR_AArch64_TLS_ELF_RegMask;
351 memcpy(UpdatedMask, *Mask, sizeof(UpdatedMask[0]) * RegMaskSize);
353 for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
356 subregs_inclusive(AArch64::GPR64commonRegClass.getRegister(i))) {
367 return CSR_AArch64_SMStartStop_RegMask;
372 return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0_RegMask;
376 return CSR_AArch64_NoRegs_RegMask;
391 return CSR_Darwin_AArch64_AAPCS_ThisReturn_RegMask;
392 return CSR_AArch64_AAPCS_ThisReturn_RegMask;
396 return CSR_AArch64_StackProbe_Windows_RegMask;
399 std::optional<std::string>
403 return std::string("X19 is used as the frame base pointer register.");
414 for (unsigned i = AArch64::B16; i <= AArch64::B31; ++i)
420 " is clobbered by asynchronous signals when using Arm64EC.";
432 markSuperRegs(Reserved, AArch64::WSP);
433 markSuperRegs(Reserved, AArch64::WZR);
436 markSuperRegs(Reserved, AArch64::W29);
441 markSuperRegs(Reserved, AArch64::W13);
442 markSuperRegs(Reserved, AArch64::W14);
443 markSuperRegs(Reserved, AArch64::W23);
444 markSuperRegs(Reserved, AArch64::W24);
445 markSuperRegs(Reserved, AArch64::W28);
446 for (unsigned i = AArch64::B16; i <= AArch64::B31; ++i)
450 for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
452 markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
456 markSuperRegs(Reserved, AArch64::W19);
460 markSuperRegs(Reserved, AArch64::W16);
481 markSuperRegs(Reserved, AArch64::FPCR);
482 markSuperRegs(Reserved, AArch64::FPMR);
483 markSuperRegs(Reserved, AArch64::FPSR);
486 markSuperRegs(Reserved, AArch64::X27);
487 markSuperRegs(Reserved, AArch64::X28);
488 markSuperRegs(Reserved, AArch64::W27);
489 markSuperRegs(Reserved, AArch64::W28);
498 static_assert(AArch64::W30_HI - AArch64::W0_HI == 30,
499 "Unexpected order of registers");
500 Reserved.set(AArch64::W0_HI, AArch64::W30_HI);
501 static_assert(AArch64::B31_HI - AArch64::B0_HI == 31,
502 "Unexpected order of registers");
503 Reserved.set(AArch64::B0_HI, AArch64::B31_HI);
504 static_assert(AArch64::H31_HI - AArch64::H0_HI == 31,
505 "Unexpected order of registers");
506 Reserved.set(AArch64::H0_HI, AArch64::H31_HI);
507 static_assert(AArch64::S31_HI - AArch64::S0_HI == 31,
508 "Unexpected order of registers");
509 Reserved.set(AArch64::S0_HI, AArch64::S31_HI);
510 static_assert(AArch64::D31_HI - AArch64::D0_HI == 31,
511 "Unexpected order of registers");
512 Reserved.set(AArch64::D0_HI, AArch64::D31_HI);
513 static_assert(AArch64::Q31_HI - AArch64::Q0_HI == 31,
514 "Unexpected order of registers");
515 Reserved.set(AArch64::Q0_HI, AArch64::Q31_HI);
523 for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
525 markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
536 markSuperRegs(Reserved, AArch64::LR);
568 " function calls if any of the argument registers is reserved.")});
581 if (PhysReg == AArch64::ZA || PhysReg == AArch64::ZT0)
589 unsigned Kind) const {
590 return &AArch64::GPR64spRegClass;
595 if (RC == &AArch64::CCRRegClass)
596 return &AArch64::GPR64RegClass;
614 if (hasStackRealignment(MF))
618 if (ST.hasSVE() || ST.isStreaming()) {
654 return HasReg(CC_AArch64_GHC_ArgRegs, Reg);
657 return HasReg(CC_AArch64_Preserve_None_ArgRegs, Reg);
669 return HasReg(CC_AArch64_Win64_VarArg_ArgRegs, Reg);
672 return HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
675 return HasReg(CC_AArch64_Win64PCS_Swift_ArgRegs, Reg) ||
676 HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
682 return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg);
685 return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg) ||
686 HasReg(CC_AArch64_AAPCS_Swift_ArgRegs, Reg);
692 return HasReg(CC_AArch64_DarwinPCS_ArgRegs, Reg);
695 return HasReg(CC_AArch64_DarwinPCS_ArgRegs, Reg) ||
696 HasReg(CC_AArch64_DarwinPCS_Swift_ArgRegs, Reg);
700 return HasReg(CC_AArch64_DarwinPCS_ILP32_VarArg_ArgRegs, Reg);
701 return HasReg(CC_AArch64_DarwinPCS_VarArg_ArgRegs, Reg);
704 HasReg(CC_AArch64_Win64_VarArg_ArgRegs, Reg);
705 return HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
707 return HasReg(CC_AArch64_Win64_CFGuard_Check_ArgRegs, Reg);
714 return HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
715 return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg);
722 return TFI->hasFP(MF) ? AArch64::FP : AArch64::SP;
749 "Expected SVE area to be calculated by this point");
772 for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i)
773 assert(i < MI->getNumOperands() &&
774 "Instr doesn't have FrameIndex operand!");
785 if (!MI->mayLoad() && !MI->mayStore())
800 int64_t FPOffset = Offset - 16 * 20;
837 assert(MI && "Unable to get the legal offset for nil instruction.");
851 DL = Ins->getDebugLoc();
857 Register BaseReg = MRI.createVirtualRegister(&AArch64::GPR64spRegClass);
858 MRI.constrainRegClass(BaseReg, TII->getRegClass(MCID, 0, this, MF));
875 while (!MI.getOperand(i).isFI()) {
877 assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
884 assert(Done && "Unable to resolve frame index!");
898 if (MI.getOpcode() == AArch64::STGloop ||
899 MI.getOpcode() == AArch64::STZGloop) {
900 assert(FIOperandNum == 3 &&
901 "Wrong frame index operand for STGloop/STZGloop");
902 unsigned Op = MI.getOpcode() == AArch64::STGloop ? AArch64::STGloop_wback
903 : AArch64::STZGloop_wback;
904 ScratchReg = MI.getOperand(1).getReg();
905 MI.getOperand(3).ChangeToRegister(ScratchReg, false, false, true);
907 MI.tieOperands(1, 3);
910 MI.getMF()->getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
911 MI.getOperand(FIOperandNum)
912 .ChangeToRegister(ScratchReg, false, false, true);
922 assert(Offset.getScalable() % 2 == 0 && "Invalid frame offset");
928 int64_t VGSized = Offset.getScalable() / 2;
932 Ops.append({dwarf::DW_OP_bregx, VG, 0ULL});
935 } else if (VGSized < 0) {
938 Ops.append({dwarf::DW_OP_bregx, VG, 0ULL});
945 int SPAdj, unsigned FIOperandNum,
947 assert(SPAdj == 0 && "Unexpected");
956 int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
962 if (MI.getOpcode() == TargetOpcode::STACKMAP ||
963 MI.getOpcode() == TargetOpcode::PATCHPOINT ||
964 MI.getOpcode() == TargetOpcode::STATEPOINT) {
970 MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false);
971 MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset.getFixed());
975 if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE) {
979 "Frame offsets with a scalable component are not supported");
985 if (MI.getOpcode() == AArch64::TAGPstack) {
988 FrameReg = MI.getOperand(3).getReg();
1000 MF, FrameIndex, FrameReg, false, true);
1009 MI.getOperand(FIOperandNum)
1010 .ChangeToRegister(ScratchReg, false, false, true);
1013 FrameReg = AArch64::SP;
1018 MF, FrameIndex, FrameReg, false, true);
1026 "Emergency spill slot is out of reach");
1041 switch (RC->getID()) {
1044 case AArch64::GPR32RegClassID:
1045 case AArch64::GPR32spRegClassID:
1046 case AArch64::GPR32allRegClassID:
1047 case AArch64::GPR64spRegClassID:
1048 case AArch64::GPR64allRegClassID:
1049 case AArch64::GPR64RegClassID:
1050 case AArch64::GPR32commonRegClassID:
1051 case AArch64::GPR64commonRegClassID:
1056 case AArch64::FPR8RegClassID:
1057 case AArch64::FPR16RegClassID:
1058 case AArch64::FPR32RegClassID:
1059 case AArch64::FPR64RegClassID:
1060 case AArch64::FPR128RegClassID:
1063 case AArch64::MatrixIndexGPR32_8_11RegClassID:
1064 case AArch64::MatrixIndexGPR32_12_15RegClassID:
1067 case AArch64::DDRegClassID:
1068 case AArch64::DDDRegClassID:
1069 case AArch64::DDDDRegClassID:
1070 case AArch64::QQRegClassID:
1071 case AArch64::QQQRegClassID:
1072 case AArch64::QQQQRegClassID:
1075 case AArch64::FPR128_loRegClassID:
1076 case AArch64::FPR64_loRegClassID:
1077 case AArch64::FPR16_loRegClassID:
1079 case AArch64::FPR128_0to7RegClassID:
1110 unsigned RegID = MRI.getRegClass(VirtReg)->getID();
1112 if ((RegID == AArch64::ZPR2StridedOrContiguousRegClassID ||
1113 RegID == AArch64::ZPR4StridedOrContiguousRegClassID) &&
1115 return Use.getOpcode() ==
1116 AArch64::FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO ||
1117 Use.getOpcode() == AArch64::FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO;
1120 RegID == AArch64::ZPR2StridedOrContiguousRegClassID
1121 ? &AArch64::ZPR2StridedRegClass
1122 : &AArch64::ZPR4StridedRegClass;
1133 if (MI.getOpcode() != AArch64::FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO &&
1134 MI.getOpcode() != AArch64::FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO)
1138 unsigned FirstOpSubReg = MI.getOperand(1).getSubReg();
1139 switch (FirstOpSubReg) {
1140 case AArch64::zsub0:
1141 case AArch64::zsub1:
1142 case AArch64::zsub2:
1143 case AArch64::zsub3:
1150 Register FirstOpVirtReg = MI.getOperand(1).getReg();
1151 if (!VRM->hasPhys(FirstOpVirtReg))
1155 getSubReg(VRM->getPhys(FirstOpVirtReg), FirstOpSubReg);
1156 for (unsigned I = 0; I < Order.size(); ++I)
1157 if (MCRegister R = getSubReg(Order[I], AArch64::zsub0))
1158 if (R == TupleStartReg)
1171 else if (hasStackRealignment(MF))
1184 ((DstRC->getID() == AArch64::GPR64RegClassID) ||
1185 (DstRC->getID() == AArch64::GPR64commonRegClassID)) &&
1186 MI->getOperand(0).getSubReg() && MI->getOperand(1).getSubReg())
1193 switch (MI.getOpcode()) {
1194 case AArch64::COALESCER_BARRIER_FPR16:
1195 case AArch64::COALESCER_BARRIER_FPR32:
1196 case AArch64::COALESCER_BARRIER_FPR64:
1197 case AArch64::COALESCER_BARRIER_FPR128:
1213 if (MI->isCopy() && SubReg != DstSubReg &&
1214 (AArch64::ZPRRegClass.hasSubClassEq(DstRC) ||
1215 AArch64::ZPRRegClass.hasSubClassEq(SrcRC))) {
1216 unsigned SrcReg = MI->getOperand(1).getReg();
1217 if (any_of(MRI.def_instructions(SrcReg), IsCoalescerBarrier))
1219 unsigned DstReg = MI->getOperand(0).getReg();
1220 if (any_of(MRI.use_nodbg_instructions(DstReg), IsCoalescerBarrier))
1229 return R == AArch64::VG;
static Register createScratchRegisterForInstruction(MachineInstr &MI, unsigned FIOperandNum, const AArch64InstrInfo *TII)
This file implements the BitVector class.
This file contains constants used for implementing Dwarf debug support.
This file declares the machine register scavenger class.
static unsigned getDwarfRegNum(unsigned Reg, const TargetRegisterInfo *TRI)
Go up the super-register chain until we hit a valid dwarf register number.
StackOffset getNonLocalFrameIndexReference(const MachineFunction &MF, int FI) const override
getNonLocalFrameIndexReference - This method returns the offset used to reference a frame index locat...
StackOffset resolveFrameIndexReference(const MachineFunction &MF, int FI, Register &FrameReg, bool PreferFP, bool ForSimm) const
AArch64FunctionInfo - This class is derived from MachineFunctionInfo and contains private AArch64-spe...
unsigned getTaggedBasePointerOffset() const
uint64_t getStackSizeSVE() const
bool hasCalculatedStackSizeSVE() const
static const char * getRegisterName(MCRegister Reg, unsigned AltIdx=AArch64::NoRegAltName)
BitVector getStrictlyReservedRegs(const MachineFunction &MF) const
const TargetRegisterClass * getCrossCopyRegClass(const TargetRegisterClass *RC) const override
const uint32_t * getThisReturnPreservedMask(const MachineFunction &MF, CallingConv::ID) const
getThisReturnPreservedMask - Returns a call preserved mask specific to the case that 'returned' is on...
bool isReservedReg(const MachineFunction &MF, MCRegister Reg) const
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const override
const MCPhysReg * getCalleeSavedRegsViaCopy(const MachineFunction *MF) const
BitVector getReservedRegs(const MachineFunction &MF) const override
bool shouldCoalesce(MachineInstr *MI, const TargetRegisterClass *SrcRC, unsigned SubReg, const TargetRegisterClass *DstRC, unsigned DstSubReg, const TargetRegisterClass *NewRC, LiveIntervals &LIS) const override
SrcRC and DstRC will be morphed into NewRC if this returns true.
bool requiresVirtualBaseRegisters(const MachineFunction &MF) const override
const TargetRegisterClass * getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const override
unsigned getRegPressureLimit(const TargetRegisterClass *RC, MachineFunction &MF) const override
bool getRegAllocationHints(Register VirtReg, ArrayRef< MCPhysReg > Order, SmallVectorImpl< MCPhysReg > &Hints, const MachineFunction &MF, const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const override
Register materializeFrameBaseRegister(MachineBasicBlock *MBB, int FrameIdx, int64_t Offset) const override
Insert defining instruction(s) for BaseReg to be a pointer to FrameIdx at the beginning of the basic ...
const TargetRegisterClass * getPointerRegClass(const MachineFunction &MF, unsigned Kind=0) const override
void UpdateCustomCalleeSavedRegs(MachineFunction &MF) const
bool requiresRegisterScavenging(const MachineFunction &MF) const override
bool isFrameOffsetLegal(const MachineInstr *MI, Register BaseReg, int64_t Offset) const override
void resolveFrameIndex(MachineInstr &MI, Register BaseReg, int64_t Offset) const override
bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const override
needsFrameBaseReg - Returns true if the instruction's frame index reference would be better served by...
const uint32_t * getWindowsStackProbePreservedMask() const
Stack probing calls preserve different CSRs to the normal CC.
AArch64RegisterInfo(const Triple &TT)
bool isAnyArgRegReserved(const MachineFunction &MF) const
void emitReservedArgRegCallError(const MachineFunction &MF) const
bool regNeedsCFI(unsigned Reg, unsigned &RegToUseForCFI) const
Return whether the register needs a CFI entry.
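A minimal usage sketch (RI and Reg are assumed caller-side names, not taken from this file): the out-parameter lets an SVE Z-register be described via its overlapping D-register, as the loop at lines 57-59 above illustrates.
unsigned RegToUseForCFI = Reg;
if (RI->regNeedsCFI(Reg, RegToUseForCFI)) {
  // Emit the CFI save/restore entry for RegToUseForCFI rather than Reg;
  // for a Z-register this is the aliased D-register when that D-register
  // itself appears in the AAPCS callee-saved list.
}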
bool isStrictlyReservedReg(const MachineFunction &MF, MCRegister Reg) const
bool eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj, unsigned FIOperandNum, RegScavenger *RS=nullptr) const override
const uint32_t * getTLSCallPreservedMask() const
const uint32_t * getNoPreservedMask() const override
Register getFrameRegister(const MachineFunction &MF) const override
bool shouldAnalyzePhysregInMachineLoopInfo(MCRegister R) const override
void getOffsetOpcodes(const StackOffset &Offset, SmallVectorImpl< uint64_t > &Ops) const override
const MCPhysReg * getDarwinCalleeSavedRegs(const MachineFunction *MF) const
bool isAsmClobberable(const MachineFunction &MF, MCRegister PhysReg) const override
const uint32_t * SMEABISupportRoutinesCallPreservedMaskFromX0() const
const MCPhysReg * getCalleeSavedRegs(const MachineFunction *MF) const override
Code Generation virtual methods...
const uint32_t * getCustomEHPadPreservedMask(const MachineFunction &MF) const override
unsigned getLocalAddressRegister(const MachineFunction &MF) const
bool hasBasePointer(const MachineFunction &MF) const
const uint32_t * getDarwinCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const
const uint32_t * getSMStartStopCallPreservedMask() const
bool useFPForScavengingIndex(const MachineFunction &MF) const override
bool cannotEliminateFrame(const MachineFunction &MF) const
bool isArgumentRegister(const MachineFunction &MF, MCRegister Reg) const override
void UpdateCustomCallPreservedMask(MachineFunction &MF, const uint32_t **Mask) const
std::optional< std::string > explainReservedReg(const MachineFunction &MF, MCRegister PhysReg) const override
bool requiresFrameIndexScavenging(const MachineFunction &MF) const override
unsigned getBaseRegister() const
bool isTargetWindows() const
bool isLRReservedForRA() const
bool isTargetDarwin() const
bool isTargetILP32() const
bool isXRegisterReservedForRA(size_t i) const
unsigned getNumXRegisterReserved() const
const AArch64TargetLowering * getTargetLowering() const override
bool isXRegCustomCalleeSaved(size_t i) const
bool isWindowsArm64EC() const
bool isXRegisterReserved(size_t i) const
bool isCallingConvWin64(CallingConv::ID CC, bool IsVarArg) const
bool isTargetLinux() const
bool supportSwiftError() const override
Return true if the target supports swifterror attribute.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool hasAttrSomewhere(Attribute::AttrKind Kind, unsigned *Index=nullptr) const
Return true if the specified attribute is set for at least one parameter or for the return value.
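For illustration, a hedged sketch of the kind of check the save-list selection above performs (F is an assumed llvm::Function reference; the real code additionally consults supportSwiftError() on the target lowering):
if (F.getAttributes().hasAttrSomewhere(Attribute::SwiftError))
  return CSR_AArch64_AAPCS_SwiftError_SaveList;   // cf. lines 130-131 above
return CSR_AArch64_AAPCS_SaveList;                // cf. line 144 above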
static void appendOffset(SmallVectorImpl< uint64_t > &Ops, int64_t Offset)
Append Ops with operations to apply the Offset.
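A small sketch of this helper in isolation (the 16-byte offset is an arbitrary example value; DIExpression lives in llvm/IR/DebugInfoMetadata.h):
SmallVector<uint64_t, 8> Ops;
DIExpression::appendOffset(Ops, 16);   // appends DW_OP_plus_uconst 16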
This class represents an Operation in the Expression.
Diagnostic information for unsupported feature in backend.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
AttributeList getAttributes() const
Return the attribute list for this Function.
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Describe properties that are true of each instruction in the target description file.
bool regsOverlap(MCRegister RegA, MCRegister RegB) const
Returns true if the two registers are equal or alias each other.
Wrapper class representing physical registers. Should be passed by value.
MCSubRegIterator enumerates all sub-registers of Reg.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack ...
uint64_t getStackSize() const
Return the number of bytes that must be allocated to hold all of the fixed size frame objects.
bool adjustsStack() const
Return true if this function adjusts the stack – e.g., when calling another function.
bool isFrameAddressTaken() const
This method may be called any time after instruction selection is complete to determine if there is a...
int64_t getLocalFrameSize() const
Get the size of the local object blob.
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
bool hasProperty(Property P) const
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
uint32_t * allocateRegMask()
Allocate and initialize a register mask with NumRegister bits.
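Sketch of the copy-then-modify pattern used around line 351 above (MF, TRI and Mask are assumed to be the surrounding function's parameters and locals):
uint32_t *UpdatedMask = MF.allocateRegMask();
unsigned RegMaskSize = MachineOperand::getRegMaskSize(TRI->getNumRegs());
memcpy(UpdatedMask, *Mask, sizeof(UpdatedMask[0]) * RegMaskSize);
// ... flip bits for the custom-preserved registers, then publish the result:
*Mask = UpdatedMask;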
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineFunctionProperties & getProperties() const
Get the function properties.
bool hasEHFunclets() const
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
MachineOperand class - Representation of each machine instruction operand.
void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags=0)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value.
static unsigned getRegMaskSize(unsigned NumRegs)
Returns number of elements needed for a regmask array.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void setCalleeSavedRegs(ArrayRef< MCPhysReg > CSRs)
Sets the updated Callee Saved Registers list.
bool isScavengingFrameIndex(int FI) const
Query whether a frame index is a scavenging frame index.
Wrapper class representing virtual and physical registers.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
int64_t getFixed() const
Returns the fixed component of the stack.
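For clarity, a brief sketch of the two components (the values are arbitrary examples):
StackOffset Off = StackOffset::get(/*Fixed=*/16, /*Scalable=*/32);
int64_t FixedBytes    = Off.getFixed();     // 16 bytes, unconditionally
int64_t ScalableBytes = Off.getScalable();  // 32 bytes per unit of vscale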
bool hasFP(const MachineFunction &MF) const
hasFP - Return true if the specified function should have a dedicated frame pointer register.
bool DisableFramePointerElim(const MachineFunction &MF) const
DisableFramePointerElim - This returns true if frame pointer elimination optimization should be disab...
unsigned getID() const
Return the register class ID number.
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
virtual bool getRegAllocationHints(Register VirtReg, ArrayRef< MCPhysReg > Order, SmallVectorImpl< MCPhysReg > &Hints, const MachineFunction &MF, const VirtRegMap *VRM=nullptr, const LiveRegMatrix *Matrix=nullptr) const
Get a list of 'hint' registers that the register allocator should try first when allocating a physica...
Triple - Helper class for working with autoconf configuration names.
bool isOSDarwin() const
Is this a "Darwin" OS (macOS, iOS, tvOS, watchOS, XROS, or DriverKit).
bool isOSBinFormatELF() const
Tests whether the OS uses the ELF binary format.
A Use represents the edge between a Value definition and its users.
MCRegister getPhys(Register virtReg) const
returns the physical register mapped to the specified virtual register
bool hasPhys(Register virtReg) const
returns true if the specified virtual register is mapped to a physical register
@ MO_TAGGED
MO_TAGGED - With MO_PAGE, indicates that the page includes a memory tag in bits 56-63.
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm: 6-bit shift amount shifter: 000 ==> lsl 001 ==...
void initLLVMToCVRegMapping(MCRegisterInfo *MRI)
@ AArch64_VectorCall
Used between AArch64 Advanced SIMD functions.
@ Swift
Calling convention for Swift.
@ AArch64_SVE_VectorCall
Used between AArch64 SVE functions.
@ CFGuard_Check
Special calling convention on Windows for calling the Control Guard Check ICall function.
@ PreserveMost
Used for runtime calls that preserve most registers.
@ AnyReg
OBSOLETED - Used for stack based JavaScript calls.
@ AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2
Preserve X2-X15, X19-X29, SP, Z0-Z31, P0-P15.
@ CXX_FAST_TLS
Used for access functions.
@ AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0
Preserve X0-X13, X19-X29, SP, Z0-Z31, P0-P15.
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
@ AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1
Preserve X1-X15, X19-X29, SP, Z0-Z31, P0-P15.
@ PreserveAll
Used for runtime calls that preserve (almost) all registers.
@ Fast
Attempts to make calls as fast as possible (e.g.
@ PreserveNone
Used for runtime calls that preserve no general registers.
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always b...
@ Win64
The C convention as implemented on Windows/x86-64 and AArch64.
@ SwiftTail
This follows the Swift calling convention in how arguments are passed but guarantees tail calls will ...
@ GRAAL
Used by GraalVM. Two additional registers are reserved.
@ ARM64EC_Thunk_X64
Calling convention used in the ARM64EC ABI to implement calls between x64 code and thunks.
@ C
The default llvm calling convention, compatible with C.
This is an optimization pass for GlobalISel generic memory operations.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
int isAArch64FrameOffsetLegal(const MachineInstr &MI, StackOffset &Offset, bool *OutUseUnscaledOp=nullptr, unsigned *OutUnscaledOp=nullptr, int64_t *EmittableOffset=nullptr)
Check if the Offset is a valid frame offset for MI.
@ AArch64FrameOffsetIsLegal
Offset is legal.
@ AArch64FrameOffsetCanUpdate
Offset can apply, at least partly.
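A hedged sketch of how a caller typically interprets the returned status bits (MI and Offset are assumed to be in scope):
int Status = isAArch64FrameOffsetLegal(MI, Offset);
if (Status & AArch64FrameOffsetIsLegal) {
  // The offset can be encoded directly in MI's addressing mode.
} else if (Status & AArch64FrameOffsetCanUpdate) {
  // Only part of the offset folds into MI; the remainder must be
  // materialized separately, e.g. into a scratch register.
}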
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
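Sketch mirroring the coalescer check near line 1217 above (MRI and SrcReg are assumed; the single opcode stands in for the full IsCoalescerBarrier predicate):
bool FeedsBarrier =
    any_of(MRI.def_instructions(SrcReg), [](const MachineInstr &DefMI) {
      return DefMI.getOpcode() == AArch64::COALESCER_BARRIER_FPR128;
    });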
void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, StackOffset Offset, const TargetInstrInfo *TII, MachineInstr::MIFlag=MachineInstr::NoFlags, bool SetNZCV=false, bool NeedsWinCFI=false, bool *HasWinCFI=nullptr, bool EmitCFAOffset=false, StackOffset InitialOffset={}, unsigned FrameReg=AArch64::SP)
emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg plus Offset.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
bool rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx, unsigned FrameReg, StackOffset &Offset, const AArch64InstrInfo *TII)
rewriteAArch64FrameIndex - Rewrite MI to access 'Offset' bytes from the FP.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
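A short sketch of is_contained on a fixed register list (the list here is illustrative, not one of the generated CC_*_ArgRegs tables used above):
static const MCPhysReg DemoArgRegs[] = {AArch64::X0, AArch64::X1, AArch64::X2};
bool IsArgReg = is_contained(DemoArgRegs, AArch64::X1);   // true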