LCOV - code coverage report
Current view: top level - lib/Target/AArch64 - AArch64RegisterInfo.cpp (source / functions) Hit Total Coverage
Test: llvm-toolchain.info Lines: 181 183 98.9 %
Date: 2018-10-20 13:21:21 Functions: 32 32 100.0 %
Legend: Lines: hit not hit

          Line data    Source code
       1             : //===- AArch64RegisterInfo.cpp - AArch64 Register Information -------------===//
       2             : //
       3             : //                     The LLVM Compiler Infrastructure
       4             : //
       5             : // This file is distributed under the University of Illinois Open Source
       6             : // License. See LICENSE.TXT for details.
       7             : //
       8             : //===----------------------------------------------------------------------===//
       9             : //
      10             : // This file contains the AArch64 implementation of the TargetRegisterInfo
      11             : // class.
      12             : //
      13             : //===----------------------------------------------------------------------===//
      14             : 
      15             : #include "AArch64RegisterInfo.h"
      16             : #include "AArch64FrameLowering.h"
      17             : #include "AArch64InstrInfo.h"
      18             : #include "AArch64MachineFunctionInfo.h"
      19             : #include "AArch64Subtarget.h"
      20             : #include "MCTargetDesc/AArch64AddressingModes.h"
      21             : #include "llvm/ADT/BitVector.h"
      22             : #include "llvm/ADT/Triple.h"
      23             : #include "llvm/CodeGen/MachineFrameInfo.h"
      24             : #include "llvm/CodeGen/MachineInstrBuilder.h"
      25             : #include "llvm/CodeGen/MachineRegisterInfo.h"
      26             : #include "llvm/CodeGen/RegisterScavenging.h"
      27             : #include "llvm/IR/Function.h"
      28             : #include "llvm/IR/DiagnosticInfo.h"
      29             : #include "llvm/Support/raw_ostream.h"
      30             : #include "llvm/CodeGen/TargetFrameLowering.h"
      31             : #include "llvm/Target/TargetOptions.h"
      32             : 
      33             : using namespace llvm;
      34             : 
      35             : #define GET_REGINFO_TARGET_DESC
      36             : #include "AArch64GenRegisterInfo.inc"
      37             : 
// Construct the AArch64 register info.  LR is handed to the TableGen-generated
// base class as the return-address register; the target triple is kept so
// OS-specific decisions (e.g. Darwin) can be made later.
AArch64RegisterInfo::AArch64RegisterInfo(const Triple &TT)
    : AArch64GenRegisterInfo(AArch64::LR), TT(TT) {
  // Install the LLVM-register -> CodeView-register mapping.
  AArch64_MC::initLLVMToCVRegMapping(this);
}
      42             : 
      43             : const MCPhysReg *
      44      232825 : AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
      45             :   assert(MF && "Invalid MachineFunction pointer.");
      46      465650 :   if (MF->getFunction().getCallingConv() == CallingConv::GHC)
      47             :     // GHC set of callee saved regs is empty as all those regs are
      48             :     // used for passing STG regs around
      49             :     return CSR_AArch64_NoRegs_SaveList;
      50      232747 :   if (MF->getFunction().getCallingConv() == CallingConv::AnyReg)
      51             :     return CSR_AArch64_AllRegs_SaveList;
      52      232747 :   if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall)
      53             :     return CSR_AArch64_AAVPCS_SaveList;
      54      232732 :   if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS)
      55         100 :     return MF->getInfo<AArch64FunctionInfo>()->isSplitCSR() ?
      56             :            CSR_AArch64_CXX_TLS_Darwin_PE_SaveList :
      57             :            CSR_AArch64_CXX_TLS_Darwin_SaveList;
      58      232632 :   if (MF->getSubtarget<AArch64Subtarget>().getTargetLowering()
      59      465264 :           ->supportSwiftError() &&
      60      232632 :       MF->getFunction().getAttributes().hasAttrSomewhere(
      61             :           Attribute::SwiftError))
      62         454 :     return CSR_AArch64_AAPCS_SwiftError_SaveList;
      63      464356 :   if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
      64             :     return CSR_AArch64_RT_MostRegs_SaveList;
      65             :   else
      66      232151 :     return CSR_AArch64_AAPCS_SaveList;
      67             : }
      68             : 
      69       13570 : const MCPhysReg *AArch64RegisterInfo::getCalleeSavedRegsViaCopy(
      70             :     const MachineFunction *MF) const {
      71             :   assert(MF && "Invalid MachineFunction pointer.");
      72       27140 :   if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
      73          17 :       MF->getInfo<AArch64FunctionInfo>()->isSplitCSR())
      74          12 :     return CSR_AArch64_CXX_TLS_Darwin_ViaCopy_SaveList;
      75             :   return nullptr;
      76             : }
      77             : 
      78          25 : void AArch64RegisterInfo::UpdateCustomCalleeSavedRegs(
      79             :     MachineFunction &MF) const {
      80          25 :   const MCPhysReg *CSRs = getCalleeSavedRegs(&MF);
      81             :   SmallVector<MCPhysReg, 32> UpdatedCSRs;
      82         525 :   for (const MCPhysReg *I = CSRs; *I; ++I)
      83         500 :     UpdatedCSRs.push_back(*I);
      84             : 
      85         800 :   for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
      86        1550 :     if (MF.getSubtarget<AArch64Subtarget>().isXRegCustomCalleeSaved(i)) {
      87          57 :       UpdatedCSRs.push_back(AArch64::GPR64commonRegClass.getRegister(i));
      88             :     }
      89             :   }
      90             :   // Register lists are zero-terminated.
      91          25 :   UpdatedCSRs.push_back(0);
      92          50 :   MF.getRegInfo().setCalleeSavedRegs(UpdatedCSRs);
      93          25 : }
      94             : 
      95             : const TargetRegisterClass *
      96       33904 : AArch64RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
      97             :                                        unsigned Idx) const {
      98             :   // edge case for GPR/FPR register classes
      99       33904 :   if (RC == &AArch64::GPR32allRegClass && Idx == AArch64::hsub)
     100             :     return &AArch64::FPR32RegClass;
     101       33888 :   else if (RC == &AArch64::GPR64allRegClass && Idx == AArch64::hsub)
     102             :     return &AArch64::FPR64RegClass;
     103             : 
     104             :   // Forward to TableGen's default version.
     105       33886 :   return AArch64GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
     106             : }
     107             : 
     108             : const uint32_t *
     109        2432 : AArch64RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
     110             :                                           CallingConv::ID CC) const {
     111        2432 :   bool SCS = MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack);
     112        2432 :   if (CC == CallingConv::GHC)
     113             :     // This is academic because all GHC calls are (supposed to be) tail calls
     114           8 :     return SCS ? CSR_AArch64_NoRegs_SCS_RegMask : CSR_AArch64_NoRegs_RegMask;
     115        2428 :   if (CC == CallingConv::AnyReg)
     116          26 :     return SCS ? CSR_AArch64_AllRegs_SCS_RegMask : CSR_AArch64_AllRegs_RegMask;
     117        2415 :   if (CC == CallingConv::CXX_FAST_TLS)
     118             :     return SCS ? CSR_AArch64_CXX_TLS_Darwin_SCS_RegMask
     119          12 :                : CSR_AArch64_CXX_TLS_Darwin_RegMask;
     120        2409 :   if (CC == CallingConv::AArch64_VectorCall)
     121           0 :     return SCS ? CSR_AArch64_AAVPCS_SCS_RegMask : CSR_AArch64_AAVPCS_RegMask;
     122        2409 :   if (MF.getSubtarget<AArch64Subtarget>().getTargetLowering()
     123        4818 :           ->supportSwiftError() &&
     124        2409 :       MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
     125             :     return SCS ? CSR_AArch64_AAPCS_SwiftError_SCS_RegMask
     126          54 :                : CSR_AArch64_AAPCS_SwiftError_RegMask;
     127        2382 :   if (CC == CallingConv::PreserveMost)
     128             :     return SCS ? CSR_AArch64_RT_MostRegs_SCS_RegMask
     129           8 :                : CSR_AArch64_RT_MostRegs_RegMask;
     130             :   else
     131        4749 :     return SCS ? CSR_AArch64_AAPCS_SCS_RegMask : CSR_AArch64_AAPCS_RegMask;
     132             : }
     133             : 
     134          27 : const uint32_t *AArch64RegisterInfo::getTLSCallPreservedMask() const {
     135          27 :   if (TT.isOSDarwin())
     136          27 :     return CSR_AArch64_TLS_Darwin_RegMask;
     137             : 
     138             :   assert(TT.isOSBinFormatELF() && "Invalid target");
     139             :   return CSR_AArch64_TLS_ELF_RegMask;
     140             : }
     141             : 
     142          11 : void AArch64RegisterInfo::UpdateCustomCallPreservedMask(MachineFunction &MF,
     143             :                                                  const uint32_t **Mask) const {
     144          11 :   uint32_t *UpdatedMask = MF.allocateRegMask();
     145          11 :   unsigned RegMaskSize = MachineOperand::getRegMaskSize(getNumRegs());
     146          11 :   memcpy(UpdatedMask, *Mask, sizeof(UpdatedMask[0]) * RegMaskSize);
     147             : 
     148         352 :   for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
     149         682 :     if (MF.getSubtarget<AArch64Subtarget>().isXRegCustomCalleeSaved(i)) {
     150             :       for (MCSubRegIterator SubReg(AArch64::GPR64commonRegClass.getRegister(i),
     151             :                                    this, true);
     152          81 :            SubReg.isValid(); ++SubReg) {
     153             :         // See TargetRegisterInfo::getCallPreservedMask for how to interpret the
     154             :         // register mask.
     155          54 :         UpdatedMask[*SubReg / 32] |= 1u << (*SubReg % 32);
     156             :       }
     157             :     }
     158             :   }
     159          11 :   *Mask = UpdatedMask;
     160          11 : }
     161             : 
// Preserved mask for calls lowered with the "this-return" optimization,
// where the callee returns its first i64 argument unchanged.
const uint32_t *
AArch64RegisterInfo::getThisReturnPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  // This should return a register mask that is the same as that returned by
  // getCallPreservedMask but that additionally preserves the register used for
  // the first i64 argument (which must also be the register used to return a
  // single i64 return value)
  //
  // In case that the calling convention does not use the same register for
  // both, the function should return NULL (does not currently apply)
  assert(CC != CallingConv::GHC && "should not be GHC calling convention.");
  return CSR_AArch64_AAPCS_ThisReturn_RegMask;
}
     175             : 
// Mask of registers preserved across the Windows stack-probe helper call.
const uint32_t *AArch64RegisterInfo::getWindowsStackProbePreservedMask() const {
  return CSR_AArch64_StackProbe_Windows_RegMask;
}
     179             : 
     180             : BitVector
     181      228922 : AArch64RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
     182      228922 :   const AArch64FrameLowering *TFI = getFrameLowering(MF);
     183             : 
     184             :   // FIXME: avoid re-calculating this every time.
     185      228922 :   BitVector Reserved(getNumRegs());
     186      228922 :   markSuperRegs(Reserved, AArch64::WSP);
     187      228922 :   markSuperRegs(Reserved, AArch64::WZR);
     188             : 
     189      228922 :   if (TFI->hasFP(MF) || TT.isOSDarwin())
     190       57631 :     markSuperRegs(Reserved, AArch64::W29);
     191             : 
     192     7325504 :   for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
     193    14193164 :     if (MF.getSubtarget<AArch64Subtarget>().isXRegisterReserved(i))
     194       44489 :       markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
     195             :   }
     196             : 
     197      228922 :   if (hasBasePointer(MF))
     198         197 :     markSuperRegs(Reserved, AArch64::W19);
     199             : 
     200             :   assert(checkAllSuperRegsMarked(Reserved));
     201      228922 :   return Reserved;
     202             : }
     203             : 
// Return true if Reg is reserved (unallocatable) in MF.  Note this recomputes
// the whole reserved-register BitVector on every query (see the FIXME in
// getReservedRegs).
bool AArch64RegisterInfo::isReservedReg(const MachineFunction &MF,
                                      unsigned Reg) const {
  return getReservedRegs(MF)[Reg];
}
     208             : 
     209        2163 : bool AArch64RegisterInfo::isAnyArgRegReserved(const MachineFunction &MF) const {
     210             :   // FIXME: Get the list of argument registers from TableGen.
     211             :   static const MCPhysReg GPRArgRegs[] = { AArch64::X0, AArch64::X1, AArch64::X2,
     212             :                                           AArch64::X3, AArch64::X4, AArch64::X5,
     213             :                                           AArch64::X6, AArch64::X7 };
     214             :   return std::any_of(std::begin(GPRArgRegs), std::end(GPRArgRegs),
     215           0 :                      [this, &MF](MCPhysReg r){return isReservedReg(MF, r);});
     216             : }
     217             : 
// Emit an "unsupported" diagnostic for F when a call would need an argument
// register that the user asked to reserve.
void AArch64RegisterInfo::emitReservedArgRegCallError(
    const MachineFunction &MF) const {
  const Function &F = MF.getFunction();
  F.getContext().diagnose(DiagnosticInfoUnsupported{F, "AArch64 doesn't support"
    " function calls if any of the argument registers is reserved."});
}
     224             : 
// Inline asm may clobber any register that is not reserved for this function.
bool AArch64RegisterInfo::isAsmClobberable(const MachineFunction &MF,
                                          unsigned PhysReg) const {
  return !isReservedReg(MF, PhysReg);
}
     229             : 
     230      199122 : bool AArch64RegisterInfo::isConstantPhysReg(unsigned PhysReg) const {
     231      199122 :   return PhysReg == AArch64::WZR || PhysReg == AArch64::XZR;
     232             : }
     233             : 
// All pointers on AArch64 live in 64-bit GPRs; the "sp" class is used so the
// stack pointer is also a valid pointer value.  Kind is ignored.
const TargetRegisterClass *
AArch64RegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                      unsigned Kind) const {
  return &AArch64::GPR64spRegClass;
}
     239             : 
     240             : const TargetRegisterClass *
     241           3 : AArch64RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
     242           3 :   if (RC == &AArch64::CCRRegClass)
     243           3 :     return &AArch64::GPR64RegClass; // Only MSR & MRS copy NZCV.
     244             :   return RC;
     245             : }
     246             : 
     247          41 : unsigned AArch64RegisterInfo::getBaseRegister() const { return AArch64::X19; }
     248             : 
     249      249091 : bool AArch64RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
     250      249091 :   const MachineFrameInfo &MFI = MF.getFrameInfo();
     251             : 
     252             :   // In the presence of variable sized objects, if the fixed stack size is
     253             :   // large enough that referencing from the FP won't result in things being
     254             :   // in range relatively often, we can use a base pointer to allow access
     255             :   // from the other direction like the SP normally works.
     256             :   // Furthermore, if both variable sized objects are present, and the
     257             :   // stack needs to be dynamically re-aligned, the base pointer is the only
     258             :   // reliable way to reference the locals.
     259      249091 :   if (MFI.hasVarSizedObjects()) {
     260         891 :     if (needsStackRealignment(MF))
     261             :       return true;
     262             :     // Conservatively estimate whether the negative offset from the frame
     263             :     // pointer will be sufficient to reach. If a function has a smallish
     264             :     // frame, it's less likely to have lots of spills and callee saved
     265             :     // space, so it's all more likely to be within range of the frame pointer.
     266             :     // If it's wrong, we'll materialize the constant and still get to the
     267             :     // object; it's just suboptimal. Negative offsets use the unscaled
     268             :     // load/store instructions, which have a 9-bit signed immediate.
     269         725 :     return MFI.getLocalFrameSize() >= 256;
     270             :   }
     271             : 
     272             :   return false;
     273             : }
     274             : 
     275             : unsigned
     276        1539 : AArch64RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
     277        1539 :   const AArch64FrameLowering *TFI = getFrameLowering(MF);
     278        1539 :   return TFI->hasFP(MF) ? AArch64::FP : AArch64::SP;
     279             : }
     280             : 
// AArch64 always enables the register scavenger (used, e.g., by
// eliminateFrameIndex when an offset does not fit in an instruction).
bool AArch64RegisterInfo::requiresRegisterScavenging(
    const MachineFunction &MF) const {
  return true;
}
     285             : 
// Always allow LocalStackSlotAllocation to create virtual base registers
// (see needsFrameBaseReg for the per-instruction decision).
bool AArch64RegisterInfo::requiresVirtualBaseRegisters(
    const MachineFunction &MF) const {
  return true;
}
     290             : 
     291             : bool
     292         293 : AArch64RegisterInfo::useFPForScavengingIndex(const MachineFunction &MF) const {
     293             :   // This function indicates whether the emergency spillslot should be placed
     294             :   // close to the beginning of the stackframe (closer to FP) or the end
     295             :   // (closer to SP).
     296             :   //
     297             :   // The beginning works most reliably if we have a frame pointer.
     298         293 :   const AArch64FrameLowering &TFI = *getFrameLowering(MF);
     299         293 :   return TFI.hasFP(MF);
     300             : }
     301             : 
// Frame-index elimination may need a scavenged scratch register, so always
// request scavenging support for it.
bool AArch64RegisterInfo::requiresFrameIndexScavenging(
    const MachineFunction &MF) const {
  return true;
}
     306             : 
     307             : bool
     308       13646 : AArch64RegisterInfo::cannotEliminateFrame(const MachineFunction &MF) const {
     309       13646 :   const MachineFrameInfo &MFI = MF.getFrameInfo();
     310       13646 :   if (MF.getTarget().Options.DisableFramePointerElim(MF) && MFI.adjustsStack())
     311             :     return true;
     312       13646 :   return MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken();
     313             : }
     314             : 
/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool AArch64RegisterInfo::needsFrameBaseReg(MachineInstr *MI,
                                            int64_t Offset) const {
  // Locate the frame-index operand; in debug builds the assert fires if the
  // instruction has none.
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i)
    assert(i < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand!");

  // It's the load/store FI references that cause issues, as it can be difficult
  // to materialize the offset if it won't fit in the literal field. Estimate
  // based on the size of the local frame and some conservative assumptions
  // about the rest of the stack frame (note, this is pre-regalloc, so
  // we don't know everything for certain yet) whether this offset is likely
  // to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  if (!MI->mayLoad() && !MI->mayStore())
    return false;

  // Without a virtual base register, if the function has variable sized
  // objects, all fixed-size local references will be via the frame pointer,
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all GPR callee-saved registers get pushed.
  // FP, LR, X19-X28, D8-D15. 64-bits each (20 registers * 16 bytes / 2).
  int64_t FPOffset = Offset - 16 * 20;
  // Estimate an offset from the stack pointer.
  // The incoming offset is relating to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset += MFI.getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  //        and pick a real one.
  Offset += 128; // 128 bytes of spill slots

  // If there is a frame pointer, try using it.
  // The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  if (TFI->hasFP(MF) && isFrameOffsetLegal(MI, AArch64::FP, FPOffset))
    return false;

  // If we can reference via the stack pointer or base pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (isFrameOffsetLegal(MI, AArch64::SP, Offset))
    return false;

  // The offset likely isn't legal; we want to allocate a virtual base register.
  return true;
}
     378             : 
     379        1210 : bool AArch64RegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
     380             :                                              unsigned BaseReg,
     381             :                                              int64_t Offset) const {
     382             :   assert(Offset <= INT_MAX && "Offset too big to fit in int.");
     383             :   assert(MI && "Unable to get the legal offset for nil instruction.");
     384        1210 :   int SaveOffset = Offset;
     385        1210 :   return isAArch64FrameOffsetLegal(*MI, SaveOffset) & AArch64FrameOffsetIsLegal;
     386             : }
     387             : 
/// Insert defining instruction(s) for BaseReg to be a pointer to FrameIdx
/// at the beginning of the basic block.  Emits BaseReg = ADDXri FrameIdx,
/// Offset, LSL #0; the frame index is resolved later by eliminateFrameIndex.
void AArch64RegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                       unsigned BaseReg,
                                                       int FrameIdx,
                                                       int64_t Offset) const {
  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL; // Defaults to "unknown"
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();
  const MachineFunction &MF = *MBB->getParent();
  const AArch64InstrInfo *TII =
      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
  const MCInstrDesc &MCID = TII->get(AArch64::ADDXri);
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  // Constrain BaseReg to the register class ADDXri requires for its
  // destination operand (operand 0).
  MRI.constrainRegClass(BaseReg, TII->getRegClass(MCID, 0, this, MF));
  unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);

  BuildMI(*MBB, Ins, DL, MCID, BaseReg)
      .addFrameIndex(FrameIdx)
      .addImm(Offset)
      .addImm(Shifter);
}
     411             : 
     412           2 : void AArch64RegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
     413             :                                             int64_t Offset) const {
     414           2 :   int Off = Offset; // ARM doesn't need the general 64-bit offsets
     415             :   unsigned i = 0;
     416             : 
     417           8 :   while (!MI.getOperand(i).isFI()) {
     418           2 :     ++i;
     419             :     assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
     420             :   }
     421           2 :   const MachineFunction *MF = MI.getParent()->getParent();
     422             :   const AArch64InstrInfo *TII =
     423           2 :       MF->getSubtarget<AArch64Subtarget>().getInstrInfo();
     424           2 :   bool Done = rewriteAArch64FrameIndex(MI, i, BaseReg, Off, TII);
     425             :   assert(Done && "Unable to resolve frame index!");
     426             :   (void)Done;
     427           2 : }
     428             : 
// Replace the abstract frame-index operand of the instruction at II with a
// concrete base register + offset; if the offset does not fit the
// instruction's encoding, materialize it into a scavenged scratch register.
void AArch64RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                              int SPAdj, unsigned FIOperandNum,
                                              RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const AArch64InstrInfo *TII =
      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
  const AArch64FrameLowering *TFI = getFrameLowering(MF);

  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  unsigned FrameReg;
  int Offset;

  // Special handling of dbg_value, stackmap and patchpoint instructions:
  // these tolerate any offset, so just fold the resolved reference directly
  // into the operands (preferring FP-relative addressing) and return.
  if (MI.isDebugValue() || MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT) {
    Offset = TFI->resolveFrameIndexReference(MF, FrameIndex, FrameReg,
                                             /*PreferFP=*/true);
    Offset += MI.getOperand(FIOperandNum + 1).getImm();
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false /*isDef*/);
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
    return;
  }

  // Modify MI as necessary to handle as much of 'Offset' as possible
  Offset = TFI->resolveFrameIndexReference(MF, FrameIndex, FrameReg);
  if (rewriteAArch64FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII))
    return;

  assert((!RS || !RS->isScavengingFrameIndex(FrameIndex)) &&
         "Emergency spill slot is out of reach");

  // If we get here, the immediate doesn't fit into the instruction.  We folded
  // as much as possible above.  Handle the rest, providing a register that is
  // SP+LargeImm.
  unsigned ScratchReg =
      MF.getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
  emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset, TII);
  MI.getOperand(FIOperandNum).ChangeToRegister(ScratchReg, false, false, true);
}
     472             : 
     473        1560 : unsigned AArch64RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
     474             :                                                   MachineFunction &MF) const {
     475        1560 :   const AArch64FrameLowering *TFI = getFrameLowering(MF);
     476             : 
     477        3120 :   switch (RC->getID()) {
     478             :   default:
     479             :     return 0;
     480         120 :   case AArch64::GPR32RegClassID:
     481             :   case AArch64::GPR32spRegClassID:
     482             :   case AArch64::GPR32allRegClassID:
     483             :   case AArch64::GPR64spRegClassID:
     484             :   case AArch64::GPR64allRegClassID:
     485             :   case AArch64::GPR64RegClassID:
     486             :   case AArch64::GPR32commonRegClassID:
     487             :   case AArch64::GPR64commonRegClassID:
     488             :     return 32 - 1                                   // XZR/SP
     489         120 :               - (TFI->hasFP(MF) || TT.isOSDarwin()) // FP
     490         120 :               - MF.getSubtarget<AArch64Subtarget>().getNumXRegisterReserved()
     491         120 :               - hasBasePointer(MF);  // X19
     492          75 :   case AArch64::FPR8RegClassID:
     493             :   case AArch64::FPR16RegClassID:
     494             :   case AArch64::FPR32RegClassID:
     495             :   case AArch64::FPR64RegClassID:
     496             :   case AArch64::FPR128RegClassID:
     497          75 :     return 32;
     498             : 
     499          90 :   case AArch64::DDRegClassID:
     500             :   case AArch64::DDDRegClassID:
     501             :   case AArch64::DDDDRegClassID:
     502             :   case AArch64::QQRegClassID:
     503             :   case AArch64::QQQRegClassID:
     504             :   case AArch64::QQQQRegClassID:
     505          90 :     return 32;
     506             : 
     507          15 :   case AArch64::FPR128_loRegClassID:
     508          15 :     return 16;
     509             :   }
     510             : }

Generated by: LCOV version 1.13