LLVM  16.0.0git
AArch64RegisterInfo.cpp
Go to the documentation of this file.
1 //===- AArch64RegisterInfo.cpp - AArch64 Register Information -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the AArch64 implementation of the TargetRegisterInfo
10 // class.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "AArch64RegisterInfo.h"
15 #include "AArch64FrameLowering.h"
16 #include "AArch64InstrInfo.h"
18 #include "AArch64Subtarget.h"
21 #include "llvm/ADT/BitVector.h"
22 #include "llvm/ADT/Triple.h"
30 #include "llvm/IR/DiagnosticInfo.h"
31 #include "llvm/IR/Function.h"
34 
35 using namespace llvm;
36 
37 #define GET_CC_REGISTER_LISTS
38 #include "AArch64GenCallingConv.inc"
39 #define GET_REGINFO_TARGET_DESC
40 #include "AArch64GenRegisterInfo.inc"
41 
43  : AArch64GenRegisterInfo(AArch64::LR), TT(TT) {
45 }
46 
47 /// Return whether the register needs a CFI entry. Not all unwinders may know
48 /// about SVE registers, so we assume the lowest common denominator, i.e. the
49 /// callee-saves required by the base ABI. For the SVE registers z8-z15 only the
50 /// lower 64-bits (d8-d15) need to be saved. The lower 64-bits subreg is
51 /// returned in \p RegToUseForCFI.
53  unsigned &RegToUseForCFI) const {
54  if (AArch64::PPRRegClass.contains(Reg))
55  return false;
56 
57  if (AArch64::ZPRRegClass.contains(Reg)) {
58  RegToUseForCFI = getSubReg(Reg, AArch64::dsub);
59  for (int I = 0; CSR_AArch64_AAPCS_SaveList[I]; ++I) {
60  if (CSR_AArch64_AAPCS_SaveList[I] == RegToUseForCFI)
61  return true;
62  }
63  return false;
64  }
65 
66  RegToUseForCFI = Reg;
67  return true;
68 }
69 
70 const MCPhysReg *
72  assert(MF && "Invalid MachineFunction pointer.");
73 
75  // GHC set of callee saved regs is empty as all those regs are
76  // used for passing STG regs around
77  return CSR_AArch64_NoRegs_SaveList;
79  return CSR_AArch64_AllRegs_SaveList;
80 
81  // Darwin has its own CSR_AArch64_AAPCS_SaveList, which means most CSR save
82  // lists depending on that will need to have their Darwin variant as well.
84  return getDarwinCalleeSavedRegs(MF);
85 
87  return CSR_Win_AArch64_CFGuard_Check_SaveList;
89  return CSR_Win_AArch64_AAPCS_SaveList;
91  return CSR_AArch64_AAVPCS_SaveList;
93  return CSR_AArch64_SVE_AAPCS_SaveList;
94  if (MF->getFunction().getCallingConv() ==
97  "Calling convention AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0 is "
98  "only supported to improve calls to SME ACLE save/restore/disable-za "
99  "functions, and is not intended to be used beyond that scope.");
100  if (MF->getFunction().getCallingConv() ==
103  "Calling convention AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2 is "
104  "only supported to improve calls to SME ACLE __arm_sme_state "
105  "and is not intended to be used beyond that scope.");
107  ->supportSwiftError() &&
109  Attribute::SwiftError))
110  return CSR_AArch64_AAPCS_SwiftError_SaveList;
112  return CSR_AArch64_AAPCS_SwiftTail_SaveList;
114  return CSR_AArch64_RT_MostRegs_SaveList;
116  // This is for OSes other than Windows; Windows is a separate case further
117  // above.
118  return CSR_AArch64_AAPCS_X18_SaveList;
119  if (MF->getInfo<AArch64FunctionInfo>()->isSVECC())
120  return CSR_AArch64_SVE_AAPCS_SaveList;
121  return CSR_AArch64_AAPCS_SaveList;
122 }
123 
124 const MCPhysReg *
126  assert(MF && "Invalid MachineFunction pointer.");
128  "Invalid subtarget for getDarwinCalleeSavedRegs");
129 
132  "Calling convention CFGuard_Check is unsupported on Darwin.");
134  return CSR_Darwin_AArch64_AAVPCS_SaveList;
137  "Calling convention SVE_VectorCall is unsupported on Darwin.");
138  if (MF->getFunction().getCallingConv() ==
141  "Calling convention AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0 is "
142  "only supported to improve calls to SME ACLE save/restore/disable-za "
143  "functions, and is not intended to be used beyond that scope.");
144  if (MF->getFunction().getCallingConv() ==
147  "Calling convention AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2 is "
148  "only supported to improve calls to SME ACLE __arm_sme_state "
149  "and is not intended to be used beyond that scope.");
151  return MF->getInfo<AArch64FunctionInfo>()->isSplitCSR()
152  ? CSR_Darwin_AArch64_CXX_TLS_PE_SaveList
153  : CSR_Darwin_AArch64_CXX_TLS_SaveList;
155  ->supportSwiftError() &&
157  Attribute::SwiftError))
158  return CSR_Darwin_AArch64_AAPCS_SwiftError_SaveList;
160  return CSR_Darwin_AArch64_AAPCS_SwiftTail_SaveList;
162  return CSR_Darwin_AArch64_RT_MostRegs_SaveList;
164  return CSR_Darwin_AArch64_AAPCS_Win64_SaveList;
165  return CSR_Darwin_AArch64_AAPCS_SaveList;
166 }
167 
169  const MachineFunction *MF) const {
170  assert(MF && "Invalid MachineFunction pointer.");
173  return CSR_Darwin_AArch64_CXX_TLS_ViaCopy_SaveList;
174  return nullptr;
175 }
176 
178  MachineFunction &MF) const {
179  const MCPhysReg *CSRs = getCalleeSavedRegs(&MF);
180  SmallVector<MCPhysReg, 32> UpdatedCSRs;
181  for (const MCPhysReg *I = CSRs; *I; ++I)
182  UpdatedCSRs.push_back(*I);
183 
184  for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
186  UpdatedCSRs.push_back(AArch64::GPR64commonRegClass.getRegister(i));
187  }
188  }
189  // Register lists are zero-terminated.
190  UpdatedCSRs.push_back(0);
191  MF.getRegInfo().setCalleeSavedRegs(UpdatedCSRs);
192 }
193 
194 const TargetRegisterClass *
196  unsigned Idx) const {
197  // edge case for GPR/FPR register classes
198  if (RC == &AArch64::GPR32allRegClass && Idx == AArch64::hsub)
199  return &AArch64::FPR32RegClass;
200  else if (RC == &AArch64::GPR64allRegClass && Idx == AArch64::hsub)
201  return &AArch64::FPR64RegClass;
202 
203  // Forward to TableGen's default version.
204  return AArch64GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
205 }
206 
207 const uint32_t *
209  CallingConv::ID CC) const {
211  "Invalid subtarget for getDarwinCallPreservedMask");
212 
214  return CSR_Darwin_AArch64_CXX_TLS_RegMask;
216  return CSR_Darwin_AArch64_AAVPCS_RegMask;
219  "Calling convention SVE_VectorCall is unsupported on Darwin.");
222  "Calling convention AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0 is "
223  "unsupported on Darwin.");
226  "Calling convention AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2 is "
227  "unsupported on Darwin.");
230  "Calling convention CFGuard_Check is unsupported on Darwin.");
233  ->supportSwiftError() &&
234  MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
235  return CSR_Darwin_AArch64_AAPCS_SwiftError_RegMask;
236  if (CC == CallingConv::SwiftTail)
237  return CSR_Darwin_AArch64_AAPCS_SwiftTail_RegMask;
239  return CSR_Darwin_AArch64_RT_MostRegs_RegMask;
240  return CSR_Darwin_AArch64_AAPCS_RegMask;
241 }
242 
243 const uint32_t *
245  CallingConv::ID CC) const {
246  bool SCS = MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack);
247  if (CC == CallingConv::GHC)
248  // This is academic because all GHC calls are (supposed to be) tail calls
249  return SCS ? CSR_AArch64_NoRegs_SCS_RegMask : CSR_AArch64_NoRegs_RegMask;
250  if (CC == CallingConv::AnyReg)
251  return SCS ? CSR_AArch64_AllRegs_SCS_RegMask : CSR_AArch64_AllRegs_RegMask;
252 
253  // All the following calling conventions are handled differently on Darwin.
255  if (SCS)
256  report_fatal_error("ShadowCallStack attribute not supported on Darwin.");
257  return getDarwinCallPreservedMask(MF, CC);
258  }
259 
261  return SCS ? CSR_AArch64_AAVPCS_SCS_RegMask : CSR_AArch64_AAVPCS_RegMask;
263  return SCS ? CSR_AArch64_SVE_AAPCS_SCS_RegMask
264  : CSR_AArch64_SVE_AAPCS_RegMask;
266  return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0_RegMask;
268  return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2_RegMask;
270  return CSR_Win_AArch64_CFGuard_Check_RegMask;
272  ->supportSwiftError() &&
273  MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
274  return SCS ? CSR_AArch64_AAPCS_SwiftError_SCS_RegMask
275  : CSR_AArch64_AAPCS_SwiftError_RegMask;
276  if (CC == CallingConv::SwiftTail) {
277  if (SCS)
278  report_fatal_error("ShadowCallStack attribute not supported with swifttail");
279  return CSR_AArch64_AAPCS_SwiftTail_RegMask;
280  }
282  return SCS ? CSR_AArch64_RT_MostRegs_SCS_RegMask
283  : CSR_AArch64_RT_MostRegs_RegMask;
284  else
285  return SCS ? CSR_AArch64_AAPCS_SCS_RegMask : CSR_AArch64_AAPCS_RegMask;
286 }
287 
289  const MachineFunction &MF) const {
291  return CSR_AArch64_AAPCS_RegMask;
292 
293  return nullptr;
294 }
295 
297  if (TT.isOSDarwin())
298  return CSR_Darwin_AArch64_TLS_RegMask;
299 
300  assert(TT.isOSBinFormatELF() && "Invalid target");
301  return CSR_AArch64_TLS_ELF_RegMask;
302 }
303 
305  const uint32_t **Mask) const {
306  uint32_t *UpdatedMask = MF.allocateRegMask();
307  unsigned RegMaskSize = MachineOperand::getRegMaskSize(getNumRegs());
308  memcpy(UpdatedMask, *Mask, sizeof(UpdatedMask[0]) * RegMaskSize);
309 
310  for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
312  for (MCSubRegIterator SubReg(AArch64::GPR64commonRegClass.getRegister(i),
313  this, true);
314  SubReg.isValid(); ++SubReg) {
315  // See TargetRegisterInfo::getCallPreservedMask for how to interpret the
316  // register mask.
317  UpdatedMask[*SubReg / 32] |= 1u << (*SubReg % 32);
318  }
319  }
320  }
321  *Mask = UpdatedMask;
322 }
323 
325  return CSR_AArch64_SMStartStop_RegMask;
326 }
327 
328 const uint32_t *
330  return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0_RegMask;
331 }
332 
334  return CSR_AArch64_NoRegs_RegMask;
335 }
336 
337 const uint32_t *
339  CallingConv::ID CC) const {
340  // This should return a register mask that is the same as that returned by
341  // getCallPreservedMask but that additionally preserves the register used for
342  // the first i64 argument (which must also be the register used to return a
343  // single i64 return value)
344  //
345  // In case that the calling convention does not use the same register for
346  // both, the function should return NULL (does not currently apply)
347  assert(CC != CallingConv::GHC && "should not be GHC calling convention.");
349  return CSR_Darwin_AArch64_AAPCS_ThisReturn_RegMask;
350  return CSR_AArch64_AAPCS_ThisReturn_RegMask;
351 }
352 
354  return CSR_AArch64_StackProbe_Windows_RegMask;
355 }
356 
359  MCRegister PhysReg) const {
360  if (hasBasePointer(MF) && MCRegisterInfo::regsOverlap(PhysReg, AArch64::X19))
361  return std::string("X19 is used as the frame base pointer register.");
362 
364  bool warn = false;
365  if (MCRegisterInfo::regsOverlap(PhysReg, AArch64::X13) ||
366  MCRegisterInfo::regsOverlap(PhysReg, AArch64::X14) ||
367  MCRegisterInfo::regsOverlap(PhysReg, AArch64::X23) ||
368  MCRegisterInfo::regsOverlap(PhysReg, AArch64::X24) ||
369  MCRegisterInfo::regsOverlap(PhysReg, AArch64::X28))
370  warn = true;
371 
372  for (unsigned i = AArch64::B16; i <= AArch64::B31; ++i)
373  if (MCRegisterInfo::regsOverlap(PhysReg, i))
374  warn = true;
375 
376  if (warn)
377  return std::string(AArch64InstPrinter::getRegisterName(PhysReg)) +
378  " is clobbered by asynchronous signals when using Arm64EC.";
379  }
380 
381  return {};
382 }
383 
384 BitVector
386  const AArch64FrameLowering *TFI = getFrameLowering(MF);
387 
388  // FIXME: avoid re-calculating this every time.
389  BitVector Reserved(getNumRegs());
390  markSuperRegs(Reserved, AArch64::WSP);
391  markSuperRegs(Reserved, AArch64::WZR);
392 
393  if (TFI->hasFP(MF) || TT.isOSDarwin())
394  markSuperRegs(Reserved, AArch64::W29);
395 
397  // x13, x14, x23, x24, x28, and v16-v31 are clobbered by asynchronous
398  // signals, so we can't ever use them.
399  markSuperRegs(Reserved, AArch64::W13);
400  markSuperRegs(Reserved, AArch64::W14);
401  markSuperRegs(Reserved, AArch64::W23);
402  markSuperRegs(Reserved, AArch64::W24);
403  markSuperRegs(Reserved, AArch64::W28);
404  for (unsigned i = AArch64::B16; i <= AArch64::B31; ++i)
405  markSuperRegs(Reserved, i);
406  }
407 
408  for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
410  markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
411  }
412 
413  if (hasBasePointer(MF))
414  markSuperRegs(Reserved, AArch64::W19);
415 
416  // SLH uses register W16/X16 as the taint register.
417  if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening))
418  markSuperRegs(Reserved, AArch64::W16);
419 
420  // SME tiles are not allocatable.
421  if (MF.getSubtarget<AArch64Subtarget>().hasSME()) {
422  for (MCSubRegIterator SubReg(AArch64::ZA, this, /*self=*/true);
423  SubReg.isValid(); ++SubReg)
424  Reserved.set(*SubReg);
425  }
426 
427  markSuperRegs(Reserved, AArch64::FPCR);
428 
429  assert(checkAllSuperRegsMarked(Reserved));
430  return Reserved;
431 }
432 
433 BitVector
436 
437  for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
439  markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
440  }
441 
442  assert(checkAllSuperRegsMarked(Reserved));
443  return Reserved;
444 }
445 
447  MCRegister Reg) const {
448  return getReservedRegs(MF)[Reg];
449 }
450 
452  MCRegister Reg) const {
453  return getStrictlyReservedRegs(MF)[Reg];
454 }
455 
457  return llvm::any_of(*AArch64::GPR64argRegClass.MC, [this, &MF](MCPhysReg r) {
458  return isStrictlyReservedReg(MF, r);
459  });
460 }
461 
463  const MachineFunction &MF) const {
464  const Function &F = MF.getFunction();
465  F.getContext().diagnose(DiagnosticInfoUnsupported{F, ("AArch64 doesn't support"
466  " function calls if any of the argument registers is reserved.")});
467 }
468 
470  MCRegister PhysReg) const {
471  // SLH uses register X16 as the taint register but it will fallback to a different
472  // method if the user clobbers it. So X16 is not reserved for inline asm but is
473  // for normal codegen.
474  if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening) &&
475  MCRegisterInfo::regsOverlap(PhysReg, AArch64::X16))
476  return true;
477 
478  return !isReservedReg(MF, PhysReg);
479 }
480 
481 const TargetRegisterClass *
483  unsigned Kind) const {
484  return &AArch64::GPR64spRegClass;
485 }
486 
487 const TargetRegisterClass *
489  if (RC == &AArch64::CCRRegClass)
490  return &AArch64::GPR64RegClass; // Only MSR & MRS copy NZCV.
491  return RC;
492 }
493 
494 unsigned AArch64RegisterInfo::getBaseRegister() const { return AArch64::X19; }
495 
497  const MachineFrameInfo &MFI = MF.getFrameInfo();
498 
499  // In the presence of variable sized objects or funclets, if the fixed stack
500  // size is large enough that referencing from the FP won't result in things
501  // being in range relatively often, we can use a base pointer to allow access
502  // from the other direction like the SP normally works.
503  //
504  // Furthermore, if both variable sized objects are present, and the
505  // stack needs to be dynamically re-aligned, the base pointer is the only
506  // reliable way to reference the locals.
507  if (MFI.hasVarSizedObjects() || MF.hasEHFunclets()) {
508  if (hasStackRealignment(MF))
509  return true;
510 
511  if (MF.getSubtarget<AArch64Subtarget>().hasSVE()) {
513  // Frames that have variable sized objects and scalable SVE objects,
514  // should always use a basepointer.
515  if (!AFI->hasCalculatedStackSizeSVE() || AFI->getStackSizeSVE())
516  return true;
517  }
518 
519  // Conservatively estimate whether the negative offset from the frame
520  // pointer will be sufficient to reach. If a function has a smallish
521  // frame, it's less likely to have lots of spills and callee saved
522  // space, so it's all more likely to be within range of the frame pointer.
523  // If it's wrong, we'll materialize the constant and still get to the
524  // object; it's just suboptimal. Negative offsets use the unscaled
525  // load/store instructions, which have a 9-bit signed immediate.
526  return MFI.getLocalFrameSize() >= 256;
527  }
528 
529  return false;
530 }
531 
533  MCRegister Reg) const {
536  bool IsVarArg = STI.isCallingConvWin64(MF.getFunction().getCallingConv());
537 
538  auto HasReg = [](ArrayRef<MCRegister> RegList, MCRegister Reg) {
539  return llvm::is_contained(RegList, Reg);
540  };
541 
542  switch (CC) {
543  default:
544  report_fatal_error("Unsupported calling convention.");
546  return HasReg(CC_AArch64_WebKit_JS_ArgRegs, Reg);
547  case CallingConv::GHC:
548  return HasReg(CC_AArch64_GHC_ArgRegs, Reg);
549  case CallingConv::C:
550  case CallingConv::Fast:
553  case CallingConv::Swift:
555  case CallingConv::Tail:
556  if (STI.isTargetWindows() && IsVarArg)
557  return HasReg(CC_AArch64_Win64_VarArg_ArgRegs, Reg);
558  if (!STI.isTargetDarwin()) {
559  switch (CC) {
560  default:
561  return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg);
562  case CallingConv::Swift:
564  return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg) ||
565  HasReg(CC_AArch64_AAPCS_Swift_ArgRegs, Reg);
566  }
567  }
568  if (!IsVarArg) {
569  switch (CC) {
570  default:
571  return HasReg(CC_AArch64_DarwinPCS_ArgRegs, Reg);
572  case CallingConv::Swift:
574  return HasReg(CC_AArch64_DarwinPCS_ArgRegs, Reg) ||
575  HasReg(CC_AArch64_DarwinPCS_Swift_ArgRegs, Reg);
576  }
577  }
578  if (STI.isTargetILP32())
579  return HasReg(CC_AArch64_DarwinPCS_ILP32_VarArg_ArgRegs, Reg);
580  return HasReg(CC_AArch64_DarwinPCS_VarArg_ArgRegs, Reg);
581  case CallingConv::Win64:
582  if (IsVarArg)
583  HasReg(CC_AArch64_Win64_VarArg_ArgRegs, Reg);
584  return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg);
586  return HasReg(CC_AArch64_Win64_CFGuard_Check_ArgRegs, Reg);
591  return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg);
592  }
593 }
594 
595 Register
597  const AArch64FrameLowering *TFI = getFrameLowering(MF);
598  return TFI->hasFP(MF) ? AArch64::FP : AArch64::SP;
599 }
600 
602  const MachineFunction &MF) const {
603  return true;
604 }
605 
607  const MachineFunction &MF) const {
608  return true;
609 }
610 
611 bool
613  // This function indicates whether the emergency spillslot should be placed
614  // close to the beginning of the stackframe (closer to FP) or the end
615  // (closer to SP).
616  //
617  // The beginning works most reliably if we have a frame pointer.
618  // In the presence of any non-constant space between FP and locals,
619  // (e.g. in case of stack realignment or a scalable SVE area), it is
620  // better to use SP or BP.
621  const AArch64FrameLowering &TFI = *getFrameLowering(MF);
623  assert((!MF.getSubtarget<AArch64Subtarget>().hasSVE() ||
624  AFI->hasCalculatedStackSizeSVE()) &&
625  "Expected SVE area to be calculated by this point");
626  return TFI.hasFP(MF) && !hasStackRealignment(MF) && !AFI->getStackSizeSVE();
627 }
628 
630  const MachineFunction &MF) const {
631  return true;
632 }
633 
634 bool
636  const MachineFrameInfo &MFI = MF.getFrameInfo();
638  return true;
639  return MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken();
640 }
641 
642 /// needsFrameBaseReg - Returns true if the instruction's frame index
643 /// reference would be better served by a base register other than FP
644 /// or SP. Used by LocalStackFrameAllocation to determine which frame index
645 /// references it should create new base registers for.
647  int64_t Offset) const {
648  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i)
649  assert(i < MI->getNumOperands() &&
650  "Instr doesn't have FrameIndex operand!");
651 
652  // It's the load/store FI references that cause issues, as it can be difficult
653  // to materialize the offset if it won't fit in the literal field. Estimate
654  // based on the size of the local frame and some conservative assumptions
655  // about the rest of the stack frame (note, this is pre-regalloc, so
656  // we don't know everything for certain yet) whether this offset is likely
657  // to be out of range of the immediate. Return true if so.
658 
659  // We only generate virtual base registers for loads and stores, so
660  // return false for everything else.
661  if (!MI->mayLoad() && !MI->mayStore())
662  return false;
663 
664  // Without a virtual base register, if the function has variable sized
665  // objects, all fixed-size local references will be via the frame pointer,
666  // Approximate the offset and see if it's legal for the instruction.
667  // Note that the incoming offset is based on the SP value at function entry,
668  // so it'll be negative.
669  MachineFunction &MF = *MI->getParent()->getParent();
670  const AArch64FrameLowering *TFI = getFrameLowering(MF);
671  MachineFrameInfo &MFI = MF.getFrameInfo();
672 
673  // Estimate an offset from the frame pointer.
674  // Conservatively assume all GPR callee-saved registers get pushed.
675  // FP, LR, X19-X28, D8-D15. 64-bits each.
676  int64_t FPOffset = Offset - 16 * 20;
677  // Estimate an offset from the stack pointer.
678  // The incoming offset is relating to the SP at the start of the function,
679  // but when we access the local it'll be relative to the SP after local
680  // allocation, so adjust our SP-relative offset by that allocation size.
681  Offset += MFI.getLocalFrameSize();
682  // Assume that we'll have at least some spill slots allocated.
683  // FIXME: This is a total SWAG number. We should run some statistics
684  // and pick a real one.
685  Offset += 128; // 128 bytes of spill slots
686 
687  // If there is a frame pointer, try using it.
688  // The FP is only available if there is no dynamic realignment. We
689  // don't know for sure yet whether we'll need that, so we guess based
690  // on whether there are any local variables that would trigger it.
691  if (TFI->hasFP(MF) && isFrameOffsetLegal(MI, AArch64::FP, FPOffset))
692  return false;
693 
694  // If we can reference via the stack pointer or base pointer, try that.
695  // FIXME: This (and the code that resolves the references) can be improved
696  // to only disallow SP relative references in the live range of
697  // the VLA(s). In practice, it's unclear how much difference that
698  // would make, but it may be worth doing.
699  if (isFrameOffsetLegal(MI, AArch64::SP, Offset))
700  return false;
701 
702  // If even offset 0 is illegal, we don't want a virtual base register.
703  if (!isFrameOffsetLegal(MI, AArch64::SP, 0))
704  return false;
705 
706  // The offset likely isn't legal; we want to allocate a virtual base register.
707  return true;
708 }
709 
711  Register BaseReg,
712  int64_t Offset) const {
713  assert(MI && "Unable to get the legal offset for nil instruction.");
714  StackOffset SaveOffset = StackOffset::getFixed(Offset);
716 }
717 
718 /// Insert defining instruction(s) for BaseReg to be a pointer to FrameIdx
719 /// at the beginning of the basic block.
720 Register
722  int FrameIdx,
723  int64_t Offset) const {
725  DebugLoc DL; // Defaults to "unknown"
726  if (Ins != MBB->end())
727  DL = Ins->getDebugLoc();
728  const MachineFunction &MF = *MBB->getParent();
729  const AArch64InstrInfo *TII =
730  MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
731  const MCInstrDesc &MCID = TII->get(AArch64::ADDXri);
733  Register BaseReg = MRI.createVirtualRegister(&AArch64::GPR64spRegClass);
734  MRI.constrainRegClass(BaseReg, TII->getRegClass(MCID, 0, this, MF));
735  unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);
736 
737  BuildMI(*MBB, Ins, DL, MCID, BaseReg)
738  .addFrameIndex(FrameIdx)
739  .addImm(Offset)
740  .addImm(Shifter);
741 
742  return BaseReg;
743 }
744 
746  int64_t Offset) const {
747  // ARM doesn't need the general 64-bit offsets
748  StackOffset Off = StackOffset::getFixed(Offset);
749 
750  unsigned i = 0;
751  while (!MI.getOperand(i).isFI()) {
752  ++i;
753  assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
754  }
755 
756  const MachineFunction *MF = MI.getParent()->getParent();
757  const AArch64InstrInfo *TII =
758  MF->getSubtarget<AArch64Subtarget>().getInstrInfo();
759  bool Done = rewriteAArch64FrameIndex(MI, i, BaseReg, Off, TII);
760  assert(Done && "Unable to resolve frame index!");
761  (void)Done;
762 }
763 
764 // Create a scratch register for the frame index elimination in an instruction.
765 // This function has special handling of stack tagging loop pseudos, in which
766 // case it can also change the instruction opcode.
767 static Register
769  const AArch64InstrInfo *TII) {
770  // ST*Gloop have a reserved scratch register in operand 1. Use it, and also
771  // replace the instruction with the writeback variant because it will now
772  // satisfy the operand constraints for it.
773  Register ScratchReg;
774  if (MI.getOpcode() == AArch64::STGloop ||
775  MI.getOpcode() == AArch64::STZGloop) {
776  assert(FIOperandNum == 3 &&
777  "Wrong frame index operand for STGloop/STZGloop");
778  unsigned Op = MI.getOpcode() == AArch64::STGloop ? AArch64::STGloop_wback
779  : AArch64::STZGloop_wback;
780  ScratchReg = MI.getOperand(1).getReg();
781  MI.getOperand(3).ChangeToRegister(ScratchReg, false, false, true);
782  MI.setDesc(TII->get(Op));
783  MI.tieOperands(1, 3);
784  } else {
785  ScratchReg =
786  MI.getMF()->getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
787  MI.getOperand(FIOperandNum)
788  .ChangeToRegister(ScratchReg, false, false, true);
789  }
790  return ScratchReg;
791 }
792 
794  const StackOffset &Offset, SmallVectorImpl<uint64_t> &Ops) const {
795  // The smallest scalable element supported by scaled SVE addressing
796  // modes are predicates, which are 2 scalable bytes in size. So the scalable
797  // byte offset must always be a multiple of 2.
798  assert(Offset.getScalable() % 2 == 0 && "Invalid frame offset");
799 
800  // Add fixed-sized offset using existing DIExpression interface.
801  DIExpression::appendOffset(Ops, Offset.getFixed());
802 
803  unsigned VG = getDwarfRegNum(AArch64::VG, true);
804  int64_t VGSized = Offset.getScalable() / 2;
805  if (VGSized > 0) {
806  Ops.push_back(dwarf::DW_OP_constu);
807  Ops.push_back(VGSized);
808  Ops.append({dwarf::DW_OP_bregx, VG, 0ULL});
809  Ops.push_back(dwarf::DW_OP_mul);
810  Ops.push_back(dwarf::DW_OP_plus);
811  } else if (VGSized < 0) {
812  Ops.push_back(dwarf::DW_OP_constu);
813  Ops.push_back(-VGSized);
814  Ops.append({dwarf::DW_OP_bregx, VG, 0ULL});
815  Ops.push_back(dwarf::DW_OP_mul);
816  Ops.push_back(dwarf::DW_OP_minus);
817  }
818 }
819 
821  int SPAdj, unsigned FIOperandNum,
822  RegScavenger *RS) const {
823  assert(SPAdj == 0 && "Unexpected");
824 
825  MachineInstr &MI = *II;
826  MachineBasicBlock &MBB = *MI.getParent();
827  MachineFunction &MF = *MBB.getParent();
828  const MachineFrameInfo &MFI = MF.getFrameInfo();
829  const AArch64InstrInfo *TII =
830  MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
831  const AArch64FrameLowering *TFI = getFrameLowering(MF);
832  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
833  bool Tagged =
834  MI.getOperand(FIOperandNum).getTargetFlags() & AArch64II::MO_TAGGED;
835  Register FrameReg;
836 
837  // Special handling of dbg_value, stackmap patchpoint statepoint instructions.
838  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
839  MI.getOpcode() == TargetOpcode::PATCHPOINT ||
840  MI.getOpcode() == TargetOpcode::STATEPOINT) {
841  StackOffset Offset =
842  TFI->resolveFrameIndexReference(MF, FrameIndex, FrameReg,
843  /*PreferFP=*/true,
844  /*ForSimm=*/false);
845  Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());
846  MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false /*isDef*/);
847  MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset.getFixed());
848  return false;
849  }
850 
851  if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE) {
852  MachineOperand &FI = MI.getOperand(FIOperandNum);
854  assert(!Offset.getScalable() &&
855  "Frame offsets with a scalable component are not supported");
856  FI.ChangeToImmediate(Offset.getFixed());
857  return false;
858  }
859 
860  StackOffset Offset;
861  if (MI.getOpcode() == AArch64::TAGPstack) {
862  // TAGPstack must use the virtual frame register in its 3rd operand.
864  FrameReg = MI.getOperand(3).getReg();
867  } else if (Tagged) {
869  MFI.getObjectOffset(FrameIndex) + (int64_t)MFI.getStackSize());
870  if (MFI.hasVarSizedObjects() ||
871  isAArch64FrameOffsetLegal(MI, SPOffset, nullptr, nullptr, nullptr) !=
873  // Can't update to SP + offset in place. Precalculate the tagged pointer
874  // in a scratch register.
875  Offset = TFI->resolveFrameIndexReference(
876  MF, FrameIndex, FrameReg, /*PreferFP=*/false, /*ForSimm=*/true);
877  Register ScratchReg =
878  MF.getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
879  emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset,
880  TII);
881  BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(AArch64::LDG), ScratchReg)
882  .addReg(ScratchReg)
883  .addReg(ScratchReg)
884  .addImm(0);
885  MI.getOperand(FIOperandNum)
886  .ChangeToRegister(ScratchReg, false, false, true);
887  return false;
888  }
889  FrameReg = AArch64::SP;
891  (int64_t)MFI.getStackSize());
892  } else {
893  Offset = TFI->resolveFrameIndexReference(
894  MF, FrameIndex, FrameReg, /*PreferFP=*/false, /*ForSimm=*/true);
895  }
896 
897  // Modify MI as necessary to handle as much of 'Offset' as possible
898  if (rewriteAArch64FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII))
899  return true;
900 
901  assert((!RS || !RS->isScavengingFrameIndex(FrameIndex)) &&
902  "Emergency spill slot is out of reach");
903 
904  // If we get here, the immediate doesn't fit into the instruction. We folded
905  // as much as possible above. Handle the rest, providing a register that is
906  // SP+LargeImm.
907  Register ScratchReg =
909  emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset, TII);
910  return false;
911 }
912 
914  MachineFunction &MF) const {
915  const AArch64FrameLowering *TFI = getFrameLowering(MF);
916 
917  switch (RC->getID()) {
918  default:
919  return 0;
920  case AArch64::GPR32RegClassID:
921  case AArch64::GPR32spRegClassID:
922  case AArch64::GPR32allRegClassID:
923  case AArch64::GPR64spRegClassID:
924  case AArch64::GPR64allRegClassID:
925  case AArch64::GPR64RegClassID:
926  case AArch64::GPR32commonRegClassID:
927  case AArch64::GPR64commonRegClassID:
928  return 32 - 1 // XZR/SP
929  - (TFI->hasFP(MF) || TT.isOSDarwin()) // FP
931  - hasBasePointer(MF); // X19
932  case AArch64::FPR8RegClassID:
933  case AArch64::FPR16RegClassID:
934  case AArch64::FPR32RegClassID:
935  case AArch64::FPR64RegClassID:
936  case AArch64::FPR128RegClassID:
937  return 32;
938 
939  case AArch64::MatrixIndexGPR32_8_11RegClassID:
940  case AArch64::MatrixIndexGPR32_12_15RegClassID:
941  return 4;
942 
943  case AArch64::DDRegClassID:
944  case AArch64::DDDRegClassID:
945  case AArch64::DDDDRegClassID:
946  case AArch64::QQRegClassID:
947  case AArch64::QQQRegClassID:
948  case AArch64::QQQQRegClassID:
949  return 32;
950 
951  case AArch64::FPR128_loRegClassID:
952  case AArch64::FPR64_loRegClassID:
953  case AArch64::FPR16_loRegClassID:
954  return 16;
955  }
956 }
957 
959  const MachineFunction &MF) const {
960  const auto &MFI = MF.getFrameInfo();
961  if (!MF.hasEHFunclets() && !MFI.hasVarSizedObjects())
962  return AArch64::SP;
963  else if (hasStackRealignment(MF))
964  return getBaseRegister();
965  return getFrameRegister(MF);
966 }
967 
968 /// SrcRC and DstRC will be morphed into NewRC if this returns true
970  MachineInstr *MI, const TargetRegisterClass *SrcRC, unsigned SubReg,
971  const TargetRegisterClass *DstRC, unsigned DstSubReg,
972  const TargetRegisterClass *NewRC, LiveIntervals &LIS) const {
973  if (MI->isCopy() &&
974  ((DstRC->getID() == AArch64::GPR64RegClassID) ||
975  (DstRC->getID() == AArch64::GPR64commonRegClassID)) &&
976  MI->getOperand(0).getSubReg() && MI->getOperand(1).getSubReg())
977  // Do not coalesce in the case of a 32-bit subregister copy
978  // which implements a 32 to 64 bit zero extension
979  // which relies on the upper 32 bits being zeroed.
980  return false;
981  return true;
982 }
llvm::AArch64RegisterInfo::getSMStartStopCallPreservedMask
const uint32_t * getSMStartStopCallPreservedMask() const
Definition: AArch64RegisterInfo.cpp:324
i
i
Definition: README.txt:29
llvm::AArch64RegisterInfo::getCallPreservedMask
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const override
Definition: AArch64RegisterInfo.cpp:244
llvm::MachineFrameInfo::hasVarSizedObjects
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack ...
Definition: MachineFrameInfo.h:355
llvm::AArch64Subtarget::isTargetWindows
bool isTargetWindows() const
Definition: AArch64Subtarget.h:264
AArch64RegisterInfo.h
createScratchRegisterForInstruction
static Register createScratchRegisterForInstruction(MachineInstr &MI, unsigned FIOperandNum, const AArch64InstrInfo *TII)
Definition: AArch64RegisterInfo.cpp:768
llvm::CallingConv::AnyReg
@ AnyReg
Used for dynamic register based calls (e.g.
Definition: CallingConv.h:60
MI
IRTranslator LLVM IR MI
Definition: IRTranslator.cpp:108
llvm::MachineInstrBuilder::addImm
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
Definition: MachineInstrBuilder.h:131
llvm::TargetRegisterClass::getID
unsigned getID() const
Return the register class ID number.
Definition: TargetRegisterInfo.h:75
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
AArch64MachineFunctionInfo.h
llvm::CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0
@ AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0
Preserve X0-X13, X19-X29, SP, Z0-Z31, P0-P15.
Definition: CallingConv.h:239
llvm::ISD::PATCHPOINT
@ PATCHPOINT
Definition: ISDOpcodes.h:1299
TargetFrameLowering.h
llvm::AArch64RegisterInfo::shouldCoalesce
bool shouldCoalesce(MachineInstr *MI, const TargetRegisterClass *SrcRC, unsigned SubReg, const TargetRegisterClass *DstRC, unsigned DstSubReg, const TargetRegisterClass *NewRC, LiveIntervals &LIS) const override
SrcRC and DstRC will be morphed into NewRC if this returns true.
Definition: AArch64RegisterInfo.cpp:969
llvm::AArch64_AM::LSL
@ LSL
Definition: AArch64AddressingModes.h:35
llvm::MachineRegisterInfo::createVirtualRegister
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
Definition: MachineRegisterInfo.cpp:156
llvm::AttributeList::hasAttrSomewhere
bool hasAttrSomewhere(Attribute::AttrKind Kind, unsigned *Index=nullptr) const
Return true if the specified attribute is set for at least one parameter or for the return value.
Definition: Attributes.cpp:1494
llvm::MachineFunction::allocateRegMask
uint32_t * allocateRegMask()
Allocate and initialize a register mask with NumRegister bits.
Definition: MachineFunction.cpp:547
llvm::DiagnosticInfoUnsupported
Diagnostic information for unsupported feature in backend.
Definition: DiagnosticInfo.h:1008
DebugInfoMetadata.h
llvm::MachineRegisterInfo
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Definition: MachineRegisterInfo.h:50
llvm::Function
Definition: Function.h:60
llvm::AArch64FrameOffsetCanUpdate
@ AArch64FrameOffsetCanUpdate
Offset can apply, at least partly.
Definition: AArch64InstrInfo.h:436
contains
return AArch64::GPR64RegClass contains(Reg)
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1199
AArch64GenRegisterInfo
llvm::AArch64RegisterInfo::SMEABISupportRoutinesCallPreservedMaskFromX0
const uint32_t * SMEABISupportRoutinesCallPreservedMaskFromX0() const
Definition: AArch64RegisterInfo.cpp:329
llvm::AArch64RegisterInfo::UpdateCustomCallPreservedMask
void UpdateCustomCallPreservedMask(MachineFunction &MF, const uint32_t **Mask) const
Definition: AArch64RegisterInfo.cpp:304
llvm::AArch64RegisterInfo::explainReservedReg
llvm::Optional< std::string > explainReservedReg(const MachineFunction &MF, MCRegister PhysReg) const override
Definition: AArch64RegisterInfo.cpp:358
llvm::AArch64RegisterInfo::getBaseRegister
unsigned getBaseRegister() const
Definition: AArch64RegisterInfo.cpp:494
llvm::X86Disassembler::Reg
Reg
All possible values of the reg field in the ModR/M byte.
Definition: X86DisassemblerDecoder.h:462
llvm::AArch64RegisterInfo::isAnyArgRegReserved
bool isAnyArgRegReserved(const MachineFunction &MF) const
Definition: AArch64RegisterInfo.cpp:456
llvm::AArch64RegisterInfo::getNoPreservedMask
const uint32_t * getNoPreservedMask() const override
Definition: AArch64RegisterInfo.cpp:333
llvm::AArch64InstPrinter::getRegisterName
static const char * getRegisterName(unsigned RegNo, unsigned AltIdx=AArch64::NoRegAltName)
llvm::AArch64RegisterInfo::isArgumentRegister
bool isArgumentRegister(const MachineFunction &MF, MCRegister Reg) const override
Definition: AArch64RegisterInfo.cpp:532
llvm::AArch64Subtarget::isTargetDarwin
bool isTargetDarwin() const
Definition: AArch64Subtarget.h:261
llvm::AArch64RegisterInfo::getCalleeSavedRegsViaCopy
const MCPhysReg * getCalleeSavedRegsViaCopy(const MachineFunction *MF) const
Definition: AArch64RegisterInfo.cpp:168
llvm::Triple
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
getDwarfRegNum
static unsigned getDwarfRegNum(unsigned Reg, const TargetRegisterInfo *TRI)
Go up the super-register chain until we hit a valid dwarf register number.
Definition: StackMaps.cpp:178
llvm::AArch64FunctionInfo::isSVECC
bool isSVECC() const
Definition: AArch64MachineFunctionInfo.h:205
llvm::CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2
@ AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2
Preserve X2-X15, X19-X29, SP, Z0-Z31, P0-P15.
Definition: CallingConv.h:242
llvm::StackOffset::getFixed
ScalarTy getFixed() const
Definition: TypeSize.h:149
llvm::AArch64RegisterInfo::getFrameRegister
Register getFrameRegister(const MachineFunction &MF) const override
Definition: AArch64RegisterInfo.cpp:596
llvm::rewriteAArch64FrameIndex
bool rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx, unsigned FrameReg, StackOffset &Offset, const AArch64InstrInfo *TII)
rewriteAArch64FrameIndex - Rewrite MI to access 'Offset' bytes from the FP.
Definition: AArch64InstrInfo.cpp:4742
llvm::Optional< std::string >
llvm::AArch64Subtarget::isXRegCustomCalleeSaved
bool isXRegCustomCalleeSaved(size_t i) const
Definition: AArch64Subtarget.h:219
llvm::AArch64FrameLowering::hasFP
bool hasFP(const MachineFunction &MF) const override
hasFP - Return true if the specified function should have a dedicated frame pointer register.
Definition: AArch64FrameLowering.cpp:427
llvm::AArch64Subtarget::isWindowsArm64EC
bool isWindowsArm64EC() const
Definition: AArch64Subtarget.h:267
llvm::AArch64Subtarget::getTargetLowering
const AArch64TargetLowering * getTargetLowering() const override
Definition: AArch64Subtarget.h:178
F
#define F(x, y, z)
Definition: MD5.cpp:55
MachineRegisterInfo.h
llvm::AArch64RegisterInfo::getRegPressureLimit
unsigned getRegPressureLimit(const TargetRegisterClass *RC, MachineFunction &MF) const override
Definition: AArch64RegisterInfo.cpp:913
llvm::RegScavenger::isScavengingFrameIndex
bool isScavengingFrameIndex(int FI) const
Query whether a frame index is a scavenging frame index.
Definition: RegisterScavenging.h:148
llvm::MCRegisterInfo::regsOverlap
bool regsOverlap(MCRegister RegA, MCRegister RegB) const
Returns true if the two registers are equal or alias each other.
Definition: MCRegisterInfo.cpp:126
llvm::AArch64RegisterInfo::cannotEliminateFrame
bool cannotEliminateFrame(const MachineFunction &MF) const
Definition: AArch64RegisterInfo.cpp:635
llvm::AArch64RegisterInfo::getCrossCopyRegClass
const TargetRegisterClass * getCrossCopyRegClass(const TargetRegisterClass *RC) const override
Definition: AArch64RegisterInfo.cpp:488
llvm::AArch64FrameLowering
Definition: AArch64FrameLowering.h:21
llvm::BitmaskEnumDetail::Mask
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:80
llvm::MachineFunction::getRegInfo
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Definition: MachineFunction.h:667
llvm::Triple::isOSBinFormatELF
bool isOSBinFormatELF() const
Tests whether the OS uses the ELF binary format.
Definition: Triple.h:673
llvm::AArch64RegisterInfo::getSubClassWithSubReg
const TargetRegisterClass * getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const override
Definition: AArch64RegisterInfo.cpp:195
AArch64InstrInfo.h
llvm::AArch64InstrInfo
Definition: AArch64InstrInfo.h:36
llvm::Triple::isOSDarwin
bool isOSDarwin() const
Is this a "Darwin" OS (macOS, iOS, tvOS, watchOS, or DriverKit).
Definition: Triple.h:517
llvm::MachineFunction::getInfo
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Definition: MachineFunction.h:755
llvm::MachineOperand::getRegMaskSize
static unsigned getRegMaskSize(unsigned NumRegs)
Returns number of elements needed for a regmask array.
Definition: MachineOperand.h:645
llvm::AArch64Subtarget::isTargetILP32
bool isTargetILP32() const
Definition: AArch64Subtarget.h:273
llvm::CallingConv::Swift
@ Swift
Calling convention for Swift.
Definition: CallingConv.h:69
llvm::AArch64TargetLowering::supportSwiftError
bool supportSwiftError() const override
Return true if the target supports swifterror attribute.
Definition: AArch64ISelLowering.h:838
llvm::TargetRegisterClass
Definition: TargetRegisterInfo.h:46
TII
const HexagonInstrInfo * TII
Definition: HexagonCopyToCombine.cpp:125
llvm::MachineOperand::ChangeToImmediate
void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags=0)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value.
Definition: MachineOperand.cpp:157
llvm::MCInstrDesc
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:197
llvm::MachineOperand
MachineOperand class - Representation of each machine instruction operand.
Definition: MachineOperand.h:48
llvm::CallingConv::C
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
llvm::MachineRegisterInfo::setCalleeSavedRegs
void setCalleeSavedRegs(ArrayRef< MCPhysReg > CSRs)
Sets the updated Callee Saved Registers list.
Definition: MachineRegisterInfo.cpp:630
llvm::AArch64_AM::getShifterImm
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm: 6-bit shift amount shifter: 000 ==> lsl 001 ==...
Definition: AArch64AddressingModes.h:99
llvm::report_fatal_error
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:145
llvm::CallingConv::CXX_FAST_TLS
@ CXX_FAST_TLS
Used for access functions.
Definition: CallingConv.h:72
BitVector.h
llvm::MachineFrameInfo::getStackSize
uint64_t getStackSize() const
Return the number of bytes that must be allocated to hold all of the fixed size frame objects.
Definition: MachineFrameInfo.h:585
llvm::MachineFrameInfo::getObjectOffset
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
Definition: MachineFrameInfo.h:526
llvm::BitVector
Definition: BitVector.h:75
llvm::SmallVectorImpl::append
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:687
llvm::AArch64RegisterInfo::materializeFrameBaseRegister
Register materializeFrameBaseRegister(MachineBasicBlock *MBB, int FrameIdx, int64_t Offset) const override
Insert defining instruction(s) for BaseReg to be a pointer to FrameIdx at the beginning of the basic ...
Definition: AArch64RegisterInfo.cpp:721
llvm::CallingConv::ID
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
llvm::MachineBasicBlock
Definition: MachineBasicBlock.h:94
llvm::AArch64RegisterInfo::isFrameOffsetLegal
bool isFrameOffsetLegal(const MachineInstr *MI, Register BaseReg, int64_t Offset) const override
Definition: AArch64RegisterInfo.cpp:710
llvm::Function::getAttributes
AttributeList getAttributes() const
Return the attribute list for this Function.
Definition: Function.h:314
AArch64AddressingModes.h
llvm::TargetOptions::DisableFramePointerElim
bool DisableFramePointerElim(const MachineFunction &MF) const
DisableFramePointerElim - This returns true if frame pointer elimination optimization should be disab...
Definition: TargetOptionsImpl.cpp:23
llvm::CallingConv::Win64
@ Win64
The C convention as implemented on Windows/x86-64 and AArch64.
Definition: CallingConv.h:156
llvm::AArch64FrameOffsetIsLegal
@ AArch64FrameOffsetIsLegal
Offset is legal.
Definition: AArch64InstrInfo.h:435
llvm::AArch64FrameLowering::resolveFrameIndexReference
StackOffset resolveFrameIndexReference(const MachineFunction &MF, int FI, Register &FrameReg, bool PreferFP, bool ForSimm) const
Definition: AArch64FrameLowering.cpp:2290
llvm::MachineFunction::getSubtarget
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
Definition: MachineFunction.h:657
llvm::AArch64RegisterInfo::AArch64RegisterInfo
AArch64RegisterInfo(const Triple &TT)
Definition: AArch64RegisterInfo.cpp:42
llvm::AArch64FunctionInfo::isSplitCSR
bool isSplitCSR() const
Definition: AArch64MachineFunctionInfo.h:247
llvm::MachineInstrBuilder::addFrameIndex
const MachineInstrBuilder & addFrameIndex(int Idx) const
Definition: MachineInstrBuilder.h:152
llvm::AArch64Subtarget::isXRegisterReservedForRA
bool isXRegisterReservedForRA(size_t i) const
Definition: AArch64Subtarget.h:212
llvm::isAArch64FrameOffsetLegal
int isAArch64FrameOffsetLegal(const MachineInstr &MI, StackOffset &Offset, bool *OutUseUnscaledOp=nullptr, unsigned *OutUnscaledOp=nullptr, int64_t *EmittableOffset=nullptr)
Check if the Offset is a valid frame offset for MI.
Definition: AArch64InstrInfo.cpp:4641
llvm::Function::hasFnAttribute
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.cpp:629
llvm::MachineFrameInfo::getLocalFrameSize
int64_t getLocalFrameSize() const
Get the size of the local object blob.
Definition: MachineFrameInfo.h:438
llvm::MachineInstr
Representation of each machine instruction.
Definition: MachineInstr.h:66
AArch64FrameLowering.h
llvm::Function::getCallingConv
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition: Function.h:238
llvm::AArch64FunctionInfo::getTaggedBasePointerOffset
unsigned getTaggedBasePointerOffset() const
Definition: AArch64MachineFunctionInfo.h:415
llvm::AArch64RegisterInfo::isStrictlyReservedReg
bool isStrictlyReservedReg(const MachineFunction &MF, MCRegister Reg) const
Definition: AArch64RegisterInfo.cpp:451
llvm::AArch64FunctionInfo
AArch64FunctionInfo - This class is derived from MachineFunctionInfo and contains private AArch64-spe...
Definition: AArch64MachineFunctionInfo.h:38
llvm::AArch64RegisterInfo::needsFrameBaseReg
bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const override
needsFrameBaseReg - Returns true if the instruction's frame index reference would be better served by...
Definition: AArch64RegisterInfo.cpp:646
I
#define I(x, y, z)
Definition: MD5.cpp:58
llvm::RegScavenger
Definition: RegisterScavenging.h:34
llvm::CallingConv::PreserveMost
@ PreserveMost
Used for runtime calls that preserves most registers.
Definition: CallingConv.h:63
llvm::is_contained
bool is_contained(R &&Range, const E &Element)
Wrapper function around std::find to detect if an element exists in a container.
Definition: STLExtras.h:1843
llvm::TargetMachine::Options
TargetOptions Options
Definition: TargetMachine.h:117
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
llvm::MachineFunction::getFrameInfo
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
Definition: MachineFunction.h:673
memcpy
<%struct.s * > cast struct s *S to sbyte *< sbyte * > sbyte uint cast struct s *agg result to sbyte *< sbyte * > sbyte uint cast struct s *memtmp to sbyte *< sbyte * > sbyte uint ret void llc ends up issuing two memcpy or custom lower memcpy(of small size) to be ldmia/stmia. I think option 2 is better but the current register allocator cannot allocate a chunk of registers at a time. A feasible temporary solution is to use specific physical registers at the lowering time for small(<
llvm::MachineBasicBlock::getParent
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
Definition: MachineBasicBlock.h:261
llvm::ISD::STACKMAP
@ STACKMAP
Definition: ISDOpcodes.h:1293
llvm::AArch64Subtarget::isTargetLinux
bool isTargetLinux() const
Definition: AArch64Subtarget.h:263
llvm::AArch64Subtarget::isCallingConvWin64
bool isCallingConvWin64(CallingConv::ID CC) const
Definition: AArch64Subtarget.h:325
llvm::AArch64RegisterInfo::useFPForScavengingIndex
bool useFPForScavengingIndex(const MachineFunction &MF) const override
Definition: AArch64RegisterInfo.cpp:612
llvm::emitFrameOffset
void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, StackOffset Offset, const TargetInstrInfo *TII, MachineInstr::MIFlag=MachineInstr::NoFlags, bool SetNZCV=false, bool NeedsWinCFI=false, bool *HasWinCFI=nullptr, bool EmitCFAOffset=false, StackOffset InitialOffset={}, unsigned FrameReg=AArch64::SP)
emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg plus Offset.
Definition: AArch64InstrInfo.cpp:4398
llvm::AArch64FrameLowering::getNonLocalFrameIndexReference
StackOffset getNonLocalFrameIndexReference(const MachineFunction &MF, int FI) const override
getNonLocalFrameIndexReference - This method returns the offset used to reference a frame index locat...
Definition: AArch64FrameLowering.cpp:2254
llvm::MachineInstrBuilder::addReg
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Definition: MachineInstrBuilder.h:97
llvm::AArch64RegisterInfo::getCalleeSavedRegs
const MCPhysReg * getCalleeSavedRegs(const MachineFunction *MF) const override
Code Generation virtual methods...
Definition: AArch64RegisterInfo.cpp:71
llvm::CallingConv::SwiftTail
@ SwiftTail
This follows the Swift calling convention in how arguments are passed but guarantees tail calls will ...
Definition: CallingConv.h:87
llvm::MachineFunction
Definition: MachineFunction.h:257
llvm::AArch64RegisterInfo::getReservedRegs
BitVector getReservedRegs(const MachineFunction &MF) const override
Definition: AArch64RegisterInfo.cpp:434
Triple.h
llvm::AArch64RegisterInfo::requiresFrameIndexScavenging
bool requiresFrameIndexScavenging(const MachineFunction &MF) const override
Definition: AArch64RegisterInfo.cpp:629
TargetOptions.h
llvm::AArch64RegisterInfo::isAsmClobberable
bool isAsmClobberable(const MachineFunction &MF, MCRegister PhysReg) const override
Definition: AArch64RegisterInfo.cpp:469
llvm::ArrayRef
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: APInt.h:32
llvm::AArch64Subtarget::isXRegisterReserved
bool isXRegisterReserved(size_t i) const
Definition: AArch64Subtarget.h:211
llvm::AArch64RegisterInfo::getLocalAddressRegister
unsigned getLocalAddressRegister(const MachineFunction &MF) const
Definition: AArch64RegisterInfo.cpp:958
llvm::any_of
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1716
AArch64InstPrinter.h
llvm::AArch64FunctionInfo::hasCalculatedStackSizeSVE
bool hasCalculatedStackSizeSVE() const
Definition: AArch64MachineFunctionInfo.h:226
Dwarf.h
uint32_t
llvm::StackOffset
StackOffset is a class to represent an offset with 2 dimensions, named fixed and scalable,...
Definition: TypeSize.h:134
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition: AArch64SLSHardening.cpp:76
llvm::AArch64RegisterInfo::getCustomEHPadPreservedMask
const uint32_t * getCustomEHPadPreservedMask(const MachineFunction &MF) const override
Definition: AArch64RegisterInfo.cpp:288
CC
auto CC
Definition: RISCVRedundantCopyElimination.cpp:79
llvm::AArch64RegisterInfo::getThisReturnPreservedMask
const uint32_t * getThisReturnPreservedMask(const MachineFunction &MF, CallingConv::ID) const
getThisReturnPreservedMask - Returns a call preserved mask specific to the case that 'returned' is on...
Definition: AArch64RegisterInfo.cpp:338
llvm::AArch64RegisterInfo::emitReservedArgRegCallError
void emitReservedArgRegCallError(const MachineFunction &MF) const
Definition: AArch64RegisterInfo.cpp:462
llvm::AArch64RegisterInfo::getDarwinCallPreservedMask
const uint32_t * getDarwinCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const
Definition: AArch64RegisterInfo.cpp:208
MRI
unsigned const MachineRegisterInfo * MRI
Definition: AArch64AdvSIMDScalarPass.cpp:105
llvm::Register
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
llvm::MachineFrameInfo::isFrameAddressTaken
bool isFrameAddressTaken() const
This method may be called any time after instruction selection is complete to determine if there is a...
Definition: MachineFrameInfo.h:371
llvm::AArch64_MC::initLLVMToCVRegMapping
void initLLVMToCVRegMapping(MCRegisterInfo *MRI)
Definition: AArch64MCTargetDesc.cpp:66
llvm::ISD::FrameIndex
@ FrameIndex
Definition: ISDOpcodes.h:80
MBB
MachineBasicBlock & MBB
Definition: AArch64SLSHardening.cpp:74
llvm::CallingConv::CFGuard_Check
@ CFGuard_Check
Special calling convention on Windows for calling the Control Guard Check ICall function.
Definition: CallingConv.h:82
llvm::DIExpression::appendOffset
static void appendOffset(SmallVectorImpl< uint64_t > &Ops, int64_t Offset)
Append Ops with operations to apply the Offset.
Definition: DebugInfoMetadata.cpp:1421
llvm::AArch64Subtarget::getNumXRegisterReserved
unsigned getNumXRegisterReserved() const
Definition: AArch64Subtarget.h:213
llvm::AArch64RegisterInfo::getOffsetOpcodes
void getOffsetOpcodes(const StackOffset &Offset, SmallVectorImpl< uint64_t > &Ops) const override
Definition: AArch64RegisterInfo.cpp:793
llvm::CallingConv::GHC
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
Definition: CallingConv.h:50
llvm::MachineFunction::getFunction
Function & getFunction()
Return the LLVM function that this machine code represents.
Definition: MachineFunction.h:623
uint16_t
llvm::AArch64FunctionInfo::getStackSizeSVE
uint64_t getStackSizeSVE() const
Definition: AArch64MachineFunctionInfo.h:233
llvm::MachineFunction::getTarget
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Definition: MachineFunction.h:653
llvm::AMDGPU::SendMsg::Op
Op
Definition: SIDefines.h:348
MachineFrameInfo.h
DiagnosticInfo.h
Function.h
llvm::LiveIntervals
Definition: LiveIntervals.h:53
llvm::AArch64RegisterInfo::getWindowsStackProbePreservedMask
const uint32_t * getWindowsStackProbePreservedMask() const
Stack probing calls preserve different CSRs to the normal CC.
Definition: AArch64RegisterInfo.cpp:353
llvm::MachineFunction::hasEHFunclets
bool hasEHFunclets() const
Definition: MachineFunction.h:1100
llvm::MCSubRegIterator
MCSubRegIterator enumerates all sub-registers of Reg.
Definition: MCRegisterInfo.h:597
llvm::MachineFrameInfo
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
Definition: MachineFrameInfo.h:106
AArch64Subtarget.h
llvm::AArch64RegisterInfo::resolveFrameIndex
void resolveFrameIndex(MachineInstr &MI, Register BaseReg, int64_t Offset) const override
Definition: AArch64RegisterInfo.cpp:745
llvm::BuildMI
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
Definition: MachineInstrBuilder.h:357
llvm::MachineBasicBlock::begin
iterator begin()
Definition: MachineBasicBlock.h:305
MachineInstrBuilder.h
llvm::CallingConv::Fast
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:41
llvm::PseudoProbeAttributes::Reserved
@ Reserved
llvm::CallingConv::Tail
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always b...
Definition: CallingConv.h:76
llvm::AArch64RegisterInfo::getPointerRegClass
const TargetRegisterClass * getPointerRegClass(const MachineFunction &MF, unsigned Kind=0) const override
Definition: AArch64RegisterInfo.cpp:482
llvm::MachineRegisterInfo::constrainRegClass
const TargetRegisterClass * constrainRegClass(Register Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common sub...
Definition: MachineRegisterInfo.cpp:82
llvm::AArch64RegisterInfo::UpdateCustomCalleeSavedRegs
void UpdateCustomCalleeSavedRegs(MachineFunction &MF) const
Definition: AArch64RegisterInfo.cpp:177
llvm::MipsISD::Ins
@ Ins
Definition: MipsISelLowering.h:160
llvm::SmallVectorImpl< uint64_t >
llvm::AArch64RegisterInfo::getTLSCallPreservedMask
const uint32_t * getTLSCallPreservedMask() const
Definition: AArch64RegisterInfo.cpp:296
llvm::AArch64RegisterInfo::regNeedsCFI
bool regNeedsCFI(unsigned Reg, unsigned &RegToUseForCFI) const
Return whether the register needs a CFI entry.
Definition: AArch64RegisterInfo.cpp:52
llvm::DebugLoc
A debug info location.
Definition: DebugLoc.h:33
llvm::MachineFrameInfo::adjustsStack
bool adjustsStack() const
Return true if this function adjusts the stack – e.g., when calling another function.
Definition: MachineFrameInfo.h:609
llvm::AArch64II::MO_TAGGED
@ MO_TAGGED
MO_TAGGED - With MO_PAGE, indicates that the page includes a memory tag in bits 56-63.
Definition: AArch64BaseInfo.h:802
RegisterScavenging.h
llvm::AArch64RegisterInfo::requiresVirtualBaseRegisters
bool requiresVirtualBaseRegisters(const MachineFunction &MF) const override
Definition: AArch64RegisterInfo.cpp:606
llvm::AArch64Subtarget
Definition: AArch64Subtarget.h:38
raw_ostream.h
llvm::AArch64RegisterInfo::hasBasePointer
bool hasBasePointer(const MachineFunction &MF) const
Definition: AArch64RegisterInfo.cpp:496
llvm::CallingConv::AArch64_SVE_VectorCall
@ AArch64_SVE_VectorCall
Used between AArch64 SVE functions.
Definition: CallingConv.h:225
llvm::MachineInstrBundleIterator< MachineInstr >
llvm::CallingConv::WebKit_JS
@ WebKit_JS
Used for stack based JavaScript calls.
Definition: CallingConv.h:56
llvm::AArch64RegisterInfo::requiresRegisterScavenging
bool requiresRegisterScavenging(const MachineFunction &MF) const override
Definition: AArch64RegisterInfo.cpp:601
llvm::AArch64RegisterInfo::eliminateFrameIndex
bool eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj, unsigned FIOperandNum, RegScavenger *RS=nullptr) const override
Definition: AArch64RegisterInfo.cpp:820
llvm::AArch64RegisterInfo::getDarwinCalleeSavedRegs
const MCPhysReg * getDarwinCalleeSavedRegs(const MachineFunction *MF) const
Definition: AArch64RegisterInfo.cpp:125
llvm::CallingConv::AArch64_VectorCall
@ AArch64_VectorCall
Used between AArch64 Advanced SIMD functions.
Definition: CallingConv.h:222
llvm::MachineBasicBlock::end
iterator end()
Definition: MachineBasicBlock.h:307
SubReg
unsigned SubReg
Definition: AArch64AdvSIMDScalarPass.cpp:104
llvm::AArch64RegisterInfo::isReservedReg
bool isReservedReg(const MachineFunction &MF, MCRegister Reg) const
Definition: AArch64RegisterInfo.cpp:446
llvm::MCRegister
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:24
llvm::AArch64RegisterInfo::getStrictlyReservedRegs
BitVector getStrictlyReservedRegs(const MachineFunction &MF) const
Definition: AArch64RegisterInfo.cpp:385