LLVM  15.0.0git
AArch64RegisterInfo.cpp
//===- AArch64RegisterInfo.cpp - AArch64 Register Information ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetRegisterInfo
// class.
//
//===----------------------------------------------------------------------===//

#include "AArch64RegisterInfo.h"
#include "AArch64FrameLowering.h"
#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

#define GET_CC_REGISTER_LISTS
#include "AArch64GenCallingConv.inc"
#define GET_REGINFO_TARGET_DESC
#include "AArch64GenRegisterInfo.inc"

AArch64RegisterInfo::AArch64RegisterInfo(const Triple &TT)
    : AArch64GenRegisterInfo(AArch64::LR), TT(TT) {
  AArch64_MC::initLLVMToCVRegMapping(this);
}
/// Return whether the register needs a CFI entry. Not all unwinders may know
/// about SVE registers, so we assume the lowest common denominator, i.e. the
/// callee-saves required by the base ABI. For the SVE registers z8-z15 only
/// the lower 64 bits (d8-d15) need to be saved. The lower 64-bit subreg is
/// returned in \p RegToUseForCFI.
bool AArch64RegisterInfo::regNeedsCFI(unsigned Reg,
                                      unsigned &RegToUseForCFI) const {
  if (AArch64::PPRRegClass.contains(Reg))
    return false;

  if (AArch64::ZPRRegClass.contains(Reg)) {
    RegToUseForCFI = getSubReg(Reg, AArch64::dsub);
    for (int I = 0; CSR_AArch64_AAPCS_SaveList[I]; ++I) {
      if (CSR_AArch64_AAPCS_SaveList[I] == RegToUseForCFI)
        return true;
    }
    return false;
  }

  RegToUseForCFI = Reg;
  return true;
}
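// A function whose signature takes or returns scalable vectors follows the
// SVE calling convention; getCalleeSavedRegs() below uses this to select the
// larger CSR_AArch64_SVE_AAPCS_SaveList for such functions.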
bool AArch64RegisterInfo::hasSVEArgsOrReturn(const MachineFunction *MF) {
  const Function &F = MF->getFunction();
  return isa<ScalableVectorType>(F.getReturnType()) ||
         any_of(F.args(), [](const Argument &Arg) {
           return isa<ScalableVectorType>(Arg.getType());
         });
}

const MCPhysReg *
AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");

  if (MF->getFunction().getCallingConv() == CallingConv::GHC)
    // GHC set of callee saved regs is empty as all those regs are
    // used for passing STG regs around
    return CSR_AArch64_NoRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::AnyReg)
    return CSR_AArch64_AllRegs_SaveList;

  // Darwin has its own CSR_AArch64_AAPCS_SaveList, which means most CSR save
  // lists depending on that will need to have their Darwin variant as well.
  if (MF->getSubtarget<AArch64Subtarget>().isTargetDarwin())
    return getDarwinCalleeSavedRegs(MF);

  if (MF->getFunction().getCallingConv() == CallingConv::CFGuard_Check)
    return CSR_Win_AArch64_CFGuard_Check_SaveList;
  if (MF->getSubtarget<AArch64Subtarget>().isTargetWindows())
    return CSR_Win_AArch64_AAPCS_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall)
    return CSR_AArch64_AAVPCS_SaveList;
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SVE_VectorCall)
    return CSR_AArch64_SVE_AAPCS_SaveList;
  if (MF->getSubtarget<AArch64Subtarget>().getTargetLowering()
          ->supportSwiftError() &&
      MF->getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_AArch64_AAPCS_SwiftError_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::SwiftTail)
    return CSR_AArch64_AAPCS_SwiftTail_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
    return CSR_AArch64_RT_MostRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::Win64)
    // This is for OSes other than Windows; Windows is a separate case further
    // above.
    return CSR_AArch64_AAPCS_X18_SaveList;
  if (hasSVEArgsOrReturn(MF))
    return CSR_AArch64_SVE_AAPCS_SaveList;
  return CSR_AArch64_AAPCS_SaveList;
}
const MCPhysReg *
AArch64RegisterInfo::getDarwinCalleeSavedRegs(const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  assert(MF->getSubtarget<AArch64Subtarget>().isTargetDarwin() &&
         "Invalid subtarget for getDarwinCalleeSavedRegs");

  if (MF->getFunction().getCallingConv() == CallingConv::CFGuard_Check)
    report_fatal_error(
        "Calling convention CFGuard_Check is unsupported on Darwin.");
  if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall)
    return CSR_Darwin_AArch64_AAVPCS_SaveList;
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SVE_VectorCall)
    report_fatal_error(
        "Calling convention SVE_VectorCall is unsupported on Darwin.");
  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS)
    return MF->getInfo<AArch64FunctionInfo>()->isSplitCSR()
               ? CSR_Darwin_AArch64_CXX_TLS_PE_SaveList
               : CSR_Darwin_AArch64_CXX_TLS_SaveList;
  if (MF->getSubtarget<AArch64Subtarget>().getTargetLowering()
          ->supportSwiftError() &&
      MF->getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_Darwin_AArch64_AAPCS_SwiftError_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::SwiftTail)
    return CSR_Darwin_AArch64_AAPCS_SwiftTail_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
    return CSR_Darwin_AArch64_RT_MostRegs_SaveList;
  return CSR_Darwin_AArch64_AAPCS_SaveList;
}

const MCPhysReg *AArch64RegisterInfo::getCalleeSavedRegsViaCopy(
    const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
      MF->getInfo<AArch64FunctionInfo>()->isSplitCSR())
    return CSR_Darwin_AArch64_CXX_TLS_ViaCopy_SaveList;
  return nullptr;
}
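// Subtarget features can mark individual X registers as custom callee-saved.
// This hook appends any such GPR64 registers to the default CSR list for the
// function; the list stays zero-terminated.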
void AArch64RegisterInfo::UpdateCustomCalleeSavedRegs(
    MachineFunction &MF) const {
  const MCPhysReg *CSRs = getCalleeSavedRegs(&MF);
  SmallVector<MCPhysReg, 32> UpdatedCSRs;
  for (const MCPhysReg *I = CSRs; *I; ++I)
    UpdatedCSRs.push_back(*I);

  for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegCustomCalleeSaved(i)) {
      UpdatedCSRs.push_back(AArch64::GPR64commonRegClass.getRegister(i));
    }
  }
  // Register lists are zero-terminated.
  UpdatedCSRs.push_back(0);
  MF.getRegInfo().setCalleeSavedRegs(UpdatedCSRs);
}

const TargetRegisterClass *
AArch64RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
                                           unsigned Idx) const {
  // Edge case for the GPR/FPR register classes.
  if (RC == &AArch64::GPR32allRegClass && Idx == AArch64::hsub)
    return &AArch64::FPR32RegClass;
  else if (RC == &AArch64::GPR64allRegClass && Idx == AArch64::hsub)
    return &AArch64::FPR64RegClass;

  // Forward to TableGen's default version.
  return AArch64GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}

const uint32_t *
AArch64RegisterInfo::getDarwinCallPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  assert(MF.getSubtarget<AArch64Subtarget>().isTargetDarwin() &&
         "Invalid subtarget for getDarwinCallPreservedMask");

  if (CC == CallingConv::CXX_FAST_TLS)
    return CSR_Darwin_AArch64_CXX_TLS_RegMask;
  if (CC == CallingConv::AArch64_VectorCall)
    return CSR_Darwin_AArch64_AAVPCS_RegMask;
  if (CC == CallingConv::AArch64_SVE_VectorCall)
    report_fatal_error(
        "Calling convention SVE_VectorCall is unsupported on Darwin.");
  if (CC == CallingConv::CFGuard_Check)
    report_fatal_error(
        "Calling convention CFGuard_Check is unsupported on Darwin.");
  if (MF.getSubtarget<AArch64Subtarget>().getTargetLowering()
          ->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return CSR_Darwin_AArch64_AAPCS_SwiftError_RegMask;
  if (CC == CallingConv::SwiftTail)
    return CSR_Darwin_AArch64_AAPCS_SwiftTail_RegMask;
  if (CC == CallingConv::PreserveMost)
    return CSR_Darwin_AArch64_RT_MostRegs_RegMask;
  return CSR_Darwin_AArch64_AAPCS_RegMask;
}
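// getCalleeSavedRegs() above describes what a function definition must save;
// the call-preserved masks below describe, at a call site, which registers the
// callee is expected to preserve (one bit per register, set = preserved). The
// _SCS variants are used when the caller carries the ShadowCallStack
// attribute, in which case x18 holds the shadow call stack pointer.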
const uint32_t *
AArch64RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  bool SCS = MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack);
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls
    return SCS ? CSR_AArch64_NoRegs_SCS_RegMask : CSR_AArch64_NoRegs_RegMask;
  if (CC == CallingConv::AnyReg)
    return SCS ? CSR_AArch64_AllRegs_SCS_RegMask : CSR_AArch64_AllRegs_RegMask;

  // All the following calling conventions are handled differently on Darwin.
  if (MF.getSubtarget<AArch64Subtarget>().isTargetDarwin()) {
    if (SCS)
      report_fatal_error("ShadowCallStack attribute not supported on Darwin.");
    return getDarwinCallPreservedMask(MF, CC);
  }

  if (CC == CallingConv::AArch64_VectorCall)
    return SCS ? CSR_AArch64_AAVPCS_SCS_RegMask : CSR_AArch64_AAVPCS_RegMask;
  if (CC == CallingConv::AArch64_SVE_VectorCall)
    return SCS ? CSR_AArch64_SVE_AAPCS_SCS_RegMask
               : CSR_AArch64_SVE_AAPCS_RegMask;
  if (CC == CallingConv::CFGuard_Check)
    return CSR_Win_AArch64_CFGuard_Check_RegMask;
  if (MF.getSubtarget<AArch64Subtarget>().getTargetLowering()
          ->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return SCS ? CSR_AArch64_AAPCS_SwiftError_SCS_RegMask
               : CSR_AArch64_AAPCS_SwiftError_RegMask;
  if (CC == CallingConv::SwiftTail) {
    if (SCS)
      report_fatal_error(
          "ShadowCallStack attribute not supported with swifttail");
    return CSR_AArch64_AAPCS_SwiftTail_RegMask;
  }
  if (CC == CallingConv::PreserveMost)
    return SCS ? CSR_AArch64_RT_MostRegs_SCS_RegMask
               : CSR_AArch64_RT_MostRegs_RegMask;
  else
    return SCS ? CSR_AArch64_AAPCS_SCS_RegMask : CSR_AArch64_AAPCS_RegMask;
}

const uint32_t *AArch64RegisterInfo::getCustomEHPadPreservedMask(
    const MachineFunction &MF) const {
  if (MF.getSubtarget<AArch64Subtarget>().isTargetLinux())
    return CSR_AArch64_AAPCS_RegMask;

  return nullptr;
}

const uint32_t *AArch64RegisterInfo::getTLSCallPreservedMask() const {
  if (TT.isOSDarwin())
    return CSR_Darwin_AArch64_TLS_RegMask;

  assert(TT.isOSBinFormatELF() && "Invalid target");
  return CSR_AArch64_TLS_ELF_RegMask;
}
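// Call-site counterpart of UpdateCustomCalleeSavedRegs: copy the regmask and
// set the "preserved" bit for every custom callee-saved X register and all of
// its sub-registers (the iterator below includes the register itself).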
void AArch64RegisterInfo::UpdateCustomCallPreservedMask(
    MachineFunction &MF, const uint32_t **Mask) const {
  uint32_t *UpdatedMask = MF.allocateRegMask();
  unsigned RegMaskSize = MachineOperand::getRegMaskSize(getNumRegs());
  memcpy(UpdatedMask, *Mask, sizeof(UpdatedMask[0]) * RegMaskSize);

  for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegCustomCalleeSaved(i)) {
      for (MCSubRegIterator SubReg(AArch64::GPR64commonRegClass.getRegister(i),
                                   this, true);
           SubReg.isValid(); ++SubReg) {
        // See TargetRegisterInfo::getCallPreservedMask for how to interpret
        // the register mask.
        UpdatedMask[*SubReg / 32] |= 1u << (*SubReg % 32);
      }
    }
  }
  *Mask = UpdatedMask;
}

const uint32_t *AArch64RegisterInfo::getNoPreservedMask() const {
  return CSR_AArch64_NoRegs_RegMask;
}

const uint32_t *
AArch64RegisterInfo::getThisReturnPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  // This should return a register mask that is the same as that returned by
  // getCallPreservedMask but that additionally preserves the register used for
  // the first i64 argument (which must also be the register used to return a
  // single i64 return value).
  //
  // In case the calling convention does not use the same register for both,
  // the function should return NULL (does not currently apply).
  assert(CC != CallingConv::GHC && "should not be GHC calling convention.");
  if (MF.getSubtarget<AArch64Subtarget>().isTargetDarwin())
    return CSR_Darwin_AArch64_AAPCS_ThisReturn_RegMask;
  return CSR_AArch64_AAPCS_ThisReturn_RegMask;
}

const uint32_t *AArch64RegisterInfo::getWindowsStackProbePreservedMask() const {
  return CSR_AArch64_StackProbe_Windows_RegMask;
}
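// Registers that are never available to the allocator: SP/WSP and XZR/WZR
// always; the frame pointer (w29/x29) whenever a frame pointer is required or
// on Darwin; any X register the subtarget reports as reserved; x19 when it is
// used as the base pointer; x16 under speculative load hardening; and the SME
// ZA tile when SME is available.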
BitVector
AArch64RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);

  // FIXME: avoid re-calculating this every time.
  BitVector Reserved(getNumRegs());
  markSuperRegs(Reserved, AArch64::WSP);
  markSuperRegs(Reserved, AArch64::WZR);

  if (TFI->hasFP(MF) || TT.isOSDarwin())
    markSuperRegs(Reserved, AArch64::W29);

  for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegisterReserved(i))
      markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
  }

  if (hasBasePointer(MF))
    markSuperRegs(Reserved, AArch64::W19);

  // SLH uses register W16/X16 as the taint register.
  if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening))
    markSuperRegs(Reserved, AArch64::W16);

  // SME tiles are not allocatable.
  if (MF.getSubtarget<AArch64Subtarget>().hasSME()) {
    for (MCSubRegIterator SubReg(AArch64::ZA, this, /*self=*/true);
         SubReg.isValid(); ++SubReg)
      Reserved.set(*SubReg);
  }

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}

bool AArch64RegisterInfo::isReservedReg(const MachineFunction &MF,
                                        MCRegister Reg) const {
  return getReservedRegs(MF)[Reg];
}
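// Reserving one of the integer argument registers (GPR64argRegClass) makes
// ordinary calls impossible to lower; isAnyArgRegReserved() detects that
// situation and emitReservedArgRegCallError() reports it as an
// unsupported-feature diagnostic.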
bool AArch64RegisterInfo::isAnyArgRegReserved(const MachineFunction &MF) const {
  return llvm::any_of(*AArch64::GPR64argRegClass.MC, [this, &MF](MCPhysReg r) {
    return isReservedReg(MF, r);
  });
}

void AArch64RegisterInfo::emitReservedArgRegCallError(
    const MachineFunction &MF) const {
  const Function &F = MF.getFunction();
  F.getContext().diagnose(DiagnosticInfoUnsupported{
      F, ("AArch64 doesn't support"
          " function calls if any of the argument registers is reserved.")});
}

bool AArch64RegisterInfo::isAsmClobberable(const MachineFunction &MF,
                                           MCRegister PhysReg) const {
  return !isReservedReg(MF, PhysReg);
}

bool AArch64RegisterInfo::isConstantPhysReg(MCRegister PhysReg) const {
  return PhysReg == AArch64::WZR || PhysReg == AArch64::XZR;
}

const TargetRegisterClass *
AArch64RegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                        unsigned Kind) const {
  return &AArch64::GPR64spRegClass;
}

const TargetRegisterClass *
AArch64RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &AArch64::CCRRegClass)
    return &AArch64::GPR64RegClass; // Only MSR & MRS copy NZCV.
  return RC;
}

unsigned AArch64RegisterInfo::getBaseRegister() const { return AArch64::X19; }
bool AArch64RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // In the presence of variable sized objects or funclets, if the fixed stack
  // size is large enough that referencing from the FP won't result in things
  // being in range relatively often, we can use a base pointer to allow access
  // from the other direction like the SP normally works.
  //
  // Furthermore, if variable sized objects are present and the stack needs to
  // be dynamically re-aligned, the base pointer is the only reliable way to
  // reference the locals.
  if (MFI.hasVarSizedObjects() || MF.hasEHFunclets()) {
    if (hasStackRealignment(MF))
      return true;

    if (MF.getSubtarget<AArch64Subtarget>().hasSVE()) {
      const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
      // Frames that have variable sized objects and scalable SVE objects
      // should always use a base pointer.
      if (!AFI->hasCalculatedStackSizeSVE() || AFI->getStackSizeSVE())
        return true;
    }

    // Conservatively estimate whether the negative offset from the frame
    // pointer will be sufficient to reach. If a function has a smallish
    // frame, it's less likely to have lots of spills and callee saved
    // space, so it's all more likely to be within range of the frame pointer.
    // If it's wrong, we'll materialize the constant and still get to the
    // object; it's just suboptimal. Negative offsets use the unscaled
    // load/store instructions, which have a 9-bit signed immediate.
    return MFI.getLocalFrameSize() >= 256;
  }

  return false;
}
bool AArch64RegisterInfo::isArgumentRegister(const MachineFunction &MF,
                                             MCRegister Reg) const {
  CallingConv::ID CC = MF.getFunction().getCallingConv();
  const AArch64Subtarget &STI = MF.getSubtarget<AArch64Subtarget>();
  bool IsVarArg = STI.isCallingConvWin64(MF.getFunction().getCallingConv());

  auto HasReg = [](ArrayRef<MCRegister> RegList, MCRegister Reg) {
    return llvm::any_of(RegList,
                        [Reg](const MCRegister R) { return R == Reg; });
  };

  switch (CC) {
  default:
    report_fatal_error("Unsupported calling convention.");
  case CallingConv::WebKit_JS:
    return HasReg(CC_AArch64_WebKit_JS_ArgRegs, Reg);
  case CallingConv::GHC:
    return HasReg(CC_AArch64_GHC_ArgRegs, Reg);
  case CallingConv::C:
  case CallingConv::Fast:
  case CallingConv::PreserveMost:
  case CallingConv::CXX_FAST_TLS:
  case CallingConv::Swift:
  case CallingConv::SwiftTail:
  case CallingConv::Tail:
    if (STI.isTargetWindows() && IsVarArg)
      return HasReg(CC_AArch64_Win64_VarArg_ArgRegs, Reg);
    if (!STI.isTargetDarwin()) {
      switch (CC) {
      default:
        return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg);
      case CallingConv::Swift:
      case CallingConv::SwiftTail:
        return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg) ||
               HasReg(CC_AArch64_AAPCS_Swift_ArgRegs, Reg);
      }
    }
    if (!IsVarArg) {
      switch (CC) {
      default:
        return HasReg(CC_AArch64_DarwinPCS_ArgRegs, Reg);
      case CallingConv::Swift:
      case CallingConv::SwiftTail:
        return HasReg(CC_AArch64_DarwinPCS_ArgRegs, Reg) ||
               HasReg(CC_AArch64_DarwinPCS_Swift_ArgRegs, Reg);
      }
    }
    if (STI.isTargetILP32())
      return HasReg(CC_AArch64_DarwinPCS_ILP32_VarArg_ArgRegs, Reg);
    return HasReg(CC_AArch64_DarwinPCS_VarArg_ArgRegs, Reg);
  case CallingConv::Win64:
    if (IsVarArg)
      return HasReg(CC_AArch64_Win64_VarArg_ArgRegs, Reg);
    return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg);
  case CallingConv::CFGuard_Check:
    return HasReg(CC_AArch64_Win64_CFGuard_Check_ArgRegs, Reg);
  case CallingConv::AArch64_VectorCall:
  case CallingConv::AArch64_SVE_VectorCall:
    return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg);
  }
}
Register
AArch64RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? AArch64::FP : AArch64::SP;
}

bool AArch64RegisterInfo::requiresRegisterScavenging(
    const MachineFunction &MF) const {
  return true;
}

bool AArch64RegisterInfo::requiresVirtualBaseRegisters(
    const MachineFunction &MF) const {
  return true;
}

bool
AArch64RegisterInfo::useFPForScavengingIndex(const MachineFunction &MF) const {
  // This function indicates whether the emergency spillslot should be placed
  // close to the beginning of the stackframe (closer to FP) or the end
  // (closer to SP).
  //
  // The beginning works most reliably if we have a frame pointer.
  // In the presence of any non-constant space between FP and locals
  // (e.g. in case of stack realignment or a scalable SVE area), it is
  // better to use SP or BP.
  const AArch64FrameLowering &TFI = *getFrameLowering(MF);
  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  assert((!MF.getSubtarget<AArch64Subtarget>().hasSVE() ||
          AFI->hasCalculatedStackSizeSVE()) &&
         "Expected SVE area to be calculated by this point");
  return TFI.hasFP(MF) && !hasStackRealignment(MF) && !AFI->getStackSizeSVE();
}

bool AArch64RegisterInfo::requiresFrameIndexScavenging(
    const MachineFunction &MF) const {
  return true;
}

bool
AArch64RegisterInfo::cannotEliminateFrame(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (MF.getTarget().Options.DisableFramePointerElim(MF) && MFI.adjustsStack())
    return true;
  return MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken();
}
/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool AArch64RegisterInfo::needsFrameBaseReg(MachineInstr *MI,
                                            int64_t Offset) const {
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i)
    assert(i < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand!");

  // It's the load/store FI references that cause issues, as it can be
  // difficult to materialize the offset if it won't fit in the literal field.
  // Estimate based on the size of the local frame and some conservative
  // assumptions about the rest of the stack frame (note, this is pre-regalloc,
  // so we don't know everything for certain yet) whether this offset is likely
  // to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  if (!MI->mayLoad() && !MI->mayStore())
    return false;

  // Without a virtual base register, if the function has variable sized
  // objects, all fixed-size local references will be via the frame pointer.
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all GPR callee-saved registers get pushed.
  // FP, LR, X19-X28, D8-D15. 64-bits each.
  int64_t FPOffset = Offset - 16 * 20;
  // Estimate an offset from the stack pointer.
  // The incoming offset is relative to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset += MFI.getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  //        and pick a real one.
  Offset += 128; // 128 bytes of spill slots

  // If there is a frame pointer, try using it.
  // The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  if (TFI->hasFP(MF) && isFrameOffsetLegal(MI, AArch64::FP, FPOffset))
    return false;

  // If we can reference via the stack pointer or base pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (isFrameOffsetLegal(MI, AArch64::SP, Offset))
    return false;

  // If even offset 0 is illegal, we don't want a virtual base register.
  if (!isFrameOffsetLegal(MI, AArch64::SP, 0))
    return false;

  // The offset likely isn't legal; we want to allocate a virtual base register.
  return true;
}

bool AArch64RegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                             Register BaseReg,
                                             int64_t Offset) const {
  assert(MI && "Unable to get the legal offset for nil instruction.");
  StackOffset SaveOffset = StackOffset::getFixed(Offset);
  return isAArch64FrameOffsetLegal(*MI, SaveOffset) & AArch64FrameOffsetIsLegal;
}
/// Insert defining instruction(s) for BaseReg to be a pointer to FrameIdx
/// at the beginning of the basic block.
Register
AArch64RegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                  int FrameIdx,
                                                  int64_t Offset) const {
  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL; // Defaults to "unknown"
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();
  const MachineFunction &MF = *MBB->getParent();
  const AArch64InstrInfo *TII =
      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
  const MCInstrDesc &MCID = TII->get(AArch64::ADDXri);
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  Register BaseReg = MRI.createVirtualRegister(&AArch64::GPR64spRegClass);
  MRI.constrainRegClass(BaseReg, TII->getRegClass(MCID, 0, this, MF));
  unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);

  BuildMI(*MBB, Ins, DL, MCID, BaseReg)
      .addFrameIndex(FrameIdx)
      .addImm(Offset)
      .addImm(Shifter);

  return BaseReg;
}

void AArch64RegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                            int64_t Offset) const {
  // ARM doesn't need the general 64-bit offsets
  StackOffset Off = StackOffset::getFixed(Offset);

  unsigned i = 0;
  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  const MachineFunction *MF = MI.getParent()->getParent();
  const AArch64InstrInfo *TII =
      MF->getSubtarget<AArch64Subtarget>().getInstrInfo();
  bool Done = rewriteAArch64FrameIndex(MI, i, BaseReg, Off, TII);
  assert(Done && "Unable to resolve frame index!");
  (void)Done;
}

// Create a scratch register for the frame index elimination in an instruction.
// This function has special handling of stack tagging loop pseudos, in which
// case it can also change the instruction opcode.
static Register
createScratchRegisterForInstruction(MachineInstr &MI, unsigned FIOperandNum,
                                    const AArch64InstrInfo *TII) {
  // ST*Gloop have a reserved scratch register in operand 1. Use it, and also
  // replace the instruction with the writeback variant because it will now
  // satisfy the operand constraints for it.
  Register ScratchReg;
  if (MI.getOpcode() == AArch64::STGloop ||
      MI.getOpcode() == AArch64::STZGloop) {
    assert(FIOperandNum == 3 &&
           "Wrong frame index operand for STGloop/STZGloop");
    unsigned Op = MI.getOpcode() == AArch64::STGloop ? AArch64::STGloop_wback
                                                     : AArch64::STZGloop_wback;
    ScratchReg = MI.getOperand(1).getReg();
    MI.getOperand(3).ChangeToRegister(ScratchReg, false, false, true);
    MI.setDesc(TII->get(Op));
    MI.tieOperands(1, 3);
  } else {
    ScratchReg =
        MI.getMF()->getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
    MI.getOperand(FIOperandNum)
        .ChangeToRegister(ScratchReg, false, false, true);
  }
  return ScratchReg;
}
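// Lower a StackOffset into DWARF expression opcodes. The fixed part is a plain
// byte offset; the scalable part is emitted as (scalable bytes / 2) * VG,
// since VG holds the vector length in 64-bit granules (i.e. 2 * vscale). For
// example, a StackOffset of 16 fixed + 32 scalable bytes becomes roughly:
//   DW_OP_plus_uconst 16, DW_OP_constu 16, DW_OP_bregx VG 0, DW_OP_mul,
//   DW_OP_plus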
void AArch64RegisterInfo::getOffsetOpcodes(
    const StackOffset &Offset, SmallVectorImpl<uint64_t> &Ops) const {
  // The smallest scalable element supported by scaled SVE addressing
  // modes is the predicate, which is 2 scalable bytes in size. So the scalable
  // byte offset must always be a multiple of 2.
  assert(Offset.getScalable() % 2 == 0 && "Invalid frame offset");

  // Add fixed-sized offset using existing DIExpression interface.
  DIExpression::appendOffset(Ops, Offset.getFixed());

  unsigned VG = getDwarfRegNum(AArch64::VG, true);
  int64_t VGSized = Offset.getScalable() / 2;
  if (VGSized > 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(VGSized);
    Ops.append({dwarf::DW_OP_bregx, VG, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_plus);
  } else if (VGSized < 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(-VGSized);
    Ops.append({dwarf::DW_OP_bregx, VG, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_minus);
  }
}
void AArch64RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                              int SPAdj, unsigned FIOperandNum,
                                              RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const AArch64InstrInfo *TII =
      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  bool Tagged =
      MI.getOperand(FIOperandNum).getTargetFlags() & AArch64II::MO_TAGGED;
  Register FrameReg;

  // Special handling of dbg_value, stackmap, patchpoint, and statepoint
  // instructions.
  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT ||
      MI.getOpcode() == TargetOpcode::STATEPOINT) {
    StackOffset Offset =
        TFI->resolveFrameIndexReference(MF, FrameIndex, FrameReg,
                                        /*PreferFP=*/true,
                                        /*ForSimm=*/false);
    Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false /*isDef*/);
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset.getFixed());
    return;
  }

  if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE) {
    MachineOperand &FI = MI.getOperand(FIOperandNum);
    StackOffset Offset = TFI->getNonLocalFrameIndexReference(MF, FrameIndex);
    assert(!Offset.getScalable() &&
           "Frame offsets with a scalable component are not supported");
    FI.ChangeToImmediate(Offset.getFixed());
    return;
  }

  StackOffset Offset;
  if (MI.getOpcode() == AArch64::TAGPstack) {
    // TAGPstack must use the virtual frame register in its 3rd operand.
    const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
    FrameReg = MI.getOperand(3).getReg();
    Offset = StackOffset::getFixed(MFI.getObjectOffset(FrameIndex) +
                                   AFI->getTaggedBasePointerOffset());
  } else if (Tagged) {
    StackOffset SPOffset = StackOffset::getFixed(
        MFI.getObjectOffset(FrameIndex) + (int64_t)MFI.getStackSize());
    if (MFI.hasVarSizedObjects() ||
        isAArch64FrameOffsetLegal(MI, SPOffset, nullptr, nullptr, nullptr) !=
            (AArch64FrameOffsetCanUpdate | AArch64FrameOffsetIsLegal)) {
      // Can't update to SP + offset in place. Precalculate the tagged pointer
      // in a scratch register.
      Offset = TFI->resolveFrameIndexReference(
          MF, FrameIndex, FrameReg, /*PreferFP=*/false, /*ForSimm=*/true);
      Register ScratchReg =
          MF.getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
      emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset,
                      TII);
      BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(AArch64::LDG), ScratchReg)
          .addReg(ScratchReg)
          .addReg(ScratchReg)
          .addImm(0);
      MI.getOperand(FIOperandNum)
          .ChangeToRegister(ScratchReg, false, false, true);
      return;
    }
    FrameReg = AArch64::SP;
    Offset = StackOffset::getFixed(MFI.getObjectOffset(FrameIndex) +
                                   (int64_t)MFI.getStackSize());
  } else {
    Offset = TFI->resolveFrameIndexReference(
        MF, FrameIndex, FrameReg, /*PreferFP=*/false, /*ForSimm=*/true);
  }

  // Modify MI as necessary to handle as much of 'Offset' as possible.
  if (rewriteAArch64FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII))
    return;

  assert((!RS || !RS->isScavengingFrameIndex(FrameIndex)) &&
         "Emergency spill slot is out of reach");

  // If we get here, the immediate doesn't fit into the instruction. We folded
  // as much as possible above. Handle the rest, providing a register that is
  // SP+LargeImm.
  Register ScratchReg =
      createScratchRegisterForInstruction(MI, FIOperandNum, TII);
  emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset, TII);
}
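// Register pressure limits: roughly how many registers of the class are really
// available for allocation. For the GPR classes this is 32 minus XZR/SP, minus
// the frame pointer when one is required (always on Darwin), minus any
// subtarget-reserved X registers, minus the base pointer (x19) when one is in
// use; e.g. a function with a frame pointer and a base pointer and no other
// reserved registers gets 32 - 1 - 1 - 0 - 1 = 29.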
unsigned AArch64RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                                  MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);

  switch (RC->getID()) {
  default:
    return 0;
  case AArch64::GPR32RegClassID:
  case AArch64::GPR32spRegClassID:
  case AArch64::GPR32allRegClassID:
  case AArch64::GPR64spRegClassID:
  case AArch64::GPR64allRegClassID:
  case AArch64::GPR64RegClassID:
  case AArch64::GPR32commonRegClassID:
  case AArch64::GPR64commonRegClassID:
    return 32 - 1                                   // XZR/SP
              - (TFI->hasFP(MF) || TT.isOSDarwin()) // FP
              - MF.getSubtarget<AArch64Subtarget>().getNumXRegisterReserved()
              - hasBasePointer(MF); // X19
  case AArch64::FPR8RegClassID:
  case AArch64::FPR16RegClassID:
  case AArch64::FPR32RegClassID:
  case AArch64::FPR64RegClassID:
  case AArch64::FPR128RegClassID:
    return 32;

  case AArch64::MatrixIndexGPR32_12_15RegClassID:
    return 4;

  case AArch64::DDRegClassID:
  case AArch64::DDDRegClassID:
  case AArch64::DDDDRegClassID:
  case AArch64::QQRegClassID:
  case AArch64::QQQRegClassID:
  case AArch64::QQQQRegClassID:
    return 32;

  case AArch64::FPR128_loRegClassID:
  case AArch64::FPR64_loRegClassID:
  case AArch64::FPR16_loRegClassID:
    return 16;
  }
}

unsigned AArch64RegisterInfo::getLocalAddressRegister(
    const MachineFunction &MF) const {
  const auto &MFI = MF.getFrameInfo();
  if (!MF.hasEHFunclets() && !MFI.hasVarSizedObjects())
    return AArch64::SP;
  else if (hasStackRealignment(MF))
    return getBaseRegister();
  return getFrameRegister(MF);
}
/// SrcRC and DstRC will be morphed into NewRC if this returns true
bool AArch64RegisterInfo::shouldCoalesce(
    MachineInstr *MI, const TargetRegisterClass *SrcRC, unsigned SubReg,
    const TargetRegisterClass *DstRC, unsigned DstSubReg,
    const TargetRegisterClass *NewRC, LiveIntervals &LIS) const {
  if (MI->isCopy() &&
      ((DstRC->getID() == AArch64::GPR64RegClassID) ||
       (DstRC->getID() == AArch64::GPR64commonRegClassID)) &&
      MI->getOperand(0).getSubReg() && MI->getOperand(1).getSubReg())
    // Do not coalesce in the case of a 32-bit subregister copy
    // which implements a 32 to 64 bit zero extension
    // which relies on the upper 32 bits being zeroed.
    return false;
  return true;
}