//===- AArch64RegisterInfo.cpp - AArch64 Register Information -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetRegisterInfo
// class.
//
//===----------------------------------------------------------------------===//

#include "AArch64RegisterInfo.h"
#include "AArch64FrameLowering.h"
#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64InstPrinter.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/TargetParser/Triple.h"

using namespace llvm;

#define GET_CC_REGISTER_LISTS
#include "AArch64GenCallingConv.inc"
#define GET_REGINFO_TARGET_DESC
#include "AArch64GenRegisterInfo.inc"

AArch64RegisterInfo::AArch64RegisterInfo(const Triple &TT)
    : AArch64GenRegisterInfo(AArch64::LR), TT(TT) {
  AArch64_MC::initLLVMToCVRegMapping(this);
}

/// Return whether the register needs a CFI entry. Not all unwinders may know
/// about SVE registers, so we assume the lowest common denominator, i.e. the
/// callee-saves required by the base ABI. For the SVE registers z8-z15, only
/// the lower 64 bits (d8-d15) need to be saved. The lower 64-bit subreg is
/// returned in \p RegToUseForCFI.
bool AArch64RegisterInfo::regNeedsCFI(unsigned Reg,
                                      unsigned &RegToUseForCFI) const {
  if (AArch64::PPRRegClass.contains(Reg))
    return false;

  if (AArch64::ZPRRegClass.contains(Reg)) {
    RegToUseForCFI = getSubReg(Reg, AArch64::dsub);
    for (int I = 0; CSR_AArch64_AAPCS_SaveList[I]; ++I) {
      if (CSR_AArch64_AAPCS_SaveList[I] == RegToUseForCFI)
        return true;
    }
    return false;
  }

  RegToUseForCFI = Reg;
  return true;
}
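
// Illustrative example (editorial note, not upstream text): for Reg == Z8 the
// lookup above rewrites RegToUseForCFI to D8, which is in the base-ABI
// callee-save list, so a CFI entry is emitted for d8. For Z0, whose D-subreg
// d0 is not a base-ABI callee save, this returns false and no CFI entry is
// emitted.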

const MCPhysReg *
AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");

  if (MF->getFunction().getCallingConv() == CallingConv::GHC)
    // GHC set of callee saved regs is empty as all those regs are
    // used for passing STG regs around
    return CSR_AArch64_NoRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::AnyReg)
    return CSR_AArch64_AllRegs_SaveList;

  if (MF->getFunction().getCallingConv() == CallingConv::ARM64EC_Thunk_X64)
    return CSR_Win_AArch64_Arm64EC_Thunk_SaveList;

  // Darwin has its own CSR_AArch64_AAPCS_SaveList, which means most CSR save
  // lists depending on that will need to have their Darwin variant as well.
  if (MF->getSubtarget<AArch64Subtarget>().isTargetDarwin())
    return getDarwinCalleeSavedRegs(MF);

  if (MF->getFunction().getCallingConv() == CallingConv::CFGuard_Check)
    return CSR_Win_AArch64_CFGuard_Check_SaveList;
  if (MF->getSubtarget<AArch64Subtarget>().isTargetWindows()) {
    if (MF->getSubtarget<AArch64Subtarget>().getTargetLowering()
            ->supportSwiftError() &&
        MF->getFunction().getAttributes().hasAttrSomewhere(
            Attribute::SwiftError))
      return CSR_Win_AArch64_AAPCS_SwiftError_SaveList;
    if (MF->getFunction().getCallingConv() == CallingConv::SwiftTail)
      return CSR_Win_AArch64_AAPCS_SwiftTail_SaveList;
    return CSR_Win_AArch64_AAPCS_SaveList;
  }
  if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall)
    return CSR_AArch64_AAVPCS_SaveList;
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SVE_VectorCall)
    return CSR_AArch64_SVE_AAPCS_SaveList;
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0)
    report_fatal_error(
        "Calling convention AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0 is "
        "only supported to improve calls to SME ACLE save/restore/disable-za "
        "functions, and is not intended to be used beyond that scope.");
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2)
    report_fatal_error(
        "Calling convention AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2 is "
        "only supported to improve calls to SME ACLE __arm_sme_state "
        "and is not intended to be used beyond that scope.");
  if (MF->getSubtarget<AArch64Subtarget>().getTargetLowering()
          ->supportSwiftError() &&
      MF->getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_AArch64_AAPCS_SwiftError_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::SwiftTail)
    return CSR_AArch64_AAPCS_SwiftTail_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
    return CSR_AArch64_RT_MostRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveAll)
    return CSR_AArch64_RT_AllRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::Win64)
    // This is for OSes other than Windows; Windows is a separate case further
    // above.
    return CSR_AArch64_AAPCS_X18_SaveList;
  if (MF->getInfo<AArch64FunctionInfo>()->isSVECC())
    return CSR_AArch64_SVE_AAPCS_SaveList;
  return CSR_AArch64_AAPCS_SaveList;
}
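
// Usage sketch (illustrative): the returned list is zero-terminated, so
// callers walk it the way UpdateCustomCalleeSavedRegs below does:
//   for (const MCPhysReg *I = getCalleeSavedRegs(&MF); *I; ++I)
//     UpdatedCSRs.push_back(*I);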

const MCPhysReg *
AArch64RegisterInfo::getDarwinCalleeSavedRegs(const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  assert(MF->getSubtarget<AArch64Subtarget>().isTargetDarwin() &&
         "Invalid subtarget for getDarwinCalleeSavedRegs");

  if (MF->getFunction().getCallingConv() == CallingConv::CFGuard_Check)
    report_fatal_error(
        "Calling convention CFGuard_Check is unsupported on Darwin.");
  if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall)
    return CSR_Darwin_AArch64_AAVPCS_SaveList;
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SVE_VectorCall)
    report_fatal_error(
        "Calling convention SVE_VectorCall is unsupported on Darwin.");
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0)
    report_fatal_error(
        "Calling convention AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0 is "
        "only supported to improve calls to SME ACLE save/restore/disable-za "
        "functions, and is not intended to be used beyond that scope.");
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2)
    report_fatal_error(
        "Calling convention AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2 is "
        "only supported to improve calls to SME ACLE __arm_sme_state "
        "and is not intended to be used beyond that scope.");
  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS)
    return MF->getInfo<AArch64FunctionInfo>()->isSplitCSR()
               ? CSR_Darwin_AArch64_CXX_TLS_PE_SaveList
               : CSR_Darwin_AArch64_CXX_TLS_SaveList;
  if (MF->getSubtarget<AArch64Subtarget>().getTargetLowering()
          ->supportSwiftError() &&
      MF->getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_Darwin_AArch64_AAPCS_SwiftError_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::SwiftTail)
    return CSR_Darwin_AArch64_AAPCS_SwiftTail_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
    return CSR_Darwin_AArch64_RT_MostRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveAll)
    return CSR_Darwin_AArch64_RT_AllRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::Win64)
    return CSR_Darwin_AArch64_AAPCS_Win64_SaveList;
  return CSR_Darwin_AArch64_AAPCS_SaveList;
}

const MCPhysReg *AArch64RegisterInfo::getCalleeSavedRegsViaCopy(
    const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
      MF->getInfo<AArch64FunctionInfo>()->isSplitCSR())
    return CSR_Darwin_AArch64_CXX_TLS_ViaCopy_SaveList;
  return nullptr;
}

void AArch64RegisterInfo::UpdateCustomCalleeSavedRegs(
    MachineFunction &MF) const {
  const MCPhysReg *CSRs = getCalleeSavedRegs(&MF);
  SmallVector<MCPhysReg, 32> UpdatedCSRs;
  for (const MCPhysReg *I = CSRs; *I; ++I)
    UpdatedCSRs.push_back(*I);

  for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegCustomCalleeSaved(i)) {
      UpdatedCSRs.push_back(AArch64::GPR64commonRegClass.getRegister(i));
    }
  }
  // Register lists are zero-terminated.
  UpdatedCSRs.push_back(0);
  MF.getRegInfo().setCalleeSavedRegs(UpdatedCSRs);
}

const TargetRegisterClass *
AArch64RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
                                           unsigned Idx) const {
  // Edge case for the GPR/FPR register classes.
  if (RC == &AArch64::GPR32allRegClass && Idx == AArch64::hsub)
    return &AArch64::FPR32RegClass;
  else if (RC == &AArch64::GPR64allRegClass && Idx == AArch64::hsub)
    return &AArch64::FPR64RegClass;

  // Forward to TableGen's default version.
  return AArch64GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}

const uint32_t *
AArch64RegisterInfo::getDarwinCallPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  assert(MF.getSubtarget<AArch64Subtarget>().isTargetDarwin() &&
         "Invalid subtarget for getDarwinCallPreservedMask");

  if (CC == CallingConv::CXX_FAST_TLS)
    return CSR_Darwin_AArch64_CXX_TLS_RegMask;
  if (CC == CallingConv::AArch64_VectorCall)
    return CSR_Darwin_AArch64_AAVPCS_RegMask;
  if (CC == CallingConv::AArch64_SVE_VectorCall)
    report_fatal_error(
        "Calling convention SVE_VectorCall is unsupported on Darwin.");
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0)
    report_fatal_error(
        "Calling convention AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0 is "
        "unsupported on Darwin.");
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2)
    report_fatal_error(
        "Calling convention AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2 is "
        "unsupported on Darwin.");
  if (CC == CallingConv::CFGuard_Check)
    report_fatal_error(
        "Calling convention CFGuard_Check is unsupported on Darwin.");
  if (MF.getSubtarget<AArch64Subtarget>()
          .getTargetLowering()
          ->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return CSR_Darwin_AArch64_AAPCS_SwiftError_RegMask;
  if (CC == CallingConv::SwiftTail)
    return CSR_Darwin_AArch64_AAPCS_SwiftTail_RegMask;
  if (CC == CallingConv::PreserveMost)
    return CSR_Darwin_AArch64_RT_MostRegs_RegMask;
  if (CC == CallingConv::PreserveAll)
    return CSR_Darwin_AArch64_RT_AllRegs_RegMask;
  return CSR_Darwin_AArch64_AAPCS_RegMask;
}

const uint32_t *
AArch64RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  bool SCS = MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack);
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls.
    return SCS ? CSR_AArch64_NoRegs_SCS_RegMask : CSR_AArch64_NoRegs_RegMask;
  if (CC == CallingConv::AnyReg)
    return SCS ? CSR_AArch64_AllRegs_SCS_RegMask : CSR_AArch64_AllRegs_RegMask;

  // All the following calling conventions are handled differently on Darwin.
  if (MF.getSubtarget<AArch64Subtarget>().isTargetDarwin()) {
    if (SCS)
      report_fatal_error("ShadowCallStack attribute not supported on Darwin.");
    return getDarwinCallPreservedMask(MF, CC);
  }

  if (CC == CallingConv::AArch64_VectorCall)
    return SCS ? CSR_AArch64_AAVPCS_SCS_RegMask : CSR_AArch64_AAVPCS_RegMask;
  if (CC == CallingConv::AArch64_SVE_VectorCall)
    return SCS ? CSR_AArch64_SVE_AAPCS_SCS_RegMask
               : CSR_AArch64_SVE_AAPCS_RegMask;
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0)
    return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0_RegMask;
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2)
    return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2_RegMask;
  if (CC == CallingConv::CFGuard_Check)
    return CSR_Win_AArch64_CFGuard_Check_RegMask;
  if (MF.getSubtarget<AArch64Subtarget>().getTargetLowering()
          ->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return SCS ? CSR_AArch64_AAPCS_SwiftError_SCS_RegMask
               : CSR_AArch64_AAPCS_SwiftError_RegMask;
  if (CC == CallingConv::SwiftTail) {
    if (SCS)
      report_fatal_error(
          "ShadowCallStack attribute not supported with swifttail");
    return CSR_AArch64_AAPCS_SwiftTail_RegMask;
  }
  if (CC == CallingConv::PreserveMost)
    return SCS ? CSR_AArch64_RT_MostRegs_SCS_RegMask
               : CSR_AArch64_RT_MostRegs_RegMask;
  else if (CC == CallingConv::PreserveAll)
    return SCS ? CSR_AArch64_RT_AllRegs_SCS_RegMask
               : CSR_AArch64_RT_AllRegs_RegMask;
  else
    return SCS ? CSR_AArch64_AAPCS_SCS_RegMask : CSR_AArch64_AAPCS_RegMask;
}

const uint32_t *AArch64RegisterInfo::getCustomEHPadPreservedMask(
    const MachineFunction &MF) const {
  if (MF.getSubtarget<AArch64Subtarget>().isTargetLinux())
    return CSR_AArch64_AAPCS_RegMask;

  return nullptr;
}

const uint32_t *AArch64RegisterInfo::getTLSCallPreservedMask() const {
  if (TT.isOSDarwin())
    return CSR_Darwin_AArch64_TLS_RegMask;

  assert(TT.isOSBinFormatELF() && "Invalid target");
  return CSR_AArch64_TLS_ELF_RegMask;
}

void AArch64RegisterInfo::UpdateCustomCallPreservedMask(
    MachineFunction &MF, const uint32_t **Mask) const {
  uint32_t *UpdatedMask = MF.allocateRegMask();
  unsigned RegMaskSize = MachineOperand::getRegMaskSize(getNumRegs());
  memcpy(UpdatedMask, *Mask, sizeof(UpdatedMask[0]) * RegMaskSize);

  for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegCustomCalleeSaved(i)) {
      for (MCPhysReg SubReg :
           subregs_inclusive(AArch64::GPR64commonRegClass.getRegister(i))) {
        // See TargetRegisterInfo::getCallPreservedMask for how to interpret
        // the register mask.
        UpdatedMask[SubReg / 32] |= 1u << (SubReg % 32);
      }
    }
  }
  *Mask = UpdatedMask;
}
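
// Note on the mask encoding used above (illustrative): a regmask is a bit
// vector with one bit per physical register number, where a set bit means the
// register is preserved across the call. For a hypothetical register number
// 67, the update sets bit 67 % 32 == 3 of word 67 / 32 == 2.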

const uint32_t *AArch64RegisterInfo::getSMStartStopCallPreservedMask() const {
  return CSR_AArch64_SMStartStop_RegMask;
}

const uint32_t *
AArch64RegisterInfo::SMEABISupportRoutinesCallPreservedMaskFromX0() const {
  return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0_RegMask;
}

const uint32_t *AArch64RegisterInfo::getNoPreservedMask() const {
  return CSR_AArch64_NoRegs_RegMask;
}

const uint32_t *
AArch64RegisterInfo::getThisReturnPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  // This should return a register mask that is the same as that returned by
  // getCallPreservedMask but that additionally preserves the register used
  // for the first i64 argument (which must also be the register used to
  // return a single i64 return value).
  //
  // In case that the calling convention does not use the same register for
  // both, the function should return NULL (does not currently apply).
  assert(CC != CallingConv::GHC && "should not be GHC calling convention.");
  if (MF.getSubtarget<AArch64Subtarget>().isTargetDarwin())
    return CSR_Darwin_AArch64_AAPCS_ThisReturn_RegMask;
  return CSR_AArch64_AAPCS_ThisReturn_RegMask;
}
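
// Context (illustrative): this mask is used for calls whose first argument
// carries the 'returned' attribute, e.g. constructor-like functions that
// return 'this'. Because X0 is both the first i64 argument register and the
// i64 return register, the caller may keep using X0 after the call.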

const uint32_t *AArch64RegisterInfo::getWindowsStackProbePreservedMask() const {
  return CSR_AArch64_StackProbe_Windows_RegMask;
}

std::optional<std::string>
AArch64RegisterInfo::explainReservedReg(const MachineFunction &MF,
                                        MCRegister PhysReg) const {
  if (hasBasePointer(MF) && MCRegisterInfo::regsOverlap(PhysReg, AArch64::X19))
    return std::string("X19 is used as the frame base pointer register.");

  if (MF.getSubtarget<AArch64Subtarget>().isWindowsArm64EC()) {
    bool warn = false;
    if (MCRegisterInfo::regsOverlap(PhysReg, AArch64::X13) ||
        MCRegisterInfo::regsOverlap(PhysReg, AArch64::X14) ||
        MCRegisterInfo::regsOverlap(PhysReg, AArch64::X23) ||
        MCRegisterInfo::regsOverlap(PhysReg, AArch64::X24) ||
        MCRegisterInfo::regsOverlap(PhysReg, AArch64::X28))
      warn = true;

    for (unsigned i = AArch64::B16; i <= AArch64::B31; ++i)
      if (MCRegisterInfo::regsOverlap(PhysReg, i))
        warn = true;

    if (warn)
      return std::string(AArch64InstPrinter::getRegisterName(PhysReg)) +
             " is clobbered by asynchronous signals when using Arm64EC.";
  }

  return {};
}

BitVector
AArch64RegisterInfo::getStrictlyReservedRegs(const MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);

  // FIXME: avoid re-calculating this every time.
  BitVector Reserved(getNumRegs());
  markSuperRegs(Reserved, AArch64::WSP);
  markSuperRegs(Reserved, AArch64::WZR);

  if (TFI->hasFP(MF) || TT.isOSDarwin())
    markSuperRegs(Reserved, AArch64::W29);

  if (MF.getSubtarget<AArch64Subtarget>().isWindowsArm64EC()) {
    // x13, x14, x23, x24, x28, and v16-v31 are clobbered by asynchronous
    // signals, so we can't ever use them.
    markSuperRegs(Reserved, AArch64::W13);
    markSuperRegs(Reserved, AArch64::W14);
    markSuperRegs(Reserved, AArch64::W23);
    markSuperRegs(Reserved, AArch64::W24);
    markSuperRegs(Reserved, AArch64::W28);
    for (unsigned i = AArch64::B16; i <= AArch64::B31; ++i)
      markSuperRegs(Reserved, i);
  }

  for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegisterReserved(i))
      markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
  }

  if (hasBasePointer(MF))
    markSuperRegs(Reserved, AArch64::W19);

  // SLH uses register W16/X16 as the taint register.
  if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening))
    markSuperRegs(Reserved, AArch64::W16);

  // FFR is modelled as global state that cannot be allocated.
  if (MF.getSubtarget<AArch64Subtarget>().hasSVE())
    Reserved.set(AArch64::FFR);

  // SME tiles are not allocatable.
  if (MF.getSubtarget<AArch64Subtarget>().hasSME()) {
    for (MCPhysReg SubReg : subregs_inclusive(AArch64::ZA))
      Reserved.set(SubReg);
  }

  // VG cannot be allocated.
  Reserved.set(AArch64::VG);

  if (MF.getSubtarget<AArch64Subtarget>().hasSME2()) {
    for (MCSubRegIterator SubReg(AArch64::ZT0, this, /*self=*/true);
         SubReg.isValid(); ++SubReg)
      Reserved.set(*SubReg);
  }

  markSuperRegs(Reserved, AArch64::FPCR);
  markSuperRegs(Reserved, AArch64::FPSR);

  if (MF.getFunction().getCallingConv() == CallingConv::GRAAL) {
    markSuperRegs(Reserved, AArch64::X27);
    markSuperRegs(Reserved, AArch64::X28);
    markSuperRegs(Reserved, AArch64::W27);
    markSuperRegs(Reserved, AArch64::W28);
  }

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}

BitVector
AArch64RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved = getStrictlyReservedRegs(MF);

  for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegisterReservedForRA(i))
      markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
  }

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}
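
// Relationship note (summary, not upstream text): getReservedRegs() is
// getStrictlyReservedRegs() plus any X registers the subtarget reports
// through isXRegisterReservedForRA(); those are withheld from the register
// allocator without being strictly reserved for all other purposes.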

bool AArch64RegisterInfo::isReservedReg(const MachineFunction &MF,
                                        MCRegister Reg) const {
  return getReservedRegs(MF)[Reg];
}

bool AArch64RegisterInfo::isStrictlyReservedReg(const MachineFunction &MF,
                                                MCRegister Reg) const {
  return getStrictlyReservedRegs(MF)[Reg];
}

bool AArch64RegisterInfo::isAnyArgRegReserved(const MachineFunction &MF) const {
  return llvm::any_of(*AArch64::GPR64argRegClass.MC, [this, &MF](MCPhysReg r) {
    return isStrictlyReservedReg(MF, r);
  });
}

void AArch64RegisterInfo::emitReservedArgRegCallError(
    const MachineFunction &MF) const {
  const Function &F = MF.getFunction();
  F.getContext().diagnose(DiagnosticInfoUnsupported{
      F, "AArch64 doesn't support function calls if any of the argument "
         "registers is reserved."});
}

bool AArch64RegisterInfo::isAsmClobberable(const MachineFunction &MF,
                                           MCRegister PhysReg) const {
  // SLH uses register X16 as the taint register but it will fall back to a
  // different method if the user clobbers it. So X16 is not reserved for
  // inline asm but is for normal codegen.
  if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening) &&
      MCRegisterInfo::regsOverlap(PhysReg, AArch64::X16))
    return true;

  // ZA/ZT0 registers are reserved but may be permitted in the clobber list.
  if (PhysReg == AArch64::ZA || PhysReg == AArch64::ZT0)
    return true;

  return !isReservedReg(MF, PhysReg);
}

const TargetRegisterClass *
AArch64RegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                        unsigned Kind) const {
  return &AArch64::GPR64spRegClass;
}

const TargetRegisterClass *
AArch64RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &AArch64::CCRRegClass)
    return &AArch64::GPR64RegClass; // Only MSR & MRS copy NZCV.
  return RC;
}

unsigned AArch64RegisterInfo::getBaseRegister() const { return AArch64::X19; }

bool AArch64RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // In the presence of variable sized objects or funclets, if the fixed stack
  // size is large enough that referencing from the FP won't result in things
  // being in range relatively often, we can use a base pointer to allow access
  // from the other direction like the SP normally works.
  //
  // Furthermore, if variable sized objects are present and the stack needs to
  // be dynamically re-aligned, the base pointer is the only reliable way to
  // reference the locals.
  if (MFI.hasVarSizedObjects() || MF.hasEHFunclets()) {
    if (hasStackRealignment(MF))
      return true;

    auto &ST = MF.getSubtarget<AArch64Subtarget>();
    if (ST.hasSVE() || ST.isStreaming()) {
      const auto *AFI = MF.getInfo<AArch64FunctionInfo>();
      // Frames that have variable sized objects and scalable SVE objects
      // should always use a base pointer.
      if (!AFI->hasCalculatedStackSizeSVE() || AFI->getStackSizeSVE())
        return true;
    }

    // Conservatively estimate whether the negative offset from the frame
    // pointer will be sufficient to reach. If a function has a smallish
    // frame, it's less likely to have lots of spills and callee saved
    // space, so it's all more likely to be within range of the frame pointer.
    // If it's wrong, we'll materialize the constant and still get to the
    // object; it's just suboptimal. Negative offsets use the unscaled
    // load/store instructions, which have a 9-bit signed immediate.
    return MFI.getLocalFrameSize() >= 256;
  }

  return false;
}
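
// Worked detail (illustrative): the 256-byte threshold above matches the
// reach of the unscaled LDUR/STUR forms, whose signed 9-bit immediate covers
// [-256, 255] bytes; frames with a local area of 256 bytes or more are likely
// to need negative FP offsets outside that window.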

bool AArch64RegisterInfo::isArgumentRegister(const MachineFunction &MF,
                                             MCRegister Reg) const {
  CallingConv::ID CC = MF.getFunction().getCallingConv();
  const AArch64Subtarget &STI = MF.getSubtarget<AArch64Subtarget>();
  bool IsVarArg = STI.isCallingConvWin64(MF.getFunction().getCallingConv());

  auto HasReg = [](ArrayRef<MCRegister> RegList, MCRegister Reg) {
    return llvm::is_contained(RegList, Reg);
  };

  switch (CC) {
  default:
    report_fatal_error("Unsupported calling convention.");
  case CallingConv::GHC:
    return HasReg(CC_AArch64_GHC_ArgRegs, Reg);
  case CallingConv::C:
  case CallingConv::Fast:
  case CallingConv::PreserveMost:
  case CallingConv::PreserveAll:
  case CallingConv::CXX_FAST_TLS:
  case CallingConv::Swift:
  case CallingConv::SwiftTail:
  case CallingConv::Tail:
    if (STI.isTargetWindows()) {
      if (IsVarArg)
        return HasReg(CC_AArch64_Win64_VarArg_ArgRegs, Reg);
      switch (CC) {
      default:
        return HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
      case CallingConv::Swift:
      case CallingConv::SwiftTail:
        return HasReg(CC_AArch64_Win64PCS_Swift_ArgRegs, Reg) ||
               HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
      }
    }
    if (!STI.isTargetDarwin()) {
      switch (CC) {
      default:
        return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg);
      case CallingConv::Swift:
      case CallingConv::SwiftTail:
        return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg) ||
               HasReg(CC_AArch64_AAPCS_Swift_ArgRegs, Reg);
      }
    }
    if (!IsVarArg) {
      switch (CC) {
      default:
        return HasReg(CC_AArch64_DarwinPCS_ArgRegs, Reg);
      case CallingConv::Swift:
      case CallingConv::SwiftTail:
        return HasReg(CC_AArch64_DarwinPCS_ArgRegs, Reg) ||
               HasReg(CC_AArch64_DarwinPCS_Swift_ArgRegs, Reg);
      }
    }
    if (STI.isTargetILP32())
      return HasReg(CC_AArch64_DarwinPCS_ILP32_VarArg_ArgRegs, Reg);
    return HasReg(CC_AArch64_DarwinPCS_VarArg_ArgRegs, Reg);
  case CallingConv::Win64:
    if (IsVarArg)
      return HasReg(CC_AArch64_Win64_VarArg_ArgRegs, Reg);
    return HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
  case CallingConv::CFGuard_Check:
    return HasReg(CC_AArch64_Win64_CFGuard_Check_ArgRegs, Reg);
  case CallingConv::AArch64_VectorCall:
  case CallingConv::AArch64_SVE_VectorCall:
  case CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0:
  case CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2:
    if (STI.isTargetWindows())
      return HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
    return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg);
  }
}

Register
AArch64RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? AArch64::FP : AArch64::SP;
}

bool AArch64RegisterInfo::requiresRegisterScavenging(
    const MachineFunction &MF) const {
  return true;
}

bool AArch64RegisterInfo::requiresVirtualBaseRegisters(
    const MachineFunction &MF) const {
  return true;
}

bool
AArch64RegisterInfo::useFPForScavengingIndex(const MachineFunction &MF) const {
  // This function indicates whether the emergency spill slot should be placed
  // close to the beginning of the stack frame (closer to FP) or the end
  // (closer to SP).
  //
  // The beginning works most reliably if we have a frame pointer.
  // In the presence of any non-constant space between FP and locals
  // (e.g. in case of stack realignment or a scalable SVE area), it is
  // better to use SP or BP.
  const AArch64FrameLowering &TFI = *getFrameLowering(MF);
  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  assert((!MF.getSubtarget<AArch64Subtarget>().hasSVE() ||
          AFI->hasCalculatedStackSizeSVE()) &&
         "Expected SVE area to be calculated by this point");
  return TFI.hasFP(MF) && !hasStackRealignment(MF) && !AFI->getStackSizeSVE();
}

bool AArch64RegisterInfo::requiresFrameIndexScavenging(
    const MachineFunction &MF) const {
  return true;
}

bool
AArch64RegisterInfo::cannotEliminateFrame(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (MF.getTarget().Options.DisableFramePointerElim(MF) && MFI.adjustsStack())
    return true;
  return MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken();
}

/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool AArch64RegisterInfo::needsFrameBaseReg(MachineInstr *MI,
                                            int64_t Offset) const {
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i)
    assert(i < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand!");

  // It's the load/store FI references that cause issues, as it can be
  // difficult to materialize the offset if it won't fit in the literal field.
  // Estimate based on the size of the local frame and some conservative
  // assumptions about the rest of the stack frame (note, this is pre-regalloc,
  // so we don't know everything for certain yet) whether this offset is likely
  // to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  if (!MI->mayLoad() && !MI->mayStore())
    return false;

  // Without a virtual base register, if the function has variable sized
  // objects, all fixed-size local references will be via the frame pointer.
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all GPR callee-saved registers get pushed.
  // FP, LR, X19-X28, D8-D15. 64-bits each.
  int64_t FPOffset = Offset - 16 * 20;
  // Estimate an offset from the stack pointer.
  // The incoming offset is relating to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset += MFI.getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  // and pick a real one.
  Offset += 128; // 128 bytes of spill slots

  // If there is a frame pointer, try using it.
  // The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  if (TFI->hasFP(MF) && isFrameOffsetLegal(MI, AArch64::FP, FPOffset))
    return false;

  // If we can reference via the stack pointer or base pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (isFrameOffsetLegal(MI, AArch64::SP, Offset))
    return false;

  // If even offset 0 is illegal, we don't want a virtual base register.
  if (!isFrameOffsetLegal(MI, AArch64::SP, 0))
    return false;

  // The offset likely isn't legal; we want to allocate a virtual base
  // register.
  return true;
}
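
// Arithmetic note (editorial reading of the estimate above): the comment
// lists 20 registers (FP, LR, X19-X28, D8-D15) of 8 bytes each, i.e. 160
// bytes, while the code budgets 16 * 20 == 320 bytes; the 16-byte-per-register
// figure appears to be a conservative over-estimate.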

bool AArch64RegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                             Register BaseReg,
                                             int64_t Offset) const {
  assert(MI && "Unable to get the legal offset for nil instruction.");
  StackOffset SaveOffset = StackOffset::getFixed(Offset);
  return isAArch64FrameOffsetLegal(*MI, SaveOffset) & AArch64FrameOffsetIsLegal;
}

/// Insert defining instruction(s) for BaseReg to be a pointer to FrameIdx
/// at the beginning of the basic block.
Register
AArch64RegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                  int FrameIdx,
                                                  int64_t Offset) const {
  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL; // Defaults to "unknown".
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();
  const MachineFunction &MF = *MBB->getParent();
  const AArch64InstrInfo *TII =
      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
  const MCInstrDesc &MCID = TII->get(AArch64::ADDXri);
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  Register BaseReg = MRI.createVirtualRegister(&AArch64::GPR64spRegClass);
  MRI.constrainRegClass(BaseReg, TII->getRegClass(MCID, 0, this, MF));
  unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);

  BuildMI(*MBB, Ins, DL, MCID, BaseReg)
      .addFrameIndex(FrameIdx)
      .addImm(Offset)
      .addImm(Shifter);

  return BaseReg;
}
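
// Effect (illustrative): the code above emits, at the top of MBB, roughly
//   %BaseReg:gpr64sp = ADDXri <fi#FrameIdx>, Offset, 0
// i.e. BaseReg = address of the frame slot plus Offset with an LSL #0
// shifter; later references are rewritten against BaseReg via
// resolveFrameIndex() below.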

void AArch64RegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                            int64_t Offset) const {
  // AArch64 doesn't need the general 64-bit offsets.
  StackOffset Off = StackOffset::getFixed(Offset);

  unsigned i = 0;
  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  const MachineFunction *MF = MI.getParent()->getParent();
  const AArch64InstrInfo *TII =
      MF->getSubtarget<AArch64Subtarget>().getInstrInfo();
  bool Done = rewriteAArch64FrameIndex(MI, i, BaseReg, Off, TII);
  assert(Done && "Unable to resolve frame index!");
  (void)Done;
}

// Create a scratch register for the frame index elimination in an instruction.
// This function has special handling of stack tagging loop pseudos, in which
// case it can also change the instruction opcode.
static Register
createScratchRegisterForInstruction(MachineInstr &MI, unsigned FIOperandNum,
                                    const AArch64InstrInfo *TII) {
  // ST*Gloop have a reserved scratch register in operand 1. Use it, and also
  // replace the instruction with the writeback variant because it will now
  // satisfy the operand constraints for it.
  Register ScratchReg;
  if (MI.getOpcode() == AArch64::STGloop ||
      MI.getOpcode() == AArch64::STZGloop) {
    assert(FIOperandNum == 3 &&
           "Wrong frame index operand for STGloop/STZGloop");
    unsigned Op = MI.getOpcode() == AArch64::STGloop ? AArch64::STGloop_wback
                                                     : AArch64::STZGloop_wback;
    ScratchReg = MI.getOperand(1).getReg();
    MI.getOperand(3).ChangeToRegister(ScratchReg, false, false, true);
    MI.setDesc(TII->get(Op));
    MI.tieOperands(1, 3);
  } else {
    ScratchReg =
        MI.getMF()->getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
    MI.getOperand(FIOperandNum)
        .ChangeToRegister(ScratchReg, false, false, true);
  }
  return ScratchReg;
}

void AArch64RegisterInfo::getOffsetOpcodes(
    const StackOffset &Offset, SmallVectorImpl<uint64_t> &Ops) const {
  // The smallest scalable element supported by scaled SVE addressing modes is
  // a predicate, which is 2 scalable bytes in size. So the scalable byte
  // offset must always be a multiple of 2.
  assert(Offset.getScalable() % 2 == 0 && "Invalid frame offset");

  // Add fixed-sized offset using existing DIExpression interface.
  DIExpression::appendOffset(Ops, Offset.getFixed());

  unsigned VG = getDwarfRegNum(AArch64::VG, true);
  int64_t VGSized = Offset.getScalable() / 2;
  if (VGSized > 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(VGSized);
    Ops.append({dwarf::DW_OP_bregx, VG, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_plus);
  } else if (VGSized < 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(-VGSized);
    Ops.append({dwarf::DW_OP_bregx, VG, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_minus);
  }
}
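
// Example (illustrative): a StackOffset of (fixed = 16, scalable = 8) becomes
//   DW_OP_plus_uconst 16, DW_OP_constu 4, DW_OP_bregx VG 0, DW_OP_mul,
//   DW_OP_plus
// i.e. 16 + 4 * VG bytes, because one "scalable byte" corresponds to VG/2
// machine bytes (VG being the runtime number of 64-bit granules in a
// scalable vector).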

bool AArch64RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                              int SPAdj, unsigned FIOperandNum,
                                              RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const AArch64InstrInfo *TII =
      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  bool Tagged =
      MI.getOperand(FIOperandNum).getTargetFlags() & AArch64II::MO_TAGGED;
  Register FrameReg;

  // Special handling of dbg_value, stackmap, patchpoint, and statepoint
  // instructions.
  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT ||
      MI.getOpcode() == TargetOpcode::STATEPOINT) {
    StackOffset Offset =
        TFI->resolveFrameIndexReference(MF, FrameIndex, FrameReg,
                                        /*PreferFP=*/true,
                                        /*ForSimm=*/false);
    Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false /*isDef*/);
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset.getFixed());
    return false;
  }

  if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE) {
    MachineOperand &FI = MI.getOperand(FIOperandNum);
    StackOffset Offset = TFI->getNonLocalFrameIndexReference(MF, FrameIndex);
    assert(!Offset.getScalable() &&
           "Frame offsets with a scalable component are not supported");
    FI.ChangeToImmediate(Offset.getFixed());
    return false;
  }

  StackOffset Offset;
  if (MI.getOpcode() == AArch64::TAGPstack) {
    // TAGPstack must use the virtual frame register in its 3rd operand.
    const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
    FrameReg = MI.getOperand(3).getReg();
    Offset = StackOffset::getFixed(MFI.getObjectOffset(FrameIndex) +
                                   AFI->getTaggedBasePointerOffset());
  } else if (Tagged) {
    StackOffset SPOffset = StackOffset::getFixed(
        MFI.getObjectOffset(FrameIndex) + (int64_t)MFI.getStackSize());
    if (MFI.hasVarSizedObjects() ||
        isAArch64FrameOffsetLegal(MI, SPOffset, nullptr, nullptr, nullptr) !=
            (AArch64FrameOffsetCanUpdate | AArch64FrameOffsetIsLegal)) {
      // Can't update to SP + offset in place. Precalculate the tagged pointer
      // in a scratch register.
      Offset = TFI->resolveFrameIndexReference(
          MF, FrameIndex, FrameReg, /*PreferFP=*/false, /*ForSimm=*/true);
      Register ScratchReg =
          MF.getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
      emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset,
                      TII);
      BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(AArch64::LDG), ScratchReg)
          .addReg(ScratchReg)
          .addReg(ScratchReg)
          .addImm(0);
      MI.getOperand(FIOperandNum)
          .ChangeToRegister(ScratchReg, false, false, true);
      return false;
    }
    FrameReg = AArch64::SP;
    Offset = StackOffset::getFixed(MFI.getObjectOffset(FrameIndex) +
                                   (int64_t)MFI.getStackSize());
  } else {
    Offset = TFI->resolveFrameIndexReference(
        MF, FrameIndex, FrameReg, /*PreferFP=*/false, /*ForSimm=*/true);
  }

  // Modify MI as necessary to handle as much of 'Offset' as possible.
  if (rewriteAArch64FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII))
    return true;

  assert((!RS || !RS->isScavengingFrameIndex(FrameIndex)) &&
         "Emergency spill slot is out of reach");

  // If we get here, the immediate doesn't fit into the instruction. We folded
  // as much as possible above. Handle the rest, providing a register that is
  // SP+LargeImm.
  Register ScratchReg =
      createScratchRegisterForInstruction(MI, FIOperandNum, TII);
  emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset,
                  TII);
  return false;
}

unsigned AArch64RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                                  MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);

  switch (RC->getID()) {
  default:
    return 0;
  case AArch64::GPR32RegClassID:
  case AArch64::GPR32spRegClassID:
  case AArch64::GPR32allRegClassID:
  case AArch64::GPR64spRegClassID:
  case AArch64::GPR64allRegClassID:
  case AArch64::GPR64RegClassID:
  case AArch64::GPR32commonRegClassID:
  case AArch64::GPR64commonRegClassID:
    return 32 - 1                                // XZR/SP
           - (TFI->hasFP(MF) || TT.isOSDarwin()) // FP
           - MF.getSubtarget<AArch64Subtarget>().getNumXRegisterReserved()
           - hasBasePointer(MF); // X19
  case AArch64::FPR8RegClassID:
  case AArch64::FPR16RegClassID:
  case AArch64::FPR32RegClassID:
  case AArch64::FPR64RegClassID:
  case AArch64::FPR128RegClassID:
    return 32;

  case AArch64::MatrixIndexGPR32_8_11RegClassID:
  case AArch64::MatrixIndexGPR32_12_15RegClassID:
    return 4;

  case AArch64::DDRegClassID:
  case AArch64::DDDRegClassID:
  case AArch64::DDDDRegClassID:
  case AArch64::QQRegClassID:
  case AArch64::QQQRegClassID:
  case AArch64::QQQQRegClassID:
    return 32;

  case AArch64::FPR128_loRegClassID:
  case AArch64::FPR64_loRegClassID:
  case AArch64::FPR16_loRegClassID:
    return 16;
  case AArch64::FPR128_0to7RegClassID:
    return 8;
  }
}
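
// Worked example (illustrative): for GPR64 in a Darwin function with a frame
// pointer, a base pointer, and no reserved X registers, the limit is
// 32 - 1 (XZR/SP) - 1 (FP) - 0 - 1 (X19) = 29.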

unsigned AArch64RegisterInfo::getLocalAddressRegister(
    const MachineFunction &MF) const {
  const auto &MFI = MF.getFrameInfo();
  if (!MF.hasEHFunclets() && !MFI.hasVarSizedObjects())
    return AArch64::SP;
  else if (hasStackRealignment(MF))
    return getBaseRegister();
  return getFrameRegister(MF);
}

/// SrcRC and DstRC will be morphed into NewRC if this returns true.
bool AArch64RegisterInfo::shouldCoalesce(
    MachineInstr *MI, const TargetRegisterClass *SrcRC, unsigned SubReg,
    const TargetRegisterClass *DstRC, unsigned DstSubReg,
    const TargetRegisterClass *NewRC, LiveIntervals &LIS) const {
  MachineRegisterInfo &MRI = MI->getMF()->getRegInfo();

  if (MI->isCopy() &&
      ((DstRC->getID() == AArch64::GPR64RegClassID) ||
       (DstRC->getID() == AArch64::GPR64commonRegClassID)) &&
      MI->getOperand(0).getSubReg() && MI->getOperand(1).getSubReg())
    // Do not coalesce in the case of a 32-bit subregister copy
    // which implements a 32 to 64 bit zero extension
    // which relies on the upper 32 bits being zeroed.
    return false;

  auto IsCoalescerBarrier = [](const MachineInstr &MI) {
    switch (MI.getOpcode()) {
    case AArch64::COALESCER_BARRIER_FPR16:
    case AArch64::COALESCER_BARRIER_FPR32:
    case AArch64::COALESCER_BARRIER_FPR64:
    case AArch64::COALESCER_BARRIER_FPR128:
      return true;
    default:
      return false;
    }
  };

  // For calls that temporarily have to toggle streaming mode as part of the
  // call-sequence, we need to be more careful when coalescing copy
  // instructions so that we don't end up coalescing the NEON/FP result or
  // argument register with a whole Z-register, such that after coalescing the
  // register allocator will try to spill/reload the entire Z register.
  //
  // We do this by checking if the node has any defs/uses that are
  // COALESCER_BARRIER pseudos. These are 'nops' in practice, but they exist
  // to instruct the coalescer to avoid coalescing the copy.
  if (MI->isCopy() && SubReg != DstSubReg &&
      (AArch64::ZPRRegClass.hasSubClassEq(DstRC) ||
       AArch64::ZPRRegClass.hasSubClassEq(SrcRC))) {
    unsigned SrcReg = MI->getOperand(1).getReg();
    if (any_of(MRI.def_instructions(SrcReg), IsCoalescerBarrier))
      return false;
    unsigned DstReg = MI->getOperand(0).getReg();
    if (any_of(MRI.use_nodbg_instructions(DstReg), IsCoalescerBarrier))
      return false;
  }

  return true;
}

bool AArch64RegisterInfo::shouldAnalyzePhysregInMachineLoopInfo(
    MCRegister R) const {
  return R == AArch64::VG;
}