//===- AArch64RegisterInfo.cpp - AArch64 Register Information ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetRegisterInfo
// class.
//
//===----------------------------------------------------------------------===//

#include "AArch64RegisterInfo.h"
#include "AArch64FrameLowering.h"
#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64InstPrinter.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/LiveRegMatrix.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/TargetParser/Triple.h"
#include <cstring>

using namespace llvm;

#define GET_CC_REGISTER_LISTS
#include "AArch64GenCallingConv.inc"
#define GET_REGINFO_TARGET_DESC
#include "AArch64GenRegisterInfo.inc"

AArch64RegisterInfo::AArch64RegisterInfo(const Triple &TT, unsigned HwMode)
    : AArch64GenRegisterInfo(AArch64::LR, 0, 0, 0, HwMode), TT(TT) {
  AArch64_MC::initLLVMToCVRegMapping(this);
}

/// Return whether the register needs a CFI entry. Not all unwinders may know
/// about SVE registers, so we assume the lowest common denominator, i.e. the
/// callee-saves required by the base ABI. For the SVE registers z8-z15 only
/// the lower 64 bits (d8-d15) need to be saved. The lower 64-bit subreg is
/// returned in \p RegToUseForCFI.
bool AArch64RegisterInfo::regNeedsCFI(MCRegister Reg,
                                      MCRegister &RegToUseForCFI) const {
  if (AArch64::PPRRegClass.contains(Reg))
    return false;

  if (AArch64::ZPRRegClass.contains(Reg)) {
    RegToUseForCFI = getSubReg(Reg, AArch64::dsub);
    for (int I = 0; CSR_AArch64_AAPCS_SaveList[I]; ++I) {
      if (CSR_AArch64_AAPCS_SaveList[I] == RegToUseForCFI)
        return true;
    }
    return false;
  }

  RegToUseForCFI = Reg;
  return true;
}

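/// Return the callee-saved register list for this function, chosen by calling
/// convention and by target OS (Darwin, Windows, or ELF), defaulting to the
/// AAPCS list.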
const MCPhysReg *
AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");

  auto &AFI = *MF->getInfo<AArch64FunctionInfo>();
  const auto &F = MF->getFunction();
  const auto *TLI = MF->getSubtarget<AArch64Subtarget>().getTargetLowering();
  const bool Darwin = MF->getSubtarget<AArch64Subtarget>().isTargetDarwin();
  const bool Windows = MF->getSubtarget<AArch64Subtarget>().isTargetWindows();

  if (TLI->supportSwiftError() &&
      F.getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
    if (Darwin)
      return CSR_Darwin_AArch64_AAPCS_SwiftError_SaveList;
    if (Windows)
      return CSR_Win_AArch64_AAPCS_SwiftError_SaveList;
    return CSR_AArch64_AAPCS_SwiftError_SaveList;
  }

  switch (F.getCallingConv()) {
  case CallingConv::GHC:
    // The GHC set of callee-saved registers is empty, as all those registers
    // are used for passing STG registers around.
    return CSR_AArch64_NoRegs_SaveList;

  case CallingConv::PreserveNone:
    // FIXME: Windows likely needs this to be altered for proper unwinding.
    return CSR_AArch64_NoneRegs_SaveList;

  case CallingConv::AnyReg:
    return CSR_AArch64_AllRegs_SaveList;

  case CallingConv::ARM64EC_Thunk_X64:
    return CSR_Win_AArch64_Arm64EC_Thunk_SaveList;

  case CallingConv::PreserveMost:
    if (Darwin)
      return CSR_Darwin_AArch64_RT_MostRegs_SaveList;
    if (Windows)
      return CSR_Win_AArch64_RT_MostRegs_SaveList;
    return CSR_AArch64_RT_MostRegs_SaveList;

  case CallingConv::PreserveAll:
    if (Darwin)
      return CSR_Darwin_AArch64_RT_AllRegs_SaveList;
    if (Windows)
      return CSR_Win_AArch64_RT_AllRegs_SaveList;
    return CSR_AArch64_RT_AllRegs_SaveList;

  case CallingConv::CFGuard_Check:
    if (Darwin)
      report_fatal_error(
          "Calling convention CFGuard_Check is unsupported on Darwin.");
    return CSR_Win_AArch64_CFGuard_Check_SaveList;

  case CallingConv::SwiftTail:
    if (Darwin)
      return CSR_Darwin_AArch64_AAPCS_SwiftTail_SaveList;
    if (Windows)
      return CSR_Win_AArch64_AAPCS_SwiftTail_SaveList;
    return CSR_AArch64_AAPCS_SwiftTail_SaveList;

  case CallingConv::AArch64_VectorCall:
    if (Darwin)
      return CSR_Darwin_AArch64_AAVPCS_SaveList;
    if (Windows)
      return CSR_Win_AArch64_AAVPCS_SaveList;
    return CSR_AArch64_AAVPCS_SaveList;

  case CallingConv::AArch64_SVE_VectorCall:
    if (Darwin)
      report_fatal_error(
          "Calling convention SVE_VectorCall is unsupported on Darwin.");
    if (Windows)
      return CSR_Win_AArch64_SVE_AAPCS_SaveList;
    return CSR_AArch64_SVE_AAPCS_SaveList;

  case CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0:
    report_fatal_error(
        "Calling convention "
        "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0 is only "
        "supported to improve calls to SME ACLE save/restore/disable-za "
        "functions, and is not intended to be used beyond that scope.");

  case CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1:
    report_fatal_error(
        "Calling convention "
        "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1 is "
        "only supported to improve calls to SME ACLE __arm_get_current_vg "
        "function, and is not intended to be used beyond that scope.");

  case CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2:
    report_fatal_error(
        "Calling convention "
        "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2 is "
        "only supported to improve calls to SME ACLE __arm_sme_state "
        "and is not intended to be used beyond that scope.");

  case CallingConv::Win64:
    if (Darwin)
      return CSR_Darwin_AArch64_AAPCS_Win64_SaveList;
    if (Windows)
      return CSR_Win_AArch64_AAPCS_SaveList;
    return CSR_AArch64_AAPCS_X18_SaveList;

  case CallingConv::CXX_FAST_TLS:
    if (Darwin)
      return AFI.isSplitCSR() ? CSR_Darwin_AArch64_CXX_TLS_PE_SaveList
                              : CSR_Darwin_AArch64_CXX_TLS_SaveList;
    // FIXME: this likely should be a `report_fatal_error` condition; however,
    // that would be a departure from the previously implemented behaviour.
    [[fallthrough]];

  default:
    if (Darwin)
      return AFI.hasSVE_AAPCS(*MF) ? CSR_Darwin_AArch64_SVE_AAPCS_SaveList
                                   : CSR_Darwin_AArch64_AAPCS_SaveList;
    if (Windows)
      return AFI.hasSVE_AAPCS(*MF) ? CSR_Win_AArch64_SVE_AAPCS_SaveList
                                   : CSR_Win_AArch64_AAPCS_SaveList;
    return AFI.hasSVE_AAPCS(*MF) ? CSR_AArch64_SVE_AAPCS_SaveList
                                 : CSR_AArch64_AAPCS_SaveList;
  }
}

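/// Registers saved via copies to virtual registers instead of on the stack;
/// only non-null for Darwin CXX_FAST_TLS functions using split CSR.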
const MCPhysReg *AArch64RegisterInfo::getCalleeSavedRegsViaCopy(
    const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
      MF->getInfo<AArch64FunctionInfo>()->isSplitCSR())
    return CSR_Darwin_AArch64_CXX_TLS_ViaCopy_SaveList;
  return nullptr;
}

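/// Rebuild the function's callee-saved register list, appending any X
/// registers marked callee-saved through the +call-saved-x# subtarget
/// features.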
void AArch64RegisterInfo::UpdateCustomCalleeSavedRegs(
    MachineFunction &MF) const {
  const MCPhysReg *CSRs = getCalleeSavedRegs(&MF);
  SmallVector<MCPhysReg, 32> UpdatedCSRs;
  for (const MCPhysReg *I = CSRs; *I; ++I)
    UpdatedCSRs.push_back(*I);

  for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegCustomCalleeSaved(i)) {
      UpdatedCSRs.push_back(AArch64::GPR64commonRegClass.getRegister(i));
    }
  }
  // Register lists are zero-terminated.
  UpdatedCSRs.push_back(0);
  MF.getRegInfo().setCalleeSavedRegs(UpdatedCSRs);
}

const TargetRegisterClass *
AArch64RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
                                           unsigned Idx) const {
  // Edge cases for the GPR/FPR register classes.
  if (RC == &AArch64::GPR32allRegClass && Idx == AArch64::hsub)
    return &AArch64::FPR32RegClass;
  else if (RC == &AArch64::GPR64allRegClass && Idx == AArch64::hsub)
    return &AArch64::FPR64RegClass;

  // Forward to TableGen's default version.
  return AArch64GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}

const uint32_t *
AArch64RegisterInfo::getDarwinCallPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  assert(MF.getSubtarget<AArch64Subtarget>().isTargetDarwin() &&
         "Invalid subtarget for getDarwinCallPreservedMask");

  if (CC == CallingConv::CXX_FAST_TLS)
    return CSR_Darwin_AArch64_CXX_TLS_RegMask;
  if (CC == CallingConv::AArch64_VectorCall)
    return CSR_Darwin_AArch64_AAVPCS_RegMask;
  if (CC == CallingConv::AArch64_SVE_VectorCall)
    return CSR_Darwin_AArch64_SVE_AAPCS_RegMask;
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0)
    return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0_RegMask;
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1)
    return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1_RegMask;
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2)
    return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2_RegMask;
  if (CC == CallingConv::CFGuard_Check)
    report_fatal_error(
        "Calling convention CFGuard_Check is unsupported on Darwin.");
  if (MF.getSubtarget<AArch64Subtarget>()
          .getTargetLowering()
          ->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return CSR_Darwin_AArch64_AAPCS_SwiftError_RegMask;
  if (CC == CallingConv::SwiftTail)
    return CSR_Darwin_AArch64_AAPCS_SwiftTail_RegMask;
  if (CC == CallingConv::PreserveMost)
    return CSR_Darwin_AArch64_RT_MostRegs_RegMask;
  if (CC == CallingConv::PreserveAll)
    return CSR_Darwin_AArch64_RT_AllRegs_RegMask;
  return CSR_Darwin_AArch64_AAPCS_RegMask;
}

const uint32_t *
AArch64RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  bool SCS = MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack);
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls.
    return SCS ? CSR_AArch64_NoRegs_SCS_RegMask : CSR_AArch64_NoRegs_RegMask;
  if (CC == CallingConv::PreserveNone)
    return SCS ? CSR_AArch64_NoneRegs_SCS_RegMask
               : CSR_AArch64_NoneRegs_RegMask;
  if (CC == CallingConv::AnyReg)
    return SCS ? CSR_AArch64_AllRegs_SCS_RegMask : CSR_AArch64_AllRegs_RegMask;

  // All the following calling conventions are handled differently on Darwin.
  if (MF.getSubtarget<AArch64Subtarget>().isTargetDarwin()) {
    if (SCS)
      report_fatal_error("ShadowCallStack attribute not supported on Darwin.");
    return getDarwinCallPreservedMask(MF, CC);
  }

  if (CC == CallingConv::AArch64_VectorCall)
    return SCS ? CSR_AArch64_AAVPCS_SCS_RegMask : CSR_AArch64_AAVPCS_RegMask;
  if (CC == CallingConv::AArch64_SVE_VectorCall)
    return SCS ? CSR_AArch64_SVE_AAPCS_SCS_RegMask
               : CSR_AArch64_SVE_AAPCS_RegMask;
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0)
    return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0_RegMask;
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1)
    return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1_RegMask;
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2)
    return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2_RegMask;
  if (CC == CallingConv::CFGuard_Check)
    return CSR_Win_AArch64_CFGuard_Check_RegMask;
  if (MF.getSubtarget<AArch64Subtarget>().getTargetLowering()
          ->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return SCS ? CSR_AArch64_AAPCS_SwiftError_SCS_RegMask
               : CSR_AArch64_AAPCS_SwiftError_RegMask;
  if (CC == CallingConv::SwiftTail) {
    if (SCS)
      report_fatal_error(
          "ShadowCallStack attribute not supported with swifttail");
    return CSR_AArch64_AAPCS_SwiftTail_RegMask;
  }
  if (CC == CallingConv::PreserveMost)
    return SCS ? CSR_AArch64_RT_MostRegs_SCS_RegMask
               : CSR_AArch64_RT_MostRegs_RegMask;
  if (CC == CallingConv::PreserveAll)
    return SCS ? CSR_AArch64_RT_AllRegs_SCS_RegMask
               : CSR_AArch64_RT_AllRegs_RegMask;

  return SCS ? CSR_AArch64_AAPCS_SCS_RegMask : CSR_AArch64_AAPCS_RegMask;
}

const uint32_t *AArch64RegisterInfo::getCustomEHPadPreservedMask(
    const MachineFunction &MF) const {
  if (MF.getSubtarget<AArch64Subtarget>().isTargetLinux())
    return CSR_AArch64_AAPCS_RegMask;

  return nullptr;
}

const uint32_t *AArch64RegisterInfo::getTLSCallPreservedMask() const {
  if (TT.isOSDarwin())
    return CSR_Darwin_AArch64_TLS_RegMask;

  assert(TT.isOSBinFormatELF() && "Invalid target");
  return CSR_AArch64_TLS_ELF_RegMask;
}

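/// Widen an existing call-preserved mask so that it also preserves the custom
/// callee-saved X registers, including all of their sub-registers.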
void AArch64RegisterInfo::UpdateCustomCallPreservedMask(
    MachineFunction &MF, const uint32_t **Mask) const {
  uint32_t *UpdatedMask = MF.allocateRegMask();
  unsigned RegMaskSize = MachineOperand::getRegMaskSize(getNumRegs());
  memcpy(UpdatedMask, *Mask, sizeof(UpdatedMask[0]) * RegMaskSize);

  for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegCustomCalleeSaved(i)) {
      for (MCPhysReg SubReg :
           subregs_inclusive(AArch64::GPR64commonRegClass.getRegister(i))) {
        // See TargetRegisterInfo::getCallPreservedMask for how to interpret
        // the register mask.
        UpdatedMask[SubReg / 32] |= 1u << (SubReg % 32);
      }
    }
  }
  *Mask = UpdatedMask;
}

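/// Mask of registers preserved across SMSTART/SMSTOP (streaming-mode
/// transition) calls.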
const uint32_t *AArch64RegisterInfo::getSMStartStopCallPreservedMask() const {
  return CSR_AArch64_SMStartStop_RegMask;
}

const uint32_t *
AArch64RegisterInfo::SMEABISupportRoutinesCallPreservedMaskFromX0() const {
  return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0_RegMask;
}

const uint32_t *AArch64RegisterInfo::getNoPreservedMask() const {
  return CSR_AArch64_NoRegs_RegMask;
}

const uint32_t *
AArch64RegisterInfo::getThisReturnPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  // This should return a register mask that is the same as that returned by
  // getCallPreservedMask but that additionally preserves the register used for
  // the first i64 argument (which must also be the register used to return a
  // single i64 return value).
  //
  // In case the calling convention does not use the same register for both,
  // the function should return nullptr (this does not currently apply).
  assert(CC != CallingConv::GHC && "should not be GHC calling convention.");
  if (MF.getSubtarget<AArch64Subtarget>().isTargetDarwin())
    return CSR_Darwin_AArch64_AAPCS_ThisReturn_RegMask;
  return CSR_AArch64_AAPCS_ThisReturn_RegMask;
}

const uint32_t *AArch64RegisterInfo::getWindowsStackProbePreservedMask() const {
  return CSR_AArch64_StackProbe_Windows_RegMask;
}

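/// Explain why PhysReg is unavailable, to improve diagnostics when inline asm
/// names a reserved register.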
std::optional<std::string>
AArch64RegisterInfo::explainReservedReg(const MachineFunction &MF,
                                        MCRegister PhysReg) const {
  if (hasBasePointer(MF) && MCRegisterInfo::regsOverlap(PhysReg, AArch64::X19))
    return std::string("X19 is used as the frame base pointer register.");

  if (MF.getSubtarget<AArch64Subtarget>().isWindowsArm64EC()) {
    bool warn = false;
    if (MCRegisterInfo::regsOverlap(PhysReg, AArch64::X13) ||
        MCRegisterInfo::regsOverlap(PhysReg, AArch64::X14) ||
        MCRegisterInfo::regsOverlap(PhysReg, AArch64::X23) ||
        MCRegisterInfo::regsOverlap(PhysReg, AArch64::X24) ||
        MCRegisterInfo::regsOverlap(PhysReg, AArch64::X28))
      warn = true;

    for (unsigned i = AArch64::B16; i <= AArch64::B31; ++i)
      if (MCRegisterInfo::regsOverlap(PhysReg, i))
        warn = true;

    if (warn)
      return std::string(AArch64InstPrinter::getRegisterName(PhysReg)) +
             " is clobbered by asynchronous signals when using Arm64EC.";
  }

  return {};
}

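/// Registers that can never be allocated, independent of user overrides:
/// SP/ZR, the frame pointer when it is reserved, Arm64EC signal-clobbered
/// registers, the SLH taint register, and SVE/SME global state such as FFR,
/// ZA, ZT0, and VG.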
BitVector
AArch64RegisterInfo::getStrictlyReservedRegs(const MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);

  BitVector Reserved(getNumRegs());
  markSuperRegs(Reserved, AArch64::WSP);
  markSuperRegs(Reserved, AArch64::WZR);

  if (TFI->isFPReserved(MF))
    markSuperRegs(Reserved, AArch64::W29);

  if (MF.getSubtarget<AArch64Subtarget>().isWindowsArm64EC()) {
    // x13, x14, x23, x24, x28, and v16-v31 are clobbered by asynchronous
    // signals, so we can't ever use them.
    markSuperRegs(Reserved, AArch64::W13);
    markSuperRegs(Reserved, AArch64::W14);
    markSuperRegs(Reserved, AArch64::W23);
    markSuperRegs(Reserved, AArch64::W24);
    markSuperRegs(Reserved, AArch64::W28);
    for (unsigned i = AArch64::B16; i <= AArch64::B31; ++i)
      markSuperRegs(Reserved, i);
  }

  if (MF.getSubtarget<AArch64Subtarget>().isLFI()) {
    markSuperRegs(Reserved, AArch64::W28);
    markSuperRegs(Reserved, AArch64::W27);
    markSuperRegs(Reserved, AArch64::W26);
    markSuperRegs(Reserved, AArch64::W25);
    if (!MF.getProperties().hasNoVRegs()) {
      markSuperRegs(Reserved, AArch64::LR);
      markSuperRegs(Reserved, AArch64::W30);
    }
  }

  for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegisterReserved(i))
      markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
  }

  if (hasBasePointer(MF))
    markSuperRegs(Reserved, AArch64::W19);

  // SLH uses register W16/X16 as the taint register.
  if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening))
    markSuperRegs(Reserved, AArch64::W16);

  // FFR is modelled as global state that cannot be allocated.
  if (MF.getSubtarget<AArch64Subtarget>().hasSVE())
    Reserved.set(AArch64::FFR);

  // SME tiles are not allocatable.
  if (MF.getSubtarget<AArch64Subtarget>().hasSME()) {
    for (MCPhysReg SubReg : subregs_inclusive(AArch64::ZA))
      Reserved.set(SubReg);
  }

  // VG cannot be allocated.
  Reserved.set(AArch64::VG);

  if (MF.getSubtarget<AArch64Subtarget>().hasSME2()) {
    for (MCSubRegIterator SubReg(AArch64::ZT0, this, /*self=*/true);
         SubReg.isValid(); ++SubReg)
      Reserved.set(*SubReg);
  }

  markSuperRegs(Reserved, AArch64::FPCR);
  markSuperRegs(Reserved, AArch64::FPMR);
  markSuperRegs(Reserved, AArch64::FPSR);

  if (MF.getFunction().getCallingConv() == CallingConv::GRAAL) {
    markSuperRegs(Reserved, AArch64::X27);
    markSuperRegs(Reserved, AArch64::X28);
    markSuperRegs(Reserved, AArch64::W27);
    markSuperRegs(Reserved, AArch64::W28);
  }

  assert(checkAllSuperRegsMarked(Reserved));

  // Add _HI registers after checkAllSuperRegsMarked as this check otherwise
  // becomes considerably more expensive.
  Reserved.set(AArch64::WSP_HI);
  Reserved.set(AArch64::WZR_HI);
  static_assert(AArch64::W30_HI - AArch64::W0_HI == 30,
                "Unexpected order of registers");
  Reserved.set(AArch64::W0_HI, AArch64::W30_HI);
  static_assert(AArch64::B31_HI - AArch64::B0_HI == 31,
                "Unexpected order of registers");
  Reserved.set(AArch64::B0_HI, AArch64::B31_HI);
  static_assert(AArch64::H31_HI - AArch64::H0_HI == 31,
                "Unexpected order of registers");
  Reserved.set(AArch64::H0_HI, AArch64::H31_HI);
  static_assert(AArch64::S31_HI - AArch64::S0_HI == 31,
                "Unexpected order of registers");
  Reserved.set(AArch64::S0_HI, AArch64::S31_HI);
  static_assert(AArch64::D31_HI - AArch64::D0_HI == 31,
                "Unexpected order of registers");
  Reserved.set(AArch64::D0_HI, AArch64::D31_HI);
  static_assert(AArch64::Q31_HI - AArch64::Q0_HI == 31,
                "Unexpected order of registers");
  Reserved.set(AArch64::Q0_HI, AArch64::Q31_HI);

  return Reserved;
}

BitVector
AArch64RegisterInfo::getUserReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
    // ReserveXRegister is set for registers manually reserved
    // through +reserve-x#i.
    if (MF.getSubtarget<AArch64Subtarget>().isXRegisterReserved(i))
      markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
  }
  return Reserved;
}

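/// The complete reserved set: registers withheld from the register allocator
/// plus everything in getStrictlyReservedRegs().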
BitVector
AArch64RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegisterReservedForRA(i))
      markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
  }

  if (MF.getSubtarget<AArch64Subtarget>().isLRReservedForRA()) {
    // In order to prevent the register allocator from using LR, we need to
    // mark it as reserved. However, we don't want to keep it reserved
    // throughout the pipeline, since that prevents other infrastructure from
    // reasoning about its liveness. We use the NoVRegs property instead of
    // IsSSA because IsSSA is removed before VirtRegRewriter runs.
    if (!MF.getProperties().hasNoVRegs())
      // Reserve LR (X30) by marking it from its subregister W30, because
      // otherwise the register allocator could clobber the subregister.
      markSuperRegs(Reserved, AArch64::W30);
  }

  assert(checkAllSuperRegsMarked(Reserved));

  // Handle strictlyReservedRegs separately to avoid re-evaluating the assert,
  // which becomes considerably more expensive when considering the _HI
  // registers.
  Reserved |= getStrictlyReservedRegs(MF);

  return Reserved;
}

bool AArch64RegisterInfo::isReservedReg(const MachineFunction &MF,
                                        MCRegister Reg) const {
  return getReservedRegs(MF)[Reg];
}

bool AArch64RegisterInfo::isUserReservedReg(const MachineFunction &MF,
                                            MCRegister Reg) const {
  return getUserReservedRegs(MF)[Reg];
}

bool AArch64RegisterInfo::isStrictlyReservedReg(const MachineFunction &MF,
                                                MCRegister Reg) const {
  return getStrictlyReservedRegs(MF)[Reg];
}

bool AArch64RegisterInfo::isAnyArgRegReserved(const MachineFunction &MF) const {
  for (size_t i = 0; i < AArch64::GPR64argRegClass.getNumRegs(); ++i) {
    if (isStrictlyReservedReg(MF, AArch64::GPR64argRegClass.getRegister(i)))
      return true;
  }
  return false;
}

void AArch64RegisterInfo::emitReservedArgRegCallError(
    const MachineFunction &MF) const {
  const Function &F = MF.getFunction();
  F.getContext().diagnose(DiagnosticInfoUnsupported{
      F, "AArch64 doesn't support function calls if any of the argument "
         "registers is reserved."});
}

bool AArch64RegisterInfo::isAsmClobberable(const MachineFunction &MF,
                                           MCRegister PhysReg) const {
  // SLH uses register X16 as the taint register, but will fall back to a
  // different method if the user clobbers it. So X16 is not reserved for
  // inline asm but is for normal codegen.
  if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening) &&
      MCRegisterInfo::regsOverlap(PhysReg, AArch64::X16))
    return true;

  // ZA/ZT0 registers are reserved but may be permitted in the clobber list.
  if (PhysReg == AArch64::ZA || PhysReg == AArch64::ZT0)
    return true;

  return !isReservedReg(MF, PhysReg);
}

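/// Pointer values live in 64-bit GPRs that may include SP, hence GPR64sp.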
const TargetRegisterClass *
AArch64RegisterInfo::getPointerRegClass(unsigned Kind) const {
  return &AArch64::GPR64spRegClass;
}

const TargetRegisterClass *
AArch64RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &AArch64::CCRRegClass)
    return &AArch64::GPR64RegClass; // Only MSR & MRS copy NZCV.
  return RC;
}

MCRegister AArch64RegisterInfo::getBaseRegister() const { return AArch64::X19; }

bool AArch64RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // In the presence of variable sized objects or funclets, if the fixed stack
  // size is large enough that referencing from the FP won't result in things
  // being in range relatively often, we can use a base pointer to allow access
  // from the other direction like the SP normally works.
  //
  // Furthermore, if variable sized objects are present and the stack needs to
  // be dynamically re-aligned, the base pointer is the only reliable way to
  // reference the locals.
  if (MFI.hasVarSizedObjects() || MF.hasEHFunclets()) {
    if (hasStackRealignment(MF))
      return true;

    auto &ST = MF.getSubtarget<AArch64Subtarget>();
    const auto *AFI = MF.getInfo<AArch64FunctionInfo>();
    if (ST.hasSVE() || ST.isStreaming()) {
      // Frames that have variable sized objects and scalable SVE objects
      // should always use a base pointer.
      if (!AFI->hasCalculatedStackSizeSVE() || AFI->hasSVEStackSize())
        return true;
    }

    // Frames with hazard padding can have a large offset between the frame
    // pointer and GPR locals, which includes the emergency spill slot. If the
    // emergency spill slot is not within range of the load/store instructions
    // (which have a signed 9-bit range), we will fail to compile if it is
    // used. Since hasBasePointer() is called before we know if we have hazard
    // padding or an emergency spill slot, we need to enable the base pointer
    // conservatively.
    if (ST.getStreamingHazardSize() &&
        !AFI->getSMEFnAttrs().hasNonStreamingInterfaceAndBody()) {
      return true;
    }

    // Conservatively estimate whether the negative offset from the frame
    // pointer will be sufficient to reach. If a function has a smallish
    // frame, it's less likely to have lots of spills and callee saved
    // space, so it's all more likely to be within range of the frame pointer.
    // If it's wrong, we'll materialize the constant and still get to the
    // object; it's just suboptimal. Negative offsets use the unscaled
    // load/store instructions, which have a 9-bit signed immediate.
    return MFI.getLocalFrameSize() >= 256;
  }

  return false;
}

bool AArch64RegisterInfo::isArgumentRegister(const MachineFunction &MF,
                                             MCRegister Reg) const {
  const AArch64Subtarget &STI = MF.getSubtarget<AArch64Subtarget>();
  CallingConv::ID CC = MF.getFunction().getCallingConv();
  bool IsVarArg = STI.isCallingConvWin64(MF.getFunction().getCallingConv(),
                                         MF.getFunction().isVarArg());

  auto HasReg = [](ArrayRef<MCRegister> RegList, MCRegister Reg) {
    return llvm::is_contained(RegList, Reg);
  };

  switch (CC) {
  default:
    report_fatal_error("Unsupported calling convention.");
  case CallingConv::GHC:
    return HasReg(CC_AArch64_GHC_ArgRegs, Reg);
  case CallingConv::PreserveNone:
    if (!MF.getFunction().isVarArg())
      return HasReg(CC_AArch64_Preserve_None_ArgRegs, Reg);
    [[fallthrough]];
  case CallingConv::C:
  case CallingConv::Fast:
  case CallingConv::PreserveMost:
  case CallingConv::PreserveAll:
  case CallingConv::CXX_FAST_TLS:
  case CallingConv::Swift:
  case CallingConv::SwiftTail:
  case CallingConv::Tail:
    if (STI.isTargetWindows()) {
      if (IsVarArg)
        return HasReg(CC_AArch64_Win64_VarArg_ArgRegs, Reg);
      switch (CC) {
      default:
        return HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
      case CallingConv::Swift:
      case CallingConv::SwiftTail:
        return HasReg(CC_AArch64_Win64PCS_Swift_ArgRegs, Reg) ||
               HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
      }
    }
    if (!STI.isTargetDarwin()) {
      switch (CC) {
      default:
        return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg);
      case CallingConv::Swift:
      case CallingConv::SwiftTail:
        return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg) ||
               HasReg(CC_AArch64_AAPCS_Swift_ArgRegs, Reg);
      }
    }
    if (!IsVarArg) {
      switch (CC) {
      default:
        return HasReg(CC_AArch64_DarwinPCS_ArgRegs, Reg);
      case CallingConv::Swift:
      case CallingConv::SwiftTail:
        return HasReg(CC_AArch64_DarwinPCS_ArgRegs, Reg) ||
               HasReg(CC_AArch64_DarwinPCS_Swift_ArgRegs, Reg);
      }
    }
    if (STI.isTargetILP32())
      return HasReg(CC_AArch64_DarwinPCS_ILP32_VarArg_ArgRegs, Reg);
    return HasReg(CC_AArch64_DarwinPCS_VarArg_ArgRegs, Reg);
  case CallingConv::Win64:
    if (IsVarArg)
      return HasReg(CC_AArch64_Win64_VarArg_ArgRegs, Reg);
    return HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
  case CallingConv::CFGuard_Check:
    return HasReg(CC_AArch64_Win64_CFGuard_Check_ArgRegs, Reg);
  case CallingConv::AArch64_VectorCall:
  case CallingConv::AArch64_SVE_VectorCall:
  case CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0:
  case CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1:
  case CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2:
    if (STI.isTargetWindows())
      return HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
    return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg);
  }
}

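/// The register used to address stack objects: FP when the function has a
/// frame pointer, otherwise SP.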
Register
AArch64RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? AArch64::FP : AArch64::SP;
}

bool AArch64RegisterInfo::requiresRegisterScavenging(
    const MachineFunction &MF) const {
  return true;
}

bool AArch64RegisterInfo::requiresVirtualBaseRegisters(
    const MachineFunction &MF) const {
  return true;
}

bool
AArch64RegisterInfo::useFPForScavengingIndex(const MachineFunction &MF) const {
  // This function indicates whether the emergency spillslot should be placed
  // close to the beginning of the stackframe (closer to FP) or the end
  // (closer to SP).
  //
  // The beginning works most reliably if we have a frame pointer.
  // In the presence of any non-constant space between FP and locals,
  // (e.g. in case of stack realignment or a scalable SVE area), it is
  // better to use SP or BP.
  const AArch64FrameLowering &TFI = *getFrameLowering(MF);
  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  assert((!MF.getSubtarget<AArch64Subtarget>().hasSVE() ||
          AFI->hasCalculatedStackSizeSVE()) &&
         "Expected SVE area to be calculated by this point");
  return TFI.hasFP(MF) && !hasStackRealignment(MF) &&
         !AFI->hasSVEStackSize() && !AFI->hasStackHazardSlotIndex();
}

bool AArch64RegisterInfo::requiresFrameIndexScavenging(
    const MachineFunction &MF) const {
  return true;
}

bool
AArch64RegisterInfo::cannotEliminateFrame(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (MF.getTarget().Options.DisableFramePointerElim(MF) && MFI.adjustsStack())
    return true;
  return MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken();
}

/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool AArch64RegisterInfo::needsFrameBaseReg(MachineInstr *MI,
                                            int64_t Offset) const {
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i)
    assert(i < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand!");

  // It's the load/store FI references that cause issues, as it can be
  // difficult to materialize the offset if it won't fit in the literal field.
  // Estimate based on the size of the local frame and some conservative
  // assumptions about the rest of the stack frame (note, this is pre-regalloc,
  // so we don't know everything for certain yet) whether this offset is likely
  // to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  if (!MI->mayLoad() && !MI->mayStore())
    return false;

  // Without a virtual base register, if the function has variable sized
  // objects, all fixed-size local references will be via the frame pointer.
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all GPR callee-saved registers get pushed.
  // FP, LR, X19-X28, D8-D15. 64 bits each.
  int64_t FPOffset = Offset - 16 * 20;
  // Estimate an offset from the stack pointer.
  // The incoming offset is relative to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset += MFI.getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  //        and pick a real one.
  Offset += 128; // 128 bytes of spill slots

  // If there is a frame pointer, try using it.
  // The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  if (TFI->hasFP(MF) && isFrameOffsetLegal(MI, AArch64::FP, FPOffset))
    return false;

  // If we can reference via the stack pointer or base pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (isFrameOffsetLegal(MI, AArch64::SP, Offset))
    return false;

  // If even offset 0 is illegal, we don't want a virtual base register.
  if (!isFrameOffsetLegal(MI, AArch64::SP, 0))
    return false;

  // The offset likely isn't legal; we want to allocate a virtual base
  // register.
  return true;
}

bool AArch64RegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                             Register BaseReg,
                                             int64_t Offset) const {
  assert(MI && "Unable to get the legal offset for nil instruction.");
  StackOffset SaveOffset = StackOffset::getFixed(Offset);
  return isAArch64FrameOffsetLegal(*MI, SaveOffset) & AArch64FrameOffsetIsLegal;
}

/// Insert defining instruction(s) for BaseReg to be a pointer to FrameIdx
/// at the beginning of the basic block.
Register
AArch64RegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                  int FrameIdx,
                                                  int64_t Offset) const {
  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL; // Defaults to "unknown".
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();
  const MachineFunction &MF = *MBB->getParent();
  const AArch64InstrInfo *TII =
      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
  const MCInstrDesc &MCID = TII->get(AArch64::ADDXri);
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  Register BaseReg = MRI.createVirtualRegister(&AArch64::GPR64spRegClass);
  MRI.constrainRegClass(BaseReg, TII->getRegClass(MCID, 0));
  unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);

  BuildMI(*MBB, Ins, DL, MCID, BaseReg)
      .addFrameIndex(FrameIdx)
      .addImm(Offset)
      .addImm(Shifter);

  return BaseReg;
}

void AArch64RegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                            int64_t Offset) const {
  // AArch64 doesn't need the general 64-bit offsets.
  StackOffset Off = StackOffset::getFixed(Offset);

  unsigned i = 0;
  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  const MachineFunction *MF = MI.getParent()->getParent();
  const AArch64InstrInfo *TII =
      MF->getSubtarget<AArch64Subtarget>().getInstrInfo();
  bool Done = rewriteAArch64FrameIndex(MI, i, BaseReg, Off, TII);
  assert(Done && "Unable to resolve frame index!");
  (void)Done;
}

// Create a scratch register for the frame index elimination in an
// instruction. This function has special handling of stack tagging loop
// pseudos, in which case it can also change the instruction opcode.
static Register
createScratchRegisterForInstruction(MachineInstr &MI, unsigned FIOperandNum,
                                    const AArch64InstrInfo *TII) {
  // ST*Gloop have a reserved scratch register in operand 1. Use it, and also
  // replace the instruction with the writeback variant because it will now
  // satisfy the operand constraints for it.
  Register ScratchReg;
  if (MI.getOpcode() == AArch64::STGloop ||
      MI.getOpcode() == AArch64::STZGloop) {
    assert(FIOperandNum == 3 &&
           "Wrong frame index operand for STGloop/STZGloop");
    unsigned Op = MI.getOpcode() == AArch64::STGloop ? AArch64::STGloop_wback
                                                     : AArch64::STZGloop_wback;
    ScratchReg = MI.getOperand(1).getReg();
    MI.getOperand(3).ChangeToRegister(ScratchReg, false, false, true);
    MI.setDesc(TII->get(Op));
    MI.tieOperands(1, 3);
  } else {
    ScratchReg =
        MI.getMF()->getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
    MI.getOperand(FIOperandNum)
        .ChangeToRegister(ScratchReg, false, false, true);
  }
  return ScratchReg;
}

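/// Lower a StackOffset into DWARF expression operations. The fixed part is a
/// plain offset; the scalable part is emitted as (scalable bytes / 2) * VG,
/// reading VG with DW_OP_bregx.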
void AArch64RegisterInfo::getOffsetOpcodes(
    const StackOffset &Offset, SmallVectorImpl<uint64_t> &Ops) const {
  // The smallest scalable element supported by scaled SVE addressing
  // modes are predicates, which are 2 scalable bytes in size. So the scalable
  // byte offset must always be a multiple of 2.
  assert(Offset.getScalable() % 2 == 0 && "Invalid frame offset");

  // Add the fixed-sized offset using the existing DIExpression interface.
  DIExpression::appendOffset(Ops, Offset.getFixed());

  unsigned VG = getDwarfRegNum(AArch64::VG, true);
  int64_t VGSized = Offset.getScalable() / 2;
  if (VGSized > 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(VGSized);
    Ops.append({dwarf::DW_OP_bregx, VG, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_plus);
  } else if (VGSized < 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(-VGSized);
    Ops.append({dwarf::DW_OP_bregx, VG, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_minus);
  }
}

bool AArch64RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                              int SPAdj, unsigned FIOperandNum,
                                              RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const AArch64InstrInfo *TII =
      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  bool Tagged =
      MI.getOperand(FIOperandNum).getTargetFlags() & AArch64II::MO_TAGGED;
  Register FrameReg;

  // Special handling of dbg_value, stackmap, patchpoint, and statepoint
  // instructions.
  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT ||
      MI.getOpcode() == TargetOpcode::STATEPOINT) {
    StackOffset Offset =
        TFI->resolveFrameIndexReference(MF, FrameIndex, FrameReg,
                                        /*PreferFP=*/true,
                                        /*ForSimm=*/false);
    Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false /*isDef*/);
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset.getFixed());
    return false;
  }

  if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE) {
    MachineOperand &FI = MI.getOperand(FIOperandNum);
    StackOffset Offset = TFI->getNonLocalFrameIndexReference(MF, FrameIndex);
    assert(!Offset.getScalable() &&
           "Frame offsets with a scalable component are not supported");
    FI.ChangeToImmediate(Offset.getFixed());
    return false;
  }

  StackOffset Offset;
  if (MI.getOpcode() == AArch64::TAGPstack) {
    // TAGPstack must use the virtual frame register in its 3rd operand.
    const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
    FrameReg = MI.getOperand(3).getReg();
    Offset = StackOffset::getFixed(MFI.getObjectOffset(FrameIndex) +
                                   AFI->getTaggedBasePointerOffset());
  } else if (Tagged) {
    StackOffset SPOffset = StackOffset::getFixed(
        MFI.getObjectOffset(FrameIndex) + (int64_t)MFI.getStackSize());
    if (MFI.hasVarSizedObjects() ||
        isAArch64FrameOffsetLegal(MI, SPOffset, nullptr, nullptr, nullptr) !=
            (AArch64FrameOffsetCanUpdate | AArch64FrameOffsetIsLegal)) {
      // Can't update to SP + offset in place. Precalculate the tagged pointer
      // in a scratch register.
      Offset = TFI->resolveFrameIndexReference(
          MF, FrameIndex, FrameReg, /*PreferFP=*/false, /*ForSimm=*/true);
      Register ScratchReg =
          MF.getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
      emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset,
                      TII);
      BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(AArch64::LDG), ScratchReg)
          .addReg(ScratchReg)
          .addReg(ScratchReg)
          .addImm(0);
      MI.getOperand(FIOperandNum)
          .ChangeToRegister(ScratchReg, false, false, true);
      return false;
    }
    FrameReg = AArch64::SP;
    Offset = StackOffset::getFixed(MFI.getObjectOffset(FrameIndex) +
                                   (int64_t)MFI.getStackSize());
  } else {
    Offset = TFI->resolveFrameIndexReference(
        MF, FrameIndex, FrameReg, /*PreferFP=*/false, /*ForSimm=*/true);
  }

  // Modify MI as necessary to handle as much of 'Offset' as possible.
  if (rewriteAArch64FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII))
    return true;

  assert((!RS || !RS->isScavengingFrameIndex(FrameIndex)) &&
         "Emergency spill slot is out of reach");

  // If we get here, the immediate doesn't fit into the instruction. We folded
  // as much as possible above. Handle the rest, providing a register that is
  // SP+LargeImm.
  Register ScratchReg =
      createScratchRegisterForInstruction(MI, FIOperandNum, TII);
  emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset, TII);
  return false;
}

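/// Per-class register pressure limits for the scheduler. The GPR count
/// subtracts XZR/SP, the frame pointer when reserved, any user-reserved X
/// registers, and the base pointer.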
unsigned AArch64RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                                  MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);

  switch (RC->getID()) {
  default:
    return 0;
  case AArch64::GPR32RegClassID:
  case AArch64::GPR32spRegClassID:
  case AArch64::GPR32allRegClassID:
  case AArch64::GPR64spRegClassID:
  case AArch64::GPR64allRegClassID:
  case AArch64::GPR64RegClassID:
  case AArch64::GPR32commonRegClassID:
  case AArch64::GPR64commonRegClassID:
    return 32 - 1                                // XZR/SP
           - (TFI->hasFP(MF) || TT.isOSDarwin()) // FP
           - MF.getSubtarget<AArch64Subtarget>().getNumXRegisterReserved()
           - hasBasePointer(MF); // X19
  case AArch64::FPR8RegClassID:
  case AArch64::FPR16RegClassID:
  case AArch64::FPR32RegClassID:
  case AArch64::FPR64RegClassID:
  case AArch64::FPR128RegClassID:
    return 32;

  case AArch64::MatrixIndexGPR32_8_11RegClassID:
  case AArch64::MatrixIndexGPR32_12_15RegClassID:
    return 4;

  case AArch64::DDRegClassID:
  case AArch64::DDDRegClassID:
  case AArch64::DDDDRegClassID:
  case AArch64::QQRegClassID:
  case AArch64::QQQRegClassID:
  case AArch64::QQQQRegClassID:
    return 32;

  case AArch64::FPR128_loRegClassID:
  case AArch64::FPR64_loRegClassID:
  case AArch64::FPR16_loRegClassID:
    return 16;
  case AArch64::FPR128_0to7RegClassID:
    return 8;
  }
}

// We add regalloc hints for different cases:
// * Choosing a better destination operand for predicated SVE instructions
//   where the inactive lanes are undef, by choosing a register that is not
//   unique to the other operands of the instruction.
//
// * Improving register allocation for SME multi-vector instructions where we
//   can benefit from the strided and contiguous register multi-vector tuples.
//
// Here FORM_TRANSPOSED_REG_TUPLE nodes are created to improve register
// allocation where a consecutive multi-vector tuple is constructed from the
// same indices of multiple strided loads. This may still result in
// unnecessary copies between the loads and the tuple. Here we try to return a
// hint to assign the contiguous ZPRMulReg starting at the same register as
// the first operand of the pseudo, which should be a subregister of the first
// strided load.
//
// For example, if the first strided load has been assigned $z16_z20_z24_z28
// and the operands of the pseudo are each accessing subregister zsub2, we
// should look through Order to find a contiguous register which begins with
// $z24 (i.e. $z24_z25_z26_z27).
bool AArch64RegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order,
    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  auto &ST = MF.getSubtarget<AArch64Subtarget>();
  const AArch64InstrInfo *TII =
      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // For predicated SVE instructions where the inactive lanes are undef,
  // pick a destination register that is not unique to avoid introducing
  // a movprfx.
  const TargetRegisterClass *RegRC = MRI.getRegClass(VirtReg);
  if (AArch64::ZPRRegClass.hasSubClassEq(RegRC)) {
    bool ConsiderOnlyHints = TargetRegisterInfo::getRegAllocationHints(
        VirtReg, Order, Hints, MF, VRM);

    for (const MachineOperand &DefOp : MRI.def_operands(VirtReg)) {
      const MachineInstr &Def = *DefOp.getParent();
      if (DefOp.isImplicit() ||
          (TII->get(Def.getOpcode()).TSFlags & AArch64::FalseLanesMask) !=
              AArch64::FalseLanesUndef)
        continue;

      unsigned InstFlags =
          TII->get(AArch64::getSVEPseudoMap(Def.getOpcode())).TSFlags;

      for (MCPhysReg R : Order) {
        auto AddHintIfSuitable = [&](MCPhysReg R,
                                     const MachineOperand &MO) -> bool {
          // R is a suitable register hint if R can reuse one of the other
          // source operands.
          if (VRM->getPhys(MO.getReg()) != R)
            return false;
          Hints.push_back(R);
          return true;
        };

        switch (InstFlags & AArch64::DestructiveInstTypeMask) {
        default:
          break;
        case AArch64::DestructiveTernaryCommWithRev:
          AddHintIfSuitable(R, Def.getOperand(2)) ||
              AddHintIfSuitable(R, Def.getOperand(3)) ||
              AddHintIfSuitable(R, Def.getOperand(4));
          break;
        case AArch64::DestructiveBinaryComm:
        case AArch64::DestructiveBinaryCommWithRev:
          AddHintIfSuitable(R, Def.getOperand(2)) ||
              AddHintIfSuitable(R, Def.getOperand(3));
          break;
        case AArch64::DestructiveBinary:
        case AArch64::DestructiveBinaryImm:
          AddHintIfSuitable(R, Def.getOperand(2));
          break;
        case AArch64::DestructiveUnaryPassthru:
          AddHintIfSuitable(R, Def.getOperand(3));
          break;
        case AArch64::DestructiveUnary:
          AddHintIfSuitable(R, Def.getOperand(1));
          break;
        }
      }
    }

    if (Hints.size())
      return ConsiderOnlyHints;
  }

  if (!ST.hasSME() || !ST.isStreaming())
    return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF,
                                                     VRM);

  // The SVE calling convention preserves registers Z8-Z23. As a result, there
  // are no ZPR2Strided or ZPR4Strided registers that do not overlap with the
  // callee-saved registers and so by default these will be pushed to the back
  // of the allocation order for the ZPRStridedOrContiguous classes.
  // If any of the instructions which define VirtReg are used by the
  // FORM_TRANSPOSED_REG_TUPLE pseudo, we want to favour reducing copy
  // instructions over reducing the number of clobbered callee-save registers,
  // so we add the strided registers as a hint.
  unsigned RegID = RegRC->getID();
  if (RegID == AArch64::ZPR2StridedOrContiguousRegClassID ||
      RegID == AArch64::ZPR4StridedOrContiguousRegClassID) {

    // Look through uses of the register for FORM_TRANSPOSED_REG_TUPLE.
    for (const MachineInstr &Use : MRI.use_nodbg_instructions(VirtReg)) {
      if (Use.getOpcode() != AArch64::FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO &&
          Use.getOpcode() != AArch64::FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO)
        continue;

      unsigned UseOps = Use.getNumOperands() - 1;
      const TargetRegisterClass *StridedRC;
      switch (RegID) {
      case AArch64::ZPR2StridedOrContiguousRegClassID:
        StridedRC = &AArch64::ZPR2StridedRegClass;
        break;
      case AArch64::ZPR4StridedOrContiguousRegClassID:
        StridedRC = &AArch64::ZPR4StridedRegClass;
        break;
      default:
        llvm_unreachable("Unexpected RegID");
      }

      SmallVector<MCPhysReg, 4> StridedOrder;
      for (MCPhysReg Reg : Order)
        if (StridedRC->contains(Reg))
          StridedOrder.push_back(Reg);

      int OpIdx = Use.findRegisterUseOperandIdx(VirtReg, this);
      assert(OpIdx != -1 && "Expected operand index from register use.");

      unsigned TupleID = MRI.getRegClass(Use.getOperand(0).getReg())->getID();
      bool IsMulZPR = TupleID == AArch64::ZPR2Mul2RegClassID ||
                      TupleID == AArch64::ZPR4Mul4RegClassID;

      const MachineOperand *AssignedRegOp = llvm::find_if(
          make_range(Use.operands_begin() + 1, Use.operands_end()),
          [&VRM](const MachineOperand &Op) {
            return VRM->hasPhys(Op.getReg());
          });

      // Example:
      //
      // When trying to find a suitable register allocation for VirtReg %v2
      // in:
      //
      //   %v0:zpr2stridedorcontiguous = ld1 p0/z, [...]
      //   %v1:zpr2stridedorcontiguous = ld1 p0/z, [...]
      //   %v2:zpr2stridedorcontiguous = ld1 p0/z, [...]
      //   %v3:zpr2stridedorcontiguous = ld1 p0/z, [...]
      //   %v4:zpr4mul4 = FORM_TRANSPOSED_X4 %v0:0, %v1:0, %v2:0, %v3:0
      //
      // One such suitable allocation would be:
      //
      //   { z0, z8 }  = ld1 p0/z, [...]
      //   { z1, z9 }  = ld1 p0/z, [...]
      //   { z2, z10 } = ld1 p0/z, [...]
      //   { z3, z11 } = ld1 p0/z, [...]
      //   { z0, z1, z2, z3 } =
      //       FORM_TRANSPOSED_X4 {z0, z8}:0, {z1, z9}:0, {z2, z10}:0,
      //                          {z3, z11}:0
      //
      // Below we distinguish two cases when trying to find a register:
      // * None of the registers used by FORM_TRANSPOSED_X4 have been assigned
      //   yet. In this case the code must ensure that there are at least
      //   UseOps free consecutive registers. If IsMulZPR is true, then the
      //   first of the registers must also be a multiple of UseOps, e.g.
      //   { z0, z1, z2, z3 } is valid but { z1, z2, z3, z5 } is not.
      // * One or more of the registers used by FORM_TRANSPOSED_X4 is already
      //   assigned a physical register, which means only checking that a
      //   consecutive range of free tuple registers exists which includes
      //   the assigned register.
      //   e.g. in the example above, if { z0, z8 } is already allocated for
      //   %v0, we just need to ensure that { z1, z9 }, { z2, z10 } and
      //   { z3, z11 } are also free. If so, we add { z2, z10 }.

      if (AssignedRegOp == Use.operands_end()) {
        // There are no registers already assigned to any of the pseudo
        // operands. Look for a valid starting register for the group.
        for (unsigned I = 0; I < StridedOrder.size(); ++I) {
          MCPhysReg Reg = StridedOrder[I];

          // If the FORM_TRANSPOSE nodes use the ZPRMul classes, the starting
          // register of the first load should be a multiple of 2 or 4.
          unsigned SubRegIdx = Use.getOperand(OpIdx).getSubReg();
          if (IsMulZPR && (getSubReg(Reg, SubRegIdx) - AArch64::Z0) % UseOps !=
                              ((unsigned)OpIdx - 1))
            continue;

          // In the example above, if VirtReg is the third operand of the
          // tuple (%v2) and Reg == Z2_Z10, then we need to make sure that
          // Z0_Z8, Z1_Z9 and Z3_Z11 are also available.
          auto IsFreeConsecutiveReg = [&](unsigned UseOp) {
            unsigned R = Reg - (OpIdx - 1) + UseOp;
            return StridedRC->contains(R) &&
                   (UseOp == 0 ||
                    ((getSubReg(R, AArch64::zsub0) - AArch64::Z0) ==
                     (getSubReg(R - 1, AArch64::zsub0) - AArch64::Z0) + 1)) &&
                   !Matrix->isPhysRegUsed(R);
          };
          if (all_of(iota_range<unsigned>(0U, UseOps, /*Inclusive=*/false),
                     IsFreeConsecutiveReg))
            Hints.push_back(Reg);
        }
      } else {
        // At least one operand already has a physical register assigned.
        // Find the starting sub-register of this and use it to work out the
        // correct strided register to suggest based on the current op index.
        MCPhysReg TargetStartReg =
            getSubReg(VRM->getPhys(AssignedRegOp->getReg()), AArch64::zsub0) +
            (OpIdx - AssignedRegOp->getOperandNo());

        for (unsigned I = 0; I < StridedOrder.size(); ++I)
          if (getSubReg(StridedOrder[I], AArch64::zsub0) == TargetStartReg)
            Hints.push_back(StridedOrder[I]);
      }

      if (!Hints.empty())
        return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints,
                                                         MF, VRM);
    }
  }

  for (MachineInstr &MI : MRI.def_instructions(VirtReg)) {
    if (MI.getOpcode() != AArch64::FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO &&
        MI.getOpcode() != AArch64::FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO)
      return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints,
                                                       MF, VRM);

    unsigned FirstOpSubReg = MI.getOperand(1).getSubReg();
    switch (FirstOpSubReg) {
    case AArch64::zsub0:
    case AArch64::zsub1:
    case AArch64::zsub2:
    case AArch64::zsub3:
      break;
    default:
      continue;
    }

    // Look up the physical register mapped to the first operand of the
    // pseudo.
    Register FirstOpVirtReg = MI.getOperand(1).getReg();
    if (!VRM->hasPhys(FirstOpVirtReg))
      continue;

    MCRegister TupleStartReg =
        getSubReg(VRM->getPhys(FirstOpVirtReg), FirstOpSubReg);
    for (unsigned I = 0; I < Order.size(); ++I)
      if (MCRegister R = getSubReg(Order[I], AArch64::zsub0))
        if (R == TupleStartReg)
          Hints.push_back(Order[I]);
  }

  return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF,
                                                   VRM);
}

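/// The register used to address local variables: SP when there are no
/// funclets or variable-sized objects, otherwise the base or frame register.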
unsigned AArch64RegisterInfo::getLocalAddressRegister(
    const MachineFunction &MF) const {
  const auto &MFI = MF.getFrameInfo();
  if (!MF.hasEHFunclets() && !MFI.hasVarSizedObjects())
    return AArch64::SP;
  else if (hasStackRealignment(MF))
    return getBaseRegister();
  return getFrameRegister(MF);
}

/// SrcRC and DstRC will be morphed into NewRC if this returns true.
bool AArch64RegisterInfo::shouldCoalesce(
    MachineInstr *MI, const TargetRegisterClass *SrcRC, unsigned SubReg,
    const TargetRegisterClass *DstRC, unsigned DstSubReg,
    const TargetRegisterClass *NewRC, LiveIntervals &LIS) const {
  MachineFunction &MF = *MI->getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (MI->isSubregToReg() && MRI.subRegLivenessEnabled() &&
      MF.getSubtarget<AArch64Subtarget>().enableSRLTSubregToRegMitigation())
    return false;

  if (MI->isCopy() &&
      ((DstRC->getID() == AArch64::GPR64RegClassID) ||
       (DstRC->getID() == AArch64::GPR64commonRegClassID)) &&
      MI->getOperand(0).getSubReg() && MI->getOperand(1).getSubReg())
    // Do not coalesce in the case of a 32-bit subregister copy
    // which implements a 32 to 64 bit zero extension
    // which relies on the upper 32 bits being zeroed.
    return false;

  auto IsCoalescerBarrier = [](const MachineInstr &MI) {
    switch (MI.getOpcode()) {
    case AArch64::COALESCER_BARRIER_FPR16:
    case AArch64::COALESCER_BARRIER_FPR32:
    case AArch64::COALESCER_BARRIER_FPR64:
    case AArch64::COALESCER_BARRIER_FPR128:
      return true;
    default:
      return false;
    }
  };

  // For calls that temporarily have to toggle streaming mode as part of the
  // call-sequence, we need to be more careful when coalescing copy
  // instructions so that we don't end up coalescing the NEON/FP result or
  // argument register with a whole Z-register, such that after coalescing
  // the register allocator will try to spill/reload the entire Z register.
  //
  // We do this by checking if the node has any defs/uses that are
  // COALESCER_BARRIER pseudos. These are 'nops' in practice, but they exist
  // to instruct the coalescer to avoid coalescing the copy.
  if (MI->isCopy() && SubReg != DstSubReg &&
      (AArch64::ZPRRegClass.hasSubClassEq(DstRC) ||
       AArch64::ZPRRegClass.hasSubClassEq(SrcRC))) {
    unsigned SrcReg = MI->getOperand(1).getReg();
    if (any_of(MRI.def_instructions(SrcReg), IsCoalescerBarrier))
      return false;
    unsigned DstReg = MI->getOperand(0).getReg();
    if (any_of(MRI.use_nodbg_instructions(DstReg), IsCoalescerBarrier))
      return false;
  }

  return true;
}

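/// VG is the only physical register worth analyzing in MachineLoopInfo; its
/// value changes across streaming-mode transitions.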
bool AArch64RegisterInfo::shouldAnalyzePhysregInMachineLoopInfo(
    MCRegister R) const {
  return R == AArch64::VG;
}

bool AArch64RegisterInfo::isIgnoredCVReg(MCRegister LLVMReg) const {
  return (LLVMReg >= AArch64::Z0 && LLVMReg <= AArch64::Z31) ||
         (LLVMReg >= AArch64::P0 && LLVMReg <= AArch64::P15);
}
static bool isTargetWindows(const MachineFunction &MF)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static Register createScratchRegisterForInstruction(MachineInstr &MI, unsigned FIOperandNum, const AArch64InstrInfo *TII)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file implements the BitVector class.
#define LLVM_FALLTHROUGH
LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
Definition Compiler.h:404
This file contains constants used for implementing Dwarf debug support.
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Live Register Matrix
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
This file declares the machine register scavenger class.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition Value.cpp:483
static unsigned getDwarfRegNum(MCRegister Reg, const TargetRegisterInfo *TRI)
Go up the super-register chain until we hit a valid dwarf register number.
StackOffset getNonLocalFrameIndexReference(const MachineFunction &MF, int FI) const override
getNonLocalFrameIndexReference - This method returns the offset used to reference a frame index locat...
bool isFPReserved(const MachineFunction &MF) const
Should the Frame Pointer be reserved for the current function?
StackOffset resolveFrameIndexReference(const MachineFunction &MF, int FI, Register &FrameReg, bool PreferFP, bool ForSimm) const
AArch64FunctionInfo - This class is derived from MachineFunctionInfo and contains private AArch64-spe...
static const char * getRegisterName(MCRegister Reg, unsigned AltIdx=AArch64::NoRegAltName)
BitVector getStrictlyReservedRegs(const MachineFunction &MF) const
const TargetRegisterClass * getCrossCopyRegClass(const TargetRegisterClass *RC) const override
const uint32_t * getThisReturnPreservedMask(const MachineFunction &MF, CallingConv::ID) const
getThisReturnPreservedMask - Returns a call preserved mask specific to the case that 'returned' is on...
bool isReservedReg(const MachineFunction &MF, MCRegister Reg) const
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const override
const MCPhysReg * getCalleeSavedRegsViaCopy(const MachineFunction *MF) const
bool isIgnoredCVReg(MCRegister LLVMReg) const override
BitVector getReservedRegs(const MachineFunction &MF) const override
const TargetRegisterClass * getPointerRegClass(unsigned Kind=0) const override
bool shouldCoalesce(MachineInstr *MI, const TargetRegisterClass *SrcRC, unsigned SubReg, const TargetRegisterClass *DstRC, unsigned DstSubReg, const TargetRegisterClass *NewRC, LiveIntervals &LIS) const override
SrcRC and DstRC will be morphed into NewRC if this returns true.
bool requiresVirtualBaseRegisters(const MachineFunction &MF) const override
bool isUserReservedReg(const MachineFunction &MF, MCRegister Reg) const
const TargetRegisterClass * getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const override
unsigned getRegPressureLimit(const TargetRegisterClass *RC, MachineFunction &MF) const override
bool getRegAllocationHints(Register VirtReg, ArrayRef< MCPhysReg > Order, SmallVectorImpl< MCPhysReg > &Hints, const MachineFunction &MF, const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const override
Register materializeFrameBaseRegister(MachineBasicBlock *MBB, int FrameIdx, int64_t Offset) const override
Insert defining instruction(s) for BaseReg to be a pointer to FrameIdx at the beginning of the basic ...
void UpdateCustomCalleeSavedRegs(MachineFunction &MF) const
bool requiresRegisterScavenging(const MachineFunction &MF) const override
bool isFrameOffsetLegal(const MachineInstr *MI, Register BaseReg, int64_t Offset) const override
BitVector getUserReservedRegs(const MachineFunction &MF) const
void resolveFrameIndex(MachineInstr &MI, Register BaseReg, int64_t Offset) const override
bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const override
needsFrameBaseReg - Returns true if the instruction's frame index reference would be better served by...
const uint32_t * getWindowsStackProbePreservedMask() const
Stack probing calls preserve different CSRs to the normal CC.
bool regNeedsCFI(MCRegister Reg, MCRegister &RegToUseForCFI) const
Return whether the register needs a CFI entry.
bool isAnyArgRegReserved(const MachineFunction &MF) const
void emitReservedArgRegCallError(const MachineFunction &MF) const
bool isStrictlyReservedReg(const MachineFunction &MF, MCRegister Reg) const
bool eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj, unsigned FIOperandNum, RegScavenger *RS=nullptr) const override
const uint32_t * getTLSCallPreservedMask() const
const uint32_t * getNoPreservedMask() const override
Register getFrameRegister(const MachineFunction &MF) const override
bool shouldAnalyzePhysregInMachineLoopInfo(MCRegister R) const override
void getOffsetOpcodes(const StackOffset &Offset, SmallVectorImpl< uint64_t > &Ops) const override
bool isAsmClobberable(const MachineFunction &MF, MCRegister PhysReg) const override
AArch64RegisterInfo(const Triple &TT, unsigned HwMode)
const uint32_t * SMEABISupportRoutinesCallPreservedMaskFromX0() const
const MCPhysReg * getCalleeSavedRegs(const MachineFunction *MF) const override
Code Generation virtual methods...
const uint32_t * getCustomEHPadPreservedMask(const MachineFunction &MF) const override
unsigned getLocalAddressRegister(const MachineFunction &MF) const
bool hasBasePointer(const MachineFunction &MF) const
const uint32_t * getDarwinCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const
const uint32_t * getSMStartStopCallPreservedMask() const
bool useFPForScavengingIndex(const MachineFunction &MF) const override
bool cannotEliminateFrame(const MachineFunction &MF) const
bool isArgumentRegister(const MachineFunction &MF, MCRegister Reg) const override
void UpdateCustomCallPreservedMask(MachineFunction &MF, const uint32_t **Mask) const
std::optional< std::string > explainReservedReg(const MachineFunction &MF, MCRegister PhysReg) const override
bool requiresFrameIndexScavenging(const MachineFunction &MF) const override
bool enableSRLTSubregToRegMitigation() const
bool isXRegisterReservedForRA(size_t i) const
const AArch64TargetLowering * getTargetLowering() const override
bool isXRegCustomCalleeSaved(size_t i) const
bool isXRegisterReserved(size_t i) const
bool isCallingConvWin64(CallingConv::ID CC, bool IsVarArg) const
bool supportSwiftError() const override
Return true if the target supports swifterror attribute.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
static LLVM_ABI void appendOffset(SmallVectorImpl< uint64_t > &Ops, int64_t Offset)
Append Ops with operations to apply the Offset.
A debug info location.
Definition DebugLoc.h:123
Diagnostic information for unsupported feature in backend.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:272
AttributeList getAttributes() const
Return the attribute list for this Function.
Definition Function.h:354
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
Definition Function.h:229
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition Function.cpp:728
Describe properties that are true of each instruction in the target description file.
bool regsOverlap(MCRegister RegA, MCRegister RegB) const
Returns true if the two registers are equal or alias each other.
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:41
MCSubRegIterator enumerates all sub-registers of Reg.
bool isValid() const
Returns true if this iterator is not yet at the end.
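Sketch of the usual loop, assuming TRI is in scope and markClobbered is a hypothetical callback:

  for (MCSubRegIterator SubReg(AArch64::X0, TRI, /*IncludeSelf=*/true);
       SubReg.isValid(); ++SubReg)
    markClobbered(*SubReg); // visits X0 itself and its sub-register W0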
MachineInstrBundleIterator< MachineInstr > iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack frame for this function contains any variable sized objects.
uint64_t getStackSize() const
Return the number of bytes that must be allocated to hold all of the fixed size frame objects.
bool adjustsStack() const
Return true if this function adjusts the stack – e.g., when calling another function.
bool isFrameAddressTaken() const
This method may be called any time after instruction selection is complete to determine if there is a call to @llvm.frameaddress in this function.
int64_t getLocalFrameSize() const
Get the size of the local object blob.
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
uint32_t * allocateRegMask()
Allocate and initialize a register mask with NumRegister bits.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do so.
const MachineFunctionProperties & getProperties() const
Get the function properties.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with.
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addFrameIndex(int Idx) const
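These compose into the usual builder chain; a hedged sketch in which MBB, InsertPt, DL, TII, ScratchReg, and FrameIdx are assumed to be in scope:

  BuildMI(MBB, InsertPt, DL, TII->get(AArch64::ADDXri), ScratchReg)
      .addFrameIndex(FrameIdx) // placeholder; rewritten when frame indices are eliminated
      .addImm(0)               // byte offset
      .addImm(0);              // shifter encoding (LSL #0)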
Representation of each machine instruction.
MachineOperand class - Representation of each machine instruction operand.
LLVM_ABI unsigned getOperandNo() const
Returns the index of this operand in the instruction that it belongs to.
LLVM_ABI void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags=0)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value.
static unsigned getRegMaskSize(unsigned NumRegs)
Returns the number of elements needed for a regmask array.
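A sketch of how this pairs with allocateRegMask above (MF and TRI assumed in scope); one bit per physical register is the standard regmask convention:

  // allocateRegMask() returns getRegMaskSize(TRI->getNumRegs()) zeroed words.
  uint32_t *Mask = MF.allocateRegMask();
  // Setting bit 'Reg' marks that physical register as preserved by the call.
  Mask[AArch64::X19 / 32] |= 1u << (AArch64::X19 % 32);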
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
iterator_range< def_instr_iterator > def_instructions(Register Reg) const
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified register class.
iterator_range< use_instr_nodbg_iterator > use_nodbg_instructions(Register Reg) const
iterator_range< def_iterator > def_operands(Register Reg) const
LLVM_ABI void setCalleeSavedRegs(ArrayRef< MCPhysReg > CSRs)
Sets the updated Callee Saved Registers list.
LLVM_ABI const TargetRegisterClass * constrainRegClass(Register Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common subclass of RC and the current register class.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
Definition TypeSize.h:30
int64_t getFixed() const
Returns the fixed component of the stack.
Definition TypeSize.h:46
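For instance, an offset into an SVE callee-save area mixes both components; a small sketch:

  StackOffset Off = StackOffset::get(/*Fixed=*/16, /*Scalable=*/32);
  int64_t FixedPart = Off.getFixed();       // 16
  int64_t ScalablePart = Off.getScalable(); // 32, scaled by vscale at run time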
bool hasFP(const MachineFunction &MF) const
hasFP - Return true if the specified function should have a dedicated frame pointer register.
TargetOptions Options
LLVM_ABI bool DisableFramePointerElim(const MachineFunction &MF) const
DisableFramePointerElim - This returns true if frame pointer elimination optimization should be disabled for the given machine function.
unsigned getID() const
Return the register class ID number.
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
virtual bool getRegAllocationHints(Register VirtReg, ArrayRef< MCPhysReg > Order, SmallVectorImpl< MCPhysReg > &Hints, const MachineFunction &MF, const VirtRegMap *VRM=nullptr, const LiveRegMatrix *Matrix=nullptr) const
Get a list of 'hint' registers that the register allocator should try first when allocating a physical register for the virtual register VirtReg.
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
MCRegister getPhys(Register virtReg) const
Returns the physical register mapped to the specified virtual register.
Definition VirtRegMap.h:91
bool hasPhys(Register virtReg) const
Returns true if the specified virtual register is mapped to a physical register.
Definition VirtRegMap.h:87
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ MO_TAGGED
MO_TAGGED - With MO_PAGE, indicates that the page includes a memory tag in bits 56-63.
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount. imm: 6-bit shift amount; shifter: 000 ==> lsl, 001 ==> lsr, 010 ==> asr, 011 ==> ror, 100 ==> msl.
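For example, encoding the LSL #12 form used by ADD/SUB immediate instructions (a sketch):

  unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 12);
  // Shifter can then be passed to addImm() on an ADDXri/SUBXri builder.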
void initLLVMToCVRegMapping(MCRegisterInfo *MRI)
int32_t getSVEPseudoMap(uint32_t Opcode)
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ AArch64_VectorCall
Used between AArch64 Advanced SIMD functions.
@ Swift
Calling convention for Swift.
Definition CallingConv.h:69
@ AArch64_SVE_VectorCall
Used between AArch64 SVE functions.
@ CFGuard_Check
Special calling convention on Windows for calling the Control Guard Check ICall function.
Definition CallingConv.h:82
@ PreserveMost
Used for runtime calls that preserve most registers.
Definition CallingConv.h:63
@ AnyReg
OBSOLETED - Used for stack-based JavaScript calls.
Definition CallingConv.h:60
@ AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2
Preserve X2-X15, X19-X29, SP, Z0-Z31, P0-P15.
@ CXX_FAST_TLS
Used for access functions.
Definition CallingConv.h:72
@ AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0
Preserve X0-X13, X19-X29, SP, Z0-Z31, P0-P15.
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
Definition CallingConv.h:50
@ AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1
Preserve X1-X15, X19-X29, SP, Z0-Z31, P0-P15.
@ PreserveAll
Used for runtime calls that preserve (almost) all registers.
Definition CallingConv.h:66
@ Fast
Attempts to make calls as fast as possible (e.g. by passing things in registers).
Definition CallingConv.h:41
@ PreserveNone
Used for runtime calls that preserve no general registers.
Definition CallingConv.h:90
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always be performed.
Definition CallingConv.h:76
@ Win64
The C convention as implemented on Windows/x86-64 and AArch64.
@ SwiftTail
This follows the Swift calling convention in how arguments are passed but guarantees tail calls will be made by making the callee clean up their stack.
Definition CallingConv.h:87
@ GRAAL
Used by GraalVM. Two additional registers are reserved.
@ ARM64EC_Thunk_X64
Calling convention used in the ARM64EC ABI to implement calls between x64 code and thunks.
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ Offset
Definition DWP.cpp:532
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1739
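Sketch of the range form over a hypothetical callee-saved register list CSRegs:

  bool AllGPR64 = all_of(CSRegs, [](MCPhysReg Reg) {
    return AArch64::GPR64RegClass.contains(Reg);
  });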
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
int isAArch64FrameOffsetLegal(const MachineInstr &MI, StackOffset &Offset, bool *OutUseUnscaledOp=nullptr, unsigned *OutUnscaledOp=nullptr, int64_t *EmittableOffset=nullptr)
Check if the Offset is a valid frame offset for MI.
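The result is tested against the AArch64FrameOffset* flags listed below; a sketch, with MI and a fixed byte offset assumed in scope:

  StackOffset Off = StackOffset::getFixed(ByteOffset); // ByteOffset assumed in scope
  int Status = isAArch64FrameOffsetLegal(MI, Off);
  if (Status & AArch64FrameOffsetIsLegal) {
    // MI's immediate field can represent the offset directly.
  }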
@ Done
Definition Threading.h:60
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
@ AArch64FrameOffsetIsLegal
Offset is legal.
@ AArch64FrameOffsetCanUpdate
Offset can apply, at least partly.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1746
void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, StackOffset Offset, const TargetInstrInfo *TII, MachineInstr::MIFlag=MachineInstr::NoFlags, bool SetNZCV=false, bool NeedsWinCFI=false, bool *HasWinCFI=nullptr, bool EmitCFAOffset=false, StackOffset InitialOffset={}, unsigned FrameReg=AArch64::SP)
emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg plus Offset.
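A sketch of a typical call, materializing SP plus a mixed fixed/scalable offset into a scratch register (MBB, MBBI, DL, and TII assumed in scope):

  emitFrameOffset(MBB, MBBI, DL, AArch64::X16, AArch64::SP,
                  StackOffset::get(/*Fixed=*/32, /*Scalable=*/16), TII);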
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtual registers.
Definition MCRegister.h:21
DWARFExpression::Operation Op
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1772
bool rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx, unsigned FrameReg, StackOffset &Offset, const AArch64InstrInfo *TII)
rewriteAArch64FrameIndex - Rewrite MI to access 'Offset' bytes from the FP.
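Sketch of the usual call from frame-index elimination, with MI, FIOperandNum, Offset, and TII assumed in scope:

  if (rewriteAArch64FrameIndex(MI, FIOperandNum, AArch64::FP, Offset, TII))
    return; // the offset was folded fully into MI's immediate
  // Otherwise Offset now holds the residue that must be materialized separately.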
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1947
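For example, with a hypothetical list of reserved registers:

  SmallVector<MCPhysReg, 4> ReservedGPRs = {AArch64::X18, AArch64::FP};
  bool X18Reserved = is_contained(ReservedGPRs, AArch64::X18); // true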