//===- AArch64RegisterInfo.cpp - AArch64 Register Information ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetRegisterInfo
// class.
//
//===----------------------------------------------------------------------===//

#include "AArch64RegisterInfo.h"
#include "AArch64FrameLowering.h"
#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64InstPrinter.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/TargetParser/Triple.h"

using namespace llvm;

#define GET_CC_REGISTER_LISTS
#include "AArch64GenCallingConv.inc"
#define GET_REGINFO_TARGET_DESC
#include "AArch64GenRegisterInfo.inc"

AArch64RegisterInfo::AArch64RegisterInfo(const Triple &TT)
    : AArch64GenRegisterInfo(AArch64::LR), TT(TT) {
  AArch64_MC::initLLVMToCVRegMapping(this);
}

/// Return whether the register needs a CFI entry. Not all unwinders may know
/// about SVE registers, so we assume the lowest common denominator, i.e. the
/// callee-saves required by the base ABI. For the SVE registers z8-z15, only
/// the lower 64 bits (d8-d15) need to be saved. The lower 64-bit subreg is
/// returned in \p RegToUseForCFI.
bool AArch64RegisterInfo::regNeedsCFI(unsigned Reg,
                                      unsigned &RegToUseForCFI) const {
  if (AArch64::PPRRegClass.contains(Reg))
    return false;

  if (AArch64::ZPRRegClass.contains(Reg)) {
    RegToUseForCFI = getSubReg(Reg, AArch64::dsub);
    for (int I = 0; CSR_AArch64_AAPCS_SaveList[I]; ++I) {
      if (CSR_AArch64_AAPCS_SaveList[I] == RegToUseForCFI)
        return true;
    }
    return false;
  }

  RegToUseForCFI = Reg;
  return true;
}
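
// Worked example of the mapping above: if z9 needs a CFI entry, d9 (its dsub
// subregister) is in CSR_AArch64_AAPCS_SaveList, so the entry is emitted
// against d9; predicate (PPR) registers never get a CFI entry.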

const MCPhysReg *
AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");

  if (MF->getFunction().getCallingConv() == CallingConv::GHC)
    // GHC set of callee saved regs is empty as all those regs are
    // used for passing STG regs around
    return CSR_AArch64_NoRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveNone)
    return CSR_AArch64_NoneRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::AnyReg)
    return CSR_AArch64_AllRegs_SaveList;

  if (MF->getFunction().getCallingConv() == CallingConv::ARM64EC_Thunk_X64)
    return CSR_Win_AArch64_Arm64EC_Thunk_SaveList;

  // Darwin has its own CSR_AArch64_AAPCS_SaveList, which means most CSR save
  // lists depending on that will need to have their Darwin variant as well.
  if (MF->getSubtarget<AArch64Subtarget>().isTargetDarwin())
    return getDarwinCalleeSavedRegs(MF);

  if (MF->getFunction().getCallingConv() == CallingConv::CFGuard_Check)
    return CSR_Win_AArch64_CFGuard_Check_SaveList;
  if (MF->getSubtarget<AArch64Subtarget>().isTargetWindows()) {
    if (MF->getSubtarget<AArch64Subtarget>()
            .getTargetLowering()
            ->supportSwiftError() &&
        MF->getFunction().getAttributes().hasAttrSomewhere(
            Attribute::SwiftError))
      return CSR_Win_AArch64_AAPCS_SwiftError_SaveList;
    if (MF->getFunction().getCallingConv() == CallingConv::SwiftTail)
      return CSR_Win_AArch64_AAPCS_SwiftTail_SaveList;
    return CSR_Win_AArch64_AAPCS_SaveList;
  }
  if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall)
    return CSR_AArch64_AAVPCS_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::AArch64_SVE_VectorCall)
    return CSR_AArch64_SVE_AAPCS_SaveList;
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0)
    report_fatal_error(
        "Calling convention "
        "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0 is only "
        "supported to improve calls to SME ACLE save/restore/disable-za "
        "functions, and is not intended to be used beyond that scope.");
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1)
    report_fatal_error(
        "Calling convention "
        "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1 is "
        "only supported to improve calls to SME ACLE __arm_get_current_vg "
        "function, and is not intended to be used beyond that scope.");
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2)
    report_fatal_error(
        "Calling convention "
        "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2 is "
        "only supported to improve calls to SME ACLE __arm_sme_state "
        "and is not intended to be used beyond that scope.");
  if (MF->getSubtarget<AArch64Subtarget>()
          .getTargetLowering()
          ->supportSwiftError() &&
      MF->getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_AArch64_AAPCS_SwiftError_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::SwiftTail)
    return CSR_AArch64_AAPCS_SwiftTail_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
    return CSR_AArch64_RT_MostRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveAll)
    return CSR_AArch64_RT_AllRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::Win64)
    // This is for OSes other than Windows; Windows is a separate case further
    // above.
    return CSR_AArch64_AAPCS_X18_SaveList;
  if (MF->getInfo<AArch64FunctionInfo>()->isSVECC())
    return CSR_AArch64_SVE_AAPCS_SaveList;
  return CSR_AArch64_AAPCS_SaveList;
}
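
// Note: the *_SaveList arrays returned here drive callee-save spill/restore
// when compiling the callee itself; the matching *_RegMask masks returned by
// getCallPreservedMask below describe the same guarantees to the register
// allocator at call sites.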

const MCPhysReg *
AArch64RegisterInfo::getDarwinCalleeSavedRegs(const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  assert(MF->getSubtarget<AArch64Subtarget>().isTargetDarwin() &&
         "Invalid subtarget for getDarwinCalleeSavedRegs");

  if (MF->getFunction().getCallingConv() == CallingConv::CFGuard_Check)
    report_fatal_error(
        "Calling convention CFGuard_Check is unsupported on Darwin.");
  if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall)
    return CSR_Darwin_AArch64_AAVPCS_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::AArch64_SVE_VectorCall)
    report_fatal_error(
        "Calling convention SVE_VectorCall is unsupported on Darwin.");
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0)
    report_fatal_error(
        "Calling convention "
        "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0 is "
        "only supported to improve calls to SME ACLE save/restore/disable-za "
        "functions, and is not intended to be used beyond that scope.");
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1)
    report_fatal_error(
        "Calling convention "
        "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1 is "
        "only supported to improve calls to SME ACLE __arm_get_current_vg "
        "function, and is not intended to be used beyond that scope.");
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2)
    report_fatal_error(
        "Calling convention "
        "AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2 is "
        "only supported to improve calls to SME ACLE __arm_sme_state "
        "and is not intended to be used beyond that scope.");
  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS)
    return MF->getInfo<AArch64FunctionInfo>()->isSplitCSR()
               ? CSR_Darwin_AArch64_CXX_TLS_PE_SaveList
               : CSR_Darwin_AArch64_CXX_TLS_SaveList;
  if (MF->getSubtarget<AArch64Subtarget>()
          .getTargetLowering()
          ->supportSwiftError() &&
      MF->getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_Darwin_AArch64_AAPCS_SwiftError_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::SwiftTail)
    return CSR_Darwin_AArch64_AAPCS_SwiftTail_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
    return CSR_Darwin_AArch64_RT_MostRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveAll)
    return CSR_Darwin_AArch64_RT_AllRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::Win64)
    return CSR_Darwin_AArch64_AAPCS_Win64_SaveList;
  if (MF->getInfo<AArch64FunctionInfo>()->isSVECC())
    return CSR_Darwin_AArch64_SVE_AAPCS_SaveList;
  return CSR_Darwin_AArch64_AAPCS_SaveList;
}

const MCPhysReg *AArch64RegisterInfo::getCalleeSavedRegsViaCopy(
    const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
      MF->getInfo<AArch64FunctionInfo>()->isSplitCSR())
    return CSR_Darwin_AArch64_CXX_TLS_ViaCopy_SaveList;
  return nullptr;
}

void AArch64RegisterInfo::UpdateCustomCalleeSavedRegs(
    MachineFunction &MF) const {
  const MCPhysReg *CSRs = getCalleeSavedRegs(&MF);
  SmallVector<MCPhysReg, 32> UpdatedCSRs;
  for (const MCPhysReg *I = CSRs; *I; ++I)
    UpdatedCSRs.push_back(*I);

  for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegCustomCalleeSaved(i)) {
      UpdatedCSRs.push_back(AArch64::GPR64commonRegClass.getRegister(i));
    }
  }
  // Register lists are zero-terminated.
  UpdatedCSRs.push_back(0);
  MF.getRegInfo().setCalleeSavedRegs(UpdatedCSRs);
}
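
// Sketch of the effect, assuming a subtarget configured with a custom
// callee-saved X register (e.g. via clang's -fcall-saved-x18): the
// corresponding GPR64 register is appended to the default save list above, so
// prologue/epilogue insertion spills and restores it like any other CSR.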

const TargetRegisterClass *
AArch64RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
                                           unsigned Idx) const {
  // Edge case for GPR/FPR register classes.
  if (RC == &AArch64::GPR32allRegClass && Idx == AArch64::hsub)
    return &AArch64::FPR32RegClass;
  else if (RC == &AArch64::GPR64allRegClass && Idx == AArch64::hsub)
    return &AArch64::FPR64RegClass;

  // Forward to TableGen's default version.
  return AArch64GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}

const uint32_t *
AArch64RegisterInfo::getDarwinCallPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  assert(MF.getSubtarget<AArch64Subtarget>().isTargetDarwin() &&
         "Invalid subtarget for getDarwinCallPreservedMask");

  if (CC == CallingConv::CXX_FAST_TLS)
    return CSR_Darwin_AArch64_CXX_TLS_RegMask;
  if (CC == CallingConv::AArch64_VectorCall)
    return CSR_Darwin_AArch64_AAVPCS_RegMask;
  if (CC == CallingConv::AArch64_SVE_VectorCall)
    return CSR_Darwin_AArch64_SVE_AAPCS_RegMask;
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0)
    return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0_RegMask;
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1)
    return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1_RegMask;
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2)
    return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2_RegMask;
  if (CC == CallingConv::CFGuard_Check)
    report_fatal_error(
        "Calling convention CFGuard_Check is unsupported on Darwin.");
  if (MF.getSubtarget<AArch64Subtarget>()
          .getTargetLowering()
          ->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return CSR_Darwin_AArch64_AAPCS_SwiftError_RegMask;
  if (CC == CallingConv::SwiftTail)
    return CSR_Darwin_AArch64_AAPCS_SwiftTail_RegMask;
  if (CC == CallingConv::PreserveMost)
    return CSR_Darwin_AArch64_RT_MostRegs_RegMask;
  if (CC == CallingConv::PreserveAll)
    return CSR_Darwin_AArch64_RT_AllRegs_RegMask;
  return CSR_Darwin_AArch64_AAPCS_RegMask;
}

const uint32_t *
AArch64RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  bool SCS = MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack);
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls.
    return SCS ? CSR_AArch64_NoRegs_SCS_RegMask : CSR_AArch64_NoRegs_RegMask;
  if (CC == CallingConv::PreserveNone)
    return SCS ? CSR_AArch64_NoneRegs_SCS_RegMask
               : CSR_AArch64_NoneRegs_RegMask;
  if (CC == CallingConv::AnyReg)
    return SCS ? CSR_AArch64_AllRegs_SCS_RegMask : CSR_AArch64_AllRegs_RegMask;

  // All the following calling conventions are handled differently on Darwin.
  if (MF.getSubtarget<AArch64Subtarget>().isTargetDarwin()) {
    if (SCS)
      report_fatal_error("ShadowCallStack attribute not supported on Darwin.");
    return getDarwinCallPreservedMask(MF, CC);
  }

  if (CC == CallingConv::AArch64_VectorCall)
    return SCS ? CSR_AArch64_AAVPCS_SCS_RegMask : CSR_AArch64_AAVPCS_RegMask;
  if (CC == CallingConv::AArch64_SVE_VectorCall)
    return SCS ? CSR_AArch64_SVE_AAPCS_SCS_RegMask
               : CSR_AArch64_SVE_AAPCS_RegMask;
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0)
    return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0_RegMask;
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1)
    return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1_RegMask;
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2)
    return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2_RegMask;
  if (CC == CallingConv::CFGuard_Check)
    return CSR_Win_AArch64_CFGuard_Check_RegMask;
  if (MF.getSubtarget<AArch64Subtarget>()
          .getTargetLowering()
          ->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return SCS ? CSR_AArch64_AAPCS_SwiftError_SCS_RegMask
               : CSR_AArch64_AAPCS_SwiftError_RegMask;
  if (CC == CallingConv::SwiftTail) {
    if (SCS)
      report_fatal_error(
          "ShadowCallStack attribute not supported with swifttail");
    return CSR_AArch64_AAPCS_SwiftTail_RegMask;
  }
  if (CC == CallingConv::PreserveMost)
    return SCS ? CSR_AArch64_RT_MostRegs_SCS_RegMask
               : CSR_AArch64_RT_MostRegs_RegMask;
  if (CC == CallingConv::PreserveAll)
    return SCS ? CSR_AArch64_RT_AllRegs_SCS_RegMask
               : CSR_AArch64_RT_AllRegs_RegMask;

  return SCS ? CSR_AArch64_AAPCS_SCS_RegMask : CSR_AArch64_AAPCS_RegMask;
}

const uint32_t *AArch64RegisterInfo::getCustomEHPadPreservedMask(
    const MachineFunction &MF) const {
  if (MF.getSubtarget<AArch64Subtarget>().isTargetLinux())
    return CSR_AArch64_AAPCS_RegMask;

  return nullptr;
}

const uint32_t *AArch64RegisterInfo::getTLSCallPreservedMask() const {
  if (TT.isOSDarwin())
    return CSR_Darwin_AArch64_TLS_RegMask;

  assert(TT.isOSBinFormatELF() && "Invalid target");
  return CSR_AArch64_TLS_ELF_RegMask;
}

void AArch64RegisterInfo::UpdateCustomCallPreservedMask(
    MachineFunction &MF, const uint32_t **Mask) const {
  uint32_t *UpdatedMask = MF.allocateRegMask();
  unsigned RegMaskSize = MachineOperand::getRegMaskSize(getNumRegs());
  memcpy(UpdatedMask, *Mask, sizeof(UpdatedMask[0]) * RegMaskSize);

  for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegCustomCalleeSaved(i)) {
      for (MCPhysReg SubReg :
           subregs_inclusive(AArch64::GPR64commonRegClass.getRegister(i))) {
        // See TargetRegisterInfo::getCallPreservedMask for how to interpret
        // the register mask.
        UpdatedMask[SubReg / 32] |= 1u << (SubReg % 32);
      }
    }
  }
  *Mask = UpdatedMask;
}
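
// The mask layout follows TargetRegisterInfo::getCallPreservedMask: bit N
// (i.e. UpdatedMask[N / 32], bit N % 32) corresponds to the register with
// MCRegister id N, and a set bit means "preserved across the call". Iterating
// with subregs_inclusive() above also sets the bits of all subregisters of
// each custom callee-saved register.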

const uint32_t *AArch64RegisterInfo::getSMStartStopCallPreservedMask() const {
  return CSR_AArch64_SMStartStop_RegMask;
}

const uint32_t *
AArch64RegisterInfo::SMEABISupportRoutinesCallPreservedMaskFromX0() const {
  return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0_RegMask;
}

const uint32_t *AArch64RegisterInfo::getNoPreservedMask() const {
  return CSR_AArch64_NoRegs_RegMask;
}

const uint32_t *
AArch64RegisterInfo::getThisReturnPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  // This should return a register mask that is the same as that returned by
  // getCallPreservedMask but that additionally preserves the register used
  // for the first i64 argument (which must also be the register used to
  // return a single i64 return value).
  //
  // In case the calling convention does not use the same register for both,
  // the function should return NULL (does not currently apply).
  assert(CC != CallingConv::GHC && "should not be GHC calling convention.");
  if (MF.getSubtarget<AArch64Subtarget>().isTargetDarwin())
    return CSR_Darwin_AArch64_AAPCS_ThisReturn_RegMask;
  return CSR_AArch64_AAPCS_ThisReturn_RegMask;
}

const uint32_t *AArch64RegisterInfo::getWindowsStackProbePreservedMask() const {
  return CSR_AArch64_StackProbe_Windows_RegMask;
}

std::optional<std::string>
AArch64RegisterInfo::explainReservedReg(const MachineFunction &MF,
                                        MCRegister PhysReg) const {
  if (hasBasePointer(MF) && MCRegisterInfo::regsOverlap(PhysReg, AArch64::X19))
    return std::string("X19 is used as the frame base pointer register.");

  if (MF.getSubtarget<AArch64Subtarget>().isWindowsArm64EC()) {
    bool warn = false;
    if (MCRegisterInfo::regsOverlap(PhysReg, AArch64::X13) ||
        MCRegisterInfo::regsOverlap(PhysReg, AArch64::X14) ||
        MCRegisterInfo::regsOverlap(PhysReg, AArch64::X23) ||
        MCRegisterInfo::regsOverlap(PhysReg, AArch64::X24) ||
        MCRegisterInfo::regsOverlap(PhysReg, AArch64::X28))
      warn = true;

    for (unsigned i = AArch64::B16; i <= AArch64::B31; ++i)
      if (MCRegisterInfo::regsOverlap(PhysReg, i))
        warn = true;

    if (warn)
      return std::string(AArch64InstPrinter::getRegisterName(PhysReg)) +
             " is clobbered by asynchronous signals when using Arm64EC.";
  }

  return {};
}

BitVector
AArch64RegisterInfo::getStrictlyReservedRegs(const MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);

  // FIXME: avoid re-calculating this every time.
  BitVector Reserved(getNumRegs());
  markSuperRegs(Reserved, AArch64::WSP);
  markSuperRegs(Reserved, AArch64::WZR);

  if (TFI->hasFP(MF) || TT.isOSDarwin())
    markSuperRegs(Reserved, AArch64::W29);

  if (MF.getSubtarget<AArch64Subtarget>().isWindowsArm64EC()) {
    // x13, x14, x23, x24, x28, and v16-v31 are clobbered by asynchronous
    // signals, so we can't ever use them.
    markSuperRegs(Reserved, AArch64::W13);
    markSuperRegs(Reserved, AArch64::W14);
    markSuperRegs(Reserved, AArch64::W23);
    markSuperRegs(Reserved, AArch64::W24);
    markSuperRegs(Reserved, AArch64::W28);
    for (unsigned i = AArch64::B16; i <= AArch64::B31; ++i)
      markSuperRegs(Reserved, i);
  }

  for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegisterReserved(i))
      markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
  }

  if (hasBasePointer(MF))
    markSuperRegs(Reserved, AArch64::W19);

  // SLH uses register W16/X16 as the taint register.
  if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening))
    markSuperRegs(Reserved, AArch64::W16);

  // FFR is modelled as global state that cannot be allocated.
  if (MF.getSubtarget<AArch64Subtarget>().hasSVE())
    Reserved.set(AArch64::FFR);

  // SME tiles are not allocatable.
  if (MF.getSubtarget<AArch64Subtarget>().hasSME()) {
    for (MCPhysReg SubReg : subregs_inclusive(AArch64::ZA))
      Reserved.set(SubReg);
  }

  // VG cannot be allocated.
  Reserved.set(AArch64::VG);

  if (MF.getSubtarget<AArch64Subtarget>().hasSME2()) {
    for (MCSubRegIterator SubReg(AArch64::ZT0, this, /*self=*/true);
         SubReg.isValid(); ++SubReg)
      Reserved.set(*SubReg);
  }

  markSuperRegs(Reserved, AArch64::FPCR);
  markSuperRegs(Reserved, AArch64::FPMR);
  markSuperRegs(Reserved, AArch64::FPSR);

  if (MF.getFunction().getCallingConv() == CallingConv::GRAAL) {
    markSuperRegs(Reserved, AArch64::X27);
    markSuperRegs(Reserved, AArch64::X28);
    markSuperRegs(Reserved, AArch64::W27);
    markSuperRegs(Reserved, AArch64::W28);
  }

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}

BitVector
AArch64RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved = getStrictlyReservedRegs(MF);

  for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegisterReservedForRA(i))
      markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
  }

  if (MF.getSubtarget<AArch64Subtarget>().isLRReservedForRA()) {
    // In order to prevent the register allocator from using LR, we need to
    // mark it as reserved. However, we don't want to keep it reserved
    // throughout the pipeline, since that prevents other infrastructure from
    // reasoning about its liveness. We use the NoVRegs property instead of
    // IsSSA because IsSSA is removed before VirtRegRewriter runs.
    if (!MF.getProperties().hasProperty(
            MachineFunctionProperties::Property::NoVRegs))
      markSuperRegs(Reserved, AArch64::LR);
  }

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}

bool AArch64RegisterInfo::isReservedReg(const MachineFunction &MF,
                                        MCRegister Reg) const {
  return getReservedRegs(MF)[Reg];
}

bool AArch64RegisterInfo::isStrictlyReservedReg(const MachineFunction &MF,
                                                MCRegister Reg) const {
  return getStrictlyReservedRegs(MF)[Reg];
}

bool AArch64RegisterInfo::isAnyArgRegReserved(const MachineFunction &MF) const {
  return llvm::any_of(*AArch64::GPR64argRegClass.MC, [this, &MF](MCPhysReg r) {
    return isStrictlyReservedReg(MF, r);
  });
}

void AArch64RegisterInfo::emitReservedArgRegCallError(
    const MachineFunction &MF) const {
  const Function &F = MF.getFunction();
  F.getContext().diagnose(DiagnosticInfoUnsupported{
      F, "AArch64 doesn't support function calls if any of the argument "
         "registers is reserved."});
}

bool AArch64RegisterInfo::isAsmClobberable(const MachineFunction &MF,
                                           MCRegister PhysReg) const {
  // SLH uses register X16 as the taint register but it will fall back to a
  // different method if the user clobbers it. So X16 is not reserved for
  // inline asm but is for normal codegen.
  if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening) &&
      MCRegisterInfo::regsOverlap(PhysReg, AArch64::X16))
    return true;

  // ZA/ZT0 registers are reserved but may be permitted in the clobber list.
  if (PhysReg == AArch64::ZA || PhysReg == AArch64::ZT0)
    return true;

  return !isReservedReg(MF, PhysReg);
}

const TargetRegisterClass *
AArch64RegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                        unsigned Kind) const {
  return &AArch64::GPR64spRegClass;
}

const TargetRegisterClass *
AArch64RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &AArch64::CCRRegClass)
    return &AArch64::GPR64RegClass; // Only MSR & MRS copy NZCV.
  return RC;
}

unsigned AArch64RegisterInfo::getBaseRegister() const { return AArch64::X19; }

bool AArch64RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // In the presence of variable sized objects or funclets, if the fixed stack
  // size is large enough that referencing from the FP won't result in things
  // being in range relatively often, we can use a base pointer to allow access
  // from the other direction like the SP normally works.
  //
  // Furthermore, if both variable sized objects and stack realignment are
  // present, the base pointer is the only reliable way to reference the
  // locals.
  if (MFI.hasVarSizedObjects() || MF.hasEHFunclets()) {
    if (hasStackRealignment(MF))
      return true;

    auto &ST = MF.getSubtarget<AArch64Subtarget>();
    if (ST.hasSVE() || ST.isStreaming()) {
      const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
      // Frames that have both variable sized objects and scalable SVE objects
      // should always use a base pointer.
      if (!AFI->hasCalculatedStackSizeSVE() || AFI->getStackSizeSVE())
        return true;
    }

    // Conservatively estimate whether the negative offset from the frame
    // pointer will be sufficient to reach. If a function has a smallish
    // frame, it's less likely to have lots of spills and callee saved
    // space, so it's all more likely to be within range of the frame pointer.
    // If it's wrong, we'll materialize the constant and still get to the
    // object; it's just suboptimal. Negative offsets use the unscaled
    // load/store instructions, which have a 9-bit signed immediate.
    return MFI.getLocalFrameSize() >= 256;
  }

  return false;
}
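
// Illustrative case: a function that calls alloca() and also keeps >= 256
// bytes of locals has hasVarSizedObjects() == true and a local frame size
// past the 9-bit unscaled-immediate comfort zone, so this returns true and
// X19 (getBaseRegister()) is reserved as the base pointer.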

bool AArch64RegisterInfo::isArgumentRegister(const MachineFunction &MF,
                                             MCRegister Reg) const {
  CallingConv::ID CC = MF.getFunction().getCallingConv();
  const AArch64Subtarget &STI = MF.getSubtarget<AArch64Subtarget>();
  bool IsVarArg = STI.isCallingConvWin64(MF.getFunction().getCallingConv(),
                                         MF.getFunction().isVarArg());

  auto HasReg = [](ArrayRef<MCRegister> RegList, MCRegister Reg) {
    return llvm::is_contained(RegList, Reg);
  };

  switch (CC) {
  default:
    report_fatal_error("Unsupported calling convention.");
  case CallingConv::GHC:
    return HasReg(CC_AArch64_GHC_ArgRegs, Reg);
  case CallingConv::PreserveNone:
    if (!MF.getFunction().isVarArg())
      return HasReg(CC_AArch64_Preserve_None_ArgRegs, Reg);
    [[fallthrough]];
  case CallingConv::C:
  case CallingConv::Fast:
  case CallingConv::PreserveMost:
  case CallingConv::PreserveAll:
  case CallingConv::CXX_FAST_TLS:
  case CallingConv::Swift:
  case CallingConv::SwiftTail:
  case CallingConv::Tail:
    if (STI.isTargetWindows()) {
      if (IsVarArg)
        return HasReg(CC_AArch64_Win64_VarArg_ArgRegs, Reg);
      switch (CC) {
      default:
        return HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
      case CallingConv::Swift:
      case CallingConv::SwiftTail:
        return HasReg(CC_AArch64_Win64PCS_Swift_ArgRegs, Reg) ||
               HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
      }
    }
    if (!STI.isTargetDarwin()) {
      switch (CC) {
      default:
        return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg);
      case CallingConv::Swift:
      case CallingConv::SwiftTail:
        return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg) ||
               HasReg(CC_AArch64_AAPCS_Swift_ArgRegs, Reg);
      }
    }
    if (!IsVarArg) {
      switch (CC) {
      default:
        return HasReg(CC_AArch64_DarwinPCS_ArgRegs, Reg);
      case CallingConv::Swift:
      case CallingConv::SwiftTail:
        return HasReg(CC_AArch64_DarwinPCS_ArgRegs, Reg) ||
               HasReg(CC_AArch64_DarwinPCS_Swift_ArgRegs, Reg);
      }
    }
    if (STI.isTargetILP32())
      return HasReg(CC_AArch64_DarwinPCS_ILP32_VarArg_ArgRegs, Reg);
    return HasReg(CC_AArch64_DarwinPCS_VarArg_ArgRegs, Reg);
  case CallingConv::Win64:
    if (IsVarArg)
      return HasReg(CC_AArch64_Win64_VarArg_ArgRegs, Reg);
    return HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
  case CallingConv::CFGuard_Check:
    return HasReg(CC_AArch64_Win64_CFGuard_Check_ArgRegs, Reg);
  case CallingConv::AArch64_VectorCall:
  case CallingConv::AArch64_SVE_VectorCall:
  case CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0:
  case CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1:
  case CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2:
    if (STI.isTargetWindows())
      return HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
    return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg);
  }
}

Register
AArch64RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? AArch64::FP : AArch64::SP;
}

bool AArch64RegisterInfo::requiresRegisterScavenging(
    const MachineFunction &MF) const {
  return true;
}

bool AArch64RegisterInfo::requiresVirtualBaseRegisters(
    const MachineFunction &MF) const {
  return true;
}

bool
AArch64RegisterInfo::useFPForScavengingIndex(const MachineFunction &MF) const {
  // This function indicates whether the emergency spillslot should be placed
  // close to the beginning of the stackframe (closer to FP) or the end
  // (closer to SP).
  //
  // The beginning works most reliably if we have a frame pointer.
  // In the presence of any non-constant space between FP and locals
  // (e.g. in case of stack realignment or a scalable SVE area), it is
  // better to use SP or BP.
  const AArch64FrameLowering &TFI = *getFrameLowering(MF);
  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  assert((!MF.getSubtarget<AArch64Subtarget>().hasSVE() ||
          AFI->hasCalculatedStackSizeSVE()) &&
         "Expected SVE area to be calculated by this point");
  return TFI.hasFP(MF) && !hasStackRealignment(MF) && !AFI->getStackSizeSVE();
}

bool AArch64RegisterInfo::requiresFrameIndexScavenging(
    const MachineFunction &MF) const {
  return true;
}

bool
AArch64RegisterInfo::cannotEliminateFrame(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (MF.getTarget().Options.DisableFramePointerElim(MF) && MFI.adjustsStack())
    return true;
  return MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken();
}

/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool AArch64RegisterInfo::needsFrameBaseReg(MachineInstr *MI,
                                            int64_t Offset) const {
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i)
    assert(i < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand!");

  // It's the load/store FI references that cause issues, as it can be
  // difficult to materialize the offset if it won't fit in the literal field.
  // Estimate based on the size of the local frame and some conservative
  // assumptions about the rest of the stack frame (note, this is pre-regalloc,
  // so we don't know everything for certain yet) whether this offset is likely
  // to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  if (!MI->mayLoad() && !MI->mayStore())
    return false;

  // Without a virtual base register, if the function has variable sized
  // objects, all fixed-size local references will be via the frame pointer.
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all GPR callee-saved registers get pushed.
  // FP, LR, X19-X28, D8-D15. 64-bits each.
  int64_t FPOffset = Offset - 16 * 20;
  // Estimate an offset from the stack pointer.
  // The incoming offset is relating to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset += MFI.getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  // and pick a real one.
  Offset += 128; // 128 bytes of spill slots

  // If there is a frame pointer, try using it.
  // The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  if (TFI->hasFP(MF) && isFrameOffsetLegal(MI, AArch64::FP, FPOffset))
    return false;

  // If we can reference via the stack pointer or base pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (isFrameOffsetLegal(MI, AArch64::SP, Offset))
    return false;

  // If even offset 0 is illegal, we don't want a virtual base register.
  if (!isFrameOffsetLegal(MI, AArch64::SP, 0))
    return false;

  // The offset likely isn't legal; we want to allocate a virtual base
  // register.
  return true;
}

bool AArch64RegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                             Register BaseReg,
                                             int64_t Offset) const {
  assert(MI && "Unable to get the legal offset for nil instruction.");
  StackOffset SaveOffset = StackOffset::getFixed(Offset);
  return isAArch64FrameOffsetLegal(*MI, SaveOffset) & AArch64FrameOffsetIsLegal;
}

/// Insert defining instruction(s) for BaseReg to be a pointer to FrameIdx
/// at the beginning of the basic block.
Register
AArch64RegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                  int FrameIdx,
                                                  int64_t Offset) const {
  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL; // Defaults to "unknown"
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();
  const MachineFunction &MF = *MBB->getParent();
  const AArch64InstrInfo *TII =
      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
  const MCInstrDesc &MCID = TII->get(AArch64::ADDXri);
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  Register BaseReg = MRI.createVirtualRegister(&AArch64::GPR64spRegClass);
  MRI.constrainRegClass(BaseReg, TII->getRegClass(MCID, 0, this, MF));
  unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);

  BuildMI(*MBB, Ins, DL, MCID, BaseReg)
      .addFrameIndex(FrameIdx)
      .addImm(Offset)
      .addImm(Shifter);

  return BaseReg;
}
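
// The base register is materialized by a single "ADDXri BaseReg, <FrameIdx>,
// Offset, LSL #0"; its frame-index operand is rewritten into FP/SP plus a
// constant later, when eliminateFrameIndex runs over the function.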

void AArch64RegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                            int64_t Offset) const {
  // ARM doesn't need the general 64-bit offsets
  StackOffset Off = StackOffset::getFixed(Offset);

  unsigned i = 0;
  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  const MachineFunction *MF = MI.getParent()->getParent();
  const AArch64InstrInfo *TII =
      MF->getSubtarget<AArch64Subtarget>().getInstrInfo();
  bool Done = rewriteAArch64FrameIndex(MI, i, BaseReg, Off, TII);
  assert(Done && "Unable to resolve frame index!");
  (void)Done;
}

// Create a scratch register for the frame index elimination in an instruction.
// This function has special handling of stack tagging loop pseudos, in which
// case it can also change the instruction opcode.
static Register
createScratchRegisterForInstruction(MachineInstr &MI, unsigned FIOperandNum,
                                    const AArch64InstrInfo *TII) {
  // ST*Gloop have a reserved scratch register in operand 1. Use it, and also
  // replace the instruction with the writeback variant because it will now
  // satisfy the operand constraints for it.
  Register ScratchReg;
  if (MI.getOpcode() == AArch64::STGloop ||
      MI.getOpcode() == AArch64::STZGloop) {
    assert(FIOperandNum == 3 &&
           "Wrong frame index operand for STGloop/STZGloop");
    unsigned Op = MI.getOpcode() == AArch64::STGloop ? AArch64::STGloop_wback
                                                     : AArch64::STZGloop_wback;
    ScratchReg = MI.getOperand(1).getReg();
    MI.getOperand(3).ChangeToRegister(ScratchReg, false, false, true);
    MI.setDesc(TII->get(Op));
    MI.tieOperands(1, 3);
  } else {
    ScratchReg =
        MI.getMF()->getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
    MI.getOperand(FIOperandNum)
        .ChangeToRegister(ScratchReg, false, false, true);
  }
  return ScratchReg;
}

void AArch64RegisterInfo::getOffsetOpcodes(
    const StackOffset &Offset, SmallVectorImpl<uint64_t> &Ops) const {
  // The smallest scalable element supported by scaled SVE addressing modes
  // is predicates, which are 2 scalable bytes in size. So the scalable byte
  // offset must always be a multiple of 2.
  assert(Offset.getScalable() % 2 == 0 && "Invalid frame offset");

  // Add fixed-sized offset using existing DIExpression interface.
  DIExpression::appendOffset(Ops, Offset.getFixed());

  unsigned VG = getDwarfRegNum(AArch64::VG, true);
  int64_t VGSized = Offset.getScalable() / 2;
  if (VGSized > 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(VGSized);
    Ops.append({dwarf::DW_OP_bregx, VG, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_plus);
  } else if (VGSized < 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(-VGSized);
    Ops.append({dwarf::DW_OP_bregx, VG, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_minus);
  }
}
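
// Worked example, assuming StackOffset(fixed = 8, scalable = 16): the fixed
// part appends DW_OP_plus_uconst 8, and the scalable part appends
// DW_OP_constu 8, DW_OP_bregx VG 0, DW_OP_mul, DW_OP_plus, i.e. 8 + 8 * VG
// bytes. VG is the number of 64-bit granules in a scalable vector, so
// scalable bytes scale by VG / 2; hence the division by 2 above.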

bool AArch64RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                              int SPAdj, unsigned FIOperandNum,
                                              RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const AArch64InstrInfo *TII =
      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  bool Tagged =
      MI.getOperand(FIOperandNum).getTargetFlags() & AArch64II::MO_TAGGED;
  Register FrameReg;

  // Special handling of dbg_value, stackmap, patchpoint, and statepoint
  // instructions.
  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT ||
      MI.getOpcode() == TargetOpcode::STATEPOINT) {
    StackOffset Offset =
        TFI->resolveFrameIndexReference(MF, FrameIndex, FrameReg,
                                        /*PreferFP=*/true,
                                        /*ForSimm=*/false);
    Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false /*isDef*/);
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset.getFixed());
    return false;
  }

  if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE) {
    MachineOperand &FI = MI.getOperand(FIOperandNum);
    StackOffset Offset = TFI->getNonLocalFrameIndexReference(MF, FrameIndex);
    assert(!Offset.getScalable() &&
           "Frame offsets with a scalable component are not supported");
    FI.ChangeToImmediate(Offset.getFixed());
    return false;
  }

  StackOffset Offset;
  if (MI.getOpcode() == AArch64::TAGPstack) {
    // TAGPstack must use the virtual frame register in its 3rd operand.
    const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
    FrameReg = MI.getOperand(3).getReg();
    Offset = StackOffset::getFixed(MFI.getObjectOffset(FrameIndex) +
                                   AFI->getTaggedBasePointerOffset());
  } else if (Tagged) {
    StackOffset SPOffset = StackOffset::getFixed(
        MFI.getObjectOffset(FrameIndex) + (int64_t)MFI.getStackSize());
    if (MFI.hasVarSizedObjects() ||
        isAArch64FrameOffsetLegal(MI, SPOffset, nullptr, nullptr, nullptr) !=
            (AArch64FrameOffsetCanUpdate | AArch64FrameOffsetIsLegal)) {
      // Can't update to SP + offset in place. Precalculate the tagged pointer
      // in a scratch register.
      Offset = TFI->resolveFrameIndexReference(
          MF, FrameIndex, FrameReg, /*PreferFP=*/false, /*ForSimm=*/true);
      Register ScratchReg =
          MF.getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
      emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset,
                      TII);
      BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(AArch64::LDG), ScratchReg)
          .addReg(ScratchReg)
          .addReg(ScratchReg)
          .addImm(0);
      MI.getOperand(FIOperandNum)
          .ChangeToRegister(ScratchReg, false, false, true);
      return false;
    }
    FrameReg = AArch64::SP;
    Offset = StackOffset::getFixed(MFI.getObjectOffset(FrameIndex) +
                                   (int64_t)MFI.getStackSize());
  } else {
    Offset = TFI->resolveFrameIndexReference(
        MF, FrameIndex, FrameReg, /*PreferFP=*/false, /*ForSimm=*/true);
  }

  // Modify MI as necessary to handle as much of 'Offset' as possible.
  if (rewriteAArch64FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII))
    return true;

  assert((!RS || !RS->isScavengingFrameIndex(FrameIndex)) &&
         "Emergency spill slot is out of reach");

  // If we get here, the immediate doesn't fit into the instruction. We folded
  // as much as possible above. Handle the rest, providing a register that is
  // SP+LargeImm.
  Register ScratchReg =
      createScratchRegisterForInstruction(MI, FIOperandNum, TII);
  emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset, TII);
  return false;
}

unsigned AArch64RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                                  MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);

  switch (RC->getID()) {
  default:
    return 0;
  case AArch64::GPR32RegClassID:
  case AArch64::GPR32spRegClassID:
  case AArch64::GPR32allRegClassID:
  case AArch64::GPR64spRegClassID:
  case AArch64::GPR64allRegClassID:
  case AArch64::GPR64RegClassID:
  case AArch64::GPR32commonRegClassID:
  case AArch64::GPR64commonRegClassID:
    return 32 - 1                               // XZR/SP
           - (TFI->hasFP(MF) || TT.isOSDarwin()) // FP
           - MF.getSubtarget<AArch64Subtarget>().getNumXRegisterReserved()
           - hasBasePointer(MF); // X19
  case AArch64::FPR8RegClassID:
  case AArch64::FPR16RegClassID:
  case AArch64::FPR32RegClassID:
  case AArch64::FPR64RegClassID:
  case AArch64::FPR128RegClassID:
    return 32;

  case AArch64::MatrixIndexGPR32_8_11RegClassID:
  case AArch64::MatrixIndexGPR32_12_15RegClassID:
    return 4;

  case AArch64::DDRegClassID:
  case AArch64::DDDRegClassID:
  case AArch64::DDDDRegClassID:
  case AArch64::QQRegClassID:
  case AArch64::QQQRegClassID:
  case AArch64::QQQQRegClassID:
    return 32;

  case AArch64::FPR128_loRegClassID:
  case AArch64::FPR64_loRegClassID:
  case AArch64::FPR16_loRegClassID:
    return 16;
  case AArch64::FPR128_0to7RegClassID:
    return 8;
  }
}

unsigned AArch64RegisterInfo::getLocalAddressRegister(
    const MachineFunction &MF) const {
  const auto &MFI = MF.getFrameInfo();
  if (!MF.hasEHFunclets() && !MFI.hasVarSizedObjects())
    return AArch64::SP;
  else if (hasStackRealignment(MF))
    return getBaseRegister();
  return getFrameRegister(MF);
}

/// SrcRC and DstRC will be morphed into NewRC if this returns true.
bool AArch64RegisterInfo::shouldCoalesce(
    MachineInstr *MI, const TargetRegisterClass *SrcRC, unsigned SubReg,
    const TargetRegisterClass *DstRC, unsigned DstSubReg,
    const TargetRegisterClass *NewRC, LiveIntervals &LIS) const {
  MachineRegisterInfo &MRI = MI->getMF()->getRegInfo();

  if (MI->isCopy() &&
      ((DstRC->getID() == AArch64::GPR64RegClassID) ||
       (DstRC->getID() == AArch64::GPR64commonRegClassID)) &&
      MI->getOperand(0).getSubReg() && MI->getOperand(1).getSubReg())
    // Do not coalesce in the case of a 32-bit subregister copy
    // which implements a 32 to 64 bit zero extension
    // which relies on the upper 32 bits being zeroed.
    return false;

  auto IsCoalescerBarrier = [](const MachineInstr &MI) {
    switch (MI.getOpcode()) {
    case AArch64::COALESCER_BARRIER_FPR16:
    case AArch64::COALESCER_BARRIER_FPR32:
    case AArch64::COALESCER_BARRIER_FPR64:
    case AArch64::COALESCER_BARRIER_FPR128:
      return true;
    default:
      return false;
    }
  };

  // For calls that temporarily have to toggle streaming mode as part of the
  // call-sequence, we need to be more careful when coalescing copy
  // instructions so that we don't end up coalescing the NEON/FP result or
  // argument register with a whole Z-register, such that after coalescing
  // the register allocator will try to spill/reload the entire Z register.
  //
  // We do this by checking if the node has any defs/uses that are
  // COALESCER_BARRIER pseudos. These are 'nops' in practice, but they exist
  // to instruct the coalescer to avoid coalescing the copy.
  if (MI->isCopy() && SubReg != DstSubReg &&
      (AArch64::ZPRRegClass.hasSubClassEq(DstRC) ||
       AArch64::ZPRRegClass.hasSubClassEq(SrcRC))) {
    unsigned SrcReg = MI->getOperand(1).getReg();
    if (any_of(MRI.def_instructions(SrcReg), IsCoalescerBarrier))
      return false;
    unsigned DstReg = MI->getOperand(0).getReg();
    if (any_of(MRI.use_nodbg_instructions(DstReg), IsCoalescerBarrier))
      return false;
  }

  return true;
}

bool AArch64RegisterInfo::shouldAnalyzePhysregInMachineLoopInfo(
    MCRegister R) const {
  return R == AArch64::VG;
}