//===- AArch64RegisterInfo.cpp - AArch64 Register Information -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetRegisterInfo
// class.
//
//===----------------------------------------------------------------------===//

#include "AArch64RegisterInfo.h"
#include "AArch64FrameLowering.h"
#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64InstPrinter.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/TargetParser/Triple.h"

using namespace llvm;

#define GET_CC_REGISTER_LISTS
#include "AArch64GenCallingConv.inc"
#define GET_REGINFO_TARGET_DESC
#include "AArch64GenRegisterInfo.inc"

AArch64RegisterInfo::AArch64RegisterInfo(const Triple &TT)
    : AArch64GenRegisterInfo(AArch64::LR), TT(TT) {
  AArch64_MC::initLLVMToCVRegMapping(this);
}

/// Return whether the register needs a CFI entry. Not all unwinders may know
/// about SVE registers, so we assume the lowest common denominator, i.e. the
/// callee-saves required by the base ABI. For the SVE registers z8-z15 only the
/// lower 64-bits (d8-d15) need to be saved. The lower 64-bits subreg is
/// returned in \p RegToUseForCFI.
bool AArch64RegisterInfo::regNeedsCFI(unsigned Reg,
                                      unsigned &RegToUseForCFI) const {
  if (AArch64::PPRRegClass.contains(Reg))
    return false;

  if (AArch64::ZPRRegClass.contains(Reg)) {
    RegToUseForCFI = getSubReg(Reg, AArch64::dsub);
    for (int I = 0; CSR_AArch64_AAPCS_SaveList[I]; ++I) {
      if (CSR_AArch64_AAPCS_SaveList[I] == RegToUseForCFI)
        return true;
    }
    return false;
  }

  RegToUseForCFI = Reg;
  return true;
}
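
// For example, querying Z8 sets RegToUseForCFI to D8, which appears in
// CSR_AArch64_AAPCS_SaveList, so a CFI entry is emitted for it; querying Z16
// yields D16, which is not a base-ABI callee-save, so no entry is needed.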

const MCPhysReg *
AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");

  if (MF->getFunction().getCallingConv() == CallingConv::GHC)
    // GHC set of callee saved regs is empty as all those regs are
    // used for passing STG regs around
    return CSR_AArch64_NoRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::AnyReg)
    return CSR_AArch64_AllRegs_SaveList;

  // Darwin has its own CSR_AArch64_AAPCS_SaveList, which means most CSR save
  // lists depending on that will need to have their Darwin variant as well.
  if (MF->getSubtarget<AArch64Subtarget>().isTargetDarwin())
    return getDarwinCalleeSavedRegs(MF);

  if (MF->getFunction().getCallingConv() == CallingConv::CFGuard_Check)
    return CSR_Win_AArch64_CFGuard_Check_SaveList;
  if (MF->getSubtarget<AArch64Subtarget>().isTargetWindows())
    return CSR_Win_AArch64_AAPCS_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall)
    return CSR_AArch64_AAVPCS_SaveList;
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SVE_VectorCall)
    return CSR_AArch64_SVE_AAPCS_SaveList;
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0)
    report_fatal_error(
        "Calling convention AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0 is "
        "only supported to improve calls to SME ACLE save/restore/disable-za "
        "functions, and is not intended to be used beyond that scope.");
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2)
    report_fatal_error(
        "Calling convention AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2 is "
        "only supported to improve calls to SME ACLE __arm_sme_state "
        "and is not intended to be used beyond that scope.");
  if (MF->getSubtarget<AArch64Subtarget>().getTargetLowering()
          ->supportSwiftError() &&
      MF->getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_AArch64_AAPCS_SwiftError_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::SwiftTail)
    return CSR_AArch64_AAPCS_SwiftTail_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
    return CSR_AArch64_RT_MostRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::Win64)
    // This is for OSes other than Windows; Windows is a separate case further
    // above.
    return CSR_AArch64_AAPCS_X18_SaveList;
  if (MF->getInfo<AArch64FunctionInfo>()->isSVECC())
    return CSR_AArch64_SVE_AAPCS_SaveList;
  return CSR_AArch64_AAPCS_SaveList;
}

const MCPhysReg *
AArch64RegisterInfo::getDarwinCalleeSavedRegs(const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  assert(MF->getSubtarget<AArch64Subtarget>().isTargetDarwin() &&
         "Invalid subtarget for getDarwinCalleeSavedRegs");

  if (MF->getFunction().getCallingConv() == CallingConv::CFGuard_Check)
    report_fatal_error(
        "Calling convention CFGuard_Check is unsupported on Darwin.");
  if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall)
    return CSR_Darwin_AArch64_AAVPCS_SaveList;
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SVE_VectorCall)
    report_fatal_error(
        "Calling convention SVE_VectorCall is unsupported on Darwin.");
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0)
    report_fatal_error(
        "Calling convention AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0 is "
        "only supported to improve calls to SME ACLE save/restore/disable-za "
        "functions, and is not intended to be used beyond that scope.");
  if (MF->getFunction().getCallingConv() ==
      CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2)
    report_fatal_error(
        "Calling convention AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2 is "
        "only supported to improve calls to SME ACLE __arm_sme_state "
        "and is not intended to be used beyond that scope.");
  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS)
    return MF->getInfo<AArch64FunctionInfo>()->isSplitCSR()
               ? CSR_Darwin_AArch64_CXX_TLS_PE_SaveList
               : CSR_Darwin_AArch64_CXX_TLS_SaveList;
  if (MF->getSubtarget<AArch64Subtarget>().getTargetLowering()
          ->supportSwiftError() &&
      MF->getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_Darwin_AArch64_AAPCS_SwiftError_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::SwiftTail)
    return CSR_Darwin_AArch64_AAPCS_SwiftTail_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
    return CSR_Darwin_AArch64_RT_MostRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::Win64)
    return CSR_Darwin_AArch64_AAPCS_Win64_SaveList;
  return CSR_Darwin_AArch64_AAPCS_SaveList;
}

const MCPhysReg *AArch64RegisterInfo::getCalleeSavedRegsViaCopy(
    const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
      MF->getInfo<AArch64FunctionInfo>()->isSplitCSR())
    return CSR_Darwin_AArch64_CXX_TLS_ViaCopy_SaveList;
  return nullptr;
}

void AArch64RegisterInfo::UpdateCustomCalleeSavedRegs(
    MachineFunction &MF) const {
  const MCPhysReg *CSRs = getCalleeSavedRegs(&MF);
  SmallVector<MCPhysReg, 32> UpdatedCSRs;
  for (const MCPhysReg *I = CSRs; *I; ++I)
    UpdatedCSRs.push_back(*I);

  for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegCustomCalleeSaved(i)) {
      UpdatedCSRs.push_back(AArch64::GPR64commonRegClass.getRegister(i));
    }
  }
  // Register lists are zero-terminated.
  UpdatedCSRs.push_back(0);
  MF.getRegInfo().setCalleeSavedRegs(UpdatedCSRs);
}

const TargetRegisterClass *
AArch64RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
                                           unsigned Idx) const {
  // edge case for GPR/FPR register classes
  if (RC == &AArch64::GPR32allRegClass && Idx == AArch64::hsub)
    return &AArch64::FPR32RegClass;
  else if (RC == &AArch64::GPR64allRegClass && Idx == AArch64::hsub)
    return &AArch64::FPR64RegClass;

  // Forward to TableGen's default version.
  return AArch64GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}

const uint32_t *
AArch64RegisterInfo::getDarwinCallPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  assert(MF.getSubtarget<AArch64Subtarget>().isTargetDarwin() &&
         "Invalid subtarget for getDarwinCallPreservedMask");

  if (CC == CallingConv::CXX_FAST_TLS)
    return CSR_Darwin_AArch64_CXX_TLS_RegMask;
  if (CC == CallingConv::AArch64_VectorCall)
    return CSR_Darwin_AArch64_AAVPCS_RegMask;
  if (CC == CallingConv::AArch64_SVE_VectorCall)
    report_fatal_error(
        "Calling convention SVE_VectorCall is unsupported on Darwin.");
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0)
    report_fatal_error(
        "Calling convention AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0 is "
        "unsupported on Darwin.");
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2)
    report_fatal_error(
        "Calling convention AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2 is "
        "unsupported on Darwin.");
  if (CC == CallingConv::CFGuard_Check)
    report_fatal_error(
        "Calling convention CFGuard_Check is unsupported on Darwin.");

  if (MF.getSubtarget<AArch64Subtarget>().getTargetLowering()
          ->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return CSR_Darwin_AArch64_AAPCS_SwiftError_RegMask;
  if (CC == CallingConv::SwiftTail)
    return CSR_Darwin_AArch64_AAPCS_SwiftTail_RegMask;
  if (CC == CallingConv::PreserveMost)
    return CSR_Darwin_AArch64_RT_MostRegs_RegMask;
  return CSR_Darwin_AArch64_AAPCS_RegMask;
}

const uint32_t *
AArch64RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  bool SCS = MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack);
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls
    return SCS ? CSR_AArch64_NoRegs_SCS_RegMask : CSR_AArch64_NoRegs_RegMask;
  if (CC == CallingConv::AnyReg)
    return SCS ? CSR_AArch64_AllRegs_SCS_RegMask : CSR_AArch64_AllRegs_RegMask;

  // All the following calling conventions are handled differently on Darwin.
  if (MF.getSubtarget<AArch64Subtarget>().isTargetDarwin()) {
    if (SCS)
      report_fatal_error("ShadowCallStack attribute not supported on Darwin.");
    return getDarwinCallPreservedMask(MF, CC);
  }

  if (CC == CallingConv::AArch64_VectorCall)
    return SCS ? CSR_AArch64_AAVPCS_SCS_RegMask : CSR_AArch64_AAVPCS_RegMask;
  if (CC == CallingConv::AArch64_SVE_VectorCall)
    return SCS ? CSR_AArch64_SVE_AAPCS_SCS_RegMask
               : CSR_AArch64_SVE_AAPCS_RegMask;
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0)
    return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0_RegMask;
  if (CC == CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2)
    return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2_RegMask;
  if (CC == CallingConv::CFGuard_Check)
    return CSR_Win_AArch64_CFGuard_Check_RegMask;
  if (MF.getSubtarget<AArch64Subtarget>().getTargetLowering()
          ->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return SCS ? CSR_AArch64_AAPCS_SwiftError_SCS_RegMask
               : CSR_AArch64_AAPCS_SwiftError_RegMask;
  if (CC == CallingConv::SwiftTail) {
    if (SCS)
      report_fatal_error(
          "ShadowCallStack attribute not supported with swifttail");
    return CSR_AArch64_AAPCS_SwiftTail_RegMask;
  }
  if (CC == CallingConv::PreserveMost)
    return SCS ? CSR_AArch64_RT_MostRegs_SCS_RegMask
               : CSR_AArch64_RT_MostRegs_RegMask;
  else
    return SCS ? CSR_AArch64_AAPCS_SCS_RegMask : CSR_AArch64_AAPCS_RegMask;
}

const uint32_t *AArch64RegisterInfo::getCustomEHPadPreservedMask(
    const MachineFunction &MF) const {
  if (MF.getSubtarget<AArch64Subtarget>().isTargetLinux())
    return CSR_AArch64_AAPCS_RegMask;

  return nullptr;
}

const uint32_t *AArch64RegisterInfo::getTLSCallPreservedMask() const {
  if (TT.isOSDarwin())
    return CSR_Darwin_AArch64_TLS_RegMask;

  assert(TT.isOSBinFormatELF() && "Invalid target");
  return CSR_AArch64_TLS_ELF_RegMask;
}

void AArch64RegisterInfo::UpdateCustomCallPreservedMask(
    MachineFunction &MF, const uint32_t **Mask) const {
  uint32_t *UpdatedMask = MF.allocateRegMask();
  unsigned RegMaskSize = MachineOperand::getRegMaskSize(getNumRegs());
  memcpy(UpdatedMask, *Mask, sizeof(UpdatedMask[0]) * RegMaskSize);

  for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegCustomCalleeSaved(i)) {
      for (MCSubRegIterator SubReg(AArch64::GPR64commonRegClass.getRegister(i),
                                   this, true);
           SubReg.isValid(); ++SubReg) {
        // See TargetRegisterInfo::getCallPreservedMask for how to interpret
        // the register mask.
        UpdatedMask[*SubReg / 32] |= 1u << (*SubReg % 32);
      }
    }
  }
  *Mask = UpdatedMask;
}
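
// In the mask updated above, physical register N is marked preserved by
// setting bit (N % 32) of 32-bit word (N / 32); marking an X register also
// marks all of its sub-registers (e.g. the W register) via MCSubRegIterator.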

const uint32_t *AArch64RegisterInfo::getSMStartStopCallPreservedMask() const {
  return CSR_AArch64_SMStartStop_RegMask;
}

const uint32_t *
AArch64RegisterInfo::SMEABISupportRoutinesCallPreservedMaskFromX0() const {
  return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0_RegMask;
}

const uint32_t *AArch64RegisterInfo::getNoPreservedMask() const {
  return CSR_AArch64_NoRegs_RegMask;
}

const uint32_t *
AArch64RegisterInfo::getThisReturnPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  // This should return a register mask that is the same as that returned by
  // getCallPreservedMask but that additionally preserves the register used for
  // the first i64 argument (which must also be the register used to return a
  // single i64 return value)
  //
  // In case that the calling convention does not use the same register for
  // both, the function should return NULL (does not currently apply)
  assert(CC != CallingConv::GHC && "should not be GHC calling convention.");
  if (MF.getSubtarget<AArch64Subtarget>().isTargetDarwin())
    return CSR_Darwin_AArch64_AAPCS_ThisReturn_RegMask;
  return CSR_AArch64_AAPCS_ThisReturn_RegMask;
}

const uint32_t *AArch64RegisterInfo::getWindowsStackProbePreservedMask() const {
  return CSR_AArch64_StackProbe_Windows_RegMask;
}

std::optional<std::string>
AArch64RegisterInfo::explainReservedReg(const MachineFunction &MF,
                                        MCRegister PhysReg) const {
  if (hasBasePointer(MF) && MCRegisterInfo::regsOverlap(PhysReg, AArch64::X19))
    return std::string("X19 is used as the frame base pointer register.");

  if (MF.getSubtarget<AArch64Subtarget>().isWindowsArm64EC()) {
    bool warn = false;
    if (MCRegisterInfo::regsOverlap(PhysReg, AArch64::X13) ||
        MCRegisterInfo::regsOverlap(PhysReg, AArch64::X14) ||
        MCRegisterInfo::regsOverlap(PhysReg, AArch64::X23) ||
        MCRegisterInfo::regsOverlap(PhysReg, AArch64::X24) ||
        MCRegisterInfo::regsOverlap(PhysReg, AArch64::X28))
      warn = true;

    for (unsigned i = AArch64::B16; i <= AArch64::B31; ++i)
      if (MCRegisterInfo::regsOverlap(PhysReg, i))
        warn = true;

    if (warn)
      return std::string(AArch64InstPrinter::getRegisterName(PhysReg)) +
             " is clobbered by asynchronous signals when using Arm64EC.";
  }

  return {};
}

BitVector
AArch64RegisterInfo::getStrictlyReservedRegs(const MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);

  // FIXME: avoid re-calculating this every time.
  BitVector Reserved(getNumRegs());
  markSuperRegs(Reserved, AArch64::WSP);
  markSuperRegs(Reserved, AArch64::WZR);

  if (TFI->hasFP(MF) || TT.isOSDarwin())
    markSuperRegs(Reserved, AArch64::W29);

  if (MF.getSubtarget<AArch64Subtarget>().isWindowsArm64EC()) {
    // x13, x14, x23, x24, x28, and v16-v31 are clobbered by asynchronous
    // signals, so we can't ever use them.
    markSuperRegs(Reserved, AArch64::W13);
    markSuperRegs(Reserved, AArch64::W14);
    markSuperRegs(Reserved, AArch64::W23);
    markSuperRegs(Reserved, AArch64::W24);
    markSuperRegs(Reserved, AArch64::W28);
    for (unsigned i = AArch64::B16; i <= AArch64::B31; ++i)
      markSuperRegs(Reserved, i);
  }

  for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegisterReserved(i))
      markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
  }

  if (hasBasePointer(MF))
    markSuperRegs(Reserved, AArch64::W19);

  // SLH uses register W16/X16 as the taint register.
  if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening))
    markSuperRegs(Reserved, AArch64::W16);

  // SME tiles are not allocatable.
  if (MF.getSubtarget<AArch64Subtarget>().hasSME()) {
    for (MCSubRegIterator SubReg(AArch64::ZA, this, /*self=*/true);
         SubReg.isValid(); ++SubReg)
      Reserved.set(*SubReg);
  }

  markSuperRegs(Reserved, AArch64::FPCR);

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}

BitVector
AArch64RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved = getStrictlyReservedRegs(MF);

  for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegisterReservedForRA(i))
      markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
  }

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}

bool AArch64RegisterInfo::isReservedReg(const MachineFunction &MF,
                                        MCRegister Reg) const {
  return getReservedRegs(MF)[Reg];
}

bool AArch64RegisterInfo::isStrictlyReservedReg(const MachineFunction &MF,
                                                MCRegister Reg) const {
  return getStrictlyReservedRegs(MF)[Reg];
}

bool AArch64RegisterInfo::isAnyArgRegReserved(const MachineFunction &MF) const {
  return llvm::any_of(*AArch64::GPR64argRegClass.MC, [this, &MF](MCPhysReg r) {
    return isStrictlyReservedReg(MF, r);
  });
}

void AArch64RegisterInfo::emitReservedArgRegCallError(
    const MachineFunction &MF) const {
  const Function &F = MF.getFunction();
  F.getContext().diagnose(DiagnosticInfoUnsupported{
      F, ("AArch64 doesn't support function calls if any of the argument "
          "registers is reserved.")});
}

bool AArch64RegisterInfo::isAsmClobberable(const MachineFunction &MF,
                                           MCRegister PhysReg) const {
  // SLH uses register X16 as the taint register but it will fall back to a
  // different method if the user clobbers it. So X16 is not reserved for
  // inline asm but is for normal codegen.
  if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening) &&
      MCRegisterInfo::regsOverlap(PhysReg, AArch64::X16))
    return true;

  return !isReservedReg(MF, PhysReg);
}

const TargetRegisterClass *
AArch64RegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                        unsigned Kind) const {
  return &AArch64::GPR64spRegClass;
}

const TargetRegisterClass *
AArch64RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &AArch64::CCRRegClass)
    return &AArch64::GPR64RegClass; // Only MSR & MRS copy NZCV.
  return RC;
}

unsigned AArch64RegisterInfo::getBaseRegister() const { return AArch64::X19; }

bool AArch64RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // In the presence of variable sized objects or funclets, if the fixed stack
  // size is large enough that referencing from the FP won't result in things
  // being in range relatively often, we can use a base pointer to allow access
  // from the other direction like the SP normally works.
  //
  // Furthermore, if variable sized objects are present and the stack needs to
  // be dynamically re-aligned, the base pointer is the only reliable way to
  // reference the locals.
  if (MFI.hasVarSizedObjects() || MF.hasEHFunclets()) {
    if (hasStackRealignment(MF))
      return true;

    if (MF.getSubtarget<AArch64Subtarget>().hasSVE()) {
      const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
      // Frames that have variable sized objects and scalable SVE objects
      // should always use a basepointer.
      if (!AFI->hasCalculatedStackSizeSVE() || AFI->getStackSizeSVE())
        return true;
    }

    // Conservatively estimate whether the negative offset from the frame
    // pointer will be sufficient to reach. If a function has a smallish
    // frame, it's less likely to have lots of spills and callee saved
    // space, so it's all more likely to be within range of the frame pointer.
    // If it's wrong, we'll materialize the constant and still get to the
    // object; it's just suboptimal. Negative offsets use the unscaled
    // load/store instructions, which have a 9-bit signed immediate.
    return MFI.getLocalFrameSize() >= 256;
  }

  return false;
}
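
// The unscaled LDUR/STUR forms encode a 9-bit signed immediate, i.e. offsets
// in [-256, 255], which is where the 256-byte local-frame threshold above
// comes from.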

bool AArch64RegisterInfo::isArgumentRegister(const MachineFunction &MF,
                                             MCRegister Reg) const {
  CallingConv::ID CC = MF.getFunction().getCallingConv();
  const AArch64Subtarget &STI = MF.getSubtarget<AArch64Subtarget>();
  bool IsVarArg = STI.isCallingConvWin64(MF.getFunction().getCallingConv());

  auto HasReg = [](ArrayRef<MCRegister> RegList, MCRegister Reg) {
    return llvm::is_contained(RegList, Reg);
  };

  switch (CC) {
  default:
    report_fatal_error("Unsupported calling convention.");
  case CallingConv::WebKit_JS:
    return HasReg(CC_AArch64_WebKit_JS_ArgRegs, Reg);
  case CallingConv::GHC:
    return HasReg(CC_AArch64_GHC_ArgRegs, Reg);
  case CallingConv::C:
  case CallingConv::Fast:
  case CallingConv::PreserveMost:
  case CallingConv::CXX_FAST_TLS:
  case CallingConv::Swift:
  case CallingConv::SwiftTail:
  case CallingConv::Tail:
    if (STI.isTargetWindows() && IsVarArg)
      return HasReg(CC_AArch64_Win64_VarArg_ArgRegs, Reg);
    if (!STI.isTargetDarwin()) {
      switch (CC) {
      default:
        return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg);
      case CallingConv::Swift:
      case CallingConv::SwiftTail:
        return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg) ||
               HasReg(CC_AArch64_AAPCS_Swift_ArgRegs, Reg);
      }
    }
    if (!IsVarArg) {
      switch (CC) {
      default:
        return HasReg(CC_AArch64_DarwinPCS_ArgRegs, Reg);
      case CallingConv::Swift:
      case CallingConv::SwiftTail:
        return HasReg(CC_AArch64_DarwinPCS_ArgRegs, Reg) ||
               HasReg(CC_AArch64_DarwinPCS_Swift_ArgRegs, Reg);
      }
    }
    if (STI.isTargetILP32())
      return HasReg(CC_AArch64_DarwinPCS_ILP32_VarArg_ArgRegs, Reg);
    return HasReg(CC_AArch64_DarwinPCS_VarArg_ArgRegs, Reg);
  case CallingConv::Win64:
    if (IsVarArg)
      return HasReg(CC_AArch64_Win64_VarArg_ArgRegs, Reg);
    return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg);
  case CallingConv::CFGuard_Check:
    return HasReg(CC_AArch64_Win64_CFGuard_Check_ArgRegs, Reg);
  case CallingConv::AArch64_VectorCall:
  case CallingConv::AArch64_SVE_VectorCall:
  case CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0:
  case CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2:
    return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg);
  }
}

Register
AArch64RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? AArch64::FP : AArch64::SP;
}

bool AArch64RegisterInfo::requiresRegisterScavenging(
    const MachineFunction &MF) const {
  return true;
}

bool AArch64RegisterInfo::requiresVirtualBaseRegisters(
    const MachineFunction &MF) const {
  return true;
}

bool
AArch64RegisterInfo::useFPForScavengingIndex(const MachineFunction &MF) const {
  // This function indicates whether the emergency spillslot should be placed
  // close to the beginning of the stackframe (closer to FP) or the end
  // (closer to SP).
  //
  // The beginning works most reliably if we have a frame pointer.
  // In the presence of any non-constant space between FP and locals
  // (e.g. in case of stack realignment or a scalable SVE area), it is
  // better to use SP or BP.
  const AArch64FrameLowering &TFI = *getFrameLowering(MF);
  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  assert((!MF.getSubtarget<AArch64Subtarget>().hasSVE() ||
          AFI->hasCalculatedStackSizeSVE()) &&
         "Expected SVE area to be calculated by this point");
  return TFI.hasFP(MF) && !hasStackRealignment(MF) && !AFI->getStackSizeSVE();
}

bool AArch64RegisterInfo::requiresFrameIndexScavenging(
    const MachineFunction &MF) const {
  return true;
}

bool
AArch64RegisterInfo::cannotEliminateFrame(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (MF.getTarget().Options.DisableFramePointerElim(MF) && MFI.adjustsStack())
    return true;
  return MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken();
}

/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool AArch64RegisterInfo::needsFrameBaseReg(MachineInstr *MI,
                                            int64_t Offset) const {
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i)
    assert(i < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand!");

  // It's the load/store FI references that cause issues, as it can be
  // difficult to materialize the offset if it won't fit in the literal field.
  // Estimate based on the size of the local frame and some conservative
  // assumptions about the rest of the stack frame (note, this is pre-regalloc,
  // so we don't know everything for certain yet) whether this offset is likely
  // to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  if (!MI->mayLoad() && !MI->mayStore())
    return false;

  // Without a virtual base register, if the function has variable sized
  // objects, all fixed-size local references will be via the frame pointer.
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all GPR callee-saved registers get pushed.
  // FP, LR, X19-X28, D8-D15. 64-bits each.
  int64_t FPOffset = Offset - 16 * 20;
  // Estimate an offset from the stack pointer.
  // The incoming offset is relative to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset += MFI.getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  // and pick a real one.
  Offset += 128; // 128 bytes of spill slots

  // If there is a frame pointer, try using it.
  // The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  if (TFI->hasFP(MF) && isFrameOffsetLegal(MI, AArch64::FP, FPOffset))
    return false;

  // If we can reference via the stack pointer or base pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (isFrameOffsetLegal(MI, AArch64::SP, Offset))
    return false;

  // If even offset 0 is illegal, we don't want a virtual base register.
  if (!isFrameOffsetLegal(MI, AArch64::SP, 0))
    return false;

  // The offset likely isn't legal; we want to allocate a virtual base
  // register.
  return true;
}
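
// Worked example: for an instruction at incoming SP-relative offset -300 in a
// function with a 100-byte local block, the estimates above give
// FPOffset = -300 - 320 = -620 and an SP-relative guess of
// -300 + 100 + 128 = -72; a virtual base register is requested only if
// neither estimate is encodable for this load/store per isFrameOffsetLegal.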

bool AArch64RegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                             Register BaseReg,
                                             int64_t Offset) const {
  assert(MI && "Unable to get the legal offset for nil instruction.");
  StackOffset SaveOffset = StackOffset::getFixed(Offset);
  return isAArch64FrameOffsetLegal(*MI, SaveOffset) & AArch64FrameOffsetIsLegal;
}

/// Insert defining instruction(s) for BaseReg to be a pointer to FrameIdx
/// at the beginning of the basic block.
Register
AArch64RegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                  int FrameIdx,
                                                  int64_t Offset) const {
  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL; // Defaults to "unknown"
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();
  const MachineFunction &MF = *MBB->getParent();
  const AArch64InstrInfo *TII =
      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
  const MCInstrDesc &MCID = TII->get(AArch64::ADDXri);
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  Register BaseReg = MRI.createVirtualRegister(&AArch64::GPR64spRegClass);
  MRI.constrainRegClass(BaseReg, TII->getRegClass(MCID, 0, this, MF));
  unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);

  BuildMI(*MBB, Ins, DL, MCID, BaseReg)
      .addFrameIndex(FrameIdx)
      .addImm(Offset)
      .addImm(Shifter);

  return BaseReg;
}
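
// The instruction built above has the form
//   %base:gpr64sp = ADDXri <fi#FrameIdx>, Offset, 0
// i.e. the address of the frame index plus Offset with no shift; the frame
// index operand is rewritten to a concrete register later, via
// resolveFrameIndex below.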

void AArch64RegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                            int64_t Offset) const {
  // ARM doesn't need the general 64-bit offsets
  StackOffset Off = StackOffset::getFixed(Offset);

  unsigned i = 0;
  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  const MachineFunction *MF = MI.getParent()->getParent();
  const AArch64InstrInfo *TII =
      MF->getSubtarget<AArch64Subtarget>().getInstrInfo();
  bool Done = rewriteAArch64FrameIndex(MI, i, BaseReg, Off, TII);
  assert(Done && "Unable to resolve frame index!");
  (void)Done;
}

// Create a scratch register for the frame index elimination in an instruction.
// This function has special handling of stack tagging loop pseudos, in which
// case it can also change the instruction opcode.
static Register
createScratchRegisterForInstruction(MachineInstr &MI, unsigned FIOperandNum,
                                    const AArch64InstrInfo *TII) {
  // ST*Gloop have a reserved scratch register in operand 1. Use it, and also
  // replace the instruction with the writeback variant because it will now
  // satisfy the operand constraints for it.
  Register ScratchReg;
  if (MI.getOpcode() == AArch64::STGloop ||
      MI.getOpcode() == AArch64::STZGloop) {
    assert(FIOperandNum == 3 &&
           "Wrong frame index operand for STGloop/STZGloop");
    unsigned Op = MI.getOpcode() == AArch64::STGloop ? AArch64::STGloop_wback
                                                     : AArch64::STZGloop_wback;
    ScratchReg = MI.getOperand(1).getReg();
    MI.getOperand(3).ChangeToRegister(ScratchReg, false, false, true);
    MI.setDesc(TII->get(Op));
    MI.tieOperands(1, 3);
  } else {
    ScratchReg =
        MI.getMF()->getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
    MI.getOperand(FIOperandNum)
        .ChangeToRegister(ScratchReg, false, false, true);
  }
  return ScratchReg;
}

void AArch64RegisterInfo::getOffsetOpcodes(
    const StackOffset &Offset, SmallVectorImpl<uint64_t> &Ops) const {
  // The smallest scalable elements supported by scaled SVE addressing modes
  // are predicates, which are 2 scalable bytes in size. So the scalable byte
  // offset must always be a multiple of 2.
  assert(Offset.getScalable() % 2 == 0 && "Invalid frame offset");

  // Add fixed-sized offset using existing DIExpression interface.
  DIExpression::appendOffset(Ops, Offset.getFixed());

  unsigned VG = getDwarfRegNum(AArch64::VG, true);
  int64_t VGSized = Offset.getScalable() / 2;
  if (VGSized > 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(VGSized);
    Ops.append({dwarf::DW_OP_bregx, VG, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_plus);
  } else if (VGSized < 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(-VGSized);
    Ops.append({dwarf::DW_OP_bregx, VG, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_minus);
  }
}
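
// For example, StackOffset::get(8, -16), i.e. 8 fixed bytes minus 16 scalable
// bytes, emits DW_OP_plus_uconst 8 followed by DW_OP_constu 8,
// DW_OP_bregx VG 0, DW_OP_mul, DW_OP_minus, computing "base + 8 - 8 * VG",
// where VG is the number of 64-bit granules in an SVE vector.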

bool AArch64RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                              int SPAdj, unsigned FIOperandNum,
                                              RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const AArch64InstrInfo *TII =
      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  bool Tagged =
      MI.getOperand(FIOperandNum).getTargetFlags() & AArch64II::MO_TAGGED;
  Register FrameReg;

  // Special handling of dbg_value, stackmap, patchpoint, and statepoint
  // instructions.
  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT ||
      MI.getOpcode() == TargetOpcode::STATEPOINT) {
    StackOffset Offset =
        TFI->resolveFrameIndexReference(MF, FrameIndex, FrameReg,
                                        /*PreferFP=*/true,
                                        /*ForSimm=*/false);
    Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false /*isDef*/);
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset.getFixed());
    return false;
  }

  if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE) {
    MachineOperand &FI = MI.getOperand(FIOperandNum);
    StackOffset Offset = TFI->getNonLocalFrameIndexReference(MF, FrameIndex);
    assert(!Offset.getScalable() &&
           "Frame offsets with a scalable component are not supported");
    FI.ChangeToImmediate(Offset.getFixed());
    return false;
  }

  StackOffset Offset;
  if (MI.getOpcode() == AArch64::TAGPstack) {
    // TAGPstack must use the virtual frame register in its 3rd operand.
    const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
    FrameReg = MI.getOperand(3).getReg();
    Offset = StackOffset::getFixed(MFI.getObjectOffset(FrameIndex) +
                                   AFI->getTaggedBasePointerOffset());
  } else if (Tagged) {
    StackOffset SPOffset = StackOffset::getFixed(
        MFI.getObjectOffset(FrameIndex) + (int64_t)MFI.getStackSize());
    if (MFI.hasVarSizedObjects() ||
        isAArch64FrameOffsetLegal(MI, SPOffset, nullptr, nullptr, nullptr) !=
            (AArch64FrameOffsetCanUpdate | AArch64FrameOffsetIsLegal)) {
      // Can't update to SP + offset in place. Precalculate the tagged pointer
      // in a scratch register.
      Offset = TFI->resolveFrameIndexReference(
          MF, FrameIndex, FrameReg, /*PreferFP=*/false, /*ForSimm=*/true);
      Register ScratchReg =
          MF.getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
      emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset,
                      TII);
      BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(AArch64::LDG), ScratchReg)
          .addReg(ScratchReg)
          .addReg(ScratchReg)
          .addImm(0);
      MI.getOperand(FIOperandNum)
          .ChangeToRegister(ScratchReg, false, false, true);
      return false;
    }
    FrameReg = AArch64::SP;
    Offset = StackOffset::getFixed(MFI.getObjectOffset(FrameIndex) +
                                   (int64_t)MFI.getStackSize());
  } else {
    Offset = TFI->resolveFrameIndexReference(
        MF, FrameIndex, FrameReg, /*PreferFP=*/false, /*ForSimm=*/true);
  }

  // Modify MI as necessary to handle as much of 'Offset' as possible
  if (rewriteAArch64FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII))
    return true;

  assert((!RS || !RS->isScavengingFrameIndex(FrameIndex)) &&
         "Emergency spill slot is out of reach");

  // If we get here, the immediate doesn't fit into the instruction. We folded
  // as much as possible above. Handle the rest, providing a register that is
  // SP+LargeImm.
  Register ScratchReg =
      createScratchRegisterForInstruction(MI, FIOperandNum, TII);
  emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset, TII);
  return false;
}

unsigned AArch64RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                                  MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);

  switch (RC->getID()) {
  default:
    return 0;
  case AArch64::GPR32RegClassID:
  case AArch64::GPR32spRegClassID:
  case AArch64::GPR32allRegClassID:
  case AArch64::GPR64spRegClassID:
  case AArch64::GPR64allRegClassID:
  case AArch64::GPR64RegClassID:
  case AArch64::GPR32commonRegClassID:
  case AArch64::GPR64commonRegClassID:
    return 32 - 1                                   // XZR/SP
              - (TFI->hasFP(MF) || TT.isOSDarwin()) // FP
              - MF.getSubtarget<AArch64Subtarget>().getNumXRegisterReserved()
              - hasBasePointer(MF); // X19
  case AArch64::FPR8RegClassID:
  case AArch64::FPR16RegClassID:
  case AArch64::FPR32RegClassID:
  case AArch64::FPR64RegClassID:
  case AArch64::FPR128RegClassID:
    return 32;

  case AArch64::MatrixIndexGPR32_8_11RegClassID:
  case AArch64::MatrixIndexGPR32_12_15RegClassID:
    return 4;

  case AArch64::DDRegClassID:
  case AArch64::DDDRegClassID:
  case AArch64::DDDDRegClassID:
  case AArch64::QQRegClassID:
  case AArch64::QQQRegClassID:
  case AArch64::QQQQRegClassID:
    return 32;

  case AArch64::FPR128_loRegClassID:
  case AArch64::FPR64_loRegClassID:
  case AArch64::FPR16_loRegClassID:
    return 16;
  }
}

unsigned AArch64RegisterInfo::getLocalAddressRegister(
    const MachineFunction &MF) const {
  const auto &MFI = MF.getFrameInfo();
  if (!MF.hasEHFunclets() && !MFI.hasVarSizedObjects())
    return AArch64::SP;
  else if (hasStackRealignment(MF))
    return getBaseRegister();
  return getFrameRegister(MF);
}

/// SrcRC and DstRC will be morphed into NewRC if this returns true
bool AArch64RegisterInfo::shouldCoalesce(
    MachineInstr *MI, const TargetRegisterClass *SrcRC, unsigned SubReg,
    const TargetRegisterClass *DstRC, unsigned DstSubReg,
    const TargetRegisterClass *NewRC, LiveIntervals &LIS) const {
  if (MI->isCopy() &&
      ((DstRC->getID() == AArch64::GPR64RegClassID) ||
       (DstRC->getID() == AArch64::GPR64commonRegClassID)) &&
      MI->getOperand(0).getSubReg() && MI->getOperand(1).getSubReg())
    // Do not coalesce in the case of a 32-bit subregister copy
    // which implements a 32 to 64 bit zero extension
    // which relies on the upper 32 bits being zeroed.
    return false;
  return true;
}
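
// A sketch of the rejected shape (both operands carry a sub_32 subregister
// index):
//   undef %dst.sub_32:gpr64 = COPY %src.sub_32:gpr64
// Coalescing such a copy away could lose the guarantee that bits [63:32] of
// the destination are zero.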