LLVM 22.0.0git
AArch64MachineFunctionInfo.h
Go to the documentation of this file.
1//=- AArch64MachineFunctionInfo.h - AArch64 machine function info -*- C++ -*-=//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file declares AArch64-specific per-machine-function information.
10//
11//===----------------------------------------------------------------------===//
12
#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64MACHINEFUNCTIONINFO_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64MACHINEFUNCTIONINFO_H

#include "AArch64Subtarget.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MIRYamlMapping.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCLinkerOptimizationHint.h"
#include "llvm/MC/MCSymbol.h"
#include <cassert>
#include <limits>
#include <optional>
30
31namespace llvm {
32
33namespace yaml {
35} // end namespace yaml
36
38class MachineInstr;
39
/// Bookkeeping for the TPIDR2 block used by the (old) SME lazy-save ABI.
struct TPIDR2Object {
  /// Frame index of the TPIDR2 block; numeric_limits<int>::max() means "not
  /// allocated".
  int FrameIndex = std::numeric_limits<int>::max();
  /// Number of uses of the TPIDR2 block.
  unsigned Uses = 0;
};
44
/// Condition of signing the return address in a function.
///
/// Corresponds to possible values of "sign-return-address" function attribute.
// NOTE(review): enumerator list reconstructed from the attribute's documented
// values ("none"/"non-leaf"/"all") — confirm against upstream.
enum class SignReturnAddress {
  /// Never sign the return address.
  Never,
  /// Sign the return address of non-leaf functions (those that spill LR).
  NonLeaf,
  /// Sign the return address of all functions.
  All
};
54/// AArch64FunctionInfo - This class is derived from MachineFunctionInfo and
55/// contains private AArch64-specific information for each MachineFunction.
57 /// Number of bytes of arguments this function has on the stack. If the callee
58 /// is expected to restore the argument stack this should be a multiple of 16,
59 /// all usable during a tail call.
60 ///
61 /// The alternative would forbid tail call optimisation in some cases: if we
62 /// want to transfer control from a function with 8-bytes of stack-argument
63 /// space to a function with 16-bytes then misalignment of this value would
64 /// make a stack adjustment necessary, which could not be undone by the
65 /// callee.
66 unsigned BytesInStackArgArea = 0;
67
68 /// The number of bytes to restore to deallocate space for incoming
69 /// arguments. Canonically 0 in the C calling convention, but non-zero when
70 /// callee is expected to pop the args.
71 unsigned ArgumentStackToRestore = 0;
72
73 /// Space just below incoming stack pointer reserved for arguments being
74 /// passed on the stack during a tail call. This will be the difference
75 /// between the largest tail call argument space needed in this function and
76 /// what's already available by reusing space of incoming arguments.
77 unsigned TailCallReservedStack = 0;
78
79 /// HasStackFrame - True if this function has a stack frame. Set by
80 /// determineCalleeSaves().
81 bool HasStackFrame = false;
82
83 /// Amount of stack frame size, not including callee-saved registers.
84 uint64_t LocalStackSize = 0;
85
86 /// Amount of stack frame size used for saving callee-saved registers.
87 unsigned CalleeSavedStackSize = 0;
88 unsigned ZPRCalleeSavedStackSize = 0;
89 unsigned PPRCalleeSavedStackSize = 0;
90 bool HasCalleeSavedStackSize = false;
91 bool HasSVECalleeSavedStackSize = false;
92
93 /// Number of TLS accesses using the special (combinable)
94 /// _TLS_MODULE_BASE_ symbol.
95 unsigned NumLocalDynamicTLSAccesses = 0;
96
97 /// FrameIndex for start of varargs area for arguments passed on the
98 /// stack.
99 int VarArgsStackIndex = 0;
100
101 /// Offset of start of varargs area for arguments passed on the stack.
102 unsigned VarArgsStackOffset = 0;
103
104 /// FrameIndex for start of varargs area for arguments passed in
105 /// general purpose registers.
106 int VarArgsGPRIndex = 0;
107
108 /// Size of the varargs area for arguments passed in general purpose
109 /// registers.
110 unsigned VarArgsGPRSize = 0;
111
112 /// FrameIndex for start of varargs area for arguments passed in
113 /// floating-point registers.
114 int VarArgsFPRIndex = 0;
115
116 /// Size of the varargs area for arguments passed in floating-point
117 /// registers.
118 unsigned VarArgsFPRSize = 0;
119
120 /// The stack slots used to add space between FPR and GPR accesses when using
121 /// hazard padding. StackHazardCSRSlotIndex is added between GPR and FPR CSRs.
122 /// StackHazardSlotIndex is added between (sorted) stack objects.
123 int StackHazardSlotIndex = std::numeric_limits<int>::max();
124 int StackHazardCSRSlotIndex = std::numeric_limits<int>::max();
125
126 /// True if this function has a subset of CSRs that is handled explicitly via
127 /// copies.
128 bool IsSplitCSR = false;
129
130 /// True when the stack gets realigned dynamically because the size of stack
131 /// frame is unknown at compile time. e.g., in case of VLAs.
132 bool StackRealigned = false;
133
134 /// True when the callee-save stack area has unused gaps that may be used for
135 /// other stack allocations.
136 bool CalleeSaveStackHasFreeSpace = false;
137
138 /// SRetReturnReg - sret lowering includes returning the value of the
139 /// returned struct in a register. This field holds the virtual register into
140 /// which the sret argument is passed.
141 Register SRetReturnReg;
142
143 /// SVE stack size (for predicates and data vectors) are maintained here
144 /// rather than in FrameInfo, as the placement and Stack IDs are target
145 /// specific.
146 uint64_t StackSizeZPR = 0;
147 uint64_t StackSizePPR = 0;
148
149 /// Are SVE objects (vectors and predicates) split into separate regions on
150 /// the stack.
151 bool SplitSVEObjects = false;
152
153 /// HasCalculatedStackSizeSVE indicates whether StackSizeZPR/PPR is valid.
154 bool HasCalculatedStackSizeSVE = false;
155
156 /// Has a value when it is known whether or not the function uses a
157 /// redzone, and no value otherwise.
158 /// Initialized during frame lowering, unless the function has the noredzone
159 /// attribute, in which case it is set to false at construction.
160 std::optional<bool> HasRedZone;
161
162 /// ForwardedMustTailRegParms - A list of virtual and physical registers
163 /// that must be forwarded to every musttail call.
164 SmallVector<ForwardedRegister, 1> ForwardedMustTailRegParms;
165
166 /// FrameIndex for the tagged base pointer.
167 std::optional<int> TaggedBasePointerIndex;
168
169 /// Offset from SP-at-entry to the tagged base pointer.
170 /// Tagged base pointer is set up to point to the first (lowest address)
171 /// tagged stack slot.
172 unsigned TaggedBasePointerOffset;
173
174 /// OutliningStyle denotes, if a function was outined, how it was outlined,
175 /// e.g. Tail Call, Thunk, or Function if none apply.
176 std::optional<std::string> OutliningStyle;
177
178 // Offset from SP-after-callee-saved-spills (i.e. SP-at-entry minus
179 // CalleeSavedStackSize) to the address of the frame record.
180 int CalleeSaveBaseToFrameRecordOffset = 0;
181
182 /// SignCondition controls when PAC-RET protection should be used.
184
185 /// SignWithBKey modifies the default PAC-RET mode to signing with the B key.
186 bool SignWithBKey = false;
187
188 /// HasELFSignedGOT is true if the target binary format is ELF and the IR
189 /// module containing the corresponding function has "ptrauth-elf-got" flag
190 /// set to 1.
191 bool HasELFSignedGOT = false;
192
193 /// SigningInstrOffset captures the offset of the PAC-RET signing instruction
194 /// within the prologue, so it can be re-used for authentication in the
195 /// epilogue when using PC as a second salt (FEAT_PAuth_LR)
196 MCSymbol *SignInstrLabel = nullptr;
197
198 /// BranchTargetEnforcement enables placing BTI instructions at potential
199 /// indirect branch destinations.
200 bool BranchTargetEnforcement = false;
201
202 /// Indicates that SP signing should be diversified with PC as-per PAuthLR.
203 /// This is set by -mbranch-protection and will emit NOP instructions unless
204 /// the subtarget feature +pauthlr is also used (in which case non-NOP
205 /// instructions are emitted).
206 bool BranchProtectionPAuthLR = false;
207
208 /// Whether this function has an extended frame record [Ctx, FP, LR]. If so,
209 /// bit 60 of the in-memory FP will be 1 to enable other tools to detect the
210 /// extended record.
211 bool HasSwiftAsyncContext = false;
212
213 /// The stack slot where the Swift asynchronous context is stored.
214 int SwiftAsyncContextFrameIdx = std::numeric_limits<int>::max();
215
216 bool IsMTETagged = false;
217
218 /// The function has Scalable Vector or Scalable Predicate register argument
219 /// or return type
220 bool IsSVECC = false;
221
222 /// Whether this function changes streaming mode within the function.
223 bool HasStreamingModeChanges = false;
224
225 /// True if the function need unwind information.
226 mutable std::optional<bool> NeedsDwarfUnwindInfo;
227
228 /// True if the function need asynchronous unwind information.
229 mutable std::optional<bool> NeedsAsyncDwarfUnwindInfo;
230
231 int64_t StackProbeSize = 0;
232
233 // Holds a register containing pstate.sm. This is set
234 // on function entry to record the initial pstate of a function.
235 Register PStateSMReg = MCRegister::NoRegister;
236
237 // Has the PNReg used to build PTRUE instruction.
238 // The PTRUE is used for the LD/ST of ZReg pairs in save and restore.
239 unsigned PredicateRegForFillSpill = 0;
240
241 // Holds the SME function attributes (streaming mode, ZA/ZT0 state).
242 SMEAttrs SMEFnAttrs;
243
244 // Holds the TPIDR2 block if allocated early (for Windows/stack probes
245 // support).
246 Register EarlyAllocSMESaveBuffer = AArch64::NoRegister;
247
248 // Holds the spill slot for ZT0.
249 int ZT0SpillSlotIndex = std::numeric_limits<int>::max();
250
251 // Note: The following properties are only used for the old SME ABI lowering:
252 /// The frame-index for the TPIDR2 object used for lazy saves.
253 TPIDR2Object TPIDR2;
254 // Holds a pointer to a buffer that is large enough to represent
255 // all SME ZA state and any additional state required by the
256 // __arm_sme_save/restore support routines.
257 Register SMESaveBufferAddr = MCRegister::NoRegister;
258 // true if SMESaveBufferAddr is used.
259 bool SMESaveBufferUsed = false;
260
261public:
263
267 const override;
268
270 EarlyAllocSMESaveBuffer = Ptr;
271 }
272
274 return EarlyAllocSMESaveBuffer;
275 }
276
277 void setZT0SpillSlotIndex(int FI) { ZT0SpillSlotIndex = FI; }
279 assert(hasZT0SpillSlotIndex() && "ZT0 spill slot index not set!");
280 return ZT0SpillSlotIndex;
281 }
282 bool hasZT0SpillSlotIndex() const {
283 return ZT0SpillSlotIndex != std::numeric_limits<int>::max();
284 }
285
286 // Old SME ABI lowering state getters/setters:
287 Register getSMESaveBufferAddr() const { return SMESaveBufferAddr; };
288 void setSMESaveBufferAddr(Register Reg) { SMESaveBufferAddr = Reg; };
289 unsigned isSMESaveBufferUsed() const { return SMESaveBufferUsed; };
290 void setSMESaveBufferUsed(bool Used = true) { SMESaveBufferUsed = Used; };
291 TPIDR2Object &getTPIDR2Obj() { return TPIDR2; }
292
294 PredicateRegForFillSpill = Reg;
295 }
297 return PredicateRegForFillSpill;
298 }
299
300 Register getPStateSMReg() const { return PStateSMReg; };
301 void setPStateSMReg(Register Reg) { PStateSMReg = Reg; };
302
303 bool isSVECC() const { return IsSVECC; };
304 void setIsSVECC(bool s) { IsSVECC = s; };
305
307
308 unsigned getBytesInStackArgArea() const { return BytesInStackArgArea; }
309 void setBytesInStackArgArea(unsigned bytes) { BytesInStackArgArea = bytes; }
310
311 unsigned getArgumentStackToRestore() const { return ArgumentStackToRestore; }
312 void setArgumentStackToRestore(unsigned bytes) {
313 ArgumentStackToRestore = bytes;
314 }
315
316 unsigned getTailCallReservedStack() const { return TailCallReservedStack; }
317 void setTailCallReservedStack(unsigned bytes) {
318 TailCallReservedStack = bytes;
319 }
320
322 assert(isAligned(Align(16), ZPR) && isAligned(Align(16), PPR) &&
323 "expected SVE stack sizes to be aligned to 16-bytes");
324 StackSizeZPR = ZPR;
325 StackSizePPR = PPR;
326 HasCalculatedStackSizeSVE = true;
327 }
328
331 return StackSizeZPR;
332 }
335 return StackSizePPR;
336 }
337
338 bool hasCalculatedStackSizeSVE() const { return HasCalculatedStackSizeSVE; }
339
340 bool hasSVEStackSize() const {
341 return getStackSizeZPR() > 0 || getStackSizePPR() > 0;
342 }
343
344 bool hasStackFrame() const { return HasStackFrame; }
345 void setHasStackFrame(bool s) { HasStackFrame = s; }
346
347 bool isStackRealigned() const { return StackRealigned; }
348 void setStackRealigned(bool s) { StackRealigned = s; }
350 return CalleeSaveStackHasFreeSpace;
351 }
353 CalleeSaveStackHasFreeSpace = s;
354 }
355 bool isSplitCSR() const { return IsSplitCSR; }
356 void setIsSplitCSR(bool s) { IsSplitCSR = s; }
357
358 void setLocalStackSize(uint64_t Size) { LocalStackSize = Size; }
359 uint64_t getLocalStackSize() const { return LocalStackSize; }
360
361 void setOutliningStyle(const std::string &Style) { OutliningStyle = Style; }
362 std::optional<std::string> getOutliningStyle() const {
363 return OutliningStyle;
364 }
365
367 CalleeSavedStackSize = Size;
368 HasCalleeSavedStackSize = true;
369 }
370
371 // When CalleeSavedStackSize has not been set (for example when
372 // some MachineIR pass is run in isolation), then recalculate
373 // the CalleeSavedStackSize directly from the CalleeSavedInfo.
374 // Note: This information can only be recalculated after PEI
375 // has assigned offsets to the callee save objects.
376 unsigned getCalleeSavedStackSize(const MachineFrameInfo &MFI) const {
377 bool ValidateCalleeSavedStackSize = false;
378
379#ifndef NDEBUG
380 // Make sure the calculated size derived from the CalleeSavedInfo
381 // equals the cached size that was calculated elsewhere (e.g. in
382 // determineCalleeSaves).
383 ValidateCalleeSavedStackSize = HasCalleeSavedStackSize;
384#endif
385
386 if (!HasCalleeSavedStackSize || ValidateCalleeSavedStackSize) {
387 assert(MFI.isCalleeSavedInfoValid() && "CalleeSavedInfo not calculated");
388 if (MFI.getCalleeSavedInfo().empty())
389 return 0;
390
391 int64_t MinOffset = std::numeric_limits<int64_t>::max();
392 int64_t MaxOffset = std::numeric_limits<int64_t>::min();
393 for (const auto &Info : MFI.getCalleeSavedInfo()) {
394 int FrameIdx = Info.getFrameIdx();
395 if (MFI.getStackID(FrameIdx) != TargetStackID::Default)
396 continue;
397 int64_t Offset = MFI.getObjectOffset(FrameIdx);
398 int64_t ObjSize = MFI.getObjectSize(FrameIdx);
399 MinOffset = std::min<int64_t>(Offset, MinOffset);
400 MaxOffset = std::max<int64_t>(Offset + ObjSize, MaxOffset);
401 }
402
403 if (SwiftAsyncContextFrameIdx != std::numeric_limits<int>::max()) {
405 int64_t ObjSize = MFI.getObjectSize(getSwiftAsyncContextFrameIdx());
406 MinOffset = std::min<int64_t>(Offset, MinOffset);
407 MaxOffset = std::max<int64_t>(Offset + ObjSize, MaxOffset);
408 }
409
410 if (StackHazardCSRSlotIndex != std::numeric_limits<int>::max()) {
411 int64_t Offset = MFI.getObjectOffset(StackHazardCSRSlotIndex);
412 int64_t ObjSize = MFI.getObjectSize(StackHazardCSRSlotIndex);
413 MinOffset = std::min<int64_t>(Offset, MinOffset);
414 MaxOffset = std::max<int64_t>(Offset + ObjSize, MaxOffset);
415 }
416
417 unsigned Size = alignTo(MaxOffset - MinOffset, 16);
418 assert((!HasCalleeSavedStackSize || getCalleeSavedStackSize() == Size) &&
419 "Invalid size calculated for callee saves");
420 return Size;
421 }
422
424 }
425
426 unsigned getCalleeSavedStackSize() const {
427 assert(HasCalleeSavedStackSize &&
428 "CalleeSavedStackSize has not been calculated");
429 return CalleeSavedStackSize;
430 }
431
432 // Saves the CalleeSavedStackSize for SVE vectors in 'scalable bytes'
433 void setSVECalleeSavedStackSize(unsigned ZPR, unsigned PPR) {
434 assert(isAligned(Align(16), ZPR) && isAligned(Align(16), PPR) &&
435 "expected SVE callee-save sizes to be aligned to 16-bytes");
436 ZPRCalleeSavedStackSize = ZPR;
437 PPRCalleeSavedStackSize = PPR;
438 HasSVECalleeSavedStackSize = true;
439 }
440 unsigned getZPRCalleeSavedStackSize() const {
441 assert(HasSVECalleeSavedStackSize &&
442 "ZPRCalleeSavedStackSize has not been calculated");
443 return ZPRCalleeSavedStackSize;
444 }
445 unsigned getPPRCalleeSavedStackSize() const {
446 assert(HasSVECalleeSavedStackSize &&
447 "PPRCalleeSavedStackSize has not been calculated");
448 return PPRCalleeSavedStackSize;
449 }
450
451 unsigned getSVECalleeSavedStackSize() const {
453 "ZPRs and PPRs are split. Use get[ZPR|PPR]CalleeSavedStackSize()");
455 }
456
457 void incNumLocalDynamicTLSAccesses() { ++NumLocalDynamicTLSAccesses; }
459 return NumLocalDynamicTLSAccesses;
460 }
461
465
466 std::optional<bool> hasRedZone() const { return HasRedZone; }
467 void setHasRedZone(bool s) { HasRedZone = s; }
468
469 int getVarArgsStackIndex() const { return VarArgsStackIndex; }
470 void setVarArgsStackIndex(int Index) { VarArgsStackIndex = Index; }
471
472 unsigned getVarArgsStackOffset() const { return VarArgsStackOffset; }
473 void setVarArgsStackOffset(unsigned Offset) { VarArgsStackOffset = Offset; }
474
475 int getVarArgsGPRIndex() const { return VarArgsGPRIndex; }
476 void setVarArgsGPRIndex(int Index) { VarArgsGPRIndex = Index; }
477
478 unsigned getVarArgsGPRSize() const { return VarArgsGPRSize; }
479 void setVarArgsGPRSize(unsigned Size) { VarArgsGPRSize = Size; }
480
481 int getVarArgsFPRIndex() const { return VarArgsFPRIndex; }
482 void setVarArgsFPRIndex(int Index) { VarArgsFPRIndex = Index; }
483
484 unsigned getVarArgsFPRSize() const { return VarArgsFPRSize; }
485 void setVarArgsFPRSize(unsigned Size) { VarArgsFPRSize = Size; }
486
488 return StackHazardSlotIndex != std::numeric_limits<int>::max();
489 }
490 int getStackHazardSlotIndex() const { return StackHazardSlotIndex; }
491 void setStackHazardSlotIndex(int Index) {
492 assert(StackHazardSlotIndex == std::numeric_limits<int>::max());
493 StackHazardSlotIndex = Index;
494 }
495 int getStackHazardCSRSlotIndex() const { return StackHazardCSRSlotIndex; }
497 assert(StackHazardCSRSlotIndex == std::numeric_limits<int>::max());
498 StackHazardCSRSlotIndex = Index;
499 }
500
501 bool hasSplitSVEObjects() const { return SplitSVEObjects; }
502 void setSplitSVEObjects(bool s) { SplitSVEObjects = s; }
503
504 bool hasSVE_AAPCS(const MachineFunction &MF) const {
505 return hasSplitSVEObjects() || isSVECC() ||
508 }
509
510 SMEAttrs getSMEFnAttrs() const { return SMEFnAttrs; }
511
512 unsigned getSRetReturnReg() const { return SRetReturnReg; }
513 void setSRetReturnReg(unsigned Reg) { SRetReturnReg = Reg; }
514
515 unsigned getJumpTableEntrySize(int Idx) const {
516 return JumpTableEntryInfo[Idx].first;
517 }
519 return JumpTableEntryInfo[Idx].second;
520 }
521 void setJumpTableEntryInfo(int Idx, unsigned Size, MCSymbol *PCRelSym) {
522 if ((unsigned)Idx >= JumpTableEntryInfo.size())
523 JumpTableEntryInfo.resize(Idx+1);
524 JumpTableEntryInfo[Idx] = std::make_pair(Size, PCRelSym);
525 }
526
528
529 const SetOfInstructions &getLOHRelated() const { return LOHRelated; }
530
531 // Shortcuts for LOH related types.
533 MCLOHType Kind;
534
535 /// Arguments of this directive. Order matters.
537
538 public:
540
542 : Kind(Kind), Args(Args.begin(), Args.end()) {
543 assert(isValidMCLOHType(Kind) && "Invalid LOH directive type!");
544 }
545
546 MCLOHType getKind() const { return Kind; }
547 LOHArgs getArgs() const { return Args; }
548 };
549
552
553 const MILOHContainer &getLOHContainer() const { return LOHContainerSet; }
554
555 /// Add a LOH directive of this @p Kind and this @p Args.
557 LOHContainerSet.push_back(MILOHDirective(Kind, Args));
558 LOHRelated.insert_range(Args);
559 }
560
561 size_t
563 size_t InitialSize = LOHContainerSet.size();
564 erase_if(LOHContainerSet, [&](const auto &D) {
565 return any_of(D.getArgs(), [&](auto *Arg) { return MIs.contains(Arg); });
566 });
567 // In theory there could be an LOH with one label in MIs and another label
568 // outside MIs, however we don't know if the label outside MIs is used in
569 // any other LOHs, so we can't remove them from LOHRelated. In that case, we
570 // might produce a few extra labels, but it won't break anything.
571 LOHRelated.remove_if([&](auto *MI) { return MIs.contains(MI); });
572 return InitialSize - LOHContainerSet.size();
573 };
574
576 return ForwardedMustTailRegParms;
577 }
578
579 std::optional<int> getTaggedBasePointerIndex() const {
580 return TaggedBasePointerIndex;
581 }
582 void setTaggedBasePointerIndex(int Index) { TaggedBasePointerIndex = Index; }
583
584 unsigned getTaggedBasePointerOffset() const {
585 return TaggedBasePointerOffset;
586 }
588 TaggedBasePointerOffset = Offset;
589 }
590
592 return CalleeSaveBaseToFrameRecordOffset;
593 }
595 CalleeSaveBaseToFrameRecordOffset = Offset;
596 }
597
598 static bool shouldSignReturnAddress(SignReturnAddress Condition,
599 bool IsLRSpilled);
600
601 bool shouldSignReturnAddress(const MachineFunction &MF) const;
602
604 return SignCondition;
605 }
606
608
609 bool shouldSignWithBKey() const { return SignWithBKey; }
610
611 bool hasELFSignedGOT() const { return HasELFSignedGOT; }
612
613 MCSymbol *getSigningInstrLabel() const { return SignInstrLabel; }
614 void setSigningInstrLabel(MCSymbol *Label) { SignInstrLabel = Label; }
615
616 bool isMTETagged() const { return IsMTETagged; }
617
618 bool branchTargetEnforcement() const { return BranchTargetEnforcement; }
619
620 bool branchProtectionPAuthLR() const { return BranchProtectionPAuthLR; }
621
622 void setHasSwiftAsyncContext(bool HasContext) {
623 HasSwiftAsyncContext = HasContext;
624 }
625 bool hasSwiftAsyncContext() const { return HasSwiftAsyncContext; }
626
628 SwiftAsyncContextFrameIdx = FI;
629 }
630 int getSwiftAsyncContextFrameIdx() const { return SwiftAsyncContextFrameIdx; }
631
632 bool needsDwarfUnwindInfo(const MachineFunction &MF) const;
633 bool needsAsyncDwarfUnwindInfo(const MachineFunction &MF) const;
634
635 bool hasStreamingModeChanges() const { return HasStreamingModeChanges; }
636 void setHasStreamingModeChanges(bool HasChanges) {
637 HasStreamingModeChanges = HasChanges;
638 }
639
640 bool hasStackProbing() const { return StackProbeSize != 0; }
641
642 int64_t getStackProbeSize() const { return StackProbeSize; }
643
644private:
645 // Hold the lists of LOHs.
646 MILOHContainer LOHContainerSet;
647 SetOfInstructions LOHRelated;
648
649 SmallVector<std::pair<unsigned, MCSymbol *>, 2> JumpTableEntryInfo;
650};
651
652namespace yaml {
654 std::optional<bool> HasRedZone;
655 std::optional<uint64_t> StackSizeZPR;
656 std::optional<uint64_t> StackSizePPR;
657 std::optional<bool> HasStackFrame;
658 std::optional<bool> HasStreamingModeChanges;
659
662
663 void mappingImpl(yaml::IO &YamlIO) override;
664 ~AArch64FunctionInfo() override = default;
665};
666
668 static void mapping(IO &YamlIO, AArch64FunctionInfo &MFI) {
669 YamlIO.mapOptional("hasRedZone", MFI.HasRedZone);
670 YamlIO.mapOptional("stackSizeZPR", MFI.StackSizeZPR);
671 YamlIO.mapOptional("stackSizePPR", MFI.StackSizePPR);
672 YamlIO.mapOptional("hasStackFrame", MFI.HasStackFrame);
673 YamlIO.mapOptional("hasStreamingModeChanges", MFI.HasStreamingModeChanges);
674 }
675};
676
677} // end namespace yaml
678
679} // end namespace llvm
680
681#endif // LLVM_LIB_TARGET_AARCH64_AARCH64MACHINEFUNCTIONINFO_H
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
IRTranslator LLVM IR MI
#define F(x, y, z)
Definition MD5.cpp:54
Register Reg
Basic Register Allocator
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
AArch64FunctionInfo - This class is derived from MachineFunctionInfo and contains private AArch64-spe...
void addLOHDirective(MCLOHType Kind, MILOHArgs Args)
Add a LOH directive of this Kind and this Args.
bool needsShadowCallStackPrologueEpilogue(MachineFunction &MF) const
unsigned getCalleeSavedStackSize(const MachineFrameInfo &MFI) const
void setCalleeSaveBaseToFrameRecordOffset(int Offset)
void setVarArgsStackOffset(unsigned Offset)
void setTailCallReservedStack(unsigned bytes)
SmallVector< MILOHDirective, 32 > MILOHContainer
SmallVectorImpl< ForwardedRegister > & getForwardedMustTailRegParms()
SignReturnAddress getSignReturnAddressCondition() const
void setOutliningStyle(const std::string &Style)
const SetOfInstructions & getLOHRelated() const
void setBytesInStackArgArea(unsigned bytes)
void setSigningInstrLabel(MCSymbol *Label)
void setHasSwiftAsyncContext(bool HasContext)
void setStackSizeSVE(uint64_t ZPR, uint64_t PPR)
std::optional< int > getTaggedBasePointerIndex() const
AArch64FunctionInfo(const Function &F, const AArch64Subtarget *STI)
unsigned getJumpTableEntrySize(int Idx) const
bool needsDwarfUnwindInfo(const MachineFunction &MF) const
size_t clearLinkerOptimizationHints(const SmallPtrSetImpl< MachineInstr * > &MIs)
MCSymbol * getJumpTableEntryPCRelSymbol(int Idx) const
SmallPtrSet< const MachineInstr *, 16 > SetOfInstructions
void setTaggedBasePointerOffset(unsigned Offset)
std::optional< bool > hasRedZone() const
static bool shouldSignReturnAddress(SignReturnAddress Condition, bool IsLRSpilled)
void setSVECalleeSavedStackSize(unsigned ZPR, unsigned PPR)
std::optional< std::string > getOutliningStyle() const
void initializeBaseYamlFields(const yaml::AArch64FunctionInfo &YamlMFI)
const MILOHContainer & getLOHContainer() const
void setJumpTableEntryInfo(int Idx, unsigned Size, MCSymbol *PCRelSym)
bool needsAsyncDwarfUnwindInfo(const MachineFunction &MF) const
MachineFunctionInfo * clone(BumpPtrAllocator &Allocator, MachineFunction &DestMF, const DenseMap< MachineBasicBlock *, MachineBasicBlock * > &Src2DstMBB) const override
Make a functionally equivalent copy of this MachineFunctionInfo in MF.
bool hasSVE_AAPCS(const MachineFunction &MF) const
void setArgumentStackToRestore(unsigned bytes)
void setHasStreamingModeChanges(bool HasChanges)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition Function.h:270
static constexpr unsigned NoRegister
Definition MCRegister.h:60
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition MCSymbol.h:42
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool isCalleeSavedInfoValid() const
Has the callee saved info been calculated yet?
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
const std::vector< CalleeSavedInfo > & getCalleeSavedInfo() const
Returns a reference to call saved info vector for the current function.
uint8_t getStackID(int ObjectIdx) const
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
Function & getFunction()
Return the LLVM function that this machine code represents.
Representation of each machine instruction.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
SMEAttrs is a utility class to parse the SME ACLE attributes on functions.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
void mapOptional(StringRef Key, T &Val)
Definition YAMLTraits.h:799
@ AArch64_SVE_VectorCall
Used between AArch64 SVE functions.
This is an optimization pass for GlobalISel generic memory operations.
SignReturnAddress
Condition of signing the return address in a function.
@ Offset
Definition DWP.cpp:532
bool isAligned(Align Lhs, uint64_t SizeInBytes)
Checks that SizeInBytes is a multiple of the alignment.
Definition Alignment.h:134
static bool isValidMCLOHType(unsigned Kind)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1732
MCLOHType
Linker Optimization Hint Type.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition STLExtras.h:2120
BumpPtrAllocatorImpl<> BumpPtrAllocator
The standard BumpPtrAllocator which just uses the default template parameters.
Definition Allocator.h:383
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
MachineFunctionInfo - This class can be derived from and used by targets to hold private target-speci...
~AArch64FunctionInfo() override=default
void mappingImpl(yaml::IO &YamlIO) override
Targets should override this in a way that mirrors the implementation of llvm::MachineFunctionInfo.
static void mapping(IO &YamlIO, AArch64FunctionInfo &MFI)
This class should be specialized by any type that needs to be converted to/from a YAML mapping.
Definition YAMLTraits.h:62