//=- AArch64MachineFunctionInfo.h - AArch64 machine function info -*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares AArch64-specific per-machine-function information.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64MACHINEFUNCTIONINFO_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64MACHINEFUNCTIONINFO_H

#include "AArch64Subtarget.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MIRYamlMapping.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCLinkerOptimizationHint.h"
#include "llvm/MC/MCSymbol.h"
#include <cassert>
#include <optional>

namespace llvm {

namespace yaml {
struct AArch64FunctionInfo;
} // end namespace yaml

class AArch64Subtarget;
class MachineInstr;

struct TPIDR2Object {
  int FrameIndex = std::numeric_limits<int>::max();
  unsigned Uses = 0;
};

/// AArch64FunctionInfo - This class is derived from MachineFunctionInfo and
/// contains private AArch64-specific information for each MachineFunction.
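///
/// Illustrative usage (a sketch, not code from this header; getInfo is the
/// generic MachineFunction accessor for target-private function info):
/// \code
///   AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
///   unsigned ArgArea = FuncInfo->getBytesInStackArgArea();
/// \endcode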
class AArch64FunctionInfo final : public MachineFunctionInfo {
  /// Number of bytes of arguments this function has on the stack. If the
  /// callee is expected to restore the argument stack this should be a
  /// multiple of 16, all usable during a tail call.
  ///
  /// The alternative would forbid tail call optimisation in some cases: if we
  /// want to transfer control from a function with 8 bytes of stack-argument
  /// space to a function with 16 bytes, then misalignment of this value would
  /// make a stack adjustment necessary, which could not be undone by the
  /// callee.
  unsigned BytesInStackArgArea = 0;

  /// The number of bytes to restore to deallocate space for incoming
  /// arguments. Canonically 0 in the C calling convention, but non-zero when
  /// the callee is expected to pop the args.
  unsigned ArgumentStackToRestore = 0;

  /// Space just below the incoming stack pointer reserved for arguments being
  /// passed on the stack during a tail call. This will be the difference
  /// between the largest tail call argument space needed in this function and
  /// what's already available by reusing space of incoming arguments.
  unsigned TailCallReservedStack = 0;
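
  // Worked example (illustrative figures): if the incoming stack arguments
  // occupy 16 bytes and the largest tail call made from this function needs
  // 48 bytes of stack arguments, TailCallReservedStack is 48 - 16 = 32 bytes,
  // reserved just below the incoming SP.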

  /// HasStackFrame - True if this function has a stack frame. Set by
  /// determineCalleeSaves().
  bool HasStackFrame = false;

  /// Amount of stack frame size, not including callee-saved registers.
  uint64_t LocalStackSize = 0;

  /// The start and end frame indices for the SVE callee saves.
  int MinSVECSFrameIndex = 0;
  int MaxSVECSFrameIndex = 0;

  /// Amount of stack frame size used for saving callee-saved registers.
  unsigned CalleeSavedStackSize = 0;
  unsigned SVECalleeSavedStackSize = 0;
  bool HasCalleeSavedStackSize = false;

  /// Number of TLS accesses using the special (combinable)
  /// _TLS_MODULE_BASE_ symbol.
  unsigned NumLocalDynamicTLSAccesses = 0;

  /// FrameIndex for start of varargs area for arguments passed on the
  /// stack.
  int VarArgsStackIndex = 0;

  /// Offset of start of varargs area for arguments passed on the stack.
  unsigned VarArgsStackOffset = 0;

  /// FrameIndex for start of varargs area for arguments passed in
  /// general purpose registers.
  int VarArgsGPRIndex = 0;

  /// Size of the varargs area for arguments passed in general purpose
  /// registers.
  unsigned VarArgsGPRSize = 0;

  /// FrameIndex for start of varargs area for arguments passed in
  /// floating-point registers.
  int VarArgsFPRIndex = 0;

  /// Size of the varargs area for arguments passed in floating-point
  /// registers.
  unsigned VarArgsFPRSize = 0;

  /// The stack slots used to add space between FPR and GPR accesses when using
  /// hazard padding. StackHazardCSRSlotIndex is added between GPR and FPR CSRs.
  /// StackHazardSlotIndex is added between (sorted) stack objects.
  int StackHazardSlotIndex = std::numeric_limits<int>::max();
  int StackHazardCSRSlotIndex = std::numeric_limits<int>::max();

  /// True if this function has a subset of CSRs that is handled explicitly via
  /// copies.
  bool IsSplitCSR = false;

  /// True when the stack gets realigned dynamically because the size of the
  /// stack frame is unknown at compile time, e.g. in the case of VLAs.
  bool StackRealigned = false;

  /// True when the callee-save stack area has unused gaps that may be used for
  /// other stack allocations.
  bool CalleeSaveStackHasFreeSpace = false;

  /// SRetReturnReg - sret lowering includes returning the value of the
  /// returned struct in a register. This field holds the virtual register into
  /// which the sret argument is passed.
  Register SRetReturnReg;

  /// The SVE stack size (for predicates and data vectors) is maintained here
  /// rather than in FrameInfo, as the placement and Stack IDs are target
  /// specific.
  uint64_t StackSizeSVE = 0;

  /// HasCalculatedStackSizeSVE indicates whether StackSizeSVE is valid.
  bool HasCalculatedStackSizeSVE = false;

  /// Has a value when it is known whether or not the function uses a
  /// redzone, and no value otherwise.
  /// Initialized during frame lowering, unless the function has the noredzone
  /// attribute, in which case it is set to false at construction.
  std::optional<bool> HasRedZone;
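
  // Illustrative use of the tri-state (AFI is an assumed pointer to this
  // class): hasRedZone() lets callers distinguish "not yet known" from
  // "known false", e.g.
  //   bool UseRedZone = AFI->hasRedZone().value_or(false);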

  /// ForwardedMustTailRegParms - A list of virtual and physical registers
  /// that must be forwarded to every musttail call.
  SmallVector<ForwardedRegister, 1> ForwardedMustTailRegParms;

  /// FrameIndex for the tagged base pointer.
  std::optional<int> TaggedBasePointerIndex;

  /// Offset from SP-at-entry to the tagged base pointer.
  /// The tagged base pointer is set up to point to the first (lowest address)
  /// tagged stack slot.
  unsigned TaggedBasePointerOffset;

  /// OutliningStyle denotes, if a function was outlined, how it was outlined,
  /// e.g. Tail Call, Thunk, or Function if none apply.
  std::optional<std::string> OutliningStyle;

  // Offset from SP-after-callee-saved-spills (i.e. SP-at-entry minus
  // CalleeSavedStackSize) to the address of the frame record.
  int CalleeSaveBaseToFrameRecordOffset = 0;

  /// SignReturnAddress is true if PAC-RET is enabled for the function, the
  /// default being to sign non-leaf functions only, with the A key.
  bool SignReturnAddress = false;

  /// SignReturnAddressAll modifies the default PAC-RET mode to signing leaf
  /// functions as well.
  bool SignReturnAddressAll = false;

  /// SignWithBKey modifies the default PAC-RET mode to signing with the B key.
  bool SignWithBKey = false;
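
  // For illustration, the usual -mbranch-protection mappings onto the three
  // flags above (a sketch; the spelling is the standard driver option):
  //   pac-ret        -> SignReturnAddress
  //   pac-ret+leaf   -> SignReturnAddress, SignReturnAddressAll
  //   pac-ret+b-key  -> SignReturnAddress, SignWithBKey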

  /// HasELFSignedGOT is true if the target binary format is ELF and the IR
  /// module containing the corresponding function has "ptrauth-elf-got" flag
  /// set to 1.
  bool HasELFSignedGOT = false;

  /// SigningInstrOffset captures the offset of the PAC-RET signing instruction
  /// within the prologue, so it can be re-used for authentication in the
  /// epilogue when using PC as a second salt (FEAT_PAuth_LR).
  MCSymbol *SignInstrLabel = nullptr;

  /// BranchTargetEnforcement enables placing BTI instructions at potential
  /// indirect branch destinations.
  bool BranchTargetEnforcement = false;

  /// Indicates that SP signing should be diversified with PC as-per PAuthLR.
  /// This is set by -mbranch-protection and will emit NOP instructions unless
  /// the subtarget feature +pauthlr is also used (in which case non-NOP
  /// instructions are emitted).
  bool BranchProtectionPAuthLR = false;

  /// Whether this function has an extended frame record [Ctx, FP, LR]. If so,
  /// bit 60 of the in-memory FP will be 1 to enable other tools to detect the
  /// extended record.
  bool HasSwiftAsyncContext = false;
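
  // Illustration: with the extended record, the FP value stored in memory is
  // effectively FP | (1ULL << 60), letting unwinders and debuggers detect the
  // [Ctx, FP, LR] layout.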

  /// The stack slot where the Swift asynchronous context is stored.
  int SwiftAsyncContextFrameIdx = std::numeric_limits<int>::max();

  bool IsMTETagged = false;

  /// The function has a Scalable Vector or Scalable Predicate register
  /// argument or return type.
  bool IsSVECC = false;

  /// The frame-index for the TPIDR2 object used for lazy saves.
  TPIDR2Object TPIDR2;

  /// Whether this function changes streaming mode within the function.
  bool HasStreamingModeChanges = false;

  /// True if the function needs unwind information.
  mutable std::optional<bool> NeedsDwarfUnwindInfo;

  /// True if the function needs asynchronous unwind information.
  mutable std::optional<bool> NeedsAsyncDwarfUnwindInfo;

  int64_t StackProbeSize = 0;

  // Holds a register containing pstate.sm. This is set
  // on function entry to record the initial pstate of a function.
  Register PStateSMReg = MCRegister::NoRegister;

  // Holds the predicate register (PNReg) used to build the PTRUE instruction.
  // The PTRUE is used for the LD/ST of ZReg pairs in save and restore.
  unsigned PredicateRegForFillSpill = 0;

  // The stack slots where VG values are stored.
  int64_t VGIdx = std::numeric_limits<int>::max();
  int64_t StreamingVGIdx = std::numeric_limits<int>::max();
public:
  AArch64FunctionInfo(const Function &F, const AArch64Subtarget *STI);

  MachineFunctionInfo *
  clone(BumpPtrAllocator &Allocator, MachineFunction &DestMF,
        const DenseMap<MachineBasicBlock *, MachineBasicBlock *> &Src2DstMBB)
      const override;

  void setPredicateRegForFillSpill(unsigned Reg) {
    PredicateRegForFillSpill = Reg;
  }
  unsigned getPredicateRegForFillSpill() const {
    return PredicateRegForFillSpill;
  }

  Register getPStateSMReg() const { return PStateSMReg; };
  void setPStateSMReg(Register Reg) { PStateSMReg = Reg; };

  int64_t getVGIdx() const { return VGIdx; };
  void setVGIdx(unsigned Idx) { VGIdx = Idx; };

  int64_t getStreamingVGIdx() const { return StreamingVGIdx; };
  void setStreamingVGIdx(unsigned FrameIdx) { StreamingVGIdx = FrameIdx; };

  bool isSVECC() const { return IsSVECC; };
  void setIsSVECC(bool s) { IsSVECC = s; };

  TPIDR2Object &getTPIDR2Obj() { return TPIDR2; }
  void initializeBaseYamlFields(const yaml::AArch64FunctionInfo &YamlMFI);

  unsigned getBytesInStackArgArea() const { return BytesInStackArgArea; }
  void setBytesInStackArgArea(unsigned bytes) { BytesInStackArgArea = bytes; }

  unsigned getArgumentStackToRestore() const { return ArgumentStackToRestore; }
  void setArgumentStackToRestore(unsigned bytes) {
    ArgumentStackToRestore = bytes;
  }

  unsigned getTailCallReservedStack() const { return TailCallReservedStack; }
  void setTailCallReservedStack(unsigned bytes) {
    TailCallReservedStack = bytes;
  }

  bool hasCalculatedStackSizeSVE() const { return HasCalculatedStackSizeSVE; }

  void setStackSizeSVE(uint64_t S) {
    HasCalculatedStackSizeSVE = true;
    StackSizeSVE = S;
  }

  uint64_t getStackSizeSVE() const { return StackSizeSVE; }

  bool hasStackFrame() const { return HasStackFrame; }
  void setHasStackFrame(bool s) { HasStackFrame = s; }

  bool isStackRealigned() const { return StackRealigned; }
  void setStackRealigned(bool s) { StackRealigned = s; }

  bool hasCalleeSaveStackFreeSpace() const {
    return CalleeSaveStackHasFreeSpace;
  }
  void setCalleeSaveStackHasFreeSpace(bool s) {
    CalleeSaveStackHasFreeSpace = s;
  }
  bool isSplitCSR() const { return IsSplitCSR; }
  void setIsSplitCSR(bool s) { IsSplitCSR = s; }

  void setLocalStackSize(uint64_t Size) { LocalStackSize = Size; }
  uint64_t getLocalStackSize() const { return LocalStackSize; }

  void setOutliningStyle(const std::string &Style) { OutliningStyle = Style; }
  std::optional<std::string> getOutliningStyle() const {
    return OutliningStyle;
  }

  void setCalleeSavedStackSize(unsigned Size) {
    CalleeSavedStackSize = Size;
    HasCalleeSavedStackSize = true;
  }

  // When CalleeSavedStackSize has not been set (for example when
  // some MachineIR pass is run in isolation), then recalculate
  // the CalleeSavedStackSize directly from the CalleeSavedInfo.
  // Note: This information can only be recalculated after PEI
  // has assigned offsets to the callee save objects.
  unsigned getCalleeSavedStackSize(const MachineFrameInfo &MFI) const {
    bool ValidateCalleeSavedStackSize = false;

#ifndef NDEBUG
    // Make sure the calculated size derived from the CalleeSavedInfo
    // equals the cached size that was calculated elsewhere (e.g. in
    // determineCalleeSaves).
    ValidateCalleeSavedStackSize = HasCalleeSavedStackSize;
#endif

    if (!HasCalleeSavedStackSize || ValidateCalleeSavedStackSize) {
      assert(MFI.isCalleeSavedInfoValid() && "CalleeSavedInfo not calculated");
      if (MFI.getCalleeSavedInfo().empty())
        return 0;

      int64_t MinOffset = std::numeric_limits<int64_t>::max();
      int64_t MaxOffset = std::numeric_limits<int64_t>::min();
      for (const auto &Info : MFI.getCalleeSavedInfo()) {
        int FrameIdx = Info.getFrameIdx();
        if (MFI.getStackID(FrameIdx) != TargetStackID::Default)
          continue;
        int64_t Offset = MFI.getObjectOffset(FrameIdx);
        int64_t ObjSize = MFI.getObjectSize(FrameIdx);
        MinOffset = std::min<int64_t>(Offset, MinOffset);
        MaxOffset = std::max<int64_t>(Offset + ObjSize, MaxOffset);
      }

      if (SwiftAsyncContextFrameIdx != std::numeric_limits<int>::max()) {
        int64_t Offset = MFI.getObjectOffset(getSwiftAsyncContextFrameIdx());
        int64_t ObjSize = MFI.getObjectSize(getSwiftAsyncContextFrameIdx());
        MinOffset = std::min<int64_t>(Offset, MinOffset);
        MaxOffset = std::max<int64_t>(Offset + ObjSize, MaxOffset);
      }

      if (StackHazardCSRSlotIndex != std::numeric_limits<int>::max()) {
        int64_t Offset = MFI.getObjectOffset(StackHazardCSRSlotIndex);
        int64_t ObjSize = MFI.getObjectSize(StackHazardCSRSlotIndex);
        MinOffset = std::min<int64_t>(Offset, MinOffset);
        MaxOffset = std::max<int64_t>(Offset + ObjSize, MaxOffset);
      }

      unsigned Size = alignTo(MaxOffset - MinOffset, 16);
      assert((!HasCalleeSavedStackSize || getCalleeSavedStackSize() == Size) &&
             "Invalid size calculated for callee saves");
      return Size;
    }

    return getCalleeSavedStackSize();
  }

  unsigned getCalleeSavedStackSize() const {
    assert(HasCalleeSavedStackSize &&
           "CalleeSavedStackSize has not been calculated");
    return CalleeSavedStackSize;
  }
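
  // Illustrative usage (AFI is an assumed pointer to this class): prefer the
  // argument-less getter once frame lowering has cached the size; pass the
  // MachineFrameInfo only when the cached value may be missing, e.g. when a
  // MachineIR pass runs in isolation:
  //   unsigned CSSize = AFI->getCalleeSavedStackSize(MF.getFrameInfo());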

  // Saves the CalleeSavedStackSize for SVE vectors in 'scalable bytes'.
  void setSVECalleeSavedStackSize(unsigned Size) {
    SVECalleeSavedStackSize = Size;
  }
  unsigned getSVECalleeSavedStackSize() const {
    return SVECalleeSavedStackSize;
  }

  void setMinMaxSVECSFrameIndex(int Min, int Max) {
    MinSVECSFrameIndex = Min;
    MaxSVECSFrameIndex = Max;
  }

  int getMinSVECSFrameIndex() const { return MinSVECSFrameIndex; }
  int getMaxSVECSFrameIndex() const { return MaxSVECSFrameIndex; }

  void incNumLocalDynamicTLSAccesses() { ++NumLocalDynamicTLSAccesses; }
  unsigned getNumLocalDynamicTLSAccesses() const {
    return NumLocalDynamicTLSAccesses;
  }

  std::optional<bool> hasRedZone() const { return HasRedZone; }
  void setHasRedZone(bool s) { HasRedZone = s; }

  int getVarArgsStackIndex() const { return VarArgsStackIndex; }
  void setVarArgsStackIndex(int Index) { VarArgsStackIndex = Index; }

  unsigned getVarArgsStackOffset() const { return VarArgsStackOffset; }
  void setVarArgsStackOffset(unsigned Offset) { VarArgsStackOffset = Offset; }

  int getVarArgsGPRIndex() const { return VarArgsGPRIndex; }
  void setVarArgsGPRIndex(int Index) { VarArgsGPRIndex = Index; }

  unsigned getVarArgsGPRSize() const { return VarArgsGPRSize; }
  void setVarArgsGPRSize(unsigned Size) { VarArgsGPRSize = Size; }

  int getVarArgsFPRIndex() const { return VarArgsFPRIndex; }
  void setVarArgsFPRIndex(int Index) { VarArgsFPRIndex = Index; }

  unsigned getVarArgsFPRSize() const { return VarArgsFPRSize; }
  void setVarArgsFPRSize(unsigned Size) { VarArgsFPRSize = Size; }

  bool hasStackHazardSlotIndex() const {
    return StackHazardSlotIndex != std::numeric_limits<int>::max();
  }
  int getStackHazardSlotIndex() const { return StackHazardSlotIndex; }
  void setStackHazardSlotIndex(int Index) {
    assert(StackHazardSlotIndex == std::numeric_limits<int>::max());
    StackHazardSlotIndex = Index;
  }
  int getStackHazardCSRSlotIndex() const { return StackHazardCSRSlotIndex; }
  void setStackHazardCSRSlotIndex(int Index) {
    assert(StackHazardCSRSlotIndex == std::numeric_limits<int>::max());
    StackHazardCSRSlotIndex = Index;
  }

  unsigned getSRetReturnReg() const { return SRetReturnReg; }
  void setSRetReturnReg(unsigned Reg) { SRetReturnReg = Reg; }

  unsigned getJumpTableEntrySize(int Idx) const {
    return JumpTableEntryInfo[Idx].first;
  }
  MCSymbol *getJumpTableEntryPCRelSymbol(int Idx) const {
    return JumpTableEntryInfo[Idx].second;
  }
  void setJumpTableEntryInfo(int Idx, unsigned Size, MCSymbol *PCRelSym) {
    if ((unsigned)Idx >= JumpTableEntryInfo.size())
      JumpTableEntryInfo.resize(Idx+1);
    JumpTableEntryInfo[Idx] = std::make_pair(Size, PCRelSym);
  }
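
  // Illustrative usage (JTI, PCRelSym and AFI are assumed names): each jump
  // table records its entry size in bytes and the symbol its entries are
  // encoded relative to:
  //   AFI->setJumpTableEntryInfo(JTI, /*Size=*/4, PCRelSym);
  //   unsigned EntryBytes = AFI->getJumpTableEntrySize(JTI);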

  using SetOfInstructions = SmallPtrSet<const MachineInstr *, 16>;

  const SetOfInstructions &getLOHRelated() const { return LOHRelated; }

  // Shortcuts for LOH related types.
  class MILOHDirective {
    MCLOHType Kind;

    /// Arguments of this directive. Order matters.
    SmallVector<const MachineInstr *, 3> Args;

  public:
    using LOHArgs = ArrayRef<const MachineInstr *>;

    MILOHDirective(MCLOHType Kind, LOHArgs Args)
        : Kind(Kind), Args(Args.begin(), Args.end()) {
      assert(isValidMCLOHType(Kind) && "Invalid LOH directive type!");
    }

    MCLOHType getKind() const { return Kind; }
    LOHArgs getArgs() const { return Args; }
  };

  using MILOHArgs = MILOHDirective::LOHArgs;
  using MILOHContainer = SmallVector<MILOHDirective, 32>;

  const MILOHContainer &getLOHContainer() const { return LOHContainerSet; }

  /// Add a LOH directive of this @p Kind and this @p Args.
  void addLOHDirective(MCLOHType Kind, MILOHArgs Args) {
    LOHContainerSet.push_back(MILOHDirective(Kind, Args));
    LOHRelated.insert(Args.begin(), Args.end());
  }
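
  // Illustrative usage (Adrp and Ldr are assumed MachineInstr pointers): the
  // AArch64CollectLOH pass records hints such as an ADRP/LDR pair with
  //   AFI->addLOHDirective(MCLOH_AdrpLdr, {Adrp, Ldr});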

  SmallVectorImpl<ForwardedRegister> &getForwardedMustTailRegParms() {
    return ForwardedMustTailRegParms;
  }

  std::optional<int> getTaggedBasePointerIndex() const {
    return TaggedBasePointerIndex;
  }
  void setTaggedBasePointerIndex(int Index) { TaggedBasePointerIndex = Index; }

  unsigned getTaggedBasePointerOffset() const {
    return TaggedBasePointerOffset;
  }
  void setTaggedBasePointerOffset(unsigned Offset) {
    TaggedBasePointerOffset = Offset;
  }

  int getCalleeSaveBaseToFrameRecordOffset() const {
    return CalleeSaveBaseToFrameRecordOffset;
  }
  void setCalleeSaveBaseToFrameRecordOffset(int Offset) {
    CalleeSaveBaseToFrameRecordOffset = Offset;
  }

  bool shouldSignReturnAddress(const MachineFunction &MF) const;
  bool shouldSignReturnAddress(bool SpillsLR) const;

  bool needsShadowCallStackPrologueEpilogue(MachineFunction &MF) const;

  bool shouldSignWithBKey() const { return SignWithBKey; }

  bool hasELFSignedGOT() const { return HasELFSignedGOT; }

  MCSymbol *getSigningInstrLabel() const { return SignInstrLabel; }
  void setSigningInstrLabel(MCSymbol *Label) { SignInstrLabel = Label; }

  bool isMTETagged() const { return IsMTETagged; }

  bool branchTargetEnforcement() const { return BranchTargetEnforcement; }

  bool branchProtectionPAuthLR() const { return BranchProtectionPAuthLR; }

  void setHasSwiftAsyncContext(bool HasContext) {
    HasSwiftAsyncContext = HasContext;
  }
  bool hasSwiftAsyncContext() const { return HasSwiftAsyncContext; }

  void setSwiftAsyncContextFrameIdx(int FI) {
    SwiftAsyncContextFrameIdx = FI;
  }
  int getSwiftAsyncContextFrameIdx() const { return SwiftAsyncContextFrameIdx; }

  bool needsDwarfUnwindInfo(const MachineFunction &MF) const;
  bool needsAsyncDwarfUnwindInfo(const MachineFunction &MF) const;

  bool hasStreamingModeChanges() const { return HasStreamingModeChanges; }
  void setHasStreamingModeChanges(bool HasChanges) {
    HasStreamingModeChanges = HasChanges;
  }

  bool hasStackProbing() const { return StackProbeSize != 0; }

  int64_t getStackProbeSize() const { return StackProbeSize; }

private:
  // Hold the lists of LOHs.
  MILOHContainer LOHContainerSet;
  SetOfInstructions LOHRelated;

  SmallVector<std::pair<unsigned, MCSymbol *>, 2> JumpTableEntryInfo;
};

namespace yaml {
struct AArch64FunctionInfo final : public yaml::MachineFunctionInfo {
  std::optional<bool> HasRedZone;

  AArch64FunctionInfo() = default;
  AArch64FunctionInfo(const llvm::AArch64FunctionInfo &MFI);

  void mappingImpl(yaml::IO &YamlIO) override;
  ~AArch64FunctionInfo() = default;
};

template <> struct MappingTraits<AArch64FunctionInfo> {
  static void mapping(IO &YamlIO, AArch64FunctionInfo &MFI) {
    YamlIO.mapOptional("hasRedZone", MFI.HasRedZone);
  }
};
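
// For illustration, the mapping above corresponds to MIR YAML of the form
// (only the single optional key handled here is shown):
//
//   machineFunctionInfo:
//     hasRedZone: true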

} // end namespace yaml

} // end namespace llvm

#endif // LLVM_LIB_TARGET_AARCH64_AARCH64MACHINEFUNCTIONINFO_H