LLVM 22.0.0git
MachineSMEABIPass.cpp
Go to the documentation of this file.
1//===- MachineSMEABIPass.cpp ----------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass implements the SME ABI requirements for ZA state. This includes
10// implementing the lazy (and agnostic) ZA state save schemes around calls.
11//
12//===----------------------------------------------------------------------===//
13//
14// This pass works by collecting instructions that require ZA to be in a
15// specific state (e.g., "ACTIVE" or "SAVED") and inserting the necessary state
16// transitions to ensure ZA is in the required state before instructions. State
17// transitions represent actions such as setting up or restoring a lazy save.
18// Certain points within a function may also have predefined states independent
19// of any instructions, for example, a "shared_za" function is always entered
20// and exited in the "ACTIVE" state.
21//
22// To handle ZA state across control flow, we make use of edge bundling. This
23// assigns each block an "incoming" and "outgoing" edge bundle (representing
24// incoming and outgoing edges). Initially, these are unique to each block;
25// then, in the process of forming bundles, the outgoing bundle of a block is
26// joined with the incoming bundle of all successors. The result is that each
27// bundle can be assigned a single ZA state, which ensures the state required by
28// all a blocks' successors is the same, and that each basic block will always
29// be entered with the same ZA state. This eliminates the need for splitting
30// edges to insert state transitions or "phi" nodes for ZA states.
31//
32// See below for a simple example of edge bundling.
33//
34// The following shows a conditionally executed basic block (BB1):
35//
36// if (cond)
37// BB1
38// BB2
39//
40// Initial Bundles Joined Bundles
41//
42// ┌──0──┐ ┌──0──┐
43// │ BB0 │ │ BB0 │
44// └──1──┘ └──1──┘
45// ├───────┐ ├───────┐
46// ▼ │ ▼ │
47// ┌──2──┐ │ ─────► ┌──1──┐ │
48// │ BB1 │ ▼ │ BB1 │ ▼
49// └──3──┘ ┌──4──┐ └──1──┘ ┌──1──┐
50// └───►4 BB2 │ └───►1 BB2 │
51// └──5──┘ └──2──┘
52//
53// On the left are the initial per-block bundles, and on the right are the
54// joined bundles (which are the result of the EdgeBundles analysis).
55
56#include "AArch64InstrInfo.h"
58#include "AArch64Subtarget.h"
68
69using namespace llvm;
70
71#define DEBUG_TYPE "aarch64-machine-sme-abi"
72
73namespace {
74
/// The states ZA may be in at a given program point. Note that only ACTIVE
/// and LOCAL_SAVED may be assigned to edge bundles (see
/// isLegalEdgeBundleZAState); the others are entry/exit-only or markers.
enum ZAState {
  // Any/unknown state (not valid)
  ANY = 0,

  // ZA is in use and active (i.e. within the accumulator)
  ACTIVE,

  // A ZA save has been set up or committed (i.e. ZA is dormant or off)
  LOCAL_SAVED,

  // ZA is off or a lazy save has been set up by the caller
  CALLER_DORMANT,

  // ZA is off
  OFF,

  // The number of ZA states (not a valid state)
  NUM_ZA_STATE
};
94
/// A bitmask enum to record live physical registers that the "emit*" routines
/// may need to preserve. Note: This only tracks registers we may clobber.
enum LiveRegs : uint8_t {
  None = 0,
  NZCV = 1 << 0,
  // W0 and its high half are tracked separately so that liveness of only the
  // 32-bit half can be distinguished from liveness of all of X0 (see
  // getPhysLiveRegs).
  W0 = 1 << 1,
  W0_HI = 1 << 2,
  X0 = W0 | W0_HI,
  LLVM_MARK_AS_BITMASK_ENUM(/* LargestValue = */ W0_HI)
};
105
/// Holds the virtual registers live physical registers have been saved to.
struct PhysRegSave {
  // Which physical registers were live (and thus saved).
  LiveRegs PhysLiveRegs;
  // Virtual register holding the saved NZCV flags (if NZCV was live).
  Register StatusFlags = AArch64::NoRegister;
  // Virtual register holding the saved W0/X0 value (if W0 was live).
  Register X0Save = AArch64::NoRegister;
};
112
113/// Contains the needed ZA state (and live registers) at an instruction. That is
114/// the state ZA must be in _before_ "InsertPt".
115struct InstInfo {
116 ZAState NeededState{ZAState::ANY};
118 LiveRegs PhysLiveRegs = LiveRegs::None;
119};
120
121/// Contains the needed ZA state for each instruction in a block. Instructions
122/// that do not require a ZA state are not recorded.
123struct BlockInfo {
124 ZAState FixedEntryState{ZAState::ANY};
126 LiveRegs PhysLiveRegsAtEntry = LiveRegs::None;
127 LiveRegs PhysLiveRegsAtExit = LiveRegs::None;
128};
129
130/// Contains the needed ZA state information for all blocks within a function.
131struct FunctionInfo {
133 std::optional<MachineBasicBlock::iterator> AfterSMEProloguePt;
134 LiveRegs PhysLiveRegsAfterSMEPrologue = LiveRegs::None;
135};
136
137/// State/helpers that is only needed when emitting code to handle
138/// saving/restoring ZA.
139class EmitContext {
140public:
141 EmitContext() = default;
142
143 /// Get or create a TPIDR2 block in \p MF.
144 int getTPIDR2Block(MachineFunction &MF) {
145 if (TPIDR2BlockFI)
146 return *TPIDR2BlockFI;
147 MachineFrameInfo &MFI = MF.getFrameInfo();
148 TPIDR2BlockFI = MFI.CreateStackObject(16, Align(16), false);
149 return *TPIDR2BlockFI;
150 }
151
152 /// Get or create agnostic ZA buffer pointer in \p MF.
153 Register getAgnosticZABufferPtr(MachineFunction &MF) {
154 if (AgnosticZABufferPtr != AArch64::NoRegister)
155 return AgnosticZABufferPtr;
156 Register BufferPtr =
157 MF.getInfo<AArch64FunctionInfo>()->getEarlyAllocSMESaveBuffer();
158 AgnosticZABufferPtr =
159 BufferPtr != AArch64::NoRegister
160 ? BufferPtr
161 : MF.getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
162 return AgnosticZABufferPtr;
163 }
164
165 /// Returns true if the function must allocate a ZA save buffer on entry. This
166 /// will be the case if, at any point in the function, a ZA save was emitted.
167 bool needsSaveBuffer() const {
168 assert(!(TPIDR2BlockFI && AgnosticZABufferPtr) &&
169 "Cannot have both a TPIDR2 block and agnostic ZA buffer");
170 return TPIDR2BlockFI || AgnosticZABufferPtr != AArch64::NoRegister;
171 }
172
173private:
174 std::optional<int> TPIDR2BlockFI;
175 Register AgnosticZABufferPtr = AArch64::NoRegister;
176};
177
178static bool isLegalEdgeBundleZAState(ZAState State) {
179 switch (State) {
180 case ZAState::ACTIVE:
181 case ZAState::LOCAL_SAVED:
182 return true;
183 default:
184 return false;
185 }
186}
187
188StringRef getZAStateString(ZAState State) {
189#define MAKE_CASE(V) \
190 case V: \
191 return #V;
192 switch (State) {
193 MAKE_CASE(ZAState::ANY)
194 MAKE_CASE(ZAState::ACTIVE)
195 MAKE_CASE(ZAState::LOCAL_SAVED)
196 MAKE_CASE(ZAState::CALLER_DORMANT)
197 MAKE_CASE(ZAState::OFF)
198 default:
199 llvm_unreachable("Unexpected ZAState");
200 }
201#undef MAKE_CASE
202}
203
204static bool isZAorZTRegOp(const TargetRegisterInfo &TRI,
205 const MachineOperand &MO) {
206 if (!MO.isReg() || !MO.getReg().isPhysical())
207 return false;
208 return any_of(TRI.subregs_inclusive(MO.getReg()), [](const MCPhysReg &SR) {
209 return AArch64::MPR128RegClass.contains(SR) ||
210 AArch64::ZTRRegClass.contains(SR);
211 });
212}
213
214/// Returns the required ZA state needed before \p MI and an iterator pointing
215/// to where any code required to change the ZA state should be inserted.
216static std::pair<ZAState, MachineBasicBlock::iterator>
217getZAStateBeforeInst(const TargetRegisterInfo &TRI, MachineInstr &MI,
218 bool ZAOffAtReturn) {
220
221 if (MI.getOpcode() == AArch64::InOutZAUsePseudo)
222 return {ZAState::ACTIVE, std::prev(InsertPt)};
223
224 if (MI.getOpcode() == AArch64::RequiresZASavePseudo)
225 return {ZAState::LOCAL_SAVED, std::prev(InsertPt)};
226
227 if (MI.isReturn())
228 return {ZAOffAtReturn ? ZAState::OFF : ZAState::ACTIVE, InsertPt};
229
230 for (auto &MO : MI.operands()) {
231 if (isZAorZTRegOp(TRI, MO))
232 return {ZAState::ACTIVE, InsertPt};
233 }
234
235 return {ZAState::ANY, InsertPt};
236}
237
238struct MachineSMEABI : public MachineFunctionPass {
239 inline static char ID = 0;
240
241 MachineSMEABI() : MachineFunctionPass(ID) {}
242
243 bool runOnMachineFunction(MachineFunction &MF) override;
244
245 StringRef getPassName() const override { return "Machine SME ABI pass"; }
246
247 void getAnalysisUsage(AnalysisUsage &AU) const override {
248 AU.setPreservesCFG();
253 }
254
255 /// Collects the needed ZA state (and live registers) before each instruction
256 /// within the machine function.
257 FunctionInfo collectNeededZAStates(SMEAttrs SMEFnAttrs);
258
259 /// Assigns each edge bundle a ZA state based on the needed states of blocks
260 /// that have incoming or outgoing edges in that bundle.
261 SmallVector<ZAState> assignBundleZAStates(const EdgeBundles &Bundles,
262 const FunctionInfo &FnInfo);
263
264 /// Inserts code to handle changes between ZA states within the function.
265 /// E.g., ACTIVE -> LOCAL_SAVED will insert code required to save ZA.
266 void insertStateChanges(EmitContext &, const FunctionInfo &FnInfo,
267 const EdgeBundles &Bundles,
268 ArrayRef<ZAState> BundleStates);
269
270 // Emission routines for private and shared ZA functions (using lazy saves).
271 void emitNewZAPrologue(MachineBasicBlock &MBB,
273 void emitRestoreLazySave(EmitContext &, MachineBasicBlock &MBB,
275 LiveRegs PhysLiveRegs);
276 void emitSetupLazySave(EmitContext &, MachineBasicBlock &MBB,
278 void emitAllocateLazySaveBuffer(EmitContext &, MachineBasicBlock &MBB,
281 bool ClearTPIDR2);
282
283 // Emission routines for agnostic ZA functions.
284 void emitSetupFullZASave(MachineBasicBlock &MBB,
286 LiveRegs PhysLiveRegs);
287 // Emit a "full" ZA save or restore. It is "full" in the sense that this
288 // function will emit a call to __arm_sme_save or __arm_sme_restore, which
289 // handles saving and restoring both ZA and ZT0.
290 void emitFullZASaveRestore(EmitContext &, MachineBasicBlock &MBB,
292 LiveRegs PhysLiveRegs, bool IsSave);
293 void emitAllocateFullZASaveBuffer(EmitContext &, MachineBasicBlock &MBB,
295 LiveRegs PhysLiveRegs);
296
297 /// Attempts to find an insertion point before \p Inst where the status flags
298 /// are not live. If \p Inst is `Block.Insts.end()` a point before the end of
299 /// the block is found.
300 std::pair<MachineBasicBlock::iterator, LiveRegs>
301 findStateChangeInsertionPoint(MachineBasicBlock &MBB, const BlockInfo &Block,
303 void emitStateChange(EmitContext &, MachineBasicBlock &MBB,
304 MachineBasicBlock::iterator MBBI, ZAState From,
305 ZAState To, LiveRegs PhysLiveRegs);
306
307 // Helpers for switching between lazy/full ZA save/restore routines.
308 void emitZASave(EmitContext &Context, MachineBasicBlock &MBB,
310 if (AFI->getSMEFnAttrs().hasAgnosticZAInterface())
311 return emitFullZASaveRestore(Context, MBB, MBBI, PhysLiveRegs,
312 /*IsSave=*/true);
313 return emitSetupLazySave(Context, MBB, MBBI);
314 }
315 void emitZARestore(EmitContext &Context, MachineBasicBlock &MBB,
317 if (AFI->getSMEFnAttrs().hasAgnosticZAInterface())
318 return emitFullZASaveRestore(Context, MBB, MBBI, PhysLiveRegs,
319 /*IsSave=*/false);
320 return emitRestoreLazySave(Context, MBB, MBBI, PhysLiveRegs);
321 }
322 void emitAllocateZASaveBuffer(EmitContext &Context, MachineBasicBlock &MBB,
324 LiveRegs PhysLiveRegs) {
325 if (AFI->getSMEFnAttrs().hasAgnosticZAInterface())
326 return emitAllocateFullZASaveBuffer(Context, MBB, MBBI, PhysLiveRegs);
327 return emitAllocateLazySaveBuffer(Context, MBB, MBBI);
328 }
329
330 /// Save live physical registers to virtual registers.
331 PhysRegSave createPhysRegSave(LiveRegs PhysLiveRegs, MachineBasicBlock &MBB,
333 /// Restore physical registers from a save of their previous values.
334 void restorePhyRegSave(const PhysRegSave &RegSave, MachineBasicBlock &MBB,
336
337private:
338 MachineFunction *MF = nullptr;
339 const AArch64Subtarget *Subtarget = nullptr;
340 const AArch64RegisterInfo *TRI = nullptr;
341 const AArch64FunctionInfo *AFI = nullptr;
342 const TargetInstrInfo *TII = nullptr;
343 MachineRegisterInfo *MRI = nullptr;
344};
345
346static LiveRegs getPhysLiveRegs(LiveRegUnits const &LiveUnits) {
347 LiveRegs PhysLiveRegs = LiveRegs::None;
348 if (!LiveUnits.available(AArch64::NZCV))
349 PhysLiveRegs |= LiveRegs::NZCV;
350 // We have to track W0 and X0 separately as otherwise things can get
351 // confused if we attempt to preserve X0 but only W0 was defined.
352 if (!LiveUnits.available(AArch64::W0))
353 PhysLiveRegs |= LiveRegs::W0;
354 if (!LiveUnits.available(AArch64::W0_HI))
355 PhysLiveRegs |= LiveRegs::W0_HI;
356 return PhysLiveRegs;
357}
358
359static void setPhysLiveRegs(LiveRegUnits &LiveUnits, LiveRegs PhysLiveRegs) {
360 if (PhysLiveRegs & LiveRegs::NZCV)
361 LiveUnits.addReg(AArch64::NZCV);
362 if (PhysLiveRegs & LiveRegs::W0)
363 LiveUnits.addReg(AArch64::W0);
364 if (PhysLiveRegs & LiveRegs::W0_HI)
365 LiveUnits.addReg(AArch64::W0_HI);
366}
367
368FunctionInfo MachineSMEABI::collectNeededZAStates(SMEAttrs SMEFnAttrs) {
369 assert((SMEFnAttrs.hasAgnosticZAInterface() || SMEFnAttrs.hasZT0State() ||
370 SMEFnAttrs.hasZAState()) &&
371 "Expected function to have ZA/ZT0 state!");
372
374 LiveRegs PhysLiveRegsAfterSMEPrologue = LiveRegs::None;
375 std::optional<MachineBasicBlock::iterator> AfterSMEProloguePt;
376
377 for (MachineBasicBlock &MBB : *MF) {
378 BlockInfo &Block = Blocks[MBB.getNumber()];
379
380 if (MBB.isEntryBlock()) {
381 // Entry block:
382 Block.FixedEntryState = SMEFnAttrs.hasPrivateZAInterface()
383 ? ZAState::CALLER_DORMANT
384 : ZAState::ACTIVE;
385 } else if (MBB.isEHPad()) {
386 // EH entry block:
387 Block.FixedEntryState = ZAState::LOCAL_SAVED;
388 }
389
390 LiveRegUnits LiveUnits(*TRI);
391 LiveUnits.addLiveOuts(MBB);
392
393 Block.PhysLiveRegsAtExit = getPhysLiveRegs(LiveUnits);
394 auto FirstTerminatorInsertPt = MBB.getFirstTerminator();
395 auto FirstNonPhiInsertPt = MBB.getFirstNonPHI();
396 for (MachineInstr &MI : reverse(MBB)) {
398 LiveUnits.stepBackward(MI);
399 LiveRegs PhysLiveRegs = getPhysLiveRegs(LiveUnits);
400 // The SMEStateAllocPseudo marker is added to a function if the save
401 // buffer was allocated in SelectionDAG. It marks the end of the
402 // allocation -- which is a safe point for this pass to insert any TPIDR2
403 // block setup.
404 if (MI.getOpcode() == AArch64::SMEStateAllocPseudo) {
405 AfterSMEProloguePt = MBBI;
406 PhysLiveRegsAfterSMEPrologue = PhysLiveRegs;
407 }
408 // Note: We treat Agnostic ZA as inout_za with an alternate save/restore.
409 auto [NeededState, InsertPt] = getZAStateBeforeInst(
410 *TRI, MI, /*ZAOffAtReturn=*/SMEFnAttrs.hasPrivateZAInterface());
411 assert((InsertPt == MBBI ||
412 InsertPt->getOpcode() == AArch64::ADJCALLSTACKDOWN) &&
413 "Unexpected state change insertion point!");
414 // TODO: Do something to avoid state changes where NZCV is live.
415 if (MBBI == FirstTerminatorInsertPt)
416 Block.PhysLiveRegsAtExit = PhysLiveRegs;
417 if (MBBI == FirstNonPhiInsertPt)
418 Block.PhysLiveRegsAtEntry = PhysLiveRegs;
419 if (NeededState != ZAState::ANY)
420 Block.Insts.push_back({NeededState, InsertPt, PhysLiveRegs});
421 }
422
423 // Reverse vector (as we had to iterate backwards for liveness).
424 std::reverse(Block.Insts.begin(), Block.Insts.end());
425 }
426
427 return FunctionInfo{std::move(Blocks), AfterSMEProloguePt,
428 PhysLiveRegsAfterSMEPrologue};
429}
430
431/// Assigns each edge bundle a ZA state based on the needed states of blocks
432/// that have incoming or outgoing edges in that bundle.
434MachineSMEABI::assignBundleZAStates(const EdgeBundles &Bundles,
435 const FunctionInfo &FnInfo) {
436 SmallVector<ZAState> BundleStates(Bundles.getNumBundles());
437 for (unsigned I = 0, E = Bundles.getNumBundles(); I != E; ++I) {
438 LLVM_DEBUG(dbgs() << "Assigning ZA state for edge bundle: " << I << '\n');
439
440 // Attempt to assign a ZA state for this bundle that minimizes state
441 // transitions. Edges within loops are given a higher weight as we assume
442 // they will be executed more than once.
443 // TODO: We should propagate desired incoming/outgoing states through blocks
444 // that have the "ANY" state first to make better global decisions.
445 int EdgeStateCounts[ZAState::NUM_ZA_STATE] = {0};
446 for (unsigned BlockID : Bundles.getBlocks(I)) {
447 LLVM_DEBUG(dbgs() << "- bb." << BlockID);
448
449 const BlockInfo &Block = FnInfo.Blocks[BlockID];
450 if (Block.Insts.empty()) {
451 LLVM_DEBUG(dbgs() << " (no state preference)\n");
452 continue;
453 }
454 bool InEdge = Bundles.getBundle(BlockID, /*Out=*/false) == I;
455 bool OutEdge = Bundles.getBundle(BlockID, /*Out=*/true) == I;
456
457 ZAState DesiredIncomingState = Block.Insts.front().NeededState;
458 if (InEdge && isLegalEdgeBundleZAState(DesiredIncomingState)) {
459 EdgeStateCounts[DesiredIncomingState]++;
460 LLVM_DEBUG(dbgs() << " DesiredIncomingState: "
461 << getZAStateString(DesiredIncomingState));
462 }
463 ZAState DesiredOutgoingState = Block.Insts.back().NeededState;
464 if (OutEdge && isLegalEdgeBundleZAState(DesiredOutgoingState)) {
465 EdgeStateCounts[DesiredOutgoingState]++;
466 LLVM_DEBUG(dbgs() << " DesiredOutgoingState: "
467 << getZAStateString(DesiredOutgoingState));
468 }
469 LLVM_DEBUG(dbgs() << '\n');
470 }
471
472 ZAState BundleState =
473 ZAState(max_element(EdgeStateCounts) - EdgeStateCounts);
474
475 // Force ZA to be active in bundles that don't have a preferred state.
476 // TODO: Something better here (to avoid extra mode switches).
477 if (BundleState == ZAState::ANY)
478 BundleState = ZAState::ACTIVE;
479
480 LLVM_DEBUG({
481 dbgs() << "Chosen ZA state: " << getZAStateString(BundleState) << '\n'
482 << "Edge counts:";
483 for (auto [State, Count] : enumerate(EdgeStateCounts))
484 dbgs() << " " << getZAStateString(ZAState(State)) << ": " << Count;
485 dbgs() << "\n\n";
486 });
487
488 BundleStates[I] = BundleState;
489 }
490
491 return BundleStates;
492}
493
494std::pair<MachineBasicBlock::iterator, LiveRegs>
495MachineSMEABI::findStateChangeInsertionPoint(
496 MachineBasicBlock &MBB, const BlockInfo &Block,
498 LiveRegs PhysLiveRegs;
500 if (Inst != Block.Insts.end()) {
501 InsertPt = Inst->InsertPt;
502 PhysLiveRegs = Inst->PhysLiveRegs;
503 } else {
504 InsertPt = MBB.getFirstTerminator();
505 PhysLiveRegs = Block.PhysLiveRegsAtExit;
506 }
507
508 if (!(PhysLiveRegs & LiveRegs::NZCV))
509 return {InsertPt, PhysLiveRegs}; // Nothing to do (no live flags).
510
511 // Find the previous state change. We can not move before this point.
512 MachineBasicBlock::iterator PrevStateChangeI;
513 if (Inst == Block.Insts.begin()) {
514 PrevStateChangeI = MBB.begin();
515 } else {
516 // Note: `std::prev(Inst)` is the previous InstInfo. We only create an
517 // InstInfo object for instructions that require a specific ZA state, so the
518 // InstInfo is the site of the previous state change in the block (which can
519 // be several MIs earlier).
520 PrevStateChangeI = std::prev(Inst)->InsertPt;
521 }
522
523 // Note: LiveUnits will only accurately track X0 and NZCV.
524 LiveRegUnits LiveUnits(*TRI);
525 setPhysLiveRegs(LiveUnits, PhysLiveRegs);
526 for (MachineBasicBlock::iterator I = InsertPt; I != PrevStateChangeI; --I) {
527 // Don't move before/into a call (which may have a state change before it).
528 if (I->getOpcode() == TII->getCallFrameDestroyOpcode() || I->isCall())
529 break;
530 LiveUnits.stepBackward(*I);
531 if (LiveUnits.available(AArch64::NZCV))
532 return {I, getPhysLiveRegs(LiveUnits)};
533 }
534 return {InsertPt, PhysLiveRegs};
535}
536
537void MachineSMEABI::insertStateChanges(EmitContext &Context,
538 const FunctionInfo &FnInfo,
539 const EdgeBundles &Bundles,
540 ArrayRef<ZAState> BundleStates) {
541 for (MachineBasicBlock &MBB : *MF) {
542 const BlockInfo &Block = FnInfo.Blocks[MBB.getNumber()];
543 ZAState InState = BundleStates[Bundles.getBundle(MBB.getNumber(),
544 /*Out=*/false)];
545
546 ZAState CurrentState = Block.FixedEntryState;
547 if (CurrentState == ZAState::ANY)
548 CurrentState = InState;
549
550 for (auto &Inst : Block.Insts) {
551 if (CurrentState != Inst.NeededState) {
552 auto [InsertPt, PhysLiveRegs] =
553 findStateChangeInsertionPoint(MBB, Block, &Inst);
554 emitStateChange(Context, MBB, InsertPt, CurrentState, Inst.NeededState,
555 PhysLiveRegs);
556 CurrentState = Inst.NeededState;
557 }
558 }
559
560 if (MBB.succ_empty())
561 continue;
562
563 ZAState OutState =
564 BundleStates[Bundles.getBundle(MBB.getNumber(), /*Out=*/true)];
565 if (CurrentState != OutState) {
566 auto [InsertPt, PhysLiveRegs] =
567 findStateChangeInsertionPoint(MBB, Block, Block.Insts.end());
568 emitStateChange(Context, MBB, InsertPt, CurrentState, OutState,
569 PhysLiveRegs);
570 }
571 }
572}
573
576 if (MBBI != MBB.end())
577 return MBBI->getDebugLoc();
578 return DebugLoc();
579}
580
581void MachineSMEABI::emitSetupLazySave(EmitContext &Context,
585
586 // Get pointer to TPIDR2 block.
587 Register TPIDR2 = MRI->createVirtualRegister(&AArch64::GPR64spRegClass);
588 Register TPIDR2Ptr = MRI->createVirtualRegister(&AArch64::GPR64RegClass);
589 BuildMI(MBB, MBBI, DL, TII->get(AArch64::ADDXri), TPIDR2)
590 .addFrameIndex(Context.getTPIDR2Block(*MF))
591 .addImm(0)
592 .addImm(0);
593 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::COPY), TPIDR2Ptr)
594 .addReg(TPIDR2);
595 // Set TPIDR2_EL0 to point to TPIDR2 block.
596 BuildMI(MBB, MBBI, DL, TII->get(AArch64::MSR))
597 .addImm(AArch64SysReg::TPIDR2_EL0)
598 .addReg(TPIDR2Ptr);
599}
600
601PhysRegSave MachineSMEABI::createPhysRegSave(LiveRegs PhysLiveRegs,
604 DebugLoc DL) {
605 PhysRegSave RegSave{PhysLiveRegs};
606 if (PhysLiveRegs & LiveRegs::NZCV) {
607 RegSave.StatusFlags = MRI->createVirtualRegister(&AArch64::GPR64RegClass);
608 BuildMI(MBB, MBBI, DL, TII->get(AArch64::MRS), RegSave.StatusFlags)
609 .addImm(AArch64SysReg::NZCV)
610 .addReg(AArch64::NZCV, RegState::Implicit);
611 }
612 // Note: Preserving X0 is "free" as this is before register allocation, so
613 // the register allocator is still able to optimize these copies.
614 if (PhysLiveRegs & LiveRegs::W0) {
615 RegSave.X0Save = MRI->createVirtualRegister(PhysLiveRegs & LiveRegs::W0_HI
616 ? &AArch64::GPR64RegClass
617 : &AArch64::GPR32RegClass);
618 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::COPY), RegSave.X0Save)
619 .addReg(PhysLiveRegs & LiveRegs::W0_HI ? AArch64::X0 : AArch64::W0);
620 }
621 return RegSave;
622}
623
624void MachineSMEABI::restorePhyRegSave(const PhysRegSave &RegSave,
627 DebugLoc DL) {
628 if (RegSave.StatusFlags != AArch64::NoRegister)
629 BuildMI(MBB, MBBI, DL, TII->get(AArch64::MSR))
630 .addImm(AArch64SysReg::NZCV)
631 .addReg(RegSave.StatusFlags)
632 .addReg(AArch64::NZCV, RegState::ImplicitDefine);
633
634 if (RegSave.X0Save != AArch64::NoRegister)
635 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::COPY),
636 RegSave.PhysLiveRegs & LiveRegs::W0_HI ? AArch64::X0 : AArch64::W0)
637 .addReg(RegSave.X0Save);
638}
639
640void MachineSMEABI::emitRestoreLazySave(EmitContext &Context,
643 LiveRegs PhysLiveRegs) {
644 auto *TLI = Subtarget->getTargetLowering();
646 Register TPIDR2EL0 = MRI->createVirtualRegister(&AArch64::GPR64RegClass);
647 Register TPIDR2 = AArch64::X0;
648
649 // TODO: Emit these within the restore MBB to prevent unnecessary saves.
650 PhysRegSave RegSave = createPhysRegSave(PhysLiveRegs, MBB, MBBI, DL);
651
652 // Enable ZA.
653 BuildMI(MBB, MBBI, DL, TII->get(AArch64::MSRpstatesvcrImm1))
654 .addImm(AArch64SVCR::SVCRZA)
655 .addImm(1);
656 // Get current TPIDR2_EL0.
657 BuildMI(MBB, MBBI, DL, TII->get(AArch64::MRS), TPIDR2EL0)
658 .addImm(AArch64SysReg::TPIDR2_EL0);
659 // Get pointer to TPIDR2 block.
660 BuildMI(MBB, MBBI, DL, TII->get(AArch64::ADDXri), TPIDR2)
661 .addFrameIndex(Context.getTPIDR2Block(*MF))
662 .addImm(0)
663 .addImm(0);
664 // (Conditionally) restore ZA state.
665 BuildMI(MBB, MBBI, DL, TII->get(AArch64::RestoreZAPseudo))
666 .addReg(TPIDR2EL0)
667 .addReg(TPIDR2)
668 .addExternalSymbol(TLI->getLibcallName(RTLIB::SMEABI_TPIDR2_RESTORE))
669 .addRegMask(TRI->SMEABISupportRoutinesCallPreservedMaskFromX0());
670 // Zero TPIDR2_EL0.
671 BuildMI(MBB, MBBI, DL, TII->get(AArch64::MSR))
672 .addImm(AArch64SysReg::TPIDR2_EL0)
673 .addReg(AArch64::XZR);
674
675 restorePhyRegSave(RegSave, MBB, MBBI, DL);
676}
677
678void MachineSMEABI::emitZAOff(MachineBasicBlock &MBB,
680 bool ClearTPIDR2) {
682
683 if (ClearTPIDR2)
684 BuildMI(MBB, MBBI, DL, TII->get(AArch64::MSR))
685 .addImm(AArch64SysReg::TPIDR2_EL0)
686 .addReg(AArch64::XZR);
687
688 // Disable ZA.
689 BuildMI(MBB, MBBI, DL, TII->get(AArch64::MSRpstatesvcrImm1))
690 .addImm(AArch64SVCR::SVCRZA)
691 .addImm(0);
692}
693
694void MachineSMEABI::emitAllocateLazySaveBuffer(
695 EmitContext &Context, MachineBasicBlock &MBB,
697 MachineFrameInfo &MFI = MF->getFrameInfo();
699 Register SP = MRI->createVirtualRegister(&AArch64::GPR64RegClass);
700 Register SVL = MRI->createVirtualRegister(&AArch64::GPR64RegClass);
701 Register Buffer = AFI->getEarlyAllocSMESaveBuffer();
702
703 // Calculate SVL.
704 BuildMI(MBB, MBBI, DL, TII->get(AArch64::RDSVLI_XI), SVL).addImm(1);
705
706 // 1. Allocate the lazy save buffer.
707 if (Buffer == AArch64::NoRegister) {
708 // TODO: On Windows, we allocate the lazy save buffer in SelectionDAG (so
709 // Buffer != AArch64::NoRegister). This is done to reuse the existing
710 // expansions (which can insert stack checks). This works, but it means we
711 // will always allocate the lazy save buffer (even if the function contains
712 // no lazy saves). If we want to handle Windows here, we'll need to
713 // implement something similar to LowerWindowsDYNAMIC_STACKALLOC.
714 assert(!Subtarget->isTargetWindows() &&
715 "Lazy ZA save is not yet supported on Windows");
716 Buffer = MRI->createVirtualRegister(&AArch64::GPR64RegClass);
717 // Get original stack pointer.
718 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::COPY), SP)
719 .addReg(AArch64::SP);
720 // Allocate a lazy-save buffer object of the size given, normally SVL * SVL
721 BuildMI(MBB, MBBI, DL, TII->get(AArch64::MSUBXrrr), Buffer)
722 .addReg(SVL)
723 .addReg(SVL)
724 .addReg(SP);
725 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::COPY), AArch64::SP)
726 .addReg(Buffer);
727 // We have just allocated a variable sized object, tell this to PEI.
728 MFI.CreateVariableSizedObject(Align(16), nullptr);
729 }
730
731 // 2. Setup the TPIDR2 block.
732 {
733 // Note: This case just needs to do `SVL << 48`. It is not implemented as we
734 // generally don't support big-endian SVE/SME.
735 if (!Subtarget->isLittleEndian())
737 "TPIDR2 block initialization is not supported on big-endian targets");
738
739 // Store buffer pointer and num_za_save_slices.
740 // Bytes 10-15 are implicitly zeroed.
741 BuildMI(MBB, MBBI, DL, TII->get(AArch64::STPXi))
742 .addReg(Buffer)
743 .addReg(SVL)
744 .addFrameIndex(Context.getTPIDR2Block(*MF))
745 .addImm(0);
746 }
747}
748
749void MachineSMEABI::emitNewZAPrologue(MachineBasicBlock &MBB,
751 auto *TLI = Subtarget->getTargetLowering();
753
754 // Get current TPIDR2_EL0.
755 Register TPIDR2EL0 = MRI->createVirtualRegister(&AArch64::GPR64RegClass);
756 BuildMI(MBB, MBBI, DL, TII->get(AArch64::MRS))
757 .addReg(TPIDR2EL0, RegState::Define)
758 .addImm(AArch64SysReg::TPIDR2_EL0);
759 // If TPIDR2_EL0 is non-zero, commit the lazy save.
760 // NOTE: Functions that only use ZT0 don't need to zero ZA.
761 bool ZeroZA = AFI->getSMEFnAttrs().hasZAState();
762 auto CommitZASave =
763 BuildMI(MBB, MBBI, DL, TII->get(AArch64::CommitZASavePseudo))
764 .addReg(TPIDR2EL0)
765 .addImm(ZeroZA ? 1 : 0)
766 .addExternalSymbol(TLI->getLibcallName(RTLIB::SMEABI_TPIDR2_SAVE))
767 .addRegMask(TRI->SMEABISupportRoutinesCallPreservedMaskFromX0());
768 if (ZeroZA)
769 CommitZASave.addDef(AArch64::ZAB0, RegState::ImplicitDefine);
770 // Enable ZA (as ZA could have previously been in the OFF state).
771 BuildMI(MBB, MBBI, DL, TII->get(AArch64::MSRpstatesvcrImm1))
772 .addImm(AArch64SVCR::SVCRZA)
773 .addImm(1);
774}
775
776void MachineSMEABI::emitFullZASaveRestore(EmitContext &Context,
779 LiveRegs PhysLiveRegs, bool IsSave) {
780 auto *TLI = Subtarget->getTargetLowering();
782 Register BufferPtr = AArch64::X0;
783
784 PhysRegSave RegSave = createPhysRegSave(PhysLiveRegs, MBB, MBBI, DL);
785
786 // Copy the buffer pointer into X0.
787 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::COPY), BufferPtr)
788 .addReg(Context.getAgnosticZABufferPtr(*MF));
789
790 // Call __arm_sme_save/__arm_sme_restore.
791 BuildMI(MBB, MBBI, DL, TII->get(AArch64::BL))
792 .addReg(BufferPtr, RegState::Implicit)
793 .addExternalSymbol(TLI->getLibcallName(
794 IsSave ? RTLIB::SMEABI_SME_SAVE : RTLIB::SMEABI_SME_RESTORE))
795 .addRegMask(TRI->getCallPreservedMask(
796 *MF,
798
799 restorePhyRegSave(RegSave, MBB, MBBI, DL);
800}
801
802void MachineSMEABI::emitAllocateFullZASaveBuffer(
803 EmitContext &Context, MachineBasicBlock &MBB,
805 // Buffer already allocated in SelectionDAG.
807 return;
808
810 Register BufferPtr = Context.getAgnosticZABufferPtr(*MF);
811 Register BufferSize = MRI->createVirtualRegister(&AArch64::GPR64RegClass);
812
813 PhysRegSave RegSave = createPhysRegSave(PhysLiveRegs, MBB, MBBI, DL);
814
815 // Calculate the SME state size.
816 {
817 auto *TLI = Subtarget->getTargetLowering();
818 const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
819 BuildMI(MBB, MBBI, DL, TII->get(AArch64::BL))
820 .addExternalSymbol(TLI->getLibcallName(RTLIB::SMEABI_SME_STATE_SIZE))
821 .addReg(AArch64::X0, RegState::ImplicitDefine)
822 .addRegMask(TRI->getCallPreservedMask(
823 *MF, CallingConv::
825 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::COPY), BufferSize)
826 .addReg(AArch64::X0);
827 }
828
829 // Allocate a buffer object of the size given __arm_sme_state_size.
830 {
831 MachineFrameInfo &MFI = MF->getFrameInfo();
832 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SUBXrx64), AArch64::SP)
833 .addReg(AArch64::SP)
834 .addReg(BufferSize)
836 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::COPY), BufferPtr)
837 .addReg(AArch64::SP);
838
839 // We have just allocated a variable sized object, tell this to PEI.
840 MFI.CreateVariableSizedObject(Align(16), nullptr);
841 }
842
843 restorePhyRegSave(RegSave, MBB, MBBI, DL);
844}
845
846void MachineSMEABI::emitStateChange(EmitContext &Context,
849 ZAState From, ZAState To,
850 LiveRegs PhysLiveRegs) {
851 // ZA not used.
852 if (From == ZAState::ANY || To == ZAState::ANY)
853 return;
854
855 // If we're exiting from the CALLER_DORMANT state that means this new ZA
856 // function did not touch ZA (so ZA was never turned on).
857 if (From == ZAState::CALLER_DORMANT && To == ZAState::OFF)
858 return;
859
860 // TODO: Avoid setting up the save buffer if there's no transition to
861 // LOCAL_SAVED.
862 if (From == ZAState::CALLER_DORMANT) {
864 "CALLER_DORMANT state requires private ZA interface");
865 assert(&MBB == &MBB.getParent()->front() &&
866 "CALLER_DORMANT state only valid in entry block");
867 emitNewZAPrologue(MBB, MBB.getFirstNonPHI());
868 if (To == ZAState::ACTIVE)
869 return; // Nothing more to do (ZA is active after the prologue).
870
871 // Note: "emitNewZAPrologue" zeros ZA, so we may need to setup a lazy save
872 // if "To" is "ZAState::LOCAL_SAVED". It may be possible to improve this
873 // case by changing the placement of the zero instruction.
874 From = ZAState::ACTIVE;
875 }
876
877 if (From == ZAState::ACTIVE && To == ZAState::LOCAL_SAVED)
878 emitZASave(Context, MBB, InsertPt, PhysLiveRegs);
879 else if (From == ZAState::LOCAL_SAVED && To == ZAState::ACTIVE)
880 emitZARestore(Context, MBB, InsertPt, PhysLiveRegs);
881 else if (To == ZAState::OFF) {
882 assert(From != ZAState::CALLER_DORMANT &&
883 "CALLER_DORMANT to OFF should have already been handled");
885 "Should not turn ZA off in agnostic ZA function");
886 emitZAOff(MBB, InsertPt, /*ClearTPIDR2=*/From == ZAState::LOCAL_SAVED);
887 } else {
888 dbgs() << "Error: Transition from " << getZAStateString(From) << " to "
889 << getZAStateString(To) << '\n';
890 llvm_unreachable("Unimplemented state transition");
891 }
892}
893
894} // end anonymous namespace
895
// Register the pass with the legacy pass manager under the name
// "aarch64-machine-sme-abi".
INITIALIZE_PASS(MachineSMEABI, "aarch64-machine-sme-abi", "Machine SME ABI",
                false, false)
898
899bool MachineSMEABI::runOnMachineFunction(MachineFunction &MF) {
900 if (!MF.getSubtarget<AArch64Subtarget>().hasSME())
901 return false;
902
903 AFI = MF.getInfo<AArch64FunctionInfo>();
904 SMEAttrs SMEFnAttrs = AFI->getSMEFnAttrs();
905 if (!SMEFnAttrs.hasZAState() && !SMEFnAttrs.hasZT0State() &&
906 !SMEFnAttrs.hasAgnosticZAInterface())
907 return false;
908
909 assert(MF.getRegInfo().isSSA() && "Expected to be run on SSA form!");
910
911 this->MF = &MF;
912 Subtarget = &MF.getSubtarget<AArch64Subtarget>();
913 TII = Subtarget->getInstrInfo();
914 TRI = Subtarget->getRegisterInfo();
915 MRI = &MF.getRegInfo();
916
917 const EdgeBundles &Bundles =
918 getAnalysis<EdgeBundlesWrapperLegacy>().getEdgeBundles();
919
920 FunctionInfo FnInfo = collectNeededZAStates(SMEFnAttrs);
921 SmallVector<ZAState> BundleStates = assignBundleZAStates(Bundles, FnInfo);
922
923 EmitContext Context;
924 insertStateChanges(Context, FnInfo, Bundles, BundleStates);
925
926 if (Context.needsSaveBuffer()) {
927 if (FnInfo.AfterSMEProloguePt) {
928 // Note: With inline stack probes the AfterSMEProloguePt may not be in the
929 // entry block (due to the probing loop).
930 MachineBasicBlock::iterator MBBI = *FnInfo.AfterSMEProloguePt;
931 emitAllocateZASaveBuffer(Context, *MBBI->getParent(), MBBI,
932 FnInfo.PhysLiveRegsAfterSMEPrologue);
933 } else {
934 MachineBasicBlock &EntryBlock = MF.front();
935 emitAllocateZASaveBuffer(
936 Context, EntryBlock, EntryBlock.getFirstNonPHI(),
937 FnInfo.Blocks[EntryBlock.getNumber()].PhysLiveRegsAtEntry);
938 }
939 }
940
941 return true;
942}
943
944FunctionPass *llvm::createMachineSMEABIPass() { return new MachineSMEABI(); }
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
This file implements the LivePhysRegs utility for tracking liveness of physical registers.
#define I(x, y, z)
Definition MD5.cpp:58
static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
Return the first found DebugLoc that has a DILocation, given a range of instructions.
#define MAKE_CASE(V)
Register const TargetRegisterInfo * TRI
if(PassOpts->AAPipeline)
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
Definition PassSupport.h:56
This file defines the SmallVector class.
#define LLVM_DEBUG(...)
Definition Debug.h:114
AArch64FunctionInfo - This class is derived from MachineFunctionInfo and contains private AArch64-spe...
const AArch64RegisterInfo * getRegisterInfo() const override
const AArch64TargetLowering * getTargetLowering() const override
Represent the analysis usage information of a pass.
AnalysisUsage & addPreservedID(const void *ID)
AnalysisUsage & addRequired()
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff they do not:
Definition Pass.cpp:270
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:41
A debug info location.
Definition DebugLoc.h:124
ArrayRef< unsigned > getBlocks(unsigned Bundle) const
getBlocks - Return an array of blocks that are connected to Bundle.
Definition EdgeBundles.h:53
unsigned getBundle(unsigned N, bool Out) const
getBundle - Return the ingoing (Out = false) or outgoing (Out = true) bundle number for basic block N
Definition EdgeBundles.h:47
unsigned getNumBundles() const
getNumBundles - Return the total number of bundles in the CFG.
Definition EdgeBundles.h:50
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
A set of register units used to track register liveness.
bool available(MCRegister Reg) const
Returns true if no part of physical register Reg is live.
void addReg(MCRegister Reg)
Adds register units covered by physical register Reg.
LLVM_ABI void stepBackward(const MachineInstr &MI)
Updates liveness when stepping backwards over the instruction MI.
LLVM_ABI void addLiveOuts(const MachineBasicBlock &MBB)
Adds registers living out of block MBB.
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
LLVM_ABI iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
MachineInstrBundleIterator< MachineInstr > iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
LLVM_ABI int CreateVariableSizedObject(Align Alignment, const AllocaInst *Alloca)
Notify the MachineFrameInfo object that a variable sized object has been created.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
unsigned getNumBlockIDs() const
getNumBlockIDs - Return the number of MBB ID's allocated.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
MachineOperand class - Representation of each machine instruction operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
Wrapper class representing virtual and physical registers.
Definition Register.h:19
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:78
SMEAttrs is a utility class to parse the SME ACLE attributes on functions.
bool hasAgnosticZAInterface() const
bool hasPrivateZAInterface() const
typename SuperClass::const_iterator const_iterator
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
TargetInstrInfo - Interface to description of machine instruction set.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
static unsigned getArithExtendImm(AArch64_AM::ShiftExtendType ET, unsigned Imm)
getArithExtendImm - Encode the extend type and shift amount for an arithmetic instruction: imm: 3-bit...
CallingConv Namespace - This namespace contains an enum with a value for the well-known calling conve...
Definition CallingConv.h:21
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1
Preserve X1-X15, X19-X29, SP, Z0-Z31, P0-P15.
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Define
Register definition.
This is an optimization pass for GlobalISel generic memory operations.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
FunctionPass * createMachineSMEABIPass()
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2472
LLVM_ABI char & MachineDominatorsID
MachineDominators - This pass is a machine dominators analysis pass.
LLVM_ABI void reportFatalInternalError(Error Err)
Report a fatal error that indicates a bug in LLVM.
Definition Error.cpp:177
LLVM_ABI char & MachineLoopInfoID
MachineLoopInfo - This pass is a loop analysis pass.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1732
auto reverse(ContainerTy &&C)
Definition STLExtras.h:406
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
@ LLVM_MARK_AS_BITMASK_ENUM
Definition ModRef.h:37
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Definition MCRegister.h:21
auto max_element(R &&Range)
Provide wrappers to std::max_element which take ranges instead of having to pass begin/end explicitly...
Definition STLExtras.h:2030
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39