LLVM 22.0.0git
SIFrameLowering.cpp
1//===----------------------- SIFrameLowering.cpp --------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//==-----------------------------------------------------------------------===//
8
9#include "SIFrameLowering.h"
10#include "AMDGPU.h"
11#include "AMDGPULaneMaskUtils.h"
12#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/LiveRegUnits.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/Target/TargetMachine.h"
19
20using namespace llvm;
21
22#define DEBUG_TYPE "frame-info"
23
25 "amdgpu-spill-vgpr-to-agpr",
26 cl::desc("Enable spilling VGPRs to AGPRs"),
28 cl::init(true));
29
30// Find a register matching \p RC from \p LiveUnits which is unused and
31// available throughout the function. On failure, returns AMDGPU::NoRegister.
32// TODO: Rewrite the loop here to iterate over MCRegUnits instead of
33// MCRegisters. This should reduce the number of iterations and avoid redundant
34// checking.
static MCRegister findUnusedRegister(MachineRegisterInfo &MRI,
36 const LiveRegUnits &LiveUnits,
37 const TargetRegisterClass &RC) {
38 for (MCRegister Reg : RC) {
39 if (!MRI.isPhysRegUsed(Reg) && LiveUnits.available(Reg) &&
40 !MRI.isReserved(Reg))
41 return Reg;
42 }
43 return MCRegister();
44}
45
46// Find a scratch register that we can use in the prologue. We avoid using
47// callee-save registers since they may appear to be free when this is called
48// from canUseAsPrologue (during shrink wrapping), but then no longer be free
49// when this is called from emitPrologue.
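// When \p Unused is true, only registers that are entirely unused in the
// function are considered (see findUnusedRegister above); otherwise any
// register that is merely free at this program point is acceptable.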
static MCRegister findScratchNonCalleeSaveRegister(
    MachineRegisterInfo &MRI, LiveRegUnits &LiveUnits,
52 const TargetRegisterClass &RC, bool Unused = false) {
53 // Mark callee saved registers as used so we will not choose them.
54 const MCPhysReg *CSRegs = MRI.getCalleeSavedRegs();
55 for (unsigned i = 0; CSRegs[i]; ++i)
56 LiveUnits.addReg(CSRegs[i]);
57
58 // We are looking for a register that can be used throughout the entire
59 // function, so any use is unacceptable.
60 if (Unused)
61 return findUnusedRegister(MRI, LiveUnits, RC);
62
63 for (MCRegister Reg : RC) {
64 if (LiveUnits.available(Reg) && !MRI.isReserved(Reg))
65 return Reg;
66 }
67
68 return MCRegister();
69}
70
71/// Query target location for spilling SGPRs
72/// \p IncludeScratchCopy : Also look for free scratch SGPRs
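/// The save location is chosen in this order: a free scratch SGPR copy (when
/// \p IncludeScratchCopy is set), then a free physical VGPR lane, and finally
/// a scratch stack slot in memory.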
static void getVGPRSpillLaneOrTempRegister(
74 MachineFunction &MF, LiveRegUnits &LiveUnits, Register SGPR,
75 const TargetRegisterClass &RC = AMDGPU::SReg_32_XM0_XEXECRegClass,
76 bool IncludeScratchCopy = true) {
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
78 MachineFrameInfo &FrameInfo = MF.getFrameInfo();
79
80 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
81 const SIRegisterInfo *TRI = ST.getRegisterInfo();
82 unsigned Size = TRI->getSpillSize(RC);
83 Align Alignment = TRI->getSpillAlign(RC);
84
85 // We need to save and restore the given SGPR.
86
87 Register ScratchSGPR;
88 // 1: Try to save the given register into an unused scratch SGPR. The
89 // LiveUnits should have all the callee saved registers marked as used. For
90 // certain cases we skip copy to scratch SGPR.
91 if (IncludeScratchCopy)
92 ScratchSGPR = findUnusedRegister(MF.getRegInfo(), LiveUnits, RC);
93
94 if (!ScratchSGPR) {
95 int FI = FrameInfo.CreateStackObject(Size, Alignment, true, nullptr,
                                         TargetStackID::SGPRSpill);
97
98 if (TRI->spillSGPRToVGPR() &&
99 MFI->allocateSGPRSpillToVGPRLane(MF, FI, /*SpillToPhysVGPRLane=*/true,
100 /*IsPrologEpilog=*/true)) {
101 // 2: There's no free lane to spill, and no free register to save the
102 // SGPR, so we're forced to take another VGPR to use for the spill.
      MFI->addToPrologEpilogSGPRSpills(
          SGPR, PrologEpilogSGPRSaveRestoreInfo(
                    SGPRSaveKind::SPILL_TO_VGPR_LANE, FI));
106
107 LLVM_DEBUG(auto Spill = MFI->getSGPRSpillToPhysicalVGPRLanes(FI).front();
108 dbgs() << printReg(SGPR, TRI) << " requires fallback spill to "
109 << printReg(Spill.VGPR, TRI) << ':' << Spill.Lane
110 << '\n';);
111 } else {
112 // Remove dead <FI> index
      FrameInfo.RemoveStackObject(FI);
114 // 3: If all else fails, spill the register to memory.
115 FI = FrameInfo.CreateSpillStackObject(Size, Alignment);
      MFI->addToPrologEpilogSGPRSpills(
117 SGPR,
          PrologEpilogSGPRSaveRestoreInfo(SGPRSaveKind::SPILL_TO_MEM, FI));
119 LLVM_DEBUG(dbgs() << "Reserved FI " << FI << " for spilling "
120 << printReg(SGPR, TRI) << '\n');
121 }
122 } else {
    MFI->addToPrologEpilogSGPRSpills(
        SGPR, PrologEpilogSGPRSaveRestoreInfo(
                  SGPRSaveKind::COPY_TO_SCRATCH_SGPR, ScratchSGPR));
126 LiveUnits.addReg(ScratchSGPR);
127 LLVM_DEBUG(dbgs() << "Saving " << printReg(SGPR, TRI) << " with copy to "
128 << printReg(ScratchSGPR, TRI) << '\n');
129 }
130}
131
132// We need to specially emit stack operations here because a different frame
133// register may be in use here than the one getFrameRegister would return for
134// the rest of the function.
135static void buildPrologSpill(const GCNSubtarget &ST, const SIRegisterInfo &TRI,
136 const SIMachineFunctionInfo &FuncInfo,
137 LiveRegUnits &LiveUnits, MachineFunction &MF,
                             MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator I, const DebugLoc &DL,
140 Register SpillReg, int FI, Register FrameReg,
141 int64_t DwordOff = 0) {
142 unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_STORE_DWORD_SADDR
143 : AMDGPU::BUFFER_STORE_DWORD_OFFSET;
144
145 MachineFrameInfo &FrameInfo = MF.getFrameInfo();
  MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
148 PtrInfo, MachineMemOperand::MOStore, FrameInfo.getObjectSize(FI),
149 FrameInfo.getObjectAlign(FI));
150 LiveUnits.addReg(SpillReg);
151 bool IsKill = !MBB.isLiveIn(SpillReg);
152 TRI.buildSpillLoadStore(MBB, I, DL, Opc, FI, SpillReg, IsKill, FrameReg,
153 DwordOff, MMO, nullptr, &LiveUnits);
154 if (IsKill)
155 LiveUnits.removeReg(SpillReg);
156}
157
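// Counterpart of buildPrologSpill: reload a dword from the given frame index
// in the epilog, again using the explicitly provided frame register.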
158static void buildEpilogRestore(const GCNSubtarget &ST,
159 const SIRegisterInfo &TRI,
160 const SIMachineFunctionInfo &FuncInfo,
161 LiveRegUnits &LiveUnits, MachineFunction &MF,
                               MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I,
164 const DebugLoc &DL, Register SpillReg, int FI,
165 Register FrameReg, int64_t DwordOff = 0) {
166 unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_LOAD_DWORD_SADDR
167 : AMDGPU::BUFFER_LOAD_DWORD_OFFSET;
168
169 MachineFrameInfo &FrameInfo = MF.getFrameInfo();
  MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
172 PtrInfo, MachineMemOperand::MOLoad, FrameInfo.getObjectSize(FI),
173 FrameInfo.getObjectAlign(FI));
174 TRI.buildSpillLoadStore(MBB, I, DL, Opc, FI, SpillReg, false, FrameReg,
175 DwordOff, MMO, nullptr, &LiveUnits);
176}
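
// Materialize the 64-bit pointer to the PAL global information table (GIT) in
// \p TargetReg: the high half comes from the amdgpu-git-ptr-high attribute
// when set (otherwise from S_GETPC), and the low half from the preloaded GIT
// pointer SGPR.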
177
179 const DebugLoc &DL, const SIInstrInfo *TII,
180 Register TargetReg) {
181 MachineFunction *MF = MBB.getParent();
  const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
183 const SIRegisterInfo *TRI = &TII->getRegisterInfo();
184 const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);
185 Register TargetLo = TRI->getSubReg(TargetReg, AMDGPU::sub0);
186 Register TargetHi = TRI->getSubReg(TargetReg, AMDGPU::sub1);
187
188 if (MFI->getGITPtrHigh() != 0xffffffff) {
189 BuildMI(MBB, I, DL, SMovB32, TargetHi)
190 .addImm(MFI->getGITPtrHigh())
191 .addReg(TargetReg, RegState::ImplicitDefine);
192 } else {
193 const MCInstrDesc &GetPC64 = TII->get(AMDGPU::S_GETPC_B64_pseudo);
194 BuildMI(MBB, I, DL, GetPC64, TargetReg);
195 }
196 Register GitPtrLo = MFI->getGITPtrLoReg(*MF);
197 MF->getRegInfo().addLiveIn(GitPtrLo);
198 MBB.addLiveIn(GitPtrLo);
199 BuildMI(MBB, I, DL, SMovB32, TargetLo)
200 .addReg(GitPtrLo);
201}
202
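// Lazily initialize the live register units at the prolog/epilog insertion
// point; callers may pass an already-initialized set, in which case this is a
// no-op.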
203static void initLiveUnits(LiveRegUnits &LiveUnits, const SIRegisterInfo &TRI,
204 const SIMachineFunctionInfo *FuncInfo,
                          MachineFunction &MF, MachineBasicBlock &MBB,
206 MachineBasicBlock::iterator MBBI, bool IsProlog) {
207 if (LiveUnits.empty()) {
208 LiveUnits.init(TRI);
209 if (IsProlog) {
210 LiveUnits.addLiveIns(MBB);
211 } else {
212 // In epilog.
213 LiveUnits.addLiveOuts(MBB);
214 LiveUnits.stepBackward(*MBBI);
215 }
216 }
217}
218
219namespace llvm {
220
221// SpillBuilder to save/restore special SGPR spills like the one needed for FP,
222// BP, etc. These spills are delayed until the current function's frame is
223// finalized. For a given register, the builder uses the
224// PrologEpilogSGPRSaveRestoreInfo to decide the spill method.
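// Depending on the chosen save kind, the register is copied to a scratch
// SGPR, written to a physical VGPR lane, or stored to a scratch stack slot.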
class PrologEpilogSGPRSpillBuilder {
  MachineBasicBlock::iterator MI;
  MachineBasicBlock &MBB;
228 MachineFunction &MF;
229 const GCNSubtarget &ST;
230 MachineFrameInfo &MFI;
231 SIMachineFunctionInfo *FuncInfo;
232 const SIInstrInfo *TII;
233 const SIRegisterInfo &TRI;
234 Register SuperReg;
  const PrologEpilogSGPRSaveRestoreInfo SI;
236 LiveRegUnits &LiveUnits;
237 const DebugLoc &DL;
238 Register FrameReg;
239 ArrayRef<int16_t> SplitParts;
240 unsigned NumSubRegs;
241 unsigned EltSize = 4;
242
243 void saveToMemory(const int FI) const {
244 MachineRegisterInfo &MRI = MF.getRegInfo();
245 assert(!MFI.isDeadObjectIndex(FI));
246
247 initLiveUnits(LiveUnits, TRI, FuncInfo, MF, MBB, MI, /*IsProlog*/ true);
248
    Register TmpVGPR = findScratchNonCalleeSaveRegister(
250 MRI, LiveUnits, AMDGPU::VGPR_32RegClass);
251 if (!TmpVGPR)
252 report_fatal_error("failed to find free scratch register");
253
254 for (unsigned I = 0, DwordOff = 0; I < NumSubRegs; ++I) {
255 Register SubReg = NumSubRegs == 1
256 ? SuperReg
257 : Register(TRI.getSubReg(SuperReg, SplitParts[I]));
258 BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpVGPR)
259 .addReg(SubReg);
260
261 buildPrologSpill(ST, TRI, *FuncInfo, LiveUnits, MF, MBB, MI, DL, TmpVGPR,
262 FI, FrameReg, DwordOff);
263 DwordOff += 4;
264 }
265 }
266
267 void saveToVGPRLane(const int FI) const {
268 assert(!MFI.isDeadObjectIndex(FI));
269
270 assert(MFI.getStackID(FI) == TargetStackID::SGPRSpill);
    ArrayRef<SIRegisterInfo::SpilledReg> Spill =
272 FuncInfo->getSGPRSpillToPhysicalVGPRLanes(FI);
273 assert(Spill.size() == NumSubRegs);
274
275 for (unsigned I = 0; I < NumSubRegs; ++I) {
276 Register SubReg = NumSubRegs == 1
277 ? SuperReg
278 : Register(TRI.getSubReg(SuperReg, SplitParts[I]));
279 BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_SPILL_S32_TO_VGPR),
280 Spill[I].VGPR)
281 .addReg(SubReg)
282 .addImm(Spill[I].Lane)
283 .addReg(Spill[I].VGPR, RegState::Undef);
284 }
285 }
286
287 void copyToScratchSGPR(Register DstReg) const {
288 BuildMI(MBB, MI, DL, TII->get(AMDGPU::COPY), DstReg)
289 .addReg(SuperReg)
        .setMIFlag(MachineInstr::FrameSetup);
291 }
292
293 void restoreFromMemory(const int FI) {
294 MachineRegisterInfo &MRI = MF.getRegInfo();
295
296 initLiveUnits(LiveUnits, TRI, FuncInfo, MF, MBB, MI, /*IsProlog*/ false);
    Register TmpVGPR = findScratchNonCalleeSaveRegister(
298 MRI, LiveUnits, AMDGPU::VGPR_32RegClass);
299 if (!TmpVGPR)
300 report_fatal_error("failed to find free scratch register");
301
302 for (unsigned I = 0, DwordOff = 0; I < NumSubRegs; ++I) {
303 Register SubReg = NumSubRegs == 1
304 ? SuperReg
305 : Register(TRI.getSubReg(SuperReg, SplitParts[I]));
306
307 buildEpilogRestore(ST, TRI, *FuncInfo, LiveUnits, MF, MBB, MI, DL,
308 TmpVGPR, FI, FrameReg, DwordOff);
309 assert(SubReg.isPhysical());
310
311 BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), SubReg)
312 .addReg(TmpVGPR, RegState::Kill);
313 DwordOff += 4;
314 }
315 }
316
317 void restoreFromVGPRLane(const int FI) {
318 assert(MFI.getStackID(FI) == TargetStackID::SGPRSpill);
    ArrayRef<SIRegisterInfo::SpilledReg> Spill =
320 FuncInfo->getSGPRSpillToPhysicalVGPRLanes(FI);
321 assert(Spill.size() == NumSubRegs);
322
323 for (unsigned I = 0; I < NumSubRegs; ++I) {
324 Register SubReg = NumSubRegs == 1
325 ? SuperReg
326 : Register(TRI.getSubReg(SuperReg, SplitParts[I]));
327 BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_RESTORE_S32_FROM_VGPR), SubReg)
328 .addReg(Spill[I].VGPR)
329 .addImm(Spill[I].Lane);
330 }
331 }
332
333 void copyFromScratchSGPR(Register SrcReg) const {
334 BuildMI(MBB, MI, DL, TII->get(AMDGPU::COPY), SuperReg)
335 .addReg(SrcReg)
        .setMIFlag(MachineInstr::FrameDestroy);
337 }
338
339public:
  PrologEpilogSGPRSpillBuilder(Register Reg,
                               const PrologEpilogSGPRSaveRestoreInfo SI,
                               MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator MI,
344 const DebugLoc &DL, const SIInstrInfo *TII,
345 const SIRegisterInfo &TRI,
346 LiveRegUnits &LiveUnits, Register FrameReg)
347 : MI(MI), MBB(MBB), MF(*MBB.getParent()),
348 ST(MF.getSubtarget<GCNSubtarget>()), MFI(MF.getFrameInfo()),
349 FuncInfo(MF.getInfo<SIMachineFunctionInfo>()), TII(TII), TRI(TRI),
350 SuperReg(Reg), SI(SI), LiveUnits(LiveUnits), DL(DL),
351 FrameReg(FrameReg) {
352 const TargetRegisterClass *RC = TRI.getPhysRegBaseClass(SuperReg);
353 SplitParts = TRI.getRegSplitParts(RC, EltSize);
354 NumSubRegs = SplitParts.empty() ? 1 : SplitParts.size();
355
356 assert(SuperReg != AMDGPU::M0 && "m0 should never spill");
357 }
358
359 void save() {
360 switch (SI.getKind()) {
    case SGPRSaveKind::SPILL_TO_MEM:
362 return saveToMemory(SI.getIndex());
    case SGPRSaveKind::SPILL_TO_VGPR_LANE:
364 return saveToVGPRLane(SI.getIndex());
    case SGPRSaveKind::COPY_TO_SCRATCH_SGPR:
366 return copyToScratchSGPR(SI.getReg());
367 }
368 }
369
370 void restore() {
371 switch (SI.getKind()) {
    case SGPRSaveKind::SPILL_TO_MEM:
373 return restoreFromMemory(SI.getIndex());
    case SGPRSaveKind::SPILL_TO_VGPR_LANE:
375 return restoreFromVGPRLane(SI.getIndex());
    case SGPRSaveKind::COPY_TO_SCRATCH_SGPR:
377 return copyFromScratchSGPR(SI.getReg());
378 }
379 }
380};
381
382} // namespace llvm
383
384// Emit flat scratch setup code, assuming `MFI->hasFlatScratchInit()`
385void SIFrameLowering::emitEntryFunctionFlatScratchInit(
    MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
387 const DebugLoc &DL, Register ScratchWaveOffsetReg) const {
388 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
389 const SIInstrInfo *TII = ST.getInstrInfo();
390 const SIRegisterInfo *TRI = &TII->getRegisterInfo();
391 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
392
393 // We don't need this if we only have spills since there is no user facing
394 // scratch.
395
396 // TODO: If we know we don't have flat instructions earlier, we can omit
397 // this from the input registers.
398 //
399 // TODO: We only need to know if we access scratch space through a flat
400 // pointer. Because we only detect if flat instructions are used at all,
401 // this will be used more often than necessary on VI.
402
403 Register FlatScrInitLo;
404 Register FlatScrInitHi;
405
406 if (ST.isAmdPalOS()) {
407 // Extract the scratch offset from the descriptor in the GIT
408 LiveRegUnits LiveUnits;
409 LiveUnits.init(*TRI);
410 LiveUnits.addLiveIns(MBB);
411
412 // Find unused reg to load flat scratch init into
413 MachineRegisterInfo &MRI = MF.getRegInfo();
414 Register FlatScrInit = AMDGPU::NoRegister;
415 ArrayRef<MCPhysReg> AllSGPR64s = TRI->getAllSGPR64(MF);
416 unsigned NumPreloaded = (MFI->getNumPreloadedSGPRs() + 1) / 2;
417 AllSGPR64s = AllSGPR64s.slice(
418 std::min(static_cast<unsigned>(AllSGPR64s.size()), NumPreloaded));
419 Register GITPtrLoReg = MFI->getGITPtrLoReg(MF);
420 for (MCPhysReg Reg : AllSGPR64s) {
421 if (LiveUnits.available(Reg) && !MRI.isReserved(Reg) &&
422 MRI.isAllocatable(Reg) && !TRI->isSubRegisterEq(Reg, GITPtrLoReg)) {
423 FlatScrInit = Reg;
424 break;
425 }
426 }
427 assert(FlatScrInit && "Failed to find free register for scratch init");
428
429 FlatScrInitLo = TRI->getSubReg(FlatScrInit, AMDGPU::sub0);
430 FlatScrInitHi = TRI->getSubReg(FlatScrInit, AMDGPU::sub1);
431
432 buildGitPtr(MBB, I, DL, TII, FlatScrInit);
433
434 // We now have the GIT ptr - now get the scratch descriptor from the entry
435 // at offset 0 (or offset 16 for a compute shader).
436 MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
437 const MCInstrDesc &LoadDwordX2 = TII->get(AMDGPU::S_LOAD_DWORDX2_IMM);
438 auto *MMO = MF.getMachineMemOperand(
439 PtrInfo,
        MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
            MachineMemOperand::MOInvariant,
442 8, Align(4));
443 unsigned Offset =
        MF.getFunction().getCallingConv() == CallingConv::AMDGPU_CS ? 16 : 0;
445 const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
446 unsigned EncodedOffset = AMDGPU::convertSMRDOffsetUnits(Subtarget, Offset);
447 BuildMI(MBB, I, DL, LoadDwordX2, FlatScrInit)
448 .addReg(FlatScrInit)
449 .addImm(EncodedOffset) // offset
450 .addImm(0) // cpol
451 .addMemOperand(MMO);
452
453 // Mask the offset in [47:0] of the descriptor
454 const MCInstrDesc &SAndB32 = TII->get(AMDGPU::S_AND_B32);
455 auto And = BuildMI(MBB, I, DL, SAndB32, FlatScrInitHi)
456 .addReg(FlatScrInitHi)
457 .addImm(0xffff);
458 And->getOperand(3).setIsDead(); // Mark SCC as dead.
459 } else {
460 Register FlatScratchInitReg =
        MFI->getPreloadedReg(AMDGPUFunctionArgInfo::FLAT_SCRATCH_INIT);
462 assert(FlatScratchInitReg);
463
464 MachineRegisterInfo &MRI = MF.getRegInfo();
465 MRI.addLiveIn(FlatScratchInitReg);
466 MBB.addLiveIn(FlatScratchInitReg);
467
468 FlatScrInitLo = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub0);
469 FlatScrInitHi = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub1);
470 }
471
472 // Do a 64-bit pointer add.
473 if (ST.flatScratchIsPointer()) {
474 if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
475 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), FlatScrInitLo)
476 .addReg(FlatScrInitLo)
477 .addReg(ScratchWaveOffsetReg);
478 auto Addc = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32),
479 FlatScrInitHi)
480 .addReg(FlatScrInitHi)
481 .addImm(0);
482 Addc->getOperand(3).setIsDead(); // Mark SCC as dead.
483
484 using namespace AMDGPU::Hwreg;
485 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SETREG_B32))
486 .addReg(FlatScrInitLo)
487 .addImm(int16_t(HwregEncoding::encode(ID_FLAT_SCR_LO, 0, 32)));
488 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SETREG_B32))
489 .addReg(FlatScrInitHi)
490 .addImm(int16_t(HwregEncoding::encode(ID_FLAT_SCR_HI, 0, 32)));
491 return;
492 }
493
494 // For GFX9.
495 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), AMDGPU::FLAT_SCR_LO)
496 .addReg(FlatScrInitLo)
497 .addReg(ScratchWaveOffsetReg);
498 auto Addc = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32),
499 AMDGPU::FLAT_SCR_HI)
500 .addReg(FlatScrInitHi)
501 .addImm(0);
502 Addc->getOperand(3).setIsDead(); // Mark SCC as dead.
503
504 return;
505 }
506
507 assert(ST.getGeneration() < AMDGPUSubtarget::GFX9);
508
509 // Copy the size in bytes.
510 BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), AMDGPU::FLAT_SCR_LO)
511 .addReg(FlatScrInitHi, RegState::Kill);
512
513 // Add wave offset in bytes to private base offset.
514 // See comment in AMDKernelCodeT.h for enable_sgpr_flat_scratch_init.
515 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), FlatScrInitLo)
516 .addReg(FlatScrInitLo)
517 .addReg(ScratchWaveOffsetReg);
518
519 // Convert offset to 256-byte units.
520 auto LShr = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_LSHR_B32),
521 AMDGPU::FLAT_SCR_HI)
522 .addReg(FlatScrInitLo, RegState::Kill)
523 .addImm(8);
524 LShr->getOperand(3).setIsDead(); // Mark SCC as dead.
525}
526
527// Note SGPRSpill stack IDs should only be used for SGPR spilling to VGPRs, not
528// memory. They should have been removed by now.
static bool allStackObjectsAreDead(const MachineFrameInfo &MFI) {
530 for (int I = MFI.getObjectIndexBegin(), E = MFI.getObjectIndexEnd();
531 I != E; ++I) {
532 if (!MFI.isDeadObjectIndex(I))
533 return false;
534 }
535
536 return true;
537}
538
539// Shift down registers reserved for the scratch RSRC.
540Register SIFrameLowering::getEntryFunctionReservedScratchRsrcReg(
541 MachineFunction &MF) const {
542
543 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
544 const SIInstrInfo *TII = ST.getInstrInfo();
545 const SIRegisterInfo *TRI = &TII->getRegisterInfo();
546 MachineRegisterInfo &MRI = MF.getRegInfo();
547 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
548
549 assert(MFI->isEntryFunction());
550
551 Register ScratchRsrcReg = MFI->getScratchRSrcReg();
552
553 if (!ScratchRsrcReg || (!MRI.isPhysRegUsed(ScratchRsrcReg) &&
                          allStackObjectsAreDead(MF.getFrameInfo())))
555 return Register();
556
557 if (ST.hasSGPRInitBug() ||
558 ScratchRsrcReg != TRI->reservedPrivateSegmentBufferReg(MF))
559 return ScratchRsrcReg;
560
561 // We reserved the last registers for this. Shift it down to the end of those
562 // which were actually used.
563 //
564 // FIXME: It might be safer to use a pseudoregister before replacement.
565
566 // FIXME: We should be able to eliminate unused input registers. We only
567 // cannot do this for the resources required for scratch access. For now we
568 // skip over user SGPRs and may leave unused holes.
569
570 unsigned NumPreloaded = (MFI->getNumPreloadedSGPRs() + 3) / 4;
571 ArrayRef<MCPhysReg> AllSGPR128s = TRI->getAllSGPR128(MF);
572 AllSGPR128s = AllSGPR128s.slice(std::min(static_cast<unsigned>(AllSGPR128s.size()), NumPreloaded));
573
574 // Skip the last N reserved elements because they should have already been
575 // reserved for VCC etc.
576 Register GITPtrLoReg = MFI->getGITPtrLoReg(MF);
577 for (MCPhysReg Reg : AllSGPR128s) {
578 // Pick the first unallocated one. Make sure we don't clobber the other
579 // reserved input we needed. Also for PAL, make sure we don't clobber
580 // the GIT pointer passed in SGPR0 or SGPR8.
581 if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg) &&
582 (!GITPtrLoReg || !TRI->isSubRegisterEq(Reg, GITPtrLoReg))) {
583 MRI.replaceRegWith(ScratchRsrcReg, Reg);
      MFI->setScratchRSrcReg(Reg);
585 MRI.reserveReg(Reg, TRI);
586 return Reg;
587 }
588 }
589
590 return ScratchRsrcReg;
591}
592
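// Scratch offsets are expressed per lane when flat scratch is used, but the
// MUBUF path addresses a whole wave's scratch, so per-lane byte offsets must
// be scaled by the wavefront size. For example, a 16-byte per-lane frame
// becomes a 16 * 64 = 1024 byte SP increment in wave64 MUBUF mode.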
593static unsigned getScratchScaleFactor(const GCNSubtarget &ST) {
594 return ST.enableFlatScratch() ? 1 : ST.getWavefrontSize();
595}
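// The entry-function prologue materializes the wave's scratch state: it picks
// and initializes the scratch RSRC (for MUBUF scratch), locates or copies the
// scratch wave offset, initializes flat scratch when needed, and seeds the
// stack and frame pointer registers for the kernel.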
596
void SIFrameLowering::emitEntryFunctionPrologue(MachineFunction &MF,
598 MachineBasicBlock &MBB) const {
599 assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");
600
601 // FIXME: If we only have SGPR spills, we won't actually be using scratch
602 // memory since these spill to VGPRs. We should be cleaning up these unused
603 // SGPR spill frame indices somewhere.
604
605 // FIXME: We still have implicit uses on SGPR spill instructions in case they
606 // need to spill to vector memory. It's likely that will not happen, but at
607 // this point it appears we need the setup. This part of the prolog should be
608 // emitted after frame indices are eliminated.
609
610 // FIXME: Remove all of the isPhysRegUsed checks
611
613 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
614 const SIInstrInfo *TII = ST.getInstrInfo();
615 const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
617 const Function &F = MF.getFunction();
618 MachineFrameInfo &FrameInfo = MF.getFrameInfo();
619
620 assert(MFI->isEntryFunction());
621
622 Register PreloadedScratchWaveOffsetReg = MFI->getPreloadedReg(
      AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
624
625 // We need to do the replacement of the private segment buffer register even
626 // if there are no stack objects. There could be stores to undef or a
627 // constant without an associated object.
628 //
629 // This will return `Register()` in cases where there are no actual
630 // uses of the SRSRC.
631 Register ScratchRsrcReg;
632 if (!ST.enableFlatScratch())
633 ScratchRsrcReg = getEntryFunctionReservedScratchRsrcReg(MF);
634
635 // Make the selected register live throughout the function.
636 if (ScratchRsrcReg) {
637 for (MachineBasicBlock &OtherBB : MF) {
638 if (&OtherBB != &MBB) {
639 OtherBB.addLiveIn(ScratchRsrcReg);
640 }
641 }
642 }
643
644 // Now that we have fixed the reserved SRSRC we need to locate the
645 // (potentially) preloaded SRSRC.
646 Register PreloadedScratchRsrcReg;
647 if (ST.isAmdHsaOrMesa(F)) {
648 PreloadedScratchRsrcReg =
        MFI->getPreloadedReg(AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER);
650 if (ScratchRsrcReg && PreloadedScratchRsrcReg) {
651 // We added live-ins during argument lowering, but since they were not
652 // used they were deleted. We're adding the uses now, so add them back.
653 MRI.addLiveIn(PreloadedScratchRsrcReg);
654 MBB.addLiveIn(PreloadedScratchRsrcReg);
655 }
656 }
657
658 // Debug location must be unknown since the first debug location is used to
659 // determine the end of the prologue.
660 DebugLoc DL;
  MachineBasicBlock::iterator I = MBB.begin();
662
663 // We found the SRSRC first because it needs four registers and has an
664 // alignment requirement. If the SRSRC that we found clobbers
665 // the scratch wave offset, which may be in a fixed SGPR or a free SGPR
666 // chosen by SITargetLowering::allocateSystemSGPRs, COPY the scratch
667 // wave offset to a free SGPR.
668 Register ScratchWaveOffsetReg;
669 if (PreloadedScratchWaveOffsetReg &&
670 TRI->isSubRegisterEq(ScratchRsrcReg, PreloadedScratchWaveOffsetReg)) {
671 ArrayRef<MCPhysReg> AllSGPRs = TRI->getAllSGPR32(MF);
672 unsigned NumPreloaded = MFI->getNumPreloadedSGPRs();
673 AllSGPRs = AllSGPRs.slice(
674 std::min(static_cast<unsigned>(AllSGPRs.size()), NumPreloaded));
675 Register GITPtrLoReg = MFI->getGITPtrLoReg(MF);
676 for (MCPhysReg Reg : AllSGPRs) {
677 if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg) &&
678 !TRI->isSubRegisterEq(ScratchRsrcReg, Reg) && GITPtrLoReg != Reg) {
679 ScratchWaveOffsetReg = Reg;
680 BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchWaveOffsetReg)
681 .addReg(PreloadedScratchWaveOffsetReg, RegState::Kill);
682 break;
683 }
684 }
685
686 // FIXME: We can spill incoming arguments and restore at the end of the
687 // prolog.
688 if (!ScratchWaveOffsetReg)
690 "could not find temporary scratch offset register in prolog");
691 } else {
692 ScratchWaveOffsetReg = PreloadedScratchWaveOffsetReg;
693 }
694 assert(ScratchWaveOffsetReg || !PreloadedScratchWaveOffsetReg);
695
696 unsigned Offset = FrameInfo.getStackSize() * getScratchScaleFactor(ST);
697 if (!mayReserveScratchForCWSR(MF)) {
698 if (hasFP(MF)) {
      Register FPReg = MFI->getFrameOffsetReg();
700 assert(FPReg != AMDGPU::FP_REG);
701 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), FPReg).addImm(0);
702 }
703
    if (requiresStackPointerReference(MF)) {
      Register SPReg = MFI->getStackPtrOffsetReg();
706 assert(SPReg != AMDGPU::SP_REG);
707 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), SPReg).addImm(Offset);
708 }
709 } else {
710 // We need to check if we're on a compute queue - if we are, then the CWSR
711 // trap handler may need to store some VGPRs on the stack. The first VGPR
712 // block is saved separately, so we only need to allocate space for any
713 // additional VGPR blocks used. For now, we will make sure there's enough
714 // room for the theoretical maximum number of VGPRs that can be allocated.
715 // FIXME: Figure out if the shader uses fewer VGPRs in practice.
716 assert(hasFP(MF));
    Register FPReg = MFI->getFrameOffsetReg();
718 assert(FPReg != AMDGPU::FP_REG);
719 unsigned VGPRSize = llvm::alignTo(
720 (ST.getAddressableNumVGPRs(MFI->getDynamicVGPRBlockSize()) -
722 MFI->getDynamicVGPRBlockSize())) *
723 4,
724 FrameInfo.getMaxAlign());
726
727 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_GETREG_B32), FPReg)
        .addImm(AMDGPU::Hwreg::HwregEncoding::encode(
            AMDGPU::Hwreg::ID_HW_ID2, AMDGPU::Hwreg::OFFSET_ME_ID, 2));
730 // The MicroEngine ID is 0 for the graphics queue, and 1 or 2 for compute
731 // (3 is unused, so we ignore it). Unfortunately, S_GETREG doesn't set
732 // SCC, so we need to check for 0 manually.
733 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_CMP_LG_U32)).addImm(0).addReg(FPReg);
734 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_CMOVK_I32), FPReg).addImm(VGPRSize);
    Register SPReg = MFI->getStackPtrOffsetReg();
737 assert(SPReg != AMDGPU::SP_REG);
738
739 // If at least one of the constants can be inlined, then we can use
740 // s_cselect. Otherwise, use a mov and cmovk.
741 if (AMDGPU::isInlinableLiteral32(Offset, ST.hasInv2PiInlineImm()) ||
        AMDGPU::isInlinableLiteral32(Offset + VGPRSize,
743 ST.hasInv2PiInlineImm())) {
744 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_CSELECT_B32), SPReg)
745 .addImm(Offset + VGPRSize)
746 .addImm(Offset);
747 } else {
748 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), SPReg).addImm(Offset);
749 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_CMOVK_I32), SPReg)
750 .addImm(Offset + VGPRSize);
751 }
752 }
753 }
754
755 bool NeedsFlatScratchInit =
      MFI->getUserSGPRInfo().hasFlatScratchInit() &&
757 (MRI.isPhysRegUsed(AMDGPU::FLAT_SCR) || FrameInfo.hasCalls() ||
758 (!allStackObjectsAreDead(FrameInfo) && ST.enableFlatScratch()));
759
760 if ((NeedsFlatScratchInit || ScratchRsrcReg) &&
761 PreloadedScratchWaveOffsetReg && !ST.flatScratchIsArchitected()) {
762 MRI.addLiveIn(PreloadedScratchWaveOffsetReg);
763 MBB.addLiveIn(PreloadedScratchWaveOffsetReg);
764 }
765
766 if (NeedsFlatScratchInit) {
767 emitEntryFunctionFlatScratchInit(MF, MBB, I, DL, ScratchWaveOffsetReg);
768 }
769
770 if (ScratchRsrcReg) {
771 emitEntryFunctionScratchRsrcRegSetup(MF, MBB, I, DL,
772 PreloadedScratchRsrcReg,
773 ScratchRsrcReg, ScratchWaveOffsetReg);
774 }
775
776 if (ST.hasWaitXCnt()) {
777 // Set REPLAY_MODE (bit 25) in MODE register to enable multi-group XNACK
778 // replay. This aligns hardware behavior with the compiler's s_wait_xcnt
779 // insertion logic, which assumes multi-group mode by default.
780 unsigned RegEncoding =
        AMDGPU::Hwreg::HwregEncoding::encode(AMDGPU::Hwreg::ID_MODE, 25, 1);
782 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SETREG_IMM32_B32))
783 .addImm(1)
784 .addImm(RegEncoding);
785 }
786}
787
788// Emit scratch RSRC setup code, assuming `ScratchRsrcReg != AMDGPU::NoReg`
789void SIFrameLowering::emitEntryFunctionScratchRsrcRegSetup(
    MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
791 const DebugLoc &DL, Register PreloadedScratchRsrcReg,
792 Register ScratchRsrcReg, Register ScratchWaveOffsetReg) const {
793
794 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
795 const SIInstrInfo *TII = ST.getInstrInfo();
796 const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
798 const Function &Fn = MF.getFunction();
799
800 if (ST.isAmdPalOS()) {
801 // The pointer to the GIT is formed from the offset passed in and either
802 // the amdgpu-git-ptr-high function attribute or the top part of the PC
803 Register Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);
804 Register Rsrc03 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub3);
805
806 buildGitPtr(MBB, I, DL, TII, Rsrc01);
807
808 // We now have the GIT ptr - now get the scratch descriptor from the entry
809 // at offset 0 (or offset 16 for a compute shader).
    MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
811 const MCInstrDesc &LoadDwordX4 = TII->get(AMDGPU::S_LOAD_DWORDX4_IMM);
812 auto *MMO = MF.getMachineMemOperand(
813 PtrInfo,
        MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
            MachineMemOperand::MOInvariant,
816 16, Align(4));
817 unsigned Offset = Fn.getCallingConv() == CallingConv::AMDGPU_CS ? 16 : 0;
818 const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
819 unsigned EncodedOffset = AMDGPU::convertSMRDOffsetUnits(Subtarget, Offset);
820 BuildMI(MBB, I, DL, LoadDwordX4, ScratchRsrcReg)
821 .addReg(Rsrc01)
822 .addImm(EncodedOffset) // offset
823 .addImm(0) // cpol
824 .addReg(ScratchRsrcReg, RegState::ImplicitDefine)
825 .addMemOperand(MMO);
826
827 // The driver will always set the SRD for wave 64 (bits 118:117 of
828 // descriptor / bits 22:21 of third sub-reg will be 0b11)
829 // If the shader is actually wave32 we have to modify the const_index_stride
830 // field of the descriptor 3rd sub-reg (bits 22:21) to 0b10 (stride=32). The
831 // reason the driver does this is that there can be cases where it presents
832 // 2 shaders with different wave sizes (e.g. VsFs).
833 // TODO: convert to using SCRATCH instructions or multiple SRD buffers
834 if (ST.isWave32()) {
835 const MCInstrDesc &SBitsetB32 = TII->get(AMDGPU::S_BITSET0_B32);
836 BuildMI(MBB, I, DL, SBitsetB32, Rsrc03)
837 .addImm(21)
838 .addReg(Rsrc03);
839 }
840 } else if (ST.isMesaGfxShader(Fn) || !PreloadedScratchRsrcReg) {
841 assert(!ST.isAmdHsaOrMesa(Fn));
842 const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);
843
844 Register Rsrc2 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub2);
845 Register Rsrc3 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub3);
846
847 // Use relocations to get the pointer, and setup the other bits manually.
848 uint64_t Rsrc23 = TII->getScratchRsrcWords23();
849
    if (MFI->hasImplicitBufferPtr()) {
851 Register Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);
852
      if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) {
854 const MCInstrDesc &Mov64 = TII->get(AMDGPU::S_MOV_B64);
855
856 BuildMI(MBB, I, DL, Mov64, Rsrc01)
            .addReg(MFI->getImplicitBufferPtrUserSGPR())
858 .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
859 } else {
860 const MCInstrDesc &LoadDwordX2 = TII->get(AMDGPU::S_LOAD_DWORDX2_IMM);
861
862 MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
863 auto *MMO = MF.getMachineMemOperand(
864 PtrInfo,
            MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
                MachineMemOperand::MOInvariant,
867 8, Align(4));
868 BuildMI(MBB, I, DL, LoadDwordX2, Rsrc01)
            .addReg(MFI->getImplicitBufferPtrUserSGPR())
870 .addImm(0) // offset
871 .addImm(0) // cpol
872 .addMemOperand(MMO)
873 .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
874
        MF.getRegInfo().addLiveIn(MFI->getImplicitBufferPtrUserSGPR());
        MBB.addLiveIn(MFI->getImplicitBufferPtrUserSGPR());
877 }
878 } else {
879 Register Rsrc0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
880 Register Rsrc1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);
881
882 BuildMI(MBB, I, DL, SMovB32, Rsrc0)
883 .addExternalSymbol("SCRATCH_RSRC_DWORD0")
884 .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
885
886 BuildMI(MBB, I, DL, SMovB32, Rsrc1)
887 .addExternalSymbol("SCRATCH_RSRC_DWORD1")
888 .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
889 }
890
891 BuildMI(MBB, I, DL, SMovB32, Rsrc2)
892 .addImm(Lo_32(Rsrc23))
893 .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
894
895 BuildMI(MBB, I, DL, SMovB32, Rsrc3)
896 .addImm(Hi_32(Rsrc23))
897 .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
898 } else if (ST.isAmdHsaOrMesa(Fn)) {
899 assert(PreloadedScratchRsrcReg);
900
901 if (ScratchRsrcReg != PreloadedScratchRsrcReg) {
902 BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchRsrcReg)
903 .addReg(PreloadedScratchRsrcReg, RegState::Kill);
904 }
905 }
906
907 // Add the scratch wave offset into the scratch RSRC.
908 //
909 // We only want to update the first 48 bits, which is the base address
910 // pointer, without touching the adjacent 16 bits of flags. We know this add
911 // cannot carry-out from bit 47, otherwise the scratch allocation would be
912 // impossible to fit in the 48-bit global address space.
913 //
914 // TODO: Evaluate if it is better to just construct an SRD using the flat
915 // scratch init and some constants rather than update the one we are passed.
916 Register ScratchRsrcSub0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
917 Register ScratchRsrcSub1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);
918
919 // We cannot Kill ScratchWaveOffsetReg here because we allow it to be used in
920 // the kernel body via inreg arguments.
921 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), ScratchRsrcSub0)
922 .addReg(ScratchRsrcSub0)
923 .addReg(ScratchWaveOffsetReg)
924 .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
925 auto Addc = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), ScratchRsrcSub1)
926 .addReg(ScratchRsrcSub1)
927 .addImm(0)
928 .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
929 Addc->getOperand(3).setIsDead(); // Mark SCC as dead.
930}
931
bool SIFrameLowering::isSupportedStackID(TargetStackID::Value ID) const {
933 switch (ID) {
  case TargetStackID::Default:
  case TargetStackID::NoAlloc:
  case TargetStackID::SGPRSpill:
937 return true;
  case TargetStackID::ScalableVector:
  case TargetStackID::WasmLocal:
941 return false;
942 }
943 llvm_unreachable("Invalid TargetStackID::Value");
944}
945
946// Activate only the inactive lanes when \p EnableInactiveLanes is true.
947// Otherwise, activate all lanes. It returns the saved exec.
static Register buildScratchExecCopy(LiveRegUnits &LiveUnits,
949 MachineFunction &MF,
                                     MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI,
952 const DebugLoc &DL, bool IsProlog,
953 bool EnableInactiveLanes) {
954 Register ScratchExecCopy;
  MachineRegisterInfo &MRI = MF.getRegInfo();
956 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
957 const SIInstrInfo *TII = ST.getInstrInfo();
958 const SIRegisterInfo &TRI = TII->getRegisterInfo();
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
960
961 initLiveUnits(LiveUnits, TRI, FuncInfo, MF, MBB, MBBI, IsProlog);
962
963 if (FuncInfo->isWholeWaveFunction()) {
964 // Whole wave functions already have a copy of the original EXEC mask that
965 // we can use.
966 assert(IsProlog && "Epilog should look at return, not setup");
967 ScratchExecCopy =
968 TII->getWholeWaveFunctionSetup(MF)->getOperand(0).getReg();
969 assert(ScratchExecCopy && "Couldn't find copy of EXEC");
970 } else {
971 ScratchExecCopy = findScratchNonCalleeSaveRegister(
972 MRI, LiveUnits, *TRI.getWaveMaskRegClass());
973 }
974
975 if (!ScratchExecCopy)
976 report_fatal_error("failed to find free scratch register");
977
978 LiveUnits.addReg(ScratchExecCopy);
979
980 const unsigned SaveExecOpc =
981 ST.isWave32() ? (EnableInactiveLanes ? AMDGPU::S_XOR_SAVEEXEC_B32
982 : AMDGPU::S_OR_SAVEEXEC_B32)
983 : (EnableInactiveLanes ? AMDGPU::S_XOR_SAVEEXEC_B64
984 : AMDGPU::S_OR_SAVEEXEC_B64);
985 auto SaveExec =
986 BuildMI(MBB, MBBI, DL, TII->get(SaveExecOpc), ScratchExecCopy).addImm(-1);
987 SaveExec->getOperand(3).setIsDead(); // Mark SCC as dead.
988
989 return ScratchExecCopy;
990}
991
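// Spill the prolog/epilog CSRs: WWM scratch VGPRs are stored with only the
// inactive lanes enabled, WWM callee-saved VGPRs with all lanes enabled, and
// the special SGPRs (FP, BP, EXEC copy) via PrologEpilogSGPRSpillBuilder.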
void SIFrameLowering::emitCSRSpillStores(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, DebugLoc &DL, LiveRegUnits &LiveUnits,
995 Register FrameReg, Register FramePtrRegScratchCopy) const {
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
997 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
998 const SIInstrInfo *TII = ST.getInstrInfo();
999 const SIRegisterInfo &TRI = TII->getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const AMDGPU::LaneMaskConstants &LMC = AMDGPU::LaneMaskConstants::get(ST);
1002
1003 // Spill Whole-Wave Mode VGPRs. Save only the inactive lanes of the scratch
1004 // registers. However, save all lanes of callee-saved VGPRs. Due to this, we
1005 // might end up flipping the EXEC bits twice.
1006 Register ScratchExecCopy;
1007 SmallVector<std::pair<Register, int>, 2> WWMCalleeSavedRegs, WWMScratchRegs;
1008 FuncInfo->splitWWMSpillRegisters(MF, WWMCalleeSavedRegs, WWMScratchRegs);
1009 if (!WWMScratchRegs.empty())
1010 ScratchExecCopy =
1011 buildScratchExecCopy(LiveUnits, MF, MBB, MBBI, DL,
1012 /*IsProlog*/ true, /*EnableInactiveLanes*/ true);
1013
1014 auto StoreWWMRegisters =
      [&](SmallVectorImpl<std::pair<Register, int>> &WWMRegs) {
1016 for (const auto &Reg : WWMRegs) {
1017 Register VGPR = Reg.first;
1018 int FI = Reg.second;
1019 buildPrologSpill(ST, TRI, *FuncInfo, LiveUnits, MF, MBB, MBBI, DL,
1020 VGPR, FI, FrameReg);
1021 }
1022 };
1023
1024 for (const Register Reg : make_first_range(WWMScratchRegs)) {
1025 if (!MRI.isReserved(Reg)) {
1026 MRI.addLiveIn(Reg);
1027 MBB.addLiveIn(Reg);
1028 }
1029 }
1030 StoreWWMRegisters(WWMScratchRegs);
1031
1032 auto EnableAllLanes = [&]() {
1033 BuildMI(MBB, MBBI, DL, TII->get(LMC.MovOpc), LMC.ExecReg).addImm(-1);
1034 };
1035
1036 if (!WWMCalleeSavedRegs.empty()) {
1037 if (ScratchExecCopy) {
1038 EnableAllLanes();
1039 } else {
1040 ScratchExecCopy = buildScratchExecCopy(LiveUnits, MF, MBB, MBBI, DL,
1041 /*IsProlog*/ true,
1042 /*EnableInactiveLanes*/ false);
1043 }
1044 }
1045
1046 StoreWWMRegisters(WWMCalleeSavedRegs);
1047 if (FuncInfo->isWholeWaveFunction()) {
1048 // If we have already saved some WWM CSR registers, then the EXEC is already
1049 // -1 and we don't need to do anything else. Otherwise, set EXEC to -1 here.
1050 if (!ScratchExecCopy)
1051 buildScratchExecCopy(LiveUnits, MF, MBB, MBBI, DL, /*IsProlog*/ true,
1052 /*EnableInactiveLanes*/ true);
1053 else if (WWMCalleeSavedRegs.empty())
1054 EnableAllLanes();
1055 } else if (ScratchExecCopy) {
1056 // FIXME: Split block and make terminator.
1057 BuildMI(MBB, MBBI, DL, TII->get(LMC.MovOpc), LMC.ExecReg)
1058 .addReg(ScratchExecCopy, RegState::Kill);
1059 LiveUnits.addReg(ScratchExecCopy);
1060 }
1061
1062 Register FramePtrReg = FuncInfo->getFrameOffsetReg();
1063
1064 for (const auto &Spill : FuncInfo->getPrologEpilogSGPRSpills()) {
1065 // Special handle FP spill:
1066 // Skip if FP is saved to a scratch SGPR, the save has already been emitted.
1067 // Otherwise, FP has been moved to a temporary register and spill it
1068 // instead.
1069 Register Reg =
1070 Spill.first == FramePtrReg ? FramePtrRegScratchCopy : Spill.first;
1071 if (!Reg)
1072 continue;
1073
1074 PrologEpilogSGPRSpillBuilder SB(Reg, Spill.second, MBB, MBBI, DL, TII, TRI,
1075 LiveUnits, FrameReg);
1076 SB.save();
1077 }
1078
1079 // If a copy to scratch SGPR has been chosen for any of the SGPR spills, make
1080 // such scratch registers live throughout the function.
1081 SmallVector<Register, 1> ScratchSGPRs;
1082 FuncInfo->getAllScratchSGPRCopyDstRegs(ScratchSGPRs);
1083 if (!ScratchSGPRs.empty()) {
1084 for (MachineBasicBlock &MBB : MF) {
1085 for (MCPhysReg Reg : ScratchSGPRs)
1086 MBB.addLiveIn(Reg);
1087
1088 MBB.sortUniqueLiveIns();
1089 }
1090 if (!LiveUnits.empty()) {
1091 for (MCPhysReg Reg : ScratchSGPRs)
1092 LiveUnits.addReg(Reg);
1093 }
1094 }
1095}
1096
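// Mirror image of emitCSRSpillStores: restore the special SGPRs first, then
// reload the WWM VGPRs with the appropriate EXEC mask manipulation.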
void SIFrameLowering::emitCSRSpillRestores(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, DebugLoc &DL, LiveRegUnits &LiveUnits,
1100 Register FrameReg, Register FramePtrRegScratchCopy) const {
1101 const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
1102 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1103 const SIInstrInfo *TII = ST.getInstrInfo();
1104 const SIRegisterInfo &TRI = TII->getRegisterInfo();
  const AMDGPU::LaneMaskConstants &LMC = AMDGPU::LaneMaskConstants::get(ST);
1106 Register FramePtrReg = FuncInfo->getFrameOffsetReg();
1107
1108 for (const auto &Spill : FuncInfo->getPrologEpilogSGPRSpills()) {
1109 // Special handle FP restore:
1110 // Skip if FP needs to be restored from the scratch SGPR. Otherwise, restore
1111 // the FP value to a temporary register. The frame pointer should be
1112 // overwritten only at the end when all other spills are restored from
1113 // current frame.
1114 Register Reg =
1115 Spill.first == FramePtrReg ? FramePtrRegScratchCopy : Spill.first;
1116 if (!Reg)
1117 continue;
1118
1119 PrologEpilogSGPRSpillBuilder SB(Reg, Spill.second, MBB, MBBI, DL, TII, TRI,
1120 LiveUnits, FrameReg);
1121 SB.restore();
1122 }
1123
1124 // Restore Whole-Wave Mode VGPRs. Restore only the inactive lanes of the
1125 // scratch registers. However, restore all lanes of callee-saved VGPRs. Due to
1126 // this, we might end up flipping the EXEC bits twice.
1127 Register ScratchExecCopy;
1128 SmallVector<std::pair<Register, int>, 2> WWMCalleeSavedRegs, WWMScratchRegs;
1129 FuncInfo->splitWWMSpillRegisters(MF, WWMCalleeSavedRegs, WWMScratchRegs);
1130 auto RestoreWWMRegisters =
      [&](SmallVectorImpl<std::pair<Register, int>> &WWMRegs) {
1132 for (const auto &Reg : WWMRegs) {
1133 Register VGPR = Reg.first;
1134 int FI = Reg.second;
1135 buildEpilogRestore(ST, TRI, *FuncInfo, LiveUnits, MF, MBB, MBBI, DL,
1136 VGPR, FI, FrameReg);
1137 }
1138 };
1139
1140 if (FuncInfo->isWholeWaveFunction()) {
1141 // For whole wave functions, the EXEC is already -1 at this point.
1142 // Therefore, we can restore the CSR WWM registers right away.
1143 RestoreWWMRegisters(WWMCalleeSavedRegs);
1144
1145 // The original EXEC is the first operand of the return instruction.
1146 MachineInstr &Return = MBB.instr_back();
1147 unsigned Opcode = Return.getOpcode();
1148 switch (Opcode) {
1149 case AMDGPU::SI_WHOLE_WAVE_FUNC_RETURN:
1150 Opcode = AMDGPU::SI_RETURN;
1151 break;
1152 case AMDGPU::SI_TCRETURN_GFX_WholeWave:
1153 Opcode = AMDGPU::SI_TCRETURN_GFX;
1154 break;
1155 default:
1156 llvm_unreachable("Unexpected return inst");
1157 }
1158 Register OrigExec = Return.getOperand(0).getReg();
1159
1160 if (!WWMScratchRegs.empty()) {
1161 BuildMI(MBB, MBBI, DL, TII->get(LMC.XorOpc), LMC.ExecReg)
1162 .addReg(OrigExec)
1163 .addImm(-1);
1164 RestoreWWMRegisters(WWMScratchRegs);
1165 }
1166
1167 // Restore original EXEC.
1168 BuildMI(MBB, MBBI, DL, TII->get(LMC.MovOpc), LMC.ExecReg).addReg(OrigExec);
1169
1170 // Drop the first operand and update the opcode.
1171 Return.removeOperand(0);
1172 Return.setDesc(TII->get(Opcode));
1173
1174 return;
1175 }
1176
1177 if (!WWMScratchRegs.empty()) {
1178 ScratchExecCopy =
1179 buildScratchExecCopy(LiveUnits, MF, MBB, MBBI, DL,
1180 /*IsProlog=*/false, /*EnableInactiveLanes=*/true);
1181 }
1182 RestoreWWMRegisters(WWMScratchRegs);
1183 if (!WWMCalleeSavedRegs.empty()) {
1184 if (ScratchExecCopy) {
1185 BuildMI(MBB, MBBI, DL, TII->get(LMC.MovOpc), LMC.ExecReg).addImm(-1);
1186 } else {
1187 ScratchExecCopy = buildScratchExecCopy(LiveUnits, MF, MBB, MBBI, DL,
1188 /*IsProlog*/ false,
1189 /*EnableInactiveLanes*/ false);
1190 }
1191 }
1192
1193 RestoreWWMRegisters(WWMCalleeSavedRegs);
1194 if (ScratchExecCopy) {
1195 // FIXME: Split block and make terminator.
1196 BuildMI(MBB, MBBI, DL, TII->get(LMC.MovOpc), LMC.ExecReg)
1197 .addReg(ScratchExecCopy, RegState::Kill);
1198 }
1199}
1200
void SIFrameLowering::emitPrologue(MachineFunction &MF,
1202 MachineBasicBlock &MBB) const {
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
1204 if (FuncInfo->isEntryFunction()) {
    emitEntryFunctionPrologue(MF, MBB);
1206 return;
1207 }
1208
1209 MachineFrameInfo &MFI = MF.getFrameInfo();
1210 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1211 const SIInstrInfo *TII = ST.getInstrInfo();
1212 const SIRegisterInfo &TRI = TII->getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
1214
1215 Register StackPtrReg = FuncInfo->getStackPtrOffsetReg();
1216 Register FramePtrReg = FuncInfo->getFrameOffsetReg();
1217 Register BasePtrReg =
1218 TRI.hasBasePointer(MF) ? TRI.getBaseRegister() : Register();
1219 LiveRegUnits LiveUnits;
1220
  MachineBasicBlock::iterator MBBI = MBB.begin();
1222 // DebugLoc must be unknown since the first instruction with DebugLoc is used
1223 // to determine the end of the prologue.
1224 DebugLoc DL;
1225
1226 if (FuncInfo->isChainFunction()) {
1227 // Functions with the amdgpu_cs_chain[_preserve] CC don't receive a SP, but
1228 // are free to set one up if they need it.
1229 bool UseSP = requiresStackPointerReference(MF);
1230 if (UseSP) {
1231 assert(StackPtrReg != AMDGPU::SP_REG);
1232
1233 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_MOV_B32), StackPtrReg)
          .addImm(MFI.getStackSize() * getScratchScaleFactor(ST));
1235 }
1236 }
1237
1238 bool HasFP = false;
1239 bool HasBP = false;
1240 uint32_t NumBytes = MFI.getStackSize();
1241 uint32_t RoundedSize = NumBytes;
1242
1243 if (TRI.hasStackRealignment(MF))
1244 HasFP = true;
1245
1246 Register FramePtrRegScratchCopy;
1247 if (!HasFP && !hasFP(MF)) {
1248 // Emit the CSR spill stores with SP base register.
1249 emitCSRSpillStores(MF, MBB, MBBI, DL, LiveUnits,
1250 FuncInfo->isChainFunction() ? Register() : StackPtrReg,
1251 FramePtrRegScratchCopy);
1252 } else {
1253 // CSR spill stores will use FP as base register.
1254 Register SGPRForFPSaveRestoreCopy =
1255 FuncInfo->getScratchSGPRCopyDstReg(FramePtrReg);
1256
1257 initLiveUnits(LiveUnits, TRI, FuncInfo, MF, MBB, MBBI, /*IsProlog*/ true);
1258 if (SGPRForFPSaveRestoreCopy) {
1259 // Copy FP to the scratch register now and emit the CFI entry. It avoids
1260 // the extra FP copy needed in the other two cases when FP is spilled to
1261 // memory or to a VGPR lane.
      PrologEpilogSGPRSpillBuilder SB(
1263 FramePtrReg,
1264 FuncInfo->getPrologEpilogSGPRSaveRestoreInfo(FramePtrReg), MBB, MBBI,
1265 DL, TII, TRI, LiveUnits, FramePtrReg);
1266 SB.save();
1267 LiveUnits.addReg(SGPRForFPSaveRestoreCopy);
1268 } else {
1269 // Copy FP into a new scratch register so that its previous value can be
1270 // spilled after setting up the new frame.
1271 FramePtrRegScratchCopy = findScratchNonCalleeSaveRegister(
1272 MRI, LiveUnits, AMDGPU::SReg_32_XM0_XEXECRegClass);
1273 if (!FramePtrRegScratchCopy)
1274 report_fatal_error("failed to find free scratch register");
1275
1276 LiveUnits.addReg(FramePtrRegScratchCopy);
1277 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), FramePtrRegScratchCopy)
1278 .addReg(FramePtrReg);
1279 }
1280 }
1281
1282 if (HasFP) {
1283 const unsigned Alignment = MFI.getMaxAlign().value();
1284
1285 RoundedSize += Alignment;
1286 if (LiveUnits.empty()) {
1287 LiveUnits.init(TRI);
1288 LiveUnits.addLiveIns(MBB);
1289 }
1290
1291 // s_add_i32 s33, s32, NumBytes
1292 // s_and_b32 s33, s33, 0b111...0000
1293 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_I32), FramePtrReg)
1294 .addReg(StackPtrReg)
1295 .addImm((Alignment - 1) * getScratchScaleFactor(ST))
        .setMIFlag(MachineInstr::FrameSetup);
1297 auto And = BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_AND_B32), FramePtrReg)
1298 .addReg(FramePtrReg, RegState::Kill)
1299 .addImm(-Alignment * getScratchScaleFactor(ST))
1301 And->getOperand(3).setIsDead(); // Mark SCC as dead.
1302 FuncInfo->setIsStackRealigned(true);
1303 } else if ((HasFP = hasFP(MF))) {
1304 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), FramePtrReg)
1305 .addReg(StackPtrReg)
        .setMIFlag(MachineInstr::FrameSetup);
1307 }
1308
1309 // If FP is used, emit the CSR spills with FP base register.
1310 if (HasFP) {
1311 emitCSRSpillStores(MF, MBB, MBBI, DL, LiveUnits, FramePtrReg,
1312 FramePtrRegScratchCopy);
1313 if (FramePtrRegScratchCopy)
1314 LiveUnits.removeReg(FramePtrRegScratchCopy);
1315 }
1316
1317 // If we need a base pointer, set it up here. It's whatever the value of
1318 // the stack pointer is at this point. Any variable size objects will be
1319 // allocated after this, so we can still use the base pointer to reference
1320 // the incoming arguments.
1321 if ((HasBP = TRI.hasBasePointer(MF))) {
1322 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), BasePtrReg)
1323 .addReg(StackPtrReg)
        .setMIFlag(MachineInstr::FrameSetup);
1325 }
1326
1327 if (HasFP && RoundedSize != 0) {
1328 auto Add = BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_I32), StackPtrReg)
1329 .addReg(StackPtrReg)
1330 .addImm(RoundedSize * getScratchScaleFactor(ST))
1332 Add->getOperand(3).setIsDead(); // Mark SCC as dead.
1333 }
1334
1335 bool FPSaved = FuncInfo->hasPrologEpilogSGPRSpillEntry(FramePtrReg);
1336 (void)FPSaved;
1337 assert((!HasFP || FPSaved) &&
1338 "Needed to save FP but didn't save it anywhere");
1339
1340 // If we allow spilling to AGPRs we may have saved FP but then spill
1341 // everything into AGPRs instead of the stack.
1342 assert((HasFP || !FPSaved || EnableSpillVGPRToAGPR) &&
1343 "Saved FP but didn't need it");
1344
1345 bool BPSaved = FuncInfo->hasPrologEpilogSGPRSpillEntry(BasePtrReg);
1346 (void)BPSaved;
1347 assert((!HasBP || BPSaved) &&
1348 "Needed to save BP but didn't save it anywhere");
1349
1350 assert((HasBP || !BPSaved) && "Saved BP but didn't need it");
1351
1352 if (FuncInfo->isWholeWaveFunction()) {
1353 // SI_WHOLE_WAVE_FUNC_SETUP has outlived its purpose.
1354 TII->getWholeWaveFunctionSetup(MF)->eraseFromParent();
1355 }
1356}
1357
void SIFrameLowering::emitEpilogue(MachineFunction &MF,
1359 MachineBasicBlock &MBB) const {
1360 const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
1361 if (FuncInfo->isEntryFunction())
1362 return;
1363
1364 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1365 const SIInstrInfo *TII = ST.getInstrInfo();
1366 const SIRegisterInfo &TRI = TII->getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
1368 LiveRegUnits LiveUnits;
1369 // Get the insert location for the epilogue. If there were no terminators in
1370 // the block, get the last instruction.
  MachineBasicBlock::iterator MBBI = MBB.end();
1372 DebugLoc DL;
1373 if (!MBB.empty()) {
1374 MBBI = MBB.getLastNonDebugInstr();
1375 if (MBBI != MBB.end())
1376 DL = MBBI->getDebugLoc();
1377
1378 MBBI = MBB.getFirstTerminator();
1379 }
1380
1381 const MachineFrameInfo &MFI = MF.getFrameInfo();
1382 uint32_t NumBytes = MFI.getStackSize();
1383 uint32_t RoundedSize = FuncInfo->isStackRealigned()
1384 ? NumBytes + MFI.getMaxAlign().value()
1385 : NumBytes;
1386 const Register StackPtrReg = FuncInfo->getStackPtrOffsetReg();
1387 Register FramePtrReg = FuncInfo->getFrameOffsetReg();
1388 bool FPSaved = FuncInfo->hasPrologEpilogSGPRSpillEntry(FramePtrReg);
1389
1390 if (RoundedSize != 0) {
1391 if (TRI.hasBasePointer(MF)) {
1392 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), StackPtrReg)
1393 .addReg(TRI.getBaseRegister())
          .setMIFlag(MachineInstr::FrameDestroy);
1395 } else if (hasFP(MF)) {
1396 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), StackPtrReg)
1397 .addReg(FramePtrReg)
          .setMIFlag(MachineInstr::FrameDestroy);
1399 }
1400 }
1401
1402 Register FramePtrRegScratchCopy;
1403 Register SGPRForFPSaveRestoreCopy =
1404 FuncInfo->getScratchSGPRCopyDstReg(FramePtrReg);
1405 if (FPSaved) {
1406 // CSR spill restores should use FP as base register. If
1407 // SGPRForFPSaveRestoreCopy is not valid, restore the previous value of FP
1408 // into a new scratch register and copy to FP later when other registers are
1409 // restored from the current stack frame.
1410 initLiveUnits(LiveUnits, TRI, FuncInfo, MF, MBB, MBBI, /*IsProlog*/ false);
1411 if (SGPRForFPSaveRestoreCopy) {
1412 LiveUnits.addReg(SGPRForFPSaveRestoreCopy);
1413 } else {
1414 FramePtrRegScratchCopy = findScratchNonCalleeSaveRegister(
1415 MRI, LiveUnits, AMDGPU::SReg_32_XM0_XEXECRegClass);
1416 if (!FramePtrRegScratchCopy)
1417 report_fatal_error("failed to find free scratch register");
1418
1419 LiveUnits.addReg(FramePtrRegScratchCopy);
1420 }
1421
1422 emitCSRSpillRestores(MF, MBB, MBBI, DL, LiveUnits, FramePtrReg,
1423 FramePtrRegScratchCopy);
1424 }
1425
1426 if (FPSaved) {
1427 // Insert the copy to restore FP.
1428 Register SrcReg = SGPRForFPSaveRestoreCopy ? SGPRForFPSaveRestoreCopy
1429 : FramePtrRegScratchCopy;
    MachineInstrBuilder MIB =
1431 BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), FramePtrReg)
1432 .addReg(SrcReg);
1433 if (SGPRForFPSaveRestoreCopy)
      MIB.setMIFlag(MachineInstr::FrameDestroy);
1435 } else {
1436 // Insert the CSR spill restores with SP as the base register.
1437 emitCSRSpillRestores(MF, MBB, MBBI, DL, LiveUnits,
1438 FuncInfo->isChainFunction() ? Register() : StackPtrReg,
1439 FramePtrRegScratchCopy);
1440 }
1441}
1442
1443#ifndef NDEBUG
static bool allSGPRSpillsAreDead(const MachineFunction &MF) {
1445 const MachineFrameInfo &MFI = MF.getFrameInfo();
1446 const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
1447 for (int I = MFI.getObjectIndexBegin(), E = MFI.getObjectIndexEnd();
1448 I != E; ++I) {
1449 if (!MFI.isDeadObjectIndex(I) &&
        MFI.getStackID(I) == TargetStackID::SGPRSpill &&
        !FuncInfo->checkIndexInPrologEpilogSGPRSpills(I)) {
1452 return false;
1453 }
1454 }
1455
1456 return true;
1457}
1458#endif
1459
StackOffset SIFrameLowering::getFrameIndexReference(const MachineFunction &MF,
1461 int FI,
1462 Register &FrameReg) const {
1463 const SIRegisterInfo *RI = MF.getSubtarget<GCNSubtarget>().getRegisterInfo();
1464
1465 FrameReg = RI->getFrameRegister(MF);
  return StackOffset::getFixed(MF.getFrameInfo().getObjectOffset(FI));
1467}
1468
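// After frame finalization: fold eligible VGPR spills into free AGPRs when the
// subtarget has MAI instructions, drop spill frame indices that became dead as
// a result, and add emergency scavenging slots if stack accesses remain.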
void SIFrameLowering::processFunctionBeforeFrameFinalized(
1470 MachineFunction &MF,
1471 RegScavenger *RS) const {
1472 MachineFrameInfo &MFI = MF.getFrameInfo();
1473
1474 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1475 const SIInstrInfo *TII = ST.getInstrInfo();
1476 const SIRegisterInfo *TRI = ST.getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
1479
1480 const bool SpillVGPRToAGPR = ST.hasMAIInsts() && FuncInfo->hasSpilledVGPRs()
                               && EnableSpillVGPRToAGPR;
1482
1483 if (SpillVGPRToAGPR) {
1484 // To track the spill frame indices handled in this pass.
1485 BitVector SpillFIs(MFI.getObjectIndexEnd(), false);
1486 BitVector NonVGPRSpillFIs(MFI.getObjectIndexEnd(), false);
1487
1488 bool SeenDbgInstr = false;
1489
1490 for (MachineBasicBlock &MBB : MF) {
      for (MachineInstr &MI : llvm::make_early_inc_range(MBB)) {
1492 int FrameIndex;
1493 if (MI.isDebugInstr())
1494 SeenDbgInstr = true;
1495
1496 if (TII->isVGPRSpill(MI)) {
1497 // Try to eliminate stack used by VGPR spills before frame
1498 // finalization.
1499 unsigned FIOp = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
1500 AMDGPU::OpName::vaddr);
1501 int FI = MI.getOperand(FIOp).getIndex();
1502 Register VReg =
1503 TII->getNamedOperand(MI, AMDGPU::OpName::vdata)->getReg();
1504 if (FuncInfo->allocateVGPRSpillToAGPR(MF, FI,
1505 TRI->isAGPR(MRI, VReg))) {
1506 assert(RS != nullptr);
1507 RS->enterBasicBlockEnd(MBB);
1508 RS->backward(std::next(MI.getIterator()));
1509 TRI->eliminateFrameIndex(MI, 0, FIOp, RS);
1510 SpillFIs.set(FI);
1511 continue;
1512 }
1513 } else if (TII->isStoreToStackSlot(MI, FrameIndex) ||
1514 TII->isLoadFromStackSlot(MI, FrameIndex))
1515 if (!MFI.isFixedObjectIndex(FrameIndex))
1516 NonVGPRSpillFIs.set(FrameIndex);
1517 }
1518 }
1519
1520 // Stack slot coloring may assign different objects to the same stack slot.
1521 // If not, then the VGPR to AGPR spill slot is dead.
1522 for (unsigned FI : SpillFIs.set_bits())
1523 if (!NonVGPRSpillFIs.test(FI))
1524 FuncInfo->setVGPRToAGPRSpillDead(FI);
1525
1526 for (MachineBasicBlock &MBB : MF) {
1527 for (MCPhysReg Reg : FuncInfo->getVGPRSpillAGPRs())
1528 MBB.addLiveIn(Reg);
1529
1530 for (MCPhysReg Reg : FuncInfo->getAGPRSpillVGPRs())
1531 MBB.addLiveIn(Reg);
1532
1533 MBB.sortUniqueLiveIns();
1534
1535 if (!SpillFIs.empty() && SeenDbgInstr) {
1536 // FIXME: The dead frame indices are replaced with a null register from
1537 // the debug value instructions. We should instead update it with the
1538 // correct register value. But it is not clear the register value alone is
        // enough to lower the DIExpression; this should be worked out later.
1539 for (MachineInstr &MI : MBB) {
1540 if (MI.isDebugValue()) {
1541 uint32_t StackOperandIdx = MI.isDebugValueList() ? 2 : 0;
1542 if (MI.getOperand(StackOperandIdx).isFI() &&
1543 !MFI.isFixedObjectIndex(
1544 MI.getOperand(StackOperandIdx).getIndex()) &&
1545 SpillFIs[MI.getOperand(StackOperandIdx).getIndex()]) {
1546 MI.getOperand(StackOperandIdx)
1547 .ChangeToRegister(Register(), false /*isDef*/);
1548 }
1549 }
1550 }
1551 }
1552 }
1553 }
1554
1555 // At this point we've already allocated all spilled SGPRs to VGPRs if we
1556 // can. Any remaining SGPR spills will go to memory, so move them back to the
1557 // default stack.
1558 bool HaveSGPRToVMemSpill =
1559 FuncInfo->removeDeadFrameIndices(MFI, /*ResetSGPRSpillStackIDs*/ true);
1561 "SGPR spill should have been removed in SILowerSGPRSpills");
1562
1563 // FIXME: The other checks should be redundant with allStackObjectsAreDead,
1564 // but currently hasNonSpillStackObjects is set only from source
1565 // allocas. Stack temps produced from legalization are not counted currently.
1566 if (!allStackObjectsAreDead(MFI)) {
1567 assert(RS && "RegScavenger required if spilling");
1568
1569 // Add an emergency spill slot
1570 RS->addScavengingFrameIndex(FuncInfo->getScavengeFI(MFI, *TRI));
1571
1572 // If we are spilling SGPRs to memory with a large frame, we may need a
1573 // second VGPR emergency frame index.
1574 if (HaveSGPRToVMemSpill &&
1576 RS->addScavengingFrameIndex(MFI.CreateSpillStackObject(4, Align(4)));
1577 }
1578 }
1579}
1580
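// Late shift-down of registers that were conservatively reserved before RA:
// the gfx908 AGPR-copy VGPR and the SGPR pair kept for long branches are moved
// to the lowest still-unused registers so the high ones can stay free.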
void SIFrameLowering::processFunctionBeforeFrameIndicesReplaced(
1582 MachineFunction &MF, RegScavenger *RS) const {
1583 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1584 const SIRegisterInfo *TRI = ST.getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
1587
1588 if (ST.hasMAIInsts() && !ST.hasGFX90AInsts()) {
1589 // On gfx908, we had initially reserved highest available VGPR for AGPR
1590 // copy. Now that we are done with RA, check if there exists an unused VGPR
1591 // which is lower than the one reserved before RA. If one exists,
1592 // use it for the AGPR copy instead of the one reserved before RA.
1593 Register VGPRForAGPRCopy = FuncInfo->getVGPRForAGPRCopy();
1594 Register UnusedLowVGPR =
1595 TRI->findUnusedRegister(MRI, &AMDGPU::VGPR_32RegClass, MF);
1596 if (UnusedLowVGPR && (TRI->getHWRegIndex(UnusedLowVGPR) <
1597 TRI->getHWRegIndex(VGPRForAGPRCopy))) {
1598 // Reserve this newly identified VGPR (for AGPR copy)
1599 // reserved registers should already be frozen at this point
1600 // so we can avoid calling MRI.freezeReservedRegs and just use
1601 // MRI.reserveReg
1602 FuncInfo->setVGPRForAGPRCopy(UnusedLowVGPR);
1603 MRI.reserveReg(UnusedLowVGPR, TRI);
1604 }
1605 }
1606 // We initially reserved the highest available SGPR pair for long branches;
1607 // now, after RA, we shift down to a lower unused one if one exists.
1608 Register LongBranchReservedReg = FuncInfo->getLongBranchReservedReg();
1609 Register UnusedLowSGPR =
1610 TRI->findUnusedRegister(MRI, &AMDGPU::SGPR_64RegClass, MF);
1611 // If LongBranchReservedReg is null then we didn't find a long branch
1612 // and never reserved a register to begin with so there is nothing to
1613 // shift down. Then if UnusedLowSGPR is null, there isn't an available lower
1614 // register to use so just keep the original one we set.
1615 if (LongBranchReservedReg && UnusedLowSGPR) {
1616 FuncInfo->setLongBranchReservedReg(UnusedLowSGPR);
1617 MRI.reserveReg(UnusedLowSGPR, TRI);
1618 }
1619}
1620
1621// The special SGPR spills like the one needed for FP, BP or any reserved
1622// registers delayed until frame lowering.
void SIFrameLowering::determinePrologEpilogSGPRSaves(
1624 MachineFunction &MF, BitVector &SavedVGPRs,
1625 bool NeedExecCopyReservedReg) const {
1626 MachineFrameInfo &FrameInfo = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1629 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1630 const SIRegisterInfo *TRI = ST.getRegisterInfo();
1631 LiveRegUnits LiveUnits;
1632 LiveUnits.init(*TRI);
1633 // Initially mark callee saved registers as used so we will not choose them
1634 // while looking for scratch SGPRs.
1635 const MCPhysReg *CSRegs = MF.getRegInfo().getCalleeSavedRegs();
1636 for (unsigned I = 0; CSRegs[I]; ++I)
1637 LiveUnits.addReg(CSRegs[I]);
1638
1639 const TargetRegisterClass &RC = *TRI->getWaveMaskRegClass();
1640
1641 Register ReservedRegForExecCopy = MFI->getSGPRForEXECCopy();
1642 if (NeedExecCopyReservedReg ||
1643 (ReservedRegForExecCopy &&
1644 MRI.isPhysRegUsed(ReservedRegForExecCopy, /*SkipRegMaskTest=*/true))) {
1645 MRI.reserveReg(ReservedRegForExecCopy, TRI);
1646 Register UnusedScratchReg = findUnusedRegister(MRI, LiveUnits, RC);
1647 if (UnusedScratchReg) {
1648 // If we found an unused scratch SGPR, reserve that register for the EXEC
1649 // copy; there is no need for any spill in that case.
1650 MFI->setSGPRForEXECCopy(UnusedScratchReg);
1651 MRI.replaceRegWith(ReservedRegForExecCopy, UnusedScratchReg);
1652 LiveUnits.addReg(UnusedScratchReg);
1653 } else {
1654 // Needs spill.
1655 assert(!MFI->hasPrologEpilogSGPRSpillEntry(ReservedRegForExecCopy) &&
1656 "Re-reserving spill slot for EXEC copy register");
1657 getVGPRSpillLaneOrTempRegister(MF, LiveUnits, ReservedRegForExecCopy, RC,
1658 /*IncludeScratchCopy=*/false);
1659 }
1660 } else if (ReservedRegForExecCopy) {
1661 // Reset it at this point. No whole-wave copies or spills were
1662 // encountered.
1663 MFI->setSGPRForEXECCopy(AMDGPU::NoRegister);
1664 }
1665
1666 // hasFP only knows about stack objects that already exist. We're now
1667 // determining the stack slots that will be created, so we have to predict
1668 // them. Stack objects force FP usage with calls.
1669 //
1670 // Note a new VGPR CSR may be introduced if one is used for the spill, but we
1671 // don't want to report it here.
1672 //
1673 // FIXME: Is this really hasReservedCallFrame?
1674 const bool WillHaveFP =
1675 FrameInfo.hasCalls() &&
1676 (SavedVGPRs.any() || !allStackObjectsAreDead(FrameInfo));
1677
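// Sketch of what getVGPRSpillLaneOrTempRegister does for the FP/BP handled
// below (illustrative; s33/s34 are the conventional FP/BP assignments): the
// SGPR is either copied into a free scratch SGPR, spilled to a free VGPR
// lane, or, failing both, spilled to a scratch memory slot.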
1678 if (WillHaveFP || hasFP(MF)) {
1679 Register FramePtrReg = MFI->getFrameOffsetReg();
1680 assert(!MFI->hasPrologEpilogSGPRSpillEntry(FramePtrReg) &&
1681 "Re-reserving spill slot for FP");
1682 getVGPRSpillLaneOrTempRegister(MF, LiveUnits, FramePtrReg);
1683 }
1684
1685 if (TRI->hasBasePointer(MF)) {
1686 Register BasePtrReg = TRI->getBaseRegister();
1687 assert(!MFI->hasPrologEpilogSGPRSpillEntry(BasePtrReg) &&
1688 "Re-reserving spill slot for BP");
1689 getVGPRSpillLaneOrTempRegister(MF, LiveUnits, BasePtrReg);
1690 }
1691}
1692
1693// Only report VGPRs to generic code.
1695 BitVector &SavedVGPRs,
1696 RegScavenger *RS) const {
1698
1699 // If this is a function with the amdgpu_cs_chain[_preserve] calling
1700 // convention and it doesn't contain any calls to llvm.amdgcn.cs.chain, then
1701 // we don't need to save and restore anything.
1702 if (MFI->isChainFunction() && !MF.getFrameInfo().hasTailCall())
1703 return;
1704
1706
1707 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1708 const SIRegisterInfo *TRI = ST.getRegisterInfo();
1709 const SIInstrInfo *TII = ST.getInstrInfo();
1710 bool NeedExecCopyReservedReg = false;
1711
1712 MachineInstr *ReturnMI = nullptr;
1713 for (MachineBasicBlock &MBB : MF) {
1714 for (MachineInstr &MI : MBB) {
1715 // TODO: Walking through all MBBs here is a bad heuristic. Better to
1716 // handle them elsewhere.
1717 if (TII->isWWMRegSpillOpcode(MI.getOpcode()))
1718 NeedExecCopyReservedReg = true;
1719 else if (MI.getOpcode() == AMDGPU::SI_RETURN ||
1720 MI.getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG ||
1721 MI.getOpcode() == AMDGPU::SI_WHOLE_WAVE_FUNC_RETURN ||
1722 (MFI->isChainFunction() &&
1723 TII->isChainCallOpcode(MI.getOpcode()))) {
1724 // We expect all returns to be the same size.
1725 assert(!ReturnMI ||
1726 (count_if(MI.operands(), [](auto Op) { return Op.isReg(); }) ==
1727 count_if(ReturnMI->operands(), [](auto Op) { return Op.isReg(); })));
1728 ReturnMI = &MI;
1729 }
1730 }
1731 }
1732
1733 SmallVector<Register> SortedWWMVGPRs;
1734 for (Register Reg : MFI->getWWMReservedRegs()) {
1735 // The shift-back is needed only for the VGPRs used for SGPR spills, and
1736 // those are 32 bits in size. The SIPreAllocateWWMRegs pass can add tuples
1737 // to the WWM reserved registers.
1738 const TargetRegisterClass *RC = TRI->getPhysRegBaseClass(Reg);
1739 if (TRI->getRegSizeInBits(*RC) != 32)
1740 continue;
1741 SortedWWMVGPRs.push_back(Reg);
1742 }
1743
1744 sort(SortedWWMVGPRs, std::greater<Register>());
1745 MFI->shiftWwmVGPRsToLowestRange(MF, SortedWWMVGPRs, SavedVGPRs);
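// Illustrative example (hypothetical registers): if the 32-bit WWM reserved
// VGPRs are {v12, v40, v200}, the descending sort visits them as v200, v40,
// v12, and shiftWwmVGPRsToLowestRange remaps them onto the lowest available
// VGPRs.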
1746
1747 if (MFI->isEntryFunction())
1748 return;
1749
1750 if (MFI->isWholeWaveFunction()) {
1751 // In practice, all the VGPRs are WWM registers, and we will need to save at
1752 // least their inactive lanes. Add them to WWMReservedRegs.
1753 assert(!NeedExecCopyReservedReg &&
1754 "Whole wave functions can use the reg mapped for their i1 argument");
1755
1756 // FIXME: Be more efficient!
1757 unsigned NumArchVGPRs = ST.has1024AddressableVGPRs() ? 1024 : 256;
1758 for (MCRegister Reg :
1759 AMDGPU::VGPR_32RegClass.getRegisters().take_front(NumArchVGPRs))
1760 if (MF.getRegInfo().isPhysRegModified(Reg)) {
1761 MFI->reserveWWMRegister(Reg);
1762 MF.begin()->addLiveIn(Reg);
1763 }
1764 MF.begin()->sortUniqueLiveIns();
1765 }
1766
1767 // Remove any VGPRs used in the return value because these do not need to be saved.
1768 // This prevents CSR restore from clobbering return VGPRs.
1769 if (ReturnMI) {
1770 for (auto &Op : ReturnMI->operands()) {
1771 if (Op.isReg())
1772 SavedVGPRs.reset(Op.getReg());
1773 }
1774 }
1775
1776 // Create the stack objects for WWM registers now.
1777 for (Register Reg : MFI->getWWMReservedRegs()) {
1778 const TargetRegisterClass *RC = TRI->getPhysRegBaseClass(Reg);
1779 MFI->allocateWWMSpill(MF, Reg, TRI->getSpillSize(*RC),
1780 TRI->getSpillAlign(*RC));
1781 }
1782
1783 // Ignore the SGPRs the default implementation found.
1784 SavedVGPRs.clearBitsNotInMask(TRI->getAllVectorRegMask());
1785
1786 // Do not save AGPRs prior to GFX90A because there was no easy way to do so.
1787 // In gfx908 there are no AGPR loads and stores, and thus spilling also
1788 // requires a temporary VGPR.
1789 if (!ST.hasGFX90AInsts())
1790 SavedVGPRs.clearBitsInMask(TRI->getAllAGPRRegMask());
1791
1792 determinePrologEpilogSGPRSaves(MF, SavedVGPRs, NeedExecCopyReservedReg);
1793
1794 // The Whole-Wave VGPRs need to be specially inserted in the prolog, so don't
1795 // allow the default insertion to handle them.
1796 for (auto &Reg : MFI->getWWMSpills())
1797 SavedVGPRs.reset(Reg.first);
1798}
1799
1801 BitVector &SavedRegs,
1802 RegScavenger *RS) const {
1805 if (MFI->isEntryFunction())
1806 return;
1807
1808 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1809 const SIRegisterInfo *TRI = ST.getRegisterInfo();
1810
1811 // The SP is specifically managed and we don't want extra spills of it.
1812 SavedRegs.reset(MFI->getStackPtrOffsetReg());
1813
1814 const BitVector AllSavedRegs = SavedRegs;
1815 SavedRegs.clearBitsInMask(TRI->getAllVectorRegMask());
1816
1817 // We have to anticipate introducing CSR VGPR spills or the spill of a
1818 // caller-saved VGPR reserved for SGPR spills, as we now always create a
1819 // stack entry for it if we don't have any stack objects already, since we
1820 // require an FP if there is a call and a stack. We will allocate a VGPR for
1821 // SGPR spills if there are any SGPR spills, whether CSR spills or otherwise.
1822 MachineFrameInfo &FrameInfo = MF.getFrameInfo();
1823 const bool WillHaveFP =
1824 FrameInfo.hasCalls() && (AllSavedRegs.any() || MFI->hasSpilledSGPRs());
1825
1826 // FP will be specially managed like SP.
1827 if (WillHaveFP || hasFP(MF))
1828 SavedRegs.reset(MFI->getFrameOffsetReg());
1829
1830 // The return address use in the return instruction is hidden through the
1831 // SI_RETURN pseudo. Given that, and since IPRA computes actual register
1832 // usage and does not use the CSR list, the clobbering of the return address
1833 // by function calls (D117243) or otherwise (D120922) is not seen by IPRA's
1834 // register usage collection. Marking the registers here ensures the return
1835 // address is saved and restored in those scenarios.
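// The return address is an SGPR pair (typically s[30:31]), so both 32-bit
// halves, e.g. s30 and s31, are marked for save/restore below.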
1836 const MachineRegisterInfo &MRI = MF.getRegInfo();
1837 Register RetAddrReg = TRI->getReturnAddressReg(MF);
1838 if (!MFI->isEntryFunction() &&
1839 (FrameInfo.hasCalls() || MRI.isPhysRegModified(RetAddrReg))) {
1840 SavedRegs.set(TRI->getSubReg(RetAddrReg, AMDGPU::sub0));
1841 SavedRegs.set(TRI->getSubReg(RetAddrReg, AMDGPU::sub1));
1842 }
1843}
1844
1846 const GCNSubtarget &ST,
1847 std::vector<CalleeSavedInfo> &CSI,
1848 unsigned &MinCSFrameIndex,
1849 unsigned &MaxCSFrameIndex) {
1851 MachineFrameInfo &MFI = MF.getFrameInfo();
1852 const SIRegisterInfo *TRI = ST.getRegisterInfo();
1853
1854 assert(
1855 llvm::is_sorted(CSI,
1856 [](const CalleeSavedInfo &A, const CalleeSavedInfo &B) {
1857 return A.getReg() < B.getReg();
1858 }) &&
1859 "Callee saved registers not sorted");
1860
1861 auto CanUseBlockOps = [&](const CalleeSavedInfo &CSI) {
1862 return !CSI.isSpilledToReg() &&
1863 TRI->getPhysRegBaseClass(CSI.getReg()) == &AMDGPU::VGPR_32RegClass &&
1864 !FuncInfo->isWWMReservedRegister(CSI.getReg());
1865 };
1866
1867 auto CSEnd = CSI.end();
1868 for (auto CSIt = CSI.begin(); CSIt != CSEnd; ++CSIt) {
1869 Register Reg = CSIt->getReg();
1870 if (!CanUseBlockOps(*CSIt))
1871 continue;
1872
1873 // Find all the regs that will fit in a 32-bit mask starting at the current
1874 // reg and build said mask. It should have a 1 for every register that's
1875 // included, with the current register as the least significant bit.
1876 uint32_t Mask = 1;
1877 CSEnd = std::remove_if(
1878 CSIt + 1, CSEnd, [&](const CalleeSavedInfo &CSI) -> bool {
1879 if (CanUseBlockOps(CSI) && CSI.getReg() < Reg + 32) {
1880 Mask |= 1 << (CSI.getReg() - Reg);
1881 return true;
1882 } else {
1883 return false;
1884 }
1885 });
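// Worked example (hypothetical registers): if the candidate CSRs are v40,
// v41 and v44, the scan starting at v40 keeps bits 0, 1 and 4, giving
// Mask == 0b10011.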
1886
1887 const TargetRegisterClass *BlockRegClass = TRI->getRegClassForBlockOp(MF);
1888 Register RegBlock =
1889 TRI->getMatchingSuperReg(Reg, AMDGPU::sub0, BlockRegClass);
1890 if (!RegBlock) {
1891 // We couldn't find a super register for the block. This can happen if
1892 // the register we started with is too high (e.g. v232 if the maximum is
1893 // v255). We therefore try to get the last register block and figure out
1894 // the mask from there.
1895 Register LastBlockStart =
1896 AMDGPU::VGPR0 + alignDown(Reg - AMDGPU::VGPR0, 32);
1897 RegBlock =
1898 TRI->getMatchingSuperReg(LastBlockStart, AMDGPU::sub0, BlockRegClass);
1899 assert(RegBlock && TRI->isSubRegister(RegBlock, Reg) &&
1900 "Couldn't find super register");
1901 int RegDelta = Reg - LastBlockStart;
1902 assert(RegDelta > 0 && llvm::countl_zero(Mask) >= RegDelta &&
1903 "Bad shift amount");
1904 Mask <<= RegDelta;
1905 }
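// Illustrative example (assuming 32-register blocks): for Reg == v232 the
// last block starts at v224, so RegDelta == 8 and the mask is shifted left
// by 8, making bit 8 correspond to v232.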
1906
1907 FuncInfo->setMaskForVGPRBlockOps(RegBlock, Mask);
1908
1909 // The stack object can be a bit smaller than the register block if we know
1910 // some of the high bits of Mask are 0. This may often happen with calling
1911 // conventions where the caller- and callee-saved VGPRs are interleaved at
1912 // a small boundary (e.g. 8 or 16).
1913 int UnusedBits = llvm::countl_zero(Mask);
1914 unsigned BlockSize = TRI->getSpillSize(*BlockRegClass) - UnusedBits * 4;
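// Illustrative sizing example (assuming a 32-register, 128-byte block
// class): if the top 16 bits of Mask are clear, UnusedBits == 16 and the
// stack object shrinks to 128 - 16 * 4 == 64 bytes.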
1915 int FrameIdx =
1916 MFI.CreateStackObject(BlockSize, TRI->getSpillAlign(*BlockRegClass),
1917 /*isSpillSlot=*/true);
1918 if ((unsigned)FrameIdx < MinCSFrameIndex)
1919 MinCSFrameIndex = FrameIdx;
1920 if ((unsigned)FrameIdx > MaxCSFrameIndex)
1921 MaxCSFrameIndex = FrameIdx;
1922
1923 CSIt->setFrameIdx(FrameIdx);
1924 CSIt->setReg(RegBlock);
1925 }
1926 CSI.erase(CSEnd, CSI.end());
1927}
1928
1931 std::vector<CalleeSavedInfo> &CSI, unsigned &MinCSFrameIndex,
1932 unsigned &MaxCSFrameIndex) const {
1933 if (CSI.empty())
1934 return true; // Early exit if no callee saved registers are modified!
1935
1936 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1937 bool UseVGPRBlocks = ST.useVGPRBlockOpsForCSR();
1938
1939 if (UseVGPRBlocks)
1940 assignSlotsUsingVGPRBlocks(MF, ST, CSI, MinCSFrameIndex, MaxCSFrameIndex);
1941
1942 return assignCalleeSavedSpillSlots(MF, TRI, CSI) || UseVGPRBlocks;
1943}
1944
1947 std::vector<CalleeSavedInfo> &CSI) const {
1948 if (CSI.empty())
1949 return true; // Early exit if no callee saved registers are modified!
1950
1951 const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
1952 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1953 const SIRegisterInfo *RI = ST.getRegisterInfo();
1954 Register FramePtrReg = FuncInfo->getFrameOffsetReg();
1955 Register BasePtrReg = RI->getBaseRegister();
1956 Register SGPRForFPSaveRestoreCopy =
1957 FuncInfo->getScratchSGPRCopyDstReg(FramePtrReg);
1958 Register SGPRForBPSaveRestoreCopy =
1959 FuncInfo->getScratchSGPRCopyDstReg(BasePtrReg);
1960 if (!SGPRForFPSaveRestoreCopy && !SGPRForBPSaveRestoreCopy)
1961 return false;
1962
1963 unsigned NumModifiedRegs = 0;
1964
1965 if (SGPRForFPSaveRestoreCopy)
1966 NumModifiedRegs++;
1967 if (SGPRForBPSaveRestoreCopy)
1968 NumModifiedRegs++;
1969
1970 for (auto &CS : CSI) {
1971 if (CS.getReg() == FramePtrReg.asMCReg() && SGPRForFPSaveRestoreCopy) {
1972 CS.setDstReg(SGPRForFPSaveRestoreCopy);
1973 if (--NumModifiedRegs)
1974 break;
1975 } else if (CS.getReg() == BasePtrReg.asMCReg() &&
1976 SGPRForBPSaveRestoreCopy) {
1977 CS.setDstReg(SGPRForBPSaveRestoreCopy);
1978 if (--NumModifiedRegs)
1979 break;
1980 }
1981 }
1982
1983 return false;
1984}
1985
1987 const MachineFunction &MF) const {
1988
1989 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
1990 const MachineFrameInfo &MFI = MF.getFrameInfo();
1991 const SIInstrInfo *TII = ST.getInstrInfo();
1992 uint64_t EstStackSize = MFI.estimateStackSize(MF);
1993 uint64_t MaxOffset = EstStackSize - 1;
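// Illustrative example (hypothetical numbers, MUBUF path): an estimated
// stack size of 2048 bytes gives MaxOffset == 2047, which still fits the
// 12-bit MUBUF immediate offset, so false is returned and the scavenging
// slots need not be placed near the incoming SP.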
1994
1995 // We need the emergency stack slots to be allocated in range of the
1996 // MUBUF/flat scratch immediate offset from the base register, so assign these
1997 // first at the incoming SP position.
1998 //
1999 // TODO: We could try sorting the objects to find a hole in the first bytes
2000 // rather than allocating as close as possible. This could save a lot of space
2001 // on frames with alignment requirements.
2002 if (ST.enableFlatScratch()) {
2003 if (TII->isLegalFLATOffset(MaxOffset, AMDGPUAS::PRIVATE_ADDRESS,
2005 return false;
2006 } else {
2007 if (TII->isLegalMUBUFImmOffset(MaxOffset))
2008 return false;
2009 }
2010
2011 return true;
2012}
2013
2017 MachineFunction *MF = MBB.getParent();
2018 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
2019 if (!ST.useVGPRBlockOpsForCSR())
2020 return false;
2021
2022 MachineFrameInfo &FrameInfo = MF->getFrameInfo();
2024 const SIInstrInfo *TII = ST.getInstrInfo();
2026
2027 const TargetRegisterClass *BlockRegClass =
2028 static_cast<const SIRegisterInfo *>(TRI)->getRegClassForBlockOp(*MF);
2029 for (const CalleeSavedInfo &CS : CSI) {
2030 Register Reg = CS.getReg();
2031 if (!BlockRegClass->contains(Reg) ||
2032 !FuncInfo->hasMaskForVGPRBlockOps(Reg)) {
2034 continue;
2035 }
2036
2037 // Build a scratch block store.
2038 uint32_t Mask = FuncInfo->getMaskForVGPRBlockOps(Reg);
2039 int FrameIndex = CS.getFrameIdx();
2040 MachinePointerInfo PtrInfo =
2041 MachinePointerInfo::getFixedStack(*MF, FrameIndex);
2042 MachineMemOperand *MMO =
2044 FrameInfo.getObjectSize(FrameIndex),
2045 FrameInfo.getObjectAlign(FrameIndex));
2046
2047 BuildMI(MBB, MI, MI->getDebugLoc(),
2048 TII->get(AMDGPU::SI_BLOCK_SPILL_V1024_SAVE))
2049 .addReg(Reg, getKillRegState(false))
2050 .addFrameIndex(FrameIndex)
2052 .addImm(0)
2053 .addImm(Mask)
2054 .addMemOperand(MMO);
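// Illustrative result (hypothetical registers): a single
// SI_BLOCK_SPILL_V1024_SAVE of block v[64:95] with Mask == 0x000000FF
// stores only v64..v71 of the block.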
2055
2056 FuncInfo->setHasSpilledVGPRs();
2057
2058 // Add the register to the liveins. This is necessary because if any of the
2059 // VGPRs in the register block is reserved (e.g. if it's a WWM register),
2060 // then the whole block will be marked as reserved and `updateLiveness` will
2061 // skip it.
2062 MBB.addLiveIn(Reg);
2063 }
2064 MBB.sortUniqueLiveIns();
2065
2066 return true;
2067}
2068
2072 MachineFunction *MF = MBB.getParent();
2073 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
2074 if (!ST.useVGPRBlockOpsForCSR())
2075 return false;
2076
2078 MachineFrameInfo &MFI = MF->getFrameInfo();
2079 const SIInstrInfo *TII = ST.getInstrInfo();
2080 const SIRegisterInfo *SITRI = static_cast<const SIRegisterInfo *>(TRI);
2081 const TargetRegisterClass *BlockRegClass = SITRI->getRegClassForBlockOp(*MF);
2082 for (const CalleeSavedInfo &CS : reverse(CSI)) {
2083 Register Reg = CS.getReg();
2084 if (!BlockRegClass->contains(Reg) ||
2085 !FuncInfo->hasMaskForVGPRBlockOps(Reg)) {
2087 continue;
2088 }
2089
2090 // Build a scratch block load.
2091 uint32_t Mask = FuncInfo->getMaskForVGPRBlockOps(Reg);
2092 int FrameIndex = CS.getFrameIdx();
2093 MachinePointerInfo PtrInfo =
2094 MachinePointerInfo::getFixedStack(*MF, FrameIndex);
2096 PtrInfo, MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIndex),
2097 MFI.getObjectAlign(FrameIndex));
2098
2099 auto MIB = BuildMI(MBB, MI, MI->getDebugLoc(),
2100 TII->get(AMDGPU::SI_BLOCK_SPILL_V1024_RESTORE), Reg)
2101 .addFrameIndex(FrameIndex)
2102 .addReg(FuncInfo->getStackPtrOffsetReg())
2103 .addImm(0)
2104 .addImm(Mask)
2105 .addMemOperand(MMO);
2106 SITRI->addImplicitUsesForBlockCSRLoad(MIB, Reg);
2107
2108 // Add the register to the liveins. This is necessary because if any of the
2109 // VGPRs in the register block is reserved (e.g. if it's a WWM register),
2110 // then the whole block will be marked as reserved and `updateLiveness` will
2111 // skip it.
2112 MBB.addLiveIn(Reg);
2113 }
2114
2115 MBB.sortUniqueLiveIns();
2116 return true;
2117}
2118
2120 MachineFunction &MF,
2123 int64_t Amount = I->getOperand(0).getImm();
2124 if (Amount == 0)
2125 return MBB.erase(I);
2126
2127 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
2128 const SIInstrInfo *TII = ST.getInstrInfo();
2129 const DebugLoc &DL = I->getDebugLoc();
2130 unsigned Opc = I->getOpcode();
2131 bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode();
2132 uint64_t CalleePopAmount = IsDestroy ? I->getOperand(1).getImm() : 0;
2133
2134 if (!hasReservedCallFrame(MF)) {
2135 Amount = alignTo(Amount, getStackAlign());
2136 assert(isUInt<32>(Amount) && "exceeded stack address space size");
2139
2140 Amount *= getScratchScaleFactor(ST);
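// Illustrative scaling example: assuming the scale factor is the wave size
// when flat scratch is disabled, a 16-byte adjustment in a wave64 function
// moves the SP by 16 * 64 == 1024 scratch bytes (negated below for the
// call-frame-destroy case).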
2141 if (IsDestroy)
2142 Amount = -Amount;
2143 auto Add = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), SPReg)
2144 .addReg(SPReg)
2145 .addImm(Amount);
2146 Add->getOperand(3).setIsDead(); // Mark SCC as dead.
2147 } else if (CalleePopAmount != 0) {
2148 llvm_unreachable("is this used?");
2149 }
2150
2151 return MBB.erase(I);
2152}
2153
2154/// Returns true if the frame will require a reference to the stack pointer.
2155///
2156/// This is the set of conditions common to setting up the stack pointer in a
2157/// kernel, and for using a frame pointer in a callable function.
2158///
2159/// FIXME: Should also check hasOpaqueSPAdjustment and if any inline asm
2160/// references SP.
2162 return MFI.hasVarSizedObjects() || MFI.hasStackMap() || MFI.hasPatchPoint();
2163}
2164
2165 // The FP for kernels is always known 0, so we never really need to set up an
2166// explicit register for it. However, DisableFramePointerElim will force us to
2167// use a register for it.
2169 const MachineFrameInfo &MFI = MF.getFrameInfo();
2170
2171 // For entry & chain functions we can use an immediate offset in most cases,
2172 // so the presence of calls doesn't imply we need a distinct frame pointer.
2173 if (MFI.hasCalls() &&
2176 // All offsets are unsigned, so they need to be addressed in the same
2177 // direction as stack growth.
2178
2179 // FIXME: This function is pretty broken, since it can be called before the
2180 // frame layout is determined or CSR spills are inserted.
2181 return MFI.getStackSize() != 0;
2182 }
2183
2184 return (frameTriviallyRequiresSP(MFI) &&
2186 MFI.isFrameAddressTaken() ||
2187 MF.getSubtarget<GCNSubtarget>().getRegisterInfo()->hasStackRealignment(
2188 MF) ||
2191}
2192
2194 const MachineFunction &MF) const {
2195 return MF.getInfo<SIMachineFunctionInfo>()->isDynamicVGPREnabled() &&
2198}
2199
2200// This is essentially a reduced version of hasFP for entry functions. Since the
2201// stack pointer is known 0 on entry to kernels, we never really need an FP
2202// register. We may need to initialize the stack pointer depending on the frame
2203// properties, which logically overlaps many of the cases where an ordinary
2204// function would require an FP.
2205// Also used for chain functions. While not technically entry functions, chain
2206// functions may need to set up a stack pointer in some situations.
2208 const MachineFunction &MF) const {
2209 // Callable functions always require a stack pointer reference.
2212 "only expected to call this for entry points and chain functions");
2213
2214 const MachineFrameInfo &MFI = MF.getFrameInfo();
2215
2216 // Entry points ordinarily don't need to initialize SP. We have to set it up
2217 // for callees if there are any. Also note tail calls are impossible/don't
2218 // make any sense for kernels.
2219 if (MFI.hasCalls())
2220 return true;
2221
2222 // We still need to initialize the SP if we're doing anything weird that
2223 // references the SP, like variable sized stack objects.
2224 return frameTriviallyRequiresSP(MFI);
2225}