//===----------------------- SIFrameLowering.cpp --------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//==-----------------------------------------------------------------------===//

#include "SIFrameLowering.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "AMDGPUSubtarget.h"

#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"

using namespace llvm;


static ArrayRef<MCPhysReg> getAllSGPR128(const MachineFunction &MF,
                                         const SIRegisterInfo *TRI) {
  return makeArrayRef(AMDGPU::SGPR_128RegClass.begin(),
                      TRI->getMaxNumSGPRs(MF) / 4);
}

static ArrayRef<MCPhysReg> getAllSGPRs(const MachineFunction &MF,
                                       const SIRegisterInfo *TRI) {
  return makeArrayRef(AMDGPU::SGPR_32RegClass.begin(),
                      TRI->getMaxNumSGPRs(MF));
}

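// emitFlatScratchInit: program the flat scratch registers from the preloaded
// flat_scratch_init argument. The size half (sub1) is copied into FLAT_SCR_LO,
// and the base half (sub0) plus the per-wave scratch offset, converted to
// 256-byte units, is written into FLAT_SCR_HI.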
void SIFrameLowering::emitFlatScratchInit(const SIInstrInfo *TII,
                                          const SIRegisterInfo* TRI,
                                          MachineFunction &MF,
                                          MachineBasicBlock &MBB) const {
  // We don't need this if we only have spills since there is no user facing
  // scratch.

  // TODO: If we know we don't have flat instructions earlier, we can omit
  // this from the input registers.
  //
  // TODO: We only need to know if we access scratch space through a flat
  // pointer. Because we only detect if flat instructions are used at all,
  // this will be used more often than necessary on VI.

  // Debug location must be unknown since the first debug location is used to
  // determine the end of the prologue.
  DebugLoc DL;
  MachineBasicBlock::iterator I = MBB.begin();

  unsigned FlatScratchInitReg
    = TRI->getPreloadedValue(MF, SIRegisterInfo::FLAT_SCRATCH_INIT);

  MachineRegisterInfo &MRI = MF.getRegInfo();
  MRI.addLiveIn(FlatScratchInitReg);
  MBB.addLiveIn(FlatScratchInitReg);

  // Copy the size in bytes.
  unsigned FlatScrInitHi = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub1);
  BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), AMDGPU::FLAT_SCR_LO)
    .addReg(FlatScrInitHi, RegState::Kill);

  unsigned FlatScrInitLo = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub0);

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg();

  // Add wave offset in bytes to private base offset.
  // See comment in AMDKernelCodeT.h for enable_sgpr_flat_scratch_init.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), FlatScrInitLo)
    .addReg(FlatScrInitLo)
    .addReg(ScratchWaveOffsetReg);

  // Convert offset to 256-byte units.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_LSHR_B32), AMDGPU::FLAT_SCR_HI)
    .addReg(FlatScrInitLo, RegState::Kill)
    .addImm(8);
}

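// getReservedPrivateSegmentBufferReg: the scratch resource descriptor is
// initially reserved at the end of the SGPR file; where possible, shift it
// down to the first unused, allocatable 4-SGPR tuple after the preloaded
// user SGPRs (see the FIXMEs below for the remaining limitations).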
unsigned SIFrameLowering::getReservedPrivateSegmentBufferReg(
  const SISubtarget &ST,
  const SIInstrInfo *TII,
  const SIRegisterInfo *TRI,
  SIMachineFunctionInfo *MFI,
  MachineFunction &MF) const {

  // We need to insert initialization of the scratch resource descriptor.
  unsigned ScratchRsrcReg = MFI->getScratchRSrcReg();
  if (ScratchRsrcReg == AMDGPU::NoRegister)
    return AMDGPU::NoRegister;

  if (ST.hasSGPRInitBug() ||
      ScratchRsrcReg != TRI->reservedPrivateSegmentBufferReg(MF))
    return ScratchRsrcReg;

  // We reserved the last registers for this. Shift it down to the end of those
  // which were actually used.
  //
  // FIXME: It might be safer to use a pseudoregister before replacement.

  // FIXME: We should be able to eliminate unused input registers. We cannot,
  // however, do this for the resources required for scratch access. For now we
  // skip over user SGPRs and may leave unused holes.

  // We find the resource first because it has an alignment requirement.

  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned NumPreloaded = (MFI->getNumPreloadedSGPRs() + 3) / 4;
  ArrayRef<MCPhysReg> AllSGPR128s = getAllSGPR128(MF, TRI);
  AllSGPR128s = AllSGPR128s.slice(std::min(static_cast<unsigned>(AllSGPR128s.size()), NumPreloaded));

  // Skip the last 2 elements because the last one is reserved for VCC, and
  // this is the 2nd to last element already.
  for (MCPhysReg Reg : AllSGPR128s) {
    // Pick the first unallocated one. Make sure we don't clobber the other
    // reserved input we needed.
    if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg)) {
      //assert(MRI.isAllocatable(Reg));
      MRI.replaceRegWith(ScratchRsrcReg, Reg);
      MFI->setScratchRSrcReg(Reg);
      return Reg;
    }
  }

  return ScratchRsrcReg;
}

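// getReservedPrivateSegmentWaveByteOffsetReg: likewise, try to move the
// scratch wave offset register from its default reserved location down to
// the first free SGPR, avoiding the registers accounted for in the list
// below.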
unsigned SIFrameLowering::getReservedPrivateSegmentWaveByteOffsetReg(
  const SISubtarget &ST,
  const SIInstrInfo *TII,
  const SIRegisterInfo *TRI,
  SIMachineFunctionInfo *MFI,
  MachineFunction &MF) const {
  unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg();
  if (ST.hasSGPRInitBug() ||
      ScratchWaveOffsetReg != TRI->reservedPrivateSegmentWaveByteOffsetReg(MF))
    return ScratchWaveOffsetReg;

  unsigned ScratchRsrcReg = MFI->getScratchRSrcReg();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned NumPreloaded = MFI->getNumPreloadedSGPRs();

  ArrayRef<MCPhysReg> AllSGPRs = getAllSGPRs(MF, TRI);
  if (NumPreloaded > AllSGPRs.size())
    return ScratchWaveOffsetReg;

  AllSGPRs = AllSGPRs.slice(NumPreloaded);

  // We need to drop the registers from the end of the list that we cannot use
  // for the scratch wave offset.
  // + 2 s102 and s103 do not exist on VI.
  // + 2 for vcc
  // + 2 for xnack_mask
  // + 2 for flat_scratch
  // + 4 for registers reserved for scratch resource register
  // + 1 for register reserved for scratch wave offset.  (By excluding this
  //     register from the list to consider, it means that when this
  //     register is being used for the scratch wave offset and there
  //     are no other free SGPRs, then the value will stay in this register.)
  // ----
  //  13
  if (AllSGPRs.size() < 13)
    return ScratchWaveOffsetReg;

  for (MCPhysReg Reg : AllSGPRs.drop_back(13)) {
    // Pick the first unallocated SGPR. Be careful not to pick an alias of the
    // scratch descriptor, since we haven't added its uses yet.
    if (!MRI.isPhysRegUsed(Reg)) {
      if (!MRI.isAllocatable(Reg) ||
          TRI->isSubRegisterEq(ScratchRsrcReg, Reg))
        continue;

      MRI.replaceRegWith(ScratchWaveOffsetReg, Reg);
      MFI->setScratchWaveOffsetReg(Reg);
      return Reg;
    }
  }

  return ScratchWaveOffsetReg;
}

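// emitPrologue: for kernels that use scratch, pick the final scratch resource
// descriptor and scratch wave offset registers, mark them live throughout the
// function, copy them from the preloaded input registers, and materialize the
// resource descriptor when it is not passed in directly.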
void SIFrameLowering::emitPrologue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  // Emit debugger prologue if "amdgpu-debugger-emit-prologue" attribute was
  // specified.
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  if (ST.debuggerEmitPrologue())
    emitDebuggerPrologue(MF, MBB);

  assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");

  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // If we only have SGPR spills, we won't actually be using scratch memory
  // since these spill to VGPRs.
  //
  // FIXME: We should be cleaning up these unused SGPR spill frame indices
  // somewhere.

  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned ScratchRsrcReg
    = getReservedPrivateSegmentBufferReg(ST, TII, TRI, MFI, MF);
  unsigned ScratchWaveOffsetReg
    = getReservedPrivateSegmentWaveByteOffsetReg(ST, TII, TRI, MFI, MF);

  if (ScratchRsrcReg == AMDGPU::NoRegister) {
    assert(ScratchWaveOffsetReg == AMDGPU::NoRegister);
    return;
  }

  assert(!TRI->isSubRegister(ScratchRsrcReg, ScratchWaveOffsetReg));

  // We need to do the replacement of the private segment buffer and wave offset
  // register even if there are no stack objects. There could be stores to undef
  // or a constant without an associated object.

  // FIXME: We still have implicit uses on SGPR spill instructions in case they
  // need to spill to vector memory. It's likely that will not happen, but at
  // this point it appears we need the setup. This part of the prolog should be
  // emitted after frame indices are eliminated.

  if (MF.getFrameInfo().hasStackObjects() && MFI->hasFlatScratchInit())
    emitFlatScratchInit(TII, TRI, MF, MBB);

  // We need to insert initialization of the scratch resource descriptor.
  unsigned PreloadedScratchWaveOffsetReg = TRI->getPreloadedValue(
    MF, SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);


  unsigned PreloadedPrivateBufferReg = AMDGPU::NoRegister;
  if (ST.isAmdCodeObjectV2(MF) || ST.isMesaGfxShader(MF)) {
    PreloadedPrivateBufferReg = TRI->getPreloadedValue(
      MF, SIRegisterInfo::PRIVATE_SEGMENT_BUFFER);
  }

  bool OffsetRegUsed = !MRI.use_empty(ScratchWaveOffsetReg);
  bool ResourceRegUsed = !MRI.use_empty(ScratchRsrcReg);

  // We added live-ins during argument lowering, but since they were not used
  // they were deleted. We're adding the uses now, so add them back.
  if (OffsetRegUsed) {
    assert(PreloadedScratchWaveOffsetReg != AMDGPU::NoRegister &&
           "scratch wave offset input is required");
    MRI.addLiveIn(PreloadedScratchWaveOffsetReg);
    MBB.addLiveIn(PreloadedScratchWaveOffsetReg);
  }

  if (ResourceRegUsed && PreloadedPrivateBufferReg != AMDGPU::NoRegister) {
    assert(ST.isAmdCodeObjectV2(MF) || ST.isMesaGfxShader(MF));
    MRI.addLiveIn(PreloadedPrivateBufferReg);
    MBB.addLiveIn(PreloadedPrivateBufferReg);
  }

  // Make the selected registers live throughout the function.
  for (MachineBasicBlock &OtherBB : MF) {
    if (&OtherBB == &MBB)
      continue;

    if (OffsetRegUsed)
      OtherBB.addLiveIn(ScratchWaveOffsetReg);

    if (ResourceRegUsed)
      OtherBB.addLiveIn(ScratchRsrcReg);
  }

  DebugLoc DL;
  MachineBasicBlock::iterator I = MBB.begin();

  // If we reserved the original input registers, we don't need to copy to the
  // reserved registers.

  bool CopyBuffer = ResourceRegUsed &&
    PreloadedPrivateBufferReg != AMDGPU::NoRegister &&
    ST.isAmdCodeObjectV2(MF) &&
    ScratchRsrcReg != PreloadedPrivateBufferReg;

  // This needs to be careful of the copying order to avoid overwriting one of
  // the input registers before it's been copied to its final
  // destination. Usually the offset should be copied first.
  bool CopyBufferFirst = TRI->isSubRegisterEq(PreloadedPrivateBufferReg,
                                              ScratchWaveOffsetReg);
  if (CopyBuffer && CopyBufferFirst) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchRsrcReg)
      .addReg(PreloadedPrivateBufferReg, RegState::Kill);
  }

  if (OffsetRegUsed &&
      PreloadedScratchWaveOffsetReg != ScratchWaveOffsetReg) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchWaveOffsetReg)
      .addReg(PreloadedScratchWaveOffsetReg, RegState::Kill);
  }

  if (CopyBuffer && !CopyBufferFirst) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchRsrcReg)
      .addReg(PreloadedPrivateBufferReg, RegState::Kill);
  }

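  // When no preloaded private segment buffer is available (Mesa graphics
  // shaders, or kernels outside the code object v2 ABI), the scratch resource
  // descriptor is built manually below: the base pointer comes from a
  // relocation or from the private memory input pointer, and the remaining
  // descriptor words are filled with target constants.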
  if (ResourceRegUsed && (ST.isMesaGfxShader(MF) || (PreloadedPrivateBufferReg == AMDGPU::NoRegister))) {
    assert(!ST.isAmdCodeObjectV2(MF));
    const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);

    unsigned Rsrc2 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub2);
    unsigned Rsrc3 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub3);

    // Use relocations to get the pointer, and setup the other bits manually.
    uint64_t Rsrc23 = TII->getScratchRsrcWords23();

    if (MFI->hasPrivateMemoryInputPtr()) {
      unsigned Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);

      if (AMDGPU::isCompute(MF.getFunction()->getCallingConv())) {
        const MCInstrDesc &Mov64 = TII->get(AMDGPU::S_MOV_B64);

        BuildMI(MBB, I, DL, Mov64, Rsrc01)
          .addReg(PreloadedPrivateBufferReg)
          .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
      } else {
        const MCInstrDesc &LoadDwordX2 = TII->get(AMDGPU::S_LOAD_DWORDX2_IMM);

        PointerType *PtrTy =
          PointerType::get(Type::getInt64Ty(MF.getFunction()->getContext()),
                           AMDGPUAS::CONSTANT_ADDRESS);
        MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
        auto MMO = MF.getMachineMemOperand(PtrInfo,
                                           MachineMemOperand::MOLoad |
                                           MachineMemOperand::MOInvariant |
                                           MachineMemOperand::MODereferenceable,
                                           0, 0);
        BuildMI(MBB, I, DL, LoadDwordX2, Rsrc01)
          .addReg(PreloadedPrivateBufferReg)
          .addImm(0) // offset
          .addImm(0) // glc
          .addMemOperand(MMO)
          .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
      }
    } else {
      unsigned Rsrc0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
      unsigned Rsrc1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);

      BuildMI(MBB, I, DL, SMovB32, Rsrc0)
        .addExternalSymbol("SCRATCH_RSRC_DWORD0")
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

      BuildMI(MBB, I, DL, SMovB32, Rsrc1)
        .addExternalSymbol("SCRATCH_RSRC_DWORD1")
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
    }

    BuildMI(MBB, I, DL, SMovB32, Rsrc2)
      .addImm(Rsrc23 & 0xffffffff)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

    BuildMI(MBB, I, DL, SMovB32, Rsrc3)
      .addImm(Rsrc23 >> 32)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
  }
}

void SIFrameLowering::emitEpilogue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {

}

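// processFunctionBeforeFrameFinalized: if the function has stack objects,
// reserve an emergency spill slot for the register scavenger, sized and
// aligned for a single SGPR.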
void SIFrameLowering::processFunctionBeforeFrameFinalized(
  MachineFunction &MF,
  RegScavenger *RS) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();

  if (!MFI.hasStackObjects())
    return;

  bool MayNeedScavengingEmergencySlot = MFI.hasStackObjects();

  assert((RS || !MayNeedScavengingEmergencySlot) &&
         "RegScavenger required if spilling");

  if (MayNeedScavengingEmergencySlot) {
    int ScavengeFI = MFI.CreateStackObject(
      AMDGPU::SGPR_32RegClass.getSize(),
      AMDGPU::SGPR_32RegClass.getAlignment(), false);
    RS->addScavengingFrameIndex(ScavengeFI);
  }
}

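// emitDebuggerPrologue: for each of the three dimensions, spill the work
// group ID SGPR (via a VGPR copy) and the work item ID VGPR to their
// reserved debugger stack objects.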
void SIFrameLowering::emitDebuggerPrologue(MachineFunction &MF,
                                           MachineBasicBlock &MBB) const {
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  MachineBasicBlock::iterator I = MBB.begin();
  DebugLoc DL;

  // For each dimension:
  for (unsigned i = 0; i < 3; ++i) {
    // Get work group ID SGPR, and make it live-in again.
    unsigned WorkGroupIDSGPR = MFI->getWorkGroupIDSGPR(i);
    MF.getRegInfo().addLiveIn(WorkGroupIDSGPR);
    MBB.addLiveIn(WorkGroupIDSGPR);

    // Since SGPRs are spilled into VGPRs, copy work group ID SGPR to VGPR in
    // order to spill it to scratch.
    unsigned WorkGroupIDVGPR =
      MF.getRegInfo().createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), WorkGroupIDVGPR)
      .addReg(WorkGroupIDSGPR);

    // Spill work group ID.
    int WorkGroupIDObjectIdx = MFI->getDebuggerWorkGroupIDStackObjectIndex(i);
    TII->storeRegToStackSlot(MBB, I, WorkGroupIDVGPR, false,
      WorkGroupIDObjectIdx, &AMDGPU::VGPR_32RegClass, TRI);

    // Get work item ID VGPR, and make it live-in again.
    unsigned WorkItemIDVGPR = MFI->getWorkItemIDVGPR(i);
    MF.getRegInfo().addLiveIn(WorkItemIDVGPR);
    MBB.addLiveIn(WorkItemIDVGPR);

    // Spill work item ID.
    int WorkItemIDObjectIdx = MFI->getDebuggerWorkItemIDStackObjectIndex(i);
    TII->storeRegToStackSlot(MBB, I, WorkItemIDVGPR, false,
      WorkItemIDObjectIdx, &AMDGPU::VGPR_32RegClass, TRI);
  }
}