Bug Summary

File: build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
Warning: line 1312, column 5
Value stored to 'Desc' is never read
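
For reference, this diagnostic comes from the deadcode.DeadStores checker, which flags a value assigned to a variable that is overwritten or goes out of scope before it is ever read. Line 1312 itself is outside the excerpt below, so the following is only a minimal hypothetical sketch of the reported pattern (the names and values are illustrative, not the actual code at that line):

    #include <cstdio>

    // Hypothetical illustration of the pattern flagged by the
    // deadcode.DeadStores checker; not the actual code at line 1312.
    static int selectValue(bool UseAlt) {
      int Desc;
      Desc = 42;                // "Value stored to 'Desc' is never read":
                                // every path overwrites it before any read
      Desc = UseAlt ? 7 : 13;
      return Desc;
    }

    int main() {
      std::printf("%d\n", selectValue(true));
      return 0;
    }

The usual fix is to drop the unused assignment or to restructure the code so the stored value is actually consumed.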

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name SIRegisterInfo.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm/tools/clang/stage2-bins -resource-dir /usr/lib/llvm-16/lib/clang/16.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Target/AMDGPU -I /build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/lib/Target/AMDGPU -I include -I /build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-16/lib/clang/16.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fmacro-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/= -O2 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-misleading-indentation -std=c++17 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/= -ferror-limit 19 -fvisibility=hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o 
/tmp/scan-build-2022-10-03-140002-15933-1 -x c++ /build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
1//===-- SIRegisterInfo.cpp - SI Register Information ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// SI implementation of the TargetRegisterInfo class.
11//
12//===----------------------------------------------------------------------===//
13
14#include "SIRegisterInfo.h"
15#include "AMDGPU.h"
16#include "AMDGPURegisterBankInfo.h"
17#include "GCNSubtarget.h"
18#include "MCTargetDesc/AMDGPUInstPrinter.h"
19#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
20#include "SIMachineFunctionInfo.h"
21#include "llvm/CodeGen/LiveIntervals.h"
22#include "llvm/CodeGen/LivePhysRegs.h"
23#include "llvm/CodeGen/MachineDominators.h"
24#include "llvm/CodeGen/MachineFrameInfo.h"
25#include "llvm/CodeGen/RegisterScavenging.h"
26
27using namespace llvm;
28
29#define GET_REGINFO_TARGET_DESC
30#include "AMDGPUGenRegisterInfo.inc"
31
32static cl::opt<bool> EnableSpillSGPRToVGPR(
33 "amdgpu-spill-sgpr-to-vgpr",
34 cl::desc("Enable spilling VGPRs to SGPRs"),
35 cl::ReallyHidden,
36 cl::init(true));
37
38std::array<std::vector<int16_t>, 16> SIRegisterInfo::RegSplitParts;
39std::array<std::array<uint16_t, 32>, 9> SIRegisterInfo::SubRegFromChannelTable;
40
41// Map numbers of DWORDs to indexes in SubRegFromChannelTable.
42// Valid indexes are shifted 1, such that a 0 mapping means unsupported.
43// e.g. for 8 DWORDs (256-bit), SubRegFromChannelTableWidthMap[8] = 8,
44// meaning index 7 in SubRegFromChannelTable.
45static const std::array<unsigned, 17> SubRegFromChannelTableWidthMap = {
46 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 9};
47
48namespace llvm {
49
50// A temporary struct to spill SGPRs.
51// This is mostly to spill SGPRs to memory. Spilling SGPRs into VGPR lanes emits
52// just v_writelane and v_readlane.
53//
54// When spilling to memory, the SGPRs are written into VGPR lanes and the VGPR
55// is saved to scratch (or the other way around for loads).
56// For this, a VGPR is required where the needed lanes can be clobbered. The
57// RegScavenger can provide a VGPR where currently active lanes can be
58// clobbered, but we still need to save inactive lanes.
59// The high-level steps are:
60// - Try to scavenge SGPR(s) to save exec
61// - Try to scavenge VGPR
62// - Save needed, all or inactive lanes of a TmpVGPR
63// - Spill/Restore SGPRs using TmpVGPR
64// - Restore TmpVGPR
65//
66// To save all lanes of TmpVGPR, exec needs to be saved and modified. If we
67// cannot scavenge temporary SGPRs to save exec, we use the following code:
68// buffer_store_dword TmpVGPR ; only if active lanes need to be saved
69// s_not exec, exec
70// buffer_store_dword TmpVGPR ; save inactive lanes
71// s_not exec, exec
72struct SGPRSpillBuilder {
73 struct PerVGPRData {
74 unsigned PerVGPR;
75 unsigned NumVGPRs;
76 int64_t VGPRLanes;
77 };
78
79 // The SGPR to save
80 Register SuperReg;
81 MachineBasicBlock::iterator MI;
82 ArrayRef<int16_t> SplitParts;
83 unsigned NumSubRegs;
84 bool IsKill;
85 const DebugLoc &DL;
86
87 /* When spilling to stack */
88 // The SGPRs are written into this VGPR, which is then written to scratch
89 // (or vice versa for loads).
90 Register TmpVGPR = AMDGPU::NoRegister;
91 // Temporary spill slot to save TmpVGPR to.
92 int TmpVGPRIndex = 0;
93 // If TmpVGPR is live before the spill or if it is scavenged.
94 bool TmpVGPRLive = false;
95 // Scavenged SGPR to save EXEC.
96 Register SavedExecReg = AMDGPU::NoRegister;
97 // Stack index to write the SGPRs to.
98 int Index;
99 unsigned EltSize = 4;
100
101 RegScavenger *RS;
102 MachineBasicBlock *MBB;
103 MachineFunction &MF;
104 SIMachineFunctionInfo &MFI;
105 const SIInstrInfo &TII;
106 const SIRegisterInfo &TRI;
107 bool IsWave32;
108 Register ExecReg;
109 unsigned MovOpc;
110 unsigned NotOpc;
111
112 SGPRSpillBuilder(const SIRegisterInfo &TRI, const SIInstrInfo &TII,
113 bool IsWave32, MachineBasicBlock::iterator MI, int Index,
114 RegScavenger *RS)
115 : SGPRSpillBuilder(TRI, TII, IsWave32, MI, MI->getOperand(0).getReg(),
116 MI->getOperand(0).isKill(), Index, RS) {}
117
118 SGPRSpillBuilder(const SIRegisterInfo &TRI, const SIInstrInfo &TII,
119 bool IsWave32, MachineBasicBlock::iterator MI, Register Reg,
120 bool IsKill, int Index, RegScavenger *RS)
121 : SuperReg(Reg), MI(MI), IsKill(IsKill), DL(MI->getDebugLoc()),
122 Index(Index), RS(RS), MBB(MI->getParent()), MF(*MBB->getParent()),
123 MFI(*MF.getInfo<SIMachineFunctionInfo>()), TII(TII), TRI(TRI),
124 IsWave32(IsWave32) {
125 const TargetRegisterClass *RC = TRI.getPhysRegClass(SuperReg);
126 SplitParts = TRI.getRegSplitParts(RC, EltSize);
127 NumSubRegs = SplitParts.empty() ? 1 : SplitParts.size();
128
129 if (IsWave32) {
130 ExecReg = AMDGPU::EXEC_LO;
131 MovOpc = AMDGPU::S_MOV_B32;
132 NotOpc = AMDGPU::S_NOT_B32;
133 } else {
134 ExecReg = AMDGPU::EXEC;
135 MovOpc = AMDGPU::S_MOV_B64;
136 NotOpc = AMDGPU::S_NOT_B64;
137 }
138
139 assert(SuperReg != AMDGPU::M0 && "m0 should never spill");
140 assert(SuperReg != AMDGPU::EXEC_LO && SuperReg != AMDGPU::EXEC_HI &&
141 SuperReg != AMDGPU::EXEC && "exec should never spill");
142 }
143
144 PerVGPRData getPerVGPRData() {
145 PerVGPRData Data;
146 Data.PerVGPR = IsWave32 ? 32 : 64;
147 Data.NumVGPRs = (NumSubRegs + (Data.PerVGPR - 1)) / Data.PerVGPR;
148 Data.VGPRLanes = (1LL << std::min(Data.PerVGPR, NumSubRegs)) - 1LL;
149 return Data;
150 }
151
152 // Tries to scavenge SGPRs to save EXEC and a VGPR. Uses v0 if no VGPR is
153 // free.
154 // Writes these instructions if an SGPR can be scavenged:
155 // s_mov_b64 s[6:7], exec ; Save exec
156 // s_mov_b64 exec, 3 ; Wanted lanemask
157 // buffer_store_dword v1 ; Write scavenged VGPR to emergency slot
158 //
159 // Writes these instructions if no SGPR can be scavenged:
160 // buffer_store_dword v0 ; Only if no free VGPR was found
161 // s_not_b64 exec, exec
162 // buffer_store_dword v0 ; Save inactive lanes
163 // ; exec stays inverted, it is flipped back in
164 // ; restore.
165 void prepare() {
166 // Scavenged temporary VGPR to use. It must be scavenged once for any number
167 // of spilled subregs.
168 // FIXME: The liveness analysis is limited and does not tell if a register
169 // is in use in lanes that are currently inactive. We can never be sure if
170 // a register as actually in use in another lane, so we need to save all
171 // used lanes of the chosen VGPR.
172 assert(RS && "Cannot spill SGPR to memory without RegScavenger");
173 TmpVGPR = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0, false);
174
175 // Reserve temporary stack slot
176 TmpVGPRIndex = MFI.getScavengeFI(MF.getFrameInfo(), TRI);
177 if (TmpVGPR) {
178 // Found a register that is dead in the currently active lanes, we only
179 // need to spill inactive lanes.
180 TmpVGPRLive = false;
181 } else {
182 // Pick v0 because it doesn't make a difference.
183 TmpVGPR = AMDGPU::VGPR0;
184 TmpVGPRLive = true;
185 }
186
187 if (TmpVGPRLive) {
188 // We need to inform the scavenger that this index is already in use until
189 // we're done with the custom emergency spill.
190 RS->assignRegToScavengingIndex(TmpVGPRIndex, TmpVGPR);
191 }
192
193 // We may end up recursively calling the scavenger, and don't want to re-use
194 // the same register.
195 RS->setRegUsed(TmpVGPR);
196
197 // Try to scavenge SGPRs to save exec
198 assert(!SavedExecReg && "Exec is already saved, refuse to save again");
199 const TargetRegisterClass &RC =
200 IsWave32 ? AMDGPU::SGPR_32RegClass : AMDGPU::SGPR_64RegClass;
201 RS->setRegUsed(SuperReg);
202 SavedExecReg = RS->scavengeRegister(&RC, MI, 0, false);
203
204 int64_t VGPRLanes = getPerVGPRData().VGPRLanes;
205
206 if (SavedExecReg) {
207 RS->setRegUsed(SavedExecReg);
208 // Set exec to needed lanes
209 BuildMI(*MBB, MI, DL, TII.get(MovOpc), SavedExecReg).addReg(ExecReg);
210 auto I =
211 BuildMI(*MBB, MI, DL, TII.get(MovOpc), ExecReg).addImm(VGPRLanes);
212 if (!TmpVGPRLive)
213 I.addReg(TmpVGPR, RegState::ImplicitDefine);
214 // Spill needed lanes
215 TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ false);
216 } else {
217 // The modify and restore of exec clobber SCC, which we would have to save
218 // and restore. FIXME: We probably would need to reserve a register for
219 // this.
220 if (RS->isRegUsed(AMDGPU::SCC))
221 MI->emitError("unhandled SGPR spill to memory");
222
223 // Spill active lanes
224 if (TmpVGPRLive)
225 TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ false,
226 /*IsKill*/ false);
227 // Spill inactive lanes
228 auto I = BuildMI(*MBB, MI, DL, TII.get(NotOpc), ExecReg).addReg(ExecReg);
229 if (!TmpVGPRLive)
230 I.addReg(TmpVGPR, RegState::ImplicitDefine);
231 I->getOperand(2).setIsDead(true); // Mark SCC as dead.
232 TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ false);
233 }
234 }
235
236 // Writes these instructions if an SGPR can be scavenged:
237 // buffer_load_dword v1 ; Write scavenged VGPR to emergency slot
238 // s_waitcnt vmcnt(0) ; If a free VGPR was found
239 // s_mov_b64 exec, s[6:7] ; Save exec
240 //
241 // Writes these instructions if no SGPR can be scavenged:
242 // buffer_load_dword v0 ; Restore inactive lanes
243 // s_waitcnt vmcnt(0) ; If a free VGPR was found
244 // s_not_b64 exec, exec
245 // buffer_load_dword v0 ; Only if no free VGPR was found
246 void restore() {
247 if (SavedExecReg) {
248 // Restore used lanes
249 TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ true,
250 /*IsKill*/ false);
251 // Restore exec
252 auto I = BuildMI(*MBB, MI, DL, TII.get(MovOpc), ExecReg)
253 .addReg(SavedExecReg, RegState::Kill);
254 // Add an implicit use of the load so it is not dead.
255 // FIXME This inserts an unnecessary waitcnt
256 if (!TmpVGPRLive) {
257 I.addReg(TmpVGPR, RegState::ImplicitKill);
258 }
259 } else {
260 // Restore inactive lanes
261 TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ true,
262 /*IsKill*/ false);
263 auto I = BuildMI(*MBB, MI, DL, TII.get(NotOpc), ExecReg).addReg(ExecReg);
264 if (!TmpVGPRLive)
265 I.addReg(TmpVGPR, RegState::ImplicitKill);
266 I->getOperand(2).setIsDead(true); // Mark SCC as dead.
267
268 // Restore active lanes
269 if (TmpVGPRLive)
270 TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ true);
271 }
272
273 // Inform the scavenger where we're releasing our custom scavenged register.
274 if (TmpVGPRLive) {
275 MachineBasicBlock::iterator RestorePt = std::prev(MI);
276 RS->assignRegToScavengingIndex(TmpVGPRIndex, TmpVGPR, &*RestorePt);
277 }
278 }
279
280 // Write TmpVGPR to memory or read TmpVGPR from memory.
281 // Either using a single buffer_load/store if exec is set to the needed mask
282 // or using
283 // buffer_load
284 // s_not exec, exec
285 // buffer_load
286 // s_not exec, exec
287 void readWriteTmpVGPR(unsigned Offset, bool IsLoad) {
288 if (SavedExecReg) {
289 // Spill needed lanes
290 TRI.buildVGPRSpillLoadStore(*this, Index, Offset, IsLoad);
291 } else {
292 // The modify and restore of exec clobber SCC, which we would have to save
293 // and restore. FIXME: We probably would need to reserve a register for
294 // this.
295 if (RS->isRegUsed(AMDGPU::SCC))
296 MI->emitError("unhandled SGPR spill to memory");
297
298 // Spill active lanes
299 TRI.buildVGPRSpillLoadStore(*this, Index, Offset, IsLoad,
300 /*IsKill*/ false);
301 // Spill inactive lanes
302 auto Not0 = BuildMI(*MBB, MI, DL, TII.get(NotOpc), ExecReg).addReg(ExecReg);
303 Not0->getOperand(2).setIsDead(); // Mark SCC as dead.
304 TRI.buildVGPRSpillLoadStore(*this, Index, Offset, IsLoad);
305 auto Not1 = BuildMI(*MBB, MI, DL, TII.get(NotOpc), ExecReg).addReg(ExecReg);
306 Not1->getOperand(2).setIsDead(); // Mark SCC as dead.
307 }
308 }
309
310 void setMI(MachineBasicBlock *NewMBB, MachineBasicBlock::iterator NewMI) {
311 assert(MBB->getParent() == &MF);
312 MI = NewMI;
313 MBB = NewMBB;
314 }
315};
316
317} // namespace llvm
318
319SIRegisterInfo::SIRegisterInfo(const GCNSubtarget &ST)
320 : AMDGPUGenRegisterInfo(AMDGPU::PC_REG, ST.getAMDGPUDwarfFlavour()), ST(ST),
321 SpillSGPRToVGPR(EnableSpillSGPRToVGPR), isWave32(ST.isWave32()) {
322
323 assert(getSubRegIndexLaneMask(AMDGPU::sub0).getAsInteger() == 3 &&
324 getSubRegIndexLaneMask(AMDGPU::sub31).getAsInteger() == (3ULL << 62) &&
325 (getSubRegIndexLaneMask(AMDGPU::lo16) |
326 getSubRegIndexLaneMask(AMDGPU::hi16)).getAsInteger() ==
327 getSubRegIndexLaneMask(AMDGPU::sub0).getAsInteger() &&
328 "getNumCoveredRegs() will not work with generated subreg masks!");
329
330 RegPressureIgnoredUnits.resize(getNumRegUnits());
331 RegPressureIgnoredUnits.set(
332 *MCRegUnitIterator(MCRegister::from(AMDGPU::M0), this));
333 for (auto Reg : AMDGPU::VGPR_HI16RegClass)
334 RegPressureIgnoredUnits.set(*MCRegUnitIterator(Reg, this));
335
336 // HACK: Until this is fully tablegen'd.
337 static llvm::once_flag InitializeRegSplitPartsFlag;
338
339 static auto InitializeRegSplitPartsOnce = [this]() {
340 for (unsigned Idx = 1, E = getNumSubRegIndices() - 1; Idx < E; ++Idx) {
341 unsigned Size = getSubRegIdxSize(Idx);
342 if (Size & 31)
343 continue;
344 std::vector<int16_t> &Vec = RegSplitParts[Size / 32 - 1];
345 unsigned Pos = getSubRegIdxOffset(Idx);
346 if (Pos % Size)
347 continue;
348 Pos /= Size;
349 if (Vec.empty()) {
350 unsigned MaxNumParts = 1024 / Size; // Maximum register is 1024 bits.
351 Vec.resize(MaxNumParts);
352 }
353 Vec[Pos] = Idx;
354 }
355 };
356
357 static llvm::once_flag InitializeSubRegFromChannelTableFlag;
358
359 static auto InitializeSubRegFromChannelTableOnce = [this]() {
360 for (auto &Row : SubRegFromChannelTable)
361 Row.fill(AMDGPU::NoSubRegister);
362 for (unsigned Idx = 1; Idx < getNumSubRegIndices(); ++Idx) {
363 unsigned Width = AMDGPUSubRegIdxRanges[Idx].Size / 32;
364 unsigned Offset = AMDGPUSubRegIdxRanges[Idx].Offset / 32;
365 assert(Width < SubRegFromChannelTableWidthMap.size());
366 Width = SubRegFromChannelTableWidthMap[Width];
367 if (Width == 0)
368 continue;
369 unsigned TableIdx = Width - 1;
370 assert(TableIdx < SubRegFromChannelTable.size());
371 assert(Offset < SubRegFromChannelTable[TableIdx].size());
372 SubRegFromChannelTable[TableIdx][Offset] = Idx;
373 }
374 };
375
376 llvm::call_once(InitializeRegSplitPartsFlag, InitializeRegSplitPartsOnce);
377 llvm::call_once(InitializeSubRegFromChannelTableFlag,
378 InitializeSubRegFromChannelTableOnce);
379}
380
381void SIRegisterInfo::reserveRegisterTuples(BitVector &Reserved,
382 MCRegister Reg) const {
383 MCRegAliasIterator R(Reg, this, true);
384
385 for (; R.isValid(); ++R)
386 Reserved.set(*R);
387}
388
389// Forced to be here by one .inc
390const MCPhysReg *SIRegisterInfo::getCalleeSavedRegs(
391 const MachineFunction *MF) const {
392 CallingConv::ID CC = MF->getFunction().getCallingConv();
393 switch (CC) {
394 case CallingConv::C:
395 case CallingConv::Fast:
396 case CallingConv::Cold:
397 return ST.hasGFX90AInsts() ? CSR_AMDGPU_GFX90AInsts_SaveList
398 : CSR_AMDGPU_SaveList;
399 case CallingConv::AMDGPU_Gfx:
400 return ST.hasGFX90AInsts() ? CSR_AMDGPU_SI_Gfx_GFX90AInsts_SaveList
401 : CSR_AMDGPU_SI_Gfx_SaveList;
402 default: {
403 // Dummy to not crash RegisterClassInfo.
404 static const MCPhysReg NoCalleeSavedReg = AMDGPU::NoRegister;
405 return &NoCalleeSavedReg;
406 }
407 }
408}
409
410const MCPhysReg *
411SIRegisterInfo::getCalleeSavedRegsViaCopy(const MachineFunction *MF) const {
412 return nullptr;
413}
414
415const uint32_t *SIRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
416 CallingConv::ID CC) const {
417 switch (CC) {
418 case CallingConv::C:
419 case CallingConv::Fast:
420 case CallingConv::Cold:
421 return ST.hasGFX90AInsts() ? CSR_AMDGPU_GFX90AInsts_RegMask
422 : CSR_AMDGPU_RegMask;
423 case CallingConv::AMDGPU_Gfx:
424 return ST.hasGFX90AInsts() ? CSR_AMDGPU_SI_Gfx_GFX90AInsts_RegMask
425 : CSR_AMDGPU_SI_Gfx_RegMask;
426 default:
427 return nullptr;
428 }
429}
430
431const uint32_t *SIRegisterInfo::getNoPreservedMask() const {
432 return CSR_AMDGPU_NoRegs_RegMask;
433}
434
435const TargetRegisterClass *
436SIRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
437 const MachineFunction &MF) const {
438 // FIXME: Should have a helper function like getEquivalentVGPRClass to get the
439 // equivalent AV class. If used one, the verifier will crash after
440 // RegBankSelect in the GISel flow. The aligned regclasses are not fully given
441 // until Instruction selection.
442 if (ST.hasMAIInsts() && (isVGPRClass(RC) || isAGPRClass(RC))) {
443 if (RC == &AMDGPU::VGPR_32RegClass || RC == &AMDGPU::AGPR_32RegClass)
444 return &AMDGPU::AV_32RegClass;
445 if (RC == &AMDGPU::VReg_64RegClass || RC == &AMDGPU::AReg_64RegClass)
446 return &AMDGPU::AV_64RegClass;
447 if (RC == &AMDGPU::VReg_64_Align2RegClass ||
448 RC == &AMDGPU::AReg_64_Align2RegClass)
449 return &AMDGPU::AV_64_Align2RegClass;
450 if (RC == &AMDGPU::VReg_96RegClass || RC == &AMDGPU::AReg_96RegClass)
451 return &AMDGPU::AV_96RegClass;
452 if (RC == &AMDGPU::VReg_96_Align2RegClass ||
453 RC == &AMDGPU::AReg_96_Align2RegClass)
454 return &AMDGPU::AV_96_Align2RegClass;
455 if (RC == &AMDGPU::VReg_128RegClass || RC == &AMDGPU::AReg_128RegClass)
456 return &AMDGPU::AV_128RegClass;
457 if (RC == &AMDGPU::VReg_128_Align2RegClass ||
458 RC == &AMDGPU::AReg_128_Align2RegClass)
459 return &AMDGPU::AV_128_Align2RegClass;
460 if (RC == &AMDGPU::VReg_160RegClass || RC == &AMDGPU::AReg_160RegClass)
461 return &AMDGPU::AV_160RegClass;
462 if (RC == &AMDGPU::VReg_160_Align2RegClass ||
463 RC == &AMDGPU::AReg_160_Align2RegClass)
464 return &AMDGPU::AV_160_Align2RegClass;
465 if (RC == &AMDGPU::VReg_192RegClass || RC == &AMDGPU::AReg_192RegClass)
466 return &AMDGPU::AV_192RegClass;
467 if (RC == &AMDGPU::VReg_192_Align2RegClass ||
468 RC == &AMDGPU::AReg_192_Align2RegClass)
469 return &AMDGPU::AV_192_Align2RegClass;
470 if (RC == &AMDGPU::VReg_256RegClass || RC == &AMDGPU::AReg_256RegClass)
471 return &AMDGPU::AV_256RegClass;
472 if (RC == &AMDGPU::VReg_256_Align2RegClass ||
473 RC == &AMDGPU::AReg_256_Align2RegClass)
474 return &AMDGPU::AV_256_Align2RegClass;
475 if (RC == &AMDGPU::VReg_512RegClass || RC == &AMDGPU::AReg_512RegClass)
476 return &AMDGPU::AV_512RegClass;
477 if (RC == &AMDGPU::VReg_512_Align2RegClass ||
478 RC == &AMDGPU::AReg_512_Align2RegClass)
479 return &AMDGPU::AV_512_Align2RegClass;
480 if (RC == &AMDGPU::VReg_1024RegClass || RC == &AMDGPU::AReg_1024RegClass)
481 return &AMDGPU::AV_1024RegClass;
482 if (RC == &AMDGPU::VReg_1024_Align2RegClass ||
483 RC == &AMDGPU::AReg_1024_Align2RegClass)
484 return &AMDGPU::AV_1024_Align2RegClass;
485 }
486
487 return TargetRegisterInfo::getLargestLegalSuperClass(RC, MF);
488}
489
490Register SIRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
491 const SIFrameLowering *TFI = ST.getFrameLowering();
492 const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
493 // During ISel lowering we always reserve the stack pointer in entry
494 // functions, but never actually want to reference it when accessing our own
495 // frame. If we need a frame pointer we use it, but otherwise we can just use
496 // an immediate "0" which we represent by returning NoRegister.
497 if (FuncInfo->isEntryFunction()) {
498 return TFI->hasFP(MF) ? FuncInfo->getFrameOffsetReg() : Register();
499 }
500 return TFI->hasFP(MF) ? FuncInfo->getFrameOffsetReg()
501 : FuncInfo->getStackPtrOffsetReg();
502}
503
504bool SIRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
505 // When we need stack realignment, we can't reference off of the
506 // stack pointer, so we reserve a base pointer.
507 const MachineFrameInfo &MFI = MF.getFrameInfo();
508 return MFI.getNumFixedObjects() && shouldRealignStack(MF);
509}
510
511Register SIRegisterInfo::getBaseRegister() const { return AMDGPU::SGPR34; }
512
513const uint32_t *SIRegisterInfo::getAllVGPRRegMask() const {
514 return AMDGPU_AllVGPRs_RegMask;
515}
516
517const uint32_t *SIRegisterInfo::getAllAGPRRegMask() const {
518 return AMDGPU_AllAGPRs_RegMask;
519}
520
521const uint32_t *SIRegisterInfo::getAllVectorRegMask() const {
522 return AMDGPU_AllVectorRegs_RegMask;
523}
524
525const uint32_t *SIRegisterInfo::getAllAllocatableSRegMask() const {
526 return AMDGPU_AllAllocatableSRegs_RegMask;
527}
528
529unsigned SIRegisterInfo::getSubRegFromChannel(unsigned Channel,
530 unsigned NumRegs) {
531 assert(NumRegs < SubRegFromChannelTableWidthMap.size());
532 unsigned NumRegIndex = SubRegFromChannelTableWidthMap[NumRegs];
533 assert(NumRegIndex && "Not implemented");
534 assert(Channel < SubRegFromChannelTable[NumRegIndex - 1].size());
535 return SubRegFromChannelTable[NumRegIndex - 1][Channel];
536}
537
538MCRegister SIRegisterInfo::reservedPrivateSegmentBufferReg(
539 const MachineFunction &MF) const {
540 unsigned BaseIdx = alignDown(ST.getMaxNumSGPRs(MF), 4) - 4;
541 MCRegister BaseReg(AMDGPU::SGPR_32RegClass.getRegister(BaseIdx));
542 return getMatchingSuperReg(BaseReg, AMDGPU::sub0, &AMDGPU::SGPR_128RegClass);
543}
544
545BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
546 BitVector Reserved(getNumRegs());
547 Reserved.set(AMDGPU::MODE);
548
549 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
550
551 // Reserve special purpose registers.
552 //
553 // EXEC_LO and EXEC_HI could be allocated and used as regular register, but
554 // this seems likely to result in bugs, so I'm marking them as reserved.
555 reserveRegisterTuples(Reserved, AMDGPU::EXEC);
556 reserveRegisterTuples(Reserved, AMDGPU::FLAT_SCR);
557
558 // M0 has to be reserved so that llvm accepts it as a live-in into a block.
559 reserveRegisterTuples(Reserved, AMDGPU::M0);
560
561 // Reserve src_vccz, src_execz, src_scc.
562 reserveRegisterTuples(Reserved, AMDGPU::SRC_VCCZ);
563 reserveRegisterTuples(Reserved, AMDGPU::SRC_EXECZ);
564 reserveRegisterTuples(Reserved, AMDGPU::SRC_SCC);
565
566 // Reserve the memory aperture registers.
567 reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_BASE);
568 reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_LIMIT);
569 reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_BASE);
570 reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_LIMIT);
571
572 // Reserve src_pops_exiting_wave_id - support is not implemented in Codegen.
573 reserveRegisterTuples(Reserved, AMDGPU::SRC_POPS_EXITING_WAVE_ID);
574
575 // Reserve xnack_mask registers - support is not implemented in Codegen.
576 reserveRegisterTuples(Reserved, AMDGPU::XNACK_MASK);
577
578 // Reserve lds_direct register - support is not implemented in Codegen.
579 reserveRegisterTuples(Reserved, AMDGPU::LDS_DIRECT);
580
581 // Reserve Trap Handler registers - support is not implemented in Codegen.
582 reserveRegisterTuples(Reserved, AMDGPU::TBA);
583 reserveRegisterTuples(Reserved, AMDGPU::TMA);
584 reserveRegisterTuples(Reserved, AMDGPU::TTMP0_TTMP1);
585 reserveRegisterTuples(Reserved, AMDGPU::TTMP2_TTMP3);
586 reserveRegisterTuples(Reserved, AMDGPU::TTMP4_TTMP5);
587 reserveRegisterTuples(Reserved, AMDGPU::TTMP6_TTMP7);
588 reserveRegisterTuples(Reserved, AMDGPU::TTMP8_TTMP9);
589 reserveRegisterTuples(Reserved, AMDGPU::TTMP10_TTMP11);
590 reserveRegisterTuples(Reserved, AMDGPU::TTMP12_TTMP13);
591 reserveRegisterTuples(Reserved, AMDGPU::TTMP14_TTMP15);
592
593 // Reserve null register - it shall never be allocated
594 reserveRegisterTuples(Reserved, AMDGPU::SGPR_NULL64);
595
596 // Disallow vcc_hi allocation in wave32. It may be allocated but most likely
597 // will result in bugs.
598 if (isWave32) {
599 Reserved.set(AMDGPU::VCC);
600 Reserved.set(AMDGPU::VCC_HI);
601 }
602
603 // Reserve SGPRs.
604 //
605 unsigned MaxNumSGPRs = ST.getMaxNumSGPRs(MF);
606 unsigned TotalNumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
607 for (unsigned i = MaxNumSGPRs; i < TotalNumSGPRs; ++i) {
608 unsigned Reg = AMDGPU::SGPR_32RegClass.getRegister(i);
609 reserveRegisterTuples(Reserved, Reg);
610 }
611
612 for (auto Reg : AMDGPU::SReg_32RegClass) {
613 Reserved.set(getSubReg(Reg, AMDGPU::hi16));
614 Register Low = getSubReg(Reg, AMDGPU::lo16);
615 // This is to prevent BB vcc liveness errors.
616 if (!AMDGPU::SGPR_LO16RegClass.contains(Low))
617 Reserved.set(Low);
618 }
619
620 Register ScratchRSrcReg = MFI->getScratchRSrcReg();
621 if (ScratchRSrcReg != AMDGPU::NoRegister) {
622 // Reserve 4 SGPRs for the scratch buffer resource descriptor in case we
623 // need to spill.
624 // TODO: May need to reserve a VGPR if doing LDS spilling.
625 reserveRegisterTuples(Reserved, ScratchRSrcReg);
626 }
627
628 // We have to assume the SP is needed in case there are calls in the function,
629 // which is detected after the function is lowered. If we aren't really going
630 // to need SP, don't bother reserving it.
631 MCRegister StackPtrReg = MFI->getStackPtrOffsetReg();
632 if (StackPtrReg) {
633 reserveRegisterTuples(Reserved, StackPtrReg);
634 assert(!isSubRegister(ScratchRSrcReg, StackPtrReg));
635 }
636
637 MCRegister FrameReg = MFI->getFrameOffsetReg();
638 if (FrameReg) {
639 reserveRegisterTuples(Reserved, FrameReg);
640 assert(!isSubRegister(ScratchRSrcReg, FrameReg));
641 }
642
643 if (hasBasePointer(MF)) {
644 MCRegister BasePtrReg = getBaseRegister();
645 reserveRegisterTuples(Reserved, BasePtrReg);
646 assert(!isSubRegister(ScratchRSrcReg, BasePtrReg));
647 }
648
649 // Reserve VGPRs/AGPRs.
650 //
651 unsigned MaxNumVGPRs = ST.getMaxNumVGPRs(MF);
652 unsigned MaxNumAGPRs = MaxNumVGPRs;
653 unsigned TotalNumVGPRs = AMDGPU::VGPR_32RegClass.getNumRegs();
654
655 // Reserve all the AGPRs if there are no instructions to use it.
656 if (!ST.hasMAIInsts()) {
657 for (unsigned i = 0; i < MaxNumAGPRs; ++i) {
658 unsigned Reg = AMDGPU::AGPR_32RegClass.getRegister(i);
659 reserveRegisterTuples(Reserved, Reg);
660 }
661 }
662
663 for (auto Reg : AMDGPU::AGPR_32RegClass) {
664 Reserved.set(getSubReg(Reg, AMDGPU::hi16));
665 }
666
667 // On GFX90A, the number of VGPRs and AGPRs need not be equal. Theoretically,
668 // a wave may have up to 512 total vector registers combining together both
669 // VGPRs and AGPRs. Hence, in an entry function without calls and without
670 // AGPRs used within it, it is possible to use the whole vector register
671 // budget for VGPRs.
672 //
673 // TODO: it shall be possible to estimate maximum AGPR/VGPR pressure and split
674 // register file accordingly.
675 if (ST.hasGFX90AInsts()) {
676 if (MFI->usesAGPRs(MF)) {
677 MaxNumVGPRs /= 2;
678 MaxNumAGPRs = MaxNumVGPRs;
679 } else {
680 if (MaxNumVGPRs > TotalNumVGPRs) {
681 MaxNumAGPRs = MaxNumVGPRs - TotalNumVGPRs;
682 MaxNumVGPRs = TotalNumVGPRs;
683 } else
684 MaxNumAGPRs = 0;
685 }
686 }
687
688 for (unsigned i = MaxNumVGPRs; i < TotalNumVGPRs; ++i) {
689 unsigned Reg = AMDGPU::VGPR_32RegClass.getRegister(i);
690 reserveRegisterTuples(Reserved, Reg);
691 }
692
693 for (unsigned i = MaxNumAGPRs; i < TotalNumVGPRs; ++i) {
694 unsigned Reg = AMDGPU::AGPR_32RegClass.getRegister(i);
695 reserveRegisterTuples(Reserved, Reg);
696 }
697
698 // On GFX908, in order to guarantee copying between AGPRs, we need a scratch
699 // VGPR available at all times.
700 if (ST.hasMAIInsts() && !ST.hasGFX90AInsts()) {
701 reserveRegisterTuples(Reserved, MFI->getVGPRForAGPRCopy());
702 }
703
704 for (Register Reg : MFI->WWMReservedRegs)
705 reserveRegisterTuples(Reserved, Reg);
706
707 // FIXME: Stop using reserved registers for this.
708 for (MCPhysReg Reg : MFI->getAGPRSpillVGPRs())
709 reserveRegisterTuples(Reserved, Reg);
710
711 for (MCPhysReg Reg : MFI->getVGPRSpillAGPRs())
712 reserveRegisterTuples(Reserved, Reg);
713
714 for (auto SSpill : MFI->getSGPRSpillVGPRs())
715 reserveRegisterTuples(Reserved, SSpill.VGPR);
716
717 return Reserved;
718}
719
720bool SIRegisterInfo::isAsmClobberable(const MachineFunction &MF,
721 MCRegister PhysReg) const {
722 return !MF.getRegInfo().isReserved(PhysReg);
723}
724
725bool SIRegisterInfo::shouldRealignStack(const MachineFunction &MF) const {
726 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
727 // On entry, the base address is 0, so it can't possibly need any more
728 // alignment.
729
730 // FIXME: Should be able to specify the entry frame alignment per calling
731 // convention instead.
732 if (Info->isEntryFunction())
733 return false;
734
735 return TargetRegisterInfo::shouldRealignStack(MF);
736}
737
738bool SIRegisterInfo::requiresRegisterScavenging(const MachineFunction &Fn) const {
739 const SIMachineFunctionInfo *Info = Fn.getInfo<SIMachineFunctionInfo>();
740 if (Info->isEntryFunction()) {
741 const MachineFrameInfo &MFI = Fn.getFrameInfo();
742 return MFI.hasStackObjects() || MFI.hasCalls();
743 }
744
745 // May need scavenger for dealing with callee saved registers.
746 return true;
747}
748
749bool SIRegisterInfo::requiresFrameIndexScavenging(
750 const MachineFunction &MF) const {
751 // Do not use frame virtual registers. They used to be used for SGPRs, but
752 // once we reach PrologEpilogInserter, we can no longer spill SGPRs. If the
753 // scavenger fails, we can increment/decrement the necessary SGPRs to avoid a
754 // spill.
755 return false;
756}
757
758bool SIRegisterInfo::requiresFrameIndexReplacementScavenging(
759 const MachineFunction &MF) const {
760 const MachineFrameInfo &MFI = MF.getFrameInfo();
761 return MFI.hasStackObjects();
762}
763
764bool SIRegisterInfo::requiresVirtualBaseRegisters(
765 const MachineFunction &) const {
766 // There are no special dedicated stack or frame pointers.
767 return true;
768}
769
770int64_t SIRegisterInfo::getScratchInstrOffset(const MachineInstr *MI) const {
771 assert(SIInstrInfo::isMUBUF(*MI) || SIInstrInfo::isFLATScratch(*MI));
772
773 int OffIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
774 AMDGPU::OpName::offset);
775 return MI->getOperand(OffIdx).getImm();
776}
777
778int64_t SIRegisterInfo::getFrameIndexInstrOffset(const MachineInstr *MI,
779 int Idx) const {
780 if (!SIInstrInfo::isMUBUF(*MI) && !SIInstrInfo::isFLATScratch(*MI))
781 return 0;
782
783 assert((Idx == AMDGPU::getNamedOperandIdx(MI->getOpcode(),
784 AMDGPU::OpName::vaddr) ||
785 (Idx == AMDGPU::getNamedOperandIdx(MI->getOpcode(),
786 AMDGPU::OpName::saddr))) &&
787 "Should never see frame index on non-address operand");
788
789 return getScratchInstrOffset(MI);
790}
791
792bool SIRegisterInfo::needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
793 if (!SIInstrInfo::isMUBUF(*MI) && !SIInstrInfo::isFLATScratch(*MI))
794 return false;
795
796 int64_t FullOffset = Offset + getScratchInstrOffset(MI);
797
798 if (SIInstrInfo::isMUBUF(*MI))
799 return !SIInstrInfo::isLegalMUBUFImmOffset(FullOffset);
800
801 const SIInstrInfo *TII = ST.getInstrInfo();
802 return !TII->isLegalFLATOffset(FullOffset, AMDGPUAS::PRIVATE_ADDRESS,
803 SIInstrFlags::FlatScratch);
804}
805
806Register SIRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
807 int FrameIdx,
808 int64_t Offset) const {
809 MachineBasicBlock::iterator Ins = MBB->begin();
810 DebugLoc DL; // Defaults to "unknown"
811
812 if (Ins != MBB->end())
813 DL = Ins->getDebugLoc();
814
815 MachineFunction *MF = MBB->getParent();
816 const SIInstrInfo *TII = ST.getInstrInfo();
817 MachineRegisterInfo &MRI = MF->getRegInfo();
818 unsigned MovOpc = ST.enableFlatScratch() ? AMDGPU::S_MOV_B32
819 : AMDGPU::V_MOV_B32_e32;
820
821 Register BaseReg = MRI.createVirtualRegister(
822 ST.enableFlatScratch() ? &AMDGPU::SReg_32_XEXEC_HIRegClass
823 : &AMDGPU::VGPR_32RegClass);
824
825 if (Offset == 0) {
826 BuildMI(*MBB, Ins, DL, TII->get(MovOpc), BaseReg)
827 .addFrameIndex(FrameIdx);
828 return BaseReg;
829 }
830
831 Register OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
832
833 Register FIReg = MRI.createVirtualRegister(
834 ST.enableFlatScratch() ? &AMDGPU::SReg_32_XM0RegClass
835 : &AMDGPU::VGPR_32RegClass);
836
837 BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg)
838 .addImm(Offset);
839 BuildMI(*MBB, Ins, DL, TII->get(MovOpc), FIReg)
840 .addFrameIndex(FrameIdx);
841
842 if (ST.enableFlatScratch() ) {
843 BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::S_ADD_I32), BaseReg)
844 .addReg(OffsetReg, RegState::Kill)
845 .addReg(FIReg);
846 return BaseReg;
847 }
848
849 TII->getAddNoCarry(*MBB, Ins, DL, BaseReg)
850 .addReg(OffsetReg, RegState::Kill)
851 .addReg(FIReg)
852 .addImm(0); // clamp bit
853
854 return BaseReg;
855}
856
857void SIRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
858 int64_t Offset) const {
859 const SIInstrInfo *TII = ST.getInstrInfo();
860 bool IsFlat = TII->isFLATScratch(MI);
861
862#ifndef NDEBUG
863 // FIXME: Is it possible to be storing a frame index to itself?
864 bool SeenFI = false;
865 for (const MachineOperand &MO: MI.operands()) {
866 if (MO.isFI()) {
867 if (SeenFI)
868 llvm_unreachable("should not see multiple frame indices");
869
870 SeenFI = true;
871 }
872 }
873#endif
874
875 MachineOperand *FIOp =
876 TII->getNamedOperand(MI, IsFlat ? AMDGPU::OpName::saddr
877 : AMDGPU::OpName::vaddr);
878
879 MachineOperand *OffsetOp = TII->getNamedOperand(MI, AMDGPU::OpName::offset);
880 int64_t NewOffset = OffsetOp->getImm() + Offset;
881
882 assert(FIOp && FIOp->isFI() && "frame index must be address operand");
883 assert(TII->isMUBUF(MI) || TII->isFLATScratch(MI));
884
885 if (IsFlat) {
886 assert(TII->isLegalFLATOffset(NewOffset, AMDGPUAS::PRIVATE_ADDRESS,
887 SIInstrFlags::FlatScratch) &&
888 "offset should be legal");
889 FIOp->ChangeToRegister(BaseReg, false);
890 OffsetOp->setImm(NewOffset);
891 return;
892 }
893
894#ifndef NDEBUG
895 MachineOperand *SOffset = TII->getNamedOperand(MI, AMDGPU::OpName::soffset);
896 assert(SOffset->isImm() && SOffset->getImm() == 0);
897#endif
898
899 assert(SIInstrInfo::isLegalMUBUFImmOffset(NewOffset) &&
900 "offset should be legal");
901
902 FIOp->ChangeToRegister(BaseReg, false);
903 OffsetOp->setImm(NewOffset);
904}
905
906bool SIRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
907 Register BaseReg,
908 int64_t Offset) const {
909 if (!SIInstrInfo::isMUBUF(*MI) && !SIInstrInfo::isFLATScratch(*MI))
910 return false;
911
912 int64_t NewOffset = Offset + getScratchInstrOffset(MI);
913
914 if (SIInstrInfo::isMUBUF(*MI))
915 return SIInstrInfo::isLegalMUBUFImmOffset(NewOffset);
916
917 const SIInstrInfo *TII = ST.getInstrInfo();
918 return TII->isLegalFLATOffset(NewOffset, AMDGPUAS::PRIVATE_ADDRESS,
919 SIInstrFlags::FlatScratch);
920}
921
922const TargetRegisterClass *SIRegisterInfo::getPointerRegClass(
923 const MachineFunction &MF, unsigned Kind) const {
924 // This is inaccurate. It depends on the instruction and address space. The
925 // only place where we should hit this is for dealing with frame indexes /
926 // private accesses, so this is correct in that case.
927 return &AMDGPU::VGPR_32RegClass;
928}
929
930const TargetRegisterClass *
931SIRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
932 if (isAGPRClass(RC) && !ST.hasGFX90AInsts())
933 return getEquivalentVGPRClass(RC);
934 if (RC == &AMDGPU::SCC_CLASSRegClass)
935 return getWaveMaskRegClass();
936
937 return RC;
938}
939
940static unsigned getNumSubRegsForSpillOp(unsigned Op) {
941
942 switch (Op) {
943 case AMDGPU::SI_SPILL_S1024_SAVE:
944 case AMDGPU::SI_SPILL_S1024_RESTORE:
945 case AMDGPU::SI_SPILL_V1024_SAVE:
946 case AMDGPU::SI_SPILL_V1024_RESTORE:
947 case AMDGPU::SI_SPILL_A1024_SAVE:
948 case AMDGPU::SI_SPILL_A1024_RESTORE:
949 case AMDGPU::SI_SPILL_AV1024_SAVE:
950 case AMDGPU::SI_SPILL_AV1024_RESTORE:
951 return 32;
952 case AMDGPU::SI_SPILL_S512_SAVE:
953 case AMDGPU::SI_SPILL_S512_RESTORE:
954 case AMDGPU::SI_SPILL_V512_SAVE:
955 case AMDGPU::SI_SPILL_V512_RESTORE:
956 case AMDGPU::SI_SPILL_A512_SAVE:
957 case AMDGPU::SI_SPILL_A512_RESTORE:
958 case AMDGPU::SI_SPILL_AV512_SAVE:
959 case AMDGPU::SI_SPILL_AV512_RESTORE:
960 return 16;
961 case AMDGPU::SI_SPILL_S256_SAVE:
962 case AMDGPU::SI_SPILL_S256_RESTORE:
963 case AMDGPU::SI_SPILL_V256_SAVE:
964 case AMDGPU::SI_SPILL_V256_RESTORE:
965 case AMDGPU::SI_SPILL_A256_SAVE:
966 case AMDGPU::SI_SPILL_A256_RESTORE:
967 case AMDGPU::SI_SPILL_AV256_SAVE:
968 case AMDGPU::SI_SPILL_AV256_RESTORE:
969 return 8;
970 case AMDGPU::SI_SPILL_S224_SAVE:
971 case AMDGPU::SI_SPILL_S224_RESTORE:
972 case AMDGPU::SI_SPILL_V224_SAVE:
973 case AMDGPU::SI_SPILL_V224_RESTORE:
974 case AMDGPU::SI_SPILL_A224_SAVE:
975 case AMDGPU::SI_SPILL_A224_RESTORE:
976 case AMDGPU::SI_SPILL_AV224_SAVE:
977 case AMDGPU::SI_SPILL_AV224_RESTORE:
978 return 7;
979 case AMDGPU::SI_SPILL_S192_SAVE:
980 case AMDGPU::SI_SPILL_S192_RESTORE:
981 case AMDGPU::SI_SPILL_V192_SAVE:
982 case AMDGPU::SI_SPILL_V192_RESTORE:
983 case AMDGPU::SI_SPILL_A192_SAVE:
984 case AMDGPU::SI_SPILL_A192_RESTORE:
985 case AMDGPU::SI_SPILL_AV192_SAVE:
986 case AMDGPU::SI_SPILL_AV192_RESTORE:
987 return 6;
988 case AMDGPU::SI_SPILL_S160_SAVE:
989 case AMDGPU::SI_SPILL_S160_RESTORE:
990 case AMDGPU::SI_SPILL_V160_SAVE:
991 case AMDGPU::SI_SPILL_V160_RESTORE:
992 case AMDGPU::SI_SPILL_A160_SAVE:
993 case AMDGPU::SI_SPILL_A160_RESTORE:
994 case AMDGPU::SI_SPILL_AV160_SAVE:
995 case AMDGPU::SI_SPILL_AV160_RESTORE:
996 return 5;
997 case AMDGPU::SI_SPILL_S128_SAVE:
998 case AMDGPU::SI_SPILL_S128_RESTORE:
999 case AMDGPU::SI_SPILL_V128_SAVE:
1000 case AMDGPU::SI_SPILL_V128_RESTORE:
1001 case AMDGPU::SI_SPILL_A128_SAVE:
1002 case AMDGPU::SI_SPILL_A128_RESTORE:
1003 case AMDGPU::SI_SPILL_AV128_SAVE:
1004 case AMDGPU::SI_SPILL_AV128_RESTORE:
1005 return 4;
1006 case AMDGPU::SI_SPILL_S96_SAVE:
1007 case AMDGPU::SI_SPILL_S96_RESTORE:
1008 case AMDGPU::SI_SPILL_V96_SAVE:
1009 case AMDGPU::SI_SPILL_V96_RESTORE:
1010 case AMDGPU::SI_SPILL_A96_SAVE:
1011 case AMDGPU::SI_SPILL_A96_RESTORE:
1012 case AMDGPU::SI_SPILL_AV96_SAVE:
1013 case AMDGPU::SI_SPILL_AV96_RESTORE:
1014 return 3;
1015 case AMDGPU::SI_SPILL_S64_SAVE:
1016 case AMDGPU::SI_SPILL_S64_RESTORE:
1017 case AMDGPU::SI_SPILL_V64_SAVE:
1018 case AMDGPU::SI_SPILL_V64_RESTORE:
1019 case AMDGPU::SI_SPILL_A64_SAVE:
1020 case AMDGPU::SI_SPILL_A64_RESTORE:
1021 case AMDGPU::SI_SPILL_AV64_SAVE:
1022 case AMDGPU::SI_SPILL_AV64_RESTORE:
1023 return 2;
1024 case AMDGPU::SI_SPILL_S32_SAVE:
1025 case AMDGPU::SI_SPILL_S32_RESTORE:
1026 case AMDGPU::SI_SPILL_V32_SAVE:
1027 case AMDGPU::SI_SPILL_V32_RESTORE:
1028 case AMDGPU::SI_SPILL_A32_SAVE:
1029 case AMDGPU::SI_SPILL_A32_RESTORE:
1030 case AMDGPU::SI_SPILL_AV32_SAVE:
1031 case AMDGPU::SI_SPILL_AV32_RESTORE:
1032 return 1;
1033 default: llvm_unreachable("Invalid spill opcode");
1034 }
1035}
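The switch above encodes a simple invariant: each SI_SPILL_<RC><N>_SAVE/RESTORE pseudo covers N bits of register, and the function returns N / 32, i.e. the number of 32-bit subregisters the spill touches. A minimal sketch of that relationship, using a hypothetical helper name that is not part of this file:

static unsigned numSubRegsFromSpillBits(unsigned Bits) {
  // 32 -> 1, 64 -> 2, 96 -> 3, ..., 224 -> 7, ..., 1024 -> 32,
  // matching every return value in the switch above.
  return Bits / 32;
}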
1036
1037static int getOffsetMUBUFStore(unsigned Opc) {
1038 switch (Opc) {
1039 case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
1040 return AMDGPU::BUFFER_STORE_DWORD_OFFSET;
1041 case AMDGPU::BUFFER_STORE_BYTE_OFFEN:
1042 return AMDGPU::BUFFER_STORE_BYTE_OFFSET;
1043 case AMDGPU::BUFFER_STORE_SHORT_OFFEN:
1044 return AMDGPU::BUFFER_STORE_SHORT_OFFSET;
1045 case AMDGPU::BUFFER_STORE_DWORDX2_OFFEN:
1046 return AMDGPU::BUFFER_STORE_DWORDX2_OFFSET;
1047 case AMDGPU::BUFFER_STORE_DWORDX3_OFFEN:
1048 return AMDGPU::BUFFER_STORE_DWORDX3_OFFSET;
1049 case AMDGPU::BUFFER_STORE_DWORDX4_OFFEN:
1050 return AMDGPU::BUFFER_STORE_DWORDX4_OFFSET;
1051 case AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFEN:
1052 return AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFSET;
1053 case AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFEN:
1054 return AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFSET;
1055 default:
1056 return -1;
1057 }
1058}
1059
1060static int getOffsetMUBUFLoad(unsigned Opc) {
1061 switch (Opc) {
1062 case AMDGPU::BUFFER_LOAD_DWORD_OFFEN:
1063 return AMDGPU::BUFFER_LOAD_DWORD_OFFSET;
1064 case AMDGPU::BUFFER_LOAD_UBYTE_OFFEN:
1065 return AMDGPU::BUFFER_LOAD_UBYTE_OFFSET;
1066 case AMDGPU::BUFFER_LOAD_SBYTE_OFFEN:
1067 return AMDGPU::BUFFER_LOAD_SBYTE_OFFSET;
1068 case AMDGPU::BUFFER_LOAD_USHORT_OFFEN:
1069 return AMDGPU::BUFFER_LOAD_USHORT_OFFSET;
1070 case AMDGPU::BUFFER_LOAD_SSHORT_OFFEN:
1071 return AMDGPU::BUFFER_LOAD_SSHORT_OFFSET;
1072 case AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN:
1073 return AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET;
1074 case AMDGPU::BUFFER_LOAD_DWORDX3_OFFEN:
1075 return AMDGPU::BUFFER_LOAD_DWORDX3_OFFSET;
1076 case AMDGPU::BUFFER_LOAD_DWORDX4_OFFEN:
1077 return AMDGPU::BUFFER_LOAD_DWORDX4_OFFSET;
1078 case AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFEN:
1079 return AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFSET;
1080 case AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFEN:
1081 return AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFSET;
1082 case AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFEN:
1083 return AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFSET;
1084 case AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFEN:
1085 return AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFSET;
1086 case AMDGPU::BUFFER_LOAD_SHORT_D16_OFFEN:
1087 return AMDGPU::BUFFER_LOAD_SHORT_D16_OFFSET;
1088 case AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFEN:
1089 return AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFSET;
1090 default:
1091 return -1;
1092 }
1093}
1094
1095static int getOffenMUBUFStore(unsigned Opc) {
1096 switch (Opc) {
1097 case AMDGPU::BUFFER_STORE_DWORD_OFFSET:
1098 return AMDGPU::BUFFER_STORE_DWORD_OFFEN;
1099 case AMDGPU::BUFFER_STORE_BYTE_OFFSET:
1100 return AMDGPU::BUFFER_STORE_BYTE_OFFEN;
1101 case AMDGPU::BUFFER_STORE_SHORT_OFFSET:
1102 return AMDGPU::BUFFER_STORE_SHORT_OFFEN;
1103 case AMDGPU::BUFFER_STORE_DWORDX2_OFFSET:
1104 return AMDGPU::BUFFER_STORE_DWORDX2_OFFEN;
1105 case AMDGPU::BUFFER_STORE_DWORDX3_OFFSET:
1106 return AMDGPU::BUFFER_STORE_DWORDX3_OFFEN;
1107 case AMDGPU::BUFFER_STORE_DWORDX4_OFFSET:
1108 return AMDGPU::BUFFER_STORE_DWORDX4_OFFEN;
1109 case AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFSET:
1110 return AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFEN;
1111 case AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFSET:
1112 return AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFEN;
1113 default:
1114 return -1;
1115 }
1116}
1117
1118static int getOffenMUBUFLoad(unsigned Opc) {
1119 switch (Opc) {
1120 case AMDGPU::BUFFER_LOAD_DWORD_OFFSET:
1121 return AMDGPU::BUFFER_LOAD_DWORD_OFFEN;
1122 case AMDGPU::BUFFER_LOAD_UBYTE_OFFSET:
1123 return AMDGPU::BUFFER_LOAD_UBYTE_OFFEN;
1124 case AMDGPU::BUFFER_LOAD_SBYTE_OFFSET:
1125 return AMDGPU::BUFFER_LOAD_SBYTE_OFFEN;
1126 case AMDGPU::BUFFER_LOAD_USHORT_OFFSET:
1127 return AMDGPU::BUFFER_LOAD_USHORT_OFFEN;
1128 case AMDGPU::BUFFER_LOAD_SSHORT_OFFSET:
1129 return AMDGPU::BUFFER_LOAD_SSHORT_OFFEN;
1130 case AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET:
1131 return AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN;
1132 case AMDGPU::BUFFER_LOAD_DWORDX3_OFFSET:
1133 return AMDGPU::BUFFER_LOAD_DWORDX3_OFFEN;
1134 case AMDGPU::BUFFER_LOAD_DWORDX4_OFFSET:
1135 return AMDGPU::BUFFER_LOAD_DWORDX4_OFFEN;
1136 case AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFSET:
1137 return AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFEN;
1138 case AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFSET:
1139 return AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFEN;
1140 case AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFSET:
1141 return AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFEN;
1142 case AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFSET:
1143 return AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFEN;
1144 case AMDGPU::BUFFER_LOAD_SHORT_D16_OFFSET:
1145 return AMDGPU::BUFFER_LOAD_SHORT_D16_OFFEN;
1146 case AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFSET:
1147 return AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFEN;
1148 default:
1149 return -1;
1150 }
1151}
1152
1153static MachineInstrBuilder spillVGPRtoAGPR(const GCNSubtarget &ST,
1154 MachineBasicBlock &MBB,
1155 MachineBasicBlock::iterator MI,
1156 int Index, unsigned Lane,
1157 unsigned ValueReg, bool IsKill) {
1158 MachineFunction *MF = MBB.getParent();
1159 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1160 const SIInstrInfo *TII = ST.getInstrInfo();
1161
1162 MCPhysReg Reg = MFI->getVGPRToAGPRSpill(Index, Lane);
1163
1164 if (Reg == AMDGPU::NoRegister)
1165 return MachineInstrBuilder();
1166
1167 bool IsStore = MI->mayStore();
1168 MachineRegisterInfo &MRI = MF->getRegInfo();
1169 auto *TRI = static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo());
1170
1171 unsigned Dst = IsStore ? Reg : ValueReg;
1172 unsigned Src = IsStore ? ValueReg : Reg;
1173 bool IsVGPR = TRI->isVGPR(MRI, Reg);
1174 DebugLoc DL = MI->getDebugLoc();
1175 if (IsVGPR == TRI->isVGPR(MRI, ValueReg)) {
1176 // The spiller may, during regalloc, restore a spilled register to its superclass.
1177 // That can result in AGPR spills being restored to VGPRs or the other way around,
1178 // leaving the src and dst with identical regclasses at this point. A plain copy
1179 // is all that is needed in such cases.
1180 auto CopyMIB = BuildMI(MBB, MI, DL, TII->get(AMDGPU::COPY), Dst)
1181 .addReg(Src, getKillRegState(IsKill));
1182 CopyMIB->setAsmPrinterFlag(MachineInstr::ReloadReuse);
1183 return CopyMIB;
1184 }
1185 unsigned Opc = (IsStore ^ IsVGPR) ? AMDGPU::V_ACCVGPR_WRITE_B32_e64
1186 : AMDGPU::V_ACCVGPR_READ_B32_e64;
1187
1188 auto MIB = BuildMI(MBB, MI, DL, TII->get(Opc), Dst)
1189 .addReg(Src, getKillRegState(IsKill));
1190 MIB->setAsmPrinterFlag(MachineInstr::ReloadReuse);
1191 return MIB;
1192}
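The opcode choice above follows from which side of the copy is the accumulator register. Reg is the AGPR/VGPR lane obtained from getVGPRToAGPRSpill and ValueReg is the register being spilled or reloaded; the equal-class case was already handled by the plain COPY, so Reg and ValueReg have different classes here. A sketch of the same selection spelled out, assuming the usual semantics that V_ACCVGPR_WRITE_B32 moves a VGPR into an AGPR and V_ACCVGPR_READ_B32 moves an AGPR into a VGPR:

// Hypothetical helper, equivalent to (IsStore ^ IsVGPR) ? WRITE : READ above.
static unsigned pickAccCopyOpcode(bool IsStore, bool RegIsVGPR) {
  // Dst = IsStore ? Reg : ValueReg, so the destination is the AGPR side
  // exactly when IsStore and RegIsVGPR disagree.
  bool DstIsAGPR = (IsStore != RegIsVGPR);
  return DstIsAGPR ? AMDGPU::V_ACCVGPR_WRITE_B32_e64
                   : AMDGPU::V_ACCVGPR_READ_B32_e64;
}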
1193
1194// This differs from buildSpillLoadStore by only scavenging a VGPR. It does not
1195// need to handle the case where an SGPR may need to be spilled while spilling.
1196static bool buildMUBUFOffsetLoadStore(const GCNSubtarget &ST,
1197 MachineFrameInfo &MFI,
1198 MachineBasicBlock::iterator MI,
1199 int Index,
1200 int64_t Offset) {
1201 const SIInstrInfo *TII = ST.getInstrInfo();
1202 MachineBasicBlock *MBB = MI->getParent();
1203 const DebugLoc &DL = MI->getDebugLoc();
1204 bool IsStore = MI->mayStore();
1205
1206 unsigned Opc = MI->getOpcode();
1207 int LoadStoreOp = IsStore ?
1208 getOffsetMUBUFStore(Opc) : getOffsetMUBUFLoad(Opc);
1209 if (LoadStoreOp == -1)
1210 return false;
1211
1212 const MachineOperand *Reg = TII->getNamedOperand(*MI, AMDGPU::OpName::vdata);
1213 if (spillVGPRtoAGPR(ST, *MBB, MI, Index, 0, Reg->getReg(), false).getInstr())
1214 return true;
1215
1216 MachineInstrBuilder NewMI =
1217 BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
1218 .add(*Reg)
1219 .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc))
1220 .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::soffset))
1221 .addImm(Offset)
1222 .addImm(0) // cpol
1223 .addImm(0) // tfe
1224 .addImm(0) // swz
1225 .cloneMemRefs(*MI);
1226
1227 const MachineOperand *VDataIn = TII->getNamedOperand(*MI,
1228 AMDGPU::OpName::vdata_in);
1229 if (VDataIn)
1230 NewMI.add(*VDataIn);
1231 return true;
1232}
1233
1234static unsigned getFlatScratchSpillOpcode(const SIInstrInfo *TII,
1235 unsigned LoadStoreOp,
1236 unsigned EltSize) {
1237 bool IsStore = TII->get(LoadStoreOp).mayStore();
1238 bool HasVAddr = AMDGPU::getNamedOperandIdx(LoadStoreOp, AMDGPU::OpName::vaddr) != -1;
1239 bool UseST =
1240 !HasVAddr &&
1241 AMDGPU::getNamedOperandIdx(LoadStoreOp, AMDGPU::OpName::saddr) < 0;
1242
1243 switch (EltSize) {
1244 case 4:
1245 LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORD_SADDR
1246 : AMDGPU::SCRATCH_LOAD_DWORD_SADDR;
1247 break;
1248 case 8:
1249 LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORDX2_SADDR
1250 : AMDGPU::SCRATCH_LOAD_DWORDX2_SADDR;
1251 break;
1252 case 12:
1253 LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORDX3_SADDR
1254 : AMDGPU::SCRATCH_LOAD_DWORDX3_SADDR;
1255 break;
1256 case 16:
1257 LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORDX4_SADDR
1258 : AMDGPU::SCRATCH_LOAD_DWORDX4_SADDR;
1259 break;
1260 default:
1261 llvm_unreachable("Unexpected spill load/store size!");
1262 }
1263
1264 if (HasVAddr)
1265 LoadStoreOp = AMDGPU::getFlatScratchInstSVfromSS(LoadStoreOp);
1266 else if (UseST)
1267 LoadStoreOp = AMDGPU::getFlatScratchInstSTfromSS(LoadStoreOp);
1268
1269 return LoadStoreOp;
1270}
1271
1272void SIRegisterInfo::buildSpillLoadStore(
1273 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL,
1274 unsigned LoadStoreOp, int Index, Register ValueReg, bool IsKill,
1275 MCRegister ScratchOffsetReg, int64_t InstOffset, MachineMemOperand *MMO,
1276 RegScavenger *RS, LivePhysRegs *LiveRegs) const {
1277 assert((!RS || !LiveRegs) && "Only RS or LiveRegs can be set but not both");
1278
1279 MachineFunction *MF = MBB.getParent();
1280 const SIInstrInfo *TII = ST.getInstrInfo();
1281 const MachineFrameInfo &MFI = MF->getFrameInfo();
1282 const SIMachineFunctionInfo *FuncInfo = MF->getInfo<SIMachineFunctionInfo>();
1283
1284 const MCInstrDesc *Desc = &TII->get(LoadStoreOp);
1285 bool IsStore = Desc->mayStore();
1286 bool IsFlat = TII->isFLATScratch(LoadStoreOp);
1287
1288 bool CanClobberSCC = false;
1289 bool Scavenged = false;
1290 MCRegister SOffset = ScratchOffsetReg;
1291
1292 const TargetRegisterClass *RC = getRegClassForReg(MF->getRegInfo(), ValueReg);
1293 // On gfx90a+ AGPR is a regular VGPR acceptable for loads and stores.
1294 const bool IsAGPR = !ST.hasGFX90AInsts() && isAGPRClass(RC);
1295 const unsigned RegWidth = AMDGPU::getRegBitWidth(RC->getID()) / 8;
1296
1297 // Always use 4 byte operations for AGPRs because we need to scavenge
1298 // a temporary VGPR.
1299 unsigned EltSize = (IsFlat && !IsAGPR) ? std::min(RegWidth, 16u) : 4u;
1300 unsigned NumSubRegs = RegWidth / EltSize;
1301 unsigned Size = NumSubRegs * EltSize;
1302 unsigned RemSize = RegWidth - Size;
1303 unsigned NumRemSubRegs = RemSize ? 1 : 0;
1304 int64_t Offset = InstOffset + MFI.getObjectOffset(Index);
1305 int64_t MaterializedOffset = Offset;
1306
1307 int64_t MaxOffset = Offset + Size + RemSize - EltSize;
1308 int64_t ScratchOffsetRegDelta = 0;
1309
1310 if (IsFlat && EltSize > 4) {
1311 LoadStoreOp = getFlatScratchSpillOpcode(TII, LoadStoreOp, EltSize);
1312 Desc = &TII->get(LoadStoreOp);
Value stored to 'Desc' is never read
1313 }
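This is the dead store flagged in the bug summary. Every path that reaches the next use of Desc (the BuildMI call at line 1584) re-fetches it from LoadStoreOp first: unconditionally at line 1466 at the top of the per-subregister loop, and again at lines 1457, 1471 and 1548 where the opcode is adjusted, so the value written here is never read. A minimal sketch of the block without the redundant assignment (an editorial suggestion only):

  if (IsFlat && EltSize > 4) {
    LoadStoreOp = getFlatScratchSpillOpcode(TII, LoadStoreOp, EltSize);
    // Desc is recomputed from LoadStoreOp before its next use, so it does not
    // need to be refreshed here.
  }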
1314
1315 Align Alignment = MFI.getObjectAlign(Index);
1316 const MachinePointerInfo &BasePtrInfo = MMO->getPointerInfo();
1317
1318 assert((IsFlat || ((Offset % EltSize) == 0)) &&
1319 "unexpected VGPR spill offset");
1320
1321 // Track a VGPR to use for a constant offset we need to materialize.
1322 Register TmpOffsetVGPR;
1323
1324 // Track a VGPR to use as an intermediate value.
1325 Register TmpIntermediateVGPR;
1326 bool UseVGPROffset = false;
1327
1328 // Materialize a VGPR offset required for the given SGPR/VGPR/Immediate
1329 // combination.
1330 auto MaterializeVOffset = [&](Register SGPRBase, Register TmpVGPR,
1331 int64_t VOffset) {
1332 // We are using a VGPR offset
1333 if (IsFlat && SGPRBase) {
1334 // We only have 1 VGPR offset, or 1 SGPR offset. We don't have a free
1335 // SGPR, so perform the add as vector.
1336 // We don't need a base SGPR in the kernel.
1337
1338 if (ST.getConstantBusLimit(AMDGPU::V_ADD_U32_e64) >= 2) {
1339 BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_ADD_U32_e64), TmpVGPR)
1340 .addReg(SGPRBase)
1341 .addImm(VOffset)
1342 .addImm(0); // clamp
1343 } else {
1344 BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpVGPR)
1345 .addReg(SGPRBase);
1346 BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_ADD_U32_e32), TmpVGPR)
1347 .addImm(VOffset)
1348 .addReg(TmpOffsetVGPR);
1349 }
1350 } else {
1351 assert(TmpOffsetVGPR);
1352 BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpVGPR)
1353 .addImm(VOffset);
1354 }
1355 };
1356
1357 bool IsOffsetLegal =
1358 IsFlat ? TII->isLegalFLATOffset(MaxOffset, AMDGPUAS::PRIVATE_ADDRESS,
1359 SIInstrFlags::FlatScratch)
1360 : SIInstrInfo::isLegalMUBUFImmOffset(MaxOffset);
1361 if (!IsOffsetLegal || (IsFlat && !SOffset && !ST.hasFlatScratchSTMode())) {
1362 SOffset = MCRegister();
1363
1364 // We don't have access to the register scavenger if this function is called
1365 // during PEI::scavengeFrameVirtualRegs() so use LiveRegs in this case.
1366 // TODO: Clobbering SCC is not necessary for scratch instructions in the
1367 // entry.
1368 if (RS) {
1369 SOffset = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0, false);
1370
1371 // Piggy back on the liveness scan we just did to see if SCC is dead.
1372 CanClobberSCC = !RS->isRegUsed(AMDGPU::SCC);
1373 } else if (LiveRegs) {
1374 CanClobberSCC = !LiveRegs->contains(AMDGPU::SCC);
1375 for (MCRegister Reg : AMDGPU::SGPR_32RegClass) {
1376 if (LiveRegs->available(MF->getRegInfo(), Reg)) {
1377 SOffset = Reg;
1378 break;
1379 }
1380 }
1381 }
1382
1383 if (ScratchOffsetReg != AMDGPU::NoRegister && !CanClobberSCC)
1384 SOffset = Register();
1385
1386 if (!SOffset) {
1387 UseVGPROffset = true;
1388
1389 if (RS) {
1390 TmpOffsetVGPR = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
1391 } else {
1392 assert(LiveRegs);
1393 for (MCRegister Reg : AMDGPU::VGPR_32RegClass) {
1394 if (LiveRegs->available(MF->getRegInfo(), Reg)) {
1395 TmpOffsetVGPR = Reg;
1396 break;
1397 }
1398 }
1399 }
1400
1401 assert(TmpOffsetVGPR);
1402 } else if (!SOffset && CanClobberSCC) {
1403 // There are no free SGPRs, and we are in the process of spilling
1404 // VGPRs too. Since we need a VGPR in order to spill SGPRs (this is true
1405 // on SI/CI, and on VI it is true until we implement spilling using scalar
1406 // stores), we have no way to free up an SGPR. Our solution here is to
1407 // add the offset directly to the ScratchOffset or StackPtrOffset
1408 // register, and then subtract the offset after the spill to return the
1409 // register to its original value.
1410
1411 // TODO: If we don't have to do an emergency stack slot spill, converting
1412 // to use the VGPR offset is fewer instructions.
1413 if (!ScratchOffsetReg)
1414 ScratchOffsetReg = FuncInfo->getStackPtrOffsetReg();
1415 SOffset = ScratchOffsetReg;
1416 ScratchOffsetRegDelta = Offset;
1417 } else {
1418 Scavenged = true;
1419 }
1420
1421 // We currently only support spilling VGPRs to EltSize boundaries, meaning
1422 // we can simplify the adjustment of Offset here to just scale with
1423 // WavefrontSize.
1424 if (!IsFlat && !UseVGPROffset)
1425 Offset *= ST.getWavefrontSize();
1426
1427 if (!UseVGPROffset && !SOffset)
1428 report_fatal_error("could not scavenge SGPR to spill in entry function");
1429
1430 if (UseVGPROffset) {
1431 // We are using a VGPR offset
1432 MaterializeVOffset(ScratchOffsetReg, TmpOffsetVGPR, Offset);
1433 } else if (ScratchOffsetReg == AMDGPU::NoRegister) {
1434 BuildMI(MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), SOffset).addImm(Offset);
1435 } else {
1436 assert(Offset != 0);
1437 auto Add = BuildMI(MBB, MI, DL, TII->get(AMDGPU::S_ADD_I32), SOffset)
1438 .addReg(ScratchOffsetReg)
1439 .addImm(Offset);
1440 Add->getOperand(3).setIsDead(); // Mark SCC as dead.
1441 }
1442
1443 Offset = 0;
1444 }
1445
1446 if (IsFlat && SOffset == AMDGPU::NoRegister) {
1447 assert(AMDGPU::getNamedOperandIdx(LoadStoreOp, AMDGPU::OpName::vaddr) < 0
1448 && "Unexpected vaddr for flat scratch with a FI operand");
1449
1450 if (UseVGPROffset) {
1451 LoadStoreOp = AMDGPU::getFlatScratchInstSVfromSS(LoadStoreOp);
1452 } else {
1453 assert(ST.hasFlatScratchSTMode());
1454 LoadStoreOp = AMDGPU::getFlatScratchInstSTfromSS(LoadStoreOp);
1455 }
1456
1457 Desc = &TII->get(LoadStoreOp);
1458 }
1459
1460 for (unsigned i = 0, e = NumSubRegs + NumRemSubRegs, RegOffset = 0; i != e;
1461 ++i, RegOffset += EltSize) {
1462 if (i == NumSubRegs) {
1463 EltSize = RemSize;
1464 LoadStoreOp = getFlatScratchSpillOpcode(TII, LoadStoreOp, EltSize);
1465 }
1466 Desc = &TII->get(LoadStoreOp);
1467
1468 if (!IsFlat && UseVGPROffset) {
1469 int NewLoadStoreOp = IsStore ? getOffenMUBUFStore(LoadStoreOp)
1470 : getOffenMUBUFLoad(LoadStoreOp);
1471 Desc = &TII->get(NewLoadStoreOp);
1472 }
1473
1474 if (UseVGPROffset && TmpOffsetVGPR == TmpIntermediateVGPR) {
1475 // If we are spilling an AGPR beyond the range of the memory instruction
1476 // offset and need to use a VGPR offset, we ideally have at least 2
1477 // scratch VGPRs. If we don't have a second free VGPR without spilling,
1478 // recycle the VGPR used for the offset which requires resetting after
1479 // each subregister.
1480
1481 MaterializeVOffset(ScratchOffsetReg, TmpOffsetVGPR, MaterializedOffset);
1482 }
1483
1484 unsigned NumRegs = EltSize / 4;
1485 Register SubReg = e == 1
1486 ? ValueReg
1487 : Register(getSubReg(ValueReg,
1488 getSubRegFromChannel(RegOffset / 4, NumRegs)));
1489
1490 unsigned SOffsetRegState = 0;
1491 unsigned SrcDstRegState = getDefRegState(!IsStore);
1492 const bool IsLastSubReg = i + 1 == e;
1493 if (IsLastSubReg) {
1494 SOffsetRegState |= getKillRegState(Scavenged);
1495 // The last implicit use carries the "Kill" flag.
1496 SrcDstRegState |= getKillRegState(IsKill);
1497 }
1498
1499 // Make sure the whole register is defined if there are undef components by
1500 // adding an implicit def of the super-reg on the first instruction.
1501 bool NeedSuperRegDef = e > 1 && IsStore && i == 0;
1502 bool NeedSuperRegImpOperand = e > 1;
1503
1504 // Remaining element size to spill into memory after some parts of it
1505 // have been spilled into either AGPRs or VGPRs.
1506 unsigned RemEltSize = EltSize;
1507
1508 // AGPRs used to spill VGPRs (and vice versa) are allocated in reverse order,
1509 // starting from the last lane. If a register cannot be completely spilled
1510 // into another register, this ensures its alignment does not change. For
1511 // targets with a VGPR alignment requirement this is important when flat
1512 // scratch is in use, as we might otherwise get a scratch_load or
1513 // scratch_store of an unaligned register.
1514 for (int LaneS = (RegOffset + EltSize) / 4 - 1, Lane = LaneS,
1515 LaneE = RegOffset / 4;
1516 Lane >= LaneE; --Lane) {
1517 bool IsSubReg = e > 1 || EltSize > 4;
1518 Register Sub = IsSubReg
1519 ? Register(getSubReg(ValueReg, getSubRegFromChannel(Lane)))
1520 : ValueReg;
1521 auto MIB = spillVGPRtoAGPR(ST, MBB, MI, Index, Lane, Sub, IsKill);
1522 if (!MIB.getInstr())
1523 break;
1524 if (NeedSuperRegDef || (IsSubReg && IsStore && Lane == LaneS && !i)) {
1525 MIB.addReg(ValueReg, RegState::ImplicitDefine);
1526 NeedSuperRegDef = false;
1527 }
1528 if (IsSubReg || NeedSuperRegImpOperand) {
1529 NeedSuperRegImpOperand = true;
1530 unsigned State = SrcDstRegState;
1531 if (Lane != LaneE)
1532 State &= ~RegState::Kill;
1533 MIB.addReg(ValueReg, RegState::Implicit | State);
1534 }
1535 RemEltSize -= 4;
1536 }
1537
1538 if (!RemEltSize) // Fully spilled into AGPRs.
1539 continue;
1540
1541 if (RemEltSize != EltSize) { // Partially spilled to AGPRs
1542 assert(IsFlat && EltSize > 4);
1543
1544 unsigned NumRegs = RemEltSize / 4;
1545 SubReg = Register(getSubReg(ValueReg,
1546 getSubRegFromChannel(RegOffset / 4, NumRegs)));
1547 unsigned Opc = getFlatScratchSpillOpcode(TII, LoadStoreOp, RemEltSize);
1548 Desc = &TII->get(Opc);
1549 }
1550
1551 unsigned FinalReg = SubReg;
1552
1553 if (IsAGPR) {
1554 assert(EltSize == 4);
1555
1556 if (!TmpIntermediateVGPR) {
1557 TmpIntermediateVGPR = FuncInfo->getVGPRForAGPRCopy();
1558 assert(MF->getRegInfo().isReserved(TmpIntermediateVGPR));
1559 }
1560 if (IsStore) {
1561 auto AccRead = BuildMI(MBB, MI, DL,
1562 TII->get(AMDGPU::V_ACCVGPR_READ_B32_e64),
1563 TmpIntermediateVGPR)
1564 .addReg(SubReg, getKillRegState(IsKill));
1565 if (NeedSuperRegDef)
1566 AccRead.addReg(ValueReg, RegState::ImplicitDefine);
1567 AccRead->setAsmPrinterFlag(MachineInstr::ReloadReuse);
1568 }
1569 SubReg = TmpIntermediateVGPR;
1570 } else if (UseVGPROffset) {
1571 // FIXME: change to scavengeRegisterBackwards()
1572 if (!TmpOffsetVGPR) {
1573 TmpOffsetVGPR = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
1574 RS->setRegUsed(TmpOffsetVGPR);
1575 }
1576 }
1577
1578 MachinePointerInfo PInfo = BasePtrInfo.getWithOffset(RegOffset);
1579 MachineMemOperand *NewMMO =
1580 MF->getMachineMemOperand(PInfo, MMO->getFlags(), RemEltSize,
1581 commonAlignment(Alignment, RegOffset));
1582
1583 auto MIB =
1584 BuildMI(MBB, MI, DL, *Desc)
1585 .addReg(SubReg, getDefRegState(!IsStore) | getKillRegState(IsKill));
1586
1587 if (UseVGPROffset) {
1588 // For an AGPR spill, we reuse the same temp VGPR for the offset and the
1589 // intermediate accvgpr_write.
1590 MIB.addReg(TmpOffsetVGPR, getKillRegState(IsLastSubReg && !IsAGPR));
1591 }
1592
1593 if (!IsFlat)
1594 MIB.addReg(FuncInfo->getScratchRSrcReg());
1595
1596 if (SOffset == AMDGPU::NoRegister) {
1597 if (!IsFlat) {
1598 if (UseVGPROffset && ScratchOffsetReg) {
1599 assert(!FuncInfo->isEntryFunction());
1600 MIB.addReg(ScratchOffsetReg);
1601 } else {
1602 assert(FuncInfo->isEntryFunction());
1603 MIB.addImm(0);
1604 }
1605 }
1606 } else {
1607 MIB.addReg(SOffset, SOffsetRegState);
1608 }
1609 MIB.addImm(Offset + RegOffset)
1610 .addImm(0); // cpol
1611 if (!IsFlat)
1612 MIB.addImm(0) // tfe
1613 .addImm(0); // swz
1614 MIB.addMemOperand(NewMMO);
1615
1616 if (!IsAGPR && NeedSuperRegDef)
1617 MIB.addReg(ValueReg, RegState::ImplicitDefine);
1618
1619 if (!IsStore && IsAGPR && TmpIntermediateVGPR != AMDGPU::NoRegister) {
1620 MIB = BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64),
1621 FinalReg)
1622 .addReg(TmpIntermediateVGPR, RegState::Kill);
1623 MIB->setAsmPrinterFlag(MachineInstr::ReloadReuse);
1624 }
1625
1626 if (NeedSuperRegImpOperand)
1627 MIB.addReg(ValueReg, RegState::Implicit | SrcDstRegState);
1628 }
1629
1630 if (ScratchOffsetRegDelta != 0) {
1631 // Subtract the offset we added to the ScratchOffset register.
1632 BuildMI(MBB, MI, DL, TII->get(AMDGPU::S_ADD_I32), SOffset)
1633 .addReg(SOffset)
1634 .addImm(-ScratchOffsetRegDelta);
1635 }
1636}
1637
1638void SIRegisterInfo::buildVGPRSpillLoadStore(SGPRSpillBuilder &SB, int Index,
1639 int Offset, bool IsLoad,
1640 bool IsKill) const {
1641 // Load/store VGPR
1642 MachineFrameInfo &FrameInfo = SB.MF.getFrameInfo();
1643 assert(FrameInfo.getStackID(Index) != TargetStackID::SGPRSpill);
1644
1645 Register FrameReg =
1646 FrameInfo.isFixedObjectIndex(Index) && hasBasePointer(SB.MF)
1647 ? getBaseRegister()
1648 : getFrameRegister(SB.MF);
1649
1650 Align Alignment = FrameInfo.getObjectAlign(Index);
1651 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(SB.MF, Index);
1652 MachineMemOperand *MMO = SB.MF.getMachineMemOperand(
1653 PtrInfo, IsLoad ? MachineMemOperand::MOLoad : MachineMemOperand::MOStore,
1654 SB.EltSize, Alignment);
1655
1656 if (IsLoad) {
1657 unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_LOAD_DWORD_SADDR
1658 : AMDGPU::BUFFER_LOAD_DWORD_OFFSET;
1659 buildSpillLoadStore(*SB.MBB, SB.MI, SB.DL, Opc, Index, SB.TmpVGPR, false,
1660 FrameReg, Offset * SB.EltSize, MMO, SB.RS);
1661 } else {
1662 unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_STORE_DWORD_SADDR
1663 : AMDGPU::BUFFER_STORE_DWORD_OFFSET;
1664 buildSpillLoadStore(*SB.MBB, SB.MI, SB.DL, Opc, Index, SB.TmpVGPR, IsKill,
1665 FrameReg, Offset * SB.EltSize, MMO, SB.RS);
1666 // This only ever adds one VGPR spill
1667 SB.MFI.addToSpilledVGPRs(1);
1668 }
1669}
1670
1671bool SIRegisterInfo::spillSGPR(MachineBasicBlock::iterator MI,
1672 int Index,
1673 RegScavenger *RS,
1674 LiveIntervals *LIS,
1675 bool OnlyToVGPR) const {
1676 SGPRSpillBuilder SB(*this, *ST.getInstrInfo(), isWave32, MI, Index, RS);
1677
1678 ArrayRef<SpilledReg> VGPRSpills = SB.MFI.getSGPRToVGPRSpills(Index);
1679 bool SpillToVGPR = !VGPRSpills.empty();
1680 if (OnlyToVGPR && !SpillToVGPR)
1681 return false;
1682
1683 assert(SpillToVGPR || (SB.SuperReg != SB.MFI.getStackPtrOffsetReg() &&
1684 SB.SuperReg != SB.MFI.getFrameOffsetReg()));
1685
1686 if (SpillToVGPR) {
1687
1688 assert(SB.NumSubRegs == VGPRSpills.size() &&
1689 "Num of VGPR lanes should be equal to num of SGPRs spilled");
1690
1691 for (unsigned i = 0, e = SB.NumSubRegs; i < e; ++i) {
1692 Register SubReg =
1693 SB.NumSubRegs == 1
1694 ? SB.SuperReg
1695 : Register(getSubReg(SB.SuperReg, SB.SplitParts[i]));
1696 SpilledReg Spill = VGPRSpills[i];
1697
1698 bool UseKill = SB.IsKill && i == SB.NumSubRegs - 1;
1699
1700 // Mark the "old value of vgpr" input undef only if this is the first sgpr
1701 // spill to this specific vgpr in the first basic block.
1702 auto MIB = BuildMI(*SB.MBB, MI, SB.DL,
1703 SB.TII.get(AMDGPU::V_WRITELANE_B32), Spill.VGPR)
1704 .addReg(SubReg, getKillRegState(UseKill))
1705 .addImm(Spill.Lane)
1706 .addReg(Spill.VGPR);
1707 if (LIS) {
1708 if (i == 0)
1709 LIS->ReplaceMachineInstrInMaps(*MI, *MIB);
1710 else
1711 LIS->InsertMachineInstrInMaps(*MIB);
1712 }
1713
1714 if (i == 0 && SB.NumSubRegs > 1) {
1715 // We may be spilling a super-register which is only partially defined,
1716 // and need to ensure later spills think the value is defined.
1717 MIB.addReg(SB.SuperReg, RegState::ImplicitDefine);
1718 }
1719
1720 if (SB.NumSubRegs > 1)
1721 MIB.addReg(SB.SuperReg, getKillRegState(UseKill) | RegState::Implicit);
1722
1723 // FIXME: Since this spills to another register instead of an actual
1724 // frame index, we should delete the frame index when all references to
1725 // it are fixed.
1726 }
1727 } else {
1728 SB.prepare();
1729
1730 // SubReg carries the "Kill" flag when SubReg == SB.SuperReg.
1731 unsigned SubKillState = getKillRegState((SB.NumSubRegs == 1) && SB.IsKill);
1732
1733 // Per VGPR helper data
1734 auto PVD = SB.getPerVGPRData();
1735
1736 for (unsigned Offset = 0; Offset < PVD.NumVGPRs; ++Offset) {
1737 unsigned TmpVGPRFlags = RegState::Undef;
1738
1739 // Write sub registers into the VGPR
1740 for (unsigned i = Offset * PVD.PerVGPR,
1741 e = std::min((Offset + 1) * PVD.PerVGPR, SB.NumSubRegs);
1742 i < e; ++i) {
1743 Register SubReg =
1744 SB.NumSubRegs == 1
1745 ? SB.SuperReg
1746 : Register(getSubReg(SB.SuperReg, SB.SplitParts[i]));
1747
1748 MachineInstrBuilder WriteLane =
1749 BuildMI(*SB.MBB, MI, SB.DL, SB.TII.get(AMDGPU::V_WRITELANE_B32),
1750 SB.TmpVGPR)
1751 .addReg(SubReg, SubKillState)
1752 .addImm(i % PVD.PerVGPR)
1753 .addReg(SB.TmpVGPR, TmpVGPRFlags);
1754 TmpVGPRFlags = 0;
1755
1756 if (LIS) {
1757 if (i == 0)
1758 LIS->ReplaceMachineInstrInMaps(*MI, *WriteLane);
1759 else
1760 LIS->InsertMachineInstrInMaps(*WriteLane);
1761 }
1762
1763 // There could be undef components of a spilled super register.
1764 // TODO: Can we detect this and skip the spill?
1765 if (SB.NumSubRegs > 1) {
1766 // The last implicit use of the SB.SuperReg carries the "Kill" flag.
1767 unsigned SuperKillState = 0;
1768 if (i + 1 == SB.NumSubRegs)
1769 SuperKillState |= getKillRegState(SB.IsKill);
1770 WriteLane.addReg(SB.SuperReg, RegState::Implicit | SuperKillState);
1771 }
1772 }
1773
1774 // Write out VGPR
1775 SB.readWriteTmpVGPR(Offset, /*IsLoad*/ false);
1776 }
1777
1778 SB.restore();
1779 }
1780
1781 MI->eraseFromParent();
1782 SB.MFI.addToSpilledSGPRs(SB.NumSubRegs);
1783
1784 if (LIS)
1785 LIS->removeAllRegUnitsForPhysReg(SB.SuperReg);
1786
1787 return true;
1788}
1789
1790bool SIRegisterInfo::restoreSGPR(MachineBasicBlock::iterator MI,
1791 int Index,
1792 RegScavenger *RS,
1793 LiveIntervals *LIS,
1794 bool OnlyToVGPR) const {
1795 SGPRSpillBuilder SB(*this, *ST.getInstrInfo(), isWave32, MI, Index, RS);
1796
1797 ArrayRef<SpilledReg> VGPRSpills = SB.MFI.getSGPRToVGPRSpills(Index);
1798 bool SpillToVGPR = !VGPRSpills.empty();
1799 if (OnlyToVGPR && !SpillToVGPR)
1800 return false;
1801
1802 if (SpillToVGPR) {
1803 for (unsigned i = 0, e = SB.NumSubRegs; i < e; ++i) {
1804 Register SubReg =
1805 SB.NumSubRegs == 1
1806 ? SB.SuperReg
1807 : Register(getSubReg(SB.SuperReg, SB.SplitParts[i]));
1808
1809 SpilledReg Spill = VGPRSpills[i];
1810 auto MIB = BuildMI(*SB.MBB, MI, SB.DL, SB.TII.get(AMDGPU::V_READLANE_B32),
1811 SubReg)
1812 .addReg(Spill.VGPR)
1813 .addImm(Spill.Lane);
1814 if (SB.NumSubRegs > 1 && i == 0)
1815 MIB.addReg(SB.SuperReg, RegState::ImplicitDefine);
1816 if (LIS) {
1817 if (i == e - 1)
1818 LIS->ReplaceMachineInstrInMaps(*MI, *MIB);
1819 else
1820 LIS->InsertMachineInstrInMaps(*MIB);
1821 }
1822
1823 }
1824 } else {
1825 SB.prepare();
1826
1827 // Per VGPR helper data
1828 auto PVD = SB.getPerVGPRData();
1829
1830 for (unsigned Offset = 0; Offset < PVD.NumVGPRs; ++Offset) {
1831 // Load in VGPR data
1832 SB.readWriteTmpVGPR(Offset, /*IsLoad*/ true);
1833
1834 // Unpack lanes
1835 for (unsigned i = Offset * PVD.PerVGPR,
1836 e = std::min((Offset + 1) * PVD.PerVGPR, SB.NumSubRegs);
1837 i < e; ++i) {
1838 Register SubReg =
1839 SB.NumSubRegs == 1
1840 ? SB.SuperReg
1841 : Register(getSubReg(SB.SuperReg, SB.SplitParts[i]));
1842
1843 bool LastSubReg = (i + 1 == e);
1844 auto MIB = BuildMI(*SB.MBB, MI, SB.DL,
1845 SB.TII.get(AMDGPU::V_READLANE_B32), SubReg)
1846 .addReg(SB.TmpVGPR, getKillRegState(LastSubReg))
1847 .addImm(i);
1848 if (SB.NumSubRegs > 1 && i == 0)
1849 MIB.addReg(SB.SuperReg, RegState::ImplicitDefine);
1850 if (LIS) {
1851 if (i == e - 1)
1852 LIS->ReplaceMachineInstrInMaps(*MI, *MIB);
1853 else
1854 LIS->InsertMachineInstrInMaps(*MIB);
1855 }
1856 }
1857 }
1858
1859 SB.restore();
1860 }
1861
1862 MI->eraseFromParent();
1863
1864 if (LIS)
1865 LIS->removeAllRegUnitsForPhysReg(SB.SuperReg);
1866
1867 return true;
1868}
1869
1870bool SIRegisterInfo::spillEmergencySGPR(MachineBasicBlock::iterator MI,
1871 MachineBasicBlock &RestoreMBB,
1872 Register SGPR, RegScavenger *RS) const {
1873 SGPRSpillBuilder SB(*this, *ST.getInstrInfo(), isWave32, MI, SGPR, false, 0,
1874 RS);
1875 SB.prepare();
1876 // Generate the spill of SGPR to SB.TmpVGPR.
1877 unsigned SubKillState = getKillRegState((SB.NumSubRegs == 1) && SB.IsKill);
1878 auto PVD = SB.getPerVGPRData();
1879 for (unsigned Offset = 0; Offset < PVD.NumVGPRs; ++Offset) {
1880 unsigned TmpVGPRFlags = RegState::Undef;
1881 // Write sub registers into the VGPR
1882 for (unsigned i = Offset * PVD.PerVGPR,
1883 e = std::min((Offset + 1) * PVD.PerVGPR, SB.NumSubRegs);
1884 i < e; ++i) {
1885 Register SubReg =
1886 SB.NumSubRegs == 1
1887 ? SB.SuperReg
1888 : Register(getSubReg(SB.SuperReg, SB.SplitParts[i]));
1889
1890 MachineInstrBuilder WriteLane =
1891 BuildMI(*SB.MBB, MI, SB.DL, SB.TII.get(AMDGPU::V_WRITELANE_B32),
1892 SB.TmpVGPR)
1893 .addReg(SubReg, SubKillState)
1894 .addImm(i % PVD.PerVGPR)
1895 .addReg(SB.TmpVGPR, TmpVGPRFlags);
1896 TmpVGPRFlags = 0;
1897 // There could be undef components of a spilled super register.
1898 // TODO: Can we detect this and skip the spill?
1899 if (SB.NumSubRegs > 1) {
1900 // The last implicit use of the SB.SuperReg carries the "Kill" flag.
1901 unsigned SuperKillState = 0;
1902 if (i + 1 == SB.NumSubRegs)
1903 SuperKillState |= getKillRegState(SB.IsKill);
1904 WriteLane.addReg(SB.SuperReg, RegState::Implicit | SuperKillState);
1905 }
1906 }
1907 // Don't need to write VGPR out.
1908 }
1909
1910 // Restore clobbered registers in the specified restore block.
1911 MI = RestoreMBB.end();
1912 SB.setMI(&RestoreMBB, MI);
1913 // Generate the restore of SGPR from SB.TmpVGPR.
1914 for (unsigned Offset = 0; Offset < PVD.NumVGPRs; ++Offset) {
1915 // Don't need to load VGPR in.
1916 // Unpack lanes
1917 for (unsigned i = Offset * PVD.PerVGPR,
1918 e = std::min((Offset + 1) * PVD.PerVGPR, SB.NumSubRegs);
1919 i < e; ++i) {
1920 Register SubReg =
1921 SB.NumSubRegs == 1
1922 ? SB.SuperReg
1923 : Register(getSubReg(SB.SuperReg, SB.SplitParts[i]));
1924 bool LastSubReg = (i + 1 == e);
1925 auto MIB = BuildMI(*SB.MBB, MI, SB.DL, SB.TII.get(AMDGPU::V_READLANE_B32),
1926 SubReg)
1927 .addReg(SB.TmpVGPR, getKillRegState(LastSubReg))
1928 .addImm(i);
1929 if (SB.NumSubRegs > 1 && i == 0)
1930 MIB.addReg(SB.SuperReg, RegState::ImplicitDefine);
1931 }
1932 }
1933 SB.restore();
1934
1935 SB.MFI.addToSpilledSGPRs(SB.NumSubRegs);
1936 return false;
1937}
1938
1939/// Special case of eliminateFrameIndex. Returns true if the SGPR was spilled to
1940/// a VGPR and the stack slot can be safely eliminated when all other users are
1941/// handled.
1942bool SIRegisterInfo::eliminateSGPRToVGPRSpillFrameIndex(
1943 MachineBasicBlock::iterator MI,
1944 int FI,
1945 RegScavenger *RS,
1946 LiveIntervals *LIS) const {
1947 switch (MI->getOpcode()) {
1948 case AMDGPU::SI_SPILL_S1024_SAVE:
1949 case AMDGPU::SI_SPILL_S512_SAVE:
1950 case AMDGPU::SI_SPILL_S256_SAVE:
1951 case AMDGPU::SI_SPILL_S224_SAVE:
1952 case AMDGPU::SI_SPILL_S192_SAVE:
1953 case AMDGPU::SI_SPILL_S160_SAVE:
1954 case AMDGPU::SI_SPILL_S128_SAVE:
1955 case AMDGPU::SI_SPILL_S96_SAVE:
1956 case AMDGPU::SI_SPILL_S64_SAVE:
1957 case AMDGPU::SI_SPILL_S32_SAVE:
1958 return spillSGPR(MI, FI, RS, LIS, true);
1959 case AMDGPU::SI_SPILL_S1024_RESTORE:
1960 case AMDGPU::SI_SPILL_S512_RESTORE:
1961 case AMDGPU::SI_SPILL_S256_RESTORE:
1962 case AMDGPU::SI_SPILL_S224_RESTORE:
1963 case AMDGPU::SI_SPILL_S192_RESTORE:
1964 case AMDGPU::SI_SPILL_S160_RESTORE:
1965 case AMDGPU::SI_SPILL_S128_RESTORE:
1966 case AMDGPU::SI_SPILL_S96_RESTORE:
1967 case AMDGPU::SI_SPILL_S64_RESTORE:
1968 case AMDGPU::SI_SPILL_S32_RESTORE:
1969 return restoreSGPR(MI, FI, RS, LIS, true);
1970 default:
1971 llvm_unreachable("not an SGPR spill instruction");
1972 }
1973}
1974
1975void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
1976 int SPAdj, unsigned FIOperandNum,
1977 RegScavenger *RS) const {
1978 MachineFunction *MF = MI->getParent()->getParent();
1979 MachineBasicBlock *MBB = MI->getParent();
1980 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1981 MachineFrameInfo &FrameInfo = MF->getFrameInfo();
1982 const SIInstrInfo *TII = ST.getInstrInfo();
1983 DebugLoc DL = MI->getDebugLoc();
1984
1985 assert(SPAdj == 0 && "unhandled SP adjustment in call sequence?");
1986
1987 MachineOperand &FIOp = MI->getOperand(FIOperandNum);
1988 int Index = MI->getOperand(FIOperandNum).getIndex();
1989
1990 Register FrameReg = FrameInfo.isFixedObjectIndex(Index) && hasBasePointer(*MF)
1991 ? getBaseRegister()
1992 : getFrameRegister(*MF);
1993
1994 switch (MI->getOpcode()) {
1995 // SGPR register spill
1996 case AMDGPU::SI_SPILL_S1024_SAVE:
1997 case AMDGPU::SI_SPILL_S512_SAVE:
1998 case AMDGPU::SI_SPILL_S256_SAVE:
1999 case AMDGPU::SI_SPILL_S224_SAVE:
2000 case AMDGPU::SI_SPILL_S192_SAVE:
2001 case AMDGPU::SI_SPILL_S160_SAVE:
2002 case AMDGPU::SI_SPILL_S128_SAVE:
2003 case AMDGPU::SI_SPILL_S96_SAVE:
2004 case AMDGPU::SI_SPILL_S64_SAVE:
2005 case AMDGPU::SI_SPILL_S32_SAVE: {
2006 spillSGPR(MI, Index, RS);
2007 break;
2008 }
2009
2010 // SGPR register restore
2011 case AMDGPU::SI_SPILL_S1024_RESTORE:
2012 case AMDGPU::SI_SPILL_S512_RESTORE:
2013 case AMDGPU::SI_SPILL_S256_RESTORE:
2014 case AMDGPU::SI_SPILL_S224_RESTORE:
2015 case AMDGPU::SI_SPILL_S192_RESTORE:
2016 case AMDGPU::SI_SPILL_S160_RESTORE:
2017 case AMDGPU::SI_SPILL_S128_RESTORE:
2018 case AMDGPU::SI_SPILL_S96_RESTORE:
2019 case AMDGPU::SI_SPILL_S64_RESTORE:
2020 case AMDGPU::SI_SPILL_S32_RESTORE: {
2021 restoreSGPR(MI, Index, RS);
2022 break;
2023 }
2024
2025 // VGPR register spill
2026 case AMDGPU::SI_SPILL_V1024_SAVE:
2027 case AMDGPU::SI_SPILL_V512_SAVE:
2028 case AMDGPU::SI_SPILL_V256_SAVE:
2029 case AMDGPU::SI_SPILL_V224_SAVE:
2030 case AMDGPU::SI_SPILL_V192_SAVE:
2031 case AMDGPU::SI_SPILL_V160_SAVE:
2032 case AMDGPU::SI_SPILL_V128_SAVE:
2033 case AMDGPU::SI_SPILL_V96_SAVE:
2034 case AMDGPU::SI_SPILL_V64_SAVE:
2035 case AMDGPU::SI_SPILL_V32_SAVE:
2036 case AMDGPU::SI_SPILL_A1024_SAVE:
2037 case AMDGPU::SI_SPILL_A512_SAVE:
2038 case AMDGPU::SI_SPILL_A256_SAVE:
2039 case AMDGPU::SI_SPILL_A224_SAVE:
2040 case AMDGPU::SI_SPILL_A192_SAVE:
2041 case AMDGPU::SI_SPILL_A160_SAVE:
2042 case AMDGPU::SI_SPILL_A128_SAVE:
2043 case AMDGPU::SI_SPILL_A96_SAVE:
2044 case AMDGPU::SI_SPILL_A64_SAVE:
2045 case AMDGPU::SI_SPILL_A32_SAVE:
2046 case AMDGPU::SI_SPILL_AV1024_SAVE:
2047 case AMDGPU::SI_SPILL_AV512_SAVE:
2048 case AMDGPU::SI_SPILL_AV256_SAVE:
2049 case AMDGPU::SI_SPILL_AV224_SAVE:
2050 case AMDGPU::SI_SPILL_AV192_SAVE:
2051 case AMDGPU::SI_SPILL_AV160_SAVE:
2052 case AMDGPU::SI_SPILL_AV128_SAVE:
2053 case AMDGPU::SI_SPILL_AV96_SAVE:
2054 case AMDGPU::SI_SPILL_AV64_SAVE:
2055 case AMDGPU::SI_SPILL_AV32_SAVE: {
2056 const MachineOperand *VData = TII->getNamedOperand(*MI,
2057 AMDGPU::OpName::vdata);
2058 assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg() ==
2059 MFI->getStackPtrOffsetReg());
2060
2061 unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_STORE_DWORD_SADDR
2062 : AMDGPU::BUFFER_STORE_DWORD_OFFSET;
2063 auto *MBB = MI->getParent();
2064 buildSpillLoadStore(
2065 *MBB, MI, DL, Opc, Index, VData->getReg(), VData->isKill(), FrameReg,
2066 TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(),
2067 *MI->memoperands_begin(), RS);
2068 MFI->addToSpilledVGPRs(getNumSubRegsForSpillOp(MI->getOpcode()));
2069 MI->eraseFromParent();
2070 break;
2071 }
2072 case AMDGPU::SI_SPILL_V32_RESTORE:
2073 case AMDGPU::SI_SPILL_V64_RESTORE:
2074 case AMDGPU::SI_SPILL_V96_RESTORE:
2075 case AMDGPU::SI_SPILL_V128_RESTORE:
2076 case AMDGPU::SI_SPILL_V160_RESTORE:
2077 case AMDGPU::SI_SPILL_V192_RESTORE:
2078 case AMDGPU::SI_SPILL_V224_RESTORE:
2079 case AMDGPU::SI_SPILL_V256_RESTORE:
2080 case AMDGPU::SI_SPILL_V512_RESTORE:
2081 case AMDGPU::SI_SPILL_V1024_RESTORE:
2082 case AMDGPU::SI_SPILL_A32_RESTORE:
2083 case AMDGPU::SI_SPILL_A64_RESTORE:
2084 case AMDGPU::SI_SPILL_A96_RESTORE:
2085 case AMDGPU::SI_SPILL_A128_RESTORE:
2086 case AMDGPU::SI_SPILL_A160_RESTORE:
2087 case AMDGPU::SI_SPILL_A192_RESTORE:
2088 case AMDGPU::SI_SPILL_A224_RESTORE:
2089 case AMDGPU::SI_SPILL_A256_RESTORE:
2090 case AMDGPU::SI_SPILL_A512_RESTORE:
2091 case AMDGPU::SI_SPILL_A1024_RESTORE:
2092 case AMDGPU::SI_SPILL_AV32_RESTORE:
2093 case AMDGPU::SI_SPILL_AV64_RESTORE:
2094 case AMDGPU::SI_SPILL_AV96_RESTORE:
2095 case AMDGPU::SI_SPILL_AV128_RESTORE:
2096 case AMDGPU::SI_SPILL_AV160_RESTORE:
2097 case AMDGPU::SI_SPILL_AV192_RESTORE:
2098 case AMDGPU::SI_SPILL_AV224_RESTORE:
2099 case AMDGPU::SI_SPILL_AV256_RESTORE:
2100 case AMDGPU::SI_SPILL_AV512_RESTORE:
2101 case AMDGPU::SI_SPILL_AV1024_RESTORE: {
2102 const MachineOperand *VData = TII->getNamedOperand(*MI,
2103 AMDGPU::OpName::vdata);
2104 assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg() ==
2105 MFI->getStackPtrOffsetReg());
2106
2107 unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_LOAD_DWORD_SADDR
2108 : AMDGPU::BUFFER_LOAD_DWORD_OFFSET;
2109 auto *MBB = MI->getParent();
2110 buildSpillLoadStore(
2111 *MBB, MI, DL, Opc, Index, VData->getReg(), VData->isKill(), FrameReg,
2112 TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(),
2113 *MI->memoperands_begin(), RS);
2114 MI->eraseFromParent();
2115 break;
2116 }
2117
2118 default: {
2119 // Other access to frame index
2120 const DebugLoc &DL = MI->getDebugLoc();
2121
2122 int64_t Offset = FrameInfo.getObjectOffset(Index);
2123 if (ST.enableFlatScratch()) {
2124 if (TII->isFLATScratch(*MI)) {
2125 assert((int16_t)FIOperandNum ==
2126 AMDGPU::getNamedOperandIdx(MI->getOpcode(),
2127 AMDGPU::OpName::saddr));
2128
2129 // The offset is always swizzled, just replace it
2130 if (FrameReg)
2131 FIOp.ChangeToRegister(FrameReg, false);
2132
2133 if (!Offset)
2134 return;
2135
2136 MachineOperand *OffsetOp =
2137 TII->getNamedOperand(*MI, AMDGPU::OpName::offset);
2138 int64_t NewOffset = Offset + OffsetOp->getImm();
2139 if (TII->isLegalFLATOffset(NewOffset, AMDGPUAS::PRIVATE_ADDRESS,
2140 SIInstrFlags::FlatScratch)) {
2141 OffsetOp->setImm(NewOffset);
2142 if (FrameReg)
2143 return;
2144 Offset = 0;
2145 }
2146
2147 if (!Offset) {
2148 unsigned Opc = MI->getOpcode();
2149 int NewOpc = -1;
2150 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr) != -1) {
2151 NewOpc = AMDGPU::getFlatScratchInstSVfromSVS(Opc);
2152 } else if (ST.hasFlatScratchSTMode()) {
2153 // On GFX10 we have ST mode to use no registers for an address.
2154 // Otherwise we need to materialize 0 into an SGPR.
2155 NewOpc = AMDGPU::getFlatScratchInstSTfromSS(Opc);
2156 }
2157
2158 if (NewOpc != -1) {
2159 // removeOperand doesn't fixup tied operand indexes as it goes, so
2160 // it asserts. Untie vdst_in for now and retie them afterwards.
2161 int VDstIn = AMDGPU::getNamedOperandIdx(Opc,
2162 AMDGPU::OpName::vdst_in);
2163 bool TiedVDst = VDstIn != -1 &&
2164 MI->getOperand(VDstIn).isReg() &&
2165 MI->getOperand(VDstIn).isTied();
2166 if (TiedVDst)
2167 MI->untieRegOperand(VDstIn);
2168
2169 MI->removeOperand(
2170 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::saddr));
2171
2172 if (TiedVDst) {
2173 int NewVDst =
2174 AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::vdst);
2175 int NewVDstIn =
2176 AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::vdst_in);
2177 assert(NewVDst != -1 && NewVDstIn != -1 && "Must be tied!");
2178 MI->tieOperands(NewVDst, NewVDstIn);
2179 }
2180 MI->setDesc(TII->get(NewOpc));
2181 return;
2182 }
2183 }
2184 }
2185
2186 if (!FrameReg) {
2187 FIOp.ChangeToImmediate(Offset);
2188 if (TII->isImmOperandLegal(*MI, FIOperandNum, FIOp))
2189 return;
2190 }
2191
2192 // We need to use a register here. Check if we can use an SGPR or need
2193 // a VGPR.
2194 FIOp.ChangeToRegister(AMDGPU::M0, false);
2195 bool UseSGPR = TII->isOperandLegal(*MI, FIOperandNum, &FIOp);
2196
2197 if (!Offset && FrameReg && UseSGPR) {
2198 FIOp.setReg(FrameReg);
2199 return;
2200 }
2201
2202 const TargetRegisterClass *RC = UseSGPR ? &AMDGPU::SReg_32_XM0RegClass
2203 : &AMDGPU::VGPR_32RegClass;
2204
2205 Register TmpReg = RS->scavengeRegister(RC, MI, 0, !UseSGPR);
2206 FIOp.setReg(TmpReg);
2207 FIOp.setIsKill(true);
2208
2209 if ((!FrameReg || !Offset) && TmpReg) {
2210 unsigned Opc = UseSGPR ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
2211 auto MIB = BuildMI(*MBB, MI, DL, TII->get(Opc), TmpReg);
2212 if (FrameReg)
2213 MIB.addReg(FrameReg);
2214 else
2215 MIB.addImm(Offset);
2216
2217 return;
2218 }
2219
2220 Register TmpSReg =
2221 UseSGPR ? TmpReg
2222 : RS->scavengeRegister(&AMDGPU::SReg_32_XM0RegClass, MI, 0,
2223 !UseSGPR);
2224
2225 // TODO: for flat scratch another attempt can be made with a VGPR index
2226 // if no SGPRs can be scavenged.
2227 if ((!TmpSReg && !FrameReg) || (!TmpReg && !UseSGPR))
2228 report_fatal_error("Cannot scavenge register in FI elimination!");
2229
2230 if (!TmpSReg) {
2231 // Use frame register and restore it after.
2232 TmpSReg = FrameReg;
2233 FIOp.setReg(FrameReg);
2234 FIOp.setIsKill(false);
2235 }
2236
2237 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_I32), TmpSReg)
2238 .addReg(FrameReg)
2239 .addImm(Offset);
2240
2241 if (!UseSGPR)
2242 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
2243 .addReg(TmpSReg, RegState::Kill);
2244
2245 if (TmpSReg == FrameReg) {
2246 // Undo frame register modification.
2247 BuildMI(*MBB, std::next(MI), DL, TII->get(AMDGPU::S_ADD_I32),
2248 FrameReg)
2249 .addReg(FrameReg)
2250 .addImm(-Offset);
2251 }
2252
2253 return;
2254 }
2255
2256 bool IsMUBUF = TII->isMUBUF(*MI);
2257
2258 if (!IsMUBUF && !MFI->isEntryFunction()) {
2259 // Convert to a swizzled stack address by scaling by the wave size.
2260 // In an entry function/kernel the offset is already swizzled.
2261 bool IsSALU = isSGPRClass(TII->getOpRegClass(*MI, FIOperandNum));
2262 bool LiveSCC = RS->isRegUsed(AMDGPU::SCC);
2263 const TargetRegisterClass *RC = IsSALU && !LiveSCC
2264 ? &AMDGPU::SReg_32RegClass
2265 : &AMDGPU::VGPR_32RegClass;
2266 bool IsCopy = MI->getOpcode() == AMDGPU::V_MOV_B32_e32 ||
2267 MI->getOpcode() == AMDGPU::V_MOV_B32_e64;
2268 Register ResultReg = IsCopy ? MI->getOperand(0).getReg()
2269 : RS->scavengeRegister(RC, MI, 0);
2270
2271 int64_t Offset = FrameInfo.getObjectOffset(Index);
2272 if (Offset == 0) {
2273 unsigned OpCode = IsSALU && !LiveSCC ? AMDGPU::S_LSHR_B32
2274 : AMDGPU::V_LSHRREV_B32_e64;
2275 // XXX - This never happens because of emergency scavenging slot at 0?
2276 auto Shift = BuildMI(*MBB, MI, DL, TII->get(OpCode), ResultReg)
2277 .addImm(ST.getWavefrontSizeLog2())
2278 .addReg(FrameReg);
2279 if (IsSALU && !LiveSCC)
2280 Shift.getInstr()->getOperand(3).setIsDead(
2281 true); // Mark SCC as dead.
2282 if (IsSALU && LiveSCC) {
2283 Register NewDest =
2284 RS->scavengeRegister(&AMDGPU::SReg_32RegClass, Shift, 0);
2285 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32),
2286 NewDest)
2287 .addReg(ResultReg);
2288 ResultReg = NewDest;
2289 }
2290 } else {
2291 MachineInstrBuilder MIB;
2292 if (!IsSALU) {
2293 if ((MIB = TII->getAddNoCarry(*MBB, MI, DL, ResultReg, *RS)) !=
2294 nullptr) {
2295 // Reuse ResultReg in intermediate step.
2296 Register ScaledReg = ResultReg;
2297
2298 BuildMI(*MBB, *MIB, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64),
2299 ScaledReg)
2300 .addImm(ST.getWavefrontSizeLog2())
2301 .addReg(FrameReg);
2302
2303 const bool IsVOP2 = MIB->getOpcode() == AMDGPU::V_ADD_U32_e32;
2304
2305 // TODO: Fold if use instruction is another add of a constant.
2306 if (IsVOP2 || AMDGPU::isInlinableLiteral32(Offset, ST.hasInv2PiInlineImm())) {
2307 // FIXME: This can fail
2308 MIB.addImm(Offset);
2309 MIB.addReg(ScaledReg, RegState::Kill);
2310 if (!IsVOP2)
2311 MIB.addImm(0); // clamp bit
2312 } else {
2313 assert(MIB->getOpcode() == AMDGPU::V_ADD_CO_U32_e64 &&
2314 "Need to reuse carry out register");
2315
2316 // Use scavenged unused carry out as offset register.
2317 Register ConstOffsetReg;
2318 if (!isWave32)
2319 ConstOffsetReg = getSubReg(MIB.getReg(1), AMDGPU::sub0);
2320 else
2321 ConstOffsetReg = MIB.getReg(1);
2322
2323 BuildMI(*MBB, *MIB, DL, TII->get(AMDGPU::S_MOV_B32), ConstOffsetReg)
2324 .addImm(Offset);
2325 MIB.addReg(ConstOffsetReg, RegState::Kill);
2326 MIB.addReg(ScaledReg, RegState::Kill);
2327 MIB.addImm(0); // clamp bit
2328 }
2329 }
2330 }
2331 if (!MIB || IsSALU) {
2332 // We have to produce a carry out, and there isn't a free SGPR pair
2333 // for it. We can keep the whole computation on the SALU to avoid
2334 // clobbering an additional register at the cost of an extra mov.
2335
2336 // We may have 1 free scratch SGPR even though a carry out is
2337 // unavailable. Only one additional mov is needed.
2338 Register TmpScaledReg =
2339 RS->scavengeRegister(&AMDGPU::SReg_32_XM0RegClass, MI, 0, false);
2340 Register ScaledReg = TmpScaledReg.isValid() ? TmpScaledReg : FrameReg;
2341
2342 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_LSHR_B32), ScaledReg)
2343 .addReg(FrameReg)
2344 .addImm(ST.getWavefrontSizeLog2());
2345 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_I32), ScaledReg)
2346 .addReg(ScaledReg, RegState::Kill)
2347 .addImm(Offset);
2348 if (!IsSALU)
2349 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), ResultReg)
2350 .addReg(ScaledReg, RegState::Kill);
2351 else
2352 ResultReg = ScaledReg;
2353
2354 // If there were truly no free SGPRs, we need to undo everything.
2355 if (!TmpScaledReg.isValid()) {
2356 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_I32), ScaledReg)
2357 .addReg(ScaledReg, RegState::Kill)
2358 .addImm(-Offset);
2359 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_LSHL_B32), ScaledReg)
2360 .addReg(FrameReg)
2361 .addImm(ST.getWavefrontSizeLog2());
2362 }
2363 }
2364 }
2365
2366 // Don't introduce an extra copy if we're just materializing in a mov.
2367 if (IsCopy)
2368 MI->eraseFromParent();
2369 else
2370 FIOp.ChangeToRegister(ResultReg, false, false, true);
2371 return;
2372 }
2373
2374 if (IsMUBUF) {
2375 // Disable offen so we don't need a 0 vgpr base.
2376 assert(static_cast<int>(FIOperandNum) ==
2377 AMDGPU::getNamedOperandIdx(MI->getOpcode(),
2378 AMDGPU::OpName::vaddr));
2379
2380 auto &SOffset = *TII->getNamedOperand(*MI, AMDGPU::OpName::soffset);
2381 assert((SOffset.isImm() && SOffset.getImm() == 0));
2382
2383 if (FrameReg != AMDGPU::NoRegister)
2384 SOffset.ChangeToRegister(FrameReg, false);
2385
2386 int64_t Offset = FrameInfo.getObjectOffset(Index);
2387 int64_t OldImm
2388 = TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm();
2389 int64_t NewOffset = OldImm + Offset;
2390
2391 if (SIInstrInfo::isLegalMUBUFImmOffset(NewOffset) &&
2392 buildMUBUFOffsetLoadStore(ST, FrameInfo, MI, Index, NewOffset)) {
2393 MI->eraseFromParent();
2394 return;
2395 }
2396 }
2397
2398 // If the offset is simply too big, don't convert to a scratch wave offset
2399 // relative index.
2400
2401 FIOp.ChangeToImmediate(Offset);
2402 if (!TII->isImmOperandLegal(*MI, FIOperandNum, FIOp)) {
2403 Register TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
2404 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
2405 .addImm(Offset);
2406 FIOp.ChangeToRegister(TmpReg, false, false, true);
2407 }
2408 }
2409 }
2410}
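
The block above finishes frame-index elimination: the frame register is shifted right by log2 of the wavefront size (S_LSHR_B32 or V_LSHRREV_B32) and the object's frame offset is then added (S_ADD_I32 or the no-carry VALU add). A minimal standalone sketch of just that arithmetic, using a hypothetical scratchOffset helper rather than the LLVM API:

#include <cstdio>

// Hypothetical helper, not LLVM code: the address math performed by the
// shift-then-add sequence in eliminateFrameIndex above.
static long long scratchOffset(long long FrameReg, unsigned WavefrontSizeLog2,
                               long long ObjectOffset) {
  return (FrameReg >> WavefrontSizeLog2) + ObjectOffset;
}

int main() {
  // Wave64 (log2 = 6): frame register value 4096, object at offset +16.
  std::printf("%lld\n", scratchOffset(4096, 6, 16)); // prints 80
}
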
2411
2412StringRef SIRegisterInfo::getRegAsmName(MCRegister Reg) const {
2413 return AMDGPUInstPrinter::getRegisterName(Reg);
2414}
2415
2416static const TargetRegisterClass *
2417getAnyVGPRClassForBitWidth(unsigned BitWidth) {
2418 if (BitWidth <= 64)
2419 return &AMDGPU::VReg_64RegClass;
2420 if (BitWidth <= 96)
2421 return &AMDGPU::VReg_96RegClass;
2422 if (BitWidth <= 128)
2423 return &AMDGPU::VReg_128RegClass;
2424 if (BitWidth <= 160)
2425 return &AMDGPU::VReg_160RegClass;
2426 if (BitWidth <= 192)
2427 return &AMDGPU::VReg_192RegClass;
2428 if (BitWidth <= 224)
2429 return &AMDGPU::VReg_224RegClass;
2430 if (BitWidth <= 256)
2431 return &AMDGPU::VReg_256RegClass;
2432 if (BitWidth <= 512)
2433 return &AMDGPU::VReg_512RegClass;
2434 if (BitWidth <= 1024)
2435 return &AMDGPU::VReg_1024RegClass;
2436
2437 return nullptr;
2438}
2439
2440static const TargetRegisterClass *
2441getAlignedVGPRClassForBitWidth(unsigned BitWidth) {
2442 if (BitWidth <= 64)
2443 return &AMDGPU::VReg_64_Align2RegClass;
2444 if (BitWidth <= 96)
2445 return &AMDGPU::VReg_96_Align2RegClass;
2446 if (BitWidth <= 128)
2447 return &AMDGPU::VReg_128_Align2RegClass;
2448 if (BitWidth <= 160)
2449 return &AMDGPU::VReg_160_Align2RegClass;
2450 if (BitWidth <= 192)
2451 return &AMDGPU::VReg_192_Align2RegClass;
2452 if (BitWidth <= 224)
2453 return &AMDGPU::VReg_224_Align2RegClass;
2454 if (BitWidth <= 256)
2455 return &AMDGPU::VReg_256_Align2RegClass;
2456 if (BitWidth <= 512)
2457 return &AMDGPU::VReg_512_Align2RegClass;
2458 if (BitWidth <= 1024)
2459 return &AMDGPU::VReg_1024_Align2RegClass;
2460
2461 return nullptr;
2462}
2463
2464const TargetRegisterClass *
2465SIRegisterInfo::getVGPRClassForBitWidth(unsigned BitWidth) const {
2466 if (BitWidth == 1)
2467 return &AMDGPU::VReg_1RegClass;
2468 if (BitWidth <= 16)
2469 return &AMDGPU::VGPR_LO16RegClass;
2470 if (BitWidth <= 32)
2471 return &AMDGPU::VGPR_32RegClass;
2472 return ST.needsAlignedVGPRs() ? getAlignedVGPRClassForBitWidth(BitWidth)
2473 : getAnyVGPRClassForBitWidth(BitWidth);
2474}
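
The bit-width helpers above all follow the same pattern: the requested width is rounded up to the next tuple size that this snapshot actually defines (there is no class between 256 and 512 bits, so anything in that range falls into the 512-bit bucket), and widths beyond 1024 yield nullptr. A minimal standalone sketch of that bucketing, with a hypothetical roundToVGPRTupleBits helper instead of the LLVM API:

#include <array>
#include <cstdio>

// Hypothetical helper, not part of SIRegisterInfo: round a bit width up to
// the nearest tuple size handled by the if-chains above (0 if none fits).
static unsigned roundToVGPRTupleBits(unsigned BitWidth) {
  constexpr std::array<unsigned, 10> Buckets = {32,  64,  96,  128, 160,
                                                192, 224, 256, 512, 1024};
  for (unsigned B : Buckets)
    if (BitWidth <= B)
      return B;
  return 0; // Too wide: getVGPRClassForBitWidth would return nullptr.
}

int main() {
  std::printf("%u\n", roundToVGPRTupleBits(112));  // prints 128 (VReg_128)
  std::printf("%u\n", roundToVGPRTupleBits(2048)); // prints 0 (no class)
}
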
2475
2476static const TargetRegisterClass *
2477getAnyAGPRClassForBitWidth(unsigned BitWidth) {
2478 if (BitWidth <= 64)
2479 return &AMDGPU::AReg_64RegClass;
2480 if (BitWidth <= 96)
2481 return &AMDGPU::AReg_96RegClass;
2482 if (BitWidth <= 128)
2483 return &AMDGPU::AReg_128RegClass;
2484 if (BitWidth <= 160)
2485 return &AMDGPU::AReg_160RegClass;
2486 if (BitWidth <= 192)
2487 return &AMDGPU::AReg_192RegClass;
2488 if (BitWidth <= 224)
2489 return &AMDGPU::AReg_224RegClass;
2490 if (BitWidth <= 256)
2491 return &AMDGPU::AReg_256RegClass;
2492 if (BitWidth <= 512)
2493 return &AMDGPU::AReg_512RegClass;
2494 if (BitWidth <= 1024)
2495 return &AMDGPU::AReg_1024RegClass;
2496
2497 return nullptr;
2498}
2499
2500static const TargetRegisterClass *
2501getAlignedAGPRClassForBitWidth(unsigned BitWidth) {
2502 if (BitWidth <= 64)
2503 return &AMDGPU::AReg_64_Align2RegClass;
2504 if (BitWidth <= 96)
2505 return &AMDGPU::AReg_96_Align2RegClass;
2506 if (BitWidth <= 128)
2507 return &AMDGPU::AReg_128_Align2RegClass;
2508 if (BitWidth <= 160)
2509 return &AMDGPU::AReg_160_Align2RegClass;
2510 if (BitWidth <= 192)
2511 return &AMDGPU::AReg_192_Align2RegClass;
2512 if (BitWidth <= 224)
2513 return &AMDGPU::AReg_224_Align2RegClass;
2514 if (BitWidth <= 256)
2515 return &AMDGPU::AReg_256_Align2RegClass;
2516 if (BitWidth <= 512)
2517 return &AMDGPU::AReg_512_Align2RegClass;
2518 if (BitWidth <= 1024)
2519 return &AMDGPU::AReg_1024_Align2RegClass;
2520
2521 return nullptr;
2522}
2523
2524const TargetRegisterClass *
2525SIRegisterInfo::getAGPRClassForBitWidth(unsigned BitWidth) const {
2526 if (BitWidth <= 16)
2527 return &AMDGPU::AGPR_LO16RegClass;
2528 if (BitWidth <= 32)
2529 return &AMDGPU::AGPR_32RegClass;
2530 return ST.needsAlignedVGPRs() ? getAlignedAGPRClassForBitWidth(BitWidth)
2531 : getAnyAGPRClassForBitWidth(BitWidth);
2532}
2533
2534static const TargetRegisterClass *
2535getAnyVectorSuperClassForBitWidth(unsigned BitWidth) {
2536 if (BitWidth <= 64)
2537 return &AMDGPU::AV_64RegClass;
2538 if (BitWidth <= 96)
2539 return &AMDGPU::AV_96RegClass;
2540 if (BitWidth <= 128)
2541 return &AMDGPU::AV_128RegClass;
2542 if (BitWidth <= 160)
2543 return &AMDGPU::AV_160RegClass;
2544 if (BitWidth <= 192)
2545 return &AMDGPU::AV_192RegClass;
2546 if (BitWidth <= 224)
2547 return &AMDGPU::AV_224RegClass;
2548 if (BitWidth <= 256)
2549 return &AMDGPU::AV_256RegClass;
2550 if (BitWidth <= 512)
2551 return &AMDGPU::AV_512RegClass;
2552 if (BitWidth <= 1024)
2553 return &AMDGPU::AV_1024RegClass;
2554
2555 return nullptr;
2556}
2557
2558static const TargetRegisterClass *
2559getAlignedVectorSuperClassForBitWidth(unsigned BitWidth) {
2560 if (BitWidth <= 64)
2561 return &AMDGPU::AV_64_Align2RegClass;
2562 if (BitWidth <= 96)
2563 return &AMDGPU::AV_96_Align2RegClass;
2564 if (BitWidth <= 128)
2565 return &AMDGPU::AV_128_Align2RegClass;
2566 if (BitWidth <= 160)
2567 return &AMDGPU::AV_160_Align2RegClass;
2568 if (BitWidth <= 192)
2569 return &AMDGPU::AV_192_Align2RegClass;
2570 if (BitWidth <= 224)
2571 return &AMDGPU::AV_224_Align2RegClass;
2572 if (BitWidth <= 256)
2573 return &AMDGPU::AV_256_Align2RegClass;
2574 if (BitWidth <= 512)
2575 return &AMDGPU::AV_512_Align2RegClass;
2576 if (BitWidth <= 1024)
2577 return &AMDGPU::AV_1024_Align2RegClass;
2578
2579 return nullptr;
2580}
2581
2582const TargetRegisterClass *
2583SIRegisterInfo::getVectorSuperClassForBitWidth(unsigned BitWidth) const {
2584 if (BitWidth <= 16)
2585 return &AMDGPU::VGPR_LO16RegClass;
2586 if (BitWidth <= 32)
2587 return &AMDGPU::AV_32RegClass;
2588 return ST.needsAlignedVGPRs()
2589 ? getAlignedVectorSuperClassForBitWidth(BitWidth)
2590 : getAnyVectorSuperClassForBitWidth(BitWidth);
2591}
2592
2593const TargetRegisterClass *
2594SIRegisterInfo::getSGPRClassForBitWidth(unsigned BitWidth) {
2595 if (BitWidth <= 16)
2596 return &AMDGPU::SGPR_LO16RegClass;
2597 if (BitWidth <= 32)
2598 return &AMDGPU::SReg_32RegClass;
2599 if (BitWidth <= 64)
2600 return &AMDGPU::SReg_64RegClass;
2601 if (BitWidth <= 96)
2602 return &AMDGPU::SGPR_96RegClass;
2603 if (BitWidth <= 128)
2604 return &AMDGPU::SGPR_128RegClass;
2605 if (BitWidth <= 160)
2606 return &AMDGPU::SGPR_160RegClass;
2607 if (BitWidth <= 192)
2608 return &AMDGPU::SGPR_192RegClass;
2609 if (BitWidth <= 224)
2610 return &AMDGPU::SGPR_224RegClass;
2611 if (BitWidth <= 256)
2612 return &AMDGPU::SGPR_256RegClass;
2613 if (BitWidth <= 512)
2614 return &AMDGPU::SGPR_512RegClass;
2615 if (BitWidth <= 1024)
2616 return &AMDGPU::SGPR_1024RegClass;
2617
2618 return nullptr;
2619}
2620
2621// FIXME: This is very slow. It might be worth creating a map from physreg to
2622// register class.
2623const TargetRegisterClass *
2624SIRegisterInfo::getPhysRegClass(MCRegister Reg) const {
2625 static const TargetRegisterClass *const BaseClasses[] = {
2626 &AMDGPU::VGPR_LO16RegClass,
2627 &AMDGPU::VGPR_HI16RegClass,
2628 &AMDGPU::SReg_LO16RegClass,
2629 &AMDGPU::AGPR_LO16RegClass,
2630 &AMDGPU::VGPR_32RegClass,
2631 &AMDGPU::SReg_32RegClass,
2632 &AMDGPU::AGPR_32RegClass,
2633 &AMDGPU::AGPR_32RegClass,
2634 &AMDGPU::VReg_64_Align2RegClass,
2635 &AMDGPU::VReg_64RegClass,
2636 &AMDGPU::SReg_64RegClass,
2637 &AMDGPU::AReg_64_Align2RegClass,
2638 &AMDGPU::AReg_64RegClass,
2639 &AMDGPU::VReg_96_Align2RegClass,
2640 &AMDGPU::VReg_96RegClass,
2641 &AMDGPU::SReg_96RegClass,
2642 &AMDGPU::AReg_96_Align2RegClass,
2643 &AMDGPU::AReg_96RegClass,
2644 &AMDGPU::VReg_128_Align2RegClass,
2645 &AMDGPU::VReg_128RegClass,
2646 &AMDGPU::SReg_128RegClass,
2647 &AMDGPU::AReg_128_Align2RegClass,
2648 &AMDGPU::AReg_128RegClass,
2649 &AMDGPU::VReg_160_Align2RegClass,
2650 &AMDGPU::VReg_160RegClass,
2651 &AMDGPU::SReg_160RegClass,
2652 &AMDGPU::AReg_160_Align2RegClass,
2653 &AMDGPU::AReg_160RegClass,
2654 &AMDGPU::VReg_192_Align2RegClass,
2655 &AMDGPU::VReg_192RegClass,
2656 &AMDGPU::SReg_192RegClass,
2657 &AMDGPU::AReg_192_Align2RegClass,
2658 &AMDGPU::AReg_192RegClass,
2659 &AMDGPU::VReg_224_Align2RegClass,
2660 &AMDGPU::VReg_224RegClass,
2661 &AMDGPU::SReg_224RegClass,
2662 &AMDGPU::AReg_224_Align2RegClass,
2663 &AMDGPU::AReg_224RegClass,
2664 &AMDGPU::VReg_256_Align2RegClass,
2665 &AMDGPU::VReg_256RegClass,
2666 &AMDGPU::SReg_256RegClass,
2667 &AMDGPU::AReg_256_Align2RegClass,
2668 &AMDGPU::AReg_256RegClass,
2669 &AMDGPU::VReg_512_Align2RegClass,
2670 &AMDGPU::VReg_512RegClass,
2671 &AMDGPU::SReg_512RegClass,
2672 &AMDGPU::AReg_512_Align2RegClass,
2673 &AMDGPU::AReg_512RegClass,
2674 &AMDGPU::SReg_1024RegClass,
2675 &AMDGPU::VReg_1024_Align2RegClass,
2676 &AMDGPU::VReg_1024RegClass,
2677 &AMDGPU::AReg_1024_Align2RegClass,
2678 &AMDGPU::AReg_1024RegClass,
2679 &AMDGPU::SCC_CLASSRegClass,
2680 &AMDGPU::Pseudo_SReg_32RegClass,
2681 &AMDGPU::Pseudo_SReg_128RegClass,
2682 };
2683
2684 for (const TargetRegisterClass *BaseClass : BaseClasses) {
2685 if (BaseClass->contains(Reg)) {
2686 return BaseClass;
2687 }
2688 }
2689 return nullptr;
2690}
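
The FIXME above suggests replacing the linear scan over BaseClasses with a physreg-to-class map. A minimal sketch of that idea under the same assumption, using hypothetical names (PhysRegClassCache, lookup) and plain integers instead of the LLVM types:

#include <cstdint>
#include <cstdio>
#include <unordered_map>

// Hypothetical memoization of the base-class scan above, not LLVM code:
// repeated queries for the same physical register skip the linear loop.
struct PhysRegClassCache {
  std::unordered_map<uint32_t, int> Cache; // physreg -> base class id

  template <typename ScanFn> int lookup(uint32_t Reg, ScanFn Scan) {
    auto It = Cache.find(Reg);
    if (It != Cache.end())
      return It->second;     // cache hit: no scan
    int ClassId = Scan(Reg); // cache miss: run the scan once and remember it
    Cache.emplace(Reg, ClassId);
    return ClassId;
  }
};

int main() {
  PhysRegClassCache C;
  auto Scan = [](uint32_t Reg) { return Reg < 256 ? 0 : 1; }; // stand-in scan
  std::printf("%d\n", C.lookup(5, Scan));  // scans, prints 0
  std::printf("%d\n", C.lookup(5, Scan));  // cached, prints 0
}
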
2691
2692bool SIRegisterInfo::isSGPRReg(const MachineRegisterInfo &MRI,
2693 Register Reg) const {
2694 const TargetRegisterClass *RC;
2695 if (Reg.isVirtual())
2696 RC = MRI.getRegClass(Reg);
2697 else
2698 RC = getPhysRegClass(Reg);
2699 return RC ? isSGPRClass(RC) : false;
2700}
2701
2702const TargetRegisterClass *
2703SIRegisterInfo::getEquivalentVGPRClass(const TargetRegisterClass *SRC) const {
2704 unsigned Size = getRegSizeInBits(*SRC);
2705 const TargetRegisterClass *VRC = getVGPRClassForBitWidth(Size);
2706 assert(VRC && "Invalid register class size");
2707 return VRC;
2708}
2709
2710const TargetRegisterClass *
2711SIRegisterInfo::getEquivalentAGPRClass(const TargetRegisterClass *SRC) const {
2712 unsigned Size = getRegSizeInBits(*SRC);
2713 const TargetRegisterClass *ARC = getAGPRClassForBitWidth(Size);
2714 assert(ARC && "Invalid register class size");
2715 return ARC;
2716}
2717
2718const TargetRegisterClass *
2719SIRegisterInfo::getEquivalentSGPRClass(const TargetRegisterClass *VRC) const {
2720 unsigned Size = getRegSizeInBits(*VRC);
2721 if (Size == 32)
2722 return &AMDGPU::SGPR_32RegClass;
2723 const TargetRegisterClass *SRC = getSGPRClassForBitWidth(Size);
2724 assert(SRC && "Invalid register class size");
2725 return SRC;
2726}
2727
2728const TargetRegisterClass *
2729SIRegisterInfo::getCompatibleSubRegClass(const TargetRegisterClass *SuperRC,
2730 const TargetRegisterClass *SubRC,
2731 unsigned SubIdx) const {
2732 // Ensure this subregister index is aligned in the super register.
2733 const TargetRegisterClass *MatchRC =
2734 getMatchingSuperRegClass(SuperRC, SubRC, SubIdx);
2735 return MatchRC && MatchRC->hasSubClassEq(SuperRC) ? MatchRC : nullptr;
2736}
2737
2738bool SIRegisterInfo::opCanUseInlineConstant(unsigned OpType) const {
2739 if (OpType >= AMDGPU::OPERAND_REG_INLINE_AC_FIRST &&
2740 OpType <= AMDGPU::OPERAND_REG_INLINE_AC_LAST)
2741 return !ST.hasMFMAInlineLiteralBug();
2742
2743 return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
2744 OpType <= AMDGPU::OPERAND_SRC_LAST;
2745}
2746
2747bool SIRegisterInfo::shouldRewriteCopySrc(
2748 const TargetRegisterClass *DefRC,
2749 unsigned DefSubReg,
2750 const TargetRegisterClass *SrcRC,
2751 unsigned SrcSubReg) const {
2752 // We want to prefer the smallest register class possible, so we don't want to
2753 // stop and rewrite on anything that looks like a subregister
2754 // extract. Operations mostly don't care about the super register class, so we
2755 // only want to stop on the most basic of copies between the same register
2756 // class.
2757 //
2758 // e.g. if we have something like
2759 // %0 = ...
2760 // %1 = ...
2761 // %2 = REG_SEQUENCE %0, sub0, %1, sub1, %2, sub2
2762 // %3 = COPY %2, sub0
2763 //
2764 // We want to look through the COPY to find:
2765 // => %3 = COPY %0
2766
2767 // Plain copy.
2768 return getCommonSubClass(DefRC, SrcRC) != nullptr;
2769}
2770
2771bool SIRegisterInfo::opCanUseLiteralConstant(unsigned OpType) const {
2772 // TODO: 64-bit operands have extending behavior from 32-bit literal.
2773 return OpType >= AMDGPU::OPERAND_REG_IMM_FIRST &&
2774 OpType <= AMDGPU::OPERAND_REG_IMM_LAST;
2775}
2776
2777/// Returns the lowest-numbered register that is not used at any point in the
2778/// function. If all registers are used, this function returns
2779/// AMDGPU::NoRegister. If \p ReserveHighestVGPR = true, the highest unused
2780/// register is returned instead.
2781MCRegister SIRegisterInfo::findUnusedRegister(const MachineRegisterInfo &MRI,
2782 const TargetRegisterClass *RC,
2783 const MachineFunction &MF,
2784 bool ReserveHighestVGPR) const {
2785 if (ReserveHighestVGPR) {
2786 for (MCRegister Reg : reverse(*RC))
2787 if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg))
2788 return Reg;
2789 } else {
2790 for (MCRegister Reg : *RC)
2791 if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg))
2792 return Reg;
2793 }
2794 return MCRegister();
2795}
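
As the comment above describes, the search order is the only thing ReserveHighestVGPR changes: candidates are walked forward by default and in reverse when the highest free register is wanted. A standalone sketch of that search over a plain used-bitmap (hypothetical findUnused helper, not the LLVM API):

#include <cstdio>
#include <vector>

// Hypothetical helper, not LLVM code: return the index of the first unused
// slot, scanning from the low end or the high end; -1 when everything is used
// (the real function returns MCRegister() in that case).
static int findUnused(const std::vector<bool> &Used, bool FromHighest) {
  int N = static_cast<int>(Used.size());
  if (FromHighest) {
    for (int I = N - 1; I >= 0; --I)
      if (!Used[I])
        return I;
  } else {
    for (int I = 0; I < N; ++I)
      if (!Used[I])
        return I;
  }
  return -1;
}

int main() {
  std::vector<bool> Used = {true, false, true, false};
  std::printf("%d %d\n", findUnused(Used, false), findUnused(Used, true)); // 1 3
}
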
2796
2797ArrayRef<int16_t> SIRegisterInfo::getRegSplitParts(const TargetRegisterClass *RC,
2798 unsigned EltSize) const {
2799 const unsigned RegBitWidth = AMDGPU::getRegBitWidth(*RC->MC);
2800 assert(RegBitWidth >= 32 && RegBitWidth <= 1024);
2801
2802 const unsigned RegDWORDs = RegBitWidth / 32;
2803 const unsigned EltDWORDs = EltSize / 4;
2804 assert(RegSplitParts.size() + 1 >= EltDWORDs);
2805
2806 const std::vector<int16_t> &Parts = RegSplitParts[EltDWORDs - 1];
2807 const unsigned NumParts = RegDWORDs / EltDWORDs;
2808
2809 return makeArrayRef(Parts.data(), NumParts);
2810}
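
getRegSplitParts reduces to dword arithmetic: both the register width and the element size are converted to 32-bit dwords, the precomputed RegSplitParts table is indexed by the element size, and the part count is the quotient of the two. A minimal sketch of just that arithmetic (hypothetical numSplitParts helper, not the LLVM API):

#include <cstdio>

// Hypothetical helper, not LLVM code: how many EltSize-byte pieces a
// register of RegBitWidth bits splits into, as computed above.
static unsigned numSplitParts(unsigned RegBitWidth, unsigned EltSizeBytes) {
  unsigned RegDWORDs = RegBitWidth / 32; // register size in 32-bit dwords
  unsigned EltDWORDs = EltSizeBytes / 4; // element size in 32-bit dwords
  return RegDWORDs / EltDWORDs;
}

int main() {
  // A 256-bit register split into 8-byte (64-bit) elements gives 4 parts,
  // i.e. the sub0_sub1, sub2_sub3, ... subregister indices.
  std::printf("%u\n", numSplitParts(256, 8)); // prints 4
}
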
2811
2812const TargetRegisterClass*
2813SIRegisterInfo::getRegClassForReg(const MachineRegisterInfo &MRI,
2814 Register Reg) const {
2815 return Reg.isVirtual() ? MRI.getRegClass(Reg) : getPhysRegClass(Reg);
2816}
2817
2818const TargetRegisterClass *
2819SIRegisterInfo::getRegClassForOperandReg(const MachineRegisterInfo &MRI,
2820 const MachineOperand &MO) const {
2821 const TargetRegisterClass *SrcRC = getRegClassForReg(MRI, MO.getReg());
2822 return getSubRegisterClass(SrcRC, MO.getSubReg());
2823}
2824
2825bool SIRegisterInfo::isVGPR(const MachineRegisterInfo &MRI,
2826 Register Reg) const {
2827 const TargetRegisterClass *RC = getRegClassForReg(MRI, Reg);
2828 // Registers without classes are unaddressable, SGPR-like registers.
2829 return RC && isVGPRClass(RC);
2830}
2831
2832bool SIRegisterInfo::isAGPR(const MachineRegisterInfo &MRI,
2833 Register Reg) const {
2834 const TargetRegisterClass *RC = getRegClassForReg(MRI, Reg);
2835
2836 // Registers without classes are unaddressable, SGPR-like registers.
2837 return RC && isAGPRClass(RC);
2838}
2839
2840bool SIRegisterInfo::shouldCoalesce(MachineInstr *MI,
2841 const TargetRegisterClass *SrcRC,
2842 unsigned SubReg,
2843 const TargetRegisterClass *DstRC,
2844 unsigned DstSubReg,
2845 const TargetRegisterClass *NewRC,
2846 LiveIntervals &LIS) const {
2847 unsigned SrcSize = getRegSizeInBits(*SrcRC);
2848 unsigned DstSize = getRegSizeInBits(*DstRC);
2849 unsigned NewSize = getRegSizeInBits(*NewRC);
2850
2851 // Do not increase the size of registers beyond a dword; we would need to
2852 // allocate adjacent registers and constrain regalloc more than needed.
2853
2854 // Always allow dword coalescing.
2855 if (SrcSize <= 32 || DstSize <= 32)
2856 return true;
2857
2858 return NewSize <= DstSize || NewSize <= SrcSize;
2859}
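
The coalescing check above is a pure size policy: dword-sized copies always coalesce, and anything larger may coalesce only if the merged class is no wider than one of the two originals, so register allocation is never forced to find a larger block of adjacent registers. A standalone sketch of that predicate (hypothetical allowCoalesce helper, not the LLVM API):

#include <cstdio>

// Hypothetical restatement of the policy in shouldCoalesce above.
static bool allowCoalesce(unsigned SrcSize, unsigned DstSize,
                          unsigned NewSize) {
  if (SrcSize <= 32 || DstSize <= 32)
    return true; // always allow dword coalescing
  return NewSize <= DstSize || NewSize <= SrcSize;
}

int main() {
  std::printf("%d\n", allowCoalesce(32, 64, 64));   // 1: dword input
  std::printf("%d\n", allowCoalesce(64, 64, 128));  // 0: result would grow
  std::printf("%d\n", allowCoalesce(128, 64, 128)); // 1: no growth beyond src
}
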
2860
2861unsigned SIRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
2862 MachineFunction &MF) const {
2863 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
2864
2865 unsigned Occupancy = ST.getOccupancyWithLocalMemSize(MFI->getLDSSize(),
2866 MF.getFunction());
2867 switch (RC->getID()) {
2868 default:
2869 return AMDGPUGenRegisterInfo::getRegPressureLimit(RC, MF);
2870 case AMDGPU::VGPR_32RegClassID:
2871 case AMDGPU::VGPR_LO16RegClassID:
2872 case AMDGPU::VGPR_HI16RegClassID:
2873 return std::min(ST.getMaxNumVGPRs(Occupancy), ST.getMaxNumVGPRs(MF));
2874 case AMDGPU::SGPR_32RegClassID:
2875 case AMDGPU::SGPR_LO16RegClassID:
2876 return std::min(ST.getMaxNumSGPRs(Occupancy, true), ST.getMaxNumSGPRs(MF));
2877 }
2878}
2879
2880unsigned SIRegisterInfo::getRegPressureSetLimit(const MachineFunction &MF,
2881 unsigned Idx) const {
2882 if (Idx == AMDGPU::RegisterPressureSets::VGPR_32 ||
2883 Idx == AMDGPU::RegisterPressureSets::AGPR_32)
2884 return getRegPressureLimit(&AMDGPU::VGPR_32RegClass,
2885 const_cast<MachineFunction &>(MF));
2886
2887 if (Idx == AMDGPU::RegisterPressureSets::SReg_32)
2888 return getRegPressureLimit(&AMDGPU::SGPR_32RegClass,
2889 const_cast<MachineFunction &>(MF));
2890
2891 llvm_unreachable("Unexpected register pressure set!")::llvm::llvm_unreachable_internal("Unexpected register pressure set!"
, "llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp", 2891)
;
2892}
2893
2894const int *SIRegisterInfo::getRegUnitPressureSets(unsigned RegUnit) const {
2895 static const int Empty[] = { -1 };
2896
2897 if (RegPressureIgnoredUnits[RegUnit])
2898 return Empty;
2899
2900 return AMDGPUGenRegisterInfo::getRegUnitPressureSets(RegUnit);
2901}
2902
2903MCRegister SIRegisterInfo::getReturnAddressReg(const MachineFunction &MF) const {
2904 // Not a callee saved register.
2905 return AMDGPU::SGPR30_SGPR31;
2906}
2907
2908const TargetRegisterClass *
2909SIRegisterInfo::getRegClassForSizeOnBank(unsigned Size,
2910 const RegisterBank &RB) const {
2911 switch (RB.getID()) {
2912 case AMDGPU::VGPRRegBankID:
2913 return getVGPRClassForBitWidth(std::max(32u, Size));
2914 case AMDGPU::VCCRegBankID:
2915 assert(Size == 1);
2916 return isWave32 ? &AMDGPU::SReg_32_XM0_XEXECRegClass
2917 : &AMDGPU::SReg_64_XEXECRegClass;
2918 case AMDGPU::SGPRRegBankID:
2919 return getSGPRClassForBitWidth(std::max(32u, Size));
2920 case AMDGPU::AGPRRegBankID:
2921 return getAGPRClassForBitWidth(std::max(32u, Size));
2922 default:
2923 llvm_unreachable("unknown register bank")::llvm::llvm_unreachable_internal("unknown register bank", "llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp"
, 2923)
;
2924 }
2925}
2926
2927const TargetRegisterClass *
2928SIRegisterInfo::getConstrainedRegClassForOperand(const MachineOperand &MO,
2929 const MachineRegisterInfo &MRI) const {
2930 const RegClassOrRegBank &RCOrRB = MRI.getRegClassOrRegBank(MO.getReg());
2931 if (const RegisterBank *RB = RCOrRB.dyn_cast<const RegisterBank*>())
2932 return getRegClassForTypeOnBank(MRI.getType(MO.getReg()), *RB);
2933
2934 if (const auto *RC = RCOrRB.dyn_cast<const TargetRegisterClass *>())
2935 return getAllocatableClass(RC);
2936
2937 return nullptr;
2938}
2939
2940MCRegister SIRegisterInfo::getVCC() const {
2941 return isWave32 ? AMDGPU::VCC_LO : AMDGPU::VCC;
2942}
2943
2944MCRegister SIRegisterInfo::getExec() const {
2945 return isWave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
2946}
2947
2948const TargetRegisterClass *SIRegisterInfo::getVGPR64Class() const {
2949 // VGPR tuples have an alignment requirement on gfx90a variants.
2950 return ST.needsAlignedVGPRs() ? &AMDGPU::VReg_64_Align2RegClass
2951 : &AMDGPU::VReg_64RegClass;
2952}
2953
2954const TargetRegisterClass *
2955SIRegisterInfo::getRegClass(unsigned RCID) const {
2956 switch ((int)RCID) {
2957 case AMDGPU::SReg_1RegClassID:
2958 return getBoolRC();
2959 case AMDGPU::SReg_1_XEXECRegClassID:
2960 return isWave32 ? &AMDGPU::SReg_32_XM0_XEXECRegClass
2961 : &AMDGPU::SReg_64_XEXECRegClass;
2962 case -1:
2963 return nullptr;
2964 default:
2965 return AMDGPUGenRegisterInfo::getRegClass(RCID);
2966 }
2967}
2968
2969// Find reaching register definition
2970MachineInstr *SIRegisterInfo::findReachingDef(Register Reg, unsigned SubReg,
2971 MachineInstr &Use,
2972 MachineRegisterInfo &MRI,
2973 LiveIntervals *LIS) const {
2974 auto &MDT = LIS->getAnalysis<MachineDominatorTree>();
2975 SlotIndex UseIdx = LIS->getInstructionIndex(Use);
2976 SlotIndex DefIdx;
2977
2978 if (Reg.isVirtual()) {
2979 if (!LIS->hasInterval(Reg))
2980 return nullptr;
2981 LiveInterval &LI = LIS->getInterval(Reg);
2982 LaneBitmask SubLanes = SubReg ? getSubRegIndexLaneMask(SubReg)
2983 : MRI.getMaxLaneMaskForVReg(Reg);
2984 VNInfo *V = nullptr;
2985 if (LI.hasSubRanges()) {
2986 for (auto &S : LI.subranges()) {
2987 if ((S.LaneMask & SubLanes) == SubLanes) {
2988 V = S.getVNInfoAt(UseIdx);
2989 break;
2990 }
2991 }
2992 } else {
2993 V = LI.getVNInfoAt(UseIdx);
2994 }
2995 if (!V)
2996 return nullptr;
2997 DefIdx = V->def;
2998 } else {
2999 // Find last def.
3000 for (MCRegUnitIterator Units(Reg.asMCReg(), this); Units.isValid();
3001 ++Units) {
3002 LiveRange &LR = LIS->getRegUnit(*Units);
3003 if (VNInfo *V = LR.getVNInfoAt(UseIdx)) {
3004 if (!DefIdx.isValid() ||
3005 MDT.dominates(LIS->getInstructionFromIndex(DefIdx),
3006 LIS->getInstructionFromIndex(V->def)))
3007 DefIdx = V->def;
3008 } else {
3009 return nullptr;
3010 }
3011 }
3012 }
3013
3014 MachineInstr *Def = LIS->getInstructionFromIndex(DefIdx);
3015
3016 if (!Def || !MDT.dominates(Def, &Use))
3017 return nullptr;
3018
3019 assert(Def->modifiesRegister(Reg, this));
3020
3021 return Def;
3022}
3023
3024MCPhysReg SIRegisterInfo::get32BitRegister(MCPhysReg Reg) const {
3025 assert(getRegSizeInBits(*getPhysRegClass(Reg)) <= 32);
3026
3027 for (const TargetRegisterClass &RC : { AMDGPU::VGPR_32RegClass,
3028 AMDGPU::SReg_32RegClass,
3029 AMDGPU::AGPR_32RegClass } ) {
3030 if (MCPhysReg Super = getMatchingSuperReg(Reg, AMDGPU::lo16, &RC))
3031 return Super;
3032 }
3033 if (MCPhysReg Super = getMatchingSuperReg(Reg, AMDGPU::hi16,
3034 &AMDGPU::VGPR_32RegClass)) {
3035 return Super;
3036 }
3037
3038 return AMDGPU::NoRegister;
3039}
3040
3041bool SIRegisterInfo::isProperlyAlignedRC(const TargetRegisterClass &RC) const {
3042 if (!ST.needsAlignedVGPRs())
3043 return true;
3044
3045 if (isVGPRClass(&RC))
3046 return RC.hasSuperClassEq(getVGPRClassForBitWidth(getRegSizeInBits(RC)));
3047 if (isAGPRClass(&RC))
3048 return RC.hasSuperClassEq(getAGPRClassForBitWidth(getRegSizeInBits(RC)));
3049 if (isVectorSuperClass(&RC))
3050 return RC.hasSuperClassEq(
3051 getVectorSuperClassForBitWidth(getRegSizeInBits(RC)));
3052
3053 return true;
3054}
3055
3056const TargetRegisterClass *
3057SIRegisterInfo::getProperlyAlignedRC(const TargetRegisterClass *RC) const {
3058 if (!RC || !ST.needsAlignedVGPRs())
3059 return RC;
3060
3061 unsigned Size = getRegSizeInBits(*RC);
3062 if (Size <= 32)
3063 return RC;
3064
3065 if (isVGPRClass(RC))
3066 return getAlignedVGPRClassForBitWidth(Size);
3067 if (isAGPRClass(RC))
3068 return getAlignedAGPRClassForBitWidth(Size);
3069 if (isVectorSuperClass(RC))
3070 return getAlignedVectorSuperClassForBitWidth(Size);
3071
3072 return RC;
3073}
3074
3075ArrayRef<MCPhysReg>
3076SIRegisterInfo::getAllSGPR128(const MachineFunction &MF) const {
3077 return makeArrayRef(AMDGPU::SGPR_128RegClass.begin(),
3078 ST.getMaxNumSGPRs(MF) / 4);
3079}
3080
3081ArrayRef<MCPhysReg>
3082SIRegisterInfo::getAllSGPR64(const MachineFunction &MF) const {
3083 return makeArrayRef(AMDGPU::SGPR_64RegClass.begin(),
3084 ST.getMaxNumSGPRs(MF) / 2);
3085}
3086
3087ArrayRef<MCPhysReg>
3088SIRegisterInfo::getAllSGPR32(const MachineFunction &MF) const {
3089 return makeArrayRef(AMDGPU::SGPR_32RegClass.begin(), ST.getMaxNumSGPRs(MF));
3090}