Bug Summary

File: llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
Warning: line 1184, column 5
Value stored to 'Desc' is never read
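
This is a dead-store finding from the Clang Static Analyzer's deadcode.DeadStores checker (enabled via -analyzer-checker=deadcode in the invocation below): the value assigned to 'Desc' at line 1184 is never read afterwards. A minimal sketch of the flagged pattern follows; the names 'A', 'B' and 'compute' are placeholders, not the actual code at that line:

  const MCInstrDesc *Desc = compute(A);  // this value is read later
  ...
  Desc = compute(B);                     // dead store: 'Desc' is never read again

The usual remedies are to remove the redundant assignment or to actually use the stored value.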

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name SIRegisterInfo.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20211103100639+f9e6be5cc1a2/build-llvm -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Target/AMDGPU -I /build/llvm-toolchain-snapshot-14~++20211103100639+f9e6be5cc1a2/llvm/lib/Target/AMDGPU -I include -I /build/llvm-toolchain-snapshot-14~++20211103100639+f9e6be5cc1a2/llvm/include -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-command-line-argument -Wno-unknown-warning-option -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20211103100639+f9e6be5cc1a2/build-llvm -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-11-03-233151-280022-1 -x c++ /build/llvm-toolchain-snapshot-14~++20211103100639+f9e6be5cc1a2/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
1//===-- SIRegisterInfo.cpp - SI Register Information ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// SI implementation of the TargetRegisterInfo class.
11//
12//===----------------------------------------------------------------------===//
13
14#include "SIRegisterInfo.h"
15#include "AMDGPU.h"
16#include "AMDGPURegisterBankInfo.h"
17#include "GCNSubtarget.h"
18#include "MCTargetDesc/AMDGPUInstPrinter.h"
19#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
20#include "SIMachineFunctionInfo.h"
21#include "llvm/CodeGen/LiveIntervals.h"
22#include "llvm/CodeGen/MachineDominators.h"
23#include "llvm/CodeGen/RegisterScavenging.h"
24
25using namespace llvm;
26
27#define GET_REGINFO_TARGET_DESC
28#include "AMDGPUGenRegisterInfo.inc"
29
30static cl::opt<bool> EnableSpillSGPRToVGPR(
31 "amdgpu-spill-sgpr-to-vgpr",
32 cl::desc("Enable spilling SGPRs to VGPRs"),
33 cl::ReallyHidden,
34 cl::init(true));
35
36std::array<std::vector<int16_t>, 16> SIRegisterInfo::RegSplitParts;
37std::array<std::array<uint16_t, 32>, 9> SIRegisterInfo::SubRegFromChannelTable;
38
39// Map numbers of DWORDs to indexes in SubRegFromChannelTable.
40// Valid indexes are shifted 1, such that a 0 mapping means unsupported.
41// e.g. for 8 DWORDs (256-bit), SubRegFromChannelTableWidthMap[8] = 8,
42// meaning index 7 in SubRegFromChannelTable.
43static const std::array<unsigned, 17> SubRegFromChannelTableWidthMap = {
44 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 9};
45
46namespace llvm {
47
48// A temporary struct to spill SGPRs.
49// This is mostly to spill SGPRs to memory. Spilling SGPRs into VGPR lanes emits
50// just v_writelane and v_readlane.
51//
52// When spilling to memory, the SGPRs are written into VGPR lanes and the VGPR
53// is saved to scratch (or the other way around for loads).
54// For this, a VGPR is required where the needed lanes can be clobbered. The
55// RegScavenger can provide a VGPR where currently active lanes can be
56// clobbered, but we still need to save inactive lanes.
57// The high-level steps are:
58// - Try to scavenge SGPR(s) to save exec
59// - Try to scavenge VGPR
60// - Save needed, all or inactive lanes of a TmpVGPR
61// - Spill/Restore SGPRs using TmpVGPR
62// - Restore TmpVGPR
63//
64// To save all lanes of TmpVGPR, exec needs to be saved and modified. If we
65// cannot scavenge temporary SGPRs to save exec, we use the following code:
66// buffer_store_dword TmpVGPR ; only if active lanes need to be saved
67// s_not exec, exec
68// buffer_store_dword TmpVGPR ; save inactive lanes
69// s_not exec, exec
70struct SGPRSpillBuilder {
71 struct PerVGPRData {
72 unsigned PerVGPR;
73 unsigned NumVGPRs;
74 int64_t VGPRLanes;
75 };
76
77 // The SGPR to save
78 Register SuperReg;
79 MachineBasicBlock::iterator MI;
80 ArrayRef<int16_t> SplitParts;
81 unsigned NumSubRegs;
82 bool IsKill;
83 const DebugLoc &DL;
84
85 /* When spilling to stack */
86 // The SGPRs are written into this VGPR, which is then written to scratch
87 // (or vice versa for loads).
88 Register TmpVGPR = AMDGPU::NoRegister;
89 // Temporary spill slot to save TmpVGPR to.
90 int TmpVGPRIndex = 0;
91 // If TmpVGPR is live before the spill or if it is scavenged.
92 bool TmpVGPRLive = false;
93 // Scavenged SGPR to save EXEC.
94 Register SavedExecReg = AMDGPU::NoRegister;
95 // Stack index to write the SGPRs to.
96 int Index;
97 unsigned EltSize = 4;
98
99 RegScavenger *RS;
100 MachineBasicBlock *MBB;
101 MachineFunction &MF;
102 SIMachineFunctionInfo &MFI;
103 const SIInstrInfo &TII;
104 const SIRegisterInfo &TRI;
105 bool IsWave32;
106 Register ExecReg;
107 unsigned MovOpc;
108 unsigned NotOpc;
109
110 SGPRSpillBuilder(const SIRegisterInfo &TRI, const SIInstrInfo &TII,
111 bool IsWave32, MachineBasicBlock::iterator MI, int Index,
112 RegScavenger *RS)
113 : SGPRSpillBuilder(TRI, TII, IsWave32, MI, MI->getOperand(0).getReg(),
114 MI->getOperand(0).isKill(), Index, RS) {}
115
116 SGPRSpillBuilder(const SIRegisterInfo &TRI, const SIInstrInfo &TII,
117 bool IsWave32, MachineBasicBlock::iterator MI, Register Reg,
118 bool IsKill, int Index, RegScavenger *RS)
119 : SuperReg(Reg), MI(MI), IsKill(IsKill), DL(MI->getDebugLoc()),
120 Index(Index), RS(RS), MBB(MI->getParent()), MF(*MBB->getParent()),
121 MFI(*MF.getInfo<SIMachineFunctionInfo>()), TII(TII), TRI(TRI),
122 IsWave32(IsWave32) {
123 const TargetRegisterClass *RC = TRI.getPhysRegClass(SuperReg);
124 SplitParts = TRI.getRegSplitParts(RC, EltSize);
125 NumSubRegs = SplitParts.empty() ? 1 : SplitParts.size();
126
127 if (IsWave32) {
128 ExecReg = AMDGPU::EXEC_LO;
129 MovOpc = AMDGPU::S_MOV_B32;
130 NotOpc = AMDGPU::S_NOT_B32;
131 } else {
132 ExecReg = AMDGPU::EXEC;
133 MovOpc = AMDGPU::S_MOV_B64;
134 NotOpc = AMDGPU::S_NOT_B64;
135 }
136
137 assert(SuperReg != AMDGPU::M0 && "m0 should never spill");
138 assert(SuperReg != AMDGPU::EXEC_LO && SuperReg != AMDGPU::EXEC_HI &&
139 SuperReg != AMDGPU::EXEC && "exec should never spill");
140 }
141
142 PerVGPRData getPerVGPRData() {
143 PerVGPRData Data;
144 Data.PerVGPR = IsWave32 ? 32 : 64;
145 Data.NumVGPRs = (NumSubRegs + (Data.PerVGPR - 1)) / Data.PerVGPR;
146 Data.VGPRLanes = (1LL << std::min(Data.PerVGPR, NumSubRegs)) - 1LL;
147 return Data;
148 }
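// Worked example (illustrative, not part of the original source): for a spill
// of a 4-subreg SGPR tuple in wave64, PerVGPR = 64,
// NumVGPRs = (4 + 63) / 64 = 1, and VGPRLanes = (1 << 4) - 1 = 0xF.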
149
150 // Tries to scavenge SGPRs to save EXEC and a VGPR. Uses v0 if no VGPR is
151 // free.
152 // Writes these instructions if an SGPR can be scavenged:
153 // s_mov_b64 s[6:7], exec ; Save exec
154 // s_mov_b64 exec, 3 ; Wanted lanemask
155 // buffer_store_dword v1 ; Write scavenged VGPR to emergency slot
156 //
157 // Writes these instructions if no SGPR can be scavenged:
158 // buffer_store_dword v0 ; Only if no free VGPR was found
159 // s_not_b64 exec, exec
160 // buffer_store_dword v0 ; Save inactive lanes
161 // ; exec stays inverted, it is flipped back in
162 // ; restore.
163 void prepare() {
164 // Scavenged temporary VGPR to use. It must be scavenged once for any number
165 // of spilled subregs.
166 // FIXME: The liveness analysis is limited and does not tell if a register
167 // is in use in lanes that are currently inactive. We can never be sure if
168 // a register is actually in use in another lane, so we need to save all
169 // used lanes of the chosen VGPR.
170 assert(RS && "Cannot spill SGPR to memory without RegScavenger");
171 TmpVGPR = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0, false);
172
173 // Reserve temporary stack slot
174 TmpVGPRIndex = MFI.getScavengeFI(MF.getFrameInfo(), TRI);
175 if (TmpVGPR) {
176 // Found a register that is dead in the currently active lanes, we only
177 // need to spill inactive lanes.
178 TmpVGPRLive = false;
179 } else {
180 // Pick v0 because it doesn't make a difference.
181 TmpVGPR = AMDGPU::VGPR0;
182 TmpVGPRLive = true;
183 }
184
185 // Try to scavenge SGPRs to save exec
186 assert(!SavedExecReg && "Exec is already saved, refuse to save again");
187 const TargetRegisterClass &RC =
188 IsWave32 ? AMDGPU::SGPR_32RegClass : AMDGPU::SGPR_64RegClass;
189 RS->setRegUsed(SuperReg);
190 SavedExecReg = RS->scavengeRegister(&RC, MI, 0, false);
191
192 int64_t VGPRLanes = getPerVGPRData().VGPRLanes;
193
194 if (SavedExecReg) {
195 RS->setRegUsed(SavedExecReg);
196 // Set exec to needed lanes
197 BuildMI(*MBB, MI, DL, TII.get(MovOpc), SavedExecReg).addReg(ExecReg);
198 auto I =
199 BuildMI(*MBB, MI, DL, TII.get(MovOpc), ExecReg).addImm(VGPRLanes);
200 if (!TmpVGPRLive)
201 I.addReg(TmpVGPR, RegState::ImplicitDefine);
202 // Spill needed lanes
203 TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ false);
204 } else {
205 // Spill active lanes
206 if (TmpVGPRLive)
207 TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ false,
208 /*IsKill*/ false);
209 // Spill inactive lanes
210 auto I = BuildMI(*MBB, MI, DL, TII.get(NotOpc), ExecReg).addReg(ExecReg);
211 if (!TmpVGPRLive)
212 I.addReg(TmpVGPR, RegState::ImplicitDefine);
213 TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ false);
214 }
215 }
216
217 // Writes these instructions if an SGPR can be scavenged:
218 // buffer_load_dword v1 ; Reload scavenged VGPR from emergency slot
219 // s_waitcnt vmcnt(0) ; If a free VGPR was found
220 // s_mov_b64 exec, s[6:7] ; Restore exec
221 //
222 // Writes these instructions if no SGPR can be scavenged:
223 // buffer_load_dword v0 ; Restore inactive lanes
224 // s_waitcnt vmcnt(0) ; If a free VGPR was found
225 // s_not_b64 exec, exec
226 // buffer_load_dword v0 ; Only if no free VGPR was found
227 void restore() {
228 if (SavedExecReg) {
229 // Restore used lanes
230 TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ true,
231 /*IsKill*/ false);
232 // Restore exec
233 auto I = BuildMI(*MBB, MI, DL, TII.get(MovOpc), ExecReg)
234 .addReg(SavedExecReg, RegState::Kill);
235 // Add an implicit use of the load so it is not dead.
236 // FIXME This inserts an unnecessary waitcnt
237 if (!TmpVGPRLive) {
238 I.addReg(TmpVGPR, RegState::ImplicitKill);
239 }
240 } else {
241 // Restore inactive lanes
242 TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ true,
243 /*IsKill*/ false);
244 auto I = BuildMI(*MBB, MI, DL, TII.get(NotOpc), ExecReg).addReg(ExecReg);
245 if (!TmpVGPRLive) {
246 I.addReg(TmpVGPR, RegState::ImplicitKill);
247 }
248 // Restore active lanes
249 if (TmpVGPRLive)
250 TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ true);
251 }
252 }
253
254 // Write TmpVGPR to memory or read TmpVGPR from memory.
255 // Either using a single buffer_load/store if exec is set to the needed mask
256 // or using
257 // buffer_load
258 // s_not exec, exec
259 // buffer_load
260 // s_not exec, exec
261 void readWriteTmpVGPR(unsigned Offset, bool IsLoad) {
262 if (SavedExecReg) {
263 // Spill needed lanes
264 TRI.buildVGPRSpillLoadStore(*this, Index, Offset, IsLoad);
265 } else {
266 // Spill active lanes
267 TRI.buildVGPRSpillLoadStore(*this, Index, Offset, IsLoad,
268 /*IsKill*/ false);
269 // Spill inactive lanes
270 BuildMI(*MBB, MI, DL, TII.get(NotOpc), ExecReg).addReg(ExecReg);
271 TRI.buildVGPRSpillLoadStore(*this, Index, Offset, IsLoad);
272 BuildMI(*MBB, MI, DL, TII.get(NotOpc), ExecReg).addReg(ExecReg);
273 }
274 }
275
276 void setMI(MachineBasicBlock *NewMBB, MachineBasicBlock::iterator NewMI) {
277 assert(MBB->getParent() == &MF);
278 MI = NewMI;
279 MBB = NewMBB;
280 }
281};
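// Illustrative usage sketch (assumed from the interface above, not code taken
// from this file): a caller spilling an SGPR to memory would bracket its lane
// writes roughly as
//   SGPRSpillBuilder SB(TRI, TII, IsWave32, MI, Index, RS);
//   SB.prepare();                                   // scavenge TmpVGPR, set up exec
//   // ... move the SGPR parts into SB.TmpVGPR with v_writelane ...
//   SB.readWriteTmpVGPR(Offset, /*IsLoad=*/ false); // store TmpVGPR to scratch
//   SB.restore();                                   // restore exec and TmpVGPR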
282
283} // namespace llvm
284
285SIRegisterInfo::SIRegisterInfo(const GCNSubtarget &ST)
286 : AMDGPUGenRegisterInfo(AMDGPU::PC_REG, ST.getAMDGPUDwarfFlavour()), ST(ST),
287 SpillSGPRToVGPR(EnableSpillSGPRToVGPR), isWave32(ST.isWave32()) {
288
289 assert(getSubRegIndexLaneMask(AMDGPU::sub0).getAsInteger() == 3 &&
290 getSubRegIndexLaneMask(AMDGPU::sub31).getAsInteger() == (3ULL << 62) &&
291 (getSubRegIndexLaneMask(AMDGPU::lo16) |
292 getSubRegIndexLaneMask(AMDGPU::hi16)).getAsInteger() ==
293 getSubRegIndexLaneMask(AMDGPU::sub0).getAsInteger() &&
294 "getNumCoveredRegs() will not work with generated subreg masks!");
295
296 RegPressureIgnoredUnits.resize(getNumRegUnits());
297 RegPressureIgnoredUnits.set(
298 *MCRegUnitIterator(MCRegister::from(AMDGPU::M0), this));
299 for (auto Reg : AMDGPU::VGPR_HI16RegClass)
300 RegPressureIgnoredUnits.set(*MCRegUnitIterator(Reg, this));
301
302 // HACK: Until this is fully tablegen'd.
303 static llvm::once_flag InitializeRegSplitPartsFlag;
304
305 static auto InitializeRegSplitPartsOnce = [this]() {
306 for (unsigned Idx = 1, E = getNumSubRegIndices() - 1; Idx < E; ++Idx) {
307 unsigned Size = getSubRegIdxSize(Idx);
308 if (Size & 31)
309 continue;
310 std::vector<int16_t> &Vec = RegSplitParts[Size / 32 - 1];
311 unsigned Pos = getSubRegIdxOffset(Idx);
312 if (Pos % Size)
313 continue;
314 Pos /= Size;
315 if (Vec.empty()) {
316 unsigned MaxNumParts = 1024 / Size; // Maximum register is 1024 bits.
317 Vec.resize(MaxNumParts);
318 }
319 Vec[Pos] = Idx;
320 }
321 };
322
323 static llvm::once_flag InitializeSubRegFromChannelTableFlag;
324
325 static auto InitializeSubRegFromChannelTableOnce = [this]() {
326 for (auto &Row : SubRegFromChannelTable)
327 Row.fill(AMDGPU::NoSubRegister);
328 for (uint16_t Idx = 1; Idx < getNumSubRegIndices(); ++Idx) {
329 unsigned Width = AMDGPUSubRegIdxRanges[Idx].Size / 32;
330 unsigned Offset = AMDGPUSubRegIdxRanges[Idx].Offset / 32;
331 assert(Width < SubRegFromChannelTableWidthMap.size());
332 Width = SubRegFromChannelTableWidthMap[Width];
333 if (Width == 0)
334 continue;
335 unsigned TableIdx = Width - 1;
336 assert(TableIdx < SubRegFromChannelTable.size());
337 assert(Offset < SubRegFromChannelTable[TableIdx].size());
338 SubRegFromChannelTable[TableIdx][Offset] = Idx;
339 }
340 };
341
342 llvm::call_once(InitializeRegSplitPartsFlag, InitializeRegSplitPartsOnce);
343 llvm::call_once(InitializeSubRegFromChannelTableFlag,
344 InitializeSubRegFromChannelTableOnce);
345}
346
347void SIRegisterInfo::reserveRegisterTuples(BitVector &Reserved,
348 MCRegister Reg) const {
349 MCRegAliasIterator R(Reg, this, true);
350
351 for (; R.isValid(); ++R)
352 Reserved.set(*R);
353}
354
355// Forced to be here by one .inc
356const MCPhysReg *SIRegisterInfo::getCalleeSavedRegs(
357 const MachineFunction *MF) const {
358 CallingConv::ID CC = MF->getFunction().getCallingConv();
359 switch (CC) {
360 case CallingConv::C:
361 case CallingConv::Fast:
362 case CallingConv::Cold:
363 case CallingConv::AMDGPU_Gfx:
364 return MF->getSubtarget<GCNSubtarget>().hasGFX90AInsts()
365 ? CSR_AMDGPU_HighRegs_With_AGPRs_SaveList
366 : CSR_AMDGPU_HighRegs_SaveList;
367 default: {
368 // Dummy to not crash RegisterClassInfo.
369 static const MCPhysReg NoCalleeSavedReg = AMDGPU::NoRegister;
370 return &NoCalleeSavedReg;
371 }
372 }
373}
374
375const MCPhysReg *
376SIRegisterInfo::getCalleeSavedRegsViaCopy(const MachineFunction *MF) const {
377 return nullptr;
378}
379
380const uint32_t *SIRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
381 CallingConv::ID CC) const {
382 switch (CC) {
383 case CallingConv::C:
384 case CallingConv::Fast:
385 case CallingConv::Cold:
386 case CallingConv::AMDGPU_Gfx:
387 return MF.getSubtarget<GCNSubtarget>().hasGFX90AInsts()
388 ? CSR_AMDGPU_HighRegs_With_AGPRs_RegMask
389 : CSR_AMDGPU_HighRegs_RegMask;
390 default:
391 return nullptr;
392 }
393}
394
395const uint32_t *SIRegisterInfo::getNoPreservedMask() const {
396 return CSR_AMDGPU_NoRegs_RegMask;
397}
398
399Register SIRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
400 const SIFrameLowering *TFI =
401 MF.getSubtarget<GCNSubtarget>().getFrameLowering();
402 const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
403 // During ISel lowering we always reserve the stack pointer in entry
404 // functions, but never actually want to reference it when accessing our own
405 // frame. If we need a frame pointer we use it, but otherwise we can just use
406 // an immediate "0" which we represent by returning NoRegister.
407 if (FuncInfo->isEntryFunction()) {
408 return TFI->hasFP(MF) ? FuncInfo->getFrameOffsetReg() : Register();
409 }
410 return TFI->hasFP(MF) ? FuncInfo->getFrameOffsetReg()
411 : FuncInfo->getStackPtrOffsetReg();
412}
413
414bool SIRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
415 // When we need stack realignment, we can't reference off of the
416 // stack pointer, so we reserve a base pointer.
417 const MachineFrameInfo &MFI = MF.getFrameInfo();
418 return MFI.getNumFixedObjects() && shouldRealignStack(MF);
419}
420
421Register SIRegisterInfo::getBaseRegister() const { return AMDGPU::SGPR34; }
422
423const uint32_t *SIRegisterInfo::getAllVGPRRegMask() const {
424 return CSR_AMDGPU_AllVGPRs_RegMask;
425}
426
427const uint32_t *SIRegisterInfo::getAllAGPRRegMask() const {
428 return CSR_AMDGPU_AllAGPRs_RegMask;
429}
430
431const uint32_t *SIRegisterInfo::getAllVectorRegMask() const {
432 return CSR_AMDGPU_AllVectorRegs_RegMask;
433}
434
435const uint32_t *SIRegisterInfo::getAllAllocatableSRegMask() const {
436 return CSR_AMDGPU_AllAllocatableSRegs_RegMask;
437}
438
439unsigned SIRegisterInfo::getSubRegFromChannel(unsigned Channel,
440 unsigned NumRegs) {
441 assert(NumRegs < SubRegFromChannelTableWidthMap.size());
442 unsigned NumRegIndex = SubRegFromChannelTableWidthMap[NumRegs];
443 assert(NumRegIndex && "Not implemented");
444 assert(Channel < SubRegFromChannelTable[NumRegIndex - 1].size());
445 return SubRegFromChannelTable[NumRegIndex - 1][Channel];
446}
447
448MCRegister SIRegisterInfo::reservedPrivateSegmentBufferReg(
449 const MachineFunction &MF) const {
450 unsigned BaseIdx = alignDown(ST.getMaxNumSGPRs(MF), 4) - 4;
451 MCRegister BaseReg(AMDGPU::SGPR_32RegClass.getRegister(BaseIdx));
452 return getMatchingSuperReg(BaseReg, AMDGPU::sub0, &AMDGPU::SGPR_128RegClass);
453}
454
455BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
456 BitVector Reserved(getNumRegs());
457 Reserved.set(AMDGPU::MODE);
458
459 // EXEC_LO and EXEC_HI could be allocated and used as regular registers, but
460 // this seems likely to result in bugs, so I'm marking them as reserved.
461 reserveRegisterTuples(Reserved, AMDGPU::EXEC);
462 reserveRegisterTuples(Reserved, AMDGPU::FLAT_SCR);
463
464 // M0 has to be reserved so that llvm accepts it as a live-in into a block.
465 reserveRegisterTuples(Reserved, AMDGPU::M0);
466
467 // Reserve src_vccz, src_execz, src_scc.
468 reserveRegisterTuples(Reserved, AMDGPU::SRC_VCCZ);
469 reserveRegisterTuples(Reserved, AMDGPU::SRC_EXECZ);
470 reserveRegisterTuples(Reserved, AMDGPU::SRC_SCC);
471
472 // Reserve the memory aperture registers.
473 reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_BASE);
474 reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_LIMIT);
475 reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_BASE);
476 reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_LIMIT);
477
478 // Reserve src_pops_exiting_wave_id - support is not implemented in Codegen.
479 reserveRegisterTuples(Reserved, AMDGPU::SRC_POPS_EXITING_WAVE_ID);
480
481 // Reserve xnack_mask registers - support is not implemented in Codegen.
482 reserveRegisterTuples(Reserved, AMDGPU::XNACK_MASK);
483
484 // Reserve lds_direct register - support is not implemented in Codegen.
485 reserveRegisterTuples(Reserved, AMDGPU::LDS_DIRECT);
486
487 // Reserve Trap Handler registers - support is not implemented in Codegen.
488 reserveRegisterTuples(Reserved, AMDGPU::TBA);
489 reserveRegisterTuples(Reserved, AMDGPU::TMA);
490 reserveRegisterTuples(Reserved, AMDGPU::TTMP0_TTMP1);
491 reserveRegisterTuples(Reserved, AMDGPU::TTMP2_TTMP3);
492 reserveRegisterTuples(Reserved, AMDGPU::TTMP4_TTMP5);
493 reserveRegisterTuples(Reserved, AMDGPU::TTMP6_TTMP7);
494 reserveRegisterTuples(Reserved, AMDGPU::TTMP8_TTMP9);
495 reserveRegisterTuples(Reserved, AMDGPU::TTMP10_TTMP11);
496 reserveRegisterTuples(Reserved, AMDGPU::TTMP12_TTMP13);
497 reserveRegisterTuples(Reserved, AMDGPU::TTMP14_TTMP15);
498
499 // Reserve null register - it shall never be allocated
500 reserveRegisterTuples(Reserved, AMDGPU::SGPR_NULL);
501
502 // Disallow vcc_hi allocation in wave32. It may be allocated but most likely
503 // will result in bugs.
504 if (isWave32) {
505 Reserved.set(AMDGPU::VCC);
506 Reserved.set(AMDGPU::VCC_HI);
507 }
508
509 unsigned MaxNumSGPRs = ST.getMaxNumSGPRs(MF);
510 unsigned TotalNumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
511 for (unsigned i = MaxNumSGPRs; i < TotalNumSGPRs; ++i) {
512 unsigned Reg = AMDGPU::SGPR_32RegClass.getRegister(i);
513 reserveRegisterTuples(Reserved, Reg);
514 }
515
516 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
517 unsigned MaxNumVGPRs = ST.getMaxNumVGPRs(MF);
518 unsigned MaxNumAGPRs = MaxNumVGPRs;
519 unsigned TotalNumVGPRs = AMDGPU::VGPR_32RegClass.getNumRegs();
520
521 if (ST.hasGFX90AInsts()) {
522 // In an entry function without calls and AGPRs used it is possible to use
523 // the whole register budget for VGPRs.
524
525 // TODO: it shall be possible to estimate maximum AGPR/VGPR pressure and
526 // split register file accordingly.
527 if (MFI->usesAGPRs(MF)) {
528 MaxNumVGPRs /= 2;
529 MaxNumAGPRs = MaxNumVGPRs;
530 } else {
531 if (MaxNumVGPRs > TotalNumVGPRs) {
532 MaxNumAGPRs = MaxNumVGPRs - TotalNumVGPRs;
533 MaxNumVGPRs = TotalNumVGPRs;
534 } else
535 MaxNumAGPRs = 0;
536 }
537 }
538
539 for (unsigned i = MaxNumVGPRs; i < TotalNumVGPRs; ++i) {
540 unsigned Reg = AMDGPU::VGPR_32RegClass.getRegister(i);
541 reserveRegisterTuples(Reserved, Reg);
542 }
543
544 for (unsigned i = MaxNumAGPRs; i < TotalNumVGPRs; ++i) {
545 unsigned Reg = AMDGPU::AGPR_32RegClass.getRegister(i);
546 reserveRegisterTuples(Reserved, Reg);
547 }
548
549 for (auto Reg : AMDGPU::SReg_32RegClass) {
550 Reserved.set(getSubReg(Reg, AMDGPU::hi16));
551 Register Low = getSubReg(Reg, AMDGPU::lo16);
552 // This is to prevent BB vcc liveness errors.
553 if (!AMDGPU::SGPR_LO16RegClass.contains(Low))
554 Reserved.set(Low);
555 }
556
557 for (auto Reg : AMDGPU::AGPR_32RegClass) {
558 Reserved.set(getSubReg(Reg, AMDGPU::hi16));
559 }
560
561 // Reserve all remaining AGPRs if there are no instructions to use them.
562 if (!ST.hasMAIInsts()) {
563 for (unsigned i = 0; i < MaxNumVGPRs; ++i) {
564 unsigned Reg = AMDGPU::AGPR_32RegClass.getRegister(i);
565 reserveRegisterTuples(Reserved, Reg);
566 }
567 }
568
569 Register ScratchRSrcReg = MFI->getScratchRSrcReg();
570 if (ScratchRSrcReg != AMDGPU::NoRegister) {
571 // Reserve 4 SGPRs for the scratch buffer resource descriptor in case we need
572 // to spill.
573 // TODO: May need to reserve a VGPR if doing LDS spilling.
574 reserveRegisterTuples(Reserved, ScratchRSrcReg);
575 }
576
577 // We have to assume the SP is needed in case there are calls in the function,
578 // which is detected after the function is lowered. If we aren't really going
579 // to need SP, don't bother reserving it.
580 MCRegister StackPtrReg = MFI->getStackPtrOffsetReg();
581
582 if (StackPtrReg) {
583 reserveRegisterTuples(Reserved, StackPtrReg);
584 assert(!isSubRegister(ScratchRSrcReg, StackPtrReg));
585 }
586
587 MCRegister FrameReg = MFI->getFrameOffsetReg();
588 if (FrameReg) {
589 reserveRegisterTuples(Reserved, FrameReg);
590 assert(!isSubRegister(ScratchRSrcReg, FrameReg));
591 }
592
593 if (hasBasePointer(MF)) {
594 MCRegister BasePtrReg = getBaseRegister();
595 reserveRegisterTuples(Reserved, BasePtrReg);
596 assert(!isSubRegister(ScratchRSrcReg, BasePtrReg));
597 }
598
599 for (auto Reg : MFI->WWMReservedRegs) {
600 reserveRegisterTuples(Reserved, Reg.first);
601 }
602
603 // Reserve VGPRs used for SGPR spilling.
604 // Note we treat freezeReservedRegs unusually because we run register
605 // allocation in two phases. It's OK to re-freeze with new registers for the
606 // second run.
607#if 0
608 for (auto &SpilledFI : MFI->sgpr_spill_vgprs()) {
609 for (auto &SpilledVGPR : SpilledFI.second)
610 reserveRegisterTuples(Reserved, SpilledVGPR.VGPR);
611 }
612#endif
613
614 // FIXME: Stop using reserved registers for this.
615 for (MCPhysReg Reg : MFI->getAGPRSpillVGPRs())
616 reserveRegisterTuples(Reserved, Reg);
617
618 for (MCPhysReg Reg : MFI->getVGPRSpillAGPRs())
619 reserveRegisterTuples(Reserved, Reg);
620
621 for (auto SSpill : MFI->getSGPRSpillVGPRs())
622 reserveRegisterTuples(Reserved, SSpill.VGPR);
623
624 return Reserved;
625}
626
627bool SIRegisterInfo::shouldRealignStack(const MachineFunction &MF) const {
628 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
629 // On entry, the base address is 0, so it can't possibly need any more
630 // alignment.
631
632 // FIXME: Should be able to specify the entry frame alignment per calling
633 // convention instead.
634 if (Info->isEntryFunction())
635 return false;
636
637 return TargetRegisterInfo::shouldRealignStack(MF);
638}
639
640bool SIRegisterInfo::requiresRegisterScavenging(const MachineFunction &Fn) const {
641 const SIMachineFunctionInfo *Info = Fn.getInfo<SIMachineFunctionInfo>();
642 if (Info->isEntryFunction()) {
643 const MachineFrameInfo &MFI = Fn.getFrameInfo();
644 return MFI.hasStackObjects() || MFI.hasCalls();
645 }
646
647 // May need scavenger for dealing with callee saved registers.
648 return true;
649}
650
651bool SIRegisterInfo::requiresFrameIndexScavenging(
652 const MachineFunction &MF) const {
653 // Do not use frame virtual registers. They used to be used for SGPRs, but
654 // once we reach PrologEpilogInserter, we can no longer spill SGPRs. If the
655 // scavenger fails, we can increment/decrement the necessary SGPRs to avoid a
656 // spill.
657 return false;
658}
659
660bool SIRegisterInfo::requiresFrameIndexReplacementScavenging(
661 const MachineFunction &MF) const {
662 const MachineFrameInfo &MFI = MF.getFrameInfo();
663 return MFI.hasStackObjects();
664}
665
666bool SIRegisterInfo::requiresVirtualBaseRegisters(
667 const MachineFunction &) const {
668 // There are no special dedicated stack or frame pointers.
669 return true;
670}
671
672int64_t SIRegisterInfo::getScratchInstrOffset(const MachineInstr *MI) const {
673 assert(SIInstrInfo::isMUBUF(*MI) || SIInstrInfo::isFLATScratch(*MI));
674
675 int OffIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
676 AMDGPU::OpName::offset);
677 return MI->getOperand(OffIdx).getImm();
678}
679
680int64_t SIRegisterInfo::getFrameIndexInstrOffset(const MachineInstr *MI,
681 int Idx) const {
682 if (!SIInstrInfo::isMUBUF(*MI) && !SIInstrInfo::isFLATScratch(*MI))
683 return 0;
684
685 assert((Idx == AMDGPU::getNamedOperandIdx(MI->getOpcode(),
686 AMDGPU::OpName::vaddr) ||
687 (Idx == AMDGPU::getNamedOperandIdx(MI->getOpcode(),
688 AMDGPU::OpName::saddr))) &&
689 "Should never see frame index on non-address operand");
690
691 return getScratchInstrOffset(MI);
692}
693
694bool SIRegisterInfo::needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
695 if (!SIInstrInfo::isMUBUF(*MI) && !SIInstrInfo::isFLATScratch(*MI))
696 return false;
697
698 int64_t FullOffset = Offset + getScratchInstrOffset(MI);
699
700 if (SIInstrInfo::isMUBUF(*MI))
701 return !SIInstrInfo::isLegalMUBUFImmOffset(FullOffset);
702
703 const SIInstrInfo *TII = ST.getInstrInfo();
704 return !TII->isLegalFLATOffset(FullOffset, AMDGPUAS::PRIVATE_ADDRESS,
705 SIInstrFlags::FlatScratch);
706}
707
708Register SIRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
709 int FrameIdx,
710 int64_t Offset) const {
711 MachineBasicBlock::iterator Ins = MBB->begin();
712 DebugLoc DL; // Defaults to "unknown"
713
714 if (Ins != MBB->end())
715 DL = Ins->getDebugLoc();
716
717 MachineFunction *MF = MBB->getParent();
718 const SIInstrInfo *TII = ST.getInstrInfo();
719 MachineRegisterInfo &MRI = MF->getRegInfo();
720 unsigned MovOpc = ST.enableFlatScratch() ? AMDGPU::S_MOV_B32
721 : AMDGPU::V_MOV_B32_e32;
722
723 Register BaseReg = MRI.createVirtualRegister(
724 ST.enableFlatScratch() ? &AMDGPU::SReg_32_XEXEC_HIRegClass
725 : &AMDGPU::VGPR_32RegClass);
726
727 if (Offset == 0) {
728 BuildMI(*MBB, Ins, DL, TII->get(MovOpc), BaseReg)
729 .addFrameIndex(FrameIdx);
730 return BaseReg;
731 }
732
733 Register OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
734
735 Register FIReg = MRI.createVirtualRegister(
736 ST.enableFlatScratch() ? &AMDGPU::SReg_32_XM0RegClass
737 : &AMDGPU::VGPR_32RegClass);
738
739 BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg)
740 .addImm(Offset);
741 BuildMI(*MBB, Ins, DL, TII->get(MovOpc), FIReg)
742 .addFrameIndex(FrameIdx);
743
744 if (ST.enableFlatScratch() ) {
745 BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::S_ADD_I32), BaseReg)
746 .addReg(OffsetReg, RegState::Kill)
747 .addReg(FIReg);
748 return BaseReg;
749 }
750
751 TII->getAddNoCarry(*MBB, Ins, DL, BaseReg)
752 .addReg(OffsetReg, RegState::Kill)
753 .addReg(FIReg)
754 .addImm(0); // clamp bit
755
756 return BaseReg;
757}
758
759void SIRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
760 int64_t Offset) const {
761 const SIInstrInfo *TII = ST.getInstrInfo();
762 bool IsFlat = TII->isFLATScratch(MI);
763
764#ifndef NDEBUG
765 // FIXME: Is it possible to be storing a frame index to itself?
766 bool SeenFI = false;
767 for (const MachineOperand &MO: MI.operands()) {
768 if (MO.isFI()) {
769 if (SeenFI)
770 llvm_unreachable("should not see multiple frame indices");
771
772 SeenFI = true;
773 }
774 }
775#endif
776
777 MachineOperand *FIOp =
778 TII->getNamedOperand(MI, IsFlat ? AMDGPU::OpName::saddr
779 : AMDGPU::OpName::vaddr);
780
781 MachineOperand *OffsetOp = TII->getNamedOperand(MI, AMDGPU::OpName::offset);
782 int64_t NewOffset = OffsetOp->getImm() + Offset;
783
784 assert(FIOp && FIOp->isFI() && "frame index must be address operand");
785 assert(TII->isMUBUF(MI) || TII->isFLATScratch(MI));
786
787 if (IsFlat) {
788 assert(TII->isLegalFLATOffset(NewOffset, AMDGPUAS::PRIVATE_ADDRESS,
789 SIInstrFlags::FlatScratch) &&
790 "offset should be legal");
791 FIOp->ChangeToRegister(BaseReg, false);
792 OffsetOp->setImm(NewOffset);
793 return;
794 }
795
796#ifndef NDEBUG
797 MachineOperand *SOffset = TII->getNamedOperand(MI, AMDGPU::OpName::soffset);
798 assert(SOffset->isImm() && SOffset->getImm() == 0);
799#endif
800
801 assert(SIInstrInfo::isLegalMUBUFImmOffset(NewOffset) &&
802 "offset should be legal");
803
804 FIOp->ChangeToRegister(BaseReg, false);
805 OffsetOp->setImm(NewOffset);
806}
807
808bool SIRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
809 Register BaseReg,
810 int64_t Offset) const {
811 if (!SIInstrInfo::isMUBUF(*MI) && !SIInstrInfo::isFLATScratch(*MI))
812 return false;
813
814 int64_t NewOffset = Offset + getScratchInstrOffset(MI);
815
816 if (SIInstrInfo::isMUBUF(*MI))
817 return SIInstrInfo::isLegalMUBUFImmOffset(NewOffset);
818
819 const SIInstrInfo *TII = ST.getInstrInfo();
820 return TII->isLegalFLATOffset(NewOffset, AMDGPUAS::PRIVATE_ADDRESS,
821 SIInstrFlags::FlatScratch);
822}
823
824const TargetRegisterClass *SIRegisterInfo::getPointerRegClass(
825 const MachineFunction &MF, unsigned Kind) const {
826 // This is inaccurate. It depends on the instruction and address space. The
827 // only place where we should hit this is for dealing with frame indexes /
828 // private accesses, so this is correct in that case.
829 return &AMDGPU::VGPR_32RegClass;
830}
831
832const TargetRegisterClass *
833SIRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
834 if (isAGPRClass(RC) && !ST.hasGFX90AInsts())
835 return getEquivalentVGPRClass(RC);
836
837 return RC;
838}
839
840static unsigned getNumSubRegsForSpillOp(unsigned Op) {
841
842 switch (Op) {
843 case AMDGPU::SI_SPILL_S1024_SAVE:
844 case AMDGPU::SI_SPILL_S1024_RESTORE:
845 case AMDGPU::SI_SPILL_V1024_SAVE:
846 case AMDGPU::SI_SPILL_V1024_RESTORE:
847 case AMDGPU::SI_SPILL_A1024_SAVE:
848 case AMDGPU::SI_SPILL_A1024_RESTORE:
849 return 32;
850 case AMDGPU::SI_SPILL_S512_SAVE:
851 case AMDGPU::SI_SPILL_S512_RESTORE:
852 case AMDGPU::SI_SPILL_V512_SAVE:
853 case AMDGPU::SI_SPILL_V512_RESTORE:
854 case AMDGPU::SI_SPILL_A512_SAVE:
855 case AMDGPU::SI_SPILL_A512_RESTORE:
856 return 16;
857 case AMDGPU::SI_SPILL_S256_SAVE:
858 case AMDGPU::SI_SPILL_S256_RESTORE:
859 case AMDGPU::SI_SPILL_V256_SAVE:
860 case AMDGPU::SI_SPILL_V256_RESTORE:
861 case AMDGPU::SI_SPILL_A256_SAVE:
862 case AMDGPU::SI_SPILL_A256_RESTORE:
863 return 8;
864 case AMDGPU::SI_SPILL_S224_SAVE:
865 case AMDGPU::SI_SPILL_S224_RESTORE:
866 case AMDGPU::SI_SPILL_V224_SAVE:
867 case AMDGPU::SI_SPILL_V224_RESTORE:
868 case AMDGPU::SI_SPILL_A224_SAVE:
869 case AMDGPU::SI_SPILL_A224_RESTORE:
870 return 7;
871 case AMDGPU::SI_SPILL_S192_SAVE:
872 case AMDGPU::SI_SPILL_S192_RESTORE:
873 case AMDGPU::SI_SPILL_V192_SAVE:
874 case AMDGPU::SI_SPILL_V192_RESTORE:
875 case AMDGPU::SI_SPILL_A192_SAVE:
876 case AMDGPU::SI_SPILL_A192_RESTORE:
877 return 6;
878 case AMDGPU::SI_SPILL_S160_SAVE:
879 case AMDGPU::SI_SPILL_S160_RESTORE:
880 case AMDGPU::SI_SPILL_V160_SAVE:
881 case AMDGPU::SI_SPILL_V160_RESTORE:
882 case AMDGPU::SI_SPILL_A160_SAVE:
883 case AMDGPU::SI_SPILL_A160_RESTORE:
884 return 5;
885 case AMDGPU::SI_SPILL_S128_SAVE:
886 case AMDGPU::SI_SPILL_S128_RESTORE:
887 case AMDGPU::SI_SPILL_V128_SAVE:
888 case AMDGPU::SI_SPILL_V128_RESTORE:
889 case AMDGPU::SI_SPILL_A128_SAVE:
890 case AMDGPU::SI_SPILL_A128_RESTORE:
891 return 4;
892 case AMDGPU::SI_SPILL_S96_SAVE:
893 case AMDGPU::SI_SPILL_S96_RESTORE:
894 case AMDGPU::SI_SPILL_V96_SAVE:
895 case AMDGPU::SI_SPILL_V96_RESTORE:
896 case AMDGPU::SI_SPILL_A96_SAVE:
897 case AMDGPU::SI_SPILL_A96_RESTORE:
898 return 3;
899 case AMDGPU::SI_SPILL_S64_SAVE:
900 case AMDGPU::SI_SPILL_S64_RESTORE:
901 case AMDGPU::SI_SPILL_V64_SAVE:
902 case AMDGPU::SI_SPILL_V64_RESTORE:
903 case AMDGPU::SI_SPILL_A64_SAVE:
904 case AMDGPU::SI_SPILL_A64_RESTORE:
905 return 2;
906 case AMDGPU::SI_SPILL_S32_SAVE:
907 case AMDGPU::SI_SPILL_S32_RESTORE:
908 case AMDGPU::SI_SPILL_V32_SAVE:
909 case AMDGPU::SI_SPILL_V32_RESTORE:
910 case AMDGPU::SI_SPILL_A32_SAVE:
911 case AMDGPU::SI_SPILL_A32_RESTORE:
912 return 1;
913 default: llvm_unreachable("Invalid spill opcode");
914 }
915}
916
917static int getOffsetMUBUFStore(unsigned Opc) {
918 switch (Opc) {
919 case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
920 return AMDGPU::BUFFER_STORE_DWORD_OFFSET;
921 case AMDGPU::BUFFER_STORE_BYTE_OFFEN:
922 return AMDGPU::BUFFER_STORE_BYTE_OFFSET;
923 case AMDGPU::BUFFER_STORE_SHORT_OFFEN:
924 return AMDGPU::BUFFER_STORE_SHORT_OFFSET;
925 case AMDGPU::BUFFER_STORE_DWORDX2_OFFEN:
926 return AMDGPU::BUFFER_STORE_DWORDX2_OFFSET;
927 case AMDGPU::BUFFER_STORE_DWORDX4_OFFEN:
928 return AMDGPU::BUFFER_STORE_DWORDX4_OFFSET;
929 case AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFEN:
930 return AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFSET;
931 case AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFEN:
932 return AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFSET;
933 default:
934 return -1;
935 }
936}
937
938static int getOffsetMUBUFLoad(unsigned Opc) {
939 switch (Opc) {
940 case AMDGPU::BUFFER_LOAD_DWORD_OFFEN:
941 return AMDGPU::BUFFER_LOAD_DWORD_OFFSET;
942 case AMDGPU::BUFFER_LOAD_UBYTE_OFFEN:
943 return AMDGPU::BUFFER_LOAD_UBYTE_OFFSET;
944 case AMDGPU::BUFFER_LOAD_SBYTE_OFFEN:
945 return AMDGPU::BUFFER_LOAD_SBYTE_OFFSET;
946 case AMDGPU::BUFFER_LOAD_USHORT_OFFEN:
947 return AMDGPU::BUFFER_LOAD_USHORT_OFFSET;
948 case AMDGPU::BUFFER_LOAD_SSHORT_OFFEN:
949 return AMDGPU::BUFFER_LOAD_SSHORT_OFFSET;
950 case AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN:
951 return AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET;
952 case AMDGPU::BUFFER_LOAD_DWORDX4_OFFEN:
953 return AMDGPU::BUFFER_LOAD_DWORDX4_OFFSET;
954 case AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFEN:
955 return AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFSET;
956 case AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFEN:
957 return AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFSET;
958 case AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFEN:
959 return AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFSET;
960 case AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFEN:
961 return AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFSET;
962 case AMDGPU::BUFFER_LOAD_SHORT_D16_OFFEN:
963 return AMDGPU::BUFFER_LOAD_SHORT_D16_OFFSET;
964 case AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFEN:
965 return AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFSET;
966 default:
967 return -1;
968 }
969}
970
971static MachineInstrBuilder spillVGPRtoAGPR(const GCNSubtarget &ST,
972 MachineBasicBlock &MBB,
973 MachineBasicBlock::iterator MI,
974 int Index, unsigned Lane,
975 unsigned ValueReg, bool IsKill) {
976 MachineFunction *MF = MBB.getParent();
977 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
978 const SIInstrInfo *TII = ST.getInstrInfo();
979
980 MCPhysReg Reg = MFI->getVGPRToAGPRSpill(Index, Lane);
981
982 if (Reg == AMDGPU::NoRegister)
983 return MachineInstrBuilder();
984
985 bool IsStore = MI->mayStore();
986 MachineRegisterInfo &MRI = MF->getRegInfo();
987 auto *TRI = static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo());
988
989 unsigned Dst = IsStore ? Reg : ValueReg;
990 unsigned Src = IsStore ? ValueReg : Reg;
991 unsigned Opc = (IsStore ^ TRI->isVGPR(MRI, Reg)) ? AMDGPU::V_ACCVGPR_WRITE_B32_e64
992 : AMDGPU::V_ACCVGPR_READ_B32_e64;
993
994 auto MIB = BuildMI(MBB, MI, MI->getDebugLoc(), TII->get(Opc), Dst)
995 .addReg(Src, getKillRegState(IsKill));
996 MIB->setAsmPrinterFlag(MachineInstr::ReloadReuse);
997 return MIB;
998}
999
1000// This differs from buildSpillLoadStore by only scavenging a VGPR. It does not
1001// need to handle the case where an SGPR may need to be spilled while spilling.
1002static bool buildMUBUFOffsetLoadStore(const GCNSubtarget &ST,
1003 MachineFrameInfo &MFI,
1004 MachineBasicBlock::iterator MI,
1005 int Index,
1006 int64_t Offset) {
1007 const SIInstrInfo *TII = ST.getInstrInfo();
1008 MachineBasicBlock *MBB = MI->getParent();
1009 const DebugLoc &DL = MI->getDebugLoc();
1010 bool IsStore = MI->mayStore();
1011
1012 unsigned Opc = MI->getOpcode();
1013 int LoadStoreOp = IsStore ?
1014 getOffsetMUBUFStore(Opc) : getOffsetMUBUFLoad(Opc);
1015 if (LoadStoreOp == -1)
1016 return false;
1017
1018 const MachineOperand *Reg = TII->getNamedOperand(*MI, AMDGPU::OpName::vdata);
1019 if (spillVGPRtoAGPR(ST, *MBB, MI, Index, 0, Reg->getReg(), false).getInstr())
1020 return true;
1021
1022 MachineInstrBuilder NewMI =
1023 BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
1024 .add(*Reg)
1025 .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc))
1026 .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::soffset))
1027 .addImm(Offset)
1028 .addImm(0) // cpol
1029 .addImm(0) // tfe
1030 .addImm(0) // swz
1031 .cloneMemRefs(*MI);
1032
1033 const MachineOperand *VDataIn = TII->getNamedOperand(*MI,
1034 AMDGPU::OpName::vdata_in);
1035 if (VDataIn)
1036 NewMI.add(*VDataIn);
1037 return true;
1038}
1039
1040static unsigned getFlatScratchSpillOpcode(const SIInstrInfo *TII,
1041 unsigned LoadStoreOp,
1042 unsigned EltSize) {
1043 bool IsStore = TII->get(LoadStoreOp).mayStore();
1044 bool UseST =
1045 AMDGPU::getNamedOperandIdx(LoadStoreOp, AMDGPU::OpName::vaddr) < 0 &&
1046 AMDGPU::getNamedOperandIdx(LoadStoreOp, AMDGPU::OpName::saddr) < 0;
1047
1048 switch (EltSize) {
1049 case 4:
1050 LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORD_SADDR
1051 : AMDGPU::SCRATCH_LOAD_DWORD_SADDR;
1052 break;
1053 case 8:
1054 LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORDX2_SADDR
1055 : AMDGPU::SCRATCH_LOAD_DWORDX2_SADDR;
1056 break;
1057 case 12:
1058 LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORDX3_SADDR
1059 : AMDGPU::SCRATCH_LOAD_DWORDX3_SADDR;
1060 break;
1061 case 16:
1062 LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORDX4_SADDR
1063 : AMDGPU::SCRATCH_LOAD_DWORDX4_SADDR;
1064 break;
1065 default:
1066 llvm_unreachable("Unexpected spill load/store size!");
1067 }
1068
1069 if (UseST)
1070 LoadStoreOp = AMDGPU::getFlatScratchInstSTfromSS(LoadStoreOp);
1071
1072 return LoadStoreOp;
1073}
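// Note (annotation, not part of the original file): the selection above maps
// the spill element size onto the SADDR flat-scratch opcodes and then, when
// neither vaddr nor saddr is present, rewrites the result to the ST-mode form:
//
//   EltSize  4 -> SCRATCH_{STORE,LOAD}_DWORD_SADDR
//   EltSize  8 -> SCRATCH_{STORE,LOAD}_DWORDX2_SADDR
//   EltSize 12 -> SCRATCH_{STORE,LOAD}_DWORDX3_SADDR
//   EltSize 16 -> SCRATCH_{STORE,LOAD}_DWORDX4_SADDR
//   (further converted by getFlatScratchInstSTfromSS when UseST is set)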
1074
1075void SIRegisterInfo::buildSpillLoadStore(
1076 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
1077 unsigned LoadStoreOp, int Index, Register ValueReg, bool IsKill,
1078 MCRegister ScratchOffsetReg, int64_t InstOffset, MachineMemOperand *MMO,
1079 RegScavenger *RS, LivePhysRegs *LiveRegs) const {
1080 assert((!RS || !LiveRegs) && "Only RS or LiveRegs can be set but not both");
1081
1082 MachineFunction *MF = MBB.getParent();
1083 const SIInstrInfo *TII = ST.getInstrInfo();
1084 const MachineFrameInfo &MFI = MF->getFrameInfo();
1085 const SIMachineFunctionInfo *FuncInfo = MF->getInfo<SIMachineFunctionInfo>();
1086
1087 const MCInstrDesc *Desc = &TII->get(LoadStoreOp);
1088 const DebugLoc &DL = MI != MBB.end() ? MI->getDebugLoc() : DebugLoc();
1089 bool IsStore = Desc->mayStore();
1090 bool IsFlat = TII->isFLATScratch(LoadStoreOp);
1091
1092 bool Scavenged = false;
1093 MCRegister SOffset = ScratchOffsetReg;
1094
1095 const TargetRegisterClass *RC = getRegClassForReg(MF->getRegInfo(), ValueReg);
1096 // On gfx90a+ an AGPR is a regular VGPR acceptable for loads and stores.
1097 const bool IsAGPR = !ST.hasGFX90AInsts() && hasAGPRs(RC);
1098 const unsigned RegWidth = AMDGPU::getRegBitWidth(RC->getID()) / 8;
1099
1100 // Always use 4 byte operations for AGPRs because we need to scavenge
1101 // a temporary VGPR.
1102 unsigned EltSize = (IsFlat && !IsAGPR) ? std::min(RegWidth, 16u) : 4u;
1103 unsigned NumSubRegs = RegWidth / EltSize;
1104 unsigned Size = NumSubRegs * EltSize;
1105 unsigned RemSize = RegWidth - Size;
1106 unsigned NumRemSubRegs = RemSize ? 1 : 0;
1107 int64_t Offset = InstOffset + MFI.getObjectOffset(Index);
1108 int64_t MaxOffset = Offset + Size + RemSize - EltSize;
1109 int64_t ScratchOffsetRegDelta = 0;
1110
1111 if (IsFlat && EltSize > 4) {
1112 LoadStoreOp = getFlatScratchSpillOpcode(TII, LoadStoreOp, EltSize);
1113 Desc = &TII->get(LoadStoreOp);
1114 }
1115
1116 Align Alignment = MFI.getObjectAlign(Index);
1117 const MachinePointerInfo &BasePtrInfo = MMO->getPointerInfo();
1118
1119 assert((IsFlat || ((Offset % EltSize) == 0)) &&
1120 "unexpected VGPR spill offset");
1121
1122 bool IsOffsetLegal =
1123 IsFlat ? TII->isLegalFLATOffset(MaxOffset, AMDGPUAS::PRIVATE_ADDRESS,
1124 SIInstrFlags::FlatScratch)
1125 : SIInstrInfo::isLegalMUBUFImmOffset(MaxOffset);
1126 if (!IsOffsetLegal || (IsFlat && !SOffset && !ST.hasFlatScratchSTMode())) {
1127 SOffset = MCRegister();
1128
1129 // We currently only support spilling VGPRs to EltSize boundaries, meaning
1130 // we can simplify the adjustment of Offset here to just scale with
1131 // WavefrontSize.
1132 if (!IsFlat)
1133 Offset *= ST.getWavefrontSize();
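// (Annotation, not part of the original file: e.g. on a wave64 subtarget an
//  unswizzled frame offset of 8 bytes becomes 8 * 64 = 512 in the swizzled
//  per-wave scratch layout.)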
1134
1135 // We don't have access to the register scavenger if this function is called
1136 // during PEI::scavengeFrameVirtualRegs(), so use LiveRegs in this case.
1137 if (RS) {
1138 SOffset = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0, false);
1139 } else if (LiveRegs) {
1140 for (MCRegister Reg : AMDGPU::SGPR_32RegClass) {
1141 if (LiveRegs->available(MF->getRegInfo(), Reg)) {
1142 SOffset = Reg;
1143 break;
1144 }
1145 }
1146 }
1147
1148 if (!SOffset) {
1149 // There are no free SGPRs, and we are already in the process of spilling
1150 // VGPRs. Since we need a VGPR in order to spill SGPRs (this is true on
1151 // SI/CI, and on VI it remains true until we implement spilling using
1152 // scalar stores), we have no way to free up an SGPR. Our solution here is
1153 // to add the offset directly to the ScratchOffset or StackPtrOffset
1154 // register, and then subtract the offset after the spill to return the
1155 // register to its original value.
1156 if (!ScratchOffsetReg)
1157 ScratchOffsetReg = FuncInfo->getStackPtrOffsetReg();
1158 SOffset = ScratchOffsetReg;
1159 ScratchOffsetRegDelta = Offset;
1160 } else {
1161 Scavenged = true;
1162 }
1163
1164 if (!SOffset)
1165 report_fatal_error("could not scavenge SGPR to spill in entry function");
1166
1167 if (ScratchOffsetReg == AMDGPU::NoRegister) {
1168 BuildMI(MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), SOffset).addImm(Offset);
1169 } else {
1170 BuildMI(MBB, MI, DL, TII->get(AMDGPU::S_ADD_I32), SOffset)
1171 .addReg(ScratchOffsetReg)
1172 .addImm(Offset);
1173 }
1174
1175 Offset = 0;
1176 }
1177
1178 if (IsFlat && SOffset == AMDGPU::NoRegister) {
1179 assert(AMDGPU::getNamedOperandIdx(LoadStoreOp, AMDGPU::OpName::vaddr) < 0
1180 && "Unexpected vaddr for flat scratch with a FI operand");
1181
1182 assert(ST.hasFlatScratchSTMode());
1183 LoadStoreOp = AMDGPU::getFlatScratchInstSTfromSS(LoadStoreOp);
1184 Desc = &TII->get(LoadStoreOp);
Value stored to 'Desc' is never read
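(Annotation: the store is dead because 'Desc' is unconditionally reassigned at
line 1195, at the top of the per-subregister loop below, before any further
read of the pointer.)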
1185 }
1186
1187 Register TmpReg;
1188
1189 for (unsigned i = 0, e = NumSubRegs + NumRemSubRegs, RegOffset = 0; i != e;
1190 ++i, RegOffset += EltSize) {
1191 if (i == NumSubRegs) {
1192 EltSize = RemSize;
1193 LoadStoreOp = getFlatScratchSpillOpcode(TII, LoadStoreOp, EltSize);
1194 }
1195 Desc = &TII->get(LoadStoreOp);
1196
1197 unsigned NumRegs = EltSize / 4;
1198 Register SubReg = e == 1
1199 ? ValueReg
1200 : Register(getSubReg(ValueReg,
1201 getSubRegFromChannel(RegOffset / 4, NumRegs)));
1202
1203 unsigned SOffsetRegState = 0;
1204 unsigned SrcDstRegState = getDefRegState(!IsStore);
1205 if (i + 1 == e) {
1206 SOffsetRegState |= getKillRegState(Scavenged);
1207 // The last implicit use carries the "Kill" flag.
1208 SrcDstRegState |= getKillRegState(IsKill);
1209 }
1210
1211 // Make sure the whole register is defined if there are undef components by
1212 // adding an implicit def of the super-reg on the first instruction.
1213 bool NeedSuperRegDef = e > 1 && IsStore && i == 0;
1214 bool NeedSuperRegImpOperand = e > 1;
1215
1216 // Remaining element size to spill into memory after some parts of it
1217 // have been spilled into either AGPRs or VGPRs.
1218 unsigned RemEltSize = EltSize;
1219
1220 // AGPRs used to spill VGPRs (and vice versa) are allocated in reverse order,
1221 // starting from the last lane. If a register cannot be completely spilled
1222 // into another register, this ensures its alignment does not change. For
1223 // targets with a VGPR alignment requirement this is important when flat
1224 // scratch is used, as we might otherwise get a scratch_load or
1225 // scratch_store of an unaligned register.
1226 for (int LaneS = (RegOffset + EltSize) / 4 - 1, Lane = LaneS,
1227 LaneE = RegOffset / 4;
1228 Lane >= LaneE; --Lane) {
1229 bool IsSubReg = e > 1 || EltSize > 4;
1230 Register Sub = IsSubReg
1231 ? Register(getSubReg(ValueReg, getSubRegFromChannel(Lane)))
1232 : ValueReg;
1233 auto MIB = spillVGPRtoAGPR(ST, MBB, MI, Index, Lane, Sub, IsKill);
1234 if (!MIB.getInstr())
1235 break;
1236 if (NeedSuperRegDef || (IsSubReg && IsStore && Lane == LaneS && !i)) {
1237 MIB.addReg(ValueReg, RegState::ImplicitDefine);
1238 NeedSuperRegDef = false;
1239 }
1240 if (IsSubReg || NeedSuperRegImpOperand) {
1241 NeedSuperRegImpOperand = true;
1242 unsigned State = SrcDstRegState;
1243 if (Lane != LaneE)
1244 State &= ~RegState::Kill;
1245 MIB.addReg(ValueReg, RegState::Implicit | State);
1246 }
1247 RemEltSize -= 4;
1248 }
1249
1250 if (!RemEltSize) // Fully spilled into AGPRs.
1251 continue;
1252
1253 if (RemEltSize != EltSize) { // Partially spilled to AGPRs
1254 assert(IsFlat && EltSize > 4);
1255
1256 unsigned NumRegs = RemEltSize / 4;
1257 SubReg = Register(getSubReg(ValueReg,
1258 getSubRegFromChannel(RegOffset / 4, NumRegs)));
1259 unsigned Opc = getFlatScratchSpillOpcode(TII, LoadStoreOp, RemEltSize);
1260 Desc = &TII->get(Opc);
1261 }
1262
1263 unsigned FinalReg = SubReg;
1264
1265 if (IsAGPR) {
1266 assert(EltSize == 4);
1267
1268 if (!TmpReg) {
1269 assert(RS && "Needs to have RegScavenger to spill an AGPR!");
1270 // FIXME: change to scavengeRegisterBackwards()
1271 TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
1272 RS->setRegUsed(TmpReg);
1273 }
1274 if (IsStore) {
1275 auto AccRead = BuildMI(MBB, MI, DL,
1276 TII->get(AMDGPU::V_ACCVGPR_READ_B32_e64), TmpReg)
1277 .addReg(SubReg, getKillRegState(IsKill));
1278 if (NeedSuperRegDef)
1279 AccRead.addReg(ValueReg, RegState::ImplicitDefine);
1280 AccRead->setAsmPrinterFlag(MachineInstr::ReloadReuse);
1281 }
1282 SubReg = TmpReg;
1283 }
1284
1285 MachinePointerInfo PInfo = BasePtrInfo.getWithOffset(RegOffset);
1286 MachineMemOperand *NewMMO =
1287 MF->getMachineMemOperand(PInfo, MMO->getFlags(), RemEltSize,
1288 commonAlignment(Alignment, RegOffset));
1289
1290 auto MIB =
1291 BuildMI(MBB, MI, DL, *Desc)
1292 .addReg(SubReg, getDefRegState(!IsStore) | getKillRegState(IsKill));
1293 if (!IsFlat)
1294 MIB.addReg(FuncInfo->getScratchRSrcReg());
1295
1296 if (SOffset == AMDGPU::NoRegister) {
1297 if (!IsFlat)
1298 MIB.addImm(0);
1299 } else {
1300 MIB.addReg(SOffset, SOffsetRegState);
1301 }
1302 MIB.addImm(Offset + RegOffset)
1303 .addImm(0); // cpol
1304 if (!IsFlat)
1305 MIB.addImm(0) // tfe
1306 .addImm(0); // swz
1307 MIB.addMemOperand(NewMMO);
1308
1309 if (!IsAGPR && NeedSuperRegDef)
1310 MIB.addReg(ValueReg, RegState::ImplicitDefine);
1311
1312 if (!IsStore && TmpReg != AMDGPU::NoRegister) {
1313 MIB = BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64),
1314 FinalReg)
1315 .addReg(TmpReg, RegState::Kill);
1316 MIB->setAsmPrinterFlag(MachineInstr::ReloadReuse);
1317 }
1318
1319 if (NeedSuperRegImpOperand)
1320 MIB.addReg(ValueReg, RegState::Implicit | SrcDstRegState);
1321 }
1322
1323 if (ScratchOffsetRegDelta != 0) {
1324 // Subtract the offset we added to the ScratchOffset register.
1325 BuildMI(MBB, MI, DL, TII->get(AMDGPU::S_ADD_I32), SOffset)
1326 .addReg(SOffset)
1327 .addImm(-ScratchOffsetRegDelta);
1328 }
1329}
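// Note (annotation, not part of the original file): a worked example of the
// sizing logic in buildSpillLoadStore. Spilling a 256-bit VGPR tuple with flat
// scratch enabled and no AGPRs involved gives:
//
//   RegWidth = 256 / 8 = 32 bytes, EltSize = min(32, 16) = 16,
//   NumSubRegs = 2, RemSize = 0,
//
// so the loop emits two 16-byte scratch accesses at RegOffset 0 and 16. With
// MUBUF (IsFlat == false) EltSize stays 4 and the same spill becomes eight
// dword accesses.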
1330
1331void SIRegisterInfo::buildVGPRSpillLoadStore(SGPRSpillBuilder &SB, int Index,
1332 int Offset, bool IsLoad,
1333 bool IsKill) const {
1334 // Load/store VGPR
1335 MachineFrameInfo &FrameInfo = SB.MF.getFrameInfo();
1336 assert(FrameInfo.getStackID(Index) != TargetStackID::SGPRSpill);
1337
1338 Register FrameReg =
1339 FrameInfo.isFixedObjectIndex(Index) && hasBasePointer(SB.MF)
1340 ? getBaseRegister()
1341 : getFrameRegister(SB.MF);
1342
1343 Align Alignment = FrameInfo.getObjectAlign(Index);
1344 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(SB.MF, Index);
1345 MachineMemOperand *MMO = SB.MF.getMachineMemOperand(
1346 PtrInfo, IsLoad ? MachineMemOperand::MOLoad : MachineMemOperand::MOStore,
1347 SB.EltSize, Alignment);
1348
1349 if (IsLoad) {
1350 unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_LOAD_DWORD_SADDR
1351 : AMDGPU::BUFFER_LOAD_DWORD_OFFSET;
1352 buildSpillLoadStore(*SB.MBB, SB.MI, Opc, Index, SB.TmpVGPR, false, FrameReg,
1353 Offset * SB.EltSize, MMO, SB.RS);
1354 } else {
1355 unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_STORE_DWORD_SADDR
1356 : AMDGPU::BUFFER_STORE_DWORD_OFFSET;
1357 buildSpillLoadStore(*SB.MBB, SB.MI, Opc, Index, SB.TmpVGPR, IsKill,
1358 FrameReg, Offset * SB.EltSize, MMO, SB.RS);
1359 // This only ever adds one VGPR spill
1360 SB.MFI.addToSpilledVGPRs(1);
1361 }
1362}
1363
1364bool SIRegisterInfo::spillSGPR(MachineBasicBlock::iterator MI,
1365 int Index,
1366 RegScavenger *RS,
1367 LiveIntervals *LIS,
1368 bool OnlyToVGPR) const {
1369 SGPRSpillBuilder SB(*this, *ST.getInstrInfo(), isWave32, MI, Index, RS);
1370
1371 ArrayRef<SIMachineFunctionInfo::SpilledReg> VGPRSpills =
1372 SB.MFI.getSGPRToVGPRSpills(Index);
1373 bool SpillToVGPR = !VGPRSpills.empty();
1374 if (OnlyToVGPR && !SpillToVGPR)
1375 return false;
1376
1377 assert(SpillToVGPR || (SB.SuperReg != SB.MFI.getStackPtrOffsetReg() &&
1378 SB.SuperReg != SB.MFI.getFrameOffsetReg()));
1379
1380 if (SpillToVGPR) {
1381
1382 assert(SB.NumSubRegs == VGPRSpills.size() &&
1383 "Num of VGPR lanes should be equal to num of SGPRs spilled");
1384
1385 for (unsigned i = 0, e = SB.NumSubRegs; i < e; ++i) {
1386 Register SubReg =
1387 SB.NumSubRegs == 1
1388 ? SB.SuperReg
1389 : Register(getSubReg(SB.SuperReg, SB.SplitParts[i]));
1390 SIMachineFunctionInfo::SpilledReg Spill = VGPRSpills[i];
1391
1392 bool UseKill = SB.IsKill && i == SB.NumSubRegs - 1;
1393
1394 // Mark the "old value of vgpr" input undef only if this is the first sgpr
1395 // spill to this specific vgpr in the first basic block.
1396 auto MIB = BuildMI(*SB.MBB, MI, SB.DL,
1397 SB.TII.get(AMDGPU::V_WRITELANE_B32), Spill.VGPR)
1398 .addReg(SubReg, getKillRegState(UseKill))
1399 .addImm(Spill.Lane)
1400 .addReg(Spill.VGPR);
1401 if (LIS) {
1402 if (i == 0)
1403 LIS->ReplaceMachineInstrInMaps(*MI, *MIB);
1404 else
1405 LIS->InsertMachineInstrInMaps(*MIB);
1406 }
1407
1408 if (i == 0 && SB.NumSubRegs > 1) {
1409 // We may be spilling a super-register which is only partially defined,
1410 // and need to ensure later spills think the value is defined.
1411 MIB.addReg(SB.SuperReg, RegState::ImplicitDefine);
1412 }
1413
1414 if (SB.NumSubRegs > 1)
1415 MIB.addReg(SB.SuperReg, getKillRegState(UseKill) | RegState::Implicit);
1416
1417 // FIXME: Since this spills to another register instead of an actual
1418 // frame index, we should delete the frame index when all references to
1419 // it are fixed.
1420 }
1421 } else {
1422 SB.prepare();
1423
1424 // SubReg carries the "Kill" flag when SubReg == SB.SuperReg.
1425 unsigned SubKillState = getKillRegState((SB.NumSubRegs == 1) && SB.IsKill);
1426
1427 // Per VGPR helper data
1428 auto PVD = SB.getPerVGPRData();
1429
1430 for (unsigned Offset = 0; Offset < PVD.NumVGPRs; ++Offset) {
1431 unsigned TmpVGPRFlags = RegState::Undef;
1432
1433 // Write sub registers into the VGPR
1434 for (unsigned i = Offset * PVD.PerVGPR,
1435 e = std::min((Offset + 1) * PVD.PerVGPR, SB.NumSubRegs);
1436 i < e; ++i) {
1437 Register SubReg =
1438 SB.NumSubRegs == 1
1439 ? SB.SuperReg
1440 : Register(getSubReg(SB.SuperReg, SB.SplitParts[i]));
1441
1442 MachineInstrBuilder WriteLane =
1443 BuildMI(*SB.MBB, MI, SB.DL, SB.TII.get(AMDGPU::V_WRITELANE_B32),
1444 SB.TmpVGPR)
1445 .addReg(SubReg, SubKillState)
1446 .addImm(i % PVD.PerVGPR)
1447 .addReg(SB.TmpVGPR, TmpVGPRFlags);
1448 TmpVGPRFlags = 0;
1449
1450 if (LIS) {
1451 if (i == 0)
1452 LIS->ReplaceMachineInstrInMaps(*MI, *WriteLane);
1453 else
1454 LIS->InsertMachineInstrInMaps(*WriteLane);
1455 }
1456
1457 // There could be undef components of a spilled super register.
1458 // TODO: Can we detect this and skip the spill?
1459 if (SB.NumSubRegs > 1) {
1460 // The last implicit use of the SB.SuperReg carries the "Kill" flag.
1461 unsigned SuperKillState = 0;
1462 if (i + 1 == SB.NumSubRegs)
1463 SuperKillState |= getKillRegState(SB.IsKill);
1464 WriteLane.addReg(SB.SuperReg, RegState::Implicit | SuperKillState);
1465 }
1466 }
1467
1468 // Write out VGPR
1469 SB.readWriteTmpVGPR(Offset, /*IsLoad*/ false);
1470 }
1471
1472 SB.restore();
1473 }
1474
1475 MI->eraseFromParent();
1476 SB.MFI.addToSpilledSGPRs(SB.NumSubRegs);
1477
1478 if (LIS)
1479 LIS->removeAllRegUnitsForPhysReg(SB.SuperReg);
1480
1481 return true;
1482}
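// Note (annotation, not part of the original file): an illustrative expansion
// of the SpillToVGPR path, assuming a killed 64-bit SGPR pair whose two
// sub-registers were assigned lanes 0 and 1 of the same spill VGPR (register
// names are placeholders):
//
//   $vgpr0 = V_WRITELANE_B32 $sgpr4, 0, $vgpr0,
//            implicit-def $sgpr4_sgpr5, implicit $sgpr4_sgpr5
//   $vgpr0 = V_WRITELANE_B32 killed $sgpr5, 1, $vgpr0,
//            implicit killed $sgpr4_sgpr5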
1483
1484bool SIRegisterInfo::restoreSGPR(MachineBasicBlock::iterator MI,
1485 int Index,
1486 RegScavenger *RS,
1487 LiveIntervals *LIS,
1488 bool OnlyToVGPR) const {
1489 SGPRSpillBuilder SB(*this, *ST.getInstrInfo(), isWave32, MI, Index, RS);
1490
1491 ArrayRef<SIMachineFunctionInfo::SpilledReg> VGPRSpills =
1492 SB.MFI.getSGPRToVGPRSpills(Index);
1493 bool SpillToVGPR = !VGPRSpills.empty();
1494 if (OnlyToVGPR && !SpillToVGPR)
1495 return false;
1496
1497 if (SpillToVGPR) {
1498 for (unsigned i = 0, e = SB.NumSubRegs; i < e; ++i) {
1499 Register SubReg =
1500 SB.NumSubRegs == 1
1501 ? SB.SuperReg
1502 : Register(getSubReg(SB.SuperReg, SB.SplitParts[i]));
1503
1504 SIMachineFunctionInfo::SpilledReg Spill = VGPRSpills[i];
1505 auto MIB = BuildMI(*SB.MBB, MI, SB.DL, SB.TII.get(AMDGPU::V_READLANE_B32),
1506 SubReg)
1507 .addReg(Spill.VGPR)
1508 .addImm(Spill.Lane);
1509 if (SB.NumSubRegs > 1 && i == 0)
1510 MIB.addReg(SB.SuperReg, RegState::ImplicitDefine);
1511 if (LIS) {
1512 if (i == e - 1)
1513 LIS->ReplaceMachineInstrInMaps(*MI, *MIB);
1514 else
1515 LIS->InsertMachineInstrInMaps(*MIB);
1516 }
1517
1518 }
1519 } else {
1520 SB.prepare();
1521
1522 // Per VGPR helper data
1523 auto PVD = SB.getPerVGPRData();
1524
1525 for (unsigned Offset = 0; Offset < PVD.NumVGPRs; ++Offset) {
1526 // Load in VGPR data
1527 SB.readWriteTmpVGPR(Offset, /*IsLoad*/ true);
1528
1529 // Unpack lanes
1530 for (unsigned i = Offset * PVD.PerVGPR,
1531 e = std::min((Offset + 1) * PVD.PerVGPR, SB.NumSubRegs);
1532 i < e; ++i) {
1533 Register SubReg =
1534 SB.NumSubRegs == 1
1535 ? SB.SuperReg
1536 : Register(getSubReg(SB.SuperReg, SB.SplitParts[i]));
1537
1538 bool LastSubReg = (i + 1 == e);
1539 auto MIB = BuildMI(*SB.MBB, MI, SB.DL,
1540 SB.TII.get(AMDGPU::V_READLANE_B32), SubReg)
1541 .addReg(SB.TmpVGPR, getKillRegState(LastSubReg))
1542 .addImm(i);
1543 if (SB.NumSubRegs > 1 && i == 0)
1544 MIB.addReg(SB.SuperReg, RegState::ImplicitDefine);
1545 if (LIS) {
1546 if (i == e - 1)
1547 LIS->ReplaceMachineInstrInMaps(*MI, *MIB);
1548 else
1549 LIS->InsertMachineInstrInMaps(*MIB);
1550 }
1551 }
1552 }
1553
1554 SB.restore();
1555 }
1556
1557 MI->eraseFromParent();
1558
1559 if (LIS)
1560 LIS->removeAllRegUnitsForPhysReg(SB.SuperReg);
1561
1562 return true;
1563}
1564
1565bool SIRegisterInfo::spillEmergencySGPR(MachineBasicBlock::iterator MI,
1566 MachineBasicBlock &RestoreMBB,
1567 Register SGPR, RegScavenger *RS) const {
1568 SGPRSpillBuilder SB(*this, *ST.getInstrInfo(), isWave32, MI, SGPR, false, 0,
1569 RS);
1570 SB.prepare();
1571 // Generate the spill of SGPR to SB.TmpVGPR.
1572 unsigned SubKillState = getKillRegState((SB.NumSubRegs == 1) && SB.IsKill);
1573 auto PVD = SB.getPerVGPRData();
1574 for (unsigned Offset = 0; Offset < PVD.NumVGPRs; ++Offset) {
1575 unsigned TmpVGPRFlags = RegState::Undef;
1576 // Write sub registers into the VGPR
1577 for (unsigned i = Offset * PVD.PerVGPR,
1578 e = std::min((Offset + 1) * PVD.PerVGPR, SB.NumSubRegs);
1579 i < e; ++i) {
1580 Register SubReg =
1581 SB.NumSubRegs == 1
1582 ? SB.SuperReg
1583 : Register(getSubReg(SB.SuperReg, SB.SplitParts[i]));
1584
1585 MachineInstrBuilder WriteLane =
1586 BuildMI(*SB.MBB, MI, SB.DL, SB.TII.get(AMDGPU::V_WRITELANE_B32),
1587 SB.TmpVGPR)
1588 .addReg(SubReg, SubKillState)
1589 .addImm(i % PVD.PerVGPR)
1590 .addReg(SB.TmpVGPR, TmpVGPRFlags);
1591 TmpVGPRFlags = 0;
1592 // There could be undef components of a spilled super register.
1593 // TODO: Can we detect this and skip the spill?
1594 if (SB.NumSubRegs > 1) {
1595 // The last implicit use of the SB.SuperReg carries the "Kill" flag.
1596 unsigned SuperKillState = 0;
1597 if (i + 1 == SB.NumSubRegs)
1598 SuperKillState |= getKillRegState(SB.IsKill);
1599 WriteLane.addReg(SB.SuperReg, RegState::Implicit | SuperKillState);
1600 }
1601 }
1602 // Don't need to write VGPR out.
1603 }
1604
1605 // Restore clobbered registers in the specified restore block.
1606 MI = RestoreMBB.end();
1607 SB.setMI(&RestoreMBB, MI);
1608 // Generate the restore of SGPR from SB.TmpVGPR.
1609 for (unsigned Offset = 0; Offset < PVD.NumVGPRs; ++Offset) {
1610 // Don't need to load VGPR in.
1611 // Unpack lanes
1612 for (unsigned i = Offset * PVD.PerVGPR,
1613 e = std::min((Offset + 1) * PVD.PerVGPR, SB.NumSubRegs);
1614 i < e; ++i) {
1615 Register SubReg =
1616 SB.NumSubRegs == 1
1617 ? SB.SuperReg
1618 : Register(getSubReg(SB.SuperReg, SB.SplitParts[i]));
1619 bool LastSubReg = (i + 1 == e);
1620 auto MIB = BuildMI(*SB.MBB, MI, SB.DL, SB.TII.get(AMDGPU::V_READLANE_B32),
1621 SubReg)
1622 .addReg(SB.TmpVGPR, getKillRegState(LastSubReg))
1623 .addImm(i);
1624 if (SB.NumSubRegs > 1 && i == 0)
1625 MIB.addReg(SB.SuperReg, RegState::ImplicitDefine);
1626 }
1627 }
1628 SB.restore();
1629
1630 SB.MFI.addToSpilledSGPRs(SB.NumSubRegs);
1631 return false;
1632}
1633
1634/// Special case of eliminateFrameIndex. Returns true if the SGPR was spilled to
1635/// a VGPR and the stack slot can be safely eliminated when all other users are
1636/// handled.
1637bool SIRegisterInfo::eliminateSGPRToVGPRSpillFrameIndex(
1638 MachineBasicBlock::iterator MI,
1639 int FI,
1640 RegScavenger *RS,
1641 LiveIntervals *LIS) const {
1642 switch (MI->getOpcode()) {
1643 case AMDGPU::SI_SPILL_S1024_SAVE:
1644 case AMDGPU::SI_SPILL_S512_SAVE:
1645 case AMDGPU::SI_SPILL_S256_SAVE:
1646 case AMDGPU::SI_SPILL_S224_SAVE:
1647 case AMDGPU::SI_SPILL_S192_SAVE:
1648 case AMDGPU::SI_SPILL_S160_SAVE:
1649 case AMDGPU::SI_SPILL_S128_SAVE:
1650 case AMDGPU::SI_SPILL_S96_SAVE:
1651 case AMDGPU::SI_SPILL_S64_SAVE:
1652 case AMDGPU::SI_SPILL_S32_SAVE:
1653 return spillSGPR(MI, FI, RS, LIS, true);
1654 case AMDGPU::SI_SPILL_S1024_RESTORE:
1655 case AMDGPU::SI_SPILL_S512_RESTORE:
1656 case AMDGPU::SI_SPILL_S256_RESTORE:
1657 case AMDGPU::SI_SPILL_S224_RESTORE:
1658 case AMDGPU::SI_SPILL_S192_RESTORE:
1659 case AMDGPU::SI_SPILL_S160_RESTORE:
1660 case AMDGPU::SI_SPILL_S128_RESTORE:
1661 case AMDGPU::SI_SPILL_S96_RESTORE:
1662 case AMDGPU::SI_SPILL_S64_RESTORE:
1663 case AMDGPU::SI_SPILL_S32_RESTORE:
1664 return restoreSGPR(MI, FI, RS, LIS, true);
1665 default:
1666 llvm_unreachable("not an SGPR spill instruction");
1667 }
1668}
1669
1670void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
1671 int SPAdj, unsigned FIOperandNum,
1672 RegScavenger *RS) const {
1673 MachineFunction *MF = MI->getParent()->getParent();
1674 MachineBasicBlock *MBB = MI->getParent();
1675 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1676 MachineFrameInfo &FrameInfo = MF->getFrameInfo();
1677 const SIInstrInfo *TII = ST.getInstrInfo();
1678 DebugLoc DL = MI->getDebugLoc();
1679
1680 assert(SPAdj == 0 && "unhandled SP adjustment in call sequence?");
1681
1682 MachineOperand &FIOp = MI->getOperand(FIOperandNum);
1683 int Index = MI->getOperand(FIOperandNum).getIndex();
1684
1685 Register FrameReg = FrameInfo.isFixedObjectIndex(Index) && hasBasePointer(*MF)
1686 ? getBaseRegister()
1687 : getFrameRegister(*MF);
1688
1689 switch (MI->getOpcode()) {
1690 // SGPR register spill
1691 case AMDGPU::SI_SPILL_S1024_SAVE:
1692 case AMDGPU::SI_SPILL_S512_SAVE:
1693 case AMDGPU::SI_SPILL_S256_SAVE:
1694 case AMDGPU::SI_SPILL_S224_SAVE:
1695 case AMDGPU::SI_SPILL_S192_SAVE:
1696 case AMDGPU::SI_SPILL_S160_SAVE:
1697 case AMDGPU::SI_SPILL_S128_SAVE:
1698 case AMDGPU::SI_SPILL_S96_SAVE:
1699 case AMDGPU::SI_SPILL_S64_SAVE:
1700 case AMDGPU::SI_SPILL_S32_SAVE: {
1701 spillSGPR(MI, Index, RS);
1702 break;
1703 }
1704
1705 // SGPR register restore
1706 case AMDGPU::SI_SPILL_S1024_RESTORE:
1707 case AMDGPU::SI_SPILL_S512_RESTORE:
1708 case AMDGPU::SI_SPILL_S256_RESTORE:
1709 case AMDGPU::SI_SPILL_S224_RESTORE:
1710 case AMDGPU::SI_SPILL_S192_RESTORE:
1711 case AMDGPU::SI_SPILL_S160_RESTORE:
1712 case AMDGPU::SI_SPILL_S128_RESTORE:
1713 case AMDGPU::SI_SPILL_S96_RESTORE:
1714 case AMDGPU::SI_SPILL_S64_RESTORE:
1715 case AMDGPU::SI_SPILL_S32_RESTORE: {
1716 restoreSGPR(MI, Index, RS);
1717 break;
1718 }
1719
1720 // VGPR register spill
1721 case AMDGPU::SI_SPILL_V1024_SAVE:
1722 case AMDGPU::SI_SPILL_V512_SAVE:
1723 case AMDGPU::SI_SPILL_V256_SAVE:
1724 case AMDGPU::SI_SPILL_V224_SAVE:
1725 case AMDGPU::SI_SPILL_V192_SAVE:
1726 case AMDGPU::SI_SPILL_V160_SAVE:
1727 case AMDGPU::SI_SPILL_V128_SAVE:
1728 case AMDGPU::SI_SPILL_V96_SAVE:
1729 case AMDGPU::SI_SPILL_V64_SAVE:
1730 case AMDGPU::SI_SPILL_V32_SAVE:
1731 case AMDGPU::SI_SPILL_A1024_SAVE:
1732 case AMDGPU::SI_SPILL_A512_SAVE:
1733 case AMDGPU::SI_SPILL_A256_SAVE:
1734 case AMDGPU::SI_SPILL_A224_SAVE:
1735 case AMDGPU::SI_SPILL_A192_SAVE:
1736 case AMDGPU::SI_SPILL_A160_SAVE:
1737 case AMDGPU::SI_SPILL_A128_SAVE:
1738 case AMDGPU::SI_SPILL_A96_SAVE:
1739 case AMDGPU::SI_SPILL_A64_SAVE:
1740 case AMDGPU::SI_SPILL_A32_SAVE: {
1741 const MachineOperand *VData = TII->getNamedOperand(*MI,
1742 AMDGPU::OpName::vdata);
1743 assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg() ==
1744 MFI->getStackPtrOffsetReg());
1745
1746 unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_STORE_DWORD_SADDR
1747 : AMDGPU::BUFFER_STORE_DWORD_OFFSET;
1748 auto *MBB = MI->getParent();
1749 buildSpillLoadStore(
1750 *MBB, MI, Opc, Index, VData->getReg(), VData->isKill(), FrameReg,
1751 TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(),
1752 *MI->memoperands_begin(), RS);
1753 MFI->addToSpilledVGPRs(getNumSubRegsForSpillOp(MI->getOpcode()));
1754 MI->eraseFromParent();
1755 break;
1756 }
1757 case AMDGPU::SI_SPILL_V32_RESTORE:
1758 case AMDGPU::SI_SPILL_V64_RESTORE:
1759 case AMDGPU::SI_SPILL_V96_RESTORE:
1760 case AMDGPU::SI_SPILL_V128_RESTORE:
1761 case AMDGPU::SI_SPILL_V160_RESTORE:
1762 case AMDGPU::SI_SPILL_V192_RESTORE:
1763 case AMDGPU::SI_SPILL_V224_RESTORE:
1764 case AMDGPU::SI_SPILL_V256_RESTORE:
1765 case AMDGPU::SI_SPILL_V512_RESTORE:
1766 case AMDGPU::SI_SPILL_V1024_RESTORE:
1767 case AMDGPU::SI_SPILL_A32_RESTORE:
1768 case AMDGPU::SI_SPILL_A64_RESTORE:
1769 case AMDGPU::SI_SPILL_A96_RESTORE:
1770 case AMDGPU::SI_SPILL_A128_RESTORE:
1771 case AMDGPU::SI_SPILL_A160_RESTORE:
1772 case AMDGPU::SI_SPILL_A192_RESTORE:
1773 case AMDGPU::SI_SPILL_A224_RESTORE:
1774 case AMDGPU::SI_SPILL_A256_RESTORE:
1775 case AMDGPU::SI_SPILL_A512_RESTORE:
1776 case AMDGPU::SI_SPILL_A1024_RESTORE: {
1777 const MachineOperand *VData = TII->getNamedOperand(*MI,
1778 AMDGPU::OpName::vdata);
1779 assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg() ==
1780 MFI->getStackPtrOffsetReg());
1781
1782 unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_LOAD_DWORD_SADDR
1783 : AMDGPU::BUFFER_LOAD_DWORD_OFFSET;
1784 auto *MBB = MI->getParent();
1785 buildSpillLoadStore(
1786 *MBB, MI, Opc, Index, VData->getReg(), VData->isKill(), FrameReg,
1787 TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(),
1788 *MI->memoperands_begin(), RS);
1789 MI->eraseFromParent();
1790 break;
1791 }
1792
1793 default: {
1794 // Other access to frame index
1795 const DebugLoc &DL = MI->getDebugLoc();
1796
1797 int64_t Offset = FrameInfo.getObjectOffset(Index);
1798 if (ST.enableFlatScratch()) {
1799 if (TII->isFLATScratch(*MI)) {
1800 assert((int16_t)FIOperandNum ==
1801 AMDGPU::getNamedOperandIdx(MI->getOpcode(),
1802 AMDGPU::OpName::saddr));
1803
1804 // The offset is always swizzled, just replace it
1805 if (FrameReg)
1806 FIOp.ChangeToRegister(FrameReg, false);
1807
1808 if (!Offset)
1809 return;
1810
1811 MachineOperand *OffsetOp =
1812 TII->getNamedOperand(*MI, AMDGPU::OpName::offset);
1813 int64_t NewOffset = Offset + OffsetOp->getImm();
1814 if (TII->isLegalFLATOffset(NewOffset, AMDGPUAS::PRIVATE_ADDRESS,
1815 SIInstrFlags::FlatScratch)) {
1816 OffsetOp->setImm(NewOffset);
1817 if (FrameReg)
1818 return;
1819 Offset = 0;
1820 }
1821
1822 assert(!TII->getNamedOperand(*MI, AMDGPU::OpName::vaddr) &&
1823 "Unexpected vaddr for flat scratch with a FI operand");
1824
1825 // On GFX10 we have ST mode to use no registers for an address.
1826 // Otherwise we need to materialize 0 into an SGPR.
1827 if (!Offset && ST.hasFlatScratchSTMode()) {
1828 unsigned Opc = MI->getOpcode();
1829 unsigned NewOpc = AMDGPU::getFlatScratchInstSTfromSS(Opc);
1830 MI->RemoveOperand(
1831 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::saddr));
1832 MI->setDesc(TII->get(NewOpc));
1833 return;
1834 }
1835 }
1836
1837 if (!FrameReg) {
1838 FIOp.ChangeToImmediate(Offset);
1839 if (TII->isImmOperandLegal(*MI, FIOperandNum, FIOp))
1840 return;
1841 }
1842
1843 // We need to use a register here. Check if we can use an SGPR or need
1844 // a VGPR.
1845 FIOp.ChangeToRegister(AMDGPU::M0, false);
1846 bool UseSGPR = TII->isOperandLegal(*MI, FIOperandNum, &FIOp);
1847
1848 if (!Offset && FrameReg && UseSGPR) {
1849 FIOp.setReg(FrameReg);
1850 return;
1851 }
1852
1853 const TargetRegisterClass *RC = UseSGPR ? &AMDGPU::SReg_32_XM0RegClass
1854 : &AMDGPU::VGPR_32RegClass;
1855
1856 Register TmpReg = RS->scavengeRegister(RC, MI, 0, !UseSGPR);
1857 FIOp.setReg(TmpReg);
1858 FIOp.setIsKill(true);
1859
1860 if ((!FrameReg || !Offset) && TmpReg) {
1861 unsigned Opc = UseSGPR ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1862 auto MIB = BuildMI(*MBB, MI, DL, TII->get(Opc), TmpReg);
1863 if (FrameReg)
1864 MIB.addReg(FrameReg);
1865 else
1866 MIB.addImm(Offset);
1867
1868 return;
1869 }
1870
1871 Register TmpSReg =
1872 UseSGPR ? TmpReg
1873 : RS->scavengeRegister(&AMDGPU::SReg_32_XM0RegClass, MI, 0,
1874 !UseSGPR);
1875
1876 // TODO: for flat scratch another attempt can be made with a VGPR index
1877 // if no SGPRs can be scavenged.
1878 if ((!TmpSReg && !FrameReg) || (!TmpReg && !UseSGPR))
1879 report_fatal_error("Cannot scavenge register in FI elimination!");
1880
1881 if (!TmpSReg) {
1882 // Use frame register and restore it after.
1883 TmpSReg = FrameReg;
1884 FIOp.setReg(FrameReg);
1885 FIOp.setIsKill(false);
1886 }
1887
1888 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_I32), TmpSReg)
1889 .addReg(FrameReg)
1890 .addImm(Offset);
1891
1892 if (!UseSGPR)
1893 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
1894 .addReg(TmpSReg, RegState::Kill);
1895
1896 if (TmpSReg == FrameReg) {
1897 // Undo frame register modification.
1898 BuildMI(*MBB, std::next(MI), DL, TII->get(AMDGPU::S_ADD_I32),
1899 FrameReg)
1900 .addReg(FrameReg)
1901 .addImm(-Offset);
1902 }
1903
1904 return;
1905 }
1906
1907 bool IsMUBUF = TII->isMUBUF(*MI);
1908
1909 if (!IsMUBUF && !MFI->isEntryFunction()) {
1910 // Convert to a swizzled stack address by scaling by the wave size.
1911 //
1912 // In an entry function/kernel the offset is already swizzled.
1913
1914 bool IsCopy = MI->getOpcode() == AMDGPU::V_MOV_B32_e32;
1915 Register ResultReg =
1916 IsCopy ? MI->getOperand(0).getReg()
1917 : RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
1918
1919 int64_t Offset = FrameInfo.getObjectOffset(Index);
1920 if (Offset == 0) {
1921 // XXX - This never happens because of emergency scavenging slot at 0?
1922 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64), ResultReg)
1923 .addImm(ST.getWavefrontSizeLog2())
1924 .addReg(FrameReg);
1925 } else {
1926 if (auto MIB = TII->getAddNoCarry(*MBB, MI, DL, ResultReg, *RS)) {
1927 // Reuse ResultReg in intermediate step.
1928 Register ScaledReg = ResultReg;
1929
1930 BuildMI(*MBB, *MIB, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64),
1931 ScaledReg)
1932 .addImm(ST.getWavefrontSizeLog2())
1933 .addReg(FrameReg);
1934
1935 const bool IsVOP2 = MIB->getOpcode() == AMDGPU::V_ADD_U32_e32;
1936
1937 // TODO: Fold if use instruction is another add of a constant.
1938 if (IsVOP2 || AMDGPU::isInlinableLiteral32(Offset, ST.hasInv2PiInlineImm())) {
1939 // FIXME: This can fail
1940 MIB.addImm(Offset);
1941 MIB.addReg(ScaledReg, RegState::Kill);
1942 if (!IsVOP2)
1943 MIB.addImm(0); // clamp bit
1944 } else {
1945 assert(MIB->getOpcode() == AMDGPU::V_ADD_CO_U32_e64 &&
1946 "Need to reuse carry out register");
1947
1948 // Use scavenged unused carry out as offset register.
1949 Register ConstOffsetReg;
1950 if (!isWave32)
1951 ConstOffsetReg = getSubReg(MIB.getReg(1), AMDGPU::sub0);
1952 else
1953 ConstOffsetReg = MIB.getReg(1);
1954
1955 BuildMI(*MBB, *MIB, DL, TII->get(AMDGPU::S_MOV_B32), ConstOffsetReg)
1956 .addImm(Offset);
1957 MIB.addReg(ConstOffsetReg, RegState::Kill);
1958 MIB.addReg(ScaledReg, RegState::Kill);
1959 MIB.addImm(0); // clamp bit
1960 }
1961 } else {
1962 // We have to produce a carry out, and there isn't a free SGPR pair
1963 // for it. We can keep the whole computation on the SALU to avoid
1964 // clobbering an additional register at the cost of an extra mov.
1965
1966 // We may have 1 free scratch SGPR even though a carry out is
1967 // unavailable. Only one additional mov is needed.
1968 Register TmpScaledReg =
1969 RS->scavengeRegister(&AMDGPU::SReg_32_XM0RegClass, MI, 0, false);
1970 Register ScaledReg = TmpScaledReg.isValid() ? TmpScaledReg : FrameReg;
1971
1972 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_LSHR_B32), ScaledReg)
1973 .addReg(FrameReg)
1974 .addImm(ST.getWavefrontSizeLog2());
1975 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_I32), ScaledReg)
1976 .addReg(ScaledReg, RegState::Kill)
1977 .addImm(Offset);
1978 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), ResultReg)
1979 .addReg(ScaledReg, RegState::Kill);
1980
1981 // If there were truly no free SGPRs, we need to undo everything.
1982 if (!TmpScaledReg.isValid()) {
1983 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_I32), ScaledReg)
1984 .addReg(ScaledReg, RegState::Kill)
1985 .addImm(-Offset);
1986 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_LSHL_B32), ScaledReg)
1987 .addReg(FrameReg)
1988 .addImm(ST.getWavefrontSizeLog2());
1989 }
1990 }
1991 }
1992
1993 // Don't introduce an extra copy if we're just materializing in a mov.
1994 if (IsCopy)
1995 MI->eraseFromParent();
1996 else
1997 FIOp.ChangeToRegister(ResultReg, false, false, true);
1998 return;
1999 }
2000
2001 if (IsMUBUF) {
2002 // Disable offen so we don't need a 0 vgpr base.
2003 assert(static_cast<int>(FIOperandNum) ==
2004 AMDGPU::getNamedOperandIdx(MI->getOpcode(),
2005 AMDGPU::OpName::vaddr));
2006
2007 auto &SOffset = *TII->getNamedOperand(*MI, AMDGPU::OpName::soffset);
2008 assert((SOffset.isImm() && SOffset.getImm() == 0));
2009
2010 if (FrameReg != AMDGPU::NoRegister)
2011 SOffset.ChangeToRegister(FrameReg, false);
2012
2013 int64_t Offset = FrameInfo.getObjectOffset(Index);
2014 int64_t OldImm
2015 = TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm();
2016 int64_t NewOffset = OldImm + Offset;
2017
2018 if (SIInstrInfo::isLegalMUBUFImmOffset(NewOffset) &&
2019 buildMUBUFOffsetLoadStore(ST, FrameInfo, MI, Index, NewOffset)) {
2020 MI->eraseFromParent();
2021 return;
2022 }
2023 }
2024
2025 // If the offset is simply too big, don't convert to a scratch wave offset
2026 // relative index.
2027
2028 FIOp.ChangeToImmediate(Offset);
2029 if (!TII->isImmOperandLegal(*MI, FIOperandNum, FIOp)) {
2030 Register TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
2031 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
2032 .addImm(Offset);
2033 FIOp.ChangeToRegister(TmpReg, false, false, true);
2034 }
2035 }
2036 }
2037}
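// Note (annotation, not part of the original file): on the default
// (non-MUBUF, non-entry) path above, the frame index is materialized as
// (FrameReg >> WavefrontSizeLog2) + ObjectOffset. Assuming the VOP2 add is
// usable and the offset is inlinable, this is roughly (register names are
// placeholders):
//
//   $vgpr1 = V_LSHRREV_B32_e64 WavefrontSizeLog2, $sgpr33
//   $vgpr1 = V_ADD_U32_e32 ObjectOffset, killed $vgpr1
//
// Otherwise the shift and add are done on the SALU and the result is copied
// into the VGPR, undoing the modification afterwards if the frame register
// itself had to be reused.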
2038
2039StringRef SIRegisterInfo::getRegAsmName(MCRegister Reg) const {
2040 return AMDGPUInstPrinter::getRegisterName(Reg);
2041}
2042
2043static const TargetRegisterClass *
2044getAnyVGPRClassForBitWidth(unsigned BitWidth) {
2045 if (BitWidth <= 64)
2046 return &AMDGPU::VReg_64RegClass;
2047 if (BitWidth <= 96)
2048 return &AMDGPU::VReg_96RegClass;
2049 if (BitWidth <= 128)
2050 return &AMDGPU::VReg_128RegClass;
2051 if (BitWidth <= 160)
2052 return &AMDGPU::VReg_160RegClass;
2053 if (BitWidth <= 192)
2054 return &AMDGPU::VReg_192RegClass;
2055 if (BitWidth <= 224)
2056 return &AMDGPU::VReg_224RegClass;
2057 if (BitWidth <= 256)
2058 return &AMDGPU::VReg_256RegClass;
2059 if (BitWidth <= 512)
2060 return &AMDGPU::VReg_512RegClass;
2061 if (BitWidth <= 1024)
2062 return &AMDGPU::VReg_1024RegClass;
2063
2064 return nullptr;
2065}
2066
2067static const TargetRegisterClass *
2068getAlignedVGPRClassForBitWidth(unsigned BitWidth) {
2069 if (BitWidth <= 64)
2070 return &AMDGPU::VReg_64_Align2RegClass;
2071 if (BitWidth <= 96)
2072 return &AMDGPU::VReg_96_Align2RegClass;
2073 if (BitWidth <= 128)
2074 return &AMDGPU::VReg_128_Align2RegClass;
2075 if (BitWidth <= 160)
2076 return &AMDGPU::VReg_160_Align2RegClass;
2077 if (BitWidth <= 192)
2078 return &AMDGPU::VReg_192_Align2RegClass;
2079 if (BitWidth <= 224)
2080 return &AMDGPU::VReg_224_Align2RegClass;
2081 if (BitWidth <= 256)
2082 return &AMDGPU::VReg_256_Align2RegClass;
2083 if (BitWidth <= 512)
2084 return &AMDGPU::VReg_512_Align2RegClass;
2085 if (BitWidth <= 1024)
2086 return &AMDGPU::VReg_1024_Align2RegClass;
2087
2088 return nullptr;
2089}
2090
2091const TargetRegisterClass *
2092SIRegisterInfo::getVGPRClassForBitWidth(unsigned BitWidth) const {
2093 if (BitWidth == 1)
2094 return &AMDGPU::VReg_1RegClass;
2095 if (BitWidth <= 16)
2096 return &AMDGPU::VGPR_LO16RegClass;
2097 if (BitWidth <= 32)
2098 return &AMDGPU::VGPR_32RegClass;
2099 return ST.needsAlignedVGPRs() ? getAlignedVGPRClassForBitWidth(BitWidth)
2100 : getAnyVGPRClassForBitWidth(BitWidth);
2101}
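// Note (annotation, not part of the original file): for example, a 96-bit
// value maps to VReg_96RegClass, or to VReg_96_Align2RegClass on subtargets
// where needsAlignedVGPRs() is true.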
2102
2103static const TargetRegisterClass *
2104getAnyAGPRClassForBitWidth(unsigned BitWidth) {
2105 if (BitWidth <= 64)
2106 return &AMDGPU::AReg_64RegClass;
2107 if (BitWidth <= 96)
2108 return &AMDGPU::AReg_96RegClass;
2109 if (BitWidth <= 128)
2110 return &AMDGPU::AReg_128RegClass;
2111 if (BitWidth <= 160)
2112 return &AMDGPU::AReg_160RegClass;
2113 if (BitWidth <= 192)
2114 return &AMDGPU::AReg_192RegClass;
2115 if (BitWidth <= 224)
2116 return &AMDGPU::AReg_224RegClass;
2117 if (BitWidth <= 256)
2118 return &AMDGPU::AReg_256RegClass;
2119 if (BitWidth <= 512)
2120 return &AMDGPU::AReg_512RegClass;
2121 if (BitWidth <= 1024)
2122 return &AMDGPU::AReg_1024RegClass;
2123
2124 return nullptr;
2125}
2126
2127static const TargetRegisterClass *
2128getAlignedAGPRClassForBitWidth(unsigned BitWidth) {
2129 if (BitWidth <= 64)
2130 return &AMDGPU::AReg_64_Align2RegClass;
2131 if (BitWidth <= 96)
2132 return &AMDGPU::AReg_96_Align2RegClass;
2133 if (BitWidth <= 128)
2134 return &AMDGPU::AReg_128_Align2RegClass;
2135 if (BitWidth <= 160)
2136 return &AMDGPU::AReg_160_Align2RegClass;
2137 if (BitWidth <= 192)
2138 return &AMDGPU::AReg_192_Align2RegClass;
2139 if (BitWidth <= 224)
2140 return &AMDGPU::AReg_224_Align2RegClass;
2141 if (BitWidth <= 256)
2142 return &AMDGPU::AReg_256_Align2RegClass;
2143 if (BitWidth <= 512)
2144 return &AMDGPU::AReg_512_Align2RegClass;
2145 if (BitWidth <= 1024)
2146 return &AMDGPU::AReg_1024_Align2RegClass;
2147
2148 return nullptr;
2149}
2150
2151const TargetRegisterClass *
2152SIRegisterInfo::getAGPRClassForBitWidth(unsigned BitWidth) const {
2153 if (BitWidth <= 16)
2154 return &AMDGPU::AGPR_LO16RegClass;
2155 if (BitWidth <= 32)
2156 return &AMDGPU::AGPR_32RegClass;
2157 return ST.needsAlignedVGPRs() ? getAlignedAGPRClassForBitWidth(BitWidth)
2158 : getAnyAGPRClassForBitWidth(BitWidth);
2159}
2160
2161const TargetRegisterClass *
2162SIRegisterInfo::getSGPRClassForBitWidth(unsigned BitWidth) {
2163 if (BitWidth <= 16)
2164 return &AMDGPU::SGPR_LO16RegClass;
2165 if (BitWidth <= 32)
2166 return &AMDGPU::SReg_32RegClass;
2167 if (BitWidth <= 64)
2168 return &AMDGPU::SReg_64RegClass;
2169 if (BitWidth <= 96)
2170 return &AMDGPU::SGPR_96RegClass;
2171 if (BitWidth <= 128)
2172 return &AMDGPU::SGPR_128RegClass;
2173 if (BitWidth <= 160)
2174 return &AMDGPU::SGPR_160RegClass;
2175 if (BitWidth <= 192)
2176 return &AMDGPU::SGPR_192RegClass;
2177 if (BitWidth <= 224)
2178 return &AMDGPU::SGPR_224RegClass;
2179 if (BitWidth <= 256)
2180 return &AMDGPU::SGPR_256RegClass;
2181 if (BitWidth <= 512)
2182 return &AMDGPU::SGPR_512RegClass;
2183 if (BitWidth <= 1024)
2184 return &AMDGPU::SGPR_1024RegClass;
2185
2186 return nullptr;
2187}
2188
2189// FIXME: This is very slow. It might be worth creating a map from physreg to
2190// register class.
2191const TargetRegisterClass *
2192SIRegisterInfo::getPhysRegClass(MCRegister Reg) const {
2193 static const TargetRegisterClass *const BaseClasses[] = {
2194 &AMDGPU::VGPR_LO16RegClass,
2195 &AMDGPU::VGPR_HI16RegClass,
2196 &AMDGPU::SReg_LO16RegClass,
2197 &AMDGPU::AGPR_LO16RegClass,
2198 &AMDGPU::VGPR_32RegClass,
2199 &AMDGPU::SReg_32RegClass,
2200 &AMDGPU::AGPR_32RegClass,
2201 &AMDGPU::AGPR_32RegClass,
2202 &AMDGPU::VReg_64_Align2RegClass,
2203 &AMDGPU::VReg_64RegClass,
2204 &AMDGPU::SReg_64RegClass,
2205 &AMDGPU::AReg_64_Align2RegClass,
2206 &AMDGPU::AReg_64RegClass,
2207 &AMDGPU::VReg_96_Align2RegClass,
2208 &AMDGPU::VReg_96RegClass,
2209 &AMDGPU::SReg_96RegClass,
2210 &AMDGPU::AReg_96_Align2RegClass,
2211 &AMDGPU::AReg_96RegClass,
2212 &AMDGPU::VReg_128_Align2RegClass,
2213 &AMDGPU::VReg_128RegClass,
2214 &AMDGPU::SReg_128RegClass,
2215 &AMDGPU::AReg_128_Align2RegClass,
2216 &AMDGPU::AReg_128RegClass,
2217 &AMDGPU::VReg_160_Align2RegClass,
2218 &AMDGPU::VReg_160RegClass,
2219 &AMDGPU::SReg_160RegClass,
2220 &AMDGPU::AReg_160_Align2RegClass,
2221 &AMDGPU::AReg_160RegClass,
2222 &AMDGPU::VReg_192_Align2RegClass,
2223 &AMDGPU::VReg_192RegClass,
2224 &AMDGPU::SReg_192RegClass,
2225 &AMDGPU::AReg_192_Align2RegClass,
2226 &AMDGPU::AReg_192RegClass,
2227 &AMDGPU::VReg_224_Align2RegClass,
2228 &AMDGPU::VReg_224RegClass,
2229 &AMDGPU::SReg_224RegClass,
2230 &AMDGPU::AReg_224_Align2RegClass,
2231 &AMDGPU::AReg_224RegClass,
2232 &AMDGPU::VReg_256_Align2RegClass,
2233 &AMDGPU::VReg_256RegClass,
2234 &AMDGPU::SReg_256RegClass,
2235 &AMDGPU::AReg_256_Align2RegClass,
2236 &AMDGPU::AReg_256RegClass,
2237 &AMDGPU::VReg_512_Align2RegClass,
2238 &AMDGPU::VReg_512RegClass,
2239 &AMDGPU::SReg_512RegClass,
2240 &AMDGPU::AReg_512_Align2RegClass,
2241 &AMDGPU::AReg_512RegClass,
2242 &AMDGPU::SReg_1024RegClass,
2243 &AMDGPU::VReg_1024_Align2RegClass,
2244 &AMDGPU::VReg_1024RegClass,
2245 &AMDGPU::AReg_1024_Align2RegClass,
2246 &AMDGPU::AReg_1024RegClass,
2247 &AMDGPU::SCC_CLASSRegClass,
2248 &AMDGPU::Pseudo_SReg_32RegClass,
2249 &AMDGPU::Pseudo_SReg_128RegClass,
2250 };
2251
2252 for (const TargetRegisterClass *BaseClass : BaseClasses) {
2253 if (BaseClass->contains(Reg)) {
2254 return BaseClass;
2255 }
2256 }
2257 return nullptr;
2258}
2259
2260bool SIRegisterInfo::isSGPRReg(const MachineRegisterInfo &MRI,
2261 Register Reg) const {
2262 const TargetRegisterClass *RC;
2263 if (Reg.isVirtual())
2264 RC = MRI.getRegClass(Reg);
2265 else
2266 RC = getPhysRegClass(Reg);
2267 return isSGPRClass(RC);
2268}
2269
2270const TargetRegisterClass *
2271SIRegisterInfo::getEquivalentVGPRClass(const TargetRegisterClass *SRC) const {
2272 unsigned Size = getRegSizeInBits(*SRC);
2273 const TargetRegisterClass *VRC = getVGPRClassForBitWidth(Size);
2274 assert(VRC && "Invalid register class size");
2275 return VRC;
2276}
2277
2278const TargetRegisterClass *
2279SIRegisterInfo::getEquivalentAGPRClass(const TargetRegisterClass *SRC) const {
2280 unsigned Size = getRegSizeInBits(*SRC);
2281 const TargetRegisterClass *ARC = getAGPRClassForBitWidth(Size);
2282 assert(ARC && "Invalid register class size");
2283 return ARC;
2284}
2285
2286const TargetRegisterClass *
2287SIRegisterInfo::getEquivalentSGPRClass(const TargetRegisterClass *VRC) const {
2288 unsigned Size = getRegSizeInBits(*VRC);
2289 if (Size == 32)
2290 return &AMDGPU::SGPR_32RegClass;
2291 const TargetRegisterClass *SRC = getSGPRClassForBitWidth(Size);
2292 assert(SRC && "Invalid register class size");
2293 return SRC;
2294}
2295
2296const TargetRegisterClass *SIRegisterInfo::getSubRegClass(
2297 const TargetRegisterClass *RC, unsigned SubIdx) const {
2298 if (SubIdx == AMDGPU::NoSubRegister)
2299 return RC;
2300
2301 // We can assume that each lane corresponds to one 32-bit register.
2302 unsigned Size = getNumChannelsFromSubReg(SubIdx) * 32;
2303 if (isSGPRClass(RC)) {
2304 if (Size == 32)
2305 RC = &AMDGPU::SGPR_32RegClass;
2306 else
2307 RC = getSGPRClassForBitWidth(Size);
2308 } else if (hasAGPRs(RC)) {
2309 RC = getAGPRClassForBitWidth(Size);
2310 } else {
2311 RC = getVGPRClassForBitWidth(Size);
2312 }
2313 assert(RC && "Invalid sub-register class size");
2314 return RC;
2315}
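The sub-register class above is chosen purely by width: the number of 32-bit channels named by the index, times 32, looked up in the matching SGPR, AGPR, or VGPR family. A minimal standalone sketch of that arithmetic, with illustrative names rather than the LLVM classes:

#include <cassert>
#include <cstdio>

// Illustrative stand-ins; not the LLVM classes.
enum class RegFamily { SGPR, VGPR, AGPR };

struct SubRegClassQuery {
  RegFamily Family;     // family of the original register class
  unsigned NumChannels; // 32-bit lanes covered by the sub-register index
};

// Mirrors the width computation in getSubRegClass: every lane is one 32-bit
// register, so the sub-register class is NumChannels * 32 bits wide and keeps
// the family (SGPR/VGPR/AGPR) of the original class.
unsigned subRegClassBits(const SubRegClassQuery &Q) {
  assert(Q.NumChannels >= 1 && "a sub-register covers at least one lane");
  return Q.NumChannels * 32;
}

int main() {
  SubRegClassQuery Q{RegFamily::VGPR, 2}; // e.g. a two-lane (64-bit) extract
  std::printf("sub-register class width: %u bits\n", subRegClassBits(Q));
}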
2316
2317const TargetRegisterClass *
2318SIRegisterInfo::getCompatibleSubRegClass(const TargetRegisterClass *SuperRC,
2319 const TargetRegisterClass *SubRC,
2320 unsigned SubIdx) const {
2321 // Ensure this subregister index is aligned in the super register.
2322 const TargetRegisterClass *MatchRC =
2323 getMatchingSuperRegClass(SuperRC, SubRC, SubIdx);
2324 return MatchRC && MatchRC->hasSubClassEq(SuperRC) ? MatchRC : nullptr;
2325}
2326
2327bool SIRegisterInfo::opCanUseInlineConstant(unsigned OpType) const {
2328 if (OpType >= AMDGPU::OPERAND_REG_INLINE_AC_FIRST &&
2329 OpType <= AMDGPU::OPERAND_REG_INLINE_AC_LAST)
2330 return !ST.hasMFMAInlineLiteralBug();
2331
2332 return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
2333 OpType <= AMDGPU::OPERAND_SRC_LAST;
2334}
2335
2336bool SIRegisterInfo::shouldRewriteCopySrc(
2337 const TargetRegisterClass *DefRC,
2338 unsigned DefSubReg,
2339 const TargetRegisterClass *SrcRC,
2340 unsigned SrcSubReg) const {
2341 // We want to prefer the smallest register class possible, so we don't want to
2342 // stop and rewrite on anything that looks like a subregister
2343 // extract. Operations mostly don't care about the super register class, so we
2344 // only want to stop on the most basic of copies between the same register
2345 // class.
2346 //
2347 // e.g. if we have something like
2348 // %0 = ...
2349 // %1 = ...
2350 // %2 = REG_SEQUENCE %0, sub0, %1, sub1
2351 // %3 = COPY %2.sub0
2352 //
2353 // We want to look through the COPY to find:
2354 // => %3 = COPY %0
2355
2356 // Plain copy.
2357 return getCommonSubClass(DefRC, SrcRC) != nullptr;
2358}
2359
2360bool SIRegisterInfo::opCanUseLiteralConstant(unsigned OpType) const {
2361 // TODO: 64-bit operands have extending behavior from 32-bit literal.
2362 return OpType >= AMDGPU::OPERAND_REG_IMM_FIRST &&
2363 OpType <= AMDGPU::OPERAND_REG_IMM_LAST;
2364}
2365
2366/// Returns the lowest register that is not used at any point in the function.
2367/// If all registers are used, then this function will return
2368/// AMDGPU::NoRegister. If \p ReserveHighestVGPR = true, then the highest
2369/// unused register is returned instead.
2370MCRegister SIRegisterInfo::findUnusedRegister(const MachineRegisterInfo &MRI,
2371 const TargetRegisterClass *RC,
2372 const MachineFunction &MF,
2373 bool ReserveHighestVGPR) const {
2374 if (ReserveHighestVGPR) {
2375 for (MCRegister Reg : reverse(*RC))
2376 if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg))
2377 return Reg;
2378 } else {
2379 for (MCRegister Reg : *RC)
2380 if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg))
2381 return Reg;
2382 }
2383 return MCRegister();
2384}
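The scan above walks the register class in allocation order (or in reverse when the highest VGPR is requested) and returns the first allocatable, never-used register. A minimal standalone sketch of the same pattern over plain data, with hypothetical names standing in for the MachineRegisterInfo queries:

#include <optional>
#include <vector>

// Illustrative register model: the flags stand in for MRI.isAllocatable()
// and MRI.isPhysRegUsed(); the vector stands in for iterating a register
// class in allocation order.
struct Reg {
  unsigned Id;
  bool Allocatable;
  bool Used;
};

// Returns the first (or, with FromHighest, the last) register in the class
// that is allocatable and never used; std::nullopt plays the role of the
// empty MCRegister() returned above.
std::optional<unsigned> findUnusedReg(const std::vector<Reg> &Class,
                                      bool FromHighest) {
  auto Matches = [](const Reg &R) { return R.Allocatable && !R.Used; };
  if (FromHighest) {
    for (auto It = Class.rbegin(); It != Class.rend(); ++It)
      if (Matches(*It))
        return It->Id;
  } else {
    for (const Reg &R : Class)
      if (Matches(R))
        return R.Id;
  }
  return std::nullopt;
}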
2385
2386ArrayRef<int16_t> SIRegisterInfo::getRegSplitParts(const TargetRegisterClass *RC,
2387 unsigned EltSize) const {
2388 const unsigned RegBitWidth = AMDGPU::getRegBitWidth(*RC->MC);
2389 assert(RegBitWidth >= 32 && RegBitWidth <= 1024);
2390
2391 const unsigned RegDWORDs = RegBitWidth / 32;
2392 const unsigned EltDWORDs = EltSize / 4;
2393 assert(RegSplitParts.size() + 1 >= EltDWORDs);
2394
2395 const std::vector<int16_t> &Parts = RegSplitParts[EltDWORDs - 1];
2396 const unsigned NumParts = RegDWORDs / EltDWORDs;
2397
2398 return makeArrayRef(Parts.data(), NumParts);
2399}
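The arithmetic above works entirely in 32-bit dwords: RegBitWidth / 32 dwords in the register, EltSize / 4 dwords per element, and their quotient is the number of parts returned. A small self-contained sketch of that computation, with illustrative names:

#include <cassert>
#include <cstdio>

// Mirrors the size arithmetic in getRegSplitParts: a register of RegBitWidth
// bits is split into elements of EltSizeBytes bytes, both measured in 32-bit
// dwords, giving RegDWORDs / EltDWORDs parts.
unsigned numSplitParts(unsigned RegBitWidth, unsigned EltSizeBytes) {
  assert(RegBitWidth >= 32 && RegBitWidth <= 1024 && RegBitWidth % 32 == 0);
  assert(EltSizeBytes >= 4 && EltSizeBytes % 4 == 0);
  const unsigned RegDWORDs = RegBitWidth / 32;
  const unsigned EltDWORDs = EltSizeBytes / 4;
  return RegDWORDs / EltDWORDs;
}

int main() {
  // A 256-bit register split into 64-bit elements yields 4 parts.
  std::printf("%u\n", numSplitParts(256, 8));
}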
2400
2401const TargetRegisterClass*
2402SIRegisterInfo::getRegClassForReg(const MachineRegisterInfo &MRI,
2403 Register Reg) const {
2404 return Reg.isVirtual() ? MRI.getRegClass(Reg) : getPhysRegClass(Reg);
2405}
2406
2407bool SIRegisterInfo::isVGPR(const MachineRegisterInfo &MRI,
2408 Register Reg) const {
2409 const TargetRegisterClass *RC = getRegClassForReg(MRI, Reg);
2410 // Registers without classes are unaddressable, SGPR-like registers.
2411 return RC && isVGPRClass(RC);
2412}
2413
2414bool SIRegisterInfo::isAGPR(const MachineRegisterInfo &MRI,
2415 Register Reg) const {
2416 const TargetRegisterClass *RC = getRegClassForReg(MRI, Reg);
2417
2418 // Registers without classes are unaddressable, SGPR-like registers.
2419 return RC && isAGPRClass(RC);
2420}
2421
2422bool SIRegisterInfo::shouldCoalesce(MachineInstr *MI,
2423 const TargetRegisterClass *SrcRC,
2424 unsigned SubReg,
2425 const TargetRegisterClass *DstRC,
2426 unsigned DstSubReg,
2427 const TargetRegisterClass *NewRC,
2428 LiveIntervals &LIS) const {
2429 unsigned SrcSize = getRegSizeInBits(*SrcRC);
2430 unsigned DstSize = getRegSizeInBits(*DstRC);
2431 unsigned NewSize = getRegSizeInBits(*NewRC);
2432
2433 // Do not increase the size of registers beyond a dword; we would need to
2434 // allocate adjacent registers and constrain regalloc more than needed.
2435
2436 // Always allow dword coalescing.
2437 if (SrcSize <= 32 || DstSize <= 32)
2438 return true;
2439
2440 return NewSize <= DstSize || NewSize <= SrcSize;
2441}
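The decision above reduces to a pure size check. A minimal standalone sketch of that predicate, using illustrative names rather than the LLVM signature:

// Mirrors the size checks in shouldCoalesce: dword-sized copies always
// coalesce, otherwise coalescing is allowed only if the merged register is
// no wider than at least one of the originals.
bool shouldCoalesceBySize(unsigned SrcSize, unsigned DstSize,
                          unsigned NewSize) {
  if (SrcSize <= 32 || DstSize <= 32)
    return true; // always allow dword coalescing
  return NewSize <= DstSize || NewSize <= SrcSize;
}

Growing both operands into a wider tuple would require the allocator to find adjacent registers, which is exactly the constraint the comment above is trying to avoid.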
2442
2443unsigned SIRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
2444 MachineFunction &MF) const {
2445 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
2446
2447 unsigned Occupancy = ST.getOccupancyWithLocalMemSize(MFI->getLDSSize(),
2448 MF.getFunction());
2449 switch (RC->getID()) {
2450 default:
2451 return AMDGPUGenRegisterInfo::getRegPressureLimit(RC, MF);
2452 case AMDGPU::VGPR_32RegClassID:
2453 case AMDGPU::VGPR_LO16RegClassID:
2454 case AMDGPU::VGPR_HI16RegClassID:
2455 return std::min(ST.getMaxNumVGPRs(Occupancy), ST.getMaxNumVGPRs(MF));
2456 case AMDGPU::SGPR_32RegClassID:
2457 case AMDGPU::SGPR_LO16RegClassID:
2458 return std::min(ST.getMaxNumSGPRs(Occupancy, true), ST.getMaxNumSGPRs(MF));
2459 }
2460}
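The limit above is simply the tighter of two bounds: the register budget at the computed occupancy and the per-function cap from the subtarget. A minimal sketch of that selection, with illustrative parameter names:

#include <algorithm>

// Mirrors the limit selection in getRegPressureLimit: the pressure limit is
// the tighter of the register budget implied by the kernel's occupancy and
// the per-function maximum.
unsigned regPressureLimit(unsigned MaxRegsAtOccupancy,
                          unsigned MaxRegsForFunction) {
  return std::min(MaxRegsAtOccupancy, MaxRegsForFunction);
}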
2461
2462unsigned SIRegisterInfo::getRegPressureSetLimit(const MachineFunction &MF,
2463 unsigned Idx) const {
2464 if (Idx == AMDGPU::RegisterPressureSets::VGPR_32 ||
2465 Idx == AMDGPU::RegisterPressureSets::AGPR_32)
2466 return getRegPressureLimit(&AMDGPU::VGPR_32RegClass,
2467 const_cast<MachineFunction &>(MF));
2468
2469 if (Idx == AMDGPU::RegisterPressureSets::SReg_32)
2470 return getRegPressureLimit(&AMDGPU::SGPR_32RegClass,
2471 const_cast<MachineFunction &>(MF));
2472
2473 llvm_unreachable("Unexpected register pressure set!");
2474}
2475
2476const int *SIRegisterInfo::getRegUnitPressureSets(unsigned RegUnit) const {
2477 static const int Empty[] = { -1 };
2478
2479 if (RegPressureIgnoredUnits[RegUnit])
2480 return Empty;
2481
2482 return AMDGPUGenRegisterInfo::getRegUnitPressureSets(RegUnit);
2483}
2484
2485MCRegister SIRegisterInfo::getReturnAddressReg(const MachineFunction &MF) const {
2486 // Not a callee saved register.
2487 return AMDGPU::SGPR30_SGPR31;
2488}
2489
2490const TargetRegisterClass *
2491SIRegisterInfo::getRegClassForSizeOnBank(unsigned Size,
2492 const RegisterBank &RB,
2493 const MachineRegisterInfo &MRI) const {
2494 switch (RB.getID()) {
2495 case AMDGPU::VGPRRegBankID:
2496 return getVGPRClassForBitWidth(std::max(32u, Size));
2497 case AMDGPU::VCCRegBankID:
2498 assert(Size == 1);
2499 return isWave32 ? &AMDGPU::SReg_32_XM0_XEXECRegClass
2500 : &AMDGPU::SReg_64_XEXECRegClass;
2501 case AMDGPU::SGPRRegBankID:
2502 return getSGPRClassForBitWidth(std::max(32u, Size));
2503 case AMDGPU::AGPRRegBankID:
2504 return getAGPRClassForBitWidth(std::max(32u, Size));
2505 default:
2506 llvm_unreachable("unknown register bank");
2507 }
2508}
2509
2510const TargetRegisterClass *
2511SIRegisterInfo::getConstrainedRegClassForOperand(const MachineOperand &MO,
2512 const MachineRegisterInfo &MRI) const {
2513 const RegClassOrRegBank &RCOrRB = MRI.getRegClassOrRegBank(MO.getReg());
2514 if (const RegisterBank *RB = RCOrRB.dyn_cast<const RegisterBank*>())
2515 return getRegClassForTypeOnBank(MRI.getType(MO.getReg()), *RB, MRI);
2516
2517 if (const auto *RC = RCOrRB.dyn_cast<const TargetRegisterClass *>())
2518 return getAllocatableClass(RC);
2519
2520 return nullptr;
2521}
2522
2523MCRegister SIRegisterInfo::getVCC() const {
2524 return isWave32 ? AMDGPU::VCC_LO : AMDGPU::VCC;
2525}
2526
2527const TargetRegisterClass *SIRegisterInfo::getVGPR64Class() const {
2528 // VGPR tuples have an alignment requirement on gfx90a variants.
2529 return ST.needsAlignedVGPRs() ? &AMDGPU::VReg_64_Align2RegClass
2530 : &AMDGPU::VReg_64RegClass;
2531}
2532
2533const TargetRegisterClass *
2534SIRegisterInfo::getRegClass(unsigned RCID) const {
2535 switch ((int)RCID) {
2536 case AMDGPU::SReg_1RegClassID:
2537 return getBoolRC();
2538 case AMDGPU::SReg_1_XEXECRegClassID:
2539 return isWave32 ? &AMDGPU::SReg_32_XM0_XEXECRegClass
2540 : &AMDGPU::SReg_64_XEXECRegClass;
2541 case -1:
2542 return nullptr;
2543 default:
2544 return AMDGPUGenRegisterInfo::getRegClass(RCID);
2545 }
2546}
2547
2548// Find reaching register definition
2549MachineInstr *SIRegisterInfo::findReachingDef(Register Reg, unsigned SubReg,
2550 MachineInstr &Use,
2551 MachineRegisterInfo &MRI,
2552 LiveIntervals *LIS) const {
2553 auto &MDT = LIS->getAnalysis<MachineDominatorTree>();
2554 SlotIndex UseIdx = LIS->getInstructionIndex(Use);
2555 SlotIndex DefIdx;
2556
2557 if (Reg.isVirtual()) {
2558 if (!LIS->hasInterval(Reg))
2559 return nullptr;
2560 LiveInterval &LI = LIS->getInterval(Reg);
2561 LaneBitmask SubLanes = SubReg ? getSubRegIndexLaneMask(SubReg)
2562 : MRI.getMaxLaneMaskForVReg(Reg);
2563 VNInfo *V = nullptr;
2564 if (LI.hasSubRanges()) {
2565 for (auto &S : LI.subranges()) {
2566 if ((S.LaneMask & SubLanes) == SubLanes) {
2567 V = S.getVNInfoAt(UseIdx);
2568 break;
2569 }
2570 }
2571 } else {
2572 V = LI.getVNInfoAt(UseIdx);
2573 }
2574 if (!V)
2575 return nullptr;
2576 DefIdx = V->def;
2577 } else {
2578 // Find last def.
2579 for (MCRegUnitIterator Units(Reg.asMCReg(), this); Units.isValid();
2580 ++Units) {
2581 LiveRange &LR = LIS->getRegUnit(*Units);
2582 if (VNInfo *V = LR.getVNInfoAt(UseIdx)) {
2583 if (!DefIdx.isValid() ||
2584 MDT.dominates(LIS->getInstructionFromIndex(DefIdx),
2585 LIS->getInstructionFromIndex(V->def)))
2586 DefIdx = V->def;
2587 } else {
2588 return nullptr;
2589 }
2590 }
2591 }
2592
2593 MachineInstr *Def = LIS->getInstructionFromIndex(DefIdx);
2594
2595 if (!Def || !MDT.dominates(Def, &Use))
2596 return nullptr;
2597
2598 assert(Def->modifiesRegister(Reg, this));
2599
2600 return Def;
2601}
2602
2603MCPhysReg SIRegisterInfo::get32BitRegister(MCPhysReg Reg) const {
2604 assert(getRegSizeInBits(*getPhysRegClass(Reg)) <= 32);
2605
2606 for (const TargetRegisterClass &RC : { AMDGPU::VGPR_32RegClass,
2607 AMDGPU::SReg_32RegClass,
2608 AMDGPU::AGPR_32RegClass } ) {
2609 if (MCPhysReg Super = getMatchingSuperReg(Reg, AMDGPU::lo16, &RC))
2610 return Super;
2611 }
2612 if (MCPhysReg Super = getMatchingSuperReg(Reg, AMDGPU::hi16,
2613 &AMDGPU::VGPR_32RegClass)) {
2614 return Super;
2615 }
2616
2617 return AMDGPU::NoRegister;
2618}
2619
2620bool SIRegisterInfo::isProperlyAlignedRC(const TargetRegisterClass &RC) const {
2621 if (!ST.needsAlignedVGPRs())
2622 return true;
2623
2624 if (hasVGPRs(&RC))
2625 return RC.hasSuperClassEq(getVGPRClassForBitWidth(getRegSizeInBits(RC)));
2626 if (hasAGPRs(&RC))
2627 return RC.hasSuperClassEq(getAGPRClassForBitWidth(getRegSizeInBits(RC)));
2628
2629 return true;
2630}
2631
2632bool SIRegisterInfo::isConstantPhysReg(MCRegister PhysReg) const {
2633 switch (PhysReg) {
2634 case AMDGPU::SGPR_NULL:
2635 case AMDGPU::SRC_SHARED_BASE:
2636 case AMDGPU::SRC_PRIVATE_BASE:
2637 case AMDGPU::SRC_SHARED_LIMIT:
2638 case AMDGPU::SRC_PRIVATE_LIMIT:
2639 return true;
2640 default:
2641 return false;
2642 }
2643}
2644
2645ArrayRef<MCPhysReg>
2646SIRegisterInfo::getAllSGPR128(const MachineFunction &MF) const {
2647 return makeArrayRef(AMDGPU::SGPR_128RegClass.begin(),
2648 ST.getMaxNumSGPRs(MF) / 4);
2649}
2650
2651ArrayRef<MCPhysReg>
2652SIRegisterInfo::getAllSGPR64(const MachineFunction &MF) const {
2653 return makeArrayRef(AMDGPU::SGPR_64RegClass.begin(),
2654 ST.getMaxNumSGPRs(MF) / 2);
2655}
2656
2657ArrayRef<MCPhysReg>
2658SIRegisterInfo::getAllSGPR32(const MachineFunction &MF) const {
2659 return makeArrayRef(AMDGPU::SGPR_32RegClass.begin(), ST.getMaxNumSGPRs(MF));
2660}