Bug Summary

File: llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
Warning: line 1156, column 5
Value stored to 'Desc' is never read

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name SIRegisterInfo.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/build-llvm -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Target/AMDGPU -I /build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Target/AMDGPU -I include -I /build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-command-line-argument -Wno-unknown-warning-option -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/build-llvm -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-09-26-234817-15343-1 -x c++ /build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
1//===-- SIRegisterInfo.cpp - SI Register Information ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// SI implementation of the TargetRegisterInfo class.
11//
12//===----------------------------------------------------------------------===//
13
14#include "SIRegisterInfo.h"
15#include "AMDGPU.h"
16#include "AMDGPURegisterBankInfo.h"
17#include "GCNSubtarget.h"
18#include "MCTargetDesc/AMDGPUInstPrinter.h"
19#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
20#include "SIMachineFunctionInfo.h"
21#include "llvm/CodeGen/LiveIntervals.h"
22#include "llvm/CodeGen/MachineDominators.h"
23#include "llvm/CodeGen/RegisterScavenging.h"
24
25using namespace llvm;
26
27#define GET_REGINFO_TARGET_DESC
28#include "AMDGPUGenRegisterInfo.inc"
29
30static cl::opt<bool> EnableSpillSGPRToVGPR(
31 "amdgpu-spill-sgpr-to-vgpr",
32 cl::desc("Enable spilling VGPRs to SGPRs"),
33 cl::ReallyHidden,
34 cl::init(true));
35
36std::array<std::vector<int16_t>, 16> SIRegisterInfo::RegSplitParts;
37std::array<std::array<uint16_t, 32>, 9> SIRegisterInfo::SubRegFromChannelTable;
38
39// Map numbers of DWORDs to indexes in SubRegFromChannelTable.
40 // Valid indexes are shifted by 1, so a mapping of 0 means unsupported.
41// e.g. for 8 DWORDs (256-bit), SubRegFromChannelTableWidthMap[8] = 8,
42// meaning index 7 in SubRegFromChannelTable.
43static const std::array<unsigned, 17> SubRegFromChannelTableWidthMap = {
44 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 9};
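For example, SubRegFromChannelTableWidthMap[2] == 2 routes a 2-DWORD (64-bit) query to row 1 of SubRegFromChannelTable, SubRegFromChannelTableWidthMap[16] == 9 routes a 16-DWORD (512-bit) query to row 8, and widths 9 through 15 map to 0, i.e. unsupported.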
45
46namespace llvm {
47
48// A temporary struct to spill SGPRs.
49// This is mostly to spill SGPRs to memory. Spilling SGPRs into VGPR lanes emits
50// just v_writelane and v_readlane.
51//
52// When spilling to memory, the SGPRs are written into VGPR lanes and the VGPR
53// is saved to scratch (or the other way around for loads).
54// For this, a VGPR is required where the needed lanes can be clobbered. The
55// RegScavenger can provide a VGPR where currently active lanes can be
56// clobbered, but we still need to save inactive lanes.
57// The high-level steps are:
58// - Try to scavenge SGPR(s) to save exec
59// - Try to scavenge VGPR
60 // - Save the needed lanes of TmpVGPR (all lanes, or only the inactive ones)
61// - Spill/Restore SGPRs using TmpVGPR
62// - Restore TmpVGPR
63//
64// To save all lanes of TmpVGPR, exec needs to be saved and modified. If we
65// cannot scavenge temporary SGPRs to save exec, we use the following code:
66// buffer_store_dword TmpVGPR ; only if active lanes need to be saved
67// s_not exec, exec
68// buffer_store_dword TmpVGPR ; save inactive lanes
69// s_not exec, exec
70struct SGPRSpillBuilder {
71 struct PerVGPRData {
72 unsigned PerVGPR;
73 unsigned NumVGPRs;
74 int64_t VGPRLanes;
75 };
76
77 // The SGPR to save
78 Register SuperReg;
79 MachineBasicBlock::iterator MI;
80 ArrayRef<int16_t> SplitParts;
81 unsigned NumSubRegs;
82 bool IsKill;
83 const DebugLoc &DL;
84
85 /* When spilling to stack */
86 // The SGPRs are written into this VGPR, which is then written to scratch
87 // (or vice versa for loads).
88 Register TmpVGPR = AMDGPU::NoRegister;
89 // Temporary spill slot to save TmpVGPR to.
90 int TmpVGPRIndex = 0;
91 // True if TmpVGPR is live before the spill; false if it was scavenged.
92 bool TmpVGPRLive = false;
93 // Scavenged SGPR to save EXEC.
94 Register SavedExecReg = AMDGPU::NoRegister;
95 // Stack index to write the SGPRs to.
96 int Index;
97 unsigned EltSize = 4;
98
99 RegScavenger *RS;
100 MachineBasicBlock &MBB;
101 MachineFunction &MF;
102 SIMachineFunctionInfo &MFI;
103 const SIInstrInfo &TII;
104 const SIRegisterInfo &TRI;
105 bool IsWave32;
106 Register ExecReg;
107 unsigned MovOpc;
108 unsigned NotOpc;
109
110 SGPRSpillBuilder(const SIRegisterInfo &TRI, const SIInstrInfo &TII,
111 bool IsWave32, MachineBasicBlock::iterator MI, int Index,
112 RegScavenger *RS)
113 : SuperReg(MI->getOperand(0).getReg()), MI(MI),
114 IsKill(MI->getOperand(0).isKill()), DL(MI->getDebugLoc()), Index(Index),
115 RS(RS), MBB(*MI->getParent()), MF(*MBB.getParent()),
116 MFI(*MF.getInfo<SIMachineFunctionInfo>()), TII(TII), TRI(TRI),
117 IsWave32(IsWave32) {
118 const TargetRegisterClass *RC = TRI.getPhysRegClass(SuperReg);
119 SplitParts = TRI.getRegSplitParts(RC, EltSize);
120 NumSubRegs = SplitParts.empty() ? 1 : SplitParts.size();
121
122 if (IsWave32) {
123 ExecReg = AMDGPU::EXEC_LO;
124 MovOpc = AMDGPU::S_MOV_B32;
125 NotOpc = AMDGPU::S_NOT_B32;
126 } else {
127 ExecReg = AMDGPU::EXEC;
128 MovOpc = AMDGPU::S_MOV_B64;
129 NotOpc = AMDGPU::S_NOT_B64;
130 }
131
132 assert(SuperReg != AMDGPU::M0 && "m0 should never spill");
133 assert(SuperReg != AMDGPU::EXEC_LO && SuperReg != AMDGPU::EXEC_HI &&
134 SuperReg != AMDGPU::EXEC && "exec should never spill");
135 }
136
137 PerVGPRData getPerVGPRData() {
138 PerVGPRData Data;
139 Data.PerVGPR = IsWave32 ? 32 : 64;
140 Data.NumVGPRs = (NumSubRegs + (Data.PerVGPR - 1)) / Data.PerVGPR;
141 Data.VGPRLanes = (1LL << std::min(Data.PerVGPR, NumSubRegs)) - 1LL;
142 return Data;
143 }
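Worked example (illustrative numbers, not taken from this report): a wave64 spill of a 128-bit SGPR tuple has NumSubRegs = 4, so PerVGPR = 64, NumVGPRs = (4 + 63) / 64 = 1, and VGPRLanes = (1 << 4) - 1 = 0xF; the four 32-bit parts occupy lanes 0-3 of a single temporary VGPR.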
144
145 // Tries to scavenge SGPRs to save EXEC and a VGPR. Uses v0 if no VGPR is
146 // free.
147 // Writes these instructions if an SGPR can be scavenged:
148 // s_mov_b64 s[6:7], exec ; Save exec
149 // s_mov_b64 exec, 3 ; Wanted lanemask
150 // buffer_store_dword v1 ; Write scavenged VGPR to emergency slot
151 //
152 // Writes these instructions if no SGPR can be scavenged:
153 // buffer_store_dword v0 ; Only if no free VGPR was found
154 // s_not_b64 exec, exec
155 // buffer_store_dword v0 ; Save inactive lanes
156 // ; exec stays inverted, it is flipped back in
157 // ; restore.
158 void prepare() {
159 // Scavenged temporary VGPR to use. It must be scavenged once for any number
160 // of spilled subregs.
161 // FIXME: The liveness analysis is limited and does not tell if a register
162 // is in use in lanes that are currently inactive. We can never be sure if
163 // a register is actually in use in another lane, so we need to save all
164 // used lanes of the chosen VGPR.
165 assert(RS && "Cannot spill SGPR to memory without RegScavenger")(static_cast <bool> (RS && "Cannot spill SGPR to memory without RegScavenger"
) ? void (0) : __assert_fail ("RS && \"Cannot spill SGPR to memory without RegScavenger\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp"
, 165, __extension__ __PRETTY_FUNCTION__))
;
166 TmpVGPR = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0, false);
167
168 // Reserve temporary stack slot
169 TmpVGPRIndex = MFI.getScavengeFI(MF.getFrameInfo(), TRI);
170 if (TmpVGPR) {
171 // Found a register that is dead in the currently active lanes, we only
172 // need to spill inactive lanes.
173 TmpVGPRLive = false;
174 } else {
175 // Pick v0 because it doesn't make a difference.
176 TmpVGPR = AMDGPU::VGPR0;
177 TmpVGPRLive = true;
178 }
179
180 // Try to scavenge SGPRs to save exec
181 assert(!SavedExecReg && "Exec is already saved, refuse to save again")(static_cast <bool> (!SavedExecReg && "Exec is already saved, refuse to save again"
) ? void (0) : __assert_fail ("!SavedExecReg && \"Exec is already saved, refuse to save again\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp"
, 181, __extension__ __PRETTY_FUNCTION__))
;
182 const TargetRegisterClass &RC =
183 IsWave32 ? AMDGPU::SGPR_32RegClass : AMDGPU::SGPR_64RegClass;
184 RS->setRegUsed(SuperReg);
185 SavedExecReg = RS->scavengeRegister(&RC, MI, 0, false);
186
187 int64_t VGPRLanes = getPerVGPRData().VGPRLanes;
188
189 if (SavedExecReg) {
190 RS->setRegUsed(SavedExecReg);
191 // Set exec to needed lanes
192 BuildMI(MBB, MI, DL, TII.get(MovOpc), SavedExecReg).addReg(ExecReg);
193 auto I = BuildMI(MBB, MI, DL, TII.get(MovOpc), ExecReg).addImm(VGPRLanes);
194 if (!TmpVGPRLive)
195 I.addReg(TmpVGPR, RegState::ImplicitDefine);
196 // Spill needed lanes
197 TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ false);
198 } else {
199 // Spill active lanes
200 if (TmpVGPRLive)
201 TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ false,
202 /*IsKill*/ false);
203 // Spill inactive lanes
204 auto I = BuildMI(MBB, MI, DL, TII.get(NotOpc), ExecReg).addReg(ExecReg);
205 if (!TmpVGPRLive)
206 I.addReg(TmpVGPR, RegState::ImplicitDefine);
207 TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ false);
208 }
209 }
210
211 // Writes these instructions if an SGPR can be scavenged:
212 // buffer_load_dword v1 ; Reload scavenged VGPR from emergency slot
213 // s_waitcnt vmcnt(0) ; If a free VGPR was found
214 // s_mov_b64 exec, s[6:7] ; Restore exec
215 //
216 // Writes these instructions if no SGPR can be scavenged:
217 // buffer_load_dword v0 ; Restore inactive lanes
218 // s_waitcnt vmcnt(0) ; If a free VGPR was found
219 // s_not_b64 exec, exec
220 // buffer_load_dword v0 ; Only if no free VGPR was found
221 void restore() {
222 if (SavedExecReg) {
223 // Restore used lanes
224 TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ true,
225 /*IsKill*/ false);
226 // Restore exec
227 auto I = BuildMI(MBB, MI, DL, TII.get(MovOpc), ExecReg)
228 .addReg(SavedExecReg, RegState::Kill);
229 // Add an implicit use of the load so it is not dead.
230 // FIXME This inserts an unnecessary waitcnt
231 if (!TmpVGPRLive) {
232 I.addReg(TmpVGPR, RegState::ImplicitKill);
233 }
234 } else {
235 // Restore inactive lanes
236 TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ true,
237 /*IsKill*/ false);
238 auto I = BuildMI(MBB, MI, DL, TII.get(NotOpc), ExecReg).addReg(ExecReg);
239 if (!TmpVGPRLive) {
240 I.addReg(TmpVGPR, RegState::ImplicitKill);
241 }
242 // Restore active lanes
243 if (TmpVGPRLive)
244 TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ true);
245 }
246 }
247
248 // Write TmpVGPR to memory or read TmpVGPR from memory.
249 // Either using a single buffer_load/store if exec is set to the needed mask
250 // or using
251 // buffer_load
252 // s_not exec, exec
253 // buffer_load
254 // s_not exec, exec
255 void readWriteTmpVGPR(unsigned Offset, bool IsLoad) {
256 if (SavedExecReg) {
257 // Spill needed lanes
258 TRI.buildVGPRSpillLoadStore(*this, Index, Offset, IsLoad);
259 } else {
260 // Spill active lanes
261 TRI.buildVGPRSpillLoadStore(*this, Index, Offset, IsLoad,
262 /*IsKill*/ false);
263 // Spill inactive lanes
264 BuildMI(MBB, MI, DL, TII.get(NotOpc), ExecReg).addReg(ExecReg);
265 TRI.buildVGPRSpillLoadStore(*this, Index, Offset, IsLoad);
266 BuildMI(MBB, MI, DL, TII.get(NotOpc), ExecReg).addReg(ExecReg);
267 }
268 }
269};
270
271} // namespace llvm
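A hedged sketch of how this builder is typically driven, based on the SGPR spill paths later in this file (the per-lane v_writelane step is elided; names match the surrounding code):

  // Sketch, not verbatim from this file: spilling an SGPR tuple to scratch.
  SGPRSpillBuilder SB(TRI, TII, IsWave32, MI, Index, RS);
  SB.prepare();                  // scavenge TmpVGPR, save exec / inactive lanes
  // ... V_WRITELANE_B32 moves each SGPR part into a lane of SB.TmpVGPR ...
  SB.readWriteTmpVGPR(/*Offset=*/0, /*IsLoad=*/false); // write lanes to the slot
  SB.restore();                  // restore exec and the saved lanes of TmpVGPR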
272
273SIRegisterInfo::SIRegisterInfo(const GCNSubtarget &ST)
274 : AMDGPUGenRegisterInfo(AMDGPU::PC_REG, ST.getAMDGPUDwarfFlavour()), ST(ST),
275 SpillSGPRToVGPR(EnableSpillSGPRToVGPR), isWave32(ST.isWave32()) {
276
277 assert(getSubRegIndexLaneMask(AMDGPU::sub0).getAsInteger() == 3 &&
278 getSubRegIndexLaneMask(AMDGPU::sub31).getAsInteger() == (3ULL << 62) &&
279 (getSubRegIndexLaneMask(AMDGPU::lo16) |
280 getSubRegIndexLaneMask(AMDGPU::hi16)).getAsInteger() ==
281 getSubRegIndexLaneMask(AMDGPU::sub0).getAsInteger() &&
282 "getNumCoveredRegs() will not work with generated subreg masks!");
283
284 RegPressureIgnoredUnits.resize(getNumRegUnits());
285 RegPressureIgnoredUnits.set(
286 *MCRegUnitIterator(MCRegister::from(AMDGPU::M0), this));
287 for (auto Reg : AMDGPU::VGPR_HI16RegClass)
288 RegPressureIgnoredUnits.set(*MCRegUnitIterator(Reg, this));
289
290 // HACK: Until this is fully tablegen'd.
291 static llvm::once_flag InitializeRegSplitPartsFlag;
292
293 static auto InitializeRegSplitPartsOnce = [this]() {
294 for (unsigned Idx = 1, E = getNumSubRegIndices() - 1; Idx < E; ++Idx) {
295 unsigned Size = getSubRegIdxSize(Idx);
296 if (Size & 31)
297 continue;
298 std::vector<int16_t> &Vec = RegSplitParts[Size / 32 - 1];
299 unsigned Pos = getSubRegIdxOffset(Idx);
300 if (Pos % Size)
301 continue;
302 Pos /= Size;
303 if (Vec.empty()) {
304 unsigned MaxNumParts = 1024 / Size; // Maximum register is 1024 bits.
305 Vec.resize(MaxNumParts);
306 }
307 Vec[Pos] = Idx;
308 }
309 };
310
311 static llvm::once_flag InitializeSubRegFromChannelTableFlag;
312
313 static auto InitializeSubRegFromChannelTableOnce = [this]() {
314 for (auto &Row : SubRegFromChannelTable)
315 Row.fill(AMDGPU::NoSubRegister);
316 for (uint16_t Idx = 1; Idx < getNumSubRegIndices(); ++Idx) {
317 unsigned Width = AMDGPUSubRegIdxRanges[Idx].Size / 32;
318 unsigned Offset = AMDGPUSubRegIdxRanges[Idx].Offset / 32;
319 assert(Width < SubRegFromChannelTableWidthMap.size())(static_cast <bool> (Width < SubRegFromChannelTableWidthMap
.size()) ? void (0) : __assert_fail ("Width < SubRegFromChannelTableWidthMap.size()"
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp"
, 319, __extension__ __PRETTY_FUNCTION__))
;
320 Width = SubRegFromChannelTableWidthMap[Width];
321 if (Width == 0)
322 continue;
323 unsigned TableIdx = Width - 1;
324 assert(TableIdx < SubRegFromChannelTable.size())(static_cast <bool> (TableIdx < SubRegFromChannelTable
.size()) ? void (0) : __assert_fail ("TableIdx < SubRegFromChannelTable.size()"
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp"
, 324, __extension__ __PRETTY_FUNCTION__))
;
325 assert(Offset < SubRegFromChannelTable[TableIdx].size())(static_cast <bool> (Offset < SubRegFromChannelTable
[TableIdx].size()) ? void (0) : __assert_fail ("Offset < SubRegFromChannelTable[TableIdx].size()"
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp"
, 325, __extension__ __PRETTY_FUNCTION__))
;
326 SubRegFromChannelTable[TableIdx][Offset] = Idx;
327 }
328 };
329
330 llvm::call_once(InitializeRegSplitPartsFlag, InitializeRegSplitPartsOnce);
331 llvm::call_once(InitializeSubRegFromChannelTableFlag,
332 InitializeSubRegFromChannelTableOnce);
333}
334
335void SIRegisterInfo::reserveRegisterTuples(BitVector &Reserved,
336 MCRegister Reg) const {
337 MCRegAliasIterator R(Reg, this, true);
338
339 for (; R.isValid(); ++R)
340 Reserved.set(*R);
341}
342
343 // Forced to be defined here by one of the generated .inc files.
344const MCPhysReg *SIRegisterInfo::getCalleeSavedRegs(
345 const MachineFunction *MF) const {
346 CallingConv::ID CC = MF->getFunction().getCallingConv();
347 switch (CC) {
348 case CallingConv::C:
349 case CallingConv::Fast:
350 case CallingConv::Cold:
351 case CallingConv::AMDGPU_Gfx:
352 return MF->getSubtarget<GCNSubtarget>().hasGFX90AInsts()
353 ? CSR_AMDGPU_HighRegs_With_AGPRs_SaveList
354 : CSR_AMDGPU_HighRegs_SaveList;
355 default: {
356 // Dummy to not crash RegisterClassInfo.
357 static const MCPhysReg NoCalleeSavedReg = AMDGPU::NoRegister;
358 return &NoCalleeSavedReg;
359 }
360 }
361}
362
363const MCPhysReg *
364SIRegisterInfo::getCalleeSavedRegsViaCopy(const MachineFunction *MF) const {
365 return nullptr;
366}
367
368const uint32_t *SIRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
369 CallingConv::ID CC) const {
370 switch (CC) {
371 case CallingConv::C:
372 case CallingConv::Fast:
373 case CallingConv::Cold:
374 case CallingConv::AMDGPU_Gfx:
375 return MF.getSubtarget<GCNSubtarget>().hasGFX90AInsts()
376 ? CSR_AMDGPU_HighRegs_With_AGPRs_RegMask
377 : CSR_AMDGPU_HighRegs_RegMask;
378 default:
379 return nullptr;
380 }
381}
382
383const uint32_t *SIRegisterInfo::getNoPreservedMask() const {
384 return CSR_AMDGPU_NoRegs_RegMask;
385}
386
387Register SIRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
388 const SIFrameLowering *TFI =
389 MF.getSubtarget<GCNSubtarget>().getFrameLowering();
390 const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
391 // During ISel lowering we always reserve the stack pointer in entry
392 // functions, but never actually want to reference it when accessing our own
393 // frame. If we need a frame pointer we use it, but otherwise we can just use
394 // an immediate "0" which we represent by returning NoRegister.
395 if (FuncInfo->isEntryFunction()) {
396 return TFI->hasFP(MF) ? FuncInfo->getFrameOffsetReg() : Register();
397 }
398 return TFI->hasFP(MF) ? FuncInfo->getFrameOffsetReg()
399 : FuncInfo->getStackPtrOffsetReg();
400}
401
402bool SIRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
403 // When we need stack realignment, we can't reference off of the
404 // stack pointer, so we reserve a base pointer.
405 const MachineFrameInfo &MFI = MF.getFrameInfo();
406 return MFI.getNumFixedObjects() && shouldRealignStack(MF);
407}
408
409Register SIRegisterInfo::getBaseRegister() const { return AMDGPU::SGPR34; }
410
411const uint32_t *SIRegisterInfo::getAllVGPRRegMask() const {
412 return CSR_AMDGPU_AllVGPRs_RegMask;
413}
414
415const uint32_t *SIRegisterInfo::getAllAGPRRegMask() const {
416 return CSR_AMDGPU_AllAGPRs_RegMask;
417}
418
419const uint32_t *SIRegisterInfo::getAllVectorRegMask() const {
420 return CSR_AMDGPU_AllVectorRegs_RegMask;
421}
422
423const uint32_t *SIRegisterInfo::getAllAllocatableSRegMask() const {
424 return CSR_AMDGPU_AllAllocatableSRegs_RegMask;
425}
426
427unsigned SIRegisterInfo::getSubRegFromChannel(unsigned Channel,
428 unsigned NumRegs) {
429 assert(NumRegs < SubRegFromChannelTableWidthMap.size())(static_cast <bool> (NumRegs < SubRegFromChannelTableWidthMap
.size()) ? void (0) : __assert_fail ("NumRegs < SubRegFromChannelTableWidthMap.size()"
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp"
, 429, __extension__ __PRETTY_FUNCTION__))
;
430 unsigned NumRegIndex = SubRegFromChannelTableWidthMap[NumRegs];
431 assert(NumRegIndex && "Not implemented")(static_cast <bool> (NumRegIndex && "Not implemented"
) ? void (0) : __assert_fail ("NumRegIndex && \"Not implemented\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp"
, 431, __extension__ __PRETTY_FUNCTION__))
;
432 assert(Channel < SubRegFromChannelTable[NumRegIndex - 1].size())(static_cast <bool> (Channel < SubRegFromChannelTable
[NumRegIndex - 1].size()) ? void (0) : __assert_fail ("Channel < SubRegFromChannelTable[NumRegIndex - 1].size()"
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp"
, 432, __extension__ __PRETTY_FUNCTION__))
;
433 return SubRegFromChannelTable[NumRegIndex - 1][Channel];
434}
435
436MCRegister SIRegisterInfo::reservedPrivateSegmentBufferReg(
437 const MachineFunction &MF) const {
438 unsigned BaseIdx = alignDown(ST.getMaxNumSGPRs(MF), 4) - 4;
439 MCRegister BaseReg(AMDGPU::SGPR_32RegClass.getRegister(BaseIdx));
440 return getMatchingSuperReg(BaseReg, AMDGPU::sub0, &AMDGPU::SGPR_128RegClass);
441}
442
443BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
444 BitVector Reserved(getNumRegs());
445 Reserved.set(AMDGPU::MODE);
446
447 // EXEC_LO and EXEC_HI could be allocated and used as regular registers, but
448 // this seems likely to result in bugs, so I'm marking them as reserved.
449 reserveRegisterTuples(Reserved, AMDGPU::EXEC);
450 reserveRegisterTuples(Reserved, AMDGPU::FLAT_SCR);
451
452 // M0 has to be reserved so that llvm accepts it as a live-in into a block.
453 reserveRegisterTuples(Reserved, AMDGPU::M0);
454
455 // Reserve src_vccz, src_execz, src_scc.
456 reserveRegisterTuples(Reserved, AMDGPU::SRC_VCCZ);
457 reserveRegisterTuples(Reserved, AMDGPU::SRC_EXECZ);
458 reserveRegisterTuples(Reserved, AMDGPU::SRC_SCC);
459
460 // Reserve the memory aperture registers.
461 reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_BASE);
462 reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_LIMIT);
463 reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_BASE);
464 reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_LIMIT);
465
466 // Reserve src_pops_exiting_wave_id - support is not implemented in Codegen.
467 reserveRegisterTuples(Reserved, AMDGPU::SRC_POPS_EXITING_WAVE_ID);
468
469 // Reserve xnack_mask registers - support is not implemented in Codegen.
470 reserveRegisterTuples(Reserved, AMDGPU::XNACK_MASK);
471
472 // Reserve lds_direct register - support is not implemented in Codegen.
473 reserveRegisterTuples(Reserved, AMDGPU::LDS_DIRECT);
474
475 // Reserve Trap Handler registers - support is not implemented in Codegen.
476 reserveRegisterTuples(Reserved, AMDGPU::TBA);
477 reserveRegisterTuples(Reserved, AMDGPU::TMA);
478 reserveRegisterTuples(Reserved, AMDGPU::TTMP0_TTMP1);
479 reserveRegisterTuples(Reserved, AMDGPU::TTMP2_TTMP3);
480 reserveRegisterTuples(Reserved, AMDGPU::TTMP4_TTMP5);
481 reserveRegisterTuples(Reserved, AMDGPU::TTMP6_TTMP7);
482 reserveRegisterTuples(Reserved, AMDGPU::TTMP8_TTMP9);
483 reserveRegisterTuples(Reserved, AMDGPU::TTMP10_TTMP11);
484 reserveRegisterTuples(Reserved, AMDGPU::TTMP12_TTMP13);
485 reserveRegisterTuples(Reserved, AMDGPU::TTMP14_TTMP15);
486
487 // Reserve null register - it shall never be allocated
488 reserveRegisterTuples(Reserved, AMDGPU::SGPR_NULL);
489
490 // Disallow vcc_hi allocation in wave32. It may be allocated but most likely
491 // will result in bugs.
492 if (isWave32) {
493 Reserved.set(AMDGPU::VCC);
494 Reserved.set(AMDGPU::VCC_HI);
495 }
496
497 unsigned MaxNumSGPRs = ST.getMaxNumSGPRs(MF);
498 unsigned TotalNumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
499 for (unsigned i = MaxNumSGPRs; i < TotalNumSGPRs; ++i) {
500 unsigned Reg = AMDGPU::SGPR_32RegClass.getRegister(i);
501 reserveRegisterTuples(Reserved, Reg);
502 }
503
504 unsigned MaxNumVGPRs = ST.getMaxNumVGPRs(MF);
505 // TODO: In an entry function without calls and without AGPR usage it is
506 // possible to use the whole register budget for VGPRs. Moreover, it should
507 // be possible to estimate the maximum AGPR/VGPR pressure and split the
508 // register file accordingly.
509 if (ST.hasGFX90AInsts())
510 MaxNumVGPRs /= 2;
511 unsigned TotalNumVGPRs = AMDGPU::VGPR_32RegClass.getNumRegs();
512 for (unsigned i = MaxNumVGPRs; i < TotalNumVGPRs; ++i) {
513 unsigned Reg = AMDGPU::VGPR_32RegClass.getRegister(i);
514 reserveRegisterTuples(Reserved, Reg);
515 Reg = AMDGPU::AGPR_32RegClass.getRegister(i);
516 reserveRegisterTuples(Reserved, Reg);
517 }
518
519 for (auto Reg : AMDGPU::SReg_32RegClass) {
520 Reserved.set(getSubReg(Reg, AMDGPU::hi16));
521 Register Low = getSubReg(Reg, AMDGPU::lo16);
522 // This is to prevent BB vcc liveness errors.
523 if (!AMDGPU::SGPR_LO16RegClass.contains(Low))
524 Reserved.set(Low);
525 }
526
527 for (auto Reg : AMDGPU::AGPR_32RegClass) {
528 Reserved.set(getSubReg(Reg, AMDGPU::hi16));
529 }
530
531 // Reserve all remaining AGPRs if there are no instructions that can use them.
532 if (!ST.hasMAIInsts()) {
533 for (unsigned i = 0; i < MaxNumVGPRs; ++i) {
534 unsigned Reg = AMDGPU::AGPR_32RegClass.getRegister(i);
535 reserveRegisterTuples(Reserved, Reg);
536 }
537 }
538
539 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
540
541 Register ScratchRSrcReg = MFI->getScratchRSrcReg();
542 if (ScratchRSrcReg != AMDGPU::NoRegister) {
543 // Reserve 4 SGPRs for the scratch buffer resource descriptor in case we need
544 // to spill.
545 // TODO: May need to reserve a VGPR if doing LDS spilling.
546 reserveRegisterTuples(Reserved, ScratchRSrcReg);
547 }
548
549 // We have to assume the SP is needed in case there are calls in the function,
550 // which is detected after the function is lowered. If we aren't really going
551 // to need SP, don't bother reserving it.
552 MCRegister StackPtrReg = MFI->getStackPtrOffsetReg();
553
554 if (StackPtrReg) {
555 reserveRegisterTuples(Reserved, StackPtrReg);
556 assert(!isSubRegister(ScratchRSrcReg, StackPtrReg));
557 }
558
559 MCRegister FrameReg = MFI->getFrameOffsetReg();
560 if (FrameReg) {
561 reserveRegisterTuples(Reserved, FrameReg);
562 assert(!isSubRegister(ScratchRSrcReg, FrameReg));
563 }
564
565 if (hasBasePointer(MF)) {
566 MCRegister BasePtrReg = getBaseRegister();
567 reserveRegisterTuples(Reserved, BasePtrReg);
568 assert(!isSubRegister(ScratchRSrcReg, BasePtrReg));
569 }
570
571 for (auto Reg : MFI->WWMReservedRegs) {
572 reserveRegisterTuples(Reserved, Reg.first);
573 }
574
575 // Reserve VGPRs used for SGPR spilling.
576 // Note we treat freezeReservedRegs unusually because we run register
577 // allocation in two phases. It's OK to re-freeze with new registers for the
578 // second run.
579#if 0
580 for (auto &SpilledFI : MFI->sgpr_spill_vgprs()) {
581 for (auto &SpilledVGPR : SpilledFI.second)
582 reserveRegisterTuples(Reserved, SpilledVGPR.VGPR);
583 }
584#endif
585
586 // FIXME: Stop using reserved registers for this.
587 for (MCPhysReg Reg : MFI->getAGPRSpillVGPRs())
588 reserveRegisterTuples(Reserved, Reg);
589
590 for (MCPhysReg Reg : MFI->getVGPRSpillAGPRs())
591 reserveRegisterTuples(Reserved, Reg);
592
593 for (auto SSpill : MFI->getSGPRSpillVGPRs())
594 reserveRegisterTuples(Reserved, SSpill.VGPR);
595
596 return Reserved;
597}
598
599bool SIRegisterInfo::shouldRealignStack(const MachineFunction &MF) const {
600 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
601 // On entry, the base address is 0, so it can't possibly need any more
602 // alignment.
603
604 // FIXME: Should be able to specify the entry frame alignment per calling
605 // convention instead.
606 if (Info->isEntryFunction())
607 return false;
608
609 return TargetRegisterInfo::shouldRealignStack(MF);
610}
611
612bool SIRegisterInfo::requiresRegisterScavenging(const MachineFunction &Fn) const {
613 const SIMachineFunctionInfo *Info = Fn.getInfo<SIMachineFunctionInfo>();
614 if (Info->isEntryFunction()) {
615 const MachineFrameInfo &MFI = Fn.getFrameInfo();
616 return MFI.hasStackObjects() || MFI.hasCalls();
617 }
618
619 // May need scavenger for dealing with callee saved registers.
620 return true;
621}
622
623bool SIRegisterInfo::requiresFrameIndexScavenging(
624 const MachineFunction &MF) const {
625 // Do not use frame virtual registers. They used to be used for SGPRs, but
626 // once we reach PrologEpilogInserter, we can no longer spill SGPRs. If the
627 // scavenger fails, we can increment/decrement the necessary SGPRs to avoid a
628 // spill.
629 return false;
630}
631
632bool SIRegisterInfo::requiresFrameIndexReplacementScavenging(
633 const MachineFunction &MF) const {
634 const MachineFrameInfo &MFI = MF.getFrameInfo();
635 return MFI.hasStackObjects();
636}
637
638bool SIRegisterInfo::requiresVirtualBaseRegisters(
639 const MachineFunction &) const {
640 // There are no special dedicated stack or frame pointers.
641 return true;
642}
643
644int64_t SIRegisterInfo::getScratchInstrOffset(const MachineInstr *MI) const {
645 assert(SIInstrInfo::isMUBUF(*MI) || SIInstrInfo::isFLATScratch(*MI));
646
647 int OffIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
648 AMDGPU::OpName::offset);
649 return MI->getOperand(OffIdx).getImm();
650}
651
652int64_t SIRegisterInfo::getFrameIndexInstrOffset(const MachineInstr *MI,
653 int Idx) const {
654 if (!SIInstrInfo::isMUBUF(*MI) && !SIInstrInfo::isFLATScratch(*MI))
655 return 0;
656
657 assert((Idx == AMDGPU::getNamedOperandIdx(MI->getOpcode(),
658 AMDGPU::OpName::vaddr) ||
659 (Idx == AMDGPU::getNamedOperandIdx(MI->getOpcode(),
660 AMDGPU::OpName::saddr))) &&
661 "Should never see frame index on non-address operand");
662
663 return getScratchInstrOffset(MI);
664}
665
666bool SIRegisterInfo::needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
667 if (!SIInstrInfo::isMUBUF(*MI) && !SIInstrInfo::isFLATScratch(*MI))
668 return false;
669
670 int64_t FullOffset = Offset + getScratchInstrOffset(MI);
671
672 if (SIInstrInfo::isMUBUF(*MI))
673 return !SIInstrInfo::isLegalMUBUFImmOffset(FullOffset);
674
675 const SIInstrInfo *TII = ST.getInstrInfo();
676 return !TII->isLegalFLATOffset(FullOffset, AMDGPUAS::PRIVATE_ADDRESS,
677 SIInstrFlags::FlatScratch);
678}
679
680Register SIRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
681 int FrameIdx,
682 int64_t Offset) const {
683 MachineBasicBlock::iterator Ins = MBB->begin();
684 DebugLoc DL; // Defaults to "unknown"
685
686 if (Ins != MBB->end())
687 DL = Ins->getDebugLoc();
688
689 MachineFunction *MF = MBB->getParent();
690 const SIInstrInfo *TII = ST.getInstrInfo();
691 MachineRegisterInfo &MRI = MF->getRegInfo();
692 unsigned MovOpc = ST.enableFlatScratch() ? AMDGPU::S_MOV_B32
693 : AMDGPU::V_MOV_B32_e32;
694
695 Register BaseReg = MRI.createVirtualRegister(
696 ST.enableFlatScratch() ? &AMDGPU::SReg_32_XEXEC_HIRegClass
697 : &AMDGPU::VGPR_32RegClass);
698
699 if (Offset == 0) {
700 BuildMI(*MBB, Ins, DL, TII->get(MovOpc), BaseReg)
701 .addFrameIndex(FrameIdx);
702 return BaseReg;
703 }
704
705 Register OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
706
707 Register FIReg = MRI.createVirtualRegister(
708 ST.enableFlatScratch() ? &AMDGPU::SReg_32_XM0RegClass
709 : &AMDGPU::VGPR_32RegClass);
710
711 BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg)
712 .addImm(Offset);
713 BuildMI(*MBB, Ins, DL, TII->get(MovOpc), FIReg)
714 .addFrameIndex(FrameIdx);
715
716 if (ST.enableFlatScratch()) {
717 BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::S_ADD_I32), BaseReg)
718 .addReg(OffsetReg, RegState::Kill)
719 .addReg(FIReg);
720 return BaseReg;
721 }
722
723 TII->getAddNoCarry(*MBB, Ins, DL, BaseReg)
724 .addReg(OffsetReg, RegState::Kill)
725 .addReg(FIReg)
726 .addImm(0); // clamp bit
727
728 return BaseReg;
729}
730
731void SIRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
732 int64_t Offset) const {
733 const SIInstrInfo *TII = ST.getInstrInfo();
734 bool IsFlat = TII->isFLATScratch(MI);
735
736#ifndef NDEBUG
737 // FIXME: Is it possible to be storing a frame index to itself?
738 bool SeenFI = false;
739 for (const MachineOperand &MO: MI.operands()) {
740 if (MO.isFI()) {
741 if (SeenFI)
742 llvm_unreachable("should not see multiple frame indices")::llvm::llvm_unreachable_internal("should not see multiple frame indices"
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp"
, 742)
;
743
744 SeenFI = true;
745 }
746 }
747#endif
748
749 MachineOperand *FIOp =
750 TII->getNamedOperand(MI, IsFlat ? AMDGPU::OpName::saddr
751 : AMDGPU::OpName::vaddr);
752
753 MachineOperand *OffsetOp = TII->getNamedOperand(MI, AMDGPU::OpName::offset);
754 int64_t NewOffset = OffsetOp->getImm() + Offset;
755
756 assert(FIOp && FIOp->isFI() && "frame index must be address operand")(static_cast <bool> (FIOp && FIOp->isFI() &&
"frame index must be address operand") ? void (0) : __assert_fail
("FIOp && FIOp->isFI() && \"frame index must be address operand\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp"
, 756, __extension__ __PRETTY_FUNCTION__))
;
757 assert(TII->isMUBUF(MI) || TII->isFLATScratch(MI))(static_cast <bool> (TII->isMUBUF(MI) || TII->isFLATScratch
(MI)) ? void (0) : __assert_fail ("TII->isMUBUF(MI) || TII->isFLATScratch(MI)"
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp"
, 757, __extension__ __PRETTY_FUNCTION__))
;
758
759 if (IsFlat) {
760 assert(TII->isLegalFLATOffset(NewOffset, AMDGPUAS::PRIVATE_ADDRESS,(static_cast <bool> (TII->isLegalFLATOffset(NewOffset
, AMDGPUAS::PRIVATE_ADDRESS, SIInstrFlags::FlatScratch) &&
"offset should be legal") ? void (0) : __assert_fail ("TII->isLegalFLATOffset(NewOffset, AMDGPUAS::PRIVATE_ADDRESS, SIInstrFlags::FlatScratch) && \"offset should be legal\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp"
, 762, __extension__ __PRETTY_FUNCTION__))
761 SIInstrFlags::FlatScratch) &&(static_cast <bool> (TII->isLegalFLATOffset(NewOffset
, AMDGPUAS::PRIVATE_ADDRESS, SIInstrFlags::FlatScratch) &&
"offset should be legal") ? void (0) : __assert_fail ("TII->isLegalFLATOffset(NewOffset, AMDGPUAS::PRIVATE_ADDRESS, SIInstrFlags::FlatScratch) && \"offset should be legal\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp"
, 762, __extension__ __PRETTY_FUNCTION__))
762 "offset should be legal")(static_cast <bool> (TII->isLegalFLATOffset(NewOffset
, AMDGPUAS::PRIVATE_ADDRESS, SIInstrFlags::FlatScratch) &&
"offset should be legal") ? void (0) : __assert_fail ("TII->isLegalFLATOffset(NewOffset, AMDGPUAS::PRIVATE_ADDRESS, SIInstrFlags::FlatScratch) && \"offset should be legal\""
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp"
, 762, __extension__ __PRETTY_FUNCTION__))
;
763 FIOp->ChangeToRegister(BaseReg, false);
764 OffsetOp->setImm(NewOffset);
765 return;
766 }
767
768#ifndef NDEBUG
769 MachineOperand *SOffset = TII->getNamedOperand(MI, AMDGPU::OpName::soffset);
770 assert(SOffset->isImm() && SOffset->getImm() == 0)(static_cast <bool> (SOffset->isImm() && SOffset
->getImm() == 0) ? void (0) : __assert_fail ("SOffset->isImm() && SOffset->getImm() == 0"
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp"
, 770, __extension__ __PRETTY_FUNCTION__))
;
771#endif
772
773 assert(SIInstrInfo::isLegalMUBUFImmOffset(NewOffset) &&
774 "offset should be legal");
775
776 FIOp->ChangeToRegister(BaseReg, false);
777 OffsetOp->setImm(NewOffset);
778}
779
780bool SIRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
781 Register BaseReg,
782 int64_t Offset) const {
783 if (!SIInstrInfo::isMUBUF(*MI) && !SIInstrInfo::isFLATScratch(*MI))
784 return false;
785
786 int64_t NewOffset = Offset + getScratchInstrOffset(MI);
787
788 if (SIInstrInfo::isMUBUF(*MI))
789 return SIInstrInfo::isLegalMUBUFImmOffset(NewOffset);
790
791 const SIInstrInfo *TII = ST.getInstrInfo();
792 return TII->isLegalFLATOffset(NewOffset, AMDGPUAS::PRIVATE_ADDRESS,
793 SIInstrFlags::FlatScratch);
794}
795
796const TargetRegisterClass *SIRegisterInfo::getPointerRegClass(
797 const MachineFunction &MF, unsigned Kind) const {
798 // This is inaccurate. It depends on the instruction and address space. The
799 // only place where we should hit this is for dealing with frame indexes /
800 // private accesses, so this is correct in that case.
801 return &AMDGPU::VGPR_32RegClass;
802}
803
804const TargetRegisterClass *
805SIRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
806 if (isAGPRClass(RC) && !ST.hasGFX90AInsts())
807 return getEquivalentVGPRClass(RC);
808
809 return RC;
810}
811
812static unsigned getNumSubRegsForSpillOp(unsigned Op) {
813
814 switch (Op) {
815 case AMDGPU::SI_SPILL_S1024_SAVE:
816 case AMDGPU::SI_SPILL_S1024_RESTORE:
817 case AMDGPU::SI_SPILL_V1024_SAVE:
818 case AMDGPU::SI_SPILL_V1024_RESTORE:
819 case AMDGPU::SI_SPILL_A1024_SAVE:
820 case AMDGPU::SI_SPILL_A1024_RESTORE:
821 return 32;
822 case AMDGPU::SI_SPILL_S512_SAVE:
823 case AMDGPU::SI_SPILL_S512_RESTORE:
824 case AMDGPU::SI_SPILL_V512_SAVE:
825 case AMDGPU::SI_SPILL_V512_RESTORE:
826 case AMDGPU::SI_SPILL_A512_SAVE:
827 case AMDGPU::SI_SPILL_A512_RESTORE:
828 return 16;
829 case AMDGPU::SI_SPILL_S256_SAVE:
830 case AMDGPU::SI_SPILL_S256_RESTORE:
831 case AMDGPU::SI_SPILL_V256_SAVE:
832 case AMDGPU::SI_SPILL_V256_RESTORE:
833 case AMDGPU::SI_SPILL_A256_SAVE:
834 case AMDGPU::SI_SPILL_A256_RESTORE:
835 return 8;
836 case AMDGPU::SI_SPILL_S224_SAVE:
837 case AMDGPU::SI_SPILL_S224_RESTORE:
838 case AMDGPU::SI_SPILL_V224_SAVE:
839 case AMDGPU::SI_SPILL_V224_RESTORE:
840 case AMDGPU::SI_SPILL_A224_SAVE:
841 case AMDGPU::SI_SPILL_A224_RESTORE:
842 return 7;
843 case AMDGPU::SI_SPILL_S192_SAVE:
844 case AMDGPU::SI_SPILL_S192_RESTORE:
845 case AMDGPU::SI_SPILL_V192_SAVE:
846 case AMDGPU::SI_SPILL_V192_RESTORE:
847 case AMDGPU::SI_SPILL_A192_SAVE:
848 case AMDGPU::SI_SPILL_A192_RESTORE:
849 return 6;
850 case AMDGPU::SI_SPILL_S160_SAVE:
851 case AMDGPU::SI_SPILL_S160_RESTORE:
852 case AMDGPU::SI_SPILL_V160_SAVE:
853 case AMDGPU::SI_SPILL_V160_RESTORE:
854 case AMDGPU::SI_SPILL_A160_SAVE:
855 case AMDGPU::SI_SPILL_A160_RESTORE:
856 return 5;
857 case AMDGPU::SI_SPILL_S128_SAVE:
858 case AMDGPU::SI_SPILL_S128_RESTORE:
859 case AMDGPU::SI_SPILL_V128_SAVE:
860 case AMDGPU::SI_SPILL_V128_RESTORE:
861 case AMDGPU::SI_SPILL_A128_SAVE:
862 case AMDGPU::SI_SPILL_A128_RESTORE:
863 return 4;
864 case AMDGPU::SI_SPILL_S96_SAVE:
865 case AMDGPU::SI_SPILL_S96_RESTORE:
866 case AMDGPU::SI_SPILL_V96_SAVE:
867 case AMDGPU::SI_SPILL_V96_RESTORE:
868 case AMDGPU::SI_SPILL_A96_SAVE:
869 case AMDGPU::SI_SPILL_A96_RESTORE:
870 return 3;
871 case AMDGPU::SI_SPILL_S64_SAVE:
872 case AMDGPU::SI_SPILL_S64_RESTORE:
873 case AMDGPU::SI_SPILL_V64_SAVE:
874 case AMDGPU::SI_SPILL_V64_RESTORE:
875 case AMDGPU::SI_SPILL_A64_SAVE:
876 case AMDGPU::SI_SPILL_A64_RESTORE:
877 return 2;
878 case AMDGPU::SI_SPILL_S32_SAVE:
879 case AMDGPU::SI_SPILL_S32_RESTORE:
880 case AMDGPU::SI_SPILL_V32_SAVE:
881 case AMDGPU::SI_SPILL_V32_RESTORE:
882 case AMDGPU::SI_SPILL_A32_SAVE:
883 case AMDGPU::SI_SPILL_A32_RESTORE:
884 return 1;
885 default: llvm_unreachable("Invalid spill opcode")::llvm::llvm_unreachable_internal("Invalid spill opcode", "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp"
, 885)
;
886 }
887}
888
889static int getOffsetMUBUFStore(unsigned Opc) {
890 switch (Opc) {
891 case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
892 return AMDGPU::BUFFER_STORE_DWORD_OFFSET;
893 case AMDGPU::BUFFER_STORE_BYTE_OFFEN:
894 return AMDGPU::BUFFER_STORE_BYTE_OFFSET;
895 case AMDGPU::BUFFER_STORE_SHORT_OFFEN:
896 return AMDGPU::BUFFER_STORE_SHORT_OFFSET;
897 case AMDGPU::BUFFER_STORE_DWORDX2_OFFEN:
898 return AMDGPU::BUFFER_STORE_DWORDX2_OFFSET;
899 case AMDGPU::BUFFER_STORE_DWORDX4_OFFEN:
900 return AMDGPU::BUFFER_STORE_DWORDX4_OFFSET;
901 case AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFEN:
902 return AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFSET;
903 case AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFEN:
904 return AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFSET;
905 default:
906 return -1;
907 }
908}
909
910static int getOffsetMUBUFLoad(unsigned Opc) {
911 switch (Opc) {
912 case AMDGPU::BUFFER_LOAD_DWORD_OFFEN:
913 return AMDGPU::BUFFER_LOAD_DWORD_OFFSET;
914 case AMDGPU::BUFFER_LOAD_UBYTE_OFFEN:
915 return AMDGPU::BUFFER_LOAD_UBYTE_OFFSET;
916 case AMDGPU::BUFFER_LOAD_SBYTE_OFFEN:
917 return AMDGPU::BUFFER_LOAD_SBYTE_OFFSET;
918 case AMDGPU::BUFFER_LOAD_USHORT_OFFEN:
919 return AMDGPU::BUFFER_LOAD_USHORT_OFFSET;
920 case AMDGPU::BUFFER_LOAD_SSHORT_OFFEN:
921 return AMDGPU::BUFFER_LOAD_SSHORT_OFFSET;
922 case AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN:
923 return AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET;
924 case AMDGPU::BUFFER_LOAD_DWORDX4_OFFEN:
925 return AMDGPU::BUFFER_LOAD_DWORDX4_OFFSET;
926 case AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFEN:
927 return AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFSET;
928 case AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFEN:
929 return AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFSET;
930 case AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFEN:
931 return AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFSET;
932 case AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFEN:
933 return AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFSET;
934 case AMDGPU::BUFFER_LOAD_SHORT_D16_OFFEN:
935 return AMDGPU::BUFFER_LOAD_SHORT_D16_OFFSET;
936 case AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFEN:
937 return AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFSET;
938 default:
939 return -1;
940 }
941}
942
943static MachineInstrBuilder spillVGPRtoAGPR(const GCNSubtarget &ST,
944 MachineBasicBlock &MBB,
945 MachineBasicBlock::iterator MI,
946 int Index, unsigned Lane,
947 unsigned ValueReg, bool IsKill) {
948 MachineFunction *MF = MBB.getParent();
949 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
950 const SIInstrInfo *TII = ST.getInstrInfo();
951
952 MCPhysReg Reg = MFI->getVGPRToAGPRSpill(Index, Lane);
953
954 if (Reg == AMDGPU::NoRegister)
955 return MachineInstrBuilder();
956
957 bool IsStore = MI->mayStore();
958 MachineRegisterInfo &MRI = MF->getRegInfo();
959 auto *TRI = static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo());
960
961 unsigned Dst = IsStore ? Reg : ValueReg;
962 unsigned Src = IsStore ? ValueReg : Reg;
963 unsigned Opc = (IsStore ^ TRI->isVGPR(MRI, Reg)) ? AMDGPU::V_ACCVGPR_WRITE_B32_e64
964 : AMDGPU::V_ACCVGPR_READ_B32_e64;
965
966 auto MIB = BuildMI(MBB, MI, MI->getDebugLoc(), TII->get(Opc), Dst)
967 .addReg(Src, getKillRegState(IsKill));
968 MIB->setAsmPrinterFlag(MachineInstr::ReloadReuse);
969 return MIB;
970}
971
972// This differs from buildSpillLoadStore by only scavenging a VGPR. It does not
973// need to handle the case where an SGPR may need to be spilled while spilling.
974static bool buildMUBUFOffsetLoadStore(const GCNSubtarget &ST,
975 MachineFrameInfo &MFI,
976 MachineBasicBlock::iterator MI,
977 int Index,
978 int64_t Offset) {
979 const SIInstrInfo *TII = ST.getInstrInfo();
980 MachineBasicBlock *MBB = MI->getParent();
981 const DebugLoc &DL = MI->getDebugLoc();
982 bool IsStore = MI->mayStore();
983
984 unsigned Opc = MI->getOpcode();
985 int LoadStoreOp = IsStore ?
986 getOffsetMUBUFStore(Opc) : getOffsetMUBUFLoad(Opc);
987 if (LoadStoreOp == -1)
988 return false;
989
990 const MachineOperand *Reg = TII->getNamedOperand(*MI, AMDGPU::OpName::vdata);
991 if (spillVGPRtoAGPR(ST, *MBB, MI, Index, 0, Reg->getReg(), false).getInstr())
992 return true;
993
994 MachineInstrBuilder NewMI =
995 BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
996 .add(*Reg)
997 .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc))
998 .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::soffset))
999 .addImm(Offset)
1000 .addImm(0) // cpol
1001 .addImm(0) // tfe
1002 .addImm(0) // swz
1003 .cloneMemRefs(*MI);
1004
1005 const MachineOperand *VDataIn = TII->getNamedOperand(*MI,
1006 AMDGPU::OpName::vdata_in);
1007 if (VDataIn)
1008 NewMI.add(*VDataIn);
1009 return true;
1010}
1011
1012static unsigned getFlatScratchSpillOpcode(const SIInstrInfo *TII,
1013 unsigned LoadStoreOp,
1014 unsigned EltSize) {
1015 bool IsStore = TII->get(LoadStoreOp).mayStore();
1016 bool UseST =
1017 AMDGPU::getNamedOperandIdx(LoadStoreOp, AMDGPU::OpName::vaddr) < 0 &&
1018 AMDGPU::getNamedOperandIdx(LoadStoreOp, AMDGPU::OpName::saddr) < 0;
1019
1020 switch (EltSize) {
1021 case 4:
1022 LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORD_SADDR
1023 : AMDGPU::SCRATCH_LOAD_DWORD_SADDR;
1024 break;
1025 case 8:
1026 LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORDX2_SADDR
1027 : AMDGPU::SCRATCH_LOAD_DWORDX2_SADDR;
1028 break;
1029 case 12:
1030 LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORDX3_SADDR
1031 : AMDGPU::SCRATCH_LOAD_DWORDX3_SADDR;
1032 break;
1033 case 16:
1034 LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORDX4_SADDR
1035 : AMDGPU::SCRATCH_LOAD_DWORDX4_SADDR;
1036 break;
1037 default:
1038     llvm_unreachable("Unexpected spill load/store size!");
1039 }
1040
1041 if (UseST)
1042 LoadStoreOp = AMDGPU::getFlatScratchInstSTfromSS(LoadStoreOp);
1043
1044 return LoadStoreOp;
1045}
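getFlatScratchSpillOpcode buckets the element size into one of four SADDR opcodes and then, when neither a vaddr nor an saddr operand exists, switches to the ST addressing variant. A standalone sketch of the size bucketing alone, under hypothetical enumerators:

#include <stdexcept>

enum ScratchOpc { ST_DWORD, ST_DWORDX2, ST_DWORDX3, ST_DWORDX4,
                  LD_DWORD, LD_DWORDX2, LD_DWORDX3, LD_DWORDX4 };

// EltSize is in bytes; only 4, 8, 12 and 16 are legal spill element sizes.
static ScratchOpc pickScratchOpc(bool IsStore, unsigned EltSize) {
  switch (EltSize) {
  case 4:  return IsStore ? ST_DWORD   : LD_DWORD;
  case 8:  return IsStore ? ST_DWORDX2 : LD_DWORDX2;
  case 12: return IsStore ? ST_DWORDX3 : LD_DWORDX3;
  case 16: return IsStore ? ST_DWORDX4 : LD_DWORDX4;
  default: throw std::invalid_argument("unexpected spill load/store size");
  }
}

int main() { return pickScratchOpc(true, 12) == ST_DWORDX3 ? 0 : 1; }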
1046
1047void SIRegisterInfo::buildSpillLoadStore(
1048 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
1049 unsigned LoadStoreOp, int Index, Register ValueReg, bool IsKill,
1050 MCRegister ScratchOffsetReg, int64_t InstOffset, MachineMemOperand *MMO,
1051 RegScavenger *RS, LivePhysRegs *LiveRegs) const {
1052   assert((!RS || !LiveRegs) && "Only RS or LiveRegs can be set but not both");
1053
1054 MachineFunction *MF = MBB.getParent();
1055 const SIInstrInfo *TII = ST.getInstrInfo();
1056 const MachineFrameInfo &MFI = MF->getFrameInfo();
1057 const SIMachineFunctionInfo *FuncInfo = MF->getInfo<SIMachineFunctionInfo>();
1058
1059 const MCInstrDesc *Desc = &TII->get(LoadStoreOp);
1060 const DebugLoc &DL = MI != MBB.end() ? MI->getDebugLoc() : DebugLoc();
1061 bool IsStore = Desc->mayStore();
1062 bool IsFlat = TII->isFLATScratch(LoadStoreOp);
1063
1064 bool Scavenged = false;
1065 MCRegister SOffset = ScratchOffsetReg;
1066
1067 const TargetRegisterClass *RC = getRegClassForReg(MF->getRegInfo(), ValueReg);
1068 // On gfx90a+ AGPR is a regular VGPR acceptable for loads and stores.
1069 const bool IsAGPR = !ST.hasGFX90AInsts() && hasAGPRs(RC);
1070 const unsigned RegWidth = AMDGPU::getRegBitWidth(RC->getID()) / 8;
1071
1072 // Always use 4 byte operations for AGPRs because we need to scavenge
1073 // a temporary VGPR.
1074 unsigned EltSize = (IsFlat && !IsAGPR) ? std::min(RegWidth, 16u) : 4u;
1075 unsigned NumSubRegs = RegWidth / EltSize;
1076 unsigned Size = NumSubRegs * EltSize;
1077 unsigned RemSize = RegWidth - Size;
1078 unsigned NumRemSubRegs = RemSize ? 1 : 0;
1079 int64_t Offset = InstOffset + MFI.getObjectOffset(Index);
1080 int64_t MaxOffset = Offset + Size + RemSize - EltSize;
1081 int64_t ScratchOffsetRegDelta = 0;
1082
1083 if (IsFlat && EltSize > 4) {
1084 LoadStoreOp = getFlatScratchSpillOpcode(TII, LoadStoreOp, EltSize);
1085 Desc = &TII->get(LoadStoreOp);
1086 }
1087
1088 Align Alignment = MFI.getObjectAlign(Index);
1089 const MachinePointerInfo &BasePtrInfo = MMO->getPointerInfo();
1090
1091   assert((IsFlat || ((Offset % EltSize) == 0)) &&
1092          "unexpected VGPR spill offset");
1093
1094 bool IsOffsetLegal =
1095 IsFlat ? TII->isLegalFLATOffset(MaxOffset, AMDGPUAS::PRIVATE_ADDRESS,
1096 SIInstrFlags::FlatScratch)
1097 : SIInstrInfo::isLegalMUBUFImmOffset(MaxOffset);
1098 if (!IsOffsetLegal || (IsFlat && !SOffset && !ST.hasFlatScratchSTMode())) {
1099 SOffset = MCRegister();
1100
1101 // We currently only support spilling VGPRs to EltSize boundaries, meaning
1102 // we can simplify the adjustment of Offset here to just scale with
1103 // WavefrontSize.
1104 if (!IsFlat)
1105 Offset *= ST.getWavefrontSize();
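      // Worked example (illustrative, not from the source): with wave64 and a
      // per-lane object offset of 16 bytes, the soffset value becomes
      // 16 * 64 = 1024, since scratch rows interleave all 64 lanes.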
1106
1107     // We don't have access to the register scavenger if this function is
1108     // called during PEI::scavengeFrameVirtualRegs(), so use LiveRegs here.
1109 if (RS) {
1110 SOffset = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0, false);
1111 } else if (LiveRegs) {
1112 for (MCRegister Reg : AMDGPU::SGPR_32RegClass) {
1113 if (LiveRegs->available(MF->getRegInfo(), Reg)) {
1114 SOffset = Reg;
1115 break;
1116 }
1117 }
1118 }
1119
1120 if (!SOffset) {
1121       // There are no free SGPRs, and we are in the process of spilling
1122       // VGPRs too. Since we need a VGPR in order to spill SGPRs (this is
1123       // true on SI/CI, and on VI it is true until we implement spilling
1124       // using scalar stores), we have no way to free up an SGPR. Our
1125       // solution here is to add the offset directly to the ScratchOffset
1126       // or StackPtrOffset register, and then subtract the offset after the
1127       // spill to return the register to its original value.
1128 if (!ScratchOffsetReg)
1129 ScratchOffsetReg = FuncInfo->getStackPtrOffsetReg();
1130 SOffset = ScratchOffsetReg;
1131 ScratchOffsetRegDelta = Offset;
1132 } else {
1133 Scavenged = true;
1134 }
1135
1136 if (!SOffset)
1137 report_fatal_error("could not scavenge SGPR to spill in entry function");
1138
1139 if (ScratchOffsetReg == AMDGPU::NoRegister) {
1140 BuildMI(MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), SOffset).addImm(Offset);
1141 } else {
1142 BuildMI(MBB, MI, DL, TII->get(AMDGPU::S_ADD_I32), SOffset)
1143 .addReg(ScratchOffsetReg)
1144 .addImm(Offset);
1145 }
1146
1147 Offset = 0;
1148 }
1149
1150 if (IsFlat && SOffset == AMDGPU::NoRegister) {
1151     assert(AMDGPU::getNamedOperandIdx(LoadStoreOp, AMDGPU::OpName::vaddr) < 0
1152            && "Unexpected vaddr for flat scratch with a FI operand");
1153
1154     assert(ST.hasFlatScratchSTMode());
1155 LoadStoreOp = AMDGPU::getFlatScratchInstSTfromSS(LoadStoreOp);
1156 Desc = &TII->get(LoadStoreOp);
Value stored to 'Desc' is never read
1157 }
1158
1159 Register TmpReg;
1160
1161 for (unsigned i = 0, e = NumSubRegs + NumRemSubRegs, RegOffset = 0; i != e;
1162 ++i, RegOffset += EltSize) {
1163 if (i == NumSubRegs) {
1164 EltSize = RemSize;
1165 LoadStoreOp = getFlatScratchSpillOpcode(TII, LoadStoreOp, EltSize);
1166 }
1167 Desc = &TII->get(LoadStoreOp);
1168
1169 unsigned NumRegs = EltSize / 4;
1170 Register SubReg = e == 1
1171 ? ValueReg
1172 : Register(getSubReg(ValueReg,
1173 getSubRegFromChannel(RegOffset / 4, NumRegs)));
1174
1175 unsigned SOffsetRegState = 0;
1176 unsigned SrcDstRegState = getDefRegState(!IsStore);
1177 if (i + 1 == e) {
1178 SOffsetRegState |= getKillRegState(Scavenged);
1179 // The last implicit use carries the "Kill" flag.
1180 SrcDstRegState |= getKillRegState(IsKill);
1181 }
1182
1183 // Make sure the whole register is defined if there are undef components by
1184 // adding an implicit def of the super-reg on the first instruction.
1185 bool NeedSuperRegDef = e > 1 && IsStore && i == 0;
1186 bool NeedSuperRegImpOperand = e > 1;
1187
1188     // Remaining element size to spill into memory after some parts of it
1189     // have been spilled into either AGPRs or VGPRs.
1190 unsigned RemEltSize = EltSize;
1191
1192     // AGPRs used to spill VGPRs (and vice versa) are allocated in reverse
1193     // order, starting from the last lane. If a register cannot be
1194     // completely spilled into another register, this ensures its alignment
1195     // does not change. For targets with a VGPR alignment requirement this
1196     // matters when using flat scratch, as we might otherwise get a
1197     // scratch_load or scratch_store of an unaligned register.
1198 for (int LaneS = (RegOffset + EltSize) / 4 - 1, Lane = LaneS,
1199 LaneE = RegOffset / 4;
1200 Lane >= LaneE; --Lane) {
1201 bool IsSubReg = e > 1 || EltSize > 4;
1202 Register Sub = IsSubReg
1203 ? Register(getSubReg(ValueReg, getSubRegFromChannel(Lane)))
1204 : ValueReg;
1205 auto MIB = spillVGPRtoAGPR(ST, MBB, MI, Index, Lane, Sub, IsKill);
1206 if (!MIB.getInstr())
1207 break;
1208 if (NeedSuperRegDef || (IsSubReg && IsStore && Lane == LaneS && !i)) {
1209 MIB.addReg(ValueReg, RegState::ImplicitDefine);
1210 NeedSuperRegDef = false;
1211 }
1212 if (IsSubReg || NeedSuperRegImpOperand) {
1213 NeedSuperRegImpOperand = true;
1214 unsigned State = SrcDstRegState;
1215 if (Lane != LaneE)
1216 State &= ~RegState::Kill;
1217 MIB.addReg(ValueReg, RegState::Implicit | State);
1218 }
1219 RemEltSize -= 4;
1220 }
1221
1222 if (!RemEltSize) // Fully spilled into AGPRs.
1223 continue;
1224
1225 if (RemEltSize != EltSize) { // Partially spilled to AGPRs
1226       assert(IsFlat && EltSize > 4);
1227
1228 unsigned NumRegs = RemEltSize / 4;
1229 SubReg = Register(getSubReg(ValueReg,
1230 getSubRegFromChannel(RegOffset / 4, NumRegs)));
1231 unsigned Opc = getFlatScratchSpillOpcode(TII, LoadStoreOp, RemEltSize);
1232 Desc = &TII->get(Opc);
1233 }
1234
1235 unsigned FinalReg = SubReg;
1236
1237 if (IsAGPR) {
1238       assert(EltSize == 4);
1239
1240 if (!TmpReg) {
1241         assert(RS && "Needs to have RegScavenger to spill an AGPR!");
1242 // FIXME: change to scavengeRegisterBackwards()
1243 TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
1244 RS->setRegUsed(TmpReg);
1245 }
1246 if (IsStore) {
1247 auto AccRead = BuildMI(MBB, MI, DL,
1248 TII->get(AMDGPU::V_ACCVGPR_READ_B32_e64), TmpReg)
1249 .addReg(SubReg, getKillRegState(IsKill));
1250 if (NeedSuperRegDef)
1251 AccRead.addReg(ValueReg, RegState::ImplicitDefine);
1252 AccRead->setAsmPrinterFlag(MachineInstr::ReloadReuse);
1253 }
1254 SubReg = TmpReg;
1255 }
1256
1257 MachinePointerInfo PInfo = BasePtrInfo.getWithOffset(RegOffset);
1258 MachineMemOperand *NewMMO =
1259 MF->getMachineMemOperand(PInfo, MMO->getFlags(), RemEltSize,
1260 commonAlignment(Alignment, RegOffset));
1261
1262 auto MIB =
1263 BuildMI(MBB, MI, DL, *Desc)
1264 .addReg(SubReg, getDefRegState(!IsStore) | getKillRegState(IsKill));
1265 if (!IsFlat)
1266 MIB.addReg(FuncInfo->getScratchRSrcReg());
1267
1268 if (SOffset == AMDGPU::NoRegister) {
1269 if (!IsFlat)
1270 MIB.addImm(0);
1271 } else {
1272 MIB.addReg(SOffset, SOffsetRegState);
1273 }
1274 MIB.addImm(Offset + RegOffset)
1275 .addImm(0); // cpol
1276 if (!IsFlat)
1277 MIB.addImm(0) // tfe
1278 .addImm(0); // swz
1279 MIB.addMemOperand(NewMMO);
1280
1281 if (!IsAGPR && NeedSuperRegDef)
1282 MIB.addReg(ValueReg, RegState::ImplicitDefine);
1283
1284 if (!IsStore && TmpReg != AMDGPU::NoRegister) {
1285 MIB = BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64),
1286 FinalReg)
1287 .addReg(TmpReg, RegState::Kill);
1288 MIB->setAsmPrinterFlag(MachineInstr::ReloadReuse);
1289 }
1290
1291 if (NeedSuperRegImpOperand)
1292 MIB.addReg(ValueReg, RegState::Implicit | SrcDstRegState);
1293 }
1294
1295 if (ScratchOffsetRegDelta != 0) {
1296 // Subtract the offset we added to the ScratchOffset register.
1297 BuildMI(MBB, MI, DL, TII->get(AMDGPU::S_ADD_I32), SOffset)
1298 .addReg(SOffset)
1299 .addImm(-ScratchOffsetRegDelta);
1300 }
1301}
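The element-splitting setup near the top of buildSpillLoadStore deserves a worked example. A small sketch (plain C++, not LLVM code) for a 224-bit VGPR tuple spilled through flat scratch:

#include <algorithm>
#include <cassert>

int main() {
  // 224-bit register, flat scratch, not an AGPR: mirrors the setup above.
  unsigned RegWidth = 224 / 8;                 // 28 bytes
  unsigned EltSize = std::min(RegWidth, 16u);  // 16-byte main element
  unsigned NumSubRegs = RegWidth / EltSize;    // one full DWORDX4 piece
  unsigned Size = NumSubRegs * EltSize;        // 16 bytes covered
  unsigned RemSize = RegWidth - Size;          // 12-byte remainder
  unsigned NumRemSubRegs = RemSize ? 1 : 0;    // one DWORDX3 tail piece

  assert(NumSubRegs == 1 && RemSize == 12 && NumRemSubRegs == 1);
  // The main loop then emits one DWORDX4 access, switches EltSize to
  // RemSize, and emits one DWORDX3 access for the tail.
}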
1302
1303void SIRegisterInfo::buildVGPRSpillLoadStore(SGPRSpillBuilder &SB, int Index,
1304 int Offset, bool IsLoad,
1305 bool IsKill) const {
1306 // Load/store VGPR
1307 MachineFrameInfo &FrameInfo = SB.MF.getFrameInfo();
1308   assert(FrameInfo.getStackID(Index) != TargetStackID::SGPRSpill);
1309
1310 Register FrameReg =
1311 FrameInfo.isFixedObjectIndex(Index) && hasBasePointer(SB.MF)
1312 ? getBaseRegister()
1313 : getFrameRegister(SB.MF);
1314
1315 Align Alignment = FrameInfo.getObjectAlign(Index);
1316 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(SB.MF, Index);
1317 MachineMemOperand *MMO = SB.MF.getMachineMemOperand(
1318 PtrInfo, IsLoad ? MachineMemOperand::MOLoad : MachineMemOperand::MOStore,
1319 SB.EltSize, Alignment);
1320
1321 if (IsLoad) {
1322 unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_LOAD_DWORD_SADDR
1323 : AMDGPU::BUFFER_LOAD_DWORD_OFFSET;
1324 buildSpillLoadStore(SB.MBB, SB.MI, Opc, Index, SB.TmpVGPR, false, FrameReg,
1325 Offset * SB.EltSize, MMO, SB.RS);
1326 } else {
1327 unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_STORE_DWORD_SADDR
1328 : AMDGPU::BUFFER_STORE_DWORD_OFFSET;
1329 buildSpillLoadStore(SB.MBB, SB.MI, Opc, Index, SB.TmpVGPR, IsKill, FrameReg,
1330 Offset * SB.EltSize, MMO, SB.RS);
1331 // This only ever adds one VGPR spill
1332 SB.MFI.addToSpilledVGPRs(1);
1333 }
1334}
1335
1336bool SIRegisterInfo::spillSGPR(MachineBasicBlock::iterator MI,
1337 int Index,
1338 RegScavenger *RS,
1339 LiveIntervals *LIS,
1340 bool OnlyToVGPR) const {
1341 SGPRSpillBuilder SB(*this, *ST.getInstrInfo(), isWave32, MI, Index, RS);
1342
1343 ArrayRef<SIMachineFunctionInfo::SpilledReg> VGPRSpills =
1344 SB.MFI.getSGPRToVGPRSpills(Index);
1345 bool SpillToVGPR = !VGPRSpills.empty();
1346 if (OnlyToVGPR && !SpillToVGPR)
1347 return false;
1348
1349   assert(SpillToVGPR || (SB.SuperReg != SB.MFI.getStackPtrOffsetReg() &&
1350                          SB.SuperReg != SB.MFI.getFrameOffsetReg()));
1351
1352 if (SpillToVGPR) {
1353 for (unsigned i = 0, e = SB.NumSubRegs; i < e; ++i) {
1354 Register SubReg =
1355 SB.NumSubRegs == 1
1356 ? SB.SuperReg
1357 : Register(getSubReg(SB.SuperReg, SB.SplitParts[i]));
1358 SIMachineFunctionInfo::SpilledReg Spill = VGPRSpills[i];
1359
1360 bool UseKill = SB.IsKill && i == SB.NumSubRegs - 1;
1361
1362 // Mark the "old value of vgpr" input undef only if this is the first sgpr
1363 // spill to this specific vgpr in the first basic block.
1364 auto MIB = BuildMI(SB.MBB, MI, SB.DL, SB.TII.get(AMDGPU::V_WRITELANE_B32),
1365 Spill.VGPR)
1366 .addReg(SubReg, getKillRegState(UseKill))
1367 .addImm(Spill.Lane)
1368 .addReg(Spill.VGPR);
1369 if (LIS) {
1370 if (i == 0)
1371 LIS->ReplaceMachineInstrInMaps(*MI, *MIB);
1372 else
1373 LIS->InsertMachineInstrInMaps(*MIB);
1374 }
1375
1376 if (i == 0 && SB.NumSubRegs > 1) {
1377 // We may be spilling a super-register which is only partially defined,
1378 // and need to ensure later spills think the value is defined.
1379 MIB.addReg(SB.SuperReg, RegState::ImplicitDefine);
1380 }
1381
1382 if (SB.NumSubRegs > 1)
1383 MIB.addReg(SB.SuperReg, getKillRegState(UseKill) | RegState::Implicit);
1384
1385 // FIXME: Since this spills to another register instead of an actual
1386 // frame index, we should delete the frame index when all references to
1387 // it are fixed.
1388 }
1389 } else {
1390 SB.prepare();
1391
1392 // SubReg carries the "Kill" flag when SubReg == SB.SuperReg.
1393 unsigned SubKillState = getKillRegState((SB.NumSubRegs == 1) && SB.IsKill);
1394
1395 // Per VGPR helper data
1396 auto PVD = SB.getPerVGPRData();
1397
1398 for (unsigned Offset = 0; Offset < PVD.NumVGPRs; ++Offset) {
1399 unsigned TmpVGPRFlags = RegState::Undef;
1400
1401 // Write sub registers into the VGPR
1402 for (unsigned i = Offset * PVD.PerVGPR,
1403 e = std::min((Offset + 1) * PVD.PerVGPR, SB.NumSubRegs);
1404 i < e; ++i) {
1405 Register SubReg =
1406 SB.NumSubRegs == 1
1407 ? SB.SuperReg
1408 : Register(getSubReg(SB.SuperReg, SB.SplitParts[i]));
1409
1410 MachineInstrBuilder WriteLane =
1411 BuildMI(SB.MBB, MI, SB.DL, SB.TII.get(AMDGPU::V_WRITELANE_B32),
1412 SB.TmpVGPR)
1413 .addReg(SubReg, SubKillState)
1414 .addImm(i % PVD.PerVGPR)
1415 .addReg(SB.TmpVGPR, TmpVGPRFlags);
1416 TmpVGPRFlags = 0;
1417
1418 if (LIS) {
1419 if (i == 0)
1420 LIS->ReplaceMachineInstrInMaps(*MI, *WriteLane);
1421 else
1422 LIS->InsertMachineInstrInMaps(*WriteLane);
1423 }
1424
1425 // There could be undef components of a spilled super register.
1426 // TODO: Can we detect this and skip the spill?
1427 if (SB.NumSubRegs > 1) {
1428 // The last implicit use of the SB.SuperReg carries the "Kill" flag.
1429 unsigned SuperKillState = 0;
1430 if (i + 1 == SB.NumSubRegs)
1431 SuperKillState |= getKillRegState(SB.IsKill);
1432 WriteLane.addReg(SB.SuperReg, RegState::Implicit | SuperKillState);
1433 }
1434 }
1435
1436 // Write out VGPR
1437 SB.readWriteTmpVGPR(Offset, /*IsLoad*/ false);
1438 }
1439
1440 SB.restore();
1441 }
1442
1443 MI->eraseFromParent();
1444 SB.MFI.addToSpilledSGPRs(SB.NumSubRegs);
1445
1446 if (LIS)
1447 LIS->removeAllRegUnitsForPhysReg(SB.SuperReg);
1448
1449 return true;
1450}
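The SpillToVGPR path parks one 32-bit sub-register per lane of the spill VGPR via V_WRITELANE_B32, and restoreSGPR below mirrors it with V_READLANE_B32. A toy software model of that lane packing (wave64 assumed; names are illustrative):

#include <array>
#include <cstdint>

// Software model of V_WRITELANE/V_READLANE SGPR spilling: each 32-bit
// SGPR sub-register is parked in one lane of a spill VGPR.
struct LaneVGPR { std::array<uint32_t, 64> Lane; };   // wave64

static void writelane(LaneVGPR &V, unsigned L, uint32_t S) { V.Lane[L] = S; }
static uint32_t readlane(const LaneVGPR &V, unsigned L)    { return V.Lane[L]; }

int main() {
  LaneVGPR Spill{};
  uint32_t SGPR4 = 0xDEAD, SGPR5 = 0xBEEF;  // a 64-bit SGPR pair
  writelane(Spill, 0, SGPR4);               // spillSGPR: one lane per dword
  writelane(Spill, 1, SGPR5);
  // restoreSGPR later mirrors this with V_READLANE_B32:
  return readlane(Spill, 0) == 0xDEAD && readlane(Spill, 1) == 0xBEEF ? 0 : 1;
}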
1451
1452bool SIRegisterInfo::restoreSGPR(MachineBasicBlock::iterator MI,
1453 int Index,
1454 RegScavenger *RS,
1455 LiveIntervals *LIS,
1456 bool OnlyToVGPR) const {
1457 SGPRSpillBuilder SB(*this, *ST.getInstrInfo(), isWave32, MI, Index, RS);
1458
1459 ArrayRef<SIMachineFunctionInfo::SpilledReg> VGPRSpills =
1460 SB.MFI.getSGPRToVGPRSpills(Index);
1461 bool SpillToVGPR = !VGPRSpills.empty();
1462 if (OnlyToVGPR && !SpillToVGPR)
1463 return false;
1464
1465 if (SpillToVGPR) {
1466 for (unsigned i = 0, e = SB.NumSubRegs; i < e; ++i) {
1467 Register SubReg =
1468 SB.NumSubRegs == 1
1469 ? SB.SuperReg
1470 : Register(getSubReg(SB.SuperReg, SB.SplitParts[i]));
1471
1472 SIMachineFunctionInfo::SpilledReg Spill = VGPRSpills[i];
1473 auto MIB =
1474 BuildMI(SB.MBB, MI, SB.DL, SB.TII.get(AMDGPU::V_READLANE_B32), SubReg)
1475 .addReg(Spill.VGPR)
1476 .addImm(Spill.Lane);
1477 if (SB.NumSubRegs > 1 && i == 0)
1478 MIB.addReg(SB.SuperReg, RegState::ImplicitDefine);
1479 if (LIS) {
1480 if (i == e - 1)
1481 LIS->ReplaceMachineInstrInMaps(*MI, *MIB);
1482 else
1483 LIS->InsertMachineInstrInMaps(*MIB);
1484 }
1485
1486 }
1487 } else {
1488 SB.prepare();
1489
1490 // Per VGPR helper data
1491 auto PVD = SB.getPerVGPRData();
1492
1493 for (unsigned Offset = 0; Offset < PVD.NumVGPRs; ++Offset) {
1494 // Load in VGPR data
1495 SB.readWriteTmpVGPR(Offset, /*IsLoad*/ true);
1496
1497 // Unpack lanes
1498 for (unsigned i = Offset * PVD.PerVGPR,
1499 e = std::min((Offset + 1) * PVD.PerVGPR, SB.NumSubRegs);
1500 i < e; ++i) {
1501 Register SubReg =
1502 SB.NumSubRegs == 1
1503 ? SB.SuperReg
1504 : Register(getSubReg(SB.SuperReg, SB.SplitParts[i]));
1505
1506 bool LastSubReg = (i + 1 == e);
1507 auto MIB = BuildMI(SB.MBB, MI, SB.DL,
1508 SB.TII.get(AMDGPU::V_READLANE_B32), SubReg)
1509 .addReg(SB.TmpVGPR, getKillRegState(LastSubReg))
1510 .addImm(i);
1511 if (SB.NumSubRegs > 1 && i == 0)
1512 MIB.addReg(SB.SuperReg, RegState::ImplicitDefine);
1513 if (LIS) {
1514 if (i == e - 1)
1515 LIS->ReplaceMachineInstrInMaps(*MI, *MIB);
1516 else
1517 LIS->InsertMachineInstrInMaps(*MIB);
1518 }
1519 }
1520 }
1521
1522 SB.restore();
1523 }
1524
1525 MI->eraseFromParent();
1526
1527 if (LIS)
1528 LIS->removeAllRegUnitsForPhysReg(SB.SuperReg);
1529
1530 return true;
1531}
1532
1533/// Special case of eliminateFrameIndex. Returns true if the SGPR was spilled to
1534/// a VGPR and the stack slot can be safely eliminated when all other users are
1535/// handled.
1536bool SIRegisterInfo::eliminateSGPRToVGPRSpillFrameIndex(
1537 MachineBasicBlock::iterator MI,
1538 int FI,
1539 RegScavenger *RS,
1540 LiveIntervals *LIS) const {
1541 switch (MI->getOpcode()) {
1542 case AMDGPU::SI_SPILL_S1024_SAVE:
1543 case AMDGPU::SI_SPILL_S512_SAVE:
1544 case AMDGPU::SI_SPILL_S256_SAVE:
1545 case AMDGPU::SI_SPILL_S224_SAVE:
1546 case AMDGPU::SI_SPILL_S192_SAVE:
1547 case AMDGPU::SI_SPILL_S160_SAVE:
1548 case AMDGPU::SI_SPILL_S128_SAVE:
1549 case AMDGPU::SI_SPILL_S96_SAVE:
1550 case AMDGPU::SI_SPILL_S64_SAVE:
1551 case AMDGPU::SI_SPILL_S32_SAVE:
1552 return spillSGPR(MI, FI, RS, LIS, true);
1553 case AMDGPU::SI_SPILL_S1024_RESTORE:
1554 case AMDGPU::SI_SPILL_S512_RESTORE:
1555 case AMDGPU::SI_SPILL_S256_RESTORE:
1556 case AMDGPU::SI_SPILL_S224_RESTORE:
1557 case AMDGPU::SI_SPILL_S192_RESTORE:
1558 case AMDGPU::SI_SPILL_S160_RESTORE:
1559 case AMDGPU::SI_SPILL_S128_RESTORE:
1560 case AMDGPU::SI_SPILL_S96_RESTORE:
1561 case AMDGPU::SI_SPILL_S64_RESTORE:
1562 case AMDGPU::SI_SPILL_S32_RESTORE:
1563 return restoreSGPR(MI, FI, RS, LIS, true);
1564 default:
1565     llvm_unreachable("not an SGPR spill instruction");
1567}
1568
1569void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
1570 int SPAdj, unsigned FIOperandNum,
1571 RegScavenger *RS) const {
1572 MachineFunction *MF = MI->getParent()->getParent();
1573 MachineBasicBlock *MBB = MI->getParent();
1574 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1575 MachineFrameInfo &FrameInfo = MF->getFrameInfo();
1576 const SIInstrInfo *TII = ST.getInstrInfo();
1577 DebugLoc DL = MI->getDebugLoc();
1578
1579   assert(SPAdj == 0 && "unhandled SP adjustment in call sequence?");
1580
1581 MachineOperand &FIOp = MI->getOperand(FIOperandNum);
1582 int Index = MI->getOperand(FIOperandNum).getIndex();
1583
1584 Register FrameReg = FrameInfo.isFixedObjectIndex(Index) && hasBasePointer(*MF)
1585 ? getBaseRegister()
1586 : getFrameRegister(*MF);
1587
1588 switch (MI->getOpcode()) {
1589 // SGPR register spill
1590 case AMDGPU::SI_SPILL_S1024_SAVE:
1591 case AMDGPU::SI_SPILL_S512_SAVE:
1592 case AMDGPU::SI_SPILL_S256_SAVE:
1593 case AMDGPU::SI_SPILL_S224_SAVE:
1594 case AMDGPU::SI_SPILL_S192_SAVE:
1595 case AMDGPU::SI_SPILL_S160_SAVE:
1596 case AMDGPU::SI_SPILL_S128_SAVE:
1597 case AMDGPU::SI_SPILL_S96_SAVE:
1598 case AMDGPU::SI_SPILL_S64_SAVE:
1599 case AMDGPU::SI_SPILL_S32_SAVE: {
1600 spillSGPR(MI, Index, RS);
1601 break;
1602 }
1603
1604 // SGPR register restore
1605 case AMDGPU::SI_SPILL_S1024_RESTORE:
1606 case AMDGPU::SI_SPILL_S512_RESTORE:
1607 case AMDGPU::SI_SPILL_S256_RESTORE:
1608 case AMDGPU::SI_SPILL_S224_RESTORE:
1609 case AMDGPU::SI_SPILL_S192_RESTORE:
1610 case AMDGPU::SI_SPILL_S160_RESTORE:
1611 case AMDGPU::SI_SPILL_S128_RESTORE:
1612 case AMDGPU::SI_SPILL_S96_RESTORE:
1613 case AMDGPU::SI_SPILL_S64_RESTORE:
1614 case AMDGPU::SI_SPILL_S32_RESTORE: {
1615 restoreSGPR(MI, Index, RS);
1616 break;
1617 }
1618
1619 // VGPR register spill
1620 case AMDGPU::SI_SPILL_V1024_SAVE:
1621 case AMDGPU::SI_SPILL_V512_SAVE:
1622 case AMDGPU::SI_SPILL_V256_SAVE:
1623 case AMDGPU::SI_SPILL_V224_SAVE:
1624 case AMDGPU::SI_SPILL_V192_SAVE:
1625 case AMDGPU::SI_SPILL_V160_SAVE:
1626 case AMDGPU::SI_SPILL_V128_SAVE:
1627 case AMDGPU::SI_SPILL_V96_SAVE:
1628 case AMDGPU::SI_SPILL_V64_SAVE:
1629 case AMDGPU::SI_SPILL_V32_SAVE:
1630 case AMDGPU::SI_SPILL_A1024_SAVE:
1631 case AMDGPU::SI_SPILL_A512_SAVE:
1632 case AMDGPU::SI_SPILL_A256_SAVE:
1633 case AMDGPU::SI_SPILL_A224_SAVE:
1634 case AMDGPU::SI_SPILL_A192_SAVE:
1635 case AMDGPU::SI_SPILL_A160_SAVE:
1636 case AMDGPU::SI_SPILL_A128_SAVE:
1637 case AMDGPU::SI_SPILL_A96_SAVE:
1638 case AMDGPU::SI_SPILL_A64_SAVE:
1639 case AMDGPU::SI_SPILL_A32_SAVE: {
1640 const MachineOperand *VData = TII->getNamedOperand(*MI,
1641 AMDGPU::OpName::vdata);
1642     assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg() ==
1643            MFI->getStackPtrOffsetReg());
1644
1645 unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_STORE_DWORD_SADDR
1646 : AMDGPU::BUFFER_STORE_DWORD_OFFSET;
1647 auto *MBB = MI->getParent();
1648 buildSpillLoadStore(
1649 *MBB, MI, Opc, Index, VData->getReg(), VData->isKill(), FrameReg,
1650 TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(),
1651 *MI->memoperands_begin(), RS);
1652 MFI->addToSpilledVGPRs(getNumSubRegsForSpillOp(MI->getOpcode()));
1653 MI->eraseFromParent();
1654 break;
1655 }
1656 case AMDGPU::SI_SPILL_V32_RESTORE:
1657 case AMDGPU::SI_SPILL_V64_RESTORE:
1658 case AMDGPU::SI_SPILL_V96_RESTORE:
1659 case AMDGPU::SI_SPILL_V128_RESTORE:
1660 case AMDGPU::SI_SPILL_V160_RESTORE:
1661 case AMDGPU::SI_SPILL_V192_RESTORE:
1662 case AMDGPU::SI_SPILL_V224_RESTORE:
1663 case AMDGPU::SI_SPILL_V256_RESTORE:
1664 case AMDGPU::SI_SPILL_V512_RESTORE:
1665 case AMDGPU::SI_SPILL_V1024_RESTORE:
1666 case AMDGPU::SI_SPILL_A32_RESTORE:
1667 case AMDGPU::SI_SPILL_A64_RESTORE:
1668 case AMDGPU::SI_SPILL_A96_RESTORE:
1669 case AMDGPU::SI_SPILL_A128_RESTORE:
1670 case AMDGPU::SI_SPILL_A160_RESTORE:
1671 case AMDGPU::SI_SPILL_A192_RESTORE:
1672 case AMDGPU::SI_SPILL_A224_RESTORE:
1673 case AMDGPU::SI_SPILL_A256_RESTORE:
1674 case AMDGPU::SI_SPILL_A512_RESTORE:
1675 case AMDGPU::SI_SPILL_A1024_RESTORE: {
1676 const MachineOperand *VData = TII->getNamedOperand(*MI,
1677 AMDGPU::OpName::vdata);
1678     assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg() ==
1679            MFI->getStackPtrOffsetReg());
1680
1681 unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_LOAD_DWORD_SADDR
1682 : AMDGPU::BUFFER_LOAD_DWORD_OFFSET;
1683 auto *MBB = MI->getParent();
1684 buildSpillLoadStore(
1685 *MBB, MI, Opc, Index, VData->getReg(), VData->isKill(), FrameReg,
1686 TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(),
1687 *MI->memoperands_begin(), RS);
1688 MI->eraseFromParent();
1689 break;
1690 }
1691
1692 default: {
1693 // Other access to frame index
1694 const DebugLoc &DL = MI->getDebugLoc();
1695
1696 int64_t Offset = FrameInfo.getObjectOffset(Index);
1697 if (ST.enableFlatScratch()) {
1698 if (TII->isFLATScratch(*MI)) {
1699       assert((int16_t)FIOperandNum ==
1700              AMDGPU::getNamedOperandIdx(MI->getOpcode(),
1701                                         AMDGPU::OpName::saddr));
1702
1703       // The offset is always swizzled, so just replace it.
1704 if (FrameReg)
1705 FIOp.ChangeToRegister(FrameReg, false);
1706
1707 if (!Offset)
1708 return;
1709
1710 MachineOperand *OffsetOp =
1711 TII->getNamedOperand(*MI, AMDGPU::OpName::offset);
1712 int64_t NewOffset = Offset + OffsetOp->getImm();
1713 if (TII->isLegalFLATOffset(NewOffset, AMDGPUAS::PRIVATE_ADDRESS,
1714 SIInstrFlags::FlatScratch)) {
1715 OffsetOp->setImm(NewOffset);
1716 if (FrameReg)
1717 return;
1718 Offset = 0;
1719 }
1720
1721       assert(!TII->getNamedOperand(*MI, AMDGPU::OpName::vaddr) &&
1722              "Unexpected vaddr for flat scratch with a FI operand");
1723
1724 // On GFX10 we have ST mode to use no registers for an address.
1725 // Otherwise we need to materialize 0 into an SGPR.
1726 if (!Offset && ST.hasFlatScratchSTMode()) {
1727 unsigned Opc = MI->getOpcode();
1728 unsigned NewOpc = AMDGPU::getFlatScratchInstSTfromSS(Opc);
1729 MI->RemoveOperand(
1730 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::saddr));
1731 MI->setDesc(TII->get(NewOpc));
1732 return;
1733 }
1734 }
1735
1736 if (!FrameReg) {
1737 FIOp.ChangeToImmediate(Offset);
1738 if (TII->isImmOperandLegal(*MI, FIOperandNum, FIOp))
1739 return;
1740 }
1741
1742       // We need to use a register here. Check if we can use an SGPR or
1743       // need a VGPR.
1744 FIOp.ChangeToRegister(AMDGPU::M0, false);
1745 bool UseSGPR = TII->isOperandLegal(*MI, FIOperandNum, &FIOp);
1746
1747 if (!Offset && FrameReg && UseSGPR) {
1748 FIOp.setReg(FrameReg);
1749 return;
1750 }
1751
1752 const TargetRegisterClass *RC = UseSGPR ? &AMDGPU::SReg_32_XM0RegClass
1753 : &AMDGPU::VGPR_32RegClass;
1754
1755 Register TmpReg = RS->scavengeRegister(RC, MI, 0, !UseSGPR);
1756 FIOp.setReg(TmpReg);
1757 FIOp.setIsKill(true);
1758
1759 if ((!FrameReg || !Offset) && TmpReg) {
1760 unsigned Opc = UseSGPR ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1761 auto MIB = BuildMI(*MBB, MI, DL, TII->get(Opc), TmpReg);
1762 if (FrameReg)
1763 MIB.addReg(FrameReg);
1764 else
1765 MIB.addImm(Offset);
1766
1767 return;
1768 }
1769
1770 Register TmpSReg =
1771 UseSGPR ? TmpReg
1772 : RS->scavengeRegister(&AMDGPU::SReg_32_XM0RegClass, MI, 0,
1773 !UseSGPR);
1774
1775 // TODO: for flat scratch another attempt can be made with a VGPR index
1776 // if no SGPRs can be scavenged.
1777 if ((!TmpSReg && !FrameReg) || (!TmpReg && !UseSGPR))
1778 report_fatal_error("Cannot scavenge register in FI elimination!");
1779
1780 if (!TmpSReg) {
1781 // Use frame register and restore it after.
1782 TmpSReg = FrameReg;
1783 FIOp.setReg(FrameReg);
1784 FIOp.setIsKill(false);
1785 }
1786
1787 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_I32), TmpSReg)
1788 .addReg(FrameReg)
1789 .addImm(Offset);
1790
1791 if (!UseSGPR)
1792 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
1793 .addReg(TmpSReg, RegState::Kill);
1794
1795 if (TmpSReg == FrameReg) {
1796 // Undo frame register modification.
1797 BuildMI(*MBB, std::next(MI), DL, TII->get(AMDGPU::S_ADD_I32),
1798 FrameReg)
1799 .addReg(FrameReg)
1800 .addImm(-Offset);
1801 }
1802
1803 return;
1804 }
1805
1806 bool IsMUBUF = TII->isMUBUF(*MI);
1807
1808 if (!IsMUBUF && !MFI->isEntryFunction()) {
1809 // Convert to a swizzled stack address by scaling by the wave size.
1810 //
1811 // In an entry function/kernel the offset is already swizzled.
1812
1813 bool IsCopy = MI->getOpcode() == AMDGPU::V_MOV_B32_e32;
1814 Register ResultReg =
1815 IsCopy ? MI->getOperand(0).getReg()
1816 : RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
1817
1818 int64_t Offset = FrameInfo.getObjectOffset(Index);
1819 if (Offset == 0) {
1820 // XXX - This never happens because of emergency scavenging slot at 0?
1821 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64), ResultReg)
1822 .addImm(ST.getWavefrontSizeLog2())
1823 .addReg(FrameReg);
1824 } else {
1825 if (auto MIB = TII->getAddNoCarry(*MBB, MI, DL, ResultReg, *RS)) {
1826 // Reuse ResultReg in intermediate step.
1827 Register ScaledReg = ResultReg;
1828
1829 BuildMI(*MBB, *MIB, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64),
1830 ScaledReg)
1831 .addImm(ST.getWavefrontSizeLog2())
1832 .addReg(FrameReg);
1833
1834 const bool IsVOP2 = MIB->getOpcode() == AMDGPU::V_ADD_U32_e32;
1835
1836 // TODO: Fold if use instruction is another add of a constant.
1837 if (IsVOP2 || AMDGPU::isInlinableLiteral32(Offset, ST.hasInv2PiInlineImm())) {
1838 // FIXME: This can fail
1839 MIB.addImm(Offset);
1840 MIB.addReg(ScaledReg, RegState::Kill);
1841 if (!IsVOP2)
1842 MIB.addImm(0); // clamp bit
1843 } else {
1844           assert(MIB->getOpcode() == AMDGPU::V_ADD_CO_U32_e64 &&
1845                  "Need to reuse carry out register");
1846
1847 // Use scavenged unused carry out as offset register.
1848 Register ConstOffsetReg;
1849 if (!isWave32)
1850 ConstOffsetReg = getSubReg(MIB.getReg(1), AMDGPU::sub0);
1851 else
1852 ConstOffsetReg = MIB.getReg(1);
1853
1854 BuildMI(*MBB, *MIB, DL, TII->get(AMDGPU::S_MOV_B32), ConstOffsetReg)
1855 .addImm(Offset);
1856 MIB.addReg(ConstOffsetReg, RegState::Kill);
1857 MIB.addReg(ScaledReg, RegState::Kill);
1858 MIB.addImm(0); // clamp bit
1859 }
1860 } else {
1861 // We have to produce a carry out, and there isn't a free SGPR pair
1862 // for it. We can keep the whole computation on the SALU to avoid
1863 // clobbering an additional register at the cost of an extra mov.
1864
1865 // We may have 1 free scratch SGPR even though a carry out is
1866 // unavailable. Only one additional mov is needed.
1867 Register TmpScaledReg =
1868 RS->scavengeRegister(&AMDGPU::SReg_32_XM0RegClass, MI, 0, false);
1869 Register ScaledReg = TmpScaledReg.isValid() ? TmpScaledReg : FrameReg;
1870
1871 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_LSHR_B32), ScaledReg)
1872 .addReg(FrameReg)
1873 .addImm(ST.getWavefrontSizeLog2());
1874 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_I32), ScaledReg)
1875 .addReg(ScaledReg, RegState::Kill)
1876 .addImm(Offset);
1877 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), ResultReg)
1878 .addReg(ScaledReg, RegState::Kill);
1879
1880 // If there were truly no free SGPRs, we need to undo everything.
1881 if (!TmpScaledReg.isValid()) {
1882 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_I32), ScaledReg)
1883 .addReg(ScaledReg, RegState::Kill)
1884 .addImm(-Offset);
1885 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_LSHL_B32), ScaledReg)
1886 .addReg(FrameReg)
1887 .addImm(ST.getWavefrontSizeLog2());
1888 }
1889 }
1890 }
1891
1892 // Don't introduce an extra copy if we're just materializing in a mov.
1893 if (IsCopy)
1894 MI->eraseFromParent();
1895 else
1896 FIOp.ChangeToRegister(ResultReg, false, false, true);
1897 return;
1898 }
1899
1900 if (IsMUBUF) {
1901 // Disable offen so we don't need a 0 vgpr base.
1902     assert(static_cast<int>(FIOperandNum) ==
1903            AMDGPU::getNamedOperandIdx(MI->getOpcode(),
1904                                       AMDGPU::OpName::vaddr));
1905
1906 auto &SOffset = *TII->getNamedOperand(*MI, AMDGPU::OpName::soffset);
1907     assert((SOffset.isImm() && SOffset.getImm() == 0));
1908
1909 if (FrameReg != AMDGPU::NoRegister)
1910 SOffset.ChangeToRegister(FrameReg, false);
1911
1912 int64_t Offset = FrameInfo.getObjectOffset(Index);
1913 int64_t OldImm
1914 = TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm();
1915 int64_t NewOffset = OldImm + Offset;
1916
1917 if (SIInstrInfo::isLegalMUBUFImmOffset(NewOffset) &&
1918 buildMUBUFOffsetLoadStore(ST, FrameInfo, MI, Index, NewOffset)) {
1919 MI->eraseFromParent();
1920 return;
1921 }
1922 }
1923
1924 // If the offset is simply too big, don't convert to a scratch wave offset
1925 // relative index.
1926
1927 FIOp.ChangeToImmediate(Offset);
1928 if (!TII->isImmOperandLegal(*MI, FIOperandNum, FIOp)) {
1929 Register TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
1930 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
1931 .addImm(Offset);
1932 FIOp.ChangeToRegister(TmpReg, false, false, true);
1933 }
1934 }
1935 }
1936}
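The SALU fallback in the frame-index elimination above computes the address in place on the frame register and then undoes the changes when no scratch SGPR could be found. A scalar sketch of that shift-add-undo pattern, with plain integers standing in for registers:

#include <cassert>

int main() {
  const unsigned WavefrontSizeLog2 = 6;  // wave64
  long long FrameReg = 4096;             // swizzled byte offset
  long long Offset = 24;                 // object offset

  // No scratch SGPR: scale and add in place, as the fallback path does.
  FrameReg >>= WavefrontSizeLog2;        // S_LSHR_B32
  FrameReg += Offset;                    // S_ADD_I32
  long long Result = FrameReg;           // COPY to the result VGPR

  // Undo everything so FrameReg is unchanged after the sequence.
  FrameReg -= Offset;                    // S_ADD_I32 with -Offset
  FrameReg <<= WavefrontSizeLog2;        // S_LSHL_B32
  assert(FrameReg == 4096 && Result == 4096 / 64 + 24);
}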
1937
1938StringRef SIRegisterInfo::getRegAsmName(MCRegister Reg) const {
1939 return AMDGPUInstPrinter::getRegisterName(Reg);
1940}
1941
1942static const TargetRegisterClass *
1943getAnyVGPRClassForBitWidth(unsigned BitWidth) {
1944 if (BitWidth <= 64)
1945 return &AMDGPU::VReg_64RegClass;
1946 if (BitWidth <= 96)
1947 return &AMDGPU::VReg_96RegClass;
1948 if (BitWidth <= 128)
1949 return &AMDGPU::VReg_128RegClass;
1950 if (BitWidth <= 160)
1951 return &AMDGPU::VReg_160RegClass;
1952 if (BitWidth <= 192)
1953 return &AMDGPU::VReg_192RegClass;
1954 if (BitWidth <= 224)
1955 return &AMDGPU::VReg_224RegClass;
1956 if (BitWidth <= 256)
1957 return &AMDGPU::VReg_256RegClass;
1958 if (BitWidth <= 512)
1959 return &AMDGPU::VReg_512RegClass;
1960 if (BitWidth <= 1024)
1961 return &AMDGPU::VReg_1024RegClass;
1962
1963 return nullptr;
1964}
1965
1966static const TargetRegisterClass *
1967getAlignedVGPRClassForBitWidth(unsigned BitWidth) {
1968 if (BitWidth <= 64)
1969 return &AMDGPU::VReg_64_Align2RegClass;
1970 if (BitWidth <= 96)
1971 return &AMDGPU::VReg_96_Align2RegClass;
1972 if (BitWidth <= 128)
1973 return &AMDGPU::VReg_128_Align2RegClass;
1974 if (BitWidth <= 160)
1975 return &AMDGPU::VReg_160_Align2RegClass;
1976 if (BitWidth <= 192)
1977 return &AMDGPU::VReg_192_Align2RegClass;
1978 if (BitWidth <= 224)
1979 return &AMDGPU::VReg_224_Align2RegClass;
1980 if (BitWidth <= 256)
1981 return &AMDGPU::VReg_256_Align2RegClass;
1982 if (BitWidth <= 512)
1983 return &AMDGPU::VReg_512_Align2RegClass;
1984 if (BitWidth <= 1024)
1985 return &AMDGPU::VReg_1024_Align2RegClass;
1986
1987 return nullptr;
1988}
1989
1990const TargetRegisterClass *
1991SIRegisterInfo::getVGPRClassForBitWidth(unsigned BitWidth) const {
1992 if (BitWidth == 1)
1993 return &AMDGPU::VReg_1RegClass;
1994 if (BitWidth <= 16)
1995 return &AMDGPU::VGPR_LO16RegClass;
1996 if (BitWidth <= 32)
1997 return &AMDGPU::VGPR_32RegClass;
1998 return ST.needsAlignedVGPRs() ? getAlignedVGPRClassForBitWidth(BitWidth)
1999 : getAnyVGPRClassForBitWidth(BitWidth);
2000}
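getVGPRClassForBitWidth and the two helpers above amount to rounding a requested width up to the next register-tuple size that actually exists (note the gap between 256 and 512). A standalone sketch of that bucketing, returning the tuple width in bits, with 0 playing the role of nullptr:

#include <cstdio>

// Round a bit width up to the nearest supported VGPR tuple size, in the
// same order the if-chains above test.
static unsigned vgprTupleBits(unsigned BitWidth) {
  static const unsigned Buckets[] = {32, 64, 96, 128, 160, 192, 224, 256,
                                     512, 1024};
  for (unsigned B : Buckets)
    if (BitWidth <= B)
      return B;       // e.g. 100 bits lands in the 128-bit class
  return 0;           // wider than 1024: no class exists
}

int main() { std::printf("%u\n", vgprTupleBits(100)); }  // prints 128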
2001
2002static const TargetRegisterClass *
2003getAnyAGPRClassForBitWidth(unsigned BitWidth) {
2004 if (BitWidth <= 64)
2005 return &AMDGPU::AReg_64RegClass;
2006 if (BitWidth <= 96)
2007 return &AMDGPU::AReg_96RegClass;
2008 if (BitWidth <= 128)
2009 return &AMDGPU::AReg_128RegClass;
2010 if (BitWidth <= 160)
2011 return &AMDGPU::AReg_160RegClass;
2012 if (BitWidth <= 192)
2013 return &AMDGPU::AReg_192RegClass;
2014 if (BitWidth <= 224)
2015 return &AMDGPU::AReg_224RegClass;
2016 if (BitWidth <= 256)
2017 return &AMDGPU::AReg_256RegClass;
2018 if (BitWidth <= 512)
2019 return &AMDGPU::AReg_512RegClass;
2020 if (BitWidth <= 1024)
2021 return &AMDGPU::AReg_1024RegClass;
2022
2023 return nullptr;
2024}
2025
2026static const TargetRegisterClass *
2027getAlignedAGPRClassForBitWidth(unsigned BitWidth) {
2028 if (BitWidth <= 64)
2029 return &AMDGPU::AReg_64_Align2RegClass;
2030 if (BitWidth <= 96)
2031 return &AMDGPU::AReg_96_Align2RegClass;
2032 if (BitWidth <= 128)
2033 return &AMDGPU::AReg_128_Align2RegClass;
2034 if (BitWidth <= 160)
2035 return &AMDGPU::AReg_160_Align2RegClass;
2036 if (BitWidth <= 192)
2037 return &AMDGPU::AReg_192_Align2RegClass;
2038 if (BitWidth <= 224)
2039 return &AMDGPU::AReg_224_Align2RegClass;
2040 if (BitWidth <= 256)
2041 return &AMDGPU::AReg_256_Align2RegClass;
2042 if (BitWidth <= 512)
2043 return &AMDGPU::AReg_512_Align2RegClass;
2044 if (BitWidth <= 1024)
2045 return &AMDGPU::AReg_1024_Align2RegClass;
2046
2047 return nullptr;
2048}
2049
2050const TargetRegisterClass *
2051SIRegisterInfo::getAGPRClassForBitWidth(unsigned BitWidth) const {
2052 if (BitWidth <= 16)
2053 return &AMDGPU::AGPR_LO16RegClass;
2054 if (BitWidth <= 32)
2055 return &AMDGPU::AGPR_32RegClass;
2056 return ST.needsAlignedVGPRs() ? getAlignedAGPRClassForBitWidth(BitWidth)
2057 : getAnyAGPRClassForBitWidth(BitWidth);
2058}
2059
2060const TargetRegisterClass *
2061SIRegisterInfo::getSGPRClassForBitWidth(unsigned BitWidth) {
2062 if (BitWidth <= 16)
2063 return &AMDGPU::SGPR_LO16RegClass;
2064 if (BitWidth <= 32)
2065 return &AMDGPU::SReg_32RegClass;
2066 if (BitWidth <= 64)
2067 return &AMDGPU::SReg_64RegClass;
2068 if (BitWidth <= 96)
2069 return &AMDGPU::SGPR_96RegClass;
2070 if (BitWidth <= 128)
2071 return &AMDGPU::SGPR_128RegClass;
2072 if (BitWidth <= 160)
2073 return &AMDGPU::SGPR_160RegClass;
2074 if (BitWidth <= 192)
2075 return &AMDGPU::SGPR_192RegClass;
2076 if (BitWidth <= 224)
2077 return &AMDGPU::SGPR_224RegClass;
2078 if (BitWidth <= 256)
2079 return &AMDGPU::SGPR_256RegClass;
2080 if (BitWidth <= 512)
2081 return &AMDGPU::SGPR_512RegClass;
2082 if (BitWidth <= 1024)
2083 return &AMDGPU::SGPR_1024RegClass;
2084
2085 return nullptr;
2086}
2087
2088// FIXME: This is very slow. It might be worth creating a map from physreg to
2089// register class.
2090const TargetRegisterClass *
2091SIRegisterInfo::getPhysRegClass(MCRegister Reg) const {
2092 static const TargetRegisterClass *const BaseClasses[] = {
2093 &AMDGPU::VGPR_LO16RegClass,
2094 &AMDGPU::VGPR_HI16RegClass,
2095 &AMDGPU::SReg_LO16RegClass,
2096 &AMDGPU::AGPR_LO16RegClass,
2097 &AMDGPU::VGPR_32RegClass,
2098 &AMDGPU::SReg_32RegClass,
2099 &AMDGPU::AGPR_32RegClass,
2100 &AMDGPU::AGPR_32RegClass,
2101 &AMDGPU::VReg_64_Align2RegClass,
2102 &AMDGPU::VReg_64RegClass,
2103 &AMDGPU::SReg_64RegClass,
2104 &AMDGPU::AReg_64_Align2RegClass,
2105 &AMDGPU::AReg_64RegClass,
2106 &AMDGPU::VReg_96_Align2RegClass,
2107 &AMDGPU::VReg_96RegClass,
2108 &AMDGPU::SReg_96RegClass,
2109 &AMDGPU::AReg_96_Align2RegClass,
2110 &AMDGPU::AReg_96RegClass,
2111 &AMDGPU::VReg_128_Align2RegClass,
2112 &AMDGPU::VReg_128RegClass,
2113 &AMDGPU::SReg_128RegClass,
2114 &AMDGPU::AReg_128_Align2RegClass,
2115 &AMDGPU::AReg_128RegClass,
2116 &AMDGPU::VReg_160_Align2RegClass,
2117 &AMDGPU::VReg_160RegClass,
2118 &AMDGPU::SReg_160RegClass,
2119 &AMDGPU::AReg_160_Align2RegClass,
2120 &AMDGPU::AReg_160RegClass,
2121 &AMDGPU::VReg_192_Align2RegClass,
2122 &AMDGPU::VReg_192RegClass,
2123 &AMDGPU::SReg_192RegClass,
2124 &AMDGPU::AReg_192_Align2RegClass,
2125 &AMDGPU::AReg_192RegClass,
2126 &AMDGPU::VReg_224_Align2RegClass,
2127 &AMDGPU::VReg_224RegClass,
2128 &AMDGPU::SReg_224RegClass,
2129 &AMDGPU::AReg_224_Align2RegClass,
2130 &AMDGPU::AReg_224RegClass,
2131 &AMDGPU::VReg_256_Align2RegClass,
2132 &AMDGPU::VReg_256RegClass,
2133 &AMDGPU::SReg_256RegClass,
2134 &AMDGPU::AReg_256_Align2RegClass,
2135 &AMDGPU::AReg_256RegClass,
2136 &AMDGPU::VReg_512_Align2RegClass,
2137 &AMDGPU::VReg_512RegClass,
2138 &AMDGPU::SReg_512RegClass,
2139 &AMDGPU::AReg_512_Align2RegClass,
2140 &AMDGPU::AReg_512RegClass,
2141 &AMDGPU::SReg_1024RegClass,
2142 &AMDGPU::VReg_1024_Align2RegClass,
2143 &AMDGPU::VReg_1024RegClass,
2144 &AMDGPU::AReg_1024_Align2RegClass,
2145 &AMDGPU::AReg_1024RegClass,
2146 &AMDGPU::SCC_CLASSRegClass,
2147 &AMDGPU::Pseudo_SReg_32RegClass,
2148 &AMDGPU::Pseudo_SReg_128RegClass,
2149 };
2150
2151 for (const TargetRegisterClass *BaseClass : BaseClasses) {
2152 if (BaseClass->contains(Reg)) {
2153 return BaseClass;
2154 }
2155 }
2156 return nullptr;
2157}
2158
2159bool SIRegisterInfo::isSGPRReg(const MachineRegisterInfo &MRI,
2160 Register Reg) const {
2161 const TargetRegisterClass *RC;
2162 if (Reg.isVirtual())
2163 RC = MRI.getRegClass(Reg);
2164 else
2165 RC = getPhysRegClass(Reg);
2166 return isSGPRClass(RC);
2167}
2168
2169bool SIRegisterInfo::hasVGPRs(const TargetRegisterClass *RC) const {
2170 return RC->TSFlags & SIRCFlags::HasVGPR;
2171}
2172
2173bool SIRegisterInfo::hasAGPRs(const TargetRegisterClass *RC) const {
2174 return RC->TSFlags & SIRCFlags::HasAGPR;
2175}
2176
2177const TargetRegisterClass *
2178SIRegisterInfo::getEquivalentVGPRClass(const TargetRegisterClass *SRC) const {
2179 unsigned Size = getRegSizeInBits(*SRC);
2180 const TargetRegisterClass *VRC = getVGPRClassForBitWidth(Size);
2181   assert(VRC && "Invalid register class size");
2182 return VRC;
2183}
2184
2185const TargetRegisterClass *
2186SIRegisterInfo::getEquivalentAGPRClass(const TargetRegisterClass *SRC) const {
2187 unsigned Size = getRegSizeInBits(*SRC);
2188 const TargetRegisterClass *ARC = getAGPRClassForBitWidth(Size);
2189   assert(ARC && "Invalid register class size");
2190 return ARC;
2191}
2192
2193const TargetRegisterClass *
2194SIRegisterInfo::getEquivalentSGPRClass(const TargetRegisterClass *VRC) const {
2195 unsigned Size = getRegSizeInBits(*VRC);
2196 if (Size == 32)
2197 return &AMDGPU::SGPR_32RegClass;
2198 const TargetRegisterClass *SRC = getSGPRClassForBitWidth(Size);
2199   assert(SRC && "Invalid register class size");
2200 return SRC;
2201}
2202
2203const TargetRegisterClass *SIRegisterInfo::getSubRegClass(
2204 const TargetRegisterClass *RC, unsigned SubIdx) const {
2205 if (SubIdx == AMDGPU::NoSubRegister)
2206 return RC;
2207
2208 // We can assume that each lane corresponds to one 32-bit register.
2209 unsigned Size = getNumChannelsFromSubReg(SubIdx) * 32;
2210 if (isSGPRClass(RC)) {
2211 if (Size == 32)
2212 RC = &AMDGPU::SGPR_32RegClass;
2213 else
2214 RC = getSGPRClassForBitWidth(Size);
2215 } else if (hasAGPRs(RC)) {
2216 RC = getAGPRClassForBitWidth(Size);
2217 } else {
2218 RC = getVGPRClassForBitWidth(Size);
2219 }
2220   assert(RC && "Invalid sub-register class size");
2221 return RC;
2222}
2223
2224const TargetRegisterClass *
2225SIRegisterInfo::getCompatibleSubRegClass(const TargetRegisterClass *SuperRC,
2226 const TargetRegisterClass *SubRC,
2227 unsigned SubIdx) const {
2228 // Ensure this subregister index is aligned in the super register.
2229 const TargetRegisterClass *MatchRC =
2230 getMatchingSuperRegClass(SuperRC, SubRC, SubIdx);
2231 return MatchRC && MatchRC->hasSubClassEq(SuperRC) ? MatchRC : nullptr;
2232}
2233
2234bool SIRegisterInfo::opCanUseInlineConstant(unsigned OpType) const {
2235 if (OpType >= AMDGPU::OPERAND_REG_INLINE_AC_FIRST &&
2236 OpType <= AMDGPU::OPERAND_REG_INLINE_AC_LAST)
2237 return !ST.hasMFMAInlineLiteralBug();
2238
2239 return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
2240 OpType <= AMDGPU::OPERAND_SRC_LAST;
2241}
2242
2243bool SIRegisterInfo::shouldRewriteCopySrc(
2244 const TargetRegisterClass *DefRC,
2245 unsigned DefSubReg,
2246 const TargetRegisterClass *SrcRC,
2247 unsigned SrcSubReg) const {
2248 // We want to prefer the smallest register class possible, so we don't want to
2249 // stop and rewrite on anything that looks like a subregister
2250 // extract. Operations mostly don't care about the super register class, so we
2251 // only want to stop on the most basic of copies between the same register
2252 // class.
2253 //
2254 // e.g. if we have something like
2255 // %0 = ...
2256 // %1 = ...
2257 // %2 = REG_SEQUENCE %0, sub0, %1, sub1, %2, sub2
2258 // %3 = COPY %2, sub0
2259 //
2260 // We want to look through the COPY to find:
2261 // => %3 = COPY %0
2262
2263 // Plain copy.
2264 return getCommonSubClass(DefRC, SrcRC) != nullptr;
2265}
2266
2267bool SIRegisterInfo::opCanUseLiteralConstant(unsigned OpType) const {
2268 // TODO: 64-bit operands have extending behavior from 32-bit literal.
2269 return OpType >= AMDGPU::OPERAND_REG_IMM_FIRST &&
2270 OpType <= AMDGPU::OPERAND_REG_IMM_LAST;
2271}
2272
2273/// Returns the lowest register that is not used at any point in the
2274/// function. If all registers are used, then this function will return
2275/// AMDGPU::NoRegister. If \p ReserveHighestVGPR = true, then return the
2276/// highest unused register.
2277MCRegister SIRegisterInfo::findUnusedRegister(const MachineRegisterInfo &MRI,
2278 const TargetRegisterClass *RC,
2279 const MachineFunction &MF,
2280 bool ReserveHighestVGPR) const {
2281 if (ReserveHighestVGPR) {
2282 for (MCRegister Reg : reverse(*RC))
2283 if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg))
2284 return Reg;
2285 } else {
2286 for (MCRegister Reg : *RC)
2287 if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg))
2288 return Reg;
2289 }
2290 return MCRegister();
2291}
2292
2293ArrayRef<int16_t> SIRegisterInfo::getRegSplitParts(const TargetRegisterClass *RC,
2294 unsigned EltSize) const {
2295 const unsigned RegBitWidth = AMDGPU::getRegBitWidth(*RC->MC);
2296   assert(RegBitWidth >= 32 && RegBitWidth <= 1024);
2297
2298 const unsigned RegDWORDs = RegBitWidth / 32;
2299 const unsigned EltDWORDs = EltSize / 4;
2300   assert(RegSplitParts.size() + 1 >= EltDWORDs);
2301
2302 const std::vector<int16_t> &Parts = RegSplitParts[EltDWORDs - 1];
2303 const unsigned NumParts = RegDWORDs / EltDWORDs;
2304
2305 return makeArrayRef(Parts.data(), NumParts);
2306}
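
The arithmetic here is all in dword (32-bit) units: the class width and element size are converted to dword counts, and row EltDWORDs - 1 of the tablegen'd RegSplitParts table supplies the subregister indices. A standalone worked example of the counts for a hypothetical 128-bit class split into 8-byte elements (the table itself is not reproduced):

#include <cassert>

int main() {
  unsigned RegBitWidth = 128;            // e.g. a 4-dword register tuple
  unsigned EltSize = 8;                  // split into 8-byte elements
  unsigned RegDWORDs = RegBitWidth / 32; // 4 dwords in the tuple
  unsigned EltDWORDs = EltSize / 4;      // 2 dwords per element
  unsigned NumParts = RegDWORDs / EltDWORDs;
  assert(NumParts == 2); // the 128-bit tuple yields two 64-bit pieces,
                         // taken from row EltDWORDs - 1 == 1 of the table
}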
2307
2308const TargetRegisterClass*
2309SIRegisterInfo::getRegClassForReg(const MachineRegisterInfo &MRI,
2310 Register Reg) const {
2311 return Reg.isVirtual() ? MRI.getRegClass(Reg) : getPhysRegClass(Reg);
2312}
2313
2314bool SIRegisterInfo::isVGPR(const MachineRegisterInfo &MRI,
2315 Register Reg) const {
2316 const TargetRegisterClass *RC = getRegClassForReg(MRI, Reg);
2317 // Registers without classes are unaddressable, SGPR-like registers.
2318 return RC && isVGPRClass(RC);
2319}
2320
2321bool SIRegisterInfo::isAGPR(const MachineRegisterInfo &MRI,
2322 Register Reg) const {
2323 const TargetRegisterClass *RC = getRegClassForReg(MRI, Reg);
2324
2325 // Registers without classes are unaddressable, SGPR-like registers.
2326 return RC && isAGPRClass(RC);
2327}
2328
2329bool SIRegisterInfo::shouldCoalesce(MachineInstr *MI,
2330 const TargetRegisterClass *SrcRC,
2331 unsigned SubReg,
2332 const TargetRegisterClass *DstRC,
2333 unsigned DstSubReg,
2334 const TargetRegisterClass *NewRC,
2335 LiveIntervals &LIS) const {
2336 unsigned SrcSize = getRegSizeInBits(*SrcRC);
2337 unsigned DstSize = getRegSizeInBits(*DstRC);
2338 unsigned NewSize = getRegSizeInBits(*NewRC);
2339
2340 // Do not increase the size of registers beyond a dword; we would need to
2341 // allocate adjacent registers and constrain regalloc more than needed.
2342
2343 // Always allow dword coalescing.
2344 if (SrcSize <= 32 || DstSize <= 32)
2345 return true;
2346
2347 return NewSize <= DstSize || NewSize <= SrcSize;
2348}
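
In other words: copies involving at most a dword always coalesce, and wider ones coalesce only if the merged class is no larger than at least one of the inputs. A standalone restatement of the size predicate with plain bit widths:

#include <cassert>

bool shouldCoalesce(unsigned SrcSize, unsigned DstSize, unsigned NewSize) {
  if (SrcSize <= 32 || DstSize <= 32)  // dword copies are always fine
    return true;
  return NewSize <= DstSize || NewSize <= SrcSize; // never grow past both
}

int main() {
  assert(shouldCoalesce(32, 64, 64));   // dword source: allowed
  assert(shouldCoalesce(64, 64, 64));   // no growth: allowed
  assert(!shouldCoalesce(64, 64, 128)); // would widen both sides: rejected
}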
2349
2350unsigned SIRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
2351 MachineFunction &MF) const {
2352 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
2353
2354 unsigned Occupancy = ST.getOccupancyWithLocalMemSize(MFI->getLDSSize(),
2355 MF.getFunction());
2356 switch (RC->getID()) {
2357 default:
2358 return AMDGPUGenRegisterInfo::getRegPressureLimit(RC, MF);
2359 case AMDGPU::VGPR_32RegClassID:
2360 case AMDGPU::VGPR_LO16RegClassID:
2361 case AMDGPU::VGPR_HI16RegClassID:
2362 return std::min(ST.getMaxNumVGPRs(Occupancy), ST.getMaxNumVGPRs(MF));
2363 case AMDGPU::SGPR_32RegClassID:
2364 case AMDGPU::SGPR_LO16RegClassID:
2365 return std::min(ST.getMaxNumSGPRs(Occupancy, true), ST.getMaxNumSGPRs(MF));
2366 }
2367}
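
For the VGPR and SGPR cases the limit is the tighter of two caps: the count the subtarget allows at the LDS-derived occupancy and the count granted to this particular function. An illustration with invented numbers (the real values come from the GCNSubtarget queries above):

#include <algorithm>
#include <cassert>

int main() {
  unsigned MaxVGPRsAtOccupancy = 64; // invented: cap at computed occupancy
  unsigned MaxVGPRsForFunction = 96; // invented: cap from function attributes
  unsigned Limit = std::min(MaxVGPRsAtOccupancy, MaxVGPRsForFunction);
  assert(Limit == 64); // the occupancy cap is the binding constraint here
}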
2368
2369unsigned SIRegisterInfo::getRegPressureSetLimit(const MachineFunction &MF,
2370 unsigned Idx) const {
2371 if (Idx == AMDGPU::RegisterPressureSets::VGPR_32 ||
2372 Idx == AMDGPU::RegisterPressureSets::AGPR_32)
2373 return getRegPressureLimit(&AMDGPU::VGPR_32RegClass,
2374 const_cast<MachineFunction &>(MF));
2375
2376 if (Idx == AMDGPU::RegisterPressureSets::SReg_32)
2377 return getRegPressureLimit(&AMDGPU::SGPR_32RegClass,
2378 const_cast<MachineFunction &>(MF));
2379
2380 llvm_unreachable("Unexpected register pressure set!")::llvm::llvm_unreachable_internal("Unexpected register pressure set!"
, "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp"
, 2380)
;
2381}
2382
2383const int *SIRegisterInfo::getRegUnitPressureSets(unsigned RegUnit) const {
2384 static const int Empty[] = { -1 };
2385
2386 if (RegPressureIgnoredUnits[RegUnit])
2387 return Empty;
2388
2389 return AMDGPUGenRegisterInfo::getRegUnitPressureSets(RegUnit);
2390}
2391
2392MCRegister SIRegisterInfo::getReturnAddressReg(const MachineFunction &MF) const {
2393 // Not a callee-saved register.
2394 return AMDGPU::SGPR30_SGPR31;
2395}
2396
2397const TargetRegisterClass *
2398SIRegisterInfo::getRegClassForSizeOnBank(unsigned Size,
2399 const RegisterBank &RB,
2400 const MachineRegisterInfo &MRI) const {
2401 switch (RB.getID()) {
2402 case AMDGPU::VGPRRegBankID:
2403 return getVGPRClassForBitWidth(std::max(32u, Size));
2404 case AMDGPU::VCCRegBankID:
2405 assert(Size == 1);
2406 return isWave32 ? &AMDGPU::SReg_32_XM0_XEXECRegClass
2407 : &AMDGPU::SReg_64_XEXECRegClass;
2408 case AMDGPU::SGPRRegBankID:
2409 return getSGPRClassForBitWidth(std::max(32u, Size));
2410 case AMDGPU::AGPRRegBankID:
2411 return getAGPRClassForBitWidth(std::max(32u, Size));
2412 default:
2413 llvm_unreachable("unknown register bank")::llvm::llvm_unreachable_internal("unknown register bank", "/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp"
, 2413)
;
2414 }
2415}
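
Note the std::max(32u, Size) clamp: requests narrower than a dword (e.g. an s16 value on the VGPR bank) still map to a 32-bit class. A minimal standalone illustration of the clamp, with the bank dispatch elided:

#include <algorithm>
#include <cassert>

int main() {
  unsigned Size = 16;                   // e.g. an s16 value on the VGPR bank
  unsigned Width = std::max(32u, Size); // clamped up to one dword
  assert(Width == 32);                  // -> getVGPRClassForBitWidth(32)
}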
2416
2417const TargetRegisterClass *
2418SIRegisterInfo::getConstrainedRegClassForOperand(const MachineOperand &MO,
2419 const MachineRegisterInfo &MRI) const {
2420 const RegClassOrRegBank &RCOrRB = MRI.getRegClassOrRegBank(MO.getReg());
2421 if (const RegisterBank *RB = RCOrRB.dyn_cast<const RegisterBank*>())
2422 return getRegClassForTypeOnBank(MRI.getType(MO.getReg()), *RB, MRI);
2423
2424 const TargetRegisterClass *RC = RCOrRB.get<const TargetRegisterClass*>();
2425 return getAllocatableClass(RC);
2426}
2427
2428MCRegister SIRegisterInfo::getVCC() const {
2429 return isWave32 ? AMDGPU::VCC_LO : AMDGPU::VCC;
2430}
2431
2432const TargetRegisterClass *SIRegisterInfo::getVGPR64Class() const {
2433 // VGPR tuples have an alignment requirement on gfx90a variants.
2434 return ST.needsAlignedVGPRs() ? &AMDGPU::VReg_64_Align2RegClass
2435 : &AMDGPU::VReg_64RegClass;
2436}
2437
2438const TargetRegisterClass *
2439SIRegisterInfo::getRegClass(unsigned RCID) const {
2440 switch ((int)RCID) {
2441 case AMDGPU::SReg_1RegClassID:
2442 return getBoolRC();
2443 case AMDGPU::SReg_1_XEXECRegClassID:
2444 return isWave32 ? &AMDGPU::SReg_32_XM0_XEXECRegClass
2445 : &AMDGPU::SReg_64_XEXECRegClass;
2446 case -1:
2447 return nullptr;
2448 default:
2449 return AMDGPUGenRegisterInfo::getRegClass(RCID);
2450 }
2451}
2452
2453// Find the reaching register definition.
2454MachineInstr *SIRegisterInfo::findReachingDef(Register Reg, unsigned SubReg,
2455 MachineInstr &Use,
2456 MachineRegisterInfo &MRI,
2457 LiveIntervals *LIS) const {
2458 auto &MDT = LIS->getAnalysis<MachineDominatorTree>();
2459 SlotIndex UseIdx = LIS->getInstructionIndex(Use);
2460 SlotIndex DefIdx;
2461
2462 if (Reg.isVirtual()) {
2463 if (!LIS->hasInterval(Reg))
2464 return nullptr;
2465 LiveInterval &LI = LIS->getInterval(Reg);
2466 LaneBitmask SubLanes = SubReg ? getSubRegIndexLaneMask(SubReg)
2467 : MRI.getMaxLaneMaskForVReg(Reg);
2468 VNInfo *V = nullptr;
2469 if (LI.hasSubRanges()) {
2470 for (auto &S : LI.subranges()) {
2471 if ((S.LaneMask & SubLanes) == SubLanes) {
2472 V = S.getVNInfoAt(UseIdx);
2473 break;
2474 }
2475 }
2476 } else {
2477 V = LI.getVNInfoAt(UseIdx);
2478 }
2479 if (!V)
2480 return nullptr;
2481 DefIdx = V->def;
2482 } else {
2483 // Find last def.
2484 for (MCRegUnitIterator Units(Reg.asMCReg(), this); Units.isValid();
2485 ++Units) {
2486 LiveRange &LR = LIS->getRegUnit(*Units);
2487 if (VNInfo *V = LR.getVNInfoAt(UseIdx)) {
2488 if (!DefIdx.isValid() ||
2489 MDT.dominates(LIS->getInstructionFromIndex(DefIdx),
2490 LIS->getInstructionFromIndex(V->def)))
2491 DefIdx = V->def;
2492 } else {
2493 return nullptr;
2494 }
2495 }
2496 }
2497
2498 MachineInstr *Def = LIS->getInstructionFromIndex(DefIdx);
2499
2500 if (!Def || !MDT.dominates(Def, &Use))
2501 return nullptr;
2502
2503 assert(Def->modifiesRegister(Reg, this));
2504
2505 return Def;
2506}
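
For a virtual register the reaching definition is read off the live interval's value number at the use's slot index; for a physical register it is the latest dominating def across all register units. A standalone toy of the core query — the last def at or before the use — with integer slot indices standing in for SlotIndex:

#include <algorithm>
#include <cassert>
#include <vector>

// Last def at or before UseIdx, or -1 if none (the value is undefined there).
int reachingDef(const std::vector<int> &SortedDefIdxs, int UseIdx) {
  auto It = std::upper_bound(SortedDefIdxs.begin(), SortedDefIdxs.end(), UseIdx);
  return It == SortedDefIdxs.begin() ? -1 : *std::prev(It);
}

int main() {
  std::vector<int> Defs = {4, 12, 20}; // slot indices of defs, ascending
  assert(reachingDef(Defs, 15) == 12); // the def at 12 reaches a use at 15
  assert(reachingDef(Defs, 2) == -1);  // no def precedes the use
}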
2507
2508MCPhysReg SIRegisterInfo::get32BitRegister(MCPhysReg Reg) const {
2509 assert(getRegSizeInBits(*getPhysRegClass(Reg)) <= 32);
2510
2511 for (const TargetRegisterClass &RC : { AMDGPU::VGPR_32RegClass,
2512 AMDGPU::SReg_32RegClass,
2513 AMDGPU::AGPR_32RegClass } ) {
2514 if (MCPhysReg Super = getMatchingSuperReg(Reg, AMDGPU::lo16, &RC))
2515 return Super;
2516 }
2517 if (MCPhysReg Super = getMatchingSuperReg(Reg, AMDGPU::hi16,
2518 &AMDGPU::VGPR_32RegClass)) {
2519 return Super;
2520 }
2521
2522 return AMDGPU::NoRegister;
2523}
2524
2525bool SIRegisterInfo::isProperlyAlignedRC(const TargetRegisterClass &RC) const {
2526 if (!ST.needsAlignedVGPRs())
2527 return true;
2528
2529 if (hasVGPRs(&RC))
2530 return RC.hasSuperClassEq(getVGPRClassForBitWidth(getRegSizeInBits(RC)));
2531 if (hasAGPRs(&RC))
2532 return RC.hasSuperClassEq(getAGPRClassForBitWidth(getRegSizeInBits(RC)));
2533
2534 return true;
2535}
2536
2537bool SIRegisterInfo::isConstantPhysReg(MCRegister PhysReg) const {
2538 switch (PhysReg) {
2539 case AMDGPU::SGPR_NULL:
2540 case AMDGPU::SRC_SHARED_BASE:
2541 case AMDGPU::SRC_PRIVATE_BASE:
2542 case AMDGPU::SRC_SHARED_LIMIT:
2543 case AMDGPU::SRC_PRIVATE_LIMIT:
2544 return true;
2545 default:
2546 return false;
2547 }
2548}
2549
2550ArrayRef<MCPhysReg>
2551SIRegisterInfo::getAllSGPR128(const MachineFunction &MF) const {
2552 return makeArrayRef(AMDGPU::SGPR_128RegClass.begin(),
2553 ST.getMaxNumSGPRs(MF) / 4);
2554}
2555
2556ArrayRef<MCPhysReg>
2557SIRegisterInfo::getAllSGPR64(const MachineFunction &MF) const {
2558 return makeArrayRef(AMDGPU::SGPR_64RegClass.begin(),
2559 ST.getMaxNumSGPRs(MF) / 2);
2560}
2561
2562ArrayRef<MCPhysReg>
2563SIRegisterInfo::getAllSGPR32(const MachineFunction &MF) const {
2564 return makeArrayRef(AMDGPU::SGPR_32RegClass.begin(), ST.getMaxNumSGPRs(MF));
2565}
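
The getAllSGPR{128,64,32} family returns a prefix of each tuple class's register list: the per-function SGPR cap divided by the tuple width in dwords. A standalone check of the arithmetic with an invented cap (the real value comes from ST.getMaxNumSGPRs(MF)):

#include <cassert>

int main() {
  unsigned MaxSGPRs = 104;    // invented per-function SGPR cap
  assert(MaxSGPRs / 4 == 26); // usable SGPR_128 quad tuples
  assert(MaxSGPRs / 2 == 52); // usable SGPR_64 pair tuples
  assert(MaxSGPRs == 104);    // all single SGPR_32 registers up to the cap
}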