Bug Summary

File: llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
Warning: line 1061, column 5
Value stored to 'Desc' is never read
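
This warning comes from the deadcode.DeadStores checker: a value is written to 'Desc' at line 1061, but nothing on any later path reads it. A minimal, self-contained sketch of the pattern (hypothetical code, not the function analyzed below):

  #include <cstdio>

  // The first store to 'Desc' is live because printf reads it; the
  // conditional re-assignment is flagged because no later code reads it.
  void describe(int Op) {
    const char *Desc = "dword";
    std::printf("%s\n", Desc);
    if (Op > 4)
      Desc = "dwordx2"; // warning: Value stored to 'Desc' is never read
  }

The usual fix is to delete the dead assignment or to make the following code use the refreshed value.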

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name SIRegisterInfo.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -fhalf-no-semantic-interposition -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/build-llvm/lib/Target/AMDGPU -resource-dir /usr/lib/llvm-13/lib/clang/13.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/build-llvm/lib/Target/AMDGPU -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AMDGPU -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/build-llvm/include -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/lib/llvm-13/lib/clang/13.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/build-llvm/lib/Target/AMDGPU -fdebug-prefix-map=/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-04-14-063029-18377-1 -x c++ /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
1//===-- SIRegisterInfo.cpp - SI Register Information ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// SI implementation of the TargetRegisterInfo class.
11//
12//===----------------------------------------------------------------------===//
13
14#include "SIRegisterInfo.h"
15#include "AMDGPU.h"
16#include "AMDGPURegisterBankInfo.h"
17#include "GCNSubtarget.h"
18#include "MCTargetDesc/AMDGPUInstPrinter.h"
19#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
20#include "SIMachineFunctionInfo.h"
21#include "llvm/CodeGen/LiveIntervals.h"
22#include "llvm/CodeGen/MachineDominators.h"
23#include "llvm/CodeGen/RegisterScavenging.h"
24
25using namespace llvm;
26
27#define GET_REGINFO_TARGET_DESC
28#include "AMDGPUGenRegisterInfo.inc"
29
30static cl::opt<bool> EnableSpillSGPRToVGPR(
31 "amdgpu-spill-sgpr-to-vgpr",
32 cl::desc("Enable spilling SGPRs to VGPRs"),
33 cl::ReallyHidden,
34 cl::init(true));
35
36std::array<std::vector<int16_t>, 16> SIRegisterInfo::RegSplitParts;
37std::array<std::array<uint16_t, 32>, 9> SIRegisterInfo::SubRegFromChannelTable;
38
39// Map numbers of DWORDs to indexes in SubRegFromChannelTable.
40 // Valid indexes are shifted by 1, such that a 0 mapping means unsupported.
41// e.g. for 8 DWORDs (256-bit), SubRegFromChannelTableWidthMap[8] = 8,
42// meaning index 7 in SubRegFromChannelTable.
43static const std::array<unsigned, 17> SubRegFromChannelTableWidthMap = {
44 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 9};
45
46namespace llvm {
47
48// A temporary struct to spill SGPRs.
49// This is mostly to spill SGPRs to memory. Spilling SGPRs into VGPR lanes emits
50// just v_writelane and v_readlane.
51//
52// When spilling to memory, the SGPRs are written into VGPR lanes and the VGPR
53// is saved to scratch (or the other way around for loads).
54// For this, a VGPR is required where the needed lanes can be clobbered. The
55// RegScavenger can provide a VGPR where currently active lanes can be
56// clobbered, but we still need to save inactive lanes.
57// The high-level steps are:
58// - Try to scavenge SGPR(s) to save exec
59// - Try to scavenge VGPR
60 // - Save the needed lanes of a TmpVGPR (all of them, or only the inactive ones)
61// - Spill/Restore SGPRs using TmpVGPR
62// - Restore TmpVGPR
63//
64// To save all lanes of TmpVGPR, exec needs to be saved and modified. If we
65// cannot scavenge temporary SGPRs to save exec, we use the following code:
66// buffer_store_dword TmpVGPR ; only if active lanes need to be saved
67// s_not exec, exec
68// buffer_store_dword TmpVGPR ; save inactive lanes
69// s_not exec, exec
70struct SGPRSpillBuilder {
71 struct PerVGPRData {
72 unsigned PerVGPR;
73 unsigned NumVGPRs;
74 int64_t VGPRLanes;
75 };
76
77 // The SGPR to save
78 Register SuperReg;
79 MachineBasicBlock::iterator MI;
80 ArrayRef<int16_t> SplitParts;
81 unsigned NumSubRegs;
82 bool IsKill;
83 const DebugLoc &DL;
84
85 /* When spilling to stack */
86 // The SGPRs are written into this VGPR, which is then written to scratch
87 // (or vice versa for loads).
88 Register TmpVGPR = AMDGPU::NoRegister;
89 // Temporary spill slot to save TmpVGPR to.
90 int TmpVGPRIndex = 0;
91 // If TmpVGPR is live before the spill or if it is scavenged.
92 bool TmpVGPRLive = false;
93 // Scavenged SGPR to save EXEC.
94 Register SavedExecReg = AMDGPU::NoRegister;
95 // Stack index to write the SGPRs to.
96 int Index;
97 unsigned EltSize = 4;
98
99 RegScavenger *RS;
100 MachineBasicBlock &MBB;
101 MachineFunction &MF;
102 SIMachineFunctionInfo &MFI;
103 const SIInstrInfo &TII;
104 const SIRegisterInfo &TRI;
105 bool IsWave32;
106 Register ExecReg;
107 unsigned MovOpc;
108 unsigned NotOpc;
109
110 SGPRSpillBuilder(const SIRegisterInfo &TRI, const SIInstrInfo &TII,
111 bool IsWave32, MachineBasicBlock::iterator MI, int Index,
112 RegScavenger *RS)
113 : SuperReg(MI->getOperand(0).getReg()), MI(MI),
114 IsKill(MI->getOperand(0).isKill()), DL(MI->getDebugLoc()), Index(Index),
115 RS(RS), MBB(*MI->getParent()), MF(*MBB.getParent()),
116 MFI(*MF.getInfo<SIMachineFunctionInfo>()), TII(TII), TRI(TRI),
117 IsWave32(IsWave32) {
118 const TargetRegisterClass *RC = TRI.getPhysRegClass(SuperReg);
119 SplitParts = TRI.getRegSplitParts(RC, EltSize);
120 NumSubRegs = SplitParts.empty() ? 1 : SplitParts.size();
121
122 if (IsWave32) {
123 ExecReg = AMDGPU::EXEC_LO;
124 MovOpc = AMDGPU::S_MOV_B32;
125 NotOpc = AMDGPU::S_NOT_B32;
126 } else {
127 ExecReg = AMDGPU::EXEC;
128 MovOpc = AMDGPU::S_MOV_B64;
129 NotOpc = AMDGPU::S_NOT_B64;
130 }
131
132 assert(SuperReg != AMDGPU::M0 && "m0 should never spill");
133 assert(SuperReg != AMDGPU::EXEC_LO && SuperReg != AMDGPU::EXEC_HI &&
134 SuperReg != AMDGPU::EXEC && "exec should never spill");
135 }
136
137 PerVGPRData getPerVGPRData() {
138 PerVGPRData Data;
139 Data.PerVGPR = IsWave32 ? 32 : 64;
140 Data.NumVGPRs = (NumSubRegs + (Data.PerVGPR - 1)) / Data.PerVGPR;
141 Data.VGPRLanes = (1LL << std::min(Data.PerVGPR, NumSubRegs)) - 1LL;
142 return Data;
143 }
144
145 // Tries to scavenge SGPRs to save EXEC and a VGPR. Uses v0 if no VGPR is
146 // free.
147 // Writes these instructions if an SGPR can be scavenged:
148 // s_mov_b64 s[6:7], exec ; Save exec
149 // s_mov_b64 exec, 3 ; Wanted lanemask
150 // buffer_store_dword v1 ; Write scavenged VGPR to emergency slot
151 //
152 // Writes these instructions if no SGPR can be scavenged:
153 // buffer_store_dword v0 ; Only if no free VGPR was found
154 // s_not_b64 exec, exec
155 // buffer_store_dword v0 ; Save inactive lanes
156 // ; exec stays inverted, it is flipped back in
157 // ; restore.
158 void prepare() {
159 // Scavenged temporary VGPR to use. It must be scavenged once for any number
160 // of spilled subregs.
161 // FIXME: The liveness analysis is limited and does not tell if a register
162 // is in use in lanes that are currently inactive. We can never be sure if
163 // a register is actually in use in another lane, so we need to save all
164 // used lanes of the chosen VGPR.
165 assert(RS && "Cannot spill SGPR to memory without RegScavenger");
166 TmpVGPR = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0, false);
167
168 // Reserve temporary stack slot
169 TmpVGPRIndex = MFI.getScavengeFI(MF.getFrameInfo(), TRI);
170 if (TmpVGPR) {
171 // Found a register that is dead in the currently active lanes; we only
172 // need to spill the inactive lanes.
173 TmpVGPRLive = false;
174 } else {
175 // Pick v0 because it doesn't make a difference.
176 TmpVGPR = AMDGPU::VGPR0;
177 TmpVGPRLive = true;
178 }
179
180 // Try to scavenge SGPRs to save exec
181 assert(!SavedExecReg && "Exec is already saved, refuse to save again");
182 const TargetRegisterClass &RC =
183 IsWave32 ? AMDGPU::SGPR_32RegClass : AMDGPU::SGPR_64RegClass;
184 RS->setRegUsed(SuperReg);
185 SavedExecReg = RS->scavengeRegister(&RC, MI, 0, false);
186
187 int64_t VGPRLanes = getPerVGPRData().VGPRLanes;
188
189 if (SavedExecReg) {
190 // Set exec to needed lanes
191 BuildMI(MBB, MI, DL, TII.get(MovOpc), SavedExecReg).addReg(ExecReg);
192 auto I = BuildMI(MBB, MI, DL, TII.get(MovOpc), ExecReg).addImm(VGPRLanes);
193 if (!TmpVGPRLive)
194 I.addReg(TmpVGPR, RegState::ImplicitDefine);
195 // Spill needed lanes
196 TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ false);
197 } else {
198 // Spill active lanes
199 if (TmpVGPRLive)
200 TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ false,
201 /*IsKill*/ false);
202 // Spill inactive lanes
203 auto I = BuildMI(MBB, MI, DL, TII.get(NotOpc), ExecReg).addReg(ExecReg);
204 if (!TmpVGPRLive)
205 I.addReg(TmpVGPR, RegState::ImplicitDefine);
206 TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ false);
207 }
208 }
209
210 // Writes these instructions if an SGPR can be scavenged:
211 // buffer_load_dword v1 ; Restore scavenged VGPR from emergency slot
212 // s_waitcnt vmcnt(0) ; If a free VGPR was found
213 // s_mov_b64 exec, s[6:7] ; Restore exec
214 //
215 // Writes these instructions if no SGPR can be scavenged:
216 // buffer_load_dword v0 ; Restore inactive lanes
217 // s_waitcnt vmcnt(0) ; If a free VGPR was found
218 // s_not_b64 exec, exec
219 // buffer_load_dword v0 ; Only if no free VGPR was found
220 void restore() {
221 if (SavedExecReg) {
222 // Restore used lanes
223 TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ true,
224 /*IsKill*/ false);
225 // Restore exec
226 auto I = BuildMI(MBB, MI, DL, TII.get(MovOpc), ExecReg)
227 .addReg(SavedExecReg, RegState::Kill);
228 // Add an implicit use of the load so it is not dead.
229 // FIXME This inserts an unnecessary waitcnt
230 if (!TmpVGPRLive) {
231 I.addReg(TmpVGPR, RegState::ImplicitKill);
232 }
233 } else {
234 // Restore inactive lanes
235 TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ true,
236 /*IsKill*/ false);
237 auto I = BuildMI(MBB, MI, DL, TII.get(NotOpc), ExecReg).addReg(ExecReg);
238 if (!TmpVGPRLive) {
239 I.addReg(TmpVGPR, RegState::ImplicitKill);
240 }
241 // Restore active lanes
242 if (TmpVGPRLive)
243 TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ true);
244 }
245 }
246
247 // Write TmpVGPR to memory or read TmpVGPR from memory.
248 // Either using a single buffer_load/store if exec is set to the needed mask
249 // or using
250 // buffer_load
251 // s_not exec, exec
252 // buffer_load
253 // s_not exec, exec
254 void readWriteTmpVGPR(unsigned Offset, bool IsLoad) {
255 if (SavedExecReg) {
256 // Spill needed lanes
257 TRI.buildVGPRSpillLoadStore(*this, Index, Offset, IsLoad);
258 } else {
259 // Spill active lanes
260 TRI.buildVGPRSpillLoadStore(*this, Index, Offset, IsLoad,
261 /*IsKill*/ false);
262 // Spill inactive lanes
263 BuildMI(MBB, MI, DL, TII.get(NotOpc), ExecReg).addReg(ExecReg);
264 TRI.buildVGPRSpillLoadStore(*this, Index, Offset, IsLoad);
265 BuildMI(MBB, MI, DL, TII.get(NotOpc), ExecReg).addReg(ExecReg);
266 }
267 }
268};
269
270} // namespace llvm
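
The arithmetic in getPerVGPRData() above is plain integer math: one 32-bit SGPR is written per lane, so a wave32 VGPR packs up to 32 SGPRs and a wave64 VGPR up to 64, and VGPRLanes is the mask of lanes that carry data. A standalone sketch with hypothetical inputs (the formulas mirror the code above):

  #include <algorithm>
  #include <cassert>
  #include <cstdint>

  int main() {
    const bool IsWave32 = false;   // hypothetical wave64 configuration
    const unsigned NumSubRegs = 4; // e.g. spilling an SGPR_128 tuple
    unsigned PerVGPR = IsWave32 ? 32 : 64;
    // Round up: four 32-bit parts fit into one VGPR's lanes.
    unsigned NumVGPRs = (NumSubRegs + (PerVGPR - 1)) / PerVGPR;
    // Lanes 0..3 hold the four parts, giving the mask 0xF.
    int64_t VGPRLanes = (1LL << std::min(PerVGPR, NumSubRegs)) - 1LL;
    assert(NumVGPRs == 1 && VGPRLanes == 0xF);
  }

With wave32 and 64 subregisters the same formulas give NumVGPRs = 2 and a full 32-lane mask for each VGPR.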
271
272SIRegisterInfo::SIRegisterInfo(const GCNSubtarget &ST)
273 : AMDGPUGenRegisterInfo(AMDGPU::PC_REG, ST.getAMDGPUDwarfFlavour()), ST(ST),
274 SpillSGPRToVGPR(EnableSpillSGPRToVGPR), isWave32(ST.isWave32()) {
275
276 assert(getSubRegIndexLaneMask(AMDGPU::sub0).getAsInteger() == 3 &&
277 getSubRegIndexLaneMask(AMDGPU::sub31).getAsInteger() == (3ULL << 62) &&
278 (getSubRegIndexLaneMask(AMDGPU::lo16) |
279 getSubRegIndexLaneMask(AMDGPU::hi16)).getAsInteger() ==
280 getSubRegIndexLaneMask(AMDGPU::sub0).getAsInteger() &&
281 "getNumCoveredRegs() will not work with generated subreg masks!");
282
283 RegPressureIgnoredUnits.resize(getNumRegUnits());
284 RegPressureIgnoredUnits.set(
285 *MCRegUnitIterator(MCRegister::from(AMDGPU::M0), this));
286 for (auto Reg : AMDGPU::VGPR_HI16RegClass)
287 RegPressureIgnoredUnits.set(*MCRegUnitIterator(Reg, this));
288
289 // HACK: Until this is fully tablegen'd.
290 static llvm::once_flag InitializeRegSplitPartsFlag;
291
292 static auto InitializeRegSplitPartsOnce = [this]() {
293 for (unsigned Idx = 1, E = getNumSubRegIndices() - 1; Idx < E; ++Idx) {
294 unsigned Size = getSubRegIdxSize(Idx);
295 if (Size & 31)
296 continue;
297 std::vector<int16_t> &Vec = RegSplitParts[Size / 32 - 1];
298 unsigned Pos = getSubRegIdxOffset(Idx);
299 if (Pos % Size)
300 continue;
301 Pos /= Size;
302 if (Vec.empty()) {
303 unsigned MaxNumParts = 1024 / Size; // Maximum register is 1024 bits.
304 Vec.resize(MaxNumParts);
305 }
306 Vec[Pos] = Idx;
307 }
308 };
309
310 static llvm::once_flag InitializeSubRegFromChannelTableFlag;
311
312 static auto InitializeSubRegFromChannelTableOnce = [this]() {
313 for (auto &Row : SubRegFromChannelTable)
314 Row.fill(AMDGPU::NoSubRegister);
315 for (uint16_t Idx = 1; Idx < getNumSubRegIndices(); ++Idx) {
316 unsigned Width = AMDGPUSubRegIdxRanges[Idx].Size / 32;
317 unsigned Offset = AMDGPUSubRegIdxRanges[Idx].Offset / 32;
318 assert(Width < SubRegFromChannelTableWidthMap.size());
319 Width = SubRegFromChannelTableWidthMap[Width];
320 if (Width == 0)
321 continue;
322 unsigned TableIdx = Width - 1;
323 assert(TableIdx < SubRegFromChannelTable.size());
324 assert(Offset < SubRegFromChannelTable[TableIdx].size());
325 SubRegFromChannelTable[TableIdx][Offset] = Idx;
326 }
327 };
328
329 llvm::call_once(InitializeRegSplitPartsFlag, InitializeRegSplitPartsOnce);
330 llvm::call_once(InitializeSubRegFromChannelTableFlag,
331 InitializeSubRegFromChannelTableOnce);
332}
333
334void SIRegisterInfo::reserveRegisterTuples(BitVector &Reserved,
335 MCRegister Reg) const {
336 MCRegAliasIterator R(Reg, this, true);
337
338 for (; R.isValid(); ++R)
339 Reserved.set(*R);
340}
341
342// Forced to be here by one .inc
343const MCPhysReg *SIRegisterInfo::getCalleeSavedRegs(
344 const MachineFunction *MF) const {
345 CallingConv::ID CC = MF->getFunction().getCallingConv();
346 switch (CC) {
347 case CallingConv::C:
348 case CallingConv::Fast:
349 case CallingConv::Cold:
350 case CallingConv::AMDGPU_Gfx:
351 return MF->getSubtarget<GCNSubtarget>().hasGFX90AInsts()
352 ? CSR_AMDGPU_HighRegs_With_AGPRs_SaveList
353 : CSR_AMDGPU_HighRegs_SaveList;
354 default: {
355 // Dummy to not crash RegisterClassInfo.
356 static const MCPhysReg NoCalleeSavedReg = AMDGPU::NoRegister;
357 return &NoCalleeSavedReg;
358 }
359 }
360}
361
362const MCPhysReg *
363SIRegisterInfo::getCalleeSavedRegsViaCopy(const MachineFunction *MF) const {
364 return nullptr;
365}
366
367const uint32_t *SIRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
368 CallingConv::ID CC) const {
369 switch (CC) {
370 case CallingConv::C:
371 case CallingConv::Fast:
372 case CallingConv::Cold:
373 case CallingConv::AMDGPU_Gfx:
374 return MF.getSubtarget<GCNSubtarget>().hasGFX90AInsts()
375 ? CSR_AMDGPU_HighRegs_With_AGPRs_RegMask
376 : CSR_AMDGPU_HighRegs_RegMask;
377 default:
378 return nullptr;
379 }
380}
381
382const uint32_t *SIRegisterInfo::getNoPreservedMask() const {
383 return CSR_AMDGPU_NoRegs_RegMask;
384}
385
386Register SIRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
387 const SIFrameLowering *TFI =
388 MF.getSubtarget<GCNSubtarget>().getFrameLowering();
389 const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
390 // During ISel lowering we always reserve the stack pointer in entry
391 // functions, but never actually want to reference it when accessing our own
392 // frame. If we need a frame pointer we use it, but otherwise we can just use
393 // an immediate "0" which we represent by returning NoRegister.
394 if (FuncInfo->isEntryFunction()) {
395 return TFI->hasFP(MF) ? FuncInfo->getFrameOffsetReg() : Register();
396 }
397 return TFI->hasFP(MF) ? FuncInfo->getFrameOffsetReg()
398 : FuncInfo->getStackPtrOffsetReg();
399}
400
401bool SIRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
402 // When we need stack realignment, we can't reference off of the
403 // stack pointer, so we reserve a base pointer.
404 const MachineFrameInfo &MFI = MF.getFrameInfo();
405 return MFI.getNumFixedObjects() && shouldRealignStack(MF);
406}
407
408Register SIRegisterInfo::getBaseRegister() const { return AMDGPU::SGPR34; }
409
410const uint32_t *SIRegisterInfo::getAllVGPRRegMask() const {
411 return CSR_AMDGPU_AllVGPRs_RegMask;
412}
413
414const uint32_t *SIRegisterInfo::getAllAGPRRegMask() const {
415 return CSR_AMDGPU_AllAGPRs_RegMask;
416}
417
418const uint32_t *SIRegisterInfo::getAllVectorRegMask() const {
419 return CSR_AMDGPU_AllVectorRegs_RegMask;
420}
421
422const uint32_t *SIRegisterInfo::getAllAllocatableSRegMask() const {
423 return CSR_AMDGPU_AllAllocatableSRegs_RegMask;
424}
425
426unsigned SIRegisterInfo::getSubRegFromChannel(unsigned Channel,
427 unsigned NumRegs) {
428 assert(NumRegs < SubRegFromChannelTableWidthMap.size());
429 unsigned NumRegIndex = SubRegFromChannelTableWidthMap[NumRegs];
430 assert(NumRegIndex && "Not implemented");
431 assert(Channel < SubRegFromChannelTable[NumRegIndex - 1].size());
432 return SubRegFromChannelTable[NumRegIndex - 1][Channel];
433}
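
getSubRegFromChannel() composes the two static tables initialized in the constructor: NumRegs is first mapped through SubRegFromChannelTableWidthMap, whose entries are shifted by 1 so that 0 means "unsupported", and the result minus one selects the row of SubRegFromChannelTable. A standalone model of just the indexing scheme (the table contents are omitted; only the shifted lookup mirrors the file above):

  #include <array>
  #include <cassert>

  static const std::array<unsigned, 17> WidthMap = {
      0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 9};

  unsigned rowFor(unsigned NumRegs) {
    unsigned NumRegIndex = WidthMap.at(NumRegs);
    assert(NumRegIndex && "width not supported"); // 0 marks unsupported widths
    return NumRegIndex - 1;
  }

  int main() {
    assert(rowFor(1) == 0);  // 1 DWORD (32-bit)    -> table row 0
    assert(rowFor(8) == 7);  // 8 DWORDs (256-bit)  -> table row 7
    assert(rowFor(16) == 8); // 16 DWORDs (512-bit) -> table row 8
  }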
434
435MCRegister SIRegisterInfo::reservedPrivateSegmentBufferReg(
436 const MachineFunction &MF) const {
437 unsigned BaseIdx = alignDown(ST.getMaxNumSGPRs(MF), 4) - 4;
438 MCRegister BaseReg(AMDGPU::SGPR_32RegClass.getRegister(BaseIdx));
439 return getMatchingSuperReg(BaseReg, AMDGPU::sub0, &AMDGPU::SGPR_128RegClass);
440}
441
442BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
443 BitVector Reserved(getNumRegs());
444 Reserved.set(AMDGPU::MODE);
445
446 // EXEC_LO and EXEC_HI could be allocated and used as regular registers, but
447 // this seems likely to result in bugs, so I'm marking them as reserved.
448 reserveRegisterTuples(Reserved, AMDGPU::EXEC);
449 reserveRegisterTuples(Reserved, AMDGPU::FLAT_SCR);
450
451 // M0 has to be reserved so that llvm accepts it as a live-in to a block.
452 reserveRegisterTuples(Reserved, AMDGPU::M0);
453
454 // Reserve src_vccz, src_execz, src_scc.
455 reserveRegisterTuples(Reserved, AMDGPU::SRC_VCCZ);
456 reserveRegisterTuples(Reserved, AMDGPU::SRC_EXECZ);
457 reserveRegisterTuples(Reserved, AMDGPU::SRC_SCC);
458
459 // Reserve the memory aperture registers.
460 reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_BASE);
461 reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_LIMIT);
462 reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_BASE);
463 reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_LIMIT);
464
465 // Reserve src_pops_exiting_wave_id - support is not implemented in Codegen.
466 reserveRegisterTuples(Reserved, AMDGPU::SRC_POPS_EXITING_WAVE_ID);
467
468 // Reserve xnack_mask registers - support is not implemented in Codegen.
469 reserveRegisterTuples(Reserved, AMDGPU::XNACK_MASK);
470
471 // Reserve lds_direct register - support is not implemented in Codegen.
472 reserveRegisterTuples(Reserved, AMDGPU::LDS_DIRECT);
473
474 // Reserve Trap Handler registers - support is not implemented in Codegen.
475 reserveRegisterTuples(Reserved, AMDGPU::TBA);
476 reserveRegisterTuples(Reserved, AMDGPU::TMA);
477 reserveRegisterTuples(Reserved, AMDGPU::TTMP0_TTMP1);
478 reserveRegisterTuples(Reserved, AMDGPU::TTMP2_TTMP3);
479 reserveRegisterTuples(Reserved, AMDGPU::TTMP4_TTMP5);
480 reserveRegisterTuples(Reserved, AMDGPU::TTMP6_TTMP7);
481 reserveRegisterTuples(Reserved, AMDGPU::TTMP8_TTMP9);
482 reserveRegisterTuples(Reserved, AMDGPU::TTMP10_TTMP11);
483 reserveRegisterTuples(Reserved, AMDGPU::TTMP12_TTMP13);
484 reserveRegisterTuples(Reserved, AMDGPU::TTMP14_TTMP15);
485
486 // Reserve null register - it shall never be allocated
487 reserveRegisterTuples(Reserved, AMDGPU::SGPR_NULL);
488
489 // Disallow vcc_hi allocation in wave32. It may be allocated but most likely
490 // will result in bugs.
491 if (isWave32) {
492 Reserved.set(AMDGPU::VCC);
493 Reserved.set(AMDGPU::VCC_HI);
494 }
495
496 unsigned MaxNumSGPRs = ST.getMaxNumSGPRs(MF);
497 unsigned TotalNumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
498 for (unsigned i = MaxNumSGPRs; i < TotalNumSGPRs; ++i) {
499 unsigned Reg = AMDGPU::SGPR_32RegClass.getRegister(i);
500 reserveRegisterTuples(Reserved, Reg);
501 }
502
503 unsigned MaxNumVGPRs = ST.getMaxNumVGPRs(MF);
504 // TODO: In an entry function without calls and without AGPR usage it is
505 // possible to use the whole register budget for VGPRs. It should even be
506 // possible to estimate the maximum AGPR/VGPR pressure and split the
507 // register file accordingly.
508 if (ST.hasGFX90AInsts())
509 MaxNumVGPRs /= 2;
510 unsigned TotalNumVGPRs = AMDGPU::VGPR_32RegClass.getNumRegs();
511 for (unsigned i = MaxNumVGPRs; i < TotalNumVGPRs; ++i) {
512 unsigned Reg = AMDGPU::VGPR_32RegClass.getRegister(i);
513 reserveRegisterTuples(Reserved, Reg);
514 Reg = AMDGPU::AGPR_32RegClass.getRegister(i);
515 reserveRegisterTuples(Reserved, Reg);
516 }
517
518 for (auto Reg : AMDGPU::SReg_32RegClass) {
519 Reserved.set(getSubReg(Reg, AMDGPU::hi16));
520 Register Low = getSubReg(Reg, AMDGPU::lo16);
521 // This is to prevent BB vcc liveness errors.
522 if (!AMDGPU::SGPR_LO16RegClass.contains(Low))
523 Reserved.set(Low);
524 }
525
526 for (auto Reg : AMDGPU::AGPR_32RegClass) {
527 Reserved.set(getSubReg(Reg, AMDGPU::hi16));
528 }
529
530 // Reserve all remaining AGPRs if there are no instructions to use them.
531 if (!ST.hasMAIInsts()) {
532 for (unsigned i = 0; i < MaxNumVGPRs; ++i) {
533 unsigned Reg = AMDGPU::AGPR_32RegClass.getRegister(i);
534 reserveRegisterTuples(Reserved, Reg);
535 }
536 }
537
538 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
539
540 Register ScratchRSrcReg = MFI->getScratchRSrcReg();
541 if (ScratchRSrcReg != AMDGPU::NoRegister) {
542 // Reserve 4 SGPRs for the scratch buffer resource descriptor in case we need
543 // to spill.
544 // TODO: May need to reserve a VGPR if doing LDS spilling.
545 reserveRegisterTuples(Reserved, ScratchRSrcReg);
546 }
547
548 // We have to assume the SP is needed in case there are calls in the function,
549 // which is detected after the function is lowered. If we aren't really going
550 // to need SP, don't bother reserving it.
551 MCRegister StackPtrReg = MFI->getStackPtrOffsetReg();
552
553 if (StackPtrReg) {
554 reserveRegisterTuples(Reserved, StackPtrReg);
555 assert(!isSubRegister(ScratchRSrcReg, StackPtrReg));
556 }
557
558 MCRegister FrameReg = MFI->getFrameOffsetReg();
559 if (FrameReg) {
560 reserveRegisterTuples(Reserved, FrameReg);
561 assert(!isSubRegister(ScratchRSrcReg, FrameReg));
562 }
563
564 if (hasBasePointer(MF)) {
565 MCRegister BasePtrReg = getBaseRegister();
566 reserveRegisterTuples(Reserved, BasePtrReg);
567 assert(!isSubRegister(ScratchRSrcReg, BasePtrReg));
568 }
569
570 for (MCRegister Reg : MFI->WWMReservedRegs) {
571 reserveRegisterTuples(Reserved, Reg);
572 }
573
574 // FIXME: Stop using reserved registers for this.
575 for (MCPhysReg Reg : MFI->getAGPRSpillVGPRs())
576 reserveRegisterTuples(Reserved, Reg);
577
578 for (MCPhysReg Reg : MFI->getVGPRSpillAGPRs())
579 reserveRegisterTuples(Reserved, Reg);
580
581 for (auto SSpill : MFI->getSGPRSpillVGPRs())
582 reserveRegisterTuples(Reserved, SSpill.VGPR);
583
584 return Reserved;
585}
586
587bool SIRegisterInfo::shouldRealignStack(const MachineFunction &MF) const {
588 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
589 // On entry, the base address is 0, so it can't possibly need any more
590 // alignment.
591
592 // FIXME: Should be able to specify the entry frame alignment per calling
593 // convention instead.
594 if (Info->isEntryFunction())
595 return false;
596
597 return TargetRegisterInfo::shouldRealignStack(MF);
598}
599
600bool SIRegisterInfo::requiresRegisterScavenging(const MachineFunction &Fn) const {
601 const SIMachineFunctionInfo *Info = Fn.getInfo<SIMachineFunctionInfo>();
602 if (Info->isEntryFunction()) {
603 const MachineFrameInfo &MFI = Fn.getFrameInfo();
604 return MFI.hasStackObjects() || MFI.hasCalls();
605 }
606
607 // May need scavenger for dealing with callee saved registers.
608 return true;
609}
610
611bool SIRegisterInfo::requiresFrameIndexScavenging(
612 const MachineFunction &MF) const {
613 // Do not use frame virtual registers. They used to be used for SGPRs, but
614 // once we reach PrologEpilogInserter, we can no longer spill SGPRs. If the
615 // scavenger fails, we can increment/decrement the necessary SGPRs to avoid a
616 // spill.
617 return false;
618}
619
620bool SIRegisterInfo::requiresFrameIndexReplacementScavenging(
621 const MachineFunction &MF) const {
622 const MachineFrameInfo &MFI = MF.getFrameInfo();
623 return MFI.hasStackObjects();
624}
625
626bool SIRegisterInfo::requiresVirtualBaseRegisters(
627 const MachineFunction &) const {
628 // There are no special dedicated stack or frame pointers.
629 return true;
630}
631
632int64_t SIRegisterInfo::getScratchInstrOffset(const MachineInstr *MI) const {
633 assert(SIInstrInfo::isMUBUF(*MI) || SIInstrInfo::isFLATScratch(*MI));
634
635 int OffIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
636 AMDGPU::OpName::offset);
637 return MI->getOperand(OffIdx).getImm();
638}
639
640int64_t SIRegisterInfo::getFrameIndexInstrOffset(const MachineInstr *MI,
641 int Idx) const {
642 if (!SIInstrInfo::isMUBUF(*MI) && !SIInstrInfo::isFLATScratch(*MI))
643 return 0;
644
645 assert((Idx == AMDGPU::getNamedOperandIdx(MI->getOpcode(),
646 AMDGPU::OpName::vaddr) ||
647 (Idx == AMDGPU::getNamedOperandIdx(MI->getOpcode(),
648 AMDGPU::OpName::saddr))) &&
649 "Should never see frame index on non-address operand");
650
651 return getScratchInstrOffset(MI);
652}
653
654bool SIRegisterInfo::needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
655 if (!SIInstrInfo::isMUBUF(*MI) && !SIInstrInfo::isFLATScratch(*MI))
656 return false;
657
658 int64_t FullOffset = Offset + getScratchInstrOffset(MI);
659
660 if (SIInstrInfo::isMUBUF(*MI))
661 return !SIInstrInfo::isLegalMUBUFImmOffset(FullOffset);
662
663 const SIInstrInfo *TII = ST.getInstrInfo();
664 return !TII->isLegalFLATOffset(FullOffset, AMDGPUAS::PRIVATE_ADDRESS,
665 SIInstrFlags::FlatScratch);
666}
667
668Register SIRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
669 int FrameIdx,
670 int64_t Offset) const {
671 MachineBasicBlock::iterator Ins = MBB->begin();
672 DebugLoc DL; // Defaults to "unknown"
673
674 if (Ins != MBB->end())
675 DL = Ins->getDebugLoc();
676
677 MachineFunction *MF = MBB->getParent();
678 const SIInstrInfo *TII = ST.getInstrInfo();
679 MachineRegisterInfo &MRI = MF->getRegInfo();
680 unsigned MovOpc = ST.enableFlatScratch() ? AMDGPU::S_MOV_B32
681 : AMDGPU::V_MOV_B32_e32;
682
683 Register BaseReg = MRI.createVirtualRegister(
684 ST.enableFlatScratch() ? &AMDGPU::SReg_32_XEXEC_HIRegClass
685 : &AMDGPU::VGPR_32RegClass);
686
687 if (Offset == 0) {
688 BuildMI(*MBB, Ins, DL, TII->get(MovOpc), BaseReg)
689 .addFrameIndex(FrameIdx);
690 return BaseReg;
691 }
692
693 Register OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
694
695 Register FIReg = MRI.createVirtualRegister(
696 ST.enableFlatScratch() ? &AMDGPU::SReg_32_XM0RegClass
697 : &AMDGPU::VGPR_32RegClass);
698
699 BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg)
700 .addImm(Offset);
701 BuildMI(*MBB, Ins, DL, TII->get(MovOpc), FIReg)
702 .addFrameIndex(FrameIdx);
703
704 if (ST.enableFlatScratch()) {
705 BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::S_ADD_U32), BaseReg)
706 .addReg(OffsetReg, RegState::Kill)
707 .addReg(FIReg);
708 return BaseReg;
709 }
710
711 TII->getAddNoCarry(*MBB, Ins, DL, BaseReg)
712 .addReg(OffsetReg, RegState::Kill)
713 .addReg(FIReg)
714 .addImm(0); // clamp bit
715
716 return BaseReg;
717}
718
719void SIRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
720 int64_t Offset) const {
721 const SIInstrInfo *TII = ST.getInstrInfo();
722 bool IsFlat = TII->isFLATScratch(MI);
723
724#ifndef NDEBUG
725 // FIXME: Is it possible to be storing a frame index to itself?
726 bool SeenFI = false;
727 for (const MachineOperand &MO: MI.operands()) {
728 if (MO.isFI()) {
729 if (SeenFI)
730 llvm_unreachable("should not see multiple frame indices");
731
732 SeenFI = true;
733 }
734 }
735#endif
736
737 MachineOperand *FIOp =
738 TII->getNamedOperand(MI, IsFlat ? AMDGPU::OpName::saddr
739 : AMDGPU::OpName::vaddr);
740
741 MachineOperand *OffsetOp = TII->getNamedOperand(MI, AMDGPU::OpName::offset);
742 int64_t NewOffset = OffsetOp->getImm() + Offset;
743
744 assert(FIOp && FIOp->isFI() && "frame index must be address operand");
745 assert(TII->isMUBUF(MI) || TII->isFLATScratch(MI));
746
747 if (IsFlat) {
748 assert(TII->isLegalFLATOffset(NewOffset, AMDGPUAS::PRIVATE_ADDRESS,
749 SIInstrFlags::FlatScratch) &&
750 "offset should be legal");
751 FIOp->ChangeToRegister(BaseReg, false);
752 OffsetOp->setImm(NewOffset);
753 return;
754 }
755
756#ifndef NDEBUG
757 MachineOperand *SOffset = TII->getNamedOperand(MI, AMDGPU::OpName::soffset);
758 assert(SOffset->isImm() && SOffset->getImm() == 0);
759#endif
760
761 assert(SIInstrInfo::isLegalMUBUFImmOffset(NewOffset) &&
762 "offset should be legal");
763
764 FIOp->ChangeToRegister(BaseReg, false);
765 OffsetOp->setImm(NewOffset);
766}
767
768bool SIRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
769 Register BaseReg,
770 int64_t Offset) const {
771 if (!SIInstrInfo::isMUBUF(*MI) && !SIInstrInfo::isFLATScratch(*MI))
772 return false;
773
774 int64_t NewOffset = Offset + getScratchInstrOffset(MI);
775
776 if (SIInstrInfo::isMUBUF(*MI))
777 return SIInstrInfo::isLegalMUBUFImmOffset(NewOffset);
778
779 const SIInstrInfo *TII = ST.getInstrInfo();
780 return TII->isLegalFLATOffset(NewOffset, AMDGPUAS::PRIVATE_ADDRESS,
781 SIInstrFlags::FlatScratch);
782}
783
784const TargetRegisterClass *SIRegisterInfo::getPointerRegClass(
785 const MachineFunction &MF, unsigned Kind) const {
786 // This is inaccurate. It depends on the instruction and address space. The
787 // only place where we should hit this is for dealing with frame indexes /
788 // private accesses, so this is correct in that case.
789 return &AMDGPU::VGPR_32RegClass;
790}
791
792static unsigned getNumSubRegsForSpillOp(unsigned Op) {
793
794 switch (Op) {
795 case AMDGPU::SI_SPILL_S1024_SAVE:
796 case AMDGPU::SI_SPILL_S1024_RESTORE:
797 case AMDGPU::SI_SPILL_V1024_SAVE:
798 case AMDGPU::SI_SPILL_V1024_RESTORE:
799 case AMDGPU::SI_SPILL_A1024_SAVE:
800 case AMDGPU::SI_SPILL_A1024_RESTORE:
801 return 32;
802 case AMDGPU::SI_SPILL_S512_SAVE:
803 case AMDGPU::SI_SPILL_S512_RESTORE:
804 case AMDGPU::SI_SPILL_V512_SAVE:
805 case AMDGPU::SI_SPILL_V512_RESTORE:
806 case AMDGPU::SI_SPILL_A512_SAVE:
807 case AMDGPU::SI_SPILL_A512_RESTORE:
808 return 16;
809 case AMDGPU::SI_SPILL_S256_SAVE:
810 case AMDGPU::SI_SPILL_S256_RESTORE:
811 case AMDGPU::SI_SPILL_V256_SAVE:
812 case AMDGPU::SI_SPILL_V256_RESTORE:
813 case AMDGPU::SI_SPILL_A256_SAVE:
814 case AMDGPU::SI_SPILL_A256_RESTORE:
815 return 8;
816 case AMDGPU::SI_SPILL_S192_SAVE:
817 case AMDGPU::SI_SPILL_S192_RESTORE:
818 case AMDGPU::SI_SPILL_V192_SAVE:
819 case AMDGPU::SI_SPILL_V192_RESTORE:
820 case AMDGPU::SI_SPILL_A192_SAVE:
821 case AMDGPU::SI_SPILL_A192_RESTORE:
822 return 6;
823 case AMDGPU::SI_SPILL_S160_SAVE:
824 case AMDGPU::SI_SPILL_S160_RESTORE:
825 case AMDGPU::SI_SPILL_V160_SAVE:
826 case AMDGPU::SI_SPILL_V160_RESTORE:
827 case AMDGPU::SI_SPILL_A160_SAVE:
828 case AMDGPU::SI_SPILL_A160_RESTORE:
829 return 5;
830 case AMDGPU::SI_SPILL_S128_SAVE:
831 case AMDGPU::SI_SPILL_S128_RESTORE:
832 case AMDGPU::SI_SPILL_V128_SAVE:
833 case AMDGPU::SI_SPILL_V128_RESTORE:
834 case AMDGPU::SI_SPILL_A128_SAVE:
835 case AMDGPU::SI_SPILL_A128_RESTORE:
836 return 4;
837 case AMDGPU::SI_SPILL_S96_SAVE:
838 case AMDGPU::SI_SPILL_S96_RESTORE:
839 case AMDGPU::SI_SPILL_V96_SAVE:
840 case AMDGPU::SI_SPILL_V96_RESTORE:
841 case AMDGPU::SI_SPILL_A96_SAVE:
842 case AMDGPU::SI_SPILL_A96_RESTORE:
843 return 3;
844 case AMDGPU::SI_SPILL_S64_SAVE:
845 case AMDGPU::SI_SPILL_S64_RESTORE:
846 case AMDGPU::SI_SPILL_V64_SAVE:
847 case AMDGPU::SI_SPILL_V64_RESTORE:
848 case AMDGPU::SI_SPILL_A64_SAVE:
849 case AMDGPU::SI_SPILL_A64_RESTORE:
850 return 2;
851 case AMDGPU::SI_SPILL_S32_SAVE:
852 case AMDGPU::SI_SPILL_S32_RESTORE:
853 case AMDGPU::SI_SPILL_V32_SAVE:
854 case AMDGPU::SI_SPILL_V32_RESTORE:
855 case AMDGPU::SI_SPILL_A32_SAVE:
856 case AMDGPU::SI_SPILL_A32_RESTORE:
857 return 1;
858 default: llvm_unreachable("Invalid spill opcode");
859 }
860}
861
862static int getOffsetMUBUFStore(unsigned Opc) {
863 switch (Opc) {
864 case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
865 return AMDGPU::BUFFER_STORE_DWORD_OFFSET;
866 case AMDGPU::BUFFER_STORE_BYTE_OFFEN:
867 return AMDGPU::BUFFER_STORE_BYTE_OFFSET;
868 case AMDGPU::BUFFER_STORE_SHORT_OFFEN:
869 return AMDGPU::BUFFER_STORE_SHORT_OFFSET;
870 case AMDGPU::BUFFER_STORE_DWORDX2_OFFEN:
871 return AMDGPU::BUFFER_STORE_DWORDX2_OFFSET;
872 case AMDGPU::BUFFER_STORE_DWORDX4_OFFEN:
873 return AMDGPU::BUFFER_STORE_DWORDX4_OFFSET;
874 case AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFEN:
875 return AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFSET;
876 case AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFEN:
877 return AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFSET;
878 default:
879 return -1;
880 }
881}
882
883static int getOffsetMUBUFLoad(unsigned Opc) {
884 switch (Opc) {
885 case AMDGPU::BUFFER_LOAD_DWORD_OFFEN:
886 return AMDGPU::BUFFER_LOAD_DWORD_OFFSET;
887 case AMDGPU::BUFFER_LOAD_UBYTE_OFFEN:
888 return AMDGPU::BUFFER_LOAD_UBYTE_OFFSET;
889 case AMDGPU::BUFFER_LOAD_SBYTE_OFFEN:
890 return AMDGPU::BUFFER_LOAD_SBYTE_OFFSET;
891 case AMDGPU::BUFFER_LOAD_USHORT_OFFEN:
892 return AMDGPU::BUFFER_LOAD_USHORT_OFFSET;
893 case AMDGPU::BUFFER_LOAD_SSHORT_OFFEN:
894 return AMDGPU::BUFFER_LOAD_SSHORT_OFFSET;
895 case AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN:
896 return AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET;
897 case AMDGPU::BUFFER_LOAD_DWORDX4_OFFEN:
898 return AMDGPU::BUFFER_LOAD_DWORDX4_OFFSET;
899 case AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFEN:
900 return AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFSET;
901 case AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFEN:
902 return AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFSET;
903 case AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFEN:
904 return AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFSET;
905 case AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFEN:
906 return AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFSET;
907 case AMDGPU::BUFFER_LOAD_SHORT_D16_OFFEN:
908 return AMDGPU::BUFFER_LOAD_SHORT_D16_OFFSET;
909 case AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFEN:
910 return AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFSET;
911 default:
912 return -1;
913 }
914}
915
916static MachineInstrBuilder spillVGPRtoAGPR(const GCNSubtarget &ST,
917 MachineBasicBlock::iterator MI,
918 int Index,
919 unsigned Lane,
920 unsigned ValueReg,
921 bool IsKill) {
922 MachineBasicBlock *MBB = MI->getParent();
923 MachineFunction *MF = MI->getParent()->getParent();
924 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
925 const SIInstrInfo *TII = ST.getInstrInfo();
926
927 MCPhysReg Reg = MFI->getVGPRToAGPRSpill(Index, Lane);
928
929 if (Reg == AMDGPU::NoRegister)
930 return MachineInstrBuilder();
931
932 bool IsStore = MI->mayStore();
933 MachineRegisterInfo &MRI = MF->getRegInfo();
934 auto *TRI = static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo());
935
936 unsigned Dst = IsStore ? Reg : ValueReg;
937 unsigned Src = IsStore ? ValueReg : Reg;
938 unsigned Opc = (IsStore ^ TRI->isVGPR(MRI, Reg)) ? AMDGPU::V_ACCVGPR_WRITE_B32_e64
939 : AMDGPU::V_ACCVGPR_READ_B32_e64;
940
941 auto MIB = BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(Opc), Dst)
942 .addReg(Src, getKillRegState(IsKill));
943 MIB->setAsmPrinterFlag(MachineInstr::ReloadReuse);
944 return MIB;
945}
946
947// This differs from buildSpillLoadStore by only scavenging a VGPR. It does not
948// need to handle the case where an SGPR may need to be spilled while spilling.
949static bool buildMUBUFOffsetLoadStore(const GCNSubtarget &ST,
950 MachineFrameInfo &MFI,
951 MachineBasicBlock::iterator MI,
952 int Index,
953 int64_t Offset) {
954 const SIInstrInfo *TII = ST.getInstrInfo();
955 MachineBasicBlock *MBB = MI->getParent();
956 const DebugLoc &DL = MI->getDebugLoc();
957 bool IsStore = MI->mayStore();
958
959 unsigned Opc = MI->getOpcode();
960 int LoadStoreOp = IsStore ?
961 getOffsetMUBUFStore(Opc) : getOffsetMUBUFLoad(Opc);
962 if (LoadStoreOp == -1)
963 return false;
964
965 const MachineOperand *Reg = TII->getNamedOperand(*MI, AMDGPU::OpName::vdata);
966 if (spillVGPRtoAGPR(ST, MI, Index, 0, Reg->getReg(), false).getInstr())
967 return true;
968
969 MachineInstrBuilder NewMI =
970 BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
971 .add(*Reg)
972 .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc))
973 .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::soffset))
974 .addImm(Offset)
975 .addImm(0) // cpol
976 .addImm(0) // tfe
977 .addImm(0) // swz
978 .cloneMemRefs(*MI);
979
980 const MachineOperand *VDataIn = TII->getNamedOperand(*MI,
981 AMDGPU::OpName::vdata_in);
982 if (VDataIn)
983 NewMI.add(*VDataIn);
984 return true;
985}
986
987static unsigned getFlatScratchSpillOpcode(const SIInstrInfo *TII,
988 unsigned LoadStoreOp,
989 unsigned EltSize) {
990 bool IsStore = TII->get(LoadStoreOp).mayStore();
991 bool UseST =
992 AMDGPU::getNamedOperandIdx(LoadStoreOp, AMDGPU::OpName::vaddr) < 0 &&
993 AMDGPU::getNamedOperandIdx(LoadStoreOp, AMDGPU::OpName::saddr) < 0;
994
995 switch (EltSize) {
996 case 4:
997 LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORD_SADDR
998 : AMDGPU::SCRATCH_LOAD_DWORD_SADDR;
999 break;
1000 case 8:
1001 LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORDX2_SADDR
1002 : AMDGPU::SCRATCH_LOAD_DWORDX2_SADDR;
1003 break;
1004 case 12:
1005 LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORDX3_SADDR
1006 : AMDGPU::SCRATCH_LOAD_DWORDX3_SADDR;
1007 break;
1008 case 16:
1009 LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORDX4_SADDR
1010 : AMDGPU::SCRATCH_LOAD_DWORDX4_SADDR;
1011 break;
1012 default:
1013 llvm_unreachable("Unexpected spill load/store size!");
1014 }
1015
1016 if (UseST)
1017 LoadStoreOp = AMDGPU::getFlatScratchInstSTfromSS(LoadStoreOp);
1018
1019 return LoadStoreOp;
1020}
1021
1022void SIRegisterInfo::buildSpillLoadStore(
1023 MachineBasicBlock::iterator MI, unsigned LoadStoreOp, int Index,
1024 Register ValueReg, bool IsKill, MCRegister ScratchOffsetReg,
1025 int64_t InstOffset, MachineMemOperand *MMO, RegScavenger *RS,
1026 LivePhysRegs *LiveRegs) const {
1027 assert((!RS || !LiveRegs) && "Only RS or LiveRegs can be set but not both");
1028
1029 MachineBasicBlock *MBB = MI->getParent();
1030 MachineFunction *MF = MI->getParent()->getParent();
1031 const SIInstrInfo *TII = ST.getInstrInfo();
1032 const MachineFrameInfo &MFI = MF->getFrameInfo();
1033 const SIMachineFunctionInfo *FuncInfo = MF->getInfo<SIMachineFunctionInfo>();
1034
1035 const MCInstrDesc *Desc = &TII->get(LoadStoreOp);
1036 const DebugLoc &DL = MI->getDebugLoc();
1037 bool IsStore = Desc->mayStore();
1038 bool IsFlat = TII->isFLATScratch(LoadStoreOp);
1039
1040 bool Scavenged = false;
1041 MCRegister SOffset = ScratchOffsetReg;
1042
1043 const TargetRegisterClass *RC = getRegClassForReg(MF->getRegInfo(), ValueReg);
1044 // On gfx90a+, an AGPR is a regular VGPR acceptable for loads and stores.
1045 const bool IsAGPR = !ST.hasGFX90AInsts() && hasAGPRs(RC);
1046 const unsigned RegWidth = AMDGPU::getRegBitWidth(RC->getID()) / 8;
1047
1048 // Always use 4-byte operations for AGPRs because we need to scavenge
1049 // a temporary VGPR.
1050 unsigned EltSize = (IsFlat && !IsAGPR) ? std::min(RegWidth, 16u) : 4u;
1051 unsigned NumSubRegs = RegWidth / EltSize;
1052 unsigned Size = NumSubRegs * EltSize;
1053 unsigned RemSize = RegWidth - Size;
1054 unsigned NumRemSubRegs = RemSize ? 1 : 0;
1055 int64_t Offset = InstOffset + MFI.getObjectOffset(Index);
1056 int64_t MaxOffset = Offset + Size + RemSize - EltSize;
1057 int64_t ScratchOffsetRegDelta = 0;
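// A worked example (values assumed, not from the source): spilling a
// 192-bit register via flat scratch gives RegWidth = 24, so
// EltSize = min(24, 16) = 16, NumSubRegs = 1, Size = 16, RemSize = 8 and
// NumRemSubRegs = 1 -- one DWORDX4 access plus one DWORDX2 access for the
// remainder. The MUBUF path always uses EltSize = 4.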
1058
1059 if (IsFlat && EltSize > 4) {
1060 LoadStoreOp = getFlatScratchSpillOpcode(TII, LoadStoreOp, EltSize);
1061 Desc = &TII->get(LoadStoreOp);
Value stored to 'Desc' is never read
1062 }
1063
1064 Align Alignment = MFI.getObjectAlign(Index);
1065 const MachinePointerInfo &BasePtrInfo = MMO->getPointerInfo();
1066
1067 assert((IsFlat || ((Offset % EltSize) == 0)) &&
1068 "unexpected VGPR spill offset");
1069
1070 bool IsOffsetLegal =
1071 IsFlat ? TII->isLegalFLATOffset(MaxOffset, AMDGPUAS::PRIVATE_ADDRESS,
1072 SIInstrFlags::FlatScratch)
1073 : SIInstrInfo::isLegalMUBUFImmOffset(MaxOffset);
1074 if (!IsOffsetLegal || (IsFlat && !SOffset && !ST.hasFlatScratchSTMode())) {
1075 SOffset = MCRegister();
1076
1077 // We currently only support spilling VGPRs to EltSize boundaries, meaning
1078 // we can simplify the adjustment of Offset here to just scale with
1079 // WavefrontSize.
1080 if (!IsFlat)
1081 Offset *= ST.getWavefrontSize();
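// E.g. (illustrative, assuming wave64): an unswizzled object offset of
// 16 bytes becomes a swizzled MUBUF offset of 16 * 64 = 1024.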
1082
1083 // We don't have access to the register scavenger if this function is called
1084 // during PEI::scavengeFrameVirtualRegs(), so use LiveRegs in this case.
1085 if (RS) {
1086 SOffset = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0, false);
1087 } else if (LiveRegs) {
1088 for (MCRegister Reg : AMDGPU::SGPR_32RegClass) {
1089 if (LiveRegs->available(MF->getRegInfo(), Reg)) {
1090 SOffset = Reg;
1091 break;
1092 }
1093 }
1094 }
1095
1096 if (!SOffset) {
1097 // There are no free SGPRs, and we are in the process of spilling
1098 // VGPRs too. Since we need a VGPR in order to spill SGPRs (this is true
1099 // on SI/CI, and on VI it is true until we implement spilling using scalar
1100 // stores), we have no way to free up an SGPR. Our solution here is to
1101 // add the offset directly to the ScratchOffset or StackPtrOffset
1102 // register, and then subtract the offset after the spill to return the
1103 // register to its original value.
1104 if (!ScratchOffsetReg)
1105 ScratchOffsetReg = FuncInfo->getStackPtrOffsetReg();
1106 SOffset = ScratchOffsetReg;
1107 ScratchOffsetRegDelta = Offset;
1108 } else {
1109 Scavenged = true;
1110 }
1111
1112 if (!SOffset)
1113 report_fatal_error("could not scavenge SGPR to spill in entry function");
1114
1115 if (ScratchOffsetReg == AMDGPU::NoRegister) {
1116 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), SOffset)
1117 .addImm(Offset);
1118 } else {
1119 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), SOffset)
1120 .addReg(ScratchOffsetReg)
1121 .addImm(Offset);
1122 }
1123
1124 Offset = 0;
1125 }
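// When the offset was folded into the scratch/stack pointer above, the net
// effect is a bracketing pair (register names assumed for illustration):
//   s_add_u32 s32, s32, <offset>   ; before the spill
//   ...spill/reload using s32 with a zero immediate offset...
//   s_sub_u32 s32, s32, <offset>   ; emitted at the end via ScratchOffsetRegDelta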
1126
1127 if (IsFlat && SOffset == AMDGPU::NoRegister) {
1128 assert(AMDGPU::getNamedOperandIdx(LoadStoreOp, AMDGPU::OpName::vaddr) < 0
1129 && "Unexpected vaddr for flat scratch with a FI operand");
1130
1131 assert(ST.hasFlatScratchSTMode());
1132 LoadStoreOp = AMDGPU::getFlatScratchInstSTfromSS(LoadStoreOp);
1133 Desc = &TII->get(LoadStoreOp);
1134 }
1135
1136 Register TmpReg;
1137
1138 for (unsigned i = 0, e = NumSubRegs + NumRemSubRegs, RegOffset = 0; i != e;
1139 ++i, RegOffset += EltSize) {
1140 if (i == NumSubRegs) {
1141 EltSize = RemSize;
1142 LoadStoreOp = getFlatScratchSpillOpcode(TII, LoadStoreOp, EltSize);
1143 }
1144 Desc = &TII->get(LoadStoreOp);
1145
1146 unsigned NumRegs = EltSize / 4;
1147 Register SubReg = e == 1
1148 ? ValueReg
1149 : Register(getSubReg(ValueReg,
1150 getSubRegFromChannel(RegOffset / 4, NumRegs)));
1151
1152 unsigned SOffsetRegState = 0;
1153 unsigned SrcDstRegState = getDefRegState(!IsStore);
1154 if (i + 1 == e) {
1155 SOffsetRegState |= getKillRegState(Scavenged);
1156 // The last implicit use carries the "Kill" flag.
1157 SrcDstRegState |= getKillRegState(IsKill);
1158 }
1159
1160 // Make sure the whole register is defined if there are undef components by
1161 // adding an implicit def of the super-reg on the first instruction.
1162 bool NeedSuperRegDef = e > 1 && IsStore && i == 0;
1163 bool NeedSuperRegImpOperand = e > 1;
1164
1165 unsigned Lane = RegOffset / 4;
1166 unsigned LaneE = (RegOffset + EltSize) / 4;
1167 for ( ; Lane != LaneE; ++Lane) {
1168 bool IsSubReg = e > 1 || EltSize > 4;
1169 Register Sub = IsSubReg
1170 ? Register(getSubReg(ValueReg, getSubRegFromChannel(Lane)))
1171 : ValueReg;
1172 auto MIB = spillVGPRtoAGPR(ST, MI, Index, Lane, Sub, IsKill);
1173 if (!MIB.getInstr())
1174 break;
1175 if (NeedSuperRegDef || (IsSubReg && IsStore && Lane == 0)) {
1176 MIB.addReg(ValueReg, RegState::ImplicitDefine);
1177 NeedSuperRegDef = false;
1178 }
1179 if (IsSubReg || NeedSuperRegImpOperand) {
1180 NeedSuperRegImpOperand = true;
1181 unsigned State = SrcDstRegState;
1182 if (Lane + 1 != LaneE)
1183 State &= ~RegState::Kill;
1184 MIB.addReg(ValueReg, RegState::Implicit | State);
1185 }
1186 }
1187
1188 if (Lane == LaneE) // Fully spilled into AGPRs.
1189 continue;
1190
1191 // Offset in bytes from the beginning of the ValueReg to the portion we
1192 // still need to spill. It may differ from RegOffset if a portion of the
1193 // current SubReg has already been spilled into AGPRs by the loop above.
1194 unsigned RemRegOffset = Lane * 4;
1195 unsigned RemEltSize = EltSize - (RemRegOffset - RegOffset);
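// E.g. (illustrative): with EltSize = 16 and the first two dwords already
// copied to AGPRs, Lane = RegOffset / 4 + 2, so RemRegOffset = RegOffset + 8
// and RemEltSize = 8; the tail is stored with a DWORDX2 opcode below.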
1196 if (RemEltSize != EltSize) { // Partially spilled to AGPRs
1197 assert(IsFlat && EltSize > 4);
1198
1199 unsigned NumRegs = RemEltSize / 4;
1200 SubReg = Register(getSubReg(ValueReg,
1201 getSubRegFromChannel(RemRegOffset / 4, NumRegs)));
1202 unsigned Opc = getFlatScratchSpillOpcode(TII, LoadStoreOp, RemEltSize);
1203 Desc = &TII->get(Opc);
1204 }
1205
1206 unsigned FinalReg = SubReg;
1207
1208 if (IsAGPR) {
1209 assert(EltSize == 4);
1210
1211 if (!TmpReg) {
1212 assert(RS && "Needs to have RegScavenger to spill an AGPR!");
1213 // FIXME: change to scavengeRegisterBackwards()
1214 TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
1215 RS->setRegUsed(TmpReg);
1216 }
1217 if (IsStore) {
1218 auto AccRead = BuildMI(*MBB, MI, DL,
1219 TII->get(AMDGPU::V_ACCVGPR_READ_B32_e64), TmpReg)
1220 .addReg(SubReg, getKillRegState(IsKill));
1221 if (NeedSuperRegDef)
1222 AccRead.addReg(ValueReg, RegState::ImplicitDefine);
1223 AccRead->setAsmPrinterFlag(MachineInstr::ReloadReuse);
1224 }
1225 SubReg = TmpReg;
1226 }
1227
1228 MachinePointerInfo PInfo = BasePtrInfo.getWithOffset(RemRegOffset);
1229 MachineMemOperand *NewMMO =
1230 MF->getMachineMemOperand(PInfo, MMO->getFlags(), RemEltSize,
1231 commonAlignment(Alignment, RemRegOffset));
1232
1233 auto MIB = BuildMI(*MBB, MI, DL, *Desc)
1234 .addReg(SubReg,
1235 getDefRegState(!IsStore) | getKillRegState(IsKill));
1236 if (!IsFlat)
1237 MIB.addReg(FuncInfo->getScratchRSrcReg());
1238
1239 if (SOffset == AMDGPU::NoRegister) {
1240 if (!IsFlat)
1241 MIB.addImm(0);
1242 } else {
1243 MIB.addReg(SOffset, SOffsetRegState);
1244 }
1245 MIB.addImm(Offset + RemRegOffset)
1246 .addImm(0); // cpol
1247 if (!IsFlat)
1248 MIB.addImm(0) // tfe
1249 .addImm(0); // swz
1250 MIB.addMemOperand(NewMMO);
1251
1252 if (!IsAGPR && NeedSuperRegDef)
1253 MIB.addReg(ValueReg, RegState::ImplicitDefine);
1254
1255 if (!IsStore && TmpReg != AMDGPU::NoRegister) {
1256 MIB = BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64),
1257 FinalReg)
1258 .addReg(TmpReg, RegState::Kill);
1259 MIB->setAsmPrinterFlag(MachineInstr::ReloadReuse);
1260 }
1261
1262 if (NeedSuperRegImpOperand)
1263 MIB.addReg(ValueReg, RegState::Implicit | SrcDstRegState);
1264 }
1265
1266 if (ScratchOffsetRegDelta != 0) {
1267 // Subtract the offset we added to the ScratchOffset register.
1268 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_SUB_U32), SOffset)
1269 .addReg(SOffset)
1270 .addImm(ScratchOffsetRegDelta);
1271 }
1272}
1273
1274void SIRegisterInfo::buildVGPRSpillLoadStore(SGPRSpillBuilder &SB, int Index,
1275 int Offset, bool IsLoad,
1276 bool IsKill) const {
1277 // Load/store VGPR
1278 MachineFrameInfo &FrameInfo = SB.MF.getFrameInfo();
1279 assert(FrameInfo.getStackID(Index) != TargetStackID::SGPRSpill);
1280
1281 Register FrameReg =
1282 FrameInfo.isFixedObjectIndex(Index) && hasBasePointer(SB.MF)
1283 ? getBaseRegister()
1284 : getFrameRegister(SB.MF);
1285
1286 Align Alignment = FrameInfo.getObjectAlign(Index);
1287 MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(SB.MF, Index);
1288 MachineMemOperand *MMO = SB.MF.getMachineMemOperand(
1289 PtrInfo, IsLoad ? MachineMemOperand::MOLoad : MachineMemOperand::MOStore,
1290 SB.EltSize, Alignment);
1291
1292 if (IsLoad) {
1293 unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_LOAD_DWORD_SADDR
1294 : AMDGPU::BUFFER_LOAD_DWORD_OFFSET;
1295 buildSpillLoadStore(SB.MI, Opc, Index, SB.TmpVGPR, false, FrameReg,
1296 Offset * SB.EltSize, MMO, SB.RS);
1297 } else {
1298 unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_STORE_DWORD_SADDR
1299 : AMDGPU::BUFFER_STORE_DWORD_OFFSET;
1300 buildSpillLoadStore(SB.MI, Opc, Index, SB.TmpVGPR, IsKill, FrameReg,
1301 Offset * SB.EltSize, MMO, SB.RS);
1302 // This only ever adds one VGPR spill
1303 SB.MFI.addToSpilledVGPRs(1);
1304 }
1305}
1306
1307bool SIRegisterInfo::spillSGPR(MachineBasicBlock::iterator MI,
1308 int Index,
1309 RegScavenger *RS,
1310 bool OnlyToVGPR) const {
1311 SGPRSpillBuilder SB(*this, *ST.getInstrInfo(), isWave32, MI, Index, RS);
1312
1313 ArrayRef<SIMachineFunctionInfo::SpilledReg> VGPRSpills =
1314 SB.MFI.getSGPRToVGPRSpills(Index);
1315 bool SpillToVGPR = !VGPRSpills.empty();
1316 if (OnlyToVGPR && !SpillToVGPR)
1317 return false;
1318
1319 assert(SpillToVGPR || (SB.SuperReg != SB.MFI.getStackPtrOffsetReg() &&
1320 SB.SuperReg != SB.MFI.getFrameOffsetReg()));
1321
1322 if (SpillToVGPR) {
1323 for (unsigned i = 0, e = SB.NumSubRegs; i < e; ++i) {
1324 Register SubReg =
1325 SB.NumSubRegs == 1
1326 ? SB.SuperReg
1327 : Register(getSubReg(SB.SuperReg, SB.SplitParts[i]));
1328 SIMachineFunctionInfo::SpilledReg Spill = VGPRSpills[i];
1329
1330 bool UseKill = SB.IsKill && i == SB.NumSubRegs - 1;
1331
1332 // Mark the "old value of vgpr" input undef only if this is the first sgpr
1333 // spill to this specific vgpr in the first basic block.
1334 auto MIB = BuildMI(SB.MBB, MI, SB.DL, SB.TII.get(AMDGPU::V_WRITELANE_B32),
1335 Spill.VGPR)
1336 .addReg(SubReg, getKillRegState(UseKill))
1337 .addImm(Spill.Lane)
1338 .addReg(Spill.VGPR);
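// The resulting MIR looks like (registers and lane assumed for
// illustration):
//   $vgpr0 = V_WRITELANE_B32 killed $sgpr4, 0, $vgpr0
// where the trailing $vgpr0 use ties in the previous value of the lanes
// that are not written.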
1339
1340 if (i == 0 && SB.NumSubRegs > 1) {
1341 // We may be spilling a super-register which is only partially defined,
1342 // and need to ensure later spills think the value is defined.
1343 MIB.addReg(SB.SuperReg, RegState::ImplicitDefine);
1344 }
1345
1346 if (SB.NumSubRegs > 1)
1347 MIB.addReg(SB.SuperReg, getKillRegState(UseKill) | RegState::Implicit);
1348
1349 // FIXME: Since this spills to another register instead of an actual
1350 // frame index, we should delete the frame index when all references to
1351 // it are fixed.
1352 }
1353 } else {
1354 SB.prepare();
1355
1356 // SubReg carries the "Kill" flag when SubReg == SB.SuperReg.
1357 unsigned SubKillState = getKillRegState((SB.NumSubRegs == 1) && SB.IsKill);
1358
1359 // Per VGPR helper data
1360 auto PVD = SB.getPerVGPRData();
1361
1362 for (unsigned Offset = 0; Offset < PVD.NumVGPRs; ++Offset) {
1363 unsigned TmpVGPRFlags = RegState::Undef;
1364
1365 // Write sub registers into the VGPR
1366 for (unsigned i = Offset * PVD.PerVGPR,
1367 e = std::min((Offset + 1) * PVD.PerVGPR, SB.NumSubRegs);
1368 i < e; ++i) {
1369 Register SubReg =
1370 SB.NumSubRegs == 1
1371 ? SB.SuperReg
1372 : Register(getSubReg(SB.SuperReg, SB.SplitParts[i]));
1373
1374 MachineInstrBuilder WriteLane =
1375 BuildMI(SB.MBB, MI, SB.DL, SB.TII.get(AMDGPU::V_WRITELANE_B32),
1376 SB.TmpVGPR)
1377 .addReg(SubReg, SubKillState)
1378 .addImm(i % PVD.PerVGPR)
1379 .addReg(SB.TmpVGPR, TmpVGPRFlags);
1380 TmpVGPRFlags = 0;
1381
1382 // There could be undef components of a spilled super register.
1383 // TODO: Can we detect this and skip the spill?
1384 if (SB.NumSubRegs > 1) {
1385 // The last implicit use of the SB.SuperReg carries the "Kill" flag.
1386 unsigned SuperKillState = 0;
1387 if (i + 1 == SB.NumSubRegs)
1388 SuperKillState |= getKillRegState(SB.IsKill);
1389 WriteLane.addReg(SB.SuperReg, RegState::Implicit | SuperKillState);
1390 }
1391 }
1392
1393 // Write out VGPR
1394 SB.readWriteTmpVGPR(Offset, /*IsLoad*/ false);
1395 }
1396
1397 SB.restore();
1398 }
1399
1400 MI->eraseFromParent();
1401 SB.MFI.addToSpilledSGPRs(SB.NumSubRegs);
1402 return true;
1403}
1404
1405bool SIRegisterInfo::restoreSGPR(MachineBasicBlock::iterator MI,
1406 int Index,
1407 RegScavenger *RS,
1408 bool OnlyToVGPR) const {
1409 SGPRSpillBuilder SB(*this, *ST.getInstrInfo(), isWave32, MI, Index, RS);
1410
1411 ArrayRef<SIMachineFunctionInfo::SpilledReg> VGPRSpills =
1412 SB.MFI.getSGPRToVGPRSpills(Index);
1413 bool SpillToVGPR = !VGPRSpills.empty();
1414 if (OnlyToVGPR && !SpillToVGPR)
1415 return false;
1416
1417 if (SpillToVGPR) {
1418 for (unsigned i = 0, e = SB.NumSubRegs; i < e; ++i) {
1419 Register SubReg =
1420 SB.NumSubRegs == 1
1421 ? SB.SuperReg
1422 : Register(getSubReg(SB.SuperReg, SB.SplitParts[i]));
1423
1424 SIMachineFunctionInfo::SpilledReg Spill = VGPRSpills[i];
1425 auto MIB =
1426 BuildMI(SB.MBB, MI, SB.DL, SB.TII.get(AMDGPU::V_READLANE_B32), SubReg)
1427 .addReg(Spill.VGPR)
1428 .addImm(Spill.Lane);
1429 if (SB.NumSubRegs > 1 && i == 0)
1430 MIB.addReg(SB.SuperReg, RegState::ImplicitDefine);
1431 }
1432 } else {
1433 SB.prepare();
1434
1435 // Per VGPR helper data
1436 auto PVD = SB.getPerVGPRData();
1437
1438 for (unsigned Offset = 0; Offset < PVD.NumVGPRs; ++Offset) {
1439 // Load in VGPR data
1440 SB.readWriteTmpVGPR(Offset, /*IsLoad*/ true);
1441
1442 // Unpack lanes
1443 for (unsigned i = Offset * PVD.PerVGPR,
1444 e = std::min((Offset + 1) * PVD.PerVGPR, SB.NumSubRegs);
1445 i < e; ++i) {
1446 Register SubReg =
1447 SB.NumSubRegs == 1
1448 ? SB.SuperReg
1449 : Register(getSubReg(SB.SuperReg, SB.SplitParts[i]));
1450
1451 bool LastSubReg = (i + 1 == e);
1452 auto MIB = BuildMI(SB.MBB, MI, SB.DL,
1453 SB.TII.get(AMDGPU::V_READLANE_B32), SubReg)
1454 .addReg(SB.TmpVGPR, getKillRegState(LastSubReg))
1455 .addImm(i);
1456 if (SB.NumSubRegs > 1 && i == 0)
1457 MIB.addReg(SB.SuperReg, RegState::ImplicitDefine);
1458 }
1459 }
1460
1461 SB.restore();
1462 }
1463
1464 MI->eraseFromParent();
1465 return true;
1466}
1467
1468/// Special case of eliminateFrameIndex. Returns true if the SGPR was spilled to
1469/// a VGPR and the stack slot can be safely eliminated when all other users are
1470/// handled.
1471bool SIRegisterInfo::eliminateSGPRToVGPRSpillFrameIndex(
1472 MachineBasicBlock::iterator MI,
1473 int FI,
1474 RegScavenger *RS) const {
1475 switch (MI->getOpcode()) {
1476 case AMDGPU::SI_SPILL_S1024_SAVE:
1477 case AMDGPU::SI_SPILL_S512_SAVE:
1478 case AMDGPU::SI_SPILL_S256_SAVE:
1479 case AMDGPU::SI_SPILL_S192_SAVE:
1480 case AMDGPU::SI_SPILL_S160_SAVE:
1481 case AMDGPU::SI_SPILL_S128_SAVE:
1482 case AMDGPU::SI_SPILL_S96_SAVE:
1483 case AMDGPU::SI_SPILL_S64_SAVE:
1484 case AMDGPU::SI_SPILL_S32_SAVE:
1485 return spillSGPR(MI, FI, RS, true);
1486 case AMDGPU::SI_SPILL_S1024_RESTORE:
1487 case AMDGPU::SI_SPILL_S512_RESTORE:
1488 case AMDGPU::SI_SPILL_S256_RESTORE:
1489 case AMDGPU::SI_SPILL_S192_RESTORE:
1490 case AMDGPU::SI_SPILL_S160_RESTORE:
1491 case AMDGPU::SI_SPILL_S128_RESTORE:
1492 case AMDGPU::SI_SPILL_S96_RESTORE:
1493 case AMDGPU::SI_SPILL_S64_RESTORE:
1494 case AMDGPU::SI_SPILL_S32_RESTORE:
1495 return restoreSGPR(MI, FI, RS, true);
1496 default:
1497 llvm_unreachable("not an SGPR spill instruction")::llvm::llvm_unreachable_internal("not an SGPR spill instruction"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp"
, 1497)
;
1498 }
1499}
1500
1501void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
1502 int SPAdj, unsigned FIOperandNum,
1503 RegScavenger *RS) const {
1504 MachineFunction *MF = MI->getParent()->getParent();
1505 MachineBasicBlock *MBB = MI->getParent();
1506 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1507 MachineFrameInfo &FrameInfo = MF->getFrameInfo();
1508 const SIInstrInfo *TII = ST.getInstrInfo();
1509 DebugLoc DL = MI->getDebugLoc();
1510
1511 assert(SPAdj == 0 && "unhandled SP adjustment in call sequence?");
1512
1513 MachineOperand &FIOp = MI->getOperand(FIOperandNum);
1514 int Index = MI->getOperand(FIOperandNum).getIndex();
1515
1516 Register FrameReg = FrameInfo.isFixedObjectIndex(Index) && hasBasePointer(*MF)
1517 ? getBaseRegister()
1518 : getFrameRegister(*MF);
1519
1520 switch (MI->getOpcode()) {
1521 // SGPR register spill
1522 case AMDGPU::SI_SPILL_S1024_SAVE:
1523 case AMDGPU::SI_SPILL_S512_SAVE:
1524 case AMDGPU::SI_SPILL_S256_SAVE:
1525 case AMDGPU::SI_SPILL_S192_SAVE:
1526 case AMDGPU::SI_SPILL_S160_SAVE:
1527 case AMDGPU::SI_SPILL_S128_SAVE:
1528 case AMDGPU::SI_SPILL_S96_SAVE:
1529 case AMDGPU::SI_SPILL_S64_SAVE:
1530 case AMDGPU::SI_SPILL_S32_SAVE: {
1531 spillSGPR(MI, Index, RS);
1532 break;
1533 }
1534
1535 // SGPR register restore
1536 case AMDGPU::SI_SPILL_S1024_RESTORE:
1537 case AMDGPU::SI_SPILL_S512_RESTORE:
1538 case AMDGPU::SI_SPILL_S256_RESTORE:
1539 case AMDGPU::SI_SPILL_S192_RESTORE:
1540 case AMDGPU::SI_SPILL_S160_RESTORE:
1541 case AMDGPU::SI_SPILL_S128_RESTORE:
1542 case AMDGPU::SI_SPILL_S96_RESTORE:
1543 case AMDGPU::SI_SPILL_S64_RESTORE:
1544 case AMDGPU::SI_SPILL_S32_RESTORE: {
1545 restoreSGPR(MI, Index, RS);
1546 break;
1547 }
1548
1549 // VGPR register spill
1550 case AMDGPU::SI_SPILL_V1024_SAVE:
1551 case AMDGPU::SI_SPILL_V512_SAVE:
1552 case AMDGPU::SI_SPILL_V256_SAVE:
1553 case AMDGPU::SI_SPILL_V192_SAVE:
1554 case AMDGPU::SI_SPILL_V160_SAVE:
1555 case AMDGPU::SI_SPILL_V128_SAVE:
1556 case AMDGPU::SI_SPILL_V96_SAVE:
1557 case AMDGPU::SI_SPILL_V64_SAVE:
1558 case AMDGPU::SI_SPILL_V32_SAVE:
1559 case AMDGPU::SI_SPILL_A1024_SAVE:
1560 case AMDGPU::SI_SPILL_A512_SAVE:
1561 case AMDGPU::SI_SPILL_A256_SAVE:
1562 case AMDGPU::SI_SPILL_A192_SAVE:
1563 case AMDGPU::SI_SPILL_A160_SAVE:
1564 case AMDGPU::SI_SPILL_A128_SAVE:
1565 case AMDGPU::SI_SPILL_A96_SAVE:
1566 case AMDGPU::SI_SPILL_A64_SAVE:
1567 case AMDGPU::SI_SPILL_A32_SAVE: {
1568 const MachineOperand *VData = TII->getNamedOperand(*MI,
1569 AMDGPU::OpName::vdata);
1570 assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg() ==
1571 MFI->getStackPtrOffsetReg());
1572
1573 unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_STORE_DWORD_SADDR
1574 : AMDGPU::BUFFER_STORE_DWORD_OFFSET;
1575 buildSpillLoadStore(MI, Opc,
1576 Index,
1577 VData->getReg(), VData->isKill(),
1578 FrameReg,
1579 TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(),
1580 *MI->memoperands_begin(),
1581 RS);
1582 MFI->addToSpilledVGPRs(getNumSubRegsForSpillOp(MI->getOpcode()));
1583 MI->eraseFromParent();
1584 break;
1585 }
1586 case AMDGPU::SI_SPILL_V32_RESTORE:
1587 case AMDGPU::SI_SPILL_V64_RESTORE:
1588 case AMDGPU::SI_SPILL_V96_RESTORE:
1589 case AMDGPU::SI_SPILL_V128_RESTORE:
1590 case AMDGPU::SI_SPILL_V160_RESTORE:
1591 case AMDGPU::SI_SPILL_V192_RESTORE:
1592 case AMDGPU::SI_SPILL_V256_RESTORE:
1593 case AMDGPU::SI_SPILL_V512_RESTORE:
1594 case AMDGPU::SI_SPILL_V1024_RESTORE:
1595 case AMDGPU::SI_SPILL_A32_RESTORE:
1596 case AMDGPU::SI_SPILL_A64_RESTORE:
1597 case AMDGPU::SI_SPILL_A96_RESTORE:
1598 case AMDGPU::SI_SPILL_A128_RESTORE:
1599 case AMDGPU::SI_SPILL_A160_RESTORE:
1600 case AMDGPU::SI_SPILL_A192_RESTORE:
1601 case AMDGPU::SI_SPILL_A256_RESTORE:
1602 case AMDGPU::SI_SPILL_A512_RESTORE:
1603 case AMDGPU::SI_SPILL_A1024_RESTORE: {
1604 const MachineOperand *VData = TII->getNamedOperand(*MI,
1605 AMDGPU::OpName::vdata);
1606 assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg() ==
1607 MFI->getStackPtrOffsetReg());
1608
1609 unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_LOAD_DWORD_SADDR
1610 : AMDGPU::BUFFER_LOAD_DWORD_OFFSET;
1611 buildSpillLoadStore(MI, Opc,
1612 Index,
1613 VData->getReg(), VData->isKill(),
1614 FrameReg,
1615 TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(),
1616 *MI->memoperands_begin(),
1617 RS);
1618 MI->eraseFromParent();
1619 break;
1620 }
1621
1622 default: {
1623 // Other access to frame index
1624 const DebugLoc &DL = MI->getDebugLoc();
1625
1626 int64_t Offset = FrameInfo.getObjectOffset(Index);
1627 if (ST.enableFlatScratch()) {
1628 if (TII->isFLATScratch(*MI)) {
1629 assert((int16_t)FIOperandNum ==
1630 AMDGPU::getNamedOperandIdx(MI->getOpcode(),
1631 AMDGPU::OpName::saddr));
1632
1633 // The offset is always swizzled; just replace it.
1634 if (FrameReg)
1635 FIOp.ChangeToRegister(FrameReg, false);
1636
1637 if (!Offset)
1638 return;
1639
1640 MachineOperand *OffsetOp =
1641 TII->getNamedOperand(*MI, AMDGPU::OpName::offset);
1642 int64_t NewOffset = Offset + OffsetOp->getImm();
1643 if (TII->isLegalFLATOffset(NewOffset, AMDGPUAS::PRIVATE_ADDRESS,
1644 SIInstrFlags::FlatScratch)) {
1645 OffsetOp->setImm(NewOffset);
1646 if (FrameReg)
1647 return;
1648 Offset = 0;
1649 }
1650
1651 assert(!TII->getNamedOperand(*MI, AMDGPU::OpName::vaddr) &&
1652 "Unexpected vaddr for flat scratch with a FI operand");
1653
1654 // On GFX10 we have ST mode to use no registers for an address.
1655 // Otherwise we need to materialize 0 into an SGPR.
1656 if (!Offset && ST.hasFlatScratchSTMode()) {
1657 unsigned Opc = MI->getOpcode();
1658 unsigned NewOpc = AMDGPU::getFlatScratchInstSTfromSS(Opc);
1659 MI->RemoveOperand(
1660 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::saddr));
1661 MI->setDesc(TII->get(NewOpc));
1662 return;
1663 }
1664 }
1665
1666 if (!FrameReg) {
1667 FIOp.ChangeToImmediate(Offset);
1668 if (TII->isImmOperandLegal(*MI, FIOperandNum, FIOp))
1669 return;
1670 }
1671
1672 // We need to use a register here. Check if we can use an SGPR or need
1673 // a VGPR.
1674 FIOp.ChangeToRegister(AMDGPU::M0, false);
1675 bool UseSGPR = TII->isOperandLegal(*MI, FIOperandNum, &FIOp);
1676
1677 if (!Offset && FrameReg && UseSGPR) {
1678 FIOp.setReg(FrameReg);
1679 return;
1680 }
1681
1682 const TargetRegisterClass *RC = UseSGPR ? &AMDGPU::SReg_32_XM0RegClass
1683 : &AMDGPU::VGPR_32RegClass;
1684
1685 Register TmpReg = RS->scavengeRegister(RC, MI, 0, !UseSGPR);
1686 FIOp.setReg(TmpReg);
1687 FIOp.setIsKill(true);
1688
1689 if ((!FrameReg || !Offset) && TmpReg) {
1690 unsigned Opc = UseSGPR ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1691 auto MIB = BuildMI(*MBB, MI, DL, TII->get(Opc), TmpReg);
1692 if (FrameReg)
1693 MIB.addReg(FrameReg);
1694 else
1695 MIB.addImm(Offset);
1696
1697 return;
1698 }
1699
1700 Register TmpSReg =
1701 UseSGPR ? TmpReg
1702 : RS->scavengeRegister(&AMDGPU::SReg_32_XM0RegClass, MI, 0,
1703 !UseSGPR);
1704
1705 // TODO: for flat scratch another attempt can be made with a VGPR index
1706 // if no SGPRs can be scavenged.
1707 if ((!TmpSReg && !FrameReg) || (!TmpReg && !UseSGPR))
1708 report_fatal_error("Cannot scavenge register in FI elimination!");
1709
1710 if (!TmpSReg) {
1711 // Use frame register and restore it after.
1712 TmpSReg = FrameReg;
1713 FIOp.setReg(FrameReg);
1714 FIOp.setIsKill(false);
1715 }
1716
1717 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), TmpSReg)
1718 .addReg(FrameReg)
1719 .addImm(Offset);
1720
1721 if (!UseSGPR)
1722 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
1723 .addReg(TmpSReg, RegState::Kill);
1724
1725 if (TmpSReg == FrameReg) {
1726 // Undo frame register modification.
1727 BuildMI(*MBB, std::next(MI), DL, TII->get(AMDGPU::S_SUB_U32),
1728 FrameReg)
1729 .addReg(FrameReg)
1730 .addImm(Offset);
1731 }
1732
1733 return;
1734 }
1735
1736 bool IsMUBUF = TII->isMUBUF(*MI);
1737
1738 if (!IsMUBUF && !MFI->isEntryFunction()) {
1739 // Convert to a swizzled stack address by scaling by the wave size.
1740 //
1741 // In an entry function/kernel the offset is already swizzled.
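// Illustrative (assuming wave64): FrameReg holds the swizzled frame base,
// so the per-lane address computed below is (FrameReg >> 6) + Offset.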
1742
1743 bool IsCopy = MI->getOpcode() == AMDGPU::V_MOV_B32_e32;
1744 Register ResultReg =
1745 IsCopy ? MI->getOperand(0).getReg()
1746 : RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
1747
1748 int64_t Offset = FrameInfo.getObjectOffset(Index);
1749 if (Offset == 0) {
1750 // XXX - This never happens because of emergency scavenging slot at 0?
1751 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64), ResultReg)
1752 .addImm(ST.getWavefrontSizeLog2())
1753 .addReg(FrameReg);
1754 } else {
1755 if (auto MIB = TII->getAddNoCarry(*MBB, MI, DL, ResultReg, *RS)) {
1756 // Reuse ResultReg in intermediate step.
1757 Register ScaledReg = ResultReg;
1758
1759 BuildMI(*MBB, *MIB, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64),
1760 ScaledReg)
1761 .addImm(ST.getWavefrontSizeLog2())
1762 .addReg(FrameReg);
1763
1764 const bool IsVOP2 = MIB->getOpcode() == AMDGPU::V_ADD_U32_e32;
1765
1766 // TODO: Fold if use instruction is another add of a constant.
1767 if (IsVOP2 || AMDGPU::isInlinableLiteral32(Offset, ST.hasInv2PiInlineImm())) {
1768 // FIXME: This can fail
1769 MIB.addImm(Offset);
1770 MIB.addReg(ScaledReg, RegState::Kill);
1771 if (!IsVOP2)
1772 MIB.addImm(0); // clamp bit
1773 } else {
1774 assert(MIB->getOpcode() == AMDGPU::V_ADD_CO_U32_e64 &&
1775 "Need to reuse carry out register");
1776
1777 // Use scavenged unused carry out as offset register.
1778 Register ConstOffsetReg;
1779 if (!isWave32)
1780 ConstOffsetReg = getSubReg(MIB.getReg(1), AMDGPU::sub0);
1781 else
1782 ConstOffsetReg = MIB.getReg(1);
1783
1784 BuildMI(*MBB, *MIB, DL, TII->get(AMDGPU::S_MOV_B32), ConstOffsetReg)
1785 .addImm(Offset);
1786 MIB.addReg(ConstOffsetReg, RegState::Kill);
1787 MIB.addReg(ScaledReg, RegState::Kill);
1788 MIB.addImm(0); // clamp bit
1789 }
1790 } else {
1791 // We have to produce a carry out, and there isn't a free SGPR pair
1792 // for it. We can keep the whole computation on the SALU to avoid
1793 // clobbering an additional register at the cost of an extra mov.
1794
1795 // We may have 1 free scratch SGPR even though a carry out is
1796 // unavailable. Only one additional mov is needed.
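// Illustrative worst case (no free SGPR at all; register names and wave64
// assumed): reuse the frame register and undo the damage afterwards:
//   s_lshr_b32 s33, s33, 6        ; unswizzle
//   s_add_u32  s33, s33, <offset>
//   <copy the result into the destination VGPR>
//   s_sub_u32  s33, s33, <offset>
//   s_lshl_b32 s33, s33, 6        ; restore the swizzled frame register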
1797 Register TmpScaledReg =
1798 RS->scavengeRegister(&AMDGPU::SReg_32_XM0RegClass, MI, 0, false);
1799 Register ScaledReg = TmpScaledReg.isValid() ? TmpScaledReg : FrameReg;
1800
1801 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_LSHR_B32), ScaledReg)
1802 .addReg(FrameReg)
1803 .addImm(ST.getWavefrontSizeLog2());
1804 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), ScaledReg)
1805 .addReg(ScaledReg, RegState::Kill)
1806 .addImm(Offset);
1807 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), ResultReg)
1808 .addReg(ScaledReg, RegState::Kill);
1809
1810 // If there were truly no free SGPRs, we need to undo everything.
1811 if (!TmpScaledReg.isValid()) {
1812 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_SUB_U32), ScaledReg)
1813 .addReg(ScaledReg, RegState::Kill)
1814 .addImm(Offset);
1815 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_LSHL_B32), ScaledReg)
1816 .addReg(FrameReg)
1817 .addImm(ST.getWavefrontSizeLog2());
1818 }
1819 }
1820 }
1821
1822 // Don't introduce an extra copy if we're just materializing in a mov.
1823 if (IsCopy)
1824 MI->eraseFromParent();
1825 else
1826 FIOp.ChangeToRegister(ResultReg, false, false, true);
1827 return;
1828 }
1829
1830 if (IsMUBUF) {
1831 // Disable offen so we don't need a 0 vgpr base.
1832 assert(static_cast<int>(FIOperandNum) ==
1833 AMDGPU::getNamedOperandIdx(MI->getOpcode(),
1834 AMDGPU::OpName::vaddr));
1835
1836 auto &SOffset = *TII->getNamedOperand(*MI, AMDGPU::OpName::soffset);
1837 assert((SOffset.isImm() && SOffset.getImm() == 0));
1838
1839 if (FrameReg != AMDGPU::NoRegister)
1840 SOffset.ChangeToRegister(FrameReg, false);
1841
1842 int64_t Offset = FrameInfo.getObjectOffset(Index);
1843 int64_t OldImm
1844 = TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm();
1845 int64_t NewOffset = OldImm + Offset;
1846
1847 if (SIInstrInfo::isLegalMUBUFImmOffset(NewOffset) &&
1848 buildMUBUFOffsetLoadStore(ST, FrameInfo, MI, Index, NewOffset)) {
1849 MI->eraseFromParent();
1850 return;
1851 }
1852 }
1853
1854 // If the offset is simply too big, don't convert to a scratch wave offset
1855 // relative index.
1856
1857 FIOp.ChangeToImmediate(Offset);
1858 if (!TII->isImmOperandLegal(*MI, FIOperandNum, FIOp)) {
1859 Register TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
1860 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
1861 .addImm(Offset);
1862 FIOp.ChangeToRegister(TmpReg, false, false, true);
1863 }
1864 }
1865 }
1866}
1867
1868StringRef SIRegisterInfo::getRegAsmName(MCRegister Reg) const {
1869 return AMDGPUInstPrinter::getRegisterName(Reg);
1870}
1871
1872static const TargetRegisterClass *
1873getAnyVGPRClassForBitWidth(unsigned BitWidth) {
1874 if (BitWidth <= 64)
1875 return &AMDGPU::VReg_64RegClass;
1876 if (BitWidth <= 96)
1877 return &AMDGPU::VReg_96RegClass;
1878 if (BitWidth <= 128)
1879 return &AMDGPU::VReg_128RegClass;
1880 if (BitWidth <= 160)
1881 return &AMDGPU::VReg_160RegClass;
1882 if (BitWidth <= 192)
1883 return &AMDGPU::VReg_192RegClass;
1884 if (BitWidth <= 256)
1885 return &AMDGPU::VReg_256RegClass;
1886 if (BitWidth <= 512)
1887 return &AMDGPU::VReg_512RegClass;
1888 if (BitWidth <= 1024)
1889 return &AMDGPU::VReg_1024RegClass;
1890
1891 return nullptr;
1892}
1893
1894static const TargetRegisterClass *
1895getAlignedVGPRClassForBitWidth(unsigned BitWidth) {
1896 if (BitWidth <= 64)
1897 return &AMDGPU::VReg_64_Align2RegClass;
1898 if (BitWidth <= 96)
1899 return &AMDGPU::VReg_96_Align2RegClass;
1900 if (BitWidth <= 128)
1901 return &AMDGPU::VReg_128_Align2RegClass;
1902 if (BitWidth <= 160)
1903 return &AMDGPU::VReg_160_Align2RegClass;
1904 if (BitWidth <= 192)
1905 return &AMDGPU::VReg_192_Align2RegClass;
1906 if (BitWidth <= 256)
1907 return &AMDGPU::VReg_256_Align2RegClass;
1908 if (BitWidth <= 512)
1909 return &AMDGPU::VReg_512_Align2RegClass;
1910 if (BitWidth <= 1024)
1911 return &AMDGPU::VReg_1024_Align2RegClass;
1912
1913 return nullptr;
1914}
1915
1916const TargetRegisterClass *
1917SIRegisterInfo::getVGPRClassForBitWidth(unsigned BitWidth) const {
1918 if (BitWidth == 1)
1919 return &AMDGPU::VReg_1RegClass;
1920 if (BitWidth <= 16)
1921 return &AMDGPU::VGPR_LO16RegClass;
1922 if (BitWidth <= 32)
1923 return &AMDGPU::VGPR_32RegClass;
1924 return ST.needsAlignedVGPRs() ? getAlignedVGPRClassForBitWidth(BitWidth)
1925 : getAnyVGPRClassForBitWidth(BitWidth);
1926}
1927
1928static const TargetRegisterClass *
1929getAnyAGPRClassForBitWidth(unsigned BitWidth) {
1930 if (BitWidth <= 64)
1931 return &AMDGPU::AReg_64RegClass;
1932 if (BitWidth <= 96)
1933 return &AMDGPU::AReg_96RegClass;
1934 if (BitWidth <= 128)
1935 return &AMDGPU::AReg_128RegClass;
1936 if (BitWidth <= 160)
1937 return &AMDGPU::AReg_160RegClass;
1938 if (BitWidth <= 192)
1939 return &AMDGPU::AReg_192RegClass;
1940 if (BitWidth <= 256)
1941 return &AMDGPU::AReg_256RegClass;
1942 if (BitWidth <= 512)
1943 return &AMDGPU::AReg_512RegClass;
1944 if (BitWidth <= 1024)
1945 return &AMDGPU::AReg_1024RegClass;
1946
1947 return nullptr;
1948}
1949
1950static const TargetRegisterClass *
1951getAlignedAGPRClassForBitWidth(unsigned BitWidth) {
1952 if (BitWidth <= 64)
1953 return &AMDGPU::AReg_64_Align2RegClass;
1954 if (BitWidth <= 96)
1955 return &AMDGPU::AReg_96_Align2RegClass;
1956 if (BitWidth <= 128)
1957 return &AMDGPU::AReg_128_Align2RegClass;
1958 if (BitWidth <= 160)
1959 return &AMDGPU::AReg_160_Align2RegClass;
1960 if (BitWidth <= 192)
1961 return &AMDGPU::AReg_192_Align2RegClass;
1962 if (BitWidth <= 256)
1963 return &AMDGPU::AReg_256_Align2RegClass;
1964 if (BitWidth <= 512)
1965 return &AMDGPU::AReg_512_Align2RegClass;
1966 if (BitWidth <= 1024)
1967 return &AMDGPU::AReg_1024_Align2RegClass;
1968
1969 return nullptr;
1970}
1971
1972const TargetRegisterClass *
1973SIRegisterInfo::getAGPRClassForBitWidth(unsigned BitWidth) const {
1974 if (BitWidth <= 16)
1975 return &AMDGPU::AGPR_LO16RegClass;
1976 if (BitWidth <= 32)
1977 return &AMDGPU::AGPR_32RegClass;
1978 return ST.needsAlignedVGPRs() ? getAlignedAGPRClassForBitWidth(BitWidth)
1979 : getAnyAGPRClassForBitWidth(BitWidth);
1980}
1981
1982const TargetRegisterClass *
1983SIRegisterInfo::getSGPRClassForBitWidth(unsigned BitWidth) {
1984 if (BitWidth <= 16)
1985 return &AMDGPU::SGPR_LO16RegClass;
1986 if (BitWidth <= 32)
1987 return &AMDGPU::SReg_32RegClass;
1988 if (BitWidth <= 64)
1989 return &AMDGPU::SReg_64RegClass;
1990 if (BitWidth <= 96)
1991 return &AMDGPU::SGPR_96RegClass;
1992 if (BitWidth <= 128)
1993 return &AMDGPU::SGPR_128RegClass;
1994 if (BitWidth <= 160)
1995 return &AMDGPU::SGPR_160RegClass;
1996 if (BitWidth <= 192)
1997 return &AMDGPU::SGPR_192RegClass;
1998 if (BitWidth <= 256)
1999 return &AMDGPU::SGPR_256RegClass;
2000 if (BitWidth <= 512)
2001 return &AMDGPU::SGPR_512RegClass;
2002 if (BitWidth <= 1024)
2003 return &AMDGPU::SGPR_1024RegClass;
2004
2005 return nullptr;
2006}
2007
2008// FIXME: This is very slow. It might be worth creating a map from physreg to
2009// register class.
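// A possible direction (sketch only; names assumed, not part of this
// file): build a DenseMap<MCRegister, const TargetRegisterClass *> over
// BaseClasses once, then answer getPhysRegClass() queries with a single
// hash lookup instead of scanning every class.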
2010const TargetRegisterClass *
2011SIRegisterInfo::getPhysRegClass(MCRegister Reg) const {
2012 static const TargetRegisterClass *const BaseClasses[] = {
2013 &AMDGPU::VGPR_LO16RegClass,
2014 &AMDGPU::VGPR_HI16RegClass,
2015 &AMDGPU::SReg_LO16RegClass,
2016 &AMDGPU::AGPR_LO16RegClass,
2017 &AMDGPU::VGPR_32RegClass,
2018 &AMDGPU::SReg_32RegClass,
2019 &AMDGPU::AGPR_32RegClass,
2021 &AMDGPU::VReg_64_Align2RegClass,
2022 &AMDGPU::VReg_64RegClass,
2023 &AMDGPU::SReg_64RegClass,
2024 &AMDGPU::AReg_64_Align2RegClass,
2025 &AMDGPU::AReg_64RegClass,
2026 &AMDGPU::VReg_96_Align2RegClass,
2027 &AMDGPU::VReg_96RegClass,
2028 &AMDGPU::SReg_96RegClass,
2029 &AMDGPU::AReg_96_Align2RegClass,
2030 &AMDGPU::AReg_96RegClass,
2031 &AMDGPU::VReg_128_Align2RegClass,
2032 &AMDGPU::VReg_128RegClass,
2033 &AMDGPU::SReg_128RegClass,
2034 &AMDGPU::AReg_128_Align2RegClass,
2035 &AMDGPU::AReg_128RegClass,
2036 &AMDGPU::VReg_160_Align2RegClass,
2037 &AMDGPU::VReg_160RegClass,
2038 &AMDGPU::SReg_160RegClass,
2039 &AMDGPU::AReg_160_Align2RegClass,
2040 &AMDGPU::AReg_160RegClass,
2041 &AMDGPU::VReg_192_Align2RegClass,
2042 &AMDGPU::VReg_192RegClass,
2043 &AMDGPU::SReg_192RegClass,
2044 &AMDGPU::AReg_192_Align2RegClass,
2045 &AMDGPU::AReg_192RegClass,
2046 &AMDGPU::VReg_256_Align2RegClass,
2047 &AMDGPU::VReg_256RegClass,
2048 &AMDGPU::SReg_256RegClass,
2049 &AMDGPU::AReg_256_Align2RegClass,
2050 &AMDGPU::AReg_256RegClass,
2051 &AMDGPU::VReg_512_Align2RegClass,
2052 &AMDGPU::VReg_512RegClass,
2053 &AMDGPU::SReg_512RegClass,
2054 &AMDGPU::AReg_512_Align2RegClass,
2055 &AMDGPU::AReg_512RegClass,
2056 &AMDGPU::SReg_1024RegClass,
2057 &AMDGPU::VReg_1024_Align2RegClass,
2058 &AMDGPU::VReg_1024RegClass,
2059 &AMDGPU::AReg_1024_Align2RegClass,
2060 &AMDGPU::AReg_1024RegClass,
2061 &AMDGPU::SCC_CLASSRegClass,
2062 &AMDGPU::Pseudo_SReg_32RegClass,
2063 &AMDGPU::Pseudo_SReg_128RegClass,
2064 };
2065
2066 for (const TargetRegisterClass *BaseClass : BaseClasses) {
2067 if (BaseClass->contains(Reg)) {
2068 return BaseClass;
2069 }
2070 }
2071 return nullptr;
2072}
2073
2074bool SIRegisterInfo::isSGPRReg(const MachineRegisterInfo &MRI,
2075 Register Reg) const {
2076 const TargetRegisterClass *RC;
2077 if (Reg.isVirtual())
2078 RC = MRI.getRegClass(Reg);
2079 else
2080 RC = getPhysRegClass(Reg);
2081 return isSGPRClass(RC);
2082}
2083
2084// TODO: It might be helpful to have some target specific flags in
2085// TargetRegisterClass to mark which classes are VGPRs to make this trivial.
2086bool SIRegisterInfo::hasVGPRs(const TargetRegisterClass *RC) const {
2087 unsigned Size = getRegSizeInBits(*RC);
2088 if (Size == 16) {
2089 return getCommonSubClass(&AMDGPU::VGPR_LO16RegClass, RC) != nullptr ||
2090 getCommonSubClass(&AMDGPU::VGPR_HI16RegClass, RC) != nullptr;
2091 }
2092 const TargetRegisterClass *VRC = getVGPRClassForBitWidth(Size);
2093 if (!VRC) {
2094 assert(Size < 32 && "Invalid register class size");
2095 return false;
2096 }
2097 return getCommonSubClass(VRC, RC) != nullptr;
2098}
2099
2100bool SIRegisterInfo::hasAGPRs(const TargetRegisterClass *RC) const {
2101 unsigned Size = getRegSizeInBits(*RC);
2102 if (Size < 16)
2103 return false;
2104 const TargetRegisterClass *ARC = getAGPRClassForBitWidth(Size);
2105 if (!ARC) {
2106 assert(getVGPRClassForBitWidth(Size) && "Invalid register class size");
2107 return false;
2108 }
2109 return getCommonSubClass(ARC, RC) != nullptr;
2110}
2111
2112const TargetRegisterClass *
2113SIRegisterInfo::getEquivalentVGPRClass(const TargetRegisterClass *SRC) const {
2114 unsigned Size = getRegSizeInBits(*SRC);
2115 const TargetRegisterClass *VRC = getVGPRClassForBitWidth(Size);
2116 assert(VRC && "Invalid register class size");
2117 return VRC;
2118}
2119
2120const TargetRegisterClass *
2121SIRegisterInfo::getEquivalentAGPRClass(const TargetRegisterClass *SRC) const {
2122 unsigned Size = getRegSizeInBits(*SRC);
2123 const TargetRegisterClass *ARC = getAGPRClassForBitWidth(Size);
2124 assert(ARC && "Invalid register class size");
2125 return ARC;
2126}
2127
2128const TargetRegisterClass *
2129SIRegisterInfo::getEquivalentSGPRClass(const TargetRegisterClass *VRC) const {
2130 unsigned Size = getRegSizeInBits(*VRC);
2131 if (Size == 32)
2132 return &AMDGPU::SGPR_32RegClass;
2133 const TargetRegisterClass *SRC = getSGPRClassForBitWidth(Size);
2134 assert(SRC && "Invalid register class size");
2135 return SRC;
2136}
2137
2138const TargetRegisterClass *SIRegisterInfo::getSubRegClass(
2139 const TargetRegisterClass *RC, unsigned SubIdx) const {
2140 if (SubIdx == AMDGPU::NoSubRegister)
2141 return RC;
2142
2143 // We can assume that each lane corresponds to one 32-bit register.
2144 unsigned Size = getNumChannelsFromSubReg(SubIdx) * 32;
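// E.g. (illustrative): a two-channel index such as sub0_sub1 gives
// Size = 64, so a VGPR super-class maps to the 64-bit VGPR class below.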
2145 if (isSGPRClass(RC)) {
2146 if (Size == 32)
2147 RC = &AMDGPU::SGPR_32RegClass;
2148 else
2149 RC = getSGPRClassForBitWidth(Size);
2150 } else if (hasAGPRs(RC)) {
2151 RC = getAGPRClassForBitWidth(Size);
2152 } else {
2153 RC = getVGPRClassForBitWidth(Size);
2154 }
2155 assert(RC && "Invalid sub-register class size");
2156 return RC;
2157}
2158
2159const TargetRegisterClass *
2160SIRegisterInfo::getCompatibleSubRegClass(const TargetRegisterClass *SuperRC,
2161 const TargetRegisterClass *SubRC,
2162 unsigned SubIdx) const {
2163 // Ensure this subregister index is aligned in the super register.
2164 const TargetRegisterClass *MatchRC =
2165 getMatchingSuperRegClass(SuperRC, SubRC, SubIdx);
2166 return MatchRC && MatchRC->hasSubClassEq(SuperRC) ? MatchRC : nullptr;
2167}
2168
2169bool SIRegisterInfo::opCanUseInlineConstant(unsigned OpType) const {
2170 if (OpType >= AMDGPU::OPERAND_REG_INLINE_AC_FIRST &&
2171 OpType <= AMDGPU::OPERAND_REG_INLINE_AC_LAST)
2172 return !ST.hasMFMAInlineLiteralBug();
2173
2174 return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
2175 OpType <= AMDGPU::OPERAND_SRC_LAST;
2176}
2177
2178bool SIRegisterInfo::shouldRewriteCopySrc(
2179 const TargetRegisterClass *DefRC,
2180 unsigned DefSubReg,
2181 const TargetRegisterClass *SrcRC,
2182 unsigned SrcSubReg) const {
2183 // We want to prefer the smallest register class possible, so we don't want to
2184 // stop and rewrite on anything that looks like a subregister
2185 // extract. Operations mostly don't care about the super register class, so we
2186 // only want to stop on the most basic of copies between the same register
2187 // class.
2188 //
2189 // e.g. if we have something like
2190 // %0 = ...
2191 // %1 = ...
2192 // %2 = REG_SEQUENCE %0, sub0, %1, sub1, %2, sub2
2193 // %3 = COPY %2, sub0
2194 //
2195 // We want to look through the COPY to find:
2196 // => %3 = COPY %0
2197
2198 // Plain copy.
2199 return getCommonSubClass(DefRC, SrcRC) != nullptr;
2200}
2201
2202bool SIRegisterInfo::opCanUseLiteralConstant(unsigned OpType) const {
2203 // TODO: 64-bit operands have extending behavior from 32-bit literal.
2204 return OpType >= AMDGPU::OPERAND_REG_IMM_FIRST &&
2205 OpType <= AMDGPU::OPERAND_REG_IMM_LAST;
2206}
2207
2208 /// Returns the lowest register that is not used at any point in the
2209 /// function. If all registers are used, then this function will return
2210 /// AMDGPU::NoRegister. If \p ReserveHighestVGPR = true, then return the
2211 /// highest unused register.
2212MCRegister SIRegisterInfo::findUnusedRegister(const MachineRegisterInfo &MRI,
2213 const TargetRegisterClass *RC,
2214 const MachineFunction &MF,
2215 bool ReserveHighestVGPR) const {
2216 if (ReserveHighestVGPR) {
2217 for (MCRegister Reg : reverse(*RC))
2218 if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg))
2219 return Reg;
2220 } else {
2221 for (MCRegister Reg : *RC)
2222 if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg))
2223 return Reg;
2224 }
2225 return MCRegister();
2226}
2227
2228ArrayRef<int16_t> SIRegisterInfo::getRegSplitParts(const TargetRegisterClass *RC,
2229 unsigned EltSize) const {
2230 const unsigned RegBitWidth = AMDGPU::getRegBitWidth(*RC->MC);
2231 assert(RegBitWidth >= 32 && RegBitWidth <= 1024);
2232
2233 const unsigned RegDWORDs = RegBitWidth / 32;
2234 const unsigned EltDWORDs = EltSize / 4;
2235 assert(RegSplitParts.size() + 1 >= EltDWORDs);
2236
2237 const std::vector<int16_t> &Parts = RegSplitParts[EltDWORDs - 1];
2238 const unsigned NumParts = RegDWORDs / EltDWORDs;
2239
2240 return makeArrayRef(Parts.data(), NumParts);
2241}
2242
2243const TargetRegisterClass*
2244SIRegisterInfo::getRegClassForReg(const MachineRegisterInfo &MRI,
2245 Register Reg) const {
2246 return Reg.isVirtual() ? MRI.getRegClass(Reg) : getPhysRegClass(Reg);
2247}
2248
2249bool SIRegisterInfo::isVGPR(const MachineRegisterInfo &MRI,
2250 Register Reg) const {
2251 const TargetRegisterClass *RC = getRegClassForReg(MRI, Reg);
2252 // Registers without classes are unaddressable, SGPR-like registers.
2253 return RC && hasVGPRs(RC);
2254}
2255
2256bool SIRegisterInfo::isAGPR(const MachineRegisterInfo &MRI,
2257 Register Reg) const {
2258 const TargetRegisterClass *RC = getRegClassForReg(MRI, Reg);
2259
2260 // Registers without classes are unaddressable, SGPR-like registers.
2261 return RC && hasAGPRs(RC);
2262}
2263
2264bool SIRegisterInfo::shouldCoalesce(MachineInstr *MI,
2265 const TargetRegisterClass *SrcRC,
2266 unsigned SubReg,
2267 const TargetRegisterClass *DstRC,
2268 unsigned DstSubReg,
2269 const TargetRegisterClass *NewRC,
2270 LiveIntervals &LIS) const {
2271 unsigned SrcSize = getRegSizeInBits(*SrcRC);
2272 unsigned DstSize = getRegSizeInBits(*DstRC);
2273 unsigned NewSize = getRegSizeInBits(*NewRC);
2274
2275 // Do not increase the size of registers beyond a dword; we would need to
2276 // allocate adjacent registers and constrain regalloc more than needed.
2277
2278 // Always allow dword coalescing.
2279 if (SrcSize <= 32 || DstSize <= 32)
2280 return true;
2281
2282 return NewSize <= DstSize || NewSize <= SrcSize;
2283}
2284
2285unsigned SIRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
2286 MachineFunction &MF) const {
2287 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
2288
2289 unsigned Occupancy = ST.getOccupancyWithLocalMemSize(MFI->getLDSSize(),
2290 MF.getFunction());
2291 switch (RC->getID()) {
2292 default:
2293 return AMDGPUGenRegisterInfo::getRegPressureLimit(RC, MF);
2294 case AMDGPU::VGPR_32RegClassID:
2295 case AMDGPU::VGPR_LO16RegClassID:
2296 case AMDGPU::VGPR_HI16RegClassID:
2297 return std::min(ST.getMaxNumVGPRs(Occupancy), ST.getMaxNumVGPRs(MF));
2298 case AMDGPU::SGPR_32RegClassID:
2299 case AMDGPU::SGPR_LO16RegClassID:
2300 return std::min(ST.getMaxNumSGPRs(Occupancy, true), ST.getMaxNumSGPRs(MF));
2301 }
2302}
2303
2304unsigned SIRegisterInfo::getRegPressureSetLimit(const MachineFunction &MF,
2305 unsigned Idx) const {
2306 if (Idx == AMDGPU::RegisterPressureSets::VGPR_32 ||
2307 Idx == AMDGPU::RegisterPressureSets::AGPR_32)
2308 return getRegPressureLimit(&AMDGPU::VGPR_32RegClass,
2309 const_cast<MachineFunction &>(MF));
2310
2311 if (Idx == AMDGPU::RegisterPressureSets::SReg_32)
2312 return getRegPressureLimit(&AMDGPU::SGPR_32RegClass,
2313 const_cast<MachineFunction &>(MF));
2314
2315 llvm_unreachable("Unexpected register pressure set!")::llvm::llvm_unreachable_internal("Unexpected register pressure set!"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp"
, 2315)
;
2316}
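// Note (editorial observation): the AGPR_32 pressure set is deliberately
// answered with the VGPR_32 limit above, presumably because accumulator
// registers are budgeted against the same per-wave register count on the
// subtargets that have them.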
2317
2318const int *SIRegisterInfo::getRegUnitPressureSets(unsigned RegUnit) const {
2319 static const int Empty[] = { -1 };
2320
2321 if (RegPressureIgnoredUnits[RegUnit])
2322 return Empty;
2323
2324 return AMDGPUGenRegisterInfo::getRegUnitPressureSets(RegUnit);
2325}
2326
2327MCRegister SIRegisterInfo::getReturnAddressReg(const MachineFunction &MF) const {
2328 // Not a callee-saved register.
2329 return AMDGPU::SGPR30_SGPR31;
2330}
2331
2332const TargetRegisterClass *
2333SIRegisterInfo::getRegClassForSizeOnBank(unsigned Size,
2334 const RegisterBank &RB,
2335 const MachineRegisterInfo &MRI) const {
2336 switch (RB.getID()) {
2337 case AMDGPU::VGPRRegBankID:
2338 return getVGPRClassForBitWidth(std::max(32u, Size));
2339 case AMDGPU::VCCRegBankID:
2340 assert(Size == 1);
2341 return isWave32 ? &AMDGPU::SReg_32_XM0_XEXECRegClass
2342 : &AMDGPU::SReg_64_XEXECRegClass;
2343 case AMDGPU::SGPRRegBankID:
2344 return getSGPRClassForBitWidth(std::max(32u, Size));
2345 case AMDGPU::AGPRRegBankID:
2346 return getAGPRClassForBitWidth(std::max(32u, Size));
2347 default:
2348 llvm_unreachable("unknown register bank")::llvm::llvm_unreachable_internal("unknown register bank", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp"
, 2348)
;
2349 }
2350}
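// Typical queries (illustrative sketch): a 1-bit value on the VCC bank maps
// to the wave-size-dependent condition class, while e.g. a 96-bit value on
// the VGPR bank goes through getVGPRClassForBitWidth(96); the
// std::max(32u, Size) clamp keeps sub-dword sizes on a 32-bit class.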
2351
2352const TargetRegisterClass *
2353SIRegisterInfo::getConstrainedRegClassForOperand(const MachineOperand &MO,
2354 const MachineRegisterInfo &MRI) const {
2355 const RegClassOrRegBank &RCOrRB = MRI.getRegClassOrRegBank(MO.getReg());
2356 if (const RegisterBank *RB = RCOrRB.dyn_cast<const RegisterBank*>())
2357 return getRegClassForTypeOnBank(MRI.getType(MO.getReg()), *RB, MRI);
2358
2359 const TargetRegisterClass *RC = RCOrRB.get<const TargetRegisterClass*>();
2360 return getAllocatableClass(RC);
2361}
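// Note (explanatory): during GlobalISel a virtual register may still carry
// a RegisterBank instead of a TargetRegisterClass; the bank case is
// resolved through the operand's LLT type above, and an already-assigned
// class is narrowed to its allocatable subset.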
2362
2363MCRegister SIRegisterInfo::getVCC() const {
2364 return isWave32 ? AMDGPU::VCC_LO : AMDGPU::VCC;
2365}
2366
2367const TargetRegisterClass *SIRegisterInfo::getVGPR64Class() const {
2368 // VGPR tuples have an alignment requirement on gfx90a variants.
2369 return ST.needsAlignedVGPRs() ? &AMDGPU::VReg_64_Align2RegClass
2370 : &AMDGPU::VReg_64RegClass;
2371}
2372
2373const TargetRegisterClass *
2374SIRegisterInfo::getRegClass(unsigned RCID) const {
2375 switch ((int)RCID) {
2376 case AMDGPU::SReg_1RegClassID:
2377 return getBoolRC();
2378 case AMDGPU::SReg_1_XEXECRegClassID:
2379 return isWave32 ? &AMDGPU::SReg_32_XM0_XEXECRegClass
2380 : &AMDGPU::SReg_64_XEXECRegClass;
2381 case -1:
2382 return nullptr;
2383 default:
2384 return AMDGPUGenRegisterInfo::getRegClass(RCID);
2385 }
2386}
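// Note (explanatory): SReg_1 and SReg_1_XEXEC are pseudo classes for i1
// values; they are resolved here to the 32- or 64-bit scalar class matching
// the wave size, and an RCID of -1 (no class) yields nullptr.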
2387
2388 // Find the reaching register definition.
2389MachineInstr *SIRegisterInfo::findReachingDef(Register Reg, unsigned SubReg,
2390 MachineInstr &Use,
2391 MachineRegisterInfo &MRI,
2392 LiveIntervals *LIS) const {
2393 auto &MDT = LIS->getAnalysis<MachineDominatorTree>();
2394 SlotIndex UseIdx = LIS->getInstructionIndex(Use);
2395 SlotIndex DefIdx;
2396
2397 if (Reg.isVirtual()) {
2398 if (!LIS->hasInterval(Reg))
2399 return nullptr;
2400 LiveInterval &LI = LIS->getInterval(Reg);
2401 LaneBitmask SubLanes = SubReg ? getSubRegIndexLaneMask(SubReg)
2402 : MRI.getMaxLaneMaskForVReg(Reg);
2403 VNInfo *V = nullptr;
2404 if (LI.hasSubRanges()) {
2405 for (auto &S : LI.subranges()) {
2406 if ((S.LaneMask & SubLanes) == SubLanes) {
2407 V = S.getVNInfoAt(UseIdx);
2408 break;
2409 }
2410 }
2411 } else {
2412 V = LI.getVNInfoAt(UseIdx);
2413 }
2414 if (!V)
2415 return nullptr;
2416 DefIdx = V->def;
2417 } else {
2418 // Find last def.
2419 for (MCRegUnitIterator Units(Reg.asMCReg(), this); Units.isValid();
2420 ++Units) {
2421 LiveRange &LR = LIS->getRegUnit(*Units);
2422 if (VNInfo *V = LR.getVNInfoAt(UseIdx)) {
2423 if (!DefIdx.isValid() ||
2424 MDT.dominates(LIS->getInstructionFromIndex(DefIdx),
2425 LIS->getInstructionFromIndex(V->def)))
2426 DefIdx = V->def;
2427 } else {
2428 return nullptr;
2429 }
2430 }
2431 }
2432
2433 MachineInstr *Def = LIS->getInstructionFromIndex(DefIdx);
2434
2435 if (!Def || !MDT.dominates(Def, &Use))
2436 return nullptr;
2437
2438 assert(Def->modifiesRegister(Reg, this));
2439
2440 return Def;
2441}
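// Summary of the lookup above (explanatory, not original): virtual
// registers resolve through their live interval, consulting subranges when
// only some lanes are read; physical registers walk each register unit and
// keep the most recent dominating def. Either way the chosen def must
// dominate the use, otherwise nullptr is returned.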
2442
2443MCPhysReg SIRegisterInfo::get32BitRegister(MCPhysReg Reg) const {
2444 assert(getRegSizeInBits(*getPhysRegClass(Reg)) <= 32);
2445
2446 for (const TargetRegisterClass &RC : { AMDGPU::VGPR_32RegClass,
2447 AMDGPU::SReg_32RegClass,
2448 AMDGPU::AGPR_32RegClass } ) {
2449 if (MCPhysReg Super = getMatchingSuperReg(Reg, AMDGPU::lo16, &RC))
2450 return Super;
2451 }
2452 if (MCPhysReg Super = getMatchingSuperReg(Reg, AMDGPU::hi16,
2453 &AMDGPU::VGPR_32RegClass)) {
2454 return Super;
2455 }
2456
2457 return AMDGPU::NoRegister;
2458}
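// Example mapping (illustrative; register names assumed from the AMDGPU
// definitions): VGPR0_LO16 and VGPR0_HI16 both map to VGPR0, and SGPR0_LO16
// maps to SGPR0; only VGPRs have an addressable hi16 half, hence the extra
// hi16 probe against VGPR_32 alone.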
2459
2460bool SIRegisterInfo::isProperlyAlignedRC(const TargetRegisterClass &RC) const {
2461 if (!ST.needsAlignedVGPRs())
2462 return true;
2463
2464 if (hasVGPRs(&RC))
2465 return RC.hasSuperClassEq(getVGPRClassForBitWidth(getRegSizeInBits(RC)));
2466 if (hasAGPRs(&RC))
2467 return RC.hasSuperClassEq(getAGPRClassForBitWidth(getRegSizeInBits(RC)));
2468
2469 return true;
2470}
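// Context (explanatory): on subtargets that require aligned VGPRs (such as
// gfx90a, per the comment on getVGPR64Class above), register tuples must
// start at an even-aligned register, so only the aligned class variants
// returned by getVGPRClassForBitWidth/getAGPRClassForBitWidth count as
// properly aligned.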
2471
2472bool SIRegisterInfo::isConstantPhysReg(MCRegister PhysReg) const {
2473 switch (PhysReg) {
2474 case AMDGPU::SGPR_NULL:
2475 case AMDGPU::SRC_SHARED_BASE:
2476 case AMDGPU::SRC_PRIVATE_BASE:
2477 case AMDGPU::SRC_SHARED_LIMIT:
2478 case AMDGPU::SRC_PRIVATE_LIMIT:
2479 return true;
2480 default:
2481 return false;
2482 }
2483}
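// Note (explanatory): these are read-only hardware registers (the null
// register and the shared/private aperture base and limit registers), so
// their contents are invariant within a function and reads never need to
// be treated as clobbered.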
2484
2485ArrayRef<MCPhysReg>
2486SIRegisterInfo::getAllSGPR128(const MachineFunction &MF) const {
2487 return makeArrayRef(AMDGPU::SGPR_128RegClass.begin(),
2488 ST.getMaxNumSGPRs(MF) / 4);
2489}
2490
2491ArrayRef<MCPhysReg>
2492SIRegisterInfo::getAllSGPR64(const MachineFunction &MF) const {
2493 return makeArrayRef(AMDGPU::SGPR_64RegClass.begin(),
2494 ST.getMaxNumSGPRs(MF) / 2);
2495}
2496
2497ArrayRef<MCPhysReg>
2498SIRegisterInfo::getAllSGPR32(const MachineFunction &MF) const {
2499 return makeArrayRef(AMDGPU::SGPR_32RegClass.begin(), ST.getMaxNumSGPRs(MF));
2500}
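// Note on the three accessors above (explanatory): each returns the run of
// SGPR tuples addressable by this function, so the element count is the
// SGPR budget divided by the tuple width in dwords: 4 for 128-bit, 2 for
// 64-bit, and 1 for 32-bit registers.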