File: llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
Warning: line 897, column 5: Value stored to 'Desc' is never read
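The flagged dead store is in buildSpillLoadStore(). The condensed excerpt below is drawn from the listing that follows (unrelated code elided with `...`); it shows why the value written to 'Desc' at line 897 is never read: the per-subregister loop reassigns 'Desc' unconditionally at line 908 before any use, and the only read of 'Desc' after line 897 comes later in that loop (line 997).

    const MCInstrDesc *Desc = &TII->get(LoadStoreOp);   // line 808
    ...
    if (IsFlat && SOffset == AMDGPU::NoRegister) {       // line 891
      LoadStoreOp = AMDGPU::getFlatScratchInstSTfromSS(LoadStoreOp);
      Desc = &TII->get(LoadStoreOp);                     // line 897: value never read
    }
    ...
    for (unsigned i = 0, e = NumSubRegs + NumRemSubRegs, RegOffset = 0; i != e;
         ++i, RegOffset += EltSize) {
      ...
      Desc = &TII->get(LoadStoreOp);                     // line 908: unconditional overwrite
      ...
      auto MIB = BuildMI(*MBB, MI, DL, *Desc)            // line 997: first read after line 897
      ...
    }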
1 | //===-- SIRegisterInfo.cpp - SI Register Information ---------------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | /// \file |
10 | /// SI implementation of the TargetRegisterInfo class. |
11 | // |
12 | //===----------------------------------------------------------------------===// |
13 | |
14 | #include "SIRegisterInfo.h" |
15 | #include "AMDGPU.h" |
16 | #include "AMDGPURegisterBankInfo.h" |
17 | #include "GCNSubtarget.h" |
18 | #include "MCTargetDesc/AMDGPUInstPrinter.h" |
19 | #include "MCTargetDesc/AMDGPUMCTargetDesc.h" |
20 | #include "SIMachineFunctionInfo.h" |
21 | #include "llvm/CodeGen/LiveIntervals.h" |
22 | #include "llvm/CodeGen/MachineDominators.h" |
23 | #include "llvm/CodeGen/RegisterScavenging.h" |
24 | |
25 | using namespace llvm; |
26 | |
27 | #define GET_REGINFO_TARGET_DESC |
28 | #include "AMDGPUGenRegisterInfo.inc" |
29 | |
30 | static cl::opt<bool> EnableSpillSGPRToVGPR( |
31 | "amdgpu-spill-sgpr-to-vgpr", |
32 | cl::desc("Enable spilling SGPRs to VGPRs"), |
33 | cl::ReallyHidden, |
34 | cl::init(true)); |
35 | |
36 | std::array<std::vector<int16_t>, 16> SIRegisterInfo::RegSplitParts; |
37 | std::array<std::array<uint16_t, 32>, 9> SIRegisterInfo::SubRegFromChannelTable; |
38 | |
39 | // Map numbers of DWORDs to indexes in SubRegFromChannelTable. |
40 | // Valid indexes are shifted 1, such that a 0 mapping means unsupported. |
41 | // e.g. for 8 DWORDs (256-bit), SubRegFromChannelTableWidthMap[8] = 8, |
42 | // meaning index 7 in SubRegFromChannelTable. |
43 | static const std::array<unsigned, 17> SubRegFromChannelTableWidthMap = { |
44 | 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 9}; |
45 | |
46 | SIRegisterInfo::SIRegisterInfo(const GCNSubtarget &ST) |
47 | : AMDGPUGenRegisterInfo(AMDGPU::PC_REG, ST.getAMDGPUDwarfFlavour()), ST(ST), |
48 | SpillSGPRToVGPR(EnableSpillSGPRToVGPR), isWave32(ST.isWave32()) { |
49 | |
50 | assert(getSubRegIndexLaneMask(AMDGPU::sub0).getAsInteger() == 3 && |
51 | getSubRegIndexLaneMask(AMDGPU::sub31).getAsInteger() == (3ULL << 62) && |
52 | (getSubRegIndexLaneMask(AMDGPU::lo16) | |
53 | getSubRegIndexLaneMask(AMDGPU::hi16)).getAsInteger() == |
54 | getSubRegIndexLaneMask(AMDGPU::sub0).getAsInteger() && |
55 | "getNumCoveredRegs() will not work with generated subreg masks!"); |
56 | |
57 | RegPressureIgnoredUnits.resize(getNumRegUnits()); |
58 | RegPressureIgnoredUnits.set( |
59 | *MCRegUnitIterator(MCRegister::from(AMDGPU::M0), this)); |
60 | for (auto Reg : AMDGPU::VGPR_HI16RegClass) |
61 | RegPressureIgnoredUnits.set(*MCRegUnitIterator(Reg, this)); |
62 | |
63 | // HACK: Until this is fully tablegen'd. |
64 | static llvm::once_flag InitializeRegSplitPartsFlag; |
65 | |
66 | static auto InitializeRegSplitPartsOnce = [this]() { |
67 | for (unsigned Idx = 1, E = getNumSubRegIndices() - 1; Idx < E; ++Idx) { |
68 | unsigned Size = getSubRegIdxSize(Idx); |
69 | if (Size & 31) |
70 | continue; |
71 | std::vector<int16_t> &Vec = RegSplitParts[Size / 32 - 1]; |
72 | unsigned Pos = getSubRegIdxOffset(Idx); |
73 | if (Pos % Size) |
74 | continue; |
75 | Pos /= Size; |
76 | if (Vec.empty()) { |
77 | unsigned MaxNumParts = 1024 / Size; // Maximum register is 1024 bits. |
78 | Vec.resize(MaxNumParts); |
79 | } |
80 | Vec[Pos] = Idx; |
81 | } |
82 | }; |
83 | |
84 | static llvm::once_flag InitializeSubRegFromChannelTableFlag; |
85 | |
86 | static auto InitializeSubRegFromChannelTableOnce = [this]() { |
87 | for (auto &Row : SubRegFromChannelTable) |
88 | Row.fill(AMDGPU::NoSubRegister); |
89 | for (uint16_t Idx = 1; Idx < getNumSubRegIndices(); ++Idx) { |
90 | unsigned Width = AMDGPUSubRegIdxRanges[Idx].Size / 32; |
91 | unsigned Offset = AMDGPUSubRegIdxRanges[Idx].Offset / 32; |
92 | assert(Width < SubRegFromChannelTableWidthMap.size()); |
93 | Width = SubRegFromChannelTableWidthMap[Width]; |
94 | if (Width == 0) |
95 | continue; |
96 | unsigned TableIdx = Width - 1; |
97 | assert(TableIdx < SubRegFromChannelTable.size()); |
98 | assert(Offset < SubRegFromChannelTable[TableIdx].size()); |
99 | SubRegFromChannelTable[TableIdx][Offset] = Idx; |
100 | } |
101 | }; |
102 | |
103 | llvm::call_once(InitializeRegSplitPartsFlag, InitializeRegSplitPartsOnce); |
104 | llvm::call_once(InitializeSubRegFromChannelTableFlag, |
105 | InitializeSubRegFromChannelTableOnce); |
106 | } |
107 | |
108 | void SIRegisterInfo::reserveRegisterTuples(BitVector &Reserved, |
109 | MCRegister Reg) const { |
110 | MCRegAliasIterator R(Reg, this, true); |
111 | |
112 | for (; R.isValid(); ++R) |
113 | Reserved.set(*R); |
114 | } |
115 | |
116 | // Forced to be here by one .inc |
117 | const MCPhysReg *SIRegisterInfo::getCalleeSavedRegs( |
118 | const MachineFunction *MF) const { |
119 | CallingConv::ID CC = MF->getFunction().getCallingConv(); |
120 | switch (CC) { |
121 | case CallingConv::C: |
122 | case CallingConv::Fast: |
123 | case CallingConv::Cold: |
124 | case CallingConv::AMDGPU_Gfx: |
125 | return MF->getSubtarget<GCNSubtarget>().hasGFX90AInsts() |
126 | ? CSR_AMDGPU_HighRegs_With_AGPRs_SaveList |
127 | : CSR_AMDGPU_HighRegs_SaveList; |
128 | default: { |
129 | // Dummy to not crash RegisterClassInfo. |
130 | static const MCPhysReg NoCalleeSavedReg = AMDGPU::NoRegister; |
131 | return &NoCalleeSavedReg; |
132 | } |
133 | } |
134 | } |
135 | |
136 | const MCPhysReg * |
137 | SIRegisterInfo::getCalleeSavedRegsViaCopy(const MachineFunction *MF) const { |
138 | return nullptr; |
139 | } |
140 | |
141 | const uint32_t *SIRegisterInfo::getCallPreservedMask(const MachineFunction &MF, |
142 | CallingConv::ID CC) const { |
143 | switch (CC) { |
144 | case CallingConv::C: |
145 | case CallingConv::Fast: |
146 | case CallingConv::Cold: |
147 | case CallingConv::AMDGPU_Gfx: |
148 | return MF.getSubtarget<GCNSubtarget>().hasGFX90AInsts() |
149 | ? CSR_AMDGPU_HighRegs_With_AGPRs_RegMask |
150 | : CSR_AMDGPU_HighRegs_RegMask; |
151 | default: |
152 | return nullptr; |
153 | } |
154 | } |
155 | |
156 | const uint32_t *SIRegisterInfo::getNoPreservedMask() const { |
157 | return CSR_AMDGPU_NoRegs_RegMask; |
158 | } |
159 | |
160 | Register SIRegisterInfo::getFrameRegister(const MachineFunction &MF) const { |
161 | const SIFrameLowering *TFI = |
162 | MF.getSubtarget<GCNSubtarget>().getFrameLowering(); |
163 | const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>(); |
164 | // During ISel lowering we always reserve the stack pointer in entry |
165 | // functions, but never actually want to reference it when accessing our own |
166 | // frame. If we need a frame pointer we use it, but otherwise we can just use |
167 | // an immediate "0" which we represent by returning NoRegister. |
168 | if (FuncInfo->isEntryFunction()) { |
169 | return TFI->hasFP(MF) ? FuncInfo->getFrameOffsetReg() : Register(); |
170 | } |
171 | return TFI->hasFP(MF) ? FuncInfo->getFrameOffsetReg() |
172 | : FuncInfo->getStackPtrOffsetReg(); |
173 | } |
174 | |
175 | bool SIRegisterInfo::hasBasePointer(const MachineFunction &MF) const { |
176 | // When we need stack realignment, we can't reference off of the |
177 | // stack pointer, so we reserve a base pointer. |
178 | const MachineFrameInfo &MFI = MF.getFrameInfo(); |
179 | return MFI.getNumFixedObjects() && shouldRealignStack(MF); |
180 | } |
181 | |
182 | Register SIRegisterInfo::getBaseRegister() const { return AMDGPU::SGPR34; } |
183 | |
184 | const uint32_t *SIRegisterInfo::getAllVGPRRegMask() const { |
185 | return CSR_AMDGPU_AllVGPRs_RegMask; |
186 | } |
187 | |
188 | const uint32_t *SIRegisterInfo::getAllAGPRRegMask() const { |
189 | return CSR_AMDGPU_AllAGPRs_RegMask; |
190 | } |
191 | |
192 | const uint32_t *SIRegisterInfo::getAllVectorRegMask() const { |
193 | return CSR_AMDGPU_AllVectorRegs_RegMask; |
194 | } |
195 | |
196 | const uint32_t *SIRegisterInfo::getAllAllocatableSRegMask() const { |
197 | return CSR_AMDGPU_AllAllocatableSRegs_RegMask; |
198 | } |
199 | |
200 | unsigned SIRegisterInfo::getSubRegFromChannel(unsigned Channel, |
201 | unsigned NumRegs) { |
202 | assert(NumRegs < SubRegFromChannelTableWidthMap.size()); |
203 | unsigned NumRegIndex = SubRegFromChannelTableWidthMap[NumRegs]; |
204 | assert(NumRegIndex && "Not implemented"); |
205 | assert(Channel < SubRegFromChannelTable[NumRegIndex - 1].size()); |
206 | return SubRegFromChannelTable[NumRegIndex - 1][Channel]; |
207 | } |
208 | |
209 | MCRegister SIRegisterInfo::reservedPrivateSegmentBufferReg( |
210 | const MachineFunction &MF) const { |
211 | unsigned BaseIdx = alignDown(ST.getMaxNumSGPRs(MF), 4) - 4; |
212 | MCRegister BaseReg(AMDGPU::SGPR_32RegClass.getRegister(BaseIdx)); |
213 | return getMatchingSuperReg(BaseReg, AMDGPU::sub0, &AMDGPU::SGPR_128RegClass); |
214 | } |
215 | |
216 | BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const { |
217 | BitVector Reserved(getNumRegs()); |
218 | Reserved.set(AMDGPU::MODE); |
219 | |
220 | // EXEC_LO and EXEC_HI could be allocated and used as regular registers, but |
221 | // this seems likely to result in bugs, so I'm marking them as reserved. |
222 | reserveRegisterTuples(Reserved, AMDGPU::EXEC); |
223 | reserveRegisterTuples(Reserved, AMDGPU::FLAT_SCR); |
224 | |
225 | // M0 has to be reserved so that llvm accepts it as a live-in into a block. |
226 | reserveRegisterTuples(Reserved, AMDGPU::M0); |
227 | |
228 | // Reserve src_vccz, src_execz, src_scc. |
229 | reserveRegisterTuples(Reserved, AMDGPU::SRC_VCCZ); |
230 | reserveRegisterTuples(Reserved, AMDGPU::SRC_EXECZ); |
231 | reserveRegisterTuples(Reserved, AMDGPU::SRC_SCC); |
232 | |
233 | // Reserve the memory aperture registers. |
234 | reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_BASE); |
235 | reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_LIMIT); |
236 | reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_BASE); |
237 | reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_LIMIT); |
238 | |
239 | // Reserve src_pops_exiting_wave_id - support is not implemented in Codegen. |
240 | reserveRegisterTuples(Reserved, AMDGPU::SRC_POPS_EXITING_WAVE_ID); |
241 | |
242 | // Reserve xnack_mask registers - support is not implemented in Codegen. |
243 | reserveRegisterTuples(Reserved, AMDGPU::XNACK_MASK); |
244 | |
245 | // Reserve lds_direct register - support is not implemented in Codegen. |
246 | reserveRegisterTuples(Reserved, AMDGPU::LDS_DIRECT); |
247 | |
248 | // Reserve Trap Handler registers - support is not implemented in Codegen. |
249 | reserveRegisterTuples(Reserved, AMDGPU::TBA); |
250 | reserveRegisterTuples(Reserved, AMDGPU::TMA); |
251 | reserveRegisterTuples(Reserved, AMDGPU::TTMP0_TTMP1); |
252 | reserveRegisterTuples(Reserved, AMDGPU::TTMP2_TTMP3); |
253 | reserveRegisterTuples(Reserved, AMDGPU::TTMP4_TTMP5); |
254 | reserveRegisterTuples(Reserved, AMDGPU::TTMP6_TTMP7); |
255 | reserveRegisterTuples(Reserved, AMDGPU::TTMP8_TTMP9); |
256 | reserveRegisterTuples(Reserved, AMDGPU::TTMP10_TTMP11); |
257 | reserveRegisterTuples(Reserved, AMDGPU::TTMP12_TTMP13); |
258 | reserveRegisterTuples(Reserved, AMDGPU::TTMP14_TTMP15); |
259 | |
260 | // Reserve null register - it shall never be allocated |
261 | reserveRegisterTuples(Reserved, AMDGPU::SGPR_NULL); |
262 | |
263 | // Disallow vcc_hi allocation in wave32. It may be allocated but most likely |
264 | // will result in bugs. |
265 | if (isWave32) { |
266 | Reserved.set(AMDGPU::VCC); |
267 | Reserved.set(AMDGPU::VCC_HI); |
268 | } |
269 | |
270 | unsigned MaxNumSGPRs = ST.getMaxNumSGPRs(MF); |
271 | unsigned TotalNumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs(); |
272 | for (unsigned i = MaxNumSGPRs; i < TotalNumSGPRs; ++i) { |
273 | unsigned Reg = AMDGPU::SGPR_32RegClass.getRegister(i); |
274 | reserveRegisterTuples(Reserved, Reg); |
275 | } |
276 | |
277 | unsigned MaxNumVGPRs = ST.getMaxNumVGPRs(MF); |
278 | // TODO: In an entry function without calls and AGPRs used it is possible |
279 | // to use the whole register budget for VGPRs. Even more it shall |
280 | // be possible to estimate maximum AGPR/VGPR pressure and split |
281 | // register file accordingly. |
282 | if (ST.hasGFX90AInsts()) |
283 | MaxNumVGPRs /= 2; |
284 | unsigned TotalNumVGPRs = AMDGPU::VGPR_32RegClass.getNumRegs(); |
285 | for (unsigned i = MaxNumVGPRs; i < TotalNumVGPRs; ++i) { |
286 | unsigned Reg = AMDGPU::VGPR_32RegClass.getRegister(i); |
287 | reserveRegisterTuples(Reserved, Reg); |
288 | Reg = AMDGPU::AGPR_32RegClass.getRegister(i); |
289 | reserveRegisterTuples(Reserved, Reg); |
290 | } |
291 | |
292 | for (auto Reg : AMDGPU::SReg_32RegClass) { |
293 | Reserved.set(getSubReg(Reg, AMDGPU::hi16)); |
294 | Register Low = getSubReg(Reg, AMDGPU::lo16); |
295 | // This is to prevent BB vcc liveness errors. |
296 | if (!AMDGPU::SGPR_LO16RegClass.contains(Low)) |
297 | Reserved.set(Low); |
298 | } |
299 | |
300 | for (auto Reg : AMDGPU::AGPR_32RegClass) { |
301 | Reserved.set(getSubReg(Reg, AMDGPU::hi16)); |
302 | } |
303 | |
304 | // Reserve all remaining AGPRs if there are no instructions to use them. |
305 | if (!ST.hasMAIInsts()) { |
306 | for (unsigned i = 0; i < MaxNumVGPRs; ++i) { |
307 | unsigned Reg = AMDGPU::AGPR_32RegClass.getRegister(i); |
308 | reserveRegisterTuples(Reserved, Reg); |
309 | } |
310 | } |
311 | |
312 | const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); |
313 | |
314 | Register ScratchRSrcReg = MFI->getScratchRSrcReg(); |
315 | if (ScratchRSrcReg != AMDGPU::NoRegister) { |
316 | // Reserve 4 SGPRs for the scratch buffer resource descriptor in case we need |
317 | // to spill. |
318 | // TODO: May need to reserve a VGPR if doing LDS spilling. |
319 | reserveRegisterTuples(Reserved, ScratchRSrcReg); |
320 | } |
321 | |
322 | // We have to assume the SP is needed in case there are calls in the function, |
323 | // which is detected after the function is lowered. If we aren't really going |
324 | // to need SP, don't bother reserving it. |
325 | MCRegister StackPtrReg = MFI->getStackPtrOffsetReg(); |
326 | |
327 | if (StackPtrReg) { |
328 | reserveRegisterTuples(Reserved, StackPtrReg); |
329 | assert(!isSubRegister(ScratchRSrcReg, StackPtrReg)); |
330 | } |
331 | |
332 | MCRegister FrameReg = MFI->getFrameOffsetReg(); |
333 | if (FrameReg) { |
334 | reserveRegisterTuples(Reserved, FrameReg); |
335 | assert(!isSubRegister(ScratchRSrcReg, FrameReg)); |
336 | } |
337 | |
338 | if (hasBasePointer(MF)) { |
339 | MCRegister BasePtrReg = getBaseRegister(); |
340 | reserveRegisterTuples(Reserved, BasePtrReg); |
341 | assert(!isSubRegister(ScratchRSrcReg, BasePtrReg)); |
342 | } |
343 | |
344 | for (MCRegister Reg : MFI->WWMReservedRegs) { |
345 | reserveRegisterTuples(Reserved, Reg); |
346 | } |
347 | |
348 | // FIXME: Stop using reserved registers for this. |
349 | for (MCPhysReg Reg : MFI->getAGPRSpillVGPRs()) |
350 | reserveRegisterTuples(Reserved, Reg); |
351 | |
352 | for (MCPhysReg Reg : MFI->getVGPRSpillAGPRs()) |
353 | reserveRegisterTuples(Reserved, Reg); |
354 | |
355 | for (auto SSpill : MFI->getSGPRSpillVGPRs()) |
356 | reserveRegisterTuples(Reserved, SSpill.VGPR); |
357 | |
358 | return Reserved; |
359 | } |
360 | |
361 | bool SIRegisterInfo::shouldRealignStack(const MachineFunction &MF) const { |
362 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
363 | // On entry, the base address is 0, so it can't possibly need any more |
364 | // alignment. |
365 | |
366 | // FIXME: Should be able to specify the entry frame alignment per calling |
367 | // convention instead. |
368 | if (Info->isEntryFunction()) |
369 | return false; |
370 | |
371 | return TargetRegisterInfo::shouldRealignStack(MF); |
372 | } |
373 | |
374 | bool SIRegisterInfo::requiresRegisterScavenging(const MachineFunction &Fn) const { |
375 | const SIMachineFunctionInfo *Info = Fn.getInfo<SIMachineFunctionInfo>(); |
376 | if (Info->isEntryFunction()) { |
377 | const MachineFrameInfo &MFI = Fn.getFrameInfo(); |
378 | return MFI.hasStackObjects() || MFI.hasCalls(); |
379 | } |
380 | |
381 | // May need scavenger for dealing with callee saved registers. |
382 | return true; |
383 | } |
384 | |
385 | bool SIRegisterInfo::requiresFrameIndexScavenging( |
386 | const MachineFunction &MF) const { |
387 | // Do not use frame virtual registers. They used to be used for SGPRs, but |
388 | // once we reach PrologEpilogInserter, we can no longer spill SGPRs. If the |
389 | // scavenger fails, we can increment/decrement the necessary SGPRs to avoid a |
390 | // spill. |
391 | return false; |
392 | } |
393 | |
394 | bool SIRegisterInfo::requiresFrameIndexReplacementScavenging( |
395 | const MachineFunction &MF) const { |
396 | const MachineFrameInfo &MFI = MF.getFrameInfo(); |
397 | return MFI.hasStackObjects(); |
398 | } |
399 | |
400 | bool SIRegisterInfo::requiresVirtualBaseRegisters( |
401 | const MachineFunction &) const { |
402 | // There are no special dedicated stack or frame pointers. |
403 | return true; |
404 | } |
405 | |
406 | int64_t SIRegisterInfo::getScratchInstrOffset(const MachineInstr *MI) const { |
407 | assert(SIInstrInfo::isMUBUF(*MI) || SIInstrInfo::isFLATScratch(*MI)); |
408 | |
409 | int OffIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(), |
410 | AMDGPU::OpName::offset); |
411 | return MI->getOperand(OffIdx).getImm(); |
412 | } |
413 | |
414 | int64_t SIRegisterInfo::getFrameIndexInstrOffset(const MachineInstr *MI, |
415 | int Idx) const { |
416 | if (!SIInstrInfo::isMUBUF(*MI) && !SIInstrInfo::isFLATScratch(*MI)) |
417 | return 0; |
418 | |
419 | assert((Idx == AMDGPU::getNamedOperandIdx(MI->getOpcode(), |
420 | AMDGPU::OpName::vaddr) || |
421 | (Idx == AMDGPU::getNamedOperandIdx(MI->getOpcode(), |
422 | AMDGPU::OpName::saddr))) && |
423 | "Should never see frame index on non-address operand"); |
424 | |
425 | return getScratchInstrOffset(MI); |
426 | } |
427 | |
428 | bool SIRegisterInfo::needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const { |
429 | if (!SIInstrInfo::isMUBUF(*MI) && !SIInstrInfo::isFLATScratch(*MI)) |
430 | return false; |
431 | |
432 | int64_t FullOffset = Offset + getScratchInstrOffset(MI); |
433 | |
434 | if (SIInstrInfo::isMUBUF(*MI)) |
435 | return !SIInstrInfo::isLegalMUBUFImmOffset(FullOffset); |
436 | |
437 | const SIInstrInfo *TII = ST.getInstrInfo(); |
438 | return !TII->isLegalFLATOffset(FullOffset, AMDGPUAS::PRIVATE_ADDRESS, true); |
439 | } |
440 | |
441 | Register SIRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB, |
442 | int FrameIdx, |
443 | int64_t Offset) const { |
444 | MachineBasicBlock::iterator Ins = MBB->begin(); |
445 | DebugLoc DL; // Defaults to "unknown" |
446 | |
447 | if (Ins != MBB->end()) |
448 | DL = Ins->getDebugLoc(); |
449 | |
450 | MachineFunction *MF = MBB->getParent(); |
451 | const SIInstrInfo *TII = ST.getInstrInfo(); |
452 | MachineRegisterInfo &MRI = MF->getRegInfo(); |
453 | unsigned MovOpc = ST.enableFlatScratch() ? AMDGPU::S_MOV_B32 |
454 | : AMDGPU::V_MOV_B32_e32; |
455 | |
456 | Register BaseReg = MRI.createVirtualRegister( |
457 | ST.enableFlatScratch() ? &AMDGPU::SReg_32_XEXEC_HIRegClass |
458 | : &AMDGPU::VGPR_32RegClass); |
459 | |
460 | if (Offset == 0) { |
461 | BuildMI(*MBB, Ins, DL, TII->get(MovOpc), BaseReg) |
462 | .addFrameIndex(FrameIdx); |
463 | return BaseReg; |
464 | } |
465 | |
466 | Register OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); |
467 | |
468 | Register FIReg = MRI.createVirtualRegister( |
469 | ST.enableFlatScratch() ? &AMDGPU::SReg_32_XM0RegClass |
470 | : &AMDGPU::VGPR_32RegClass); |
471 | |
472 | BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg) |
473 | .addImm(Offset); |
474 | BuildMI(*MBB, Ins, DL, TII->get(MovOpc), FIReg) |
475 | .addFrameIndex(FrameIdx); |
476 | |
477 | if (ST.enableFlatScratch() ) { |
478 | BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::S_ADD_U32), BaseReg) |
479 | .addReg(OffsetReg, RegState::Kill) |
480 | .addReg(FIReg); |
481 | return BaseReg; |
482 | } |
483 | |
484 | TII->getAddNoCarry(*MBB, Ins, DL, BaseReg) |
485 | .addReg(OffsetReg, RegState::Kill) |
486 | .addReg(FIReg) |
487 | .addImm(0); // clamp bit |
488 | |
489 | return BaseReg; |
490 | } |
491 | |
492 | void SIRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg, |
493 | int64_t Offset) const { |
494 | const SIInstrInfo *TII = ST.getInstrInfo(); |
495 | bool IsFlat = TII->isFLATScratch(MI); |
496 | |
497 | #ifndef NDEBUG |
498 | // FIXME: Is it possible to be storing a frame index to itself? |
499 | bool SeenFI = false; |
500 | for (const MachineOperand &MO: MI.operands()) { |
501 | if (MO.isFI()) { |
502 | if (SeenFI) |
503 | llvm_unreachable("should not see multiple frame indices"); |
504 | |
505 | SeenFI = true; |
506 | } |
507 | } |
508 | #endif |
509 | |
510 | MachineOperand *FIOp = |
511 | TII->getNamedOperand(MI, IsFlat ? AMDGPU::OpName::saddr |
512 | : AMDGPU::OpName::vaddr); |
513 | |
514 | MachineOperand *OffsetOp = TII->getNamedOperand(MI, AMDGPU::OpName::offset); |
515 | int64_t NewOffset = OffsetOp->getImm() + Offset; |
516 | |
517 | assert(FIOp && FIOp->isFI() && "frame index must be address operand"); |
518 | assert(TII->isMUBUF(MI) || TII->isFLATScratch(MI)); |
519 | |
520 | if (IsFlat) { |
521 | assert(TII->isLegalFLATOffset(NewOffset, AMDGPUAS::PRIVATE_ADDRESS, true) && |
522 | "offset should be legal"); |
523 | FIOp->ChangeToRegister(BaseReg, false); |
524 | OffsetOp->setImm(NewOffset); |
525 | return; |
526 | } |
527 | |
528 | #ifndef NDEBUG |
529 | MachineOperand *SOffset = TII->getNamedOperand(MI, AMDGPU::OpName::soffset); |
530 | assert(SOffset->isImm() && SOffset->getImm() == 0); |
531 | #endif |
532 | |
533 | assert(SIInstrInfo::isLegalMUBUFImmOffset(NewOffset) && |
534 | "offset should be legal"); |
535 | |
536 | FIOp->ChangeToRegister(BaseReg, false); |
537 | OffsetOp->setImm(NewOffset); |
538 | } |
539 | |
540 | bool SIRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI, |
541 | Register BaseReg, |
542 | int64_t Offset) const { |
543 | if (!SIInstrInfo::isMUBUF(*MI) && !SIInstrInfo::isFLATScratch(*MI)) |
544 | return false; |
545 | |
546 | int64_t NewOffset = Offset + getScratchInstrOffset(MI); |
547 | |
548 | if (SIInstrInfo::isMUBUF(*MI)) |
549 | return SIInstrInfo::isLegalMUBUFImmOffset(NewOffset); |
550 | |
551 | const SIInstrInfo *TII = ST.getInstrInfo(); |
552 | return TII->isLegalFLATOffset(NewOffset, AMDGPUAS::PRIVATE_ADDRESS, true); |
553 | } |
554 | |
555 | const TargetRegisterClass *SIRegisterInfo::getPointerRegClass( |
556 | const MachineFunction &MF, unsigned Kind) const { |
557 | // This is inaccurate. It depends on the instruction and address space. The |
558 | // only place where we should hit this is for dealing with frame indexes / |
559 | // private accesses, so this is correct in that case. |
560 | return &AMDGPU::VGPR_32RegClass; |
561 | } |
562 | |
563 | static unsigned getNumSubRegsForSpillOp(unsigned Op) { |
564 | |
565 | switch (Op) { |
566 | case AMDGPU::SI_SPILL_S1024_SAVE: |
567 | case AMDGPU::SI_SPILL_S1024_RESTORE: |
568 | case AMDGPU::SI_SPILL_V1024_SAVE: |
569 | case AMDGPU::SI_SPILL_V1024_RESTORE: |
570 | case AMDGPU::SI_SPILL_A1024_SAVE: |
571 | case AMDGPU::SI_SPILL_A1024_RESTORE: |
572 | return 32; |
573 | case AMDGPU::SI_SPILL_S512_SAVE: |
574 | case AMDGPU::SI_SPILL_S512_RESTORE: |
575 | case AMDGPU::SI_SPILL_V512_SAVE: |
576 | case AMDGPU::SI_SPILL_V512_RESTORE: |
577 | case AMDGPU::SI_SPILL_A512_SAVE: |
578 | case AMDGPU::SI_SPILL_A512_RESTORE: |
579 | return 16; |
580 | case AMDGPU::SI_SPILL_S256_SAVE: |
581 | case AMDGPU::SI_SPILL_S256_RESTORE: |
582 | case AMDGPU::SI_SPILL_V256_SAVE: |
583 | case AMDGPU::SI_SPILL_V256_RESTORE: |
584 | case AMDGPU::SI_SPILL_A256_SAVE: |
585 | case AMDGPU::SI_SPILL_A256_RESTORE: |
586 | return 8; |
587 | case AMDGPU::SI_SPILL_S192_SAVE: |
588 | case AMDGPU::SI_SPILL_S192_RESTORE: |
589 | case AMDGPU::SI_SPILL_V192_SAVE: |
590 | case AMDGPU::SI_SPILL_V192_RESTORE: |
591 | case AMDGPU::SI_SPILL_A192_SAVE: |
592 | case AMDGPU::SI_SPILL_A192_RESTORE: |
593 | return 6; |
594 | case AMDGPU::SI_SPILL_S160_SAVE: |
595 | case AMDGPU::SI_SPILL_S160_RESTORE: |
596 | case AMDGPU::SI_SPILL_V160_SAVE: |
597 | case AMDGPU::SI_SPILL_V160_RESTORE: |
598 | case AMDGPU::SI_SPILL_A160_SAVE: |
599 | case AMDGPU::SI_SPILL_A160_RESTORE: |
600 | return 5; |
601 | case AMDGPU::SI_SPILL_S128_SAVE: |
602 | case AMDGPU::SI_SPILL_S128_RESTORE: |
603 | case AMDGPU::SI_SPILL_V128_SAVE: |
604 | case AMDGPU::SI_SPILL_V128_RESTORE: |
605 | case AMDGPU::SI_SPILL_A128_SAVE: |
606 | case AMDGPU::SI_SPILL_A128_RESTORE: |
607 | return 4; |
608 | case AMDGPU::SI_SPILL_S96_SAVE: |
609 | case AMDGPU::SI_SPILL_S96_RESTORE: |
610 | case AMDGPU::SI_SPILL_V96_SAVE: |
611 | case AMDGPU::SI_SPILL_V96_RESTORE: |
612 | case AMDGPU::SI_SPILL_A96_SAVE: |
613 | case AMDGPU::SI_SPILL_A96_RESTORE: |
614 | return 3; |
615 | case AMDGPU::SI_SPILL_S64_SAVE: |
616 | case AMDGPU::SI_SPILL_S64_RESTORE: |
617 | case AMDGPU::SI_SPILL_V64_SAVE: |
618 | case AMDGPU::SI_SPILL_V64_RESTORE: |
619 | case AMDGPU::SI_SPILL_A64_SAVE: |
620 | case AMDGPU::SI_SPILL_A64_RESTORE: |
621 | return 2; |
622 | case AMDGPU::SI_SPILL_S32_SAVE: |
623 | case AMDGPU::SI_SPILL_S32_RESTORE: |
624 | case AMDGPU::SI_SPILL_V32_SAVE: |
625 | case AMDGPU::SI_SPILL_V32_RESTORE: |
626 | case AMDGPU::SI_SPILL_A32_SAVE: |
627 | case AMDGPU::SI_SPILL_A32_RESTORE: |
628 | return 1; |
629 | default: llvm_unreachable("Invalid spill opcode"); |
630 | } |
631 | } |
632 | |
633 | static int getOffsetMUBUFStore(unsigned Opc) { |
634 | switch (Opc) { |
635 | case AMDGPU::BUFFER_STORE_DWORD_OFFEN: |
636 | return AMDGPU::BUFFER_STORE_DWORD_OFFSET; |
637 | case AMDGPU::BUFFER_STORE_BYTE_OFFEN: |
638 | return AMDGPU::BUFFER_STORE_BYTE_OFFSET; |
639 | case AMDGPU::BUFFER_STORE_SHORT_OFFEN: |
640 | return AMDGPU::BUFFER_STORE_SHORT_OFFSET; |
641 | case AMDGPU::BUFFER_STORE_DWORDX2_OFFEN: |
642 | return AMDGPU::BUFFER_STORE_DWORDX2_OFFSET; |
643 | case AMDGPU::BUFFER_STORE_DWORDX4_OFFEN: |
644 | return AMDGPU::BUFFER_STORE_DWORDX4_OFFSET; |
645 | case AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFEN: |
646 | return AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFSET; |
647 | case AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFEN: |
648 | return AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFSET; |
649 | default: |
650 | return -1; |
651 | } |
652 | } |
653 | |
654 | static int getOffsetMUBUFLoad(unsigned Opc) { |
655 | switch (Opc) { |
656 | case AMDGPU::BUFFER_LOAD_DWORD_OFFEN: |
657 | return AMDGPU::BUFFER_LOAD_DWORD_OFFSET; |
658 | case AMDGPU::BUFFER_LOAD_UBYTE_OFFEN: |
659 | return AMDGPU::BUFFER_LOAD_UBYTE_OFFSET; |
660 | case AMDGPU::BUFFER_LOAD_SBYTE_OFFEN: |
661 | return AMDGPU::BUFFER_LOAD_SBYTE_OFFSET; |
662 | case AMDGPU::BUFFER_LOAD_USHORT_OFFEN: |
663 | return AMDGPU::BUFFER_LOAD_USHORT_OFFSET; |
664 | case AMDGPU::BUFFER_LOAD_SSHORT_OFFEN: |
665 | return AMDGPU::BUFFER_LOAD_SSHORT_OFFSET; |
666 | case AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN: |
667 | return AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET; |
668 | case AMDGPU::BUFFER_LOAD_DWORDX4_OFFEN: |
669 | return AMDGPU::BUFFER_LOAD_DWORDX4_OFFSET; |
670 | case AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFEN: |
671 | return AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFSET; |
672 | case AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFEN: |
673 | return AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFSET; |
674 | case AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFEN: |
675 | return AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFSET; |
676 | case AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFEN: |
677 | return AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFSET; |
678 | case AMDGPU::BUFFER_LOAD_SHORT_D16_OFFEN: |
679 | return AMDGPU::BUFFER_LOAD_SHORT_D16_OFFSET; |
680 | case AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFEN: |
681 | return AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFSET; |
682 | default: |
683 | return -1; |
684 | } |
685 | } |
686 | |
687 | static MachineInstrBuilder spillVGPRtoAGPR(const GCNSubtarget &ST, |
688 | MachineBasicBlock::iterator MI, |
689 | int Index, |
690 | unsigned Lane, |
691 | unsigned ValueReg, |
692 | bool IsKill) { |
693 | MachineBasicBlock *MBB = MI->getParent(); |
694 | MachineFunction *MF = MI->getParent()->getParent(); |
695 | SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); |
696 | const SIInstrInfo *TII = ST.getInstrInfo(); |
697 | |
698 | MCPhysReg Reg = MFI->getVGPRToAGPRSpill(Index, Lane); |
699 | |
700 | if (Reg == AMDGPU::NoRegister) |
701 | return MachineInstrBuilder(); |
702 | |
703 | bool IsStore = MI->mayStore(); |
704 | MachineRegisterInfo &MRI = MF->getRegInfo(); |
705 | auto *TRI = static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo()); |
706 | |
707 | unsigned Dst = IsStore ? Reg : ValueReg; |
708 | unsigned Src = IsStore ? ValueReg : Reg; |
709 | unsigned Opc = (IsStore ^ TRI->isVGPR(MRI, Reg)) ? AMDGPU::V_ACCVGPR_WRITE_B32_e64 |
710 | : AMDGPU::V_ACCVGPR_READ_B32_e64; |
711 | |
712 | auto MIB = BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(Opc), Dst) |
713 | .addReg(Src, getKillRegState(IsKill)); |
714 | MIB->setAsmPrinterFlag(MachineInstr::ReloadReuse); |
715 | return MIB; |
716 | } |
717 | |
718 | // This differs from buildSpillLoadStore by only scavenging a VGPR. It does not |
719 | // need to handle the case where an SGPR may need to be spilled while spilling. |
720 | static bool buildMUBUFOffsetLoadStore(const GCNSubtarget &ST, |
721 | MachineFrameInfo &MFI, |
722 | MachineBasicBlock::iterator MI, |
723 | int Index, |
724 | int64_t Offset) { |
725 | const SIInstrInfo *TII = ST.getInstrInfo(); |
726 | MachineBasicBlock *MBB = MI->getParent(); |
727 | const DebugLoc &DL = MI->getDebugLoc(); |
728 | bool IsStore = MI->mayStore(); |
729 | |
730 | unsigned Opc = MI->getOpcode(); |
731 | int LoadStoreOp = IsStore ? |
732 | getOffsetMUBUFStore(Opc) : getOffsetMUBUFLoad(Opc); |
733 | if (LoadStoreOp == -1) |
734 | return false; |
735 | |
736 | const MachineOperand *Reg = TII->getNamedOperand(*MI, AMDGPU::OpName::vdata); |
737 | if (spillVGPRtoAGPR(ST, MI, Index, 0, Reg->getReg(), false).getInstr()) |
738 | return true; |
739 | |
740 | MachineInstrBuilder NewMI = |
741 | BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp)) |
742 | .add(*Reg) |
743 | .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc)) |
744 | .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)) |
745 | .addImm(Offset) |
746 | .addImm(0) // cpol |
747 | .addImm(0) // tfe |
748 | .addImm(0) // swz |
749 | .cloneMemRefs(*MI); |
750 | |
751 | const MachineOperand *VDataIn = TII->getNamedOperand(*MI, |
752 | AMDGPU::OpName::vdata_in); |
753 | if (VDataIn) |
754 | NewMI.add(*VDataIn); |
755 | return true; |
756 | } |
757 | |
758 | static unsigned getFlatScratchSpillOpcode(const SIInstrInfo *TII, |
759 | unsigned LoadStoreOp, |
760 | unsigned EltSize) { |
761 | bool IsStore = TII->get(LoadStoreOp).mayStore(); |
762 | bool UseST = |
763 | AMDGPU::getNamedOperandIdx(LoadStoreOp, AMDGPU::OpName::vaddr) < 0 && |
764 | AMDGPU::getNamedOperandIdx(LoadStoreOp, AMDGPU::OpName::saddr) < 0; |
765 | |
766 | switch (EltSize) { |
767 | case 4: |
768 | LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORD_SADDR |
769 | : AMDGPU::SCRATCH_LOAD_DWORD_SADDR; |
770 | break; |
771 | case 8: |
772 | LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORDX2_SADDR |
773 | : AMDGPU::SCRATCH_LOAD_DWORDX2_SADDR; |
774 | break; |
775 | case 12: |
776 | LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORDX3_SADDR |
777 | : AMDGPU::SCRATCH_LOAD_DWORDX3_SADDR; |
778 | break; |
779 | case 16: |
780 | LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORDX4_SADDR |
781 | : AMDGPU::SCRATCH_LOAD_DWORDX4_SADDR; |
782 | break; |
783 | default: |
784 | llvm_unreachable("Unexpected spill load/store size!"); |
785 | } |
786 | |
787 | if (UseST) |
788 | LoadStoreOp = AMDGPU::getFlatScratchInstSTfromSS(LoadStoreOp); |
789 | |
790 | return LoadStoreOp; |
791 | } |
792 | |
793 | void SIRegisterInfo::buildSpillLoadStore(MachineBasicBlock::iterator MI, |
794 | unsigned LoadStoreOp, |
795 | int Index, |
796 | Register ValueReg, |
797 | bool IsKill, |
798 | MCRegister ScratchOffsetReg, |
799 | int64_t InstOffset, |
800 | MachineMemOperand *MMO, |
801 | RegScavenger *RS) const { |
802 | MachineBasicBlock *MBB = MI->getParent(); |
803 | MachineFunction *MF = MI->getParent()->getParent(); |
804 | const SIInstrInfo *TII = ST.getInstrInfo(); |
805 | const MachineFrameInfo &MFI = MF->getFrameInfo(); |
806 | const SIMachineFunctionInfo *FuncInfo = MF->getInfo<SIMachineFunctionInfo>(); |
807 | |
808 | const MCInstrDesc *Desc = &TII->get(LoadStoreOp); |
809 | const DebugLoc &DL = MI->getDebugLoc(); |
810 | bool IsStore = Desc->mayStore(); |
811 | bool IsFlat = TII->isFLATScratch(LoadStoreOp); |
812 | |
813 | bool Scavenged = false; |
814 | MCRegister SOffset = ScratchOffsetReg; |
815 | |
816 | const TargetRegisterClass *RC = getRegClassForReg(MF->getRegInfo(), ValueReg); |
817 | // On gfx90a+ AGPR is a regular VGPR acceptable for loads and stores. |
818 | const bool IsAGPR = !ST.hasGFX90AInsts() && hasAGPRs(RC); |
819 | const unsigned RegWidth = AMDGPU::getRegBitWidth(RC->getID()) / 8; |
820 | |
821 | // Always use 4 byte operations for AGPRs because we need to scavenge |
822 | // a temporary VGPR. |
823 | unsigned EltSize = (IsFlat && !IsAGPR) ? std::min(RegWidth, 16u) : 4u; |
824 | unsigned NumSubRegs = RegWidth / EltSize; |
825 | unsigned Size = NumSubRegs * EltSize; |
826 | unsigned RemSize = RegWidth - Size; |
827 | unsigned NumRemSubRegs = RemSize ? 1 : 0; |
828 | int64_t Offset = InstOffset + MFI.getObjectOffset(Index); |
829 | int64_t MaxOffset = Offset + Size + RemSize - EltSize; |
830 | int64_t ScratchOffsetRegDelta = 0; |
831 | |
832 | if (IsFlat && EltSize > 4) { |
833 | LoadStoreOp = getFlatScratchSpillOpcode(TII, LoadStoreOp, EltSize); |
834 | Desc = &TII->get(LoadStoreOp); |
835 | } |
836 | |
837 | Align Alignment = MFI.getObjectAlign(Index); |
838 | const MachinePointerInfo &BasePtrInfo = MMO->getPointerInfo(); |
839 | |
840 | assert((IsFlat || ((Offset % EltSize) == 0)) && |
841 | "unexpected VGPR spill offset"); |
842 | |
843 | bool IsOffsetLegal = IsFlat |
844 | ? TII->isLegalFLATOffset(MaxOffset, AMDGPUAS::PRIVATE_ADDRESS, true) |
845 | : SIInstrInfo::isLegalMUBUFImmOffset(MaxOffset); |
846 | if (!IsOffsetLegal || (IsFlat && !SOffset && !ST.hasFlatScratchSTMode())) { |
847 | SOffset = MCRegister(); |
848 | |
849 | // We currently only support spilling VGPRs to EltSize boundaries, meaning |
850 | // we can simplify the adjustment of Offset here to just scale with |
851 | // WavefrontSize. |
852 | if (!IsFlat) |
853 | Offset *= ST.getWavefrontSize(); |
854 | |
855 | // We don't have access to the register scavenger if this function is called |
856 | // during PEI::scavengeFrameVirtualRegs(). |
857 | if (RS) |
858 | SOffset = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0, false); |
859 | |
860 | if (!SOffset) { |
861 | // There are no free SGPRs, and we are in the process of spilling |
862 | // VGPRs too. Since we need a VGPR in order to spill SGPRs (this is true |
863 | // on SI/CI and on VI it is true until we implement spilling using scalar |
864 | // stores), we have no way to free up an SGPR. Our solution here is to |
865 | // add the offset directly to the ScratchOffset or StackPtrOffset |
866 | // register, and then subtract the offset after the spill to return the |
867 | // register to its original value. |
868 | if (!ScratchOffsetReg) |
869 | ScratchOffsetReg = FuncInfo->getStackPtrOffsetReg(); |
870 | SOffset = ScratchOffsetReg; |
871 | ScratchOffsetRegDelta = Offset; |
872 | } else { |
873 | Scavenged = true; |
874 | } |
875 | |
876 | if (!SOffset) |
877 | report_fatal_error("could not scavenge SGPR to spill in entry function"); |
878 | |
879 | if (ScratchOffsetReg == AMDGPU::NoRegister) { |
880 | BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), SOffset) |
881 | .addImm(Offset); |
882 | } else { |
883 | BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), SOffset) |
884 | .addReg(ScratchOffsetReg) |
885 | .addImm(Offset); |
886 | } |
887 | |
888 | Offset = 0; |
889 | } |
890 | |
891 | if (IsFlat && SOffset == AMDGPU::NoRegister) { |
892 | assert(AMDGPU::getNamedOperandIdx(LoadStoreOp, AMDGPU::OpName::vaddr) < 0 |
893 | && "Unexpected vaddr for flat scratch with a FI operand"); |
894 | |
895 | assert(ST.hasFlatScratchSTMode()); |
896 | LoadStoreOp = AMDGPU::getFlatScratchInstSTfromSS(LoadStoreOp); |
897 | Desc = &TII->get(LoadStoreOp); |
Value stored to 'Desc' is never read | |
898 | } |
899 | |
900 | Register TmpReg; |
901 | |
902 | for (unsigned i = 0, e = NumSubRegs + NumRemSubRegs, RegOffset = 0; i != e; |
903 | ++i, RegOffset += EltSize) { |
904 | if (i == NumSubRegs) { |
905 | EltSize = RemSize; |
906 | LoadStoreOp = getFlatScratchSpillOpcode(TII, LoadStoreOp, EltSize); |
907 | } |
908 | Desc = &TII->get(LoadStoreOp); |
909 | |
910 | unsigned NumRegs = EltSize / 4; |
911 | Register SubReg = e == 1 |
912 | ? ValueReg |
913 | : Register(getSubReg(ValueReg, |
914 | getSubRegFromChannel(RegOffset / 4, NumRegs))); |
915 | |
916 | unsigned SOffsetRegState = 0; |
917 | unsigned SrcDstRegState = getDefRegState(!IsStore); |
918 | if (i + 1 == e) { |
919 | SOffsetRegState |= getKillRegState(Scavenged); |
920 | // The last implicit use carries the "Kill" flag. |
921 | SrcDstRegState |= getKillRegState(IsKill); |
922 | } |
923 | |
924 | // Make sure the whole register is defined if there are undef components by |
925 | // adding an implicit def of the super-reg on the first instruction. |
926 | bool NeedSuperRegDef = e > 1 && IsStore && i == 0; |
927 | bool NeedSuperRegImpOperand = e > 1; |
928 | |
929 | unsigned Lane = RegOffset / 4; |
930 | unsigned LaneE = (RegOffset + EltSize) / 4; |
931 | for ( ; Lane != LaneE; ++Lane) { |
932 | bool IsSubReg = e > 1 || EltSize > 4; |
933 | Register Sub = IsSubReg |
934 | ? Register(getSubReg(ValueReg, getSubRegFromChannel(Lane))) |
935 | : ValueReg; |
936 | auto MIB = spillVGPRtoAGPR(ST, MI, Index, Lane, Sub, IsKill); |
937 | if (!MIB.getInstr()) |
938 | break; |
939 | if (NeedSuperRegDef || (IsSubReg && IsStore && Lane == 0)) { |
940 | MIB.addReg(ValueReg, RegState::ImplicitDefine); |
941 | NeedSuperRegDef = false; |
942 | } |
943 | if (IsSubReg || NeedSuperRegImpOperand) { |
944 | NeedSuperRegImpOperand = true; |
945 | unsigned State = SrcDstRegState; |
946 | if (Lane + 1 != LaneE) |
947 | State &= ~RegState::Kill; |
948 | MIB.addReg(ValueReg, RegState::Implicit | State); |
949 | } |
950 | } |
951 | |
952 | if (Lane == LaneE) // Fully spilled into AGPRs. |
953 | continue; |
954 | |
955 | // Offset in bytes from the beginning of the ValueReg to its portion we |
956 | // still need to spill. It may differ from RegOffset if a portion of |
957 | // current SubReg has been already spilled into AGPRs by the loop above. |
958 | unsigned RemRegOffset = Lane * 4; |
959 | unsigned RemEltSize = EltSize - (RemRegOffset - RegOffset); |
960 | if (RemEltSize != EltSize) { // Partially spilled to AGPRs |
961 | assert(IsFlat && EltSize > 4); |
962 | |
963 | unsigned NumRegs = RemEltSize / 4; |
964 | SubReg = Register(getSubReg(ValueReg, |
965 | getSubRegFromChannel(RemRegOffset / 4, NumRegs))); |
966 | unsigned Opc = getFlatScratchSpillOpcode(TII, LoadStoreOp, RemEltSize); |
967 | Desc = &TII->get(Opc); |
968 | } |
969 | |
970 | unsigned FinalReg = SubReg; |
971 | |
972 | if (IsAGPR) { |
973 | assert(EltSize == 4); |
974 | |
975 | if (!TmpReg) { |
976 | assert(RS && "Needs to have RegScavenger to spill an AGPR!"); |
977 | // FIXME: change to scavengeRegisterBackwards() |
978 | TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0); |
979 | RS->setRegUsed(TmpReg); |
980 | } |
981 | if (IsStore) { |
982 | auto AccRead = BuildMI(*MBB, MI, DL, |
983 | TII->get(AMDGPU::V_ACCVGPR_READ_B32_e64), TmpReg) |
984 | .addReg(SubReg, getKillRegState(IsKill)); |
985 | if (NeedSuperRegDef) |
986 | AccRead.addReg(ValueReg, RegState::ImplicitDefine); |
987 | AccRead->setAsmPrinterFlag(MachineInstr::ReloadReuse); |
988 | } |
989 | SubReg = TmpReg; |
990 | } |
991 | |
992 | MachinePointerInfo PInfo = BasePtrInfo.getWithOffset(RemRegOffset); |
993 | MachineMemOperand *NewMMO = |
994 | MF->getMachineMemOperand(PInfo, MMO->getFlags(), RemEltSize, |
995 | commonAlignment(Alignment, RemRegOffset)); |
996 | |
997 | auto MIB = BuildMI(*MBB, MI, DL, *Desc) |
998 | .addReg(SubReg, |
999 | getDefRegState(!IsStore) | getKillRegState(IsKill)); |
1000 | if (!IsFlat) |
1001 | MIB.addReg(FuncInfo->getScratchRSrcReg()); |
1002 | |
1003 | if (SOffset == AMDGPU::NoRegister) { |
1004 | if (!IsFlat) |
1005 | MIB.addImm(0); |
1006 | } else { |
1007 | MIB.addReg(SOffset, SOffsetRegState); |
1008 | } |
1009 | MIB.addImm(Offset + RemRegOffset) |
1010 | .addImm(0); // cpol |
1011 | if (!IsFlat) |
1012 | MIB.addImm(0) // tfe |
1013 | .addImm(0); // swz |
1014 | MIB.addMemOperand(NewMMO); |
1015 | |
1016 | if (!IsAGPR && NeedSuperRegDef) |
1017 | MIB.addReg(ValueReg, RegState::ImplicitDefine); |
1018 | |
1019 | if (!IsStore && TmpReg != AMDGPU::NoRegister) { |
1020 | MIB = BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), |
1021 | FinalReg) |
1022 | .addReg(TmpReg, RegState::Kill); |
1023 | MIB->setAsmPrinterFlag(MachineInstr::ReloadReuse); |
1024 | } |
1025 | |
1026 | if (NeedSuperRegImpOperand) |
1027 | MIB.addReg(ValueReg, RegState::Implicit | SrcDstRegState); |
1028 | } |
1029 | |
1030 | if (ScratchOffsetRegDelta != 0) { |
1031 | // Subtract the offset we added to the ScratchOffset register. |
1032 | BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_SUB_U32), SOffset) |
1033 | .addReg(SOffset) |
1034 | .addImm(ScratchOffsetRegDelta); |
1035 | } |
1036 | } |
1037 | |
1038 | // Generate a VMEM access which loads or stores the VGPR containing an SGPR |
1039 | // spill such that all the lanes set in VGPRLanes are loaded or stored. |
1040 | // This generates exec mask manipulation and will use SGPRs available in MI,
1041 | // or lanes of the VGPR itself, to save and restore the exec mask.
1042 | void SIRegisterInfo::buildSGPRSpillLoadStore(MachineBasicBlock::iterator MI, |
1043 | int Index, int Offset, |
1044 | unsigned EltSize, Register VGPR, |
1045 | int64_t VGPRLanes, |
1046 | RegScavenger *RS, |
1047 | bool IsLoad) const { |
1048 | MachineBasicBlock *MBB = MI->getParent(); |
1049 | MachineFunction *MF = MBB->getParent(); |
1050 | SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); |
1051 | const SIInstrInfo *TII = ST.getInstrInfo(); |
1052 | |
1053 | Register SuperReg = MI->getOperand(0).getReg(); |
1054 | const TargetRegisterClass *RC = getPhysRegClass(SuperReg); |
1055 | ArrayRef<int16_t> SplitParts = getRegSplitParts(RC, EltSize); |
1056 | unsigned NumSubRegs = SplitParts.empty() ? 1 : SplitParts.size(); |
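     | // Offset selects one 32-lane block of sub-registers handled per temporary
     | // VGPR; ExecLane indexes the sub-register(s) within that block that are
     | // borrowed to hold the saved EXEC mask.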
1057 | unsigned FirstPart = Offset * 32; |
1058 | unsigned ExecLane = 0; |
1059 | |
1060 | bool IsKill = MI->getOperand(0).isKill(); |
1061 | const DebugLoc &DL = MI->getDebugLoc(); |
1062 | |
1063 | // Cannot handle load/store to EXEC |
1064 | assert(SuperReg != AMDGPU::EXEC_LO && SuperReg != AMDGPU::EXEC_HI &&
1065 | SuperReg != AMDGPU::EXEC && "exec should never spill");
1066 | |
1067 | // On Wave32 only handle EXEC_LO. |
1068 | // On Wave64 only update EXEC_HI if there is sufficient space for a copy.
1069 | bool OnlyExecLo = isWave32 || NumSubRegs == 1 || SuperReg == AMDGPU::EXEC_HI; |
1070 | |
1071 | unsigned ExecMovOpc = OnlyExecLo ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64; |
1072 | Register ExecReg = OnlyExecLo ? AMDGPU::EXEC_LO : AMDGPU::EXEC; |
1073 | Register SavedExecReg; |
1074 | |
1075 | // Backup EXEC |
1076 | if (OnlyExecLo) { |
1077 | SavedExecReg = |
1078 | NumSubRegs == 1 |
1079 | ? SuperReg |
1080 | : Register(getSubReg(SuperReg, SplitParts[FirstPart + ExecLane])); |
1081 | } else { |
1082 | // If the src/dst is an odd size, it is possible that subreg0 is not aligned.
1083 | for (; ExecLane < (NumSubRegs - 1); ++ExecLane) { |
1084 | SavedExecReg = getMatchingSuperReg( |
1085 | getSubReg(SuperReg, SplitParts[FirstPart + ExecLane]), AMDGPU::sub0, |
1086 | &AMDGPU::SReg_64_XEXECRegClass); |
1087 | if (SavedExecReg) |
1088 | break; |
1089 | } |
1090 | } |
1091 | assert(SavedExecReg);
1092 | BuildMI(*MBB, MI, DL, TII->get(ExecMovOpc), SavedExecReg).addReg(ExecReg); |
1093 | |
1094 | // Setup EXEC |
1095 | BuildMI(*MBB, MI, DL, TII->get(ExecMovOpc), ExecReg).addImm(VGPRLanes); |
1096 | |
1097 | // Load/store VGPR |
1098 | MachineFrameInfo &FrameInfo = MF->getFrameInfo(); |
1099 | assert(FrameInfo.getStackID(Index) != TargetStackID::SGPRSpill);
1100 | |
1101 | Register FrameReg = FrameInfo.isFixedObjectIndex(Index) && hasBasePointer(*MF) |
1102 | ? getBaseRegister() |
1103 | : getFrameRegister(*MF); |
1104 | |
1105 | Align Alignment = FrameInfo.getObjectAlign(Index); |
1106 | MachinePointerInfo PtrInfo = |
1107 | MachinePointerInfo::getFixedStack(*MF, Index); |
1108 | MachineMemOperand *MMO = MF->getMachineMemOperand( |
1109 | PtrInfo, IsLoad ? MachineMemOperand::MOLoad : MachineMemOperand::MOStore, |
1110 | EltSize, Alignment); |
1111 | |
1112 | if (IsLoad) { |
1113 | unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_LOAD_DWORD_SADDR |
1114 | : AMDGPU::BUFFER_LOAD_DWORD_OFFSET; |
1115 | buildSpillLoadStore(MI, Opc, |
1116 | Index, |
1117 | VGPR, false, |
1118 | FrameReg, |
1119 | Offset * EltSize, MMO, |
1120 | RS); |
1121 | } else { |
1122 | unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_STORE_DWORD_SADDR |
1123 | : AMDGPU::BUFFER_STORE_DWORD_OFFSET; |
1124 | buildSpillLoadStore(MI, Opc, Index, VGPR, |
1125 | IsKill, FrameReg, |
1126 | Offset * EltSize, MMO, RS); |
1127 | // This only ever adds one VGPR spill |
1128 | MFI->addToSpilledVGPRs(1); |
1129 | } |
1130 | |
1131 | // Restore EXEC |
1132 | BuildMI(*MBB, MI, DL, TII->get(ExecMovOpc), ExecReg) |
1133 | .addReg(SavedExecReg, getKillRegState(IsLoad || IsKill)); |
1134 | |
1135 | // Restore clobbered SGPRs |
1136 | if (IsLoad) { |
1137 | // Nothing to do; register will be overwritten |
1138 | } else if (!IsKill) { |
1139 | // Restore SGPRs from appropriate VGPR lanes |
1140 | if (!OnlyExecLo) { |
1141 | BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_READLANE_B32), |
1142 | getSubReg(SuperReg, SplitParts[FirstPart + ExecLane + 1])) |
1143 | .addReg(VGPR) |
1144 | .addImm(ExecLane + 1); |
1145 | } |
1146 | BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_READLANE_B32), |
1147 | NumSubRegs == 1 ? SavedExecReg |
1148 | : Register(getSubReg( |
1149 | SuperReg, SplitParts[FirstPart + ExecLane]))) |
1150 | .addReg(VGPR, RegState::Kill) |
1151 | .addImm(ExecLane); |
1152 | } |
1153 | } |
1154 | |
1155 | bool SIRegisterInfo::spillSGPR(MachineBasicBlock::iterator MI, |
1156 | int Index, |
1157 | RegScavenger *RS, |
1158 | bool OnlyToVGPR) const { |
1159 | MachineBasicBlock *MBB = MI->getParent(); |
1160 | MachineFunction *MF = MBB->getParent(); |
1161 | SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); |
1162 | |
1163 | ArrayRef<SIMachineFunctionInfo::SpilledReg> VGPRSpills |
1164 | = MFI->getSGPRToVGPRSpills(Index); |
1165 | bool SpillToVGPR = !VGPRSpills.empty(); |
1166 | if (OnlyToVGPR && !SpillToVGPR) |
1167 | return false; |
1168 | |
1169 | const SIInstrInfo *TII = ST.getInstrInfo(); |
1170 | |
1171 | Register SuperReg = MI->getOperand(0).getReg(); |
1172 | bool IsKill = MI->getOperand(0).isKill(); |
1173 | const DebugLoc &DL = MI->getDebugLoc(); |
1174 | |
1175 | assert(SpillToVGPR || (SuperReg != MFI->getStackPtrOffsetReg() &&
1176 | SuperReg != MFI->getFrameOffsetReg()));
1177 | |
1178 | assert(SuperReg != AMDGPU::M0 && "m0 should never spill");
1179 | assert(SuperReg != AMDGPU::EXEC_LO && SuperReg != AMDGPU::EXEC_HI &&
1180 | SuperReg != AMDGPU::EXEC && "exec should never spill");
1181 | |
1182 | unsigned EltSize = 4; |
1183 | const TargetRegisterClass *RC = getPhysRegClass(SuperReg); |
1184 | |
1185 | ArrayRef<int16_t> SplitParts = getRegSplitParts(RC, EltSize); |
1186 | unsigned NumSubRegs = SplitParts.empty() ? 1 : SplitParts.size(); |
1187 | |
1188 | if (SpillToVGPR) { |
1189 | for (unsigned i = 0, e = NumSubRegs; i < e; ++i) { |
1190 | Register SubReg = NumSubRegs == 1 |
1191 | ? SuperReg |
1192 | : Register(getSubReg(SuperReg, SplitParts[i])); |
1193 | SIMachineFunctionInfo::SpilledReg Spill = VGPRSpills[i]; |
1194 | |
1195 | bool UseKill = IsKill && i == NumSubRegs - 1; |
1196 | |
1197 | // Mark the "old value of VGPR" input undef only if this is the first SGPR
1198 | // spill to this specific VGPR in the first basic block.
1199 | auto MIB = |
1200 | BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_WRITELANE_B32), Spill.VGPR) |
1201 | .addReg(SubReg, getKillRegState(UseKill)) |
1202 | .addImm(Spill.Lane) |
1203 | .addReg(Spill.VGPR); |
1204 | |
1205 | if (i == 0 && NumSubRegs > 1) { |
1206 | // We may be spilling a super-register which is only partially defined, |
1207 | // and need to ensure later spills think the value is defined. |
1208 | MIB.addReg(SuperReg, RegState::ImplicitDefine); |
1209 | } |
1210 | |
1211 | if (NumSubRegs > 1) |
1212 | MIB.addReg(SuperReg, getKillRegState(UseKill) | RegState::Implicit); |
1213 | |
1214 | // FIXME: Since this spills to another register instead of an actual |
1215 | // frame index, we should delete the frame index when all references to |
1216 | // it are fixed. |
1217 | } |
1218 | } else { |
1219 | // Scavenged temporary VGPR to use. It must be scavenged once for any number |
1220 | // of spilled subregs. |
1221 | Register TmpVGPR = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0); |
1222 | RS->setRegUsed(TmpVGPR); |
1223 | |
1224 | // SubReg carries the "Kill" flag when SubReg == SuperReg. |
1225 | unsigned SubKillState = getKillRegState((NumSubRegs == 1) && IsKill); |
1226 | |
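     | // Pack up to 32 SGPR sub-registers into the lanes of the temporary VGPR
     | // with V_WRITELANE, then store that VGPR to the stack slot.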
1227 | unsigned PerVGPR = 32; |
1228 | unsigned NumVGPRs = (NumSubRegs + (PerVGPR - 1)) / PerVGPR; |
1229 | int64_t VGPRLanes = (1LL << std::min(PerVGPR, NumSubRegs)) - 1LL; |
1230 | |
1231 | for (unsigned Offset = 0; Offset < NumVGPRs; ++Offset) { |
1232 | unsigned TmpVGPRFlags = RegState::Undef; |
1233 | |
1234 | // Write sub registers into the VGPR |
1235 | for (unsigned i = Offset * PerVGPR, |
1236 | e = std::min((Offset + 1) * PerVGPR, NumSubRegs); |
1237 | i < e; ++i) { |
1238 | Register SubReg = NumSubRegs == 1 |
1239 | ? SuperReg |
1240 | : Register(getSubReg(SuperReg, SplitParts[i])); |
1241 | |
1242 | MachineInstrBuilder WriteLane = |
1243 | BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_WRITELANE_B32), TmpVGPR) |
1244 | .addReg(SubReg, SubKillState) |
1245 | .addImm(i % PerVGPR) |
1246 | .addReg(TmpVGPR, TmpVGPRFlags); |
1247 | TmpVGPRFlags = 0; |
1248 | |
1249 | // There could be undef components of a spilled super register. |
1250 | // TODO: Can we detect this and skip the spill? |
1251 | if (NumSubRegs > 1) { |
1252 | // The last implicit use of the SuperReg carries the "Kill" flag. |
1253 | unsigned SuperKillState = 0; |
1254 | if (i + 1 == NumSubRegs) |
1255 | SuperKillState |= getKillRegState(IsKill); |
1256 | WriteLane.addReg(SuperReg, RegState::Implicit | SuperKillState); |
1257 | } |
1258 | } |
1259 | |
1260 | // Write out VGPR |
1261 | buildSGPRSpillLoadStore(MI, Index, Offset, EltSize, TmpVGPR, VGPRLanes, |
1262 | RS, false); |
1263 | } |
1264 | } |
1265 | |
1266 | MI->eraseFromParent(); |
1267 | MFI->addToSpilledSGPRs(NumSubRegs); |
1268 | return true; |
1269 | } |
1270 | |
1271 | bool SIRegisterInfo::restoreSGPR(MachineBasicBlock::iterator MI, |
1272 | int Index, |
1273 | RegScavenger *RS, |
1274 | bool OnlyToVGPR) const { |
1275 | MachineFunction *MF = MI->getParent()->getParent(); |
1276 | MachineBasicBlock *MBB = MI->getParent(); |
1277 | SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); |
1278 | |
1279 | ArrayRef<SIMachineFunctionInfo::SpilledReg> VGPRSpills |
1280 | = MFI->getSGPRToVGPRSpills(Index); |
1281 | bool SpillToVGPR = !VGPRSpills.empty(); |
1282 | if (OnlyToVGPR && !SpillToVGPR) |
1283 | return false; |
1284 | |
1285 | const SIInstrInfo *TII = ST.getInstrInfo(); |
1286 | const DebugLoc &DL = MI->getDebugLoc(); |
1287 | |
1288 | Register SuperReg = MI->getOperand(0).getReg(); |
1289 | |
1290 | assert(SuperReg != AMDGPU::M0 && "m0 should never spill");
1291 | assert(SuperReg != AMDGPU::EXEC_LO && SuperReg != AMDGPU::EXEC_HI &&
1292 | SuperReg != AMDGPU::EXEC && "exec should never spill");
1293 | |
1294 | unsigned EltSize = 4; |
1295 | |
1296 | const TargetRegisterClass *RC = getPhysRegClass(SuperReg); |
1297 | |
1298 | ArrayRef<int16_t> SplitParts = getRegSplitParts(RC, EltSize); |
1299 | unsigned NumSubRegs = SplitParts.empty() ? 1 : SplitParts.size(); |
1300 | |
1301 | if (SpillToVGPR) { |
1302 | for (unsigned i = 0, e = NumSubRegs; i < e; ++i) { |
1303 | Register SubReg = NumSubRegs == 1 |
1304 | ? SuperReg |
1305 | : Register(getSubReg(SuperReg, SplitParts[i])); |
1306 | |
1307 | SIMachineFunctionInfo::SpilledReg Spill = VGPRSpills[i]; |
1308 | auto MIB = BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_READLANE_B32), SubReg) |
1309 | .addReg(Spill.VGPR) |
1310 | .addImm(Spill.Lane); |
1311 | if (NumSubRegs > 1 && i == 0) |
1312 | MIB.addReg(SuperReg, RegState::ImplicitDefine); |
1313 | } |
1314 | } else { |
1315 | Register TmpVGPR = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0); |
1316 | RS->setRegUsed(TmpVGPR); |
1317 | |
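     | // Reload each stored VGPR from the stack slot, then unpack the SGPR
     | // sub-registers from its lanes with V_READLANE.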
1318 | unsigned PerVGPR = 32; |
1319 | unsigned NumVGPRs = (NumSubRegs + (PerVGPR - 1)) / PerVGPR; |
1320 | int64_t VGPRLanes = (1LL << std::min(PerVGPR, NumSubRegs)) - 1LL; |
1321 | |
1322 | for (unsigned Offset = 0; Offset < NumVGPRs; ++Offset) { |
1323 | // Load in VGPR data |
1324 | buildSGPRSpillLoadStore(MI, Index, Offset, EltSize, TmpVGPR, VGPRLanes, |
1325 | RS, true); |
1326 | |
1327 | // Unpack lanes |
1328 | for (unsigned i = Offset * PerVGPR, |
1329 | e = std::min((Offset + 1) * PerVGPR, NumSubRegs); |
1330 | i < e; ++i) { |
1331 | Register SubReg = NumSubRegs == 1 |
1332 | ? SuperReg |
1333 | : Register(getSubReg(SuperReg, SplitParts[i])); |
1334 | |
1335 | bool LastSubReg = (i + 1 == e); |
1336 | auto MIB = |
1337 | BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_READLANE_B32), SubReg) |
1338 | .addReg(TmpVGPR, getKillRegState(LastSubReg)) |
1339 | .addImm(i); |
1340 | if (NumSubRegs > 1 && i == 0) |
1341 | MIB.addReg(SuperReg, RegState::ImplicitDefine); |
1342 | } |
1343 | } |
1344 | } |
1345 | |
1346 | MI->eraseFromParent(); |
1347 | return true; |
1348 | } |
1349 | |
1350 | /// Special case of eliminateFrameIndex. Returns true if the SGPR was spilled to |
1351 | /// a VGPR and the stack slot can be safely eliminated when all other users are |
1352 | /// handled. |
1353 | bool SIRegisterInfo::eliminateSGPRToVGPRSpillFrameIndex( |
1354 | MachineBasicBlock::iterator MI, |
1355 | int FI, |
1356 | RegScavenger *RS) const { |
1357 | switch (MI->getOpcode()) { |
1358 | case AMDGPU::SI_SPILL_S1024_SAVE: |
1359 | case AMDGPU::SI_SPILL_S512_SAVE: |
1360 | case AMDGPU::SI_SPILL_S256_SAVE: |
1361 | case AMDGPU::SI_SPILL_S192_SAVE: |
1362 | case AMDGPU::SI_SPILL_S160_SAVE: |
1363 | case AMDGPU::SI_SPILL_S128_SAVE: |
1364 | case AMDGPU::SI_SPILL_S96_SAVE: |
1365 | case AMDGPU::SI_SPILL_S64_SAVE: |
1366 | case AMDGPU::SI_SPILL_S32_SAVE: |
1367 | return spillSGPR(MI, FI, RS, true); |
1368 | case AMDGPU::SI_SPILL_S1024_RESTORE: |
1369 | case AMDGPU::SI_SPILL_S512_RESTORE: |
1370 | case AMDGPU::SI_SPILL_S256_RESTORE: |
1371 | case AMDGPU::SI_SPILL_S192_RESTORE: |
1372 | case AMDGPU::SI_SPILL_S160_RESTORE: |
1373 | case AMDGPU::SI_SPILL_S128_RESTORE: |
1374 | case AMDGPU::SI_SPILL_S96_RESTORE: |
1375 | case AMDGPU::SI_SPILL_S64_RESTORE: |
1376 | case AMDGPU::SI_SPILL_S32_RESTORE: |
1377 | return restoreSGPR(MI, FI, RS, true); |
1378 | default: |
1379 | llvm_unreachable("not an SGPR spill instruction");
1380 | } |
1381 | } |
1382 | |
1383 | void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI, |
1384 | int SPAdj, unsigned FIOperandNum, |
1385 | RegScavenger *RS) const { |
1386 | MachineFunction *MF = MI->getParent()->getParent(); |
1387 | MachineBasicBlock *MBB = MI->getParent(); |
1388 | SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); |
1389 | MachineFrameInfo &FrameInfo = MF->getFrameInfo(); |
1390 | const SIInstrInfo *TII = ST.getInstrInfo(); |
1391 | DebugLoc DL = MI->getDebugLoc(); |
1392 | |
1393 | assert(SPAdj == 0 && "unhandled SP adjustment in call sequence?");
1394 | |
1395 | MachineOperand &FIOp = MI->getOperand(FIOperandNum); |
1396 | int Index = MI->getOperand(FIOperandNum).getIndex(); |
1397 | |
1398 | Register FrameReg = FrameInfo.isFixedObjectIndex(Index) && hasBasePointer(*MF) |
1399 | ? getBaseRegister() |
1400 | : getFrameRegister(*MF); |
1401 | |
1402 | switch (MI->getOpcode()) { |
1403 | // SGPR register spill |
1404 | case AMDGPU::SI_SPILL_S1024_SAVE: |
1405 | case AMDGPU::SI_SPILL_S512_SAVE: |
1406 | case AMDGPU::SI_SPILL_S256_SAVE: |
1407 | case AMDGPU::SI_SPILL_S192_SAVE: |
1408 | case AMDGPU::SI_SPILL_S160_SAVE: |
1409 | case AMDGPU::SI_SPILL_S128_SAVE: |
1410 | case AMDGPU::SI_SPILL_S96_SAVE: |
1411 | case AMDGPU::SI_SPILL_S64_SAVE: |
1412 | case AMDGPU::SI_SPILL_S32_SAVE: { |
1413 | spillSGPR(MI, Index, RS); |
1414 | break; |
1415 | } |
1416 | |
1417 | // SGPR register restore |
1418 | case AMDGPU::SI_SPILL_S1024_RESTORE: |
1419 | case AMDGPU::SI_SPILL_S512_RESTORE: |
1420 | case AMDGPU::SI_SPILL_S256_RESTORE: |
1421 | case AMDGPU::SI_SPILL_S192_RESTORE: |
1422 | case AMDGPU::SI_SPILL_S160_RESTORE: |
1423 | case AMDGPU::SI_SPILL_S128_RESTORE: |
1424 | case AMDGPU::SI_SPILL_S96_RESTORE: |
1425 | case AMDGPU::SI_SPILL_S64_RESTORE: |
1426 | case AMDGPU::SI_SPILL_S32_RESTORE: { |
1427 | restoreSGPR(MI, Index, RS); |
1428 | break; |
1429 | } |
1430 | |
1431 | // VGPR register spill |
1432 | case AMDGPU::SI_SPILL_V1024_SAVE: |
1433 | case AMDGPU::SI_SPILL_V512_SAVE: |
1434 | case AMDGPU::SI_SPILL_V256_SAVE: |
1435 | case AMDGPU::SI_SPILL_V192_SAVE: |
1436 | case AMDGPU::SI_SPILL_V160_SAVE: |
1437 | case AMDGPU::SI_SPILL_V128_SAVE: |
1438 | case AMDGPU::SI_SPILL_V96_SAVE: |
1439 | case AMDGPU::SI_SPILL_V64_SAVE: |
1440 | case AMDGPU::SI_SPILL_V32_SAVE: |
1441 | case AMDGPU::SI_SPILL_A1024_SAVE: |
1442 | case AMDGPU::SI_SPILL_A512_SAVE: |
1443 | case AMDGPU::SI_SPILL_A256_SAVE: |
1444 | case AMDGPU::SI_SPILL_A192_SAVE: |
1445 | case AMDGPU::SI_SPILL_A160_SAVE: |
1446 | case AMDGPU::SI_SPILL_A128_SAVE: |
1447 | case AMDGPU::SI_SPILL_A96_SAVE: |
1448 | case AMDGPU::SI_SPILL_A64_SAVE: |
1449 | case AMDGPU::SI_SPILL_A32_SAVE: { |
1450 | const MachineOperand *VData = TII->getNamedOperand(*MI, |
1451 | AMDGPU::OpName::vdata); |
1452 | assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg() ==
1453 | MFI->getStackPtrOffsetReg());
1454 | |
1455 | unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_STORE_DWORD_SADDR |
1456 | : AMDGPU::BUFFER_STORE_DWORD_OFFSET; |
1457 | buildSpillLoadStore(MI, Opc, |
1458 | Index, |
1459 | VData->getReg(), VData->isKill(), |
1460 | FrameReg, |
1461 | TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(), |
1462 | *MI->memoperands_begin(), |
1463 | RS); |
1464 | MFI->addToSpilledVGPRs(getNumSubRegsForSpillOp(MI->getOpcode())); |
1465 | MI->eraseFromParent(); |
1466 | break; |
1467 | } |
1468 | case AMDGPU::SI_SPILL_V32_RESTORE: |
1469 | case AMDGPU::SI_SPILL_V64_RESTORE: |
1470 | case AMDGPU::SI_SPILL_V96_RESTORE: |
1471 | case AMDGPU::SI_SPILL_V128_RESTORE: |
1472 | case AMDGPU::SI_SPILL_V160_RESTORE: |
1473 | case AMDGPU::SI_SPILL_V192_RESTORE: |
1474 | case AMDGPU::SI_SPILL_V256_RESTORE: |
1475 | case AMDGPU::SI_SPILL_V512_RESTORE: |
1476 | case AMDGPU::SI_SPILL_V1024_RESTORE: |
1477 | case AMDGPU::SI_SPILL_A32_RESTORE: |
1478 | case AMDGPU::SI_SPILL_A64_RESTORE: |
1479 | case AMDGPU::SI_SPILL_A96_RESTORE: |
1480 | case AMDGPU::SI_SPILL_A128_RESTORE: |
1481 | case AMDGPU::SI_SPILL_A160_RESTORE: |
1482 | case AMDGPU::SI_SPILL_A192_RESTORE: |
1483 | case AMDGPU::SI_SPILL_A256_RESTORE: |
1484 | case AMDGPU::SI_SPILL_A512_RESTORE: |
1485 | case AMDGPU::SI_SPILL_A1024_RESTORE: { |
1486 | const MachineOperand *VData = TII->getNamedOperand(*MI, |
1487 | AMDGPU::OpName::vdata); |
1488 | assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg() ==
1489 | MFI->getStackPtrOffsetReg());
1490 | |
1491 | unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_LOAD_DWORD_SADDR |
1492 | : AMDGPU::BUFFER_LOAD_DWORD_OFFSET; |
1493 | buildSpillLoadStore(MI, Opc, |
1494 | Index, |
1495 | VData->getReg(), VData->isKill(), |
1496 | FrameReg, |
1497 | TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(), |
1498 | *MI->memoperands_begin(), |
1499 | RS); |
1500 | MI->eraseFromParent(); |
1501 | break; |
1502 | } |
1503 | |
1504 | default: { |
1505 | const DebugLoc &DL = MI->getDebugLoc(); |
1506 | |
1507 | int64_t Offset = FrameInfo.getObjectOffset(Index); |
1508 | if (ST.enableFlatScratch()) { |
1509 | if (TII->isFLATScratch(*MI)) { |
1510 | assert((int16_t)FIOperandNum ==
1511 | AMDGPU::getNamedOperandIdx(MI->getOpcode(),
1512 | AMDGPU::OpName::saddr));
1513 | |
1514 | // The offset is always swizzled, just replace it |
1515 | if (FrameReg) |
1516 | FIOp.ChangeToRegister(FrameReg, false); |
1517 | |
1518 | if (!Offset) |
1519 | return; |
1520 | |
1521 | MachineOperand *OffsetOp = |
1522 | TII->getNamedOperand(*MI, AMDGPU::OpName::offset); |
1523 | int64_t NewOffset = Offset + OffsetOp->getImm(); |
1524 | if (TII->isLegalFLATOffset(NewOffset, AMDGPUAS::PRIVATE_ADDRESS, |
1525 | true)) { |
1526 | OffsetOp->setImm(NewOffset); |
1527 | if (FrameReg) |
1528 | return; |
1529 | Offset = 0; |
1530 | } |
1531 | |
1532 | assert(!TII->getNamedOperand(*MI, AMDGPU::OpName::vaddr) &&
1533 | "Unexpected vaddr for flat scratch with a FI operand");
1534 | |
1535 | // On GFX10 we have ST mode, which uses no registers for the address.
1536 | // Otherwise we need to materialize 0 into an SGPR.
1537 | if (!Offset && ST.hasFlatScratchSTMode()) { |
1538 | unsigned Opc = MI->getOpcode(); |
1539 | unsigned NewOpc = AMDGPU::getFlatScratchInstSTfromSS(Opc); |
1540 | MI->RemoveOperand( |
1541 | AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::saddr)); |
1542 | MI->setDesc(TII->get(NewOpc)); |
1543 | return; |
1544 | } |
1545 | } |
1546 | |
1547 | if (!FrameReg) { |
1548 | FIOp.ChangeToImmediate(Offset); |
1549 | if (TII->isImmOperandLegal(*MI, FIOperandNum, FIOp)) |
1550 | return; |
1551 | } |
1552 | |
1553 | // We need to use a register here. Check if we can use an SGPR or need
1554 | // a VGPR.
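     | // M0 here is only a placeholder used to test whether this operand position
     | // accepts an SGPR; FIOp is rewritten to a real register below.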
1555 | FIOp.ChangeToRegister(AMDGPU::M0, false); |
1556 | bool UseSGPR = TII->isOperandLegal(*MI, FIOperandNum, &FIOp); |
1557 | |
1558 | if (!Offset && FrameReg && UseSGPR) { |
1559 | FIOp.setReg(FrameReg); |
1560 | return; |
1561 | } |
1562 | |
1563 | const TargetRegisterClass *RC = UseSGPR ? &AMDGPU::SReg_32_XM0RegClass |
1564 | : &AMDGPU::VGPR_32RegClass; |
1565 | |
1566 | Register TmpReg = RS->scavengeRegister(RC, MI, 0, !UseSGPR); |
1567 | FIOp.setReg(TmpReg); |
1568 | FIOp.setIsKill(true); |
1569 | |
1570 | if ((!FrameReg || !Offset) && TmpReg) { |
1571 | unsigned Opc = UseSGPR ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32; |
1572 | auto MIB = BuildMI(*MBB, MI, DL, TII->get(Opc), TmpReg); |
1573 | if (FrameReg) |
1574 | MIB.addReg(FrameReg); |
1575 | else |
1576 | MIB.addImm(Offset); |
1577 | |
1578 | return; |
1579 | } |
1580 | |
1581 | Register TmpSReg = |
1582 | UseSGPR ? TmpReg |
1583 | : RS->scavengeRegister(&AMDGPU::SReg_32_XM0RegClass, MI, 0, |
1584 | !UseSGPR); |
1585 | |
1586 | // TODO: for flat scratch another attempt can be made with a VGPR index |
1587 | // if no SGPRs can be scavenged. |
1588 | if ((!TmpSReg && !FrameReg) || (!TmpReg && !UseSGPR)) |
1589 | report_fatal_error("Cannot scavenge register in FI elimination!"); |
1590 | |
1591 | if (!TmpSReg) { |
1592 | // Use frame register and restore it after. |
1593 | TmpSReg = FrameReg; |
1594 | FIOp.setReg(FrameReg); |
1595 | FIOp.setIsKill(false); |
1596 | } |
1597 | |
1598 | BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), TmpSReg) |
1599 | .addReg(FrameReg) |
1600 | .addImm(Offset); |
1601 | |
1602 | if (!UseSGPR) |
1603 | BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg) |
1604 | .addReg(TmpSReg, RegState::Kill); |
1605 | |
1606 | if (TmpSReg == FrameReg) { |
1607 | // Undo frame register modification. |
1608 | BuildMI(*MBB, std::next(MI), DL, TII->get(AMDGPU::S_SUB_U32), |
1609 | FrameReg) |
1610 | .addReg(FrameReg) |
1611 | .addImm(Offset); |
1612 | } |
1613 | |
1614 | return; |
1615 | } |
1616 | |
1617 | bool IsMUBUF = TII->isMUBUF(*MI); |
1618 | |
1619 | if (!IsMUBUF && !MFI->isEntryFunction()) { |
1620 | // Convert to a swizzled stack address by scaling by the wave size. |
1621 | // |
1622 | // In an entry function/kernel the offset is already swizzled. |
1623 | |
1624 | bool IsCopy = MI->getOpcode() == AMDGPU::V_MOV_B32_e32; |
1625 | Register ResultReg = |
1626 | IsCopy ? MI->getOperand(0).getReg() |
1627 | : RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0); |
1628 | |
1629 | int64_t Offset = FrameInfo.getObjectOffset(Index); |
1630 | if (Offset == 0) { |
1631 | // XXX - This never happens because of emergency scavenging slot at 0? |
1632 | BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64), ResultReg) |
1633 | .addImm(ST.getWavefrontSizeLog2()) |
1634 | .addReg(FrameReg); |
1635 | } else { |
1636 | if (auto MIB = TII->getAddNoCarry(*MBB, MI, DL, ResultReg, *RS)) { |
1637 | // Reuse ResultReg in intermediate step. |
1638 | Register ScaledReg = ResultReg; |
1639 | |
1640 | BuildMI(*MBB, *MIB, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64), |
1641 | ScaledReg) |
1642 | .addImm(ST.getWavefrontSizeLog2()) |
1643 | .addReg(FrameReg); |
1644 | |
1645 | const bool IsVOP2 = MIB->getOpcode() == AMDGPU::V_ADD_U32_e32; |
1646 | |
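     | // The VOP2 add can encode the offset as a literal; the VOP3 carry-out form
     | // is only given an inline immediate here, so larger offsets are materialized
     | // into the unused carry-out SGPR below.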
1647 | // TODO: Fold if use instruction is another add of a constant. |
1648 | if (IsVOP2 || AMDGPU::isInlinableLiteral32(Offset, ST.hasInv2PiInlineImm())) { |
1649 | // FIXME: This can fail |
1650 | MIB.addImm(Offset); |
1651 | MIB.addReg(ScaledReg, RegState::Kill); |
1652 | if (!IsVOP2) |
1653 | MIB.addImm(0); // clamp bit |
1654 | } else { |
1655 | assert(MIB->getOpcode() == AMDGPU::V_ADD_CO_U32_e64 &&
1656 | "Need to reuse carry out register");
1657 | |
1658 | // Use scavenged unused carry out as offset register. |
1659 | Register ConstOffsetReg; |
1660 | if (!isWave32) |
1661 | ConstOffsetReg = getSubReg(MIB.getReg(1), AMDGPU::sub0); |
1662 | else |
1663 | ConstOffsetReg = MIB.getReg(1); |
1664 | |
1665 | BuildMI(*MBB, *MIB, DL, TII->get(AMDGPU::S_MOV_B32), ConstOffsetReg) |
1666 | .addImm(Offset); |
1667 | MIB.addReg(ConstOffsetReg, RegState::Kill); |
1668 | MIB.addReg(ScaledReg, RegState::Kill); |
1669 | MIB.addImm(0); // clamp bit |
1670 | } |
1671 | } else { |
1672 | // We have to produce a carry out, and there isn't a free SGPR pair |
1673 | // for it. We can keep the whole computation on the SALU to avoid |
1674 | // clobbering an additional register at the cost of an extra mov. |
1675 | |
1676 | // We may have 1 free scratch SGPR even though a carry out is |
1677 | // unavailable. Only one additional mov is needed. |
1678 | Register TmpScaledReg = |
1679 | RS->scavengeRegister(&AMDGPU::SReg_32_XM0RegClass, MI, 0, false); |
1680 | Register ScaledReg = TmpScaledReg.isValid() ? TmpScaledReg : FrameReg; |
1681 | |
1682 | BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_LSHR_B32), ScaledReg) |
1683 | .addReg(FrameReg) |
1684 | .addImm(ST.getWavefrontSizeLog2()); |
1685 | BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), ScaledReg) |
1686 | .addReg(ScaledReg, RegState::Kill) |
1687 | .addImm(Offset); |
1688 | BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), ResultReg) |
1689 | .addReg(ScaledReg, RegState::Kill); |
1690 | |
1691 | // If there were truly no free SGPRs, we need to undo everything. |
1692 | if (!TmpScaledReg.isValid()) { |
1693 | BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_SUB_U32), ScaledReg) |
1694 | .addReg(ScaledReg, RegState::Kill) |
1695 | .addImm(Offset); |
1696 | BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_LSHL_B32), ScaledReg) |
1697 | .addReg(FrameReg) |
1698 | .addImm(ST.getWavefrontSizeLog2()); |
1699 | } |
1700 | } |
1701 | } |
1702 | |
1703 | // Don't introduce an extra copy if we're just materializing in a mov. |
1704 | if (IsCopy) |
1705 | MI->eraseFromParent(); |
1706 | else |
1707 | FIOp.ChangeToRegister(ResultReg, false, false, true); |
1708 | return; |
1709 | } |
1710 | |
1711 | if (IsMUBUF) { |
1712 | // Disable offen so we don't need a 0 vgpr base. |
1713 | assert(static_cast<int>(FIOperandNum) ==
1714 | AMDGPU::getNamedOperandIdx(MI->getOpcode(),
1715 | AMDGPU::OpName::vaddr));
1716 | |
1717 | auto &SOffset = *TII->getNamedOperand(*MI, AMDGPU::OpName::soffset); |
1718 | assert((SOffset.isImm() && SOffset.getImm() == 0));
1719 | |
1720 | if (FrameReg != AMDGPU::NoRegister) |
1721 | SOffset.ChangeToRegister(FrameReg, false); |
1722 | |
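     | // Fold the frame object's offset into the instruction's immediate field if
     | // the combined value is still a legal MUBUF immediate offset.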
1723 | int64_t Offset = FrameInfo.getObjectOffset(Index); |
1724 | int64_t OldImm |
1725 | = TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(); |
1726 | int64_t NewOffset = OldImm + Offset; |
1727 | |
1728 | if (SIInstrInfo::isLegalMUBUFImmOffset(NewOffset) && |
1729 | buildMUBUFOffsetLoadStore(ST, FrameInfo, MI, Index, NewOffset)) { |
1730 | MI->eraseFromParent(); |
1731 | return; |
1732 | } |
1733 | } |
1734 | |
1735 | // If the offset is simply too big, don't convert to a scratch wave offset |
1736 | // relative index. |
1737 | |
1738 | FIOp.ChangeToImmediate(Offset); |
1739 | if (!TII->isImmOperandLegal(*MI, FIOperandNum, FIOp)) { |
1740 | Register TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0); |
1741 | BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg) |
1742 | .addImm(Offset); |
1743 | FIOp.ChangeToRegister(TmpReg, false, false, true); |
1744 | } |
1745 | } |
1746 | } |
1747 | } |
1748 | |
1749 | StringRef SIRegisterInfo::getRegAsmName(MCRegister Reg) const { |
1750 | return AMDGPUInstPrinter::getRegisterName(Reg); |
1751 | } |
1752 | |
1753 | static const TargetRegisterClass * |
1754 | getAnyVGPRClassForBitWidth(unsigned BitWidth) { |
1755 | if (BitWidth <= 64) |
1756 | return &AMDGPU::VReg_64RegClass; |
1757 | if (BitWidth <= 96) |
1758 | return &AMDGPU::VReg_96RegClass; |
1759 | if (BitWidth <= 128) |
1760 | return &AMDGPU::VReg_128RegClass; |
1761 | if (BitWidth <= 160) |
1762 | return &AMDGPU::VReg_160RegClass; |
1763 | if (BitWidth <= 192) |
1764 | return &AMDGPU::VReg_192RegClass; |
1765 | if (BitWidth <= 256) |
1766 | return &AMDGPU::VReg_256RegClass; |
1767 | if (BitWidth <= 512) |
1768 | return &AMDGPU::VReg_512RegClass; |
1769 | if (BitWidth <= 1024) |
1770 | return &AMDGPU::VReg_1024RegClass; |
1771 | |
1772 | return nullptr; |
1773 | } |
1774 | |
1775 | static const TargetRegisterClass * |
1776 | getAlignedVGPRClassForBitWidth(unsigned BitWidth) { |
1777 | if (BitWidth <= 64) |
1778 | return &AMDGPU::VReg_64_Align2RegClass; |
1779 | if (BitWidth <= 96) |
1780 | return &AMDGPU::VReg_96_Align2RegClass; |
1781 | if (BitWidth <= 128) |
1782 | return &AMDGPU::VReg_128_Align2RegClass; |
1783 | if (BitWidth <= 160) |
1784 | return &AMDGPU::VReg_160_Align2RegClass; |
1785 | if (BitWidth <= 192) |
1786 | return &AMDGPU::VReg_192_Align2RegClass; |
1787 | if (BitWidth <= 256) |
1788 | return &AMDGPU::VReg_256_Align2RegClass; |
1789 | if (BitWidth <= 512) |
1790 | return &AMDGPU::VReg_512_Align2RegClass; |
1791 | if (BitWidth <= 1024) |
1792 | return &AMDGPU::VReg_1024_Align2RegClass; |
1793 | |
1794 | return nullptr; |
1795 | } |
1796 | |
1797 | const TargetRegisterClass * |
1798 | SIRegisterInfo::getVGPRClassForBitWidth(unsigned BitWidth) const { |
1799 | if (BitWidth == 1) |
1800 | return &AMDGPU::VReg_1RegClass; |
1801 | if (BitWidth <= 16) |
1802 | return &AMDGPU::VGPR_LO16RegClass; |
1803 | if (BitWidth <= 32) |
1804 | return &AMDGPU::VGPR_32RegClass; |
1805 | return ST.needsAlignedVGPRs() ? getAlignedVGPRClassForBitWidth(BitWidth) |
1806 | : getAnyVGPRClassForBitWidth(BitWidth); |
1807 | } |
1808 | |
1809 | static const TargetRegisterClass * |
1810 | getAnyAGPRClassForBitWidth(unsigned BitWidth) { |
1811 | if (BitWidth <= 64) |
1812 | return &AMDGPU::AReg_64RegClass; |
1813 | if (BitWidth <= 96) |
1814 | return &AMDGPU::AReg_96RegClass; |
1815 | if (BitWidth <= 128) |
1816 | return &AMDGPU::AReg_128RegClass; |
1817 | if (BitWidth <= 160) |
1818 | return &AMDGPU::AReg_160RegClass; |
1819 | if (BitWidth <= 192) |
1820 | return &AMDGPU::AReg_192RegClass; |
1821 | if (BitWidth <= 256) |
1822 | return &AMDGPU::AReg_256RegClass; |
1823 | if (BitWidth <= 512) |
1824 | return &AMDGPU::AReg_512RegClass; |
1825 | if (BitWidth <= 1024) |
1826 | return &AMDGPU::AReg_1024RegClass; |
1827 | |
1828 | return nullptr; |
1829 | } |
1830 | |
1831 | static const TargetRegisterClass * |
1832 | getAlignedAGPRClassForBitWidth(unsigned BitWidth) { |
1833 | if (BitWidth <= 64) |
1834 | return &AMDGPU::AReg_64_Align2RegClass; |
1835 | if (BitWidth <= 96) |
1836 | return &AMDGPU::AReg_96_Align2RegClass; |
1837 | if (BitWidth <= 128) |
1838 | return &AMDGPU::AReg_128_Align2RegClass; |
1839 | if (BitWidth <= 160) |
1840 | return &AMDGPU::AReg_160_Align2RegClass; |
1841 | if (BitWidth <= 192) |
1842 | return &AMDGPU::AReg_192_Align2RegClass; |
1843 | if (BitWidth <= 256) |
1844 | return &AMDGPU::AReg_256_Align2RegClass; |
1845 | if (BitWidth <= 512) |
1846 | return &AMDGPU::AReg_512_Align2RegClass; |
1847 | if (BitWidth <= 1024) |
1848 | return &AMDGPU::AReg_1024_Align2RegClass; |
1849 | |
1850 | return nullptr; |
1851 | } |
1852 | |
1853 | const TargetRegisterClass * |
1854 | SIRegisterInfo::getAGPRClassForBitWidth(unsigned BitWidth) const { |
1855 | if (BitWidth <= 16) |
1856 | return &AMDGPU::AGPR_LO16RegClass; |
1857 | if (BitWidth <= 32) |
1858 | return &AMDGPU::AGPR_32RegClass; |
1859 | return ST.needsAlignedVGPRs() ? getAlignedAGPRClassForBitWidth(BitWidth) |
1860 | : getAnyAGPRClassForBitWidth(BitWidth); |
1861 | } |
1862 | |
1863 | const TargetRegisterClass * |
1864 | SIRegisterInfo::getSGPRClassForBitWidth(unsigned BitWidth) { |
1865 | if (BitWidth <= 16) |
1866 | return &AMDGPU::SGPR_LO16RegClass; |
1867 | if (BitWidth <= 32) |
1868 | return &AMDGPU::SReg_32RegClass; |
1869 | if (BitWidth <= 64) |
1870 | return &AMDGPU::SReg_64RegClass; |
1871 | if (BitWidth <= 96) |
1872 | return &AMDGPU::SGPR_96RegClass; |
1873 | if (BitWidth <= 128) |
1874 | return &AMDGPU::SGPR_128RegClass; |
1875 | if (BitWidth <= 160) |
1876 | return &AMDGPU::SGPR_160RegClass; |
1877 | if (BitWidth <= 192) |
1878 | return &AMDGPU::SGPR_192RegClass; |
1879 | if (BitWidth <= 256) |
1880 | return &AMDGPU::SGPR_256RegClass; |
1881 | if (BitWidth <= 512) |
1882 | return &AMDGPU::SGPR_512RegClass; |
1883 | if (BitWidth <= 1024) |
1884 | return &AMDGPU::SGPR_1024RegClass; |
1885 | |
1886 | return nullptr; |
1887 | } |
1888 | |
1889 | // FIXME: This is very slow. It might be worth creating a map from physreg to |
1890 | // register class. |
1891 | const TargetRegisterClass * |
1892 | SIRegisterInfo::getPhysRegClass(MCRegister Reg) const { |
1893 | static const TargetRegisterClass *const BaseClasses[] = { |
1894 | &AMDGPU::VGPR_LO16RegClass, |
1895 | &AMDGPU::VGPR_HI16RegClass, |
1896 | &AMDGPU::SReg_LO16RegClass, |
1897 | &AMDGPU::AGPR_LO16RegClass, |
1898 | &AMDGPU::VGPR_32RegClass, |
1899 | &AMDGPU::SReg_32RegClass, |
1900 | &AMDGPU::AGPR_32RegClass, |
1902 | &AMDGPU::VReg_64_Align2RegClass, |
1903 | &AMDGPU::VReg_64RegClass, |
1904 | &AMDGPU::SReg_64RegClass, |
1905 | &AMDGPU::AReg_64_Align2RegClass, |
1906 | &AMDGPU::AReg_64RegClass, |
1907 | &AMDGPU::VReg_96_Align2RegClass, |
1908 | &AMDGPU::VReg_96RegClass, |
1909 | &AMDGPU::SReg_96RegClass, |
1910 | &AMDGPU::AReg_96_Align2RegClass, |
1911 | &AMDGPU::AReg_96RegClass, |
1912 | &AMDGPU::VReg_128_Align2RegClass, |
1913 | &AMDGPU::VReg_128RegClass, |
1914 | &AMDGPU::SReg_128RegClass, |
1915 | &AMDGPU::AReg_128_Align2RegClass, |
1916 | &AMDGPU::AReg_128RegClass, |
1917 | &AMDGPU::VReg_160_Align2RegClass, |
1918 | &AMDGPU::VReg_160RegClass, |
1919 | &AMDGPU::SReg_160RegClass, |
1920 | &AMDGPU::AReg_160_Align2RegClass, |
1921 | &AMDGPU::AReg_160RegClass, |
1922 | &AMDGPU::VReg_192_Align2RegClass, |
1923 | &AMDGPU::VReg_192RegClass, |
1924 | &AMDGPU::SReg_192RegClass, |
1925 | &AMDGPU::AReg_192_Align2RegClass, |
1926 | &AMDGPU::AReg_192RegClass, |
1927 | &AMDGPU::VReg_256_Align2RegClass, |
1928 | &AMDGPU::VReg_256RegClass, |
1929 | &AMDGPU::SReg_256RegClass, |
1930 | &AMDGPU::AReg_256_Align2RegClass, |
1931 | &AMDGPU::AReg_256RegClass, |
1932 | &AMDGPU::VReg_512_Align2RegClass, |
1933 | &AMDGPU::VReg_512RegClass, |
1934 | &AMDGPU::SReg_512RegClass, |
1935 | &AMDGPU::AReg_512_Align2RegClass, |
1936 | &AMDGPU::AReg_512RegClass, |
1937 | &AMDGPU::SReg_1024RegClass, |
1938 | &AMDGPU::VReg_1024_Align2RegClass, |
1939 | &AMDGPU::VReg_1024RegClass, |
1940 | &AMDGPU::AReg_1024_Align2RegClass, |
1941 | &AMDGPU::AReg_1024RegClass, |
1942 | &AMDGPU::SCC_CLASSRegClass, |
1943 | &AMDGPU::Pseudo_SReg_32RegClass, |
1944 | &AMDGPU::Pseudo_SReg_128RegClass, |
1945 | }; |
1946 | |
1947 | for (const TargetRegisterClass *BaseClass : BaseClasses) { |
1948 | if (BaseClass->contains(Reg)) { |
1949 | return BaseClass; |
1950 | } |
1951 | } |
1952 | return nullptr; |
1953 | } |
1954 | |
1955 | bool SIRegisterInfo::isSGPRReg(const MachineRegisterInfo &MRI, |
1956 | Register Reg) const { |
1957 | const TargetRegisterClass *RC; |
1958 | if (Reg.isVirtual()) |
1959 | RC = MRI.getRegClass(Reg); |
1960 | else |
1961 | RC = getPhysRegClass(Reg); |
1962 | return isSGPRClass(RC); |
1963 | } |
1964 | |
1965 | // TODO: It might be helpful to have some target specific flags in |
1966 | // TargetRegisterClass to mark which classes are VGPRs to make this trivial. |
1967 | bool SIRegisterInfo::hasVGPRs(const TargetRegisterClass *RC) const { |
1968 | unsigned Size = getRegSizeInBits(*RC); |
1969 | if (Size == 16) { |
1970 | return getCommonSubClass(&AMDGPU::VGPR_LO16RegClass, RC) != nullptr || |
1971 | getCommonSubClass(&AMDGPU::VGPR_HI16RegClass, RC) != nullptr; |
1972 | } |
1973 | const TargetRegisterClass *VRC = getVGPRClassForBitWidth(Size); |
1974 | if (!VRC) { |
1975 | assert(Size < 32 && "Invalid register class size");
1976 | return false; |
1977 | } |
1978 | return getCommonSubClass(VRC, RC) != nullptr; |
1979 | } |
1980 | |
1981 | bool SIRegisterInfo::hasAGPRs(const TargetRegisterClass *RC) const { |
1982 | unsigned Size = getRegSizeInBits(*RC); |
1983 | if (Size < 16) |
1984 | return false; |
1985 | const TargetRegisterClass *ARC = getAGPRClassForBitWidth(Size); |
1986 | if (!ARC) { |
1987 | assert(getVGPRClassForBitWidth(Size) && "Invalid register class size");
1988 | return false; |
1989 | } |
1990 | return getCommonSubClass(ARC, RC) != nullptr; |
1991 | } |
1992 | |
1993 | const TargetRegisterClass * |
1994 | SIRegisterInfo::getEquivalentVGPRClass(const TargetRegisterClass *SRC) const { |
1995 | unsigned Size = getRegSizeInBits(*SRC); |
1996 | const TargetRegisterClass *VRC = getVGPRClassForBitWidth(Size); |
1997 | assert(VRC && "Invalid register class size");
1998 | return VRC; |
1999 | } |
2000 | |
2001 | const TargetRegisterClass * |
2002 | SIRegisterInfo::getEquivalentAGPRClass(const TargetRegisterClass *SRC) const { |
2003 | unsigned Size = getRegSizeInBits(*SRC); |
2004 | const TargetRegisterClass *ARC = getAGPRClassForBitWidth(Size); |
2005 | assert(ARC && "Invalid register class size");
2006 | return ARC; |
2007 | } |
2008 | |
2009 | const TargetRegisterClass * |
2010 | SIRegisterInfo::getEquivalentSGPRClass(const TargetRegisterClass *VRC) const { |
2011 | unsigned Size = getRegSizeInBits(*VRC); |
2012 | if (Size == 32) |
2013 | return &AMDGPU::SGPR_32RegClass; |
2014 | const TargetRegisterClass *SRC = getSGPRClassForBitWidth(Size); |
2015 | assert(SRC && "Invalid register class size");
2016 | return SRC; |
2017 | } |
2018 | |
2019 | const TargetRegisterClass *SIRegisterInfo::getSubRegClass( |
2020 | const TargetRegisterClass *RC, unsigned SubIdx) const { |
2021 | if (SubIdx == AMDGPU::NoSubRegister) |
2022 | return RC; |
2023 | |
2024 | // We can assume that each lane corresponds to one 32-bit register. |
2025 | unsigned Size = getNumChannelsFromSubReg(SubIdx) * 32; |
2026 | if (isSGPRClass(RC)) { |
2027 | if (Size == 32) |
2028 | RC = &AMDGPU::SGPR_32RegClass; |
2029 | else |
2030 | RC = getSGPRClassForBitWidth(Size); |
2031 | } else if (hasAGPRs(RC)) { |
2032 | RC = getAGPRClassForBitWidth(Size); |
2033 | } else { |
2034 | RC = getVGPRClassForBitWidth(Size); |
2035 | } |
2036 | assert(RC && "Invalid sub-register class size");
2037 | return RC; |
2038 | } |
2039 | |
2040 | const TargetRegisterClass * |
2041 | SIRegisterInfo::getCompatibleSubRegClass(const TargetRegisterClass *SuperRC, |
2042 | const TargetRegisterClass *SubRC, |
2043 | unsigned SubIdx) const { |
2044 | // Ensure this subregister index is aligned in the super register. |
2045 | const TargetRegisterClass *MatchRC = |
2046 | getMatchingSuperRegClass(SuperRC, SubRC, SubIdx); |
2047 | return MatchRC && MatchRC->hasSubClassEq(SuperRC) ? MatchRC : nullptr; |
2048 | } |
2049 | |
2050 | bool SIRegisterInfo::opCanUseInlineConstant(unsigned OpType) const { |
2051 | if (OpType >= AMDGPU::OPERAND_REG_INLINE_AC_FIRST && |
2052 | OpType <= AMDGPU::OPERAND_REG_INLINE_AC_LAST) |
2053 | return !ST.hasMFMAInlineLiteralBug(); |
2054 | |
2055 | return OpType >= AMDGPU::OPERAND_SRC_FIRST && |
2056 | OpType <= AMDGPU::OPERAND_SRC_LAST; |
2057 | } |
2058 | |
2059 | bool SIRegisterInfo::shouldRewriteCopySrc( |
2060 | const TargetRegisterClass *DefRC, |
2061 | unsigned DefSubReg, |
2062 | const TargetRegisterClass *SrcRC, |
2063 | unsigned SrcSubReg) const { |
2064 | // We want to prefer the smallest register class possible, so we don't want to |
2065 | // stop and rewrite on anything that looks like a subregister |
2066 | // extract. Operations mostly don't care about the super register class, so we |
2067 | // only want to stop on the most basic of copies between the same register |
2068 | // class. |
2069 | // |
2070 | // e.g. if we have something like |
2071 | // %0 = ... |
2072 | // %1 = ... |
2073 | // %2 = REG_SEQUENCE %0, sub0, %1, sub1, %2, sub2 |
2074 | // %3 = COPY %2, sub0 |
2075 | // |
2076 | // We want to look through the COPY to find: |
2077 | // => %3 = COPY %0 |
2078 | |
2079 | // Plain copy. |
2080 | return getCommonSubClass(DefRC, SrcRC) != nullptr; |
2081 | } |
2082 | |
2083 | bool SIRegisterInfo::opCanUseLiteralConstant(unsigned OpType) const { |
2084 | // TODO: 64-bit operands have extending behavior from 32-bit literal. |
2085 | return OpType >= AMDGPU::OPERAND_REG_IMM_FIRST && |
2086 | OpType <= AMDGPU::OPERAND_REG_IMM_LAST; |
2087 | } |
2088 | |
2089 | /// Returns the lowest register that is not used at any point in the function.
2090 | /// If all registers are used, then this function will return
2091 | /// AMDGPU::NoRegister. If \p ReserveHighestVGPR = true, then return the
2092 | /// highest unused register.
2093 | MCRegister SIRegisterInfo::findUnusedRegister(const MachineRegisterInfo &MRI, |
2094 | const TargetRegisterClass *RC, |
2095 | const MachineFunction &MF, |
2096 | bool ReserveHighestVGPR) const { |
2097 | if (ReserveHighestVGPR) { |
2098 | for (MCRegister Reg : reverse(*RC)) |
2099 | if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg)) |
2100 | return Reg; |
2101 | } else { |
2102 | for (MCRegister Reg : *RC) |
2103 | if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg)) |
2104 | return Reg; |
2105 | } |
2106 | return MCRegister(); |
2107 | } |
2108 | |
2109 | ArrayRef<int16_t> SIRegisterInfo::getRegSplitParts(const TargetRegisterClass *RC, |
2110 | unsigned EltSize) const { |
2111 | const unsigned RegBitWidth = AMDGPU::getRegBitWidth(*RC->MC); |
2112 | assert(RegBitWidth >= 32 && RegBitWidth <= 1024);
2113 | |
2114 | const unsigned RegDWORDs = RegBitWidth / 32; |
2115 | const unsigned EltDWORDs = EltSize / 4; |
2116 | assert(RegSplitParts.size() + 1 >= EltDWORDs);
2117 | |
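     | // RegSplitParts[N-1] holds the sub-register indices that split a register
     | // into N-dword pieces; only the first RegDWORDs / EltDWORDs entries apply
     | // to this register class.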
2118 | const std::vector<int16_t> &Parts = RegSplitParts[EltDWORDs - 1]; |
2119 | const unsigned NumParts = RegDWORDs / EltDWORDs; |
2120 | |
2121 | return makeArrayRef(Parts.data(), NumParts); |
2122 | } |
2123 | |
2124 | const TargetRegisterClass* |
2125 | SIRegisterInfo::getRegClassForReg(const MachineRegisterInfo &MRI, |
2126 | Register Reg) const { |
2127 | return Reg.isVirtual() ? MRI.getRegClass(Reg) : getPhysRegClass(Reg); |
2128 | } |
2129 | |
2130 | bool SIRegisterInfo::isVGPR(const MachineRegisterInfo &MRI, |
2131 | Register Reg) const { |
2132 | const TargetRegisterClass *RC = getRegClassForReg(MRI, Reg); |
2133 | // Registers without classes are unaddressable, SGPR-like registers. |
2134 | return RC && hasVGPRs(RC); |
2135 | } |
2136 | |
2137 | bool SIRegisterInfo::isAGPR(const MachineRegisterInfo &MRI, |
2138 | Register Reg) const { |
2139 | const TargetRegisterClass *RC = getRegClassForReg(MRI, Reg); |
2140 | |
2141 | // Registers without classes are unaddressable, SGPR-like registers. |
2142 | return RC && hasAGPRs(RC); |
2143 | } |
2144 | |
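/// Called by the register coalescer to decide whether joining across \p MI is
/// allowed; the checks below only reject merges that would require a wider
/// register class than either input already uses.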
2145 | bool SIRegisterInfo::shouldCoalesce(MachineInstr *MI, |
2146 | const TargetRegisterClass *SrcRC, |
2147 | unsigned SubReg, |
2148 | const TargetRegisterClass *DstRC, |
2149 | unsigned DstSubReg, |
2150 | const TargetRegisterClass *NewRC, |
2151 | LiveIntervals &LIS) const { |
2152 | unsigned SrcSize = getRegSizeInBits(*SrcRC); |
2153 | unsigned DstSize = getRegSizeInBits(*DstRC); |
2154 | unsigned NewSize = getRegSizeInBits(*NewRC); |
2155 | |
2156 | // Do not increase the size of a register beyond a dword; we would need to
2157 | // allocate adjacent registers and constrain regalloc more than needed.
2158 | |
2159 | // Always allow dword coalescing. |
2160 | if (SrcSize <= 32 || DstSize <= 32) |
2161 | return true; |
2162 | |
2163 | return NewSize <= DstSize || NewSize <= SrcSize; |
2164 | } |
2165 | |
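/// Return the register pressure limit for \p RC, capping the VGPR and SGPR
/// classes by the occupancy implied by the function's LDS usage.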
2166 | unsigned SIRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC, |
2167 | MachineFunction &MF) const { |
2168 | const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); |
2169 | |
2170 | unsigned Occupancy = ST.getOccupancyWithLocalMemSize(MFI->getLDSSize(), |
2171 | MF.getFunction()); |
2172 | switch (RC->getID()) { |
2173 | default: |
2174 | return AMDGPUGenRegisterInfo::getRegPressureLimit(RC, MF); |
2175 | case AMDGPU::VGPR_32RegClassID: |
2176 | case AMDGPU::VGPR_LO16RegClassID: |
2177 | case AMDGPU::VGPR_HI16RegClassID: |
2178 | return std::min(ST.getMaxNumVGPRs(Occupancy), ST.getMaxNumVGPRs(MF)); |
2179 | case AMDGPU::SGPR_32RegClassID: |
2180 | case AMDGPU::SGPR_LO16RegClassID: |
2181 | return std::min(ST.getMaxNumSGPRs(Occupancy, true), ST.getMaxNumSGPRs(MF)); |
2182 | } |
2183 | } |
2184 | |
2185 | unsigned SIRegisterInfo::getRegPressureSetLimit(const MachineFunction &MF, |
2186 | unsigned Idx) const { |
2187 | if (Idx == AMDGPU::RegisterPressureSets::VGPR_32 || |
2188 | Idx == AMDGPU::RegisterPressureSets::AGPR_32) |
2189 | return getRegPressureLimit(&AMDGPU::VGPR_32RegClass, |
2190 | const_cast<MachineFunction &>(MF)); |
2191 | |
2192 | if (Idx == AMDGPU::RegisterPressureSets::SReg_32) |
2193 | return getRegPressureLimit(&AMDGPU::SGPR_32RegClass, |
2194 | const_cast<MachineFunction &>(MF)); |
2195 | |
2196 | llvm_unreachable("Unexpected register pressure set!");
2197 | } |
2198 | |
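/// Report no pressure sets for register units marked as ignored for pressure
/// tracking (see RegPressureIgnoredUnits); all other units defer to the
/// generated tables.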
2199 | const int *SIRegisterInfo::getRegUnitPressureSets(unsigned RegUnit) const { |
2200 | static const int Empty[] = { -1 }; |
2201 | |
2202 | if (RegPressureIgnoredUnits[RegUnit]) |
2203 | return Empty; |
2204 | |
2205 | return AMDGPUGenRegisterInfo::getRegUnitPressureSets(RegUnit); |
2206 | } |
2207 | |
2208 | MCRegister SIRegisterInfo::getReturnAddressReg(const MachineFunction &MF) const { |
2209 | // Not a callee-saved register.
2210 | return AMDGPU::SGPR30_SGPR31; |
2211 | } |
2212 | |
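/// Pick a register class for a value of \p Size bits on register bank \p RB.
/// The VCC bank is wave-size dependent and always maps to a 1-bit condition
/// register class.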
2213 | const TargetRegisterClass * |
2214 | SIRegisterInfo::getRegClassForSizeOnBank(unsigned Size, |
2215 | const RegisterBank &RB, |
2216 | const MachineRegisterInfo &MRI) const { |
2217 | switch (RB.getID()) { |
2218 | case AMDGPU::VGPRRegBankID: |
2219 | return getVGPRClassForBitWidth(std::max(32u, Size)); |
2220 | case AMDGPU::VCCRegBankID: |
2221 | assert(Size == 1);
2222 | return isWave32 ? &AMDGPU::SReg_32_XM0_XEXECRegClass |
2223 | : &AMDGPU::SReg_64_XEXECRegClass; |
2224 | case AMDGPU::SGPRRegBankID: |
2225 | return getSGPRClassForBitWidth(std::max(32u, Size)); |
2226 | case AMDGPU::AGPRRegBankID: |
2227 | return getAGPRClassForBitWidth(std::max(32u, Size)); |
2228 | default: |
2229 | llvm_unreachable("unknown register bank");
2230 | } |
2231 | } |
2232 | |
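/// If the operand's register is still only assigned a register bank, derive a
/// class from its LLT type and bank; otherwise return the allocatable variant
/// of the class it already has.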
2233 | const TargetRegisterClass * |
2234 | SIRegisterInfo::getConstrainedRegClassForOperand(const MachineOperand &MO, |
2235 | const MachineRegisterInfo &MRI) const { |
2236 | const RegClassOrRegBank &RCOrRB = MRI.getRegClassOrRegBank(MO.getReg()); |
2237 | if (const RegisterBank *RB = RCOrRB.dyn_cast<const RegisterBank*>()) |
2238 | return getRegClassForTypeOnBank(MRI.getType(MO.getReg()), *RB, MRI); |
2239 | |
2240 | const TargetRegisterClass *RC = RCOrRB.get<const TargetRegisterClass*>(); |
2241 | return getAllocatableClass(RC); |
2242 | } |
2243 | |
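/// Return the condition register: VCC_LO in wave32 mode, the full VCC pair
/// otherwise.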
2244 | MCRegister SIRegisterInfo::getVCC() const { |
2245 | return isWave32 ? AMDGPU::VCC_LO : AMDGPU::VCC; |
2246 | } |
2247 | |
2248 | const TargetRegisterClass *SIRegisterInfo::getVGPR64Class() const { |
2249 | // VGPR tuples have an alignment requirement on gfx90a variants. |
2250 | return ST.needsAlignedVGPRs() ? &AMDGPU::VReg_64_Align2RegClass |
2251 | : &AMDGPU::VReg_64RegClass; |
2252 | } |
2253 | |
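/// Resolve register class IDs coming from instruction definitions, mapping
/// the wave-size dependent SReg_1 classes to concrete classes and treating
/// -1 as "no class".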
2254 | const TargetRegisterClass * |
2255 | SIRegisterInfo::getRegClass(unsigned RCID) const { |
2256 | switch ((int)RCID) { |
2257 | case AMDGPU::SReg_1RegClassID: |
2258 | return getBoolRC(); |
2259 | case AMDGPU::SReg_1_XEXECRegClassID: |
2260 | return isWave32 ? &AMDGPU::SReg_32_XM0_XEXECRegClass |
2261 | : &AMDGPU::SReg_64_XEXECRegClass; |
2262 | case -1: |
2263 | return nullptr; |
2264 | default: |
2265 | return AMDGPUGenRegisterInfo::getRegClass(RCID); |
2266 | } |
2267 | } |
2268 | |
2269 | // Find the definition of Reg (or the given SubReg of it) that reaches Use.
2270 | MachineInstr *SIRegisterInfo::findReachingDef(Register Reg, unsigned SubReg, |
2271 | MachineInstr &Use, |
2272 | MachineRegisterInfo &MRI, |
2273 | LiveIntervals *LIS) const { |
2274 | auto &MDT = LIS->getAnalysis<MachineDominatorTree>(); |
2275 | SlotIndex UseIdx = LIS->getInstructionIndex(Use); |
2276 | SlotIndex DefIdx; |
2277 | |
2278 | if (Reg.isVirtual()) { |
2279 | if (!LIS->hasInterval(Reg)) |
2280 | return nullptr; |
2281 | LiveInterval &LI = LIS->getInterval(Reg); |
2282 | LaneBitmask SubLanes = SubReg ? getSubRegIndexLaneMask(SubReg) |
2283 | : MRI.getMaxLaneMaskForVReg(Reg); |
2284 | VNInfo *V = nullptr; |
2285 | if (LI.hasSubRanges()) { |
2286 | for (auto &S : LI.subranges()) { |
2287 | if ((S.LaneMask & SubLanes) == SubLanes) { |
2288 | V = S.getVNInfoAt(UseIdx); |
2289 | break; |
2290 | } |
2291 | } |
2292 | } else { |
2293 | V = LI.getVNInfoAt(UseIdx); |
2294 | } |
2295 | if (!V) |
2296 | return nullptr; |
2297 | DefIdx = V->def; |
2298 | } else { |
2299 | // Find last def. |
2300 | for (MCRegUnitIterator Units(Reg.asMCReg(), this); Units.isValid(); |
2301 | ++Units) { |
2302 | LiveRange &LR = LIS->getRegUnit(*Units); |
2303 | if (VNInfo *V = LR.getVNInfoAt(UseIdx)) { |
2304 | if (!DefIdx.isValid() || |
2305 | MDT.dominates(LIS->getInstructionFromIndex(DefIdx), |
2306 | LIS->getInstructionFromIndex(V->def))) |
2307 | DefIdx = V->def; |
2308 | } else { |
2309 | return nullptr; |
2310 | } |
2311 | } |
2312 | } |
2313 | |
2314 | MachineInstr *Def = LIS->getInstructionFromIndex(DefIdx); |
2315 | |
2316 | if (!Def || !MDT.dominates(Def, &Use)) |
2317 | return nullptr; |
2318 | |
2319 | assert(Def->modifiesRegister(Reg, this));
2320 | |
2321 | return Def; |
2322 | } |
2323 | |
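/// Map a 16-bit register (a lo16 or hi16 half) to the 32-bit VGPR, SGPR, or
/// AGPR that contains it, or AMDGPU::NoRegister if there is none.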
2324 | MCPhysReg SIRegisterInfo::get32BitRegister(MCPhysReg Reg) const { |
2325 | assert(getRegSizeInBits(*getPhysRegClass(Reg)) <= 32);
2326 | |
2327 | for (const TargetRegisterClass &RC : { AMDGPU::VGPR_32RegClass, |
2328 | AMDGPU::SReg_32RegClass, |
2329 | AMDGPU::AGPR_32RegClass } ) { |
2330 | if (MCPhysReg Super = getMatchingSuperReg(Reg, AMDGPU::lo16, &RC)) |
2331 | return Super; |
2332 | } |
2333 | if (MCPhysReg Super = getMatchingSuperReg(Reg, AMDGPU::hi16, |
2334 | &AMDGPU::VGPR_32RegClass)) { |
2335 | return Super; |
2336 | } |
2337 | |
2338 | return AMDGPU::NoRegister; |
2339 | } |
2340 | |
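/// On subtargets that require aligned VGPR/AGPR tuples, check that \p RC is
/// one of the aligned class variants; other classes are always acceptable.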
2341 | bool SIRegisterInfo::isProperlyAlignedRC(const TargetRegisterClass &RC) const { |
2342 | if (!ST.needsAlignedVGPRs()) |
2343 | return true; |
2344 | |
2345 | if (hasVGPRs(&RC)) |
2346 | return RC.hasSuperClassEq(getVGPRClassForBitWidth(getRegSizeInBits(RC))); |
2347 | if (hasAGPRs(&RC)) |
2348 | return RC.hasSuperClassEq(getAGPRClassForBitWidth(getRegSizeInBits(RC))); |
2349 | |
2350 | return true; |
2351 | } |
2352 | |
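/// These physical registers hold values that do not change during a function:
/// the null register and the shared/private aperture base and limit registers.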
2353 | bool SIRegisterInfo::isConstantPhysReg(MCRegister PhysReg) const { |
2354 | switch (PhysReg) { |
2355 | case AMDGPU::SGPR_NULL: |
2356 | case AMDGPU::SRC_SHARED_BASE: |
2357 | case AMDGPU::SRC_PRIVATE_BASE: |
2358 | case AMDGPU::SRC_SHARED_LIMIT: |
2359 | case AMDGPU::SRC_PRIVATE_LIMIT: |
2360 | return true; |
2361 | default: |
2362 | return false; |
2363 | } |
2364 | } |
2365 | |
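/// The getAllSGPR* helpers below return the SGPR tuples (128-bit, 64-bit) or
/// single SGPRs that fit within the subtarget's SGPR budget for \p MF, which
/// is why the count is divided by the tuple width in registers.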
2366 | ArrayRef<MCPhysReg> |
2367 | SIRegisterInfo::getAllSGPR128(const MachineFunction &MF) const { |
2368 | return makeArrayRef(AMDGPU::SGPR_128RegClass.begin(), |
2369 | ST.getMaxNumSGPRs(MF) / 4); |
2370 | } |
2371 | |
2372 | ArrayRef<MCPhysReg> |
2373 | SIRegisterInfo::getAllSGPR64(const MachineFunction &MF) const { |
2374 | return makeArrayRef(AMDGPU::SGPR_64RegClass.begin(), |
2375 | ST.getMaxNumSGPRs(MF) / 2); |
2376 | } |
2377 | |
2378 | ArrayRef<MCPhysReg> |
2379 | SIRegisterInfo::getAllSGPR32(const MachineFunction &MF) const { |
2380 | return makeArrayRef(AMDGPU::SGPR_32RegClass.begin(), ST.getMaxNumSGPRs(MF)); |
2381 | } |