File: llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
Warning: line 1215, column 5: Value stored to 'Desc' is never read
1 | //===-- SIRegisterInfo.cpp - SI Register Information ---------------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | /// \file |
10 | /// SI implementation of the TargetRegisterInfo class. |
11 | // |
12 | //===----------------------------------------------------------------------===// |
13 | |
14 | #include "SIRegisterInfo.h" |
15 | #include "AMDGPU.h" |
16 | #include "AMDGPURegisterBankInfo.h" |
17 | #include "GCNSubtarget.h" |
18 | #include "MCTargetDesc/AMDGPUInstPrinter.h" |
19 | #include "MCTargetDesc/AMDGPUMCTargetDesc.h" |
20 | #include "SIMachineFunctionInfo.h" |
21 | #include "llvm/CodeGen/LiveIntervals.h" |
22 | #include "llvm/CodeGen/MachineDominators.h" |
23 | #include "llvm/CodeGen/RegisterScavenging.h" |
24 | |
25 | using namespace llvm; |
26 | |
27 | #define GET_REGINFO_TARGET_DESC |
28 | #include "AMDGPUGenRegisterInfo.inc" |
29 | |
30 | static cl::opt<bool> EnableSpillSGPRToVGPR( |
31 | "amdgpu-spill-sgpr-to-vgpr", |
32 | cl::desc("Enable spilling SGPRs to VGPRs"), |
33 | cl::ReallyHidden, |
34 | cl::init(true)); |
35 | |
36 | std::array<std::vector<int16_t>, 16> SIRegisterInfo::RegSplitParts; |
37 | std::array<std::array<uint16_t, 32>, 9> SIRegisterInfo::SubRegFromChannelTable; |
38 | |
39 | // Map numbers of DWORDs to indexes in SubRegFromChannelTable. |
40 | // Valid indexes are shifted 1, such that a 0 mapping means unsupported. |
41 | // e.g. for 8 DWORDs (256-bit), SubRegFromChannelTableWidthMap[8] = 8, |
42 | // meaning index 7 in SubRegFromChannelTable. |
43 | static const std::array<unsigned, 17> SubRegFromChannelTableWidthMap = { |
44 | 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 9}; |
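To make the shifted indexing above concrete, here is a small sketch of a lookup; the values follow directly from the comment and the initializer above, nothing else is assumed:

// Looking up the row for an 8-DWORD (256-bit) access:
//   unsigned NumRegIndex = SubRegFromChannelTableWidthMap[8]; // == 8
//   auto &Row = SubRegFromChannelTable[NumRegIndex - 1];      // row index 7
// A width whose map entry is 0 (e.g. 9 DWORDs) is unsupported and must not be queried.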
45 | |
46 | namespace llvm { |
47 | |
48 | // A temporary struct to spill SGPRs. |
49 | // This is mostly to spill SGPRs to memory. Spilling SGPRs into VGPR lanes emits |
50 | // just v_writelane and v_readlane. |
51 | // |
52 | // When spilling to memory, the SGPRs are written into VGPR lanes and the VGPR |
53 | // is saved to scratch (or the other way around for loads). |
54 | // For this, a VGPR is required where the needed lanes can be clobbered. The |
55 | // RegScavenger can provide a VGPR where currently active lanes can be |
56 | // clobbered, but we still need to save inactive lanes. |
57 | // The high-level steps are: |
58 | // - Try to scavenge SGPR(s) to save exec |
59 | // - Try to scavenge VGPR |
60 | // - Save the needed lanes of a TmpVGPR (all lanes or only the inactive ones) |
61 | // - Spill/Restore SGPRs using TmpVGPR |
62 | // - Restore TmpVGPR |
63 | // |
64 | // To save all lanes of TmpVGPR, exec needs to be saved and modified. If we |
65 | // cannot scavenge temporary SGPRs to save exec, we use the following code: |
66 | // buffer_store_dword TmpVGPR ; only if active lanes need to be saved |
67 | // s_not exec, exec |
68 | // buffer_store_dword TmpVGPR ; save inactive lanes |
69 | // s_not exec, exec |
70 | struct SGPRSpillBuilder { |
71 | struct PerVGPRData { |
72 | unsigned PerVGPR; |
73 | unsigned NumVGPRs; |
74 | int64_t VGPRLanes; |
75 | }; |
76 | |
77 | // The SGPR to save |
78 | Register SuperReg; |
79 | MachineBasicBlock::iterator MI; |
80 | ArrayRef<int16_t> SplitParts; |
81 | unsigned NumSubRegs; |
82 | bool IsKill; |
83 | const DebugLoc &DL; |
84 | |
85 | /* When spilling to stack */ |
86 | // The SGPRs are written into this VGPR, which is then written to scratch |
87 | // (or vice versa for loads). |
88 | Register TmpVGPR = AMDGPU::NoRegister; |
89 | // Temporary spill slot to save TmpVGPR to. |
90 | int TmpVGPRIndex = 0; |
91 | // If TmpVGPR is live before the spill or if it is scavenged. |
92 | bool TmpVGPRLive = false; |
93 | // Scavenged SGPR to save EXEC. |
94 | Register SavedExecReg = AMDGPU::NoRegister; |
95 | // Stack index to write the SGPRs to. |
96 | int Index; |
97 | unsigned EltSize = 4; |
98 | |
99 | RegScavenger *RS; |
100 | MachineBasicBlock *MBB; |
101 | MachineFunction &MF; |
102 | SIMachineFunctionInfo &MFI; |
103 | const SIInstrInfo &TII; |
104 | const SIRegisterInfo &TRI; |
105 | bool IsWave32; |
106 | Register ExecReg; |
107 | unsigned MovOpc; |
108 | unsigned NotOpc; |
109 | |
110 | SGPRSpillBuilder(const SIRegisterInfo &TRI, const SIInstrInfo &TII, |
111 | bool IsWave32, MachineBasicBlock::iterator MI, int Index, |
112 | RegScavenger *RS) |
113 | : SGPRSpillBuilder(TRI, TII, IsWave32, MI, MI->getOperand(0).getReg(), |
114 | MI->getOperand(0).isKill(), Index, RS) {} |
115 | |
116 | SGPRSpillBuilder(const SIRegisterInfo &TRI, const SIInstrInfo &TII, |
117 | bool IsWave32, MachineBasicBlock::iterator MI, Register Reg, |
118 | bool IsKill, int Index, RegScavenger *RS) |
119 | : SuperReg(Reg), MI(MI), IsKill(IsKill), DL(MI->getDebugLoc()), |
120 | Index(Index), RS(RS), MBB(MI->getParent()), MF(*MBB->getParent()), |
121 | MFI(*MF.getInfo<SIMachineFunctionInfo>()), TII(TII), TRI(TRI), |
122 | IsWave32(IsWave32) { |
123 | const TargetRegisterClass *RC = TRI.getPhysRegClass(SuperReg); |
124 | SplitParts = TRI.getRegSplitParts(RC, EltSize); |
125 | NumSubRegs = SplitParts.empty() ? 1 : SplitParts.size(); |
126 | |
127 | if (IsWave32) { |
128 | ExecReg = AMDGPU::EXEC_LO; |
129 | MovOpc = AMDGPU::S_MOV_B32; |
130 | NotOpc = AMDGPU::S_NOT_B32; |
131 | } else { |
132 | ExecReg = AMDGPU::EXEC; |
133 | MovOpc = AMDGPU::S_MOV_B64; |
134 | NotOpc = AMDGPU::S_NOT_B64; |
135 | } |
136 | |
137 | assert(SuperReg != AMDGPU::M0 && "m0 should never spill"); |
138 | assert(SuperReg != AMDGPU::EXEC_LO && SuperReg != AMDGPU::EXEC_HI && |
139 | SuperReg != AMDGPU::EXEC && "exec should never spill"); |
140 | } |
141 | |
142 | PerVGPRData getPerVGPRData() { |
143 | PerVGPRData Data; |
144 | Data.PerVGPR = IsWave32 ? 32 : 64; |
145 | Data.NumVGPRs = (NumSubRegs + (Data.PerVGPR - 1)) / Data.PerVGPR; |
146 | Data.VGPRLanes = (1LL << std::min(Data.PerVGPR, NumSubRegs)) - 1LL; |
147 | return Data; |
148 | } |
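A quick worked example of the computation above; the register width is an assumption chosen purely for illustration:

// Spilling an SGPR-96 (NumSubRegs = 3) in wave64:
//   Data.PerVGPR   = 64
//   Data.NumVGPRs  = (3 + 63) / 64 = 1        // one VGPR covers all subregs
//   Data.VGPRLanes = (1LL << 3) - 1 = 0b111   // lanes 0..2 hold the SGPR dwords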
149 | |
150 | // Tries to scavenge SGPRs to save EXEC and a VGPR. Uses v0 if no VGPR is |
151 | // free. |
152 | // Writes these instructions if an SGPR can be scavenged: |
153 | // s_mov_b64 s[6:7], exec ; Save exec |
154 | // s_mov_b64 exec, 3 ; Wanted lanemask |
155 | // buffer_store_dword v1 ; Write scavenged VGPR to emergency slot |
156 | // |
157 | // Writes these instructions if no SGPR can be scavenged: |
158 | // buffer_store_dword v0 ; Only if no free VGPR was found |
159 | // s_not_b64 exec, exec |
160 | // buffer_store_dword v0 ; Save inactive lanes |
161 | // ; exec stays inverted, it is flipped back in |
162 | // ; restore. |
163 | void prepare() { |
164 | // Scavenged temporary VGPR to use. It must be scavenged once for any number |
165 | // of spilled subregs. |
166 | // FIXME: The liveness analysis is limited and does not tell if a register |
167 | // is in use in lanes that are currently inactive. We can never be sure if |
168 | // a register is actually in use in another lane, so we need to save all |
169 | // used lanes of the chosen VGPR. |
170 | assert(RS && "Cannot spill SGPR to memory without RegScavenger"); |
171 | TmpVGPR = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0, false); |
172 | |
173 | // Reserve temporary stack slot |
174 | TmpVGPRIndex = MFI.getScavengeFI(MF.getFrameInfo(), TRI); |
175 | if (TmpVGPR) { |
176 | // Found a register that is dead in the currently active lanes; we only |
177 | // need to spill the inactive lanes. |
178 | TmpVGPRLive = false; |
179 | } else { |
180 | // Pick v0 because it doesn't make a difference. |
181 | TmpVGPR = AMDGPU::VGPR0; |
182 | TmpVGPRLive = true; |
183 | } |
184 | |
185 | // Try to scavenge SGPRs to save exec |
186 | assert(!SavedExecReg && "Exec is already saved, refuse to save again"); |
187 | const TargetRegisterClass &RC = |
188 | IsWave32 ? AMDGPU::SGPR_32RegClass : AMDGPU::SGPR_64RegClass; |
189 | RS->setRegUsed(SuperReg); |
190 | SavedExecReg = RS->scavengeRegister(&RC, MI, 0, false); |
191 | |
192 | int64_t VGPRLanes = getPerVGPRData().VGPRLanes; |
193 | |
194 | if (SavedExecReg) { |
195 | RS->setRegUsed(SavedExecReg); |
196 | // Set exec to needed lanes |
197 | BuildMI(*MBB, MI, DL, TII.get(MovOpc), SavedExecReg).addReg(ExecReg); |
198 | auto I = |
199 | BuildMI(*MBB, MI, DL, TII.get(MovOpc), ExecReg).addImm(VGPRLanes); |
200 | if (!TmpVGPRLive) |
201 | I.addReg(TmpVGPR, RegState::ImplicitDefine); |
202 | // Spill needed lanes |
203 | TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ false); |
204 | } else { |
205 | // Spill active lanes |
206 | if (TmpVGPRLive) |
207 | TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ false, |
208 | /*IsKill*/ false); |
209 | // Spill inactive lanes |
210 | auto I = BuildMI(*MBB, MI, DL, TII.get(NotOpc), ExecReg).addReg(ExecReg); |
211 | if (!TmpVGPRLive) |
212 | I.addReg(TmpVGPR, RegState::ImplicitDefine); |
213 | I->getOperand(2).setIsDead(true); // Mark SCC as dead. |
214 | TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ false); |
215 | } |
216 | } |
217 | |
218 | // Writes these instructions if an SGPR can be scavenged: |
219 | // buffer_load_dword v1 ; Restore scavenged VGPR from emergency slot |
220 | // s_waitcnt vmcnt(0) ; If a free VGPR was found |
221 | // s_mov_b64 exec, s[6:7] ; Restore exec |
222 | // |
223 | // Writes these instructions if no SGPR can be scavenged: |
224 | // buffer_load_dword v0 ; Restore inactive lanes |
225 | // s_waitcnt vmcnt(0) ; If a free VGPR was found |
226 | // s_not_b64 exec, exec |
227 | // buffer_load_dword v0 ; Only if no free VGPR was found |
228 | void restore() { |
229 | if (SavedExecReg) { |
230 | // Restore used lanes |
231 | TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ true, |
232 | /*IsKill*/ false); |
233 | // Restore exec |
234 | auto I = BuildMI(*MBB, MI, DL, TII.get(MovOpc), ExecReg) |
235 | .addReg(SavedExecReg, RegState::Kill); |
236 | // Add an implicit use of the load so it is not dead. |
237 | // FIXME This inserts an unnecessary waitcnt |
238 | if (!TmpVGPRLive) { |
239 | I.addReg(TmpVGPR, RegState::ImplicitKill); |
240 | } |
241 | } else { |
242 | // Restore inactive lanes |
243 | TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ true, |
244 | /*IsKill*/ false); |
245 | auto I = BuildMI(*MBB, MI, DL, TII.get(NotOpc), ExecReg).addReg(ExecReg); |
246 | if (!TmpVGPRLive) |
247 | I.addReg(TmpVGPR, RegState::ImplicitKill); |
248 | I->getOperand(2).setIsDead(true); // Mark SCC as dead. |
249 | |
250 | // Restore active lanes |
251 | if (TmpVGPRLive) |
252 | TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ true); |
253 | } |
254 | } |
255 | |
256 | // Write TmpVGPR to memory or read TmpVGPR from memory. |
257 | // Either using a single buffer_load/store if exec is set to the needed mask |
258 | // or using |
259 | // buffer_load |
260 | // s_not exec, exec |
261 | // buffer_load |
262 | // s_not exec, exec |
263 | void readWriteTmpVGPR(unsigned Offset, bool IsLoad) { |
264 | if (SavedExecReg) { |
265 | // Spill needed lanes |
266 | TRI.buildVGPRSpillLoadStore(*this, Index, Offset, IsLoad); |
267 | } else { |
268 | // Spill active lanes |
269 | TRI.buildVGPRSpillLoadStore(*this, Index, Offset, IsLoad, |
270 | /*IsKill*/ false); |
271 | // Spill inactive lanes |
272 | auto Not0 = BuildMI(*MBB, MI, DL, TII.get(NotOpc), ExecReg).addReg(ExecReg); |
273 | Not0->getOperand(2).setIsDead(); // Mark SCC as dead. |
274 | TRI.buildVGPRSpillLoadStore(*this, Index, Offset, IsLoad); |
275 | auto Not1 = BuildMI(*MBB, MI, DL, TII.get(NotOpc), ExecReg).addReg(ExecReg); |
276 | Not1->getOperand(2).setIsDead(); // Mark SCC as dead. |
277 | } |
278 | } |
279 | |
280 | void setMI(MachineBasicBlock *NewMBB, MachineBasicBlock::iterator NewMI) { |
281 | assert(MBB->getParent() == &MF); |
282 | MI = NewMI; |
283 | MBB = NewMBB; |
284 | } |
285 | }; |
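A hedged sketch of how a spill typically drives this builder; the real call sites appear later in this file, and the writelane step shown here is schematic rather than copied from them:

// SGPRSpillBuilder SB(TRI, TII, IsWave32, MI, Index, RS);
// SB.prepare();                                   // scavenge exec save + TmpVGPR, save lanes as needed
// ... write each SGPR subreg into a lane of SB.TmpVGPR (v_writelane) ...
// SB.readWriteTmpVGPR(Offset, /*IsLoad=*/false);  // store TmpVGPR to the stack slot
// SB.restore();                                   // bring back TmpVGPR lanes and exec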
286 | |
287 | } // namespace llvm |
288 | |
289 | SIRegisterInfo::SIRegisterInfo(const GCNSubtarget &ST) |
290 | : AMDGPUGenRegisterInfo(AMDGPU::PC_REG, ST.getAMDGPUDwarfFlavour()), ST(ST), |
291 | SpillSGPRToVGPR(EnableSpillSGPRToVGPR), isWave32(ST.isWave32()) { |
292 | |
293 | assert(getSubRegIndexLaneMask(AMDGPU::sub0).getAsInteger() == 3 && |
294 | getSubRegIndexLaneMask(AMDGPU::sub31).getAsInteger() == (3ULL << 62) && |
295 | (getSubRegIndexLaneMask(AMDGPU::lo16) | |
296 | getSubRegIndexLaneMask(AMDGPU::hi16)).getAsInteger() == |
297 | getSubRegIndexLaneMask(AMDGPU::sub0).getAsInteger() && |
298 | "getNumCoveredRegs() will not work with generated subreg masks!"); |
299 | |
300 | RegPressureIgnoredUnits.resize(getNumRegUnits()); |
301 | RegPressureIgnoredUnits.set( |
302 | *MCRegUnitIterator(MCRegister::from(AMDGPU::M0), this)); |
303 | for (auto Reg : AMDGPU::VGPR_HI16RegClass) |
304 | RegPressureIgnoredUnits.set(*MCRegUnitIterator(Reg, this)); |
305 | |
306 | // HACK: Until this is fully tablegen'd. |
307 | static llvm::once_flag InitializeRegSplitPartsFlag; |
308 | |
309 | static auto InitializeRegSplitPartsOnce = [this]() { |
310 | for (unsigned Idx = 1, E = getNumSubRegIndices() - 1; Idx < E; ++Idx) { |
311 | unsigned Size = getSubRegIdxSize(Idx); |
312 | if (Size & 31) |
313 | continue; |
314 | std::vector<int16_t> &Vec = RegSplitParts[Size / 32 - 1]; |
315 | unsigned Pos = getSubRegIdxOffset(Idx); |
316 | if (Pos % Size) |
317 | continue; |
318 | Pos /= Size; |
319 | if (Vec.empty()) { |
320 | unsigned MaxNumParts = 1024 / Size; // Maximum register is 1024 bits. |
321 | Vec.resize(MaxNumParts); |
322 | } |
323 | Vec[Pos] = Idx; |
324 | } |
325 | }; |
326 | |
327 | static llvm::once_flag InitializeSubRegFromChannelTableFlag; |
328 | |
329 | static auto InitializeSubRegFromChannelTableOnce = [this]() { |
330 | for (auto &Row : SubRegFromChannelTable) |
331 | Row.fill(AMDGPU::NoSubRegister); |
332 | for (uint16_t Idx = 1; Idx < getNumSubRegIndices(); ++Idx) { |
333 | unsigned Width = AMDGPUSubRegIdxRanges[Idx].Size / 32; |
334 | unsigned Offset = AMDGPUSubRegIdxRanges[Idx].Offset / 32; |
335 | assert(Width < SubRegFromChannelTableWidthMap.size()); |
336 | Width = SubRegFromChannelTableWidthMap[Width]; |
337 | if (Width == 0) |
338 | continue; |
339 | unsigned TableIdx = Width - 1; |
340 | assert(TableIdx < SubRegFromChannelTable.size()); |
341 | assert(Offset < SubRegFromChannelTable[TableIdx].size()); |
342 | SubRegFromChannelTable[TableIdx][Offset] = Idx; |
343 | } |
344 | }; |
345 | |
346 | llvm::call_once(InitializeRegSplitPartsFlag, InitializeRegSplitPartsOnce); |
347 | llvm::call_once(InitializeSubRegFromChannelTableFlag, |
348 | InitializeSubRegFromChannelTableOnce); |
349 | } |
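For orientation, a sketch of what RegSplitParts ends up holding after the lazy initialization above; the concrete subreg names are assumptions based on the standard AMDGPU subregister indices:

// RegSplitParts[0] (32-bit pieces): { sub0, sub1, ..., sub31 } at positions 0..31
// RegSplitParts[1] (64-bit pieces): { sub0_sub1, sub2_sub3, ... } at positions 0..15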
350 | |
351 | void SIRegisterInfo::reserveRegisterTuples(BitVector &Reserved, |
352 | MCRegister Reg) const { |
353 | MCRegAliasIterator R(Reg, this, true); |
354 | |
355 | for (; R.isValid(); ++R) |
356 | Reserved.set(*R); |
357 | } |
358 | |
359 | // Forced to be here by one .inc |
360 | const MCPhysReg *SIRegisterInfo::getCalleeSavedRegs( |
361 | const MachineFunction *MF) const { |
362 | CallingConv::ID CC = MF->getFunction().getCallingConv(); |
363 | switch (CC) { |
364 | case CallingConv::C: |
365 | case CallingConv::Fast: |
366 | case CallingConv::Cold: |
367 | return MF->getSubtarget<GCNSubtarget>().hasGFX90AInsts() |
368 | ? CSR_AMDGPU_HighRegs_With_AGPRs_SaveList |
369 | : CSR_AMDGPU_HighRegs_SaveList; |
370 | case CallingConv::AMDGPU_Gfx: |
371 | return MF->getSubtarget<GCNSubtarget>().hasGFX90AInsts() |
372 | ? CSR_AMDGPU_SI_Gfx_With_AGPRs_SaveList |
373 | : CSR_AMDGPU_SI_Gfx_SaveList; |
374 | default: { |
375 | // Dummy to not crash RegisterClassInfo. |
376 | static const MCPhysReg NoCalleeSavedReg = AMDGPU::NoRegister; |
377 | return &NoCalleeSavedReg; |
378 | } |
379 | } |
380 | } |
381 | |
382 | const MCPhysReg * |
383 | SIRegisterInfo::getCalleeSavedRegsViaCopy(const MachineFunction *MF) const { |
384 | return nullptr; |
385 | } |
386 | |
387 | const uint32_t *SIRegisterInfo::getCallPreservedMask(const MachineFunction &MF, |
388 | CallingConv::ID CC) const { |
389 | switch (CC) { |
390 | case CallingConv::C: |
391 | case CallingConv::Fast: |
392 | case CallingConv::Cold: |
393 | return MF.getSubtarget<GCNSubtarget>().hasGFX90AInsts() |
394 | ? CSR_AMDGPU_HighRegs_With_AGPRs_RegMask |
395 | : CSR_AMDGPU_HighRegs_RegMask; |
396 | case CallingConv::AMDGPU_Gfx: |
397 | return MF.getSubtarget<GCNSubtarget>().hasGFX90AInsts() |
398 | ? CSR_AMDGPU_SI_Gfx_With_AGPRs_RegMask |
399 | : CSR_AMDGPU_SI_Gfx_RegMask; |
400 | default: |
401 | return nullptr; |
402 | } |
403 | } |
404 | |
405 | const uint32_t *SIRegisterInfo::getNoPreservedMask() const { |
406 | return CSR_AMDGPU_NoRegs_RegMask; |
407 | } |
408 | |
409 | const TargetRegisterClass * |
410 | SIRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC, |
411 | const MachineFunction &MF) const { |
412 | // FIXME: Should have a helper function like getEquivalentVGPRClass to get the |
413 | // equivalent AV class. If such a helper were used, the verifier would crash |
414 | // after RegBankSelect in the GISel flow, because the aligned regclasses are |
415 | // not fully available until Instruction selection. |
416 | if (MF.getSubtarget<GCNSubtarget>().hasMAIInsts() && |
417 | (isVGPRClass(RC) || isAGPRClass(RC))) { |
418 | if (RC == &AMDGPU::VGPR_32RegClass || RC == &AMDGPU::AGPR_32RegClass) |
419 | return &AMDGPU::AV_32RegClass; |
420 | if (RC == &AMDGPU::VReg_64RegClass || RC == &AMDGPU::AReg_64RegClass) |
421 | return &AMDGPU::AV_64RegClass; |
422 | if (RC == &AMDGPU::VReg_64_Align2RegClass || |
423 | RC == &AMDGPU::AReg_64_Align2RegClass) |
424 | return &AMDGPU::AV_64_Align2RegClass; |
425 | if (RC == &AMDGPU::VReg_96RegClass || RC == &AMDGPU::AReg_96RegClass) |
426 | return &AMDGPU::AV_96RegClass; |
427 | if (RC == &AMDGPU::VReg_96_Align2RegClass || |
428 | RC == &AMDGPU::AReg_96_Align2RegClass) |
429 | return &AMDGPU::AV_96_Align2RegClass; |
430 | if (RC == &AMDGPU::VReg_128RegClass || RC == &AMDGPU::AReg_128RegClass) |
431 | return &AMDGPU::AV_128RegClass; |
432 | if (RC == &AMDGPU::VReg_128_Align2RegClass || |
433 | RC == &AMDGPU::AReg_128_Align2RegClass) |
434 | return &AMDGPU::AV_128_Align2RegClass; |
435 | if (RC == &AMDGPU::VReg_160RegClass || RC == &AMDGPU::AReg_160RegClass) |
436 | return &AMDGPU::AV_160RegClass; |
437 | if (RC == &AMDGPU::VReg_160_Align2RegClass || |
438 | RC == &AMDGPU::AReg_160_Align2RegClass) |
439 | return &AMDGPU::AV_160_Align2RegClass; |
440 | if (RC == &AMDGPU::VReg_192RegClass || RC == &AMDGPU::AReg_192RegClass) |
441 | return &AMDGPU::AV_192RegClass; |
442 | if (RC == &AMDGPU::VReg_192_Align2RegClass || |
443 | RC == &AMDGPU::AReg_192_Align2RegClass) |
444 | return &AMDGPU::AV_192_Align2RegClass; |
445 | if (RC == &AMDGPU::VReg_256RegClass || RC == &AMDGPU::AReg_256RegClass) |
446 | return &AMDGPU::AV_256RegClass; |
447 | if (RC == &AMDGPU::VReg_256_Align2RegClass || |
448 | RC == &AMDGPU::AReg_256_Align2RegClass) |
449 | return &AMDGPU::AV_256_Align2RegClass; |
450 | if (RC == &AMDGPU::VReg_512RegClass || RC == &AMDGPU::AReg_512RegClass) |
451 | return &AMDGPU::AV_512RegClass; |
452 | if (RC == &AMDGPU::VReg_512_Align2RegClass || |
453 | RC == &AMDGPU::AReg_512_Align2RegClass) |
454 | return &AMDGPU::AV_512_Align2RegClass; |
455 | if (RC == &AMDGPU::VReg_1024RegClass || RC == &AMDGPU::AReg_1024RegClass) |
456 | return &AMDGPU::AV_1024RegClass; |
457 | if (RC == &AMDGPU::VReg_1024_Align2RegClass || |
458 | RC == &AMDGPU::AReg_1024_Align2RegClass) |
459 | return &AMDGPU::AV_1024_Align2RegClass; |
460 | } |
461 | |
462 | return TargetRegisterInfo::getLargestLegalSuperClass(RC, MF); |
463 | } |
464 | |
465 | Register SIRegisterInfo::getFrameRegister(const MachineFunction &MF) const { |
466 | const SIFrameLowering *TFI = |
467 | MF.getSubtarget<GCNSubtarget>().getFrameLowering(); |
468 | const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>(); |
469 | // During ISel lowering we always reserve the stack pointer in entry |
470 | // functions, but never actually want to reference it when accessing our own |
471 | // frame. If we need a frame pointer we use it, but otherwise we can just use |
472 | // an immediate "0" which we represent by returning NoRegister. |
473 | if (FuncInfo->isEntryFunction()) { |
474 | return TFI->hasFP(MF) ? FuncInfo->getFrameOffsetReg() : Register(); |
475 | } |
476 | return TFI->hasFP(MF) ? FuncInfo->getFrameOffsetReg() |
477 | : FuncInfo->getStackPtrOffsetReg(); |
478 | } |
479 | |
480 | bool SIRegisterInfo::hasBasePointer(const MachineFunction &MF) const { |
481 | // When we need stack realignment, we can't reference off of the |
482 | // stack pointer, so we reserve a base pointer. |
483 | const MachineFrameInfo &MFI = MF.getFrameInfo(); |
484 | return MFI.getNumFixedObjects() && shouldRealignStack(MF); |
485 | } |
486 | |
487 | Register SIRegisterInfo::getBaseRegister() const { return AMDGPU::SGPR34; } |
488 | |
489 | const uint32_t *SIRegisterInfo::getAllVGPRRegMask() const { |
490 | return CSR_AMDGPU_AllVGPRs_RegMask; |
491 | } |
492 | |
493 | const uint32_t *SIRegisterInfo::getAllAGPRRegMask() const { |
494 | return CSR_AMDGPU_AllAGPRs_RegMask; |
495 | } |
496 | |
497 | const uint32_t *SIRegisterInfo::getAllVectorRegMask() const { |
498 | return CSR_AMDGPU_AllVectorRegs_RegMask; |
499 | } |
500 | |
501 | const uint32_t *SIRegisterInfo::getAllAllocatableSRegMask() const { |
502 | return CSR_AMDGPU_AllAllocatableSRegs_RegMask; |
503 | } |
504 | |
505 | unsigned SIRegisterInfo::getSubRegFromChannel(unsigned Channel, |
506 | unsigned NumRegs) { |
507 | assert(NumRegs < SubRegFromChannelTableWidthMap.size()); |
508 | unsigned NumRegIndex = SubRegFromChannelTableWidthMap[NumRegs]; |
509 | assert(NumRegIndex && "Not implemented"); |
510 | assert(Channel < SubRegFromChannelTable[NumRegIndex - 1].size()); |
511 | return SubRegFromChannelTable[NumRegIndex - 1][Channel]; |
512 | } |
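Two illustrative calls, assuming the standard AMDGPU subregister index names:

// getSubRegFromChannel(/*Channel=*/0, /*NumRegs=*/1) -> AMDGPU::sub0
// getSubRegFromChannel(/*Channel=*/2, /*NumRegs=*/2) -> AMDGPU::sub2_sub3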
513 | |
514 | MCRegister SIRegisterInfo::reservedPrivateSegmentBufferReg( |
515 | const MachineFunction &MF) const { |
516 | unsigned BaseIdx = alignDown(ST.getMaxNumSGPRs(MF), 4) - 4; |
517 | MCRegister BaseReg(AMDGPU::SGPR_32RegClass.getRegister(BaseIdx)); |
518 | return getMatchingSuperReg(BaseReg, AMDGPU::sub0, &AMDGPU::SGPR_128RegClass); |
519 | } |
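A worked example of the index math above; the SGPR budget of 102 is an assumption picked for illustration:

// With ST.getMaxNumSGPRs(MF) == 102:
//   BaseIdx = alignDown(102, 4) - 4 = 96
//   BaseReg = SGPR96, widened through sub0 to the SGPR_128 tuple s[96:99]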
520 | |
521 | BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const { |
522 | BitVector Reserved(getNumRegs()); |
523 | Reserved.set(AMDGPU::MODE); |
524 | |
525 | // EXEC_LO and EXEC_HI could be allocated and used as regular registers, but |
526 | // this seems likely to result in bugs, so I'm marking them as reserved. |
527 | reserveRegisterTuples(Reserved, AMDGPU::EXEC); |
528 | reserveRegisterTuples(Reserved, AMDGPU::FLAT_SCR); |
529 | |
530 | // M0 has to be reserved so that llvm accepts it as a live-in into a block. |
531 | reserveRegisterTuples(Reserved, AMDGPU::M0); |
532 | |
533 | // Reserve src_vccz, src_execz, src_scc. |
534 | reserveRegisterTuples(Reserved, AMDGPU::SRC_VCCZ); |
535 | reserveRegisterTuples(Reserved, AMDGPU::SRC_EXECZ); |
536 | reserveRegisterTuples(Reserved, AMDGPU::SRC_SCC); |
537 | |
538 | // Reserve the memory aperture registers. |
539 | reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_BASE); |
540 | reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_LIMIT); |
541 | reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_BASE); |
542 | reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_LIMIT); |
543 | |
544 | // Reserve src_pops_exiting_wave_id - support is not implemented in Codegen. |
545 | reserveRegisterTuples(Reserved, AMDGPU::SRC_POPS_EXITING_WAVE_ID); |
546 | |
547 | // Reserve xnack_mask registers - support is not implemented in Codegen. |
548 | reserveRegisterTuples(Reserved, AMDGPU::XNACK_MASK); |
549 | |
550 | // Reserve lds_direct register - support is not implemented in Codegen. |
551 | reserveRegisterTuples(Reserved, AMDGPU::LDS_DIRECT); |
552 | |
553 | // Reserve Trap Handler registers - support is not implemented in Codegen. |
554 | reserveRegisterTuples(Reserved, AMDGPU::TBA); |
555 | reserveRegisterTuples(Reserved, AMDGPU::TMA); |
556 | reserveRegisterTuples(Reserved, AMDGPU::TTMP0_TTMP1); |
557 | reserveRegisterTuples(Reserved, AMDGPU::TTMP2_TTMP3); |
558 | reserveRegisterTuples(Reserved, AMDGPU::TTMP4_TTMP5); |
559 | reserveRegisterTuples(Reserved, AMDGPU::TTMP6_TTMP7); |
560 | reserveRegisterTuples(Reserved, AMDGPU::TTMP8_TTMP9); |
561 | reserveRegisterTuples(Reserved, AMDGPU::TTMP10_TTMP11); |
562 | reserveRegisterTuples(Reserved, AMDGPU::TTMP12_TTMP13); |
563 | reserveRegisterTuples(Reserved, AMDGPU::TTMP14_TTMP15); |
564 | |
565 | // Reserve null register - it shall never be allocated |
566 | reserveRegisterTuples(Reserved, AMDGPU::SGPR_NULL); |
567 | |
568 | // Disallow vcc_hi allocation in wave32. It may be allocated but most likely |
569 | // will result in bugs. |
570 | if (isWave32) { |
571 | Reserved.set(AMDGPU::VCC); |
572 | Reserved.set(AMDGPU::VCC_HI); |
573 | } |
574 | |
575 | unsigned MaxNumSGPRs = ST.getMaxNumSGPRs(MF); |
576 | unsigned TotalNumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs(); |
577 | for (unsigned i = MaxNumSGPRs; i < TotalNumSGPRs; ++i) { |
578 | unsigned Reg = AMDGPU::SGPR_32RegClass.getRegister(i); |
579 | reserveRegisterTuples(Reserved, Reg); |
580 | } |
581 | |
582 | const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); |
583 | unsigned MaxNumVGPRs = ST.getMaxNumVGPRs(MF); |
584 | unsigned MaxNumAGPRs = MaxNumVGPRs; |
585 | unsigned TotalNumVGPRs = AMDGPU::VGPR_32RegClass.getNumRegs(); |
586 | |
587 | if (ST.hasGFX90AInsts()) { |
588 | // In an entry function that has no calls and uses no AGPRs, it is possible |
589 | // to use the whole register budget for VGPRs. |
590 | |
591 | // TODO: it shall be possible to estimate maximum AGPR/VGPR pressure and |
592 | // split register file accordingly. |
593 | if (MFI->usesAGPRs(MF)) { |
594 | MaxNumVGPRs /= 2; |
595 | MaxNumAGPRs = MaxNumVGPRs; |
596 | } else { |
597 | if (MaxNumVGPRs > TotalNumVGPRs) { |
598 | MaxNumAGPRs = MaxNumVGPRs - TotalNumVGPRs; |
599 | MaxNumVGPRs = TotalNumVGPRs; |
600 | } else |
601 | MaxNumAGPRs = 0; |
602 | } |
603 | } |
604 | |
605 | for (unsigned i = MaxNumVGPRs; i < TotalNumVGPRs; ++i) { |
606 | unsigned Reg = AMDGPU::VGPR_32RegClass.getRegister(i); |
607 | reserveRegisterTuples(Reserved, Reg); |
608 | } |
609 | |
610 | for (unsigned i = MaxNumAGPRs; i < TotalNumVGPRs; ++i) { |
611 | unsigned Reg = AMDGPU::AGPR_32RegClass.getRegister(i); |
612 | reserveRegisterTuples(Reserved, Reg); |
613 | } |
614 | |
615 | for (auto Reg : AMDGPU::SReg_32RegClass) { |
616 | Reserved.set(getSubReg(Reg, AMDGPU::hi16)); |
617 | Register Low = getSubReg(Reg, AMDGPU::lo16); |
618 | // This is to prevent BB vcc liveness errors. |
619 | if (!AMDGPU::SGPR_LO16RegClass.contains(Low)) |
620 | Reserved.set(Low); |
621 | } |
622 | |
623 | for (auto Reg : AMDGPU::AGPR_32RegClass) { |
624 | Reserved.set(getSubReg(Reg, AMDGPU::hi16)); |
625 | } |
626 | |
627 | // Reserve all remaining AGPRs if there are no instructions to use them. |
628 | if (!ST.hasMAIInsts()) { |
629 | for (unsigned i = 0; i < MaxNumVGPRs; ++i) { |
630 | unsigned Reg = AMDGPU::AGPR_32RegClass.getRegister(i); |
631 | reserveRegisterTuples(Reserved, Reg); |
632 | } |
633 | } |
634 | |
635 | Register ScratchRSrcReg = MFI->getScratchRSrcReg(); |
636 | if (ScratchRSrcReg != AMDGPU::NoRegister) { |
637 | // Reserve 4 SGPRs for the scratch buffer resource descriptor in case we need |
638 | // to spill. |
639 | // TODO: May need to reserve a VGPR if doing LDS spilling. |
640 | reserveRegisterTuples(Reserved, ScratchRSrcReg); |
641 | } |
642 | |
643 | // We have to assume the SP is needed in case there are calls in the function, |
644 | // which is detected after the function is lowered. If we aren't really going |
645 | // to need SP, don't bother reserving it. |
646 | MCRegister StackPtrReg = MFI->getStackPtrOffsetReg(); |
647 | |
648 | if (StackPtrReg) { |
649 | reserveRegisterTuples(Reserved, StackPtrReg); |
650 | assert(!isSubRegister(ScratchRSrcReg, StackPtrReg)); |
651 | } |
652 | |
653 | MCRegister FrameReg = MFI->getFrameOffsetReg(); |
654 | if (FrameReg) { |
655 | reserveRegisterTuples(Reserved, FrameReg); |
656 | assert(!isSubRegister(ScratchRSrcReg, FrameReg)); |
657 | } |
658 | |
659 | if (hasBasePointer(MF)) { |
660 | MCRegister BasePtrReg = getBaseRegister(); |
661 | reserveRegisterTuples(Reserved, BasePtrReg); |
662 | assert(!isSubRegister(ScratchRSrcReg, BasePtrReg)); |
663 | } |
664 | |
665 | for (auto Reg : MFI->WWMReservedRegs) { |
666 | reserveRegisterTuples(Reserved, Reg.first); |
667 | } |
668 | |
669 | // Reserve VGPRs used for SGPR spilling. |
670 | // Note we treat freezeReservedRegs unusually because we run register |
671 | // allocation in two phases. It's OK to re-freeze with new registers for the |
672 | // second run. |
673 | #if 0 |
674 | for (auto &SpilledFI : MFI->sgpr_spill_vgprs()) { |
675 | for (auto &SpilledVGPR : SpilledFI.second) |
676 | reserveRegisterTuples(Reserved, SpilledVGPR.VGPR); |
677 | } |
678 | #endif |
679 | |
680 | // FIXME: Stop using reserved registers for this. |
681 | for (MCPhysReg Reg : MFI->getAGPRSpillVGPRs()) |
682 | reserveRegisterTuples(Reserved, Reg); |
683 | |
684 | for (MCPhysReg Reg : MFI->getVGPRSpillAGPRs()) |
685 | reserveRegisterTuples(Reserved, Reg); |
686 | |
687 | for (auto SSpill : MFI->getSGPRSpillVGPRs()) |
688 | reserveRegisterTuples(Reserved, SSpill.VGPR); |
689 | |
690 | return Reserved; |
691 | } |
692 | |
693 | bool SIRegisterInfo::isAsmClobberable(const MachineFunction &MF, |
694 | MCRegister PhysReg) const { |
695 | return !MF.getRegInfo().isReserved(PhysReg); |
696 | } |
697 | |
698 | bool SIRegisterInfo::shouldRealignStack(const MachineFunction &MF) const { |
699 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
700 | // On entry, the base address is 0, so it can't possibly need any more |
701 | // alignment. |
702 | |
703 | // FIXME: Should be able to specify the entry frame alignment per calling |
704 | // convention instead. |
705 | if (Info->isEntryFunction()) |
706 | return false; |
707 | |
708 | return TargetRegisterInfo::shouldRealignStack(MF); |
709 | } |
710 | |
711 | bool SIRegisterInfo::requiresRegisterScavenging(const MachineFunction &Fn) const { |
712 | const SIMachineFunctionInfo *Info = Fn.getInfo<SIMachineFunctionInfo>(); |
713 | if (Info->isEntryFunction()) { |
714 | const MachineFrameInfo &MFI = Fn.getFrameInfo(); |
715 | return MFI.hasStackObjects() || MFI.hasCalls(); |
716 | } |
717 | |
718 | // May need scavenger for dealing with callee saved registers. |
719 | return true; |
720 | } |
721 | |
722 | bool SIRegisterInfo::requiresFrameIndexScavenging( |
723 | const MachineFunction &MF) const { |
724 | // Do not use frame virtual registers. They used to be used for SGPRs, but |
725 | // once we reach PrologEpilogInserter, we can no longer spill SGPRs. If the |
726 | // scavenger fails, we can increment/decrement the necessary SGPRs to avoid a |
727 | // spill. |
728 | return false; |
729 | } |
730 | |
731 | bool SIRegisterInfo::requiresFrameIndexReplacementScavenging( |
732 | const MachineFunction &MF) const { |
733 | const MachineFrameInfo &MFI = MF.getFrameInfo(); |
734 | return MFI.hasStackObjects(); |
735 | } |
736 | |
737 | bool SIRegisterInfo::requiresVirtualBaseRegisters( |
738 | const MachineFunction &) const { |
739 | // There are no special dedicated stack or frame pointers. |
740 | return true; |
741 | } |
742 | |
743 | int64_t SIRegisterInfo::getScratchInstrOffset(const MachineInstr *MI) const { |
744 | assert(SIInstrInfo::isMUBUF(*MI) || SIInstrInfo::isFLATScratch(*MI)); |
745 | |
746 | int OffIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(), |
747 | AMDGPU::OpName::offset); |
748 | return MI->getOperand(OffIdx).getImm(); |
749 | } |
750 | |
751 | int64_t SIRegisterInfo::getFrameIndexInstrOffset(const MachineInstr *MI, |
752 | int Idx) const { |
753 | if (!SIInstrInfo::isMUBUF(*MI) && !SIInstrInfo::isFLATScratch(*MI)) |
754 | return 0; |
755 | |
756 | assert((Idx == AMDGPU::getNamedOperandIdx(MI->getOpcode(), |
757 | AMDGPU::OpName::vaddr) || |
758 | (Idx == AMDGPU::getNamedOperandIdx(MI->getOpcode(), |
759 | AMDGPU::OpName::saddr))) && |
760 | "Should never see frame index on non-address operand"); |
761 | |
762 | return getScratchInstrOffset(MI); |
763 | } |
764 | |
765 | bool SIRegisterInfo::needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const { |
766 | if (!SIInstrInfo::isMUBUF(*MI) && !SIInstrInfo::isFLATScratch(*MI)) |
767 | return false; |
768 | |
769 | int64_t FullOffset = Offset + getScratchInstrOffset(MI); |
770 | |
771 | if (SIInstrInfo::isMUBUF(*MI)) |
772 | return !SIInstrInfo::isLegalMUBUFImmOffset(FullOffset); |
773 | |
774 | const SIInstrInfo *TII = ST.getInstrInfo(); |
775 | return !TII->isLegalFLATOffset(FullOffset, AMDGPUAS::PRIVATE_ADDRESS, |
776 | SIInstrFlags::FlatScratch); |
777 | } |
778 | |
779 | Register SIRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB, |
780 | int FrameIdx, |
781 | int64_t Offset) const { |
782 | MachineBasicBlock::iterator Ins = MBB->begin(); |
783 | DebugLoc DL; // Defaults to "unknown" |
784 | |
785 | if (Ins != MBB->end()) |
786 | DL = Ins->getDebugLoc(); |
787 | |
788 | MachineFunction *MF = MBB->getParent(); |
789 | const SIInstrInfo *TII = ST.getInstrInfo(); |
790 | MachineRegisterInfo &MRI = MF->getRegInfo(); |
791 | unsigned MovOpc = ST.enableFlatScratch() ? AMDGPU::S_MOV_B32 |
792 | : AMDGPU::V_MOV_B32_e32; |
793 | |
794 | Register BaseReg = MRI.createVirtualRegister( |
795 | ST.enableFlatScratch() ? &AMDGPU::SReg_32_XEXEC_HIRegClass |
796 | : &AMDGPU::VGPR_32RegClass); |
797 | |
798 | if (Offset == 0) { |
799 | BuildMI(*MBB, Ins, DL, TII->get(MovOpc), BaseReg) |
800 | .addFrameIndex(FrameIdx); |
801 | return BaseReg; |
802 | } |
803 | |
804 | Register OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); |
805 | |
806 | Register FIReg = MRI.createVirtualRegister( |
807 | ST.enableFlatScratch() ? &AMDGPU::SReg_32_XM0RegClass |
808 | : &AMDGPU::VGPR_32RegClass); |
809 | |
810 | BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg) |
811 | .addImm(Offset); |
812 | BuildMI(*MBB, Ins, DL, TII->get(MovOpc), FIReg) |
813 | .addFrameIndex(FrameIdx); |
814 | |
815 | if (ST.enableFlatScratch() ) { |
816 | BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::S_ADD_I32), BaseReg) |
817 | .addReg(OffsetReg, RegState::Kill) |
818 | .addReg(FIReg); |
819 | return BaseReg; |
820 | } |
821 | |
822 | TII->getAddNoCarry(*MBB, Ins, DL, BaseReg) |
823 | .addReg(OffsetReg, RegState::Kill) |
824 | .addReg(FIReg) |
825 | .addImm(0); // clamp bit |
826 | |
827 | return BaseReg; |
828 | } |
829 | |
830 | void SIRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg, |
831 | int64_t Offset) const { |
832 | const SIInstrInfo *TII = ST.getInstrInfo(); |
833 | bool IsFlat = TII->isFLATScratch(MI); |
834 | |
835 | #ifndef NDEBUG |
836 | // FIXME: Is it possible to be storing a frame index to itself? |
837 | bool SeenFI = false; |
838 | for (const MachineOperand &MO: MI.operands()) { |
839 | if (MO.isFI()) { |
840 | if (SeenFI) |
841 | llvm_unreachable("should not see multiple frame indices"); |
842 | |
843 | SeenFI = true; |
844 | } |
845 | } |
846 | #endif |
847 | |
848 | MachineOperand *FIOp = |
849 | TII->getNamedOperand(MI, IsFlat ? AMDGPU::OpName::saddr |
850 | : AMDGPU::OpName::vaddr); |
851 | |
852 | MachineOperand *OffsetOp = TII->getNamedOperand(MI, AMDGPU::OpName::offset); |
853 | int64_t NewOffset = OffsetOp->getImm() + Offset; |
854 | |
855 | assert(FIOp && FIOp->isFI() && "frame index must be address operand"); |
856 | assert(TII->isMUBUF(MI) || TII->isFLATScratch(MI)); |
857 | |
858 | if (IsFlat) { |
859 | assert(TII->isLegalFLATOffset(NewOffset, AMDGPUAS::PRIVATE_ADDRESS, |
860 | SIInstrFlags::FlatScratch) && |
861 | "offset should be legal"); |
862 | FIOp->ChangeToRegister(BaseReg, false); |
863 | OffsetOp->setImm(NewOffset); |
864 | return; |
865 | } |
866 | |
867 | #ifndef NDEBUG |
868 | MachineOperand *SOffset = TII->getNamedOperand(MI, AMDGPU::OpName::soffset); |
869 | assert(SOffset->isImm() && SOffset->getImm() == 0); |
870 | #endif |
871 | |
872 | assert(SIInstrInfo::isLegalMUBUFImmOffset(NewOffset) && |
873 | "offset should be legal"); |
874 | |
875 | FIOp->ChangeToRegister(BaseReg, false); |
876 | OffsetOp->setImm(NewOffset); |
877 | } |
878 | |
879 | bool SIRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI, |
880 | Register BaseReg, |
881 | int64_t Offset) const { |
882 | if (!SIInstrInfo::isMUBUF(*MI) && !SIInstrInfo::isFLATScratch(*MI)) |
883 | return false; |
884 | |
885 | int64_t NewOffset = Offset + getScratchInstrOffset(MI); |
886 | |
887 | if (SIInstrInfo::isMUBUF(*MI)) |
888 | return SIInstrInfo::isLegalMUBUFImmOffset(NewOffset); |
889 | |
890 | const SIInstrInfo *TII = ST.getInstrInfo(); |
891 | return TII->isLegalFLATOffset(NewOffset, AMDGPUAS::PRIVATE_ADDRESS, |
892 | SIInstrFlags::FlatScratch); |
893 | } |
894 | |
895 | const TargetRegisterClass *SIRegisterInfo::getPointerRegClass( |
896 | const MachineFunction &MF, unsigned Kind) const { |
897 | // This is inaccurate. It depends on the instruction and address space. The |
898 | // only place where we should hit this is for dealing with frame indexes / |
899 | // private accesses, so this is correct in that case. |
900 | return &AMDGPU::VGPR_32RegClass; |
901 | } |
902 | |
903 | const TargetRegisterClass * |
904 | SIRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const { |
905 | if (isAGPRClass(RC) && !ST.hasGFX90AInsts()) |
906 | return getEquivalentVGPRClass(RC); |
907 | |
908 | return RC; |
909 | } |
910 | |
911 | static unsigned getNumSubRegsForSpillOp(unsigned Op) { |
912 | |
913 | switch (Op) { |
914 | case AMDGPU::SI_SPILL_S1024_SAVE: |
915 | case AMDGPU::SI_SPILL_S1024_RESTORE: |
916 | case AMDGPU::SI_SPILL_V1024_SAVE: |
917 | case AMDGPU::SI_SPILL_V1024_RESTORE: |
918 | case AMDGPU::SI_SPILL_A1024_SAVE: |
919 | case AMDGPU::SI_SPILL_A1024_RESTORE: |
920 | case AMDGPU::SI_SPILL_AV1024_SAVE: |
921 | case AMDGPU::SI_SPILL_AV1024_RESTORE: |
922 | return 32; |
923 | case AMDGPU::SI_SPILL_S512_SAVE: |
924 | case AMDGPU::SI_SPILL_S512_RESTORE: |
925 | case AMDGPU::SI_SPILL_V512_SAVE: |
926 | case AMDGPU::SI_SPILL_V512_RESTORE: |
927 | case AMDGPU::SI_SPILL_A512_SAVE: |
928 | case AMDGPU::SI_SPILL_A512_RESTORE: |
929 | case AMDGPU::SI_SPILL_AV512_SAVE: |
930 | case AMDGPU::SI_SPILL_AV512_RESTORE: |
931 | return 16; |
932 | case AMDGPU::SI_SPILL_S256_SAVE: |
933 | case AMDGPU::SI_SPILL_S256_RESTORE: |
934 | case AMDGPU::SI_SPILL_V256_SAVE: |
935 | case AMDGPU::SI_SPILL_V256_RESTORE: |
936 | case AMDGPU::SI_SPILL_A256_SAVE: |
937 | case AMDGPU::SI_SPILL_A256_RESTORE: |
938 | case AMDGPU::SI_SPILL_AV256_SAVE: |
939 | case AMDGPU::SI_SPILL_AV256_RESTORE: |
940 | return 8; |
941 | case AMDGPU::SI_SPILL_S224_SAVE: |
942 | case AMDGPU::SI_SPILL_S224_RESTORE: |
943 | case AMDGPU::SI_SPILL_V224_SAVE: |
944 | case AMDGPU::SI_SPILL_V224_RESTORE: |
945 | case AMDGPU::SI_SPILL_A224_SAVE: |
946 | case AMDGPU::SI_SPILL_A224_RESTORE: |
947 | case AMDGPU::SI_SPILL_AV224_SAVE: |
948 | case AMDGPU::SI_SPILL_AV224_RESTORE: |
949 | return 7; |
950 | case AMDGPU::SI_SPILL_S192_SAVE: |
951 | case AMDGPU::SI_SPILL_S192_RESTORE: |
952 | case AMDGPU::SI_SPILL_V192_SAVE: |
953 | case AMDGPU::SI_SPILL_V192_RESTORE: |
954 | case AMDGPU::SI_SPILL_A192_SAVE: |
955 | case AMDGPU::SI_SPILL_A192_RESTORE: |
956 | case AMDGPU::SI_SPILL_AV192_SAVE: |
957 | case AMDGPU::SI_SPILL_AV192_RESTORE: |
958 | return 6; |
959 | case AMDGPU::SI_SPILL_S160_SAVE: |
960 | case AMDGPU::SI_SPILL_S160_RESTORE: |
961 | case AMDGPU::SI_SPILL_V160_SAVE: |
962 | case AMDGPU::SI_SPILL_V160_RESTORE: |
963 | case AMDGPU::SI_SPILL_A160_SAVE: |
964 | case AMDGPU::SI_SPILL_A160_RESTORE: |
965 | case AMDGPU::SI_SPILL_AV160_SAVE: |
966 | case AMDGPU::SI_SPILL_AV160_RESTORE: |
967 | return 5; |
968 | case AMDGPU::SI_SPILL_S128_SAVE: |
969 | case AMDGPU::SI_SPILL_S128_RESTORE: |
970 | case AMDGPU::SI_SPILL_V128_SAVE: |
971 | case AMDGPU::SI_SPILL_V128_RESTORE: |
972 | case AMDGPU::SI_SPILL_A128_SAVE: |
973 | case AMDGPU::SI_SPILL_A128_RESTORE: |
974 | case AMDGPU::SI_SPILL_AV128_SAVE: |
975 | case AMDGPU::SI_SPILL_AV128_RESTORE: |
976 | return 4; |
977 | case AMDGPU::SI_SPILL_S96_SAVE: |
978 | case AMDGPU::SI_SPILL_S96_RESTORE: |
979 | case AMDGPU::SI_SPILL_V96_SAVE: |
980 | case AMDGPU::SI_SPILL_V96_RESTORE: |
981 | case AMDGPU::SI_SPILL_A96_SAVE: |
982 | case AMDGPU::SI_SPILL_A96_RESTORE: |
983 | case AMDGPU::SI_SPILL_AV96_SAVE: |
984 | case AMDGPU::SI_SPILL_AV96_RESTORE: |
985 | return 3; |
986 | case AMDGPU::SI_SPILL_S64_SAVE: |
987 | case AMDGPU::SI_SPILL_S64_RESTORE: |
988 | case AMDGPU::SI_SPILL_V64_SAVE: |
989 | case AMDGPU::SI_SPILL_V64_RESTORE: |
990 | case AMDGPU::SI_SPILL_A64_SAVE: |
991 | case AMDGPU::SI_SPILL_A64_RESTORE: |
992 | case AMDGPU::SI_SPILL_AV64_SAVE: |
993 | case AMDGPU::SI_SPILL_AV64_RESTORE: |
994 | return 2; |
995 | case AMDGPU::SI_SPILL_S32_SAVE: |
996 | case AMDGPU::SI_SPILL_S32_RESTORE: |
997 | case AMDGPU::SI_SPILL_V32_SAVE: |
998 | case AMDGPU::SI_SPILL_V32_RESTORE: |
999 | case AMDGPU::SI_SPILL_A32_SAVE: |
1000 | case AMDGPU::SI_SPILL_A32_RESTORE: |
1001 | case AMDGPU::SI_SPILL_AV32_SAVE: |
1002 | case AMDGPU::SI_SPILL_AV32_RESTORE: |
1003 | return 1; |
1004 | default: llvm_unreachable("Invalid spill opcode"); |
1005 | } |
1006 | } |
1007 | |
1008 | static int getOffsetMUBUFStore(unsigned Opc) { |
1009 | switch (Opc) { |
1010 | case AMDGPU::BUFFER_STORE_DWORD_OFFEN: |
1011 | return AMDGPU::BUFFER_STORE_DWORD_OFFSET; |
1012 | case AMDGPU::BUFFER_STORE_BYTE_OFFEN: |
1013 | return AMDGPU::BUFFER_STORE_BYTE_OFFSET; |
1014 | case AMDGPU::BUFFER_STORE_SHORT_OFFEN: |
1015 | return AMDGPU::BUFFER_STORE_SHORT_OFFSET; |
1016 | case AMDGPU::BUFFER_STORE_DWORDX2_OFFEN: |
1017 | return AMDGPU::BUFFER_STORE_DWORDX2_OFFSET; |
1018 | case AMDGPU::BUFFER_STORE_DWORDX4_OFFEN: |
1019 | return AMDGPU::BUFFER_STORE_DWORDX4_OFFSET; |
1020 | case AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFEN: |
1021 | return AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFSET; |
1022 | case AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFEN: |
1023 | return AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFSET; |
1024 | default: |
1025 | return -1; |
1026 | } |
1027 | } |
1028 | |
1029 | static int getOffsetMUBUFLoad(unsigned Opc) { |
1030 | switch (Opc) { |
1031 | case AMDGPU::BUFFER_LOAD_DWORD_OFFEN: |
1032 | return AMDGPU::BUFFER_LOAD_DWORD_OFFSET; |
1033 | case AMDGPU::BUFFER_LOAD_UBYTE_OFFEN: |
1034 | return AMDGPU::BUFFER_LOAD_UBYTE_OFFSET; |
1035 | case AMDGPU::BUFFER_LOAD_SBYTE_OFFEN: |
1036 | return AMDGPU::BUFFER_LOAD_SBYTE_OFFSET; |
1037 | case AMDGPU::BUFFER_LOAD_USHORT_OFFEN: |
1038 | return AMDGPU::BUFFER_LOAD_USHORT_OFFSET; |
1039 | case AMDGPU::BUFFER_LOAD_SSHORT_OFFEN: |
1040 | return AMDGPU::BUFFER_LOAD_SSHORT_OFFSET; |
1041 | case AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN: |
1042 | return AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET; |
1043 | case AMDGPU::BUFFER_LOAD_DWORDX4_OFFEN: |
1044 | return AMDGPU::BUFFER_LOAD_DWORDX4_OFFSET; |
1045 | case AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFEN: |
1046 | return AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFSET; |
1047 | case AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFEN: |
1048 | return AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFSET; |
1049 | case AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFEN: |
1050 | return AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFSET; |
1051 | case AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFEN: |
1052 | return AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFSET; |
1053 | case AMDGPU::BUFFER_LOAD_SHORT_D16_OFFEN: |
1054 | return AMDGPU::BUFFER_LOAD_SHORT_D16_OFFSET; |
1055 | case AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFEN: |
1056 | return AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFSET; |
1057 | default: |
1058 | return -1; |
1059 | } |
1060 | } |
1061 | |
1062 | static MachineInstrBuilder spillVGPRtoAGPR(const GCNSubtarget &ST, |
1063 | MachineBasicBlock &MBB, |
1064 | MachineBasicBlock::iterator MI, |
1065 | int Index, unsigned Lane, |
1066 | unsigned ValueReg, bool IsKill) { |
1067 | MachineFunction *MF = MBB.getParent(); |
1068 | SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); |
1069 | const SIInstrInfo *TII = ST.getInstrInfo(); |
1070 | |
1071 | MCPhysReg Reg = MFI->getVGPRToAGPRSpill(Index, Lane); |
1072 | |
1073 | if (Reg == AMDGPU::NoRegister) |
1074 | return MachineInstrBuilder(); |
1075 | |
1076 | bool IsStore = MI->mayStore(); |
1077 | MachineRegisterInfo &MRI = MF->getRegInfo(); |
1078 | auto *TRI = static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo()); |
1079 | |
1080 | unsigned Dst = IsStore ? Reg : ValueReg; |
1081 | unsigned Src = IsStore ? ValueReg : Reg; |
1082 | bool IsVGPR = TRI->isVGPR(MRI, Reg); |
1083 | DebugLoc DL = MI->getDebugLoc(); |
1084 | if (IsVGPR == TRI->isVGPR(MRI, ValueReg)) { |
1085 | // The spiller during regalloc may restore a spilled register to its
1086 | // superclass. This can result in AGPR spills restored to VGPRs or the other
1087 | // way around, leaving src and dst with identical register classes at this
1088 | // point. A plain copy suffices in such cases.
1089 | auto CopyMIB = BuildMI(MBB, MI, DL, TII->get(AMDGPU::COPY), Dst) |
1090 | .addReg(Src, getKillRegState(IsKill)); |
1091 | CopyMIB->setAsmPrinterFlag(MachineInstr::ReloadReuse); |
1092 | return CopyMIB; |
1093 | } |
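// From here on the transfer is between an AGPR and a VGPR. V_ACCVGPR_WRITE
// moves a VGPR source into an AGPR destination and V_ACCVGPR_READ does the
// opposite, so in effect the opcode below is chosen by whether the destination
// (Reg for stores, ValueReg for loads) is the AGPR side.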
1094 | unsigned Opc = (IsStore ^ IsVGPR) ? AMDGPU::V_ACCVGPR_WRITE_B32_e64 |
1095 | : AMDGPU::V_ACCVGPR_READ_B32_e64; |
1096 | |
1097 | auto MIB = BuildMI(MBB, MI, DL, TII->get(Opc), Dst) |
1098 | .addReg(Src, getKillRegState(IsKill)); |
1099 | MIB->setAsmPrinterFlag(MachineInstr::ReloadReuse); |
1100 | return MIB; |
1101 | } |
1102 | |
1103 | // This differs from buildSpillLoadStore by only scavenging a VGPR. It does not |
1104 | // need to handle the case where an SGPR may need to be spilled while spilling. |
1105 | static bool buildMUBUFOffsetLoadStore(const GCNSubtarget &ST, |
1106 | MachineFrameInfo &MFI, |
1107 | MachineBasicBlock::iterator MI, |
1108 | int Index, |
1109 | int64_t Offset) { |
1110 | const SIInstrInfo *TII = ST.getInstrInfo(); |
1111 | MachineBasicBlock *MBB = MI->getParent(); |
1112 | const DebugLoc &DL = MI->getDebugLoc(); |
1113 | bool IsStore = MI->mayStore(); |
1114 | |
1115 | unsigned Opc = MI->getOpcode(); |
1116 | int LoadStoreOp = IsStore ? |
1117 | getOffsetMUBUFStore(Opc) : getOffsetMUBUFLoad(Opc); |
1118 | if (LoadStoreOp == -1) |
1119 | return false; |
1120 | |
1121 | const MachineOperand *Reg = TII->getNamedOperand(*MI, AMDGPU::OpName::vdata); |
1122 | if (spillVGPRtoAGPR(ST, *MBB, MI, Index, 0, Reg->getReg(), false).getInstr()) |
1123 | return true; |
1124 | |
1125 | MachineInstrBuilder NewMI = |
1126 | BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp)) |
1127 | .add(*Reg) |
1128 | .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc)) |
1129 | .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)) |
1130 | .addImm(Offset) |
1131 | .addImm(0) // cpol |
1132 | .addImm(0) // tfe |
1133 | .addImm(0) // swz |
1134 | .cloneMemRefs(*MI); |
1135 | |
1136 | const MachineOperand *VDataIn = TII->getNamedOperand(*MI, |
1137 | AMDGPU::OpName::vdata_in); |
1138 | if (VDataIn) |
1139 | NewMI.add(*VDataIn); |
1140 | return true; |
1141 | } |
1142 | |
1143 | static unsigned getFlatScratchSpillOpcode(const SIInstrInfo *TII, |
1144 | unsigned LoadStoreOp, |
1145 | unsigned EltSize) { |
1146 | bool IsStore = TII->get(LoadStoreOp).mayStore(); |
1147 | bool UseST = |
1148 | AMDGPU::getNamedOperandIdx(LoadStoreOp, AMDGPU::OpName::vaddr) < 0 && |
1149 | AMDGPU::getNamedOperandIdx(LoadStoreOp, AMDGPU::OpName::saddr) < 0; |
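// UseST is set when the incoming opcode has neither a vaddr nor an saddr
// operand, i.e. it is already an ST-form (addressless) scratch access; in that
// case the SADDR opcode selected below is converted back to the ST form before
// returning.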
1150 | |
1151 | switch (EltSize) { |
1152 | case 4: |
1153 | LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORD_SADDR |
1154 | : AMDGPU::SCRATCH_LOAD_DWORD_SADDR; |
1155 | break; |
1156 | case 8: |
1157 | LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORDX2_SADDR |
1158 | : AMDGPU::SCRATCH_LOAD_DWORDX2_SADDR; |
1159 | break; |
1160 | case 12: |
1161 | LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORDX3_SADDR |
1162 | : AMDGPU::SCRATCH_LOAD_DWORDX3_SADDR; |
1163 | break; |
1164 | case 16: |
1165 | LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORDX4_SADDR |
1166 | : AMDGPU::SCRATCH_LOAD_DWORDX4_SADDR; |
1167 | break; |
1168 | default: |
1169 | llvm_unreachable("Unexpected spill load/store size!")::llvm::llvm_unreachable_internal("Unexpected spill load/store size!" , "llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp", 1169); |
1170 | } |
1171 | |
1172 | if (UseST) |
1173 | LoadStoreOp = AMDGPU::getFlatScratchInstSTfromSS(LoadStoreOp); |
1174 | |
1175 | return LoadStoreOp; |
1176 | } |
1177 | |
1178 | void SIRegisterInfo::buildSpillLoadStore( |
1179 | MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, |
1180 | unsigned LoadStoreOp, int Index, Register ValueReg, bool IsKill, |
1181 | MCRegister ScratchOffsetReg, int64_t InstOffset, MachineMemOperand *MMO, |
1182 | RegScavenger *RS, LivePhysRegs *LiveRegs) const { |
1183 | assert((!RS || !LiveRegs) && "Only RS or LiveRegs can be set but not both");
1184 | |
1185 | MachineFunction *MF = MBB.getParent(); |
1186 | const SIInstrInfo *TII = ST.getInstrInfo(); |
1187 | const MachineFrameInfo &MFI = MF->getFrameInfo(); |
1188 | const SIMachineFunctionInfo *FuncInfo = MF->getInfo<SIMachineFunctionInfo>(); |
1189 | |
1190 | const MCInstrDesc *Desc = &TII->get(LoadStoreOp); |
1191 | bool IsStore = Desc->mayStore(); |
1192 | bool IsFlat = TII->isFLATScratch(LoadStoreOp); |
1193 | |
1194 | bool Scavenged = false; |
1195 | MCRegister SOffset = ScratchOffsetReg; |
1196 | |
1197 | const TargetRegisterClass *RC = getRegClassForReg(MF->getRegInfo(), ValueReg); |
1198 | // On gfx90a+ AGPR is a regular VGPR acceptable for loads and stores. |
1199 | const bool IsAGPR = !ST.hasGFX90AInsts() && isAGPRClass(RC); |
1200 | const unsigned RegWidth = AMDGPU::getRegBitWidth(RC->getID()) / 8; |
1201 | |
1202 | // Always use 4 byte operations for AGPRs because we need to scavenge |
1203 | // a temporary VGPR. |
1204 | unsigned EltSize = (IsFlat && !IsAGPR) ? std::min(RegWidth, 16u) : 4u; |
1205 | unsigned NumSubRegs = RegWidth / EltSize; |
1206 | unsigned Size = NumSubRegs * EltSize; |
1207 | unsigned RemSize = RegWidth - Size; |
1208 | unsigned NumRemSubRegs = RemSize ? 1 : 0; |
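// A worked example with hypothetical numbers: a 224-bit (28-byte) register
// spilled via flat scratch gives EltSize = 16, NumSubRegs = 1, Size = 16 and
// RemSize = 12, so one DWORDX4 access is emitted followed by a single DWORDX3
// access for the remainder.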
1209 | int64_t Offset = InstOffset + MFI.getObjectOffset(Index); |
1210 | int64_t MaxOffset = Offset + Size + RemSize - EltSize; |
1211 | int64_t ScratchOffsetRegDelta = 0; |
1212 | |
1213 | if (IsFlat && EltSize > 4) { |
1214 | LoadStoreOp = getFlatScratchSpillOpcode(TII, LoadStoreOp, EltSize); |
1215 | Desc = &TII->get(LoadStoreOp); |
Value stored to 'Desc' is never read | |
1216 | } |
1217 | |
1218 | Align Alignment = MFI.getObjectAlign(Index); |
1219 | const MachinePointerInfo &BasePtrInfo = MMO->getPointerInfo(); |
1220 | |
1221 | assert((IsFlat || ((Offset % EltSize) == 0)) &&
1222 | "unexpected VGPR spill offset");
1223 | |
1224 | bool IsOffsetLegal = |
1225 | IsFlat ? TII->isLegalFLATOffset(MaxOffset, AMDGPUAS::PRIVATE_ADDRESS, |
1226 | SIInstrFlags::FlatScratch) |
1227 | : SIInstrInfo::isLegalMUBUFImmOffset(MaxOffset); |
1228 | if (!IsOffsetLegal || (IsFlat && !SOffset && !ST.hasFlatScratchSTMode())) { |
1229 | SOffset = MCRegister(); |
1230 | |
1231 | // We currently only support spilling VGPRs to EltSize boundaries, meaning |
1232 | // we can simplify the adjustment of Offset here to just scale with |
1233 | // WavefrontSize. |
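// For example, on a wave64 target a 4-byte per-lane offset becomes a 256-byte
// unswizzled offset applied through the soffset register.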
1234 | if (!IsFlat) |
1235 | Offset *= ST.getWavefrontSize(); |
1236 | |
1237 | // We don't have access to the register scavenger if this function is called |
1238 | // during PEI::scavengeFrameVirtualRegs() so use LiveRegs in this case. |
1239 | if (RS) { |
1240 | SOffset = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0, false); |
1241 | } else if (LiveRegs) { |
1242 | for (MCRegister Reg : AMDGPU::SGPR_32RegClass) { |
1243 | if (LiveRegs->available(MF->getRegInfo(), Reg)) { |
1244 | SOffset = Reg; |
1245 | break; |
1246 | } |
1247 | } |
1248 | } |
1249 | |
1250 | if (!SOffset) { |
1251 | // There are no free SGPRs, and we are in the process of spilling
1252 | // VGPRs too. Since we need a VGPR in order to spill SGPRs (this is true
1253 | // on SI/CI, and on VI it is true until we implement spilling using scalar
1254 | // stores), we have no way to free up an SGPR. Our solution here is to
1255 | // add the offset directly to the ScratchOffset or StackPtrOffset
1256 | // register, and then subtract the offset after the spill to return the
1257 | // register to its original value.
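// A sketch of the resulting sequence (register names are illustrative):
//   s_add_i32  s32, s32, <offset>   ; fold the offset into the SP/offset reg
//   <spill or restore using s32 as soffset>
//   s_add_i32  s32, s32, -<offset>  ; undo the adjustment after the access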
1258 | if (!ScratchOffsetReg) |
1259 | ScratchOffsetReg = FuncInfo->getStackPtrOffsetReg(); |
1260 | SOffset = ScratchOffsetReg; |
1261 | ScratchOffsetRegDelta = Offset; |
1262 | } else { |
1263 | Scavenged = true; |
1264 | } |
1265 | |
1266 | if (!SOffset) |
1267 | report_fatal_error("could not scavenge SGPR to spill in entry function"); |
1268 | |
1269 | if (ScratchOffsetReg == AMDGPU::NoRegister) { |
1270 | BuildMI(MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), SOffset).addImm(Offset); |
1271 | } else { |
1272 | auto Add = BuildMI(MBB, MI, DL, TII->get(AMDGPU::S_ADD_I32), SOffset) |
1273 | .addReg(ScratchOffsetReg) |
1274 | .addImm(Offset); |
1275 | Add->getOperand(3).setIsDead(); // Mark SCC as dead. |
1276 | } |
1277 | |
1278 | Offset = 0; |
1279 | } |
1280 | |
1281 | if (IsFlat && SOffset == AMDGPU::NoRegister) { |
1282 | assert(AMDGPU::getNamedOperandIdx(LoadStoreOp, AMDGPU::OpName::vaddr) < 0
1283 | && "Unexpected vaddr for flat scratch with a FI operand");
1284 | |
1285 | assert(ST.hasFlatScratchSTMode());
1286 | LoadStoreOp = AMDGPU::getFlatScratchInstSTfromSS(LoadStoreOp); |
1287 | Desc = &TII->get(LoadStoreOp); |
1288 | } |
1289 | |
1290 | Register TmpReg; |
1291 | |
1292 | for (unsigned i = 0, e = NumSubRegs + NumRemSubRegs, RegOffset = 0; i != e; |
1293 | ++i, RegOffset += EltSize) { |
1294 | if (i == NumSubRegs) { |
1295 | EltSize = RemSize; |
1296 | LoadStoreOp = getFlatScratchSpillOpcode(TII, LoadStoreOp, EltSize); |
1297 | } |
1298 | Desc = &TII->get(LoadStoreOp); |
1299 | |
1300 | unsigned NumRegs = EltSize / 4; |
1301 | Register SubReg = e == 1 |
1302 | ? ValueReg |
1303 | : Register(getSubReg(ValueReg, |
1304 | getSubRegFromChannel(RegOffset / 4, NumRegs))); |
1305 | |
1306 | unsigned SOffsetRegState = 0; |
1307 | unsigned SrcDstRegState = getDefRegState(!IsStore); |
1308 | if (i + 1 == e) { |
1309 | SOffsetRegState |= getKillRegState(Scavenged); |
1310 | // The last implicit use carries the "Kill" flag. |
1311 | SrcDstRegState |= getKillRegState(IsKill); |
1312 | } |
1313 | |
1314 | // Make sure the whole register is defined if there are undef components by |
1315 | // adding an implicit def of the super-reg on the first instruction. |
1316 | bool NeedSuperRegDef = e > 1 && IsStore && i == 0; |
1317 | bool NeedSuperRegImpOperand = e > 1; |
1318 | |
1319 | // Remaining element size to spill into memory after some parts of it
1320 | // have been spilled into either AGPRs or VGPRs.
1321 | unsigned RemEltSize = EltSize; |
1322 | |
1323 | // AGPRs used to spill VGPRs (and vice versa) are allocated in reverse order,
1324 | // starting from the last lane. If a register cannot be completely spilled
1325 | // into another register, this ensures its alignment does not change. For
1326 | // targets with a VGPR alignment requirement this is important when flat
1327 | // scratch is used, as we might otherwise get a scratch_load or scratch_store
1328 | // of an unaligned register.
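// For example, with EltSize == 16 and RegOffset == 0 the loop below visits
// lanes 3, 2, 1, 0 and stops at the first lane without an assigned AGPR,
// leaving RemEltSize to cover whatever still has to go to memory.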
1329 | for (int LaneS = (RegOffset + EltSize) / 4 - 1, Lane = LaneS, |
1330 | LaneE = RegOffset / 4; |
1331 | Lane >= LaneE; --Lane) { |
1332 | bool IsSubReg = e > 1 || EltSize > 4; |
1333 | Register Sub = IsSubReg |
1334 | ? Register(getSubReg(ValueReg, getSubRegFromChannel(Lane))) |
1335 | : ValueReg; |
1336 | auto MIB = spillVGPRtoAGPR(ST, MBB, MI, Index, Lane, Sub, IsKill); |
1337 | if (!MIB.getInstr()) |
1338 | break; |
1339 | if (NeedSuperRegDef || (IsSubReg && IsStore && Lane == LaneS && !i)) { |
1340 | MIB.addReg(ValueReg, RegState::ImplicitDefine); |
1341 | NeedSuperRegDef = false; |
1342 | } |
1343 | if (IsSubReg || NeedSuperRegImpOperand) { |
1344 | NeedSuperRegImpOperand = true; |
1345 | unsigned State = SrcDstRegState; |
1346 | if (Lane != LaneE) |
1347 | State &= ~RegState::Kill; |
1348 | MIB.addReg(ValueReg, RegState::Implicit | State); |
1349 | } |
1350 | RemEltSize -= 4; |
1351 | } |
1352 | |
1353 | if (!RemEltSize) // Fully spilled into AGPRs. |
1354 | continue; |
1355 | |
1356 | if (RemEltSize != EltSize) { // Partially spilled to AGPRs |
1357 | assert(IsFlat && EltSize > 4);
1358 | |
1359 | unsigned NumRegs = RemEltSize / 4; |
1360 | SubReg = Register(getSubReg(ValueReg, |
1361 | getSubRegFromChannel(RegOffset / 4, NumRegs))); |
1362 | unsigned Opc = getFlatScratchSpillOpcode(TII, LoadStoreOp, RemEltSize); |
1363 | Desc = &TII->get(Opc); |
1364 | } |
1365 | |
1366 | unsigned FinalReg = SubReg; |
1367 | |
1368 | if (IsAGPR) { |
1369 | assert(EltSize == 4);
1370 | |
1371 | if (!TmpReg) { |
1372 | assert(RS && "Needs to have RegScavenger to spill an AGPR!");
1373 | // FIXME: change to scavengeRegisterBackwards() |
1374 | TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0); |
1375 | RS->setRegUsed(TmpReg); |
1376 | } |
1377 | if (IsStore) { |
1378 | auto AccRead = BuildMI(MBB, MI, DL, |
1379 | TII->get(AMDGPU::V_ACCVGPR_READ_B32_e64), TmpReg) |
1380 | .addReg(SubReg, getKillRegState(IsKill)); |
1381 | if (NeedSuperRegDef) |
1382 | AccRead.addReg(ValueReg, RegState::ImplicitDefine); |
1383 | AccRead->setAsmPrinterFlag(MachineInstr::ReloadReuse); |
1384 | } |
1385 | SubReg = TmpReg; |
1386 | } |
1387 | |
1388 | MachinePointerInfo PInfo = BasePtrInfo.getWithOffset(RegOffset); |
1389 | MachineMemOperand *NewMMO = |
1390 | MF->getMachineMemOperand(PInfo, MMO->getFlags(), RemEltSize, |
1391 | commonAlignment(Alignment, RegOffset)); |
1392 | |
1393 | auto MIB = |
1394 | BuildMI(MBB, MI, DL, *Desc) |
1395 | .addReg(SubReg, getDefRegState(!IsStore) | getKillRegState(IsKill)); |
1396 | if (!IsFlat) |
1397 | MIB.addReg(FuncInfo->getScratchRSrcReg()); |
1398 | |
1399 | if (SOffset == AMDGPU::NoRegister) { |
1400 | if (!IsFlat) |
1401 | MIB.addImm(0); |
1402 | } else { |
1403 | MIB.addReg(SOffset, SOffsetRegState); |
1404 | } |
1405 | MIB.addImm(Offset + RegOffset) |
1406 | .addImm(0); // cpol |
1407 | if (!IsFlat) |
1408 | MIB.addImm(0) // tfe |
1409 | .addImm(0); // swz |
1410 | MIB.addMemOperand(NewMMO); |
1411 | |
1412 | if (!IsAGPR && NeedSuperRegDef) |
1413 | MIB.addReg(ValueReg, RegState::ImplicitDefine); |
1414 | |
1415 | if (!IsStore && TmpReg != AMDGPU::NoRegister) { |
1416 | MIB = BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), |
1417 | FinalReg) |
1418 | .addReg(TmpReg, RegState::Kill); |
1419 | MIB->setAsmPrinterFlag(MachineInstr::ReloadReuse); |
1420 | } |
1421 | |
1422 | if (NeedSuperRegImpOperand) |
1423 | MIB.addReg(ValueReg, RegState::Implicit | SrcDstRegState); |
1424 | } |
1425 | |
1426 | if (ScratchOffsetRegDelta != 0) { |
1427 | // Subtract the offset we added to the ScratchOffset register. |
1428 | BuildMI(MBB, MI, DL, TII->get(AMDGPU::S_ADD_I32), SOffset) |
1429 | .addReg(SOffset) |
1430 | .addImm(-ScratchOffsetRegDelta); |
1431 | } |
1432 | } |
1433 | |
1434 | void SIRegisterInfo::buildVGPRSpillLoadStore(SGPRSpillBuilder &SB, int Index, |
1435 | int Offset, bool IsLoad, |
1436 | bool IsKill) const { |
1437 | // Load/store VGPR |
1438 | MachineFrameInfo &FrameInfo = SB.MF.getFrameInfo(); |
1439 | assert(FrameInfo.getStackID(Index) != TargetStackID::SGPRSpill);
1440 | |
1441 | Register FrameReg = |
1442 | FrameInfo.isFixedObjectIndex(Index) && hasBasePointer(SB.MF) |
1443 | ? getBaseRegister() |
1444 | : getFrameRegister(SB.MF); |
1445 | |
1446 | Align Alignment = FrameInfo.getObjectAlign(Index); |
1447 | MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(SB.MF, Index); |
1448 | MachineMemOperand *MMO = SB.MF.getMachineMemOperand( |
1449 | PtrInfo, IsLoad ? MachineMemOperand::MOLoad : MachineMemOperand::MOStore, |
1450 | SB.EltSize, Alignment); |
1451 | |
1452 | if (IsLoad) { |
1453 | unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_LOAD_DWORD_SADDR |
1454 | : AMDGPU::BUFFER_LOAD_DWORD_OFFSET; |
1455 | buildSpillLoadStore(*SB.MBB, SB.MI, SB.DL, Opc, Index, SB.TmpVGPR, false, |
1456 | FrameReg, Offset * SB.EltSize, MMO, SB.RS); |
1457 | } else { |
1458 | unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_STORE_DWORD_SADDR |
1459 | : AMDGPU::BUFFER_STORE_DWORD_OFFSET; |
1460 | buildSpillLoadStore(*SB.MBB, SB.MI, SB.DL, Opc, Index, SB.TmpVGPR, IsKill, |
1461 | FrameReg, Offset * SB.EltSize, MMO, SB.RS); |
1462 | // This only ever adds one VGPR spill |
1463 | SB.MFI.addToSpilledVGPRs(1); |
1464 | } |
1465 | } |
1466 | |
1467 | bool SIRegisterInfo::spillSGPR(MachineBasicBlock::iterator MI, |
1468 | int Index, |
1469 | RegScavenger *RS, |
1470 | LiveIntervals *LIS, |
1471 | bool OnlyToVGPR) const { |
1472 | SGPRSpillBuilder SB(*this, *ST.getInstrInfo(), isWave32, MI, Index, RS); |
1473 | |
1474 | ArrayRef<SIMachineFunctionInfo::SpilledReg> VGPRSpills = |
1475 | SB.MFI.getSGPRToVGPRSpills(Index); |
1476 | bool SpillToVGPR = !VGPRSpills.empty(); |
1477 | if (OnlyToVGPR && !SpillToVGPR) |
1478 | return false; |
1479 | |
1480 | assert(SpillToVGPR || (SB.SuperReg != SB.MFI.getStackPtrOffsetReg() &&
1481 | SB.SuperReg != SB.MFI.getFrameOffsetReg()));
1482 | |
1483 | if (SpillToVGPR) { |
1484 | |
1485 | assert(SB.NumSubRegs == VGPRSpills.size() &&
1486 | "Num of VGPR lanes should be equal to num of SGPRs spilled");
1487 | |
1488 | for (unsigned i = 0, e = SB.NumSubRegs; i < e; ++i) { |
1489 | Register SubReg = |
1490 | SB.NumSubRegs == 1 |
1491 | ? SB.SuperReg |
1492 | : Register(getSubReg(SB.SuperReg, SB.SplitParts[i])); |
1493 | SIMachineFunctionInfo::SpilledReg Spill = VGPRSpills[i]; |
1494 | |
1495 | bool UseKill = SB.IsKill && i == SB.NumSubRegs - 1; |
1496 | |
1497 | // Mark the "old value of vgpr" input undef only if this is the first sgpr |
1498 | // spill to this specific vgpr in the first basic block. |
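// Each 32-bit sub-register goes into one lane of the assigned VGPR,
// e.g. v_writelane_b32 vN, sM, <lane> (register names illustrative).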
1499 | auto MIB = BuildMI(*SB.MBB, MI, SB.DL, |
1500 | SB.TII.get(AMDGPU::V_WRITELANE_B32), Spill.VGPR) |
1501 | .addReg(SubReg, getKillRegState(UseKill)) |
1502 | .addImm(Spill.Lane) |
1503 | .addReg(Spill.VGPR); |
1504 | if (LIS) { |
1505 | if (i == 0) |
1506 | LIS->ReplaceMachineInstrInMaps(*MI, *MIB); |
1507 | else |
1508 | LIS->InsertMachineInstrInMaps(*MIB); |
1509 | } |
1510 | |
1511 | if (i == 0 && SB.NumSubRegs > 1) { |
1512 | // We may be spilling a super-register which is only partially defined, |
1513 | // and need to ensure later spills think the value is defined. |
1514 | MIB.addReg(SB.SuperReg, RegState::ImplicitDefine); |
1515 | } |
1516 | |
1517 | if (SB.NumSubRegs > 1) |
1518 | MIB.addReg(SB.SuperReg, getKillRegState(UseKill) | RegState::Implicit); |
1519 | |
1520 | // FIXME: Since this spills to another register instead of an actual |
1521 | // frame index, we should delete the frame index when all references to |
1522 | // it are fixed. |
1523 | } |
1524 | } else { |
1525 | SB.prepare(); |
1526 | |
1527 | // SubReg carries the "Kill" flag when SubReg == SB.SuperReg. |
1528 | unsigned SubKillState = getKillRegState((SB.NumSubRegs == 1) && SB.IsKill); |
1529 | |
1530 | // Per VGPR helper data |
1531 | auto PVD = SB.getPerVGPRData(); |
1532 | |
1533 | for (unsigned Offset = 0; Offset < PVD.NumVGPRs; ++Offset) { |
1534 | unsigned TmpVGPRFlags = RegState::Undef; |
1535 | |
1536 | // Write sub registers into the VGPR |
1537 | for (unsigned i = Offset * PVD.PerVGPR, |
1538 | e = std::min((Offset + 1) * PVD.PerVGPR, SB.NumSubRegs); |
1539 | i < e; ++i) { |
1540 | Register SubReg = |
1541 | SB.NumSubRegs == 1 |
1542 | ? SB.SuperReg |
1543 | : Register(getSubReg(SB.SuperReg, SB.SplitParts[i])); |
1544 | |
1545 | MachineInstrBuilder WriteLane = |
1546 | BuildMI(*SB.MBB, MI, SB.DL, SB.TII.get(AMDGPU::V_WRITELANE_B32), |
1547 | SB.TmpVGPR) |
1548 | .addReg(SubReg, SubKillState) |
1549 | .addImm(i % PVD.PerVGPR) |
1550 | .addReg(SB.TmpVGPR, TmpVGPRFlags); |
1551 | TmpVGPRFlags = 0; |
1552 | |
1553 | if (LIS) { |
1554 | if (i == 0) |
1555 | LIS->ReplaceMachineInstrInMaps(*MI, *WriteLane); |
1556 | else |
1557 | LIS->InsertMachineInstrInMaps(*WriteLane); |
1558 | } |
1559 | |
1560 | // There could be undef components of a spilled super register. |
1561 | // TODO: Can we detect this and skip the spill? |
1562 | if (SB.NumSubRegs > 1) { |
1563 | // The last implicit use of the SB.SuperReg carries the "Kill" flag. |
1564 | unsigned SuperKillState = 0; |
1565 | if (i + 1 == SB.NumSubRegs) |
1566 | SuperKillState |= getKillRegState(SB.IsKill); |
1567 | WriteLane.addReg(SB.SuperReg, RegState::Implicit | SuperKillState); |
1568 | } |
1569 | } |
1570 | |
1571 | // Write out VGPR |
1572 | SB.readWriteTmpVGPR(Offset, /*IsLoad*/ false); |
1573 | } |
1574 | |
1575 | SB.restore(); |
1576 | } |
1577 | |
1578 | MI->eraseFromParent(); |
1579 | SB.MFI.addToSpilledSGPRs(SB.NumSubRegs); |
1580 | |
1581 | if (LIS) |
1582 | LIS->removeAllRegUnitsForPhysReg(SB.SuperReg); |
1583 | |
1584 | return true; |
1585 | } |
1586 | |
1587 | bool SIRegisterInfo::restoreSGPR(MachineBasicBlock::iterator MI, |
1588 | int Index, |
1589 | RegScavenger *RS, |
1590 | LiveIntervals *LIS, |
1591 | bool OnlyToVGPR) const { |
1592 | SGPRSpillBuilder SB(*this, *ST.getInstrInfo(), isWave32, MI, Index, RS); |
1593 | |
1594 | ArrayRef<SIMachineFunctionInfo::SpilledReg> VGPRSpills = |
1595 | SB.MFI.getSGPRToVGPRSpills(Index); |
1596 | bool SpillToVGPR = !VGPRSpills.empty(); |
1597 | if (OnlyToVGPR && !SpillToVGPR) |
1598 | return false; |
1599 | |
1600 | if (SpillToVGPR) { |
1601 | for (unsigned i = 0, e = SB.NumSubRegs; i < e; ++i) { |
1602 | Register SubReg = |
1603 | SB.NumSubRegs == 1 |
1604 | ? SB.SuperReg |
1605 | : Register(getSubReg(SB.SuperReg, SB.SplitParts[i])); |
1606 | |
1607 | SIMachineFunctionInfo::SpilledReg Spill = VGPRSpills[i]; |
1608 | auto MIB = BuildMI(*SB.MBB, MI, SB.DL, SB.TII.get(AMDGPU::V_READLANE_B32), |
1609 | SubReg) |
1610 | .addReg(Spill.VGPR) |
1611 | .addImm(Spill.Lane); |
1612 | if (SB.NumSubRegs > 1 && i == 0) |
1613 | MIB.addReg(SB.SuperReg, RegState::ImplicitDefine); |
1614 | if (LIS) { |
1615 | if (i == e - 1) |
1616 | LIS->ReplaceMachineInstrInMaps(*MI, *MIB); |
1617 | else |
1618 | LIS->InsertMachineInstrInMaps(*MIB); |
1619 | } |
1620 | |
1621 | } |
1622 | } else { |
1623 | SB.prepare(); |
1624 | |
1625 | // Per VGPR helper data |
1626 | auto PVD = SB.getPerVGPRData(); |
1627 | |
1628 | for (unsigned Offset = 0; Offset < PVD.NumVGPRs; ++Offset) { |
1629 | // Load in VGPR data |
1630 | SB.readWriteTmpVGPR(Offset, /*IsLoad*/ true); |
1631 | |
1632 | // Unpack lanes |
1633 | for (unsigned i = Offset * PVD.PerVGPR, |
1634 | e = std::min((Offset + 1) * PVD.PerVGPR, SB.NumSubRegs); |
1635 | i < e; ++i) { |
1636 | Register SubReg = |
1637 | SB.NumSubRegs == 1 |
1638 | ? SB.SuperReg |
1639 | : Register(getSubReg(SB.SuperReg, SB.SplitParts[i])); |
1640 | |
1641 | bool LastSubReg = (i + 1 == e); |
1642 | auto MIB = BuildMI(*SB.MBB, MI, SB.DL, |
1643 | SB.TII.get(AMDGPU::V_READLANE_B32), SubReg) |
1644 | .addReg(SB.TmpVGPR, getKillRegState(LastSubReg)) |
1645 | .addImm(i); |
1646 | if (SB.NumSubRegs > 1 && i == 0) |
1647 | MIB.addReg(SB.SuperReg, RegState::ImplicitDefine); |
1648 | if (LIS) { |
1649 | if (i == e - 1) |
1650 | LIS->ReplaceMachineInstrInMaps(*MI, *MIB); |
1651 | else |
1652 | LIS->InsertMachineInstrInMaps(*MIB); |
1653 | } |
1654 | } |
1655 | } |
1656 | |
1657 | SB.restore(); |
1658 | } |
1659 | |
1660 | MI->eraseFromParent(); |
1661 | |
1662 | if (LIS) |
1663 | LIS->removeAllRegUnitsForPhysReg(SB.SuperReg); |
1664 | |
1665 | return true; |
1666 | } |
1667 | |
1668 | bool SIRegisterInfo::spillEmergencySGPR(MachineBasicBlock::iterator MI, |
1669 | MachineBasicBlock &RestoreMBB, |
1670 | Register SGPR, RegScavenger *RS) const { |
1671 | SGPRSpillBuilder SB(*this, *ST.getInstrInfo(), isWave32, MI, SGPR, false, 0, |
1672 | RS); |
1673 | SB.prepare(); |
1674 | // Generate the spill of SGPR to SB.TmpVGPR. |
1675 | unsigned SubKillState = getKillRegState((SB.NumSubRegs == 1) && SB.IsKill); |
1676 | auto PVD = SB.getPerVGPRData(); |
1677 | for (unsigned Offset = 0; Offset < PVD.NumVGPRs; ++Offset) { |
1678 | unsigned TmpVGPRFlags = RegState::Undef; |
1679 | // Write sub registers into the VGPR |
1680 | for (unsigned i = Offset * PVD.PerVGPR, |
1681 | e = std::min((Offset + 1) * PVD.PerVGPR, SB.NumSubRegs); |
1682 | i < e; ++i) { |
1683 | Register SubReg = |
1684 | SB.NumSubRegs == 1 |
1685 | ? SB.SuperReg |
1686 | : Register(getSubReg(SB.SuperReg, SB.SplitParts[i])); |
1687 | |
1688 | MachineInstrBuilder WriteLane = |
1689 | BuildMI(*SB.MBB, MI, SB.DL, SB.TII.get(AMDGPU::V_WRITELANE_B32), |
1690 | SB.TmpVGPR) |
1691 | .addReg(SubReg, SubKillState) |
1692 | .addImm(i % PVD.PerVGPR) |
1693 | .addReg(SB.TmpVGPR, TmpVGPRFlags); |
1694 | TmpVGPRFlags = 0; |
1695 | // There could be undef components of a spilled super register. |
1696 | // TODO: Can we detect this and skip the spill? |
1697 | if (SB.NumSubRegs > 1) { |
1698 | // The last implicit use of the SB.SuperReg carries the "Kill" flag. |
1699 | unsigned SuperKillState = 0; |
1700 | if (i + 1 == SB.NumSubRegs) |
1701 | SuperKillState |= getKillRegState(SB.IsKill); |
1702 | WriteLane.addReg(SB.SuperReg, RegState::Implicit | SuperKillState); |
1703 | } |
1704 | } |
1705 | // Don't need to write VGPR out. |
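// The lanes written into SB.TmpVGPR stay live until the restore block, so no
// scratch store is needed here (unlike the regular SGPR spill path).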
1706 | } |
1707 | |
1708 | // Restore clobbered registers in the specified restore block. |
1709 | MI = RestoreMBB.end(); |
1710 | SB.setMI(&RestoreMBB, MI); |
1711 | // Generate the restore of SGPR from SB.TmpVGPR. |
1712 | for (unsigned Offset = 0; Offset < PVD.NumVGPRs; ++Offset) { |
1713 | // Don't need to load VGPR in. |
1714 | // Unpack lanes |
1715 | for (unsigned i = Offset * PVD.PerVGPR, |
1716 | e = std::min((Offset + 1) * PVD.PerVGPR, SB.NumSubRegs); |
1717 | i < e; ++i) { |
1718 | Register SubReg = |
1719 | SB.NumSubRegs == 1 |
1720 | ? SB.SuperReg |
1721 | : Register(getSubReg(SB.SuperReg, SB.SplitParts[i])); |
1722 | bool LastSubReg = (i + 1 == e); |
1723 | auto MIB = BuildMI(*SB.MBB, MI, SB.DL, SB.TII.get(AMDGPU::V_READLANE_B32), |
1724 | SubReg) |
1725 | .addReg(SB.TmpVGPR, getKillRegState(LastSubReg)) |
1726 | .addImm(i); |
1727 | if (SB.NumSubRegs > 1 && i == 0) |
1728 | MIB.addReg(SB.SuperReg, RegState::ImplicitDefine); |
1729 | } |
1730 | } |
1731 | SB.restore(); |
1732 | |
1733 | SB.MFI.addToSpilledSGPRs(SB.NumSubRegs); |
1734 | return false; |
1735 | } |
1736 | |
1737 | /// Special case of eliminateFrameIndex. Returns true if the SGPR was spilled to |
1738 | /// a VGPR and the stack slot can be safely eliminated when all other users are |
1739 | /// handled. |
1740 | bool SIRegisterInfo::eliminateSGPRToVGPRSpillFrameIndex( |
1741 | MachineBasicBlock::iterator MI, |
1742 | int FI, |
1743 | RegScavenger *RS, |
1744 | LiveIntervals *LIS) const { |
1745 | switch (MI->getOpcode()) { |
1746 | case AMDGPU::SI_SPILL_S1024_SAVE: |
1747 | case AMDGPU::SI_SPILL_S512_SAVE: |
1748 | case AMDGPU::SI_SPILL_S256_SAVE: |
1749 | case AMDGPU::SI_SPILL_S224_SAVE: |
1750 | case AMDGPU::SI_SPILL_S192_SAVE: |
1751 | case AMDGPU::SI_SPILL_S160_SAVE: |
1752 | case AMDGPU::SI_SPILL_S128_SAVE: |
1753 | case AMDGPU::SI_SPILL_S96_SAVE: |
1754 | case AMDGPU::SI_SPILL_S64_SAVE: |
1755 | case AMDGPU::SI_SPILL_S32_SAVE: |
1756 | return spillSGPR(MI, FI, RS, LIS, true); |
1757 | case AMDGPU::SI_SPILL_S1024_RESTORE: |
1758 | case AMDGPU::SI_SPILL_S512_RESTORE: |
1759 | case AMDGPU::SI_SPILL_S256_RESTORE: |
1760 | case AMDGPU::SI_SPILL_S224_RESTORE: |
1761 | case AMDGPU::SI_SPILL_S192_RESTORE: |
1762 | case AMDGPU::SI_SPILL_S160_RESTORE: |
1763 | case AMDGPU::SI_SPILL_S128_RESTORE: |
1764 | case AMDGPU::SI_SPILL_S96_RESTORE: |
1765 | case AMDGPU::SI_SPILL_S64_RESTORE: |
1766 | case AMDGPU::SI_SPILL_S32_RESTORE: |
1767 | return restoreSGPR(MI, FI, RS, LIS, true); |
1768 | default: |
1769 | llvm_unreachable("not an SGPR spill instruction")::llvm::llvm_unreachable_internal("not an SGPR spill instruction" , "llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp", 1769); |
1770 | } |
1771 | } |
1772 | |
1773 | void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI, |
1774 | int SPAdj, unsigned FIOperandNum, |
1775 | RegScavenger *RS) const { |
1776 | MachineFunction *MF = MI->getParent()->getParent(); |
1777 | MachineBasicBlock *MBB = MI->getParent(); |
1778 | SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); |
1779 | MachineFrameInfo &FrameInfo = MF->getFrameInfo(); |
1780 | const SIInstrInfo *TII = ST.getInstrInfo(); |
1781 | DebugLoc DL = MI->getDebugLoc(); |
1782 | |
1783 | assert(SPAdj == 0 && "unhandled SP adjustment in call sequence?");
1784 | |
1785 | MachineOperand &FIOp = MI->getOperand(FIOperandNum); |
1786 | int Index = MI->getOperand(FIOperandNum).getIndex(); |
1787 | |
1788 | Register FrameReg = FrameInfo.isFixedObjectIndex(Index) && hasBasePointer(*MF) |
1789 | ? getBaseRegister() |
1790 | : getFrameRegister(*MF); |
1791 | |
1792 | switch (MI->getOpcode()) { |
1793 | // SGPR register spill |
1794 | case AMDGPU::SI_SPILL_S1024_SAVE: |
1795 | case AMDGPU::SI_SPILL_S512_SAVE: |
1796 | case AMDGPU::SI_SPILL_S256_SAVE: |
1797 | case AMDGPU::SI_SPILL_S224_SAVE: |
1798 | case AMDGPU::SI_SPILL_S192_SAVE: |
1799 | case AMDGPU::SI_SPILL_S160_SAVE: |
1800 | case AMDGPU::SI_SPILL_S128_SAVE: |
1801 | case AMDGPU::SI_SPILL_S96_SAVE: |
1802 | case AMDGPU::SI_SPILL_S64_SAVE: |
1803 | case AMDGPU::SI_SPILL_S32_SAVE: { |
1804 | spillSGPR(MI, Index, RS); |
1805 | break; |
1806 | } |
1807 | |
1808 | // SGPR register restore |
1809 | case AMDGPU::SI_SPILL_S1024_RESTORE: |
1810 | case AMDGPU::SI_SPILL_S512_RESTORE: |
1811 | case AMDGPU::SI_SPILL_S256_RESTORE: |
1812 | case AMDGPU::SI_SPILL_S224_RESTORE: |
1813 | case AMDGPU::SI_SPILL_S192_RESTORE: |
1814 | case AMDGPU::SI_SPILL_S160_RESTORE: |
1815 | case AMDGPU::SI_SPILL_S128_RESTORE: |
1816 | case AMDGPU::SI_SPILL_S96_RESTORE: |
1817 | case AMDGPU::SI_SPILL_S64_RESTORE: |
1818 | case AMDGPU::SI_SPILL_S32_RESTORE: { |
1819 | restoreSGPR(MI, Index, RS); |
1820 | break; |
1821 | } |
1822 | |
1823 | // VGPR register spill |
1824 | case AMDGPU::SI_SPILL_V1024_SAVE: |
1825 | case AMDGPU::SI_SPILL_V512_SAVE: |
1826 | case AMDGPU::SI_SPILL_V256_SAVE: |
1827 | case AMDGPU::SI_SPILL_V224_SAVE: |
1828 | case AMDGPU::SI_SPILL_V192_SAVE: |
1829 | case AMDGPU::SI_SPILL_V160_SAVE: |
1830 | case AMDGPU::SI_SPILL_V128_SAVE: |
1831 | case AMDGPU::SI_SPILL_V96_SAVE: |
1832 | case AMDGPU::SI_SPILL_V64_SAVE: |
1833 | case AMDGPU::SI_SPILL_V32_SAVE: |
1834 | case AMDGPU::SI_SPILL_A1024_SAVE: |
1835 | case AMDGPU::SI_SPILL_A512_SAVE: |
1836 | case AMDGPU::SI_SPILL_A256_SAVE: |
1837 | case AMDGPU::SI_SPILL_A224_SAVE: |
1838 | case AMDGPU::SI_SPILL_A192_SAVE: |
1839 | case AMDGPU::SI_SPILL_A160_SAVE: |
1840 | case AMDGPU::SI_SPILL_A128_SAVE: |
1841 | case AMDGPU::SI_SPILL_A96_SAVE: |
1842 | case AMDGPU::SI_SPILL_A64_SAVE: |
1843 | case AMDGPU::SI_SPILL_A32_SAVE: |
1844 | case AMDGPU::SI_SPILL_AV1024_SAVE: |
1845 | case AMDGPU::SI_SPILL_AV512_SAVE: |
1846 | case AMDGPU::SI_SPILL_AV256_SAVE: |
1847 | case AMDGPU::SI_SPILL_AV224_SAVE: |
1848 | case AMDGPU::SI_SPILL_AV192_SAVE: |
1849 | case AMDGPU::SI_SPILL_AV160_SAVE: |
1850 | case AMDGPU::SI_SPILL_AV128_SAVE: |
1851 | case AMDGPU::SI_SPILL_AV96_SAVE: |
1852 | case AMDGPU::SI_SPILL_AV64_SAVE: |
1853 | case AMDGPU::SI_SPILL_AV32_SAVE: { |
1854 | const MachineOperand *VData = TII->getNamedOperand(*MI, |
1855 | AMDGPU::OpName::vdata); |
1856 | assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg() ==
1857 | MFI->getStackPtrOffsetReg());
1858 | |
1859 | unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_STORE_DWORD_SADDR |
1860 | : AMDGPU::BUFFER_STORE_DWORD_OFFSET; |
1861 | auto *MBB = MI->getParent(); |
1862 | buildSpillLoadStore( |
1863 | *MBB, MI, DL, Opc, Index, VData->getReg(), VData->isKill(), FrameReg, |
1864 | TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(), |
1865 | *MI->memoperands_begin(), RS); |
1866 | MFI->addToSpilledVGPRs(getNumSubRegsForSpillOp(MI->getOpcode())); |
1867 | MI->eraseFromParent(); |
1868 | break; |
1869 | } |
1870 | case AMDGPU::SI_SPILL_V32_RESTORE: |
1871 | case AMDGPU::SI_SPILL_V64_RESTORE: |
1872 | case AMDGPU::SI_SPILL_V96_RESTORE: |
1873 | case AMDGPU::SI_SPILL_V128_RESTORE: |
1874 | case AMDGPU::SI_SPILL_V160_RESTORE: |
1875 | case AMDGPU::SI_SPILL_V192_RESTORE: |
1876 | case AMDGPU::SI_SPILL_V224_RESTORE: |
1877 | case AMDGPU::SI_SPILL_V256_RESTORE: |
1878 | case AMDGPU::SI_SPILL_V512_RESTORE: |
1879 | case AMDGPU::SI_SPILL_V1024_RESTORE: |
1880 | case AMDGPU::SI_SPILL_A32_RESTORE: |
1881 | case AMDGPU::SI_SPILL_A64_RESTORE: |
1882 | case AMDGPU::SI_SPILL_A96_RESTORE: |
1883 | case AMDGPU::SI_SPILL_A128_RESTORE: |
1884 | case AMDGPU::SI_SPILL_A160_RESTORE: |
1885 | case AMDGPU::SI_SPILL_A192_RESTORE: |
1886 | case AMDGPU::SI_SPILL_A224_RESTORE: |
1887 | case AMDGPU::SI_SPILL_A256_RESTORE: |
1888 | case AMDGPU::SI_SPILL_A512_RESTORE: |
1889 | case AMDGPU::SI_SPILL_A1024_RESTORE: |
1890 | case AMDGPU::SI_SPILL_AV32_RESTORE: |
1891 | case AMDGPU::SI_SPILL_AV64_RESTORE: |
1892 | case AMDGPU::SI_SPILL_AV96_RESTORE: |
1893 | case AMDGPU::SI_SPILL_AV128_RESTORE: |
1894 | case AMDGPU::SI_SPILL_AV160_RESTORE: |
1895 | case AMDGPU::SI_SPILL_AV192_RESTORE: |
1896 | case AMDGPU::SI_SPILL_AV224_RESTORE: |
1897 | case AMDGPU::SI_SPILL_AV256_RESTORE: |
1898 | case AMDGPU::SI_SPILL_AV512_RESTORE: |
1899 | case AMDGPU::SI_SPILL_AV1024_RESTORE: { |
1900 | const MachineOperand *VData = TII->getNamedOperand(*MI, |
1901 | AMDGPU::OpName::vdata); |
1902 | assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg() ==
1903 | MFI->getStackPtrOffsetReg());
1904 | |
1905 | unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_LOAD_DWORD_SADDR |
1906 | : AMDGPU::BUFFER_LOAD_DWORD_OFFSET; |
1907 | auto *MBB = MI->getParent(); |
1908 | buildSpillLoadStore( |
1909 | *MBB, MI, DL, Opc, Index, VData->getReg(), VData->isKill(), FrameReg, |
1910 | TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(), |
1911 | *MI->memoperands_begin(), RS); |
1912 | MI->eraseFromParent(); |
1913 | break; |
1914 | } |
1915 | |
1916 | default: { |
1917 | // Other access to frame index |
1918 | const DebugLoc &DL = MI->getDebugLoc(); |
1919 | |
1920 | int64_t Offset = FrameInfo.getObjectOffset(Index); |
1921 | if (ST.enableFlatScratch()) { |
1922 | if (TII->isFLATScratch(*MI)) { |
1923 | assert((int16_t)FIOperandNum ==
1924 | AMDGPU::getNamedOperandIdx(MI->getOpcode(),
1925 | AMDGPU::OpName::saddr));
1926 | |
1927 | // The offset is always swizzled; just replace it.
1928 | if (FrameReg) |
1929 | FIOp.ChangeToRegister(FrameReg, false); |
1930 | |
1931 | if (!Offset) |
1932 | return; |
1933 | |
1934 | MachineOperand *OffsetOp = |
1935 | TII->getNamedOperand(*MI, AMDGPU::OpName::offset); |
1936 | int64_t NewOffset = Offset + OffsetOp->getImm(); |
1937 | if (TII->isLegalFLATOffset(NewOffset, AMDGPUAS::PRIVATE_ADDRESS, |
1938 | SIInstrFlags::FlatScratch)) { |
1939 | OffsetOp->setImm(NewOffset); |
1940 | if (FrameReg) |
1941 | return; |
1942 | Offset = 0; |
1943 | } |
1944 | |
1945 | assert(!TII->getNamedOperand(*MI, AMDGPU::OpName::vaddr) &&
1946 | "Unexpected vaddr for flat scratch with a FI operand");
1947 | |
1948 | // On GFX10 we have ST mode to use no registers for an address. |
1949 | // Otherwise we need to materialize 0 into an SGPR. |
1950 | if (!Offset && ST.hasFlatScratchSTMode()) { |
1951 | unsigned Opc = MI->getOpcode(); |
1952 | unsigned NewOpc = AMDGPU::getFlatScratchInstSTfromSS(Opc); |
1953 | MI->RemoveOperand( |
1954 | AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::saddr)); |
1955 | MI->setDesc(TII->get(NewOpc)); |
1956 | return; |
1957 | } |
1958 | } |
1959 | |
1960 | if (!FrameReg) { |
1961 | FIOp.ChangeToImmediate(Offset); |
1962 | if (TII->isImmOperandLegal(*MI, FIOperandNum, FIOp)) |
1963 | return; |
1964 | } |
1965 | |
1966 | // We need to use a register here. Check if we can use an SGPR or need
1967 | // a VGPR.
1968 | FIOp.ChangeToRegister(AMDGPU::M0, false); |
1969 | bool UseSGPR = TII->isOperandLegal(*MI, FIOperandNum, &FIOp); |
1970 | |
1971 | if (!Offset && FrameReg && UseSGPR) { |
1972 | FIOp.setReg(FrameReg); |
1973 | return; |
1974 | } |
1975 | |
1976 | const TargetRegisterClass *RC = UseSGPR ? &AMDGPU::SReg_32_XM0RegClass |
1977 | : &AMDGPU::VGPR_32RegClass; |
1978 | |
1979 | Register TmpReg = RS->scavengeRegister(RC, MI, 0, !UseSGPR); |
1980 | FIOp.setReg(TmpReg); |
1981 | FIOp.setIsKill(true); |
1982 | |
1983 | if ((!FrameReg || !Offset) && TmpReg) { |
1984 | unsigned Opc = UseSGPR ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32; |
1985 | auto MIB = BuildMI(*MBB, MI, DL, TII->get(Opc), TmpReg); |
1986 | if (FrameReg) |
1987 | MIB.addReg(FrameReg); |
1988 | else |
1989 | MIB.addImm(Offset); |
1990 | |
1991 | return; |
1992 | } |
1993 | |
1994 | Register TmpSReg = |
1995 | UseSGPR ? TmpReg |
1996 | : RS->scavengeRegister(&AMDGPU::SReg_32_XM0RegClass, MI, 0, |
1997 | !UseSGPR); |
1998 | |
1999 | // TODO: for flat scratch another attempt can be made with a VGPR index |
2000 | // if no SGPRs can be scavenged. |
2001 | if ((!TmpSReg && !FrameReg) || (!TmpReg && !UseSGPR)) |
2002 | report_fatal_error("Cannot scavenge register in FI elimination!"); |
2003 | |
2004 | if (!TmpSReg) { |
2005 | // Use frame register and restore it after. |
2006 | TmpSReg = FrameReg; |
2007 | FIOp.setReg(FrameReg); |
2008 | FIOp.setIsKill(false); |
2009 | } |
2010 | |
2011 | BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_I32), TmpSReg) |
2012 | .addReg(FrameReg) |
2013 | .addImm(Offset); |
2014 | |
2015 | if (!UseSGPR) |
2016 | BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg) |
2017 | .addReg(TmpSReg, RegState::Kill); |
2018 | |
2019 | if (TmpSReg == FrameReg) { |
2020 | // Undo frame register modification. |
2021 | BuildMI(*MBB, std::next(MI), DL, TII->get(AMDGPU::S_ADD_I32), |
2022 | FrameReg) |
2023 | .addReg(FrameReg) |
2024 | .addImm(-Offset); |
2025 | } |
2026 | |
2027 | return; |
2028 | } |
2029 | |
2030 | bool IsMUBUF = TII->isMUBUF(*MI); |
2031 | |
2032 | if (!IsMUBUF && !MFI->isEntryFunction()) { |
2033 | // Convert to a swizzled stack address by scaling by the wave size. |
2034 | // |
2035 | // In an entry function/kernel the offset is already swizzled. |
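// For non-entry functions the frame register holds a per-wave byte offset, so
// the per-lane address computed below is roughly
//   ResultReg = (FrameReg >> WavefrontSizeLog2) + Offset.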
2036 | |
2037 | bool IsCopy = MI->getOpcode() == AMDGPU::V_MOV_B32_e32; |
2038 | Register ResultReg = |
2039 | IsCopy ? MI->getOperand(0).getReg() |
2040 | : RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0); |
2041 | |
2042 | int64_t Offset = FrameInfo.getObjectOffset(Index); |
2043 | if (Offset == 0) { |
2044 | // XXX - This never happens because of the emergency scavenging slot at 0?
2045 | BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64), ResultReg) |
2046 | .addImm(ST.getWavefrontSizeLog2()) |
2047 | .addReg(FrameReg); |
2048 | } else { |
2049 | if (auto MIB = TII->getAddNoCarry(*MBB, MI, DL, ResultReg, *RS)) { |
2050 | // Reuse ResultReg in intermediate step. |
2051 | Register ScaledReg = ResultReg; |
2052 | |
2053 | BuildMI(*MBB, *MIB, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64), |
2054 | ScaledReg) |
2055 | .addImm(ST.getWavefrontSizeLog2()) |
2056 | .addReg(FrameReg); |
2057 | |
2058 | const bool IsVOP2 = MIB->getOpcode() == AMDGPU::V_ADD_U32_e32; |
2059 | |
2060 | // TODO: Fold if the use instruction is another add of a constant.
2061 | if (IsVOP2 || AMDGPU::isInlinableLiteral32(Offset, ST.hasInv2PiInlineImm())) { |
2062 | // FIXME: This can fail |
2063 | MIB.addImm(Offset); |
2064 | MIB.addReg(ScaledReg, RegState::Kill); |
2065 | if (!IsVOP2) |
2066 | MIB.addImm(0); // clamp bit |
2067 | } else { |
2068 | assert(MIB->getOpcode() == AMDGPU::V_ADD_CO_U32_e64 &&
2069 | "Need to reuse carry out register");
2070 | |
2071 | // Use scavenged unused carry out as offset register. |
2072 | Register ConstOffsetReg; |
2073 | if (!isWave32) |
2074 | ConstOffsetReg = getSubReg(MIB.getReg(1), AMDGPU::sub0); |
2075 | else |
2076 | ConstOffsetReg = MIB.getReg(1); |
2077 | |
2078 | BuildMI(*MBB, *MIB, DL, TII->get(AMDGPU::S_MOV_B32), ConstOffsetReg) |
2079 | .addImm(Offset); |
2080 | MIB.addReg(ConstOffsetReg, RegState::Kill); |
2081 | MIB.addReg(ScaledReg, RegState::Kill); |
2082 | MIB.addImm(0); // clamp bit |
2083 | } |
2084 | } else { |
2085 | // We have to produce a carry out, and there isn't a free SGPR pair |
2086 | // for it. We can keep the whole computation on the SALU to avoid |
2087 | // clobbering an additional register at the cost of an extra mov. |
2088 | |
2089 | // We may have 1 free scratch SGPR even though a carry out is |
2090 | // unavailable. Only one additional mov is needed. |
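// A sketch of the SALU-only sequence (register names illustrative):
//   s_lshr_b32 s_tmp, FrameReg, WavefrontSizeLog2
//   s_add_i32  s_tmp, s_tmp, Offset
//   copy       ResultReg, s_tmp
// If no scratch SGPR could be scavenged, FrameReg itself is used and then
// restored by reversing the add and the shift.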
2091 | Register TmpScaledReg = |
2092 | RS->scavengeRegister(&AMDGPU::SReg_32_XM0RegClass, MI, 0, false); |
2093 | Register ScaledReg = TmpScaledReg.isValid() ? TmpScaledReg : FrameReg; |
2094 | |
2095 | BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_LSHR_B32), ScaledReg) |
2096 | .addReg(FrameReg) |
2097 | .addImm(ST.getWavefrontSizeLog2()); |
2098 | BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_I32), ScaledReg) |
2099 | .addReg(ScaledReg, RegState::Kill) |
2100 | .addImm(Offset); |
2101 | BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), ResultReg) |
2102 | .addReg(ScaledReg, RegState::Kill); |
2103 | |
2104 | // If there were truly no free SGPRs, we need to undo everything. |
2105 | if (!TmpScaledReg.isValid()) { |
2106 | BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_I32), ScaledReg) |
2107 | .addReg(ScaledReg, RegState::Kill) |
2108 | .addImm(-Offset); |
2109 | BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_LSHL_B32), ScaledReg) |
2110 | .addReg(FrameReg) |
2111 | .addImm(ST.getWavefrontSizeLog2()); |
2112 | } |
2113 | } |
2114 | } |
2115 | |
2116 | // Don't introduce an extra copy if we're just materializing in a mov. |
2117 | if (IsCopy) |
2118 | MI->eraseFromParent(); |
2119 | else |
2120 | FIOp.ChangeToRegister(ResultReg, false, false, true); |
2121 | return; |
2122 | } |
2123 | |
2124 | if (IsMUBUF) { |
2125 | // Disable offen so we don't need a 0 vgpr base. |
2126 | assert(static_cast<int>(FIOperandNum) ==
2127 | AMDGPU::getNamedOperandIdx(MI->getOpcode(),
2128 | AMDGPU::OpName::vaddr));
2129 | |
2130 | auto &SOffset = *TII->getNamedOperand(*MI, AMDGPU::OpName::soffset); |
2131 | assert((SOffset.isImm() && SOffset.getImm() == 0));
2132 | |
2133 | if (FrameReg != AMDGPU::NoRegister) |
2134 | SOffset.ChangeToRegister(FrameReg, false); |
2135 | |
2136 | int64_t Offset = FrameInfo.getObjectOffset(Index); |
2137 | int64_t OldImm |
2138 | = TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(); |
2139 | int64_t NewOffset = OldImm + Offset; |
2140 | |
2141 | if (SIInstrInfo::isLegalMUBUFImmOffset(NewOffset) && |
2142 | buildMUBUFOffsetLoadStore(ST, FrameInfo, MI, Index, NewOffset)) { |
2143 | MI->eraseFromParent(); |
2144 | return; |
2145 | } |
2146 | } |
2147 | |
2148 | // If the offset is simply too big, don't convert to a scratch wave offset |
2149 | // relative index. |
2150 | |
2151 | FIOp.ChangeToImmediate(Offset); |
2152 | if (!TII->isImmOperandLegal(*MI, FIOperandNum, FIOp)) { |
2153 | Register TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0); |
2154 | BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg) |
2155 | .addImm(Offset); |
2156 | FIOp.ChangeToRegister(TmpReg, false, false, true); |
2157 | } |
2158 | } |
2159 | } |
2160 | } |
2161 | |
2162 | StringRef SIRegisterInfo::getRegAsmName(MCRegister Reg) const { |
2163 | return AMDGPUInstPrinter::getRegisterName(Reg); |
2164 | } |
2165 | |
2166 | static const TargetRegisterClass * |
2167 | getAnyVGPRClassForBitWidth(unsigned BitWidth) { |
2168 | if (BitWidth <= 64) |
2169 | return &AMDGPU::VReg_64RegClass; |
2170 | if (BitWidth <= 96) |
2171 | return &AMDGPU::VReg_96RegClass; |
2172 | if (BitWidth <= 128) |
2173 | return &AMDGPU::VReg_128RegClass; |
2174 | if (BitWidth <= 160) |
2175 | return &AMDGPU::VReg_160RegClass; |
2176 | if (BitWidth <= 192) |
2177 | return &AMDGPU::VReg_192RegClass; |
2178 | if (BitWidth <= 224) |
2179 | return &AMDGPU::VReg_224RegClass; |
2180 | if (BitWidth <= 256) |
2181 | return &AMDGPU::VReg_256RegClass; |
2182 | if (BitWidth <= 512) |
2183 | return &AMDGPU::VReg_512RegClass; |
2184 | if (BitWidth <= 1024) |
2185 | return &AMDGPU::VReg_1024RegClass; |
2186 | |
2187 | return nullptr; |
2188 | } |
2189 | |
2190 | static const TargetRegisterClass * |
2191 | getAlignedVGPRClassForBitWidth(unsigned BitWidth) { |
2192 | if (BitWidth <= 64) |
2193 | return &AMDGPU::VReg_64_Align2RegClass; |
2194 | if (BitWidth <= 96) |
2195 | return &AMDGPU::VReg_96_Align2RegClass; |
2196 | if (BitWidth <= 128) |
2197 | return &AMDGPU::VReg_128_Align2RegClass; |
2198 | if (BitWidth <= 160) |
2199 | return &AMDGPU::VReg_160_Align2RegClass; |
2200 | if (BitWidth <= 192) |
2201 | return &AMDGPU::VReg_192_Align2RegClass; |
2202 | if (BitWidth <= 224) |
2203 | return &AMDGPU::VReg_224_Align2RegClass; |
2204 | if (BitWidth <= 256) |
2205 | return &AMDGPU::VReg_256_Align2RegClass; |
2206 | if (BitWidth <= 512) |
2207 | return &AMDGPU::VReg_512_Align2RegClass; |
2208 | if (BitWidth <= 1024) |
2209 | return &AMDGPU::VReg_1024_Align2RegClass; |
2210 | |
2211 | return nullptr; |
2212 | } |
2213 | |
2214 | const TargetRegisterClass * |
2215 | SIRegisterInfo::getVGPRClassForBitWidth(unsigned BitWidth) const { |
2216 | if (BitWidth == 1) |
2217 | return &AMDGPU::VReg_1RegClass; |
2218 | if (BitWidth <= 16) |
2219 | return &AMDGPU::VGPR_LO16RegClass; |
2220 | if (BitWidth <= 32) |
2221 | return &AMDGPU::VGPR_32RegClass; |
2222 | return ST.needsAlignedVGPRs() ? getAlignedVGPRClassForBitWidth(BitWidth) |
2223 | : getAnyVGPRClassForBitWidth(BitWidth); |
2224 | } |
2225 | |
2226 | static const TargetRegisterClass * |
2227 | getAnyAGPRClassForBitWidth(unsigned BitWidth) { |
2228 | if (BitWidth <= 64) |
2229 | return &AMDGPU::AReg_64RegClass; |
2230 | if (BitWidth <= 96) |
2231 | return &AMDGPU::AReg_96RegClass; |
2232 | if (BitWidth <= 128) |
2233 | return &AMDGPU::AReg_128RegClass; |
2234 | if (BitWidth <= 160) |
2235 | return &AMDGPU::AReg_160RegClass; |
2236 | if (BitWidth <= 192) |
2237 | return &AMDGPU::AReg_192RegClass; |
2238 | if (BitWidth <= 224) |
2239 | return &AMDGPU::AReg_224RegClass; |
2240 | if (BitWidth <= 256) |
2241 | return &AMDGPU::AReg_256RegClass; |
2242 | if (BitWidth <= 512) |
2243 | return &AMDGPU::AReg_512RegClass; |
2244 | if (BitWidth <= 1024) |
2245 | return &AMDGPU::AReg_1024RegClass; |
2246 | |
2247 | return nullptr; |
2248 | } |
2249 | |
2250 | static const TargetRegisterClass * |
2251 | getAlignedAGPRClassForBitWidth(unsigned BitWidth) { |
2252 | if (BitWidth <= 64) |
2253 | return &AMDGPU::AReg_64_Align2RegClass; |
2254 | if (BitWidth <= 96) |
2255 | return &AMDGPU::AReg_96_Align2RegClass; |
2256 | if (BitWidth <= 128) |
2257 | return &AMDGPU::AReg_128_Align2RegClass; |
2258 | if (BitWidth <= 160) |
2259 | return &AMDGPU::AReg_160_Align2RegClass; |
2260 | if (BitWidth <= 192) |
2261 | return &AMDGPU::AReg_192_Align2RegClass; |
2262 | if (BitWidth <= 224) |
2263 | return &AMDGPU::AReg_224_Align2RegClass; |
2264 | if (BitWidth <= 256) |
2265 | return &AMDGPU::AReg_256_Align2RegClass; |
2266 | if (BitWidth <= 512) |
2267 | return &AMDGPU::AReg_512_Align2RegClass; |
2268 | if (BitWidth <= 1024) |
2269 | return &AMDGPU::AReg_1024_Align2RegClass; |
2270 | |
2271 | return nullptr; |
2272 | } |
2273 | |
2274 | const TargetRegisterClass * |
2275 | SIRegisterInfo::getAGPRClassForBitWidth(unsigned BitWidth) const { |
2276 | if (BitWidth <= 16) |
2277 | return &AMDGPU::AGPR_LO16RegClass; |
2278 | if (BitWidth <= 32) |
2279 | return &AMDGPU::AGPR_32RegClass; |
2280 | return ST.needsAlignedVGPRs() ? getAlignedAGPRClassForBitWidth(BitWidth) |
2281 | : getAnyAGPRClassForBitWidth(BitWidth); |
2282 | } |
2283 | |
2284 | static const TargetRegisterClass * |
2285 | getAnyVectorSuperClassForBitWidth(unsigned BitWidth) { |
2286 | if (BitWidth <= 64) |
2287 | return &AMDGPU::AV_64RegClass; |
2288 | if (BitWidth <= 96) |
2289 | return &AMDGPU::AV_96RegClass; |
2290 | if (BitWidth <= 128) |
2291 | return &AMDGPU::AV_128RegClass; |
2292 | if (BitWidth <= 160) |
2293 | return &AMDGPU::AV_160RegClass; |
2294 | if (BitWidth <= 192) |
2295 | return &AMDGPU::AV_192RegClass; |
2296 | if (BitWidth <= 224) |
2297 | return &AMDGPU::AV_224RegClass; |
2298 | if (BitWidth <= 256) |
2299 | return &AMDGPU::AV_256RegClass; |
2300 | if (BitWidth <= 512) |
2301 | return &AMDGPU::AV_512RegClass; |
2302 | if (BitWidth <= 1024) |
2303 | return &AMDGPU::AV_1024RegClass; |
2304 | |
2305 | return nullptr; |
2306 | } |
2307 | |
2308 | static const TargetRegisterClass * |
2309 | getAlignedVectorSuperClassForBitWidth(unsigned BitWidth) { |
2310 | if (BitWidth <= 64) |
2311 | return &AMDGPU::AV_64_Align2RegClass; |
2312 | if (BitWidth <= 96) |
2313 | return &AMDGPU::AV_96_Align2RegClass; |
2314 | if (BitWidth <= 128) |
2315 | return &AMDGPU::AV_128_Align2RegClass; |
2316 | if (BitWidth <= 160) |
2317 | return &AMDGPU::AV_160_Align2RegClass; |
2318 | if (BitWidth <= 192) |
2319 | return &AMDGPU::AV_192_Align2RegClass; |
2320 | if (BitWidth <= 224) |
2321 | return &AMDGPU::AV_224_Align2RegClass; |
2322 | if (BitWidth <= 256) |
2323 | return &AMDGPU::AV_256_Align2RegClass; |
2324 | if (BitWidth <= 512) |
2325 | return &AMDGPU::AV_512_Align2RegClass; |
2326 | if (BitWidth <= 1024) |
2327 | return &AMDGPU::AV_1024_Align2RegClass; |
2328 | |
2329 | return nullptr; |
2330 | } |
2331 | |
2332 | const TargetRegisterClass * |
2333 | SIRegisterInfo::getVectorSuperClassForBitWidth(unsigned BitWidth) const { |
2334 | if (BitWidth <= 16) |
2335 | return &AMDGPU::VGPR_LO16RegClass; |
2336 | if (BitWidth <= 32) |
2337 | return &AMDGPU::AV_32RegClass; |
2338 | return ST.needsAlignedVGPRs() |
2339 | ? getAlignedVectorSuperClassForBitWidth(BitWidth) |
2340 | : getAnyVectorSuperClassForBitWidth(BitWidth); |
2341 | } |
2342 | |
2343 | const TargetRegisterClass * |
2344 | SIRegisterInfo::getSGPRClassForBitWidth(unsigned BitWidth) { |
2345 | if (BitWidth <= 16) |
2346 | return &AMDGPU::SGPR_LO16RegClass; |
2347 | if (BitWidth <= 32) |
2348 | return &AMDGPU::SReg_32RegClass; |
2349 | if (BitWidth <= 64) |
2350 | return &AMDGPU::SReg_64RegClass; |
2351 | if (BitWidth <= 96) |
2352 | return &AMDGPU::SGPR_96RegClass; |
2353 | if (BitWidth <= 128) |
2354 | return &AMDGPU::SGPR_128RegClass; |
2355 | if (BitWidth <= 160) |
2356 | return &AMDGPU::SGPR_160RegClass; |
2357 | if (BitWidth <= 192) |
2358 | return &AMDGPU::SGPR_192RegClass; |
2359 | if (BitWidth <= 224) |
2360 | return &AMDGPU::SGPR_224RegClass; |
2361 | if (BitWidth <= 256) |
2362 | return &AMDGPU::SGPR_256RegClass; |
2363 | if (BitWidth <= 512) |
2364 | return &AMDGPU::SGPR_512RegClass; |
2365 | if (BitWidth <= 1024) |
2366 | return &AMDGPU::SGPR_1024RegClass; |
2367 | |
2368 | return nullptr; |
2369 | } |
2370 | |
2371 | // FIXME: This is very slow. It might be worth creating a map from physreg to |
2372 | // register class. |
2373 | const TargetRegisterClass * |
2374 | SIRegisterInfo::getPhysRegClass(MCRegister Reg) const { |
2375 | static const TargetRegisterClass *const BaseClasses[] = { |
2376 | &AMDGPU::VGPR_LO16RegClass, |
2377 | &AMDGPU::VGPR_HI16RegClass, |
2378 | &AMDGPU::SReg_LO16RegClass, |
2379 | &AMDGPU::AGPR_LO16RegClass, |
2380 | &AMDGPU::VGPR_32RegClass, |
2381 | &AMDGPU::SReg_32RegClass, |
2382 | &AMDGPU::AGPR_32RegClass, |
2383 | &AMDGPU::AGPR_32RegClass, |
2384 | &AMDGPU::VReg_64_Align2RegClass, |
2385 | &AMDGPU::VReg_64RegClass, |
2386 | &AMDGPU::SReg_64RegClass, |
2387 | &AMDGPU::AReg_64_Align2RegClass, |
2388 | &AMDGPU::AReg_64RegClass, |
2389 | &AMDGPU::VReg_96_Align2RegClass, |
2390 | &AMDGPU::VReg_96RegClass, |
2391 | &AMDGPU::SReg_96RegClass, |
2392 | &AMDGPU::AReg_96_Align2RegClass, |
2393 | &AMDGPU::AReg_96RegClass, |
2394 | &AMDGPU::VReg_128_Align2RegClass, |
2395 | &AMDGPU::VReg_128RegClass, |
2396 | &AMDGPU::SReg_128RegClass, |
2397 | &AMDGPU::AReg_128_Align2RegClass, |
2398 | &AMDGPU::AReg_128RegClass, |
2399 | &AMDGPU::VReg_160_Align2RegClass, |
2400 | &AMDGPU::VReg_160RegClass, |
2401 | &AMDGPU::SReg_160RegClass, |
2402 | &AMDGPU::AReg_160_Align2RegClass, |
2403 | &AMDGPU::AReg_160RegClass, |
2404 | &AMDGPU::VReg_192_Align2RegClass, |
2405 | &AMDGPU::VReg_192RegClass, |
2406 | &AMDGPU::SReg_192RegClass, |
2407 | &AMDGPU::AReg_192_Align2RegClass, |
2408 | &AMDGPU::AReg_192RegClass, |
2409 | &AMDGPU::VReg_224_Align2RegClass, |
2410 | &AMDGPU::VReg_224RegClass, |
2411 | &AMDGPU::SReg_224RegClass, |
2412 | &AMDGPU::AReg_224_Align2RegClass, |
2413 | &AMDGPU::AReg_224RegClass, |
2414 | &AMDGPU::VReg_256_Align2RegClass, |
2415 | &AMDGPU::VReg_256RegClass, |
2416 | &AMDGPU::SReg_256RegClass, |
2417 | &AMDGPU::AReg_256_Align2RegClass, |
2418 | &AMDGPU::AReg_256RegClass, |
2419 | &AMDGPU::VReg_512_Align2RegClass, |
2420 | &AMDGPU::VReg_512RegClass, |
2421 | &AMDGPU::SReg_512RegClass, |
2422 | &AMDGPU::AReg_512_Align2RegClass, |
2423 | &AMDGPU::AReg_512RegClass, |
2424 | &AMDGPU::SReg_1024RegClass, |
2425 | &AMDGPU::VReg_1024_Align2RegClass, |
2426 | &AMDGPU::VReg_1024RegClass, |
2427 | &AMDGPU::AReg_1024_Align2RegClass, |
2428 | &AMDGPU::AReg_1024RegClass, |
2429 | &AMDGPU::SCC_CLASSRegClass, |
2430 | &AMDGPU::Pseudo_SReg_32RegClass, |
2431 | &AMDGPU::Pseudo_SReg_128RegClass, |
2432 | }; |
2433 | |
2434 | for (const TargetRegisterClass *BaseClass : BaseClasses) { |
2435 | if (BaseClass->contains(Reg)) { |
2436 | return BaseClass; |
2437 | } |
2438 | } |
2439 | return nullptr; |
2440 | } |
2441 | |
2442 | bool SIRegisterInfo::isSGPRReg(const MachineRegisterInfo &MRI, |
2443 | Register Reg) const { |
2444 | const TargetRegisterClass *RC; |
2445 | if (Reg.isVirtual()) |
2446 | RC = MRI.getRegClass(Reg); |
2447 | else |
2448 | RC = getPhysRegClass(Reg); |
2449 | return isSGPRClass(RC); |
2450 | } |
2451 | |
2452 | const TargetRegisterClass * |
2453 | SIRegisterInfo::getEquivalentVGPRClass(const TargetRegisterClass *SRC) const { |
2454 | unsigned Size = getRegSizeInBits(*SRC); |
2455 | const TargetRegisterClass *VRC = getVGPRClassForBitWidth(Size); |
2456 | assert(VRC && "Invalid register class size");
2457 | return VRC; |
2458 | } |
2459 | |
2460 | const TargetRegisterClass * |
2461 | SIRegisterInfo::getEquivalentAGPRClass(const TargetRegisterClass *SRC) const { |
2462 | unsigned Size = getRegSizeInBits(*SRC); |
2463 | const TargetRegisterClass *ARC = getAGPRClassForBitWidth(Size); |
2464 | assert(ARC && "Invalid register class size");
2465 | return ARC; |
2466 | } |
2467 | |
2468 | const TargetRegisterClass * |
2469 | SIRegisterInfo::getEquivalentSGPRClass(const TargetRegisterClass *VRC) const { |
2470 | unsigned Size = getRegSizeInBits(*VRC); |
2471 | if (Size == 32) |
2472 | return &AMDGPU::SGPR_32RegClass; |
2473 | const TargetRegisterClass *SRC = getSGPRClassForBitWidth(Size); |
2474 | assert(SRC && "Invalid register class size");
2475 | return SRC; |
2476 | } |
2477 | |
2478 | const TargetRegisterClass *SIRegisterInfo::getSubRegClass( |
2479 | const TargetRegisterClass *RC, unsigned SubIdx) const { |
2480 | if (SubIdx == AMDGPU::NoSubRegister) |
2481 | return RC; |
2482 | |
2483 | // We can assume that each lane corresponds to one 32-bit register. |
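     | // e.g. a two-channel index such as sub0_sub1 gives Size = 64 (illustrative).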
2484 | unsigned Size = getNumChannelsFromSubReg(SubIdx) * 32; |
2485 | if (isAGPRClass(RC)) { |
2486 | RC = getAGPRClassForBitWidth(Size); |
2487 | } else if (isVGPRClass(RC)) { |
2488 | RC = getVGPRClassForBitWidth(Size); |
2489 | } else if (isVectorSuperClass(RC)) { |
2490 | RC = getVectorSuperClassForBitWidth(Size); |
2491 | } else { |
2492 | RC = getSGPRClassForBitWidth(Size); |
2493 | } |
2494 | assert(RC && "Invalid sub-register class size");
2495 | return RC; |
2496 | } |
2497 | |
2498 | const TargetRegisterClass * |
2499 | SIRegisterInfo::getCompatibleSubRegClass(const TargetRegisterClass *SuperRC, |
2500 | const TargetRegisterClass *SubRC, |
2501 | unsigned SubIdx) const { |
2502 | // Ensure this subregister index is aligned in the super register. |
2503 | const TargetRegisterClass *MatchRC = |
2504 | getMatchingSuperRegClass(SuperRC, SubRC, SubIdx); |
2505 | return MatchRC && MatchRC->hasSubClassEq(SuperRC) ? MatchRC : nullptr; |
2506 | } |
2507 | |
2508 | bool SIRegisterInfo::opCanUseInlineConstant(unsigned OpType) const { |
2509 | if (OpType >= AMDGPU::OPERAND_REG_INLINE_AC_FIRST && |
2510 | OpType <= AMDGPU::OPERAND_REG_INLINE_AC_LAST) |
2511 | return !ST.hasMFMAInlineLiteralBug(); |
2512 | |
2513 | return OpType >= AMDGPU::OPERAND_SRC_FIRST && |
2514 | OpType <= AMDGPU::OPERAND_SRC_LAST; |
2515 | } |
2516 | |
2517 | bool SIRegisterInfo::shouldRewriteCopySrc( |
2518 | const TargetRegisterClass *DefRC, |
2519 | unsigned DefSubReg, |
2520 | const TargetRegisterClass *SrcRC, |
2521 | unsigned SrcSubReg) const { |
2522 | // We want to prefer the smallest register class possible, so we don't want to |
2523 | // stop and rewrite on anything that looks like a subregister |
2524 | // extract. Operations mostly don't care about the super register class, so we |
2525 | // only want to stop on the most basic of copies between the same register |
2526 | // class. |
2527 | // |
2528 | // e.g. if we have something like |
2529 | // %0 = ... |
2530 | // %1 = ... |
2531 | // %2 = REG_SEQUENCE %0, sub0, %1, sub1, %2, sub2 |
2532 | // %3 = COPY %2, sub0 |
2533 | // |
2534 | // We want to look through the COPY to find: |
2535 | // => %3 = COPY %0 |
2536 | |
2537 | // Plain copy. |
2538 | return getCommonSubClass(DefRC, SrcRC) != nullptr; |
2539 | } |
2540 | |
2541 | bool SIRegisterInfo::opCanUseLiteralConstant(unsigned OpType) const { |
2542 | // TODO: 64-bit operands have extending behavior from 32-bit literal. |
2543 | return OpType >= AMDGPU::OPERAND_REG_IMM_FIRST && |
2544 | OpType <= AMDGPU::OPERAND_REG_IMM_LAST; |
2545 | } |
2546 | |
2547 | /// Returns the lowest register that is not used at any point in the function.
2548 | /// If all registers are used, this function returns
2549 | /// AMDGPU::NoRegister. If \p ReserveHighestVGPR = true, it returns the
2550 | /// highest unused register.
2551 | MCRegister SIRegisterInfo::findUnusedRegister(const MachineRegisterInfo &MRI, |
2552 | const TargetRegisterClass *RC, |
2553 | const MachineFunction &MF, |
2554 | bool ReserveHighestVGPR) const { |
2555 | if (ReserveHighestVGPR) { |
2556 | for (MCRegister Reg : reverse(*RC)) |
2557 | if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg)) |
2558 | return Reg; |
2559 | } else { |
2560 | for (MCRegister Reg : *RC) |
2561 | if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg)) |
2562 | return Reg; |
2563 | } |
2564 | return MCRegister(); |
2565 | } |
2566 | |
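     | // Returns the sub-register indices that split \p RC into EltSize-byte pieces;
     | // e.g. (illustrative) a 128-bit class split into 64-bit elements yields
     | // sub0_sub1 and sub2_sub3.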
2567 | ArrayRef<int16_t> SIRegisterInfo::getRegSplitParts(const TargetRegisterClass *RC, |
2568 | unsigned EltSize) const { |
2569 | const unsigned RegBitWidth = AMDGPU::getRegBitWidth(*RC->MC); |
2570 | assert(RegBitWidth >= 32 && RegBitWidth <= 1024);
2571 | |
2572 | const unsigned RegDWORDs = RegBitWidth / 32; |
2573 | const unsigned EltDWORDs = EltSize / 4; |
2574 | assert(RegSplitParts.size() + 1 >= EltDWORDs);
2575 | |
2576 | const std::vector<int16_t> &Parts = RegSplitParts[EltDWORDs - 1]; |
2577 | const unsigned NumParts = RegDWORDs / EltDWORDs; |
2578 | |
2579 | return makeArrayRef(Parts.data(), NumParts); |
2580 | } |
2581 | |
2582 | const TargetRegisterClass* |
2583 | SIRegisterInfo::getRegClassForReg(const MachineRegisterInfo &MRI, |
2584 | Register Reg) const { |
2585 | return Reg.isVirtual() ? MRI.getRegClass(Reg) : getPhysRegClass(Reg); |
2586 | } |
2587 | |
2588 | bool SIRegisterInfo::isVGPR(const MachineRegisterInfo &MRI, |
2589 | Register Reg) const { |
2590 | const TargetRegisterClass *RC = getRegClassForReg(MRI, Reg); |
2591 | // Registers without classes are unaddressable, SGPR-like registers. |
2592 | return RC && isVGPRClass(RC); |
2593 | } |
2594 | |
2595 | bool SIRegisterInfo::isAGPR(const MachineRegisterInfo &MRI, |
2596 | Register Reg) const { |
2597 | const TargetRegisterClass *RC = getRegClassForReg(MRI, Reg); |
2598 | |
2599 | // Registers without classes are unaddressable, SGPR-like registers. |
2600 | return RC && isAGPRClass(RC); |
2601 | } |
2602 | |
2603 | bool SIRegisterInfo::shouldCoalesce(MachineInstr *MI, |
2604 | const TargetRegisterClass *SrcRC, |
2605 | unsigned SubReg, |
2606 | const TargetRegisterClass *DstRC, |
2607 | unsigned DstSubReg, |
2608 | const TargetRegisterClass *NewRC, |
2609 | LiveIntervals &LIS) const { |
2610 | unsigned SrcSize = getRegSizeInBits(*SrcRC); |
2611 | unsigned DstSize = getRegSizeInBits(*DstRC); |
2612 | unsigned NewSize = getRegSizeInBits(*NewRC); |
2613 | |
2614 | // Do not increase the size of registers beyond a dword; that would require
2615 | // allocating adjacent registers and constrain regalloc more than needed.
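     | // e.g. (illustrative) merging two 64-bit registers into a 128-bit tuple is
     | // rejected, while folding a 32-bit copy into a wider tuple is always allowed.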
2616 | |
2617 | // Always allow dword coalescing. |
2618 | if (SrcSize <= 32 || DstSize <= 32) |
2619 | return true; |
2620 | |
2621 | return NewSize <= DstSize || NewSize <= SrcSize; |
2622 | } |
2623 | |
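     | // The pressure limit is the number of VGPRs/SGPRs available at the occupancy
     | // implied by the function's LDS usage, capped by the subtarget maximum.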
2624 | unsigned SIRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC, |
2625 | MachineFunction &MF) const { |
2626 | const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); |
2627 | |
2628 | unsigned Occupancy = ST.getOccupancyWithLocalMemSize(MFI->getLDSSize(), |
2629 | MF.getFunction()); |
2630 | switch (RC->getID()) { |
2631 | default: |
2632 | return AMDGPUGenRegisterInfo::getRegPressureLimit(RC, MF); |
2633 | case AMDGPU::VGPR_32RegClassID: |
2634 | case AMDGPU::VGPR_LO16RegClassID: |
2635 | case AMDGPU::VGPR_HI16RegClassID: |
2636 | return std::min(ST.getMaxNumVGPRs(Occupancy), ST.getMaxNumVGPRs(MF)); |
2637 | case AMDGPU::SGPR_32RegClassID: |
2638 | case AMDGPU::SGPR_LO16RegClassID: |
2639 | return std::min(ST.getMaxNumSGPRs(Occupancy, true), ST.getMaxNumSGPRs(MF)); |
2640 | } |
2641 | } |
2642 | |
2643 | unsigned SIRegisterInfo::getRegPressureSetLimit(const MachineFunction &MF, |
2644 | unsigned Idx) const { |
2645 | if (Idx == AMDGPU::RegisterPressureSets::VGPR_32 || |
2646 | Idx == AMDGPU::RegisterPressureSets::AGPR_32) |
2647 | return getRegPressureLimit(&AMDGPU::VGPR_32RegClass, |
2648 | const_cast<MachineFunction &>(MF)); |
2649 | |
2650 | if (Idx == AMDGPU::RegisterPressureSets::SReg_32) |
2651 | return getRegPressureLimit(&AMDGPU::SGPR_32RegClass, |
2652 | const_cast<MachineFunction &>(MF)); |
2653 | |
2654 | llvm_unreachable("Unexpected register pressure set!")::llvm::llvm_unreachable_internal("Unexpected register pressure set!" , "llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp", 2654); |
2655 | } |
2656 | |
2657 | const int *SIRegisterInfo::getRegUnitPressureSets(unsigned RegUnit) const { |
2658 | static const int Empty[] = { -1 }; |
2659 | |
2660 | if (RegPressureIgnoredUnits[RegUnit]) |
2661 | return Empty; |
2662 | |
2663 | return AMDGPUGenRegisterInfo::getRegUnitPressureSets(RegUnit); |
2664 | } |
2665 | |
2666 | MCRegister SIRegisterInfo::getReturnAddressReg(const MachineFunction &MF) const { |
2667 | // Not a callee saved register. |
2668 | return AMDGPU::SGPR30_SGPR31; |
2669 | } |
2670 | |
2671 | const TargetRegisterClass * |
2672 | SIRegisterInfo::getRegClassForSizeOnBank(unsigned Size, |
2673 | const RegisterBank &RB, |
2674 | const MachineRegisterInfo &MRI) const { |
2675 | switch (RB.getID()) { |
2676 | case AMDGPU::VGPRRegBankID: |
2677 | return getVGPRClassForBitWidth(std::max(32u, Size)); |
2678 | case AMDGPU::VCCRegBankID: |
2679 | assert(Size == 1);
2680 | return isWave32 ? &AMDGPU::SReg_32_XM0_XEXECRegClass |
2681 | : &AMDGPU::SReg_64_XEXECRegClass; |
2682 | case AMDGPU::SGPRRegBankID: |
2683 | return getSGPRClassForBitWidth(std::max(32u, Size)); |
2684 | case AMDGPU::AGPRRegBankID: |
2685 | return getAGPRClassForBitWidth(std::max(32u, Size)); |
2686 | default: |
2687 | llvm_unreachable("unknown register bank")::llvm::llvm_unreachable_internal("unknown register bank", "llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp" , 2687); |
2688 | } |
2689 | } |
2690 | |
2691 | const TargetRegisterClass * |
2692 | SIRegisterInfo::getConstrainedRegClassForOperand(const MachineOperand &MO, |
2693 | const MachineRegisterInfo &MRI) const { |
2694 | const RegClassOrRegBank &RCOrRB = MRI.getRegClassOrRegBank(MO.getReg()); |
2695 | if (const RegisterBank *RB = RCOrRB.dyn_cast<const RegisterBank*>()) |
2696 | return getRegClassForTypeOnBank(MRI.getType(MO.getReg()), *RB, MRI); |
2697 | |
2698 | if (const auto *RC = RCOrRB.dyn_cast<const TargetRegisterClass *>()) |
2699 | return getAllocatableClass(RC); |
2700 | |
2701 | return nullptr; |
2702 | } |
2703 | |
2704 | MCRegister SIRegisterInfo::getVCC() const { |
2705 | return isWave32 ? AMDGPU::VCC_LO : AMDGPU::VCC; |
2706 | } |
2707 | |
2708 | const TargetRegisterClass *SIRegisterInfo::getVGPR64Class() const { |
2709 | // VGPR tuples have an alignment requirement on gfx90a variants. |
2710 | return ST.needsAlignedVGPRs() ? &AMDGPU::VReg_64_Align2RegClass |
2711 | : &AMDGPU::VReg_64RegClass; |
2712 | } |
2713 | |
2714 | const TargetRegisterClass * |
2715 | SIRegisterInfo::getRegClass(unsigned RCID) const { |
2716 | switch ((int)RCID) { |
2717 | case AMDGPU::SReg_1RegClassID: |
2718 | return getBoolRC(); |
2719 | case AMDGPU::SReg_1_XEXECRegClassID: |
2720 | return isWave32 ? &AMDGPU::SReg_32_XM0_XEXECRegClass |
2721 | : &AMDGPU::SReg_64_XEXECRegClass; |
2722 | case -1: |
2723 | return nullptr; |
2724 | default: |
2725 | return AMDGPUGenRegisterInfo::getRegClass(RCID); |
2726 | } |
2727 | } |
2728 | |
2729 | // Find reaching register definition |
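     | // Walks LiveIntervals to find the value live at \p Use and returns its
     | // defining instruction, or nullptr if no single dominating def is found.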
2730 | MachineInstr *SIRegisterInfo::findReachingDef(Register Reg, unsigned SubReg, |
2731 | MachineInstr &Use, |
2732 | MachineRegisterInfo &MRI, |
2733 | LiveIntervals *LIS) const { |
2734 | auto &MDT = LIS->getAnalysis<MachineDominatorTree>(); |
2735 | SlotIndex UseIdx = LIS->getInstructionIndex(Use); |
2736 | SlotIndex DefIdx; |
2737 | |
2738 | if (Reg.isVirtual()) { |
2739 | if (!LIS->hasInterval(Reg)) |
2740 | return nullptr; |
2741 | LiveInterval &LI = LIS->getInterval(Reg); |
2742 | LaneBitmask SubLanes = SubReg ? getSubRegIndexLaneMask(SubReg) |
2743 | : MRI.getMaxLaneMaskForVReg(Reg); |
2744 | VNInfo *V = nullptr; |
2745 | if (LI.hasSubRanges()) { |
2746 | for (auto &S : LI.subranges()) { |
2747 | if ((S.LaneMask & SubLanes) == SubLanes) { |
2748 | V = S.getVNInfoAt(UseIdx); |
2749 | break; |
2750 | } |
2751 | } |
2752 | } else { |
2753 | V = LI.getVNInfoAt(UseIdx); |
2754 | } |
2755 | if (!V) |
2756 | return nullptr; |
2757 | DefIdx = V->def; |
2758 | } else { |
2759 | // Find last def. |
2760 | for (MCRegUnitIterator Units(Reg.asMCReg(), this); Units.isValid(); |
2761 | ++Units) { |
2762 | LiveRange &LR = LIS->getRegUnit(*Units); |
2763 | if (VNInfo *V = LR.getVNInfoAt(UseIdx)) { |
2764 | if (!DefIdx.isValid() || |
2765 | MDT.dominates(LIS->getInstructionFromIndex(DefIdx), |
2766 | LIS->getInstructionFromIndex(V->def))) |
2767 | DefIdx = V->def; |
2768 | } else { |
2769 | return nullptr; |
2770 | } |
2771 | } |
2772 | } |
2773 | |
2774 | MachineInstr *Def = LIS->getInstructionFromIndex(DefIdx); |
2775 | |
2776 | if (!Def || !MDT.dominates(Def, &Use)) |
2777 | return nullptr; |
2778 | |
2779 | assert(Def->modifiesRegister(Reg, this));
2780 | |
2781 | return Def; |
2782 | } |
2783 | |
2784 | MCPhysReg SIRegisterInfo::get32BitRegister(MCPhysReg Reg) const { |
2785 | assert(getRegSizeInBits(*getPhysRegClass(Reg)) <= 32);
2786 | |
2787 | for (const TargetRegisterClass &RC : { AMDGPU::VGPR_32RegClass, |
2788 | AMDGPU::SReg_32RegClass, |
2789 | AMDGPU::AGPR_32RegClass } ) { |
2790 | if (MCPhysReg Super = getMatchingSuperReg(Reg, AMDGPU::lo16, &RC)) |
2791 | return Super; |
2792 | } |
2793 | if (MCPhysReg Super = getMatchingSuperReg(Reg, AMDGPU::hi16, |
2794 | &AMDGPU::VGPR_32RegClass)) { |
2795 | return Super; |
2796 | } |
2797 | |
2798 | return AMDGPU::NoRegister; |
2799 | } |
2800 | |
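     | // On subtargets that require even-aligned VGPR tuples, multi-register vector
     | // classes must be the *_Align2 variants; 32-bit and narrower classes are
     | // always properly aligned.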
2801 | bool SIRegisterInfo::isProperlyAlignedRC(const TargetRegisterClass &RC) const { |
2802 | if (!ST.needsAlignedVGPRs()) |
2803 | return true; |
2804 | |
2805 | if (isVGPRClass(&RC)) |
2806 | return RC.hasSuperClassEq(getVGPRClassForBitWidth(getRegSizeInBits(RC))); |
2807 | if (isAGPRClass(&RC)) |
2808 | return RC.hasSuperClassEq(getAGPRClassForBitWidth(getRegSizeInBits(RC))); |
2809 | if (isVectorSuperClass(&RC)) |
2810 | return RC.hasSuperClassEq( |
2811 | getVectorSuperClassForBitWidth(getRegSizeInBits(RC))); |
2812 | |
2813 | return true; |
2814 | } |
2815 | |
2816 | bool SIRegisterInfo::isConstantPhysReg(MCRegister PhysReg) const { |
2817 | switch (PhysReg) { |
2818 | case AMDGPU::SGPR_NULL: |
2819 | case AMDGPU::SRC_SHARED_BASE: |
2820 | case AMDGPU::SRC_PRIVATE_BASE: |
2821 | case AMDGPU::SRC_SHARED_LIMIT: |
2822 | case AMDGPU::SRC_PRIVATE_LIMIT: |
2823 | return true; |
2824 | default: |
2825 | return false; |
2826 | } |
2827 | } |
2828 | |
2829 | ArrayRef<MCPhysReg> |
2830 | SIRegisterInfo::getAllSGPR128(const MachineFunction &MF) const { |
2831 | return makeArrayRef(AMDGPU::SGPR_128RegClass.begin(), |
2832 | ST.getMaxNumSGPRs(MF) / 4); |
2833 | } |
2834 | |
2835 | ArrayRef<MCPhysReg> |
2836 | SIRegisterInfo::getAllSGPR64(const MachineFunction &MF) const { |
2837 | return makeArrayRef(AMDGPU::SGPR_64RegClass.begin(), |
2838 | ST.getMaxNumSGPRs(MF) / 2); |
2839 | } |
2840 | |
2841 | ArrayRef<MCPhysReg> |
2842 | SIRegisterInfo::getAllSGPR32(const MachineFunction &MF) const { |
2843 | return makeArrayRef(AMDGPU::SGPR_32RegClass.begin(), ST.getMaxNumSGPRs(MF)); |
2844 | } |