| File: | build/source/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp |
| Warning: | line 1330, column 5: Value stored to 'Desc' is never read |
| 1 | //===-- SIRegisterInfo.cpp - SI Register Information ---------------------===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | // |
| 9 | /// \file |
| 10 | /// SI implementation of the TargetRegisterInfo class. |
| 11 | // |
| 12 | //===----------------------------------------------------------------------===// |
| 13 | |
| 14 | #include "AMDGPU.h" |
| 15 | #include "AMDGPURegisterBankInfo.h" |
| 16 | #include "GCNSubtarget.h" |
| 17 | #include "MCTargetDesc/AMDGPUInstPrinter.h" |
| 18 | #include "MCTargetDesc/AMDGPUMCTargetDesc.h" |
| 19 | #include "SIMachineFunctionInfo.h" |
| 20 | #include "SIRegisterInfo.h" |
| 21 | #include "llvm/CodeGen/LiveIntervals.h" |
| 22 | #include "llvm/CodeGen/LivePhysRegs.h" |
| 23 | #include "llvm/CodeGen/MachineDominators.h" |
| 24 | #include "llvm/CodeGen/MachineFrameInfo.h" |
| 25 | #include "llvm/CodeGen/RegisterScavenging.h" |
| 26 | |
| 27 | using namespace llvm; |
| 28 | |
| 29 | #define GET_REGINFO_TARGET_DESC |
| 30 | #include "AMDGPUGenRegisterInfo.inc" |
| 31 | |
| 32 | static cl::opt<bool> EnableSpillSGPRToVGPR( |
| 33 | "amdgpu-spill-sgpr-to-vgpr", |
| 34 | cl::desc("Enable spilling SGPRs to VGPRs"), |
| 35 | cl::ReallyHidden, |
| 36 | cl::init(true)); |
| 37 | |
| 38 | std::array<std::vector<int16_t>, 16> SIRegisterInfo::RegSplitParts; |
| 39 | std::array<std::array<uint16_t, 32>, 9> SIRegisterInfo::SubRegFromChannelTable; |
| 40 | |
| 41 | // Map numbers of DWORDs to indexes in SubRegFromChannelTable. |
| 42 | // Valid indexes are shifted 1, such that a 0 mapping means unsupported. |
| 43 | // e.g. for 8 DWORDs (256-bit), SubRegFromChannelTableWidthMap[8] = 8, |
| 44 | // meaning index 7 in SubRegFromChannelTable. |
| 45 | static const std::array<unsigned, 17> SubRegFromChannelTableWidthMap = { |
| 46 | 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 9}; |
| 47 | |
| 48 | namespace llvm { |
| 49 | |
| 50 | // A temporary struct to spill SGPRs. |
| 51 | // This is mostly to spill SGPRs to memory. Spilling SGPRs into VGPR lanes emits |
| 52 | // just v_writelane and v_readlane. |
| 53 | // |
| 54 | // When spilling to memory, the SGPRs are written into VGPR lanes and the VGPR |
| 55 | // is saved to scratch (or the other way around for loads). |
| 56 | // For this, a VGPR is required where the needed lanes can be clobbered. The |
| 57 | // RegScavenger can provide a VGPR where currently active lanes can be |
| 58 | // clobbered, but we still need to save inactive lanes. |
| 59 | // The high-level steps are: |
| 60 | // - Try to scavenge SGPR(s) to save exec |
| 61 | // - Try to scavenge VGPR |
| 62 | // - Save needed, all or inactive lanes of a TmpVGPR |
| 63 | // - Spill/Restore SGPRs using TmpVGPR |
| 64 | // - Restore TmpVGPR |
| 65 | // |
| 66 | // To save all lanes of TmpVGPR, exec needs to be saved and modified. If we |
| 67 | // cannot scavenge temporary SGPRs to save exec, we use the following code: |
| 68 | // buffer_store_dword TmpVGPR ; only if active lanes need to be saved |
| 69 | // s_not exec, exec |
| 70 | // buffer_store_dword TmpVGPR ; save inactive lanes |
| 71 | // s_not exec, exec |
| 72 | struct SGPRSpillBuilder { |
| 73 | struct PerVGPRData { |
| 74 | unsigned PerVGPR; |
| 75 | unsigned NumVGPRs; |
| 76 | int64_t VGPRLanes; |
| 77 | }; |
| 78 | |
| 79 | // The SGPR to save |
| 80 | Register SuperReg; |
| 81 | MachineBasicBlock::iterator MI; |
| 82 | ArrayRef<int16_t> SplitParts; |
| 83 | unsigned NumSubRegs; |
| 84 | bool IsKill; |
| 85 | const DebugLoc &DL; |
| 86 | |
| 87 | /* When spilling to stack */ |
| 88 | // The SGPRs are written into this VGPR, which is then written to scratch |
| 89 | // (or vice versa for loads). |
| 90 | Register TmpVGPR = AMDGPU::NoRegister; |
| 91 | // Temporary spill slot to save TmpVGPR to. |
| 92 | int TmpVGPRIndex = 0; |
| 93 | // If TmpVGPR is live before the spill or if it is scavenged. |
| 94 | bool TmpVGPRLive = false; |
| 95 | // Scavenged SGPR to save EXEC. |
| 96 | Register SavedExecReg = AMDGPU::NoRegister; |
| 97 | // Stack index to write the SGPRs to. |
| 98 | int Index; |
| 99 | unsigned EltSize = 4; |
| 100 | |
| 101 | RegScavenger *RS; |
| 102 | MachineBasicBlock *MBB; |
| 103 | MachineFunction &MF; |
| 104 | SIMachineFunctionInfo &MFI; |
| 105 | const SIInstrInfo &TII; |
| 106 | const SIRegisterInfo &TRI; |
| 107 | bool IsWave32; |
| 108 | Register ExecReg; |
| 109 | unsigned MovOpc; |
| 110 | unsigned NotOpc; |
| 111 | |
| 112 | SGPRSpillBuilder(const SIRegisterInfo &TRI, const SIInstrInfo &TII, |
| 113 | bool IsWave32, MachineBasicBlock::iterator MI, int Index, |
| 114 | RegScavenger *RS) |
| 115 | : SGPRSpillBuilder(TRI, TII, IsWave32, MI, MI->getOperand(0).getReg(), |
| 116 | MI->getOperand(0).isKill(), Index, RS) {} |
| 117 | |
| 118 | SGPRSpillBuilder(const SIRegisterInfo &TRI, const SIInstrInfo &TII, |
| 119 | bool IsWave32, MachineBasicBlock::iterator MI, Register Reg, |
| 120 | bool IsKill, int Index, RegScavenger *RS) |
| 121 | : SuperReg(Reg), MI(MI), IsKill(IsKill), DL(MI->getDebugLoc()), |
| 122 | Index(Index), RS(RS), MBB(MI->getParent()), MF(*MBB->getParent()), |
| 123 | MFI(*MF.getInfo<SIMachineFunctionInfo>()), TII(TII), TRI(TRI), |
| 124 | IsWave32(IsWave32) { |
| 125 | const TargetRegisterClass *RC = TRI.getPhysRegBaseClass(SuperReg); |
| 126 | SplitParts = TRI.getRegSplitParts(RC, EltSize); |
| 127 | NumSubRegs = SplitParts.empty() ? 1 : SplitParts.size(); |
| 128 | |
| 129 | if (IsWave32) { |
| 130 | ExecReg = AMDGPU::EXEC_LO; |
| 131 | MovOpc = AMDGPU::S_MOV_B32; |
| 132 | NotOpc = AMDGPU::S_NOT_B32; |
| 133 | } else { |
| 134 | ExecReg = AMDGPU::EXEC; |
| 135 | MovOpc = AMDGPU::S_MOV_B64; |
| 136 | NotOpc = AMDGPU::S_NOT_B64; |
| 137 | } |
| 138 | |
| 139 | assert(SuperReg != AMDGPU::M0 && "m0 should never spill"); |
| 140 | assert(SuperReg != AMDGPU::EXEC_LO && SuperReg != AMDGPU::EXEC_HI && |
| 141 | SuperReg != AMDGPU::EXEC && "exec should never spill"); |
| 142 | } |
| 143 | |
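|  | // A rough worked example of the values computed below, assuming a wave64 |
|  | // spill of a 128-bit SGPR tuple (NumSubRegs == 4): PerVGPR = 64, |
|  | // NumVGPRs = (4 + 63) / 64 = 1, VGPRLanes = (1 << 4) - 1 = 0xF, i.e. only |
|  | // the first four lanes of the temporary VGPR are needed for the four parts. |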
| 144 | PerVGPRData getPerVGPRData() { |
| 145 | PerVGPRData Data; |
| 146 | Data.PerVGPR = IsWave32 ? 32 : 64; |
| 147 | Data.NumVGPRs = (NumSubRegs + (Data.PerVGPR - 1)) / Data.PerVGPR; |
| 148 | Data.VGPRLanes = (1LL << std::min(Data.PerVGPR, NumSubRegs)) - 1LL; |
| 149 | return Data; |
| 150 | } |
| 151 | |
| 152 | // Tries to scavenge SGPRs to save EXEC and a VGPR. Uses v0 if no VGPR is |
| 153 | // free. |
| 154 | // Writes these instructions if an SGPR can be scavenged: |
| 155 | // s_mov_b64 s[6:7], exec ; Save exec |
| 156 | // s_mov_b64 exec, 3 ; Wanted lanemask |
| 157 | // buffer_store_dword v1 ; Write scavenged VGPR to emergency slot |
| 158 | // |
| 159 | // Writes these instructions if no SGPR can be scavenged: |
| 160 | // buffer_store_dword v0 ; Only if no free VGPR was found |
| 161 | // s_not_b64 exec, exec |
| 162 | // buffer_store_dword v0 ; Save inactive lanes |
| 163 | // ; exec stays inverted, it is flipped back in |
| 164 | // ; restore. |
| 165 | void prepare() { |
| 166 | // Scavenged temporary VGPR to use. It must be scavenged once for any number |
| 167 | // of spilled subregs. |
| 168 | // FIXME: The liveness analysis is limited and does not tell if a register |
| 169 | // is in use in lanes that are currently inactive. We can never be sure if |
| 170 | // a register is actually in use in another lane, so we need to save all |
| 171 | // used lanes of the chosen VGPR. |
| 172 | assert(RS && "Cannot spill SGPR to memory without RegScavenger"); |
| 173 | TmpVGPR = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0, false); |
| 174 | |
| 175 | // Reserve temporary stack slot |
| 176 | TmpVGPRIndex = MFI.getScavengeFI(MF.getFrameInfo(), TRI); |
| 177 | if (TmpVGPR) { |
| 178 | // Found a register that is dead in the currently active lanes, we only |
| 179 | // need to spill inactive lanes. |
| 180 | TmpVGPRLive = false; |
| 181 | } else { |
| 182 | // Pick v0 because it doesn't make a difference. |
| 183 | TmpVGPR = AMDGPU::VGPR0; |
| 184 | TmpVGPRLive = true; |
| 185 | } |
| 186 | |
| 187 | if (TmpVGPRLive) { |
| 188 | // We need to inform the scavenger that this index is already in use until |
| 189 | // we're done with the custom emergency spill. |
| 190 | RS->assignRegToScavengingIndex(TmpVGPRIndex, TmpVGPR); |
| 191 | } |
| 192 | |
| 193 | // We may end up recursively calling the scavenger, and don't want to re-use |
| 194 | // the same register. |
| 195 | RS->setRegUsed(TmpVGPR); |
| 196 | |
| 197 | // Try to scavenge SGPRs to save exec |
| 198 | assert(!SavedExecReg && "Exec is already saved, refuse to save again"); |
| 199 | const TargetRegisterClass &RC = |
| 200 | IsWave32 ? AMDGPU::SGPR_32RegClass : AMDGPU::SGPR_64RegClass; |
| 201 | RS->setRegUsed(SuperReg); |
| 202 | SavedExecReg = RS->scavengeRegister(&RC, MI, 0, false); |
| 203 | |
| 204 | int64_t VGPRLanes = getPerVGPRData().VGPRLanes; |
| 205 | |
| 206 | if (SavedExecReg) { |
| 207 | RS->setRegUsed(SavedExecReg); |
| 208 | // Set exec to needed lanes |
| 209 | BuildMI(*MBB, MI, DL, TII.get(MovOpc), SavedExecReg).addReg(ExecReg); |
| 210 | auto I = |
| 211 | BuildMI(*MBB, MI, DL, TII.get(MovOpc), ExecReg).addImm(VGPRLanes); |
| 212 | if (!TmpVGPRLive) |
| 213 | I.addReg(TmpVGPR, RegState::ImplicitDefine); |
| 214 | // Spill needed lanes |
| 215 | TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ false); |
| 216 | } else { |
| 217 | // The modify and restore of exec clobber SCC, which we would have to save |
| 218 | // and restore. FIXME: We probably would need to reserve a register for |
| 219 | // this. |
| 220 | if (RS->isRegUsed(AMDGPU::SCC)) |
| 221 | MI->emitError("unhandled SGPR spill to memory"); |
| 222 | |
| 223 | // Spill active lanes |
| 224 | if (TmpVGPRLive) |
| 225 | TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ false, |
| 226 | /*IsKill*/ false); |
| 227 | // Spill inactive lanes |
| 228 | auto I = BuildMI(*MBB, MI, DL, TII.get(NotOpc), ExecReg).addReg(ExecReg); |
| 229 | if (!TmpVGPRLive) |
| 230 | I.addReg(TmpVGPR, RegState::ImplicitDefine); |
| 231 | I->getOperand(2).setIsDead(); // Mark SCC as dead. |
| 232 | TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ false); |
| 233 | } |
| 234 | } |
| 235 | |
| 236 | // Writes these instructions if an SGPR can be scavenged: |
| 237 | // buffer_load_dword v1 ; Restore scavenged VGPR from emergency slot |
| 238 | // s_waitcnt vmcnt(0) ; If a free VGPR was found |
| 239 | // s_mov_b64 exec, s[6:7] ; Restore exec |
| 240 | // |
| 241 | // Writes these instructions if no SGPR can be scavenged: |
| 242 | // buffer_load_dword v0 ; Restore inactive lanes |
| 243 | // s_waitcnt vmcnt(0) ; If a free VGPR was found |
| 244 | // s_not_b64 exec, exec |
| 245 | // buffer_load_dword v0 ; Only if no free VGPR was found |
| 246 | void restore() { |
| 247 | if (SavedExecReg) { |
| 248 | // Restore used lanes |
| 249 | TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ true, |
| 250 | /*IsKill*/ false); |
| 251 | // Restore exec |
| 252 | auto I = BuildMI(*MBB, MI, DL, TII.get(MovOpc), ExecReg) |
| 253 | .addReg(SavedExecReg, RegState::Kill); |
| 254 | // Add an implicit use of the load so it is not dead. |
| 255 | // FIXME This inserts an unnecessary waitcnt |
| 256 | if (!TmpVGPRLive) { |
| 257 | I.addReg(TmpVGPR, RegState::ImplicitKill); |
| 258 | } |
| 259 | } else { |
| 260 | // Restore inactive lanes |
| 261 | TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ true, |
| 262 | /*IsKill*/ false); |
| 263 | auto I = BuildMI(*MBB, MI, DL, TII.get(NotOpc), ExecReg).addReg(ExecReg); |
| 264 | if (!TmpVGPRLive) |
| 265 | I.addReg(TmpVGPR, RegState::ImplicitKill); |
| 266 | I->getOperand(2).setIsDead(); // Mark SCC as dead. |
| 267 | |
| 268 | // Restore active lanes |
| 269 | if (TmpVGPRLive) |
| 270 | TRI.buildVGPRSpillLoadStore(*this, TmpVGPRIndex, 0, /*IsLoad*/ true); |
| 271 | } |
| 272 | |
| 273 | // Inform the scavenger where we're releasing our custom scavenged register. |
| 274 | if (TmpVGPRLive) { |
| 275 | MachineBasicBlock::iterator RestorePt = std::prev(MI); |
| 276 | RS->assignRegToScavengingIndex(TmpVGPRIndex, TmpVGPR, &*RestorePt); |
| 277 | } |
| 278 | } |
| 279 | |
| 280 | // Write TmpVGPR to memory or read TmpVGPR from memory. |
| 281 | // Either using a single buffer_load/store if exec is set to the needed mask |
| 282 | // or using |
| 283 | // buffer_load |
| 284 | // s_not exec, exec |
| 285 | // buffer_load |
| 286 | // s_not exec, exec |
| 287 | void readWriteTmpVGPR(unsigned Offset, bool IsLoad) { |
| 288 | if (SavedExecReg) { |
| 289 | // Spill needed lanes |
| 290 | TRI.buildVGPRSpillLoadStore(*this, Index, Offset, IsLoad); |
| 291 | } else { |
| 292 | // The modify and restore of exec clobber SCC, which we would have to save |
| 293 | // and restore. FIXME: We probably would need to reserve a register for |
| 294 | // this. |
| 295 | if (RS->isRegUsed(AMDGPU::SCC)) |
| 296 | MI->emitError("unhandled SGPR spill to memory"); |
| 297 | |
| 298 | // Spill active lanes |
| 299 | TRI.buildVGPRSpillLoadStore(*this, Index, Offset, IsLoad, |
| 300 | /*IsKill*/ false); |
| 301 | // Spill inactive lanes |
| 302 | auto Not0 = BuildMI(*MBB, MI, DL, TII.get(NotOpc), ExecReg).addReg(ExecReg); |
| 303 | Not0->getOperand(2).setIsDead(); // Mark SCC as dead. |
| 304 | TRI.buildVGPRSpillLoadStore(*this, Index, Offset, IsLoad); |
| 305 | auto Not1 = BuildMI(*MBB, MI, DL, TII.get(NotOpc), ExecReg).addReg(ExecReg); |
| 306 | Not1->getOperand(2).setIsDead(); // Mark SCC as dead. |
| 307 | } |
| 308 | } |
| 309 | |
| 310 | void setMI(MachineBasicBlock *NewMBB, MachineBasicBlock::iterator NewMI) { |
| 311 | assert(MBB->getParent() == &MF); |
| 312 | MI = NewMI; |
| 313 | MBB = NewMBB; |
| 314 | } |
| 315 | }; |
| 316 | |
| 317 | } // namespace llvm |
| 318 | |
| 319 | SIRegisterInfo::SIRegisterInfo(const GCNSubtarget &ST) |
| 320 | : AMDGPUGenRegisterInfo(AMDGPU::PC_REG, ST.getAMDGPUDwarfFlavour()), ST(ST), |
| 321 | SpillSGPRToVGPR(EnableSpillSGPRToVGPR), isWave32(ST.isWave32()) { |
| 322 | |
| 323 | assert(getSubRegIndexLaneMask(AMDGPU::sub0).getAsInteger() == 3 && |
| 324 | getSubRegIndexLaneMask(AMDGPU::sub31).getAsInteger() == (3ULL << 62) && |
| 325 | (getSubRegIndexLaneMask(AMDGPU::lo16) | |
| 326 | getSubRegIndexLaneMask(AMDGPU::hi16)).getAsInteger() == |
| 327 | getSubRegIndexLaneMask(AMDGPU::sub0).getAsInteger() && |
| 328 | "getNumCoveredRegs() will not work with generated subreg masks!"); |
| 329 | |
| 330 | RegPressureIgnoredUnits.resize(getNumRegUnits()); |
| 331 | RegPressureIgnoredUnits.set( |
| 332 | *MCRegUnitIterator(MCRegister::from(AMDGPU::M0), this)); |
| 333 | for (auto Reg : AMDGPU::VGPR_HI16RegClass) |
| 334 | RegPressureIgnoredUnits.set(*MCRegUnitIterator(Reg, this)); |
| 335 | |
| 336 | // HACK: Until this is fully tablegen'd. |
| 337 | static llvm::once_flag InitializeRegSplitPartsFlag; |
| 338 | |
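|  | // RegSplitParts[N] maps a part position to the sub-register index that |
|  | // covers (N + 1) DWORDs at that position; e.g. RegSplitParts[1] is expected |
|  | // to hold sub0_sub1, sub2_sub3, ... for splitting a register into 64-bit |
|  | // parts. |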
| 339 | static auto InitializeRegSplitPartsOnce = [this]() { |
| 340 | for (unsigned Idx = 1, E = getNumSubRegIndices() - 1; Idx < E; ++Idx) { |
| 341 | unsigned Size = getSubRegIdxSize(Idx); |
| 342 | if (Size & 31) |
| 343 | continue; |
| 344 | std::vector<int16_t> &Vec = RegSplitParts[Size / 32 - 1]; |
| 345 | unsigned Pos = getSubRegIdxOffset(Idx); |
| 346 | if (Pos % Size) |
| 347 | continue; |
| 348 | Pos /= Size; |
| 349 | if (Vec.empty()) { |
| 350 | unsigned MaxNumParts = 1024 / Size; // Maximum register is 1024 bits. |
| 351 | Vec.resize(MaxNumParts); |
| 352 | } |
| 353 | Vec[Pos] = Idx; |
| 354 | } |
| 355 | }; |
| 356 | |
| 357 | static llvm::once_flag InitializeSubRegFromChannelTableFlag; |
| 358 | |
| 359 | static auto InitializeSubRegFromChannelTableOnce = [this]() { |
| 360 | for (auto &Row : SubRegFromChannelTable) |
| 361 | Row.fill(AMDGPU::NoSubRegister); |
| 362 | for (unsigned Idx = 1; Idx < getNumSubRegIndices(); ++Idx) { |
| 363 | unsigned Width = AMDGPUSubRegIdxRanges[Idx].Size / 32; |
| 364 | unsigned Offset = AMDGPUSubRegIdxRanges[Idx].Offset / 32; |
| 365 | assert(Width < SubRegFromChannelTableWidthMap.size()); |
| 366 | Width = SubRegFromChannelTableWidthMap[Width]; |
| 367 | if (Width == 0) |
| 368 | continue; |
| 369 | unsigned TableIdx = Width - 1; |
| 370 | assert(TableIdx < SubRegFromChannelTable.size()); |
| 371 | assert(Offset < SubRegFromChannelTable[TableIdx].size()); |
| 372 | SubRegFromChannelTable[TableIdx][Offset] = Idx; |
| 373 | } |
| 374 | }; |
| 375 | |
| 376 | llvm::call_once(InitializeRegSplitPartsFlag, InitializeRegSplitPartsOnce); |
| 377 | llvm::call_once(InitializeSubRegFromChannelTableFlag, |
| 378 | InitializeSubRegFromChannelTableOnce); |
| 379 | } |
| 380 | |
| 381 | void SIRegisterInfo::reserveRegisterTuples(BitVector &Reserved, |
| 382 | MCRegister Reg) const { |
| 383 | for (MCRegAliasIterator R(Reg, this, true); R.isValid(); ++R) |
| 384 | Reserved.set(*R); |
| 385 | } |
| 386 | |
| 387 | // Forced to be here by one .inc |
| 388 | const MCPhysReg *SIRegisterInfo::getCalleeSavedRegs( |
| 389 | const MachineFunction *MF) const { |
| 390 | CallingConv::ID CC = MF->getFunction().getCallingConv(); |
| 391 | switch (CC) { |
| 392 | case CallingConv::C: |
| 393 | case CallingConv::Fast: |
| 394 | case CallingConv::Cold: |
| 395 | return ST.hasGFX90AInsts() ? CSR_AMDGPU_GFX90AInsts_SaveList |
| 396 | : CSR_AMDGPU_SaveList; |
| 397 | case CallingConv::AMDGPU_Gfx: |
| 398 | return ST.hasGFX90AInsts() ? CSR_AMDGPU_SI_Gfx_GFX90AInsts_SaveList |
| 399 | : CSR_AMDGPU_SI_Gfx_SaveList; |
| 400 | default: { |
| 401 | // Dummy to not crash RegisterClassInfo. |
| 402 | static const MCPhysReg NoCalleeSavedReg = AMDGPU::NoRegister; |
| 403 | return &NoCalleeSavedReg; |
| 404 | } |
| 405 | } |
| 406 | } |
| 407 | |
| 408 | const MCPhysReg * |
| 409 | SIRegisterInfo::getCalleeSavedRegsViaCopy(const MachineFunction *MF) const { |
| 410 | return nullptr; |
| 411 | } |
| 412 | |
| 413 | const uint32_t *SIRegisterInfo::getCallPreservedMask(const MachineFunction &MF, |
| 414 | CallingConv::ID CC) const { |
| 415 | switch (CC) { |
| 416 | case CallingConv::C: |
| 417 | case CallingConv::Fast: |
| 418 | case CallingConv::Cold: |
| 419 | return ST.hasGFX90AInsts() ? CSR_AMDGPU_GFX90AInsts_RegMask |
| 420 | : CSR_AMDGPU_RegMask; |
| 421 | case CallingConv::AMDGPU_Gfx: |
| 422 | return ST.hasGFX90AInsts() ? CSR_AMDGPU_SI_Gfx_GFX90AInsts_RegMask |
| 423 | : CSR_AMDGPU_SI_Gfx_RegMask; |
| 424 | default: |
| 425 | return nullptr; |
| 426 | } |
| 427 | } |
| 428 | |
| 429 | const uint32_t *SIRegisterInfo::getNoPreservedMask() const { |
| 430 | return CSR_AMDGPU_NoRegs_RegMask; |
| 431 | } |
| 432 | |
| 433 | const TargetRegisterClass * |
| 434 | SIRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC, |
| 435 | const MachineFunction &MF) const { |
| 436 | // FIXME: Should have a helper function like getEquivalentVGPRClass to get |
| 437 | // the equivalent AV class. If we used one here, the verifier would crash |
| 438 | // after RegBankSelect in the GISel flow, since the aligned register classes |
| 439 | // are not fully defined until instruction selection. |
| 440 | if (ST.hasMAIInsts() && (isVGPRClass(RC) || isAGPRClass(RC))) { |
| 441 | if (RC == &AMDGPU::VGPR_32RegClass || RC == &AMDGPU::AGPR_32RegClass) |
| 442 | return &AMDGPU::AV_32RegClass; |
| 443 | if (RC == &AMDGPU::VReg_64RegClass || RC == &AMDGPU::AReg_64RegClass) |
| 444 | return &AMDGPU::AV_64RegClass; |
| 445 | if (RC == &AMDGPU::VReg_64_Align2RegClass || |
| 446 | RC == &AMDGPU::AReg_64_Align2RegClass) |
| 447 | return &AMDGPU::AV_64_Align2RegClass; |
| 448 | if (RC == &AMDGPU::VReg_96RegClass || RC == &AMDGPU::AReg_96RegClass) |
| 449 | return &AMDGPU::AV_96RegClass; |
| 450 | if (RC == &AMDGPU::VReg_96_Align2RegClass || |
| 451 | RC == &AMDGPU::AReg_96_Align2RegClass) |
| 452 | return &AMDGPU::AV_96_Align2RegClass; |
| 453 | if (RC == &AMDGPU::VReg_128RegClass || RC == &AMDGPU::AReg_128RegClass) |
| 454 | return &AMDGPU::AV_128RegClass; |
| 455 | if (RC == &AMDGPU::VReg_128_Align2RegClass || |
| 456 | RC == &AMDGPU::AReg_128_Align2RegClass) |
| 457 | return &AMDGPU::AV_128_Align2RegClass; |
| 458 | if (RC == &AMDGPU::VReg_160RegClass || RC == &AMDGPU::AReg_160RegClass) |
| 459 | return &AMDGPU::AV_160RegClass; |
| 460 | if (RC == &AMDGPU::VReg_160_Align2RegClass || |
| 461 | RC == &AMDGPU::AReg_160_Align2RegClass) |
| 462 | return &AMDGPU::AV_160_Align2RegClass; |
| 463 | if (RC == &AMDGPU::VReg_192RegClass || RC == &AMDGPU::AReg_192RegClass) |
| 464 | return &AMDGPU::AV_192RegClass; |
| 465 | if (RC == &AMDGPU::VReg_192_Align2RegClass || |
| 466 | RC == &AMDGPU::AReg_192_Align2RegClass) |
| 467 | return &AMDGPU::AV_192_Align2RegClass; |
| 468 | if (RC == &AMDGPU::VReg_256RegClass || RC == &AMDGPU::AReg_256RegClass) |
| 469 | return &AMDGPU::AV_256RegClass; |
| 470 | if (RC == &AMDGPU::VReg_256_Align2RegClass || |
| 471 | RC == &AMDGPU::AReg_256_Align2RegClass) |
| 472 | return &AMDGPU::AV_256_Align2RegClass; |
| 473 | if (RC == &AMDGPU::VReg_512RegClass || RC == &AMDGPU::AReg_512RegClass) |
| 474 | return &AMDGPU::AV_512RegClass; |
| 475 | if (RC == &AMDGPU::VReg_512_Align2RegClass || |
| 476 | RC == &AMDGPU::AReg_512_Align2RegClass) |
| 477 | return &AMDGPU::AV_512_Align2RegClass; |
| 478 | if (RC == &AMDGPU::VReg_1024RegClass || RC == &AMDGPU::AReg_1024RegClass) |
| 479 | return &AMDGPU::AV_1024RegClass; |
| 480 | if (RC == &AMDGPU::VReg_1024_Align2RegClass || |
| 481 | RC == &AMDGPU::AReg_1024_Align2RegClass) |
| 482 | return &AMDGPU::AV_1024_Align2RegClass; |
| 483 | } |
| 484 | |
| 485 | return TargetRegisterInfo::getLargestLegalSuperClass(RC, MF); |
| 486 | } |
| 487 | |
| 488 | Register SIRegisterInfo::getFrameRegister(const MachineFunction &MF) const { |
| 489 | const SIFrameLowering *TFI = ST.getFrameLowering(); |
| 490 | const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>(); |
| 491 | // During ISel lowering we always reserve the stack pointer in entry |
| 492 | // functions, but never actually want to reference it when accessing our own |
| 493 | // frame. If we need a frame pointer we use it, but otherwise we can just use |
| 494 | // an immediate "0" which we represent by returning NoRegister. |
| 495 | if (FuncInfo->isEntryFunction()) { |
| 496 | return TFI->hasFP(MF) ? FuncInfo->getFrameOffsetReg() : Register(); |
| 497 | } |
| 498 | return TFI->hasFP(MF) ? FuncInfo->getFrameOffsetReg() |
| 499 | : FuncInfo->getStackPtrOffsetReg(); |
| 500 | } |
| 501 | |
| 502 | bool SIRegisterInfo::hasBasePointer(const MachineFunction &MF) const { |
| 503 | // When we need stack realignment, we can't reference off of the |
| 504 | // stack pointer, so we reserve a base pointer. |
| 505 | const MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 506 | return MFI.getNumFixedObjects() && shouldRealignStack(MF); |
| 507 | } |
| 508 | |
| 509 | Register SIRegisterInfo::getBaseRegister() const { return AMDGPU::SGPR34; } |
| 510 | |
| 511 | const uint32_t *SIRegisterInfo::getAllVGPRRegMask() const { |
| 512 | return AMDGPU_AllVGPRs_RegMask; |
| 513 | } |
| 514 | |
| 515 | const uint32_t *SIRegisterInfo::getAllAGPRRegMask() const { |
| 516 | return AMDGPU_AllAGPRs_RegMask; |
| 517 | } |
| 518 | |
| 519 | const uint32_t *SIRegisterInfo::getAllVectorRegMask() const { |
| 520 | return AMDGPU_AllVectorRegs_RegMask; |
| 521 | } |
| 522 | |
| 523 | const uint32_t *SIRegisterInfo::getAllAllocatableSRegMask() const { |
| 524 | return AMDGPU_AllAllocatableSRegs_RegMask; |
| 525 | } |
| 526 | |
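|  | // Returns the sub-register index covering NumRegs DWORDs starting at DWORD |
|  | // Channel; e.g. getSubRegFromChannel(0, 1) should yield AMDGPU::sub0 and |
|  | // getSubRegFromChannel(2, 2) should yield AMDGPU::sub2_sub3. |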
| 527 | unsigned SIRegisterInfo::getSubRegFromChannel(unsigned Channel, |
| 528 | unsigned NumRegs) { |
| 529 | assert(NumRegs < SubRegFromChannelTableWidthMap.size()); |
| 530 | unsigned NumRegIndex = SubRegFromChannelTableWidthMap[NumRegs]; |
| 531 | assert(NumRegIndex && "Not implemented"); |
| 532 | assert(Channel < SubRegFromChannelTable[NumRegIndex - 1].size()); |
| 533 | return SubRegFromChannelTable[NumRegIndex - 1][Channel]; |
| 534 | } |
| 535 | |
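|  | // Picks the highest aligned 4-SGPR tuple below the SGPR limit; e.g. with a |
|  | // hypothetical MaxNumSGPRs of 102, BaseIdx = 96 and the returned register |
|  | // would be SGPR96_SGPR97_SGPR98_SGPR99. |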
| 536 | MCRegister SIRegisterInfo::reservedPrivateSegmentBufferReg( |
| 537 | const MachineFunction &MF) const { |
| 538 | unsigned BaseIdx = alignDown(ST.getMaxNumSGPRs(MF), 4) - 4; |
| 539 | MCRegister BaseReg(AMDGPU::SGPR_32RegClass.getRegister(BaseIdx)); |
| 540 | return getMatchingSuperReg(BaseReg, AMDGPU::sub0, &AMDGPU::SGPR_128RegClass); |
| 541 | } |
| 542 | |
| 543 | BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const { |
| 544 | BitVector Reserved(getNumRegs()); |
| 545 | Reserved.set(AMDGPU::MODE); |
| 546 | |
| 547 | const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); |
| 548 | |
| 549 | // Reserve special purpose registers. |
| 550 | // |
| 551 | // EXEC_LO and EXEC_HI could be allocated and used as regular registers, but |
| 552 | // this seems likely to result in bugs, so I'm marking them as reserved. |
| 553 | reserveRegisterTuples(Reserved, AMDGPU::EXEC); |
| 554 | reserveRegisterTuples(Reserved, AMDGPU::FLAT_SCR); |
| 555 | |
| 556 | // M0 has to be reserved so that llvm accepts it as a live-in into a block. |
| 557 | reserveRegisterTuples(Reserved, AMDGPU::M0); |
| 558 | |
| 559 | // Reserve src_vccz, src_execz, src_scc. |
| 560 | reserveRegisterTuples(Reserved, AMDGPU::SRC_VCCZ); |
| 561 | reserveRegisterTuples(Reserved, AMDGPU::SRC_EXECZ); |
| 562 | reserveRegisterTuples(Reserved, AMDGPU::SRC_SCC); |
| 563 | |
| 564 | // Reserve the memory aperture registers |
| 565 | reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_BASE); |
| 566 | reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_LIMIT); |
| 567 | reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_BASE); |
| 568 | reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_LIMIT); |
| 569 | |
| 570 | // Reserve src_pops_exiting_wave_id - support is not implemented in Codegen. |
| 571 | reserveRegisterTuples(Reserved, AMDGPU::SRC_POPS_EXITING_WAVE_ID); |
| 572 | |
| 573 | // Reserve xnack_mask registers - support is not implemented in Codegen. |
| 574 | reserveRegisterTuples(Reserved, AMDGPU::XNACK_MASK); |
| 575 | |
| 576 | // Reserve lds_direct register - support is not implemented in Codegen. |
| 577 | reserveRegisterTuples(Reserved, AMDGPU::LDS_DIRECT); |
| 578 | |
| 579 | // Reserve Trap Handler registers - support is not implemented in Codegen. |
| 580 | reserveRegisterTuples(Reserved, AMDGPU::TBA); |
| 581 | reserveRegisterTuples(Reserved, AMDGPU::TMA); |
| 582 | reserveRegisterTuples(Reserved, AMDGPU::TTMP0_TTMP1); |
| 583 | reserveRegisterTuples(Reserved, AMDGPU::TTMP2_TTMP3); |
| 584 | reserveRegisterTuples(Reserved, AMDGPU::TTMP4_TTMP5); |
| 585 | reserveRegisterTuples(Reserved, AMDGPU::TTMP6_TTMP7); |
| 586 | reserveRegisterTuples(Reserved, AMDGPU::TTMP8_TTMP9); |
| 587 | reserveRegisterTuples(Reserved, AMDGPU::TTMP10_TTMP11); |
| 588 | reserveRegisterTuples(Reserved, AMDGPU::TTMP12_TTMP13); |
| 589 | reserveRegisterTuples(Reserved, AMDGPU::TTMP14_TTMP15); |
| 590 | |
| 591 | // Reserve null register - it shall never be allocated |
| 592 | reserveRegisterTuples(Reserved, AMDGPU::SGPR_NULL64); |
| 593 | |
| 594 | // Disallow vcc_hi allocation in wave32. It may be allocated but most likely |
| 595 | // will result in bugs. |
| 596 | if (isWave32) { |
| 597 | Reserved.set(AMDGPU::VCC); |
| 598 | Reserved.set(AMDGPU::VCC_HI); |
| 599 | } |
| 600 | |
| 601 | // Reserve SGPRs. |
| 602 | // |
| 603 | unsigned MaxNumSGPRs = ST.getMaxNumSGPRs(MF); |
| 604 | unsigned TotalNumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs(); |
| 605 | for (unsigned i = MaxNumSGPRs; i < TotalNumSGPRs; ++i) { |
| 606 | unsigned Reg = AMDGPU::SGPR_32RegClass.getRegister(i); |
| 607 | reserveRegisterTuples(Reserved, Reg); |
| 608 | } |
| 609 | |
| 610 | Register ScratchRSrcReg = MFI->getScratchRSrcReg(); |
| 611 | if (ScratchRSrcReg != AMDGPU::NoRegister) { |
| 612 | // Reserve 4 SGPRs for the scratch buffer resource descriptor in case we |
| 613 | // need to spill. |
| 614 | // TODO: May need to reserve a VGPR if doing LDS spilling. |
| 615 | reserveRegisterTuples(Reserved, ScratchRSrcReg); |
| 616 | } |
| 617 | |
| 618 | // We have to assume the SP is needed in case there are calls in the function, |
| 619 | // which is detected after the function is lowered. If we aren't really going |
| 620 | // to need SP, don't bother reserving it. |
| 621 | MCRegister StackPtrReg = MFI->getStackPtrOffsetReg(); |
| 622 | if (StackPtrReg) { |
| 623 | reserveRegisterTuples(Reserved, StackPtrReg); |
| 624 | assert(!isSubRegister(ScratchRSrcReg, StackPtrReg)); |
| 625 | } |
| 626 | |
| 627 | MCRegister FrameReg = MFI->getFrameOffsetReg(); |
| 628 | if (FrameReg) { |
| 629 | reserveRegisterTuples(Reserved, FrameReg); |
| 630 | assert(!isSubRegister(ScratchRSrcReg, FrameReg)); |
| 631 | } |
| 632 | |
| 633 | if (hasBasePointer(MF)) { |
| 634 | MCRegister BasePtrReg = getBaseRegister(); |
| 635 | reserveRegisterTuples(Reserved, BasePtrReg); |
| 636 | assert(!isSubRegister(ScratchRSrcReg, BasePtrReg)); |
| 637 | } |
| 638 | |
| 639 | // Reserve VGPRs/AGPRs. |
| 640 | // |
| 641 | unsigned MaxNumVGPRs = ST.getMaxNumVGPRs(MF); |
| 642 | unsigned MaxNumAGPRs = MaxNumVGPRs; |
| 643 | unsigned TotalNumVGPRs = AMDGPU::VGPR_32RegClass.getNumRegs(); |
| 644 | |
| 645 | // On GFX90A, the number of VGPRs and AGPRs need not be equal. Theoretically, |
| 646 | // a wave may have up to 512 total vector registers combining together both |
| 647 | // VGPRs and AGPRs. Hence, in an entry function without calls and without |
| 648 | // AGPRs used within it, it is possible to use the whole vector register |
| 649 | // budget for VGPRs. |
| 650 | // |
| 651 | // TODO: it shall be possible to estimate maximum AGPR/VGPR pressure and split |
| 652 | // register file accordingly. |
| 653 | if (ST.hasGFX90AInsts()) { |
| 654 | if (MFI->usesAGPRs(MF)) { |
| 655 | MaxNumVGPRs /= 2; |
| 656 | MaxNumAGPRs = MaxNumVGPRs; |
| 657 | } else { |
| 658 | if (MaxNumVGPRs > TotalNumVGPRs) { |
| 659 | MaxNumAGPRs = MaxNumVGPRs - TotalNumVGPRs; |
| 660 | MaxNumVGPRs = TotalNumVGPRs; |
| 661 | } else |
| 662 | MaxNumAGPRs = 0; |
| 663 | } |
| 664 | } |
| 665 | |
| 666 | for (unsigned i = MaxNumVGPRs; i < TotalNumVGPRs; ++i) { |
| 667 | unsigned Reg = AMDGPU::VGPR_32RegClass.getRegister(i); |
| 668 | reserveRegisterTuples(Reserved, Reg); |
| 669 | } |
| 670 | |
| 671 | if (ST.hasMAIInsts()) { |
| 672 | for (unsigned i = MaxNumAGPRs; i < TotalNumVGPRs; ++i) { |
| 673 | unsigned Reg = AMDGPU::AGPR_32RegClass.getRegister(i); |
| 674 | reserveRegisterTuples(Reserved, Reg); |
| 675 | } |
| 676 | } else { |
| 677 | // Reserve all the AGPRs if there are no instructions to use them. |
| 678 | for (MCRegister Reg : AMDGPU::AGPR_32RegClass) |
| 679 | reserveRegisterTuples(Reserved, Reg); |
| 680 | } |
| 681 | |
| 682 | // On GFX908, in order to guarantee copying between AGPRs, we need a scratch |
| 683 | // VGPR available at all times. |
| 684 | if (ST.hasMAIInsts() && !ST.hasGFX90AInsts()) { |
| 685 | reserveRegisterTuples(Reserved, MFI->getVGPRForAGPRCopy()); |
| 686 | } |
| 687 | |
| 688 | for (Register Reg : MFI->getWWMReservedRegs()) |
| 689 | reserveRegisterTuples(Reserved, Reg); |
| 690 | |
| 691 | // FIXME: Stop using reserved registers for this. |
| 692 | for (MCPhysReg Reg : MFI->getAGPRSpillVGPRs()) |
| 693 | reserveRegisterTuples(Reserved, Reg); |
| 694 | |
| 695 | for (MCPhysReg Reg : MFI->getVGPRSpillAGPRs()) |
| 696 | reserveRegisterTuples(Reserved, Reg); |
| 697 | |
| 698 | for (auto Reg : MFI->getSGPRSpillVGPRs()) |
| 699 | reserveRegisterTuples(Reserved, Reg); |
| 700 | |
| 701 | return Reserved; |
| 702 | } |
| 703 | |
| 704 | bool SIRegisterInfo::isAsmClobberable(const MachineFunction &MF, |
| 705 | MCRegister PhysReg) const { |
| 706 | return !MF.getRegInfo().isReserved(PhysReg); |
| 707 | } |
| 708 | |
| 709 | bool SIRegisterInfo::shouldRealignStack(const MachineFunction &MF) const { |
| 710 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
| 711 | // On entry, the base address is 0, so it can't possibly need any more |
| 712 | // alignment. |
| 713 | |
| 714 | // FIXME: Should be able to specify the entry frame alignment per calling |
| 715 | // convention instead. |
| 716 | if (Info->isEntryFunction()) |
| 717 | return false; |
| 718 | |
| 719 | return TargetRegisterInfo::shouldRealignStack(MF); |
| 720 | } |
| 721 | |
| 722 | bool SIRegisterInfo::requiresRegisterScavenging(const MachineFunction &Fn) const { |
| 723 | const SIMachineFunctionInfo *Info = Fn.getInfo<SIMachineFunctionInfo>(); |
| 724 | if (Info->isEntryFunction()) { |
| 725 | const MachineFrameInfo &MFI = Fn.getFrameInfo(); |
| 726 | return MFI.hasStackObjects() || MFI.hasCalls(); |
| 727 | } |
| 728 | |
| 729 | // May need scavenger for dealing with callee saved registers. |
| 730 | return true; |
| 731 | } |
| 732 | |
| 733 | bool SIRegisterInfo::requiresFrameIndexScavenging( |
| 734 | const MachineFunction &MF) const { |
| 735 | // Do not use frame virtual registers. They used to be used for SGPRs, but |
| 736 | // once we reach PrologEpilogInserter, we can no longer spill SGPRs. If the |
| 737 | // scavenger fails, we can increment/decrement the necessary SGPRs to avoid a |
| 738 | // spill. |
| 739 | return false; |
| 740 | } |
| 741 | |
| 742 | bool SIRegisterInfo::requiresFrameIndexReplacementScavenging( |
| 743 | const MachineFunction &MF) const { |
| 744 | const MachineFrameInfo &MFI = MF.getFrameInfo(); |
| 745 | return MFI.hasStackObjects(); |
| 746 | } |
| 747 | |
| 748 | bool SIRegisterInfo::requiresVirtualBaseRegisters( |
| 749 | const MachineFunction &) const { |
| 750 | // There are no special dedicated stack or frame pointers. |
| 751 | return true; |
| 752 | } |
| 753 | |
| 754 | int64_t SIRegisterInfo::getScratchInstrOffset(const MachineInstr *MI) const { |
| 755 | assert(SIInstrInfo::isMUBUF(*MI) || SIInstrInfo::isFLATScratch(*MI)); |
| 756 | |
| 757 | int OffIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(), |
| 758 | AMDGPU::OpName::offset); |
| 759 | return MI->getOperand(OffIdx).getImm(); |
| 760 | } |
| 761 | |
| 762 | int64_t SIRegisterInfo::getFrameIndexInstrOffset(const MachineInstr *MI, |
| 763 | int Idx) const { |
| 764 | if (!SIInstrInfo::isMUBUF(*MI) && !SIInstrInfo::isFLATScratch(*MI)) |
| 765 | return 0; |
| 766 | |
| 767 | assert((Idx == AMDGPU::getNamedOperandIdx(MI->getOpcode(), |
| 768 | AMDGPU::OpName::vaddr) || |
| 769 | (Idx == AMDGPU::getNamedOperandIdx(MI->getOpcode(), |
| 770 | AMDGPU::OpName::saddr))) && |
| 771 | "Should never see frame index on non-address operand"); |
| 772 | |
| 773 | return getScratchInstrOffset(MI); |
| 774 | } |
| 775 | |
| 776 | bool SIRegisterInfo::needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const { |
| 777 | if (!SIInstrInfo::isMUBUF(*MI) && !SIInstrInfo::isFLATScratch(*MI)) |
| 778 | return false; |
| 779 | |
| 780 | int64_t FullOffset = Offset + getScratchInstrOffset(MI); |
| 781 | |
| 782 | if (SIInstrInfo::isMUBUF(*MI)) |
| 783 | return !SIInstrInfo::isLegalMUBUFImmOffset(FullOffset); |
| 784 | |
| 785 | const SIInstrInfo *TII = ST.getInstrInfo(); |
| 786 | return !TII->isLegalFLATOffset(FullOffset, AMDGPUAS::PRIVATE_ADDRESS, |
| 787 | SIInstrFlags::FlatScratch); |
| 788 | } |
| 789 | |
| 790 | Register SIRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB, |
| 791 | int FrameIdx, |
| 792 | int64_t Offset) const { |
| 793 | MachineBasicBlock::iterator Ins = MBB->begin(); |
| 794 | DebugLoc DL; // Defaults to "unknown" |
| 795 | |
| 796 | if (Ins != MBB->end()) |
| 797 | DL = Ins->getDebugLoc(); |
| 798 | |
| 799 | MachineFunction *MF = MBB->getParent(); |
| 800 | const SIInstrInfo *TII = ST.getInstrInfo(); |
| 801 | MachineRegisterInfo &MRI = MF->getRegInfo(); |
| 802 | unsigned MovOpc = ST.enableFlatScratch() ? AMDGPU::S_MOV_B32 |
| 803 | : AMDGPU::V_MOV_B32_e32; |
| 804 | |
| 805 | Register BaseReg = MRI.createVirtualRegister( |
| 806 | ST.enableFlatScratch() ? &AMDGPU::SReg_32_XEXEC_HIRegClass |
| 807 | : &AMDGPU::VGPR_32RegClass); |
| 808 | |
| 809 | if (Offset == 0) { |
| 810 | BuildMI(*MBB, Ins, DL, TII->get(MovOpc), BaseReg) |
| 811 | .addFrameIndex(FrameIdx); |
| 812 | return BaseReg; |
| 813 | } |
| 814 | |
| 815 | Register OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); |
| 816 | |
| 817 | Register FIReg = MRI.createVirtualRegister( |
| 818 | ST.enableFlatScratch() ? &AMDGPU::SReg_32_XM0RegClass |
| 819 | : &AMDGPU::VGPR_32RegClass); |
| 820 | |
| 821 | BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg) |
| 822 | .addImm(Offset); |
| 823 | BuildMI(*MBB, Ins, DL, TII->get(MovOpc), FIReg) |
| 824 | .addFrameIndex(FrameIdx); |
| 825 | |
| 826 | if (ST.enableFlatScratch() ) { |
| 827 | BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::S_ADD_I32), BaseReg) |
| 828 | .addReg(OffsetReg, RegState::Kill) |
| 829 | .addReg(FIReg); |
| 830 | return BaseReg; |
| 831 | } |
| 832 | |
| 833 | TII->getAddNoCarry(*MBB, Ins, DL, BaseReg) |
| 834 | .addReg(OffsetReg, RegState::Kill) |
| 835 | .addReg(FIReg) |
| 836 | .addImm(0); // clamp bit |
| 837 | |
| 838 | return BaseReg; |
| 839 | } |
| 840 | |
| 841 | void SIRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg, |
| 842 | int64_t Offset) const { |
| 843 | const SIInstrInfo *TII = ST.getInstrInfo(); |
| 844 | bool IsFlat = TII->isFLATScratch(MI); |
| 845 | |
| 846 | #ifndef NDEBUG |
| 847 | // FIXME: Is it possible to be storing a frame index to itself? |
| 848 | bool SeenFI = false; |
| 849 | for (const MachineOperand &MO: MI.operands()) { |
| 850 | if (MO.isFI()) { |
| 851 | if (SeenFI) |
| 852 | llvm_unreachable("should not see multiple frame indices"); |
| 853 | |
| 854 | SeenFI = true; |
| 855 | } |
| 856 | } |
| 857 | #endif |
| 858 | |
| 859 | MachineOperand *FIOp = |
| 860 | TII->getNamedOperand(MI, IsFlat ? AMDGPU::OpName::saddr |
| 861 | : AMDGPU::OpName::vaddr); |
| 862 | |
| 863 | MachineOperand *OffsetOp = TII->getNamedOperand(MI, AMDGPU::OpName::offset); |
| 864 | int64_t NewOffset = OffsetOp->getImm() + Offset; |
| 865 | |
| 866 | assert(FIOp && FIOp->isFI() && "frame index must be address operand"); |
| 867 | assert(TII->isMUBUF(MI) || TII->isFLATScratch(MI)); |
| 868 | |
| 869 | if (IsFlat) { |
| 870 | assert(TII->isLegalFLATOffset(NewOffset, AMDGPUAS::PRIVATE_ADDRESS, |
| 871 | SIInstrFlags::FlatScratch) && |
| 872 | "offset should be legal"); |
| 873 | FIOp->ChangeToRegister(BaseReg, false); |
| 874 | OffsetOp->setImm(NewOffset); |
| 875 | return; |
| 876 | } |
| 877 | |
| 878 | #ifndef NDEBUG |
| 879 | MachineOperand *SOffset = TII->getNamedOperand(MI, AMDGPU::OpName::soffset); |
| 880 | assert(SOffset->isImm() && SOffset->getImm() == 0); |
| 881 | #endif |
| 882 | |
| 883 | assert(SIInstrInfo::isLegalMUBUFImmOffset(NewOffset) && |
| 884 | "offset should be legal"); |
| 885 | |
| 886 | FIOp->ChangeToRegister(BaseReg, false); |
| 887 | OffsetOp->setImm(NewOffset); |
| 888 | } |
| 889 | |
| 890 | bool SIRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI, |
| 891 | Register BaseReg, |
| 892 | int64_t Offset) const { |
| 893 | if (!SIInstrInfo::isMUBUF(*MI) && !SIInstrInfo::isFLATScratch(*MI)) |
| 894 | return false; |
| 895 | |
| 896 | int64_t NewOffset = Offset + getScratchInstrOffset(MI); |
| 897 | |
| 898 | if (SIInstrInfo::isMUBUF(*MI)) |
| 899 | return SIInstrInfo::isLegalMUBUFImmOffset(NewOffset); |
| 900 | |
| 901 | const SIInstrInfo *TII = ST.getInstrInfo(); |
| 902 | return TII->isLegalFLATOffset(NewOffset, AMDGPUAS::PRIVATE_ADDRESS, |
| 903 | SIInstrFlags::FlatScratch); |
| 904 | } |
| 905 | |
| 906 | const TargetRegisterClass *SIRegisterInfo::getPointerRegClass( |
| 907 | const MachineFunction &MF, unsigned Kind) const { |
| 908 | // This is inaccurate. It depends on the instruction and address space. The |
| 909 | // only place where we should hit this is for dealing with frame indexes / |
| 910 | // private accesses, so this is correct in that case. |
| 911 | return &AMDGPU::VGPR_32RegClass; |
| 912 | } |
| 913 | |
| 914 | const TargetRegisterClass * |
| 915 | SIRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const { |
| 916 | if (isAGPRClass(RC) && !ST.hasGFX90AInsts()) |
| 917 | return getEquivalentVGPRClass(RC); |
| 918 | if (RC == &AMDGPU::SCC_CLASSRegClass) |
| 919 | return getWaveMaskRegClass(); |
| 920 | |
| 921 | return RC; |
| 922 | } |
| 923 | |
| 924 | static unsigned getNumSubRegsForSpillOp(unsigned Op) { |
| 925 | |
| 926 | switch (Op) { |
| 927 | case AMDGPU::SI_SPILL_S1024_SAVE: |
| 928 | case AMDGPU::SI_SPILL_S1024_RESTORE: |
| 929 | case AMDGPU::SI_SPILL_V1024_SAVE: |
| 930 | case AMDGPU::SI_SPILL_V1024_RESTORE: |
| 931 | case AMDGPU::SI_SPILL_A1024_SAVE: |
| 932 | case AMDGPU::SI_SPILL_A1024_RESTORE: |
| 933 | case AMDGPU::SI_SPILL_AV1024_SAVE: |
| 934 | case AMDGPU::SI_SPILL_AV1024_RESTORE: |
| 935 | return 32; |
| 936 | case AMDGPU::SI_SPILL_S512_SAVE: |
| 937 | case AMDGPU::SI_SPILL_S512_RESTORE: |
| 938 | case AMDGPU::SI_SPILL_V512_SAVE: |
| 939 | case AMDGPU::SI_SPILL_V512_RESTORE: |
| 940 | case AMDGPU::SI_SPILL_A512_SAVE: |
| 941 | case AMDGPU::SI_SPILL_A512_RESTORE: |
| 942 | case AMDGPU::SI_SPILL_AV512_SAVE: |
| 943 | case AMDGPU::SI_SPILL_AV512_RESTORE: |
| 944 | return 16; |
| 945 | case AMDGPU::SI_SPILL_S384_SAVE: |
| 946 | case AMDGPU::SI_SPILL_S384_RESTORE: |
| 947 | case AMDGPU::SI_SPILL_V384_SAVE: |
| 948 | case AMDGPU::SI_SPILL_V384_RESTORE: |
| 949 | case AMDGPU::SI_SPILL_A384_SAVE: |
| 950 | case AMDGPU::SI_SPILL_A384_RESTORE: |
| 951 | case AMDGPU::SI_SPILL_AV384_SAVE: |
| 952 | case AMDGPU::SI_SPILL_AV384_RESTORE: |
| 953 | return 12; |
| 954 | case AMDGPU::SI_SPILL_S352_SAVE: |
| 955 | case AMDGPU::SI_SPILL_S352_RESTORE: |
| 956 | case AMDGPU::SI_SPILL_V352_SAVE: |
| 957 | case AMDGPU::SI_SPILL_V352_RESTORE: |
| 958 | case AMDGPU::SI_SPILL_A352_SAVE: |
| 959 | case AMDGPU::SI_SPILL_A352_RESTORE: |
| 960 | case AMDGPU::SI_SPILL_AV352_SAVE: |
| 961 | case AMDGPU::SI_SPILL_AV352_RESTORE: |
| 962 | return 11; |
| 963 | case AMDGPU::SI_SPILL_S320_SAVE: |
| 964 | case AMDGPU::SI_SPILL_S320_RESTORE: |
| 965 | case AMDGPU::SI_SPILL_V320_SAVE: |
| 966 | case AMDGPU::SI_SPILL_V320_RESTORE: |
| 967 | case AMDGPU::SI_SPILL_A320_SAVE: |
| 968 | case AMDGPU::SI_SPILL_A320_RESTORE: |
| 969 | case AMDGPU::SI_SPILL_AV320_SAVE: |
| 970 | case AMDGPU::SI_SPILL_AV320_RESTORE: |
| 971 | return 10; |
| 972 | case AMDGPU::SI_SPILL_S288_SAVE: |
| 973 | case AMDGPU::SI_SPILL_S288_RESTORE: |
| 974 | case AMDGPU::SI_SPILL_V288_SAVE: |
| 975 | case AMDGPU::SI_SPILL_V288_RESTORE: |
| 976 | case AMDGPU::SI_SPILL_A288_SAVE: |
| 977 | case AMDGPU::SI_SPILL_A288_RESTORE: |
| 978 | case AMDGPU::SI_SPILL_AV288_SAVE: |
| 979 | case AMDGPU::SI_SPILL_AV288_RESTORE: |
| 980 | return 9; |
| 981 | case AMDGPU::SI_SPILL_S256_SAVE: |
| 982 | case AMDGPU::SI_SPILL_S256_RESTORE: |
| 983 | case AMDGPU::SI_SPILL_V256_SAVE: |
| 984 | case AMDGPU::SI_SPILL_V256_RESTORE: |
| 985 | case AMDGPU::SI_SPILL_A256_SAVE: |
| 986 | case AMDGPU::SI_SPILL_A256_RESTORE: |
| 987 | case AMDGPU::SI_SPILL_AV256_SAVE: |
| 988 | case AMDGPU::SI_SPILL_AV256_RESTORE: |
| 989 | return 8; |
| 990 | case AMDGPU::SI_SPILL_S224_SAVE: |
| 991 | case AMDGPU::SI_SPILL_S224_RESTORE: |
| 992 | case AMDGPU::SI_SPILL_V224_SAVE: |
| 993 | case AMDGPU::SI_SPILL_V224_RESTORE: |
| 994 | case AMDGPU::SI_SPILL_A224_SAVE: |
| 995 | case AMDGPU::SI_SPILL_A224_RESTORE: |
| 996 | case AMDGPU::SI_SPILL_AV224_SAVE: |
| 997 | case AMDGPU::SI_SPILL_AV224_RESTORE: |
| 998 | return 7; |
| 999 | case AMDGPU::SI_SPILL_S192_SAVE: |
| 1000 | case AMDGPU::SI_SPILL_S192_RESTORE: |
| 1001 | case AMDGPU::SI_SPILL_V192_SAVE: |
| 1002 | case AMDGPU::SI_SPILL_V192_RESTORE: |
| 1003 | case AMDGPU::SI_SPILL_A192_SAVE: |
| 1004 | case AMDGPU::SI_SPILL_A192_RESTORE: |
| 1005 | case AMDGPU::SI_SPILL_AV192_SAVE: |
| 1006 | case AMDGPU::SI_SPILL_AV192_RESTORE: |
| 1007 | return 6; |
| 1008 | case AMDGPU::SI_SPILL_S160_SAVE: |
| 1009 | case AMDGPU::SI_SPILL_S160_RESTORE: |
| 1010 | case AMDGPU::SI_SPILL_V160_SAVE: |
| 1011 | case AMDGPU::SI_SPILL_V160_RESTORE: |
| 1012 | case AMDGPU::SI_SPILL_A160_SAVE: |
| 1013 | case AMDGPU::SI_SPILL_A160_RESTORE: |
| 1014 | case AMDGPU::SI_SPILL_AV160_SAVE: |
| 1015 | case AMDGPU::SI_SPILL_AV160_RESTORE: |
| 1016 | return 5; |
| 1017 | case AMDGPU::SI_SPILL_S128_SAVE: |
| 1018 | case AMDGPU::SI_SPILL_S128_RESTORE: |
| 1019 | case AMDGPU::SI_SPILL_V128_SAVE: |
| 1020 | case AMDGPU::SI_SPILL_V128_RESTORE: |
| 1021 | case AMDGPU::SI_SPILL_A128_SAVE: |
| 1022 | case AMDGPU::SI_SPILL_A128_RESTORE: |
| 1023 | case AMDGPU::SI_SPILL_AV128_SAVE: |
| 1024 | case AMDGPU::SI_SPILL_AV128_RESTORE: |
| 1025 | return 4; |
| 1026 | case AMDGPU::SI_SPILL_S96_SAVE: |
| 1027 | case AMDGPU::SI_SPILL_S96_RESTORE: |
| 1028 | case AMDGPU::SI_SPILL_V96_SAVE: |
| 1029 | case AMDGPU::SI_SPILL_V96_RESTORE: |
| 1030 | case AMDGPU::SI_SPILL_A96_SAVE: |
| 1031 | case AMDGPU::SI_SPILL_A96_RESTORE: |
| 1032 | case AMDGPU::SI_SPILL_AV96_SAVE: |
| 1033 | case AMDGPU::SI_SPILL_AV96_RESTORE: |
| 1034 | return 3; |
| 1035 | case AMDGPU::SI_SPILL_S64_SAVE: |
| 1036 | case AMDGPU::SI_SPILL_S64_RESTORE: |
| 1037 | case AMDGPU::SI_SPILL_V64_SAVE: |
| 1038 | case AMDGPU::SI_SPILL_V64_RESTORE: |
| 1039 | case AMDGPU::SI_SPILL_A64_SAVE: |
| 1040 | case AMDGPU::SI_SPILL_A64_RESTORE: |
| 1041 | case AMDGPU::SI_SPILL_AV64_SAVE: |
| 1042 | case AMDGPU::SI_SPILL_AV64_RESTORE: |
| 1043 | return 2; |
| 1044 | case AMDGPU::SI_SPILL_S32_SAVE: |
| 1045 | case AMDGPU::SI_SPILL_S32_RESTORE: |
| 1046 | case AMDGPU::SI_SPILL_V32_SAVE: |
| 1047 | case AMDGPU::SI_SPILL_V32_RESTORE: |
| 1048 | case AMDGPU::SI_SPILL_A32_SAVE: |
| 1049 | case AMDGPU::SI_SPILL_A32_RESTORE: |
| 1050 | case AMDGPU::SI_SPILL_AV32_SAVE: |
| 1051 | case AMDGPU::SI_SPILL_AV32_RESTORE: |
| 1052 | return 1; |
| 1053 | default: llvm_unreachable("Invalid spill opcode"); |
| 1054 | } |
| 1055 | } |
| 1056 | |
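| | // The next four helpers translate between the register-offset (OFFEN) and |
| | // immediate-only (OFFSET) forms of MUBUF store/load opcodes, returning -1 |
| | // when no equivalent form exists. |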
| 1057 | static int getOffsetMUBUFStore(unsigned Opc) { |
| 1058 | switch (Opc) { |
| 1059 | case AMDGPU::BUFFER_STORE_DWORD_OFFEN: |
| 1060 | return AMDGPU::BUFFER_STORE_DWORD_OFFSET; |
| 1061 | case AMDGPU::BUFFER_STORE_BYTE_OFFEN: |
| 1062 | return AMDGPU::BUFFER_STORE_BYTE_OFFSET; |
| 1063 | case AMDGPU::BUFFER_STORE_SHORT_OFFEN: |
| 1064 | return AMDGPU::BUFFER_STORE_SHORT_OFFSET; |
| 1065 | case AMDGPU::BUFFER_STORE_DWORDX2_OFFEN: |
| 1066 | return AMDGPU::BUFFER_STORE_DWORDX2_OFFSET; |
| 1067 | case AMDGPU::BUFFER_STORE_DWORDX3_OFFEN: |
| 1068 | return AMDGPU::BUFFER_STORE_DWORDX3_OFFSET; |
| 1069 | case AMDGPU::BUFFER_STORE_DWORDX4_OFFEN: |
| 1070 | return AMDGPU::BUFFER_STORE_DWORDX4_OFFSET; |
| 1071 | case AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFEN: |
| 1072 | return AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFSET; |
| 1073 | case AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFEN: |
| 1074 | return AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFSET; |
| 1075 | default: |
| 1076 | return -1; |
| 1077 | } |
| 1078 | } |
| 1079 | |
| 1080 | static int getOffsetMUBUFLoad(unsigned Opc) { |
| 1081 | switch (Opc) { |
| 1082 | case AMDGPU::BUFFER_LOAD_DWORD_OFFEN: |
| 1083 | return AMDGPU::BUFFER_LOAD_DWORD_OFFSET; |
| 1084 | case AMDGPU::BUFFER_LOAD_UBYTE_OFFEN: |
| 1085 | return AMDGPU::BUFFER_LOAD_UBYTE_OFFSET; |
| 1086 | case AMDGPU::BUFFER_LOAD_SBYTE_OFFEN: |
| 1087 | return AMDGPU::BUFFER_LOAD_SBYTE_OFFSET; |
| 1088 | case AMDGPU::BUFFER_LOAD_USHORT_OFFEN: |
| 1089 | return AMDGPU::BUFFER_LOAD_USHORT_OFFSET; |
| 1090 | case AMDGPU::BUFFER_LOAD_SSHORT_OFFEN: |
| 1091 | return AMDGPU::BUFFER_LOAD_SSHORT_OFFSET; |
| 1092 | case AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN: |
| 1093 | return AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET; |
| 1094 | case AMDGPU::BUFFER_LOAD_DWORDX3_OFFEN: |
| 1095 | return AMDGPU::BUFFER_LOAD_DWORDX3_OFFSET; |
| 1096 | case AMDGPU::BUFFER_LOAD_DWORDX4_OFFEN: |
| 1097 | return AMDGPU::BUFFER_LOAD_DWORDX4_OFFSET; |
| 1098 | case AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFEN: |
| 1099 | return AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFSET; |
| 1100 | case AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFEN: |
| 1101 | return AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFSET; |
| 1102 | case AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFEN: |
| 1103 | return AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFSET; |
| 1104 | case AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFEN: |
| 1105 | return AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFSET; |
| 1106 | case AMDGPU::BUFFER_LOAD_SHORT_D16_OFFEN: |
| 1107 | return AMDGPU::BUFFER_LOAD_SHORT_D16_OFFSET; |
| 1108 | case AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFEN: |
| 1109 | return AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFSET; |
| 1110 | default: |
| 1111 | return -1; |
| 1112 | } |
| 1113 | } |
| 1114 | |
| 1115 | static int getOffenMUBUFStore(unsigned Opc) { |
| 1116 | switch (Opc) { |
| 1117 | case AMDGPU::BUFFER_STORE_DWORD_OFFSET: |
| 1118 | return AMDGPU::BUFFER_STORE_DWORD_OFFEN; |
| 1119 | case AMDGPU::BUFFER_STORE_BYTE_OFFSET: |
| 1120 | return AMDGPU::BUFFER_STORE_BYTE_OFFEN; |
| 1121 | case AMDGPU::BUFFER_STORE_SHORT_OFFSET: |
| 1122 | return AMDGPU::BUFFER_STORE_SHORT_OFFEN; |
| 1123 | case AMDGPU::BUFFER_STORE_DWORDX2_OFFSET: |
| 1124 | return AMDGPU::BUFFER_STORE_DWORDX2_OFFEN; |
| 1125 | case AMDGPU::BUFFER_STORE_DWORDX3_OFFSET: |
| 1126 | return AMDGPU::BUFFER_STORE_DWORDX3_OFFEN; |
| 1127 | case AMDGPU::BUFFER_STORE_DWORDX4_OFFSET: |
| 1128 | return AMDGPU::BUFFER_STORE_DWORDX4_OFFEN; |
| 1129 | case AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFSET: |
| 1130 | return AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFEN; |
| 1131 | case AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFSET: |
| 1132 | return AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFEN; |
| 1133 | default: |
| 1134 | return -1; |
| 1135 | } |
| 1136 | } |
| 1137 | |
| 1138 | static int getOffenMUBUFLoad(unsigned Opc) { |
| 1139 | switch (Opc) { |
| 1140 | case AMDGPU::BUFFER_LOAD_DWORD_OFFSET: |
| 1141 | return AMDGPU::BUFFER_LOAD_DWORD_OFFEN; |
| 1142 | case AMDGPU::BUFFER_LOAD_UBYTE_OFFSET: |
| 1143 | return AMDGPU::BUFFER_LOAD_UBYTE_OFFEN; |
| 1144 | case AMDGPU::BUFFER_LOAD_SBYTE_OFFSET: |
| 1145 | return AMDGPU::BUFFER_LOAD_SBYTE_OFFEN; |
| 1146 | case AMDGPU::BUFFER_LOAD_USHORT_OFFSET: |
| 1147 | return AMDGPU::BUFFER_LOAD_USHORT_OFFEN; |
| 1148 | case AMDGPU::BUFFER_LOAD_SSHORT_OFFSET: |
| 1149 | return AMDGPU::BUFFER_LOAD_SSHORT_OFFEN; |
| 1150 | case AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET: |
| 1151 | return AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN; |
| 1152 | case AMDGPU::BUFFER_LOAD_DWORDX3_OFFSET: |
| 1153 | return AMDGPU::BUFFER_LOAD_DWORDX3_OFFEN; |
| 1154 | case AMDGPU::BUFFER_LOAD_DWORDX4_OFFSET: |
| 1155 | return AMDGPU::BUFFER_LOAD_DWORDX4_OFFEN; |
| 1156 | case AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFSET: |
| 1157 | return AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFEN; |
| 1158 | case AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFSET: |
| 1159 | return AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFEN; |
| 1160 | case AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFSET: |
| 1161 | return AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFEN; |
| 1162 | case AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFSET: |
| 1163 | return AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFEN; |
| 1164 | case AMDGPU::BUFFER_LOAD_SHORT_D16_OFFSET: |
| 1165 | return AMDGPU::BUFFER_LOAD_SHORT_D16_OFFEN; |
| 1166 | case AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFSET: |
| 1167 | return AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFEN; |
| 1168 | default: |
| 1169 | return -1; |
| 1170 | } |
| 1171 | } |
| 1172 | |
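| | // Try to spill or restore a VGPR directly to/from an AGPR lane assigned to |
| | // this frame index. Returns an empty MachineInstrBuilder if no AGPR lane is |
| | // available, in which case the caller falls back to a real memory access. |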
| 1173 | static MachineInstrBuilder spillVGPRtoAGPR(const GCNSubtarget &ST, |
| 1174 | MachineBasicBlock &MBB, |
| 1175 | MachineBasicBlock::iterator MI, |
| 1176 | int Index, unsigned Lane, |
| 1177 | unsigned ValueReg, bool IsKill) { |
| 1178 | MachineFunction *MF = MBB.getParent(); |
| 1179 | SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); |
| 1180 | const SIInstrInfo *TII = ST.getInstrInfo(); |
| 1181 | |
| 1182 | MCPhysReg Reg = MFI->getVGPRToAGPRSpill(Index, Lane); |
| 1183 | |
| 1184 | if (Reg == AMDGPU::NoRegister) |
| 1185 | return MachineInstrBuilder(); |
| 1186 | |
| 1187 | bool IsStore = MI->mayStore(); |
| 1188 | MachineRegisterInfo &MRI = MF->getRegInfo(); |
| 1189 | auto *TRI = static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo()); |
| 1190 | |
| 1191 | unsigned Dst = IsStore ? Reg : ValueReg; |
| 1192 | unsigned Src = IsStore ? ValueReg : Reg; |
| 1193 | bool IsVGPR = TRI->isVGPR(MRI, Reg); |
| 1194 | DebugLoc DL = MI->getDebugLoc(); |
| 1195 | if (IsVGPR == TRI->isVGPR(MRI, ValueReg)) { |
| 1196 | // Spiller during regalloc may restore a spilled register to its superclass. |
| 1197 | // It could result in AGPR spills restored to VGPRs or the other way around, |
| 1198 | // leaving the src and dst with identical register classes at this point. A |
| 1199 | // plain copy is all that is needed in such cases. |
| 1200 | auto CopyMIB = BuildMI(MBB, MI, DL, TII->get(AMDGPU::COPY), Dst) |
| 1201 | .addReg(Src, getKillRegState(IsKill)); |
| 1202 | CopyMIB->setAsmPrinterFlag(MachineInstr::ReloadReuse); |
| 1203 | return CopyMIB; |
| 1204 | } |
| 1205 | unsigned Opc = (IsStore ^ IsVGPR) ? AMDGPU::V_ACCVGPR_WRITE_B32_e64 |
| 1206 | : AMDGPU::V_ACCVGPR_READ_B32_e64; |
| 1207 | |
| 1208 | auto MIB = BuildMI(MBB, MI, DL, TII->get(Opc), Dst) |
| 1209 | .addReg(Src, getKillRegState(IsKill)); |
| 1210 | MIB->setAsmPrinterFlag(MachineInstr::ReloadReuse); |
| 1211 | return MIB; |
| 1212 | } |
| 1213 | |
| 1214 | // This differs from buildSpillLoadStore by only scavenging a VGPR. It does not |
| 1215 | // need to handle the case where an SGPR may need to be spilled while spilling. |
| 1216 | static bool buildMUBUFOffsetLoadStore(const GCNSubtarget &ST, |
| 1217 | MachineFrameInfo &MFI, |
| 1218 | MachineBasicBlock::iterator MI, |
| 1219 | int Index, |
| 1220 | int64_t Offset) { |
| 1221 | const SIInstrInfo *TII = ST.getInstrInfo(); |
| 1222 | MachineBasicBlock *MBB = MI->getParent(); |
| 1223 | const DebugLoc &DL = MI->getDebugLoc(); |
| 1224 | bool IsStore = MI->mayStore(); |
| 1225 | |
| 1226 | unsigned Opc = MI->getOpcode(); |
| 1227 | int LoadStoreOp = IsStore ? |
| 1228 | getOffsetMUBUFStore(Opc) : getOffsetMUBUFLoad(Opc); |
| 1229 | if (LoadStoreOp == -1) |
| 1230 | return false; |
| 1231 | |
| 1232 | const MachineOperand *Reg = TII->getNamedOperand(*MI, AMDGPU::OpName::vdata); |
| 1233 | if (spillVGPRtoAGPR(ST, *MBB, MI, Index, 0, Reg->getReg(), false).getInstr()) |
| 1234 | return true; |
| 1235 | |
| 1236 | MachineInstrBuilder NewMI = |
| 1237 | BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp)) |
| 1238 | .add(*Reg) |
| 1239 | .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc)) |
| 1240 | .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)) |
| 1241 | .addImm(Offset) |
| 1242 | .addImm(0) // cpol |
| 1243 | .addImm(0) // swz |
| 1244 | .cloneMemRefs(*MI); |
| 1245 | |
| 1246 | const MachineOperand *VDataIn = TII->getNamedOperand(*MI, |
| 1247 | AMDGPU::OpName::vdata_in); |
| 1248 | if (VDataIn) |
| 1249 | NewMI.add(*VDataIn); |
| 1250 | return true; |
| 1251 | } |
| 1252 | |
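| | // Pick the SADDR flat-scratch opcode matching the element size, then rewrite |
| | // it to the SV (VGPR address) or ST (no address) form depending on which |
| | // address operands the original opcode carried. |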
| 1253 | static unsigned getFlatScratchSpillOpcode(const SIInstrInfo *TII, |
| 1254 | unsigned LoadStoreOp, |
| 1255 | unsigned EltSize) { |
| 1256 | bool IsStore = TII->get(LoadStoreOp).mayStore(); |
| 1257 | bool HasVAddr = AMDGPU::hasNamedOperand(LoadStoreOp, AMDGPU::OpName::vaddr); |
| 1258 | bool UseST = |
| 1259 | !HasVAddr && !AMDGPU::hasNamedOperand(LoadStoreOp, AMDGPU::OpName::saddr); |
| 1260 | |
| 1261 | switch (EltSize) { |
| 1262 | case 4: |
| 1263 | LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORD_SADDR |
| 1264 | : AMDGPU::SCRATCH_LOAD_DWORD_SADDR; |
| 1265 | break; |
| 1266 | case 8: |
| 1267 | LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORDX2_SADDR |
| 1268 | : AMDGPU::SCRATCH_LOAD_DWORDX2_SADDR; |
| 1269 | break; |
| 1270 | case 12: |
| 1271 | LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORDX3_SADDR |
| 1272 | : AMDGPU::SCRATCH_LOAD_DWORDX3_SADDR; |
| 1273 | break; |
| 1274 | case 16: |
| 1275 | LoadStoreOp = IsStore ? AMDGPU::SCRATCH_STORE_DWORDX4_SADDR |
| 1276 | : AMDGPU::SCRATCH_LOAD_DWORDX4_SADDR; |
| 1277 | break; |
| 1278 | default: |
| 1279 | llvm_unreachable("Unexpected spill load/store size!")::llvm::llvm_unreachable_internal("Unexpected spill load/store size!" , "llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp", 1279); |
| 1280 | } |
| 1281 | |
| 1282 | if (HasVAddr) |
| 1283 | LoadStoreOp = AMDGPU::getFlatScratchInstSVfromSS(LoadStoreOp); |
| 1284 | else if (UseST) |
| 1285 | LoadStoreOp = AMDGPU::getFlatScratchInstSTfromSS(LoadStoreOp); |
| 1286 | |
| 1287 | return LoadStoreOp; |
| 1288 | } |
| 1289 | |
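| | // Expand a spill or reload of a (possibly multi-DWORD) register into a |
| | // sequence of MUBUF or flat-scratch memory instructions, splitting the value |
| | // into EltSize chunks and scavenging an SGPR or VGPR for the offset when the |
| | // immediate offset would be out of range. |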
| 1290 | void SIRegisterInfo::buildSpillLoadStore( |
| 1291 | MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, |
| 1292 | unsigned LoadStoreOp, int Index, Register ValueReg, bool IsKill, |
| 1293 | MCRegister ScratchOffsetReg, int64_t InstOffset, MachineMemOperand *MMO, |
| 1294 | RegScavenger *RS, LivePhysRegs *LiveRegs) const { |
| 1295 | assert((!RS || !LiveRegs) && "Only RS or LiveRegs can be set but not both"); |
| 1296 | |
| 1297 | MachineFunction *MF = MBB.getParent(); |
| 1298 | const SIInstrInfo *TII = ST.getInstrInfo(); |
| 1299 | const MachineFrameInfo &MFI = MF->getFrameInfo(); |
| 1300 | const SIMachineFunctionInfo *FuncInfo = MF->getInfo<SIMachineFunctionInfo>(); |
| 1301 | |
| 1302 | const MCInstrDesc *Desc = &TII->get(LoadStoreOp); |
| 1303 | bool IsStore = Desc->mayStore(); |
| 1304 | bool IsFlat = TII->isFLATScratch(LoadStoreOp); |
| 1305 | |
| 1306 | bool CanClobberSCC = false; |
| 1307 | bool Scavenged = false; |
| 1308 | MCRegister SOffset = ScratchOffsetReg; |
| 1309 | |
| 1310 | const TargetRegisterClass *RC = getRegClassForReg(MF->getRegInfo(), ValueReg); |
| 1311 | // On gfx90a+ AGPR is a regular VGPR acceptable for loads and stores. |
| 1312 | const bool IsAGPR = !ST.hasGFX90AInsts() && isAGPRClass(RC); |
| 1313 | const unsigned RegWidth = AMDGPU::getRegBitWidth(RC->getID()) / 8; |
| 1314 | |
| 1315 | // Always use 4 byte operations for AGPRs because we need to scavenge |
| 1316 | // a temporary VGPR. |
| 1317 | unsigned EltSize = (IsFlat && !IsAGPR) ? std::min(RegWidth, 16u) : 4u; |
| 1318 | unsigned NumSubRegs = RegWidth / EltSize; |
| 1319 | unsigned Size = NumSubRegs * EltSize; |
| 1320 | unsigned RemSize = RegWidth - Size; |
| 1321 | unsigned NumRemSubRegs = RemSize ? 1 : 0; |
| 1322 | int64_t Offset = InstOffset + MFI.getObjectOffset(Index); |
| 1323 | int64_t MaterializedOffset = Offset; |
| 1324 | |
| 1325 | int64_t MaxOffset = Offset + Size + RemSize - EltSize; |
| 1326 | int64_t ScratchOffsetRegDelta = 0; |
| 1327 | |
| 1328 | if (IsFlat && EltSize > 4) { |
| 1329 | LoadStoreOp = getFlatScratchSpillOpcode(TII, LoadStoreOp, EltSize); |
| 1330 | Desc = &TII->get(LoadStoreOp); |
Value stored to 'Desc' is never read | |
| 1331 | } |
| 1332 | |
| 1333 | Align Alignment = MFI.getObjectAlign(Index); |
| 1334 | const MachinePointerInfo &BasePtrInfo = MMO->getPointerInfo(); |
| 1335 | |
| 1336 | assert((IsFlat || ((Offset % EltSize) == 0)) && |
| 1337 | "unexpected VGPR spill offset"); |
| 1338 | |
| 1339 | // Track a VGPR to use for a constant offset we need to materialize. |
| 1340 | Register TmpOffsetVGPR; |
| 1341 | |
| 1342 | // Track a VGPR to use as an intermediate value. |
| 1343 | Register TmpIntermediateVGPR; |
| 1344 | bool UseVGPROffset = false; |
| 1345 | |
| 1346 | // Materialize a VGPR offset required for the given SGPR/VGPR/Immediate |
| 1347 | // combination. |
| 1348 | auto MaterializeVOffset = [&](Register SGPRBase, Register TmpVGPR, |
| 1349 | int64_t VOffset) { |
| 1350 | // We are using a VGPR offset |
| 1351 | if (IsFlat && SGPRBase) { |
| 1352 | // We only have 1 VGPR offset, or 1 SGPR offset. We don't have a free |
| 1353 | // SGPR, so perform the add as vector. |
| 1354 | // We don't need a base SGPR in the kernel. |
| 1355 | |
| 1356 | if (ST.getConstantBusLimit(AMDGPU::V_ADD_U32_e64) >= 2) { |
| 1357 | BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_ADD_U32_e64), TmpVGPR) |
| 1358 | .addReg(SGPRBase) |
| 1359 | .addImm(VOffset) |
| 1360 | .addImm(0); // clamp |
| 1361 | } else { |
| 1362 | BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpVGPR) |
| 1363 | .addReg(SGPRBase); |
| 1364 | BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_ADD_U32_e32), TmpVGPR) |
| 1365 | .addImm(VOffset) |
| 1366 | .addReg(TmpOffsetVGPR); |
| 1367 | } |
| 1368 | } else { |
| 1369 | assert(TmpOffsetVGPR); |
| 1370 | BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpVGPR) |
| 1371 | .addImm(VOffset); |
| 1372 | } |
| 1373 | }; |
| 1374 | |
| 1375 | bool IsOffsetLegal = |
| 1376 | IsFlat ? TII->isLegalFLATOffset(MaxOffset, AMDGPUAS::PRIVATE_ADDRESS, |
| 1377 | SIInstrFlags::FlatScratch) |
| 1378 | : SIInstrInfo::isLegalMUBUFImmOffset(MaxOffset); |
| 1379 | if (!IsOffsetLegal || (IsFlat && !SOffset && !ST.hasFlatScratchSTMode())) { |
| 1380 | SOffset = MCRegister(); |
| 1381 | |
| 1382 | // We don't have access to the register scavenger if this function is called |
| 1383 | // during PEI::scavengeFrameVirtualRegs(), so use LiveRegs in this case. |
| 1384 | // TODO: Clobbering SCC is not necessary for scratch instructions in the |
| 1385 | // entry. |
| 1386 | if (RS) { |
| 1387 | SOffset = RS->scavengeRegisterBackwards(AMDGPU::SGPR_32RegClass, MI, false, 0, false); |
| 1388 | |
| 1389 | // Piggy back on the liveness scan we just did to see if SCC is dead. |
| 1390 | CanClobberSCC = !RS->isRegUsed(AMDGPU::SCC); |
| 1391 | } else if (LiveRegs) { |
| 1392 | CanClobberSCC = !LiveRegs->contains(AMDGPU::SCC); |
| 1393 | for (MCRegister Reg : AMDGPU::SGPR_32RegClass) { |
| 1394 | if (LiveRegs->available(MF->getRegInfo(), Reg)) { |
| 1395 | SOffset = Reg; |
| 1396 | break; |
| 1397 | } |
| 1398 | } |
| 1399 | } |
| 1400 | |
| 1401 | if (ScratchOffsetReg != AMDGPU::NoRegister && !CanClobberSCC) |
| 1402 | SOffset = Register(); |
| 1403 | |
| 1404 | if (!SOffset) { |
| 1405 | UseVGPROffset = true; |
| 1406 | |
| 1407 | if (RS) { |
| 1408 | TmpOffsetVGPR = RS->scavengeRegisterBackwards(AMDGPU::VGPR_32RegClass, MI, false, 0); |
| 1409 | } else { |
| 1410 | assert(LiveRegs); |
| 1411 | for (MCRegister Reg : AMDGPU::VGPR_32RegClass) { |
| 1412 | if (LiveRegs->available(MF->getRegInfo(), Reg)) { |
| 1413 | TmpOffsetVGPR = Reg; |
| 1414 | break; |
| 1415 | } |
| 1416 | } |
| 1417 | } |
| 1418 | |
| 1419 | assert(TmpOffsetVGPR); |
| 1420 | } else if (!SOffset && CanClobberSCC) { |
| 1421 | // There are no free SGPRs, and we are in the process of spilling VGPRs |
| 1422 | // too. Since we need a VGPR in order to spill SGPRs (this is true on |
| 1423 | // SI/CI, and on VI it remains true until we implement spilling using |
| 1424 | // scalar stores), we have no way to free up an SGPR. Our solution here |
| 1425 | // is to add the offset directly to the ScratchOffset or StackPtrOffset |
| 1426 | // register, and then subtract the offset after the spill to return the |
| 1427 | // register to its original value. |
| 1428 | |
| 1429 | // TODO: If we don't have to do an emergency stack slot spill, converting |
| 1430 | // to use the VGPR offset is fewer instructions. |
| 1431 | if (!ScratchOffsetReg) |
| 1432 | ScratchOffsetReg = FuncInfo->getStackPtrOffsetReg(); |
| 1433 | SOffset = ScratchOffsetReg; |
| 1434 | ScratchOffsetRegDelta = Offset; |
| 1435 | } else { |
| 1436 | Scavenged = true; |
| 1437 | } |
| 1438 | |
| 1439 | // We currently only support spilling VGPRs to EltSize boundaries, meaning |
| 1440 | // we can simplify the adjustment of Offset here to just scale with |
| 1441 | // WavefrontSize. |
| 1442 | if (!IsFlat && !UseVGPROffset) |
| 1443 | Offset *= ST.getWavefrontSize(); |
| 1444 | |
| 1445 | if (!UseVGPROffset && !SOffset) |
| 1446 | report_fatal_error("could not scavenge SGPR to spill in entry function"); |
| 1447 | |
| 1448 | if (UseVGPROffset) { |
| 1449 | // We are using a VGPR offset |
| 1450 | MaterializeVOffset(ScratchOffsetReg, TmpOffsetVGPR, Offset); |
| 1451 | } else if (ScratchOffsetReg == AMDGPU::NoRegister) { |
| 1452 | BuildMI(MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), SOffset).addImm(Offset); |
| 1453 | } else { |
| 1454 | assert(Offset != 0); |
| 1455 | auto Add = BuildMI(MBB, MI, DL, TII->get(AMDGPU::S_ADD_I32), SOffset) |
| 1456 | .addReg(ScratchOffsetReg) |
| 1457 | .addImm(Offset); |
| 1458 | Add->getOperand(3).setIsDead(); // Mark SCC as dead. |
| 1459 | } |
| 1460 | |
| 1461 | Offset = 0; |
| 1462 | } |
| 1463 | |
| 1464 | if (IsFlat && SOffset == AMDGPU::NoRegister) { |
| 1465 | assert(AMDGPU::getNamedOperandIdx(LoadStoreOp, AMDGPU::OpName::vaddr) < 0 && |
| 1466 | "Unexpected vaddr for flat scratch with a FI operand"); |
| 1467 | |
| 1468 | if (UseVGPROffset) { |
| 1469 | LoadStoreOp = AMDGPU::getFlatScratchInstSVfromSS(LoadStoreOp); |
| 1470 | } else { |
| 1471 | assert(ST.hasFlatScratchSTMode()); |
| 1472 | LoadStoreOp = AMDGPU::getFlatScratchInstSTfromSS(LoadStoreOp); |
| 1473 | } |
| 1474 | |
| 1475 | Desc = &TII->get(LoadStoreOp); |
| 1476 | } |
| 1477 | |
| 1478 | for (unsigned i = 0, e = NumSubRegs + NumRemSubRegs, RegOffset = 0; i != e; |
| 1479 | ++i, RegOffset += EltSize) { |
| 1480 | if (i == NumSubRegs) { |
| 1481 | EltSize = RemSize; |
| 1482 | LoadStoreOp = getFlatScratchSpillOpcode(TII, LoadStoreOp, EltSize); |
| 1483 | } |
| 1484 | Desc = &TII->get(LoadStoreOp); |
| 1485 | |
| 1486 | if (!IsFlat && UseVGPROffset) { |
| 1487 | int NewLoadStoreOp = IsStore ? getOffenMUBUFStore(LoadStoreOp) |
| 1488 | : getOffenMUBUFLoad(LoadStoreOp); |
| 1489 | Desc = &TII->get(NewLoadStoreOp); |
| 1490 | } |
| 1491 | |
| 1492 | if (UseVGPROffset && TmpOffsetVGPR == TmpIntermediateVGPR) { |
| 1493 | // If we are spilling an AGPR beyond the range of the memory instruction |
| 1494 | // offset and need to use a VGPR offset, we ideally have at least 2 |
| 1495 | // scratch VGPRs. If we don't have a second free VGPR without spilling, |
| 1496 | // recycle the VGPR used for the offset which requires resetting after |
| 1497 | // each subregister. |
| 1498 | |
| 1499 | MaterializeVOffset(ScratchOffsetReg, TmpOffsetVGPR, MaterializedOffset); |
| 1500 | } |
| 1501 | |
| 1502 | unsigned NumRegs = EltSize / 4; |
| 1503 | Register SubReg = e == 1 |
| 1504 | ? ValueReg |
| 1505 | : Register(getSubReg(ValueReg, |
| 1506 | getSubRegFromChannel(RegOffset / 4, NumRegs))); |
| 1507 | |
| 1508 | unsigned SOffsetRegState = 0; |
| 1509 | unsigned SrcDstRegState = getDefRegState(!IsStore); |
| 1510 | const bool IsLastSubReg = i + 1 == e; |
| 1511 | const bool IsFirstSubReg = i == 0; |
| 1512 | if (IsLastSubReg) { |
| 1513 | SOffsetRegState |= getKillRegState(Scavenged); |
| 1514 | // The last implicit use carries the "Kill" flag. |
| 1515 | SrcDstRegState |= getKillRegState(IsKill); |
| 1516 | } |
| 1517 | |
| 1518 | // Make sure the whole register is defined if there are undef components by |
| 1519 | // adding an implicit def of the super-reg on the first instruction. |
| 1520 | bool NeedSuperRegDef = e > 1 && IsStore && IsFirstSubReg; |
| 1521 | bool NeedSuperRegImpOperand = e > 1; |
| 1522 | |
| 1523 | // Remaining element size to spill into memory after some parts of it |
| 1524 | // have been spilled into either AGPRs or VGPRs. |
| 1525 | unsigned RemEltSize = EltSize; |
| 1526 | |
| 1527 | // AGPRs used to spill VGPRs, and vice versa, are allocated in reverse |
| 1528 | // order, starting from the last lane. If a register cannot be completely |
| 1529 | // spilled into another register, this ensures its alignment does not |
| 1530 | // change. For targets with a VGPR alignment requirement this is important |
| 1531 | // when flat scratch is used, as we might otherwise get a scratch_load or |
| 1532 | // scratch_store of an unaligned register. |
| 1533 | for (int LaneS = (RegOffset + EltSize) / 4 - 1, Lane = LaneS, |
| 1534 | LaneE = RegOffset / 4; |
| 1535 | Lane >= LaneE; --Lane) { |
| 1536 | bool IsSubReg = e > 1 || EltSize > 4; |
| 1537 | Register Sub = IsSubReg |
| 1538 | ? Register(getSubReg(ValueReg, getSubRegFromChannel(Lane))) |
| 1539 | : ValueReg; |
| 1540 | auto MIB = spillVGPRtoAGPR(ST, MBB, MI, Index, Lane, Sub, IsKill); |
| 1541 | if (!MIB.getInstr()) |
| 1542 | break; |
| 1543 | if (NeedSuperRegDef || (IsSubReg && IsStore && Lane == LaneS && IsFirstSubReg)) { |
| 1544 | MIB.addReg(ValueReg, RegState::ImplicitDefine); |
| 1545 | NeedSuperRegDef = false; |
| 1546 | } |
| 1547 | if ((IsSubReg || NeedSuperRegImpOperand) && (IsFirstSubReg || IsLastSubReg)) { |
| 1548 | NeedSuperRegImpOperand = true; |
| 1549 | unsigned State = SrcDstRegState; |
| 1550 | if (!IsLastSubReg || (Lane != LaneE)) |
| 1551 | State &= ~RegState::Kill; |
| 1552 | if (!IsFirstSubReg || (Lane != LaneS)) |
| 1553 | State &= ~RegState::Define; |
| 1554 | MIB.addReg(ValueReg, RegState::Implicit | State); |
| 1555 | } |
| 1556 | RemEltSize -= 4; |
| 1557 | } |
| 1558 | |
| 1559 | if (!RemEltSize) // Fully spilled into AGPRs. |
| 1560 | continue; |
| 1561 | |
| 1562 | if (RemEltSize != EltSize) { // Partially spilled to AGPRs |
| 1563 | assert(IsFlat && EltSize > 4); |
| 1564 | |
| 1565 | unsigned NumRegs = RemEltSize / 4; |
| 1566 | SubReg = Register(getSubReg(ValueReg, |
| 1567 | getSubRegFromChannel(RegOffset / 4, NumRegs))); |
| 1568 | unsigned Opc = getFlatScratchSpillOpcode(TII, LoadStoreOp, RemEltSize); |
| 1569 | Desc = &TII->get(Opc); |
| 1570 | } |
| 1571 | |
| 1572 | unsigned FinalReg = SubReg; |
| 1573 | |
| 1574 | if (IsAGPR) { |
| 1575 | assert(EltSize == 4); |
| 1576 | |
| 1577 | if (!TmpIntermediateVGPR) { |
| 1578 | TmpIntermediateVGPR = FuncInfo->getVGPRForAGPRCopy(); |
| 1579 | assert(MF->getRegInfo().isReserved(TmpIntermediateVGPR)); |
| 1580 | } |
| 1581 | if (IsStore) { |
| 1582 | auto AccRead = BuildMI(MBB, MI, DL, |
| 1583 | TII->get(AMDGPU::V_ACCVGPR_READ_B32_e64), |
| 1584 | TmpIntermediateVGPR) |
| 1585 | .addReg(SubReg, getKillRegState(IsKill)); |
| 1586 | if (NeedSuperRegDef) |
| 1587 | AccRead.addReg(ValueReg, RegState::ImplicitDefine); |
| 1588 | AccRead->setAsmPrinterFlag(MachineInstr::ReloadReuse); |
| 1589 | } |
| 1590 | SubReg = TmpIntermediateVGPR; |
| 1591 | } else if (UseVGPROffset) { |
| 1592 | // FIXME: change to scavengeRegisterBackwards() |
| 1593 | if (!TmpOffsetVGPR) { |
| 1594 | TmpOffsetVGPR = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0); |
| 1595 | RS->setRegUsed(TmpOffsetVGPR); |
| 1596 | } |
| 1597 | } |
| 1598 | |
| 1599 | MachinePointerInfo PInfo = BasePtrInfo.getWithOffset(RegOffset); |
| 1600 | MachineMemOperand *NewMMO = |
| 1601 | MF->getMachineMemOperand(PInfo, MMO->getFlags(), RemEltSize, |
| 1602 | commonAlignment(Alignment, RegOffset)); |
| 1603 | |
| 1604 | auto MIB = |
| 1605 | BuildMI(MBB, MI, DL, *Desc) |
| 1606 | .addReg(SubReg, getDefRegState(!IsStore) | getKillRegState(IsKill)); |
| 1607 | |
| 1608 | if (UseVGPROffset) { |
| 1609 | // For an AGPR spill, we reuse the same temp VGPR for the offset and the |
| 1610 | // intermediate accvgpr_write. |
| 1611 | MIB.addReg(TmpOffsetVGPR, getKillRegState(IsLastSubReg && !IsAGPR)); |
| 1612 | } |
| 1613 | |
| 1614 | if (!IsFlat) |
| 1615 | MIB.addReg(FuncInfo->getScratchRSrcReg()); |
| 1616 | |
| 1617 | if (SOffset == AMDGPU::NoRegister) { |
| 1618 | if (!IsFlat) { |
| 1619 | if (UseVGPROffset && ScratchOffsetReg) { |
| 1620 | MIB.addReg(ScratchOffsetReg); |
| 1621 | } else { |
| 1622 | assert(FuncInfo->isEntryFunction()); |
| 1623 | MIB.addImm(0); |
| 1624 | } |
| 1625 | } |
| 1626 | } else { |
| 1627 | MIB.addReg(SOffset, SOffsetRegState); |
| 1628 | } |
| 1629 | MIB.addImm(Offset + RegOffset) |
| 1630 | .addImm(0); // cpol |
| 1631 | if (!IsFlat) |
| 1632 | MIB.addImm(0); // swz |
| 1633 | MIB.addMemOperand(NewMMO); |
| 1634 | |
| 1635 | if (!IsAGPR && NeedSuperRegDef) |
| 1636 | MIB.addReg(ValueReg, RegState::ImplicitDefine); |
| 1637 | |
| 1638 | if (!IsStore && IsAGPR && TmpIntermediateVGPR != AMDGPU::NoRegister) { |
| 1639 | MIB = BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), |
| 1640 | FinalReg) |
| 1641 | .addReg(TmpIntermediateVGPR, RegState::Kill); |
| 1642 | MIB->setAsmPrinterFlag(MachineInstr::ReloadReuse); |
| 1643 | } |
| 1644 | |
| 1645 | if (NeedSuperRegImpOperand && (IsFirstSubReg || IsLastSubReg)) |
| 1646 | MIB.addReg(ValueReg, RegState::Implicit | SrcDstRegState); |
| 1647 | } |
| 1648 | |
| 1649 | if (ScratchOffsetRegDelta != 0) { |
| 1650 | // Subtract the offset we added to the ScratchOffset register. |
| 1651 | BuildMI(MBB, MI, DL, TII->get(AMDGPU::S_ADD_I32), SOffset) |
| 1652 | .addReg(SOffset) |
| 1653 | .addImm(-ScratchOffsetRegDelta); |
| 1654 | } |
| 1655 | } |
| 1656 | |
| 1657 | void SIRegisterInfo::buildVGPRSpillLoadStore(SGPRSpillBuilder &SB, int Index, |
| 1658 | int Offset, bool IsLoad, |
| 1659 | bool IsKill) const { |
| 1660 | // Load/store VGPR |
| 1661 | MachineFrameInfo &FrameInfo = SB.MF.getFrameInfo(); |
| 1662 | assert(FrameInfo.getStackID(Index) != TargetStackID::SGPRSpill); |
| 1663 | |
| 1664 | Register FrameReg = |
| 1665 | FrameInfo.isFixedObjectIndex(Index) && hasBasePointer(SB.MF) |
| 1666 | ? getBaseRegister() |
| 1667 | : getFrameRegister(SB.MF); |
| 1668 | |
| 1669 | Align Alignment = FrameInfo.getObjectAlign(Index); |
| 1670 | MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(SB.MF, Index); |
| 1671 | MachineMemOperand *MMO = SB.MF.getMachineMemOperand( |
| 1672 | PtrInfo, IsLoad ? MachineMemOperand::MOLoad : MachineMemOperand::MOStore, |
| 1673 | SB.EltSize, Alignment); |
| 1674 | |
| 1675 | if (IsLoad) { |
| 1676 | unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_LOAD_DWORD_SADDR |
| 1677 | : AMDGPU::BUFFER_LOAD_DWORD_OFFSET; |
| 1678 | buildSpillLoadStore(*SB.MBB, SB.MI, SB.DL, Opc, Index, SB.TmpVGPR, false, |
| 1679 | FrameReg, Offset * SB.EltSize, MMO, SB.RS); |
| 1680 | } else { |
| 1681 | unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_STORE_DWORD_SADDR |
| 1682 | : AMDGPU::BUFFER_STORE_DWORD_OFFSET; |
| 1683 | buildSpillLoadStore(*SB.MBB, SB.MI, SB.DL, Opc, Index, SB.TmpVGPR, IsKill, |
| 1684 | FrameReg, Offset * SB.EltSize, MMO, SB.RS); |
| 1685 | // This only ever adds one VGPR spill |
| 1686 | SB.MFI.addToSpilledVGPRs(1); |
| 1687 | } |
| 1688 | } |
| 1689 | |
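| | // Lower an SI_SPILL_S*_SAVE pseudo: either write the SGPR lanes into |
| | // reserved VGPR lanes with v_writelane, or pack them into a temporary VGPR |
| | // via SGPRSpillBuilder and store that VGPR to scratch memory. |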
| 1690 | bool SIRegisterInfo::spillSGPR(MachineBasicBlock::iterator MI, int Index, |
| 1691 | RegScavenger *RS, SlotIndexes *Indexes, |
| 1692 | LiveIntervals *LIS, bool OnlyToVGPR) const { |
| 1693 | SGPRSpillBuilder SB(*this, *ST.getInstrInfo(), isWave32, MI, Index, RS); |
| 1694 | |
| 1695 | ArrayRef<SpilledReg> VGPRSpills = SB.MFI.getSGPRSpillToVGPRLanes(Index); |
| 1696 | bool SpillToVGPR = !VGPRSpills.empty(); |
| 1697 | if (OnlyToVGPR && !SpillToVGPR) |
| 1698 | return false; |
| 1699 | |
| 1700 | assert(SpillToVGPR || (SB.SuperReg != SB.MFI.getStackPtrOffsetReg() && |
| 1701 | SB.SuperReg != SB.MFI.getFrameOffsetReg())); |
| 1702 | |
| 1703 | if (SpillToVGPR) { |
| 1704 | |
| 1705 | assert(SB.NumSubRegs == VGPRSpills.size() && |
| 1706 | "Num of VGPR lanes should be equal to num of SGPRs spilled"); |
| 1707 | |
| 1708 | for (unsigned i = 0, e = SB.NumSubRegs; i < e; ++i) { |
| 1709 | Register SubReg = |
| 1710 | SB.NumSubRegs == 1 |
| 1711 | ? SB.SuperReg |
| 1712 | : Register(getSubReg(SB.SuperReg, SB.SplitParts[i])); |
| 1713 | SpilledReg Spill = VGPRSpills[i]; |
| 1714 | |
| 1715 | bool IsFirstSubreg = i == 0; |
| 1716 | bool IsLastSubreg = i == SB.NumSubRegs - 1; |
| 1717 | bool UseKill = SB.IsKill && IsLastSubreg; |
| 1718 | |
| 1719 | |
| 1720 | // Mark the "old value of vgpr" input undef only if this is the first sgpr |
| 1721 | // spill to this specific vgpr in the first basic block. |
| 1722 | auto MIB = BuildMI(*SB.MBB, MI, SB.DL, |
| 1723 | SB.TII.get(AMDGPU::V_WRITELANE_B32), Spill.VGPR) |
| 1724 | .addReg(SubReg, getKillRegState(UseKill)) |
| 1725 | .addImm(Spill.Lane) |
| 1726 | .addReg(Spill.VGPR); |
| 1727 | if (Indexes) { |
| 1728 | if (IsFirstSubreg) |
| 1729 | Indexes->replaceMachineInstrInMaps(*MI, *MIB); |
| 1730 | else |
| 1731 | Indexes->insertMachineInstrInMaps(*MIB); |
| 1732 | } |
| 1733 | |
| 1734 | if (IsFirstSubreg && SB.NumSubRegs > 1) { |
| 1735 | // We may be spilling a super-register which is only partially defined, |
| 1736 | // and need to ensure later spills think the value is defined. |
| 1737 | MIB.addReg(SB.SuperReg, RegState::ImplicitDefine); |
| 1738 | } |
| 1739 | |
| 1740 | if (SB.NumSubRegs > 1 && (IsFirstSubreg || IsLastSubreg)) |
| 1741 | MIB.addReg(SB.SuperReg, getKillRegState(UseKill) | RegState::Implicit); |
| 1742 | |
| 1743 | // FIXME: Since this spills to another register instead of an actual |
| 1744 | // frame index, we should delete the frame index when all references to |
| 1745 | // it are fixed. |
| 1746 | } |
| 1747 | } else { |
| 1748 | SB.prepare(); |
| 1749 | |
| 1750 | // SubReg carries the "Kill" flag when SubReg == SB.SuperReg. |
| 1751 | unsigned SubKillState = getKillRegState((SB.NumSubRegs == 1) && SB.IsKill); |
| 1752 | |
| 1753 | // Per VGPR helper data |
| 1754 | auto PVD = SB.getPerVGPRData(); |
| 1755 | |
| 1756 | for (unsigned Offset = 0; Offset < PVD.NumVGPRs; ++Offset) { |
| 1757 | unsigned TmpVGPRFlags = RegState::Undef; |
| 1758 | |
| 1759 | // Write sub registers into the VGPR |
| 1760 | for (unsigned i = Offset * PVD.PerVGPR, |
| 1761 | e = std::min((Offset + 1) * PVD.PerVGPR, SB.NumSubRegs); |
| 1762 | i < e; ++i) { |
| 1763 | Register SubReg = |
| 1764 | SB.NumSubRegs == 1 |
| 1765 | ? SB.SuperReg |
| 1766 | : Register(getSubReg(SB.SuperReg, SB.SplitParts[i])); |
| 1767 | |
| 1768 | MachineInstrBuilder WriteLane = |
| 1769 | BuildMI(*SB.MBB, MI, SB.DL, SB.TII.get(AMDGPU::V_WRITELANE_B32), |
| 1770 | SB.TmpVGPR) |
| 1771 | .addReg(SubReg, SubKillState) |
| 1772 | .addImm(i % PVD.PerVGPR) |
| 1773 | .addReg(SB.TmpVGPR, TmpVGPRFlags); |
| 1774 | TmpVGPRFlags = 0; |
| 1775 | |
| 1776 | if (Indexes) { |
| 1777 | if (i == 0) |
| 1778 | Indexes->replaceMachineInstrInMaps(*MI, *WriteLane); |
| 1779 | else |
| 1780 | Indexes->insertMachineInstrInMaps(*WriteLane); |
| 1781 | } |
| 1782 | |
| 1783 | // There could be undef components of a spilled super register. |
| 1784 | // TODO: Can we detect this and skip the spill? |
| 1785 | if (SB.NumSubRegs > 1) { |
| 1786 | // The last implicit use of the SB.SuperReg carries the "Kill" flag. |
| 1787 | unsigned SuperKillState = 0; |
| 1788 | if (i + 1 == SB.NumSubRegs) |
| 1789 | SuperKillState |= getKillRegState(SB.IsKill); |
| 1790 | WriteLane.addReg(SB.SuperReg, RegState::Implicit | SuperKillState); |
| 1791 | } |
| 1792 | } |
| 1793 | |
| 1794 | // Write out VGPR |
| 1795 | SB.readWriteTmpVGPR(Offset, /*IsLoad*/ false); |
| 1796 | } |
| 1797 | |
| 1798 | SB.restore(); |
| 1799 | } |
| 1800 | |
| 1801 | MI->eraseFromParent(); |
| 1802 | SB.MFI.addToSpilledSGPRs(SB.NumSubRegs); |
| 1803 | |
| 1804 | if (LIS) |
| 1805 | LIS->removeAllRegUnitsForPhysReg(SB.SuperReg); |
| 1806 | |
| 1807 | return true; |
| 1808 | } |
| 1809 | |
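| | // Mirror of spillSGPR: read the SGPR lanes back with v_readlane, either |
| | // from the reserved VGPR lanes or from a temporary VGPR reloaded from |
| | // scratch memory. |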
| 1810 | bool SIRegisterInfo::restoreSGPR(MachineBasicBlock::iterator MI, int Index, |
| 1811 | RegScavenger *RS, SlotIndexes *Indexes, |
| 1812 | LiveIntervals *LIS, bool OnlyToVGPR) const { |
| 1813 | SGPRSpillBuilder SB(*this, *ST.getInstrInfo(), isWave32, MI, Index, RS); |
| 1814 | |
| 1815 | ArrayRef<SpilledReg> VGPRSpills = SB.MFI.getSGPRSpillToVGPRLanes(Index); |
| 1816 | bool SpillToVGPR = !VGPRSpills.empty(); |
| 1817 | if (OnlyToVGPR && !SpillToVGPR) |
| 1818 | return false; |
| 1819 | |
| 1820 | if (SpillToVGPR) { |
| 1821 | for (unsigned i = 0, e = SB.NumSubRegs; i < e; ++i) { |
| 1822 | Register SubReg = |
| 1823 | SB.NumSubRegs == 1 |
| 1824 | ? SB.SuperReg |
| 1825 | : Register(getSubReg(SB.SuperReg, SB.SplitParts[i])); |
| 1826 | |
| 1827 | SpilledReg Spill = VGPRSpills[i]; |
| 1828 | auto MIB = BuildMI(*SB.MBB, MI, SB.DL, SB.TII.get(AMDGPU::V_READLANE_B32), |
| 1829 | SubReg) |
| 1830 | .addReg(Spill.VGPR) |
| 1831 | .addImm(Spill.Lane); |
| 1832 | if (SB.NumSubRegs > 1 && i == 0) |
| 1833 | MIB.addReg(SB.SuperReg, RegState::ImplicitDefine); |
| 1834 | if (Indexes) { |
| 1835 | if (i == e - 1) |
| 1836 | Indexes->replaceMachineInstrInMaps(*MI, *MIB); |
| 1837 | else |
| 1838 | Indexes->insertMachineInstrInMaps(*MIB); |
| 1839 | } |
| 1840 | } |
| 1841 | } else { |
| 1842 | SB.prepare(); |
| 1843 | |
| 1844 | // Per VGPR helper data |
| 1845 | auto PVD = SB.getPerVGPRData(); |
| 1846 | |
| 1847 | for (unsigned Offset = 0; Offset < PVD.NumVGPRs; ++Offset) { |
| 1848 | // Load in VGPR data |
| 1849 | SB.readWriteTmpVGPR(Offset, /*IsLoad*/ true); |
| 1850 | |
| 1851 | // Unpack lanes |
| 1852 | for (unsigned i = Offset * PVD.PerVGPR, |
| 1853 | e = std::min((Offset + 1) * PVD.PerVGPR, SB.NumSubRegs); |
| 1854 | i < e; ++i) { |
| 1855 | Register SubReg = |
| 1856 | SB.NumSubRegs == 1 |
| 1857 | ? SB.SuperReg |
| 1858 | : Register(getSubReg(SB.SuperReg, SB.SplitParts[i])); |
| 1859 | |
| 1860 | bool LastSubReg = (i + 1 == e); |
| 1861 | auto MIB = BuildMI(*SB.MBB, MI, SB.DL, |
| 1862 | SB.TII.get(AMDGPU::V_READLANE_B32), SubReg) |
| 1863 | .addReg(SB.TmpVGPR, getKillRegState(LastSubReg)) |
| 1864 | .addImm(i); |
| 1865 | if (SB.NumSubRegs > 1 && i == 0) |
| 1866 | MIB.addReg(SB.SuperReg, RegState::ImplicitDefine); |
| 1867 | if (Indexes) { |
| 1868 | if (i == e - 1) |
| 1869 | Indexes->replaceMachineInstrInMaps(*MI, *MIB); |
| 1870 | else |
| 1871 | Indexes->insertMachineInstrInMaps(*MIB); |
| 1872 | } |
| 1873 | } |
| 1874 | } |
| 1875 | |
| 1876 | SB.restore(); |
| 1877 | } |
| 1878 | |
| 1879 | MI->eraseFromParent(); |
| 1880 | |
| 1881 | if (LIS) |
| 1882 | LIS->removeAllRegUnitsForPhysReg(SB.SuperReg); |
| 1883 | |
| 1884 | return true; |
| 1885 | } |
| 1886 | |
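| | // Write the lanes of the given SGPR into a temporary VGPR at MI and read |
| | // them back at the end of RestoreMBB; the temporary VGPR itself is not |
| | // written out to memory in between. |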
| 1887 | bool SIRegisterInfo::spillEmergencySGPR(MachineBasicBlock::iterator MI, |
| 1888 | MachineBasicBlock &RestoreMBB, |
| 1889 | Register SGPR, RegScavenger *RS) const { |
| 1890 | SGPRSpillBuilder SB(*this, *ST.getInstrInfo(), isWave32, MI, SGPR, false, 0, |
| 1891 | RS); |
| 1892 | SB.prepare(); |
| 1893 | // Generate the spill of SGPR to SB.TmpVGPR. |
| 1894 | unsigned SubKillState = getKillRegState((SB.NumSubRegs == 1) && SB.IsKill); |
| 1895 | auto PVD = SB.getPerVGPRData(); |
| 1896 | for (unsigned Offset = 0; Offset < PVD.NumVGPRs; ++Offset) { |
| 1897 | unsigned TmpVGPRFlags = RegState::Undef; |
| 1898 | // Write sub registers into the VGPR |
| 1899 | for (unsigned i = Offset * PVD.PerVGPR, |
| 1900 | e = std::min((Offset + 1) * PVD.PerVGPR, SB.NumSubRegs); |
| 1901 | i < e; ++i) { |
| 1902 | Register SubReg = |
| 1903 | SB.NumSubRegs == 1 |
| 1904 | ? SB.SuperReg |
| 1905 | : Register(getSubReg(SB.SuperReg, SB.SplitParts[i])); |
| 1906 | |
| 1907 | MachineInstrBuilder WriteLane = |
| 1908 | BuildMI(*SB.MBB, MI, SB.DL, SB.TII.get(AMDGPU::V_WRITELANE_B32), |
| 1909 | SB.TmpVGPR) |
| 1910 | .addReg(SubReg, SubKillState) |
| 1911 | .addImm(i % PVD.PerVGPR) |
| 1912 | .addReg(SB.TmpVGPR, TmpVGPRFlags); |
| 1913 | TmpVGPRFlags = 0; |
| 1914 | // There could be undef components of a spilled super register. |
| 1915 | // TODO: Can we detect this and skip the spill? |
| 1916 | if (SB.NumSubRegs > 1) { |
| 1917 | // The last implicit use of the SB.SuperReg carries the "Kill" flag. |
| 1918 | unsigned SuperKillState = 0; |
| 1919 | if (i + 1 == SB.NumSubRegs) |
| 1920 | SuperKillState |= getKillRegState(SB.IsKill); |
| 1921 | WriteLane.addReg(SB.SuperReg, RegState::Implicit | SuperKillState); |
| 1922 | } |
| 1923 | } |
| 1924 | // Don't need to write VGPR out. |
| 1925 | } |
| 1926 | |
| 1927 | // Restore clobbered registers in the specified restore block. |
| 1928 | MI = RestoreMBB.end(); |
| 1929 | SB.setMI(&RestoreMBB, MI); |
| 1930 | // Generate the restore of SGPR from SB.TmpVGPR. |
| 1931 | for (unsigned Offset = 0; Offset < PVD.NumVGPRs; ++Offset) { |
| 1932 | // Don't need to load VGPR in. |
| 1933 | // Unpack lanes |
| 1934 | for (unsigned i = Offset * PVD.PerVGPR, |
| 1935 | e = std::min((Offset + 1) * PVD.PerVGPR, SB.NumSubRegs); |
| 1936 | i < e; ++i) { |
| 1937 | Register SubReg = |
| 1938 | SB.NumSubRegs == 1 |
| 1939 | ? SB.SuperReg |
| 1940 | : Register(getSubReg(SB.SuperReg, SB.SplitParts[i])); |
| 1941 | bool LastSubReg = (i + 1 == e); |
| 1942 | auto MIB = BuildMI(*SB.MBB, MI, SB.DL, SB.TII.get(AMDGPU::V_READLANE_B32), |
| 1943 | SubReg) |
| 1944 | .addReg(SB.TmpVGPR, getKillRegState(LastSubReg)) |
| 1945 | .addImm(i); |
| 1946 | if (SB.NumSubRegs > 1 && i == 0) |
| 1947 | MIB.addReg(SB.SuperReg, RegState::ImplicitDefine); |
| 1948 | } |
| 1949 | } |
| 1950 | SB.restore(); |
| 1951 | |
| 1952 | SB.MFI.addToSpilledSGPRs(SB.NumSubRegs); |
| 1953 | return false; |
| 1954 | } |
| 1955 | |
| 1956 | /// Special case of eliminateFrameIndex. Returns true if the SGPR was spilled to |
| 1957 | /// a VGPR and the stack slot can be safely eliminated when all other users are |
| 1958 | /// handled. |
| 1959 | bool SIRegisterInfo::eliminateSGPRToVGPRSpillFrameIndex( |
| 1960 | MachineBasicBlock::iterator MI, int FI, RegScavenger *RS, |
| 1961 | SlotIndexes *Indexes, LiveIntervals *LIS) const { |
| 1962 | switch (MI->getOpcode()) { |
| 1963 | case AMDGPU::SI_SPILL_S1024_SAVE: |
| 1964 | case AMDGPU::SI_SPILL_S512_SAVE: |
| 1965 | case AMDGPU::SI_SPILL_S384_SAVE: |
| 1966 | case AMDGPU::SI_SPILL_S352_SAVE: |
| 1967 | case AMDGPU::SI_SPILL_S320_SAVE: |
| 1968 | case AMDGPU::SI_SPILL_S288_SAVE: |
| 1969 | case AMDGPU::SI_SPILL_S256_SAVE: |
| 1970 | case AMDGPU::SI_SPILL_S224_SAVE: |
| 1971 | case AMDGPU::SI_SPILL_S192_SAVE: |
| 1972 | case AMDGPU::SI_SPILL_S160_SAVE: |
| 1973 | case AMDGPU::SI_SPILL_S128_SAVE: |
| 1974 | case AMDGPU::SI_SPILL_S96_SAVE: |
| 1975 | case AMDGPU::SI_SPILL_S64_SAVE: |
| 1976 | case AMDGPU::SI_SPILL_S32_SAVE: |
| 1977 | return spillSGPR(MI, FI, RS, Indexes, LIS, true); |
| 1978 | case AMDGPU::SI_SPILL_S1024_RESTORE: |
| 1979 | case AMDGPU::SI_SPILL_S512_RESTORE: |
| 1980 | case AMDGPU::SI_SPILL_S384_RESTORE: |
| 1981 | case AMDGPU::SI_SPILL_S352_RESTORE: |
| 1982 | case AMDGPU::SI_SPILL_S320_RESTORE: |
| 1983 | case AMDGPU::SI_SPILL_S288_RESTORE: |
| 1984 | case AMDGPU::SI_SPILL_S256_RESTORE: |
| 1985 | case AMDGPU::SI_SPILL_S224_RESTORE: |
| 1986 | case AMDGPU::SI_SPILL_S192_RESTORE: |
| 1987 | case AMDGPU::SI_SPILL_S160_RESTORE: |
| 1988 | case AMDGPU::SI_SPILL_S128_RESTORE: |
| 1989 | case AMDGPU::SI_SPILL_S96_RESTORE: |
| 1990 | case AMDGPU::SI_SPILL_S64_RESTORE: |
| 1991 | case AMDGPU::SI_SPILL_S32_RESTORE: |
| 1992 | return restoreSGPR(MI, FI, RS, Indexes, LIS, true); |
| 1993 | default: |
| 1994 | llvm_unreachable("not an SGPR spill instruction")::llvm::llvm_unreachable_internal("not an SGPR spill instruction" , "llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp", 1994); |
| 1995 | } |
| 1996 | } |
| 1997 | |
| 1998 | bool SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI, |
| 1999 | int SPAdj, unsigned FIOperandNum, |
| 2000 | RegScavenger *RS) const { |
| 2001 | MachineFunction *MF = MI->getParent()->getParent(); |
| 2002 | MachineBasicBlock *MBB = MI->getParent(); |
| 2003 | SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); |
| 2004 | MachineFrameInfo &FrameInfo = MF->getFrameInfo(); |
| 2005 | const SIInstrInfo *TII = ST.getInstrInfo(); |
| 2006 | DebugLoc DL = MI->getDebugLoc(); |
| 2007 | |
| 2008 | assert(SPAdj == 0 && "unhandled SP adjustment in call sequence?"); |
| 2009 | |
| 2010 | MachineOperand &FIOp = MI->getOperand(FIOperandNum); |
| 2011 | int Index = MI->getOperand(FIOperandNum).getIndex(); |
| 2012 | |
| 2013 | Register FrameReg = FrameInfo.isFixedObjectIndex(Index) && hasBasePointer(*MF) |
| 2014 | ? getBaseRegister() |
| 2015 | : getFrameRegister(*MF); |
| 2016 | |
| 2017 | switch (MI->getOpcode()) { |
| 2018 | // SGPR register spill |
| 2019 | case AMDGPU::SI_SPILL_S1024_SAVE: |
| 2020 | case AMDGPU::SI_SPILL_S512_SAVE: |
| 2021 | case AMDGPU::SI_SPILL_S384_SAVE: |
| 2022 | case AMDGPU::SI_SPILL_S352_SAVE: |
| 2023 | case AMDGPU::SI_SPILL_S320_SAVE: |
| 2024 | case AMDGPU::SI_SPILL_S288_SAVE: |
| 2025 | case AMDGPU::SI_SPILL_S256_SAVE: |
| 2026 | case AMDGPU::SI_SPILL_S224_SAVE: |
| 2027 | case AMDGPU::SI_SPILL_S192_SAVE: |
| 2028 | case AMDGPU::SI_SPILL_S160_SAVE: |
| 2029 | case AMDGPU::SI_SPILL_S128_SAVE: |
| 2030 | case AMDGPU::SI_SPILL_S96_SAVE: |
| 2031 | case AMDGPU::SI_SPILL_S64_SAVE: |
| 2032 | case AMDGPU::SI_SPILL_S32_SAVE: { |
| 2033 | return spillSGPR(MI, Index, RS); |
| 2034 | } |
| 2035 | |
| 2036 | // SGPR register restore |
| 2037 | case AMDGPU::SI_SPILL_S1024_RESTORE: |
| 2038 | case AMDGPU::SI_SPILL_S512_RESTORE: |
| 2039 | case AMDGPU::SI_SPILL_S384_RESTORE: |
| 2040 | case AMDGPU::SI_SPILL_S352_RESTORE: |
| 2041 | case AMDGPU::SI_SPILL_S320_RESTORE: |
| 2042 | case AMDGPU::SI_SPILL_S288_RESTORE: |
| 2043 | case AMDGPU::SI_SPILL_S256_RESTORE: |
| 2044 | case AMDGPU::SI_SPILL_S224_RESTORE: |
| 2045 | case AMDGPU::SI_SPILL_S192_RESTORE: |
| 2046 | case AMDGPU::SI_SPILL_S160_RESTORE: |
| 2047 | case AMDGPU::SI_SPILL_S128_RESTORE: |
| 2048 | case AMDGPU::SI_SPILL_S96_RESTORE: |
| 2049 | case AMDGPU::SI_SPILL_S64_RESTORE: |
| 2050 | case AMDGPU::SI_SPILL_S32_RESTORE: { |
| 2051 | return restoreSGPR(MI, Index, RS); |
| 2052 | } |
| 2053 | |
| 2054 | // VGPR register spill |
| 2055 | case AMDGPU::SI_SPILL_V1024_SAVE: |
| 2056 | case AMDGPU::SI_SPILL_V512_SAVE: |
| 2057 | case AMDGPU::SI_SPILL_V384_SAVE: |
| 2058 | case AMDGPU::SI_SPILL_V352_SAVE: |
| 2059 | case AMDGPU::SI_SPILL_V320_SAVE: |
| 2060 | case AMDGPU::SI_SPILL_V288_SAVE: |
| 2061 | case AMDGPU::SI_SPILL_V256_SAVE: |
| 2062 | case AMDGPU::SI_SPILL_V224_SAVE: |
| 2063 | case AMDGPU::SI_SPILL_V192_SAVE: |
| 2064 | case AMDGPU::SI_SPILL_V160_SAVE: |
| 2065 | case AMDGPU::SI_SPILL_V128_SAVE: |
| 2066 | case AMDGPU::SI_SPILL_V96_SAVE: |
| 2067 | case AMDGPU::SI_SPILL_V64_SAVE: |
| 2068 | case AMDGPU::SI_SPILL_V32_SAVE: |
| 2069 | case AMDGPU::SI_SPILL_A1024_SAVE: |
| 2070 | case AMDGPU::SI_SPILL_A512_SAVE: |
| 2071 | case AMDGPU::SI_SPILL_A384_SAVE: |
| 2072 | case AMDGPU::SI_SPILL_A352_SAVE: |
| 2073 | case AMDGPU::SI_SPILL_A320_SAVE: |
| 2074 | case AMDGPU::SI_SPILL_A288_SAVE: |
| 2075 | case AMDGPU::SI_SPILL_A256_SAVE: |
| 2076 | case AMDGPU::SI_SPILL_A224_SAVE: |
| 2077 | case AMDGPU::SI_SPILL_A192_SAVE: |
| 2078 | case AMDGPU::SI_SPILL_A160_SAVE: |
| 2079 | case AMDGPU::SI_SPILL_A128_SAVE: |
| 2080 | case AMDGPU::SI_SPILL_A96_SAVE: |
| 2081 | case AMDGPU::SI_SPILL_A64_SAVE: |
| 2082 | case AMDGPU::SI_SPILL_A32_SAVE: |
| 2083 | case AMDGPU::SI_SPILL_AV1024_SAVE: |
| 2084 | case AMDGPU::SI_SPILL_AV512_SAVE: |
| 2085 | case AMDGPU::SI_SPILL_AV384_SAVE: |
| 2086 | case AMDGPU::SI_SPILL_AV352_SAVE: |
| 2087 | case AMDGPU::SI_SPILL_AV320_SAVE: |
| 2088 | case AMDGPU::SI_SPILL_AV288_SAVE: |
| 2089 | case AMDGPU::SI_SPILL_AV256_SAVE: |
| 2090 | case AMDGPU::SI_SPILL_AV224_SAVE: |
| 2091 | case AMDGPU::SI_SPILL_AV192_SAVE: |
| 2092 | case AMDGPU::SI_SPILL_AV160_SAVE: |
| 2093 | case AMDGPU::SI_SPILL_AV128_SAVE: |
| 2094 | case AMDGPU::SI_SPILL_AV96_SAVE: |
| 2095 | case AMDGPU::SI_SPILL_AV64_SAVE: |
| 2096 | case AMDGPU::SI_SPILL_AV32_SAVE: { |
| 2097 | const MachineOperand *VData = TII->getNamedOperand(*MI, |
| 2098 | AMDGPU::OpName::vdata); |
| 2099 | assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg() == |
| 2100 | MFI->getStackPtrOffsetReg()); |
| 2101 | |
| 2102 | unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_STORE_DWORD_SADDR |
| 2103 | : AMDGPU::BUFFER_STORE_DWORD_OFFSET; |
| 2104 | auto *MBB = MI->getParent(); |
| 2105 | buildSpillLoadStore( |
| 2106 | *MBB, MI, DL, Opc, Index, VData->getReg(), VData->isKill(), FrameReg, |
| 2107 | TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(), |
| 2108 | *MI->memoperands_begin(), RS); |
| 2109 | MFI->addToSpilledVGPRs(getNumSubRegsForSpillOp(MI->getOpcode())); |
| 2110 | MI->eraseFromParent(); |
| 2111 | return true; |
| 2112 | } |
| 2113 | case AMDGPU::SI_SPILL_V32_RESTORE: |
| 2114 | case AMDGPU::SI_SPILL_V64_RESTORE: |
| 2115 | case AMDGPU::SI_SPILL_V96_RESTORE: |
| 2116 | case AMDGPU::SI_SPILL_V128_RESTORE: |
| 2117 | case AMDGPU::SI_SPILL_V160_RESTORE: |
| 2118 | case AMDGPU::SI_SPILL_V192_RESTORE: |
| 2119 | case AMDGPU::SI_SPILL_V224_RESTORE: |
| 2120 | case AMDGPU::SI_SPILL_V256_RESTORE: |
| 2121 | case AMDGPU::SI_SPILL_V288_RESTORE: |
| 2122 | case AMDGPU::SI_SPILL_V320_RESTORE: |
| 2123 | case AMDGPU::SI_SPILL_V352_RESTORE: |
| 2124 | case AMDGPU::SI_SPILL_V384_RESTORE: |
| 2125 | case AMDGPU::SI_SPILL_V512_RESTORE: |
| 2126 | case AMDGPU::SI_SPILL_V1024_RESTORE: |
| 2127 | case AMDGPU::SI_SPILL_A32_RESTORE: |
| 2128 | case AMDGPU::SI_SPILL_A64_RESTORE: |
| 2129 | case AMDGPU::SI_SPILL_A96_RESTORE: |
| 2130 | case AMDGPU::SI_SPILL_A128_RESTORE: |
| 2131 | case AMDGPU::SI_SPILL_A160_RESTORE: |
| 2132 | case AMDGPU::SI_SPILL_A192_RESTORE: |
| 2133 | case AMDGPU::SI_SPILL_A224_RESTORE: |
| 2134 | case AMDGPU::SI_SPILL_A256_RESTORE: |
| 2135 | case AMDGPU::SI_SPILL_A288_RESTORE: |
| 2136 | case AMDGPU::SI_SPILL_A320_RESTORE: |
| 2137 | case AMDGPU::SI_SPILL_A352_RESTORE: |
| 2138 | case AMDGPU::SI_SPILL_A384_RESTORE: |
| 2139 | case AMDGPU::SI_SPILL_A512_RESTORE: |
| 2140 | case AMDGPU::SI_SPILL_A1024_RESTORE: |
| 2141 | case AMDGPU::SI_SPILL_AV32_RESTORE: |
| 2142 | case AMDGPU::SI_SPILL_AV64_RESTORE: |
| 2143 | case AMDGPU::SI_SPILL_AV96_RESTORE: |
| 2144 | case AMDGPU::SI_SPILL_AV128_RESTORE: |
| 2145 | case AMDGPU::SI_SPILL_AV160_RESTORE: |
| 2146 | case AMDGPU::SI_SPILL_AV192_RESTORE: |
| 2147 | case AMDGPU::SI_SPILL_AV224_RESTORE: |
| 2148 | case AMDGPU::SI_SPILL_AV256_RESTORE: |
| 2149 | case AMDGPU::SI_SPILL_AV288_RESTORE: |
| 2150 | case AMDGPU::SI_SPILL_AV320_RESTORE: |
| 2151 | case AMDGPU::SI_SPILL_AV352_RESTORE: |
| 2152 | case AMDGPU::SI_SPILL_AV384_RESTORE: |
| 2153 | case AMDGPU::SI_SPILL_AV512_RESTORE: |
| 2154 | case AMDGPU::SI_SPILL_AV1024_RESTORE: { |
| 2155 | const MachineOperand *VData = TII->getNamedOperand(*MI, |
| 2156 | AMDGPU::OpName::vdata); |
| 2157 | assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg() == |
| 2158 | MFI->getStackPtrOffsetReg()); |
| 2159 | |
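| | // Restore path mirrors the save case above, using per-dword scratch/buffer loads. |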
| 2160 | unsigned Opc = ST.enableFlatScratch() ? AMDGPU::SCRATCH_LOAD_DWORD_SADDR |
| 2161 | : AMDGPU::BUFFER_LOAD_DWORD_OFFSET; |
| 2162 | auto *MBB = MI->getParent(); |
| 2163 | buildSpillLoadStore( |
| 2164 | *MBB, MI, DL, Opc, Index, VData->getReg(), VData->isKill(), FrameReg, |
| 2165 | TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(), |
| 2166 | *MI->memoperands_begin(), RS); |
| 2167 | MI->eraseFromParent(); |
| 2168 | return true; |
| 2169 | } |
| 2170 | |
| 2171 | default: { |
| 2172 | // Other access to frame index |
| 2173 | const DebugLoc &DL = MI->getDebugLoc(); |
| 2174 | |
| 2175 | int64_t Offset = FrameInfo.getObjectOffset(Index); |
| 2176 | if (ST.enableFlatScratch()) { |
| 2177 | if (TII->isFLATScratch(*MI)) { |
| 2178 | assert((int16_t)FIOperandNum == |
| 2179 | AMDGPU::getNamedOperandIdx(MI->getOpcode(), |
| 2180 | AMDGPU::OpName::saddr)); |
| 2181 | |
| 2182 | // The offset is always swizzled; just replace it. |
| 2183 | if (FrameReg) |
| 2184 | FIOp.ChangeToRegister(FrameReg, false); |
| 2185 | |
| 2186 | if (!Offset) |
| 2187 | return false; |
| 2188 | |
| 2189 | MachineOperand *OffsetOp = |
| 2190 | TII->getNamedOperand(*MI, AMDGPU::OpName::offset); |
| 2191 | int64_t NewOffset = Offset + OffsetOp->getImm(); |
| 2192 | if (TII->isLegalFLATOffset(NewOffset, AMDGPUAS::PRIVATE_ADDRESS, |
| 2193 | SIInstrFlags::FlatScratch)) { |
| 2194 | OffsetOp->setImm(NewOffset); |
| 2195 | if (FrameReg) |
| 2196 | return false; |
| 2197 | Offset = 0; |
| 2198 | } |
| 2199 | |
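| | // If the offset fully folded away (or was already zero), the saddr operand is |
| | // redundant; try switching to the SV/ST instruction form and dropping it. |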
| 2200 | if (!Offset) { |
| 2201 | unsigned Opc = MI->getOpcode(); |
| 2202 | int NewOpc = -1; |
| 2203 | if (AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::vaddr)) { |
| 2204 | NewOpc = AMDGPU::getFlatScratchInstSVfromSVS(Opc); |
| 2205 | } else if (ST.hasFlatScratchSTMode()) { |
| 2206 | // On GFX10 we have ST mode to use no registers for an address. |
| 2207 | // Otherwise we need to materialize 0 into an SGPR. |
| 2208 | NewOpc = AMDGPU::getFlatScratchInstSTfromSS(Opc); |
| 2209 | } |
| 2210 | |
| 2211 | if (NewOpc != -1) { |
| 2212 | // removeOperand doesn't fix up tied operand indexes as it goes, so |
| 2213 | // it asserts. Untie vdst_in for now and retie it afterwards. |
| 2214 | int VDstIn = AMDGPU::getNamedOperandIdx(Opc, |
| 2215 | AMDGPU::OpName::vdst_in); |
| 2216 | bool TiedVDst = VDstIn != -1 && |
| 2217 | MI->getOperand(VDstIn).isReg() && |
| 2218 | MI->getOperand(VDstIn).isTied(); |
| 2219 | if (TiedVDst) |
| 2220 | MI->untieRegOperand(VDstIn); |
| 2221 | |
| 2222 | MI->removeOperand( |
| 2223 | AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::saddr)); |
| 2224 | |
| 2225 | if (TiedVDst) { |
| 2226 | int NewVDst = |
| 2227 | AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::vdst); |
| 2228 | int NewVDstIn = |
| 2229 | AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::vdst_in); |
| 2230 | assert(NewVDst != -1 && NewVDstIn != -1 && "Must be tied!"); |
| 2231 | MI->tieOperands(NewVDst, NewVDstIn); |
| 2232 | } |
| 2233 | MI->setDesc(TII->get(NewOpc)); |
| 2234 | return false; |
| 2235 | } |
| 2236 | } |
| 2237 | } |
| 2238 | |
| 2239 | if (!FrameReg) { |
| 2240 | FIOp.ChangeToImmediate(Offset); |
| 2241 | if (TII->isImmOperandLegal(*MI, FIOperandNum, FIOp)) |
| 2242 | return false; |
| 2243 | } |
| 2244 | |
| 2245 | // We need to use a register here. Check whether we can use an SGPR or need |
| 2246 | // a VGPR. |
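| | // M0 is only a placeholder SGPR here, used to probe whether this operand |
| | // position accepts an SGPR at all before scavenging a real one. |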
| 2247 | FIOp.ChangeToRegister(AMDGPU::M0, false); |
| 2248 | bool UseSGPR = TII->isOperandLegal(*MI, FIOperandNum, &FIOp); |
| 2249 | |
| 2250 | if (!Offset && FrameReg && UseSGPR) { |
| 2251 | FIOp.setReg(FrameReg); |
| 2252 | return false; |
| 2253 | } |
| 2254 | |
| 2255 | const TargetRegisterClass *RC = UseSGPR ? &AMDGPU::SReg_32_XM0RegClass |
| 2256 | : &AMDGPU::VGPR_32RegClass; |
| 2257 | |
| 2258 | Register TmpReg = RS->scavengeRegister(RC, MI, 0, !UseSGPR); |
| 2259 | FIOp.setReg(TmpReg); |
| 2260 | FIOp.setIsKill(); |
| 2261 | |
| 2262 | if ((!FrameReg || !Offset) && TmpReg) { |
| 2263 | unsigned Opc = UseSGPR ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32; |
| 2264 | auto MIB = BuildMI(*MBB, MI, DL, TII->get(Opc), TmpReg); |
| 2265 | if (FrameReg) |
| 2266 | MIB.addReg(FrameReg); |
| 2267 | else |
| 2268 | MIB.addImm(Offset); |
| 2269 | |
| 2270 | return false; |
| 2271 | } |
| 2272 | |
| 2273 | bool NeedSaveSCC = |
| 2274 | RS->isRegUsed(AMDGPU::SCC) && !MI->definesRegister(AMDGPU::SCC); |
| 2275 | |
| 2276 | Register TmpSReg = |
| 2277 | UseSGPR ? TmpReg |
| 2278 | : RS->scavengeRegister(&AMDGPU::SReg_32_XM0RegClass, MI, 0, |
| 2279 | !UseSGPR); |
| 2280 | |
| 2281 | // TODO: for flat scratch another attempt can be made with a VGPR index |
| 2282 | // if no SGPRs can be scavenged. |
| 2283 | if ((!TmpSReg && !FrameReg) || (!TmpReg && !UseSGPR)) |
| 2284 | report_fatal_error("Cannot scavenge register in FI elimination!"); |
| 2285 | |
| 2286 | if (!TmpSReg) { |
| 2287 | // Use frame register and restore it after. |
| 2288 | TmpSReg = FrameReg; |
| 2289 | FIOp.setReg(FrameReg); |
| 2290 | FIOp.setIsKill(false); |
| 2291 | } |
| 2292 | |
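| | // When SCC is live, preserve it across the add: S_ADDC_U32 folds the current |
| | // SCC value into bit 0 of the result (the offset is known to be even), |
| | // S_BITCMP1_B32 reads that bit back into SCC, and S_BITSET0_B32 clears it. |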
| 2293 | if (NeedSaveSCC) { |
| 2294 | assert(!(Offset & 0x1) && "Flat scratch offset must be aligned!"); |
| 2295 | BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADDC_U32), TmpSReg) |
| 2296 | .addReg(FrameReg) |
| 2297 | .addImm(Offset); |
| 2298 | BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_BITCMP1_B32)) |
| 2299 | .addReg(TmpSReg) |
| 2300 | .addImm(0); |
| 2301 | BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_BITSET0_B32), TmpSReg) |
| 2302 | .addImm(0) |
| 2303 | .addReg(TmpSReg); |
| 2304 | } else { |
| 2305 | BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_I32), TmpSReg) |
| 2306 | .addReg(FrameReg) |
| 2307 | .addImm(Offset); |
| 2308 | } |
| 2309 | |
| 2310 | if (!UseSGPR) |
| 2311 | BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg) |
| 2312 | .addReg(TmpSReg, RegState::Kill); |
| 2313 | |
| 2314 | if (TmpSReg == FrameReg) { |
| 2315 | // Undo frame register modification. |
| 2316 | if (NeedSaveSCC && !MI->registerDefIsDead(AMDGPU::SCC)) { |
| 2317 | MachineBasicBlock::iterator I = |
| 2318 | BuildMI(*MBB, std::next(MI), DL, TII->get(AMDGPU::S_ADDC_U32), |
| 2319 | TmpSReg) |
| 2320 | .addReg(FrameReg) |
| 2321 | .addImm(-Offset); |
| 2322 | I = BuildMI(*MBB, std::next(I), DL, TII->get(AMDGPU::S_BITCMP1_B32)) |
| 2323 | .addReg(TmpSReg) |
| 2324 | .addImm(0); |
| 2325 | BuildMI(*MBB, std::next(I), DL, TII->get(AMDGPU::S_BITSET0_B32), |
| 2326 | TmpSReg) |
| 2327 | .addImm(0) |
| 2328 | .addReg(TmpSReg); |
| 2329 | } else { |
| 2330 | BuildMI(*MBB, std::next(MI), DL, TII->get(AMDGPU::S_ADD_I32), |
| 2331 | FrameReg) |
| 2332 | .addReg(FrameReg) |
| 2333 | .addImm(-Offset); |
| 2334 | } |
| 2335 | } |
| 2336 | |
| 2337 | return false; |
| 2338 | } |
| 2339 | |
| 2340 | bool IsMUBUF = TII->isMUBUF(*MI); |
| 2341 | |
| 2342 | if (!IsMUBUF && !MFI->isEntryFunction()) { |
| 2343 | // Convert to a swizzled stack address by scaling by the wave size. |
| 2344 | // In an entry function/kernel the offset is already swizzled. |
| 2345 | bool IsSALU = isSGPRClass(TII->getOpRegClass(*MI, FIOperandNum)); |
| 2346 | bool LiveSCC = |
| 2347 | RS->isRegUsed(AMDGPU::SCC) && !MI->definesRegister(AMDGPU::SCC); |
| 2348 | const TargetRegisterClass *RC = IsSALU && !LiveSCC |
| 2349 | ? &AMDGPU::SReg_32RegClass |
| 2350 | : &AMDGPU::VGPR_32RegClass; |
| 2351 | bool IsCopy = MI->getOpcode() == AMDGPU::V_MOV_B32_e32 || |
| 2352 | MI->getOpcode() == AMDGPU::V_MOV_B32_e64; |
| 2353 | Register ResultReg = IsCopy ? MI->getOperand(0).getReg() |
| 2354 | : RS->scavengeRegister(RC, MI, 0); |
| 2355 | |
| 2356 | int64_t Offset = FrameInfo.getObjectOffset(Index); |
| 2357 | if (Offset == 0) { |
| 2358 | unsigned OpCode = IsSALU && !LiveSCC ? AMDGPU::S_LSHR_B32 |
| 2359 | : AMDGPU::V_LSHRREV_B32_e64; |
| 2360 | // XXX - This never happens because of emergency scavenging slot at 0? |
| 2361 | auto Shift = BuildMI(*MBB, MI, DL, TII->get(OpCode), ResultReg) |
| 2362 | .addImm(ST.getWavefrontSizeLog2()) |
| 2363 | .addReg(FrameReg); |
| 2364 | if (IsSALU && !LiveSCC) |
| 2365 | Shift.getInstr()->getOperand(3).setIsDead(); // Mark SCC as dead. |
| 2366 | if (IsSALU && LiveSCC) { |
| 2367 | Register NewDest = |
| 2368 | RS->scavengeRegister(&AMDGPU::SReg_32RegClass, Shift, 0); |
| 2369 | BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), |
| 2370 | NewDest) |
| 2371 | .addReg(ResultReg); |
| 2372 | ResultReg = NewDest; |
| 2373 | } |
| 2374 | } else { |
| 2375 | MachineInstrBuilder MIB; |
| 2376 | if (!IsSALU) { |
| 2377 | if ((MIB = TII->getAddNoCarry(*MBB, MI, DL, ResultReg, *RS)) != |
| 2378 | nullptr) { |
| 2379 | // Reuse ResultReg in intermediate step. |
| 2380 | Register ScaledReg = ResultReg; |
| 2381 | |
| 2382 | BuildMI(*MBB, *MIB, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64), |
| 2383 | ScaledReg) |
| 2384 | .addImm(ST.getWavefrontSizeLog2()) |
| 2385 | .addReg(FrameReg); |
| 2386 | |
| 2387 | const bool IsVOP2 = MIB->getOpcode() == AMDGPU::V_ADD_U32_e32; |
| 2388 | |
| 2389 | // TODO: Fold if use instruction is another add of a constant. |
| 2390 | if (IsVOP2 || AMDGPU::isInlinableLiteral32(Offset, ST.hasInv2PiInlineImm())) { |
| 2391 | // FIXME: This can fail |
| 2392 | MIB.addImm(Offset); |
| 2393 | MIB.addReg(ScaledReg, RegState::Kill); |
| 2394 | if (!IsVOP2) |
| 2395 | MIB.addImm(0); // clamp bit |
| 2396 | } else { |
| 2397 | assert(MIB->getOpcode() == AMDGPU::V_ADD_CO_U32_e64 && |
| 2398 | "Need to reuse carry out register"); |
| 2399 | |
| 2400 | // Use scavenged unused carry out as offset register. |
| 2401 | Register ConstOffsetReg; |
| 2402 | if (!isWave32) |
| 2403 | ConstOffsetReg = getSubReg(MIB.getReg(1), AMDGPU::sub0); |
| 2404 | else |
| 2405 | ConstOffsetReg = MIB.getReg(1); |
| 2406 | |
| 2407 | BuildMI(*MBB, *MIB, DL, TII->get(AMDGPU::S_MOV_B32), ConstOffsetReg) |
| 2408 | .addImm(Offset); |
| 2409 | MIB.addReg(ConstOffsetReg, RegState::Kill); |
| 2410 | MIB.addReg(ScaledReg, RegState::Kill); |
| 2411 | MIB.addImm(0); // clamp bit |
| 2412 | } |
| 2413 | } |
| 2414 | } |
| 2415 | if (!MIB || IsSALU) { |
| 2416 | // We have to produce a carry out, and there isn't a free SGPR pair |
| 2417 | // for it. We can keep the whole computation on the SALU to avoid |
| 2418 | // clobbering an additional register at the cost of an extra mov. |
| 2419 | |
| 2420 | // We may have 1 free scratch SGPR even though a carry out is |
| 2421 | // unavailable. Only one additional mov is needed. |
| 2422 | Register TmpScaledReg = |
| 2423 | RS->scavengeRegister(&AMDGPU::SReg_32_XM0RegClass, MI, 0, false); |
| 2424 | Register ScaledReg = TmpScaledReg.isValid() ? TmpScaledReg : FrameReg; |
| 2425 | |
| 2426 | BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_LSHR_B32), ScaledReg) |
| 2427 | .addReg(FrameReg) |
| 2428 | .addImm(ST.getWavefrontSizeLog2()); |
| 2429 | BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_I32), ScaledReg) |
| 2430 | .addReg(ScaledReg, RegState::Kill) |
| 2431 | .addImm(Offset); |
| 2432 | if (!IsSALU) |
| 2433 | BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), ResultReg) |
| 2434 | .addReg(ScaledReg, RegState::Kill); |
| 2435 | else |
| 2436 | ResultReg = ScaledReg; |
| 2437 | |
| 2438 | // If there were truly no free SGPRs, we need to undo everything. |
| 2439 | if (!TmpScaledReg.isValid()) { |
| 2440 | BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_I32), ScaledReg) |
| 2441 | .addReg(ScaledReg, RegState::Kill) |
| 2442 | .addImm(-Offset); |
| 2443 | BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_LSHL_B32), ScaledReg) |
| 2444 | .addReg(FrameReg) |
| 2445 | .addImm(ST.getWavefrontSizeLog2()); |
| 2446 | } |
| 2447 | } |
| 2448 | } |
| 2449 | |
| 2450 | // Don't introduce an extra copy if we're just materializing in a mov. |
| 2451 | if (IsCopy) { |
| 2452 | MI->eraseFromParent(); |
| 2453 | return true; |
| 2454 | } |
| 2455 | FIOp.ChangeToRegister(ResultReg, false, false, true); |
| 2456 | return false; |
| 2457 | } |
| 2458 | |
| 2459 | if (IsMUBUF) { |
| 2460 | // Disable offen so we don't need a 0 vgpr base. |
| 2461 | assert(static_cast<int>(FIOperandNum) == |
| 2462 | AMDGPU::getNamedOperandIdx(MI->getOpcode(), |
| 2463 | AMDGPU::OpName::vaddr)); |
| 2464 | |
| 2465 | auto &SOffset = *TII->getNamedOperand(*MI, AMDGPU::OpName::soffset); |
| 2466 | assert((SOffset.isImm() && SOffset.getImm() == 0)); |
| 2467 | |
| 2468 | if (FrameReg != AMDGPU::NoRegister) |
| 2469 | SOffset.ChangeToRegister(FrameReg, false); |
| 2470 | |
| 2471 | int64_t Offset = FrameInfo.getObjectOffset(Index); |
| 2472 | int64_t OldImm |
| 2473 | = TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(); |
| 2474 | int64_t NewOffset = OldImm + Offset; |
| 2475 | |
| 2476 | if (SIInstrInfo::isLegalMUBUFImmOffset(NewOffset) && |
| 2477 | buildMUBUFOffsetLoadStore(ST, FrameInfo, MI, Index, NewOffset)) { |
| 2478 | MI->eraseFromParent(); |
| 2479 | return true; |
| 2480 | } |
| 2481 | } |
| 2482 | |
| 2483 | // If the offset is simply too big, don't convert to a scratch wave offset |
| 2484 | // relative index. |
| 2485 | |
| 2486 | FIOp.ChangeToImmediate(Offset); |
| 2487 | if (!TII->isImmOperandLegal(*MI, FIOperandNum, FIOp)) { |
| 2488 | Register TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0); |
| 2489 | BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg) |
| 2490 | .addImm(Offset); |
| 2491 | FIOp.ChangeToRegister(TmpReg, false, false, true); |
| 2492 | } |
| 2493 | } |
| 2494 | } |
| 2495 | return false; |
| 2496 | } |
| 2497 | |
| 2498 | StringRef SIRegisterInfo::getRegAsmName(MCRegister Reg) const { |
| 2499 | return AMDGPUInstPrinter::getRegisterName(Reg); |
| 2500 | } |
| 2501 | |
| 2502 | static const TargetRegisterClass * |
| 2503 | getAnyVGPRClassForBitWidth(unsigned BitWidth) { |
| 2504 | if (BitWidth == 64) |
| 2505 | return &AMDGPU::VReg_64RegClass; |
| 2506 | if (BitWidth == 96) |
| 2507 | return &AMDGPU::VReg_96RegClass; |
| 2508 | if (BitWidth == 128) |
| 2509 | return &AMDGPU::VReg_128RegClass; |
| 2510 | if (BitWidth == 160) |
| 2511 | return &AMDGPU::VReg_160RegClass; |
| 2512 | if (BitWidth == 192) |
| 2513 | return &AMDGPU::VReg_192RegClass; |
| 2514 | if (BitWidth == 224) |
| 2515 | return &AMDGPU::VReg_224RegClass; |
| 2516 | if (BitWidth == 256) |
| 2517 | return &AMDGPU::VReg_256RegClass; |
| 2518 | if (BitWidth == 288) |
| 2519 | return &AMDGPU::VReg_288RegClass; |
| 2520 | if (BitWidth == 320) |
| 2521 | return &AMDGPU::VReg_320RegClass; |
| 2522 | if (BitWidth == 352) |
| 2523 | return &AMDGPU::VReg_352RegClass; |
| 2524 | if (BitWidth == 384) |
| 2525 | return &AMDGPU::VReg_384RegClass; |
| 2526 | if (BitWidth == 512) |
| 2527 | return &AMDGPU::VReg_512RegClass; |
| 2528 | if (BitWidth == 1024) |
| 2529 | return &AMDGPU::VReg_1024RegClass; |
| 2530 | |
| 2531 | return nullptr; |
| 2532 | } |
| 2533 | |
| 2534 | static const TargetRegisterClass * |
| 2535 | getAlignedVGPRClassForBitWidth(unsigned BitWidth) { |
| 2536 | if (BitWidth == 64) |
| 2537 | return &AMDGPU::VReg_64_Align2RegClass; |
| 2538 | if (BitWidth == 96) |
| 2539 | return &AMDGPU::VReg_96_Align2RegClass; |
| 2540 | if (BitWidth == 128) |
| 2541 | return &AMDGPU::VReg_128_Align2RegClass; |
| 2542 | if (BitWidth == 160) |
| 2543 | return &AMDGPU::VReg_160_Align2RegClass; |
| 2544 | if (BitWidth == 192) |
| 2545 | return &AMDGPU::VReg_192_Align2RegClass; |
| 2546 | if (BitWidth == 224) |
| 2547 | return &AMDGPU::VReg_224_Align2RegClass; |
| 2548 | if (BitWidth == 256) |
| 2549 | return &AMDGPU::VReg_256_Align2RegClass; |
| 2550 | if (BitWidth == 288) |
| 2551 | return &AMDGPU::VReg_288_Align2RegClass; |
| 2552 | if (BitWidth == 320) |
| 2553 | return &AMDGPU::VReg_320_Align2RegClass; |
| 2554 | if (BitWidth == 352) |
| 2555 | return &AMDGPU::VReg_352_Align2RegClass; |
| 2556 | if (BitWidth == 384) |
| 2557 | return &AMDGPU::VReg_384_Align2RegClass; |
| 2558 | if (BitWidth == 512) |
| 2559 | return &AMDGPU::VReg_512_Align2RegClass; |
| 2560 | if (BitWidth == 1024) |
| 2561 | return &AMDGPU::VReg_1024_Align2RegClass; |
| 2562 | |
| 2563 | return nullptr; |
| 2564 | } |
| 2565 | |
| 2566 | const TargetRegisterClass * |
| 2567 | SIRegisterInfo::getVGPRClassForBitWidth(unsigned BitWidth) const { |
| 2568 | if (BitWidth == 1) |
| 2569 | return &AMDGPU::VReg_1RegClass; |
| 2570 | if (BitWidth == 16) |
| 2571 | return &AMDGPU::VGPR_LO16RegClass; |
| 2572 | if (BitWidth == 32) |
| 2573 | return &AMDGPU::VGPR_32RegClass; |
| 2574 | return ST.needsAlignedVGPRs() ? getAlignedVGPRClassForBitWidth(BitWidth) |
| 2575 | : getAnyVGPRClassForBitWidth(BitWidth); |
| 2576 | } |
| 2577 | |
| 2578 | static const TargetRegisterClass * |
| 2579 | getAnyAGPRClassForBitWidth(unsigned BitWidth) { |
| 2580 | if (BitWidth == 64) |
| 2581 | return &AMDGPU::AReg_64RegClass; |
| 2582 | if (BitWidth == 96) |
| 2583 | return &AMDGPU::AReg_96RegClass; |
| 2584 | if (BitWidth == 128) |
| 2585 | return &AMDGPU::AReg_128RegClass; |
| 2586 | if (BitWidth == 160) |
| 2587 | return &AMDGPU::AReg_160RegClass; |
| 2588 | if (BitWidth == 192) |
| 2589 | return &AMDGPU::AReg_192RegClass; |
| 2590 | if (BitWidth == 224) |
| 2591 | return &AMDGPU::AReg_224RegClass; |
| 2592 | if (BitWidth == 256) |
| 2593 | return &AMDGPU::AReg_256RegClass; |
| 2594 | if (BitWidth == 288) |
| 2595 | return &AMDGPU::AReg_288RegClass; |
| 2596 | if (BitWidth == 320) |
| 2597 | return &AMDGPU::AReg_320RegClass; |
| 2598 | if (BitWidth == 352) |
| 2599 | return &AMDGPU::AReg_352RegClass; |
| 2600 | if (BitWidth == 384) |
| 2601 | return &AMDGPU::AReg_384RegClass; |
| 2602 | if (BitWidth == 512) |
| 2603 | return &AMDGPU::AReg_512RegClass; |
| 2604 | if (BitWidth == 1024) |
| 2605 | return &AMDGPU::AReg_1024RegClass; |
| 2606 | |
| 2607 | return nullptr; |
| 2608 | } |
| 2609 | |
| 2610 | static const TargetRegisterClass * |
| 2611 | getAlignedAGPRClassForBitWidth(unsigned BitWidth) { |
| 2612 | if (BitWidth == 64) |
| 2613 | return &AMDGPU::AReg_64_Align2RegClass; |
| 2614 | if (BitWidth == 96) |
| 2615 | return &AMDGPU::AReg_96_Align2RegClass; |
| 2616 | if (BitWidth == 128) |
| 2617 | return &AMDGPU::AReg_128_Align2RegClass; |
| 2618 | if (BitWidth == 160) |
| 2619 | return &AMDGPU::AReg_160_Align2RegClass; |
| 2620 | if (BitWidth == 192) |
| 2621 | return &AMDGPU::AReg_192_Align2RegClass; |
| 2622 | if (BitWidth == 224) |
| 2623 | return &AMDGPU::AReg_224_Align2RegClass; |
| 2624 | if (BitWidth == 256) |
| 2625 | return &AMDGPU::AReg_256_Align2RegClass; |
| 2626 | if (BitWidth == 288) |
| 2627 | return &AMDGPU::AReg_288_Align2RegClass; |
| 2628 | if (BitWidth == 320) |
| 2629 | return &AMDGPU::AReg_320_Align2RegClass; |
| 2630 | if (BitWidth == 352) |
| 2631 | return &AMDGPU::AReg_352_Align2RegClass; |
| 2632 | if (BitWidth == 384) |
| 2633 | return &AMDGPU::AReg_384_Align2RegClass; |
| 2634 | if (BitWidth == 512) |
| 2635 | return &AMDGPU::AReg_512_Align2RegClass; |
| 2636 | if (BitWidth == 1024) |
| 2637 | return &AMDGPU::AReg_1024_Align2RegClass; |
| 2638 | |
| 2639 | return nullptr; |
| 2640 | } |
| 2641 | |
| 2642 | const TargetRegisterClass * |
| 2643 | SIRegisterInfo::getAGPRClassForBitWidth(unsigned BitWidth) const { |
| 2644 | if (BitWidth == 16) |
| 2645 | return &AMDGPU::AGPR_LO16RegClass; |
| 2646 | if (BitWidth == 32) |
| 2647 | return &AMDGPU::AGPR_32RegClass; |
| 2648 | return ST.needsAlignedVGPRs() ? getAlignedAGPRClassForBitWidth(BitWidth) |
| 2649 | : getAnyAGPRClassForBitWidth(BitWidth); |
| 2650 | } |
| 2651 | |
| 2652 | static const TargetRegisterClass * |
| 2653 | getAnyVectorSuperClassForBitWidth(unsigned BitWidth) { |
| 2654 | if (BitWidth == 64) |
| 2655 | return &AMDGPU::AV_64RegClass; |
| 2656 | if (BitWidth == 96) |
| 2657 | return &AMDGPU::AV_96RegClass; |
| 2658 | if (BitWidth == 128) |
| 2659 | return &AMDGPU::AV_128RegClass; |
| 2660 | if (BitWidth == 160) |
| 2661 | return &AMDGPU::AV_160RegClass; |
| 2662 | if (BitWidth == 192) |
| 2663 | return &AMDGPU::AV_192RegClass; |
| 2664 | if (BitWidth == 224) |
| 2665 | return &AMDGPU::AV_224RegClass; |
| 2666 | if (BitWidth == 256) |
| 2667 | return &AMDGPU::AV_256RegClass; |
| 2668 | if (BitWidth == 288) |
| 2669 | return &AMDGPU::AV_288RegClass; |
| 2670 | if (BitWidth == 320) |
| 2671 | return &AMDGPU::AV_320RegClass; |
| 2672 | if (BitWidth == 352) |
| 2673 | return &AMDGPU::AV_352RegClass; |
| 2674 | if (BitWidth == 384) |
| 2675 | return &AMDGPU::AV_384RegClass; |
| 2676 | if (BitWidth == 512) |
| 2677 | return &AMDGPU::AV_512RegClass; |
| 2678 | if (BitWidth == 1024) |
| 2679 | return &AMDGPU::AV_1024RegClass; |
| 2680 | |
| 2681 | return nullptr; |
| 2682 | } |
| 2683 | |
| 2684 | static const TargetRegisterClass * |
| 2685 | getAlignedVectorSuperClassForBitWidth(unsigned BitWidth) { |
| 2686 | if (BitWidth == 64) |
| 2687 | return &AMDGPU::AV_64_Align2RegClass; |
| 2688 | if (BitWidth == 96) |
| 2689 | return &AMDGPU::AV_96_Align2RegClass; |
| 2690 | if (BitWidth == 128) |
| 2691 | return &AMDGPU::AV_128_Align2RegClass; |
| 2692 | if (BitWidth == 160) |
| 2693 | return &AMDGPU::AV_160_Align2RegClass; |
| 2694 | if (BitWidth == 192) |
| 2695 | return &AMDGPU::AV_192_Align2RegClass; |
| 2696 | if (BitWidth == 224) |
| 2697 | return &AMDGPU::AV_224_Align2RegClass; |
| 2698 | if (BitWidth == 256) |
| 2699 | return &AMDGPU::AV_256_Align2RegClass; |
| 2700 | if (BitWidth == 288) |
| 2701 | return &AMDGPU::AV_288_Align2RegClass; |
| 2702 | if (BitWidth == 320) |
| 2703 | return &AMDGPU::AV_320_Align2RegClass; |
| 2704 | if (BitWidth == 352) |
| 2705 | return &AMDGPU::AV_352_Align2RegClass; |
| 2706 | if (BitWidth == 384) |
| 2707 | return &AMDGPU::AV_384_Align2RegClass; |
| 2708 | if (BitWidth == 512) |
| 2709 | return &AMDGPU::AV_512_Align2RegClass; |
| 2710 | if (BitWidth == 1024) |
| 2711 | return &AMDGPU::AV_1024_Align2RegClass; |
| 2712 | |
| 2713 | return nullptr; |
| 2714 | } |
| 2715 | |
| 2716 | const TargetRegisterClass * |
| 2717 | SIRegisterInfo::getVectorSuperClassForBitWidth(unsigned BitWidth) const { |
| 2718 | if (BitWidth == 16) |
| 2719 | return &AMDGPU::VGPR_LO16RegClass; |
| 2720 | if (BitWidth == 32) |
| 2721 | return &AMDGPU::AV_32RegClass; |
| 2722 | return ST.needsAlignedVGPRs() |
| 2723 | ? getAlignedVectorSuperClassForBitWidth(BitWidth) |
| 2724 | : getAnyVectorSuperClassForBitWidth(BitWidth); |
| 2725 | } |
| 2726 | |
| 2727 | const TargetRegisterClass * |
| 2728 | SIRegisterInfo::getSGPRClassForBitWidth(unsigned BitWidth) { |
| 2729 | if (BitWidth == 16) |
| 2730 | return &AMDGPU::SGPR_LO16RegClass; |
| 2731 | if (BitWidth == 32) |
| 2732 | return &AMDGPU::SReg_32RegClass; |
| 2733 | if (BitWidth == 64) |
| 2734 | return &AMDGPU::SReg_64RegClass; |
| 2735 | if (BitWidth == 96) |
| 2736 | return &AMDGPU::SGPR_96RegClass; |
| 2737 | if (BitWidth == 128) |
| 2738 | return &AMDGPU::SGPR_128RegClass; |
| 2739 | if (BitWidth == 160) |
| 2740 | return &AMDGPU::SGPR_160RegClass; |
| 2741 | if (BitWidth == 192) |
| 2742 | return &AMDGPU::SGPR_192RegClass; |
| 2743 | if (BitWidth == 224) |
| 2744 | return &AMDGPU::SGPR_224RegClass; |
| 2745 | if (BitWidth == 256) |
| 2746 | return &AMDGPU::SGPR_256RegClass; |
| 2747 | if (BitWidth == 288) |
| 2748 | return &AMDGPU::SGPR_288RegClass; |
| 2749 | if (BitWidth == 320) |
| 2750 | return &AMDGPU::SGPR_320RegClass; |
| 2751 | if (BitWidth == 352) |
| 2752 | return &AMDGPU::SGPR_352RegClass; |
| 2753 | if (BitWidth == 384) |
| 2754 | return &AMDGPU::SGPR_384RegClass; |
| 2755 | if (BitWidth == 512) |
| 2756 | return &AMDGPU::SGPR_512RegClass; |
| 2757 | if (BitWidth == 1024) |
| 2758 | return &AMDGPU::SGPR_1024RegClass; |
| 2759 | |
| 2760 | return nullptr; |
| 2761 | } |
| 2762 | |
| 2763 | bool SIRegisterInfo::isSGPRReg(const MachineRegisterInfo &MRI, |
| 2764 | Register Reg) const { |
| 2765 | const TargetRegisterClass *RC; |
| 2766 | if (Reg.isVirtual()) |
| 2767 | RC = MRI.getRegClass(Reg); |
| 2768 | else |
| 2769 | RC = getPhysRegBaseClass(Reg); |
| 2770 | return RC ? isSGPRClass(RC) : false; |
| 2771 | } |
| 2772 | |
| 2773 | const TargetRegisterClass * |
| 2774 | SIRegisterInfo::getEquivalentVGPRClass(const TargetRegisterClass *SRC) const { |
| 2775 | unsigned Size = getRegSizeInBits(*SRC); |
| 2776 | const TargetRegisterClass *VRC = getVGPRClassForBitWidth(Size); |
| 2777 | assert(VRC && "Invalid register class size"); |
| 2778 | return VRC; |
| 2779 | } |
| 2780 | |
| 2781 | const TargetRegisterClass * |
| 2782 | SIRegisterInfo::getEquivalentAGPRClass(const TargetRegisterClass *SRC) const { |
| 2783 | unsigned Size = getRegSizeInBits(*SRC); |
| 2784 | const TargetRegisterClass *ARC = getAGPRClassForBitWidth(Size); |
| 2785 | assert(ARC && "Invalid register class size"); |
| 2786 | return ARC; |
| 2787 | } |
| 2788 | |
| 2789 | const TargetRegisterClass * |
| 2790 | SIRegisterInfo::getEquivalentSGPRClass(const TargetRegisterClass *VRC) const { |
| 2791 | unsigned Size = getRegSizeInBits(*VRC); |
| 2792 | if (Size == 32) |
| 2793 | return &AMDGPU::SGPR_32RegClass; |
| 2794 | const TargetRegisterClass *SRC = getSGPRClassForBitWidth(Size); |
| 2795 | assert(SRC && "Invalid register class size"); |
| 2796 | return SRC; |
| 2797 | } |
| 2798 | |
| 2799 | const TargetRegisterClass * |
| 2800 | SIRegisterInfo::getCompatibleSubRegClass(const TargetRegisterClass *SuperRC, |
| 2801 | const TargetRegisterClass *SubRC, |
| 2802 | unsigned SubIdx) const { |
| 2803 | // Ensure this subregister index is aligned in the super register. |
| 2804 | const TargetRegisterClass *MatchRC = |
| 2805 | getMatchingSuperRegClass(SuperRC, SubRC, SubIdx); |
| 2806 | return MatchRC && MatchRC->hasSubClassEq(SuperRC) ? MatchRC : nullptr; |
| 2807 | } |
| 2808 | |
| 2809 | bool SIRegisterInfo::opCanUseInlineConstant(unsigned OpType) const { |
| 2810 | if (OpType >= AMDGPU::OPERAND_REG_INLINE_AC_FIRST && |
| 2811 | OpType <= AMDGPU::OPERAND_REG_INLINE_AC_LAST) |
| 2812 | return !ST.hasMFMAInlineLiteralBug(); |
| 2813 | |
| 2814 | return OpType >= AMDGPU::OPERAND_SRC_FIRST && |
| 2815 | OpType <= AMDGPU::OPERAND_SRC_LAST; |
| 2816 | } |
| 2817 | |
| 2818 | bool SIRegisterInfo::shouldRewriteCopySrc( |
| 2819 | const TargetRegisterClass *DefRC, |
| 2820 | unsigned DefSubReg, |
| 2821 | const TargetRegisterClass *SrcRC, |
| 2822 | unsigned SrcSubReg) const { |
| 2823 | // We want to prefer the smallest register class possible, so we don't want to |
| 2824 | // stop and rewrite on anything that looks like a subregister |
| 2825 | // extract. Operations mostly don't care about the super register class, so we |
| 2826 | // only want to stop on the most basic of copies between the same register |
| 2827 | // class. |
| 2828 | // |
| 2829 | // e.g. if we have something like |
| 2830 | // %0 = ... |
| 2831 | // %1 = ... |
| 2832 | // %2 = REG_SEQUENCE %0, sub0, %1, sub1, %2, sub2 |
| 2833 | // %3 = COPY %2, sub0 |
| 2834 | // |
| 2835 | // We want to look through the COPY to find: |
| 2836 | // => %3 = COPY %0 |
| 2837 | |
| 2838 | // Plain copy. |
| 2839 | return getCommonSubClass(DefRC, SrcRC) != nullptr; |
| 2840 | } |
| 2841 | |
| 2842 | bool SIRegisterInfo::opCanUseLiteralConstant(unsigned OpType) const { |
| 2843 | // TODO: 64-bit operands have extending behavior from 32-bit literal. |
| 2844 | return OpType >= AMDGPU::OPERAND_REG_IMM_FIRST && |
| 2845 | OpType <= AMDGPU::OPERAND_REG_IMM_LAST; |
| 2846 | } |
| 2847 | |
| 2848 | /// Returns the lowest register that is not used at any point in the function. |
| 2849 | /// If all registers are used, this function returns AMDGPU::NoRegister. |
| 2850 | /// If \p ReserveHighestVGPR is true, the highest unused register is |
| 2851 | /// returned instead. |
| 2852 | MCRegister SIRegisterInfo::findUnusedRegister(const MachineRegisterInfo &MRI, |
| 2853 | const TargetRegisterClass *RC, |
| 2854 | const MachineFunction &MF, |
| 2855 | bool ReserveHighestVGPR) const { |
| 2856 | if (ReserveHighestVGPR) { |
| 2857 | for (MCRegister Reg : reverse(*RC)) |
| 2858 | if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg)) |
| 2859 | return Reg; |
| 2860 | } else { |
| 2861 | for (MCRegister Reg : *RC) |
| 2862 | if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg)) |
| 2863 | return Reg; |
| 2864 | } |
| 2865 | return MCRegister(); |
| 2866 | } |
| 2867 | |
| 2868 | ArrayRef<int16_t> SIRegisterInfo::getRegSplitParts(const TargetRegisterClass *RC, |
| 2869 | unsigned EltSize) const { |
| 2870 | const unsigned RegBitWidth = AMDGPU::getRegBitWidth(*RC->MC); |
| 2871 | assert(RegBitWidth >= 32 && RegBitWidth <= 1024); |
| 2872 | |
| 2873 | const unsigned RegDWORDs = RegBitWidth / 32; |
| 2874 | const unsigned EltDWORDs = EltSize / 4; |
| 2875 | assert(RegSplitParts.size() + 1 >= EltDWORDs); |
| 2876 | |
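| | // RegSplitParts[EltDWORDs - 1] lists the sub-register indices for splitting a |
| | // register into EltDWORDs-sized pieces; this class needs the first |
| | // RegDWORDs / EltDWORDs of them. |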
| 2877 | const std::vector<int16_t> &Parts = RegSplitParts[EltDWORDs - 1]; |
| 2878 | const unsigned NumParts = RegDWORDs / EltDWORDs; |
| 2879 | |
| 2880 | return ArrayRef(Parts.data(), NumParts); |
| 2881 | } |
| 2882 | |
| 2883 | const TargetRegisterClass* |
| 2884 | SIRegisterInfo::getRegClassForReg(const MachineRegisterInfo &MRI, |
| 2885 | Register Reg) const { |
| 2886 | return Reg.isVirtual() ? MRI.getRegClass(Reg) : getPhysRegBaseClass(Reg); |
| 2887 | } |
| 2888 | |
| 2889 | const TargetRegisterClass * |
| 2890 | SIRegisterInfo::getRegClassForOperandReg(const MachineRegisterInfo &MRI, |
| 2891 | const MachineOperand &MO) const { |
| 2892 | const TargetRegisterClass *SrcRC = getRegClassForReg(MRI, MO.getReg()); |
| 2893 | return getSubRegisterClass(SrcRC, MO.getSubReg()); |
| 2894 | } |
| 2895 | |
| 2896 | bool SIRegisterInfo::isVGPR(const MachineRegisterInfo &MRI, |
| 2897 | Register Reg) const { |
| 2898 | const TargetRegisterClass *RC = getRegClassForReg(MRI, Reg); |
| 2899 | // Registers without classes are unaddressable, SGPR-like registers. |
| 2900 | return RC && isVGPRClass(RC); |
| 2901 | } |
| 2902 | |
| 2903 | bool SIRegisterInfo::isAGPR(const MachineRegisterInfo &MRI, |
| 2904 | Register Reg) const { |
| 2905 | const TargetRegisterClass *RC = getRegClassForReg(MRI, Reg); |
| 2906 | |
| 2907 | // Registers without classes are unaddressable, SGPR-like registers. |
| 2908 | return RC && isAGPRClass(RC); |
| 2909 | } |
| 2910 | |
| 2911 | bool SIRegisterInfo::shouldCoalesce(MachineInstr *MI, |
| 2912 | const TargetRegisterClass *SrcRC, |
| 2913 | unsigned SubReg, |
| 2914 | const TargetRegisterClass *DstRC, |
| 2915 | unsigned DstSubReg, |
| 2916 | const TargetRegisterClass *NewRC, |
| 2917 | LiveIntervals &LIS) const { |
| 2918 | unsigned SrcSize = getRegSizeInBits(*SrcRC); |
| 2919 | unsigned DstSize = getRegSizeInBits(*DstRC); |
| 2920 | unsigned NewSize = getRegSizeInBits(*NewRC); |
| 2921 | |
| 2922 | // Do not increase the size of registers beyond a dword; we would need to allocate |
| 2923 | // adjacent registers and constrain regalloc more than needed. |
| 2924 | |
| 2925 | // Always allow dword coalescing. |
| 2926 | if (SrcSize <= 32 || DstSize <= 32) |
| 2927 | return true; |
| 2928 | |
| 2929 | return NewSize <= DstSize || NewSize <= SrcSize; |
| 2930 | } |
| 2931 | |
| 2932 | unsigned SIRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC, |
| 2933 | MachineFunction &MF) const { |
| 2934 | const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); |
| 2935 | |
| 2936 | unsigned Occupancy = ST.getOccupancyWithLocalMemSize(MFI->getLDSSize(), |
| 2937 | MF.getFunction()); |
| 2938 | switch (RC->getID()) { |
| 2939 | default: |
| 2940 | return AMDGPUGenRegisterInfo::getRegPressureLimit(RC, MF); |
| 2941 | case AMDGPU::VGPR_32RegClassID: |
| 2942 | case AMDGPU::VGPR_LO16RegClassID: |
| 2943 | case AMDGPU::VGPR_HI16RegClassID: |
| 2944 | return std::min(ST.getMaxNumVGPRs(Occupancy), ST.getMaxNumVGPRs(MF)); |
| 2945 | case AMDGPU::SGPR_32RegClassID: |
| 2946 | case AMDGPU::SGPR_LO16RegClassID: |
| 2947 | return std::min(ST.getMaxNumSGPRs(Occupancy, true), ST.getMaxNumSGPRs(MF)); |
| 2948 | } |
| 2949 | } |
| 2950 | |
| 2951 | unsigned SIRegisterInfo::getRegPressureSetLimit(const MachineFunction &MF, |
| 2952 | unsigned Idx) const { |
| 2953 | if (Idx == AMDGPU::RegisterPressureSets::VGPR_32 || |
| 2954 | Idx == AMDGPU::RegisterPressureSets::AGPR_32) |
| 2955 | return getRegPressureLimit(&AMDGPU::VGPR_32RegClass, |
| 2956 | const_cast<MachineFunction &>(MF)); |
| 2957 | |
| 2958 | if (Idx == AMDGPU::RegisterPressureSets::SReg_32) |
| 2959 | return getRegPressureLimit(&AMDGPU::SGPR_32RegClass, |
| 2960 | const_cast<MachineFunction &>(MF)); |
| 2961 | |
| 2962 | llvm_unreachable("Unexpected register pressure set!")::llvm::llvm_unreachable_internal("Unexpected register pressure set!" , "llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp", 2962); |
| 2963 | } |
| 2964 | |
| 2965 | const int *SIRegisterInfo::getRegUnitPressureSets(unsigned RegUnit) const { |
| 2966 | static const int Empty[] = { -1 }; |
| 2967 | |
| 2968 | if (RegPressureIgnoredUnits[RegUnit]) |
| 2969 | return Empty; |
| 2970 | |
| 2971 | return AMDGPUGenRegisterInfo::getRegUnitPressureSets(RegUnit); |
| 2972 | } |
| 2973 | |
| 2974 | MCRegister SIRegisterInfo::getReturnAddressReg(const MachineFunction &MF) const { |
| 2975 | // Not a callee saved register. |
| 2976 | return AMDGPU::SGPR30_SGPR31; |
| 2977 | } |
| 2978 | |
| 2979 | const TargetRegisterClass * |
| 2980 | SIRegisterInfo::getRegClassForSizeOnBank(unsigned Size, |
| 2981 | const RegisterBank &RB) const { |
| 2982 | switch (RB.getID()) { |
| 2983 | case AMDGPU::VGPRRegBankID: |
| 2984 | return getVGPRClassForBitWidth(std::max(32u, Size)); |
| 2985 | case AMDGPU::VCCRegBankID: |
| 2986 | assert(Size == 1); |
| 2987 | return isWave32 ? &AMDGPU::SReg_32_XM0_XEXECRegClass |
| 2988 | : &AMDGPU::SReg_64_XEXECRegClass; |
| 2989 | case AMDGPU::SGPRRegBankID: |
| 2990 | return getSGPRClassForBitWidth(std::max(32u, Size)); |
| 2991 | case AMDGPU::AGPRRegBankID: |
| 2992 | return getAGPRClassForBitWidth(std::max(32u, Size)); |
| 2993 | default: |
| 2994 | llvm_unreachable("unknown register bank")::llvm::llvm_unreachable_internal("unknown register bank", "llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp" , 2994); |
| 2995 | } |
| 2996 | } |
| 2997 | |
| 2998 | const TargetRegisterClass * |
| 2999 | SIRegisterInfo::getConstrainedRegClassForOperand(const MachineOperand &MO, |
| 3000 | const MachineRegisterInfo &MRI) const { |
| 3001 | const RegClassOrRegBank &RCOrRB = MRI.getRegClassOrRegBank(MO.getReg()); |
| 3002 | if (const RegisterBank *RB = RCOrRB.dyn_cast<const RegisterBank*>()) |
| 3003 | return getRegClassForTypeOnBank(MRI.getType(MO.getReg()), *RB); |
| 3004 | |
| 3005 | if (const auto *RC = RCOrRB.dyn_cast<const TargetRegisterClass *>()) |
| 3006 | return getAllocatableClass(RC); |
| 3007 | |
| 3008 | return nullptr; |
| 3009 | } |
| 3010 | |
| 3011 | MCRegister SIRegisterInfo::getVCC() const { |
| 3012 | return isWave32 ? AMDGPU::VCC_LO : AMDGPU::VCC; |
| 3013 | } |
| 3014 | |
| 3015 | MCRegister SIRegisterInfo::getExec() const { |
| 3016 | return isWave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC; |
| 3017 | } |
| 3018 | |
| 3019 | const TargetRegisterClass *SIRegisterInfo::getVGPR64Class() const { |
| 3020 | // VGPR tuples have an alignment requirement on gfx90a variants. |
| 3021 | return ST.needsAlignedVGPRs() ? &AMDGPU::VReg_64_Align2RegClass |
| 3022 | : &AMDGPU::VReg_64RegClass; |
| 3023 | } |
| 3024 | |
| 3025 | const TargetRegisterClass * |
| 3026 | SIRegisterInfo::getRegClass(unsigned RCID) const { |
| 3027 | switch ((int)RCID) { |
| 3028 | case AMDGPU::SReg_1RegClassID: |
| 3029 | return getBoolRC(); |
| 3030 | case AMDGPU::SReg_1_XEXECRegClassID: |
| 3031 | return isWave32 ? &AMDGPU::SReg_32_XM0_XEXECRegClass |
| 3032 | : &AMDGPU::SReg_64_XEXECRegClass; |
| 3033 | case -1: |
| 3034 | return nullptr; |
| 3035 | default: |
| 3036 | return AMDGPUGenRegisterInfo::getRegClass(RCID); |
| 3037 | } |
| 3038 | } |
| 3039 | |
| 3040 | // Find reaching register definition |
| 3041 | MachineInstr *SIRegisterInfo::findReachingDef(Register Reg, unsigned SubReg, |
| 3042 | MachineInstr &Use, |
| 3043 | MachineRegisterInfo &MRI, |
| 3044 | LiveIntervals *LIS) const { |
| 3045 | auto &MDT = LIS->getAnalysis<MachineDominatorTree>(); |
| 3046 | SlotIndex UseIdx = LIS->getInstructionIndex(Use); |
| 3047 | SlotIndex DefIdx; |
| 3048 | |
| 3049 | if (Reg.isVirtual()) { |
| 3050 | if (!LIS->hasInterval(Reg)) |
| 3051 | return nullptr; |
| 3052 | LiveInterval &LI = LIS->getInterval(Reg); |
| 3053 | LaneBitmask SubLanes = SubReg ? getSubRegIndexLaneMask(SubReg) |
| 3054 | : MRI.getMaxLaneMaskForVReg(Reg); |
| 3055 | VNInfo *V = nullptr; |
| 3056 | if (LI.hasSubRanges()) { |
| 3057 | for (auto &S : LI.subranges()) { |
| 3058 | if ((S.LaneMask & SubLanes) == SubLanes) { |
| 3059 | V = S.getVNInfoAt(UseIdx); |
| 3060 | break; |
| 3061 | } |
| 3062 | } |
| 3063 | } else { |
| 3064 | V = LI.getVNInfoAt(UseIdx); |
| 3065 | } |
| 3066 | if (!V) |
| 3067 | return nullptr; |
| 3068 | DefIdx = V->def; |
| 3069 | } else { |
| 3070 | // Find last def. |
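| | // Across the register units, keep the def that the current candidate |
| | // dominates, so the nearest (latest) reaching def wins. |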
| 3071 | for (MCRegUnitIterator Units(Reg.asMCReg(), this); Units.isValid(); |
| 3072 | ++Units) { |
| 3073 | LiveRange &LR = LIS->getRegUnit(*Units); |
| 3074 | if (VNInfo *V = LR.getVNInfoAt(UseIdx)) { |
| 3075 | if (!DefIdx.isValid() || |
| 3076 | MDT.dominates(LIS->getInstructionFromIndex(DefIdx), |
| 3077 | LIS->getInstructionFromIndex(V->def))) |
| 3078 | DefIdx = V->def; |
| 3079 | } else { |
| 3080 | return nullptr; |
| 3081 | } |
| 3082 | } |
| 3083 | } |
| 3084 | |
| 3085 | MachineInstr *Def = LIS->getInstructionFromIndex(DefIdx); |
| 3086 | |
| 3087 | if (!Def || !MDT.dominates(Def, &Use)) |
| 3088 | return nullptr; |
| 3089 | |
| 3090 | assert(Def->modifiesRegister(Reg, this)); |
| 3091 | |
| 3092 | return Def; |
| 3093 | } |
| 3094 | |
| 3095 | MCPhysReg SIRegisterInfo::get32BitRegister(MCPhysReg Reg) const { |
| 3096 | assert(getRegSizeInBits(*getPhysRegBaseClass(Reg)) <= 32); |
| 3097 | |
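| | // Find the 32-bit super-register containing this 16-bit register: check the |
| | // lo16 half of each 32-bit class first, then hi16 (VGPRs only). |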
| 3098 | for (const TargetRegisterClass &RC : { AMDGPU::VGPR_32RegClass, |
| 3099 | AMDGPU::SReg_32RegClass, |
| 3100 | AMDGPU::AGPR_32RegClass } ) { |
| 3101 | if (MCPhysReg Super = getMatchingSuperReg(Reg, AMDGPU::lo16, &RC)) |
| 3102 | return Super; |
| 3103 | } |
| 3104 | if (MCPhysReg Super = getMatchingSuperReg(Reg, AMDGPU::hi16, |
| 3105 | &AMDGPU::VGPR_32RegClass)) { |
| 3106 | return Super; |
| 3107 | } |
| 3108 | |
| 3109 | return AMDGPU::NoRegister; |
| 3110 | } |
| 3111 | |
| 3112 | bool SIRegisterInfo::isProperlyAlignedRC(const TargetRegisterClass &RC) const { |
| 3113 | if (!ST.needsAlignedVGPRs()) |
| 3114 | return true; |
| 3115 | |
| 3116 | if (isVGPRClass(&RC)) |
| 3117 | return RC.hasSuperClassEq(getVGPRClassForBitWidth(getRegSizeInBits(RC))); |
| 3118 | if (isAGPRClass(&RC)) |
| 3119 | return RC.hasSuperClassEq(getAGPRClassForBitWidth(getRegSizeInBits(RC))); |
| 3120 | if (isVectorSuperClass(&RC)) |
| 3121 | return RC.hasSuperClassEq( |
| 3122 | getVectorSuperClassForBitWidth(getRegSizeInBits(RC))); |
| 3123 | |
| 3124 | return true; |
| 3125 | } |
| 3126 | |
| 3127 | const TargetRegisterClass * |
| 3128 | SIRegisterInfo::getProperlyAlignedRC(const TargetRegisterClass *RC) const { |
| 3129 | if (!RC || !ST.needsAlignedVGPRs()) |
| 3130 | return RC; |
| 3131 | |
| 3132 | unsigned Size = getRegSizeInBits(*RC); |
| 3133 | if (Size <= 32) |
| 3134 | return RC; |
| 3135 | |
| 3136 | if (isVGPRClass(RC)) |
| 3137 | return getAlignedVGPRClassForBitWidth(Size); |
| 3138 | if (isAGPRClass(RC)) |
| 3139 | return getAlignedAGPRClassForBitWidth(Size); |
| 3140 | if (isVectorSuperClass(RC)) |
| 3141 | return getAlignedVectorSuperClassForBitWidth(Size); |
| 3142 | |
| 3143 | return RC; |
| 3144 | } |
| 3145 | |
| 3146 | ArrayRef<MCPhysReg> |
| 3147 | SIRegisterInfo::getAllSGPR128(const MachineFunction &MF) const { |
| 3148 | return ArrayRef(AMDGPU::SGPR_128RegClass.begin(), ST.getMaxNumSGPRs(MF) / 4); |
| 3149 | } |
| 3150 | |
| 3151 | ArrayRef<MCPhysReg> |
| 3152 | SIRegisterInfo::getAllSGPR64(const MachineFunction &MF) const { |
| 3153 | return ArrayRef(AMDGPU::SGPR_64RegClass.begin(), ST.getMaxNumSGPRs(MF) / 2); |
| 3154 | } |
| 3155 | |
| 3156 | ArrayRef<MCPhysReg> |
| 3157 | SIRegisterInfo::getAllSGPR32(const MachineFunction &MF) const { |
| 3158 | return ArrayRef(AMDGPU::SGPR_32RegClass.begin(), ST.getMaxNumSGPRs(MF)); |
| 3159 | } |