File: build/source/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
Warning: line 3597, column 5: Value stored to 'MI' is never read
1 | //===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | /// \file |
9 | /// This file implements the targeting of the InstructionSelector class for |
10 | /// AMDGPU. |
11 | /// \todo This should be generated by TableGen. |
12 | //===----------------------------------------------------------------------===// |
13 | |
14 | #include "AMDGPUInstructionSelector.h" |
15 | #include "AMDGPU.h" |
16 | #include "AMDGPUGlobalISelUtils.h" |
17 | #include "AMDGPUInstrInfo.h" |
18 | #include "AMDGPURegisterBankInfo.h" |
19 | #include "AMDGPUTargetMachine.h" |
20 | #include "SIMachineFunctionInfo.h" |
21 | #include "Utils/AMDGPUBaseInfo.h" |
22 | #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h" |
23 | #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h" |
24 | #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h" |
25 | #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" |
26 | #include "llvm/CodeGen/MachineFrameInfo.h" |
27 | #include "llvm/IR/DiagnosticInfo.h" |
28 | #include "llvm/IR/IntrinsicsAMDGPU.h" |
29 | #include <optional> |
30 | |
31 | #define DEBUG_TYPE "amdgpu-isel" |
32 | |
33 | using namespace llvm; |
34 | using namespace MIPatternMatch; |
35 | |
36 | static cl::opt<bool> AllowRiskySelect( |
37 | "amdgpu-global-isel-risky-select", |
38 | cl::desc("Allow GlobalISel to select cases that are likely to not work yet"), |
39 | cl::init(false), |
40 | cl::ReallyHidden); |
41 | |
42 | #define GET_GLOBALISEL_IMPL |
43 | #define AMDGPUSubtarget GCNSubtarget |
44 | #include "AMDGPUGenGlobalISel.inc" |
45 | #undef GET_GLOBALISEL_IMPL |
46 | #undef AMDGPUSubtarget |
47 | |
48 | AMDGPUInstructionSelector::AMDGPUInstructionSelector( |
49 | const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI, |
50 | const AMDGPUTargetMachine &TM) |
51 | : TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM), |
52 | STI(STI), |
53 | EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG), |
54 | #define GET_GLOBALISEL_PREDICATES_INIT |
55 | #include "AMDGPUGenGlobalISel.inc" |
56 | #undef GET_GLOBALISEL_PREDICATES_INIT |
57 | #define GET_GLOBALISEL_TEMPORARIES_INIT |
58 | #include "AMDGPUGenGlobalISel.inc" |
59 | #undef GET_GLOBALISEL_TEMPORARIES_INIT |
60 | { |
61 | } |
62 | |
63 | const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; } |
64 | |
65 | void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB, |
66 | CodeGenCoverage &CoverageInfo, |
67 | ProfileSummaryInfo *PSI, |
68 | BlockFrequencyInfo *BFI) { |
69 | MRI = &MF.getRegInfo(); |
70 | Subtarget = &MF.getSubtarget<GCNSubtarget>(); |
71 | InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI); |
72 | } |
73 | |
74 | bool AMDGPUInstructionSelector::isVCC(Register Reg, |
75 | const MachineRegisterInfo &MRI) const { |
76 | // The verifier is oblivious to s1 being a valid value for wavesize registers. |
77 | if (Reg.isPhysical()) |
78 | return false; |
79 | |
80 | auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg); |
81 | const TargetRegisterClass *RC = |
82 | RegClassOrBank.dyn_cast<const TargetRegisterClass*>(); |
83 | if (RC) { |
84 | const LLT Ty = MRI.getType(Reg); |
85 | if (!Ty.isValid() || Ty.getSizeInBits() != 1) |
86 | return false; |
87 | // G_TRUNC s1 result is never vcc. |
88 | return MRI.getVRegDef(Reg)->getOpcode() != AMDGPU::G_TRUNC && |
89 | RC->hasSuperClassEq(TRI.getBoolRC()); |
90 | } |
91 | |
92 | const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>(); |
93 | return RB->getID() == AMDGPU::VCCRegBankID; |
94 | } |
95 | |
96 | bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI, |
97 | unsigned NewOpc) const { |
98 | MI.setDesc(TII.get(NewOpc)); |
99 | MI.removeOperand(1); // Remove intrinsic ID. |
100 | MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true)); |
101 | |
102 | MachineOperand &Dst = MI.getOperand(0); |
103 | MachineOperand &Src = MI.getOperand(1); |
104 | |
105 | // TODO: This should be legalized to s32 if needed |
106 | if (MRI->getType(Dst.getReg()) == LLT::scalar(1)) |
107 | return false; |
108 | |
109 | const TargetRegisterClass *DstRC |
110 | = TRI.getConstrainedRegClassForOperand(Dst, *MRI); |
111 | const TargetRegisterClass *SrcRC |
112 | = TRI.getConstrainedRegClassForOperand(Src, *MRI); |
113 | if (!DstRC || DstRC != SrcRC) |
114 | return false; |
115 | |
116 | return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) && |
117 | RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI); |
118 | } |
119 | |
120 | bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const { |
121 | const DebugLoc &DL = I.getDebugLoc(); |
122 | MachineBasicBlock *BB = I.getParent(); |
123 | I.setDesc(TII.get(TargetOpcode::COPY)); |
124 | |
125 | const MachineOperand &Src = I.getOperand(1); |
126 | MachineOperand &Dst = I.getOperand(0); |
127 | Register DstReg = Dst.getReg(); |
128 | Register SrcReg = Src.getReg(); |
129 | |
130 | if (isVCC(DstReg, *MRI)) { |
131 | if (SrcReg == AMDGPU::SCC) { |
132 | const TargetRegisterClass *RC |
133 | = TRI.getConstrainedRegClassForOperand(Dst, *MRI); |
134 | if (!RC) |
135 | return true; |
136 | return RBI.constrainGenericRegister(DstReg, *RC, *MRI); |
137 | } |
138 | |
139 | if (!isVCC(SrcReg, *MRI)) { |
140 | // TODO: Should probably leave the copy and let copyPhysReg expand it. |
141 | if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI)) |
142 | return false; |
143 | |
144 | const TargetRegisterClass *SrcRC |
145 | = TRI.getConstrainedRegClassForOperand(Src, *MRI); |
146 | |
147 | std::optional<ValueAndVReg> ConstVal = |
148 | getIConstantVRegValWithLookThrough(SrcReg, *MRI, true); |
149 | if (ConstVal) { |
150 | unsigned MovOpc = |
151 | STI.isWave64() ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32; |
152 | BuildMI(*BB, &I, DL, TII.get(MovOpc), DstReg) |
153 | .addImm(ConstVal->Value.getBoolValue() ? -1 : 0); |
154 | } else { |
155 | Register MaskedReg = MRI->createVirtualRegister(SrcRC); |
156 | |
157 | // We can't trust the high bits at this point, so clear them. |
158 | |
159 | // TODO: Skip masking high bits if def is known boolean. |
160 | |
161 | unsigned AndOpc = |
162 | TRI.isSGPRClass(SrcRC) ? AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32; |
163 | BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg) |
164 | .addImm(1) |
165 | .addReg(SrcReg); |
166 | BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg) |
167 | .addImm(0) |
168 | .addReg(MaskedReg); |
169 | } |
170 | |
171 | if (!MRI->getRegClassOrNull(SrcReg)) |
172 | MRI->setRegClass(SrcReg, SrcRC); |
173 | I.eraseFromParent(); |
174 | return true; |
175 | } |
176 | |
177 | const TargetRegisterClass *RC = |
178 | TRI.getConstrainedRegClassForOperand(Dst, *MRI); |
179 | if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI)) |
180 | return false; |
181 | |
182 | return true; |
183 | } |
184 | |
185 | for (const MachineOperand &MO : I.operands()) { |
186 | if (MO.getReg().isPhysical()) |
187 | continue; |
188 | |
189 | const TargetRegisterClass *RC = |
190 | TRI.getConstrainedRegClassForOperand(MO, *MRI); |
191 | if (!RC) |
192 | continue; |
193 | RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI); |
194 | } |
195 | return true; |
196 | } |
197 | |
198 | bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const { |
199 | const Register DefReg = I.getOperand(0).getReg(); |
200 | const LLT DefTy = MRI->getType(DefReg); |
201 | if (DefTy == LLT::scalar(1)) { |
202 | if (!AllowRiskySelect) { |
203 | LLVM_DEBUG(dbgs() << "Skipping risky boolean phi\n"); |
204 | return false; |
205 | } |
206 | |
207 | LLVM_DEBUG(dbgs() << "Selecting risky boolean phi\n"); |
208 | } |
209 | |
210 | // TODO: Verify this doesn't have insane operands (i.e. VGPR to SGPR copy) |
211 | |
212 | const RegClassOrRegBank &RegClassOrBank = |
213 | MRI->getRegClassOrRegBank(DefReg); |
214 | |
215 | const TargetRegisterClass *DefRC |
216 | = RegClassOrBank.dyn_cast<const TargetRegisterClass *>(); |
217 | if (!DefRC) { |
218 | if (!DefTy.isValid()) { |
219 | LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n"); |
220 | return false; |
221 | } |
222 | |
223 | const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>(); |
224 | DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB); |
225 | if (!DefRC) { |
226 | LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n"); |
227 | return false; |
228 | } |
229 | } |
230 | |
231 | // TODO: Verify that all registers have the same bank |
232 | I.setDesc(TII.get(TargetOpcode::PHI)); |
233 | return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI); |
234 | } |
235 | |
236 | MachineOperand |
237 | AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO, |
238 | const TargetRegisterClass &SubRC, |
239 | unsigned SubIdx) const { |
240 | |
241 | MachineInstr *MI = MO.getParent(); |
242 | MachineBasicBlock *BB = MO.getParent()->getParent(); |
243 | Register DstReg = MRI->createVirtualRegister(&SubRC); |
244 | |
245 | if (MO.isReg()) { |
246 | unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx); |
247 | Register Reg = MO.getReg(); |
248 | BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg) |
249 | .addReg(Reg, 0, ComposedSubIdx); |
250 | |
251 | return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(), |
252 | MO.isKill(), MO.isDead(), MO.isUndef(), |
253 | MO.isEarlyClobber(), 0, MO.isDebug(), |
254 | MO.isInternalRead()); |
255 | } |
256 | |
257 | assert(MO.isImm()); |
258 | |
259 | APInt Imm(64, MO.getImm()); |
260 | |
261 | switch (SubIdx) { |
262 | default: |
263 | llvm_unreachable("do not know to split immediate with this sub index."); |
264 | case AMDGPU::sub0: |
265 | return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue()); |
266 | case AMDGPU::sub1: |
267 | return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue()); |
268 | } |
269 | } |
270 | |
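For the immediate case just above, getSubOperand64 takes the low or high 32 bits of the 64-bit value via APInt. A minimal standalone sketch of the same split, assuming APInt::getLoBits/getHiBits keep the operand width and zero the remaining bits; the helper names loHalf/hiHalf are hypothetical, not LLVM API:

#include <cstdint>

// sub0: low 32 bits with the upper half cleared, returned as a 64-bit immediate.
int64_t loHalf(uint64_t Imm) { return static_cast<int64_t>(Imm & 0xffffffffu); }

// sub1: high 32 bits shifted down into the low position.
int64_t hiHalf(uint64_t Imm) { return static_cast<int64_t>(Imm >> 32); }
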
271 | static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) { |
272 | switch (Opc) { |
273 | case AMDGPU::G_AND: |
274 | return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32; |
275 | case AMDGPU::G_OR: |
276 | return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32; |
277 | case AMDGPU::G_XOR: |
278 | return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32; |
279 | default: |
280 | llvm_unreachable("not a bit op"); |
281 | } |
282 | } |
283 | |
284 | bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const { |
285 | Register DstReg = I.getOperand(0).getReg(); |
286 | unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI); |
287 | |
288 | const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); |
289 | if (DstRB->getID() != AMDGPU::SGPRRegBankID && |
290 | DstRB->getID() != AMDGPU::VCCRegBankID) |
291 | return false; |
292 | |
293 | bool Is64 = Size > 32 || (DstRB->getID() == AMDGPU::VCCRegBankID && |
294 | STI.isWave64()); |
295 | I.setDesc(TII.get(getLogicalBitOpcode(I.getOpcode(), Is64))); |
296 | |
297 | // Dead implicit-def of scc |
298 | I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef |
299 | true, // isImp |
300 | false, // isKill |
301 | true)); // isDead |
302 | return constrainSelectedInstRegOperands(I, TII, TRI, RBI); |
303 | } |
304 | |
305 | bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const { |
306 | MachineBasicBlock *BB = I.getParent(); |
307 | MachineFunction *MF = BB->getParent(); |
308 | Register DstReg = I.getOperand(0).getReg(); |
309 | const DebugLoc &DL = I.getDebugLoc(); |
310 | LLT Ty = MRI->getType(DstReg); |
311 | if (Ty.isVector()) |
312 | return false; |
313 | |
314 | unsigned Size = Ty.getSizeInBits(); |
315 | const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); |
316 | const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID; |
317 | const bool Sub = I.getOpcode() == TargetOpcode::G_SUB; |
318 | |
319 | if (Size == 32) { |
320 | if (IsSALU) { |
321 | const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32; |
322 | MachineInstr *Add = |
323 | BuildMI(*BB, &I, DL, TII.get(Opc), DstReg) |
324 | .add(I.getOperand(1)) |
325 | .add(I.getOperand(2)); |
326 | I.eraseFromParent(); |
327 | return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI); |
328 | } |
329 | |
330 | if (STI.hasAddNoCarry()) { |
331 | const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64; |
332 | I.setDesc(TII.get(Opc)); |
333 | I.addOperand(*MF, MachineOperand::CreateImm(0)); |
334 | I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true)); |
335 | return constrainSelectedInstRegOperands(I, TII, TRI, RBI); |
336 | } |
337 | |
338 | const unsigned Opc = Sub ? AMDGPU::V_SUB_CO_U32_e64 : AMDGPU::V_ADD_CO_U32_e64; |
339 | |
340 | Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass()); |
341 | MachineInstr *Add |
342 | = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg) |
343 | .addDef(UnusedCarry, RegState::Dead) |
344 | .add(I.getOperand(1)) |
345 | .add(I.getOperand(2)) |
346 | .addImm(0); |
347 | I.eraseFromParent(); |
348 | return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI); |
349 | } |
350 | |
351 | assert(!Sub && "illegal sub should not reach here"); |
352 | |
353 | const TargetRegisterClass &RC |
354 | = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass; |
355 | const TargetRegisterClass &HalfRC |
356 | = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass; |
357 | |
358 | MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0)); |
359 | MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0)); |
360 | MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1)); |
361 | MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1)); |
362 | |
363 | Register DstLo = MRI->createVirtualRegister(&HalfRC); |
364 | Register DstHi = MRI->createVirtualRegister(&HalfRC); |
365 | |
366 | if (IsSALU) { |
367 | BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo) |
368 | .add(Lo1) |
369 | .add(Lo2); |
370 | BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi) |
371 | .add(Hi1) |
372 | .add(Hi2); |
373 | } else { |
374 | const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass(); |
375 | Register CarryReg = MRI->createVirtualRegister(CarryRC); |
376 | BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_CO_U32_e64), DstLo) |
377 | .addDef(CarryReg) |
378 | .add(Lo1) |
379 | .add(Lo2) |
380 | .addImm(0); |
381 | MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi) |
382 | .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead) |
383 | .add(Hi1) |
384 | .add(Hi2) |
385 | .addReg(CarryReg, RegState::Kill) |
386 | .addImm(0); |
387 | |
388 | if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI)) |
389 | return false; |
390 | } |
391 | |
392 | BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg) |
393 | .addReg(DstLo) |
394 | .addImm(AMDGPU::sub0) |
395 | .addReg(DstHi) |
396 | .addImm(AMDGPU::sub1); |
397 | |
398 | |
399 | if (!RBI.constrainGenericRegister(DstReg, RC, *MRI)) |
400 | return false; |
401 | |
402 | I.eraseFromParent(); |
403 | return true; |
404 | } |
405 | |
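The 64-bit path of selectG_ADD_SUB above splits both operands into 32-bit halves and chains the carry from the low add into the high add (S_ADD_U32 followed by S_ADDC_U32, or the V_ADD_CO_U32/V_ADDC_U32 pair on the VALU side). A small standalone sketch of that decomposition; the function name add64ViaHalves is made up for illustration:

#include <cstdint>

uint64_t add64ViaHalves(uint64_t A, uint64_t B) {
  uint32_t ALo = static_cast<uint32_t>(A), AHi = static_cast<uint32_t>(A >> 32);
  uint32_t BLo = static_cast<uint32_t>(B), BHi = static_cast<uint32_t>(B >> 32);
  uint32_t Lo = ALo + BLo;          // low add produces a carry...
  uint32_t Carry = Lo < ALo;        // ...when the 32-bit sum wraps
  uint32_t Hi = AHi + BHi + Carry;  // high add consumes the carry
  return (static_cast<uint64_t>(Hi) << 32) | Lo;
}
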
406 | bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE( |
407 | MachineInstr &I) const { |
408 | MachineBasicBlock *BB = I.getParent(); |
409 | MachineFunction *MF = BB->getParent(); |
410 | const DebugLoc &DL = I.getDebugLoc(); |
411 | Register Dst0Reg = I.getOperand(0).getReg(); |
412 | Register Dst1Reg = I.getOperand(1).getReg(); |
413 | const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO || |
414 | I.getOpcode() == AMDGPU::G_UADDE; |
415 | const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE || |
416 | I.getOpcode() == AMDGPU::G_USUBE; |
417 | |
418 | if (isVCC(Dst1Reg, *MRI)) { |
419 | unsigned NoCarryOpc = |
420 | IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64; |
421 | unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64; |
422 | I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc)); |
423 | I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true)); |
424 | I.addOperand(*MF, MachineOperand::CreateImm(0)); |
425 | return constrainSelectedInstRegOperands(I, TII, TRI, RBI); |
426 | } |
427 | |
428 | Register Src0Reg = I.getOperand(2).getReg(); |
429 | Register Src1Reg = I.getOperand(3).getReg(); |
430 | |
431 | if (HasCarryIn) { |
432 | BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC) |
433 | .addReg(I.getOperand(4).getReg()); |
434 | } |
435 | |
436 | unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32; |
437 | unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32; |
438 | |
439 | BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg) |
440 | .add(I.getOperand(2)) |
441 | .add(I.getOperand(3)); |
442 | BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg) |
443 | .addReg(AMDGPU::SCC); |
444 | |
445 | if (!MRI->getRegClassOrNull(Dst1Reg)) |
446 | MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass); |
447 | |
448 | if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) || |
449 | !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) || |
450 | !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI)) |
451 | return false; |
452 | |
453 | if (HasCarryIn && |
454 | !RBI.constrainGenericRegister(I.getOperand(4).getReg(), |
455 | AMDGPU::SReg_32RegClass, *MRI)) |
456 | return false; |
457 | |
458 | I.eraseFromParent(); |
459 | return true; |
460 | } |
461 | |
462 | bool AMDGPUInstructionSelector::selectG_AMDGPU_MAD_64_32( |
463 | MachineInstr &I) const { |
464 | MachineBasicBlock *BB = I.getParent(); |
465 | MachineFunction *MF = BB->getParent(); |
466 | const bool IsUnsigned = I.getOpcode() == AMDGPU::G_AMDGPU_MAD_U64_U32; |
467 | |
468 | unsigned Opc; |
469 | if (Subtarget->hasMADIntraFwdBug()) |
470 | Opc = IsUnsigned ? AMDGPU::V_MAD_U64_U32_gfx11_e64 |
471 | : AMDGPU::V_MAD_I64_I32_gfx11_e64; |
472 | else |
473 | Opc = IsUnsigned ? AMDGPU::V_MAD_U64_U32_e64 : AMDGPU::V_MAD_I64_I32_e64; |
474 | I.setDesc(TII.get(Opc)); |
475 | I.addOperand(*MF, MachineOperand::CreateImm(0)); |
476 | I.addImplicitDefUseOperands(*MF); |
477 | return constrainSelectedInstRegOperands(I, TII, TRI, RBI); |
478 | } |
479 | |
480 | // TODO: We should probably legalize these to only using 32-bit results. |
481 | bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const { |
482 | MachineBasicBlock *BB = I.getParent(); |
483 | Register DstReg = I.getOperand(0).getReg(); |
484 | Register SrcReg = I.getOperand(1).getReg(); |
485 | LLT DstTy = MRI->getType(DstReg); |
486 | LLT SrcTy = MRI->getType(SrcReg); |
487 | const unsigned SrcSize = SrcTy.getSizeInBits(); |
488 | unsigned DstSize = DstTy.getSizeInBits(); |
489 | |
490 | // TODO: Should handle any multiple of 32 offset. |
491 | unsigned Offset = I.getOperand(2).getImm(); |
492 | if (Offset % 32 != 0 || DstSize > 128) |
493 | return false; |
494 | |
495 | // 16-bit operations really use 32-bit registers. |
496 | // FIXME: Probably should not allow 16-bit G_EXTRACT results. |
497 | if (DstSize == 16) |
498 | DstSize = 32; |
499 | |
500 | const TargetRegisterClass *DstRC = |
501 | TRI.getConstrainedRegClassForOperand(I.getOperand(0), *MRI); |
502 | if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) |
503 | return false; |
504 | |
505 | const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI); |
506 | const TargetRegisterClass *SrcRC = |
507 | TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank); |
508 | if (!SrcRC) |
509 | return false; |
510 | unsigned SubReg = SIRegisterInfo::getSubRegFromChannel(Offset / 32, |
511 | DstSize / 32); |
512 | SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubReg); |
513 | if (!SrcRC) |
514 | return false; |
515 | |
516 | SrcReg = constrainOperandRegClass(*MF, TRI, *MRI, TII, RBI, I, |
517 | *SrcRC, I.getOperand(1)); |
518 | const DebugLoc &DL = I.getDebugLoc(); |
519 | BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg) |
520 | .addReg(SrcReg, 0, SubReg); |
521 | |
522 | I.eraseFromParent(); |
523 | return true; |
524 | } |
525 | |
526 | bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const { |
527 | MachineBasicBlock *BB = MI.getParent(); |
528 | Register DstReg = MI.getOperand(0).getReg(); |
529 | LLT DstTy = MRI->getType(DstReg); |
530 | LLT SrcTy = MRI->getType(MI.getOperand(1).getReg()); |
531 | |
532 | const unsigned SrcSize = SrcTy.getSizeInBits(); |
533 | if (SrcSize < 32) |
534 | return selectImpl(MI, *CoverageInfo); |
535 | |
536 | const DebugLoc &DL = MI.getDebugLoc(); |
537 | const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI); |
538 | const unsigned DstSize = DstTy.getSizeInBits(); |
539 | const TargetRegisterClass *DstRC = |
540 | TRI.getRegClassForSizeOnBank(DstSize, *DstBank); |
541 | if (!DstRC) |
542 | return false; |
543 | |
544 | ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8); |
545 | MachineInstrBuilder MIB = |
546 | BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg); |
547 | for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) { |
548 | MachineOperand &Src = MI.getOperand(I + 1); |
549 | MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef())); |
550 | MIB.addImm(SubRegs[I]); |
551 | |
552 | const TargetRegisterClass *SrcRC |
553 | = TRI.getConstrainedRegClassForOperand(Src, *MRI); |
554 | if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI)) |
555 | return false; |
556 | } |
557 | |
558 | if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) |
559 | return false; |
560 | |
561 | MI.eraseFromParent(); |
562 | return true; |
563 | } |
564 | |
565 | bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const { |
566 | MachineBasicBlock *BB = MI.getParent(); |
567 | const int NumDst = MI.getNumOperands() - 1; |
568 | |
569 | MachineOperand &Src = MI.getOperand(NumDst); |
570 | |
571 | Register SrcReg = Src.getReg(); |
572 | Register DstReg0 = MI.getOperand(0).getReg(); |
573 | LLT DstTy = MRI->getType(DstReg0); |
574 | LLT SrcTy = MRI->getType(SrcReg); |
575 | |
576 | const unsigned DstSize = DstTy.getSizeInBits(); |
577 | const unsigned SrcSize = SrcTy.getSizeInBits(); |
578 | const DebugLoc &DL = MI.getDebugLoc(); |
579 | const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI); |
580 | |
581 | const TargetRegisterClass *SrcRC = |
582 | TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank); |
583 | if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI)) |
584 | return false; |
585 | |
586 | // Note we could have mixed SGPR and VGPR destination banks for an SGPR |
587 | // source, and this relies on the fact that the same subregister indices are |
588 | // used for both. |
589 | ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8); |
590 | for (int I = 0, E = NumDst; I != E; ++I) { |
591 | MachineOperand &Dst = MI.getOperand(I); |
592 | BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg()) |
593 | .addReg(SrcReg, 0, SubRegs[I]); |
594 | |
595 | // Make sure the subregister index is valid for the source register. |
596 | SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegs[I]); |
597 | if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI)) |
598 | return false; |
599 | |
600 | const TargetRegisterClass *DstRC = |
601 | TRI.getConstrainedRegClassForOperand(Dst, *MRI); |
602 | if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI)) |
603 | return false; |
604 | } |
605 | |
606 | MI.eraseFromParent(); |
607 | return true; |
608 | } |
609 | |
610 | bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR(MachineInstr &MI) const { |
611 | assert(MI.getOpcode() == AMDGPU::G_BUILD_VECTOR_TRUNC || |
612 | MI.getOpcode() == AMDGPU::G_BUILD_VECTOR); |
613 | |
614 | Register Src0 = MI.getOperand(1).getReg(); |
615 | Register Src1 = MI.getOperand(2).getReg(); |
616 | LLT SrcTy = MRI->getType(Src0); |
617 | const unsigned SrcSize = SrcTy.getSizeInBits(); |
618 | |
619 | // BUILD_VECTOR with >=32 bits source is handled by MERGE_VALUE. |
620 | if (MI.getOpcode() == AMDGPU::G_BUILD_VECTOR && SrcSize >= 32) { |
621 | return selectG_MERGE_VALUES(MI); |
622 | } |
623 | |
624 | // Selection logic below is for V2S16 only. |
625 | // For G_BUILD_VECTOR_TRUNC, additionally check that the operands are s32. |
626 | Register Dst = MI.getOperand(0).getReg(); |
627 | if (MRI->getType(Dst) != LLT::fixed_vector(2, 16) || |
628 | (MI.getOpcode() == AMDGPU::G_BUILD_VECTOR_TRUNC && |
629 | SrcTy != LLT::scalar(32))) |
630 | return selectImpl(MI, *CoverageInfo); |
631 | |
632 | const RegisterBank *DstBank = RBI.getRegBank(Dst, *MRI, TRI); |
633 | if (DstBank->getID() == AMDGPU::AGPRRegBankID) |
634 | return false; |
635 | |
636 | assert(DstBank->getID() == AMDGPU::SGPRRegBankID || |
637 | DstBank->getID() == AMDGPU::VGPRRegBankID); |
638 | const bool IsVector = DstBank->getID() == AMDGPU::VGPRRegBankID; |
639 | |
640 | const DebugLoc &DL = MI.getDebugLoc(); |
641 | MachineBasicBlock *BB = MI.getParent(); |
642 | |
643 | // First, before trying TableGen patterns, check if both sources are |
644 | // constants. In those cases, we can trivially compute the final constant |
645 | // and emit a simple move. |
646 | auto ConstSrc1 = getAnyConstantVRegValWithLookThrough(Src1, *MRI, true, true); |
647 | if (ConstSrc1) { |
648 | auto ConstSrc0 = |
649 | getAnyConstantVRegValWithLookThrough(Src0, *MRI, true, true); |
650 | if (ConstSrc0) { |
651 | const int64_t K0 = ConstSrc0->Value.getSExtValue(); |
652 | const int64_t K1 = ConstSrc1->Value.getSExtValue(); |
653 | uint32_t Lo16 = static_cast<uint32_t>(K0) & 0xffff; |
654 | uint32_t Hi16 = static_cast<uint32_t>(K1) & 0xffff; |
655 | uint32_t Imm = Lo16 | (Hi16 << 16); |
656 | |
657 | // VALU |
658 | if (IsVector) { |
659 | BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOV_B32_e32), Dst).addImm(Imm); |
660 | MI.eraseFromParent(); |
661 | return RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI); |
662 | } |
663 | |
664 | // SALU |
665 | BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), Dst).addImm(Imm); |
666 | MI.eraseFromParent(); |
667 | return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI); |
668 | } |
669 | } |
670 | |
671 | // Now try TableGen patterns. |
672 | if (selectImpl(MI, *CoverageInfo)) |
673 | return true; |
674 | |
675 | // TODO: This should probably be a combine somewhere |
676 | // (build_vector $src0, undef) -> copy $src0 |
677 | MachineInstr *Src1Def = getDefIgnoringCopies(Src1, *MRI); |
678 | if (Src1Def->getOpcode() == AMDGPU::G_IMPLICIT_DEF) { |
679 | MI.setDesc(TII.get(AMDGPU::COPY)); |
680 | MI.removeOperand(2); |
681 | const auto &RC = |
682 | IsVector ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass; |
683 | return RBI.constrainGenericRegister(Dst, RC, *MRI) && |
684 | RBI.constrainGenericRegister(Src0, RC, *MRI); |
685 | } |
686 | |
687 | // TODO: Can be improved? |
688 | if (IsVector) { |
689 | Register TmpReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
690 | auto MIB = BuildMI(*BB, MI, DL, TII.get(AMDGPU::V_AND_B32_e32), TmpReg) |
691 | .addImm(0xFFFF) |
692 | .addReg(Src0); |
693 | if (!constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI)) |
694 | return false; |
695 | |
696 | MIB = BuildMI(*BB, MI, DL, TII.get(AMDGPU::V_LSHL_OR_B32_e64), Dst) |
697 | .addReg(Src1) |
698 | .addImm(16) |
699 | .addReg(TmpReg); |
700 | if (!constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI)) |
701 | return false; |
702 | |
703 | MI.eraseFromParent(); |
704 | return true; |
705 | } |
706 | |
707 | Register ShiftSrc0; |
708 | Register ShiftSrc1; |
709 | |
710 | // With multiple uses of the shift, this will duplicate the shift and |
711 | // increase register pressure. |
712 | // |
713 | // (build_vector (lshr_oneuse $src0, 16), (lshr_oneuse $src1, 16) |
714 | // => (S_PACK_HH_B32_B16 $src0, $src1) |
715 | // (build_vector (lshr_oneuse SReg_32:$src0, 16), $src1) |
716 | // => (S_PACK_HL_B32_B16 $src0, $src1) |
717 | // (build_vector $src0, (lshr_oneuse SReg_32:$src1, 16)) |
718 | // => (S_PACK_LH_B32_B16 $src0, $src1) |
719 | // (build_vector $src0, $src1) |
720 | // => (S_PACK_LL_B32_B16 $src0, $src1) |
721 | |
722 | bool Shift0 = mi_match( |
723 | Src0, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc0), m_SpecificICst(16)))); |
724 | |
725 | bool Shift1 = mi_match( |
726 | Src1, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc1), m_SpecificICst(16)))); |
727 | |
728 | unsigned Opc = AMDGPU::S_PACK_LL_B32_B16; |
729 | if (Shift0 && Shift1) { |
730 | Opc = AMDGPU::S_PACK_HH_B32_B16; |
731 | MI.getOperand(1).setReg(ShiftSrc0); |
732 | MI.getOperand(2).setReg(ShiftSrc1); |
733 | } else if (Shift1) { |
734 | Opc = AMDGPU::S_PACK_LH_B32_B16; |
735 | MI.getOperand(2).setReg(ShiftSrc1); |
736 | } else if (Shift0) { |
737 | auto ConstSrc1 = |
738 | getAnyConstantVRegValWithLookThrough(Src1, *MRI, true, true); |
739 | if (ConstSrc1 && ConstSrc1->Value == 0) { |
740 | // build_vector_trunc (lshr $src0, 16), 0 -> s_lshr_b32 $src0, 16 |
741 | auto MIB = BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_LSHR_B32), Dst) |
742 | .addReg(ShiftSrc0) |
743 | .addImm(16); |
744 | |
745 | MI.eraseFromParent(); |
746 | return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); |
747 | } |
748 | if (STI.hasSPackHL()) { |
749 | Opc = AMDGPU::S_PACK_HL_B32_B16; |
750 | MI.getOperand(1).setReg(ShiftSrc0); |
751 | } |
752 | } |
753 | |
754 | MI.setDesc(TII.get(Opc)); |
755 | return constrainSelectedInstRegOperands(MI, TII, TRI, RBI); |
756 | } |
757 | |
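When both build_vector sources are constants, selectG_BUILD_VECTOR above folds them into one 32-bit immediate (low element in bits 15:0, high element in bits 31:16) and emits a single S_MOV_B32 or V_MOV_B32. A standalone sketch of that packing; packV2S16 is a hypothetical helper name:

#include <cstdint>

uint32_t packV2S16(int64_t K0, int64_t K1) {
  uint32_t Lo16 = static_cast<uint32_t>(K0) & 0xffff; // element 0
  uint32_t Hi16 = static_cast<uint32_t>(K1) & 0xffff; // element 1
  return Lo16 | (Hi16 << 16);
}
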
758 | bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const { |
759 | return selectG_ADD_SUB(I); |
760 | } |
761 | |
762 | bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const { |
763 | const MachineOperand &MO = I.getOperand(0); |
764 | |
765 | // FIXME: Interface for getConstrainedRegClassForOperand needs work. The |
766 | // regbank check here is to know why getConstrainedRegClassForOperand failed. |
767 | const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI); |
768 | if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) || |
769 | (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) { |
770 | I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF)); |
771 | return true; |
772 | } |
773 | |
774 | return false; |
775 | } |
776 | |
777 | bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const { |
778 | MachineBasicBlock *BB = I.getParent(); |
779 | |
780 | Register DstReg = I.getOperand(0).getReg(); |
781 | Register Src0Reg = I.getOperand(1).getReg(); |
782 | Register Src1Reg = I.getOperand(2).getReg(); |
783 | LLT Src1Ty = MRI->getType(Src1Reg); |
784 | |
785 | unsigned DstSize = MRI->getType(DstReg).getSizeInBits(); |
786 | unsigned InsSize = Src1Ty.getSizeInBits(); |
787 | |
788 | int64_t Offset = I.getOperand(3).getImm(); |
789 | |
790 | // FIXME: These cases should have been illegal and unnecessary to check here. |
791 | if (Offset % 32 != 0 || InsSize % 32 != 0) |
792 | return false; |
793 | |
794 | // Currently not handled by getSubRegFromChannel. |
795 | if (InsSize > 128) |
796 | return false; |
797 | |
798 | unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32); |
799 | if (SubReg == AMDGPU::NoSubRegister) |
800 | return false; |
801 | |
802 | const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI); |
803 | const TargetRegisterClass *DstRC = |
804 | TRI.getRegClassForSizeOnBank(DstSize, *DstBank); |
805 | if (!DstRC) |
806 | return false; |
807 | |
808 | const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI); |
809 | const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI); |
810 | const TargetRegisterClass *Src0RC = |
811 | TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank); |
812 | const TargetRegisterClass *Src1RC = |
813 | TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank); |
814 | |
815 | // Deal with weird cases where the class only partially supports the subreg |
816 | // index. |
817 | Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg); |
818 | if (!Src0RC || !Src1RC) |
819 | return false; |
820 | |
821 | if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) || |
822 | !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) || |
823 | !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI)) |
824 | return false; |
825 | |
826 | const DebugLoc &DL = I.getDebugLoc(); |
827 | BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg) |
828 | .addReg(Src0Reg) |
829 | .addReg(Src1Reg) |
830 | .addImm(SubReg); |
831 | |
832 | I.eraseFromParent(); |
833 | return true; |
834 | } |
835 | |
836 | bool AMDGPUInstructionSelector::selectG_SBFX_UBFX(MachineInstr &MI) const { |
837 | Register DstReg = MI.getOperand(0).getReg(); |
838 | Register SrcReg = MI.getOperand(1).getReg(); |
839 | Register OffsetReg = MI.getOperand(2).getReg(); |
840 | Register WidthReg = MI.getOperand(3).getReg(); |
841 | |
842 | assert(RBI.getRegBank(DstReg, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID && |
843 | "scalar BFX instructions are expanded in regbankselect"); |
844 | assert(MRI->getType(MI.getOperand(0).getReg()).getSizeInBits() == 32 && |
845 | "64-bit vector BFX instructions are expanded in regbankselect"); |
846 | |
847 | const DebugLoc &DL = MI.getDebugLoc(); |
848 | MachineBasicBlock *MBB = MI.getParent(); |
849 | |
850 | bool IsSigned = MI.getOpcode() == TargetOpcode::G_SBFX; |
851 | unsigned Opc = IsSigned ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64; |
852 | auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), DstReg) |
853 | .addReg(SrcReg) |
854 | .addReg(OffsetReg) |
855 | .addReg(WidthReg); |
856 | MI.eraseFromParent(); |
857 | return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); |
858 | } |
859 | |
860 | bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const { |
861 | if (STI.getLDSBankCount() != 16) |
862 | return selectImpl(MI, *CoverageInfo); |
863 | |
864 | Register Dst = MI.getOperand(0).getReg(); |
865 | Register Src0 = MI.getOperand(2).getReg(); |
866 | Register M0Val = MI.getOperand(6).getReg(); |
867 | if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) || |
868 | !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) || |
869 | !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI)) |
870 | return false; |
871 | |
872 | // This requires 2 instructions. It is possible to write a pattern to support |
873 | // this, but the generated isel emitter doesn't correctly deal with multiple |
874 | // output instructions using the same physical register input. The copy to m0 |
875 | // is incorrectly placed before the second instruction. |
876 | // |
877 | // TODO: Match source modifiers. |
878 | |
879 | Register InterpMov = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
880 | const DebugLoc &DL = MI.getDebugLoc(); |
881 | MachineBasicBlock *MBB = MI.getParent(); |
882 | |
883 | BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) |
884 | .addReg(M0Val); |
885 | BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov) |
886 | .addImm(2) |
887 | .addImm(MI.getOperand(4).getImm()) // $attr |
888 | .addImm(MI.getOperand(3).getImm()); // $attrchan |
889 | |
890 | BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_P1LV_F16), Dst) |
891 | .addImm(0) // $src0_modifiers |
892 | .addReg(Src0) // $src0 |
893 | .addImm(MI.getOperand(4).getImm()) // $attr |
894 | .addImm(MI.getOperand(3).getImm()) // $attrchan |
895 | .addImm(0) // $src2_modifiers |
896 | .addReg(InterpMov) // $src2 - 2 f16 values selected by high |
897 | .addImm(MI.getOperand(5).getImm()) // $high |
898 | .addImm(0) // $clamp |
899 | .addImm(0); // $omod |
900 | |
901 | MI.eraseFromParent(); |
902 | return true; |
903 | } |
904 | |
905 | // Writelane is special in that it can use SGPR and M0 (which would normally |
906 | // count as using the constant bus twice - but in this case it is allowed since |
907 | // the lane selector doesn't count as a use of the constant bus). However, it is |
908 | // still required to abide by the 1 SGPR rule. Fix this up if we might have |
909 | // multiple SGPRs. |
910 | bool AMDGPUInstructionSelector::selectWritelane(MachineInstr &MI) const { |
911 | // With a constant bus limit of at least 2, there's no issue. |
912 | if (STI.getConstantBusLimit(AMDGPU::V_WRITELANE_B32) > 1) |
913 | return selectImpl(MI, *CoverageInfo); |
914 | |
915 | MachineBasicBlock *MBB = MI.getParent(); |
916 | const DebugLoc &DL = MI.getDebugLoc(); |
917 | Register VDst = MI.getOperand(0).getReg(); |
918 | Register Val = MI.getOperand(2).getReg(); |
919 | Register LaneSelect = MI.getOperand(3).getReg(); |
920 | Register VDstIn = MI.getOperand(4).getReg(); |
921 | |
922 | auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_WRITELANE_B32), VDst); |
923 | |
924 | std::optional<ValueAndVReg> ConstSelect = |
925 | getIConstantVRegValWithLookThrough(LaneSelect, *MRI); |
926 | if (ConstSelect) { |
927 | // The selector has to be an inline immediate, so we can use whatever for |
928 | // the other operands. |
929 | MIB.addReg(Val); |
930 | MIB.addImm(ConstSelect->Value.getSExtValue() & |
931 | maskTrailingOnes<uint64_t>(STI.getWavefrontSizeLog2())); |
932 | } else { |
933 | std::optional<ValueAndVReg> ConstVal = |
934 | getIConstantVRegValWithLookThrough(Val, *MRI); |
935 | |
936 | // If the value written is an inline immediate, we can get away without a |
937 | // copy to m0. |
938 | if (ConstVal && AMDGPU::isInlinableLiteral32(ConstVal->Value.getSExtValue(), |
939 | STI.hasInv2PiInlineImm())) { |
940 | MIB.addImm(ConstVal->Value.getSExtValue()); |
941 | MIB.addReg(LaneSelect); |
942 | } else { |
943 | MIB.addReg(Val); |
944 | |
945 | // If the lane selector was originally in a VGPR and copied with |
946 | // readfirstlane, there's a hazard to read the same SGPR from the |
947 | // VALU. Constrain to a different SGPR to help avoid needing a nop later. |
948 | RBI.constrainGenericRegister(LaneSelect, AMDGPU::SReg_32_XM0RegClass, *MRI); |
949 | |
950 | BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) |
951 | .addReg(LaneSelect); |
952 | MIB.addReg(AMDGPU::M0); |
953 | } |
954 | } |
955 | |
956 | MIB.addReg(VDstIn); |
957 | |
958 | MI.eraseFromParent(); |
959 | return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); |
960 | } |
961 | |
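For a constant lane selector, selectWritelane above masks the index down to the wave with maskTrailingOnes<uint64_t>(STI.getWavefrontSizeLog2()), so the operand is always a legal inline immediate. A minimal sketch of that clamping, assuming the wavefront size is given as its log2 (5 for wave32, 6 for wave64); clampLaneIndex is a made-up name:

#include <cstdint>

uint64_t clampLaneIndex(int64_t LaneSelect, unsigned WavefrontSizeLog2) {
  uint64_t Mask = (uint64_t(1) << WavefrontSizeLog2) - 1; // e.g. 0x3f for wave64
  return static_cast<uint64_t>(LaneSelect) & Mask;
}
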
962 | // We need to handle this here because tablegen doesn't support matching |
963 | // instructions with multiple outputs. |
964 | bool AMDGPUInstructionSelector::selectDivScale(MachineInstr &MI) const { |
965 | Register Dst0 = MI.getOperand(0).getReg(); |
966 | Register Dst1 = MI.getOperand(1).getReg(); |
967 | |
968 | LLT Ty = MRI->getType(Dst0); |
969 | unsigned Opc; |
970 | if (Ty == LLT::scalar(32)) |
971 | Opc = AMDGPU::V_DIV_SCALE_F32_e64; |
972 | else if (Ty == LLT::scalar(64)) |
973 | Opc = AMDGPU::V_DIV_SCALE_F64_e64; |
974 | else |
975 | return false; |
976 | |
977 | // TODO: Match source modifiers. |
978 | |
979 | const DebugLoc &DL = MI.getDebugLoc(); |
980 | MachineBasicBlock *MBB = MI.getParent(); |
981 | |
982 | Register Numer = MI.getOperand(3).getReg(); |
983 | Register Denom = MI.getOperand(4).getReg(); |
984 | unsigned ChooseDenom = MI.getOperand(5).getImm(); |
985 | |
986 | Register Src0 = ChooseDenom != 0 ? Numer : Denom; |
987 | |
988 | auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), Dst0) |
989 | .addDef(Dst1) |
990 | .addImm(0) // $src0_modifiers |
991 | .addUse(Src0) // $src0 |
992 | .addImm(0) // $src1_modifiers |
993 | .addUse(Denom) // $src1 |
994 | .addImm(0) // $src2_modifiers |
995 | .addUse(Numer) // $src2 |
996 | .addImm(0) // $clamp |
997 | .addImm(0); // $omod |
998 | |
999 | MI.eraseFromParent(); |
1000 | return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); |
1001 | } |
1002 | |
1003 | bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const { |
1004 | unsigned IntrinsicID = I.getIntrinsicID(); |
1005 | switch (IntrinsicID) { |
1006 | case Intrinsic::amdgcn_if_break: { |
1007 | MachineBasicBlock *BB = I.getParent(); |
1008 | |
1009 | // FIXME: Manually selecting to avoid dealing with the SReg_1 trick |
1010 | // SelectionDAG uses for wave32 vs wave64. |
1011 | BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK)) |
1012 | .add(I.getOperand(0)) |
1013 | .add(I.getOperand(2)) |
1014 | .add(I.getOperand(3)); |
1015 | |
1016 | Register DstReg = I.getOperand(0).getReg(); |
1017 | Register Src0Reg = I.getOperand(2).getReg(); |
1018 | Register Src1Reg = I.getOperand(3).getReg(); |
1019 | |
1020 | I.eraseFromParent(); |
1021 | |
1022 | for (Register Reg : { DstReg, Src0Reg, Src1Reg }) |
1023 | MRI->setRegClass(Reg, TRI.getWaveMaskRegClass()); |
1024 | |
1025 | return true; |
1026 | } |
1027 | case Intrinsic::amdgcn_interp_p1_f16: |
1028 | return selectInterpP1F16(I); |
1029 | case Intrinsic::amdgcn_wqm: |
1030 | return constrainCopyLikeIntrin(I, AMDGPU::WQM); |
1031 | case Intrinsic::amdgcn_softwqm: |
1032 | return constrainCopyLikeIntrin(I, AMDGPU::SOFT_WQM); |
1033 | case Intrinsic::amdgcn_strict_wwm: |
1034 | case Intrinsic::amdgcn_wwm: |
1035 | return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WWM); |
1036 | case Intrinsic::amdgcn_strict_wqm: |
1037 | return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WQM); |
1038 | case Intrinsic::amdgcn_writelane: |
1039 | return selectWritelane(I); |
1040 | case Intrinsic::amdgcn_div_scale: |
1041 | return selectDivScale(I); |
1042 | case Intrinsic::amdgcn_icmp: |
1043 | case Intrinsic::amdgcn_fcmp: |
1044 | if (selectImpl(I, *CoverageInfo)) |
1045 | return true; |
1046 | return selectIntrinsicCmp(I); |
1047 | case Intrinsic::amdgcn_ballot: |
1048 | return selectBallot(I); |
1049 | case Intrinsic::amdgcn_reloc_constant: |
1050 | return selectRelocConstant(I); |
1051 | case Intrinsic::amdgcn_groupstaticsize: |
1052 | return selectGroupStaticSize(I); |
1053 | case Intrinsic::returnaddress: |
1054 | return selectReturnAddress(I); |
1055 | case Intrinsic::amdgcn_smfmac_f32_16x16x32_f16: |
1056 | case Intrinsic::amdgcn_smfmac_f32_32x32x16_f16: |
1057 | case Intrinsic::amdgcn_smfmac_f32_16x16x32_bf16: |
1058 | case Intrinsic::amdgcn_smfmac_f32_32x32x16_bf16: |
1059 | case Intrinsic::amdgcn_smfmac_i32_16x16x64_i8: |
1060 | case Intrinsic::amdgcn_smfmac_i32_32x32x32_i8: |
1061 | case Intrinsic::amdgcn_smfmac_f32_16x16x64_bf8_bf8: |
1062 | case Intrinsic::amdgcn_smfmac_f32_16x16x64_bf8_fp8: |
1063 | case Intrinsic::amdgcn_smfmac_f32_16x16x64_fp8_bf8: |
1064 | case Intrinsic::amdgcn_smfmac_f32_16x16x64_fp8_fp8: |
1065 | case Intrinsic::amdgcn_smfmac_f32_32x32x32_bf8_bf8: |
1066 | case Intrinsic::amdgcn_smfmac_f32_32x32x32_bf8_fp8: |
1067 | case Intrinsic::amdgcn_smfmac_f32_32x32x32_fp8_bf8: |
1068 | case Intrinsic::amdgcn_smfmac_f32_32x32x32_fp8_fp8: |
1069 | return selectSMFMACIntrin(I); |
1070 | default: |
1071 | return selectImpl(I, *CoverageInfo); |
1072 | } |
1073 | } |
1074 | |
1075 | static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size, |
1076 | const GCNSubtarget &ST) { |
1077 | if (Size != 16 && Size != 32 && Size != 64) |
1078 | return -1; |
1079 | |
1080 | if (Size == 16 && !ST.has16BitInsts()) |
1081 | return -1; |
1082 | |
1083 | const auto Select = [&](unsigned S16Opc, unsigned TrueS16Opc, unsigned S32Opc, |
1084 | unsigned S64Opc) { |
1085 | if (Size == 16) |
1086 | return ST.hasTrue16BitInsts() ? TrueS16Opc : S16Opc; |
1087 | if (Size == 32) |
1088 | return S32Opc; |
1089 | return S64Opc; |
1090 | }; |
1091 | |
1092 | switch (P) { |
1093 | default: |
1094 | llvm_unreachable("Unknown condition code!"); |
1095 | case CmpInst::ICMP_NE: |
1096 | return Select(AMDGPU::V_CMP_NE_U16_e64, AMDGPU::V_CMP_NE_U16_t16_e64, |
1097 | AMDGPU::V_CMP_NE_U32_e64, AMDGPU::V_CMP_NE_U64_e64); |
1098 | case CmpInst::ICMP_EQ: |
1099 | return Select(AMDGPU::V_CMP_EQ_U16_e64, AMDGPU::V_CMP_EQ_U16_t16_e64, |
1100 | AMDGPU::V_CMP_EQ_U32_e64, AMDGPU::V_CMP_EQ_U64_e64); |
1101 | case CmpInst::ICMP_SGT: |
1102 | return Select(AMDGPU::V_CMP_GT_I16_e64, AMDGPU::V_CMP_GT_I16_t16_e64, |
1103 | AMDGPU::V_CMP_GT_I32_e64, AMDGPU::V_CMP_GT_I64_e64); |
1104 | case CmpInst::ICMP_SGE: |
1105 | return Select(AMDGPU::V_CMP_GE_I16_e64, AMDGPU::V_CMP_GE_I16_t16_e64, |
1106 | AMDGPU::V_CMP_GE_I32_e64, AMDGPU::V_CMP_GE_I64_e64); |
1107 | case CmpInst::ICMP_SLT: |
1108 | return Select(AMDGPU::V_CMP_LT_I16_e64, AMDGPU::V_CMP_LT_I16_t16_e64, |
1109 | AMDGPU::V_CMP_LT_I32_e64, AMDGPU::V_CMP_LT_I64_e64); |
1110 | case CmpInst::ICMP_SLE: |
1111 | return Select(AMDGPU::V_CMP_LE_I16_e64, AMDGPU::V_CMP_LE_I16_t16_e64, |
1112 | AMDGPU::V_CMP_LE_I32_e64, AMDGPU::V_CMP_LE_I64_e64); |
1113 | case CmpInst::ICMP_UGT: |
1114 | return Select(AMDGPU::V_CMP_GT_U16_e64, AMDGPU::V_CMP_GT_U16_t16_e64, |
1115 | AMDGPU::V_CMP_GT_U32_e64, AMDGPU::V_CMP_GT_U64_e64); |
1116 | case CmpInst::ICMP_UGE: |
1117 | return Select(AMDGPU::V_CMP_GE_U16_e64, AMDGPU::V_CMP_GE_U16_t16_e64, |
1118 | AMDGPU::V_CMP_GE_U32_e64, AMDGPU::V_CMP_GE_U64_e64); |
1119 | case CmpInst::ICMP_ULT: |
1120 | return Select(AMDGPU::V_CMP_LT_U16_e64, AMDGPU::V_CMP_LT_U16_t16_e64, |
1121 | AMDGPU::V_CMP_LT_U32_e64, AMDGPU::V_CMP_LT_U64_e64); |
1122 | case CmpInst::ICMP_ULE: |
1123 | return Select(AMDGPU::V_CMP_LE_U16_e64, AMDGPU::V_CMP_LE_U16_t16_e64, |
1124 | AMDGPU::V_CMP_LE_U32_e64, AMDGPU::V_CMP_LE_U64_e64); |
1125 | |
1126 | case CmpInst::FCMP_OEQ: |
1127 | return Select(AMDGPU::V_CMP_EQ_F16_e64, AMDGPU::V_CMP_EQ_F16_t16_e64, |
1128 | AMDGPU::V_CMP_EQ_F32_e64, AMDGPU::V_CMP_EQ_F64_e64); |
1129 | case CmpInst::FCMP_OGT: |
1130 | return Select(AMDGPU::V_CMP_GT_F16_e64, AMDGPU::V_CMP_GT_F16_t16_e64, |
1131 | AMDGPU::V_CMP_GT_F32_e64, AMDGPU::V_CMP_GT_F64_e64); |
1132 | case CmpInst::FCMP_OGE: |
1133 | return Select(AMDGPU::V_CMP_GE_F16_e64, AMDGPU::V_CMP_GE_F16_t16_e64, |
1134 | AMDGPU::V_CMP_GE_F32_e64, AMDGPU::V_CMP_GE_F64_e64); |
1135 | case CmpInst::FCMP_OLT: |
1136 | return Select(AMDGPU::V_CMP_LT_F16_e64, AMDGPU::V_CMP_LT_F16_t16_e64, |
1137 | AMDGPU::V_CMP_LT_F32_e64, AMDGPU::V_CMP_LT_F64_e64); |
1138 | case CmpInst::FCMP_OLE: |
1139 | return Select(AMDGPU::V_CMP_LE_F16_e64, AMDGPU::V_CMP_LE_F16_t16_e64, |
1140 | AMDGPU::V_CMP_LE_F32_e64, AMDGPU::V_CMP_LE_F64_e64); |
1141 | case CmpInst::FCMP_ONE: |
1142 | return Select(AMDGPU::V_CMP_NEQ_F16_e64, AMDGPU::V_CMP_NEQ_F16_t16_e64, |
1143 | AMDGPU::V_CMP_NEQ_F32_e64, AMDGPU::V_CMP_NEQ_F64_e64); |
1144 | case CmpInst::FCMP_ORD: |
1145 | return Select(AMDGPU::V_CMP_O_F16_e64, AMDGPU::V_CMP_O_F16_t16_e64, |
1146 | AMDGPU::V_CMP_O_F32_e64, AMDGPU::V_CMP_O_F64_e64); |
1147 | case CmpInst::FCMP_UNO: |
1148 | return Select(AMDGPU::V_CMP_U_F16_e64, AMDGPU::V_CMP_U_F16_t16_e64, |
1149 | AMDGPU::V_CMP_U_F32_e64, AMDGPU::V_CMP_U_F64_e64); |
1150 | case CmpInst::FCMP_UEQ: |
1151 | return Select(AMDGPU::V_CMP_NLG_F16_e64, AMDGPU::V_CMP_NLG_F16_t16_e64, |
1152 | AMDGPU::V_CMP_NLG_F32_e64, AMDGPU::V_CMP_NLG_F64_e64); |
1153 | case CmpInst::FCMP_UGT: |
1154 | return Select(AMDGPU::V_CMP_NLE_F16_e64, AMDGPU::V_CMP_NLE_F16_t16_e64, |
1155 | AMDGPU::V_CMP_NLE_F32_e64, AMDGPU::V_CMP_NLE_F64_e64); |
1156 | case CmpInst::FCMP_UGE: |
1157 | return Select(AMDGPU::V_CMP_NLT_F16_e64, AMDGPU::V_CMP_NLT_F16_t16_e64, |
1158 | AMDGPU::V_CMP_NLT_F32_e64, AMDGPU::V_CMP_NLT_F64_e64); |
1159 | case CmpInst::FCMP_ULT: |
1160 | return Select(AMDGPU::V_CMP_NGE_F16_e64, AMDGPU::V_CMP_NGE_F16_t16_e64, |
1161 | AMDGPU::V_CMP_NGE_F32_e64, AMDGPU::V_CMP_NGE_F64_e64); |
1162 | case CmpInst::FCMP_ULE: |
1163 | return Select(AMDGPU::V_CMP_NGT_F16_e64, AMDGPU::V_CMP_NGT_F16_t16_e64, |
1164 | AMDGPU::V_CMP_NGT_F32_e64, AMDGPU::V_CMP_NGT_F64_e64); |
1165 | case CmpInst::FCMP_UNE: |
1166 | return Select(AMDGPU::V_CMP_NEQ_F16_e64, AMDGPU::V_CMP_NEQ_F16_t16_e64, |
1167 | AMDGPU::V_CMP_NEQ_F32_e64, AMDGPU::V_CMP_NEQ_F64_e64); |
1168 | case CmpInst::FCMP_TRUE: |
1169 | return Select(AMDGPU::V_CMP_TRU_F16_e64, AMDGPU::V_CMP_TRU_F16_t16_e64, |
1170 | AMDGPU::V_CMP_TRU_F32_e64, AMDGPU::V_CMP_TRU_F64_e64); |
1171 | case CmpInst::FCMP_FALSE: |
1172 | return Select(AMDGPU::V_CMP_F_F16_e64, AMDGPU::V_CMP_F_F16_t16_e64, |
1173 | AMDGPU::V_CMP_F_F32_e64, AMDGPU::V_CMP_F_F64_e64); |
1174 | } |
1175 | } |
1176 | |
1177 | int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P, |
1178 | unsigned Size) const { |
1179 | if (Size == 64) { |
1180 | if (!STI.hasScalarCompareEq64()) |
1181 | return -1; |
1182 | |
1183 | switch (P) { |
1184 | case CmpInst::ICMP_NE: |
1185 | return AMDGPU::S_CMP_LG_U64; |
1186 | case CmpInst::ICMP_EQ: |
1187 | return AMDGPU::S_CMP_EQ_U64; |
1188 | default: |
1189 | return -1; |
1190 | } |
1191 | } |
1192 | |
1193 | if (Size != 32) |
1194 | return -1; |
1195 | |
1196 | switch (P) { |
1197 | case CmpInst::ICMP_NE: |
1198 | return AMDGPU::S_CMP_LG_U32; |
1199 | case CmpInst::ICMP_EQ: |
1200 | return AMDGPU::S_CMP_EQ_U32; |
1201 | case CmpInst::ICMP_SGT: |
1202 | return AMDGPU::S_CMP_GT_I32; |
1203 | case CmpInst::ICMP_SGE: |
1204 | return AMDGPU::S_CMP_GE_I32; |
1205 | case CmpInst::ICMP_SLT: |
1206 | return AMDGPU::S_CMP_LT_I32; |
1207 | case CmpInst::ICMP_SLE: |
1208 | return AMDGPU::S_CMP_LE_I32; |
1209 | case CmpInst::ICMP_UGT: |
1210 | return AMDGPU::S_CMP_GT_U32; |
1211 | case CmpInst::ICMP_UGE: |
1212 | return AMDGPU::S_CMP_GE_U32; |
1213 | case CmpInst::ICMP_ULT: |
1214 | return AMDGPU::S_CMP_LT_U32; |
1215 | case CmpInst::ICMP_ULE: |
1216 | return AMDGPU::S_CMP_LE_U32; |
1217 | default: |
1218 | llvm_unreachable("Unknown condition code!"); |
1219 | } |
1220 | } |
1221 | |
1222 | bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const { |
1223 | MachineBasicBlock *BB = I.getParent(); |
1224 | const DebugLoc &DL = I.getDebugLoc(); |
1225 | |
1226 | Register SrcReg = I.getOperand(2).getReg(); |
1227 | unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI); |
1228 | |
1229 | auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate(); |
1230 | |
1231 | Register CCReg = I.getOperand(0).getReg(); |
1232 | if (!isVCC(CCReg, *MRI)) { |
1233 | int Opcode = getS_CMPOpcode(Pred, Size); |
1234 | if (Opcode == -1) |
1235 | return false; |
1236 | MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode)) |
1237 | .add(I.getOperand(2)) |
1238 | .add(I.getOperand(3)); |
1239 | BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg) |
1240 | .addReg(AMDGPU::SCC); |
1241 | bool Ret = |
1242 | constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) && |
1243 | RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI); |
1244 | I.eraseFromParent(); |
1245 | return Ret; |
1246 | } |
1247 | |
1248 | int Opcode = getV_CMPOpcode(Pred, Size, *Subtarget); |
1249 | if (Opcode == -1) |
1250 | return false; |
1251 | |
1252 | MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode), |
1253 | I.getOperand(0).getReg()) |
1254 | .add(I.getOperand(2)) |
1255 | .add(I.getOperand(3)); |
1256 | RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(), |
1257 | *TRI.getBoolRC(), *MRI); |
1258 | bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI); |
1259 | I.eraseFromParent(); |
1260 | return Ret; |
1261 | } |
1262 | |
1263 | bool AMDGPUInstructionSelector::selectIntrinsicCmp(MachineInstr &I) const { |
1264 | Register Dst = I.getOperand(0).getReg(); |
1265 | if (isVCC(Dst, *MRI)) |
1266 | return false; |
1267 | |
1268 | LLT DstTy = MRI->getType(Dst); |
1269 | if (DstTy.getSizeInBits() != STI.getWavefrontSize()) |
1270 | return false; |
1271 | |
1272 | MachineBasicBlock *BB = I.getParent(); |
1273 | const DebugLoc &DL = I.getDebugLoc(); |
1274 | Register SrcReg = I.getOperand(2).getReg(); |
1275 | unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI); |
1276 | |
1277 | // i1 inputs are not supported in GlobalISel. |
1278 | if (Size == 1) |
1279 | return false; |
1280 | |
1281 | auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(4).getImm()); |
1282 | if (!CmpInst::isIntPredicate(Pred) && !CmpInst::isFPPredicate(Pred)) { |
1283 | BuildMI(*BB, &I, DL, TII.get(AMDGPU::IMPLICIT_DEF), Dst); |
1284 | I.eraseFromParent(); |
1285 | return RBI.constrainGenericRegister(Dst, *TRI.getBoolRC(), *MRI); |
1286 | } |
1287 | |
1288 | const int Opcode = getV_CMPOpcode(Pred, Size, *Subtarget); |
1289 | if (Opcode == -1) |
1290 | return false; |
1291 | |
1292 | MachineInstr *SelectedMI; |
1293 | if (CmpInst::isFPPredicate(Pred)) { |
1294 | MachineOperand &LHS = I.getOperand(2); |
1295 | MachineOperand &RHS = I.getOperand(3); |
1296 | auto [Src0, Src0Mods] = selectVOP3ModsImpl(LHS); |
1297 | auto [Src1, Src1Mods] = selectVOP3ModsImpl(RHS); |
1298 | Register Src0Reg = |
1299 | copyToVGPRIfSrcFolded(Src0, Src0Mods, LHS, &I, /*ForceVGPR*/ true); |
1300 | Register Src1Reg = |
1301 | copyToVGPRIfSrcFolded(Src1, Src1Mods, RHS, &I, /*ForceVGPR*/ true); |
1302 | SelectedMI = BuildMI(*BB, &I, DL, TII.get(Opcode), Dst) |
1303 | .addImm(Src0Mods) |
1304 | .addReg(Src0Reg) |
1305 | .addImm(Src1Mods) |
1306 | .addReg(Src1Reg) |
1307 | .addImm(0); // clamp |
1308 | } else { |
1309 | SelectedMI = BuildMI(*BB, &I, DL, TII.get(Opcode), Dst) |
1310 | .add(I.getOperand(2)) |
1311 | .add(I.getOperand(3)); |
1312 | } |
1313 | |
1314 | RBI.constrainGenericRegister(Dst, *TRI.getBoolRC(), *MRI); |
1315 | if (!constrainSelectedInstRegOperands(*SelectedMI, TII, TRI, RBI)) |
1316 | return false; |
1317 | |
1318 | I.eraseFromParent(); |
1319 | return true; |
1320 | } |
1321 | |
1322 | bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const { |
1323 | MachineBasicBlock *BB = I.getParent(); |
1324 | const DebugLoc &DL = I.getDebugLoc(); |
1325 | Register DstReg = I.getOperand(0).getReg(); |
1326 | const unsigned Size = MRI->getType(DstReg).getSizeInBits(); |
1327 | const bool Is64 = Size == 64; |
1328 | |
1329 | if (Size != STI.getWavefrontSize()) |
1330 | return false; |
1331 | |
1332 | std::optional<ValueAndVReg> Arg = |
1333 | getIConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI); |
1334 | |
1335 | if (Arg) { |
1336 | const int64_t Value = Arg->Value.getSExtValue(); |
1337 | if (Value == 0) { |
1338 | unsigned Opcode = Is64 ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32; |
1339 | BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg).addImm(0); |
1340 | } else if (Value == -1) { // all ones |
1341 | Register SrcReg = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO; |
1342 | BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg); |
1343 | } else |
1344 | return false; |
1345 | } else { |
1346 | Register SrcReg = I.getOperand(2).getReg(); |
1347 | BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg); |
1348 | } |
1349 | |
1350 | I.eraseFromParent(); |
1351 | return true; |
1352 | } |
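// Illustrative summary of the ballot cases above (a sketch; the wave64 shapes
// are assumed here for concreteness): with an i64 result on a wave64 target,
//   - a constant-false argument selects to S_MOV_B64 of 0,
//   - a constant-true argument selects to a COPY of EXEC,
//   - any other constant makes selection fail, and
//   - a non-constant argument is simply copied into the result.
// Wave32 uses S_MOV_B32 / EXEC_LO with a 32-bit result instead.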
1353 | |
1354 | bool AMDGPUInstructionSelector::selectRelocConstant(MachineInstr &I) const { |
1355 | Register DstReg = I.getOperand(0).getReg(); |
1356 | const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI); |
1357 | const TargetRegisterClass *DstRC = TRI.getRegClassForSizeOnBank(32, *DstBank); |
1358 | if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) |
1359 | return false; |
1360 | |
1361 | const bool IsVALU = DstBank->getID() == AMDGPU::VGPRRegBankID; |
1362 | |
1363 | Module *M = MF->getFunction().getParent(); |
1364 | const MDNode *Metadata = I.getOperand(2).getMetadata(); |
1365 | auto SymbolName = cast<MDString>(Metadata->getOperand(0))->getString(); |
1366 | auto RelocSymbol = cast<GlobalVariable>( |
1367 | M->getOrInsertGlobal(SymbolName, Type::getInt32Ty(M->getContext()))); |
1368 | |
1369 | MachineBasicBlock *BB = I.getParent(); |
1370 | BuildMI(*BB, &I, I.getDebugLoc(), |
1371 | TII.get(IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32), DstReg) |
1372 | .addGlobalAddress(RelocSymbol, 0, SIInstrInfo::MO_ABS32_LO); |
1373 | |
1374 | I.eraseFromParent(); |
1375 | return true; |
1376 | } |
1377 | |
1378 | bool AMDGPUInstructionSelector::selectGroupStaticSize(MachineInstr &I) const { |
1379 | Triple::OSType OS = MF->getTarget().getTargetTriple().getOS(); |
1380 | |
1381 | Register DstReg = I.getOperand(0).getReg(); |
1382 | const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); |
1383 | unsigned Mov = DstRB->getID() == AMDGPU::SGPRRegBankID ? |
1384 | AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32; |
1385 | |
1386 | MachineBasicBlock *MBB = I.getParent(); |
1387 | const DebugLoc &DL = I.getDebugLoc(); |
1388 | |
1389 | auto MIB = BuildMI(*MBB, &I, DL, TII.get(Mov), DstReg); |
1390 | |
1391 | if (OS == Triple::AMDHSA || OS == Triple::AMDPAL) { |
1392 | const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); |
1393 | MIB.addImm(MFI->getLDSSize()); |
1394 | } else { |
1395 | Module *M = MF->getFunction().getParent(); |
1396 | const GlobalValue *GV |
1397 | = Intrinsic::getDeclaration(M, Intrinsic::amdgcn_groupstaticsize); |
1398 | MIB.addGlobalAddress(GV, 0, SIInstrInfo::MO_ABS32_LO); |
1399 | } |
1400 | |
1401 | I.eraseFromParent(); |
1402 | return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); |
1403 | } |
1404 | |
1405 | bool AMDGPUInstructionSelector::selectReturnAddress(MachineInstr &I) const { |
1406 | MachineBasicBlock *MBB = I.getParent(); |
1407 | MachineFunction &MF = *MBB->getParent(); |
1408 | const DebugLoc &DL = I.getDebugLoc(); |
1409 | |
1410 | MachineOperand &Dst = I.getOperand(0); |
1411 | Register DstReg = Dst.getReg(); |
1412 | unsigned Depth = I.getOperand(2).getImm(); |
1413 | |
1414 | const TargetRegisterClass *RC |
1415 | = TRI.getConstrainedRegClassForOperand(Dst, *MRI); |
1416 | if (!RC->hasSubClassEq(&AMDGPU::SGPR_64RegClass) || |
1417 | !RBI.constrainGenericRegister(DstReg, *RC, *MRI)) |
1418 | return false; |
1419 | |
1420 | // Check for kernel and shader functions |
1421 | if (Depth != 0 || |
1422 | MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) { |
1423 | BuildMI(*MBB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg) |
1424 | .addImm(0); |
1425 | I.eraseFromParent(); |
1426 | return true; |
1427 | } |
1428 | |
1429 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
1430 | // There is a call to @llvm.returnaddress in this function |
1431 | MFI.setReturnAddressIsTaken(true); |
1432 | |
1433 | // Get the return address reg and mark it as an implicit live-in |
1434 | Register ReturnAddrReg = TRI.getReturnAddressReg(MF); |
1435 | Register LiveIn = getFunctionLiveInPhysReg(MF, TII, ReturnAddrReg, |
1436 | AMDGPU::SReg_64RegClass, DL); |
1437 | BuildMI(*MBB, &I, DL, TII.get(AMDGPU::COPY), DstReg) |
1438 | .addReg(LiveIn); |
1439 | I.eraseFromParent(); |
1440 | return true; |
1441 | } |
1442 | |
1443 | bool AMDGPUInstructionSelector::selectEndCfIntrinsic(MachineInstr &MI) const { |
1444 | // FIXME: Manually selecting to avoid dealing with the SReg_1 trick |
1445 | // SelectionDAG uses for wave32 vs wave64. |
1446 | MachineBasicBlock *BB = MI.getParent(); |
1447 | BuildMI(*BB, &MI, MI.getDebugLoc(), TII.get(AMDGPU::SI_END_CF)) |
1448 | .add(MI.getOperand(1)); |
1449 | |
1450 | Register Reg = MI.getOperand(1).getReg(); |
1451 | MI.eraseFromParent(); |
1452 | |
1453 | if (!MRI->getRegClassOrNull(Reg)) |
1454 | MRI->setRegClass(Reg, TRI.getWaveMaskRegClass()); |
1455 | return true; |
1456 | } |
1457 | |
1458 | bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic( |
1459 | MachineInstr &MI, Intrinsic::ID IntrID) const { |
1460 | MachineBasicBlock *MBB = MI.getParent(); |
1461 | MachineFunction *MF = MBB->getParent(); |
1462 | const DebugLoc &DL = MI.getDebugLoc(); |
1463 | |
1464 | unsigned IndexOperand = MI.getOperand(7).getImm(); |
1465 | bool WaveRelease = MI.getOperand(8).getImm() != 0; |
1466 | bool WaveDone = MI.getOperand(9).getImm() != 0; |
1467 | |
1468 | if (WaveDone && !WaveRelease) |
1469 | report_fatal_error("ds_ordered_count: wave_done requires wave_release"); |
1470 | |
1471 | unsigned OrderedCountIndex = IndexOperand & 0x3f; |
1472 | IndexOperand &= ~0x3f; |
1473 | unsigned CountDw = 0; |
1474 | |
1475 | if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) { |
1476 | CountDw = (IndexOperand >> 24) & 0xf; |
1477 | IndexOperand &= ~(0xf << 24); |
1478 | |
1479 | if (CountDw < 1 || CountDw > 4) { |
1480 | report_fatal_error( |
1481 | "ds_ordered_count: dword count must be between 1 and 4"); |
1482 | } |
1483 | } |
1484 | |
1485 | if (IndexOperand) |
1486 | report_fatal_error("ds_ordered_count: bad index operand"); |
1487 | |
1488 | unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1; |
1489 | unsigned ShaderType = SIInstrInfo::getDSShaderTypeValue(*MF); |
1490 | |
1491 | unsigned Offset0 = OrderedCountIndex << 2; |
1492 | unsigned Offset1 = WaveRelease | (WaveDone << 1) | (Instruction << 4); |
1493 | |
1494 | if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) |
1495 | Offset1 |= (CountDw - 1) << 6; |
1496 | |
1497 | if (STI.getGeneration() < AMDGPUSubtarget::GFX11) |
1498 | Offset1 |= ShaderType << 2; |
1499 | |
1500 | unsigned Offset = Offset0 | (Offset1 << 8); |
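// Worked example of the packing above, using assumed inputs (index 0,
// wave_release = 1, wave_done = 0, CountDw = 1 on GFX10, and an assumed
// ShaderType of 0, so the ShaderType << 2 term contributes nothing):
//   Offset0 = 0 << 2                                   = 0x0
//   Offset1 = 1 | (0 << 1) | (0 << 4) | ((1 - 1) << 6) = 0x1
//   Offset  = Offset0 | (Offset1 << 8)                 = 0x100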
1501 | |
1502 | Register M0Val = MI.getOperand(2).getReg(); |
1503 | BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) |
1504 | .addReg(M0Val); |
1505 | |
1506 | Register DstReg = MI.getOperand(0).getReg(); |
1507 | Register ValReg = MI.getOperand(3).getReg(); |
1508 | MachineInstrBuilder DS = |
1509 | BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg) |
1510 | .addReg(ValReg) |
1511 | .addImm(Offset) |
1512 | .cloneMemRefs(MI); |
1513 | |
1514 | if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI)) |
1515 | return false; |
1516 | |
1517 | bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI); |
1518 | MI.eraseFromParent(); |
1519 | return Ret; |
1520 | } |
1521 | |
1522 | static unsigned gwsIntrinToOpcode(unsigned IntrID) { |
1523 | switch (IntrID) { |
1524 | case Intrinsic::amdgcn_ds_gws_init: |
1525 | return AMDGPU::DS_GWS_INIT; |
1526 | case Intrinsic::amdgcn_ds_gws_barrier: |
1527 | return AMDGPU::DS_GWS_BARRIER; |
1528 | case Intrinsic::amdgcn_ds_gws_sema_v: |
1529 | return AMDGPU::DS_GWS_SEMA_V; |
1530 | case Intrinsic::amdgcn_ds_gws_sema_br: |
1531 | return AMDGPU::DS_GWS_SEMA_BR; |
1532 | case Intrinsic::amdgcn_ds_gws_sema_p: |
1533 | return AMDGPU::DS_GWS_SEMA_P; |
1534 | case Intrinsic::amdgcn_ds_gws_sema_release_all: |
1535 | return AMDGPU::DS_GWS_SEMA_RELEASE_ALL; |
1536 | default: |
1537 | llvm_unreachable("not a gws intrinsic");
1538 | } |
1539 | } |
1540 | |
1541 | bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI, |
1542 | Intrinsic::ID IID) const { |
1543 | if (IID == Intrinsic::amdgcn_ds_gws_sema_release_all && |
1544 | !STI.hasGWSSemaReleaseAll()) |
1545 | return false; |
1546 | |
1547 | // intrinsic ID, vsrc, offset |
1548 | const bool HasVSrc = MI.getNumOperands() == 3; |
1549 | assert(HasVSrc || MI.getNumOperands() == 2);
1550 | |
1551 | Register BaseOffset = MI.getOperand(HasVSrc ? 2 : 1).getReg(); |
1552 | const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI); |
1553 | if (OffsetRB->getID() != AMDGPU::SGPRRegBankID) |
1554 | return false; |
1555 | |
1556 | MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI); |
1557 | unsigned ImmOffset; |
1558 | |
1559 | MachineBasicBlock *MBB = MI.getParent(); |
1560 | const DebugLoc &DL = MI.getDebugLoc(); |
1561 | |
1562 | MachineInstr *Readfirstlane = nullptr; |
1563 | |
1564 | // If we legalized the VGPR input, strip out the readfirstlane to analyze the |
1565 | // incoming offset, in case there's an add of a constant. We'll have to put it |
1566 | // back later. |
1567 | if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) { |
1568 | Readfirstlane = OffsetDef; |
1569 | BaseOffset = OffsetDef->getOperand(1).getReg(); |
1570 | OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI); |
1571 | } |
1572 | |
1573 | if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) { |
1574 | // If we have a constant offset, try to use the 0 in m0 as the base. |
1575 | // TODO: Look into changing the default m0 initialization value. If the |
1576 | // default -1 only set the low 16-bits, we could leave it as-is and add 1 to |
1577 | // the immediate offset. |
1578 | |
1579 | ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue(); |
1580 | BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0) |
1581 | .addImm(0); |
1582 | } else { |
1583 | std::tie(BaseOffset, ImmOffset) = |
1584 | AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset, KnownBits); |
1585 | |
1586 | if (Readfirstlane) { |
1587 | // We have the constant offset now, so put the readfirstlane back on the |
1588 | // variable component. |
1589 | if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI)) |
1590 | return false; |
1591 | |
1592 | Readfirstlane->getOperand(1).setReg(BaseOffset); |
1593 | BaseOffset = Readfirstlane->getOperand(0).getReg(); |
1594 | } else { |
1595 | if (!RBI.constrainGenericRegister(BaseOffset, |
1596 | AMDGPU::SReg_32RegClass, *MRI)) |
1597 | return false; |
1598 | } |
1599 | |
1600 | Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); |
1601 | BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base) |
1602 | .addReg(BaseOffset) |
1603 | .addImm(16); |
1604 | |
1605 | BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) |
1606 | .addReg(M0Base); |
1607 | } |
1608 | |
1609 | // The resource id offset is computed as (<isa opaque base> + M0[21:16] + |
1610 | // offset field) % 64. Some versions of the programming guide omit the m0 |
1611 | // part, or claim it's from offset 0. |
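// Hedged illustration of the two paths above: if the offset operand was
// matched as a hypothetical (%base + 3), then ImmOffset = 3 goes into the
// instruction's offset field while %base is shifted left by 16 and copied
// into M0, so its low 6 bits land in M0[21:16] as described in the comment
// above.  If the whole offset folded to a constant, M0 is simply set to 0 and
// the constant itself becomes the offset field.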
1612 | auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID))); |
1613 | |
1614 | if (HasVSrc) { |
1615 | Register VSrc = MI.getOperand(1).getReg(); |
1616 | MIB.addReg(VSrc); |
1617 | |
1618 | if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI)) |
1619 | return false; |
1620 | } |
1621 | |
1622 | MIB.addImm(ImmOffset) |
1623 | .cloneMemRefs(MI); |
1624 | |
1625 | TII.enforceOperandRCAlignment(*MIB, AMDGPU::OpName::data0); |
1626 | |
1627 | MI.eraseFromParent(); |
1628 | return true; |
1629 | } |
1630 | |
1631 | bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI, |
1632 | bool IsAppend) const { |
1633 | Register PtrBase = MI.getOperand(2).getReg(); |
1634 | LLT PtrTy = MRI->getType(PtrBase); |
1635 | bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS; |
1636 | |
1637 | unsigned Offset; |
1638 | std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2)); |
1639 | |
1640 | // TODO: Should this try to look through readfirstlane like GWS? |
1641 | if (!isDSOffsetLegal(PtrBase, Offset)) { |
1642 | PtrBase = MI.getOperand(2).getReg(); |
1643 | Offset = 0; |
1644 | } |
1645 | |
1646 | MachineBasicBlock *MBB = MI.getParent(); |
1647 | const DebugLoc &DL = MI.getDebugLoc(); |
1648 | const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME; |
1649 | |
1650 | BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) |
1651 | .addReg(PtrBase); |
1652 | if (!RBI.constrainGenericRegister(PtrBase, AMDGPU::SReg_32RegClass, *MRI)) |
1653 | return false; |
1654 | |
1655 | auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg()) |
1656 | .addImm(Offset) |
1657 | .addImm(IsGDS ? -1 : 0) |
1658 | .cloneMemRefs(MI); |
1659 | MI.eraseFromParent(); |
1660 | return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); |
1661 | } |
1662 | |
1663 | bool AMDGPUInstructionSelector::selectSBarrier(MachineInstr &MI) const { |
1664 | if (TM.getOptLevel() > CodeGenOpt::None) { |
1665 | unsigned WGSize = STI.getFlatWorkGroupSizes(MF->getFunction()).second; |
1666 | if (WGSize <= STI.getWavefrontSize()) { |
1667 | MachineBasicBlock *MBB = MI.getParent(); |
1668 | const DebugLoc &DL = MI.getDebugLoc(); |
1669 | BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::WAVE_BARRIER)); |
1670 | MI.eraseFromParent(); |
1671 | return true; |
1672 | } |
1673 | } |
1674 | return selectImpl(MI, *CoverageInfo); |
1675 | } |
1676 | |
1677 | static bool parseTexFail(uint64_t TexFailCtrl, bool &TFE, bool &LWE, |
1678 | bool &IsTexFail) { |
1679 | if (TexFailCtrl) |
1680 | IsTexFail = true; |
1681 | |
1682 | TFE = (TexFailCtrl & 0x1) ? true : false; |
1683 | TexFailCtrl &= ~(uint64_t)0x1; |
1684 | LWE = (TexFailCtrl & 0x2) ? true : false; |
1685 | TexFailCtrl &= ~(uint64_t)0x2; |
1686 | |
1687 | return TexFailCtrl == 0; |
1688 | } |
1689 | |
1690 | bool AMDGPUInstructionSelector::selectImageIntrinsic( |
1691 | MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const { |
1692 | MachineBasicBlock *MBB = MI.getParent(); |
1693 | const DebugLoc &DL = MI.getDebugLoc(); |
1694 | |
1695 | const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode = |
1696 | AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode); |
1697 | |
1698 | const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim); |
1699 | unsigned IntrOpcode = Intr->BaseOpcode; |
1700 | const bool IsGFX10Plus = AMDGPU::isGFX10Plus(STI); |
1701 | const bool IsGFX11Plus = AMDGPU::isGFX11Plus(STI); |
1702 | |
1703 | const unsigned ArgOffset = MI.getNumExplicitDefs() + 1; |
1704 | |
1705 | Register VDataIn, VDataOut; |
1706 | LLT VDataTy; |
1707 | int NumVDataDwords = -1; |
1708 | bool IsD16 = MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16 || |
1709 | MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16; |
1710 | |
1711 | bool Unorm; |
1712 | if (!BaseOpcode->Sampler) |
1713 | Unorm = true; |
1714 | else |
1715 | Unorm = MI.getOperand(ArgOffset + Intr->UnormIndex).getImm() != 0; |
1716 | |
1717 | bool TFE; |
1718 | bool LWE; |
1719 | bool IsTexFail = false; |
1720 | if (!parseTexFail(MI.getOperand(ArgOffset + Intr->TexFailCtrlIndex).getImm(), |
1721 | TFE, LWE, IsTexFail)) |
1722 | return false; |
1723 | |
1724 | const int Flags = MI.getOperand(ArgOffset + Intr->NumArgs).getImm(); |
1725 | const bool IsA16 = (Flags & 1) != 0; |
1726 | const bool IsG16 = (Flags & 2) != 0; |
1727 | |
1728 | // A16 implies 16 bit gradients if subtarget doesn't support G16 |
1729 | if (IsA16 && !STI.hasG16() && !IsG16) |
1730 | return false; |
1731 | |
1732 | unsigned DMask = 0; |
1733 | unsigned DMaskLanes = 0; |
1734 | |
1735 | if (BaseOpcode->Atomic) { |
1736 | VDataOut = MI.getOperand(0).getReg(); |
1737 | VDataIn = MI.getOperand(2).getReg(); |
1738 | LLT Ty = MRI->getType(VDataIn); |
1739 | |
1740 | // Be careful to allow atomic swap on 16-bit element vectors. |
1741 | const bool Is64Bit = BaseOpcode->AtomicX2 ? |
1742 | Ty.getSizeInBits() == 128 : |
1743 | Ty.getSizeInBits() == 64; |
1744 | |
1745 | if (BaseOpcode->AtomicX2) { |
1746 | assert(MI.getOperand(3).getReg() == AMDGPU::NoRegister);
1747 | |
1748 | DMask = Is64Bit ? 0xf : 0x3; |
1749 | NumVDataDwords = Is64Bit ? 4 : 2; |
1750 | } else { |
1751 | DMask = Is64Bit ? 0x3 : 0x1; |
1752 | NumVDataDwords = Is64Bit ? 2 : 1; |
1753 | } |
1754 | } else { |
1755 | DMask = MI.getOperand(ArgOffset + Intr->DMaskIndex).getImm(); |
1756 | DMaskLanes = BaseOpcode->Gather4 ? 4 : llvm::popcount(DMask); |
1757 | |
1758 | if (BaseOpcode->Store) { |
1759 | VDataIn = MI.getOperand(1).getReg(); |
1760 | VDataTy = MRI->getType(VDataIn); |
1761 | NumVDataDwords = (VDataTy.getSizeInBits() + 31) / 32; |
1762 | } else { |
1763 | VDataOut = MI.getOperand(0).getReg(); |
1764 | VDataTy = MRI->getType(VDataOut); |
1765 | NumVDataDwords = DMaskLanes; |
1766 | |
1767 | if (IsD16 && !STI.hasUnpackedD16VMem()) |
1768 | NumVDataDwords = (DMaskLanes + 1) / 2; |
1769 | } |
1770 | } |
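// Small worked example of the dword counts above (values assumed for
// illustration): a non-atomic load with dmask = 0b0111 has DMaskLanes =
// popcount(0b0111) = 3, so it needs NumVDataDwords = 3, while the same load
// as D16 on a subtarget with packed D16 memory ops needs only
// (3 + 1) / 2 = 2 dwords.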
1771 | |
1772 | // Set G16 opcode |
1773 | if (Subtarget->hasG16() && IsG16) { |
1774 | const AMDGPU::MIMGG16MappingInfo *G16MappingInfo = |
1775 | AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode); |
1776 | assert(G16MappingInfo);
1777 | IntrOpcode = G16MappingInfo->G16; // set opcode to variant with _g16 |
1778 | } |
1779 | |
1780 | // TODO: Check this in verifier. |
1781 | assert((!IsTexFail || DMaskLanes >= 1) && "should have legalized this");
1782 | |
1783 | unsigned CPol = MI.getOperand(ArgOffset + Intr->CachePolicyIndex).getImm(); |
1784 | if (BaseOpcode->Atomic) |
1785 | CPol |= AMDGPU::CPol::GLC; // TODO no-return optimization |
1786 | if (CPol & ~AMDGPU::CPol::ALL) |
1787 | return false; |
1788 | |
1789 | int NumVAddrRegs = 0; |
1790 | int NumVAddrDwords = 0; |
1791 | for (unsigned I = Intr->VAddrStart; I < Intr->VAddrEnd; I++) { |
1792 | // Skip the $noregs and 0s inserted during legalization. |
1793 | MachineOperand &AddrOp = MI.getOperand(ArgOffset + I); |
1794 | if (!AddrOp.isReg()) |
1795 | continue; // XXX - Break? |
1796 | |
1797 | Register Addr = AddrOp.getReg(); |
1798 | if (!Addr) |
1799 | break; |
1800 | |
1801 | ++NumVAddrRegs; |
1802 | NumVAddrDwords += (MRI->getType(Addr).getSizeInBits() + 31) / 32; |
1803 | } |
1804 | |
1805 | // The legalizer preprocessed the intrinsic arguments. If we aren't using |
1806 | // NSA, these should have been packed into a single value in the first |
1807 | // address register |
1808 | const bool UseNSA = |
1809 | NumVAddrRegs != 1 && |
1810 | (STI.hasPartialNSAEncoding() ? NumVAddrDwords >= NumVAddrRegs |
1811 | : NumVAddrDwords == NumVAddrRegs); |
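// Hedged illustration of the NSA decision above: three separate 32-bit
// address registers give NumVAddrRegs = 3 and NumVAddrDwords = 3, so the NSA
// (non-sequential address) encoding can keep each address in its own VGPR;
// if the legalizer instead packed the addresses into one wide register,
// NumVAddrRegs would be 1 and the ordinary sequential-VGPR form is used.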
1812 | if (UseNSA && !STI.hasFeature(AMDGPU::FeatureNSAEncoding)) { |
1813 | LLVM_DEBUG(dbgs() << "Trying to use NSA on non-NSA target\n");
1814 | return false; |
1815 | } |
1816 | |
1817 | if (IsTexFail) |
1818 | ++NumVDataDwords; |
1819 | |
1820 | int Opcode = -1; |
1821 | if (IsGFX11Plus) { |
1822 | Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, |
1823 | UseNSA ? AMDGPU::MIMGEncGfx11NSA |
1824 | : AMDGPU::MIMGEncGfx11Default, |
1825 | NumVDataDwords, NumVAddrDwords); |
1826 | } else if (IsGFX10Plus) { |
1827 | Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, |
1828 | UseNSA ? AMDGPU::MIMGEncGfx10NSA |
1829 | : AMDGPU::MIMGEncGfx10Default, |
1830 | NumVDataDwords, NumVAddrDwords); |
1831 | } else { |
1832 | if (Subtarget->hasGFX90AInsts()) { |
1833 | Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx90a, |
1834 | NumVDataDwords, NumVAddrDwords); |
1835 | if (Opcode == -1) { |
1836 | LLVM_DEBUG(
1837 | dbgs()
1838 | << "requested image instruction is not supported on this GPU\n");
1839 | return false; |
1840 | } |
1841 | } |
1842 | if (Opcode == -1 && |
1843 | STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) |
1844 | Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8, |
1845 | NumVDataDwords, NumVAddrDwords); |
1846 | if (Opcode == -1) |
1847 | Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6, |
1848 | NumVDataDwords, NumVAddrDwords); |
1849 | } |
1850 | assert(Opcode != -1);
1851 | |
1852 | auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opcode)) |
1853 | .cloneMemRefs(MI); |
1854 | |
1855 | if (VDataOut) { |
1856 | if (BaseOpcode->AtomicX2) { |
1857 | const bool Is64 = MRI->getType(VDataOut).getSizeInBits() == 64; |
1858 | |
1859 | Register TmpReg = MRI->createVirtualRegister( |
1860 | Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass); |
1861 | unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0; |
1862 | |
1863 | MIB.addDef(TmpReg); |
1864 | if (!MRI->use_empty(VDataOut)) { |
1865 | BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), VDataOut) |
1866 | .addReg(TmpReg, RegState::Kill, SubReg); |
1867 | } |
1868 | |
1869 | } else { |
1870 | MIB.addDef(VDataOut); // vdata output |
1871 | } |
1872 | } |
1873 | |
1874 | if (VDataIn) |
1875 | MIB.addReg(VDataIn); // vdata input |
1876 | |
1877 | for (int I = 0; I != NumVAddrRegs; ++I) { |
1878 | MachineOperand &SrcOp = MI.getOperand(ArgOffset + Intr->VAddrStart + I); |
1879 | if (SrcOp.isReg()) { |
1880 | assert(SrcOp.getReg() != 0);
1881 | MIB.addReg(SrcOp.getReg()); |
1882 | } |
1883 | } |
1884 | |
1885 | MIB.addReg(MI.getOperand(ArgOffset + Intr->RsrcIndex).getReg()); |
1886 | if (BaseOpcode->Sampler) |
1887 | MIB.addReg(MI.getOperand(ArgOffset + Intr->SampIndex).getReg()); |
1888 | |
1889 | MIB.addImm(DMask); // dmask |
1890 | |
1891 | if (IsGFX10Plus) |
1892 | MIB.addImm(DimInfo->Encoding); |
1893 | MIB.addImm(Unorm); |
1894 | |
1895 | MIB.addImm(CPol); |
1896 | MIB.addImm(IsA16 && // a16 or r128 |
1897 | STI.hasFeature(AMDGPU::FeatureR128A16) ? -1 : 0); |
1898 | if (IsGFX10Plus) |
1899 | MIB.addImm(IsA16 ? -1 : 0); |
1900 | |
1901 | if (!Subtarget->hasGFX90AInsts()) { |
1902 | MIB.addImm(TFE); // tfe |
1903 | } else if (TFE) { |
1904 | LLVM_DEBUG(dbgs() << "TFE is not supported on this GPU\n");
1905 | return false; |
1906 | } |
1907 | |
1908 | MIB.addImm(LWE); // lwe |
1909 | if (!IsGFX10Plus) |
1910 | MIB.addImm(DimInfo->DA ? -1 : 0); |
1911 | if (BaseOpcode->HasD16) |
1912 | MIB.addImm(IsD16 ? -1 : 0); |
1913 | |
1914 | if (IsTexFail) { |
1915 | // An image load instruction with TFE/LWE only conditionally writes to its |
1916 | // result registers. Initialize them to zero so that we always get well |
1917 | // defined result values. |
1918 | assert(VDataOut && !VDataIn);
1919 | Register Tied = MRI->cloneVirtualRegister(VDataOut); |
1920 | Register Zero = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
1921 | BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::V_MOV_B32_e32), Zero) |
1922 | .addImm(0); |
1923 | auto Parts = TRI.getRegSplitParts(MRI->getRegClass(Tied), 4); |
1924 | if (STI.usePRTStrictNull()) { |
1925 | // With enable-prt-strict-null enabled, initialize all result registers to |
1926 | // zero. |
1927 | auto RegSeq = |
1928 | BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied); |
1929 | for (auto Sub : Parts) |
1930 | RegSeq.addReg(Zero).addImm(Sub); |
1931 | } else { |
1932 | // With enable-prt-strict-null disabled, only initialize the extra TFE/LWE |
1933 | // result register. |
1934 | Register Undef = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
1935 | BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef); |
1936 | auto RegSeq = |
1937 | BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied); |
1938 | for (auto Sub : Parts.drop_back(1)) |
1939 | RegSeq.addReg(Undef).addImm(Sub); |
1940 | RegSeq.addReg(Zero).addImm(Parts.back()); |
1941 | } |
1942 | MIB.addReg(Tied, RegState::Implicit); |
1943 | MIB->tieOperands(0, MIB->getNumOperands() - 1); |
1944 | } |
1945 | |
1946 | MI.eraseFromParent(); |
1947 | constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); |
1948 | TII.enforceOperandRCAlignment(*MIB, AMDGPU::OpName::vaddr); |
1949 | return true; |
1950 | } |
1951 | |
1952 | // We need to handle this here because tablegen doesn't support matching |
1953 | // instructions with multiple outputs. |
1954 | bool AMDGPUInstructionSelector::selectDSBvhStackIntrinsic( |
1955 | MachineInstr &MI) const { |
1956 | Register Dst0 = MI.getOperand(0).getReg(); |
1957 | Register Dst1 = MI.getOperand(1).getReg(); |
1958 | |
1959 | const DebugLoc &DL = MI.getDebugLoc(); |
1960 | MachineBasicBlock *MBB = MI.getParent(); |
1961 | |
1962 | Register Addr = MI.getOperand(3).getReg(); |
1963 | Register Data0 = MI.getOperand(4).getReg(); |
1964 | Register Data1 = MI.getOperand(5).getReg(); |
1965 | unsigned Offset = MI.getOperand(6).getImm(); |
1966 | |
1967 | auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_BVH_STACK_RTN_B32), Dst0) |
1968 | .addDef(Dst1) |
1969 | .addUse(Addr) |
1970 | .addUse(Data0) |
1971 | .addUse(Data1) |
1972 | .addImm(Offset) |
1973 | .cloneMemRefs(MI); |
1974 | |
1975 | MI.eraseFromParent(); |
1976 | return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); |
1977 | } |
1978 | |
1979 | bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS( |
1980 | MachineInstr &I) const { |
1981 | unsigned IntrinsicID = I.getIntrinsicID(); |
1982 | switch (IntrinsicID) { |
1983 | case Intrinsic::amdgcn_end_cf: |
1984 | return selectEndCfIntrinsic(I); |
1985 | case Intrinsic::amdgcn_ds_ordered_add: |
1986 | case Intrinsic::amdgcn_ds_ordered_swap: |
1987 | return selectDSOrderedIntrinsic(I, IntrinsicID); |
1988 | case Intrinsic::amdgcn_ds_gws_init: |
1989 | case Intrinsic::amdgcn_ds_gws_barrier: |
1990 | case Intrinsic::amdgcn_ds_gws_sema_v: |
1991 | case Intrinsic::amdgcn_ds_gws_sema_br: |
1992 | case Intrinsic::amdgcn_ds_gws_sema_p: |
1993 | case Intrinsic::amdgcn_ds_gws_sema_release_all: |
1994 | return selectDSGWSIntrinsic(I, IntrinsicID); |
1995 | case Intrinsic::amdgcn_ds_append: |
1996 | return selectDSAppendConsume(I, true); |
1997 | case Intrinsic::amdgcn_ds_consume: |
1998 | return selectDSAppendConsume(I, false); |
1999 | case Intrinsic::amdgcn_s_barrier: |
2000 | return selectSBarrier(I); |
2001 | case Intrinsic::amdgcn_raw_buffer_load_lds: |
2002 | case Intrinsic::amdgcn_struct_buffer_load_lds: |
2003 | return selectBufferLoadLds(I); |
2004 | case Intrinsic::amdgcn_global_load_lds: |
2005 | return selectGlobalLoadLds(I); |
2006 | case Intrinsic::amdgcn_exp_compr: |
2007 | if (!STI.hasCompressedExport()) { |
2008 | Function &F = I.getMF()->getFunction(); |
2009 | DiagnosticInfoUnsupported NoFpRet( |
2010 | F, "intrinsic not supported on subtarget", I.getDebugLoc(), DS_Error); |
2011 | F.getContext().diagnose(NoFpRet); |
2012 | return false; |
2013 | } |
2014 | break; |
2015 | case Intrinsic::amdgcn_ds_bvh_stack_rtn: |
2016 | return selectDSBvhStackIntrinsic(I); |
2017 | } |
2018 | return selectImpl(I, *CoverageInfo); |
2019 | } |
2020 | |
2021 | bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const { |
2022 | if (selectImpl(I, *CoverageInfo)) |
2023 | return true; |
2024 | |
2025 | MachineBasicBlock *BB = I.getParent(); |
2026 | const DebugLoc &DL = I.getDebugLoc(); |
2027 | |
2028 | Register DstReg = I.getOperand(0).getReg(); |
2029 | unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI); |
2030 | assert(Size <= 32 || Size == 64);
2031 | const MachineOperand &CCOp = I.getOperand(1); |
2032 | Register CCReg = CCOp.getReg(); |
2033 | if (!isVCC(CCReg, *MRI)) { |
2034 | unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 : |
2035 | AMDGPU::S_CSELECT_B32; |
2036 | MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC) |
2037 | .addReg(CCReg); |
2038 | |
2039 | // The generic constrainSelectedInstRegOperands doesn't work for the scc register
2040 | // bank, because it does not cover the register class that we use to represent
2041 | // it. So we need to manually set the register class here.
2042 | if (!MRI->getRegClassOrNull(CCReg)) |
2043 | MRI->setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, *MRI)); |
2044 | MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg) |
2045 | .add(I.getOperand(2)) |
2046 | .add(I.getOperand(3)); |
2047 | |
2048 | bool Ret = false; |
2049 | Ret |= constrainSelectedInstRegOperands(*Select, TII, TRI, RBI); |
2050 | Ret |= constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI); |
2051 | I.eraseFromParent(); |
2052 | return Ret; |
2053 | } |
2054 | |
2055 | // Wide VGPR select should have been split in RegBankSelect. |
2056 | if (Size > 32) |
2057 | return false; |
2058 | |
2059 | MachineInstr *Select = |
2060 | BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg) |
2061 | .addImm(0) |
2062 | .add(I.getOperand(3)) |
2063 | .addImm(0) |
2064 | .add(I.getOperand(2)) |
2065 | .add(I.getOperand(1)); |
2066 | |
2067 | bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI); |
2068 | I.eraseFromParent(); |
2069 | return Ret; |
2070 | } |
2071 | |
2072 | static int sizeToSubRegIndex(unsigned Size) { |
2073 | switch (Size) { |
2074 | case 32: |
2075 | return AMDGPU::sub0; |
2076 | case 64: |
2077 | return AMDGPU::sub0_sub1; |
2078 | case 96: |
2079 | return AMDGPU::sub0_sub1_sub2; |
2080 | case 128: |
2081 | return AMDGPU::sub0_sub1_sub2_sub3; |
2082 | case 256: |
2083 | return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7; |
2084 | default: |
2085 | if (Size < 32) |
2086 | return AMDGPU::sub0; |
2087 | if (Size > 256) |
2088 | return -1; |
2089 | return sizeToSubRegIndex(llvm::bit_ceil(Size)); |
2090 | } |
2091 | } |
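// A few sample mappings for the helper above (illustrative only): Size = 16
// falls below 32 and maps to sub0; Size = 48 is rounded up to bit_ceil(48) =
// 64 and maps to sub0_sub1; Size = 512 exceeds 256 and yields -1.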
2092 | |
2093 | bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const { |
2094 | Register DstReg = I.getOperand(0).getReg(); |
2095 | Register SrcReg = I.getOperand(1).getReg(); |
2096 | const LLT DstTy = MRI->getType(DstReg); |
2097 | const LLT SrcTy = MRI->getType(SrcReg); |
2098 | const LLT S1 = LLT::scalar(1); |
2099 | |
2100 | const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI); |
2101 | const RegisterBank *DstRB; |
2102 | if (DstTy == S1) { |
2103 | // This is a special case. We don't treat s1 for legalization artifacts as |
2104 | // vcc booleans. |
2105 | DstRB = SrcRB; |
2106 | } else { |
2107 | DstRB = RBI.getRegBank(DstReg, *MRI, TRI); |
2108 | if (SrcRB != DstRB) |
2109 | return false; |
2110 | } |
2111 | |
2112 | const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID; |
2113 | |
2114 | unsigned DstSize = DstTy.getSizeInBits(); |
2115 | unsigned SrcSize = SrcTy.getSizeInBits(); |
2116 | |
2117 | const TargetRegisterClass *SrcRC = |
2118 | TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB); |
2119 | const TargetRegisterClass *DstRC = |
2120 | TRI.getRegClassForSizeOnBank(DstSize, *DstRB); |
2121 | if (!SrcRC || !DstRC) |
2122 | return false; |
2123 | |
2124 | if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) || |
2125 | !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) { |
2126 | LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
2127 | return false; |
2128 | } |
2129 | |
2130 | if (DstTy == LLT::fixed_vector(2, 16) && SrcTy == LLT::fixed_vector(2, 32)) { |
2131 | MachineBasicBlock *MBB = I.getParent(); |
2132 | const DebugLoc &DL = I.getDebugLoc(); |
2133 | |
2134 | Register LoReg = MRI->createVirtualRegister(DstRC); |
2135 | Register HiReg = MRI->createVirtualRegister(DstRC); |
2136 | BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), LoReg) |
2137 | .addReg(SrcReg, 0, AMDGPU::sub0); |
2138 | BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), HiReg) |
2139 | .addReg(SrcReg, 0, AMDGPU::sub1); |
2140 | |
2141 | if (IsVALU && STI.hasSDWA()) { |
2142 | // Write the low 16-bits of the high element into the high 16-bits of the |
2143 | // low element. |
2144 | MachineInstr *MovSDWA = |
2145 | BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg) |
2146 | .addImm(0) // $src0_modifiers |
2147 | .addReg(HiReg) // $src0 |
2148 | .addImm(0) // $clamp |
2149 | .addImm(AMDGPU::SDWA::WORD_1) // $dst_sel |
2150 | .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused |
2151 | .addImm(AMDGPU::SDWA::WORD_0) // $src0_sel |
2152 | .addReg(LoReg, RegState::Implicit); |
2153 | MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1); |
2154 | } else { |
2155 | Register TmpReg0 = MRI->createVirtualRegister(DstRC); |
2156 | Register TmpReg1 = MRI->createVirtualRegister(DstRC); |
2157 | Register ImmReg = MRI->createVirtualRegister(DstRC); |
2158 | if (IsVALU) { |
2159 | BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), TmpReg0) |
2160 | .addImm(16) |
2161 | .addReg(HiReg); |
2162 | } else { |
2163 | BuildMI(*MBB, I, DL, TII.get(AMDGPU::S_LSHL_B32), TmpReg0) |
2164 | .addReg(HiReg) |
2165 | .addImm(16); |
2166 | } |
2167 | |
2168 | unsigned MovOpc = IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32; |
2169 | unsigned AndOpc = IsVALU ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32; |
2170 | unsigned OrOpc = IsVALU ? AMDGPU::V_OR_B32_e64 : AMDGPU::S_OR_B32; |
2171 | |
2172 | BuildMI(*MBB, I, DL, TII.get(MovOpc), ImmReg) |
2173 | .addImm(0xffff); |
2174 | BuildMI(*MBB, I, DL, TII.get(AndOpc), TmpReg1) |
2175 | .addReg(LoReg) |
2176 | .addReg(ImmReg); |
2177 | BuildMI(*MBB, I, DL, TII.get(OrOpc), DstReg) |
2178 | .addReg(TmpReg0) |
2179 | .addReg(TmpReg1); |
2180 | } |
2181 | |
2182 | I.eraseFromParent(); |
2183 | return true; |
2184 | } |
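// Worked example of the non-SDWA packing above, with assumed element values
// 0x00012222 (lo) and 0x00034444 (hi):
//   TmpReg0 = 0x00034444 << 16    = 0x44440000
//   TmpReg1 = 0x00012222 & 0xffff = 0x00002222
//   DstReg  = TmpReg0 | TmpReg1   = 0x44442222
// i.e. the two truncated 16-bit halves packed into one 32-bit register; the
// SDWA path produces the same packing with a single V_MOV_B32_sdwa.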
2185 | |
2186 | if (!DstTy.isScalar()) |
2187 | return false; |
2188 | |
2189 | if (SrcSize > 32) { |
2190 | int SubRegIdx = sizeToSubRegIndex(DstSize); |
2191 | if (SubRegIdx == -1) |
2192 | return false; |
2193 | |
2194 | // Deal with weird cases where the class only partially supports the subreg |
2195 | // index. |
2196 | const TargetRegisterClass *SrcWithSubRC |
2197 | = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx); |
2198 | if (!SrcWithSubRC) |
2199 | return false; |
2200 | |
2201 | if (SrcWithSubRC != SrcRC) { |
2202 | if (!RBI.constrainGenericRegister(SrcReg, *SrcWithSubRC, *MRI)) |
2203 | return false; |
2204 | } |
2205 | |
2206 | I.getOperand(1).setSubReg(SubRegIdx); |
2207 | } |
2208 | |
2209 | I.setDesc(TII.get(TargetOpcode::COPY)); |
2210 | return true; |
2211 | } |
2212 | |
2213 | /// \returns true if a bitmask for \p Size bits will be an inline immediate. |
2214 | static bool shouldUseAndMask(unsigned Size, unsigned &Mask) { |
2215 | Mask = maskTrailingOnes<unsigned>(Size); |
2216 | int SignedMask = static_cast<int>(Mask); |
2217 | return SignedMask >= -16 && SignedMask <= 64; |
2218 | } |
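// Illustration of which sizes pass the check above: sizes 1..6 give masks
// 0x1..0x3f, whose signed values lie within [-16, 64], so they are inline
// immediates; size 16 gives 0xffff = 65535, which is not; size 32 gives
// 0xffffffff, which is -1 when reinterpreted as signed and therefore inline
// again.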
2219 | |
2220 | // Like RegisterBankInfo::getRegBank, but don't assume vcc for s1. |
2221 | const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank( |
2222 | Register Reg, const MachineRegisterInfo &MRI, |
2223 | const TargetRegisterInfo &TRI) const { |
2224 | const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg); |
2225 | if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>()) |
2226 | return RB; |
2227 | |
2228 | // Ignore the type, since we don't use vcc in artifacts. |
2229 | if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>()) |
2230 | return &RBI.getRegBankFromRegClass(*RC, LLT()); |
2231 | return nullptr; |
2232 | } |
2233 | |
2234 | bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const { |
2235 | bool InReg = I.getOpcode() == AMDGPU::G_SEXT_INREG; |
2236 | bool Signed = I.getOpcode() == AMDGPU::G_SEXT || InReg; |
2237 | const DebugLoc &DL = I.getDebugLoc(); |
2238 | MachineBasicBlock &MBB = *I.getParent(); |
2239 | const Register DstReg = I.getOperand(0).getReg(); |
2240 | const Register SrcReg = I.getOperand(1).getReg(); |
2241 | |
2242 | const LLT DstTy = MRI->getType(DstReg); |
2243 | const LLT SrcTy = MRI->getType(SrcReg); |
2244 | const unsigned SrcSize = I.getOpcode() == AMDGPU::G_SEXT_INREG ? |
2245 | I.getOperand(2).getImm() : SrcTy.getSizeInBits(); |
2246 | const unsigned DstSize = DstTy.getSizeInBits(); |
2247 | if (!DstTy.isScalar()) |
2248 | return false; |
2249 | |
2250 | // Artifact casts should never use vcc. |
2251 | const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI); |
2252 | |
2253 | // FIXME: This should probably be illegal and split earlier. |
2254 | if (I.getOpcode() == AMDGPU::G_ANYEXT) { |
2255 | if (DstSize <= 32) |
2256 | return selectCOPY(I); |
2257 | |
2258 | const TargetRegisterClass *SrcRC = |
2259 | TRI.getRegClassForTypeOnBank(SrcTy, *SrcBank); |
2260 | const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI); |
2261 | const TargetRegisterClass *DstRC = |
2262 | TRI.getRegClassForSizeOnBank(DstSize, *DstBank); |
2263 | |
2264 | Register UndefReg = MRI->createVirtualRegister(SrcRC); |
2265 | BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg); |
2266 | BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg) |
2267 | .addReg(SrcReg) |
2268 | .addImm(AMDGPU::sub0) |
2269 | .addReg(UndefReg) |
2270 | .addImm(AMDGPU::sub1); |
2271 | I.eraseFromParent(); |
2272 | |
2273 | return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) && |
2274 | RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI); |
2275 | } |
2276 | |
2277 | if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) { |
2278 | // 64-bit should have been split up in RegBankSelect |
2279 | |
2280 | // Try to use an and with a mask if it will save code size. |
2281 | unsigned Mask; |
2282 | if (!Signed && shouldUseAndMask(SrcSize, Mask)) { |
2283 | MachineInstr *ExtI = |
2284 | BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg) |
2285 | .addImm(Mask) |
2286 | .addReg(SrcReg); |
2287 | I.eraseFromParent(); |
2288 | return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI); |
2289 | } |
2290 | |
2291 | const unsigned BFE = Signed ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64; |
2292 | MachineInstr *ExtI = |
2293 | BuildMI(MBB, I, DL, TII.get(BFE), DstReg) |
2294 | .addReg(SrcReg) |
2295 | .addImm(0) // Offset |
2296 | .addImm(SrcSize); // Width |
2297 | I.eraseFromParent(); |
2298 | return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI); |
2299 | } |
2300 | |
2301 | if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) { |
2302 | const TargetRegisterClass &SrcRC = InReg && DstSize > 32 ? |
2303 | AMDGPU::SReg_64RegClass : AMDGPU::SReg_32RegClass; |
2304 | if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI)) |
2305 | return false; |
2306 | |
2307 | if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) { |
2308 | const unsigned SextOpc = SrcSize == 8 ? |
2309 | AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16; |
2310 | BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg) |
2311 | .addReg(SrcReg); |
2312 | I.eraseFromParent(); |
2313 | return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI); |
2314 | } |
2315 | |
2316 | // Using a single 32-bit SALU to calculate the high half is smaller than |
2317 | // S_BFE with a literal constant operand. |
2318 | if (DstSize > 32 && SrcSize == 32) { |
2319 | Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); |
2320 | unsigned SubReg = InReg ? AMDGPU::sub0 : AMDGPU::NoSubRegister; |
2321 | if (Signed) { |
2322 | BuildMI(MBB, I, DL, TII.get(AMDGPU::S_ASHR_I32), HiReg) |
2323 | .addReg(SrcReg, 0, SubReg) |
2324 | .addImm(31); |
2325 | } else { |
2326 | BuildMI(MBB, I, DL, TII.get(AMDGPU::S_MOV_B32), HiReg) |
2327 | .addImm(0); |
2328 | } |
2329 | BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg) |
2330 | .addReg(SrcReg, 0, SubReg) |
2331 | .addImm(AMDGPU::sub0) |
2332 | .addReg(HiReg) |
2333 | .addImm(AMDGPU::sub1); |
2334 | I.eraseFromParent(); |
2335 | return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, |
2336 | *MRI); |
2337 | } |
2338 | |
2339 | const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64; |
2340 | const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32; |
2341 | |
2342 | // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16] = width.
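// For example (sketch with an assumed source width): a sign extension from
// 16 bits uses offset 0 and width 16, so the encoded operand below is
// SrcSize << 16 = 0x100000.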
2343 | if (DstSize > 32 && (SrcSize <= 32 || InReg)) { |
2344 | // We need a 64-bit register source, but the high bits don't matter. |
2345 | Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass); |
2346 | Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); |
2347 | unsigned SubReg = InReg ? AMDGPU::sub0 : AMDGPU::NoSubRegister; |
2348 | |
2349 | BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg); |
2350 | BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg) |
2351 | .addReg(SrcReg, 0, SubReg) |
2352 | .addImm(AMDGPU::sub0) |
2353 | .addReg(UndefReg) |
2354 | .addImm(AMDGPU::sub1); |
2355 | |
2356 | BuildMI(MBB, I, DL, TII.get(BFE64), DstReg) |
2357 | .addReg(ExtReg) |
2358 | .addImm(SrcSize << 16); |
2359 | |
2360 | I.eraseFromParent(); |
2361 | return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI); |
2362 | } |
2363 | |
2364 | unsigned Mask; |
2365 | if (!Signed && shouldUseAndMask(SrcSize, Mask)) { |
2366 | BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg) |
2367 | .addReg(SrcReg) |
2368 | .addImm(Mask); |
2369 | } else { |
2370 | BuildMI(MBB, I, DL, TII.get(BFE32), DstReg) |
2371 | .addReg(SrcReg) |
2372 | .addImm(SrcSize << 16); |
2373 | } |
2374 | |
2375 | I.eraseFromParent(); |
2376 | return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI); |
2377 | } |
2378 | |
2379 | return false; |
2380 | } |
2381 | |
2382 | bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const { |
2383 | MachineBasicBlock *BB = I.getParent(); |
2384 | MachineOperand &ImmOp = I.getOperand(1); |
2385 | Register DstReg = I.getOperand(0).getReg(); |
2386 | unsigned Size = MRI->getType(DstReg).getSizeInBits(); |
2387 | |
2388 | // The AMDGPU backend only supports Imm operands and not CImm or FPImm. |
2389 | if (ImmOp.isFPImm()) { |
2390 | const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt(); |
2391 | ImmOp.ChangeToImmediate(Imm.getZExtValue()); |
2392 | } else if (ImmOp.isCImm()) { |
2393 | ImmOp.ChangeToImmediate(ImmOp.getCImm()->getSExtValue()); |
2394 | } else { |
2395 | llvm_unreachable("Not supported by g_constants");
2396 | } |
2397 | |
2398 | const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); |
2399 | const bool IsSgpr = DstRB->getID() == AMDGPU::SGPRRegBankID; |
2400 | |
2401 | unsigned Opcode; |
2402 | if (DstRB->getID() == AMDGPU::VCCRegBankID) { |
2403 | Opcode = STI.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64; |
2404 | } else { |
2405 | Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32; |
2406 | |
2407 | // We should never produce s1 values on banks other than VCC. If the user of |
2408 | // this already constrained the register, we may incorrectly think it's VCC |
2409 | // if it wasn't originally. |
2410 | if (Size == 1) |
2411 | return false; |
2412 | } |
2413 | |
2414 | if (Size != 64) { |
2415 | I.setDesc(TII.get(Opcode)); |
2416 | I.addImplicitDefUseOperands(*MF); |
2417 | return constrainSelectedInstRegOperands(I, TII, TRI, RBI); |
2418 | } |
2419 | |
2420 | const DebugLoc &DL = I.getDebugLoc(); |
2421 | |
2422 | APInt Imm(Size, I.getOperand(1).getImm()); |
2423 | |
2424 | MachineInstr *ResInst; |
2425 | if (IsSgpr && TII.isInlineConstant(Imm)) { |
2426 | ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg) |
2427 | .addImm(I.getOperand(1).getImm()); |
2428 | } else { |
2429 | const TargetRegisterClass *RC = IsSgpr ? |
2430 | &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass; |
2431 | Register LoReg = MRI->createVirtualRegister(RC); |
2432 | Register HiReg = MRI->createVirtualRegister(RC); |
2433 | |
2434 | BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg) |
2435 | .addImm(Imm.trunc(32).getZExtValue()); |
2436 | |
2437 | BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg) |
2438 | .addImm(Imm.ashr(32).getZExtValue()); |
2439 | |
2440 | ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg) |
2441 | .addReg(LoReg) |
2442 | .addImm(AMDGPU::sub0) |
2443 | .addReg(HiReg) |
2444 | .addImm(AMDGPU::sub1); |
2445 | } |
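// Worked example of the 64-bit split above, for the assumed non-inline
// constant 0x123456789:
//   LoReg = MOV 0x23456789   (Imm.trunc(32))
//   HiReg = MOV 0x1          (Imm.ashr(32))
// followed by the REG_SEQUENCE placing LoReg in sub0 and HiReg in sub1.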
2446 | |
2447 | // We can't call constrainSelectedInstRegOperands here, because it doesn't |
2448 | // work for target independent opcodes |
2449 | I.eraseFromParent(); |
2450 | const TargetRegisterClass *DstRC = |
2451 | TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI); |
2452 | if (!DstRC) |
2453 | return true; |
2454 | return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI); |
2455 | } |
2456 | |
2457 | bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const { |
2458 | // Only manually handle the f64 SGPR case. |
2459 | // |
2460 | // FIXME: This is a workaround for 2.5 different tablegen problems. Because |
2461 | // the bit ops theoretically have a second result due to the implicit def of |
2462 | // SCC, the GlobalISelEmitter is overly conservative and rejects it. Fixing |
2463 | // that is easy by disabling the check. The result works, but uses a |
2464 | // nonsensical sreg32orlds_and_sreg_1 regclass. |
2465 | // |
2466 | // The DAG emitter is more problematic, and incorrectly adds both results of the
2467 | // S_XOR_B32 to the variadic REG_SEQUENCE operands.
2468 | |
2469 | Register Dst = MI.getOperand(0).getReg(); |
2470 | const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI); |
2471 | if (DstRB->getID() != AMDGPU::SGPRRegBankID || |
2472 | MRI->getType(Dst) != LLT::scalar(64)) |
2473 | return false; |
2474 | |
2475 | Register Src = MI.getOperand(1).getReg(); |
2476 | MachineInstr *Fabs = getOpcodeDef(TargetOpcode::G_FABS, Src, *MRI); |
2477 | if (Fabs) |
2478 | Src = Fabs->getOperand(1).getReg(); |
2479 | |
2480 | if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) || |
2481 | !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI)) |
2482 | return false; |
2483 | |
2484 | MachineBasicBlock *BB = MI.getParent(); |
2485 | const DebugLoc &DL = MI.getDebugLoc(); |
2486 | Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); |
2487 | Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); |
2488 | Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); |
2489 | Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); |
2490 | |
2491 | BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg) |
2492 | .addReg(Src, 0, AMDGPU::sub0); |
2493 | BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg) |
2494 | .addReg(Src, 0, AMDGPU::sub1); |
2495 | BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg) |
2496 | .addImm(0x80000000); |
2497 | |
2498 | // Set or toggle sign bit. |
2499 | unsigned Opc = Fabs ? AMDGPU::S_OR_B32 : AMDGPU::S_XOR_B32; |
2500 | BuildMI(*BB, &MI, DL, TII.get(Opc), OpReg) |
2501 | .addReg(HiReg) |
2502 | .addReg(ConstReg); |
2503 | BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst) |
2504 | .addReg(LoReg) |
2505 | .addImm(AMDGPU::sub0) |
2506 | .addReg(OpReg) |
2507 | .addImm(AMDGPU::sub1); |
2508 | MI.eraseFromParent(); |
2509 | return true; |
2510 | } |
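// In short (hedged restatement of the code above): an SGPR f64 fneg only
// needs to flip the sign bit, so the high 32-bit half is XORed with
// 0x80000000 and the pair is reassembled; when a G_FABS feeds the fneg, the
// combined fneg(fabs(x)) becomes an OR with the same constant instead.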
2511 | |
2512 | // FIXME: This is a workaround for the same tablegen problems as G_FNEG |
2513 | bool AMDGPUInstructionSelector::selectG_FABS(MachineInstr &MI) const { |
2514 | Register Dst = MI.getOperand(0).getReg(); |
2515 | const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI); |
2516 | if (DstRB->getID() != AMDGPU::SGPRRegBankID || |
2517 | MRI->getType(Dst) != LLT::scalar(64)) |
2518 | return false; |
2519 | |
2520 | Register Src = MI.getOperand(1).getReg(); |
2521 | MachineBasicBlock *BB = MI.getParent(); |
2522 | const DebugLoc &DL = MI.getDebugLoc(); |
2523 | Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); |
2524 | Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); |
2525 | Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); |
2526 | Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); |
2527 | |
2528 | if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) || |
2529 | !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI)) |
2530 | return false; |
2531 | |
2532 | BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg) |
2533 | .addReg(Src, 0, AMDGPU::sub0); |
2534 | BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg) |
2535 | .addReg(Src, 0, AMDGPU::sub1); |
2536 | BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg) |
2537 | .addImm(0x7fffffff); |
2538 | |
2539 | // Clear sign bit. |
2540 | // TODO: Should this use S_BITSET0_*?
2541 | BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_AND_B32), OpReg) |
2542 | .addReg(HiReg) |
2543 | .addReg(ConstReg); |
2544 | BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst) |
2545 | .addReg(LoReg) |
2546 | .addImm(AMDGPU::sub0) |
2547 | .addReg(OpReg) |
2548 | .addImm(AMDGPU::sub1); |
2549 | |
2550 | MI.eraseFromParent(); |
2551 | return true; |
2552 | } |
2553 | |
2554 | static bool isConstant(const MachineInstr &MI) { |
2555 | return MI.getOpcode() == TargetOpcode::G_CONSTANT; |
2556 | } |
2557 | |
2558 | void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load, |
2559 | const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const { |
2560 | |
2561 | const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg()); |
2562 | |
2563 | assert(PtrMI);
2564 | |
2565 | if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD) |
2566 | return; |
2567 | |
2568 | GEPInfo GEPInfo; |
2569 | |
2570 | for (unsigned i = 1; i != 3; ++i) { |
2571 | const MachineOperand &GEPOp = PtrMI->getOperand(i); |
2572 | const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg()); |
2573 | assert(OpDef);
2574 | if (i == 2 && isConstant(*OpDef)) { |
2575 | // TODO: Could handle constant base + variable offset, but a combine |
2576 | // probably should have commuted it. |
2577 | assert(GEPInfo.Imm == 0);
2578 | GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue(); |
2579 | continue; |
2580 | } |
2581 | const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI); |
2582 | if (OpBank->getID() == AMDGPU::SGPRRegBankID) |
2583 | GEPInfo.SgprParts.push_back(GEPOp.getReg()); |
2584 | else |
2585 | GEPInfo.VgprParts.push_back(GEPOp.getReg()); |
2586 | } |
2587 | |
2588 | AddrInfo.push_back(GEPInfo); |
2589 | getAddrModeInfo(*PtrMI, MRI, AddrInfo); |
2590 | } |
2591 | |
2592 | bool AMDGPUInstructionSelector::isSGPR(Register Reg) const { |
2593 | return RBI.getRegBank(Reg, *MRI, TRI)->getID() == AMDGPU::SGPRRegBankID; |
2594 | } |
2595 | |
2596 | bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const { |
2597 | if (!MI.hasOneMemOperand()) |
2598 | return false; |
2599 | |
2600 | const MachineMemOperand *MMO = *MI.memoperands_begin(); |
2601 | const Value *Ptr = MMO->getValue(); |
2602 | |
2603 | // UndefValue means this is a load of a kernel input. These are uniform. |
2604 | // Sometimes LDS instructions have constant pointers. |
2605 | // If Ptr is null, then that means this mem operand contains a |
2606 | // PseudoSourceValue like GOT. |
2607 | if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) || |
2608 | isa<Constant>(Ptr) || isa<GlobalValue>(Ptr)) |
2609 | return true; |
2610 | |
2611 | if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) |
2612 | return true; |
2613 | |
2614 | const Instruction *I = dyn_cast<Instruction>(Ptr); |
2615 | return I && I->getMetadata("amdgpu.uniform"); |
2616 | } |
2617 | |
2618 | bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const { |
2619 | for (const GEPInfo &GEPInfo : AddrInfo) { |
2620 | if (!GEPInfo.VgprParts.empty()) |
2621 | return true; |
2622 | } |
2623 | return false; |
2624 | } |
2625 | |
2626 | void AMDGPUInstructionSelector::initM0(MachineInstr &I) const { |
2627 | const LLT PtrTy = MRI->getType(I.getOperand(1).getReg()); |
2628 | unsigned AS = PtrTy.getAddressSpace(); |
2629 | if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) && |
2630 | STI.ldsRequiresM0Init()) { |
2631 | MachineBasicBlock *BB = I.getParent(); |
2632 | |
2633 | // If DS instructions require M0 initialization, insert it before selecting. |
2634 | BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0) |
2635 | .addImm(-1); |
2636 | } |
2637 | } |
2638 | |
2639 | bool AMDGPUInstructionSelector::selectG_LOAD_STORE_ATOMICRMW( |
2640 | MachineInstr &I) const { |
2641 | initM0(I); |
2642 | return selectImpl(I, *CoverageInfo); |
2643 | } |
2644 | |
2645 | static bool isVCmpResult(Register Reg, MachineRegisterInfo &MRI) { |
2646 | if (Reg.isPhysical()) |
2647 | return false; |
2648 | |
2649 | MachineInstr &MI = *MRI.getUniqueVRegDef(Reg); |
2650 | const unsigned Opcode = MI.getOpcode(); |
2651 | |
2652 | if (Opcode == AMDGPU::COPY) |
2653 | return isVCmpResult(MI.getOperand(1).getReg(), MRI); |
2654 | |
2655 | if (Opcode == AMDGPU::G_AND || Opcode == AMDGPU::G_OR || |
2656 | Opcode == AMDGPU::G_XOR) |
2657 | return isVCmpResult(MI.getOperand(1).getReg(), MRI) && |
2658 | isVCmpResult(MI.getOperand(2).getReg(), MRI); |
2659 | |
2660 | if (Opcode == TargetOpcode::G_INTRINSIC) |
2661 | return MI.getIntrinsicID() == Intrinsic::amdgcn_class; |
2662 | |
2663 | return Opcode == AMDGPU::G_ICMP || Opcode == AMDGPU::G_FCMP; |
2664 | } |
2665 | |
2666 | bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const { |
2667 | MachineBasicBlock *BB = I.getParent(); |
2668 | MachineOperand &CondOp = I.getOperand(0); |
2669 | Register CondReg = CondOp.getReg(); |
2670 | const DebugLoc &DL = I.getDebugLoc(); |
2671 | |
2672 | unsigned BrOpcode; |
2673 | Register CondPhysReg; |
2674 | const TargetRegisterClass *ConstrainRC; |
2675 | |
2676 | // In SelectionDAG, we inspect the IR block for uniformity metadata to decide |
2677 | // whether the branch is uniform when selecting the instruction. In |
2678 | // GlobalISel, we should push that decision into RegBankSelect. Assume for now |
2679 | // RegBankSelect knows what it's doing if the branch condition is scc, even |
2680 | // though it currently does not. |
2681 | if (!isVCC(CondReg, *MRI)) { |
2682 | if (MRI->getType(CondReg) != LLT::scalar(32)) |
2683 | return false; |
2684 | |
2685 | CondPhysReg = AMDGPU::SCC; |
2686 | BrOpcode = AMDGPU::S_CBRANCH_SCC1; |
2687 | ConstrainRC = &AMDGPU::SReg_32RegClass; |
2688 | } else { |
2689 | // FIXME: Should scc->vcc copies be ANDed with exec? |
2690 | |
2691 | // Unless the value of CondReg is the result of a V_CMP* instruction, we |
2692 | // need to insert an AND with exec. |
2693 | if (!isVCmpResult(CondReg, *MRI)) { |
2694 | const bool Is64 = STI.isWave64(); |
2695 | const unsigned Opcode = Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32; |
2696 | const Register Exec = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO; |
2697 | |
2698 | Register TmpReg = MRI->createVirtualRegister(TRI.getBoolRC()); |
2699 | BuildMI(*BB, &I, DL, TII.get(Opcode), TmpReg) |
2700 | .addReg(CondReg) |
2701 | .addReg(Exec); |
2702 | CondReg = TmpReg; |
2703 | } |
2704 | |
2705 | CondPhysReg = TRI.getVCC(); |
2706 | BrOpcode = AMDGPU::S_CBRANCH_VCCNZ; |
2707 | ConstrainRC = TRI.getBoolRC(); |
2708 | } |
2709 | |
2710 | if (!MRI->getRegClassOrNull(CondReg)) |
2711 | MRI->setRegClass(CondReg, ConstrainRC); |
2712 | |
2713 | BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg) |
2714 | .addReg(CondReg); |
2715 | BuildMI(*BB, &I, DL, TII.get(BrOpcode)) |
2716 | .addMBB(I.getOperand(1).getMBB()); |
2717 | |
2718 | I.eraseFromParent(); |
2719 | return true; |
2720 | } |
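// Illustrative note (editorial, not part of the original source): for a |
// divergent condition that is not directly a compare result, e.g. |
//   %c:vcc(s1) = G_TRUNC %x:vgpr(s32) |
//   G_BRCOND %c, %bb.1 |
// isVCmpResult() returns false, so the selected sequence masks the lane |
// mask with exec before branching, roughly: |
//   %tmp:sreg_64 = S_AND_B64 %c, $exec   ; S_AND_B32 with exec_lo on wave32 |
//   $vcc = COPY %tmp |
//   S_CBRANCH_VCCNZ %bb.1 |
// whereas a condition produced by G_ICMP/G_FCMP is copied to vcc directly. |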
2721 | |
2722 | bool AMDGPUInstructionSelector::selectG_GLOBAL_VALUE( |
2723 | MachineInstr &I) const { |
2724 | Register DstReg = I.getOperand(0).getReg(); |
2725 | const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); |
2726 | const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID; |
2727 | I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32)); |
2728 | if (IsVGPR) |
2729 | I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true)); |
2730 | |
2731 | return RBI.constrainGenericRegister( |
2732 | DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI); |
2733 | } |
2734 | |
2735 | bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const { |
2736 | Register DstReg = I.getOperand(0).getReg(); |
2737 | Register SrcReg = I.getOperand(1).getReg(); |
2738 | Register MaskReg = I.getOperand(2).getReg(); |
2739 | LLT Ty = MRI->getType(DstReg); |
2740 | LLT MaskTy = MRI->getType(MaskReg); |
2741 | MachineBasicBlock *BB = I.getParent(); |
2742 | const DebugLoc &DL = I.getDebugLoc(); |
2743 | |
2744 | const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); |
2745 | const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI); |
2746 | const RegisterBank *MaskRB = RBI.getRegBank(MaskReg, *MRI, TRI); |
2747 | const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID; |
2748 | if (DstRB != SrcRB) // Should only happen for hand written MIR. |
2749 | return false; |
2750 | |
2751 | // Try to avoid emitting a bit operation when we only need to touch half of |
2752 | // the 64-bit pointer. |
2753 | APInt MaskOnes = KnownBits->getKnownOnes(MaskReg).zext(64); |
2754 | const APInt MaskHi32 = APInt::getHighBitsSet(64, 32); |
2755 | const APInt MaskLo32 = APInt::getLowBitsSet(64, 32); |
2756 | |
2757 | const bool CanCopyLow32 = (MaskOnes & MaskLo32) == MaskLo32; |
2758 | const bool CanCopyHi32 = (MaskOnes & MaskHi32) == MaskHi32; |
2759 | |
2760 | if (!IsVGPR && Ty.getSizeInBits() == 64 && |
2761 | !CanCopyLow32 && !CanCopyHi32) { |
2762 | auto MIB = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_AND_B64), DstReg) |
2763 | .addReg(SrcReg) |
2764 | .addReg(MaskReg); |
2765 | I.eraseFromParent(); |
2766 | return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); |
2767 | } |
2768 | |
2769 | unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32; |
2770 | const TargetRegisterClass &RegRC |
2771 | = IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass; |
2772 | |
2773 | const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB); |
2774 | const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB); |
2775 | const TargetRegisterClass *MaskRC = |
2776 | TRI.getRegClassForTypeOnBank(MaskTy, *MaskRB); |
2777 | |
2778 | if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) || |
2779 | !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) || |
2780 | !RBI.constrainGenericRegister(MaskReg, *MaskRC, *MRI)) |
2781 | return false; |
2782 | |
2783 | if (Ty.getSizeInBits() == 32) { |
2784 | assert(MaskTy.getSizeInBits() == 32 && |
2785 | "ptrmask should have been narrowed during legalize"); |
2786 | |
2787 | BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg) |
2788 | .addReg(SrcReg) |
2789 | .addReg(MaskReg); |
2790 | I.eraseFromParent(); |
2791 | return true; |
2792 | } |
2793 | |
2794 | Register HiReg = MRI->createVirtualRegister(&RegRC); |
2795 | Register LoReg = MRI->createVirtualRegister(&RegRC); |
2796 | |
2797 | // Extract the subregisters from the source pointer. |
2798 | BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg) |
2799 | .addReg(SrcReg, 0, AMDGPU::sub0); |
2800 | BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg) |
2801 | .addReg(SrcReg, 0, AMDGPU::sub1); |
2802 | |
2803 | Register MaskedLo, MaskedHi; |
2804 | |
2805 | if (CanCopyLow32) { |
2806 | // If all the bits in the low half are 1, we only need a copy for it. |
2807 | MaskedLo = LoReg; |
2808 | } else { |
2809 | // Extract the mask subregister and apply the and. |
2810 | Register MaskLo = MRI->createVirtualRegister(&RegRC); |
2811 | MaskedLo = MRI->createVirtualRegister(&RegRC); |
2812 | |
2813 | BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskLo) |
2814 | .addReg(MaskReg, 0, AMDGPU::sub0); |
2815 | BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedLo) |
2816 | .addReg(LoReg) |
2817 | .addReg(MaskLo); |
2818 | } |
2819 | |
2820 | if (CanCopyHi32) { |
2821 | // If all the bits in the high half are 1, we only need a copy for it. |
2822 | MaskedHi = HiReg; |
2823 | } else { |
2824 | Register MaskHi = MRI->createVirtualRegister(&RegRC); |
2825 | MaskedHi = MRI->createVirtualRegister(&RegRC); |
2826 | |
2827 | BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskHi) |
2828 | .addReg(MaskReg, 0, AMDGPU::sub1); |
2829 | BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedHi) |
2830 | .addReg(HiReg) |
2831 | .addReg(MaskHi); |
2832 | } |
2833 | |
2834 | BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg) |
2835 | .addReg(MaskedLo) |
2836 | .addImm(AMDGPU::sub0) |
2837 | .addReg(MaskedHi) |
2838 | .addImm(AMDGPU::sub1); |
2839 | I.eraseFromParent(); |
2840 | return true; |
2841 | } |
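// Illustrative note (editorial, not part of the original source): a typical |
// alignment mask such as |
//   %mask:_(s64) = G_CONSTANT i64 0xFFFFFFFFFFFFF000   ; align down to 4 KiB |
//   %out:_(p0)   = G_PTRMASK %ptr, %mask |
// has every bit of the high 32 bits known to be 1, so CanCopyHi32 is true: |
// the high half of the pointer is forwarded with a plain COPY and only the |
// low half needs an S_AND_B32 / V_AND_B32_e64 with the low mask word. |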
2842 | |
2843 | /// Return the register to use for the index value, and the subregister to use |
2844 | /// for the indirectly accessed register. |
2845 | static std::pair<Register, unsigned> |
2846 | computeIndirectRegIndex(MachineRegisterInfo &MRI, const SIRegisterInfo &TRI, |
2847 | const TargetRegisterClass *SuperRC, Register IdxReg, |
2848 | unsigned EltSize, GISelKnownBits &KnownBits) { |
2849 | Register IdxBaseReg; |
2850 | int Offset; |
2851 | |
2852 | std::tie(IdxBaseReg, Offset) = |
2853 | AMDGPU::getBaseWithConstantOffset(MRI, IdxReg, &KnownBits); |
2854 | if (IdxBaseReg == AMDGPU::NoRegister) { |
2855 | // This will happen if the index is a known constant. This should ordinarily |
2856 | // be legalized out, but handle it as a register just in case. |
2857 | assert(Offset == 0); |
2858 | IdxBaseReg = IdxReg; |
2859 | } |
2860 | |
2861 | ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SuperRC, EltSize); |
2862 | |
2863 | // Skip out of bounds offsets, or else we would end up using an undefined |
2864 | // register. |
2865 | if (static_cast<unsigned>(Offset) >= SubRegs.size()) |
2866 | return std::pair(IdxReg, SubRegs[0]); |
2867 | return std::pair(IdxBaseReg, SubRegs[Offset]); |
2868 | } |
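// Illustrative note (editorial, not part of the original source): with a |
// <4 x s32> source in a 128-bit register class and EltSize = 4, |
// getRegSplitParts returns {sub0, sub1, sub2, sub3}.  For an index of the |
// form (%base + 2), getBaseWithConstantOffset peels off the constant, so |
// this returns {%base, sub2} and the MOVREL/GPRIDX access starts two dwords |
// into the super-register; an out-of-range constant falls back to |
// {IdxReg, sub0}. |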
2869 | |
2870 | bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT( |
2871 | MachineInstr &MI) const { |
2872 | Register DstReg = MI.getOperand(0).getReg(); |
2873 | Register SrcReg = MI.getOperand(1).getReg(); |
2874 | Register IdxReg = MI.getOperand(2).getReg(); |
2875 | |
2876 | LLT DstTy = MRI->getType(DstReg); |
2877 | LLT SrcTy = MRI->getType(SrcReg); |
2878 | |
2879 | const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); |
2880 | const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI); |
2881 | const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI); |
2882 | |
2883 | // The index must be scalar. If it wasn't, RegBankSelect should have moved this |
2884 | // into a waterfall loop. |
2885 | if (IdxRB->getID() != AMDGPU::SGPRRegBankID) |
2886 | return false; |
2887 | |
2888 | const TargetRegisterClass *SrcRC = |
2889 | TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB); |
2890 | const TargetRegisterClass *DstRC = |
2891 | TRI.getRegClassForTypeOnBank(DstTy, *DstRB); |
2892 | if (!SrcRC || !DstRC) |
2893 | return false; |
2894 | if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) || |
2895 | !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) || |
2896 | !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI)) |
2897 | return false; |
2898 | |
2899 | MachineBasicBlock *BB = MI.getParent(); |
2900 | const DebugLoc &DL = MI.getDebugLoc(); |
2901 | const bool Is64 = DstTy.getSizeInBits() == 64; |
2902 | |
2903 | unsigned SubReg; |
2904 | std::tie(IdxReg, SubReg) = computeIndirectRegIndex( |
2905 | *MRI, TRI, SrcRC, IdxReg, DstTy.getSizeInBits() / 8, *KnownBits); |
2906 | |
2907 | if (SrcRB->getID() == AMDGPU::SGPRRegBankID) { |
2908 | if (DstTy.getSizeInBits() != 32 && !Is64) |
2909 | return false; |
2910 | |
2911 | BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) |
2912 | .addReg(IdxReg); |
2913 | |
2914 | unsigned Opc = Is64 ? AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32; |
2915 | BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg) |
2916 | .addReg(SrcReg, 0, SubReg) |
2917 | .addReg(SrcReg, RegState::Implicit); |
2918 | MI.eraseFromParent(); |
2919 | return true; |
2920 | } |
2921 | |
2922 | if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32) |
2923 | return false; |
2924 | |
2925 | if (!STI.useVGPRIndexMode()) { |
2926 | BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) |
2927 | .addReg(IdxReg); |
2928 | BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg) |
2929 | .addReg(SrcReg, 0, SubReg) |
2930 | .addReg(SrcReg, RegState::Implicit); |
2931 | MI.eraseFromParent(); |
2932 | return true; |
2933 | } |
2934 | |
2935 | const MCInstrDesc &GPRIDXDesc = |
2936 | TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*SrcRC), true); |
2937 | BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg) |
2938 | .addReg(SrcReg) |
2939 | .addReg(IdxReg) |
2940 | .addImm(SubReg); |
2941 | |
2942 | MI.eraseFromParent(); |
2943 | return true; |
2944 | } |
2945 | |
2946 | // TODO: Fold insert_vector_elt (extract_vector_elt) into movrelsd |
2947 | bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT( |
2948 | MachineInstr &MI) const { |
2949 | Register DstReg = MI.getOperand(0).getReg(); |
2950 | Register VecReg = MI.getOperand(1).getReg(); |
2951 | Register ValReg = MI.getOperand(2).getReg(); |
2952 | Register IdxReg = MI.getOperand(3).getReg(); |
2953 | |
2954 | LLT VecTy = MRI->getType(DstReg); |
2955 | LLT ValTy = MRI->getType(ValReg); |
2956 | unsigned VecSize = VecTy.getSizeInBits(); |
2957 | unsigned ValSize = ValTy.getSizeInBits(); |
2958 | |
2959 | const RegisterBank *VecRB = RBI.getRegBank(VecReg, *MRI, TRI); |
2960 | const RegisterBank *ValRB = RBI.getRegBank(ValReg, *MRI, TRI); |
2961 | const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI); |
2962 | |
2963 | assert(VecTy.getElementType() == ValTy); |
2964 | |
2965 | // The index must be scalar. If it wasn't, RegBankSelect should have moved this |
2966 | // into a waterfall loop. |
2967 | if (IdxRB->getID() != AMDGPU::SGPRRegBankID) |
2968 | return false; |
2969 | |
2970 | const TargetRegisterClass *VecRC = |
2971 | TRI.getRegClassForTypeOnBank(VecTy, *VecRB); |
2972 | const TargetRegisterClass *ValRC = |
2973 | TRI.getRegClassForTypeOnBank(ValTy, *ValRB); |
2974 | |
2975 | if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) || |
2976 | !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) || |
2977 | !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) || |
2978 | !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI)) |
2979 | return false; |
2980 | |
2981 | if (VecRB->getID() == AMDGPU::VGPRRegBankID && ValSize != 32) |
2982 | return false; |
2983 | |
2984 | unsigned SubReg; |
2985 | std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg, |
2986 | ValSize / 8, *KnownBits); |
2987 | |
2988 | const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID && |
2989 | STI.useVGPRIndexMode(); |
2990 | |
2991 | MachineBasicBlock *BB = MI.getParent(); |
2992 | const DebugLoc &DL = MI.getDebugLoc(); |
2993 | |
2994 | if (!IndexMode) { |
2995 | BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) |
2996 | .addReg(IdxReg); |
2997 | |
2998 | const MCInstrDesc &RegWriteOp = TII.getIndirectRegWriteMovRelPseudo( |
2999 | VecSize, ValSize, VecRB->getID() == AMDGPU::SGPRRegBankID); |
3000 | BuildMI(*BB, MI, DL, RegWriteOp, DstReg) |
3001 | .addReg(VecReg) |
3002 | .addReg(ValReg) |
3003 | .addImm(SubReg); |
3004 | MI.eraseFromParent(); |
3005 | return true; |
3006 | } |
3007 | |
3008 | const MCInstrDesc &GPRIDXDesc = |
3009 | TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), false); |
3010 | BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg) |
3011 | .addReg(VecReg) |
3012 | .addReg(ValReg) |
3013 | .addReg(IdxReg) |
3014 | .addImm(SubReg); |
3015 | |
3016 | MI.eraseFromParent(); |
3017 | return true; |
3018 | } |
3019 | |
3020 | bool AMDGPUInstructionSelector::selectBufferLoadLds(MachineInstr &MI) const { |
3021 | unsigned Opc; |
3022 | unsigned Size = MI.getOperand(3).getImm(); |
3023 | |
3024 | // The struct intrinsic variants add one additional operand over raw. |
3025 | const bool HasVIndex = MI.getNumOperands() == 9; |
3026 | Register VIndex; |
3027 | int OpOffset = 0; |
3028 | if (HasVIndex) { |
3029 | VIndex = MI.getOperand(4).getReg(); |
3030 | OpOffset = 1; |
3031 | } |
3032 | |
3033 | Register VOffset = MI.getOperand(4 + OpOffset).getReg(); |
3034 | std::optional<ValueAndVReg> MaybeVOffset = |
3035 | getIConstantVRegValWithLookThrough(VOffset, *MRI); |
3036 | const bool HasVOffset = !MaybeVOffset || MaybeVOffset->Value.getZExtValue(); |
3037 | |
3038 | switch (Size) { |
3039 | default: |
3040 | return false; |
3041 | case 1: |
3042 | Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_UBYTE_LDS_BOTHEN |
3043 | : AMDGPU::BUFFER_LOAD_UBYTE_LDS_IDXEN |
3044 | : HasVOffset ? AMDGPU::BUFFER_LOAD_UBYTE_LDS_OFFEN |
3045 | : AMDGPU::BUFFER_LOAD_UBYTE_LDS_OFFSET; |
3046 | break; |
3047 | case 2: |
3048 | Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_USHORT_LDS_BOTHEN |
3049 | : AMDGPU::BUFFER_LOAD_USHORT_LDS_IDXEN |
3050 | : HasVOffset ? AMDGPU::BUFFER_LOAD_USHORT_LDS_OFFEN |
3051 | : AMDGPU::BUFFER_LOAD_USHORT_LDS_OFFSET; |
3052 | break; |
3053 | case 4: |
3054 | Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_DWORD_LDS_BOTHEN |
3055 | : AMDGPU::BUFFER_LOAD_DWORD_LDS_IDXEN |
3056 | : HasVOffset ? AMDGPU::BUFFER_LOAD_DWORD_LDS_OFFEN |
3057 | : AMDGPU::BUFFER_LOAD_DWORD_LDS_OFFSET; |
3058 | break; |
3059 | } |
3060 | |
3061 | MachineBasicBlock *MBB = MI.getParent(); |
3062 | const DebugLoc &DL = MI.getDebugLoc(); |
3063 | BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) |
3064 | .add(MI.getOperand(2)); |
3065 | |
3066 | auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc)); |
3067 | |
3068 | if (HasVIndex && HasVOffset) { |
3069 | Register IdxReg = MRI->createVirtualRegister(TRI.getVGPR64Class()); |
3070 | BuildMI(*MBB, &*MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg) |
3071 | .addReg(VIndex) |
3072 | .addImm(AMDGPU::sub0) |
3073 | .addReg(VOffset) |
3074 | .addImm(AMDGPU::sub1); |
3075 | |
3076 | MIB.addReg(IdxReg); |
3077 | } else if (HasVIndex) { |
3078 | MIB.addReg(VIndex); |
3079 | } else if (HasVOffset) { |
3080 | MIB.addReg(VOffset); |
3081 | } |
3082 | |
3083 | MIB.add(MI.getOperand(1)); // rsrc |
3084 | MIB.add(MI.getOperand(5 + OpOffset)); // soffset |
3085 | MIB.add(MI.getOperand(6 + OpOffset)); // imm offset |
3086 | unsigned Aux = MI.getOperand(7 + OpOffset).getImm(); |
3087 | MIB.addImm(Aux & AMDGPU::CPol::ALL); // cpol |
3088 | MIB.addImm((Aux >> 3) & 1); // swz |
3089 | |
3090 | MachineMemOperand *LoadMMO = *MI.memoperands_begin(); |
3091 | MachinePointerInfo LoadPtrI = LoadMMO->getPointerInfo(); |
3092 | LoadPtrI.Offset = MI.getOperand(6 + OpOffset).getImm(); |
3093 | MachinePointerInfo StorePtrI = LoadPtrI; |
3094 | StorePtrI.V = nullptr; |
3095 | StorePtrI.AddrSpace = AMDGPUAS::LOCAL_ADDRESS; |
3096 | |
3097 | auto F = LoadMMO->getFlags() & |
3098 | ~(MachineMemOperand::MOStore | MachineMemOperand::MOLoad); |
3099 | LoadMMO = MF->getMachineMemOperand(LoadPtrI, F | MachineMemOperand::MOLoad, |
3100 | Size, LoadMMO->getBaseAlign()); |
3101 | |
3102 | MachineMemOperand *StoreMMO = |
3103 | MF->getMachineMemOperand(StorePtrI, F | MachineMemOperand::MOStore, |
3104 | sizeof(int32_t), LoadMMO->getBaseAlign()); |
3105 | |
3106 | MIB.setMemRefs({LoadMMO, StoreMMO}); |
3107 | |
3108 | MI.eraseFromParent(); |
3109 | return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); |
3110 | } |
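// Illustrative note (editorial, not part of the original source): the four |
// opcode variants map onto which VGPR address operands are present -- |
// BOTHEN (vindex and voffset packed into a 64-bit REG_SEQUENCE), IDXEN |
// (vindex only), OFFEN (voffset only) and OFFSET (neither); a voffset that |
// is a known constant zero counts as absent. |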
3111 | |
3112 | /// Match a zero extend from a 32-bit value to 64-bits. |
3113 | static Register matchZeroExtendFromS32(MachineRegisterInfo &MRI, Register Reg) { |
3114 | Register ZExtSrc; |
3115 | if (mi_match(Reg, MRI, m_GZExt(m_Reg(ZExtSrc)))) |
3116 | return MRI.getType(ZExtSrc) == LLT::scalar(32) ? ZExtSrc : Register(); |
3117 | |
3118 | // Match legalized form %zext = G_MERGE_VALUES (s32 %x), (s32 0) |
3119 | const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI); |
3120 | if (Def->getOpcode() != AMDGPU::G_MERGE_VALUES) |
3121 | return Register(); |
3122 | |
3123 | assert(Def->getNumOperands() == 3 && |
3124 | MRI.getType(Def->getOperand(0).getReg()) == LLT::scalar(64)); |
3125 | if (mi_match(Def->getOperand(2).getReg(), MRI, m_ZeroInt())) { |
3126 | return Def->getOperand(1).getReg(); |
3127 | } |
3128 | |
3129 | return Register(); |
3130 | } |
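// Illustrative note (editorial, not part of the original source): both of |
// these forms yield %x as the matched 32-bit source: |
//   %z:_(s64) = G_ZEXT %x:_(s32) |
//   %z:_(s64) = G_MERGE_VALUES %x:_(s32), %zero(s32)   ; %zero = G_CONSTANT 0 |
// Anything else (for example a zext from s16) returns an invalid Register(). |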
3131 | |
3132 | bool AMDGPUInstructionSelector::selectGlobalLoadLds(MachineInstr &MI) const { |
3133 | unsigned Opc; |
3134 | unsigned Size = MI.getOperand(3).getImm(); |
3135 | |
3136 | switch (Size) { |
3137 | default: |
3138 | return false; |
3139 | case 1: |
3140 | Opc = AMDGPU::GLOBAL_LOAD_LDS_UBYTE; |
3141 | break; |
3142 | case 2: |
3143 | Opc = AMDGPU::GLOBAL_LOAD_LDS_USHORT; |
3144 | break; |
3145 | case 4: |
3146 | Opc = AMDGPU::GLOBAL_LOAD_LDS_DWORD; |
3147 | break; |
3148 | } |
3149 | |
3150 | MachineBasicBlock *MBB = MI.getParent(); |
3151 | const DebugLoc &DL = MI.getDebugLoc(); |
3152 | BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) |
3153 | .add(MI.getOperand(2)); |
3154 | |
3155 | Register Addr = MI.getOperand(1).getReg(); |
3156 | Register VOffset; |
3157 | // Try to split SAddr and VOffset. Global and LDS pointers share the same |
3158 | // immediate offset, so we cannot use a regular SelectGlobalSAddr(). |
3159 | if (!isSGPR(Addr)) { |
3160 | auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI); |
3161 | if (isSGPR(AddrDef->Reg)) { |
3162 | Addr = AddrDef->Reg; |
3163 | } else if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) { |
3164 | Register SAddr = |
3165 | getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI); |
3166 | if (isSGPR(SAddr)) { |
3167 | Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg(); |
3168 | if (Register Off = matchZeroExtendFromS32(*MRI, PtrBaseOffset)) { |
3169 | Addr = SAddr; |
3170 | VOffset = Off; |
3171 | } |
3172 | } |
3173 | } |
3174 | } |
3175 | |
3176 | if (isSGPR(Addr)) { |
3177 | Opc = AMDGPU::getGlobalSaddrOp(Opc); |
3178 | if (!VOffset) { |
3179 | VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
3180 | BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_MOV_B32_e32), VOffset) |
3181 | .addImm(0); |
3182 | } |
3183 | } |
3184 | |
3185 | auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc)) |
3186 | .addReg(Addr); |
3187 | |
3188 | if (isSGPR(Addr)) |
3189 | MIB.addReg(VOffset); |
3190 | |
3191 | MIB.add(MI.getOperand(4)) // offset |
3192 | .add(MI.getOperand(5)); // cpol |
3193 | |
3194 | MachineMemOperand *LoadMMO = *MI.memoperands_begin(); |
3195 | MachinePointerInfo LoadPtrI = LoadMMO->getPointerInfo(); |
3196 | LoadPtrI.Offset = MI.getOperand(4).getImm(); |
3197 | MachinePointerInfo StorePtrI = LoadPtrI; |
3198 | LoadPtrI.AddrSpace = AMDGPUAS::GLOBAL_ADDRESS; |
3199 | StorePtrI.AddrSpace = AMDGPUAS::LOCAL_ADDRESS; |
3200 | auto F = LoadMMO->getFlags() & |
3201 | ~(MachineMemOperand::MOStore | MachineMemOperand::MOLoad); |
3202 | LoadMMO = MF->getMachineMemOperand(LoadPtrI, F | MachineMemOperand::MOLoad, |
3203 | Size, LoadMMO->getBaseAlign()); |
3204 | MachineMemOperand *StoreMMO = |
3205 | MF->getMachineMemOperand(StorePtrI, F | MachineMemOperand::MOStore, |
3206 | sizeof(int32_t), Align(4)); |
3207 | |
3208 | MIB.setMemRefs({LoadMMO, StoreMMO}); |
3209 | |
3210 | MI.eraseFromParent(); |
3211 | return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); |
3212 | } |
3213 | |
3214 | bool AMDGPUInstructionSelector::selectBVHIntrinsic(MachineInstr &MI) const { |
3215 | MI.setDesc(TII.get(MI.getOperand(1).getImm())); |
3216 | MI.removeOperand(1); |
3217 | MI.addImplicitDefUseOperands(*MI.getParent()->getParent()); |
3218 | return true; |
3219 | } |
3220 | |
3221 | bool AMDGPUInstructionSelector::selectSMFMACIntrin(MachineInstr &MI) const { |
3222 | unsigned Opc; |
3223 | switch (MI.getIntrinsicID()) { |
3224 | case Intrinsic::amdgcn_smfmac_f32_16x16x32_f16: |
3225 | Opc = AMDGPU::V_SMFMAC_F32_16X16X32_F16_e64; |
3226 | break; |
3227 | case Intrinsic::amdgcn_smfmac_f32_32x32x16_f16: |
3228 | Opc = AMDGPU::V_SMFMAC_F32_32X32X16_F16_e64; |
3229 | break; |
3230 | case Intrinsic::amdgcn_smfmac_f32_16x16x32_bf16: |
3231 | Opc = AMDGPU::V_SMFMAC_F32_16X16X32_BF16_e64; |
3232 | break; |
3233 | case Intrinsic::amdgcn_smfmac_f32_32x32x16_bf16: |
3234 | Opc = AMDGPU::V_SMFMAC_F32_32X32X16_BF16_e64; |
3235 | break; |
3236 | case Intrinsic::amdgcn_smfmac_i32_16x16x64_i8: |
3237 | Opc = AMDGPU::V_SMFMAC_I32_16X16X64_I8_e64; |
3238 | break; |
3239 | case Intrinsic::amdgcn_smfmac_i32_32x32x32_i8: |
3240 | Opc = AMDGPU::V_SMFMAC_I32_32X32X32_I8_e64; |
3241 | break; |
3242 | case Intrinsic::amdgcn_smfmac_f32_16x16x64_bf8_bf8: |
3243 | Opc = AMDGPU::V_SMFMAC_F32_16X16X64_BF8_BF8_e64; |
3244 | break; |
3245 | case Intrinsic::amdgcn_smfmac_f32_16x16x64_bf8_fp8: |
3246 | Opc = AMDGPU::V_SMFMAC_F32_16X16X64_BF8_FP8_e64; |
3247 | break; |
3248 | case Intrinsic::amdgcn_smfmac_f32_16x16x64_fp8_bf8: |
3249 | Opc = AMDGPU::V_SMFMAC_F32_16X16X64_FP8_BF8_e64; |
3250 | break; |
3251 | case Intrinsic::amdgcn_smfmac_f32_16x16x64_fp8_fp8: |
3252 | Opc = AMDGPU::V_SMFMAC_F32_16X16X64_FP8_FP8_e64; |
3253 | break; |
3254 | case Intrinsic::amdgcn_smfmac_f32_32x32x32_bf8_bf8: |
3255 | Opc = AMDGPU::V_SMFMAC_F32_32X32X32_BF8_BF8_e64; |
3256 | break; |
3257 | case Intrinsic::amdgcn_smfmac_f32_32x32x32_bf8_fp8: |
3258 | Opc = AMDGPU::V_SMFMAC_F32_32X32X32_BF8_FP8_e64; |
3259 | break; |
3260 | case Intrinsic::amdgcn_smfmac_f32_32x32x32_fp8_bf8: |
3261 | Opc = AMDGPU::V_SMFMAC_F32_32X32X32_FP8_BF8_e64; |
3262 | break; |
3263 | case Intrinsic::amdgcn_smfmac_f32_32x32x32_fp8_fp8: |
3264 | Opc = AMDGPU::V_SMFMAC_F32_32X32X32_FP8_FP8_e64; |
3265 | break; |
3266 | default: |
3267 | llvm_unreachable("unhandled smfmac intrinsic"); |
3268 | } |
3269 | |
3270 | auto VDst_In = MI.getOperand(4); |
3271 | |
3272 | MI.setDesc(TII.get(Opc)); |
3273 | MI.removeOperand(4); // VDst_In |
3274 | MI.removeOperand(1); // Intrinsic ID |
3275 | MI.addOperand(VDst_In); // Readd VDst_In to the end |
3276 | MI.addImplicitDefUseOperands(*MI.getParent()->getParent()); |
3277 | return true; |
3278 | } |
3279 | |
3280 | bool AMDGPUInstructionSelector::selectWaveAddress(MachineInstr &MI) const { |
3281 | Register DstReg = MI.getOperand(0).getReg(); |
3282 | Register SrcReg = MI.getOperand(1).getReg(); |
3283 | const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); |
3284 | const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID; |
3285 | MachineBasicBlock *MBB = MI.getParent(); |
3286 | const DebugLoc &DL = MI.getDebugLoc(); |
3287 | |
3288 | if (IsVALU) { |
3289 | BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg) |
3290 | .addImm(Subtarget->getWavefrontSizeLog2()) |
3291 | .addReg(SrcReg); |
3292 | } else { |
3293 | BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg) |
3294 | .addReg(SrcReg) |
3295 | .addImm(Subtarget->getWavefrontSizeLog2()); |
3296 | } |
3297 | |
3298 | const TargetRegisterClass &RC = |
3299 | IsVALU ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass; |
3300 | if (!RBI.constrainGenericRegister(DstReg, RC, *MRI)) |
3301 | return false; |
3302 | |
3303 | MI.eraseFromParent(); |
3304 | return true; |
3305 | } |
3306 | |
3307 | bool AMDGPUInstructionSelector::select(MachineInstr &I) { |
3308 | if (I.isPHI()) |
3309 | return selectPHI(I); |
3310 | |
3311 | if (!I.isPreISelOpcode()) { |
3312 | if (I.isCopy()) |
3313 | return selectCOPY(I); |
3314 | return true; |
3315 | } |
3316 | |
3317 | switch (I.getOpcode()) { |
3318 | case TargetOpcode::G_AND: |
3319 | case TargetOpcode::G_OR: |
3320 | case TargetOpcode::G_XOR: |
3321 | if (selectImpl(I, *CoverageInfo)) |
3322 | return true; |
3323 | return selectG_AND_OR_XOR(I); |
3324 | case TargetOpcode::G_ADD: |
3325 | case TargetOpcode::G_SUB: |
3326 | if (selectImpl(I, *CoverageInfo)) |
3327 | return true; |
3328 | return selectG_ADD_SUB(I); |
3329 | case TargetOpcode::G_UADDO: |
3330 | case TargetOpcode::G_USUBO: |
3331 | case TargetOpcode::G_UADDE: |
3332 | case TargetOpcode::G_USUBE: |
3333 | return selectG_UADDO_USUBO_UADDE_USUBE(I); |
3334 | case AMDGPU::G_AMDGPU_MAD_U64_U32: |
3335 | case AMDGPU::G_AMDGPU_MAD_I64_I32: |
3336 | return selectG_AMDGPU_MAD_64_32(I); |
3337 | case TargetOpcode::G_INTTOPTR: |
3338 | case TargetOpcode::G_BITCAST: |
3339 | case TargetOpcode::G_PTRTOINT: |
3340 | return selectCOPY(I); |
3341 | case TargetOpcode::G_CONSTANT: |
3342 | case TargetOpcode::G_FCONSTANT: |
3343 | return selectG_CONSTANT(I); |
3344 | case TargetOpcode::G_FNEG: |
3345 | if (selectImpl(I, *CoverageInfo)) |
3346 | return true; |
3347 | return selectG_FNEG(I); |
3348 | case TargetOpcode::G_FABS: |
3349 | if (selectImpl(I, *CoverageInfo)) |
3350 | return true; |
3351 | return selectG_FABS(I); |
3352 | case TargetOpcode::G_EXTRACT: |
3353 | return selectG_EXTRACT(I); |
3354 | case TargetOpcode::G_MERGE_VALUES: |
3355 | case TargetOpcode::G_CONCAT_VECTORS: |
3356 | return selectG_MERGE_VALUES(I); |
3357 | case TargetOpcode::G_UNMERGE_VALUES: |
3358 | return selectG_UNMERGE_VALUES(I); |
3359 | case TargetOpcode::G_BUILD_VECTOR: |
3360 | case TargetOpcode::G_BUILD_VECTOR_TRUNC: |
3361 | return selectG_BUILD_VECTOR(I); |
3362 | case TargetOpcode::G_PTR_ADD: |
3363 | if (selectImpl(I, *CoverageInfo)) |
3364 | return true; |
3365 | return selectG_PTR_ADD(I); |
3366 | case TargetOpcode::G_IMPLICIT_DEF: |
3367 | return selectG_IMPLICIT_DEF(I); |
3368 | case TargetOpcode::G_FREEZE: |
3369 | return selectCOPY(I); |
3370 | case TargetOpcode::G_INSERT: |
3371 | return selectG_INSERT(I); |
3372 | case TargetOpcode::G_INTRINSIC: |
3373 | return selectG_INTRINSIC(I); |
3374 | case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: |
3375 | return selectG_INTRINSIC_W_SIDE_EFFECTS(I); |
3376 | case TargetOpcode::G_ICMP: |
3377 | if (selectG_ICMP(I)) |
3378 | return true; |
3379 | return selectImpl(I, *CoverageInfo); |
3380 | case TargetOpcode::G_LOAD: |
3381 | case TargetOpcode::G_STORE: |
3382 | case TargetOpcode::G_ATOMIC_CMPXCHG: |
3383 | case TargetOpcode::G_ATOMICRMW_XCHG: |
3384 | case TargetOpcode::G_ATOMICRMW_ADD: |
3385 | case TargetOpcode::G_ATOMICRMW_SUB: |
3386 | case TargetOpcode::G_ATOMICRMW_AND: |
3387 | case TargetOpcode::G_ATOMICRMW_OR: |
3388 | case TargetOpcode::G_ATOMICRMW_XOR: |
3389 | case TargetOpcode::G_ATOMICRMW_MIN: |
3390 | case TargetOpcode::G_ATOMICRMW_MAX: |
3391 | case TargetOpcode::G_ATOMICRMW_UMIN: |
3392 | case TargetOpcode::G_ATOMICRMW_UMAX: |
3393 | case TargetOpcode::G_ATOMICRMW_UINC_WRAP: |
3394 | case TargetOpcode::G_ATOMICRMW_UDEC_WRAP: |
3395 | case TargetOpcode::G_ATOMICRMW_FADD: |
3396 | case AMDGPU::G_AMDGPU_ATOMIC_FMIN: |
3397 | case AMDGPU::G_AMDGPU_ATOMIC_FMAX: |
3398 | return selectG_LOAD_STORE_ATOMICRMW(I); |
3399 | case TargetOpcode::G_SELECT: |
3400 | return selectG_SELECT(I); |
3401 | case TargetOpcode::G_TRUNC: |
3402 | return selectG_TRUNC(I); |
3403 | case TargetOpcode::G_SEXT: |
3404 | case TargetOpcode::G_ZEXT: |
3405 | case TargetOpcode::G_ANYEXT: |
3406 | case TargetOpcode::G_SEXT_INREG: |
3407 | if (selectImpl(I, *CoverageInfo)) |
3408 | return true; |
3409 | return selectG_SZA_EXT(I); |
3410 | case TargetOpcode::G_BRCOND: |
3411 | return selectG_BRCOND(I); |
3412 | case TargetOpcode::G_GLOBAL_VALUE: |
3413 | return selectG_GLOBAL_VALUE(I); |
3414 | case TargetOpcode::G_PTRMASK: |
3415 | return selectG_PTRMASK(I); |
3416 | case TargetOpcode::G_EXTRACT_VECTOR_ELT: |
3417 | return selectG_EXTRACT_VECTOR_ELT(I); |
3418 | case TargetOpcode::G_INSERT_VECTOR_ELT: |
3419 | return selectG_INSERT_VECTOR_ELT(I); |
3420 | case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD: |
3421 | case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16: |
3422 | case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE: |
3423 | case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16: { |
3424 | const AMDGPU::ImageDimIntrinsicInfo *Intr |
3425 | = AMDGPU::getImageDimIntrinsicInfo(I.getIntrinsicID()); |
3426 | assert(Intr && "not an image intrinsic with image pseudo"); |
3427 | return selectImageIntrinsic(I, Intr); |
3428 | } |
3429 | case AMDGPU::G_AMDGPU_INTRIN_BVH_INTERSECT_RAY: |
3430 | return selectBVHIntrinsic(I); |
3431 | case AMDGPU::G_SBFX: |
3432 | case AMDGPU::G_UBFX: |
3433 | return selectG_SBFX_UBFX(I); |
3434 | case AMDGPU::G_SI_CALL: |
3435 | I.setDesc(TII.get(AMDGPU::SI_CALL)); |
3436 | return true; |
3437 | case AMDGPU::G_AMDGPU_WAVE_ADDRESS: |
3438 | return selectWaveAddress(I); |
3439 | default: |
3440 | return selectImpl(I, *CoverageInfo); |
3441 | } |
3442 | return false; |
3443 | } |
3444 | |
3445 | InstructionSelector::ComplexRendererFns |
3446 | AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const { |
3447 | return {{ |
3448 | [=](MachineInstrBuilder &MIB) { MIB.add(Root); } |
3449 | }}; |
3450 | |
3451 | } |
3452 | |
3453 | std::pair<Register, unsigned> AMDGPUInstructionSelector::selectVOP3ModsImpl( |
3454 | MachineOperand &Root, bool AllowAbs, bool OpSel) const { |
3455 | Register Src = Root.getReg(); |
3456 | unsigned Mods = 0; |
3457 | MachineInstr *MI = getDefIgnoringCopies(Src, *MRI); |
3458 | |
3459 | if (MI->getOpcode() == AMDGPU::G_FNEG) { |
3460 | Src = MI->getOperand(1).getReg(); |
3461 | Mods |= SISrcMods::NEG; |
3462 | MI = getDefIgnoringCopies(Src, *MRI); |
3463 | } |
3464 | |
3465 | if (AllowAbs && MI->getOpcode() == AMDGPU::G_FABS) { |
3466 | Src = MI->getOperand(1).getReg(); |
3467 | Mods |= SISrcMods::ABS; |
3468 | } |
3469 | |
3470 | if (OpSel) |
3471 | Mods |= SISrcMods::OP_SEL_0; |
3472 | |
3473 | return std::pair(Src, Mods); |
3474 | } |
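// Illustrative note (editorial, not part of the original source): for a |
// source chain |
//   %a:_(s32) = G_FABS %x |
//   %s:_(s32) = G_FNEG %a |
// selectVOP3ModsImpl(%s) returns {%x, SISrcMods::NEG | SISrcMods::ABS}, |
// i.e. the modifiers are folded into the VOP3 operand and the selected |
// instruction reads %x directly instead of the materialized fneg/fabs |
// results. |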
3475 | |
3476 | Register AMDGPUInstructionSelector::copyToVGPRIfSrcFolded( |
3477 | Register Src, unsigned Mods, MachineOperand Root, MachineInstr *InsertPt, |
3478 | bool ForceVGPR) const { |
3479 | if ((Mods != 0 || ForceVGPR) && |
3480 | RBI.getRegBank(Src, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) { |
3481 | |
3482 | // If we looked through copies to find source modifiers on an SGPR operand, |
3483 | // we now have an SGPR register source. To avoid potentially violating the |
3484 | // constant bus restriction, we need to insert a copy to a VGPR. |
3485 | Register VGPRSrc = MRI->cloneVirtualRegister(Root.getReg()); |
3486 | BuildMI(*InsertPt->getParent(), InsertPt, InsertPt->getDebugLoc(), |
3487 | TII.get(AMDGPU::COPY), VGPRSrc) |
3488 | .addReg(Src); |
3489 | Src = VGPRSrc; |
3490 | } |
3491 | |
3492 | return Src; |
3493 | } |
3494 | |
3495 | /// |
3496 | /// This will select either an SGPR or VGPR operand and will save us from |
3497 | /// having to write an extra tablegen pattern. |
3498 | InstructionSelector::ComplexRendererFns |
3499 | AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const { |
3500 | return {{ |
3501 | [=](MachineInstrBuilder &MIB) { MIB.add(Root); } |
3502 | }}; |
3503 | } |
3504 | |
3505 | InstructionSelector::ComplexRendererFns |
3506 | AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const { |
3507 | Register Src; |
3508 | unsigned Mods; |
3509 | std::tie(Src, Mods) = selectVOP3ModsImpl(Root); |
3510 | |
3511 | return {{ |
3512 | [=](MachineInstrBuilder &MIB) { |
3513 | MIB.addReg(copyToVGPRIfSrcFolded(Src, Mods, Root, MIB)); |
3514 | }, |
3515 | [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods |
3516 | [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp |
3517 | [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // omod |
3518 | }}; |
3519 | } |
3520 | |
3521 | InstructionSelector::ComplexRendererFns |
3522 | AMDGPUInstructionSelector::selectVOP3BMods0(MachineOperand &Root) const { |
3523 | Register Src; |
3524 | unsigned Mods; |
3525 | std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false); |
3526 | |
3527 | return {{ |
3528 | [=](MachineInstrBuilder &MIB) { |
3529 | MIB.addReg(copyToVGPRIfSrcFolded(Src, Mods, Root, MIB)); |
3530 | }, |
3531 | [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods |
3532 | [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp |
3533 | [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // omod |
3534 | }}; |
3535 | } |
3536 | |
3537 | InstructionSelector::ComplexRendererFns |
3538 | AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const { |
3539 | return {{ |
3540 | [=](MachineInstrBuilder &MIB) { MIB.add(Root); }, |
3541 | [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp |
3542 | [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // omod |
3543 | }}; |
3544 | } |
3545 | |
3546 | InstructionSelector::ComplexRendererFns |
3547 | AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const { |
3548 | Register Src; |
3549 | unsigned Mods; |
3550 | std::tie(Src, Mods) = selectVOP3ModsImpl(Root); |
3551 | |
3552 | return {{ |
3553 | [=](MachineInstrBuilder &MIB) { |
3554 | MIB.addReg(copyToVGPRIfSrcFolded(Src, Mods, Root, MIB)); |
3555 | }, |
3556 | [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods |
3557 | }}; |
3558 | } |
3559 | |
3560 | InstructionSelector::ComplexRendererFns |
3561 | AMDGPUInstructionSelector::selectVOP3BMods(MachineOperand &Root) const { |
3562 | Register Src; |
3563 | unsigned Mods; |
3564 | std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false); |
3565 | |
3566 | return {{ |
3567 | [=](MachineInstrBuilder &MIB) { |
3568 | MIB.addReg(copyToVGPRIfSrcFolded(Src, Mods, Root, MIB)); |
3569 | }, |
3570 | [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods |
3571 | }}; |
3572 | } |
3573 | |
3574 | InstructionSelector::ComplexRendererFns |
3575 | AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const { |
3576 | Register Reg = Root.getReg(); |
3577 | const MachineInstr *Def = getDefIgnoringCopies(Reg, *MRI); |
3578 | if (Def->getOpcode() == AMDGPU::G_FNEG || Def->getOpcode() == AMDGPU::G_FABS) |
3579 | return {}; |
3580 | return {{ |
3581 | [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); }, |
3582 | }}; |
3583 | } |
3584 | |
3585 | std::pair<Register, unsigned> |
3586 | AMDGPUInstructionSelector::selectVOP3PModsImpl( |
3587 | Register Src, const MachineRegisterInfo &MRI, bool IsDOT) const { |
3588 | unsigned Mods = 0; |
3589 | MachineInstr *MI = MRI.getVRegDef(Src); |
3590 | |
3591 | if (MI && MI->getOpcode() == AMDGPU::G_FNEG && |
3592 | // It's possible to see an f32 fneg here, but unlikely. |
3593 | // TODO: Treat f32 fneg as only high bit. |
3594 | MRI.getType(Src) == LLT::fixed_vector(2, 16)) { |
3595 | Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI); |
3596 | Src = MI->getOperand(1).getReg(); |
3597 | MI = MRI.getVRegDef(Src); |
Value stored to 'MI' is never read | |
3598 | } |
3599 | |
3600 | // TODO: Match op_sel through g_build_vector_trunc and g_shuffle_vector. |
3601 | (void)IsDOT; // DOTs do not use OPSEL on gfx940+, check ST.hasDOTOpSelHazard() |
3602 | |
3603 | // Packed instructions do not have abs modifiers. |
3604 | Mods |= SISrcMods::OP_SEL_1; |
3605 | |
3606 | return std::pair(Src, Mods); |
3607 | } |
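// Note on the analyzer finding above (editorial, not in the original file): |
// the final 'MI = MRI.getVRegDef(Src);' inside the G_FNEG branch is a dead |
// store -- 'MI' is not read again before the function returns, which is |
// exactly what the "Value stored to 'MI' is never read" warning at line |
// 3597 reports.  A minimal cleanup sketch (assuming no later change starts |
// using 'MI', e.g. for the op_sel TODO below) would drop that reassignment |
// and keep only: |
//   Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI); |
//   Src = MI->getOperand(1).getReg(); |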
3608 | |
3609 | InstructionSelector::ComplexRendererFns |
3610 | AMDGPUInstructionSelector::selectVOP3PMods(MachineOperand &Root) const { |
3611 | MachineRegisterInfo &MRI |
3612 | = Root.getParent()->getParent()->getParent()->getRegInfo(); |
3613 | |
3614 | Register Src; |
3615 | unsigned Mods; |
3616 | std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI); |
3617 | |
3618 | return {{ |
3619 | [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); }, |
3620 | [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods |
3621 | }}; |
3622 | } |
3623 | |
3624 | InstructionSelector::ComplexRendererFns |
3625 | AMDGPUInstructionSelector::selectVOP3PModsDOT(MachineOperand &Root) const { |
3626 | MachineRegisterInfo &MRI |
3627 | = Root.getParent()->getParent()->getParent()->getRegInfo(); |
3628 | |
3629 | Register Src; |
3630 | unsigned Mods; |
3631 | std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI, true); |
3632 | |
3633 | return {{ |
3634 | [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); }, |
3635 | [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods |
3636 | }}; |
3637 | } |
3638 | |
3639 | InstructionSelector::ComplexRendererFns |
3640 | AMDGPUInstructionSelector::selectDotIUVOP3PMods(MachineOperand &Root) const { |
3641 | // Literal i1 value set in intrinsic, represents SrcMods for the next operand. |
3642 | // Value is in Imm operand as i1 sign extended to int64_t. |
3643 | // 1(-1) promotes packed values to signed, 0 treats them as unsigned. |
3644 | assert((Root.isImm() && (Root.getImm() == -1 || Root.getImm() == 0)) && |
3645 | "expected i1 value"); |
3646 | unsigned Mods = SISrcMods::OP_SEL_1; |
3647 | if (Root.getImm() == -1) |
3648 | Mods ^= SISrcMods::NEG; |
3649 | return {{ |
3650 | [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods |
3651 | }}; |
3652 | } |
3653 | |
3654 | InstructionSelector::ComplexRendererFns |
3655 | AMDGPUInstructionSelector::selectWMMAOpSelVOP3PMods( |
3656 | MachineOperand &Root) const { |
3657 | assert((Root.isImm() && (Root.getImm() == -1 || Root.getImm() == 0)) && |
3658 | "expected i1 value"); |
3659 | unsigned Mods = SISrcMods::OP_SEL_1; |
3660 | if (Root.getImm() != 0) |
3661 | Mods |= SISrcMods::OP_SEL_0; |
3662 | |
3663 | return {{ |
3664 | [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods |
3665 | }}; |
3666 | } |
3667 | |
3668 | InstructionSelector::ComplexRendererFns |
3669 | AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const { |
3670 | Register Src; |
3671 | unsigned Mods; |
3672 | std::tie(Src, Mods) = selectVOP3ModsImpl(Root); |
3673 | |
3674 | // FIXME: Handle op_sel |
3675 | return {{ |
3676 | [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); }, |
3677 | [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods |
3678 | }}; |
3679 | } |
3680 | |
3681 | InstructionSelector::ComplexRendererFns |
3682 | AMDGPUInstructionSelector::selectVINTERPMods(MachineOperand &Root) const { |
3683 | Register Src; |
3684 | unsigned Mods; |
3685 | std::tie(Src, Mods) = selectVOP3ModsImpl(Root, |
3686 | /* AllowAbs */ false, |
3687 | /* OpSel */ false); |
3688 | |
3689 | return {{ |
3690 | [=](MachineInstrBuilder &MIB) { |
3691 | MIB.addReg( |
3692 | copyToVGPRIfSrcFolded(Src, Mods, Root, MIB, /* ForceVGPR */ true)); |
3693 | }, |
3694 | [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods |
3695 | }}; |
3696 | } |
3697 | |
3698 | InstructionSelector::ComplexRendererFns |
3699 | AMDGPUInstructionSelector::selectVINTERPModsHi(MachineOperand &Root) const { |
3700 | Register Src; |
3701 | unsigned Mods; |
3702 | std::tie(Src, Mods) = selectVOP3ModsImpl(Root, |
3703 | /* AllowAbs */ false, |
3704 | /* OpSel */ true); |
3705 | |
3706 | return {{ |
3707 | [=](MachineInstrBuilder &MIB) { |
3708 | MIB.addReg( |
3709 | copyToVGPRIfSrcFolded(Src, Mods, Root, MIB, /* ForceVGPR */ true)); |
3710 | }, |
3711 | [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods |
3712 | }}; |
3713 | } |
3714 | |
3715 | bool AMDGPUInstructionSelector::selectSmrdOffset(MachineOperand &Root, |
3716 | Register &Base, |
3717 | Register *SOffset, |
3718 | int64_t *Offset) const { |
3719 | MachineInstr *MI = Root.getParent(); |
3720 | MachineBasicBlock *MBB = MI->getParent(); |
3721 | |
3722 | // FIXME: We should shrink the GEP if the offset is known to be <= 32-bits, |
3723 | // then we can select all ptr + 32-bit offsets. |
3724 | SmallVector<GEPInfo, 4> AddrInfo; |
3725 | getAddrModeInfo(*MI, *MRI, AddrInfo); |
3726 | |
3727 | if (AddrInfo.empty()) |
3728 | return false; |
3729 | |
3730 | const GEPInfo &GEPI = AddrInfo[0]; |
3731 | std::optional<int64_t> EncodedImm = |
3732 | AMDGPU::getSMRDEncodedOffset(STI, GEPI.Imm, false); |
3733 | |
3734 | if (SOffset && Offset) { |
3735 | if (GEPI.SgprParts.size() == 1 && GEPI.Imm != 0 && EncodedImm && |
3736 | AddrInfo.size() > 1) { |
3737 | const GEPInfo &GEPI2 = AddrInfo[1]; |
3738 | if (GEPI2.SgprParts.size() == 2 && GEPI2.Imm == 0) { |
3739 | if (Register OffsetReg = |
3740 | matchZeroExtendFromS32(*MRI, GEPI2.SgprParts[1])) { |
3741 | Base = GEPI2.SgprParts[0]; |
3742 | *SOffset = OffsetReg; |
3743 | *Offset = *EncodedImm; |
3744 | return true; |
3745 | } |
3746 | } |
3747 | } |
3748 | return false; |
3749 | } |
3750 | |
3751 | if (Offset && GEPI.SgprParts.size() == 1 && EncodedImm) { |
3752 | Base = GEPI.SgprParts[0]; |
3753 | *Offset = *EncodedImm; |
3754 | return true; |
3755 | } |
3756 | |
3757 | // SGPR offset is unsigned. |
3758 | if (SOffset && GEPI.SgprParts.size() == 1 && isUInt<32>(GEPI.Imm) && |
3759 | GEPI.Imm != 0) { |
3760 | // If we make it this far we have a load with a 32-bit immediate offset. |
3761 | // It is OK to select this using an SGPR offset, because we have already |
3762 | // failed trying to select this load into one of the _IMM variants since |
3763 | // the _IMM Patterns are considered before the _SGPR patterns. |
3764 | Base = GEPI.SgprParts[0]; |
3765 | *SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); |
3766 | BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), *SOffset) |
3767 | .addImm(GEPI.Imm); |
3768 | return true; |
3769 | } |
3770 | |
3771 | if (SOffset && GEPI.SgprParts.size() && GEPI.Imm == 0) { |
3772 | if (Register OffsetReg = matchZeroExtendFromS32(*MRI, GEPI.SgprParts[1])) { |
3773 | Base = GEPI.SgprParts[0]; |
3774 | *SOffset = OffsetReg; |
3775 | return true; |
3776 | } |
3777 | } |
3778 | |
3779 | return false; |
3780 | } |
3781 | |
3782 | InstructionSelector::ComplexRendererFns |
3783 | AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const { |
3784 | Register Base; |
3785 | int64_t Offset; |
3786 | if (!selectSmrdOffset(Root, Base, /* SOffset= */ nullptr, &Offset)) |
3787 | return std::nullopt; |
3788 | |
3789 | return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Base); }, |
3790 | [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }}}; |
3791 | } |
3792 | |
3793 | InstructionSelector::ComplexRendererFns |
3794 | AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const { |
3795 | SmallVector<GEPInfo, 4> AddrInfo; |
3796 | getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo); |
3797 | |
3798 | if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1) |
3799 | return std::nullopt; |
3800 | |
3801 | const GEPInfo &GEPInfo = AddrInfo[0]; |
3802 | Register PtrReg = GEPInfo.SgprParts[0]; |
3803 | std::optional<int64_t> EncodedImm = |
3804 | AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm); |
3805 | if (!EncodedImm) |
3806 | return std::nullopt; |
3807 | |
3808 | return {{ |
3809 | [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); }, |
3810 | [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); } |
3811 | }}; |
3812 | } |
3813 | |
3814 | InstructionSelector::ComplexRendererFns |
3815 | AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const { |
3816 | Register Base, SOffset; |
3817 | if (!selectSmrdOffset(Root, Base, &SOffset, /* Offset= */ nullptr)) |
3818 | return std::nullopt; |
3819 | |
3820 | return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Base); }, |
3821 | [=](MachineInstrBuilder &MIB) { MIB.addReg(SOffset); }}}; |
3822 | } |
3823 | |
3824 | InstructionSelector::ComplexRendererFns |
3825 | AMDGPUInstructionSelector::selectSmrdSgprImm(MachineOperand &Root) const { |
3826 | Register Base, SOffset; |
3827 | int64_t Offset; |
3828 | if (!selectSmrdOffset(Root, Base, &SOffset, &Offset)) |
3829 | return std::nullopt; |
3830 | |
3831 | return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Base); }, |
3832 | [=](MachineInstrBuilder &MIB) { MIB.addReg(SOffset); }, |
3833 | [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }}}; |
3834 | } |
3835 | |
3836 | std::pair<Register, int> |
3837 | AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root, |
3838 | uint64_t FlatVariant) const { |
3839 | MachineInstr *MI = Root.getParent(); |
3840 | |
3841 | auto Default = std::pair(Root.getReg(), 0); |
3842 | |
3843 | if (!STI.hasFlatInstOffsets()) |
3844 | return Default; |
3845 | |
3846 | Register PtrBase; |
3847 | int64_t ConstOffset; |
3848 | std::tie(PtrBase, ConstOffset) = |
3849 | getPtrBaseWithConstantOffset(Root.getReg(), *MRI); |
3850 | if (ConstOffset == 0 || !isFlatScratchBaseLegal(PtrBase, FlatVariant)) |
3851 | return Default; |
3852 | |
3853 | unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace(); |
3854 | if (!TII.isLegalFLATOffset(ConstOffset, AddrSpace, FlatVariant)) |
3855 | return Default; |
3856 | |
3857 | return std::pair(PtrBase, ConstOffset); |
3858 | } |
3859 | |
3860 | InstructionSelector::ComplexRendererFns |
3861 | AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const { |
3862 | auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FLAT); |
3863 | |
3864 | return {{ |
3865 | [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); }, |
3866 | [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); }, |
3867 | }}; |
3868 | } |
3869 | |
3870 | InstructionSelector::ComplexRendererFns |
3871 | AMDGPUInstructionSelector::selectGlobalOffset(MachineOperand &Root) const { |
3872 | auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatGlobal); |
3873 | |
3874 | return {{ |
3875 | [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); }, |
3876 | [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); }, |
3877 | }}; |
3878 | } |
3879 | |
3880 | InstructionSelector::ComplexRendererFns |
3881 | AMDGPUInstructionSelector::selectScratchOffset(MachineOperand &Root) const { |
3882 | auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatScratch); |
3883 | |
3884 | return {{ |
3885 | [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); }, |
3886 | [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); }, |
3887 | }}; |
3888 | } |
3889 | |
3890 | // Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset) |
3891 | InstructionSelector::ComplexRendererFns |
3892 | AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root) const { |
3893 | Register Addr = Root.getReg(); |
3894 | Register PtrBase; |
3895 | int64_t ConstOffset; |
3896 | int64_t ImmOffset = 0; |
3897 | |
3898 | // Match the immediate offset first, which canonically is moved as low as |
3899 | // possible. |
3900 | std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI); |
3901 | |
3902 | if (ConstOffset != 0) { |
3903 | if (TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::GLOBAL_ADDRESS, |
3904 | SIInstrFlags::FlatGlobal)) { |
3905 | Addr = PtrBase; |
3906 | ImmOffset = ConstOffset; |
3907 | } else { |
3908 | auto PtrBaseDef = getDefSrcRegIgnoringCopies(PtrBase, *MRI); |
3909 | if (isSGPR(PtrBaseDef->Reg)) { |
3910 | if (ConstOffset > 0) { |
3911 | // Offset is too large. |
3912 | // |
3913 | // saddr + large_offset -> saddr + |
3914 | // (voffset = large_offset & ~MaxOffset) + |
3915 | // (large_offset & MaxOffset); |
3916 | int64_t SplitImmOffset, RemainderOffset; |
3917 | std::tie(SplitImmOffset, RemainderOffset) = TII.splitFlatOffset( |
3918 | ConstOffset, AMDGPUAS::GLOBAL_ADDRESS, SIInstrFlags::FlatGlobal); |
3919 | |
3920 | if (isUInt<32>(RemainderOffset)) { |
3921 | MachineInstr *MI = Root.getParent(); |
3922 | MachineBasicBlock *MBB = MI->getParent(); |
3923 | Register HighBits = |
3924 | MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
3925 | |
3926 | BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32), |
3927 | HighBits) |
3928 | .addImm(RemainderOffset); |
3929 | |
3930 | return {{ |
3931 | [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrBase); }, // saddr |
3932 | [=](MachineInstrBuilder &MIB) { |
3933 | MIB.addReg(HighBits); |
3934 | }, // voffset |
3935 | [=](MachineInstrBuilder &MIB) { MIB.addImm(SplitImmOffset); }, |
3936 | }}; |
3937 | } |
3938 | } |
3939 | |
3940 | // We are adding a 64 bit SGPR and a constant. If constant bus limit |
3941 | // is 1 we would need to perform 1 or 2 extra moves for each half of |
3942 | // the constant and it is better to do a scalar add and then issue a |
3943 | // single VALU instruction to materialize zero. Otherwise it is less |
3944 | // instructions to perform VALU adds with immediates or inline literals. |
3945 | unsigned NumLiterals = |
3946 | !TII.isInlineConstant(APInt(32, ConstOffset & 0xffffffff)) + |
3947 | !TII.isInlineConstant(APInt(32, ConstOffset >> 32)); |
3948 | if (STI.getConstantBusLimit(AMDGPU::V_ADD_U32_e64) > NumLiterals) |
3949 | return std::nullopt; |
3950 | } |
3951 | } |
3952 | } |
3953 | |
3954 | // Match the variable offset. |
3955 | auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI); |
3956 | if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) { |
3957 | // Look through the SGPR->VGPR copy. |
3958 | Register SAddr = |
3959 | getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI); |
3960 | |
3961 | if (isSGPR(SAddr)) { |
3962 | Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg(); |
3963 | |
3964 | // It's possible voffset is an SGPR here, but the copy to VGPR will be |
3965 | // inserted later. |
3966 | if (Register VOffset = matchZeroExtendFromS32(*MRI, PtrBaseOffset)) { |
3967 | return {{[=](MachineInstrBuilder &MIB) { // saddr |
3968 | MIB.addReg(SAddr); |
3969 | }, |
3970 | [=](MachineInstrBuilder &MIB) { // voffset |
3971 | MIB.addReg(VOffset); |
3972 | }, |
3973 | [=](MachineInstrBuilder &MIB) { // offset |
3974 | MIB.addImm(ImmOffset); |
3975 | }}}; |
3976 | } |
3977 | } |
3978 | } |
3979 | |
3980 | // FIXME: We should probably have folded COPY (G_IMPLICIT_DEF) earlier, and |
3981 | // drop this. |
3982 | if (AddrDef->MI->getOpcode() == AMDGPU::G_IMPLICIT_DEF || |
3983 | AddrDef->MI->getOpcode() == AMDGPU::G_CONSTANT || !isSGPR(AddrDef->Reg)) |
3984 | return std::nullopt; |
3985 | |
3986 | // It's cheaper to materialize a single 32-bit zero for vaddr than the two |
3987 | // moves required to copy a 64-bit SGPR to VGPR. |
3988 | MachineInstr *MI = Root.getParent(); |
3989 | MachineBasicBlock *MBB = MI->getParent(); |
3990 | Register VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
3991 | |
3992 | BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32), VOffset) |
3993 | .addImm(0); |
3994 | |
3995 | return {{ |
3996 | [=](MachineInstrBuilder &MIB) { MIB.addReg(AddrDef->Reg); }, // saddr |
3997 | [=](MachineInstrBuilder &MIB) { MIB.addReg(VOffset); }, // voffset |
3998 | [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset |
3999 | }}; |
4000 | } |
4001 | |
4002 | InstructionSelector::ComplexRendererFns |
4003 | AMDGPUInstructionSelector::selectScratchSAddr(MachineOperand &Root) const { |
4004 | Register Addr = Root.getReg(); |
4005 | Register PtrBase; |
4006 | int64_t ConstOffset; |
4007 | int64_t ImmOffset = 0; |
4008 | |
4009 | // Match the immediate offset first, which canonically is moved as low as |
4010 | // possible. |
4011 | std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI); |
4012 | |
4013 | if (ConstOffset != 0 && isFlatScratchBaseLegal(PtrBase) && |
4014 | TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS, |
4015 | SIInstrFlags::FlatScratch)) { |
4016 | Addr = PtrBase; |
4017 | ImmOffset = ConstOffset; |
4018 | } |
4019 | |
4020 | auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI); |
4021 | if (AddrDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) { |
4022 | int FI = AddrDef->MI->getOperand(1).getIndex(); |
4023 | return {{ |
4024 | [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr |
4025 | [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset |
4026 | }}; |
4027 | } |
4028 | |
4029 | Register SAddr = AddrDef->Reg; |
4030 | |
4031 | if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) { |
4032 | Register LHS = AddrDef->MI->getOperand(1).getReg(); |
4033 | Register RHS = AddrDef->MI->getOperand(2).getReg(); |
4034 | auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI); |
4035 | auto RHSDef = getDefSrcRegIgnoringCopies(RHS, *MRI); |
4036 | |
4037 | if (LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX && |
4038 | isSGPR(RHSDef->Reg)) { |
4039 | int FI = LHSDef->MI->getOperand(1).getIndex(); |
4040 | MachineInstr &I = *Root.getParent(); |
4041 | MachineBasicBlock *BB = I.getParent(); |
4042 | const DebugLoc &DL = I.getDebugLoc(); |
4043 | SAddr = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); |
4044 | |
4045 | BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_I32), SAddr) |
4046 | .addFrameIndex(FI) |
4047 | .addReg(RHSDef->Reg); |
4048 | } |
4049 | } |
4050 | |
4051 | if (!isSGPR(SAddr)) |
4052 | return std::nullopt; |
4053 | |
4054 | return {{ |
4055 | [=](MachineInstrBuilder &MIB) { MIB.addReg(SAddr); }, // saddr |
4056 | [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset |
4057 | }}; |
4058 | } |
4059 | |
4060 | // Check whether the flat scratch SVS swizzle bug affects this access. |
4061 | bool AMDGPUInstructionSelector::checkFlatScratchSVSSwizzleBug( |
4062 | Register VAddr, Register SAddr, uint64_t ImmOffset) const { |
4063 | if (!Subtarget->hasFlatScratchSVSSwizzleBug()) |
4064 | return false; |
4065 | |
4066 | // The bug affects the swizzling of SVS accesses if there is any carry out |
4067 | // from the two low order bits (i.e. from bit 1 into bit 2) when adding |
4068 | // voffset to (soffset + inst_offset). |
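     | // Worked example: if VAddr's low two bits can be as large as 3 and
     | // (SAddr + inst_offset)'s low two bits can be as large as 2, then
     | // 3 + 2 >= 4, so a carry from bit 1 into bit 2 is possible and the bug
     | // may apply.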
4069 | auto VKnown = KnownBits->getKnownBits(VAddr); |
4070 | auto SKnown = KnownBits::computeForAddSub( |
4071 | true, false, KnownBits->getKnownBits(SAddr), |
4072 | KnownBits::makeConstant(APInt(32, ImmOffset))); |
4073 | uint64_t VMax = VKnown.getMaxValue().getZExtValue(); |
4074 | uint64_t SMax = SKnown.getMaxValue().getZExtValue(); |
4075 | return (VMax & 3) + (SMax & 3) >= 4; |
4076 | } |
4077 | |
4078 | InstructionSelector::ComplexRendererFns |
4079 | AMDGPUInstructionSelector::selectScratchSVAddr(MachineOperand &Root) const { |
4080 | Register Addr = Root.getReg(); |
4081 | Register PtrBase; |
4082 | int64_t ConstOffset; |
4083 | int64_t ImmOffset = 0; |
4084 | |
4085 | // Match the immediate offset first, which canonically is moved as low as |
4086 | // possible. |
4087 | std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI); |
4088 | |
4089 | if (ConstOffset != 0 && |
4090 | TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS, true)) { |
4091 | Addr = PtrBase; |
4092 | ImmOffset = ConstOffset; |
4093 | } |
4094 | |
4095 | auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI); |
4096 | if (AddrDef->MI->getOpcode() != AMDGPU::G_PTR_ADD) |
4097 | return std::nullopt; |
4098 | |
4099 | Register RHS = AddrDef->MI->getOperand(2).getReg(); |
4100 | if (RBI.getRegBank(RHS, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) |
4101 | return std::nullopt; |
4102 | |
4103 | Register LHS = AddrDef->MI->getOperand(1).getReg(); |
4104 | auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI); |
4105 | |
4106 | if (!isFlatScratchBaseLegal(LHS) || !isFlatScratchBaseLegal(RHS)) |
4107 | return std::nullopt; |
4108 | |
4109 | if (checkFlatScratchSVSSwizzleBug(RHS, LHS, ImmOffset)) |
4110 | return std::nullopt; |
4111 | |
4112 | if (LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) { |
4113 | int FI = LHSDef->MI->getOperand(1).getIndex(); |
4114 | return {{ |
4115 | [=](MachineInstrBuilder &MIB) { MIB.addReg(RHS); }, // vaddr |
4116 | [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr |
4117 | [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset |
4118 | }}; |
4119 | } |
4120 | |
4121 | if (!isSGPR(LHS)) |
4122 | return std::nullopt; |
4123 | |
4124 | return {{ |
4125 | [=](MachineInstrBuilder &MIB) { MIB.addReg(RHS); }, // vaddr |
4126 | [=](MachineInstrBuilder &MIB) { MIB.addReg(LHS); }, // saddr |
4127 | [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset |
4128 | }}; |
4129 | } |
4130 | |
4131 | InstructionSelector::ComplexRendererFns |
4132 | AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const { |
4133 | MachineInstr *MI = Root.getParent(); |
4134 | MachineBasicBlock *MBB = MI->getParent(); |
4135 | MachineFunction *MF = MBB->getParent(); |
4136 | const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>(); |
4137 | |
4138 | int64_t Offset = 0; |
4139 | if (mi_match(Root.getReg(), *MRI, m_ICst(Offset)) && |
4140 | Offset != TM.getNullPointerValue(AMDGPUAS::PRIVATE_ADDRESS)) { |
4141 | Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
4142 | |
4143 | // TODO: Should this be inside the render function? The iterator seems to |
4144 | // move. |
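     | // Split the absolute offset: the bits above MaxOffset are materialized
     | // into a VGPR used as vaddr, and the low bits go in the instruction's
     | // immediate offset field below.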
4145 | const uint32_t MaxOffset = SIInstrInfo::getMaxMUBUFImmOffset(); |
4146 | BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32), |
4147 | HighBits) |
4148 | .addImm(Offset & ~MaxOffset); |
4149 | |
4150 | return {{[=](MachineInstrBuilder &MIB) { // rsrc |
4151 | MIB.addReg(Info->getScratchRSrcReg()); |
4152 | }, |
4153 | [=](MachineInstrBuilder &MIB) { // vaddr |
4154 | MIB.addReg(HighBits); |
4155 | }, |
4156 | [=](MachineInstrBuilder &MIB) { // soffset |
4157 | // Use constant zero for soffset and rely on eliminateFrameIndex |
4158 | // to choose the appropriate frame register if need be. |
4159 | MIB.addImm(0); |
4160 | }, |
4161 | [=](MachineInstrBuilder &MIB) { // offset |
4162 | MIB.addImm(Offset & MaxOffset); |
4163 | }}}; |
4164 | } |
4165 | |
4166 | assert(Offset == 0 || Offset == -1);
4167 | |
4168 | // Try to fold a frame index directly into the MUBUF vaddr field, and any |
4169 | // offsets. |
4170 | std::optional<int> FI; |
4171 | Register VAddr = Root.getReg(); |
4172 | if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) { |
4173 | Register PtrBase; |
4174 | int64_t ConstOffset; |
4175 | std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(VAddr, *MRI); |
4176 | if (ConstOffset != 0) { |
4177 | if (SIInstrInfo::isLegalMUBUFImmOffset(ConstOffset) && |
4178 | (!STI.privateMemoryResourceIsRangeChecked() || |
4179 | KnownBits->signBitIsZero(PtrBase))) { |
4180 | const MachineInstr *PtrBaseDef = MRI->getVRegDef(PtrBase); |
4181 | if (PtrBaseDef->getOpcode() == AMDGPU::G_FRAME_INDEX) |
4182 | FI = PtrBaseDef->getOperand(1).getIndex(); |
4183 | else |
4184 | VAddr = PtrBase; |
4185 | Offset = ConstOffset; |
4186 | } |
4187 | } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) { |
4188 | FI = RootDef->getOperand(1).getIndex(); |
4189 | } |
4190 | } |
4191 | |
4192 | return {{[=](MachineInstrBuilder &MIB) { // rsrc |
4193 | MIB.addReg(Info->getScratchRSrcReg()); |
4194 | }, |
4195 | [=](MachineInstrBuilder &MIB) { // vaddr |
4196 | if (FI) |
4197 | MIB.addFrameIndex(*FI); |
4198 | else |
4199 | MIB.addReg(VAddr); |
4200 | }, |
4201 | [=](MachineInstrBuilder &MIB) { // soffset |
4202 | // Use constant zero for soffset and rely on eliminateFrameIndex |
4203 | // to choose the appropriate frame register if need be. |
4204 | MIB.addImm(0); |
4205 | }, |
4206 | [=](MachineInstrBuilder &MIB) { // offset |
4207 | MIB.addImm(Offset); |
4208 | }}}; |
4209 | } |
4210 | |
4211 | bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base, |
4212 | int64_t Offset) const { |
4213 | if (!isUInt<16>(Offset)) |
4214 | return false; |
4215 | |
4216 | if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled()) |
4217 | return true; |
4218 | |
4219 | // On Southern Islands, instructions with a negative base value and an
4220 | // offset don't seem to work.
4221 | return KnownBits->signBitIsZero(Base); |
4222 | } |
4223 | |
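     | // For the two-offset DS forms, each offset is encoded in units of the
     | // element size. As an illustration, with Size = 4, byte offsets 8 and 12
     | // encode as offset0 = 2 and offset1 = 3, each of which must fit in 8 bits.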
4224 | bool AMDGPUInstructionSelector::isDSOffset2Legal(Register Base, int64_t Offset0, |
4225 | int64_t Offset1, |
4226 | unsigned Size) const { |
4227 | if (Offset0 % Size != 0 || Offset1 % Size != 0) |
4228 | return false; |
4229 | if (!isUInt<8>(Offset0 / Size) || !isUInt<8>(Offset1 / Size)) |
4230 | return false; |
4231 | |
4232 | if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled()) |
4233 | return true; |
4234 | |
4235 | // On Southern Islands, instructions with a negative base value and an
4236 | // offset don't seem to work.
4237 | return KnownBits->signBitIsZero(Base); |
4238 | } |
4239 | |
4240 | bool AMDGPUInstructionSelector::isFlatScratchBaseLegal( |
4241 | Register Base, uint64_t FlatVariant) const { |
4242 | if (FlatVariant != SIInstrFlags::FlatScratch) |
4243 | return true; |
4244 | |
4245 | // When the value in the 32-bit Base can be negative, calculate the scratch
4246 | // offset using a 32-bit add instruction; otherwise use Base (unsigned) + offset.
4247 | return KnownBits->signBitIsZero(Base); |
4248 | } |
4249 | |
4250 | bool AMDGPUInstructionSelector::isUnneededShiftMask(const MachineInstr &MI, |
4251 | unsigned ShAmtBits) const { |
4252 | assert(MI.getOpcode() == TargetOpcode::G_AND);
4253 | |
4254 | std::optional<APInt> RHS = |
4255 | getIConstantVRegVal(MI.getOperand(2).getReg(), *MRI); |
4256 | if (!RHS) |
4257 | return false; |
4258 | |
4259 | if (RHS->countr_one() >= ShAmtBits) |
4260 | return true; |
4261 | |
4262 | const APInt &LHSKnownZeros = |
4263 | KnownBits->getKnownZeroes(MI.getOperand(1).getReg()); |
4264 | return (LHSKnownZeros | *RHS).countr_one() >= ShAmtBits; |
4265 | } |
4266 | |
4267 | // Return the wave level SGPR base address if this is a wave address. |
4268 | static Register getWaveAddress(const MachineInstr *Def) { |
4269 | return Def->getOpcode() == AMDGPU::G_AMDGPU_WAVE_ADDRESS |
4270 | ? Def->getOperand(1).getReg() |
4271 | : Register(); |
4272 | } |
4273 | |
4274 | InstructionSelector::ComplexRendererFns |
4275 | AMDGPUInstructionSelector::selectMUBUFScratchOffset( |
4276 | MachineOperand &Root) const { |
4277 | Register Reg = Root.getReg(); |
4278 | const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>(); |
4279 | |
4280 | const MachineInstr *Def = MRI->getVRegDef(Reg); |
4281 | if (Register WaveBase = getWaveAddress(Def)) { |
4282 | return {{ |
4283 | [=](MachineInstrBuilder &MIB) { // rsrc |
4284 | MIB.addReg(Info->getScratchRSrcReg()); |
4285 | }, |
4286 | [=](MachineInstrBuilder &MIB) { // soffset |
4287 | MIB.addReg(WaveBase); |
4288 | }, |
4289 | [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // offset |
4290 | }}; |
4291 | } |
4292 | |
4293 | int64_t Offset = 0; |
4294 | |
4295 | // FIXME: Copy check is a hack |
4296 | Register BasePtr; |
4297 | if (mi_match(Reg, *MRI, m_GPtrAdd(m_Reg(BasePtr), m_Copy(m_ICst(Offset))))) { |
4298 | if (!SIInstrInfo::isLegalMUBUFImmOffset(Offset)) |
4299 | return {}; |
4300 | const MachineInstr *BasePtrDef = MRI->getVRegDef(BasePtr); |
4301 | Register WaveBase = getWaveAddress(BasePtrDef); |
4302 | if (!WaveBase) |
4303 | return {}; |
4304 | |
4305 | return {{ |
4306 | [=](MachineInstrBuilder &MIB) { // rsrc |
4307 | MIB.addReg(Info->getScratchRSrcReg()); |
4308 | }, |
4309 | [=](MachineInstrBuilder &MIB) { // soffset |
4310 | MIB.addReg(WaveBase); |
4311 | }, |
4312 | [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset |
4313 | }}; |
4314 | } |
4315 | |
4316 | if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) || |
4317 | !SIInstrInfo::isLegalMUBUFImmOffset(Offset)) |
4318 | return {}; |
4319 | |
4320 | return {{ |
4321 | [=](MachineInstrBuilder &MIB) { // rsrc |
4322 | MIB.addReg(Info->getScratchRSrcReg()); |
4323 | }, |
4324 | [=](MachineInstrBuilder &MIB) { // soffset |
4325 | MIB.addImm(0); |
4326 | }, |
4327 | [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset |
4328 | }}; |
4329 | } |
4330 | |
4331 | std::pair<Register, unsigned> |
4332 | AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const { |
4333 | const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg()); |
4334 | if (!RootDef) |
4335 | return std::pair(Root.getReg(), 0); |
4336 | |
4337 | int64_t ConstAddr = 0; |
4338 | |
4339 | Register PtrBase; |
4340 | int64_t Offset; |
4341 | std::tie(PtrBase, Offset) = |
4342 | getPtrBaseWithConstantOffset(Root.getReg(), *MRI); |
4343 | |
4344 | if (Offset) { |
4345 | if (isDSOffsetLegal(PtrBase, Offset)) { |
4346 | // (add n0, c0) |
4347 | return std::pair(PtrBase, Offset); |
4348 | } |
4349 | } else if (RootDef->getOpcode() == AMDGPU::G_SUB) { |
4350 | // TODO |
4351 | |
4352 | |
4353 | } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) { |
4354 | // TODO |
4355 | |
4356 | } |
4357 | |
4358 | return std::pair(Root.getReg(), 0); |
4359 | } |
4360 | |
4361 | InstructionSelector::ComplexRendererFns |
4362 | AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const { |
4363 | Register Reg; |
4364 | unsigned Offset; |
4365 | std::tie(Reg, Offset) = selectDS1Addr1OffsetImpl(Root); |
4366 | return {{ |
4367 | [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); }, |
4368 | [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } |
4369 | }}; |
4370 | } |
4371 | |
4372 | InstructionSelector::ComplexRendererFns |
4373 | AMDGPUInstructionSelector::selectDS64Bit4ByteAligned(MachineOperand &Root) const { |
4374 | return selectDSReadWrite2(Root, 4); |
4375 | } |
4376 | |
4377 | InstructionSelector::ComplexRendererFns |
4378 | AMDGPUInstructionSelector::selectDS128Bit8ByteAligned(MachineOperand &Root) const { |
4379 | return selectDSReadWrite2(Root, 8); |
4380 | } |
4381 | |
4382 | InstructionSelector::ComplexRendererFns |
4383 | AMDGPUInstructionSelector::selectDSReadWrite2(MachineOperand &Root, |
4384 | unsigned Size) const { |
4385 | Register Reg; |
4386 | unsigned Offset; |
4387 | std::tie(Reg, Offset) = selectDSReadWrite2Impl(Root, Size); |
4388 | return {{ |
4389 | [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); }, |
4390 | [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, |
4391 | [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset+1); } |
4392 | }}; |
4393 | } |
4394 | |
4395 | std::pair<Register, unsigned> |
4396 | AMDGPUInstructionSelector::selectDSReadWrite2Impl(MachineOperand &Root, |
4397 | unsigned Size) const { |
4398 | const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg()); |
4399 | if (!RootDef) |
4400 | return std::pair(Root.getReg(), 0); |
4401 | |
4402 | int64_t ConstAddr = 0; |
4403 | |
4404 | Register PtrBase; |
4405 | int64_t Offset; |
4406 | std::tie(PtrBase, Offset) = |
4407 | getPtrBaseWithConstantOffset(Root.getReg(), *MRI); |
4408 | |
4409 | if (Offset) { |
4410 | int64_t OffsetValue0 = Offset; |
4411 | int64_t OffsetValue1 = Offset + Size; |
4412 | if (isDSOffset2Legal(PtrBase, OffsetValue0, OffsetValue1, Size)) { |
4413 | // (add n0, c0) |
4414 | return std::pair(PtrBase, OffsetValue0 / Size); |
4415 | } |
4416 | } else if (RootDef->getOpcode() == AMDGPU::G_SUB) { |
4417 | // TODO |
4418 | |
4419 | } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) { |
4420 | // TODO |
4421 | |
4422 | } |
4423 | |
4424 | return std::pair(Root.getReg(), 0); |
4425 | } |
4426 | |
4427 | /// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return |
4428 | /// the base value with the constant offset. There may be intervening copies |
4429 | /// between \p Root and the identified constant. Returns \p Root, 0 if this does |
4430 | /// not match the pattern. |
4431 | std::pair<Register, int64_t> |
4432 | AMDGPUInstructionSelector::getPtrBaseWithConstantOffset( |
4433 | Register Root, const MachineRegisterInfo &MRI) const { |
4434 | MachineInstr *RootI = getDefIgnoringCopies(Root, MRI); |
4435 | if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD) |
4436 | return {Root, 0}; |
4437 | |
4438 | MachineOperand &RHS = RootI->getOperand(2); |
4439 | std::optional<ValueAndVReg> MaybeOffset = |
4440 | getIConstantVRegValWithLookThrough(RHS.getReg(), MRI); |
4441 | if (!MaybeOffset) |
4442 | return {Root, 0}; |
4443 | return {RootI->getOperand(1).getReg(), MaybeOffset->Value.getSExtValue()}; |
4444 | } |
4445 | |
4446 | static void addZeroImm(MachineInstrBuilder &MIB) { |
4447 | MIB.addImm(0); |
4448 | } |
4449 | |
4450 | /// Return a resource descriptor for use with an arbitrary 64-bit pointer. If \p |
4451 | /// BasePtr is not valid, a null base pointer will be used. |
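     | /// In the resulting descriptor, BasePtr (or zero) occupies dwords 0-1 and
     | /// FormatLo/FormatHi occupy dwords 2 and 3 respectively.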
4452 | static Register buildRSRC(MachineIRBuilder &B, MachineRegisterInfo &MRI, |
4453 | uint32_t FormatLo, uint32_t FormatHi, |
4454 | Register BasePtr) { |
4455 | Register RSrc2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); |
4456 | Register RSrc3 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); |
4457 | Register RSrcHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); |
4458 | Register RSrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass); |
4459 | |
4460 | B.buildInstr(AMDGPU::S_MOV_B32) |
4461 | .addDef(RSrc2) |
4462 | .addImm(FormatLo); |
4463 | B.buildInstr(AMDGPU::S_MOV_B32) |
4464 | .addDef(RSrc3) |
4465 | .addImm(FormatHi); |
4466 | |
4467 | // Build the half of the subregister with the constants before building the |
4468 | // full 128-bit register. If we are building multiple resource descriptors, |
4469 | // this will allow CSEing of the 2-component register. |
4470 | B.buildInstr(AMDGPU::REG_SEQUENCE) |
4471 | .addDef(RSrcHi) |
4472 | .addReg(RSrc2) |
4473 | .addImm(AMDGPU::sub0) |
4474 | .addReg(RSrc3) |
4475 | .addImm(AMDGPU::sub1); |
4476 | |
4477 | Register RSrcLo = BasePtr; |
4478 | if (!BasePtr) { |
4479 | RSrcLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); |
4480 | B.buildInstr(AMDGPU::S_MOV_B64) |
4481 | .addDef(RSrcLo) |
4482 | .addImm(0); |
4483 | } |
4484 | |
4485 | B.buildInstr(AMDGPU::REG_SEQUENCE) |
4486 | .addDef(RSrc) |
4487 | .addReg(RSrcLo) |
4488 | .addImm(AMDGPU::sub0_sub1) |
4489 | .addReg(RSrcHi) |
4490 | .addImm(AMDGPU::sub2_sub3); |
4491 | |
4492 | return RSrc; |
4493 | } |
4494 | |
4495 | static Register buildAddr64RSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI, |
4496 | const SIInstrInfo &TII, Register BasePtr) { |
4497 | uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat(); |
4498 | |
4499 | // FIXME: Why are half the "default" bits ignored based on the addressing |
4500 | // mode? |
4501 | return buildRSRC(B, MRI, 0, Hi_32(DefaultFormat), BasePtr); |
4502 | } |
4503 | |
4504 | static Register buildOffsetSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI, |
4505 | const SIInstrInfo &TII, Register BasePtr) { |
4506 | uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat(); |
4507 | |
4508 | // FIXME: Why are half the "default" bits ignored based on the addressing |
4509 | // mode? |
4510 | return buildRSRC(B, MRI, -1, Hi_32(DefaultFormat), BasePtr); |
4511 | } |
4512 | |
4513 | AMDGPUInstructionSelector::MUBUFAddressData |
4514 | AMDGPUInstructionSelector::parseMUBUFAddress(Register Src) const { |
4515 | MUBUFAddressData Data; |
4516 | Data.N0 = Src; |
4517 | |
4518 | Register PtrBase; |
4519 | int64_t Offset; |
4520 | |
4521 | std::tie(PtrBase, Offset) = getPtrBaseWithConstantOffset(Src, *MRI); |
4522 | if (isUInt<32>(Offset)) { |
4523 | Data.N0 = PtrBase; |
4524 | Data.Offset = Offset; |
4525 | } |
4526 | |
4527 | if (MachineInstr *InputAdd |
4528 | = getOpcodeDef(TargetOpcode::G_PTR_ADD, Data.N0, *MRI)) { |
4529 | Data.N2 = InputAdd->getOperand(1).getReg(); |
4530 | Data.N3 = InputAdd->getOperand(2).getReg(); |
4531 | |
4532 | // FIXME: Need to fix extra SGPR->VGPR copies inserted
4533 | // FIXME: Don't know that this was defined by operand 0
4534 | // |
4535 | // TODO: Remove this when we have copy folding optimizations after |
4536 | // RegBankSelect. |
4537 | Data.N2 = getDefIgnoringCopies(Data.N2, *MRI)->getOperand(0).getReg(); |
4538 | Data.N3 = getDefIgnoringCopies(Data.N3, *MRI)->getOperand(0).getReg(); |
4539 | } |
4540 | |
4541 | return Data; |
4542 | } |
4543 | |
4544 | /// Return true if the addr64 MUBUF mode should be used for the given address.
4545 | bool AMDGPUInstructionSelector::shouldUseAddr64(MUBUFAddressData Addr) const { |
4546 | // (ptr_add N2, N3) -> addr64, or |
4547 | // (ptr_add (ptr_add N2, N3), C1) -> addr64 |
4548 | if (Addr.N2) |
4549 | return true; |
4550 | |
4551 | const RegisterBank *N0Bank = RBI.getRegBank(Addr.N0, *MRI, TRI); |
4552 | return N0Bank->getID() == AMDGPU::VGPRRegBankID; |
4553 | } |
4554 | |
4555 | /// Split an immediate offset \p ImmOffset depending on whether it fits in the |
4556 | /// immediate field. Modifies \p ImmOffset and sets \p SOffset to the variable |
4557 | /// component. |
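     | /// For example, an ImmOffset too large for the MUBUF immediate field is
     | /// moved into a fresh SGPR via S_MOV_B32, and ImmOffset is then reset to 0.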
4558 | void AMDGPUInstructionSelector::splitIllegalMUBUFOffset( |
4559 | MachineIRBuilder &B, Register &SOffset, int64_t &ImmOffset) const { |
4560 | if (SIInstrInfo::isLegalMUBUFImmOffset(ImmOffset)) |
4561 | return; |
4562 | |
4563 | // Illegal offset, store it in soffset. |
4564 | SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); |
4565 | B.buildInstr(AMDGPU::S_MOV_B32) |
4566 | .addDef(SOffset) |
4567 | .addImm(ImmOffset); |
4568 | ImmOffset = 0; |
4569 | } |
4570 | |
4571 | bool AMDGPUInstructionSelector::selectMUBUFAddr64Impl( |
4572 | MachineOperand &Root, Register &VAddr, Register &RSrcReg, |
4573 | Register &SOffset, int64_t &Offset) const { |
4574 | // FIXME: Predicates should stop this from reaching here. |
4575 | // addr64 bit was removed for volcanic islands. |
4576 | if (!STI.hasAddr64() || STI.useFlatForGlobal()) |
4577 | return false; |
4578 | |
4579 | MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg()); |
4580 | if (!shouldUseAddr64(AddrData)) |
4581 | return false; |
4582 | |
4583 | Register N0 = AddrData.N0; |
4584 | Register N2 = AddrData.N2; |
4585 | Register N3 = AddrData.N3; |
4586 | Offset = AddrData.Offset; |
4587 | |
4588 | // Base pointer for the SRD. |
4589 | Register SRDPtr; |
4590 | |
4591 | if (N2) { |
4592 | if (RBI.getRegBank(N2, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) { |
4593 | assert(N3);
4594 | if (RBI.getRegBank(N3, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) { |
4595 | // Both N2 and N3 are divergent. Use N0 (the result of the add) as the |
4596 | // addr64, and construct the default resource from a 0 address. |
4597 | VAddr = N0; |
4598 | } else { |
4599 | SRDPtr = N3; |
4600 | VAddr = N2; |
4601 | } |
4602 | } else { |
4603 | // N2 is not divergent. |
4604 | SRDPtr = N2; |
4605 | VAddr = N3; |
4606 | } |
4607 | } else if (RBI.getRegBank(N0, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) { |
4608 | // Use the default null pointer in the resource |
4609 | VAddr = N0; |
4610 | } else { |
4611 | // N0 -> offset, or |
4612 | // (N0 + C1) -> offset |
4613 | SRDPtr = N0; |
4614 | } |
4615 | |
4616 | MachineIRBuilder B(*Root.getParent()); |
4617 | RSrcReg = buildAddr64RSrc(B, *MRI, TII, SRDPtr); |
4618 | splitIllegalMUBUFOffset(B, SOffset, Offset); |
4619 | return true; |
4620 | } |
4621 | |
4622 | bool AMDGPUInstructionSelector::selectMUBUFOffsetImpl( |
4623 | MachineOperand &Root, Register &RSrcReg, Register &SOffset, |
4624 | int64_t &Offset) const { |
4625 | |
4626 | // FIXME: Pattern should not reach here. |
4627 | if (STI.useFlatForGlobal()) |
4628 | return false; |
4629 | |
4630 | MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg()); |
4631 | if (shouldUseAddr64(AddrData)) |
4632 | return false; |
4633 | |
4634 | // N0 -> offset, or |
4635 | // (N0 + C1) -> offset |
4636 | Register SRDPtr = AddrData.N0; |
4637 | Offset = AddrData.Offset; |
4638 | |
4639 | // TODO: Look through extensions for 32-bit soffset. |
4640 | MachineIRBuilder B(*Root.getParent()); |
4641 | |
4642 | RSrcReg = buildOffsetSrc(B, *MRI, TII, SRDPtr); |
4643 | splitIllegalMUBUFOffset(B, SOffset, Offset); |
4644 | return true; |
4645 | } |
4646 | |
4647 | InstructionSelector::ComplexRendererFns |
4648 | AMDGPUInstructionSelector::selectMUBUFAddr64(MachineOperand &Root) const { |
4649 | Register VAddr; |
4650 | Register RSrcReg; |
4651 | Register SOffset; |
4652 | int64_t Offset = 0; |
4653 | |
4654 | if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset)) |
4655 | return {}; |
4656 | |
4657 | // FIXME: Use defaulted operands for trailing 0s and remove from the complex |
4658 | // pattern. |
4659 | return {{ |
4660 | [=](MachineInstrBuilder &MIB) { // rsrc |
4661 | MIB.addReg(RSrcReg); |
4662 | }, |
4663 | [=](MachineInstrBuilder &MIB) { // vaddr |
4664 | MIB.addReg(VAddr); |
4665 | }, |
4666 | [=](MachineInstrBuilder &MIB) { // soffset |
4667 | if (SOffset) |
4668 | MIB.addReg(SOffset); |
4669 | else |
4670 | MIB.addImm(0); |
4671 | }, |
4672 | [=](MachineInstrBuilder &MIB) { // offset |
4673 | MIB.addImm(Offset); |
4674 | }, |
4675 | addZeroImm, // cpol |
4676 | addZeroImm, // tfe |
4677 | addZeroImm // swz |
4678 | }}; |
4679 | } |
4680 | |
4681 | InstructionSelector::ComplexRendererFns |
4682 | AMDGPUInstructionSelector::selectMUBUFOffset(MachineOperand &Root) const { |
4683 | Register RSrcReg; |
4684 | Register SOffset; |
4685 | int64_t Offset = 0; |
4686 | |
4687 | if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset)) |
4688 | return {}; |
4689 | |
4690 | return {{ |
4691 | [=](MachineInstrBuilder &MIB) { // rsrc |
4692 | MIB.addReg(RSrcReg); |
4693 | }, |
4694 | [=](MachineInstrBuilder &MIB) { // soffset |
4695 | if (SOffset) |
4696 | MIB.addReg(SOffset); |
4697 | else |
4698 | MIB.addImm(0); |
4699 | }, |
4700 | [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset |
4701 | addZeroImm, // cpol |
4702 | addZeroImm, // tfe |
4703 | addZeroImm, // swz |
4704 | }}; |
4705 | } |
4706 | |
4707 | /// Get an immediate that must be 32 bits, and treated as zero extended.
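     | /// E.g. a G_CONSTANT of -1 is returned as 0xffffffff rather than being
     | /// sign-extended into the upper 32 bits of the result.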
4708 | static std::optional<uint64_t> |
4709 | getConstantZext32Val(Register Reg, const MachineRegisterInfo &MRI) { |
4710 | // getIConstantVRegSExtVal sign-extends the value, so check whether it still fits in 32 bits.
4711 | std::optional<int64_t> OffsetVal = getIConstantVRegSExtVal(Reg, MRI); |
4712 | if (!OffsetVal || !isInt<32>(*OffsetVal)) |
4713 | return std::nullopt; |
4714 | return Lo_32(*OffsetVal); |
4715 | } |
4716 | |
4717 | InstructionSelector::ComplexRendererFns |
4718 | AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const { |
4719 | std::optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI); |
4720 | if (!OffsetVal) |
4721 | return {}; |
4722 | |
4723 | std::optional<int64_t> EncodedImm = |
4724 | AMDGPU::getSMRDEncodedOffset(STI, *OffsetVal, true); |
4725 | if (!EncodedImm) |
4726 | return {}; |
4727 | |
4728 | return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); } }}; |
4729 | } |
4730 | |
4731 | InstructionSelector::ComplexRendererFns |
4732 | AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const { |
4733 | assert(STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);
4734 | |
4735 | std::optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI); |
4736 | if (!OffsetVal) |
4737 | return {}; |
4738 | |
4739 | std::optional<int64_t> EncodedImm = |
4740 | AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal); |
4741 | if (!EncodedImm) |
4742 | return {}; |
4743 | |
4744 | return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); } }}; |
4745 | } |
4746 | |
4747 | InstructionSelector::ComplexRendererFns |
4748 | AMDGPUInstructionSelector::selectSMRDBufferSgprImm(MachineOperand &Root) const { |
4749 | // Match the (soffset + offset) pair as a 32-bit register base and |
4750 | // an immediate offset. |
4751 | Register SOffset; |
4752 | unsigned Offset; |
4753 | std::tie(SOffset, Offset) = |
4754 | AMDGPU::getBaseWithConstantOffset(*MRI, Root.getReg(), KnownBits); |
4755 | if (!SOffset) |
4756 | return std::nullopt; |
4757 | |
4758 | std::optional<int64_t> EncodedOffset = |
4759 | AMDGPU::getSMRDEncodedOffset(STI, Offset, /* IsBuffer */ true); |
4760 | if (!EncodedOffset) |
4761 | return std::nullopt; |
4762 | |
4763 | assert(MRI->getType(SOffset) == LLT::scalar(32));
4764 | return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(SOffset); }, |
4765 | [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedOffset); }}}; |
4766 | } |
4767 | |
4768 | // Variant of stripBitCast that returns the instruction instead of a |
4769 | // MachineOperand. |
4770 | static MachineInstr *stripBitCast(MachineInstr *MI, MachineRegisterInfo &MRI) { |
4771 | if (MI->getOpcode() == AMDGPU::G_BITCAST) |
4772 | return getDefIgnoringCopies(MI->getOperand(1).getReg(), MRI); |
4773 | return MI; |
4774 | } |
4775 | |
4776 | // Figure out if this is really an extract of the high 16 bits of a dword;
4777 | // returns nullptr if it isn't.
4778 | static MachineInstr *isExtractHiElt(MachineInstr *Inst, |
4779 | MachineRegisterInfo &MRI) { |
4780 | Inst = stripBitCast(Inst, MRI); |
4781 | |
4782 | if (Inst->getOpcode() != AMDGPU::G_TRUNC) |
4783 | return nullptr; |
4784 | |
4785 | MachineInstr *TruncOp = |
4786 | getDefIgnoringCopies(Inst->getOperand(1).getReg(), MRI); |
4787 | TruncOp = stripBitCast(TruncOp, MRI); |
4788 | |
4789 | // G_LSHR x, (G_CONSTANT i32 16) |
4790 | if (TruncOp->getOpcode() == AMDGPU::G_LSHR) { |
4791 | auto SrlAmount = getIConstantVRegValWithLookThrough( |
4792 | TruncOp->getOperand(2).getReg(), MRI); |
4793 | if (SrlAmount && SrlAmount->Value.getZExtValue() == 16) { |
4794 | MachineInstr *SrlOp = |
4795 | getDefIgnoringCopies(TruncOp->getOperand(1).getReg(), MRI); |
4796 | return stripBitCast(SrlOp, MRI); |
4797 | } |
4798 | } |
4799 | |
4800 | // G_SHUFFLE_VECTOR x, y, shufflemask(1, 1|0) |
4801 | // 1, 0 swaps the low/high 16 bits. |
4802 | // 1, 1 sets the high 16 bits to be the same as the low 16. |
4803 | // In either case, it selects the high elements.
4804 | if (TruncOp->getOpcode() == AMDGPU::G_SHUFFLE_VECTOR) { |
4805 | assert(MRI.getType(TruncOp->getOperand(0).getReg()) ==
4806 |        LLT::fixed_vector(2, 16));
4807 | |
4808 | ArrayRef<int> Mask = TruncOp->getOperand(3).getShuffleMask(); |
4809 | assert(Mask.size() == 2);
4810 | |
4811 | if (Mask[0] == 1 && Mask[1] <= 1) { |
4812 | MachineInstr *LHS = |
4813 | getDefIgnoringCopies(TruncOp->getOperand(1).getReg(), MRI); |
4814 | return stripBitCast(LHS, MRI); |
4815 | } |
4816 | } |
4817 | |
4818 | return nullptr; |
4819 | } |
4820 | |
4821 | std::pair<Register, unsigned> |
4822 | AMDGPUInstructionSelector::selectVOP3PMadMixModsImpl(MachineOperand &Root, |
4823 | bool &Matched) const { |
4824 | Matched = false; |
4825 | |
4826 | Register Src; |
4827 | unsigned Mods; |
4828 | std::tie(Src, Mods) = selectVOP3ModsImpl(Root); |
4829 | |
4830 | MachineInstr *MI = getDefIgnoringCopies(Src, *MRI); |
4831 | if (MI->getOpcode() == AMDGPU::G_FPEXT) { |
4832 | MachineOperand *MO = &MI->getOperand(1); |
4833 | Src = MO->getReg(); |
4834 | MI = getDefIgnoringCopies(Src, *MRI); |
4835 | |
4836 | assert(MRI->getType(Src) == LLT::scalar(16));
4837 | |
4838 | // See through bitcasts. |
4839 | // FIXME: Would be nice to use stripBitCast here. |
4840 | if (MI->getOpcode() == AMDGPU::G_BITCAST) { |
4841 | MO = &MI->getOperand(1); |
4842 | Src = MO->getReg(); |
4843 | MI = getDefIgnoringCopies(Src, *MRI); |
4844 | } |
4845 | |
4846 | const auto CheckAbsNeg = [&]() { |
4847 | // Be careful about folding modifiers if we already have an abs. fneg is |
4848 | // applied last, so we don't want to apply an earlier fneg. |
4849 | if ((Mods & SISrcMods::ABS) == 0) { |
4850 | unsigned ModsTmp; |
4851 | std::tie(Src, ModsTmp) = selectVOP3ModsImpl(*MO); |
4852 | MI = getDefIgnoringCopies(Src, *MRI); |
4853 | |
4854 | if ((ModsTmp & SISrcMods::NEG) != 0) |
4855 | Mods ^= SISrcMods::NEG; |
4856 | |
4857 | if ((ModsTmp & SISrcMods::ABS) != 0) |
4858 | Mods |= SISrcMods::ABS; |
4859 | } |
4860 | }; |
4861 | |
4862 | CheckAbsNeg(); |
4863 | |
4864 | // op_sel/op_sel_hi decide the source type and source. |
4865 | // If the source's op_sel_hi is set, it indicates to do a conversion from |
4866 | // fp16. If the source's op_sel is set, it picks the high half of the
4867 | // source register. |
4868 | |
4869 | Mods |= SISrcMods::OP_SEL_1; |
4870 | |
4871 | if (MachineInstr *ExtractHiEltMI = isExtractHiElt(MI, *MRI)) { |
4872 | Mods |= SISrcMods::OP_SEL_0; |
4873 | MI = ExtractHiEltMI; |
4874 | MO = &MI->getOperand(0); |
4875 | Src = MO->getReg(); |
4876 | |
4877 | CheckAbsNeg(); |
4878 | } |
4879 | |
4880 | Matched = true; |
4881 | } |
4882 | |
4883 | return {Src, Mods}; |
4884 | } |
4885 | |
4886 | InstructionSelector::ComplexRendererFns |
4887 | AMDGPUInstructionSelector::selectVOP3PMadMixModsExt( |
4888 | MachineOperand &Root) const { |
4889 | Register Src; |
4890 | unsigned Mods; |
4891 | bool Matched; |
4892 | std::tie(Src, Mods) = selectVOP3PMadMixModsImpl(Root, Matched); |
4893 | if (!Matched) |
4894 | return {}; |
4895 | |
4896 | return {{ |
4897 | [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); }, |
4898 | [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods |
4899 | }}; |
4900 | } |
4901 | |
4902 | InstructionSelector::ComplexRendererFns |
4903 | AMDGPUInstructionSelector::selectVOP3PMadMixMods(MachineOperand &Root) const { |
4904 | Register Src; |
4905 | unsigned Mods; |
4906 | bool Matched; |
4907 | std::tie(Src, Mods) = selectVOP3PMadMixModsImpl(Root, Matched); |
4908 | |
4909 | return {{ |
4910 | [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); }, |
4911 | [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods |
4912 | }}; |
4913 | } |
4914 | |
4915 | void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB, |
4916 | const MachineInstr &MI, |
4917 | int OpIdx) const { |
4918 | assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4919 |        "Expected G_CONSTANT");
4920 | MIB.addImm(MI.getOperand(1).getCImm()->getSExtValue()); |
4921 | } |
4922 | |
4923 | void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB, |
4924 | const MachineInstr &MI, |
4925 | int OpIdx) const { |
4926 | assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4927 |        "Expected G_CONSTANT");
4928 | MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue()); |
4929 | } |
4930 | |
4931 | void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB, |
4932 | const MachineInstr &MI, |
4933 | int OpIdx) const { |
4934 | assert(OpIdx == -1);
4935 | |
4936 | const MachineOperand &Op = MI.getOperand(1); |
4937 | if (MI.getOpcode() == TargetOpcode::G_FCONSTANT) |
4938 | MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue()); |
4939 | else { |
4940 | assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
4941 | MIB.addImm(Op.getCImm()->getSExtValue()); |
4942 | } |
4943 | } |
4944 | |
4945 | void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB, |
4946 | const MachineInstr &MI, |
4947 | int OpIdx) const { |
4948 | assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4949 |        "Expected G_CONSTANT");
4950 | MIB.addImm(MI.getOperand(1).getCImm()->getValue().popcount()); |
4951 | } |
4952 | |
4953 | /// This only really exists to satisfy the DAG type-checking machinery, so it
4954 | /// is a no-op here.
4955 | void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB, |
4956 | const MachineInstr &MI, |
4957 | int OpIdx) const { |
4958 | MIB.addImm(MI.getOperand(OpIdx).getImm()); |
4959 | } |
4960 | |
4961 | void AMDGPUInstructionSelector::renderOpSelTImm(MachineInstrBuilder &MIB, |
4962 | const MachineInstr &MI, |
4963 | int OpIdx) const { |
4964 | assert(OpIdx >= 0 && "expected to match an immediate operand");
4965 | MIB.addImm(MI.getOperand(OpIdx).getImm() ? SISrcMods::OP_SEL_0 : 0); |
4966 | } |
4967 | |
4968 | void AMDGPUInstructionSelector::renderExtractCPol(MachineInstrBuilder &MIB, |
4969 | const MachineInstr &MI, |
4970 | int OpIdx) const { |
4971 | assert(OpIdx >= 0 && "expected to match an immediate operand");
4972 | MIB.addImm(MI.getOperand(OpIdx).getImm() & AMDGPU::CPol::ALL); |
4973 | } |
4974 | |
4975 | void AMDGPUInstructionSelector::renderExtractSWZ(MachineInstrBuilder &MIB, |
4976 | const MachineInstr &MI, |
4977 | int OpIdx) const { |
4978 | assert(OpIdx >= 0 && "expected to match an immediate operand");
4979 | MIB.addImm((MI.getOperand(OpIdx).getImm() >> 3) & 1); |
4980 | } |
4981 | |
4982 | void AMDGPUInstructionSelector::renderSetGLC(MachineInstrBuilder &MIB, |
4983 | const MachineInstr &MI, |
4984 | int OpIdx) const { |
4985 | assert(OpIdx >= 0 && "expected to match an immediate operand");
4986 | MIB.addImm(MI.getOperand(OpIdx).getImm() | AMDGPU::CPol::GLC); |
4987 | } |
4988 | |
4989 | void AMDGPUInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB, |
4990 | const MachineInstr &MI, |
4991 | int OpIdx) const { |
4992 | MIB.addFrameIndex(MI.getOperand(1).getIndex());
4993 | } |
4994 | |
4995 | bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const { |
4996 | return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm()); |
4997 | } |
4998 | |
4999 | bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const { |
5000 | return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm()); |
5001 | } |
5002 | |
5003 | bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const { |
5004 | return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm()); |
5005 | } |
5006 | |
5007 | bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const { |
5008 | return TII.isInlineConstant(Imm); |
5009 | } |