Bug Summary

File: build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
Warning: line 3715, column 5
Value stored to 'MI' is never read
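
This diagnostic comes from the deadcode.DeadStores checker enabled in the analyzer invocation below: it fires when a value is stored to a variable but never read before being overwritten or going out of scope. A minimal sketch of the pattern (hypothetical code; the actual statement at line 3715 lies outside the excerpt shown here):

int example(int X) {
  int Tmp = X * 2; // dead store: this value of 'Tmp' is never read
  Tmp = X + 1;     // 'Tmp' is unconditionally overwritten here
  return Tmp;
}

The usual fix is to drop the unused store or, if the right-hand side has side effects, to keep the call and discard the result explicitly.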

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name AMDGPUInstructionSelector.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm/tools/clang/stage2-bins -resource-dir /usr/lib/llvm-16/lib/clang/16.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Target/AMDGPU -I /build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/lib/Target/AMDGPU -I include -I /build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-16/lib/clang/16.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fmacro-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/= -O2 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-misleading-indentation -std=c++17 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/= -ferror-limit 19 -fvisibility=hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o 
/tmp/scan-build-2022-09-04-125545-48738-1 -x c++ /build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
1//===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements the targeting of the InstructionSelector class for
10/// AMDGPU.
11/// \todo This should be generated by TableGen.
12//===----------------------------------------------------------------------===//
13
14#include "AMDGPUInstructionSelector.h"
15#include "AMDGPU.h"
16#include "AMDGPUGlobalISelUtils.h"
17#include "AMDGPUInstrInfo.h"
18#include "AMDGPURegisterBankInfo.h"
19#include "AMDGPUTargetMachine.h"
20#include "SIMachineFunctionInfo.h"
21#include "Utils/AMDGPUBaseInfo.h"
22#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
23#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
24#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
25#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
26#include "llvm/CodeGen/MachineFrameInfo.h"
27#include "llvm/IR/DiagnosticInfo.h"
28#include "llvm/IR/IntrinsicsAMDGPU.h"
29
30#define DEBUG_TYPE "amdgpu-isel"
31
32using namespace llvm;
33using namespace MIPatternMatch;
34
35static cl::opt<bool> AllowRiskySelect(
36 "amdgpu-global-isel-risky-select",
37 cl::desc("Allow GlobalISel to select cases that are likely to not work yet"),
38 cl::init(false),
39 cl::ReallyHidden);
40
41#define GET_GLOBALISEL_IMPL
42#define AMDGPUSubtarget GCNSubtarget
43#include "AMDGPUGenGlobalISel.inc"
44#undef GET_GLOBALISEL_IMPL
45#undef AMDGPUSubtarget
46
47AMDGPUInstructionSelector::AMDGPUInstructionSelector(
48 const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
49 const AMDGPUTargetMachine &TM)
50 : TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
51 STI(STI),
52 EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
53#define GET_GLOBALISEL_PREDICATES_INIT
54#include "AMDGPUGenGlobalISel.inc"
55#undef GET_GLOBALISEL_PREDICATES_INIT
56#define GET_GLOBALISEL_TEMPORARIES_INIT
57#include "AMDGPUGenGlobalISel.inc"
58#undef GET_GLOBALISEL_TEMPORARIES_INIT
59{
60}
61
62const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }
63
64void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
65 CodeGenCoverage &CoverageInfo,
66 ProfileSummaryInfo *PSI,
67 BlockFrequencyInfo *BFI) {
68 MRI = &MF.getRegInfo();
69 Subtarget = &MF.getSubtarget<GCNSubtarget>();
70 InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
71}
72
73bool AMDGPUInstructionSelector::isVCC(Register Reg,
74 const MachineRegisterInfo &MRI) const {
75 // The verifier is oblivious to s1 being a valid value for wavesize registers.
76 if (Reg.isPhysical())
77 return false;
78
79 auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
80 const TargetRegisterClass *RC =
81 RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
82 if (RC) {
83 const LLT Ty = MRI.getType(Reg);
84 if (!Ty.isValid() || Ty.getSizeInBits() != 1)
85 return false;
86 // G_TRUNC s1 result is never vcc.
87 return MRI.getVRegDef(Reg)->getOpcode() != AMDGPU::G_TRUNC &&
88 RC->hasSuperClassEq(TRI.getBoolRC());
89 }
90
91 const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
92 return RB->getID() == AMDGPU::VCCRegBankID;
93}
94
95bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI,
96 unsigned NewOpc) const {
97 MI.setDesc(TII.get(NewOpc));
98 MI.removeOperand(1); // Remove intrinsic ID.
99 MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
100
101 MachineOperand &Dst = MI.getOperand(0);
102 MachineOperand &Src = MI.getOperand(1);
103
104 // TODO: This should be legalized to s32 if needed
105 if (MRI->getType(Dst.getReg()) == LLT::scalar(1))
106 return false;
107
108 const TargetRegisterClass *DstRC
109 = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
110 const TargetRegisterClass *SrcRC
111 = TRI.getConstrainedRegClassForOperand(Src, *MRI);
112 if (!DstRC || DstRC != SrcRC)
113 return false;
114
115 return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) &&
116 RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI);
117}
118
119bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
120 const DebugLoc &DL = I.getDebugLoc();
121 MachineBasicBlock *BB = I.getParent();
122 I.setDesc(TII.get(TargetOpcode::COPY));
123
124 const MachineOperand &Src = I.getOperand(1);
125 MachineOperand &Dst = I.getOperand(0);
126 Register DstReg = Dst.getReg();
127 Register SrcReg = Src.getReg();
128
129 if (isVCC(DstReg, *MRI)) {
130 if (SrcReg == AMDGPU::SCC) {
131 const TargetRegisterClass *RC
132 = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
133 if (!RC)
134 return true;
135 return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
136 }
137
138 if (!isVCC(SrcReg, *MRI)) {
139 // TODO: Should probably leave the copy and let copyPhysReg expand it.
140 if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI))
141 return false;
142
143 const TargetRegisterClass *SrcRC
144 = TRI.getConstrainedRegClassForOperand(Src, *MRI);
145
146 Optional<ValueAndVReg> ConstVal =
147 getIConstantVRegValWithLookThrough(SrcReg, *MRI, true);
148 if (ConstVal) {
149 unsigned MovOpc =
150 STI.isWave64() ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
151 BuildMI(*BB, &I, DL, TII.get(MovOpc), DstReg)
152 .addImm(ConstVal->Value.getBoolValue() ? -1 : 0);
153 } else {
154 Register MaskedReg = MRI->createVirtualRegister(SrcRC);
155
156 // We can't trust the high bits at this point, so clear them.
157
158 // TODO: Skip masking high bits if def is known boolean.
159
160 unsigned AndOpc =
161 TRI.isSGPRClass(SrcRC) ? AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32;
162 BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg)
163 .addImm(1)
164 .addReg(SrcReg);
165 BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
166 .addImm(0)
167 .addReg(MaskedReg);
168 }
169
170 if (!MRI->getRegClassOrNull(SrcReg))
171 MRI->setRegClass(SrcReg, SrcRC);
172 I.eraseFromParent();
173 return true;
174 }
175
176 const TargetRegisterClass *RC =
177 TRI.getConstrainedRegClassForOperand(Dst, *MRI);
178 if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
179 return false;
180
181 return true;
182 }
183
184 for (const MachineOperand &MO : I.operands()) {
185 if (MO.getReg().isPhysical())
186 continue;
187
188 const TargetRegisterClass *RC =
189 TRI.getConstrainedRegClassForOperand(MO, *MRI);
190 if (!RC)
191 continue;
192 RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
193 }
194 return true;
195}
196
197bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
198 const Register DefReg = I.getOperand(0).getReg();
199 const LLT DefTy = MRI->getType(DefReg);
200 if (DefTy == LLT::scalar(1)) {
201 if (!AllowRiskySelect) {
202 LLVM_DEBUG(dbgs() << "Skipping risky boolean phi\n");
203 return false;
204 }
205
206 LLVM_DEBUG(dbgs() << "Selecting risky boolean phi\n");
207 }
208
209 // TODO: Verify this doesn't have insane operands (i.e. VGPR to SGPR copy)
210
211 const RegClassOrRegBank &RegClassOrBank =
212 MRI->getRegClassOrRegBank(DefReg);
213
214 const TargetRegisterClass *DefRC
215 = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
216 if (!DefRC) {
217 if (!DefTy.isValid()) {
218 LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
219 return false;
220 }
221
222 const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
223 DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB);
224 if (!DefRC) {
225 LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
226 return false;
227 }
228 }
229
230 // TODO: Verify that all registers have the same bank
231 I.setDesc(TII.get(TargetOpcode::PHI));
232 return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
233}
234
235MachineOperand
236AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
237 const TargetRegisterClass &SubRC,
238 unsigned SubIdx) const {
239
240 MachineInstr *MI = MO.getParent();
241 MachineBasicBlock *BB = MO.getParent()->getParent();
242 Register DstReg = MRI->createVirtualRegister(&SubRC);
243
244 if (MO.isReg()) {
245 unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
246 Register Reg = MO.getReg();
247 BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
248 .addReg(Reg, 0, ComposedSubIdx);
249
250 return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
251 MO.isKill(), MO.isDead(), MO.isUndef(),
252 MO.isEarlyClobber(), 0, MO.isDebug(),
253 MO.isInternalRead());
254 }
255
256 assert(MO.isImm());
257
258 APInt Imm(64, MO.getImm());
259
260 switch (SubIdx) {
261 default:
262 llvm_unreachable("do not know to split immediate with this sub index.");
263 case AMDGPU::sub0:
264 return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
265 case AMDGPU::sub1:
266 return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
267 }
268}
269
270static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {
271 switch (Opc) {
272 case AMDGPU::G_AND:
273 return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
274 case AMDGPU::G_OR:
275 return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32;
276 case AMDGPU::G_XOR:
277 return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32;
278 default:
279 llvm_unreachable("not a bit op");
280 }
281}
282
283bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {
284 Register DstReg = I.getOperand(0).getReg();
285 unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
286
287 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
288 if (DstRB->getID() != AMDGPU::SGPRRegBankID &&
289 DstRB->getID() != AMDGPU::VCCRegBankID)
290 return false;
291
292 bool Is64 = Size > 32 || (DstRB->getID() == AMDGPU::VCCRegBankID &&
293 STI.isWave64());
294 I.setDesc(TII.get(getLogicalBitOpcode(I.getOpcode(), Is64)));
295
296 // Dead implicit-def of scc
297 I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
298 true, // isImp
299 false, // isKill
300 true)); // isDead
301 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
302}
303
304bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {
305 MachineBasicBlock *BB = I.getParent();
306 MachineFunction *MF = BB->getParent();
307 Register DstReg = I.getOperand(0).getReg();
308 const DebugLoc &DL = I.getDebugLoc();
309 LLT Ty = MRI->getType(DstReg);
310 if (Ty.isVector())
311 return false;
312
313 unsigned Size = Ty.getSizeInBits();
314 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
315 const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;
316 const bool Sub = I.getOpcode() == TargetOpcode::G_SUB;
317
318 if (Size == 32) {
319 if (IsSALU) {
320 const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
321 MachineInstr *Add =
322 BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
323 .add(I.getOperand(1))
324 .add(I.getOperand(2));
325 I.eraseFromParent();
326 return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
327 }
328
329 if (STI.hasAddNoCarry()) {
330 const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64;
331 I.setDesc(TII.get(Opc));
332 I.addOperand(*MF, MachineOperand::CreateImm(0));
333 I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
334 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
335 }
336
337 const unsigned Opc = Sub ? AMDGPU::V_SUB_CO_U32_e64 : AMDGPU::V_ADD_CO_U32_e64;
338
339 Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass());
340 MachineInstr *Add
341 = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
342 .addDef(UnusedCarry, RegState::Dead)
343 .add(I.getOperand(1))
344 .add(I.getOperand(2))
345 .addImm(0);
346 I.eraseFromParent();
347 return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
348 }
349
350 assert(!Sub && "illegal sub should not reach here");
351
352 const TargetRegisterClass &RC
353 = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass;
354 const TargetRegisterClass &HalfRC
355 = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass;
356
357 MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0));
358 MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0));
359 MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1));
360 MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1));
361
362 Register DstLo = MRI->createVirtualRegister(&HalfRC);
363 Register DstHi = MRI->createVirtualRegister(&HalfRC);
364
365 if (IsSALU) {
366 BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
367 .add(Lo1)
368 .add(Lo2);
369 BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
370 .add(Hi1)
371 .add(Hi2);
372 } else {
373 const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass();
374 Register CarryReg = MRI->createVirtualRegister(CarryRC);
375 BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_CO_U32_e64), DstLo)
376 .addDef(CarryReg)
377 .add(Lo1)
378 .add(Lo2)
379 .addImm(0);
380 MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi)
381 .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead)
382 .add(Hi1)
383 .add(Hi2)
384 .addReg(CarryReg, RegState::Kill)
385 .addImm(0);
386
387 if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI))
388 return false;
389 }
390
391 BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
392 .addReg(DstLo)
393 .addImm(AMDGPU::sub0)
394 .addReg(DstHi)
395 .addImm(AMDGPU::sub1);
396
397
398 if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
399 return false;
400
401 I.eraseFromParent();
402 return true;
403}
404
405bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE(
406 MachineInstr &I) const {
407 MachineBasicBlock *BB = I.getParent();
408 MachineFunction *MF = BB->getParent();
409 const DebugLoc &DL = I.getDebugLoc();
410 Register Dst0Reg = I.getOperand(0).getReg();
411 Register Dst1Reg = I.getOperand(1).getReg();
412 const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO ||
413 I.getOpcode() == AMDGPU::G_UADDE;
414 const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE ||
415 I.getOpcode() == AMDGPU::G_USUBE;
416
417 if (isVCC(Dst1Reg, *MRI)) {
418 unsigned NoCarryOpc =
419 IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
420 unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
421 I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc));
422 I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
423 I.addOperand(*MF, MachineOperand::CreateImm(0));
424 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
425 }
426
427 Register Src0Reg = I.getOperand(2).getReg();
428 Register Src1Reg = I.getOperand(3).getReg();
429
430 if (HasCarryIn) {
431 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
432 .addReg(I.getOperand(4).getReg());
433 }
434
435 unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
436 unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
437
438 BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg)
439 .add(I.getOperand(2))
440 .add(I.getOperand(3));
441 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg)
442 .addReg(AMDGPU::SCC);
443
444 if (!MRI->getRegClassOrNull(Dst1Reg))
445 MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass);
446
447 if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
448 !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
449 !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI))
450 return false;
451
452 if (HasCarryIn &&
453 !RBI.constrainGenericRegister(I.getOperand(4).getReg(),
454 AMDGPU::SReg_32RegClass, *MRI))
455 return false;
456
457 I.eraseFromParent();
458 return true;
459}
460
461bool AMDGPUInstructionSelector::selectG_AMDGPU_MAD_64_32(
462 MachineInstr &I) const {
463 MachineBasicBlock *BB = I.getParent();
464 MachineFunction *MF = BB->getParent();
465 const bool IsUnsigned = I.getOpcode() == AMDGPU::G_AMDGPU_MAD_U64_U32;
466
467 unsigned Opc;
468 if (Subtarget->getGeneration() == AMDGPUSubtarget::GFX11)
469 Opc = IsUnsigned ? AMDGPU::V_MAD_U64_U32_gfx11_e64
470 : AMDGPU::V_MAD_I64_I32_gfx11_e64;
471 else
472 Opc = IsUnsigned ? AMDGPU::V_MAD_U64_U32_e64 : AMDGPU::V_MAD_I64_I32_e64;
473 I.setDesc(TII.get(Opc));
474 I.addOperand(*MF, MachineOperand::CreateImm(0));
475 I.addImplicitDefUseOperands(*MF);
476 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
477}
478
479// TODO: We should probably legalize these to only using 32-bit results.
480bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
481 MachineBasicBlock *BB = I.getParent();
482 Register DstReg = I.getOperand(0).getReg();
483 Register SrcReg = I.getOperand(1).getReg();
484 LLT DstTy = MRI->getType(DstReg);
485 LLT SrcTy = MRI->getType(SrcReg);
486 const unsigned SrcSize = SrcTy.getSizeInBits();
487 unsigned DstSize = DstTy.getSizeInBits();
488
489 // TODO: Should handle any multiple of 32 offset.
490 unsigned Offset = I.getOperand(2).getImm();
491 if (Offset % 32 != 0 || DstSize > 128)
492 return false;
493
494 // 16-bit operations really use 32-bit registers.
495 // FIXME: Probably should not allow 16-bit G_EXTRACT results.
496 if (DstSize == 16)
497 DstSize = 32;
498
499 const TargetRegisterClass *DstRC =
500 TRI.getConstrainedRegClassForOperand(I.getOperand(0), *MRI);
501 if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
502 return false;
503
504 const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
505 const TargetRegisterClass *SrcRC =
506 TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank);
507 if (!SrcRC)
508 return false;
509 unsigned SubReg = SIRegisterInfo::getSubRegFromChannel(Offset / 32,
510 DstSize / 32);
511 SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubReg);
512 if (!SrcRC)
513 return false;
514
515 SrcReg = constrainOperandRegClass(*MF, TRI, *MRI, TII, RBI, I,
516 *SrcRC, I.getOperand(1));
517 const DebugLoc &DL = I.getDebugLoc();
518 BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg)
519 .addReg(SrcReg, 0, SubReg);
520
521 I.eraseFromParent();
522 return true;
523}
524
525bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {
526 MachineBasicBlock *BB = MI.getParent();
527 Register DstReg = MI.getOperand(0).getReg();
528 LLT DstTy = MRI->getType(DstReg);
529 LLT SrcTy = MRI->getType(MI.getOperand(1).getReg());
530
531 const unsigned SrcSize = SrcTy.getSizeInBits();
532 if (SrcSize < 32)
533 return selectImpl(MI, *CoverageInfo);
534
535 const DebugLoc &DL = MI.getDebugLoc();
536 const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
537 const unsigned DstSize = DstTy.getSizeInBits();
538 const TargetRegisterClass *DstRC =
539 TRI.getRegClassForSizeOnBank(DstSize, *DstBank);
540 if (!DstRC)
541 return false;
542
543 ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8);
544 MachineInstrBuilder MIB =
545 BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg);
546 for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
547 MachineOperand &Src = MI.getOperand(I + 1);
548 MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));
549 MIB.addImm(SubRegs[I]);
550
551 const TargetRegisterClass *SrcRC
552 = TRI.getConstrainedRegClassForOperand(Src, *MRI);
553 if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI))
554 return false;
555 }
556
557 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
558 return false;
559
560 MI.eraseFromParent();
561 return true;
562}
563
564bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
565 MachineBasicBlock *BB = MI.getParent();
566 const int NumDst = MI.getNumOperands() - 1;
567
568 MachineOperand &Src = MI.getOperand(NumDst);
569
570 Register SrcReg = Src.getReg();
571 Register DstReg0 = MI.getOperand(0).getReg();
572 LLT DstTy = MRI->getType(DstReg0);
573 LLT SrcTy = MRI->getType(SrcReg);
574
575 const unsigned DstSize = DstTy.getSizeInBits();
576 const unsigned SrcSize = SrcTy.getSizeInBits();
577 const DebugLoc &DL = MI.getDebugLoc();
578 const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
579
580 const TargetRegisterClass *SrcRC =
581 TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank);
582 if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
583 return false;
584
585 // Note we could have mixed SGPR and VGPR destination banks for an SGPR
586 // source, and this relies on the fact that the same subregister indices are
587 // used for both.
588 ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
589 for (int I = 0, E = NumDst; I != E; ++I) {
590 MachineOperand &Dst = MI.getOperand(I);
591 BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
592 .addReg(SrcReg, 0, SubRegs[I]);
593
594 // Make sure the subregister index is valid for the source register.
595 SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegs[I]);
596 if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
597 return false;
598
599 const TargetRegisterClass *DstRC =
600 TRI.getConstrainedRegClassForOperand(Dst, *MRI);
601 if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI))
602 return false;
603 }
604
605 MI.eraseFromParent();
606 return true;
607}
608
609bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR_TRUNC(
610 MachineInstr &MI) const {
611 if (selectImpl(MI, *CoverageInfo))
612 return true;
613
614 const LLT S32 = LLT::scalar(32);
615 const LLT V2S16 = LLT::fixed_vector(2, 16);
616
617 Register Dst = MI.getOperand(0).getReg();
618 if (MRI->getType(Dst) != V2S16)
619 return false;
620
621 const RegisterBank *DstBank = RBI.getRegBank(Dst, *MRI, TRI);
622 if (DstBank->getID() != AMDGPU::SGPRRegBankID)
623 return false;
624
625 Register Src0 = MI.getOperand(1).getReg();
626 Register Src1 = MI.getOperand(2).getReg();
627 if (MRI->getType(Src0) != S32)
628 return false;
629
630 const DebugLoc &DL = MI.getDebugLoc();
631 MachineBasicBlock *BB = MI.getParent();
632
633 auto ConstSrc1 = getAnyConstantVRegValWithLookThrough(Src1, *MRI, true, true);
634 if (ConstSrc1) {
635 auto ConstSrc0 =
636 getAnyConstantVRegValWithLookThrough(Src0, *MRI, true, true);
637 if (ConstSrc0) {
638 const int64_t K0 = ConstSrc0->Value.getSExtValue();
639 const int64_t K1 = ConstSrc1->Value.getSExtValue();
640 uint32_t Lo16 = static_cast<uint32_t>(K0) & 0xffff;
641 uint32_t Hi16 = static_cast<uint32_t>(K1) & 0xffff;
642
643 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), Dst)
644 .addImm(Lo16 | (Hi16 << 16));
645 MI.eraseFromParent();
646 return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI);
647 }
648 }
649
650 // TODO: This should probably be a combine somewhere
651 // (build_vector_trunc $src0, undef) -> (copy $src0)
652 MachineInstr *Src1Def = getDefIgnoringCopies(Src1, *MRI);
653 if (Src1Def && Src1Def->getOpcode() == AMDGPU::G_IMPLICIT_DEF) {
654 MI.setDesc(TII.get(AMDGPU::COPY));
655 MI.removeOperand(2);
656 return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI) &&
657 RBI.constrainGenericRegister(Src0, AMDGPU::SReg_32RegClass, *MRI);
658 }
659
660 Register ShiftSrc0;
661 Register ShiftSrc1;
662
663 // With multiple uses of the shift, this will duplicate the shift and
664 // increase register pressure.
665 //
666 // (build_vector_trunc (lshr_oneuse $src0, 16), (lshr_oneuse $src1, 16)
667 // => (S_PACK_HH_B32_B16 $src0, $src1)
668 // (build_vector_trunc (lshr_oneuse SReg_32:$src0, 16), $src1)
669 // => (S_PACK_HL_B32_B16 $src0, $src1)
670 // (build_vector_trunc $src0, (lshr_oneuse SReg_32:$src1, 16))
671 // => (S_PACK_LH_B32_B16 $src0, $src1)
672 // (build_vector_trunc $src0, $src1)
673 // => (S_PACK_LL_B32_B16 $src0, $src1)
674
675 bool Shift0 = mi_match(
676 Src0, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc0), m_SpecificICst(16))));
677
678 bool Shift1 = mi_match(
679 Src1, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc1), m_SpecificICst(16))));
680
681 unsigned Opc = AMDGPU::S_PACK_LL_B32_B16;
682 if (Shift0 && Shift1) {
683 Opc = AMDGPU::S_PACK_HH_B32_B16;
684 MI.getOperand(1).setReg(ShiftSrc0);
685 MI.getOperand(2).setReg(ShiftSrc1);
686 } else if (Shift1) {
687 Opc = AMDGPU::S_PACK_LH_B32_B16;
688 MI.getOperand(2).setReg(ShiftSrc1);
689 } else if (Shift0) {
690 if (ConstSrc1 && ConstSrc1->Value == 0) {
691 // build_vector_trunc (lshr $src0, 16), 0 -> s_lshr_b32 $src0, 16
692 auto MIB = BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_LSHR_B32), Dst)
693 .addReg(ShiftSrc0)
694 .addImm(16);
695
696 MI.eraseFromParent();
697 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
698 }
699 if (STI.hasSPackHL()) {
700 Opc = AMDGPU::S_PACK_HL_B32_B16;
701 MI.getOperand(1).setReg(ShiftSrc0);
702 }
703 }
704
705 MI.setDesc(TII.get(Opc));
706 return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
707}
708
709bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const {
710 return selectG_ADD_SUB(I);
711}
712
713bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
714 const MachineOperand &MO = I.getOperand(0);
715
716 // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
717 // regbank check here is to know why getConstrainedRegClassForOperand failed.
718 const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI);
719 if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) ||
720 (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) {
721 I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
722 return true;
723 }
724
725 return false;
726}
727
728bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
729 MachineBasicBlock *BB = I.getParent();
730
731 Register DstReg = I.getOperand(0).getReg();
732 Register Src0Reg = I.getOperand(1).getReg();
733 Register Src1Reg = I.getOperand(2).getReg();
734 LLT Src1Ty = MRI->getType(Src1Reg);
735
736 unsigned DstSize = MRI->getType(DstReg).getSizeInBits();
737 unsigned InsSize = Src1Ty.getSizeInBits();
738
739 int64_t Offset = I.getOperand(3).getImm();
740
741 // FIXME: These cases should have been illegal and unnecessary to check here.
742 if (Offset % 32 != 0 || InsSize % 32 != 0)
743 return false;
744
745 // Currently not handled by getSubRegFromChannel.
746 if (InsSize > 128)
747 return false;
748
749 unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32);
750 if (SubReg == AMDGPU::NoSubRegister)
751 return false;
752
753 const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
754 const TargetRegisterClass *DstRC =
755 TRI.getRegClassForSizeOnBank(DstSize, *DstBank);
756 if (!DstRC)
757 return false;
758
759 const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI);
760 const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI);
761 const TargetRegisterClass *Src0RC =
762 TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank);
763 const TargetRegisterClass *Src1RC =
764 TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank);
765
766 // Deal with weird cases where the class only partially supports the subreg
767 // index.
768 Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg);
769 if (!Src0RC || !Src1RC)
770 return false;
771
772 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
773 !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) ||
774 !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI))
775 return false;
776
777 const DebugLoc &DL = I.getDebugLoc();
778 BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg)
779 .addReg(Src0Reg)
780 .addReg(Src1Reg)
781 .addImm(SubReg);
782
783 I.eraseFromParent();
784 return true;
785}
786
787bool AMDGPUInstructionSelector::selectG_SBFX_UBFX(MachineInstr &MI) const {
788 Register DstReg = MI.getOperand(0).getReg();
789 Register SrcReg = MI.getOperand(1).getReg();
790 Register OffsetReg = MI.getOperand(2).getReg();
791 Register WidthReg = MI.getOperand(3).getReg();
792
793 assert(RBI.getRegBank(DstReg, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID &&
794 "scalar BFX instructions are expanded in regbankselect");
795 assert(MRI->getType(MI.getOperand(0).getReg()).getSizeInBits() == 32 &&
796 "64-bit vector BFX instructions are expanded in regbankselect");
797
798 const DebugLoc &DL = MI.getDebugLoc();
799 MachineBasicBlock *MBB = MI.getParent();
800
801 bool IsSigned = MI.getOpcode() == TargetOpcode::G_SBFX;
802 unsigned Opc = IsSigned ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
803 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), DstReg)
804 .addReg(SrcReg)
805 .addReg(OffsetReg)
806 .addReg(WidthReg);
807 MI.eraseFromParent();
808 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
809}
810
811bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const {
812 if (STI.getLDSBankCount() != 16)
813 return selectImpl(MI, *CoverageInfo);
814
815 Register Dst = MI.getOperand(0).getReg();
816 Register Src0 = MI.getOperand(2).getReg();
817 Register M0Val = MI.getOperand(6).getReg();
818 if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) ||
819 !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) ||
820 !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI))
821 return false;
822
823 // This requires 2 instructions. It is possible to write a pattern to support
824 // this, but the generated isel emitter doesn't correctly deal with multiple
825 // output instructions using the same physical register input. The copy to m0
826 // is incorrectly placed before the second instruction.
827 //
828 // TODO: Match source modifiers.
829
830 Register InterpMov = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
831 const DebugLoc &DL = MI.getDebugLoc();
832 MachineBasicBlock *MBB = MI.getParent();
833
834 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
835 .addReg(M0Val);
836 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov)
837 .addImm(2)
838 .addImm(MI.getOperand(4).getImm()) // $attr
839 .addImm(MI.getOperand(3).getImm()); // $attrchan
840
841 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_P1LV_F16), Dst)
842 .addImm(0) // $src0_modifiers
843 .addReg(Src0) // $src0
844 .addImm(MI.getOperand(4).getImm()) // $attr
845 .addImm(MI.getOperand(3).getImm()) // $attrchan
846 .addImm(0) // $src2_modifiers
847 .addReg(InterpMov) // $src2 - 2 f16 values selected by high
848 .addImm(MI.getOperand(5).getImm()) // $high
849 .addImm(0) // $clamp
850 .addImm(0); // $omod
851
852 MI.eraseFromParent();
853 return true;
854}
855
856// Writelane is special in that it can use SGPR and M0 (which would normally
857// count as using the constant bus twice - but in this case it is allowed since
858// the lane selector doesn't count as a use of the constant bus). However, it is
859// still required to abide by the 1 SGPR rule. Fix this up if we might have
860// multiple SGPRs.
861bool AMDGPUInstructionSelector::selectWritelane(MachineInstr &MI) const {
862 // With a constant bus limit of at least 2, there's no issue.
863 if (STI.getConstantBusLimit(AMDGPU::V_WRITELANE_B32) > 1)
864 return selectImpl(MI, *CoverageInfo);
865
866 MachineBasicBlock *MBB = MI.getParent();
867 const DebugLoc &DL = MI.getDebugLoc();
868 Register VDst = MI.getOperand(0).getReg();
869 Register Val = MI.getOperand(2).getReg();
870 Register LaneSelect = MI.getOperand(3).getReg();
871 Register VDstIn = MI.getOperand(4).getReg();
872
873 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_WRITELANE_B32), VDst);
874
875 Optional<ValueAndVReg> ConstSelect =
876 getIConstantVRegValWithLookThrough(LaneSelect, *MRI);
877 if (ConstSelect) {
878 // The selector has to be an inline immediate, so we can use whatever for
879 // the other operands.
880 MIB.addReg(Val);
881 MIB.addImm(ConstSelect->Value.getSExtValue() &
882 maskTrailingOnes<uint64_t>(STI.getWavefrontSizeLog2()));
883 } else {
884 Optional<ValueAndVReg> ConstVal =
885 getIConstantVRegValWithLookThrough(Val, *MRI);
886
887 // If the value written is an inline immediate, we can get away without a
888 // copy to m0.
889 if (ConstVal && AMDGPU::isInlinableLiteral32(ConstVal->Value.getSExtValue(),
890 STI.hasInv2PiInlineImm())) {
891 MIB.addImm(ConstVal->Value.getSExtValue());
892 MIB.addReg(LaneSelect);
893 } else {
894 MIB.addReg(Val);
895
896 // If the lane selector was originally in a VGPR and copied with
897 // readfirstlane, there's a hazard to read the same SGPR from the
898 // VALU. Constrain to a different SGPR to help avoid needing a nop later.
899 RBI.constrainGenericRegister(LaneSelect, AMDGPU::SReg_32_XM0RegClass, *MRI);
900
901 BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
902 .addReg(LaneSelect);
903 MIB.addReg(AMDGPU::M0);
904 }
905 }
906
907 MIB.addReg(VDstIn);
908
909 MI.eraseFromParent();
910 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
911}
912
913// We need to handle this here because tablegen doesn't support matching
914// instructions with multiple outputs.
915bool AMDGPUInstructionSelector::selectDivScale(MachineInstr &MI) const {
916 Register Dst0 = MI.getOperand(0).getReg();
917 Register Dst1 = MI.getOperand(1).getReg();
918
919 LLT Ty = MRI->getType(Dst0);
920 unsigned Opc;
921 if (Ty == LLT::scalar(32))
922 Opc = AMDGPU::V_DIV_SCALE_F32_e64;
923 else if (Ty == LLT::scalar(64))
924 Opc = AMDGPU::V_DIV_SCALE_F64_e64;
925 else
926 return false;
927
928 // TODO: Match source modifiers.
929
930 const DebugLoc &DL = MI.getDebugLoc();
931 MachineBasicBlock *MBB = MI.getParent();
932
933 Register Numer = MI.getOperand(3).getReg();
934 Register Denom = MI.getOperand(4).getReg();
935 unsigned ChooseDenom = MI.getOperand(5).getImm();
936
937 Register Src0 = ChooseDenom != 0 ? Numer : Denom;
938
939 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), Dst0)
940 .addDef(Dst1)
941 .addImm(0) // $src0_modifiers
942 .addUse(Src0) // $src0
943 .addImm(0) // $src1_modifiers
944 .addUse(Denom) // $src1
945 .addImm(0) // $src2_modifiers
946 .addUse(Numer) // $src2
947 .addImm(0) // $clamp
948 .addImm(0); // $omod
949
950 MI.eraseFromParent();
951 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
952}
953
954bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
955 unsigned IntrinsicID = I.getIntrinsicID();
956 switch (IntrinsicID) {
957 case Intrinsic::amdgcn_if_break: {
958 MachineBasicBlock *BB = I.getParent();
959
960 // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
961 // SelectionDAG uses for wave32 vs wave64.
962 BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK))
963 .add(I.getOperand(0))
964 .add(I.getOperand(2))
965 .add(I.getOperand(3));
966
967 Register DstReg = I.getOperand(0).getReg();
968 Register Src0Reg = I.getOperand(2).getReg();
969 Register Src1Reg = I.getOperand(3).getReg();
970
971 I.eraseFromParent();
972
973 for (Register Reg : { DstReg, Src0Reg, Src1Reg })
974 MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
975
976 return true;
977 }
978 case Intrinsic::amdgcn_interp_p1_f16:
979 return selectInterpP1F16(I);
980 case Intrinsic::amdgcn_wqm:
981 return constrainCopyLikeIntrin(I, AMDGPU::WQM);
982 case Intrinsic::amdgcn_softwqm:
983 return constrainCopyLikeIntrin(I, AMDGPU::SOFT_WQM);
984 case Intrinsic::amdgcn_strict_wwm:
985 case Intrinsic::amdgcn_wwm:
986 return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WWM);
987 case Intrinsic::amdgcn_strict_wqm:
988 return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WQM);
989 case Intrinsic::amdgcn_writelane:
990 return selectWritelane(I);
991 case Intrinsic::amdgcn_div_scale:
992 return selectDivScale(I);
993 case Intrinsic::amdgcn_icmp:
994 return selectIntrinsicIcmp(I);
995 case Intrinsic::amdgcn_ballot:
996 return selectBallot(I);
997 case Intrinsic::amdgcn_reloc_constant:
998 return selectRelocConstant(I);
999 case Intrinsic::amdgcn_groupstaticsize:
1000 return selectGroupStaticSize(I);
1001 case Intrinsic::returnaddress:
1002 return selectReturnAddress(I);
1003 case Intrinsic::amdgcn_smfmac_f32_16x16x32_f16:
1004 case Intrinsic::amdgcn_smfmac_f32_32x32x16_f16:
1005 case Intrinsic::amdgcn_smfmac_f32_16x16x32_bf16:
1006 case Intrinsic::amdgcn_smfmac_f32_32x32x16_bf16:
1007 case Intrinsic::amdgcn_smfmac_i32_16x16x64_i8:
1008 case Intrinsic::amdgcn_smfmac_i32_32x32x32_i8:
1009 case Intrinsic::amdgcn_smfmac_f32_16x16x64_bf8_bf8:
1010 case Intrinsic::amdgcn_smfmac_f32_16x16x64_bf8_fp8:
1011 case Intrinsic::amdgcn_smfmac_f32_16x16x64_fp8_bf8:
1012 case Intrinsic::amdgcn_smfmac_f32_16x16x64_fp8_fp8:
1013 case Intrinsic::amdgcn_smfmac_f32_32x32x32_bf8_bf8:
1014 case Intrinsic::amdgcn_smfmac_f32_32x32x32_bf8_fp8:
1015 case Intrinsic::amdgcn_smfmac_f32_32x32x32_fp8_bf8:
1016 case Intrinsic::amdgcn_smfmac_f32_32x32x32_fp8_fp8:
1017 return selectSMFMACIntrin(I);
1018 default:
1019 return selectImpl(I, *CoverageInfo);
1020 }
1021}
1022
1023static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) {
1024 if (Size != 32 && Size != 64)
1025 return -1;
1026 switch (P) {
1027 default:
1028 llvm_unreachable("Unknown condition code!");
1029 case CmpInst::ICMP_NE:
1030 return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64;
1031 case CmpInst::ICMP_EQ:
1032 return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64;
1033 case CmpInst::ICMP_SGT:
1034 return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64;
1035 case CmpInst::ICMP_SGE:
1036 return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64;
1037 case CmpInst::ICMP_SLT:
1038 return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64;
1039 case CmpInst::ICMP_SLE:
1040 return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64;
1041 case CmpInst::ICMP_UGT:
1042 return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64;
1043 case CmpInst::ICMP_UGE:
1044 return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64;
1045 case CmpInst::ICMP_ULT:
1046 return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64;
1047 case CmpInst::ICMP_ULE:
1048 return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64;
1049 }
1050}
1051
1052int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
1053 unsigned Size) const {
1054 if (Size == 64) {
1055 if (!STI.hasScalarCompareEq64())
1056 return -1;
1057
1058 switch (P) {
1059 case CmpInst::ICMP_NE:
1060 return AMDGPU::S_CMP_LG_U64;
1061 case CmpInst::ICMP_EQ:
1062 return AMDGPU::S_CMP_EQ_U64;
1063 default:
1064 return -1;
1065 }
1066 }
1067
1068 if (Size != 32)
1069 return -1;
1070
1071 switch (P) {
1072 case CmpInst::ICMP_NE:
1073 return AMDGPU::S_CMP_LG_U32;
1074 case CmpInst::ICMP_EQ:
1075 return AMDGPU::S_CMP_EQ_U32;
1076 case CmpInst::ICMP_SGT:
1077 return AMDGPU::S_CMP_GT_I32;
1078 case CmpInst::ICMP_SGE:
1079 return AMDGPU::S_CMP_GE_I32;
1080 case CmpInst::ICMP_SLT:
1081 return AMDGPU::S_CMP_LT_I32;
1082 case CmpInst::ICMP_SLE:
1083 return AMDGPU::S_CMP_LE_I32;
1084 case CmpInst::ICMP_UGT:
1085 return AMDGPU::S_CMP_GT_U32;
1086 case CmpInst::ICMP_UGE:
1087 return AMDGPU::S_CMP_GE_U32;
1088 case CmpInst::ICMP_ULT:
1089 return AMDGPU::S_CMP_LT_U32;
1090 case CmpInst::ICMP_ULE:
1091 return AMDGPU::S_CMP_LE_U32;
1092 default:
1093 llvm_unreachable("Unknown condition code!");
1094 }
1095}
1096
1097bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
1098 MachineBasicBlock *BB = I.getParent();
1099 const DebugLoc &DL = I.getDebugLoc();
1100
1101 Register SrcReg = I.getOperand(2).getReg();
1102 unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1103
1104 auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();
1105
1106 Register CCReg = I.getOperand(0).getReg();
1107 if (!isVCC(CCReg, *MRI)) {
1108 int Opcode = getS_CMPOpcode(Pred, Size);
1109 if (Opcode == -1)
1110 return false;
1111 MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
1112 .add(I.getOperand(2))
1113 .add(I.getOperand(3));
1114 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
1115 .addReg(AMDGPU::SCC);
1116 bool Ret =
1117 constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
1118 RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI);
1119 I.eraseFromParent();
1120 return Ret;
1121 }
1122
1123 int Opcode = getV_CMPOpcode(Pred, Size);
1124 if (Opcode == -1)
1125 return false;
1126
1127 MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
1128 I.getOperand(0).getReg())
1129 .add(I.getOperand(2))
1130 .add(I.getOperand(3));
1131 RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
1132 *TRI.getBoolRC(), *MRI);
1133 bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
1134 I.eraseFromParent();
1135 return Ret;
1136}
1137
1138bool AMDGPUInstructionSelector::selectIntrinsicIcmp(MachineInstr &I) const {
1139 Register Dst = I.getOperand(0).getReg();
1140 if (isVCC(Dst, *MRI))
1141 return false;
1142
1143 if (MRI->getType(Dst).getSizeInBits() != STI.getWavefrontSize())
1144 return false;
1145
1146 MachineBasicBlock *BB = I.getParent();
1147 const DebugLoc &DL = I.getDebugLoc();
1148 Register SrcReg = I.getOperand(2).getReg();
1149 unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1150
1151 auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(4).getImm());
1152 if (!ICmpInst::isIntPredicate(static_cast<ICmpInst::Predicate>(Pred))) {
1153 MachineInstr *ICmp =
1154 BuildMI(*BB, &I, DL, TII.get(AMDGPU::IMPLICIT_DEF), Dst);
1155
1156 if (!RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
1157 *TRI.getBoolRC(), *MRI))
1158 return false;
1159 I.eraseFromParent();
1160 return true;
1161 }
1162
1163 int Opcode = getV_CMPOpcode(Pred, Size);
1164 if (Opcode == -1)
1165 return false;
1166
1167 MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode), Dst)
1168 .add(I.getOperand(2))
1169 .add(I.getOperand(3));
1170 RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(), *TRI.getBoolRC(),
1171 *MRI);
1172 bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
1173 I.eraseFromParent();
1174 return Ret;
1175}
1176
1177bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const {
1178 MachineBasicBlock *BB = I.getParent();
1179 const DebugLoc &DL = I.getDebugLoc();
1180 Register DstReg = I.getOperand(0).getReg();
1181 const unsigned Size = MRI->getType(DstReg).getSizeInBits();
1182 const bool Is64 = Size == 64;
1183
1184 if (Size != STI.getWavefrontSize())
1185 return false;
1186
1187 Optional<ValueAndVReg> Arg =
1188 getIConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI);
1189
1190 if (Arg) {
1191 const int64_t Value = Arg.value().Value.getSExtValue();
1192 if (Value == 0) {
1193 unsigned Opcode = Is64 ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
1194 BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg).addImm(0);
1195 } else if (Value == -1) { // all ones
1196 Register SrcReg = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
1197 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
1198 } else
1199 return false;
1200 } else {
1201 Register SrcReg = I.getOperand(2).getReg();
1202 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
1203 }
1204
1205 I.eraseFromParent();
1206 return true;
1207}
1208
1209bool AMDGPUInstructionSelector::selectRelocConstant(MachineInstr &I) const {
1210 Register DstReg = I.getOperand(0).getReg();
1211 const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
1212 const TargetRegisterClass *DstRC = TRI.getRegClassForSizeOnBank(32, *DstBank);
1213 if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
1214 return false;
1215
1216 const bool IsVALU = DstBank->getID() == AMDGPU::VGPRRegBankID;
1217
1218 Module *M = MF->getFunction().getParent();
1219 const MDNode *Metadata = I.getOperand(2).getMetadata();
1220 auto SymbolName = cast<MDString>(Metadata->getOperand(0))->getString();
1221 auto RelocSymbol = cast<GlobalVariable>(
1222 M->getOrInsertGlobal(SymbolName, Type::getInt32Ty(M->getContext())));
1223
1224 MachineBasicBlock *BB = I.getParent();
1225 BuildMI(*BB, &I, I.getDebugLoc(),
1226 TII.get(IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32), DstReg)
1227 .addGlobalAddress(RelocSymbol, 0, SIInstrInfo::MO_ABS32_LO);
1228
1229 I.eraseFromParent();
1230 return true;
1231}
1232
1233bool AMDGPUInstructionSelector::selectGroupStaticSize(MachineInstr &I) const {
1234 Triple::OSType OS = MF->getTarget().getTargetTriple().getOS();
1235
1236 Register DstReg = I.getOperand(0).getReg();
1237 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1238 unsigned Mov = DstRB->getID() == AMDGPU::SGPRRegBankID ?
1239 AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1240
1241 MachineBasicBlock *MBB = I.getParent();
1242 const DebugLoc &DL = I.getDebugLoc();
1243
1244 auto MIB = BuildMI(*MBB, &I, DL, TII.get(Mov), DstReg);
1245
1246 if (OS == Triple::AMDHSA || OS == Triple::AMDPAL) {
1247 const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1248 MIB.addImm(MFI->getLDSSize());
1249 } else {
1250 Module *M = MF->getFunction().getParent();
1251 const GlobalValue *GV
1252 = Intrinsic::getDeclaration(M, Intrinsic::amdgcn_groupstaticsize);
1253 MIB.addGlobalAddress(GV, 0, SIInstrInfo::MO_ABS32_LO);
1254 }
1255
1256 I.eraseFromParent();
1257 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1258}
1259
1260bool AMDGPUInstructionSelector::selectReturnAddress(MachineInstr &I) const {
1261 MachineBasicBlock *MBB = I.getParent();
1262 MachineFunction &MF = *MBB->getParent();
1263 const DebugLoc &DL = I.getDebugLoc();
1264
1265 MachineOperand &Dst = I.getOperand(0);
1266 Register DstReg = Dst.getReg();
1267 unsigned Depth = I.getOperand(2).getImm();
1268
1269 const TargetRegisterClass *RC
1270 = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
1271 if (!RC->hasSubClassEq(&AMDGPU::SGPR_64RegClass) ||
1272 !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
1273 return false;
1274
1275 // Check for kernel and shader functions
1276 if (Depth != 0 ||
1277 MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) {
1278 BuildMI(*MBB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
1279 .addImm(0);
1280 I.eraseFromParent();
1281 return true;
1282 }
1283
1284 MachineFrameInfo &MFI = MF.getFrameInfo();
1285 // There is a call to @llvm.returnaddress in this function
1286 MFI.setReturnAddressIsTaken(true);
1287
1288 // Get the return address reg and mark it as an implicit live-in
1289 Register ReturnAddrReg = TRI.getReturnAddressReg(MF);
1290 Register LiveIn = getFunctionLiveInPhysReg(MF, TII, ReturnAddrReg,
1291 AMDGPU::SReg_64RegClass, DL);
1292 BuildMI(*MBB, &I, DL, TII.get(AMDGPU::COPY), DstReg)
1293 .addReg(LiveIn);
1294 I.eraseFromParent();
1295 return true;
1296}
1297
1298bool AMDGPUInstructionSelector::selectEndCfIntrinsic(MachineInstr &MI) const {
1299 // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
1300 // SelectionDAG uses for wave32 vs wave64.
1301 MachineBasicBlock *BB = MI.getParent();
1302 BuildMI(*BB, &MI, MI.getDebugLoc(), TII.get(AMDGPU::SI_END_CF))
1303 .add(MI.getOperand(1));
1304
1305 Register Reg = MI.getOperand(1).getReg();
1306 MI.eraseFromParent();
1307
1308 if (!MRI->getRegClassOrNull(Reg))
1309 MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
1310 return true;
1311}
1312
1313bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic(
1314 MachineInstr &MI, Intrinsic::ID IntrID) const {
1315 MachineBasicBlock *MBB = MI.getParent();
1316 MachineFunction *MF = MBB->getParent();
1317 const DebugLoc &DL = MI.getDebugLoc();
1318
1319 unsigned IndexOperand = MI.getOperand(7).getImm();
1320 bool WaveRelease = MI.getOperand(8).getImm() != 0;
1321 bool WaveDone = MI.getOperand(9).getImm() != 0;
1322
1323 if (WaveDone && !WaveRelease)
1324 report_fatal_error("ds_ordered_count: wave_done requires wave_release");
1325
1326 unsigned OrderedCountIndex = IndexOperand & 0x3f;
1327 IndexOperand &= ~0x3f;
1328 unsigned CountDw = 0;
1329
1330 if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) {
1331 CountDw = (IndexOperand >> 24) & 0xf;
1332 IndexOperand &= ~(0xf << 24);
1333
1334 if (CountDw < 1 || CountDw > 4) {
1335 report_fatal_error(
1336 "ds_ordered_count: dword count must be between 1 and 4");
1337 }
1338 }
1339
1340 if (IndexOperand)
1341 report_fatal_error("ds_ordered_count: bad index operand");
1342
1343 unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
1344 unsigned ShaderType = SIInstrInfo::getDSShaderTypeValue(*MF);
1345
1346 unsigned Offset0 = OrderedCountIndex << 2;
1347 unsigned Offset1 = WaveRelease | (WaveDone << 1) | (Instruction << 4);
1348
1349 if (STI.getGeneration() >= AMDGPUSubtarget::GFX10)
1350 Offset1 |= (CountDw - 1) << 6;
1351
1352 if (STI.getGeneration() < AMDGPUSubtarget::GFX11)
1353 Offset1 |= ShaderType << 2;
1354
1355 unsigned Offset = Offset0 | (Offset1 << 8);
1356
1357 Register M0Val = MI.getOperand(2).getReg();
1358 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1359 .addReg(M0Val);
1360
1361 Register DstReg = MI.getOperand(0).getReg();
1362 Register ValReg = MI.getOperand(3).getReg();
1363 MachineInstrBuilder DS =
1364 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg)
1365 .addReg(ValReg)
1366 .addImm(Offset)
1367 .cloneMemRefs(MI);
1368
1369 if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI))
1370 return false;
1371
1372 bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI);
1373 MI.eraseFromParent();
1374 return Ret;
1375}
1376
1377static unsigned gwsIntrinToOpcode(unsigned IntrID) {
1378 switch (IntrID) {
1379 case Intrinsic::amdgcn_ds_gws_init:
1380 return AMDGPU::DS_GWS_INIT;
1381 case Intrinsic::amdgcn_ds_gws_barrier:
1382 return AMDGPU::DS_GWS_BARRIER;
1383 case Intrinsic::amdgcn_ds_gws_sema_v:
1384 return AMDGPU::DS_GWS_SEMA_V;
1385 case Intrinsic::amdgcn_ds_gws_sema_br:
1386 return AMDGPU::DS_GWS_SEMA_BR;
1387 case Intrinsic::amdgcn_ds_gws_sema_p:
1388 return AMDGPU::DS_GWS_SEMA_P;
1389 case Intrinsic::amdgcn_ds_gws_sema_release_all:
1390 return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
1391 default:
1392 llvm_unreachable("not a gws intrinsic");
1393 }
1394}
1395
1396bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI,
1397 Intrinsic::ID IID) const {
1398 if (IID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
1399 !STI.hasGWSSemaReleaseAll())
1400 return false;
1401
1402 // intrinsic ID, vsrc, offset
1403 const bool HasVSrc = MI.getNumOperands() == 3;
1404 assert(HasVSrc || MI.getNumOperands() == 2);
1405
1406 Register BaseOffset = MI.getOperand(HasVSrc ? 2 : 1).getReg();
1407 const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI);
1408 if (OffsetRB->getID() != AMDGPU::SGPRRegBankID)
1409 return false;
1410
1411 MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1412 assert(OffsetDef);
1413
1414 unsigned ImmOffset;
1415
1416 MachineBasicBlock *MBB = MI.getParent();
1417 const DebugLoc &DL = MI.getDebugLoc();
1418
1419 MachineInstr *Readfirstlane = nullptr;
1420
1421 // If we legalized the VGPR input, strip out the readfirstlane to analyze the
1422 // incoming offset, in case there's an add of a constant. We'll have to put it
1423 // back later.
1424 if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) {
1425 Readfirstlane = OffsetDef;
1426 BaseOffset = OffsetDef->getOperand(1).getReg();
1427 OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1428 }
1429
1430 if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) {
1431 // If we have a constant offset, try to use the 0 in m0 as the base.
1432 // TODO: Look into changing the default m0 initialization value. If the
1433 // default -1 only set the low 16-bits, we could leave it as-is and add 1 to
1434 // the immediate offset.
1435
1436 ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue();
1437 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
1438 .addImm(0);
1439 } else {
1440 std::tie(BaseOffset, ImmOffset) =
1441 AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset);
1442
1443 if (Readfirstlane) {
1444 // We have the constant offset now, so put the readfirstlane back on the
1445 // variable component.
1446 if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI))
1447 return false;
1448
1449 Readfirstlane->getOperand(1).setReg(BaseOffset);
1450 BaseOffset = Readfirstlane->getOperand(0).getReg();
1451 } else {
1452 if (!RBI.constrainGenericRegister(BaseOffset,
1453 AMDGPU::SReg_32RegClass, *MRI))
1454 return false;
1455 }
1456
1457 Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1458 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base)
1459 .addReg(BaseOffset)
1460 .addImm(16);
1461
1462 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1463 .addReg(M0Base);
1464 }
1465
1466 // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
1467 // offset field) % 64. Some versions of the programming guide omit the m0
1468 // part, or claim it's from offset 0.
1469 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID)));
1470
1471 if (HasVSrc) {
1472 Register VSrc = MI.getOperand(1).getReg();
1473 MIB.addReg(VSrc);
1474
1475 if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI))
1476 return false;
1477 }
1478
1479 MIB.addImm(ImmOffset)
1480 .cloneMemRefs(MI);
1481
1482 TII.enforceOperandRCAlignment(*MIB, AMDGPU::OpName::data0);
1483
1484 MI.eraseFromParent();
1485 return true;
1486}
1487
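The S_LSHL_B32 by 16 above places the variable component in M0[21:16], which is where the hardware reads it per the resource-id formula in the comment. A plain-integer sketch of that arithmetic (the "isa opaque base" is modeled as an ordinary number here; this is an illustration, not an ISA reference):

#include <cassert>
#include <cstdint>

// Resource id = (<isa opaque base> + M0[21:16] + offset field) % 64.
static unsigned gwsResourceId(unsigned IsaBase, uint32_t M0,
                              unsigned OffsetField) {
  unsigned M0Part = (M0 >> 16) & 0x3f; // bits [21:16], written by S_LSHL_B32
  return (IsaBase + M0Part + OffsetField) % 64;
}

int main() {
  uint32_t M0 = 5u << 16;               // variable base 5 shifted into place
  assert(gwsResourceId(0, M0, 3) == 8); // opaque base 0 + 5 + immediate 3
  return 0;
}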
1488bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI,
1489 bool IsAppend) const {
1490 Register PtrBase = MI.getOperand(2).getReg();
1491 LLT PtrTy = MRI->getType(PtrBase);
1492 bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
1493
1494 unsigned Offset;
1495 std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2));
1496
1497 // TODO: Should this try to look through readfirstlane like GWS?
1498 if (!isDSOffsetLegal(PtrBase, Offset)) {
1499 PtrBase = MI.getOperand(2).getReg();
1500 Offset = 0;
1501 }
1502
1503 MachineBasicBlock *MBB = MI.getParent();
1504 const DebugLoc &DL = MI.getDebugLoc();
1505 const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;
1506
1507 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1508 .addReg(PtrBase);
1509 if (!RBI.constrainGenericRegister(PtrBase, AMDGPU::SReg_32RegClass, *MRI))
1510 return false;
1511
1512 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg())
1513 .addImm(Offset)
1514 .addImm(IsGDS ? -1 : 0)
1515 .cloneMemRefs(MI);
1516 MI.eraseFromParent();
1517 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1518}
1519
1520bool AMDGPUInstructionSelector::selectSBarrier(MachineInstr &MI) const {
1521 if (TM.getOptLevel() > CodeGenOpt::None) {
1522 unsigned WGSize = STI.getFlatWorkGroupSizes(MF->getFunction()).second;
1523 if (WGSize <= STI.getWavefrontSize()) {
1524 MachineBasicBlock *MBB = MI.getParent();
1525 const DebugLoc &DL = MI.getDebugLoc();
1526 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::WAVE_BARRIER));
1527 MI.eraseFromParent();
1528 return true;
1529 }
1530 }
1531 return selectImpl(MI, *CoverageInfo);
1532}
1533
1534static bool parseTexFail(uint64_t TexFailCtrl, bool &TFE, bool &LWE,
1535 bool &IsTexFail) {
1536 if (TexFailCtrl)
1537 IsTexFail = true;
1538
1539 TFE = (TexFailCtrl & 0x1) ? true : false;
1540 TexFailCtrl &= ~(uint64_t)0x1;
1541 LWE = (TexFailCtrl & 0x2) ? true : false;
1542 TexFailCtrl &= ~(uint64_t)0x2;
1543
1544 return TexFailCtrl == 0;
1545}
1546
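parseTexFail accepts only bit 0 (TFE) and bit 1 (LWE); any nonzero control value marks the access as tex-fail, and any other set bit is invalid. A self-contained restatement with a small driver (the sketch name is illustrative):

#include <cassert>
#include <cstdint>

static bool parseTexFailSketch(uint64_t Ctrl, bool &TFE, bool &LWE,
                               bool &IsTexFail) {
  if (Ctrl)
    IsTexFail = true;
  TFE = (Ctrl & 0x1) != 0;             // bit 0: texture-fail enable
  LWE = (Ctrl & 0x2) != 0;             // bit 1: LOD warning enable
  return (Ctrl & ~uint64_t(0x3)) == 0; // all other bits must be clear
}

int main() {
  bool TFE, LWE, IsTexFail = false;
  assert(parseTexFailSketch(0x3, TFE, LWE, IsTexFail));
  assert(TFE && LWE && IsTexFail);
  assert(!parseTexFailSketch(0x4, TFE, LWE, IsTexFail)); // reserved bit set
  return 0;
}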
1547bool AMDGPUInstructionSelector::selectImageIntrinsic(
1548 MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const {
1549 MachineBasicBlock *MBB = MI.getParent();
1550 const DebugLoc &DL = MI.getDebugLoc();
1551
1552 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
1553 AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
1554
1555 const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
1556 unsigned IntrOpcode = Intr->BaseOpcode;
1557 const bool IsGFX10Plus = AMDGPU::isGFX10Plus(STI);
1558 const bool IsGFX11Plus = AMDGPU::isGFX11Plus(STI);
1559
1560 const unsigned ArgOffset = MI.getNumExplicitDefs() + 1;
1561
1562 Register VDataIn, VDataOut;
1563 LLT VDataTy;
1564 int NumVDataDwords = -1;
1565 bool IsD16 = MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16 ||
1566 MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16;
1567
1568 bool Unorm;
1569 if (!BaseOpcode->Sampler)
1570 Unorm = true;
1571 else
1572 Unorm = MI.getOperand(ArgOffset + Intr->UnormIndex).getImm() != 0;
1573
1574 bool TFE;
1575 bool LWE;
1576 bool IsTexFail = false;
1577 if (!parseTexFail(MI.getOperand(ArgOffset + Intr->TexFailCtrlIndex).getImm(),
1578 TFE, LWE, IsTexFail))
1579 return false;
1580
1581 const int Flags = MI.getOperand(ArgOffset + Intr->NumArgs).getImm();
1582 const bool IsA16 = (Flags & 1) != 0;
1583 const bool IsG16 = (Flags & 2) != 0;
1584
1585 // A16 implies 16-bit gradients if the subtarget doesn't support G16
1586 if (IsA16 && !STI.hasG16() && !IsG16)
1587 return false;
1588
1589 unsigned DMask = 0;
1590 unsigned DMaskLanes = 0;
1591
1592 if (BaseOpcode->Atomic) {
1593 VDataOut = MI.getOperand(0).getReg();
1594 VDataIn = MI.getOperand(2).getReg();
1595 LLT Ty = MRI->getType(VDataIn);
1596
1597 // Be careful to allow atomic swap on 16-bit element vectors.
1598 const bool Is64Bit = BaseOpcode->AtomicX2 ?
1599 Ty.getSizeInBits() == 128 :
1600 Ty.getSizeInBits() == 64;
1601
1602 if (BaseOpcode->AtomicX2) {
1603 assert(MI.getOperand(3).getReg() == AMDGPU::NoRegister);
1604
1605 DMask = Is64Bit ? 0xf : 0x3;
1606 NumVDataDwords = Is64Bit ? 4 : 2;
1607 } else {
1608 DMask = Is64Bit ? 0x3 : 0x1;
1609 NumVDataDwords = Is64Bit ? 2 : 1;
1610 }
1611 } else {
1612 DMask = MI.getOperand(ArgOffset + Intr->DMaskIndex).getImm();
1613 DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);
1614
1615 if (BaseOpcode->Store) {
1616 VDataIn = MI.getOperand(1).getReg();
1617 VDataTy = MRI->getType(VDataIn);
1618 NumVDataDwords = (VDataTy.getSizeInBits() + 31) / 32;
1619 } else {
1620 VDataOut = MI.getOperand(0).getReg();
1621 VDataTy = MRI->getType(VDataOut);
1622 NumVDataDwords = DMaskLanes;
1623
1624 if (IsD16 && !STI.hasUnpackedD16VMem())
1625 NumVDataDwords = (DMaskLanes + 1) / 2;
1626 }
1627 }
1628
1629 // Set G16 opcode
1630 if (IsG16 && !IsA16) {
1631 const AMDGPU::MIMGG16MappingInfo *G16MappingInfo =
1632 AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode);
1633 assert(G16MappingInfo);
1634 IntrOpcode = G16MappingInfo->G16; // set opcode to variant with _g16
1635 }
1636
1637 // TODO: Check this in verifier.
1638 assert((!IsTexFail || DMaskLanes >= 1) && "should have legalized this");
1639
1640 unsigned CPol = MI.getOperand(ArgOffset + Intr->CachePolicyIndex).getImm();
1641 if (BaseOpcode->Atomic)
1642 CPol |= AMDGPU::CPol::GLC; // TODO no-return optimization
1643 if (CPol & ~AMDGPU::CPol::ALL)
1644 return false;
1645
1646 int NumVAddrRegs = 0;
1647 int NumVAddrDwords = 0;
1648 for (unsigned I = Intr->VAddrStart; I < Intr->VAddrEnd; I++) {
1649 // Skip the $noregs and 0s inserted during legalization.
1650 MachineOperand &AddrOp = MI.getOperand(ArgOffset + I);
1651 if (!AddrOp.isReg())
1652 continue; // XXX - Break?
1653
1654 Register Addr = AddrOp.getReg();
1655 if (!Addr)
1656 break;
1657
1658 ++NumVAddrRegs;
1659 NumVAddrDwords += (MRI->getType(Addr).getSizeInBits() + 31) / 32;
1660 }
1661
1662 // The legalizer preprocessed the intrinsic arguments. If we aren't using
1663 // NSA, these should have been packed into a single value in the first
1664 // address register
1665 const bool UseNSA = NumVAddrRegs != 1 && NumVAddrDwords == NumVAddrRegs;
1666 if (UseNSA && !STI.hasFeature(AMDGPU::FeatureNSAEncoding)) {
1667 LLVM_DEBUG(dbgs() << "Trying to use NSA on non-NSA target\n");
1668 return false;
1669 }
1670
1671 if (IsTexFail)
1672 ++NumVDataDwords;
1673
1674 int Opcode = -1;
1675 if (IsGFX11Plus) {
1676 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
1677 UseNSA ? AMDGPU::MIMGEncGfx11NSA
1678 : AMDGPU::MIMGEncGfx11Default,
1679 NumVDataDwords, NumVAddrDwords);
1680 } else if (IsGFX10Plus) {
1681 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
1682 UseNSA ? AMDGPU::MIMGEncGfx10NSA
1683 : AMDGPU::MIMGEncGfx10Default,
1684 NumVDataDwords, NumVAddrDwords);
1685 } else {
1686 if (Subtarget->hasGFX90AInsts()) {
1687 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx90a,
1688 NumVDataDwords, NumVAddrDwords);
1689 if (Opcode == -1) {
1690 LLVM_DEBUG(
1691 dbgs()
1692 << "requested image instruction is not supported on this GPU\n");
1693 return false;
1694 }
1695 }
1696 if (Opcode == -1 &&
1697 STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
1698 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
1699 NumVDataDwords, NumVAddrDwords);
1700 if (Opcode == -1)
1701 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
1702 NumVDataDwords, NumVAddrDwords);
1703 }
1704 assert(Opcode != -1);
1705
1706 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opcode))
1707 .cloneMemRefs(MI);
1708
1709 if (VDataOut) {
1710 if (BaseOpcode->AtomicX2) {
1711 const bool Is64 = MRI->getType(VDataOut).getSizeInBits() == 64;
1712
1713 Register TmpReg = MRI->createVirtualRegister(
1714 Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
1715 unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
1716
1717 MIB.addDef(TmpReg);
1718 if (!MRI->use_empty(VDataOut)) {
1719 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), VDataOut)
1720 .addReg(TmpReg, RegState::Kill, SubReg);
1721 }
1722
1723 } else {
1724 MIB.addDef(VDataOut); // vdata output
1725 }
1726 }
1727
1728 if (VDataIn)
1729 MIB.addReg(VDataIn); // vdata input
1730
1731 for (int I = 0; I != NumVAddrRegs; ++I) {
1732 MachineOperand &SrcOp = MI.getOperand(ArgOffset + Intr->VAddrStart + I);
1733 if (SrcOp.isReg()) {
1734 assert(SrcOp.getReg() != 0);
1735 MIB.addReg(SrcOp.getReg());
1736 }
1737 }
1738
1739 MIB.addReg(MI.getOperand(ArgOffset + Intr->RsrcIndex).getReg());
1740 if (BaseOpcode->Sampler)
1741 MIB.addReg(MI.getOperand(ArgOffset + Intr->SampIndex).getReg());
1742
1743 MIB.addImm(DMask); // dmask
1744
1745 if (IsGFX10Plus)
1746 MIB.addImm(DimInfo->Encoding);
1747 MIB.addImm(Unorm);
1748
1749 MIB.addImm(CPol);
1750 MIB.addImm(IsA16 && // a16 or r128
1751 STI.hasFeature(AMDGPU::FeatureR128A16) ? -1 : 0);
1752 if (IsGFX10Plus)
1753 MIB.addImm(IsA16 ? -1 : 0);
1754
1755 if (!Subtarget->hasGFX90AInsts()) {
1756 MIB.addImm(TFE); // tfe
1757 } else if (TFE) {
1758 LLVM_DEBUG(dbgs() << "TFE is not supported on this GPU\n");
1759 return false;
1760 }
1761
1762 MIB.addImm(LWE); // lwe
1763 if (!IsGFX10Plus)
1764 MIB.addImm(DimInfo->DA ? -1 : 0);
1765 if (BaseOpcode->HasD16)
1766 MIB.addImm(IsD16 ? -1 : 0);
1767
1768 if (IsTexFail) {
1769 // An image load instruction with TFE/LWE only conditionally writes to its
1770 // result registers. Initialize them to zero so that we always get well
1771 // defined result values.
1772 assert(VDataOut && !VDataIn);
1773 Register Tied = MRI->cloneVirtualRegister(VDataOut);
1774 Register Zero = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1775 BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::V_MOV_B32_e32), Zero)
1776 .addImm(0);
1777 auto Parts = TRI.getRegSplitParts(MRI->getRegClass(Tied), 4);
1778 if (STI.usePRTStrictNull()) {
1779 // With enable-prt-strict-null enabled, initialize all result registers to
1780 // zero.
1781 auto RegSeq =
1782 BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied);
1783 for (auto Sub : Parts)
1784 RegSeq.addReg(Zero).addImm(Sub);
1785 } else {
1786 // With enable-prt-strict-null disabled, only initialize the extra TFE/LWE
1787 // result register.
1788 Register Undef = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1789 BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef);
1790 auto RegSeq =
1791 BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied);
1792 for (auto Sub : Parts.drop_back(1))
1793 RegSeq.addReg(Undef).addImm(Sub);
1794 RegSeq.addReg(Zero).addImm(Parts.back());
1795 }
1796 MIB.addReg(Tied, RegState::Implicit);
1797 MIB->tieOperands(0, MIB->getNumOperands() - 1);
1798 }
1799
1800 MI.eraseFromParent();
1801 constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1802 TII.enforceOperandRCAlignment(*MIB, AMDGPU::OpName::vaddr);
1803 return true;
1804}
1805
1806bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
1807 MachineInstr &I) const {
1808 unsigned IntrinsicID = I.getIntrinsicID();
1809 switch (IntrinsicID) {
1810 case Intrinsic::amdgcn_end_cf:
1811 return selectEndCfIntrinsic(I);
1812 case Intrinsic::amdgcn_ds_ordered_add:
1813 case Intrinsic::amdgcn_ds_ordered_swap:
1814 return selectDSOrderedIntrinsic(I, IntrinsicID);
1815 case Intrinsic::amdgcn_ds_gws_init:
1816 case Intrinsic::amdgcn_ds_gws_barrier:
1817 case Intrinsic::amdgcn_ds_gws_sema_v:
1818 case Intrinsic::amdgcn_ds_gws_sema_br:
1819 case Intrinsic::amdgcn_ds_gws_sema_p:
1820 case Intrinsic::amdgcn_ds_gws_sema_release_all:
1821 return selectDSGWSIntrinsic(I, IntrinsicID);
1822 case Intrinsic::amdgcn_ds_append:
1823 return selectDSAppendConsume(I, true);
1824 case Intrinsic::amdgcn_ds_consume:
1825 return selectDSAppendConsume(I, false);
1826 case Intrinsic::amdgcn_s_barrier:
1827 return selectSBarrier(I);
1828 case Intrinsic::amdgcn_global_atomic_fadd:
1829 return selectGlobalAtomicFadd(I, I.getOperand(2), I.getOperand(3));
1830 case Intrinsic::amdgcn_raw_buffer_load_lds:
1831 case Intrinsic::amdgcn_struct_buffer_load_lds:
1832 return selectBufferLoadLds(I);
1833 case Intrinsic::amdgcn_global_load_lds:
1834 return selectGlobalLoadLds(I);
1835 case Intrinsic::amdgcn_exp_compr:
1836 if (!STI.hasCompressedExport()) {
1837 Function &F = I.getMF()->getFunction();
1838 DiagnosticInfoUnsupported NoFpRet(
1839 F, "intrinsic not supported on subtarget", I.getDebugLoc(), DS_Error);
1840 F.getContext().diagnose(NoFpRet);
1841 return false;
1842 }
1843 break;
1844 }
1845 return selectImpl(I, *CoverageInfo);
1846}
1847
1848bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
1849 if (selectImpl(I, *CoverageInfo))
1850 return true;
1851
1852 MachineBasicBlock *BB = I.getParent();
1853 const DebugLoc &DL = I.getDebugLoc();
1854
1855 Register DstReg = I.getOperand(0).getReg();
1856 unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
1857 assert(Size <= 32 || Size == 64);
1858 const MachineOperand &CCOp = I.getOperand(1);
1859 Register CCReg = CCOp.getReg();
1860 if (!isVCC(CCReg, *MRI)) {
1861 unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
1862 AMDGPU::S_CSELECT_B32;
1863 MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
1864 .addReg(CCReg);
1865
1866 // The generic constrainSelectedInstRegOperands doesn't work for the scc register
1867 // bank, because it does not cover the register class we use to represent it.
1868 // So we need to set the register class manually here.
1869 if (!MRI->getRegClassOrNull(CCReg))
1870 MRI->setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, *MRI));
1871 MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
1872 .add(I.getOperand(2))
1873 .add(I.getOperand(3));
1874
1875 bool Ret = false;
1876 Ret |= constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1877 Ret |= constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
1878 I.eraseFromParent();
1879 return Ret;
1880 }
1881
1882 // Wide VGPR select should have been split in RegBankSelect.
1883 if (Size > 32)
1884 return false;
1885
1886 MachineInstr *Select =
1887 BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1888 .addImm(0)
1889 .add(I.getOperand(3))
1890 .addImm(0)
1891 .add(I.getOperand(2))
1892 .add(I.getOperand(1));
1893
1894 bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1895 I.eraseFromParent();
1896 return Ret;
1897}
1898
1899static int sizeToSubRegIndex(unsigned Size) {
1900 switch (Size) {
1901 case 32:
1902 return AMDGPU::sub0;
1903 case 64:
1904 return AMDGPU::sub0_sub1;
1905 case 96:
1906 return AMDGPU::sub0_sub1_sub2;
1907 case 128:
1908 return AMDGPU::sub0_sub1_sub2_sub3;
1909 case 256:
1910 return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
1911 default:
1912 if (Size < 32)
1913 return AMDGPU::sub0;
1914 if (Size > 256)
1915 return -1;
1916 return sizeToSubRegIndex(PowerOf2Ceil(Size));
1917 }
1918}
1919
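Sizes below 32 map to sub0, and other off-table sizes round up through PowerOf2Ceil, so e.g. 48 bits resolves via the 64-bit entry. A sketch mirroring the switch with integer tags (tag n stands for the n-dword subregister index; PowerOf2Ceil is re-modeled locally):

#include <cassert>

static int sizeToSubRegTag(unsigned Size) {
  switch (Size) {
  case 32: return 1;   // sub0
  case 64: return 2;   // sub0_sub1
  case 96: return 3;
  case 128: return 4;
  case 256: return 8;
  default: {
    if (Size < 32)
      return 1;
    if (Size > 256)
      return -1;       // no single subregister index
    unsigned P = 32;   // round up to the next power of two and retry
    while (P < Size)
      P *= 2;
    return sizeToSubRegTag(P);
  }
  }
}

int main() {
  assert(sizeToSubRegTag(16) == 1);   // sub-dword uses sub0
  assert(sizeToSubRegTag(48) == 2);   // rounds up to 64
  assert(sizeToSubRegTag(512) == -1); // too wide
  return 0;
}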
1920bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
1921 Register DstReg = I.getOperand(0).getReg();
1922 Register SrcReg = I.getOperand(1).getReg();
1923 const LLT DstTy = MRI->getType(DstReg);
1924 const LLT SrcTy = MRI->getType(SrcReg);
1925 const LLT S1 = LLT::scalar(1);
1926
1927 const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1928 const RegisterBank *DstRB;
1929 if (DstTy == S1) {
1930 // This is a special case. We don't treat s1 for legalization artifacts as
1931 // vcc booleans.
1932 DstRB = SrcRB;
1933 } else {
1934 DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1935 if (SrcRB != DstRB)
1936 return false;
1937 }
1938
1939 const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
1940
1941 unsigned DstSize = DstTy.getSizeInBits();
1942 unsigned SrcSize = SrcTy.getSizeInBits();
1943
1944 const TargetRegisterClass *SrcRC =
1945 TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB);
1946 const TargetRegisterClass *DstRC =
1947 TRI.getRegClassForSizeOnBank(DstSize, *DstRB);
1948 if (!SrcRC || !DstRC)
1949 return false;
1950
1951 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
1952 !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
1953 LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
1954 return false;
1955 }
1956
1957 if (DstTy == LLT::fixed_vector(2, 16) && SrcTy == LLT::fixed_vector(2, 32)) {
1958 MachineBasicBlock *MBB = I.getParent();
1959 const DebugLoc &DL = I.getDebugLoc();
1960
1961 Register LoReg = MRI->createVirtualRegister(DstRC);
1962 Register HiReg = MRI->createVirtualRegister(DstRC);
1963 BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), LoReg)
1964 .addReg(SrcReg, 0, AMDGPU::sub0);
1965 BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), HiReg)
1966 .addReg(SrcReg, 0, AMDGPU::sub1);
1967
1968 if (IsVALU && STI.hasSDWA()) {
1969 // Write the low 16-bits of the high element into the high 16-bits of the
1970 // low element.
1971 MachineInstr *MovSDWA =
1972 BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
1973 .addImm(0) // $src0_modifiers
1974 .addReg(HiReg) // $src0
1975 .addImm(0) // $clamp
1976 .addImm(AMDGPU::SDWA::WORD_1) // $dst_sel
1977 .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
1978 .addImm(AMDGPU::SDWA::WORD_0) // $src0_sel
1979 .addReg(LoReg, RegState::Implicit);
1980 MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
1981 } else {
1982 Register TmpReg0 = MRI->createVirtualRegister(DstRC);
1983 Register TmpReg1 = MRI->createVirtualRegister(DstRC);
1984 Register ImmReg = MRI->createVirtualRegister(DstRC);
1985 if (IsVALU) {
1986 BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), TmpReg0)
1987 .addImm(16)
1988 .addReg(HiReg);
1989 } else {
1990 BuildMI(*MBB, I, DL, TII.get(AMDGPU::S_LSHL_B32), TmpReg0)
1991 .addReg(HiReg)
1992 .addImm(16);
1993 }
1994
1995 unsigned MovOpc = IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
1996 unsigned AndOpc = IsVALU ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
1997 unsigned OrOpc = IsVALU ? AMDGPU::V_OR_B32_e64 : AMDGPU::S_OR_B32;
1998
1999 BuildMI(*MBB, I, DL, TII.get(MovOpc), ImmReg)
2000 .addImm(0xffff);
2001 BuildMI(*MBB, I, DL, TII.get(AndOpc), TmpReg1)
2002 .addReg(LoReg)
2003 .addReg(ImmReg);
2004 BuildMI(*MBB, I, DL, TII.get(OrOpc), DstReg)
2005 .addReg(TmpReg0)
2006 .addReg(TmpReg1);
2007 }
2008
2009 I.eraseFromParent();
2010 return true;
2011 }
2012
2013 if (!DstTy.isScalar())
2014 return false;
2015
2016 if (SrcSize > 32) {
2017 int SubRegIdx = sizeToSubRegIndex(DstSize);
2018 if (SubRegIdx == -1)
2019 return false;
2020
2021 // Deal with weird cases where the class only partially supports the subreg
2022 // index.
2023 const TargetRegisterClass *SrcWithSubRC
2024 = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx);
2025 if (!SrcWithSubRC)
2026 return false;
2027
2028 if (SrcWithSubRC != SrcRC) {
2029 if (!RBI.constrainGenericRegister(SrcReg, *SrcWithSubRC, *MRI))
2030 return false;
2031 }
2032
2033 I.getOperand(1).setSubReg(SubRegIdx);
2034 }
2035
2036 I.setDesc(TII.get(TargetOpcode::COPY));
2037 return true;
2038}
2039
2040/// \returns true if a bitmask for \p Size bits will be an inline immediate.
2041static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
2042 Mask = maskTrailingOnes<unsigned>(Size);
2043 int SignedMask = static_cast<int>(Mask);
2044 return SignedMask >= -16 && SignedMask <= 64;
2045}
2046
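Since maskTrailingOnes<unsigned>(Size) is (1 << Size) - 1 (all ones at Size == 32), only masks up to 63 or the all-ones mask (signed -1) fit the inline immediate range [-16, 64]. A self-contained check re-modeling the mask locally (names are illustrative):

#include <cassert>

static bool shouldUseAndMaskSketch(unsigned Size, unsigned &Mask) {
  Mask = Size >= 32 ? ~0u : (1u << Size) - 1; // maskTrailingOnes equivalent
  int SignedMask = static_cast<int>(Mask);
  return SignedMask >= -16 && SignedMask <= 64;
}

int main() {
  unsigned Mask;
  assert(shouldUseAndMaskSketch(6, Mask) && Mask == 0x3f); // 63: inline
  assert(!shouldUseAndMaskSketch(16, Mask));               // 0xffff: not
  assert(shouldUseAndMaskSketch(32, Mask) && Mask == ~0u); // -1: inline
  return 0;
}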
2047// Like RegisterBankInfo::getRegBank, but don't assume vcc for s1.
2048const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank(
2049 Register Reg, const MachineRegisterInfo &MRI,
2050 const TargetRegisterInfo &TRI) const {
2051 const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
2052 if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>())
2053 return RB;
2054
2055 // Ignore the type, since we don't use vcc in artifacts.
2056 if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>())
2057 return &RBI.getRegBankFromRegClass(*RC, LLT());
2058 return nullptr;
2059}
2060
2061bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
2062 bool InReg = I.getOpcode() == AMDGPU::G_SEXT_INREG;
2063 bool Signed = I.getOpcode() == AMDGPU::G_SEXT || InReg;
2064 const DebugLoc &DL = I.getDebugLoc();
2065 MachineBasicBlock &MBB = *I.getParent();
2066 const Register DstReg = I.getOperand(0).getReg();
2067 const Register SrcReg = I.getOperand(1).getReg();
2068
2069 const LLT DstTy = MRI->getType(DstReg);
2070 const LLT SrcTy = MRI->getType(SrcReg);
2071 const unsigned SrcSize = I.getOpcode() == AMDGPU::G_SEXT_INREG ?
2072 I.getOperand(2).getImm() : SrcTy.getSizeInBits();
2073 const unsigned DstSize = DstTy.getSizeInBits();
2074 if (!DstTy.isScalar())
2075 return false;
2076
2077 // Artifact casts should never use vcc.
2078 const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI);
2079
2080 // FIXME: This should probably be illegal and split earlier.
2081 if (I.getOpcode() == AMDGPU::G_ANYEXT) {
2082 if (DstSize <= 32)
2083 return selectCOPY(I);
2084
2085 const TargetRegisterClass *SrcRC =
2086 TRI.getRegClassForTypeOnBank(SrcTy, *SrcBank);
2087 const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
2088 const TargetRegisterClass *DstRC =
2089 TRI.getRegClassForSizeOnBank(DstSize, *DstBank);
2090
2091 Register UndefReg = MRI->createVirtualRegister(SrcRC);
2092 BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2093 BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2094 .addReg(SrcReg)
2095 .addImm(AMDGPU::sub0)
2096 .addReg(UndefReg)
2097 .addImm(AMDGPU::sub1);
2098 I.eraseFromParent();
2099
2100 return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) &&
2101 RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI);
2102 }
2103
2104 if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
2105 // 64-bit should have been split up in RegBankSelect
2106
2107 // Try to use an and with a mask if it will save code size.
2108 unsigned Mask;
2109 if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2110 MachineInstr *ExtI =
2111 BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
2112 .addImm(Mask)
2113 .addReg(SrcReg);
2114 I.eraseFromParent();
2115 return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2116 }
2117
2118 const unsigned BFE = Signed ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
2119 MachineInstr *ExtI =
2120 BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
2121 .addReg(SrcReg)
2122 .addImm(0) // Offset
2123 .addImm(SrcSize); // Width
2124 I.eraseFromParent();
2125 return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2126 }
2127
2128 if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) {
2129 const TargetRegisterClass &SrcRC = InReg && DstSize > 32 ?
2130 AMDGPU::SReg_64RegClass : AMDGPU::SReg_32RegClass;
2131 if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI))
2132 return false;
2133
2134 if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) {
2135 const unsigned SextOpc = SrcSize == 8 ?
2136 AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16;
2137 BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg)
2138 .addReg(SrcReg);
2139 I.eraseFromParent();
2140 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2141 }
2142
2143 const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64;
2144 const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
2145
2146 // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16]= width.
2147 if (DstSize > 32 && (SrcSize <= 32 || InReg)) {
2148 // We need a 64-bit register source, but the high bits don't matter.
2149 Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
2150 Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2151 unsigned SubReg = InReg ? AMDGPU::sub0 : 0;
2152
2153 BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2154 BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
2155 .addReg(SrcReg, 0, SubReg)
2156 .addImm(AMDGPU::sub0)
2157 .addReg(UndefReg)
2158 .addImm(AMDGPU::sub1);
2159
2160 BuildMI(MBB, I, DL, TII.get(BFE64), DstReg)
2161 .addReg(ExtReg)
2162 .addImm(SrcSize << 16);
2163
2164 I.eraseFromParent();
2165 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI);
2166 }
2167
2168 unsigned Mask;
2169 if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2170 BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
2171 .addReg(SrcReg)
2172 .addImm(Mask);
2173 } else {
2174 BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
2175 .addReg(SrcReg)
2176 .addImm(SrcSize << 16);
2177 }
2178
2179 I.eraseFromParent();
2180 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2181 }
2182
2183 return false;
2184}
2185
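The scalar BFE immediates built above follow the encoding in the comment: offset in bits [5:0], width in bits [22:16], so a plain extend from SrcSize bits is just SrcSize << 16. A minimal sketch of that packing (the helper name is illustrative):

#include <cassert>

static unsigned sbfeImm(unsigned Offset, unsigned Width) {
  return (Offset & 0x3f) | ((Width & 0x7f) << 16); // S1[5:0], S1[22:16]
}

int main() {
  assert(sbfeImm(0, 16) == (16u << 16)); // matches .addImm(SrcSize << 16)
  return 0;
}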
2186bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
2187 MachineBasicBlock *BB = I.getParent();
2188 MachineOperand &ImmOp = I.getOperand(1);
2189 Register DstReg = I.getOperand(0).getReg();
2190 unsigned Size = MRI->getType(DstReg).getSizeInBits();
2191
2192 // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
2193 if (ImmOp.isFPImm()) {
2194 const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
2195 ImmOp.ChangeToImmediate(Imm.getZExtValue());
2196 } else if (ImmOp.isCImm()) {
2197 ImmOp.ChangeToImmediate(ImmOp.getCImm()->getSExtValue());
2198 } else {
2199 llvm_unreachable("Not supported by g_constants");
2200 }
2201
2202 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2203 const bool IsSgpr = DstRB->getID() == AMDGPU::SGPRRegBankID;
2204
2205 unsigned Opcode;
2206 if (DstRB->getID() == AMDGPU::VCCRegBankID) {
2207 Opcode = STI.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
2208 } else {
2209 Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
2210
2211 // We should never produce s1 values on banks other than VCC. If the user of
2212 // this already constrained the register, we may incorrectly think it's VCC
2213 // if it wasn't originally.
2214 if (Size == 1)
2215 return false;
2216 }
2217
2218 if (Size != 64) {
2219 I.setDesc(TII.get(Opcode));
2220 I.addImplicitDefUseOperands(*MF);
2221 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2222 }
2223
2224 const DebugLoc &DL = I.getDebugLoc();
2225
2226 APInt Imm(Size, I.getOperand(1).getImm());
2227
2228 MachineInstr *ResInst;
2229 if (IsSgpr && TII.isInlineConstant(Imm)) {
2230 ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
2231 .addImm(I.getOperand(1).getImm());
2232 } else {
2233 const TargetRegisterClass *RC = IsSgpr ?
2234 &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass;
2235 Register LoReg = MRI->createVirtualRegister(RC);
2236 Register HiReg = MRI->createVirtualRegister(RC);
2237
2238 BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
2239 .addImm(Imm.trunc(32).getZExtValue());
2240
2241 BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
2242 .addImm(Imm.ashr(32).getZExtValue());
2243
2244 ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2245 .addReg(LoReg)
2246 .addImm(AMDGPU::sub0)
2247 .addReg(HiReg)
2248 .addImm(AMDGPU::sub1);
2249 }
2250
2251 // We can't call constrainSelectedInstRegOperands here, because it doesn't
2252 // work for target independent opcodes
2253 I.eraseFromParent();
2254 const TargetRegisterClass *DstRC =
2255 TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI);
2256 if (!DstRC)
2257 return true;
2258 return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);
2259}
2260
2261bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const {
2262 // Only manually handle the f64 SGPR case.
2263 //
2264 // FIXME: This is a workaround for 2.5 different tablegen problems. Because
2265 // the bit ops theoretically have a second result due to the implicit def of
2266 // SCC, the GlobalISelEmitter is overly conservative and rejects it. Fixing
2267 // that is easy by disabling the check. The result works, but uses a
2268 // nonsensical sreg32orlds_and_sreg_1 regclass.
2269 //
2270 // The DAG emitter is more problematic, and incorrectly adds both S_XOR_B32
2271 // results to the variadic REG_SEQUENCE operands.
2272
2273 Register Dst = MI.getOperand(0).getReg();
2274 const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2275 if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2276 MRI->getType(Dst) != LLT::scalar(64))
2277 return false;
2278
2279 Register Src = MI.getOperand(1).getReg();
2280 MachineInstr *Fabs = getOpcodeDef(TargetOpcode::G_FABS, Src, *MRI);
2281 if (Fabs)
2282 Src = Fabs->getOperand(1).getReg();
2283
2284 if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2285 !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2286 return false;
2287
2288 MachineBasicBlock *BB = MI.getParent();
2289 const DebugLoc &DL = MI.getDebugLoc();
2290 Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2291 Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2292 Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2293 Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2294
2295 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2296 .addReg(Src, 0, AMDGPU::sub0);
2297 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2298 .addReg(Src, 0, AMDGPU::sub1);
2299 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2300 .addImm(0x80000000);
2301
2302 // Set or toggle sign bit.
2303 unsigned Opc = Fabs ? AMDGPU::S_OR_B32 : AMDGPU::S_XOR_B32;
2304 BuildMI(*BB, &MI, DL, TII.get(Opc), OpReg)
2305 .addReg(HiReg)
2306 .addReg(ConstReg);
2307 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2308 .addReg(LoReg)
2309 .addImm(AMDGPU::sub0)
2310 .addReg(OpReg)
2311 .addImm(AMDGPU::sub1);
2312 MI.eraseFromParent();
2313 return true;
2314}
2315
2316// FIXME: This is a workaround for the same tablegen problems as G_FNEG
2317bool AMDGPUInstructionSelector::selectG_FABS(MachineInstr &MI) const {
2318 Register Dst = MI.getOperand(0).getReg();
2319 const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2320 if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2321 MRI->getType(Dst) != LLT::scalar(64))
2322 return false;
2323
2324 Register Src = MI.getOperand(1).getReg();
2325 MachineBasicBlock *BB = MI.getParent();
2326 const DebugLoc &DL = MI.getDebugLoc();
2327 Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2328 Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2329 Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2330 Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2331
2332 if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2333 !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2334 return false;
2335
2336 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2337 .addReg(Src, 0, AMDGPU::sub0);
2338 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2339 .addReg(Src, 0, AMDGPU::sub1);
2340 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2341 .addImm(0x7fffffff);
2342
2343 // Clear sign bit.
2344 // TODO: Should this use S_BITSET0_*?
2345 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_AND_B32), OpReg)
2346 .addReg(HiReg)
2347 .addReg(ConstReg);
2348 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2349 .addReg(LoReg)
2350 .addImm(AMDGPU::sub0)
2351 .addReg(OpReg)
2352 .addImm(AMDGPU::sub1);
2353
2354 MI.eraseFromParent();
2355 return true;
2356}
2357
2358static bool isConstant(const MachineInstr &MI) {
2359 return MI.getOpcode() == TargetOpcode::G_CONSTANT;
2360}
2361
2362void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
2363 const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {
2364
2365 const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());
2366
2367 assert(PtrMI);
2368
2369 if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD)
2370 return;
2371
2372 GEPInfo GEPInfo;
2373
2374 for (unsigned i = 1; i != 3; ++i) {
2375 const MachineOperand &GEPOp = PtrMI->getOperand(i);
2376 const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
2377 assert(OpDef);
2378 if (i == 2 && isConstant(*OpDef)) {
2379 // TODO: Could handle constant base + variable offset, but a combine
2380 // probably should have commuted it.
2381 assert(GEPInfo.Imm == 0);
2382 GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
2383 continue;
2384 }
2385 const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
2386 if (OpBank->getID() == AMDGPU::SGPRRegBankID)
2387 GEPInfo.SgprParts.push_back(GEPOp.getReg());
2388 else
2389 GEPInfo.VgprParts.push_back(GEPOp.getReg());
2390 }
2391
2392 AddrInfo.push_back(GEPInfo);
2393 getAddrModeInfo(*PtrMI, MRI, AddrInfo);
2394}
2395
2396bool AMDGPUInstructionSelector::isSGPR(Register Reg) const {
2397 return RBI.getRegBank(Reg, *MRI, TRI)->getID() == AMDGPU::SGPRRegBankID;
2398}
2399
2400bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
2401 if (!MI.hasOneMemOperand())
2402 return false;
2403
2404 const MachineMemOperand *MMO = *MI.memoperands_begin();
2405 const Value *Ptr = MMO->getValue();
2406
2407 // UndefValue means this is a load of a kernel input. These are uniform.
2408 // Sometimes LDS instructions have constant pointers.
2409 // If Ptr is null, then that means this mem operand contains a
2410 // PseudoSourceValue like GOT.
2411 if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
2412 isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
2413 return true;
2414
2415 if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
2416 return true;
2417
2418 const Instruction *I = dyn_cast<Instruction>(Ptr);
2419 return I && I->getMetadata("amdgpu.uniform");
2420}
2421
2422bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
2423 for (const GEPInfo &GEPInfo : AddrInfo) {
2424 if (!GEPInfo.VgprParts.empty())
2425 return true;
2426 }
2427 return false;
2428}
2429
2430void AMDGPUInstructionSelector::initM0(MachineInstr &I) const {
2431 const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2432 unsigned AS = PtrTy.getAddressSpace();
2433 if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) &&
2434 STI.ldsRequiresM0Init()) {
2435 MachineBasicBlock *BB = I.getParent();
2436
2437 // If DS instructions require M0 initialization, insert it before selecting.
2438 BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2439 .addImm(-1);
2440 }
2441}
2442
2443bool AMDGPUInstructionSelector::selectG_LOAD_STORE_ATOMICRMW(
2444 MachineInstr &I) const {
2445 if (I.getOpcode() == TargetOpcode::G_ATOMICRMW_FADD) {
2446 const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2447 unsigned AS = PtrTy.getAddressSpace();
2448 if (AS == AMDGPUAS::GLOBAL_ADDRESS)
2449 return selectGlobalAtomicFadd(I, I.getOperand(1), I.getOperand(2));
2450 }
2451
2452 initM0(I);
2453 return selectImpl(I, *CoverageInfo);
2454}
2455
2456static bool isVCmpResult(Register Reg, MachineRegisterInfo &MRI) {
2457 if (Reg.isPhysical())
2458 return false;
2459
2460 MachineInstr &MI = *MRI.getUniqueVRegDef(Reg);
2461 const unsigned Opcode = MI.getOpcode();
2462
2463 if (Opcode == AMDGPU::COPY)
2464 return isVCmpResult(MI.getOperand(1).getReg(), MRI);
2465
2466 if (Opcode == AMDGPU::G_AND || Opcode == AMDGPU::G_OR ||
2467 Opcode == AMDGPU::G_XOR)
2468 return isVCmpResult(MI.getOperand(1).getReg(), MRI) &&
2469 isVCmpResult(MI.getOperand(2).getReg(), MRI);
2470
2471 if (Opcode == TargetOpcode::G_INTRINSIC)
2472 return MI.getIntrinsicID() == Intrinsic::amdgcn_class;
2473
2474 return Opcode == AMDGPU::G_ICMP || Opcode == AMDGPU::G_FCMP;
2475}
2476
2477bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
2478 MachineBasicBlock *BB = I.getParent();
2479 MachineOperand &CondOp = I.getOperand(0);
2480 Register CondReg = CondOp.getReg();
2481 const DebugLoc &DL = I.getDebugLoc();
2482
2483 unsigned BrOpcode;
2484 Register CondPhysReg;
2485 const TargetRegisterClass *ConstrainRC;
2486
2487 // In SelectionDAG, we inspect the IR block for uniformity metadata to decide
2488 // whether the branch is uniform when selecting the instruction. In
2489 // GlobalISel, we should push that decision into RegBankSelect. Assume for now
2490 // RegBankSelect knows what it's doing if the branch condition is scc, even
2491 // though it currently does not.
2492 if (!isVCC(CondReg, *MRI)) {
2493 if (MRI->getType(CondReg) != LLT::scalar(32))
2494 return false;
2495
2496 CondPhysReg = AMDGPU::SCC;
2497 BrOpcode = AMDGPU::S_CBRANCH_SCC1;
2498 ConstrainRC = &AMDGPU::SReg_32RegClass;
2499 } else {
2500 // FIXME: Should scc->vcc copies be ANDed with exec?
2501
2502 // Unless the value of CondReg is the result of a V_CMP* instruction, we
2503 // need to insert an AND with exec.
2504 if (!isVCmpResult(CondReg, *MRI)) {
2505 const bool Is64 = STI.isWave64();
2506 const unsigned Opcode = Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
2507 const Register Exec = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
2508
2509 Register TmpReg = MRI->createVirtualRegister(TRI.getBoolRC());
2510 BuildMI(*BB, &I, DL, TII.get(Opcode), TmpReg)
2511 .addReg(CondReg)
2512 .addReg(Exec);
2513 CondReg = TmpReg;
2514 }
2515
2516 CondPhysReg = TRI.getVCC();
2517 BrOpcode = AMDGPU::S_CBRANCH_VCCNZ;
2518 ConstrainRC = TRI.getBoolRC();
2519 }
2520
2521 if (!MRI->getRegClassOrNull(CondReg))
2522 MRI->setRegClass(CondReg, ConstrainRC);
2523
2524 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
2525 .addReg(CondReg);
2526 BuildMI(*BB, &I, DL, TII.get(BrOpcode))
2527 .addMBB(I.getOperand(1).getMBB());
2528
2529 I.eraseFromParent();
2530 return true;
2531}
2532
2533bool AMDGPUInstructionSelector::selectG_GLOBAL_VALUE(
2534 MachineInstr &I) const {
2535 Register DstReg = I.getOperand(0).getReg();
2536 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2537 const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2538 I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32));
2539 if (IsVGPR)
2540 I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
2541
2542 return RBI.constrainGenericRegister(
2543 DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI);
2544}
2545
2546bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const {
2547 Register DstReg = I.getOperand(0).getReg();
2548 Register SrcReg = I.getOperand(1).getReg();
2549 Register MaskReg = I.getOperand(2).getReg();
2550 LLT Ty = MRI->getType(DstReg);
2551 LLT MaskTy = MRI->getType(MaskReg);
2552 MachineBasicBlock *BB = I.getParent();
2553 const DebugLoc &DL = I.getDebugLoc();
2554
2555 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2556 const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2557 const RegisterBank *MaskRB = RBI.getRegBank(MaskReg, *MRI, TRI);
2558 const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2559 if (DstRB != SrcRB) // Should only happen for hand-written MIR.
2560 return false;
2561
2562 // Try to avoid emitting a bit operation when we only need to touch half of
2563 // the 64-bit pointer.
2564 APInt MaskOnes = KnownBits->getKnownOnes(MaskReg).zext(64);
2565 const APInt MaskHi32 = APInt::getHighBitsSet(64, 32);
2566 const APInt MaskLo32 = APInt::getLowBitsSet(64, 32);
2567
2568 const bool CanCopyLow32 = (MaskOnes & MaskLo32) == MaskLo32;
2569 const bool CanCopyHi32 = (MaskOnes & MaskHi32) == MaskHi32;
2570
2571 if (!IsVGPR && Ty.getSizeInBits() == 64 &&
2572 !CanCopyLow32 && !CanCopyHi32) {
2573 auto MIB = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_AND_B64), DstReg)
2574 .addReg(SrcReg)
2575 .addReg(MaskReg);
2576 I.eraseFromParent();
2577 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
2578 }
2579
2580 unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
2581 const TargetRegisterClass &RegRC
2582 = IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2583
2584 const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB);
2585 const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB);
2586 const TargetRegisterClass *MaskRC =
2587 TRI.getRegClassForTypeOnBank(MaskTy, *MaskRB);
2588
2589 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2590 !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2591 !RBI.constrainGenericRegister(MaskReg, *MaskRC, *MRI))
2592 return false;
2593
2594 if (Ty.getSizeInBits() == 32) {
2595 assert(MaskTy.getSizeInBits() == 32 &&
2596 "ptrmask should have been narrowed during legalize");
2597
2598 BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg)
2599 .addReg(SrcReg)
2600 .addReg(MaskReg);
2601 I.eraseFromParent();
2602 return true;
2603 }
2604
2605 Register HiReg = MRI->createVirtualRegister(&RegRC);
2606 Register LoReg = MRI->createVirtualRegister(&RegRC);
2607
2608 // Extract the subregisters from the source pointer.
2609 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg)
2610 .addReg(SrcReg, 0, AMDGPU::sub0);
2611 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg)
2612 .addReg(SrcReg, 0, AMDGPU::sub1);
2613
2614 Register MaskedLo, MaskedHi;
2615
2616 if (CanCopyLow32) {
2617 // If all the bits in the low half are 1, we only need a copy for it.
2618 MaskedLo = LoReg;
2619 } else {
2620 // Extract the mask subregister and apply the and.
2621 Register MaskLo = MRI->createVirtualRegister(&RegRC);
2622 MaskedLo = MRI->createVirtualRegister(&RegRC);
2623
2624 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskLo)
2625 .addReg(MaskReg, 0, AMDGPU::sub0);
2626 BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedLo)
2627 .addReg(LoReg)
2628 .addReg(MaskLo);
2629 }
2630
2631 if (CanCopyHi32) {
2632 // If all the bits in the high half are 1, we only need a copy for it.
2633 MaskedHi = HiReg;
2634 } else {
2635 Register MaskHi = MRI->createVirtualRegister(&RegRC);
2636 MaskedHi = MRI->createVirtualRegister(&RegRC);
2637
2638 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskHi)
2639 .addReg(MaskReg, 0, AMDGPU::sub1);
2640 BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedHi)
2641 .addReg(HiReg)
2642 .addReg(MaskHi);
2643 }
2644
2645 BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2646 .addReg(MaskedLo)
2647 .addImm(AMDGPU::sub0)
2648 .addReg(MaskedHi)
2649 .addImm(AMDGPU::sub1);
2650 I.eraseFromParent();
2651 return true;
2652}
2653
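The KnownBits test above decides per 32-bit half whether an AND is needed at all: a half whose mask bits are all known ones needs only a copy. A plain-integer sketch of that decision (the mask is a literal here, standing in for KnownBits->getKnownOnes(MaskReg)):

#include <cassert>
#include <cstdint>

static void canCopyHalves(uint64_t MaskOnes, bool &CanCopyLow32,
                          bool &CanCopyHi32) {
  CanCopyLow32 = (MaskOnes & 0xffffffffULL) == 0xffffffffULL;
  CanCopyHi32 = (MaskOnes >> 32) == 0xffffffffULL;
}

int main() {
  bool Lo, Hi;
  canCopyHalves(~uint64_t(0xfff), Lo, Hi); // 4 KiB alignment mask
  assert(!Lo && Hi); // AND the low half, plain copy for the high half
  return 0;
}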
2654/// Return the register to use for the index value, and the subregister to use
2655/// for the indirectly accessed register.
2656static std::pair<Register, unsigned>
2657computeIndirectRegIndex(MachineRegisterInfo &MRI,
2658 const SIRegisterInfo &TRI,
2659 const TargetRegisterClass *SuperRC,
2660 Register IdxReg,
2661 unsigned EltSize) {
2662 Register IdxBaseReg;
2663 int Offset;
2664
2665 std::tie(IdxBaseReg, Offset) = AMDGPU::getBaseWithConstantOffset(MRI, IdxReg);
2666 if (IdxBaseReg == AMDGPU::NoRegister) {
2667 // This will happen if the index is a known constant. This should ordinarily
2668 // be legalized out, but handle it as a register just in case.
2669 assert(Offset == 0);
2670 IdxBaseReg = IdxReg;
2671 }
2672
2673 ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SuperRC, EltSize);
2674
2675 // Skip out of bounds offsets, or else we would end up using an undefined
2676 // register.
2677 if (static_cast<unsigned>(Offset) >= SubRegs.size())
2678 return std::make_pair(IdxReg, SubRegs[0]);
2679 return std::make_pair(IdxBaseReg, SubRegs[Offset]);
2680}
2681
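A constant addend folded into the index thus selects the subregister directly, leaving only the variable part in the index register; an out-of-range constant falls back to the unsplit index with the first subregister. A plain-integer model for a 4 x 32-bit vector (subregister indices 0..3 stand in for AMDGPU::sub0..sub3):

#include <cassert>
#include <cstddef>
#include <utility>

static std::pair<int, int> splitIndirectIndex(int IdxReg, int IdxBaseReg,
                                              int Offset, size_t NumSubRegs) {
  // Out-of-bounds constant: keep the full index register and sub0, or we
  // would name an undefined subregister.
  if (static_cast<size_t>(Offset) >= NumSubRegs)
    return {IdxReg, 0};
  // In-bounds constant picks the subregister; the variable base remains.
  return {IdxBaseReg, Offset};
}

int main() {
  // Index = base + 2: access subreg 2 relative to the variable base (reg 2).
  assert(splitIndirectIndex(1, 2, 2, 4) == std::make_pair(2, 2));
  // Index = base + 7: out of range, fall back to the full index (reg 1).
  assert(splitIndirectIndex(1, 2, 7, 4) == std::make_pair(1, 0));
  return 0;
}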
2682bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT(
2683 MachineInstr &MI) const {
2684 Register DstReg = MI.getOperand(0).getReg();
2685 Register SrcReg = MI.getOperand(1).getReg();
2686 Register IdxReg = MI.getOperand(2).getReg();
2687
2688 LLT DstTy = MRI->getType(DstReg);
2689 LLT SrcTy = MRI->getType(SrcReg);
2690
2691 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2692 const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2693 const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2694
2695 // The index must be scalar. If it wasn't RegBankSelect should have moved this
2696 // into a waterfall loop.
2697 if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2698 return false;
2699
2700 const TargetRegisterClass *SrcRC =
2701 TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB);
2702 const TargetRegisterClass *DstRC =
2703 TRI.getRegClassForTypeOnBank(DstTy, *DstRB);
2704 if (!SrcRC || !DstRC)
2705 return false;
2706 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2707 !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2708 !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2709 return false;
2710
2711 MachineBasicBlock *BB = MI.getParent();
2712 const DebugLoc &DL = MI.getDebugLoc();
2713 const bool Is64 = DstTy.getSizeInBits() == 64;
2714
2715 unsigned SubReg;
2716 std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, SrcRC, IdxReg,
2717 DstTy.getSizeInBits() / 8);
2718
2719 if (SrcRB->getID() == AMDGPU::SGPRRegBankID) {
2720 if (DstTy.getSizeInBits() != 32 && !Is64)
2721 return false;
2722
2723 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2724 .addReg(IdxReg);
2725
2726 unsigned Opc = Is64 ? AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32;
2727 BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg)
2728 .addReg(SrcReg, 0, SubReg)
2729 .addReg(SrcReg, RegState::Implicit);
2730 MI.eraseFromParent();
2731 return true;
2732 }
2733
2734 if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32)
2735 return false;
2736
2737 if (!STI.useVGPRIndexMode()) {
2738 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2739 .addReg(IdxReg);
2740 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg)
2741 .addReg(SrcReg, 0, SubReg)
2742 .addReg(SrcReg, RegState::Implicit);
2743 MI.eraseFromParent();
2744 return true;
2745 }
2746
2747 const MCInstrDesc &GPRIDXDesc =
2748 TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*SrcRC), true);
2749 BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
2750 .addReg(SrcReg)
2751 .addReg(IdxReg)
2752 .addImm(SubReg);
2753
2754 MI.eraseFromParent();
2755 return true;
2756}
2757
2758// TODO: Fold insert_vector_elt (extract_vector_elt) into movrelsd
2759bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT(
2760 MachineInstr &MI) const {
2761 Register DstReg = MI.getOperand(0).getReg();
2762 Register VecReg = MI.getOperand(1).getReg();
2763 Register ValReg = MI.getOperand(2).getReg();
2764 Register IdxReg = MI.getOperand(3).getReg();
2765
2766 LLT VecTy = MRI->getType(DstReg);
2767 LLT ValTy = MRI->getType(ValReg);
2768 unsigned VecSize = VecTy.getSizeInBits();
2769 unsigned ValSize = ValTy.getSizeInBits();
2770
2771 const RegisterBank *VecRB = RBI.getRegBank(VecReg, *MRI, TRI);
2772 const RegisterBank *ValRB = RBI.getRegBank(ValReg, *MRI, TRI);
2773 const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2774
2775 assert(VecTy.getElementType() == ValTy);
2776
2777 // The index must be scalar. If it wasn't RegBankSelect should have moved this
2778 // into a waterfall loop.
2779 if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2780 return false;
2781
2782 const TargetRegisterClass *VecRC =
2783 TRI.getRegClassForTypeOnBank(VecTy, *VecRB);
2784 const TargetRegisterClass *ValRC =
2785 TRI.getRegClassForTypeOnBank(ValTy, *ValRB);
2786
2787 if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) ||
2788 !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) ||
2789 !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) ||
2790 !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2791 return false;
2792
2793 if (VecRB->getID() == AMDGPU::VGPRRegBankID && ValSize != 32)
2794 return false;
2795
2796 unsigned SubReg;
2797 std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg,
2798 ValSize / 8);
2799
2800 const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID &&
2801 STI.useVGPRIndexMode();
2802
2803 MachineBasicBlock *BB = MI.getParent();
2804 const DebugLoc &DL = MI.getDebugLoc();
2805
2806 if (!IndexMode) {
2807 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2808 .addReg(IdxReg);
2809
2810 const MCInstrDesc &RegWriteOp = TII.getIndirectRegWriteMovRelPseudo(
2811 VecSize, ValSize, VecRB->getID() == AMDGPU::SGPRRegBankID);
2812 BuildMI(*BB, MI, DL, RegWriteOp, DstReg)
2813 .addReg(VecReg)
2814 .addReg(ValReg)
2815 .addImm(SubReg);
2816 MI.eraseFromParent();
2817 return true;
2818 }
2819
2820 const MCInstrDesc &GPRIDXDesc =
2821 TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), false);
2822 BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
2823 .addReg(VecReg)
2824 .addReg(ValReg)
2825 .addReg(IdxReg)
2826 .addImm(SubReg);
2827
2828 MI.eraseFromParent();
2829 return true;
2830}
2831
2832static bool isZeroOrUndef(int X) {
2833 return X == 0 || X == -1;
2834}
2835
2836static bool isOneOrUndef(int X) {
2837 return X == 1 || X == -1;
2838}
2839
2840static bool isZeroOrOneOrUndef(int X) {
2841 return X == 0 || X == 1 || X == -1;
2842}
2843
2844// Normalize a VOP3P shuffle mask to refer to the low/high half of a single
2845// 32-bit register.
2846static Register normalizeVOP3PMask(int NewMask[2], Register Src0, Register Src1,
2847 ArrayRef<int> Mask) {
2848 NewMask[0] = Mask[0];
2849 NewMask[1] = Mask[1];
2850 if (isZeroOrOneOrUndef(Mask[0]) && isZeroOrOneOrUndef(Mask[1]))
2851 return Src0;
2852
2853 assert(NewMask[0] == 2 || NewMask[0] == 3 || NewMask[0] == -1);
2854 assert(NewMask[1] == 2 || NewMask[1] == 3 || NewMask[1] == -1);
2855
2856 // Shift the mask inputs to be 0/1.
2857 NewMask[0] = NewMask[0] == -1 ? -1 : NewMask[0] - 2;
2858 NewMask[1] = NewMask[1] == -1 ? -1 : NewMask[1] - 2;
2859 return Src1;
2860}
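// Illustrative walk-through of normalizeVOP3PMask (a sketch, not exhaustive):
// a mask of {3, 2} reads only Src1, so it is rebased to {1, 0} and Src1 is
// returned; a mask of {0, -1} already refers to Src0's halves (lane 1 undef)
// and is returned unchanged:
//   int NewMask[2];
//   normalizeVOP3PMask(NewMask, Src0, Src1, {3, 2});  // NewMask = {1, 0}, Src1
//   normalizeVOP3PMask(NewMask, Src0, Src1, {0, -1}); // NewMask = {0, -1}, Src0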
2861
2862// This is only legal with VOP3P instructions as an aid to op_sel matching.
2863bool AMDGPUInstructionSelector::selectG_SHUFFLE_VECTOR(
2864 MachineInstr &MI) const {
2865 Register DstReg = MI.getOperand(0).getReg();
2866 Register Src0Reg = MI.getOperand(1).getReg();
2867 Register Src1Reg = MI.getOperand(2).getReg();
2868 ArrayRef<int> ShufMask = MI.getOperand(3).getShuffleMask();
2869
2870 const LLT V2S16 = LLT::fixed_vector(2, 16);
2871 if (MRI->getType(DstReg) != V2S16 || MRI->getType(Src0Reg) != V2S16)
2872 return false;
2873
2874 if (!AMDGPU::isLegalVOP3PShuffleMask(ShufMask))
2875 return false;
2876
2877 assert(ShufMask.size() == 2);
2878
2879 MachineBasicBlock *MBB = MI.getParent();
2880 const DebugLoc &DL = MI.getDebugLoc();
2881
2882 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2883 const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
2884 const TargetRegisterClass &RC = IsVALU ?
2885 AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2886
2887 // Handle the degenerate case which should have been folded out.
2888 if (ShufMask[0] == -1 && ShufMask[1] == -1) {
2889 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::IMPLICIT_DEF), DstReg);
2890
2891 MI.eraseFromParent();
2892 return RBI.constrainGenericRegister(DstReg, RC, *MRI);
2893 }
2894
2895 // A legal VOP3P mask only reads one of the sources.
2896 int Mask[2];
2897 Register SrcVec = normalizeVOP3PMask(Mask, Src0Reg, Src1Reg, ShufMask);
2898
2899 if (!RBI.constrainGenericRegister(DstReg, RC, *MRI) ||
2900 !RBI.constrainGenericRegister(SrcVec, RC, *MRI))
2901 return false;
2902
2903 // TODO: This also should have been folded out
2904 if (isZeroOrUndef(Mask[0]) && isOneOrUndef(Mask[1])) {
2905 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::COPY), DstReg)
2906 .addReg(SrcVec);
2907
2908 MI.eraseFromParent();
2909 return true;
2910 }
2911
2912 if (Mask[0] == 1 && Mask[1] == -1) {
2913 if (IsVALU) {
2914 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
2915 .addImm(16)
2916 .addReg(SrcVec);
2917 } else {
2918 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
2919 .addReg(SrcVec)
2920 .addImm(16);
2921 }
2922 } else if (Mask[0] == -1 && Mask[1] == 0) {
2923 if (IsVALU) {
2924 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), DstReg)
2925 .addImm(16)
2926 .addReg(SrcVec);
2927 } else {
2928 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHL_B32), DstReg)
2929 .addReg(SrcVec)
2930 .addImm(16);
2931 }
2932 } else if (Mask[0] == 0 && Mask[1] == 0) {
2933 if (IsVALU) {
2934 if (STI.hasSDWA()) {
2935 // Write low half of the register into the high half.
2936 MachineInstr *MovSDWA =
2937 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2938 .addImm(0) // $src0_modifiers
2939 .addReg(SrcVec) // $src0
2940 .addImm(0) // $clamp
2941 .addImm(AMDGPU::SDWA::WORD_1) // $dst_sel
2942 .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2943 .addImm(AMDGPU::SDWA::WORD_0) // $src0_sel
2944 .addReg(SrcVec, RegState::Implicit);
2945 MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2946 } else {
2947 Register TmpReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2948 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_AND_B32_e32), TmpReg)
2949 .addImm(0xFFFF)
2950 .addReg(SrcVec);
2951 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHL_OR_B32_e64), DstReg)
2952 .addReg(TmpReg)
2953 .addImm(16)
2954 .addReg(TmpReg);
2955 }
2956 } else {
2957 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2958 .addReg(SrcVec)
2959 .addReg(SrcVec);
2960 }
2961 } else if (Mask[0] == 1 && Mask[1] == 1) {
2962 if (IsVALU) {
2963 if (STI.hasSDWA()) {
2964 // Write high half of the register into the low half.
2965 MachineInstr *MovSDWA =
2966 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2967 .addImm(0) // $src0_modifiers
2968 .addReg(SrcVec) // $src0
2969 .addImm(0) // $clamp
2970 .addImm(AMDGPU::SDWA::WORD_0) // $dst_sel
2971 .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2972 .addImm(AMDGPU::SDWA::WORD_1) // $src0_sel
2973 .addReg(SrcVec, RegState::Implicit);
2974 MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2975 } else {
2976 Register TmpReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2977 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), TmpReg)
2978 .addImm(16)
2979 .addReg(SrcVec);
2980 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHL_OR_B32_e64), DstReg)
2981 .addReg(TmpReg)
2982 .addImm(16)
2983 .addReg(TmpReg);
2984 }
2985 } else {
2986 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_HH_B32_B16), DstReg)
2987 .addReg(SrcVec)
2988 .addReg(SrcVec);
2989 }
2990 } else if (Mask[0] == 1 && Mask[1] == 0) {
2991 if (IsVALU) {
2992 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_ALIGNBIT_B32_e64), DstReg)
2993 .addReg(SrcVec)
2994 .addReg(SrcVec)
2995 .addImm(16);
2996 } else {
2997 if (STI.hasSPackHL()) {
2998 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_HL_B32_B16), DstReg)
2999 .addReg(SrcVec)
3000 .addReg(SrcVec);
3001 } else {
3002 Register TmpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3003 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), TmpReg)
3004 .addReg(SrcVec)
3005 .addImm(16);
3006 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
3007 .addReg(TmpReg)
3008 .addReg(SrcVec);
3009 }
3010 }
3011 } else
3012 llvm_unreachable("all shuffle masks should be handled");
3013
3014 MI.eraseFromParent();
3015 return true;
3016}
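// Summary of the mask dispatch above for the single live source (sketch):
//   {0, 1}  -> COPY                                 (identity)
//   {1, -1} -> shift right by 16                    (high half to low lane)
//   {-1, 0} -> shift left by 16                     (low half to high lane)
//   {0, 0}  -> SDWA mov / and+lshl_or / S_PACK_LL   (broadcast low half)
//   {1, 1}  -> SDWA mov / shr+lshl_or / S_PACK_HH   (broadcast high half)
//   {1, 0}  -> V_ALIGNBIT / S_PACK_HL or shr+S_PACK_LL  (swap halves)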
3017
3018bool AMDGPUInstructionSelector::selectAMDGPU_BUFFER_ATOMIC_FADD(
3019 MachineInstr &MI) const {
3020 const Register DefReg = MI.getOperand(0).getReg();
3021 LLT DefTy = MRI->getType(DefReg);
3022 if (AMDGPU::hasAtomicFaddRtnForTy(STI, DefTy))
3023 return selectImpl(MI, *CoverageInfo);
3024
3025 MachineBasicBlock *MBB = MI.getParent();
3026 const DebugLoc &DL = MI.getDebugLoc();
3027
3028 if (!MRI->use_nodbg_empty(DefReg)) {
3029 Function &F = MBB->getParent()->getFunction();
3030 DiagnosticInfoUnsupported
3031 NoFpRet(F, "return versions of fp atomics not supported",
3032 MI.getDebugLoc(), DS_Error);
3033 F.getContext().diagnose(NoFpRet);
3034 return false;
3035 }
3036
3037 // FIXME: This is only needed because tablegen requires the number of dst
3038 // operands in the match and replace patterns to be the same. Otherwise
3039 // patterns can be exported from the SDag path.
3040 MachineOperand &VDataIn = MI.getOperand(1);
3041 MachineOperand &VIndex = MI.getOperand(3);
3042 MachineOperand &VOffset = MI.getOperand(4);
3043 MachineOperand &SOffset = MI.getOperand(5);
3044 int16_t Offset = MI.getOperand(6).getImm();
3045
3046 bool HasVOffset = !isOperandImmEqual(VOffset, 0, *MRI);
3047 bool HasVIndex = !isOperandImmEqual(VIndex, 0, *MRI);
3048
3049 unsigned Opcode;
3050 if (HasVOffset) {
3051 Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN
3052 : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN;
3053 } else {
3054 Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN
3055 : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET;
3056 }
3057
3058 if (MRI->getType(VDataIn.getReg()).isVector()) {
3059 switch (Opcode) {
3060 case AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN:
3061 Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN;
3062 break;
3063 case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN:
3064 Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFEN;
3065 break;
3066 case AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN:
3067 Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_IDXEN;
3068 break;
3069 case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET:
3070 Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFSET;
3071 break;
3072 }
3073 }
3074
3075 auto I = BuildMI(*MBB, MI, DL, TII.get(Opcode));
3076 I.add(VDataIn);
3077
3078 if (Opcode == AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN ||
3079 Opcode == AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN) {
3080 Register IdxReg = MRI->createVirtualRegister(TRI.getVGPR64Class());
3081 BuildMI(*MBB, &*I, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg)
3082 .addReg(VIndex.getReg())
3083 .addImm(AMDGPU::sub0)
3084 .addReg(VOffset.getReg())
3085 .addImm(AMDGPU::sub1);
3086
3087 I.addReg(IdxReg);
3088 } else if (HasVIndex) {
3089 I.add(VIndex);
3090 } else if (HasVOffset) {
3091 I.add(VOffset);
3092 }
3093
3094 I.add(MI.getOperand(2)); // rsrc
3095 I.add(SOffset);
3096 I.addImm(Offset);
3097 I.addImm(MI.getOperand(7).getImm()); // cpol
3098 I.cloneMemRefs(MI);
3099
3100 MI.eraseFromParent();
3101
3102 return true;
3103}
3104
3105bool AMDGPUInstructionSelector::selectGlobalAtomicFadd(
3106 MachineInstr &MI, MachineOperand &AddrOp, MachineOperand &DataOp) const {
3107
3108 if (STI.hasGFX90AInsts()) {
3109 // gfx90a adds return versions of the global atomic fadd instructions so no
3110 // special handling is required.
3111 return selectImpl(MI, *CoverageInfo);
3112 }
3113
3114 MachineBasicBlock *MBB = MI.getParent();
3115 const DebugLoc &DL = MI.getDebugLoc();
3116
3117 if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) {
3118 Function &F = MBB->getParent()->getFunction();
3119 DiagnosticInfoUnsupported
3120 NoFpRet(F, "return versions of fp atomics not supported",
3121 MI.getDebugLoc(), DS_Error);
3122 F.getContext().diagnose(NoFpRet);
3123 return false;
3124 }
3125
3126 // FIXME: This is only needed because tablegen requires the number of dst
3127 // operands in the match and replace patterns to be the same. Otherwise
3128 // patterns can be exported from the SDag path.
3129 auto Addr = selectFlatOffsetImpl(AddrOp, SIInstrFlags::FlatGlobal);
3130
3131 Register Data = DataOp.getReg();
3132 const unsigned Opc = MRI->getType(Data).isVector() ?
3133 AMDGPU::GLOBAL_ATOMIC_PK_ADD_F16 : AMDGPU::GLOBAL_ATOMIC_ADD_F32;
3134 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc))
3135 .addReg(Addr.first)
3136 .addReg(Data)
3137 .addImm(Addr.second)
3138 .addImm(0) // cpol
3139 .cloneMemRefs(MI);
3140
3141 MI.eraseFromParent();
3142 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3143}
3144
3145bool AMDGPUInstructionSelector::selectBufferLoadLds(MachineInstr &MI) const {
3146 unsigned Opc;
3147 unsigned Size = MI.getOperand(3).getImm();
3148
3149 // The struct intrinsic variants add one additional operand over raw.
3150 const bool HasVIndex = MI.getNumOperands() == 9;
3151 Register VIndex;
3152 int OpOffset = 0;
3153 if (HasVIndex) {
3154 VIndex = MI.getOperand(4).getReg();
3155 OpOffset = 1;
3156 }
3157
3158 Register VOffset = MI.getOperand(4 + OpOffset).getReg();
3159 Optional<ValueAndVReg> MaybeVOffset =
3160 getIConstantVRegValWithLookThrough(VOffset, *MRI);
3161 const bool HasVOffset = !MaybeVOffset || MaybeVOffset->Value.getZExtValue();
3162
3163 switch (Size) {
3164 default:
3165 return false;
3166 case 1:
3167 Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_UBYTE_LDS_BOTHEN
3168 : AMDGPU::BUFFER_LOAD_UBYTE_LDS_IDXEN
3169 : HasVOffset ? AMDGPU::BUFFER_LOAD_UBYTE_LDS_OFFEN
3170 : AMDGPU::BUFFER_LOAD_UBYTE_LDS_OFFSET;
3171 break;
3172 case 2:
3173 Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_USHORT_LDS_BOTHEN
3174 : AMDGPU::BUFFER_LOAD_USHORT_LDS_IDXEN
3175 : HasVOffset ? AMDGPU::BUFFER_LOAD_USHORT_LDS_OFFEN
3176 : AMDGPU::BUFFER_LOAD_USHORT_LDS_OFFSET;
3177 break;
3178 case 4:
3179 Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_DWORD_LDS_BOTHEN
3180 : AMDGPU::BUFFER_LOAD_DWORD_LDS_IDXEN
3181 : HasVOffset ? AMDGPU::BUFFER_LOAD_DWORD_LDS_OFFEN
3182 : AMDGPU::BUFFER_LOAD_DWORD_LDS_OFFSET;
3183 break;
3184 }
3185
3186 MachineBasicBlock *MBB = MI.getParent();
3187 const DebugLoc &DL = MI.getDebugLoc();
3188 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
3189 .add(MI.getOperand(2));
3190
3191 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc));
3192
3193 if (HasVIndex && HasVOffset) {
3194 Register IdxReg = MRI->createVirtualRegister(TRI.getVGPR64Class());
3195 BuildMI(*MBB, &*MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg)
3196 .addReg(VIndex)
3197 .addImm(AMDGPU::sub0)
3198 .addReg(VOffset)
3199 .addImm(AMDGPU::sub1);
3200
3201 MIB.addReg(IdxReg);
3202 } else if (HasVIndex) {
3203 MIB.addReg(VIndex);
3204 } else if (HasVOffset) {
3205 MIB.addReg(VOffset);
3206 }
3207
3208 MIB.add(MI.getOperand(1)); // rsrc
3209 MIB.add(MI.getOperand(5 + OpOffset)); // soffset
3210 MIB.add(MI.getOperand(6 + OpOffset)); // imm offset
3211 unsigned Aux = MI.getOperand(7 + OpOffset).getImm();
3212 MIB.addImm(Aux & AMDGPU::CPol::ALL); // cpol
3213 MIB.addImm((Aux >> 3) & 1); // swz
3214
3215 MachineMemOperand *LoadMMO = *MI.memoperands_begin();
3216 MachinePointerInfo LoadPtrI = LoadMMO->getPointerInfo();
3217 LoadPtrI.Offset = MI.getOperand(6 + OpOffset).getImm();
3218 MachinePointerInfo StorePtrI = LoadPtrI;
3219 StorePtrI.V = nullptr;
3220 StorePtrI.AddrSpace = AMDGPUAS::LOCAL_ADDRESS;
3221
3222 auto F = LoadMMO->getFlags() &
3223 ~(MachineMemOperand::MOStore | MachineMemOperand::MOLoad);
3224 LoadMMO = MF->getMachineMemOperand(LoadPtrI, F | MachineMemOperand::MOLoad,
3225 Size, LoadMMO->getBaseAlign());
3226
3227 MachineMemOperand *StoreMMO =
3228 MF->getMachineMemOperand(StorePtrI, F | MachineMemOperand::MOStore,
3229 sizeof(int32_t), LoadMMO->getBaseAlign());
3230
3231 MIB.setMemRefs({LoadMMO, StoreMMO});
3232
3233 MI.eraseFromParent();
3234 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3235}
3236
3237/// Match a zero extend from a 32-bit value to 64-bits.
3238static Register matchZeroExtendFromS32(MachineRegisterInfo &MRI, Register Reg) {
3239 Register ZExtSrc;
3240 if (mi_match(Reg, MRI, m_GZExt(m_Reg(ZExtSrc))))
3241 return MRI.getType(ZExtSrc) == LLT::scalar(32) ? ZExtSrc : Register();
3242
3243 // Match legalized form %zext = G_MERGE_VALUES (s32 %x), (s32 0)
3244 const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
3245 if (Def->getOpcode() != AMDGPU::G_MERGE_VALUES)
3246 return Register();
3247
3248 assert(Def->getNumOperands() == 3 &&
3249        MRI.getType(Def->getOperand(0).getReg()) == LLT::scalar(64));
3250 if (mi_match(Def->getOperand(2).getReg(), MRI, m_ZeroInt())) {
3251 return Def->getOperand(1).getReg();
3252 }
3253
3254 return Register();
3255}
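// The two accepted shapes, as a MIR sketch:
//   %out:_(s64) = G_ZEXT %x:_(s32)                        ; returns %x
//   %out:_(s64) = G_MERGE_VALUES %x:_(s32), %zero:_(s32)  ; returns %x when
//                                                         ; %zero is constant 0
// Anything else yields a null Register().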
3256
3257 bool AMDGPUInstructionSelector::selectGlobalLoadLds(MachineInstr &MI) const {
3258 unsigned Opc;
3259 unsigned Size = MI.getOperand(3).getImm();
3260
3261 switch (Size) {
3262 default:
3263 return false;
3264 case 1:
3265 Opc = AMDGPU::GLOBAL_LOAD_LDS_UBYTE;
3266 break;
3267 case 2:
3268 Opc = AMDGPU::GLOBAL_LOAD_LDS_USHORT;
3269 break;
3270 case 4:
3271 Opc = AMDGPU::GLOBAL_LOAD_LDS_DWORD;
3272 break;
3273 }
3274
3275 MachineBasicBlock *MBB = MI.getParent();
3276 const DebugLoc &DL = MI.getDebugLoc();
3277 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
3278 .add(MI.getOperand(2));
3279
3280 Register Addr = MI.getOperand(1).getReg();
3281 Register VOffset;
3282 // Try to split SAddr and VOffset. Global and LDS pointers share the same
3283 // immediate offset, so we cannot use a regular SelectGlobalSAddr().
3284 if (!isSGPR(Addr)) {
3285 auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3286 if (isSGPR(AddrDef->Reg)) {
3287 Addr = AddrDef->Reg;
3288 } else if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
3289 Register SAddr =
3290 getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI);
3291 if (SAddr && isSGPR(SAddr)) {
3292 Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg();
3293 if (Register Off = matchZeroExtendFromS32(*MRI, PtrBaseOffset)) {
3294 Addr = SAddr;
3295 VOffset = Off;
3296 }
3297 }
3298 }
3299 }
3300
3301 if (isSGPR(Addr)) {
3302 Opc = AMDGPU::getGlobalSaddrOp(Opc);
3303 if (!VOffset) {
3304 VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3305 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_MOV_B32_e32), VOffset)
3306 .addImm(0);
3307 }
3308 }
3309
3310 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc))
3311 .addReg(Addr);
3312
3313 if (isSGPR(Addr))
3314 MIB.addReg(VOffset);
3315
3316 MIB.add(MI.getOperand(4)) // offset
3317 .add(MI.getOperand(5)); // cpol
3318
3319 MachineMemOperand *LoadMMO = *MI.memoperands_begin();
3320 MachinePointerInfo LoadPtrI = LoadMMO->getPointerInfo();
3321 LoadPtrI.Offset = MI.getOperand(4).getImm();
3322 MachinePointerInfo StorePtrI = LoadPtrI;
3323 LoadPtrI.AddrSpace = AMDGPUAS::GLOBAL_ADDRESS;
3324 StorePtrI.AddrSpace = AMDGPUAS::LOCAL_ADDRESS;
3325 auto F = LoadMMO->getFlags() &
3326 ~(MachineMemOperand::MOStore | MachineMemOperand::MOLoad);
3327 LoadMMO = MF->getMachineMemOperand(LoadPtrI, F | MachineMemOperand::MOLoad,
3328 Size, LoadMMO->getBaseAlign());
3329 MachineMemOperand *StoreMMO =
3330 MF->getMachineMemOperand(StorePtrI, F | MachineMemOperand::MOStore,
3331 sizeof(int32_t), Align(4));
3332
3333 MIB.setMemRefs({LoadMMO, StoreMMO});
3334
3335 MI.eraseFromParent();
3336 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3337}
3338
3339 bool AMDGPUInstructionSelector::selectBVHIntrinsic(MachineInstr &MI) const {
3340 MI.setDesc(TII.get(MI.getOperand(1).getImm()));
3341 MI.removeOperand(1);
3342 MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
3343 return true;
3344}
3345
3346bool AMDGPUInstructionSelector::selectSMFMACIntrin(MachineInstr &MI) const {
3347 unsigned Opc;
3348 switch (MI.getIntrinsicID()) {
3349 case Intrinsic::amdgcn_smfmac_f32_16x16x32_f16:
3350 Opc = AMDGPU::V_SMFMAC_F32_16X16X32_F16_e64;
3351 break;
3352 case Intrinsic::amdgcn_smfmac_f32_32x32x16_f16:
3353 Opc = AMDGPU::V_SMFMAC_F32_32X32X16_F16_e64;
3354 break;
3355 case Intrinsic::amdgcn_smfmac_f32_16x16x32_bf16:
3356 Opc = AMDGPU::V_SMFMAC_F32_16X16X32_BF16_e64;
3357 break;
3358 case Intrinsic::amdgcn_smfmac_f32_32x32x16_bf16:
3359 Opc = AMDGPU::V_SMFMAC_F32_32X32X16_BF16_e64;
3360 break;
3361 case Intrinsic::amdgcn_smfmac_i32_16x16x64_i8:
3362 Opc = AMDGPU::V_SMFMAC_I32_16X16X64_I8_e64;
3363 break;
3364 case Intrinsic::amdgcn_smfmac_i32_32x32x32_i8:
3365 Opc = AMDGPU::V_SMFMAC_I32_32X32X32_I8_e64;
3366 break;
3367 case Intrinsic::amdgcn_smfmac_f32_16x16x64_bf8_bf8:
3368 Opc = AMDGPU::V_SMFMAC_F32_16X16X64_BF8_BF8_e64;
3369 break;
3370 case Intrinsic::amdgcn_smfmac_f32_16x16x64_bf8_fp8:
3371 Opc = AMDGPU::V_SMFMAC_F32_16X16X64_BF8_FP8_e64;
3372 break;
3373 case Intrinsic::amdgcn_smfmac_f32_16x16x64_fp8_bf8:
3374 Opc = AMDGPU::V_SMFMAC_F32_16X16X64_FP8_BF8_e64;
3375 break;
3376 case Intrinsic::amdgcn_smfmac_f32_16x16x64_fp8_fp8:
3377 Opc = AMDGPU::V_SMFMAC_F32_16X16X64_FP8_FP8_e64;
3378 break;
3379 case Intrinsic::amdgcn_smfmac_f32_32x32x32_bf8_bf8:
3380 Opc = AMDGPU::V_SMFMAC_F32_32X32X32_BF8_BF8_e64;
3381 break;
3382 case Intrinsic::amdgcn_smfmac_f32_32x32x32_bf8_fp8:
3383 Opc = AMDGPU::V_SMFMAC_F32_32X32X32_BF8_FP8_e64;
3384 break;
3385 case Intrinsic::amdgcn_smfmac_f32_32x32x32_fp8_bf8:
3386 Opc = AMDGPU::V_SMFMAC_F32_32X32X32_FP8_BF8_e64;
3387 break;
3388 case Intrinsic::amdgcn_smfmac_f32_32x32x32_fp8_fp8:
3389 Opc = AMDGPU::V_SMFMAC_F32_32X32X32_FP8_FP8_e64;
3390 break;
3391 default:
3392 llvm_unreachable("unhandled smfmac intrinsic");
3393 }
3394
3395 auto VDst_In = MI.getOperand(4);
3396
3397 MI.setDesc(TII.get(Opc));
3398 MI.removeOperand(4); // VDst_In
3399 MI.removeOperand(1); // Intrinsic ID
3400 MI.addOperand(VDst_In); // Readd VDst_In to the end
3401 MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
3402 return true;
3403}
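// Operand rewrite performed above, in sketch form (operand order approximate):
// the generic
//   %d = G_INTRINSIC @llvm.amdgcn.smfmac..., %a, %b, %vdst_in, ...
// loses its intrinsic-ID operand and has %vdst_in moved to the end of the
// operand list, where the selected V_SMFMAC_* pseudo expects its tied input.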
3404
3405bool AMDGPUInstructionSelector::selectWaveAddress(MachineInstr &MI) const {
3406 Register DstReg = MI.getOperand(0).getReg();
3407 Register SrcReg = MI.getOperand(1).getReg();
3408 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
3409 const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
3410 MachineBasicBlock *MBB = MI.getParent();
3411 const DebugLoc &DL = MI.getDebugLoc();
3412
3413 if (IsVALU) {
3414 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
3415 .addImm(Subtarget->getWavefrontSizeLog2())
3416 .addReg(SrcReg);
3417 } else {
3418 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
3419 .addReg(SrcReg)
3420 .addImm(Subtarget->getWavefrontSizeLog2());
3421 }
3422
3423 const TargetRegisterClass &RC =
3424 IsVALU ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
3425 if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
3426 return false;
3427
3428 MI.eraseFromParent();
3429 return true;
3430}
3431
3432bool AMDGPUInstructionSelector::select(MachineInstr &I) {
3433 if (I.isPHI())
3434 return selectPHI(I);
3435
3436 if (!I.isPreISelOpcode()) {
3437 if (I.isCopy())
3438 return selectCOPY(I);
3439 return true;
3440 }
3441
3442 switch (I.getOpcode()) {
3443 case TargetOpcode::G_AND:
3444 case TargetOpcode::G_OR:
3445 case TargetOpcode::G_XOR:
3446 if (selectImpl(I, *CoverageInfo))
3447 return true;
3448 return selectG_AND_OR_XOR(I);
3449 case TargetOpcode::G_ADD:
3450 case TargetOpcode::G_SUB:
3451 if (selectImpl(I, *CoverageInfo))
3452 return true;
3453 return selectG_ADD_SUB(I);
3454 case TargetOpcode::G_UADDO:
3455 case TargetOpcode::G_USUBO:
3456 case TargetOpcode::G_UADDE:
3457 case TargetOpcode::G_USUBE:
3458 return selectG_UADDO_USUBO_UADDE_USUBE(I);
3459 case AMDGPU::G_AMDGPU_MAD_U64_U32:
3460 case AMDGPU::G_AMDGPU_MAD_I64_I32:
3461 return selectG_AMDGPU_MAD_64_32(I);
3462 case TargetOpcode::G_INTTOPTR:
3463 case TargetOpcode::G_BITCAST:
3464 case TargetOpcode::G_PTRTOINT:
3465 return selectCOPY(I);
3466 case TargetOpcode::G_CONSTANT:
3467 case TargetOpcode::G_FCONSTANT:
3468 return selectG_CONSTANT(I);
3469 case TargetOpcode::G_FNEG:
3470 if (selectImpl(I, *CoverageInfo))
3471 return true;
3472 return selectG_FNEG(I);
3473 case TargetOpcode::G_FABS:
3474 if (selectImpl(I, *CoverageInfo))
3475 return true;
3476 return selectG_FABS(I);
3477 case TargetOpcode::G_EXTRACT:
3478 return selectG_EXTRACT(I);
3479 case TargetOpcode::G_MERGE_VALUES:
3480 case TargetOpcode::G_BUILD_VECTOR:
3481 case TargetOpcode::G_CONCAT_VECTORS:
3482 return selectG_MERGE_VALUES(I);
3483 case TargetOpcode::G_UNMERGE_VALUES:
3484 return selectG_UNMERGE_VALUES(I);
3485 case TargetOpcode::G_BUILD_VECTOR_TRUNC:
3486 return selectG_BUILD_VECTOR_TRUNC(I);
3487 case TargetOpcode::G_PTR_ADD:
3488 if (selectImpl(I, *CoverageInfo))
3489 return true;
3490 return selectG_PTR_ADD(I);
3491 case TargetOpcode::G_IMPLICIT_DEF:
3492 return selectG_IMPLICIT_DEF(I);
3493 case TargetOpcode::G_FREEZE:
3494 return selectCOPY(I);
3495 case TargetOpcode::G_INSERT:
3496 return selectG_INSERT(I);
3497 case TargetOpcode::G_INTRINSIC:
3498 return selectG_INTRINSIC(I);
3499 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
3500 return selectG_INTRINSIC_W_SIDE_EFFECTS(I);
3501 case TargetOpcode::G_ICMP:
3502 if (selectG_ICMP(I))
3503 return true;
3504 return selectImpl(I, *CoverageInfo);
3505 case TargetOpcode::G_LOAD:
3506 case TargetOpcode::G_STORE:
3507 case TargetOpcode::G_ATOMIC_CMPXCHG:
3508 case TargetOpcode::G_ATOMICRMW_XCHG:
3509 case TargetOpcode::G_ATOMICRMW_ADD:
3510 case TargetOpcode::G_ATOMICRMW_SUB:
3511 case TargetOpcode::G_ATOMICRMW_AND:
3512 case TargetOpcode::G_ATOMICRMW_OR:
3513 case TargetOpcode::G_ATOMICRMW_XOR:
3514 case TargetOpcode::G_ATOMICRMW_MIN:
3515 case TargetOpcode::G_ATOMICRMW_MAX:
3516 case TargetOpcode::G_ATOMICRMW_UMIN:
3517 case TargetOpcode::G_ATOMICRMW_UMAX:
3518 case TargetOpcode::G_ATOMICRMW_FADD:
3519 case AMDGPU::G_AMDGPU_ATOMIC_INC:
3520 case AMDGPU::G_AMDGPU_ATOMIC_DEC:
3521 case AMDGPU::G_AMDGPU_ATOMIC_FMIN:
3522 case AMDGPU::G_AMDGPU_ATOMIC_FMAX:
3523 return selectG_LOAD_STORE_ATOMICRMW(I);
3524 case TargetOpcode::G_SELECT:
3525 return selectG_SELECT(I);
3526 case TargetOpcode::G_TRUNC:
3527 return selectG_TRUNC(I);
3528 case TargetOpcode::G_SEXT:
3529 case TargetOpcode::G_ZEXT:
3530 case TargetOpcode::G_ANYEXT:
3531 case TargetOpcode::G_SEXT_INREG:
3532 if (selectImpl(I, *CoverageInfo))
3533 return true;
3534 return selectG_SZA_EXT(I);
3535 case TargetOpcode::G_BRCOND:
3536 return selectG_BRCOND(I);
3537 case TargetOpcode::G_GLOBAL_VALUE:
3538 return selectG_GLOBAL_VALUE(I);
3539 case TargetOpcode::G_PTRMASK:
3540 return selectG_PTRMASK(I);
3541 case TargetOpcode::G_EXTRACT_VECTOR_ELT:
3542 return selectG_EXTRACT_VECTOR_ELT(I);
3543 case TargetOpcode::G_INSERT_VECTOR_ELT:
3544 return selectG_INSERT_VECTOR_ELT(I);
3545 case TargetOpcode::G_SHUFFLE_VECTOR:
3546 return selectG_SHUFFLE_VECTOR(I);
3547 case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD:
3548 case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16:
3549 case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE:
3550 case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16: {
3551 const AMDGPU::ImageDimIntrinsicInfo *Intr
3552 = AMDGPU::getImageDimIntrinsicInfo(I.getIntrinsicID());
3553 assert(Intr && "not an image intrinsic with image pseudo");
3554 return selectImageIntrinsic(I, Intr);
3555 }
3556 case AMDGPU::G_AMDGPU_INTRIN_BVH_INTERSECT_RAY:
3557 return selectBVHIntrinsic(I);
3558 case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD:
3559 return selectAMDGPU_BUFFER_ATOMIC_FADD(I);
3560 case AMDGPU::G_SBFX:
3561 case AMDGPU::G_UBFX:
3562 return selectG_SBFX_UBFX(I);
3563 case AMDGPU::G_SI_CALL:
3564 I.setDesc(TII.get(AMDGPU::SI_CALL));
3565 return true;
3566 case AMDGPU::G_AMDGPU_WAVE_ADDRESS:
3567 return selectWaveAddress(I);
3568 default:
3569 return selectImpl(I, *CoverageInfo);
3570 }
3571 return false;
3572}
3573
3574InstructionSelector::ComplexRendererFns
3575AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
3576 return {{
3577 [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3578 }};
3579
3580}
3581
3582std::pair<Register, unsigned> AMDGPUInstructionSelector::selectVOP3ModsImpl(
3583 MachineOperand &Root, bool AllowAbs, bool OpSel, bool ForceVGPR) const {
3584 Register Src = Root.getReg();
3585 Register OrigSrc = Src;
3586 unsigned Mods = 0;
3587 MachineInstr *MI = getDefIgnoringCopies(Src, *MRI);
3588
3589 if (MI && MI->getOpcode() == AMDGPU::G_FNEG) {
3590 Src = MI->getOperand(1).getReg();
3591 Mods |= SISrcMods::NEG;
3592 MI = getDefIgnoringCopies(Src, *MRI);
3593 }
3594
3595 if (AllowAbs && MI && MI->getOpcode() == AMDGPU::G_FABS) {
3596 Src = MI->getOperand(1).getReg();
3597 Mods |= SISrcMods::ABS;
3598 }
3599
3600 if (OpSel)
3601 Mods |= SISrcMods::OP_SEL_0;
3602
3603 if ((Mods != 0 || ForceVGPR) &&
3604 RBI.getRegBank(Src, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) {
3605 MachineInstr *UseMI = Root.getParent();
3606
3607 // If we looked through copies to find source modifiers on an SGPR operand,
3608 // we now have an SGPR register source. To avoid potentially violating the
3609 // constant bus restriction, we need to insert a copy to a VGPR.
3610 Register VGPRSrc = MRI->cloneVirtualRegister(OrigSrc);
3611 BuildMI(*UseMI->getParent(), UseMI, UseMI->getDebugLoc(),
3612 TII.get(AMDGPU::COPY), VGPRSrc)
3613 .addReg(Src);
3614 Src = VGPRSrc;
3615 }
3616
3617 return std::make_pair(Src, Mods);
3618}
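// Example fold (sketch): for a chain
//   %a = G_FABS %x
//   %b = G_FNEG %a
// selectVOP3ModsImpl(%b) returns (%x, SISrcMods::NEG | SISrcMods::ABS),
// i.e. -|x| is expressed purely through source modifiers. If %x lives in an
// SGPR, the copy-to-VGPR above guards the constant bus limit.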
3619
3620///
3621/// This will select either an SGPR or VGPR operand and will save us from
3622/// having to write an extra tablegen pattern.
3623InstructionSelector::ComplexRendererFns
3624AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
3625 return {{
3626 [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3627 }};
3628}
3629
3630InstructionSelector::ComplexRendererFns
3631AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
3632 Register Src;
3633 unsigned Mods;
3634 std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3635
3636 return {{
3637 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3638 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3639 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
3640 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // omod
3641 }};
3642}
3643
3644InstructionSelector::ComplexRendererFns
3645AMDGPUInstructionSelector::selectVOP3BMods0(MachineOperand &Root) const {
3646 Register Src;
3647 unsigned Mods;
3648 std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);
3649
3650 return {{
3651 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3652 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3653 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
3654 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // omod
3655 }};
3656}
3657
3658InstructionSelector::ComplexRendererFns
3659AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
3660 return {{
3661 [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
3662 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
3663 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // omod
3664 }};
3665}
3666
3667InstructionSelector::ComplexRendererFns
3668AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
3669 Register Src;
3670 unsigned Mods;
3671 std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3672
3673 return {{
3674 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3675 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3676 }};
3677}
3678
3679InstructionSelector::ComplexRendererFns
3680AMDGPUInstructionSelector::selectVOP3BMods(MachineOperand &Root) const {
3681 Register Src;
3682 unsigned Mods;
3683 std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);
3684
3685 return {{
3686 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3687 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3688 }};
3689}
3690
3691InstructionSelector::ComplexRendererFns
3692AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const {
3693 Register Reg = Root.getReg();
3694 const MachineInstr *Def = getDefIgnoringCopies(Reg, *MRI);
3695 if (Def && (Def->getOpcode() == AMDGPU::G_FNEG ||
3696 Def->getOpcode() == AMDGPU::G_FABS))
3697 return {};
3698 return {{
3699 [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3700 }};
3701}
3702
3703std::pair<Register, unsigned>
3704AMDGPUInstructionSelector::selectVOP3PModsImpl(
3705 Register Src, const MachineRegisterInfo &MRI, bool IsDOT) const {
3706 unsigned Mods = 0;
3707 MachineInstr *MI = MRI.getVRegDef(Src);
3708
3709 if (MI && MI->getOpcode() == AMDGPU::G_FNEG &&
3710 // It's possible to see an f32 fneg here, but unlikely.
3711 // TODO: Treat f32 fneg as only high bit.
3712 MRI.getType(Src) == LLT::fixed_vector(2, 16)) {
3713 Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
3714 Src = MI->getOperand(1).getReg();
3715 MI = MRI.getVRegDef(Src);
Value stored to 'MI' is never read
3716 }
3717
3718 // TODO: Match op_sel through g_build_vector_trunc and g_shuffle_vector.
3719 (void)IsDOT; // DOTs do not use OPSEL on gfx940+, check ST.hasDOTOpSelHazard()
3720
3721 // Packed instructions do not have abs modifiers.
3722 Mods |= SISrcMods::OP_SEL_1;
3723
3724 return std::make_pair(Src, Mods);
3725}
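// Example (sketch): a packed operand defined by
//   %v:_(<2 x s16>) = G_FNEG %w
// yields (%w, SISrcMods::OP_SEL_1 | SISrcMods::NEG | SISrcMods::NEG_HI),
// negating both halves; an unmodified source yields (Src, OP_SEL_1).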
3726
3727InstructionSelector::ComplexRendererFns
3728AMDGPUInstructionSelector::selectVOP3PMods(MachineOperand &Root) const {
3729 MachineRegisterInfo &MRI
3730 = Root.getParent()->getParent()->getParent()->getRegInfo();
3731
3732 Register Src;
3733 unsigned Mods;
3734 std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI);
3735
3736 return {{
3737 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3738 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3739 }};
3740}
3741
3742InstructionSelector::ComplexRendererFns
3743AMDGPUInstructionSelector::selectVOP3PModsDOT(MachineOperand &Root) const {
3744 MachineRegisterInfo &MRI
3745 = Root.getParent()->getParent()->getParent()->getRegInfo();
3746
3747 Register Src;
3748 unsigned Mods;
3749 std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI, true);
3750
3751 return {{
3752 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3753 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3754 }};
3755}
3756
3757InstructionSelector::ComplexRendererFns
3758AMDGPUInstructionSelector::selectDotIUVOP3PMods(MachineOperand &Root) const {
3759 // A literal i1 value set in the intrinsic represents the SrcMods for the next
3760 // operand. The value is in the Imm operand as an i1 sign-extended to int64_t.
3761 // 1 (i.e. -1) promotes packed values to signed; 0 treats them as unsigned.
3762 assert((Root.isImm() && (Root.getImm() == -1 || Root.getImm() == 0)) &&
3763        "expected i1 value");
3764 unsigned Mods = SISrcMods::OP_SEL_1;
3765 if (Root.getImm() == -1)
3766 Mods ^= SISrcMods::NEG;
3767 return {{
3768 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3769 }};
3770}
3771
3772InstructionSelector::ComplexRendererFns
3773AMDGPUInstructionSelector::selectWMMAOpSelVOP3PMods(
3774 MachineOperand &Root) const {
3775 assert((Root.isImm() && (Root.getImm() == -1 || Root.getImm() == 0)) &&
3776        "expected i1 value");
3777 unsigned Mods = SISrcMods::OP_SEL_1;
3778 if (Root.getImm() != 0)
3779 Mods |= SISrcMods::OP_SEL_0;
3780
3781 return {{
3782 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3783 }};
3784}
3785
3786InstructionSelector::ComplexRendererFns
3787AMDGPUInstructionSelector::selectVOP3Mods_nnan(MachineOperand &Root) const {
3788 Register Src;
3789 unsigned Mods;
3790 std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3791 if (!isKnownNeverNaN(Src, *MRI))
3792 return None;
3793
3794 return {{
3795 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3796 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3797 }};
3798}
3799
3800InstructionSelector::ComplexRendererFns
3801AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const {
3802 // FIXME: Handle op_sel
3803 return {{
3804 [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
3805 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // src_mods
3806 }};
3807}
3808
3809InstructionSelector::ComplexRendererFns
3810AMDGPUInstructionSelector::selectVINTERPMods(MachineOperand &Root) const {
3811 Register Src;
3812 unsigned Mods;
3813 std::tie(Src, Mods) = selectVOP3ModsImpl(Root,
3814 /* AllowAbs */ false,
3815 /* OpSel */ false,
3816 /* ForceVGPR */ true);
3817
3818 return {{
3819 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3820 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3821 }};
3822}
3823
3824InstructionSelector::ComplexRendererFns
3825AMDGPUInstructionSelector::selectVINTERPModsHi(MachineOperand &Root) const {
3826 Register Src;
3827 unsigned Mods;
3828 std::tie(Src, Mods) = selectVOP3ModsImpl(Root,
3829 /* AllowAbs */ false,
3830 /* OpSel */ true,
3831 /* ForceVGPR */ true);
3832
3833 return {{
3834 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3835 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3836 }};
3837}
3838
3839bool AMDGPUInstructionSelector::selectSmrdOffset(MachineOperand &Root,
3840 Register &Base,
3841 Register *SOffset,
3842 int64_t *Offset) const {
3843 MachineInstr *MI = Root.getParent();
3844 MachineBasicBlock *MBB = MI->getParent();
3845
3846 // FIXME: We should shrink the GEP if the offset is known to fit in 32 bits;
3847 // then we can select all ptr + 32-bit offsets.
3848 SmallVector<GEPInfo, 4> AddrInfo;
3849 getAddrModeInfo(*MI, *MRI, AddrInfo);
3850
3851 if (AddrInfo.empty())
3852 return false;
3853
3854 const GEPInfo &GEPI = AddrInfo[0];
3855 Optional<int64_t> EncodedImm =
3856 AMDGPU::getSMRDEncodedOffset(STI, GEPI.Imm, false);
3857
3858 if (SOffset && Offset) {
3859 if (GEPI.SgprParts.size() == 1 && GEPI.Imm != 0 && EncodedImm &&
3860 AddrInfo.size() > 1) {
3861 const GEPInfo &GEPI2 = AddrInfo[1];
3862 if (GEPI2.SgprParts.size() == 2 && GEPI2.Imm == 0) {
3863 if (Register OffsetReg =
3864 matchZeroExtendFromS32(*MRI, GEPI2.SgprParts[1])) {
3865 Base = GEPI2.SgprParts[0];
3866 *SOffset = OffsetReg;
3867 *Offset = *EncodedImm;
3868 return true;
3869 }
3870 }
3871 }
3872 return false;
3873 }
3874
3875 if (Offset && GEPI.SgprParts.size() == 1 && EncodedImm) {
3876 Base = GEPI.SgprParts[0];
3877 *Offset = *EncodedImm;
3878 return true;
3879 }
3880
3881 // SGPR offset is unsigned.
3882 if (SOffset && GEPI.SgprParts.size() == 1 && isUInt<32>(GEPI.Imm) &&
3883 GEPI.Imm != 0) {
3884 // If we make it this far we have a load with a 32-bit immediate offset.
3885 // It is OK to select this using an SGPR offset, because we have already
3886 // failed trying to select this load into one of the _IMM variants since
3887 // the _IMM patterns are considered before the _SGPR patterns.
3888 Base = GEPI.SgprParts[0];
3889 *SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3890 BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), *SOffset)
3891 .addImm(GEPI.Imm);
3892 return true;
3893 }
3894
3895 if (SOffset && GEPI.SgprParts.size() && GEPI.Imm == 0) {
3896 if (Register OffsetReg = matchZeroExtendFromS32(*MRI, GEPI.SgprParts[1])) {
3897 Base = GEPI.SgprParts[0];
3898 *SOffset = OffsetReg;
3899 return true;
3900 }
3901 }
3902
3903 return false;
3904}
3905
3906InstructionSelector::ComplexRendererFns
3907AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
3908 Register Base;
3909 int64_t Offset;
3910 if (!selectSmrdOffset(Root, Base, /* SOffset= */ nullptr, &Offset))
3911 return None;
3912
3913 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Base); },
3914 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }}};
3915}
3916
3917InstructionSelector::ComplexRendererFns
3918AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
3919 SmallVector<GEPInfo, 4> AddrInfo;
3920 getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3921
3922 if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3923 return None;
3924
3925 const GEPInfo &GEPInfo = AddrInfo[0];
3926 Register PtrReg = GEPInfo.SgprParts[0];
3927 Optional<int64_t> EncodedImm =
3928 AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm);
3929 if (!EncodedImm)
3930 return None;
3931
3932 return {{
3933 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3934 [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3935 }};
3936}
3937
3938InstructionSelector::ComplexRendererFns
3939AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
3940 Register Base, SOffset;
3941 if (!selectSmrdOffset(Root, Base, &SOffset, /* Offset= */ nullptr))
3942 return None;
3943
3944 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Base); },
3945 [=](MachineInstrBuilder &MIB) { MIB.addReg(SOffset); }}};
3946}
3947
3948InstructionSelector::ComplexRendererFns
3949AMDGPUInstructionSelector::selectSmrdSgprImm(MachineOperand &Root) const {
3950 Register Base, SOffset;
3951 int64_t Offset;
3952 if (!selectSmrdOffset(Root, Base, &SOffset, &Offset))
3953 return None;
3954
3955 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Base); },
3956 [=](MachineInstrBuilder &MIB) { MIB.addReg(SOffset); },
3957 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }}};
3958}
3959
3960std::pair<Register, int>
3961AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root,
3962 uint64_t FlatVariant) const {
3963 MachineInstr *MI = Root.getParent();
3964
3965 auto Default = std::make_pair(Root.getReg(), 0);
3966
3967 if (!STI.hasFlatInstOffsets())
3968 return Default;
3969
3970 Register PtrBase;
3971 int64_t ConstOffset;
3972 std::tie(PtrBase, ConstOffset) =
3973 getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3974 if (ConstOffset == 0)
3975 return Default;
3976
3977 unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
3978 if (!TII.isLegalFLATOffset(ConstOffset, AddrSpace, FlatVariant))
3979 return Default;
3980
3981 return std::make_pair(PtrBase, ConstOffset);
3982}
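// Example (sketch): for
//   %p = G_PTR_ADD %base, G_CONSTANT(40)
// with 40 legal for the given FLAT variant this returns (%base, 40);
// otherwise the (Root.getReg(), 0) default is kept.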
3983
3984InstructionSelector::ComplexRendererFns
3985AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
3986 auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FLAT);
3987
3988 return {{
3989 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3990 [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3991 }};
3992}
3993
3994InstructionSelector::ComplexRendererFns
3995AMDGPUInstructionSelector::selectGlobalOffset(MachineOperand &Root) const {
3996 auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatGlobal);
3997
3998 return {{
3999 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
4000 [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
4001 }};
4002}
4003
4004InstructionSelector::ComplexRendererFns
4005AMDGPUInstructionSelector::selectScratchOffset(MachineOperand &Root) const {
4006 auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatScratch);
4007
4008 return {{
4009 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
4010 [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
4011 }};
4012}
4013
4014// Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset)
4015InstructionSelector::ComplexRendererFns
4016AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root) const {
4017 Register Addr = Root.getReg();
4018 Register PtrBase;
4019 int64_t ConstOffset;
4020 int64_t ImmOffset = 0;
4021
4022 // Match the immediate offset first, which canonically is moved as low as
4023 // possible.
4024 std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
4025
4026 if (ConstOffset != 0) {
4027 if (TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::GLOBAL_ADDRESS,
4028 SIInstrFlags::FlatGlobal)) {
4029 Addr = PtrBase;
4030 ImmOffset = ConstOffset;
4031 } else {
4032 auto PtrBaseDef = getDefSrcRegIgnoringCopies(PtrBase, *MRI);
4033 if (isSGPR(PtrBaseDef->Reg)) {
4034 if (ConstOffset > 0) {
4035 // Offset is too large.
4036 //
4037 // saddr + large_offset -> saddr +
4038 // (voffset = large_offset & ~MaxOffset) +
4039 // (large_offset & MaxOffset);
4040 int64_t SplitImmOffset, RemainderOffset;
4041 std::tie(SplitImmOffset, RemainderOffset) = TII.splitFlatOffset(
4042 ConstOffset, AMDGPUAS::GLOBAL_ADDRESS, SIInstrFlags::FlatGlobal);
4043
4044 if (isUInt<32>(RemainderOffset)) {
4045 MachineInstr *MI = Root.getParent();
4046 MachineBasicBlock *MBB = MI->getParent();
4047 Register HighBits =
4048 MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
4049
4050 BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
4051 HighBits)
4052 .addImm(RemainderOffset);
4053
4054 return {{
4055 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrBase); }, // saddr
4056 [=](MachineInstrBuilder &MIB) {
4057 MIB.addReg(HighBits);
4058 }, // voffset
4059 [=](MachineInstrBuilder &MIB) { MIB.addImm(SplitImmOffset); },
4060 }};
4061 }
4062 }
4063
4064 // We are adding a 64-bit SGPR and a constant. If the constant bus limit
4065 // is 1 we would need to perform 1 or 2 extra moves for each half of
4066 // the constant, so it is better to do a scalar add and then issue a
4067 // single VALU instruction to materialize zero. Otherwise it takes fewer
4068 // instructions to perform VALU adds with immediates or inline literals.
4069 unsigned NumLiterals =
4070 !TII.isInlineConstant(APInt(32, ConstOffset & 0xffffffff)) +
4071 !TII.isInlineConstant(APInt(32, ConstOffset >> 32));
4072 if (STI.getConstantBusLimit(AMDGPU::V_ADD_U32_e64) > NumLiterals)
4073 return None;
4074 }
4075 }
4076 }
4077
4078 // Match the variable offset.
4079 auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
4080 if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
4081 // Look through the SGPR->VGPR copy.
4082 Register SAddr =
4083 getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI);
4084
4085 if (SAddr && isSGPR(SAddr)) {
4086 Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg();
4087
4088 // It's possible voffset is an SGPR here, but the copy to VGPR will be
4089 // inserted later.
4090 if (Register VOffset = matchZeroExtendFromS32(*MRI, PtrBaseOffset)) {
4091 return {{[=](MachineInstrBuilder &MIB) { // saddr
4092 MIB.addReg(SAddr);
4093 },
4094 [=](MachineInstrBuilder &MIB) { // voffset
4095 MIB.addReg(VOffset);
4096 },
4097 [=](MachineInstrBuilder &MIB) { // offset
4098 MIB.addImm(ImmOffset);
4099 }}};
4100 }
4101 }
4102 }
4103
4104 // FIXME: We should probably have folded COPY (G_IMPLICIT_DEF) earlier, and
4105 // drop this.
4106 if (AddrDef->MI->getOpcode() == AMDGPU::G_IMPLICIT_DEF ||
4107 AddrDef->MI->getOpcode() == AMDGPU::G_CONSTANT || !isSGPR(AddrDef->Reg))
4108 return None;
4109
4110 // It's cheaper to materialize a single 32-bit zero for vaddr than the two
4111 // moves required to copy a 64-bit SGPR to VGPR.
4112 MachineInstr *MI = Root.getParent();
4113 MachineBasicBlock *MBB = MI->getParent();
4114 Register VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
4115
4116 BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32), VOffset)
4117 .addImm(0);
4118
4119 return {{
4120 [=](MachineInstrBuilder &MIB) { MIB.addReg(AddrDef->Reg); }, // saddr
4121 [=](MachineInstrBuilder &MIB) { MIB.addReg(VOffset); }, // voffset
4122 [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4123 }};
4124}
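// Typical match, as a sketch: for
//   %off64:_(s64) = G_ZEXT %voff:_(s32)
//   %p = G_PTR_ADD %sgpr_base, %off64
// (plus a folded legal constant C) this renders saddr = %sgpr_base,
// voffset = %voff, offset = C; with no variable part, a zero VGPR is
// materialized for voffset as above.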
4125
4126InstructionSelector::ComplexRendererFns
4127AMDGPUInstructionSelector::selectScratchSAddr(MachineOperand &Root) const {
4128 Register Addr = Root.getReg();
4129 Register PtrBase;
4130 int64_t ConstOffset;
4131 int64_t ImmOffset = 0;
4132
4133 // Match the immediate offset first, which canonically is moved as low as
4134 // possible.
4135 std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
4136
4137 if (ConstOffset != 0 &&
4138 TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS,
4139 SIInstrFlags::FlatScratch)) {
4140 Addr = PtrBase;
4141 ImmOffset = ConstOffset;
4142 }
4143
4144 auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
4145 if (AddrDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) {
4146 int FI = AddrDef->MI->getOperand(1).getIndex();
4147 return {{
4148 [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr
4149 [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4150 }};
4151 }
4152
4153 Register SAddr = AddrDef->Reg;
4154
4155 if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
4156 Register LHS = AddrDef->MI->getOperand(1).getReg();
4157 Register RHS = AddrDef->MI->getOperand(2).getReg();
4158 auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI);
4159 auto RHSDef = getDefSrcRegIgnoringCopies(RHS, *MRI);
4160
4161 if (LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX &&
4162 isSGPR(RHSDef->Reg)) {
4163 int FI = LHSDef->MI->getOperand(1).getIndex();
4164 MachineInstr &I = *Root.getParent();
4165 MachineBasicBlock *BB = I.getParent();
4166 const DebugLoc &DL = I.getDebugLoc();
4167 SAddr = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
4168
4169 BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_I32), SAddr)
4170 .addFrameIndex(FI)
4171 .addReg(RHSDef->Reg);
4172 }
4173 }
4174
4175 if (!isSGPR(SAddr))
4176 return None;
4177
4178 return {{
4179 [=](MachineInstrBuilder &MIB) { MIB.addReg(SAddr); }, // saddr
4180 [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4181 }};
4182}
4183
4184// Check whether the flat scratch SVS swizzle bug affects this access.
4185bool AMDGPUInstructionSelector::checkFlatScratchSVSSwizzleBug(
4186 Register VAddr, Register SAddr, uint64_t ImmOffset) const {
4187 if (!Subtarget->hasFlatScratchSVSSwizzleBug())
4188 return false;
4189
4190 // The bug affects the swizzling of SVS accesses if there is any carry out
4191 // from the two low order bits (i.e. from bit 1 into bit 2) when adding
4192 // voffset to (soffset + inst_offset).
4193 auto VKnown = KnownBits->getKnownBits(VAddr);
4194 auto SKnown = KnownBits::computeForAddSub(
4195 true, false, KnownBits->getKnownBits(SAddr),
4196 KnownBits::makeConstant(APInt(32, ImmOffset)));
4197 uint64_t VMax = VKnown.getMaxValue().getZExtValue();
4198 uint64_t SMax = SKnown.getMaxValue().getZExtValue();
4199 return (VMax & 3) + (SMax & 3) >= 4;
4200}
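// Worked instance of the check (sketch): if VAddr's known bits allow
// (VMax & 3) == 3 and (SAddr + ImmOffset) allows (SMax & 3) == 3, then
// 3 + 3 = 6 >= 4, so a carry from bit 1 into bit 2 is possible and the
// access is rejected; a 4-byte-aligned VAddr gives (VMax & 3) == 0, and
// 0 + 3 < 4 can never carry.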
4201
4202InstructionSelector::ComplexRendererFns
4203AMDGPUInstructionSelector::selectScratchSVAddr(MachineOperand &Root) const {
4204 Register Addr = Root.getReg();
4205 Register PtrBase;
4206 int64_t ConstOffset;
4207 int64_t ImmOffset = 0;
4208
4209 // Match the immediate offset first, which canonically is moved as low as
4210 // possible.
4211 std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
4212
4213 if (ConstOffset != 0 &&
4214 TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS, true)) {
4215 Addr = PtrBase;
4216 ImmOffset = ConstOffset;
4217 }
4218
4219 auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
4220 if (AddrDef->MI->getOpcode() != AMDGPU::G_PTR_ADD)
4221 return None;
4222
4223 Register RHS = AddrDef->MI->getOperand(2).getReg();
4224 if (RBI.getRegBank(RHS, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID)
4225 return None;
4226
4227 Register LHS = AddrDef->MI->getOperand(1).getReg();
4228 auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI);
4229
4230 if (checkFlatScratchSVSSwizzleBug(RHS, LHS, ImmOffset))
4231 return None;
4232
4233 if (LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) {
4234 int FI = LHSDef->MI->getOperand(1).getIndex();
4235 return {{
4236 [=](MachineInstrBuilder &MIB) { MIB.addReg(RHS); }, // vaddr
4237 [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr
4238 [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4239 }};
4240 }
4241
4242 if (!isSGPR(LHS))
4243 return None;
4244
4245 return {{
4246 [=](MachineInstrBuilder &MIB) { MIB.addReg(RHS); }, // vaddr
4247 [=](MachineInstrBuilder &MIB) { MIB.addReg(LHS); }, // saddr
4248 [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4249 }};
4250}
4251
4252InstructionSelector::ComplexRendererFns
4253AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
4254 MachineInstr *MI = Root.getParent();
4255 MachineBasicBlock *MBB = MI->getParent();
4256 MachineFunction *MF = MBB->getParent();
4257 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
4258
4259 int64_t Offset = 0;
4260 if (mi_match(Root.getReg(), *MRI, m_ICst(Offset)) &&
4261 Offset != TM.getNullPointerValue(AMDGPUAS::PRIVATE_ADDRESS)) {
4262 Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
4263
4264 // TODO: Should this be inside the render function? The iterator seems to
4265 // move.
4266 BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
4267 HighBits)
4268 .addImm(Offset & ~4095);
4269
4270 return {{[=](MachineInstrBuilder &MIB) { // rsrc
4271 MIB.addReg(Info->getScratchRSrcReg());
4272 },
4273 [=](MachineInstrBuilder &MIB) { // vaddr
4274 MIB.addReg(HighBits);
4275 },
4276 [=](MachineInstrBuilder &MIB) { // soffset
4277 // Use constant zero for soffset and rely on eliminateFrameIndex
4278 // to choose the appropriate frame register if need be.
4279 MIB.addImm(0);
4280 },
4281 [=](MachineInstrBuilder &MIB) { // offset
4282 MIB.addImm(Offset & 4095);
4283 }}};
4284 }
4285
4286 assert(Offset == 0 || Offset == -1);
4287
4288 // Try to fold a frame index directly into the MUBUF vaddr field, and any
4289 // offsets.
4290 Optional<int> FI;
4291 Register VAddr = Root.getReg();
4292 if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) {
4293 Register PtrBase;
4294 int64_t ConstOffset;
4295 std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(VAddr, *MRI);
4296 if (ConstOffset != 0) {
4297 if (SIInstrInfo::isLegalMUBUFImmOffset(ConstOffset) &&
4298 (!STI.privateMemoryResourceIsRangeChecked() ||
4299 KnownBits->signBitIsZero(PtrBase))) {
4300 const MachineInstr *PtrBaseDef = MRI->getVRegDef(PtrBase);
4301 if (PtrBaseDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
4302 FI = PtrBaseDef->getOperand(1).getIndex();
4303 else
4304 VAddr = PtrBase;
4305 Offset = ConstOffset;
4306 }
4307 } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
4308 FI = RootDef->getOperand(1).getIndex();
4309 }
4310 }
4311
4312 return {{[=](MachineInstrBuilder &MIB) { // rsrc
4313 MIB.addReg(Info->getScratchRSrcReg());
4314 },
4315 [=](MachineInstrBuilder &MIB) { // vaddr
4316 if (FI)
4317 MIB.addFrameIndex(FI.value());
4318 else
4319 MIB.addReg(VAddr);
4320 },
4321 [=](MachineInstrBuilder &MIB) { // soffset
4322 // Use constant zero for soffset and rely on eliminateFrameIndex
4323 // to choose the appropriate frame register if need be.
4324 MIB.addImm(0);
4325 },
4326 [=](MachineInstrBuilder &MIB) { // offset
4327 MIB.addImm(Offset);
4328 }}};
4329}
4330
4331bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base,
4332 int64_t Offset) const {
4333 if (!isUInt<16>(Offset))
4334 return false;
4335
4336 if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
4337 return true;
4338
4339 // On Southern Islands, instructions with a negative base value and an offset
4340 // don't seem to work.
4341 return KnownBits->signBitIsZero(Base);
4342}
4343
4344bool AMDGPUInstructionSelector::isDSOffset2Legal(Register Base, int64_t Offset0,
4345 int64_t Offset1,
4346 unsigned Size) const {
4347 if (Offset0 % Size != 0 || Offset1 % Size != 0)
4348 return false;
4349 if (!isUInt<8>(Offset0 / Size) || !isUInt<8>(Offset1 / Size))
4350 return false;
4351
4352 if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
4353 return true;
4354
4355 // On Southern Islands, instructions with a negative base value and an offset
4356 // don't seem to work.
4357 return KnownBits->signBitIsZero(Base);
4358}
4359
4360bool AMDGPUInstructionSelector::isUnneededShiftMask(const MachineInstr &MI,
4361 unsigned ShAmtBits) const {
4362 assert(MI.getOpcode() == TargetOpcode::G_AND);
4363
4364 Optional<APInt> RHS = getIConstantVRegVal(MI.getOperand(2).getReg(), *MRI);
4365 if (!RHS)
4366 return false;
4367
4368 if (RHS->countTrailingOnes() >= ShAmtBits)
4369 return true;
4370
4371 const APInt &LHSKnownZeros =
4372 KnownBits->getKnownZeroes(MI.getOperand(1).getReg());
4373 return (LHSKnownZeros | *RHS).countTrailingOnes() >= ShAmtBits;
4374}
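// Aside (editorial sketch, not part of this file): the G_AND mask is
// redundant when it cannot clear any bit the shift actually reads. A 32-bit
// shift uses only the low 5 bits of its amount, so (x & 31) is equivalent to
// x in the shift-amount position. A constant-RHS model of the first check:
#include <cassert>

static bool maskCoversShAmt(unsigned MaskRHS, unsigned ShAmtBits) {
  // Equivalent to RHS->countTrailingOnes() >= ShAmtBits.
  unsigned TrailingOnes = 0;
  while (MaskRHS & 1) { ++TrailingOnes; MaskRHS >>= 1; }
  return TrailingOnes >= ShAmtBits;
}

int main() {
  assert(maskCoversShAmt(31, 5));  // 0b11111 keeps all 5 used bits
  assert(!maskCoversShAmt(15, 5)); // 0b1111 could clear bit 4
}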
4375
4376 // Return the wave-level SGPR base address if this is a wave address.
4377static Register getWaveAddress(const MachineInstr *Def) {
4378 return Def->getOpcode() == AMDGPU::G_AMDGPU_WAVE_ADDRESS
4379 ? Def->getOperand(1).getReg()
4380 : Register();
4381}
4382
4383InstructionSelector::ComplexRendererFns
4384AMDGPUInstructionSelector::selectMUBUFScratchOffset(
4385 MachineOperand &Root) const {
4386 Register Reg = Root.getReg();
4387 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
4388
4389 const MachineInstr *Def = MRI->getVRegDef(Reg);
4390 if (Register WaveBase = getWaveAddress(Def)) {
4391 return {{
4392 [=](MachineInstrBuilder &MIB) { // rsrc
4393 MIB.addReg(Info->getScratchRSrcReg());
4394 },
4395 [=](MachineInstrBuilder &MIB) { // soffset
4396 MIB.addReg(WaveBase);
4397 },
4398 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // offset
4399 }};
4400 }
4401
4402 int64_t Offset = 0;
4403
4404 // FIXME: Copy check is a hack
4405 Register BasePtr;
4406 if (mi_match(Reg, *MRI, m_GPtrAdd(m_Reg(BasePtr), m_Copy(m_ICst(Offset))))) {
4407 if (!SIInstrInfo::isLegalMUBUFImmOffset(Offset))
4408 return {};
4409 const MachineInstr *BasePtrDef = MRI->getVRegDef(BasePtr);
4410 Register WaveBase = getWaveAddress(BasePtrDef);
4411 if (!WaveBase)
4412 return {};
4413
4414 return {{
4415 [=](MachineInstrBuilder &MIB) { // rsrc
4416 MIB.addReg(Info->getScratchRSrcReg());
4417 },
4418 [=](MachineInstrBuilder &MIB) { // soffset
4419 MIB.addReg(WaveBase);
4420 },
4421 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
4422 }};
4423 }
4424
4425 if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||
4426 !SIInstrInfo::isLegalMUBUFImmOffset(Offset))
4427 return {};
4428
4429 return {{
4430 [=](MachineInstrBuilder &MIB) { // rsrc
4431 MIB.addReg(Info->getScratchRSrcReg());
4432 },
4433 [=](MachineInstrBuilder &MIB) { // soffset
4434 MIB.addImm(0);
4435 },
4436 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
4437 }};
4438}
4439
4440std::pair<Register, unsigned>
4441AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const {
4442 const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
4443 if (!RootDef)
4444 return std::make_pair(Root.getReg(), 0);
4445
4446 int64_t ConstAddr = 0;
4447
4448 Register PtrBase;
4449 int64_t Offset;
4450 std::tie(PtrBase, Offset) =
4451 getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
4452
4453 if (Offset) {
4454 if (isDSOffsetLegal(PtrBase, Offset)) {
4455 // (add n0, c0)
4456 return std::make_pair(PtrBase, Offset);
4457 }
4458 } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
4459 // TODO
4460
4461
4462 } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
4463 // TODO
4464
4465 }
4466
4467 return std::make_pair(Root.getReg(), 0);
4468}
4469
4470InstructionSelector::ComplexRendererFns
4471AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const {
4472 Register Reg;
4473 unsigned Offset;
4474 std::tie(Reg, Offset) = selectDS1Addr1OffsetImpl(Root);
4475 return {{
4476 [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
4477 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }
4478 }};
4479}
4480
4481InstructionSelector::ComplexRendererFns
4482AMDGPUInstructionSelector::selectDS64Bit4ByteAligned(MachineOperand &Root) const {
4483 return selectDSReadWrite2(Root, 4);
4484}
4485
4486InstructionSelector::ComplexRendererFns
4487AMDGPUInstructionSelector::selectDS128Bit8ByteAligned(MachineOperand &Root) const {
4488 return selectDSReadWrite2(Root, 8);
4489}
4490
4491InstructionSelector::ComplexRendererFns
4492AMDGPUInstructionSelector::selectDSReadWrite2(MachineOperand &Root,
4493 unsigned Size) const {
4494 Register Reg;
4495 unsigned Offset;
4496 std::tie(Reg, Offset) = selectDSReadWrite2Impl(Root, Size);
4497 return {{
4498 [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
4499 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); },
4500 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset + 1); }
4501 }};
4502}
4503
4504std::pair<Register, unsigned>
4505AMDGPUInstructionSelector::selectDSReadWrite2Impl(MachineOperand &Root,
4506 unsigned Size) const {
4507 const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
4508 if (!RootDef)
4509 return std::make_pair(Root.getReg(), 0);
4510
4511 int64_t ConstAddr = 0;
4512
4513 Register PtrBase;
4514 int64_t Offset;
4515 std::tie(PtrBase, Offset) =
4516 getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
4517
4518 if (Offset) {
4519 int64_t OffsetValue0 = Offset;
4520 int64_t OffsetValue1 = Offset + Size;
4521 if (isDSOffset2Legal(PtrBase, OffsetValue0, OffsetValue1, Size)) {
4522 // (add n0, c0)
4523 return std::make_pair(PtrBase, OffsetValue0 / Size);
4524 }
4525 } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
4526 // TODO
4527
4528 } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
4529 // TODO
4530
4531 }
4532
4533 return std::make_pair(Root.getReg(), 0);
4534}
4535
4536 /// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right-hand side, return
4537 /// the base value with the constant offset. There may be intervening copies
4538 /// between \p Root and the identified constant. Returns {\p Root, 0} if this
4539 /// does not match the pattern.
4540std::pair<Register, int64_t>
4541AMDGPUInstructionSelector::getPtrBaseWithConstantOffset(
4542 Register Root, const MachineRegisterInfo &MRI) const {
4543 MachineInstr *RootI = getDefIgnoringCopies(Root, MRI);
4544 if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD)
4545 return {Root, 0};
4546
4547 MachineOperand &RHS = RootI->getOperand(2);
4548 Optional<ValueAndVReg> MaybeOffset =
4549 getIConstantVRegValWithLookThrough(RHS.getReg(), MRI);
4550 if (!MaybeOffset)
4551 return {Root, 0};
4552 return {RootI->getOperand(1).getReg(), MaybeOffset->Value.getSExtValue()};
4553}
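// Aside (editorial example, not part of this file): the matched generic MIR
// shape, with assumed p5/s32 types:
//
//   %base:_(p5) = ...
//   %c:_(s32) = G_CONSTANT i32 16
//   %ptr:_(p5) = G_PTR_ADD %base, %c
//
// Here getPtrBaseWithConstantOffset(%ptr, MRI) yields {%base, 16}; COPYs
// between %ptr (or the constant) and their defining instructions are looked
// through.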
4554
4555static void addZeroImm(MachineInstrBuilder &MIB) {
4556 MIB.addImm(0);
4557}
4558
4559 /// Return a resource descriptor for use with an arbitrary 64-bit pointer. If
4560 /// \p BasePtr is not valid, a null base pointer will be used.
4561static Register buildRSRC(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4562 uint32_t FormatLo, uint32_t FormatHi,
4563 Register BasePtr) {
4564 Register RSrc2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
4565 Register RSrc3 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
4566 Register RSrcHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
4567 Register RSrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
4568
4569 B.buildInstr(AMDGPU::S_MOV_B32)
4570 .addDef(RSrc2)
4571 .addImm(FormatLo);
4572 B.buildInstr(AMDGPU::S_MOV_B32)
4573 .addDef(RSrc3)
4574 .addImm(FormatHi);
4575
4576 // Build the register half holding the constants before building the full
4577 // 128-bit register. If we are building multiple resource descriptors, this
4578 // allows the 2-component constant register to be CSEd.
4579 B.buildInstr(AMDGPU::REG_SEQUENCE)
4580 .addDef(RSrcHi)
4581 .addReg(RSrc2)
4582 .addImm(AMDGPU::sub0)
4583 .addReg(RSrc3)
4584 .addImm(AMDGPU::sub1);
4585
4586 Register RSrcLo = BasePtr;
4587 if (!BasePtr) {
4588 RSrcLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
4589 B.buildInstr(AMDGPU::S_MOV_B64)
4590 .addDef(RSrcLo)
4591 .addImm(0);
4592 }
4593
4594 B.buildInstr(AMDGPU::REG_SEQUENCE)
4595 .addDef(RSrc)
4596 .addReg(RSrcLo)
4597 .addImm(AMDGPU::sub0_sub1)
4598 .addReg(RSrcHi)
4599 .addImm(AMDGPU::sub2_sub3);
4600
4601 return RSrc;
4602}
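// Aside (editorial note): per the REG_SEQUENCE operands above, the assembled
// SRD lays out as
//   sub0_sub1 = 64-bit base pointer (S_MOV_B64 of 0 when BasePtr is invalid)
//   sub2_sub3 = the two 32-bit format words (FormatLo, FormatHi)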
4603
4604static Register buildAddr64RSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4605 const SIInstrInfo &TII, Register BasePtr) {
4606 uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
4607
4608 // FIXME: Why are half the "default" bits ignored based on the addressing
4609 // mode?
4610 return buildRSRC(B, MRI, 0, Hi_32(DefaultFormat), BasePtr);
4611}
4612
4613static Register buildOffsetSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4614 const SIInstrInfo &TII, Register BasePtr) {
4615 uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
4616
4617 // FIXME: Why are half the "default" bits ignored based on the addressing
4618 // mode?
4619 return buildRSRC(B, MRI, -1, Hi_32(DefaultFormat), BasePtr);
4620}
4621
4622AMDGPUInstructionSelector::MUBUFAddressData
4623AMDGPUInstructionSelector::parseMUBUFAddress(Register Src) const {
4624 MUBUFAddressData Data;
4625 Data.N0 = Src;
4626
4627 Register PtrBase;
4628 int64_t Offset;
4629
4630 std::tie(PtrBase, Offset) = getPtrBaseWithConstantOffset(Src, *MRI);
4631 if (isUInt<32>(Offset)) {
4632 Data.N0 = PtrBase;
4633 Data.Offset = Offset;
4634 }
4635
4636 if (MachineInstr *InputAdd =
4637 getOpcodeDef(TargetOpcode::G_PTR_ADD, Data.N0, *MRI)) {
4638 Data.N2 = InputAdd->getOperand(1).getReg();
4639 Data.N3 = InputAdd->getOperand(2).getReg();
4640
4641 // FIXME: Need to fix the extra SGPR->VGPR copies that get inserted.
4642 // FIXME: We don't know that this was defined by operand 0.
4643 //
4644 // TODO: Remove this when we have copy folding optimizations after
4645 // RegBankSelect.
4646 Data.N2 = getDefIgnoringCopies(Data.N2, *MRI)->getOperand(0).getReg();
4647 Data.N3 = getDefIgnoringCopies(Data.N3, *MRI)->getOperand(0).getReg();
4648 }
4649
4650 return Data;
4651}
4652
4653 /// Return true if the addr64 MUBUF mode should be used for the given address.
4654bool AMDGPUInstructionSelector::shouldUseAddr64(MUBUFAddressData Addr) const {
4655 // (ptr_add N2, N3) -> addr64, or
4656 // (ptr_add (ptr_add N2, N3), C1) -> addr64
4657 if (Addr.N2)
4658 return true;
4659
4660 const RegisterBank *N0Bank = RBI.getRegBank(Addr.N0, *MRI, TRI);
4661 return N0Bank->getID() == AMDGPU::VGPRRegBankID;
4662}
4663
4664/// Split an immediate offset \p ImmOffset depending on whether it fits in the
4665/// immediate field. Modifies \p ImmOffset and sets \p SOffset to the variable
4666/// component.
4667void AMDGPUInstructionSelector::splitIllegalMUBUFOffset(
4668 MachineIRBuilder &B, Register &SOffset, int64_t &ImmOffset) const {
4669 if (SIInstrInfo::isLegalMUBUFImmOffset(ImmOffset))
4670 return;
4671
4672 // Illegal offset, store it in soffset.
4673 SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
4674 B.buildInstr(AMDGPU::S_MOV_B32)
4675 .addDef(SOffset)
4676 .addImm(ImmOffset);
4677 ImmOffset = 0;
4678}
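// Aside (editorial sketch, not part of this file): an offset that fails
// isLegalMUBUFImmOffset is moved wholesale into soffset (via S_MOV_B32)
// rather than split into a legal immediate plus remainder. A standalone
// model, with hypothetical names and the 12-bit field assumed:
#include <cassert>
#include <cstdint>

static void splitOffset(int64_t &ImmOffset, bool &NeedSOffset,
                        int64_t &SOffsetVal) {
  if (ImmOffset >= 0 && ImmOffset < 4096)
    return;             // already encodable in the immediate field
  NeedSOffset = true;   // stands in for materializing an S_MOV_B32
  SOffsetVal = ImmOffset;
  ImmOffset = 0;
}

int main() {
  int64_t Imm = 5000, SOff = 0;
  bool Need = false;
  splitOffset(Imm, Need, SOff);
  assert(Need && SOff == 5000 && Imm == 0);
}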
4679
4680bool AMDGPUInstructionSelector::selectMUBUFAddr64Impl(
4681 MachineOperand &Root, Register &VAddr, Register &RSrcReg,
4682 Register &SOffset, int64_t &Offset) const {
4683 // FIXME: Predicates should stop this from reaching here.
4684 // addr64 bit was removed for volcanic islands.
4685 if (!STI.hasAddr64() || STI.useFlatForGlobal())
4686 return false;
4687
4688 MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
4689 if (!shouldUseAddr64(AddrData))
4690 return false;
4691
4692 Register N0 = AddrData.N0;
4693 Register N2 = AddrData.N2;
4694 Register N3 = AddrData.N3;
4695 Offset = AddrData.Offset;
4696
4697 // Base pointer for the SRD.
4698 Register SRDPtr;
4699
4700 if (N2) {
4701 if (RBI.getRegBank(N2, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4702 assert(N3);
4703 if (RBI.getRegBank(N3, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4704 // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
4705 // addr64, and construct the default resource from a 0 address.
4706 VAddr = N0;
4707 } else {
4708 SRDPtr = N3;
4709 VAddr = N2;
4710 }
4711 } else {
4712 // N2 is not divergent.
4713 SRDPtr = N2;
4714 VAddr = N3;
4715 }
4716 } else if (RBI.getRegBank(N0, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4717 // Use the default null pointer in the resource
4718 VAddr = N0;
4719 } else {
4720 // N0 -> offset, or
4721 // (N0 + C1) -> offset
4722 SRDPtr = N0;
4723 }
4724
4725 MachineIRBuilder B(*Root.getParent());
4726 RSrcReg = buildAddr64RSrc(B, *MRI, TII, SRDPtr);
4727 splitIllegalMUBUFOffset(B, SOffset, Offset);
4728 return true;
4729}
4730
4731bool AMDGPUInstructionSelector::selectMUBUFOffsetImpl(
4732 MachineOperand &Root, Register &RSrcReg, Register &SOffset,
4733 int64_t &Offset) const {
4734
4735 // FIXME: Pattern should not reach here.
4736 if (STI.useFlatForGlobal())
4737 return false;
4738
4739 MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
4740 if (shouldUseAddr64(AddrData))
4741 return false;
4742
4743 // N0 -> offset, or
4744 // (N0 + C1) -> offset
4745 Register SRDPtr = AddrData.N0;
4746 Offset = AddrData.Offset;
4747
4748 // TODO: Look through extensions for 32-bit soffset.
4749 MachineIRBuilder B(*Root.getParent());
4750
4751 RSrcReg = buildOffsetSrc(B, *MRI, TII, SRDPtr);
4752 splitIllegalMUBUFOffset(B, SOffset, Offset);
4753 return true;
4754}
4755
4756InstructionSelector::ComplexRendererFns
4757AMDGPUInstructionSelector::selectMUBUFAddr64(MachineOperand &Root) const {
4758 Register VAddr;
4759 Register RSrcReg;
4760 Register SOffset;
4761 int64_t Offset = 0;
4762
4763 if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
4764 return {};
4765
4766 // FIXME: Use defaulted operands for trailing 0s and remove from the complex
4767 // pattern.
4768 return {{
4769 [=](MachineInstrBuilder &MIB) { // rsrc
4770 MIB.addReg(RSrcReg);
4771 },
4772 [=](MachineInstrBuilder &MIB) { // vaddr
4773 MIB.addReg(VAddr);
4774 },
4775 [=](MachineInstrBuilder &MIB) { // soffset
4776 if (SOffset)
4777 MIB.addReg(SOffset);
4778 else
4779 MIB.addImm(0);
4780 },
4781 [=](MachineInstrBuilder &MIB) { // offset
4782 MIB.addImm(Offset);
4783 },
4784 addZeroImm, // cpol
4785 addZeroImm, // tfe
4786 addZeroImm // swz
4787 }};
4788}
4789
4790InstructionSelector::ComplexRendererFns
4791AMDGPUInstructionSelector::selectMUBUFOffset(MachineOperand &Root) const {
4792 Register RSrcReg;
4793 Register SOffset;
4794 int64_t Offset = 0;
4795
4796 if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
4797 return {};
4798
4799 return {{
4800 [=](MachineInstrBuilder &MIB) { // rsrc
4801 MIB.addReg(RSrcReg);
4802 },
4803 [=](MachineInstrBuilder &MIB) { // soffset
4804 if (SOffset)
4805 MIB.addReg(SOffset);
4806 else
4807 MIB.addImm(0);
4808 },
4809 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
4810 addZeroImm, // cpol
4811 addZeroImm, // tfe
4812 addZeroImm, // swz
4813 }};
4814}
4815
4816InstructionSelector::ComplexRendererFns
4817AMDGPUInstructionSelector::selectMUBUFAddr64Atomic(MachineOperand &Root) const {
4818 Register VAddr;
4819 Register RSrcReg;
4820 Register SOffset;
4821 int64_t Offset = 0;
4822
4823 if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
4824 return {};
4825
4826 // FIXME: Use defaulted operands for trailing 0s and remove from the complex
4827 // pattern.
4828 return {{
4829 [=](MachineInstrBuilder &MIB) { // rsrc
4830 MIB.addReg(RSrcReg);
4831 },
4832 [=](MachineInstrBuilder &MIB) { // vaddr
4833 MIB.addReg(VAddr);
4834 },
4835 [=](MachineInstrBuilder &MIB) { // soffset
4836 if (SOffset)
4837 MIB.addReg(SOffset);
4838 else
4839 MIB.addImm(0);
4840 },
4841 [=](MachineInstrBuilder &MIB) { // offset
4842 MIB.addImm(Offset);
4843 },
4844 [=](MachineInstrBuilder &MIB) {
4845 MIB.addImm(AMDGPU::CPol::GLC); // cpol
4846 }
4847 }};
4848}
4849
4850InstructionSelector::ComplexRendererFns
4851AMDGPUInstructionSelector::selectMUBUFOffsetAtomic(MachineOperand &Root) const {
4852 Register RSrcReg;
4853 Register SOffset;
4854 int64_t Offset = 0;
4855
4856 if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
4857 return {};
4858
4859 return {{
4860 [=](MachineInstrBuilder &MIB) { // rsrc
4861 MIB.addReg(RSrcReg);
4862 },
4863 [=](MachineInstrBuilder &MIB) { // soffset
4864 if (SOffset)
4865 MIB.addReg(SOffset);
4866 else
4867 MIB.addImm(0);
4868 },
4869 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
4870 [=](MachineInstrBuilder &MIB) { MIB.addImm(AMDGPU::CPol::GLC); } // cpol
4871 }};
4872}
4873
4874 /// Get an immediate that must fit in 32 bits and is treated as zero-extended.
4875static Optional<uint64_t> getConstantZext32Val(Register Reg,
4876 const MachineRegisterInfo &MRI) {
4877 // getIConstantVRegSExtVal sign-extends the value, so check whether that matters.
4878 Optional<int64_t> OffsetVal = getIConstantVRegSExtVal(Reg, MRI);
4879 if (!OffsetVal || !isInt<32>(*OffsetVal))
4880 return None;
4881 return Lo_32(*OffsetVal);
4882}
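// Aside (editorial sketch, not part of this file): why the Lo_32 truncation
// matters after a sign-extending lookup. A 32-bit constant -1 is reported as
// the int64_t -1; taking the low 32 bits recovers the zero-extended encoding
// 0xFFFFFFFF that the offset field expects:
#include <cassert>
#include <cstdint>

int main() {
  int64_t SExtVal = -1;                              // sign-extended view
  uint32_t ZExtVal = static_cast<uint32_t>(SExtVal); // Lo_32 equivalent
  assert(ZExtVal == 0xFFFFFFFFu);
}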
4883
4884InstructionSelector::ComplexRendererFns
4885AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const {
4886 Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
4887 if (!OffsetVal)
4888 return {};
4889
4890 Optional<int64_t> EncodedImm =
4891 AMDGPU::getSMRDEncodedOffset(STI, *OffsetVal, true);
4892 if (!EncodedImm)
4893 return {};
4894
4895 return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); } }};
4896}
4897
4898InstructionSelector::ComplexRendererFns
4899AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const {
4900 assert(STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);
4901
4902 Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
4903 if (!OffsetVal)
4904 return {};
4905
4906 Optional<int64_t> EncodedImm =
4907 AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal);
4908 if (!EncodedImm)
4909 return {};
4910
4911 return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); } }};
4912}
4913
4914void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB,
4915 const MachineInstr &MI,
4916 int OpIdx) const {
4917 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4918 "Expected G_CONSTANT");
4919 MIB.addImm(MI.getOperand(1).getCImm()->getSExtValue());
4920}
4921
4922void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB,
4923 const MachineInstr &MI,
4924 int OpIdx) const {
4925 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4926 "Expected G_CONSTANT");
4927 MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue());
4928}
4929
4930void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB,
4931 const MachineInstr &MI,
4932 int OpIdx) const {
4933 assert(OpIdx == -1);
4934
4935 const MachineOperand &Op = MI.getOperand(1);
4936 if (MI.getOpcode() == TargetOpcode::G_FCONSTANT)
4937 MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
4938 else {
4939 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
4940 MIB.addImm(Op.getCImm()->getSExtValue());
4941 }
4942}
4943
4944void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB,
4945 const MachineInstr &MI,
4946 int OpIdx) const {
4947 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4948 "Expected G_CONSTANT");
4949 MIB.addImm(MI.getOperand(1).getCImm()->getValue().countPopulation());
4950}
4951
4952 /// This only really exists to satisfy DAG type-checking machinery, so it is a
4953 /// no-op here.
4954void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB,
4955 const MachineInstr &MI,
4956 int OpIdx) const {
4957 MIB.addImm(MI.getOperand(OpIdx).getImm());
4958}
4959
4960void AMDGPUInstructionSelector::renderExtractCPol(MachineInstrBuilder &MIB,
4961 const MachineInstr &MI,
4962 int OpIdx) const {
4963 assert(OpIdx >= 0 && "expected to match an immediate operand");
4964 MIB.addImm(MI.getOperand(OpIdx).getImm() & AMDGPU::CPol::ALL);
4965}
4966
4967void AMDGPUInstructionSelector::renderExtractSWZ(MachineInstrBuilder &MIB,
4968 const MachineInstr &MI,
4969 int OpIdx) const {
4970 assert(OpIdx >= 0 && "expected to match an immediate operand");
4971 MIB.addImm((MI.getOperand(OpIdx).getImm() >> 3) & 1);
4972}
4973
4974void AMDGPUInstructionSelector::renderSetGLC(MachineInstrBuilder &MIB,
4975 const MachineInstr &MI,
4976 int OpIdx) const {
4977 assert(OpIdx >= 0 && "expected to match an immediate operand");
4978 MIB.addImm(MI.getOperand(OpIdx).getImm() | AMDGPU::CPol::GLC);
4979}
4980
4981void AMDGPUInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
4982 const MachineInstr &MI,
4983 int OpIdx) const {
4984 MIB.addFrameIndex(MI.getOperand(1).getIndex());
4985}
4986
4987bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const {
4988 return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm());
4989}
4990
4991bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const {
4992 return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm());
4993}
4994
4995bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const {
4996 return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm());
4997}
4998
4999bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const {
5000 return TII.isInlineConstant(Imm);
5001}