Bug Summary

File:llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
Warning:line 3287, column 5
Value stored to 'MI' is never read

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name AMDGPUInstructionSelector.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -fhalf-no-semantic-interposition -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-12/lib/clang/12.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/build-llvm/lib/Target/AMDGPU -I /build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU -I /build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/build-llvm/include -I /build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-12/lib/clang/12.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/build-llvm/lib/Target/AMDGPU -fdebug-prefix-map=/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2021-01-16-002530-32805-1 -x c++ /build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
1//===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements the targeting of the InstructionSelector class for
10/// AMDGPU.
11/// \todo This should be generated by TableGen.
12//===----------------------------------------------------------------------===//
13
14#include "AMDGPUInstructionSelector.h"
15#include "AMDGPU.h"
16#include "AMDGPUGlobalISelUtils.h"
17#include "AMDGPUInstrInfo.h"
18#include "AMDGPURegisterBankInfo.h"
19#include "AMDGPUSubtarget.h"
20#include "AMDGPUTargetMachine.h"
21#include "SIMachineFunctionInfo.h"
22#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
23#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
24#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
25#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
26#include "llvm/IR/DiagnosticInfo.h"
27
28#define DEBUG_TYPE"amdgpu-isel" "amdgpu-isel"
29
30using namespace llvm;
31using namespace MIPatternMatch;
32
33static cl::opt<bool> AllowRiskySelect(
34 "amdgpu-global-isel-risky-select",
35 cl::desc("Allow GlobalISel to select cases that are likely to not work yet"),
36 cl::init(false),
37 cl::ReallyHidden);
38
39#define GET_GLOBALISEL_IMPL
40#define AMDGPUSubtarget GCNSubtarget
41#include "AMDGPUGenGlobalISel.inc"
42#undef GET_GLOBALISEL_IMPL
43#undef AMDGPUSubtarget
44
45AMDGPUInstructionSelector::AMDGPUInstructionSelector(
46 const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
47 const AMDGPUTargetMachine &TM)
48 : InstructionSelector(), TII(*STI.getInstrInfo()),
49 TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
50 STI(STI),
51 EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
52#define GET_GLOBALISEL_PREDICATES_INIT
53#include "AMDGPUGenGlobalISel.inc"
54#undef GET_GLOBALISEL_PREDICATES_INIT
55#define GET_GLOBALISEL_TEMPORARIES_INIT
56#include "AMDGPUGenGlobalISel.inc"
57#undef GET_GLOBALISEL_TEMPORARIES_INIT
58{
59}
60
61const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE"amdgpu-isel"; }
62
63void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits &KB,
64 CodeGenCoverage &CoverageInfo) {
65 MRI = &MF.getRegInfo();
66 Subtarget = &MF.getSubtarget<GCNSubtarget>();
67 InstructionSelector::setupMF(MF, KB, CoverageInfo);
68}
69
70bool AMDGPUInstructionSelector::isVCC(Register Reg,
71 const MachineRegisterInfo &MRI) const {
72 // The verifier is oblivious to s1 being a valid value for wavesize registers.
73 if (Reg.isPhysical())
74 return false;
75
76 auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
77 const TargetRegisterClass *RC =
78 RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
79 if (RC) {
80 const LLT Ty = MRI.getType(Reg);
81 return RC->hasSuperClassEq(TRI.getBoolRC()) &&
82 Ty.isValid() && Ty.getSizeInBits() == 1;
83 }
84
85 const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
86 return RB->getID() == AMDGPU::VCCRegBankID;
87}
88
89bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI,
90 unsigned NewOpc) const {
91 MI.setDesc(TII.get(NewOpc));
92 MI.RemoveOperand(1); // Remove intrinsic ID.
93 MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
94
95 MachineOperand &Dst = MI.getOperand(0);
96 MachineOperand &Src = MI.getOperand(1);
97
98 // TODO: This should be legalized to s32 if needed
99 if (MRI->getType(Dst.getReg()) == LLT::scalar(1))
100 return false;
101
102 const TargetRegisterClass *DstRC
103 = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
104 const TargetRegisterClass *SrcRC
105 = TRI.getConstrainedRegClassForOperand(Src, *MRI);
106 if (!DstRC || DstRC != SrcRC)
107 return false;
108
109 return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) &&
110 RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI);
111}
112
113bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
114 const DebugLoc &DL = I.getDebugLoc();
115 MachineBasicBlock *BB = I.getParent();
116 I.setDesc(TII.get(TargetOpcode::COPY));
117
118 const MachineOperand &Src = I.getOperand(1);
119 MachineOperand &Dst = I.getOperand(0);
120 Register DstReg = Dst.getReg();
121 Register SrcReg = Src.getReg();
122
123 if (isVCC(DstReg, *MRI)) {
124 if (SrcReg == AMDGPU::SCC) {
125 const TargetRegisterClass *RC
126 = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
127 if (!RC)
128 return true;
129 return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
130 }
131
132 if (!isVCC(SrcReg, *MRI)) {
133 // TODO: Should probably leave the copy and let copyPhysReg expand it.
134 if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI))
135 return false;
136
137 const TargetRegisterClass *SrcRC
138 = TRI.getConstrainedRegClassForOperand(Src, *MRI);
139
140 Register MaskedReg = MRI->createVirtualRegister(SrcRC);
141
142 // We can't trust the high bits at this point, so clear them.
143
144 // TODO: Skip masking high bits if def is known boolean.
145
146 unsigned AndOpc = TRI.isSGPRClass(SrcRC) ?
147 AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32;
148 BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg)
149 .addImm(1)
150 .addReg(SrcReg);
151 BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
152 .addImm(0)
153 .addReg(MaskedReg);
154
155 if (!MRI->getRegClassOrNull(SrcReg))
156 MRI->setRegClass(SrcReg, SrcRC);
157 I.eraseFromParent();
158 return true;
159 }
160
161 const TargetRegisterClass *RC =
162 TRI.getConstrainedRegClassForOperand(Dst, *MRI);
163 if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
164 return false;
165
166 return true;
167 }
168
169 for (const MachineOperand &MO : I.operands()) {
170 if (MO.getReg().isPhysical())
171 continue;
172
173 const TargetRegisterClass *RC =
174 TRI.getConstrainedRegClassForOperand(MO, *MRI);
175 if (!RC)
176 continue;
177 RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
178 }
179 return true;
180}
181
182bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
183 const Register DefReg = I.getOperand(0).getReg();
184 const LLT DefTy = MRI->getType(DefReg);
185 if (DefTy == LLT::scalar(1)) {
186 if (!AllowRiskySelect) {
187 LLVM_DEBUG(dbgs() << "Skipping risky boolean phi\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("amdgpu-isel")) { dbgs() << "Skipping risky boolean phi\n"
; } } while (false)
;
188 return false;
189 }
190
191 LLVM_DEBUG(dbgs() << "Selecting risky boolean phi\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("amdgpu-isel")) { dbgs() << "Selecting risky boolean phi\n"
; } } while (false)
;
192 }
193
194 // TODO: Verify this doesn't have insane operands (i.e. VGPR to SGPR copy)
195
196 const RegClassOrRegBank &RegClassOrBank =
197 MRI->getRegClassOrRegBank(DefReg);
198
199 const TargetRegisterClass *DefRC
200 = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
201 if (!DefRC) {
202 if (!DefTy.isValid()) {
203 LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("amdgpu-isel")) { dbgs() << "PHI operand has no type, not a gvreg?\n"
; } } while (false)
;
204 return false;
205 }
206
207 const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
208 DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB, *MRI);
209 if (!DefRC) {
210 LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("amdgpu-isel")) { dbgs() << "PHI operand has unexpected size/bank\n"
; } } while (false)
;
211 return false;
212 }
213 }
214
215 // TODO: Verify that all registers have the same bank
216 I.setDesc(TII.get(TargetOpcode::PHI));
217 return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
218}
219
220MachineOperand
221AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
222 const TargetRegisterClass &SubRC,
223 unsigned SubIdx) const {
224
225 MachineInstr *MI = MO.getParent();
226 MachineBasicBlock *BB = MO.getParent()->getParent();
227 Register DstReg = MRI->createVirtualRegister(&SubRC);
228
229 if (MO.isReg()) {
230 unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
231 Register Reg = MO.getReg();
232 BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
233 .addReg(Reg, 0, ComposedSubIdx);
234
235 return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
236 MO.isKill(), MO.isDead(), MO.isUndef(),
237 MO.isEarlyClobber(), 0, MO.isDebug(),
238 MO.isInternalRead());
239 }
240
241 assert(MO.isImm())((MO.isImm()) ? static_cast<void> (0) : __assert_fail (
"MO.isImm()", "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 241, __PRETTY_FUNCTION__))
;
242
243 APInt Imm(64, MO.getImm());
244
245 switch (SubIdx) {
246 default:
247 llvm_unreachable("do not know to split immediate with this sub index.")::llvm::llvm_unreachable_internal("do not know to split immediate with this sub index."
, "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 247)
;
248 case AMDGPU::sub0:
249 return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
250 case AMDGPU::sub1:
251 return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
252 }
253}
254
255static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {
256 switch (Opc) {
257 case AMDGPU::G_AND:
258 return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
259 case AMDGPU::G_OR:
260 return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32;
261 case AMDGPU::G_XOR:
262 return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32;
263 default:
264 llvm_unreachable("not a bit op")::llvm::llvm_unreachable_internal("not a bit op", "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 264)
;
265 }
266}
267
268bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {
269 Register DstReg = I.getOperand(0).getReg();
270 unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
271
272 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
273 if (DstRB->getID() != AMDGPU::SGPRRegBankID &&
274 DstRB->getID() != AMDGPU::VCCRegBankID)
275 return false;
276
277 bool Is64 = Size > 32 || (DstRB->getID() == AMDGPU::VCCRegBankID &&
278 STI.isWave64());
279 I.setDesc(TII.get(getLogicalBitOpcode(I.getOpcode(), Is64)));
280
281 // Dead implicit-def of scc
282 I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
283 true, // isImp
284 false, // isKill
285 true)); // isDead
286 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
287}
288
289bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {
290 MachineBasicBlock *BB = I.getParent();
291 MachineFunction *MF = BB->getParent();
292 Register DstReg = I.getOperand(0).getReg();
293 const DebugLoc &DL = I.getDebugLoc();
294 LLT Ty = MRI->getType(DstReg);
295 if (Ty.isVector())
296 return false;
297
298 unsigned Size = Ty.getSizeInBits();
299 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
300 const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;
301 const bool Sub = I.getOpcode() == TargetOpcode::G_SUB;
302
303 if (Size == 32) {
304 if (IsSALU) {
305 const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
306 MachineInstr *Add =
307 BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
308 .add(I.getOperand(1))
309 .add(I.getOperand(2));
310 I.eraseFromParent();
311 return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
312 }
313
314 if (STI.hasAddNoCarry()) {
315 const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64;
316 I.setDesc(TII.get(Opc));
317 I.addOperand(*MF, MachineOperand::CreateImm(0));
318 I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
319 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
320 }
321
322 const unsigned Opc = Sub ? AMDGPU::V_SUB_CO_U32_e64 : AMDGPU::V_ADD_CO_U32_e64;
323
324 Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass());
325 MachineInstr *Add
326 = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
327 .addDef(UnusedCarry, RegState::Dead)
328 .add(I.getOperand(1))
329 .add(I.getOperand(2))
330 .addImm(0);
331 I.eraseFromParent();
332 return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
333 }
334
335 assert(!Sub && "illegal sub should not reach here")((!Sub && "illegal sub should not reach here") ? static_cast
<void> (0) : __assert_fail ("!Sub && \"illegal sub should not reach here\""
, "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 335, __PRETTY_FUNCTION__))
;
336
337 const TargetRegisterClass &RC
338 = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass;
339 const TargetRegisterClass &HalfRC
340 = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass;
341
342 MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0));
343 MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0));
344 MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1));
345 MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1));
346
347 Register DstLo = MRI->createVirtualRegister(&HalfRC);
348 Register DstHi = MRI->createVirtualRegister(&HalfRC);
349
350 if (IsSALU) {
351 BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
352 .add(Lo1)
353 .add(Lo2);
354 BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
355 .add(Hi1)
356 .add(Hi2);
357 } else {
358 const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass();
359 Register CarryReg = MRI->createVirtualRegister(CarryRC);
360 BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_CO_U32_e64), DstLo)
361 .addDef(CarryReg)
362 .add(Lo1)
363 .add(Lo2)
364 .addImm(0);
365 MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi)
366 .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead)
367 .add(Hi1)
368 .add(Hi2)
369 .addReg(CarryReg, RegState::Kill)
370 .addImm(0);
371
372 if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI))
373 return false;
374 }
375
376 BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
377 .addReg(DstLo)
378 .addImm(AMDGPU::sub0)
379 .addReg(DstHi)
380 .addImm(AMDGPU::sub1);
381
382
383 if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
384 return false;
385
386 I.eraseFromParent();
387 return true;
388}
389
390bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE(
391 MachineInstr &I) const {
392 MachineBasicBlock *BB = I.getParent();
393 MachineFunction *MF = BB->getParent();
394 const DebugLoc &DL = I.getDebugLoc();
395 Register Dst0Reg = I.getOperand(0).getReg();
396 Register Dst1Reg = I.getOperand(1).getReg();
397 const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO ||
398 I.getOpcode() == AMDGPU::G_UADDE;
399 const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE ||
400 I.getOpcode() == AMDGPU::G_USUBE;
401
402 if (isVCC(Dst1Reg, *MRI)) {
403 unsigned NoCarryOpc =
404 IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
405 unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
406 I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc));
407 I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
408 I.addOperand(*MF, MachineOperand::CreateImm(0));
409 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
410 }
411
412 Register Src0Reg = I.getOperand(2).getReg();
413 Register Src1Reg = I.getOperand(3).getReg();
414
415 if (HasCarryIn) {
416 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
417 .addReg(I.getOperand(4).getReg());
418 }
419
420 unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
421 unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
422
423 BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg)
424 .add(I.getOperand(2))
425 .add(I.getOperand(3));
426 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg)
427 .addReg(AMDGPU::SCC);
428
429 if (!MRI->getRegClassOrNull(Dst1Reg))
430 MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass);
431
432 if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
433 !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
434 !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI))
435 return false;
436
437 if (HasCarryIn &&
438 !RBI.constrainGenericRegister(I.getOperand(4).getReg(),
439 AMDGPU::SReg_32RegClass, *MRI))
440 return false;
441
442 I.eraseFromParent();
443 return true;
444}
445
446// TODO: We should probably legalize these to only using 32-bit results.
447bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
448 MachineBasicBlock *BB = I.getParent();
449 Register DstReg = I.getOperand(0).getReg();
450 Register SrcReg = I.getOperand(1).getReg();
451 LLT DstTy = MRI->getType(DstReg);
452 LLT SrcTy = MRI->getType(SrcReg);
453 const unsigned SrcSize = SrcTy.getSizeInBits();
454 unsigned DstSize = DstTy.getSizeInBits();
455
456 // TODO: Should handle any multiple of 32 offset.
457 unsigned Offset = I.getOperand(2).getImm();
458 if (Offset % 32 != 0 || DstSize > 128)
459 return false;
460
461 // 16-bit operations really use 32-bit registers.
462 // FIXME: Probably should not allow 16-bit G_EXTRACT results.
463 if (DstSize == 16)
464 DstSize = 32;
465
466 const TargetRegisterClass *DstRC =
467 TRI.getConstrainedRegClassForOperand(I.getOperand(0), *MRI);
468 if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
469 return false;
470
471 const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
472 const TargetRegisterClass *SrcRC =
473 TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
474 if (!SrcRC)
475 return false;
476 unsigned SubReg = SIRegisterInfo::getSubRegFromChannel(Offset / 32,
477 DstSize / 32);
478 SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubReg);
479 if (!SrcRC)
480 return false;
481
482 SrcReg = constrainOperandRegClass(*MF, TRI, *MRI, TII, RBI, I,
483 *SrcRC, I.getOperand(1));
484 const DebugLoc &DL = I.getDebugLoc();
485 BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg)
486 .addReg(SrcReg, 0, SubReg);
487
488 I.eraseFromParent();
489 return true;
490}
491
492bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {
493 MachineBasicBlock *BB = MI.getParent();
494 Register DstReg = MI.getOperand(0).getReg();
495 LLT DstTy = MRI->getType(DstReg);
496 LLT SrcTy = MRI->getType(MI.getOperand(1).getReg());
497
498 const unsigned SrcSize = SrcTy.getSizeInBits();
499 if (SrcSize < 32)
500 return selectImpl(MI, *CoverageInfo);
501
502 const DebugLoc &DL = MI.getDebugLoc();
503 const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
504 const unsigned DstSize = DstTy.getSizeInBits();
505 const TargetRegisterClass *DstRC =
506 TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
507 if (!DstRC)
508 return false;
509
510 ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8);
511 MachineInstrBuilder MIB =
512 BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg);
513 for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
514 MachineOperand &Src = MI.getOperand(I + 1);
515 MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));
516 MIB.addImm(SubRegs[I]);
517
518 const TargetRegisterClass *SrcRC
519 = TRI.getConstrainedRegClassForOperand(Src, *MRI);
520 if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI))
521 return false;
522 }
523
524 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
525 return false;
526
527 MI.eraseFromParent();
528 return true;
529}
530
531bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
532 MachineBasicBlock *BB = MI.getParent();
533 const int NumDst = MI.getNumOperands() - 1;
534
535 MachineOperand &Src = MI.getOperand(NumDst);
536
537 Register SrcReg = Src.getReg();
538 Register DstReg0 = MI.getOperand(0).getReg();
539 LLT DstTy = MRI->getType(DstReg0);
540 LLT SrcTy = MRI->getType(SrcReg);
541
542 const unsigned DstSize = DstTy.getSizeInBits();
543 const unsigned SrcSize = SrcTy.getSizeInBits();
544 const DebugLoc &DL = MI.getDebugLoc();
545 const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
546
547 const TargetRegisterClass *SrcRC =
548 TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
549 if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
550 return false;
551
552 // Note we could have mixed SGPR and VGPR destination banks for an SGPR
553 // source, and this relies on the fact that the same subregister indices are
554 // used for both.
555 ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
556 for (int I = 0, E = NumDst; I != E; ++I) {
557 MachineOperand &Dst = MI.getOperand(I);
558 BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
559 .addReg(SrcReg, 0, SubRegs[I]);
560
561 // Make sure the subregister index is valid for the source register.
562 SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegs[I]);
563 if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
564 return false;
565
566 const TargetRegisterClass *DstRC =
567 TRI.getConstrainedRegClassForOperand(Dst, *MRI);
568 if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI))
569 return false;
570 }
571
572 MI.eraseFromParent();
573 return true;
574}
575
576bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR_TRUNC(
577 MachineInstr &MI) const {
578 if (selectImpl(MI, *CoverageInfo))
579 return true;
580
581 const LLT S32 = LLT::scalar(32);
582 const LLT V2S16 = LLT::vector(2, 16);
583
584 Register Dst = MI.getOperand(0).getReg();
585 if (MRI->getType(Dst) != V2S16)
586 return false;
587
588 const RegisterBank *DstBank = RBI.getRegBank(Dst, *MRI, TRI);
589 if (DstBank->getID() != AMDGPU::SGPRRegBankID)
590 return false;
591
592 Register Src0 = MI.getOperand(1).getReg();
593 Register Src1 = MI.getOperand(2).getReg();
594 if (MRI->getType(Src0) != S32)
595 return false;
596
597 const DebugLoc &DL = MI.getDebugLoc();
598 MachineBasicBlock *BB = MI.getParent();
599
600 auto ConstSrc1 = getConstantVRegValWithLookThrough(Src1, *MRI, true, true);
601 if (ConstSrc1) {
602 auto ConstSrc0 = getConstantVRegValWithLookThrough(Src0, *MRI, true, true);
603 if (ConstSrc0) {
604 const int64_t K0 = ConstSrc0->Value.getSExtValue();
605 const int64_t K1 = ConstSrc1->Value.getSExtValue();
606 uint32_t Lo16 = static_cast<uint32_t>(K0) & 0xffff;
607 uint32_t Hi16 = static_cast<uint32_t>(K1) & 0xffff;
608
609 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), Dst)
610 .addImm(Lo16 | (Hi16 << 16));
611 MI.eraseFromParent();
612 return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI);
613 }
614 }
615
616 // TODO: This should probably be a combine somewhere
617 // (build_vector_trunc $src0, undef -> copy $src0
618 MachineInstr *Src1Def = getDefIgnoringCopies(Src1, *MRI);
619 if (Src1Def && Src1Def->getOpcode() == AMDGPU::G_IMPLICIT_DEF) {
620 MI.setDesc(TII.get(AMDGPU::COPY));
621 MI.RemoveOperand(2);
622 return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI) &&
623 RBI.constrainGenericRegister(Src0, AMDGPU::SReg_32RegClass, *MRI);
624 }
625
626 Register ShiftSrc0;
627 Register ShiftSrc1;
628
629 // With multiple uses of the shift, this will duplicate the shift and
630 // increase register pressure.
631 //
632 // (build_vector_trunc (lshr_oneuse $src0, 16), (lshr_oneuse $src1, 16)
633 // => (S_PACK_HH_B32_B16 $src0, $src1)
634 // (build_vector_trunc $src0, (lshr_oneuse SReg_32:$src1, 16))
635 // => (S_PACK_LH_B32_B16 $src0, $src1)
636 // (build_vector_trunc $src0, $src1)
637 // => (S_PACK_LL_B32_B16 $src0, $src1)
638
639 bool Shift0 = mi_match(
640 Src0, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc0), m_SpecificICst(16))));
641
642 bool Shift1 = mi_match(
643 Src1, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc1), m_SpecificICst(16))));
644
645 unsigned Opc = AMDGPU::S_PACK_LL_B32_B16;
646 if (Shift0 && Shift1) {
647 Opc = AMDGPU::S_PACK_HH_B32_B16;
648 MI.getOperand(1).setReg(ShiftSrc0);
649 MI.getOperand(2).setReg(ShiftSrc1);
650 } else if (Shift1) {
651 Opc = AMDGPU::S_PACK_LH_B32_B16;
652 MI.getOperand(2).setReg(ShiftSrc1);
653 } else if (Shift0 && ConstSrc1 && ConstSrc1->Value == 0) {
654 // build_vector_trunc (lshr $src0, 16), 0 -> s_lshr_b32 $src0, 16
655 auto MIB = BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_LSHR_B32), Dst)
656 .addReg(ShiftSrc0)
657 .addImm(16);
658
659 MI.eraseFromParent();
660 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
661 }
662
663 MI.setDesc(TII.get(Opc));
664 return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
665}
666
667bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const {
668 return selectG_ADD_SUB(I);
669}
670
671bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
672 const MachineOperand &MO = I.getOperand(0);
673
674 // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
675 // regbank check here is to know why getConstrainedRegClassForOperand failed.
676 const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI);
677 if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) ||
678 (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) {
679 I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
680 return true;
681 }
682
683 return false;
684}
685
686bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
687 MachineBasicBlock *BB = I.getParent();
688
689 Register DstReg = I.getOperand(0).getReg();
690 Register Src0Reg = I.getOperand(1).getReg();
691 Register Src1Reg = I.getOperand(2).getReg();
692 LLT Src1Ty = MRI->getType(Src1Reg);
693
694 unsigned DstSize = MRI->getType(DstReg).getSizeInBits();
695 unsigned InsSize = Src1Ty.getSizeInBits();
696
697 int64_t Offset = I.getOperand(3).getImm();
698
699 // FIXME: These cases should have been illegal and unnecessary to check here.
700 if (Offset % 32 != 0 || InsSize % 32 != 0)
701 return false;
702
703 // Currently not handled by getSubRegFromChannel.
704 if (InsSize > 128)
705 return false;
706
707 unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32);
708 if (SubReg == AMDGPU::NoSubRegister)
709 return false;
710
711 const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
712 const TargetRegisterClass *DstRC =
713 TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
714 if (!DstRC)
715 return false;
716
717 const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI);
718 const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI);
719 const TargetRegisterClass *Src0RC =
720 TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank, *MRI);
721 const TargetRegisterClass *Src1RC =
722 TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank, *MRI);
723
724 // Deal with weird cases where the class only partially supports the subreg
725 // index.
726 Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg);
727 if (!Src0RC || !Src1RC)
728 return false;
729
730 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
731 !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) ||
732 !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI))
733 return false;
734
735 const DebugLoc &DL = I.getDebugLoc();
736 BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg)
737 .addReg(Src0Reg)
738 .addReg(Src1Reg)
739 .addImm(SubReg);
740
741 I.eraseFromParent();
742 return true;
743}
744
745bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const {
746 if (STI.getLDSBankCount() != 16)
747 return selectImpl(MI, *CoverageInfo);
748
749 Register Dst = MI.getOperand(0).getReg();
750 Register Src0 = MI.getOperand(2).getReg();
751 Register M0Val = MI.getOperand(6).getReg();
752 if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) ||
753 !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) ||
754 !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI))
755 return false;
756
757 // This requires 2 instructions. It is possible to write a pattern to support
758 // this, but the generated isel emitter doesn't correctly deal with multiple
759 // output instructions using the same physical register input. The copy to m0
760 // is incorrectly placed before the second instruction.
761 //
762 // TODO: Match source modifiers.
763
764 Register InterpMov = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
765 const DebugLoc &DL = MI.getDebugLoc();
766 MachineBasicBlock *MBB = MI.getParent();
767
768 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
769 .addReg(M0Val);
770 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov)
771 .addImm(2)
772 .addImm(MI.getOperand(4).getImm()) // $attr
773 .addImm(MI.getOperand(3).getImm()); // $attrchan
774
775 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_P1LV_F16), Dst)
776 .addImm(0) // $src0_modifiers
777 .addReg(Src0) // $src0
778 .addImm(MI.getOperand(4).getImm()) // $attr
779 .addImm(MI.getOperand(3).getImm()) // $attrchan
780 .addImm(0) // $src2_modifiers
781 .addReg(InterpMov) // $src2 - 2 f16 values selected by high
782 .addImm(MI.getOperand(5).getImm()) // $high
783 .addImm(0) // $clamp
784 .addImm(0); // $omod
785
786 MI.eraseFromParent();
787 return true;
788}
789
790// Writelane is special in that it can use SGPR and M0 (which would normally
791// count as using the constant bus twice - but in this case it is allowed since
792// the lane selector doesn't count as a use of the constant bus). However, it is
793// still required to abide by the 1 SGPR rule. Fix this up if we might have
794// multiple SGPRs.
795bool AMDGPUInstructionSelector::selectWritelane(MachineInstr &MI) const {
796 // With a constant bus limit of at least 2, there's no issue.
797 if (STI.getConstantBusLimit(AMDGPU::V_WRITELANE_B32) > 1)
798 return selectImpl(MI, *CoverageInfo);
799
800 MachineBasicBlock *MBB = MI.getParent();
801 const DebugLoc &DL = MI.getDebugLoc();
802 Register VDst = MI.getOperand(0).getReg();
803 Register Val = MI.getOperand(2).getReg();
804 Register LaneSelect = MI.getOperand(3).getReg();
805 Register VDstIn = MI.getOperand(4).getReg();
806
807 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_WRITELANE_B32), VDst);
808
809 Optional<ValueAndVReg> ConstSelect =
810 getConstantVRegValWithLookThrough(LaneSelect, *MRI, true, true);
811 if (ConstSelect) {
812 // The selector has to be an inline immediate, so we can use whatever for
813 // the other operands.
814 MIB.addReg(Val);
815 MIB.addImm(ConstSelect->Value.getSExtValue() &
816 maskTrailingOnes<uint64_t>(STI.getWavefrontSizeLog2()));
817 } else {
818 Optional<ValueAndVReg> ConstVal =
819 getConstantVRegValWithLookThrough(Val, *MRI, true, true);
820
821 // If the value written is an inline immediate, we can get away without a
822 // copy to m0.
823 if (ConstVal && AMDGPU::isInlinableLiteral32(ConstVal->Value.getSExtValue(),
824 STI.hasInv2PiInlineImm())) {
825 MIB.addImm(ConstVal->Value.getSExtValue());
826 MIB.addReg(LaneSelect);
827 } else {
828 MIB.addReg(Val);
829
830 // If the lane selector was originally in a VGPR and copied with
831 // readfirstlane, there's a hazard to read the same SGPR from the
832 // VALU. Constrain to a different SGPR to help avoid needing a nop later.
833 RBI.constrainGenericRegister(LaneSelect, AMDGPU::SReg_32_XM0RegClass, *MRI);
834
835 BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
836 .addReg(LaneSelect);
837 MIB.addReg(AMDGPU::M0);
838 }
839 }
840
841 MIB.addReg(VDstIn);
842
843 MI.eraseFromParent();
844 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
845}
846
847// We need to handle this here because tablegen doesn't support matching
848// instructions with multiple outputs.
849bool AMDGPUInstructionSelector::selectDivScale(MachineInstr &MI) const {
850 Register Dst0 = MI.getOperand(0).getReg();
851 Register Dst1 = MI.getOperand(1).getReg();
852
853 LLT Ty = MRI->getType(Dst0);
854 unsigned Opc;
855 if (Ty == LLT::scalar(32))
856 Opc = AMDGPU::V_DIV_SCALE_F32_e64;
857 else if (Ty == LLT::scalar(64))
858 Opc = AMDGPU::V_DIV_SCALE_F64_e64;
859 else
860 return false;
861
862 // TODO: Match source modifiers.
863
864 const DebugLoc &DL = MI.getDebugLoc();
865 MachineBasicBlock *MBB = MI.getParent();
866
867 Register Numer = MI.getOperand(3).getReg();
868 Register Denom = MI.getOperand(4).getReg();
869 unsigned ChooseDenom = MI.getOperand(5).getImm();
870
871 Register Src0 = ChooseDenom != 0 ? Numer : Denom;
872
873 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), Dst0)
874 .addDef(Dst1)
875 .addImm(0) // $src0_modifiers
876 .addUse(Src0) // $src0
877 .addImm(0) // $src1_modifiers
878 .addUse(Denom) // $src1
879 .addImm(0) // $src2_modifiers
880 .addUse(Numer) // $src2
881 .addImm(0) // $clamp
882 .addImm(0); // $omod
883
884 MI.eraseFromParent();
885 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
886}
887
888bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
889 unsigned IntrinsicID = I.getIntrinsicID();
890 switch (IntrinsicID) {
891 case Intrinsic::amdgcn_if_break: {
892 MachineBasicBlock *BB = I.getParent();
893
894 // FIXME: Manually selecting to avoid dealiing with the SReg_1 trick
895 // SelectionDAG uses for wave32 vs wave64.
896 BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK))
897 .add(I.getOperand(0))
898 .add(I.getOperand(2))
899 .add(I.getOperand(3));
900
901 Register DstReg = I.getOperand(0).getReg();
902 Register Src0Reg = I.getOperand(2).getReg();
903 Register Src1Reg = I.getOperand(3).getReg();
904
905 I.eraseFromParent();
906
907 for (Register Reg : { DstReg, Src0Reg, Src1Reg })
908 MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
909
910 return true;
911 }
912 case Intrinsic::amdgcn_interp_p1_f16:
913 return selectInterpP1F16(I);
914 case Intrinsic::amdgcn_wqm:
915 return constrainCopyLikeIntrin(I, AMDGPU::WQM);
916 case Intrinsic::amdgcn_softwqm:
917 return constrainCopyLikeIntrin(I, AMDGPU::SOFT_WQM);
918 case Intrinsic::amdgcn_wwm:
919 return constrainCopyLikeIntrin(I, AMDGPU::WWM);
920 case Intrinsic::amdgcn_writelane:
921 return selectWritelane(I);
922 case Intrinsic::amdgcn_div_scale:
923 return selectDivScale(I);
924 case Intrinsic::amdgcn_icmp:
925 return selectIntrinsicIcmp(I);
926 case Intrinsic::amdgcn_ballot:
927 return selectBallot(I);
928 case Intrinsic::amdgcn_reloc_constant:
929 return selectRelocConstant(I);
930 case Intrinsic::amdgcn_groupstaticsize:
931 return selectGroupStaticSize(I);
932 case Intrinsic::returnaddress:
933 return selectReturnAddress(I);
934 default:
935 return selectImpl(I, *CoverageInfo);
936 }
937}
938
939static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) {
940 if (Size != 32 && Size != 64)
941 return -1;
942 switch (P) {
943 default:
944 llvm_unreachable("Unknown condition code!")::llvm::llvm_unreachable_internal("Unknown condition code!", "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 944)
;
945 case CmpInst::ICMP_NE:
946 return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64;
947 case CmpInst::ICMP_EQ:
948 return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64;
949 case CmpInst::ICMP_SGT:
950 return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64;
951 case CmpInst::ICMP_SGE:
952 return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64;
953 case CmpInst::ICMP_SLT:
954 return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64;
955 case CmpInst::ICMP_SLE:
956 return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64;
957 case CmpInst::ICMP_UGT:
958 return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64;
959 case CmpInst::ICMP_UGE:
960 return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64;
961 case CmpInst::ICMP_ULT:
962 return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64;
963 case CmpInst::ICMP_ULE:
964 return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64;
965 }
966}
967
968int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
969 unsigned Size) const {
970 if (Size == 64) {
971 if (!STI.hasScalarCompareEq64())
972 return -1;
973
974 switch (P) {
975 case CmpInst::ICMP_NE:
976 return AMDGPU::S_CMP_LG_U64;
977 case CmpInst::ICMP_EQ:
978 return AMDGPU::S_CMP_EQ_U64;
979 default:
980 return -1;
981 }
982 }
983
984 if (Size != 32)
985 return -1;
986
987 switch (P) {
988 case CmpInst::ICMP_NE:
989 return AMDGPU::S_CMP_LG_U32;
990 case CmpInst::ICMP_EQ:
991 return AMDGPU::S_CMP_EQ_U32;
992 case CmpInst::ICMP_SGT:
993 return AMDGPU::S_CMP_GT_I32;
994 case CmpInst::ICMP_SGE:
995 return AMDGPU::S_CMP_GE_I32;
996 case CmpInst::ICMP_SLT:
997 return AMDGPU::S_CMP_LT_I32;
998 case CmpInst::ICMP_SLE:
999 return AMDGPU::S_CMP_LE_I32;
1000 case CmpInst::ICMP_UGT:
1001 return AMDGPU::S_CMP_GT_U32;
1002 case CmpInst::ICMP_UGE:
1003 return AMDGPU::S_CMP_GE_U32;
1004 case CmpInst::ICMP_ULT:
1005 return AMDGPU::S_CMP_LT_U32;
1006 case CmpInst::ICMP_ULE:
1007 return AMDGPU::S_CMP_LE_U32;
1008 default:
1009 llvm_unreachable("Unknown condition code!")::llvm::llvm_unreachable_internal("Unknown condition code!", "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 1009)
;
1010 }
1011}
1012
1013bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
1014 MachineBasicBlock *BB = I.getParent();
1015 const DebugLoc &DL = I.getDebugLoc();
1016
1017 Register SrcReg = I.getOperand(2).getReg();
1018 unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1019
1020 auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();
1021
1022 Register CCReg = I.getOperand(0).getReg();
1023 if (!isVCC(CCReg, *MRI)) {
1024 int Opcode = getS_CMPOpcode(Pred, Size);
1025 if (Opcode == -1)
1026 return false;
1027 MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
1028 .add(I.getOperand(2))
1029 .add(I.getOperand(3));
1030 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
1031 .addReg(AMDGPU::SCC);
1032 bool Ret =
1033 constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
1034 RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI);
1035 I.eraseFromParent();
1036 return Ret;
1037 }
1038
1039 int Opcode = getV_CMPOpcode(Pred, Size);
1040 if (Opcode == -1)
1041 return false;
1042
1043 MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
1044 I.getOperand(0).getReg())
1045 .add(I.getOperand(2))
1046 .add(I.getOperand(3));
1047 RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
1048 *TRI.getBoolRC(), *MRI);
1049 bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
1050 I.eraseFromParent();
1051 return Ret;
1052}
1053
1054bool AMDGPUInstructionSelector::selectIntrinsicIcmp(MachineInstr &I) const {
1055 Register Dst = I.getOperand(0).getReg();
1056 if (isVCC(Dst, *MRI))
1057 return false;
1058
1059 if (MRI->getType(Dst).getSizeInBits() != STI.getWavefrontSize())
1060 return false;
1061
1062 MachineBasicBlock *BB = I.getParent();
1063 const DebugLoc &DL = I.getDebugLoc();
1064 Register SrcReg = I.getOperand(2).getReg();
1065 unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1066 auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(4).getImm());
1067
1068 int Opcode = getV_CMPOpcode(Pred, Size);
1069 if (Opcode == -1)
1070 return false;
1071
1072 MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode), Dst)
1073 .add(I.getOperand(2))
1074 .add(I.getOperand(3));
1075 RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(), *TRI.getBoolRC(),
1076 *MRI);
1077 bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
1078 I.eraseFromParent();
1079 return Ret;
1080}
1081
1082bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const {
1083 MachineBasicBlock *BB = I.getParent();
1084 const DebugLoc &DL = I.getDebugLoc();
1085 Register DstReg = I.getOperand(0).getReg();
1086 const unsigned Size = MRI->getType(DstReg).getSizeInBits();
1087 const bool Is64 = Size == 64;
1088
1089 if (Size != STI.getWavefrontSize())
1090 return false;
1091
1092 Optional<ValueAndVReg> Arg =
1093 getConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI, true);
1094
1095 if (Arg.hasValue()) {
1096 const int64_t Value = Arg.getValue().Value.getSExtValue();
1097 if (Value == 0) {
1098 unsigned Opcode = Is64 ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
1099 BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg).addImm(0);
1100 } else if (Value == -1) { // all ones
1101 Register SrcReg = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
1102 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
1103 } else
1104 return false;
1105 } else {
1106 Register SrcReg = I.getOperand(2).getReg();
1107 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
1108 }
1109
1110 I.eraseFromParent();
1111 return true;
1112}
1113
1114bool AMDGPUInstructionSelector::selectRelocConstant(MachineInstr &I) const {
1115 Register DstReg = I.getOperand(0).getReg();
1116 const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
1117 const TargetRegisterClass *DstRC =
1118 TRI.getRegClassForSizeOnBank(32, *DstBank, *MRI);
1119 if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
1120 return false;
1121
1122 const bool IsVALU = DstBank->getID() == AMDGPU::VGPRRegBankID;
1123
1124 Module *M = MF->getFunction().getParent();
1125 const MDNode *Metadata = I.getOperand(2).getMetadata();
1126 auto SymbolName = cast<MDString>(Metadata->getOperand(0))->getString();
1127 auto RelocSymbol = cast<GlobalVariable>(
1128 M->getOrInsertGlobal(SymbolName, Type::getInt32Ty(M->getContext())));
1129
1130 MachineBasicBlock *BB = I.getParent();
1131 BuildMI(*BB, &I, I.getDebugLoc(),
1132 TII.get(IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32), DstReg)
1133 .addGlobalAddress(RelocSymbol, 0, SIInstrInfo::MO_ABS32_LO);
1134
1135 I.eraseFromParent();
1136 return true;
1137}
1138
1139bool AMDGPUInstructionSelector::selectGroupStaticSize(MachineInstr &I) const {
1140 Triple::OSType OS = MF->getTarget().getTargetTriple().getOS();
1141
1142 Register DstReg = I.getOperand(0).getReg();
1143 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1144 unsigned Mov = DstRB->getID() == AMDGPU::SGPRRegBankID ?
1145 AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1146
1147 MachineBasicBlock *MBB = I.getParent();
1148 const DebugLoc &DL = I.getDebugLoc();
1149
1150 auto MIB = BuildMI(*MBB, &I, DL, TII.get(Mov), DstReg);
1151
1152 if (OS == Triple::AMDHSA || OS == Triple::AMDPAL) {
1153 const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1154 MIB.addImm(MFI->getLDSSize());
1155 } else {
1156 Module *M = MF->getFunction().getParent();
1157 const GlobalValue *GV
1158 = Intrinsic::getDeclaration(M, Intrinsic::amdgcn_groupstaticsize);
1159 MIB.addGlobalAddress(GV, 0, SIInstrInfo::MO_ABS32_LO);
1160 }
1161
1162 I.eraseFromParent();
1163 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1164}
1165
1166bool AMDGPUInstructionSelector::selectReturnAddress(MachineInstr &I) const {
1167 MachineBasicBlock *MBB = I.getParent();
1168 MachineFunction &MF = *MBB->getParent();
1169 const DebugLoc &DL = I.getDebugLoc();
1170
1171 MachineOperand &Dst = I.getOperand(0);
1172 Register DstReg = Dst.getReg();
1173 unsigned Depth = I.getOperand(2).getImm();
1174
1175 const TargetRegisterClass *RC
1176 = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
1177 if (!RC->hasSubClassEq(&AMDGPU::SGPR_64RegClass) ||
1178 !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
1179 return false;
1180
1181 // Check for kernel and shader functions
1182 if (Depth != 0 ||
1183 MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) {
1184 BuildMI(*MBB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
1185 .addImm(0);
1186 I.eraseFromParent();
1187 return true;
1188 }
1189
1190 MachineFrameInfo &MFI = MF.getFrameInfo();
1191 // There is a call to @llvm.returnaddress in this function
1192 MFI.setReturnAddressIsTaken(true);
1193
1194 // Get the return address reg and mark it as an implicit live-in
1195 Register ReturnAddrReg = TRI.getReturnAddressReg(MF);
1196 Register LiveIn = getFunctionLiveInPhysReg(MF, TII, ReturnAddrReg,
1197 AMDGPU::SReg_64RegClass);
1198 BuildMI(*MBB, &I, DL, TII.get(AMDGPU::COPY), DstReg)
1199 .addReg(LiveIn);
1200 I.eraseFromParent();
1201 return true;
1202}
1203
1204bool AMDGPUInstructionSelector::selectEndCfIntrinsic(MachineInstr &MI) const {
1205 // FIXME: Manually selecting to avoid dealiing with the SReg_1 trick
1206 // SelectionDAG uses for wave32 vs wave64.
1207 MachineBasicBlock *BB = MI.getParent();
1208 BuildMI(*BB, &MI, MI.getDebugLoc(), TII.get(AMDGPU::SI_END_CF))
1209 .add(MI.getOperand(1));
1210
1211 Register Reg = MI.getOperand(1).getReg();
1212 MI.eraseFromParent();
1213
1214 if (!MRI->getRegClassOrNull(Reg))
1215 MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
1216 return true;
1217}
1218
1219bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic(
1220 MachineInstr &MI, Intrinsic::ID IntrID) const {
1221 MachineBasicBlock *MBB = MI.getParent();
1222 MachineFunction *MF = MBB->getParent();
1223 const DebugLoc &DL = MI.getDebugLoc();
1224
1225 unsigned IndexOperand = MI.getOperand(7).getImm();
1226 bool WaveRelease = MI.getOperand(8).getImm() != 0;
1227 bool WaveDone = MI.getOperand(9).getImm() != 0;
1228
1229 if (WaveDone && !WaveRelease)
1230 report_fatal_error("ds_ordered_count: wave_done requires wave_release");
1231
1232 unsigned OrderedCountIndex = IndexOperand & 0x3f;
1233 IndexOperand &= ~0x3f;
1234 unsigned CountDw = 0;
1235
1236 if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) {
1237 CountDw = (IndexOperand >> 24) & 0xf;
1238 IndexOperand &= ~(0xf << 24);
1239
1240 if (CountDw < 1 || CountDw > 4) {
1241 report_fatal_error(
1242 "ds_ordered_count: dword count must be between 1 and 4");
1243 }
1244 }
1245
1246 if (IndexOperand)
1247 report_fatal_error("ds_ordered_count: bad index operand");
1248
1249 unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
1250 unsigned ShaderType = SIInstrInfo::getDSShaderTypeValue(*MF);
1251
1252 unsigned Offset0 = OrderedCountIndex << 2;
1253 unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |
1254 (Instruction << 4);
1255
1256 if (STI.getGeneration() >= AMDGPUSubtarget::GFX10)
1257 Offset1 |= (CountDw - 1) << 6;
1258
1259 unsigned Offset = Offset0 | (Offset1 << 8);
1260
1261 Register M0Val = MI.getOperand(2).getReg();
1262 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1263 .addReg(M0Val);
1264
1265 Register DstReg = MI.getOperand(0).getReg();
1266 Register ValReg = MI.getOperand(3).getReg();
1267 MachineInstrBuilder DS =
1268 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg)
1269 .addReg(ValReg)
1270 .addImm(Offset)
1271 .cloneMemRefs(MI);
1272
1273 if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI))
1274 return false;
1275
1276 bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI);
1277 MI.eraseFromParent();
1278 return Ret;
1279}
1280
1281static unsigned gwsIntrinToOpcode(unsigned IntrID) {
1282 switch (IntrID) {
1283 case Intrinsic::amdgcn_ds_gws_init:
1284 return AMDGPU::DS_GWS_INIT;
1285 case Intrinsic::amdgcn_ds_gws_barrier:
1286 return AMDGPU::DS_GWS_BARRIER;
1287 case Intrinsic::amdgcn_ds_gws_sema_v:
1288 return AMDGPU::DS_GWS_SEMA_V;
1289 case Intrinsic::amdgcn_ds_gws_sema_br:
1290 return AMDGPU::DS_GWS_SEMA_BR;
1291 case Intrinsic::amdgcn_ds_gws_sema_p:
1292 return AMDGPU::DS_GWS_SEMA_P;
1293 case Intrinsic::amdgcn_ds_gws_sema_release_all:
1294 return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
1295 default:
1296 llvm_unreachable("not a gws intrinsic")::llvm::llvm_unreachable_internal("not a gws intrinsic", "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 1296)
;
1297 }
1298}
1299
1300bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI,
1301 Intrinsic::ID IID) const {
1302 if (IID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
1303 !STI.hasGWSSemaReleaseAll())
1304 return false;
1305
1306 // intrinsic ID, vsrc, offset
1307 const bool HasVSrc = MI.getNumOperands() == 3;
1308 assert(HasVSrc || MI.getNumOperands() == 2)((HasVSrc || MI.getNumOperands() == 2) ? static_cast<void>
(0) : __assert_fail ("HasVSrc || MI.getNumOperands() == 2", "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 1308, __PRETTY_FUNCTION__))
;
1309
1310 Register BaseOffset = MI.getOperand(HasVSrc ? 2 : 1).getReg();
1311 const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI);
1312 if (OffsetRB->getID() != AMDGPU::SGPRRegBankID)
1313 return false;
1314
1315 MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1316 assert(OffsetDef)((OffsetDef) ? static_cast<void> (0) : __assert_fail ("OffsetDef"
, "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 1316, __PRETTY_FUNCTION__))
;
1317
1318 unsigned ImmOffset;
1319
1320 MachineBasicBlock *MBB = MI.getParent();
1321 const DebugLoc &DL = MI.getDebugLoc();
1322
1323 MachineInstr *Readfirstlane = nullptr;
1324
1325 // If we legalized the VGPR input, strip out the readfirstlane to analyze the
1326 // incoming offset, in case there's an add of a constant. We'll have to put it
1327 // back later.
1328 if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) {
1329 Readfirstlane = OffsetDef;
1330 BaseOffset = OffsetDef->getOperand(1).getReg();
1331 OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1332 }
1333
1334 if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) {
1335 // If we have a constant offset, try to use the 0 in m0 as the base.
1336 // TODO: Look into changing the default m0 initialization value. If the
1337 // default -1 only set the low 16-bits, we could leave it as-is and add 1 to
1338 // the immediate offset.
1339
1340 ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue();
1341 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
1342 .addImm(0);
1343 } else {
1344 std::tie(BaseOffset, ImmOffset) =
1345 AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset);
1346
1347 if (Readfirstlane) {
1348 // We have the constant offset now, so put the readfirstlane back on the
1349 // variable component.
1350 if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI))
1351 return false;
1352
1353 Readfirstlane->getOperand(1).setReg(BaseOffset);
1354 BaseOffset = Readfirstlane->getOperand(0).getReg();
1355 } else {
1356 if (!RBI.constrainGenericRegister(BaseOffset,
1357 AMDGPU::SReg_32RegClass, *MRI))
1358 return false;
1359 }
1360
1361 Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1362 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base)
1363 .addReg(BaseOffset)
1364 .addImm(16);
1365
1366 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1367 .addReg(M0Base);
1368 }
1369
1370 // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
1371 // offset field) % 64. Some versions of the programming guide omit the m0
1372 // part, or claim it's from offset 0.
1373 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID)));
1374
1375 if (HasVSrc) {
1376 Register VSrc = MI.getOperand(1).getReg();
1377 MIB.addReg(VSrc);
1378 if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI))
1379 return false;
1380 }
1381
1382 MIB.addImm(ImmOffset)
1383 .cloneMemRefs(MI);
1384
1385 MI.eraseFromParent();
1386 return true;
1387}
1388
1389bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI,
1390 bool IsAppend) const {
1391 Register PtrBase = MI.getOperand(2).getReg();
1392 LLT PtrTy = MRI->getType(PtrBase);
1393 bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
1394
1395 unsigned Offset;
1396 std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2));
1397
1398 // TODO: Should this try to look through readfirstlane like GWS?
1399 if (!isDSOffsetLegal(PtrBase, Offset)) {
1400 PtrBase = MI.getOperand(2).getReg();
1401 Offset = 0;
1402 }
1403
1404 MachineBasicBlock *MBB = MI.getParent();
1405 const DebugLoc &DL = MI.getDebugLoc();
1406 const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;
1407
1408 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1409 .addReg(PtrBase);
1410 if (!RBI.constrainGenericRegister(PtrBase, AMDGPU::SReg_32RegClass, *MRI))
1411 return false;
1412
1413 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg())
1414 .addImm(Offset)
1415 .addImm(IsGDS ? -1 : 0)
1416 .cloneMemRefs(MI);
1417 MI.eraseFromParent();
1418 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1419}
1420
1421bool AMDGPUInstructionSelector::selectSBarrier(MachineInstr &MI) const {
1422 if (TM.getOptLevel() > CodeGenOpt::None) {
1423 unsigned WGSize = STI.getFlatWorkGroupSizes(MF->getFunction()).second;
1424 if (WGSize <= STI.getWavefrontSize()) {
1425 MachineBasicBlock *MBB = MI.getParent();
1426 const DebugLoc &DL = MI.getDebugLoc();
1427 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::WAVE_BARRIER));
1428 MI.eraseFromParent();
1429 return true;
1430 }
1431 }
1432 return selectImpl(MI, *CoverageInfo);
1433}
1434
1435static bool parseTexFail(uint64_t TexFailCtrl, bool &TFE, bool &LWE,
1436 bool &IsTexFail) {
1437 if (TexFailCtrl)
1438 IsTexFail = true;
1439
1440 TFE = (TexFailCtrl & 0x1) ? 1 : 0;
1441 TexFailCtrl &= ~(uint64_t)0x1;
1442 LWE = (TexFailCtrl & 0x2) ? 1 : 0;
1443 TexFailCtrl &= ~(uint64_t)0x2;
1444
1445 return TexFailCtrl == 0;
1446}
1447
1448static bool parseCachePolicy(uint64_t Value,
1449 bool *GLC, bool *SLC, bool *DLC) {
1450 if (GLC) {
1451 *GLC = (Value & 0x1) ? 1 : 0;
1452 Value &= ~(uint64_t)0x1;
1453 }
1454 if (SLC) {
1455 *SLC = (Value & 0x2) ? 1 : 0;
1456 Value &= ~(uint64_t)0x2;
1457 }
1458 if (DLC) {
1459 *DLC = (Value & 0x4) ? 1 : 0;
1460 Value &= ~(uint64_t)0x4;
1461 }
1462
1463 return Value == 0;
1464}
1465
1466bool AMDGPUInstructionSelector::selectImageIntrinsic(
1467 MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const {
1468 MachineBasicBlock *MBB = MI.getParent();
1469 const DebugLoc &DL = MI.getDebugLoc();
1470
1471 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
1472 AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
1473
1474 const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
1475 const AMDGPU::MIMGLZMappingInfo *LZMappingInfo =
1476 AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode);
1477 const AMDGPU::MIMGMIPMappingInfo *MIPMappingInfo =
1478 AMDGPU::getMIMGMIPMappingInfo(Intr->BaseOpcode);
1479 unsigned IntrOpcode = Intr->BaseOpcode;
1480 const bool IsGFX10Plus = AMDGPU::isGFX10Plus(STI);
1481
1482 const unsigned ArgOffset = MI.getNumExplicitDefs() + 1;
1483
1484 Register VDataIn, VDataOut;
1485 LLT VDataTy;
1486 int NumVDataDwords = -1;
1487 bool IsD16 = false;
1488
1489 bool Unorm;
1490 if (!BaseOpcode->Sampler)
1491 Unorm = true;
1492 else
1493 Unorm = MI.getOperand(ArgOffset + Intr->UnormIndex).getImm() != 0;
1494
1495 bool TFE;
1496 bool LWE;
1497 bool IsTexFail = false;
1498 if (!parseTexFail(MI.getOperand(ArgOffset + Intr->TexFailCtrlIndex).getImm(),
1499 TFE, LWE, IsTexFail))
1500 return false;
1501
1502 const int Flags = MI.getOperand(ArgOffset + Intr->NumArgs).getImm();
1503 const bool IsA16 = (Flags & 1) != 0;
1504 const bool IsG16 = (Flags & 2) != 0;
1505
1506 // A16 implies 16 bit gradients
1507 if (IsA16 && !IsG16)
1508 return false;
1509
1510 unsigned DMask = 0;
1511 unsigned DMaskLanes = 0;
1512
1513 if (BaseOpcode->Atomic) {
1514 VDataOut = MI.getOperand(0).getReg();
1515 VDataIn = MI.getOperand(2).getReg();
1516 LLT Ty = MRI->getType(VDataIn);
1517
1518 // Be careful to allow atomic swap on 16-bit element vectors.
1519 const bool Is64Bit = BaseOpcode->AtomicX2 ?
1520 Ty.getSizeInBits() == 128 :
1521 Ty.getSizeInBits() == 64;
1522
1523 if (BaseOpcode->AtomicX2) {
1524 assert(MI.getOperand(3).getReg() == AMDGPU::NoRegister)((MI.getOperand(3).getReg() == AMDGPU::NoRegister) ? static_cast
<void> (0) : __assert_fail ("MI.getOperand(3).getReg() == AMDGPU::NoRegister"
, "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 1524, __PRETTY_FUNCTION__))
;
1525
1526 DMask = Is64Bit ? 0xf : 0x3;
1527 NumVDataDwords = Is64Bit ? 4 : 2;
1528 } else {
1529 DMask = Is64Bit ? 0x3 : 0x1;
1530 NumVDataDwords = Is64Bit ? 2 : 1;
1531 }
1532 } else {
1533 DMask = MI.getOperand(ArgOffset + Intr->DMaskIndex).getImm();
1534 DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);
1535
1536 // One memoperand is mandatory, except for getresinfo.
1537 // FIXME: Check this in verifier.
1538 if (!MI.memoperands_empty()) {
1539 const MachineMemOperand *MMO = *MI.memoperands_begin();
1540
1541 // Infer d16 from the memory size, as the register type will be mangled by
1542 // unpacked subtargets, or by TFE.
1543 IsD16 = ((8 * MMO->getSize()) / DMaskLanes) < 32;
1544 }
1545
1546 if (BaseOpcode->Store) {
1547 VDataIn = MI.getOperand(1).getReg();
1548 VDataTy = MRI->getType(VDataIn);
1549 NumVDataDwords = (VDataTy.getSizeInBits() + 31) / 32;
1550 } else {
1551 VDataOut = MI.getOperand(0).getReg();
1552 VDataTy = MRI->getType(VDataOut);
1553 NumVDataDwords = DMaskLanes;
1554
1555 if (IsD16 && !STI.hasUnpackedD16VMem())
1556 NumVDataDwords = (DMaskLanes + 1) / 2;
1557 }
1558 }
1559
1560 // Optimize _L to _LZ when _L is zero
1561 if (LZMappingInfo) {
1562 // The legalizer replaced the register with an immediate 0 if we need to
1563 // change the opcode.
1564 const MachineOperand &Lod = MI.getOperand(ArgOffset + Intr->LodIndex);
1565 if (Lod.isImm()) {
1566 assert(Lod.getImm() == 0)((Lod.getImm() == 0) ? static_cast<void> (0) : __assert_fail
("Lod.getImm() == 0", "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 1566, __PRETTY_FUNCTION__))
;
1567 IntrOpcode = LZMappingInfo->LZ; // set new opcode to _lz variant of _l
1568 }
1569 }
1570
1571 // Optimize _mip away, when 'lod' is zero
1572 if (MIPMappingInfo) {
1573 const MachineOperand &Lod = MI.getOperand(ArgOffset + Intr->MipIndex);
1574 if (Lod.isImm()) {
1575 assert(Lod.getImm() == 0)((Lod.getImm() == 0) ? static_cast<void> (0) : __assert_fail
("Lod.getImm() == 0", "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 1575, __PRETTY_FUNCTION__))
;
1576 IntrOpcode = MIPMappingInfo->NONMIP; // set new opcode to variant without _mip
1577 }
1578 }
1579
1580 // Set G16 opcode
1581 if (IsG16 && !IsA16) {
1582 const AMDGPU::MIMGG16MappingInfo *G16MappingInfo =
1583 AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode);
1584 assert(G16MappingInfo)((G16MappingInfo) ? static_cast<void> (0) : __assert_fail
("G16MappingInfo", "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 1584, __PRETTY_FUNCTION__))
;
1585 IntrOpcode = G16MappingInfo->G16; // set opcode to variant with _g16
1586 }
1587
1588 // TODO: Check this in verifier.
1589 assert((!IsTexFail || DMaskLanes >= 1) && "should have legalized this")(((!IsTexFail || DMaskLanes >= 1) && "should have legalized this"
) ? static_cast<void> (0) : __assert_fail ("(!IsTexFail || DMaskLanes >= 1) && \"should have legalized this\""
, "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 1589, __PRETTY_FUNCTION__))
;
1590
1591 bool GLC = false;
1592 bool SLC = false;
1593 bool DLC = false;
1594 if (BaseOpcode->Atomic) {
1595 GLC = true; // TODO no-return optimization
1596 if (!parseCachePolicy(
1597 MI.getOperand(ArgOffset + Intr->CachePolicyIndex).getImm(), nullptr,
1598 &SLC, IsGFX10Plus ? &DLC : nullptr))
1599 return false;
1600 } else {
1601 if (!parseCachePolicy(
1602 MI.getOperand(ArgOffset + Intr->CachePolicyIndex).getImm(), &GLC,
1603 &SLC, IsGFX10Plus ? &DLC : nullptr))
1604 return false;
1605 }
1606
1607 int NumVAddrRegs = 0;
1608 int NumVAddrDwords = 0;
1609 for (unsigned I = Intr->VAddrStart; I < Intr->VAddrEnd; I++) {
1610 // Skip the $noregs and 0s inserted during legalization.
1611 MachineOperand &AddrOp = MI.getOperand(ArgOffset + I);
1612 if (!AddrOp.isReg())
1613 continue; // XXX - Break?
1614
1615 Register Addr = AddrOp.getReg();
1616 if (!Addr)
1617 break;
1618
1619 ++NumVAddrRegs;
1620 NumVAddrDwords += (MRI->getType(Addr).getSizeInBits() + 31) / 32;
1621 }
1622
1623 // The legalizer preprocessed the intrinsic arguments. If we aren't using
1624 // NSA, these should have beeen packed into a single value in the first
1625 // address register
1626 const bool UseNSA = NumVAddrRegs != 1 && NumVAddrDwords == NumVAddrRegs;
1627 if (UseNSA && !STI.hasFeature(AMDGPU::FeatureNSAEncoding)) {
1628 LLVM_DEBUG(dbgs() << "Trying to use NSA on non-NSA target\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("amdgpu-isel")) { dbgs() << "Trying to use NSA on non-NSA target\n"
; } } while (false)
;
1629 return false;
1630 }
1631
1632 if (IsTexFail)
1633 ++NumVDataDwords;
1634
1635 int Opcode = -1;
1636 if (IsGFX10Plus) {
1637 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
1638 UseNSA ? AMDGPU::MIMGEncGfx10NSA
1639 : AMDGPU::MIMGEncGfx10Default,
1640 NumVDataDwords, NumVAddrDwords);
1641 } else {
1642 if (STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
1643 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
1644 NumVDataDwords, NumVAddrDwords);
1645 if (Opcode == -1)
1646 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
1647 NumVDataDwords, NumVAddrDwords);
1648 }
1649 assert(Opcode != -1)((Opcode != -1) ? static_cast<void> (0) : __assert_fail
("Opcode != -1", "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 1649, __PRETTY_FUNCTION__))
;
1650
1651 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opcode))
1652 .cloneMemRefs(MI);
1653
1654 if (VDataOut) {
1655 if (BaseOpcode->AtomicX2) {
1656 const bool Is64 = MRI->getType(VDataOut).getSizeInBits() == 64;
1657
1658 Register TmpReg = MRI->createVirtualRegister(
1659 Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
1660 unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
1661
1662 MIB.addDef(TmpReg);
1663 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), VDataOut)
1664 .addReg(TmpReg, RegState::Kill, SubReg);
1665
1666 } else {
1667 MIB.addDef(VDataOut); // vdata output
1668 }
1669 }
1670
1671 if (VDataIn)
1672 MIB.addReg(VDataIn); // vdata input
1673
1674 for (int I = 0; I != NumVAddrRegs; ++I) {
1675 MachineOperand &SrcOp = MI.getOperand(ArgOffset + Intr->VAddrStart + I);
1676 if (SrcOp.isReg()) {
1677 assert(SrcOp.getReg() != 0)((SrcOp.getReg() != 0) ? static_cast<void> (0) : __assert_fail
("SrcOp.getReg() != 0", "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 1677, __PRETTY_FUNCTION__))
;
1678 MIB.addReg(SrcOp.getReg());
1679 }
1680 }
1681
1682 MIB.addReg(MI.getOperand(ArgOffset + Intr->RsrcIndex).getReg());
1683 if (BaseOpcode->Sampler)
1684 MIB.addReg(MI.getOperand(ArgOffset + Intr->SampIndex).getReg());
1685
1686 MIB.addImm(DMask); // dmask
1687
1688 if (IsGFX10Plus)
1689 MIB.addImm(DimInfo->Encoding);
1690 MIB.addImm(Unorm);
1691 if (IsGFX10Plus)
1692 MIB.addImm(DLC);
1693
1694 MIB.addImm(GLC);
1695 MIB.addImm(SLC);
1696 MIB.addImm(IsA16 && // a16 or r128
1697 STI.hasFeature(AMDGPU::FeatureR128A16) ? -1 : 0);
1698 if (IsGFX10Plus)
1699 MIB.addImm(IsA16 ? -1 : 0);
1700
1701 MIB.addImm(TFE); // tfe
1702 MIB.addImm(LWE); // lwe
1703 if (!IsGFX10Plus)
1704 MIB.addImm(DimInfo->DA ? -1 : 0);
1705 if (BaseOpcode->HasD16)
1706 MIB.addImm(IsD16 ? -1 : 0);
1707
1708 MI.eraseFromParent();
1709 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1710}
1711
1712bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
1713 MachineInstr &I) const {
1714 unsigned IntrinsicID = I.getIntrinsicID();
1715 switch (IntrinsicID) {
1716 case Intrinsic::amdgcn_end_cf:
1717 return selectEndCfIntrinsic(I);
1718 case Intrinsic::amdgcn_ds_ordered_add:
1719 case Intrinsic::amdgcn_ds_ordered_swap:
1720 return selectDSOrderedIntrinsic(I, IntrinsicID);
1721 case Intrinsic::amdgcn_ds_gws_init:
1722 case Intrinsic::amdgcn_ds_gws_barrier:
1723 case Intrinsic::amdgcn_ds_gws_sema_v:
1724 case Intrinsic::amdgcn_ds_gws_sema_br:
1725 case Intrinsic::amdgcn_ds_gws_sema_p:
1726 case Intrinsic::amdgcn_ds_gws_sema_release_all:
1727 return selectDSGWSIntrinsic(I, IntrinsicID);
1728 case Intrinsic::amdgcn_ds_append:
1729 return selectDSAppendConsume(I, true);
1730 case Intrinsic::amdgcn_ds_consume:
1731 return selectDSAppendConsume(I, false);
1732 case Intrinsic::amdgcn_s_barrier:
1733 return selectSBarrier(I);
1734 case Intrinsic::amdgcn_global_atomic_fadd:
1735 return selectGlobalAtomicFaddIntrinsic(I);
1736 default: {
1737 return selectImpl(I, *CoverageInfo);
1738 }
1739 }
1740}
1741
1742bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
1743 if (selectImpl(I, *CoverageInfo))
1744 return true;
1745
1746 MachineBasicBlock *BB = I.getParent();
1747 const DebugLoc &DL = I.getDebugLoc();
1748
1749 Register DstReg = I.getOperand(0).getReg();
1750 unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
1751 assert(Size <= 32 || Size == 64)((Size <= 32 || Size == 64) ? static_cast<void> (0) :
__assert_fail ("Size <= 32 || Size == 64", "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 1751, __PRETTY_FUNCTION__))
;
1752 const MachineOperand &CCOp = I.getOperand(1);
1753 Register CCReg = CCOp.getReg();
1754 if (!isVCC(CCReg, *MRI)) {
1755 unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
1756 AMDGPU::S_CSELECT_B32;
1757 MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
1758 .addReg(CCReg);
1759
1760 // The generic constrainSelectedInstRegOperands doesn't work for the scc register
1761 // bank, because it does not cover the register class that we used to represent
1762 // for it. So we need to manually set the register class here.
1763 if (!MRI->getRegClassOrNull(CCReg))
1764 MRI->setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, *MRI));
1765 MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
1766 .add(I.getOperand(2))
1767 .add(I.getOperand(3));
1768
1769 bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI) |
1770 constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
1771 I.eraseFromParent();
1772 return Ret;
1773 }
1774
1775 // Wide VGPR select should have been split in RegBankSelect.
1776 if (Size > 32)
1777 return false;
1778
1779 MachineInstr *Select =
1780 BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1781 .addImm(0)
1782 .add(I.getOperand(3))
1783 .addImm(0)
1784 .add(I.getOperand(2))
1785 .add(I.getOperand(1));
1786
1787 bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1788 I.eraseFromParent();
1789 return Ret;
1790}
1791
1792static int sizeToSubRegIndex(unsigned Size) {
1793 switch (Size) {
1794 case 32:
1795 return AMDGPU::sub0;
1796 case 64:
1797 return AMDGPU::sub0_sub1;
1798 case 96:
1799 return AMDGPU::sub0_sub1_sub2;
1800 case 128:
1801 return AMDGPU::sub0_sub1_sub2_sub3;
1802 case 256:
1803 return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
1804 default:
1805 if (Size < 32)
1806 return AMDGPU::sub0;
1807 if (Size > 256)
1808 return -1;
1809 return sizeToSubRegIndex(PowerOf2Ceil(Size));
1810 }
1811}
1812
1813bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
1814 Register DstReg = I.getOperand(0).getReg();
1815 Register SrcReg = I.getOperand(1).getReg();
1816 const LLT DstTy = MRI->getType(DstReg);
1817 const LLT SrcTy = MRI->getType(SrcReg);
1818 const LLT S1 = LLT::scalar(1);
1819
1820 const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1821 const RegisterBank *DstRB;
1822 if (DstTy == S1) {
1823 // This is a special case. We don't treat s1 for legalization artifacts as
1824 // vcc booleans.
1825 DstRB = SrcRB;
1826 } else {
1827 DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1828 if (SrcRB != DstRB)
1829 return false;
1830 }
1831
1832 const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
1833
1834 unsigned DstSize = DstTy.getSizeInBits();
1835 unsigned SrcSize = SrcTy.getSizeInBits();
1836
1837 const TargetRegisterClass *SrcRC
1838 = TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB, *MRI);
1839 const TargetRegisterClass *DstRC
1840 = TRI.getRegClassForSizeOnBank(DstSize, *DstRB, *MRI);
1841 if (!SrcRC || !DstRC)
1842 return false;
1843
1844 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
1845 !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
1846 LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("amdgpu-isel")) { dbgs() << "Failed to constrain G_TRUNC\n"
; } } while (false)
;
1847 return false;
1848 }
1849
1850 if (DstTy == LLT::vector(2, 16) && SrcTy == LLT::vector(2, 32)) {
1851 MachineBasicBlock *MBB = I.getParent();
1852 const DebugLoc &DL = I.getDebugLoc();
1853
1854 Register LoReg = MRI->createVirtualRegister(DstRC);
1855 Register HiReg = MRI->createVirtualRegister(DstRC);
1856 BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), LoReg)
1857 .addReg(SrcReg, 0, AMDGPU::sub0);
1858 BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), HiReg)
1859 .addReg(SrcReg, 0, AMDGPU::sub1);
1860
1861 if (IsVALU && STI.hasSDWA()) {
1862 // Write the low 16-bits of the high element into the high 16-bits of the
1863 // low element.
1864 MachineInstr *MovSDWA =
1865 BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
1866 .addImm(0) // $src0_modifiers
1867 .addReg(HiReg) // $src0
1868 .addImm(0) // $clamp
1869 .addImm(AMDGPU::SDWA::WORD_1) // $dst_sel
1870 .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
1871 .addImm(AMDGPU::SDWA::WORD_0) // $src0_sel
1872 .addReg(LoReg, RegState::Implicit);
1873 MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
1874 } else {
1875 Register TmpReg0 = MRI->createVirtualRegister(DstRC);
1876 Register TmpReg1 = MRI->createVirtualRegister(DstRC);
1877 Register ImmReg = MRI->createVirtualRegister(DstRC);
1878 if (IsVALU) {
1879 BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), TmpReg0)
1880 .addImm(16)
1881 .addReg(HiReg);
1882 } else {
1883 BuildMI(*MBB, I, DL, TII.get(AMDGPU::S_LSHL_B32), TmpReg0)
1884 .addReg(HiReg)
1885 .addImm(16);
1886 }
1887
1888 unsigned MovOpc = IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
1889 unsigned AndOpc = IsVALU ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
1890 unsigned OrOpc = IsVALU ? AMDGPU::V_OR_B32_e64 : AMDGPU::S_OR_B32;
1891
1892 BuildMI(*MBB, I, DL, TII.get(MovOpc), ImmReg)
1893 .addImm(0xffff);
1894 BuildMI(*MBB, I, DL, TII.get(AndOpc), TmpReg1)
1895 .addReg(LoReg)
1896 .addReg(ImmReg);
1897 BuildMI(*MBB, I, DL, TII.get(OrOpc), DstReg)
1898 .addReg(TmpReg0)
1899 .addReg(TmpReg1);
1900 }
1901
1902 I.eraseFromParent();
1903 return true;
1904 }
1905
1906 if (!DstTy.isScalar())
1907 return false;
1908
1909 if (SrcSize > 32) {
1910 int SubRegIdx = sizeToSubRegIndex(DstSize);
1911 if (SubRegIdx == -1)
1912 return false;
1913
1914 // Deal with weird cases where the class only partially supports the subreg
1915 // index.
1916 const TargetRegisterClass *SrcWithSubRC
1917 = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx);
1918 if (!SrcWithSubRC)
1919 return false;
1920
1921 if (SrcWithSubRC != SrcRC) {
1922 if (!RBI.constrainGenericRegister(SrcReg, *SrcWithSubRC, *MRI))
1923 return false;
1924 }
1925
1926 I.getOperand(1).setSubReg(SubRegIdx);
1927 }
1928
1929 I.setDesc(TII.get(TargetOpcode::COPY));
1930 return true;
1931}
1932
1933/// \returns true if a bitmask for \p Size bits will be an inline immediate.
1934static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
1935 Mask = maskTrailingOnes<unsigned>(Size);
1936 int SignedMask = static_cast<int>(Mask);
1937 return SignedMask >= -16 && SignedMask <= 64;
1938}
1939
1940// Like RegisterBankInfo::getRegBank, but don't assume vcc for s1.
1941const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank(
1942 Register Reg, const MachineRegisterInfo &MRI,
1943 const TargetRegisterInfo &TRI) const {
1944 const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
1945 if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>())
1946 return RB;
1947
1948 // Ignore the type, since we don't use vcc in artifacts.
1949 if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>())
1950 return &RBI.getRegBankFromRegClass(*RC, LLT());
1951 return nullptr;
1952}
1953
1954bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
1955 bool InReg = I.getOpcode() == AMDGPU::G_SEXT_INREG;
1956 bool Signed = I.getOpcode() == AMDGPU::G_SEXT || InReg;
1957 const DebugLoc &DL = I.getDebugLoc();
1958 MachineBasicBlock &MBB = *I.getParent();
1959 const Register DstReg = I.getOperand(0).getReg();
1960 const Register SrcReg = I.getOperand(1).getReg();
1961
1962 const LLT DstTy = MRI->getType(DstReg);
1963 const LLT SrcTy = MRI->getType(SrcReg);
1964 const unsigned SrcSize = I.getOpcode() == AMDGPU::G_SEXT_INREG ?
1965 I.getOperand(2).getImm() : SrcTy.getSizeInBits();
1966 const unsigned DstSize = DstTy.getSizeInBits();
1967 if (!DstTy.isScalar())
1968 return false;
1969
1970 // Artifact casts should never use vcc.
1971 const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI);
1972
1973 // FIXME: This should probably be illegal and split earlier.
1974 if (I.getOpcode() == AMDGPU::G_ANYEXT) {
1975 if (DstSize <= 32)
1976 return selectCOPY(I);
1977
1978 const TargetRegisterClass *SrcRC =
1979 TRI.getRegClassForTypeOnBank(SrcTy, *SrcBank, *MRI);
1980 const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
1981 const TargetRegisterClass *DstRC =
1982 TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
1983
1984 Register UndefReg = MRI->createVirtualRegister(SrcRC);
1985 BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
1986 BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
1987 .addReg(SrcReg)
1988 .addImm(AMDGPU::sub0)
1989 .addReg(UndefReg)
1990 .addImm(AMDGPU::sub1);
1991 I.eraseFromParent();
1992
1993 return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) &&
1994 RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI);
1995 }
1996
1997 if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
1998 // 64-bit should have been split up in RegBankSelect
1999
2000 // Try to use an and with a mask if it will save code size.
2001 unsigned Mask;
2002 if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2003 MachineInstr *ExtI =
2004 BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
2005 .addImm(Mask)
2006 .addReg(SrcReg);
2007 I.eraseFromParent();
2008 return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2009 }
2010
2011 const unsigned BFE = Signed ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
2012 MachineInstr *ExtI =
2013 BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
2014 .addReg(SrcReg)
2015 .addImm(0) // Offset
2016 .addImm(SrcSize); // Width
2017 I.eraseFromParent();
2018 return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2019 }
2020
2021 if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) {
2022 const TargetRegisterClass &SrcRC = InReg && DstSize > 32 ?
2023 AMDGPU::SReg_64RegClass : AMDGPU::SReg_32RegClass;
2024 if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI))
2025 return false;
2026
2027 if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) {
2028 const unsigned SextOpc = SrcSize == 8 ?
2029 AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16;
2030 BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg)
2031 .addReg(SrcReg);
2032 I.eraseFromParent();
2033 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2034 }
2035
2036 const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64;
2037 const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
2038
2039 // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16]= width.
2040 if (DstSize > 32 && (SrcSize <= 32 || InReg)) {
2041 // We need a 64-bit register source, but the high bits don't matter.
2042 Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
2043 Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2044 unsigned SubReg = InReg ? AMDGPU::sub0 : 0;
2045
2046 BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2047 BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
2048 .addReg(SrcReg, 0, SubReg)
2049 .addImm(AMDGPU::sub0)
2050 .addReg(UndefReg)
2051 .addImm(AMDGPU::sub1);
2052
2053 BuildMI(MBB, I, DL, TII.get(BFE64), DstReg)
2054 .addReg(ExtReg)
2055 .addImm(SrcSize << 16);
2056
2057 I.eraseFromParent();
2058 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI);
2059 }
2060
2061 unsigned Mask;
2062 if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2063 BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
2064 .addReg(SrcReg)
2065 .addImm(Mask);
2066 } else {
2067 BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
2068 .addReg(SrcReg)
2069 .addImm(SrcSize << 16);
2070 }
2071
2072 I.eraseFromParent();
2073 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2074 }
2075
2076 return false;
2077}
2078
2079bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
2080 MachineBasicBlock *BB = I.getParent();
2081 MachineOperand &ImmOp = I.getOperand(1);
2082 Register DstReg = I.getOperand(0).getReg();
2083 unsigned Size = MRI->getType(DstReg).getSizeInBits();
2084
2085 // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
2086 if (ImmOp.isFPImm()) {
2087 const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
2088 ImmOp.ChangeToImmediate(Imm.getZExtValue());
2089 } else if (ImmOp.isCImm()) {
2090 ImmOp.ChangeToImmediate(ImmOp.getCImm()->getSExtValue());
2091 } else {
2092 llvm_unreachable("Not supported by g_constants")::llvm::llvm_unreachable_internal("Not supported by g_constants"
, "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 2092)
;
2093 }
2094
2095 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2096 const bool IsSgpr = DstRB->getID() == AMDGPU::SGPRRegBankID;
2097
2098 unsigned Opcode;
2099 if (DstRB->getID() == AMDGPU::VCCRegBankID) {
2100 Opcode = STI.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
2101 } else {
2102 Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
2103
2104 // We should never produce s1 values on banks other than VCC. If the user of
2105 // this already constrained the register, we may incorrectly think it's VCC
2106 // if it wasn't originally.
2107 if (Size == 1)
2108 return false;
2109 }
2110
2111 if (Size != 64) {
2112 I.setDesc(TII.get(Opcode));
2113 I.addImplicitDefUseOperands(*MF);
2114 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2115 }
2116
2117 const DebugLoc &DL = I.getDebugLoc();
2118
2119 APInt Imm(Size, I.getOperand(1).getImm());
2120
2121 MachineInstr *ResInst;
2122 if (IsSgpr && TII.isInlineConstant(Imm)) {
2123 ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
2124 .addImm(I.getOperand(1).getImm());
2125 } else {
2126 const TargetRegisterClass *RC = IsSgpr ?
2127 &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass;
2128 Register LoReg = MRI->createVirtualRegister(RC);
2129 Register HiReg = MRI->createVirtualRegister(RC);
2130
2131 BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
2132 .addImm(Imm.trunc(32).getZExtValue());
2133
2134 BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
2135 .addImm(Imm.ashr(32).getZExtValue());
2136
2137 ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2138 .addReg(LoReg)
2139 .addImm(AMDGPU::sub0)
2140 .addReg(HiReg)
2141 .addImm(AMDGPU::sub1);
2142 }
2143
2144 // We can't call constrainSelectedInstRegOperands here, because it doesn't
2145 // work for target independent opcodes
2146 I.eraseFromParent();
2147 const TargetRegisterClass *DstRC =
2148 TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI);
2149 if (!DstRC)
2150 return true;
2151 return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);
2152}
2153
2154bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const {
2155 // Only manually handle the f64 SGPR case.
2156 //
2157 // FIXME: This is a workaround for 2.5 different tablegen problems. Because
2158 // the bit ops theoretically have a second result due to the implicit def of
2159 // SCC, the GlobalISelEmitter is overly conservative and rejects it. Fixing
2160 // that is easy by disabling the check. The result works, but uses a
2161 // nonsensical sreg32orlds_and_sreg_1 regclass.
2162 //
2163 // The DAG emitter is more problematic, and incorrectly adds both S_XOR_B32 to
2164 // the variadic REG_SEQUENCE operands.
2165
2166 Register Dst = MI.getOperand(0).getReg();
2167 const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2168 if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2169 MRI->getType(Dst) != LLT::scalar(64))
2170 return false;
2171
2172 Register Src = MI.getOperand(1).getReg();
2173 MachineInstr *Fabs = getOpcodeDef(TargetOpcode::G_FABS, Src, *MRI);
2174 if (Fabs)
2175 Src = Fabs->getOperand(1).getReg();
2176
2177 if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2178 !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2179 return false;
2180
2181 MachineBasicBlock *BB = MI.getParent();
2182 const DebugLoc &DL = MI.getDebugLoc();
2183 Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2184 Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2185 Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2186 Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2187
2188 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2189 .addReg(Src, 0, AMDGPU::sub0);
2190 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2191 .addReg(Src, 0, AMDGPU::sub1);
2192 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2193 .addImm(0x80000000);
2194
2195 // Set or toggle sign bit.
2196 unsigned Opc = Fabs ? AMDGPU::S_OR_B32 : AMDGPU::S_XOR_B32;
2197 BuildMI(*BB, &MI, DL, TII.get(Opc), OpReg)
2198 .addReg(HiReg)
2199 .addReg(ConstReg);
2200 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2201 .addReg(LoReg)
2202 .addImm(AMDGPU::sub0)
2203 .addReg(OpReg)
2204 .addImm(AMDGPU::sub1);
2205 MI.eraseFromParent();
2206 return true;
2207}
2208
2209// FIXME: This is a workaround for the same tablegen problems as G_FNEG
2210bool AMDGPUInstructionSelector::selectG_FABS(MachineInstr &MI) const {
2211 Register Dst = MI.getOperand(0).getReg();
2212 const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2213 if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2214 MRI->getType(Dst) != LLT::scalar(64))
2215 return false;
2216
2217 Register Src = MI.getOperand(1).getReg();
2218 MachineBasicBlock *BB = MI.getParent();
2219 const DebugLoc &DL = MI.getDebugLoc();
2220 Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2221 Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2222 Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2223 Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2224
2225 if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2226 !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2227 return false;
2228
2229 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2230 .addReg(Src, 0, AMDGPU::sub0);
2231 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2232 .addReg(Src, 0, AMDGPU::sub1);
2233 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2234 .addImm(0x7fffffff);
2235
2236 // Clear sign bit.
2237 // TODO: Should this used S_BITSET0_*?
2238 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_AND_B32), OpReg)
2239 .addReg(HiReg)
2240 .addReg(ConstReg);
2241 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2242 .addReg(LoReg)
2243 .addImm(AMDGPU::sub0)
2244 .addReg(OpReg)
2245 .addImm(AMDGPU::sub1);
2246
2247 MI.eraseFromParent();
2248 return true;
2249}
2250
2251static bool isConstant(const MachineInstr &MI) {
2252 return MI.getOpcode() == TargetOpcode::G_CONSTANT;
2253}
2254
2255void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
2256 const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {
2257
2258 const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());
2259
2260 assert(PtrMI)((PtrMI) ? static_cast<void> (0) : __assert_fail ("PtrMI"
, "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 2260, __PRETTY_FUNCTION__))
;
2261
2262 if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD)
2263 return;
2264
2265 GEPInfo GEPInfo(*PtrMI);
2266
2267 for (unsigned i = 1; i != 3; ++i) {
2268 const MachineOperand &GEPOp = PtrMI->getOperand(i);
2269 const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
2270 assert(OpDef)((OpDef) ? static_cast<void> (0) : __assert_fail ("OpDef"
, "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 2270, __PRETTY_FUNCTION__))
;
2271 if (i == 2 && isConstant(*OpDef)) {
2272 // TODO: Could handle constant base + variable offset, but a combine
2273 // probably should have commuted it.
2274 assert(GEPInfo.Imm == 0)((GEPInfo.Imm == 0) ? static_cast<void> (0) : __assert_fail
("GEPInfo.Imm == 0", "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 2274, __PRETTY_FUNCTION__))
;
2275 GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
2276 continue;
2277 }
2278 const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
2279 if (OpBank->getID() == AMDGPU::SGPRRegBankID)
2280 GEPInfo.SgprParts.push_back(GEPOp.getReg());
2281 else
2282 GEPInfo.VgprParts.push_back(GEPOp.getReg());
2283 }
2284
2285 AddrInfo.push_back(GEPInfo);
2286 getAddrModeInfo(*PtrMI, MRI, AddrInfo);
2287}
2288
2289bool AMDGPUInstructionSelector::isSGPR(Register Reg) const {
2290 return RBI.getRegBank(Reg, *MRI, TRI)->getID() == AMDGPU::SGPRRegBankID;
2291}
2292
2293bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
2294 if (!MI.hasOneMemOperand())
2295 return false;
2296
2297 const MachineMemOperand *MMO = *MI.memoperands_begin();
2298 const Value *Ptr = MMO->getValue();
2299
2300 // UndefValue means this is a load of a kernel input. These are uniform.
2301 // Sometimes LDS instructions have constant pointers.
2302 // If Ptr is null, then that means this mem operand contains a
2303 // PseudoSourceValue like GOT.
2304 if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
2305 isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
2306 return true;
2307
2308 if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
2309 return true;
2310
2311 const Instruction *I = dyn_cast<Instruction>(Ptr);
2312 return I && I->getMetadata("amdgpu.uniform");
2313}
2314
2315bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
2316 for (const GEPInfo &GEPInfo : AddrInfo) {
2317 if (!GEPInfo.VgprParts.empty())
2318 return true;
2319 }
2320 return false;
2321}
2322
2323void AMDGPUInstructionSelector::initM0(MachineInstr &I) const {
2324 const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2325 unsigned AS = PtrTy.getAddressSpace();
2326 if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) &&
2327 STI.ldsRequiresM0Init()) {
2328 MachineBasicBlock *BB = I.getParent();
2329
2330 // If DS instructions require M0 initializtion, insert it before selecting.
2331 BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2332 .addImm(-1);
2333 }
2334}
2335
2336bool AMDGPUInstructionSelector::selectG_LOAD_STORE_ATOMICRMW(
2337 MachineInstr &I) const {
2338 initM0(I);
2339 return selectImpl(I, *CoverageInfo);
2340}
2341
2342// TODO: No rtn optimization.
2343bool AMDGPUInstructionSelector::selectG_AMDGPU_ATOMIC_CMPXCHG(
2344 MachineInstr &MI) const {
2345 Register PtrReg = MI.getOperand(1).getReg();
2346 const LLT PtrTy = MRI->getType(PtrReg);
2347 if (PtrTy.getAddressSpace() == AMDGPUAS::FLAT_ADDRESS ||
2348 STI.useFlatForGlobal())
2349 return selectImpl(MI, *CoverageInfo);
2350
2351 Register DstReg = MI.getOperand(0).getReg();
2352 const LLT Ty = MRI->getType(DstReg);
2353 const bool Is64 = Ty.getSizeInBits() == 64;
2354 const unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
2355 Register TmpReg = MRI->createVirtualRegister(
2356 Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
2357
2358 const DebugLoc &DL = MI.getDebugLoc();
2359 MachineBasicBlock *BB = MI.getParent();
2360
2361 Register VAddr, RSrcReg, SOffset;
2362 int64_t Offset = 0;
2363
2364 unsigned Opcode;
2365 if (selectMUBUFOffsetImpl(MI.getOperand(1), RSrcReg, SOffset, Offset)) {
2366 Opcode = Is64 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_OFFSET_RTN :
2367 AMDGPU::BUFFER_ATOMIC_CMPSWAP_OFFSET_RTN;
2368 } else if (selectMUBUFAddr64Impl(MI.getOperand(1), VAddr,
2369 RSrcReg, SOffset, Offset)) {
2370 Opcode = Is64 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_ADDR64_RTN :
2371 AMDGPU::BUFFER_ATOMIC_CMPSWAP_ADDR64_RTN;
2372 } else
2373 return selectImpl(MI, *CoverageInfo);
2374
2375 auto MIB = BuildMI(*BB, &MI, DL, TII.get(Opcode), TmpReg)
2376 .addReg(MI.getOperand(2).getReg());
2377
2378 if (VAddr)
2379 MIB.addReg(VAddr);
2380
2381 MIB.addReg(RSrcReg);
2382 if (SOffset)
2383 MIB.addReg(SOffset);
2384 else
2385 MIB.addImm(0);
2386
2387 MIB.addImm(Offset);
2388 MIB.addImm(1); // glc
2389 MIB.addImm(0); // slc
2390 MIB.cloneMemRefs(MI);
2391
2392 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), DstReg)
2393 .addReg(TmpReg, RegState::Kill, SubReg);
2394
2395 MI.eraseFromParent();
2396
2397 MRI->setRegClass(
2398 DstReg, Is64 ? &AMDGPU::VReg_64RegClass : &AMDGPU::VGPR_32RegClass);
2399 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
2400}
2401
2402bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
2403 MachineBasicBlock *BB = I.getParent();
2404 MachineOperand &CondOp = I.getOperand(0);
2405 Register CondReg = CondOp.getReg();
2406 const DebugLoc &DL = I.getDebugLoc();
2407
2408 unsigned BrOpcode;
2409 Register CondPhysReg;
2410 const TargetRegisterClass *ConstrainRC;
2411
2412 // In SelectionDAG, we inspect the IR block for uniformity metadata to decide
2413 // whether the branch is uniform when selecting the instruction. In
2414 // GlobalISel, we should push that decision into RegBankSelect. Assume for now
2415 // RegBankSelect knows what it's doing if the branch condition is scc, even
2416 // though it currently does not.
2417 if (!isVCC(CondReg, *MRI)) {
2418 if (MRI->getType(CondReg) != LLT::scalar(32))
2419 return false;
2420
2421 CondPhysReg = AMDGPU::SCC;
2422 BrOpcode = AMDGPU::S_CBRANCH_SCC1;
2423 ConstrainRC = &AMDGPU::SReg_32RegClass;
2424 } else {
2425 // FIXME: Do we have to insert an and with exec here, like in SelectionDAG?
2426 // We sort of know that a VCC producer based on the register bank, that ands
2427 // inactive lanes with 0. What if there was a logical operation with vcc
2428 // producers in different blocks/with different exec masks?
2429 // FIXME: Should scc->vcc copies and with exec?
2430 CondPhysReg = TRI.getVCC();
2431 BrOpcode = AMDGPU::S_CBRANCH_VCCNZ;
2432 ConstrainRC = TRI.getBoolRC();
2433 }
2434
2435 if (!MRI->getRegClassOrNull(CondReg))
2436 MRI->setRegClass(CondReg, ConstrainRC);
2437
2438 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
2439 .addReg(CondReg);
2440 BuildMI(*BB, &I, DL, TII.get(BrOpcode))
2441 .addMBB(I.getOperand(1).getMBB());
2442
2443 I.eraseFromParent();
2444 return true;
2445}
2446
2447bool AMDGPUInstructionSelector::selectG_GLOBAL_VALUE(
2448 MachineInstr &I) const {
2449 Register DstReg = I.getOperand(0).getReg();
2450 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2451 const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2452 I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32));
2453 if (IsVGPR)
2454 I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
2455
2456 return RBI.constrainGenericRegister(
2457 DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI);
2458}
2459
2460bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const {
2461 Register DstReg = I.getOperand(0).getReg();
2462 Register SrcReg = I.getOperand(1).getReg();
2463 Register MaskReg = I.getOperand(2).getReg();
2464 LLT Ty = MRI->getType(DstReg);
2465 LLT MaskTy = MRI->getType(MaskReg);
2466
2467 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2468 const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2469 const RegisterBank *MaskRB = RBI.getRegBank(MaskReg, *MRI, TRI);
2470 const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2471 if (DstRB != SrcRB) // Should only happen for hand written MIR.
2472 return false;
2473
2474 unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
2475 const TargetRegisterClass &RegRC
2476 = IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2477
2478 const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB,
2479 *MRI);
2480 const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB,
2481 *MRI);
2482 const TargetRegisterClass *MaskRC =
2483 TRI.getRegClassForTypeOnBank(MaskTy, *MaskRB, *MRI);
2484
2485 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2486 !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2487 !RBI.constrainGenericRegister(MaskReg, *MaskRC, *MRI))
2488 return false;
2489
2490 MachineBasicBlock *BB = I.getParent();
2491 const DebugLoc &DL = I.getDebugLoc();
2492 if (Ty.getSizeInBits() == 32) {
2493 assert(MaskTy.getSizeInBits() == 32 &&((MaskTy.getSizeInBits() == 32 && "ptrmask should have been narrowed during legalize"
) ? static_cast<void> (0) : __assert_fail ("MaskTy.getSizeInBits() == 32 && \"ptrmask should have been narrowed during legalize\""
, "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 2494, __PRETTY_FUNCTION__))
2494 "ptrmask should have been narrowed during legalize")((MaskTy.getSizeInBits() == 32 && "ptrmask should have been narrowed during legalize"
) ? static_cast<void> (0) : __assert_fail ("MaskTy.getSizeInBits() == 32 && \"ptrmask should have been narrowed during legalize\""
, "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 2494, __PRETTY_FUNCTION__))
;
2495
2496 BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg)
2497 .addReg(SrcReg)
2498 .addReg(MaskReg);
2499 I.eraseFromParent();
2500 return true;
2501 }
2502
2503 Register HiReg = MRI->createVirtualRegister(&RegRC);
2504 Register LoReg = MRI->createVirtualRegister(&RegRC);
2505
2506 // Extract the subregisters from the source pointer.
2507 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg)
2508 .addReg(SrcReg, 0, AMDGPU::sub0);
2509 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg)
2510 .addReg(SrcReg, 0, AMDGPU::sub1);
2511
2512 Register MaskedLo, MaskedHi;
2513
2514 // Try to avoid emitting a bit operation when we only need to touch half of
2515 // the 64-bit pointer.
2516 APInt MaskOnes = KnownBits->getKnownOnes(MaskReg).zextOrSelf(64);
2517
2518 const APInt MaskHi32 = APInt::getHighBitsSet(64, 32);
2519 const APInt MaskLo32 = APInt::getLowBitsSet(64, 32);
2520 if ((MaskOnes & MaskLo32) == MaskLo32) {
2521 // If all the bits in the low half are 1, we only need a copy for it.
2522 MaskedLo = LoReg;
2523 } else {
2524 // Extract the mask subregister and apply the and.
2525 Register MaskLo = MRI->createVirtualRegister(&RegRC);
2526 MaskedLo = MRI->createVirtualRegister(&RegRC);
2527
2528 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskLo)
2529 .addReg(MaskReg, 0, AMDGPU::sub0);
2530 BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedLo)
2531 .addReg(LoReg)
2532 .addReg(MaskLo);
2533 }
2534
2535 if ((MaskOnes & MaskHi32) == MaskHi32) {
2536 // If all the bits in the high half are 1, we only need a copy for it.
2537 MaskedHi = HiReg;
2538 } else {
2539 Register MaskHi = MRI->createVirtualRegister(&RegRC);
2540 MaskedHi = MRI->createVirtualRegister(&RegRC);
2541
2542 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskHi)
2543 .addReg(MaskReg, 0, AMDGPU::sub1);
2544 BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedHi)
2545 .addReg(HiReg)
2546 .addReg(MaskHi);
2547 }
2548
2549 BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2550 .addReg(MaskedLo)
2551 .addImm(AMDGPU::sub0)
2552 .addReg(MaskedHi)
2553 .addImm(AMDGPU::sub1);
2554 I.eraseFromParent();
2555 return true;
2556}
2557
2558/// Return the register to use for the index value, and the subregister to use
2559/// for the indirectly accessed register.
2560static std::pair<Register, unsigned>
2561computeIndirectRegIndex(MachineRegisterInfo &MRI,
2562 const SIRegisterInfo &TRI,
2563 const TargetRegisterClass *SuperRC,
2564 Register IdxReg,
2565 unsigned EltSize) {
2566 Register IdxBaseReg;
2567 int Offset;
2568
2569 std::tie(IdxBaseReg, Offset) = AMDGPU::getBaseWithConstantOffset(MRI, IdxReg);
2570 if (IdxBaseReg == AMDGPU::NoRegister) {
2571 // This will happen if the index is a known constant. This should ordinarily
2572 // be legalized out, but handle it as a register just in case.
2573 assert(Offset == 0)((Offset == 0) ? static_cast<void> (0) : __assert_fail (
"Offset == 0", "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 2573, __PRETTY_FUNCTION__))
;
2574 IdxBaseReg = IdxReg;
2575 }
2576
2577 ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SuperRC, EltSize);
2578
2579 // Skip out of bounds offsets, or else we would end up using an undefined
2580 // register.
2581 if (static_cast<unsigned>(Offset) >= SubRegs.size())
2582 return std::make_pair(IdxReg, SubRegs[0]);
2583 return std::make_pair(IdxBaseReg, SubRegs[Offset]);
2584}
2585
2586bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT(
2587 MachineInstr &MI) const {
2588 Register DstReg = MI.getOperand(0).getReg();
2589 Register SrcReg = MI.getOperand(1).getReg();
2590 Register IdxReg = MI.getOperand(2).getReg();
2591
2592 LLT DstTy = MRI->getType(DstReg);
2593 LLT SrcTy = MRI->getType(SrcReg);
2594
2595 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2596 const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2597 const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2598
2599 // The index must be scalar. If it wasn't RegBankSelect should have moved this
2600 // into a waterfall loop.
2601 if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2602 return false;
2603
2604 const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB,
2605 *MRI);
2606 const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(DstTy, *DstRB,
2607 *MRI);
2608 if (!SrcRC || !DstRC)
2609 return false;
2610 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2611 !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2612 !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2613 return false;
2614
2615 MachineBasicBlock *BB = MI.getParent();
2616 const DebugLoc &DL = MI.getDebugLoc();
2617 const bool Is64 = DstTy.getSizeInBits() == 64;
2618
2619 unsigned SubReg;
2620 std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, SrcRC, IdxReg,
2621 DstTy.getSizeInBits() / 8);
2622
2623 if (SrcRB->getID() == AMDGPU::SGPRRegBankID) {
2624 if (DstTy.getSizeInBits() != 32 && !Is64)
2625 return false;
2626
2627 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2628 .addReg(IdxReg);
2629
2630 unsigned Opc = Is64 ? AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32;
2631 BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg)
2632 .addReg(SrcReg, 0, SubReg)
2633 .addReg(SrcReg, RegState::Implicit);
2634 MI.eraseFromParent();
2635 return true;
2636 }
2637
2638 if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32)
2639 return false;
2640
2641 if (!STI.useVGPRIndexMode()) {
2642 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2643 .addReg(IdxReg);
2644 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg)
2645 .addReg(SrcReg, 0, SubReg)
2646 .addReg(SrcReg, RegState::Implicit);
2647 MI.eraseFromParent();
2648 return true;
2649 }
2650
2651 const MCInstrDesc &GPRIDXDesc =
2652 TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*SrcRC), true);
2653 BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
2654 .addReg(SrcReg)
2655 .addReg(IdxReg)
2656 .addImm(SubReg);
2657
2658 MI.eraseFromParent();
2659 return true;
2660}
2661
2662// TODO: Fold insert_vector_elt (extract_vector_elt) into movrelsd
2663bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT(
2664 MachineInstr &MI) const {
2665 Register DstReg = MI.getOperand(0).getReg();
2666 Register VecReg = MI.getOperand(1).getReg();
2667 Register ValReg = MI.getOperand(2).getReg();
2668 Register IdxReg = MI.getOperand(3).getReg();
2669
2670 LLT VecTy = MRI->getType(DstReg);
2671 LLT ValTy = MRI->getType(ValReg);
2672 unsigned VecSize = VecTy.getSizeInBits();
2673 unsigned ValSize = ValTy.getSizeInBits();
2674
2675 const RegisterBank *VecRB = RBI.getRegBank(VecReg, *MRI, TRI);
2676 const RegisterBank *ValRB = RBI.getRegBank(ValReg, *MRI, TRI);
2677 const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2678
2679 assert(VecTy.getElementType() == ValTy)((VecTy.getElementType() == ValTy) ? static_cast<void> (
0) : __assert_fail ("VecTy.getElementType() == ValTy", "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 2679, __PRETTY_FUNCTION__))
;
2680
2681 // The index must be scalar. If it wasn't RegBankSelect should have moved this
2682 // into a waterfall loop.
2683 if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2684 return false;
2685
2686 const TargetRegisterClass *VecRC = TRI.getRegClassForTypeOnBank(VecTy, *VecRB,
2687 *MRI);
2688 const TargetRegisterClass *ValRC = TRI.getRegClassForTypeOnBank(ValTy, *ValRB,
2689 *MRI);
2690
2691 if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) ||
2692 !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) ||
2693 !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) ||
2694 !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2695 return false;
2696
2697 if (VecRB->getID() == AMDGPU::VGPRRegBankID && ValSize != 32)
2698 return false;
2699
2700 unsigned SubReg;
2701 std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg,
2702 ValSize / 8);
2703
2704 const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID &&
2705 STI.useVGPRIndexMode();
2706
2707 MachineBasicBlock *BB = MI.getParent();
2708 const DebugLoc &DL = MI.getDebugLoc();
2709
2710 if (!IndexMode) {
2711 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2712 .addReg(IdxReg);
2713
2714 const MCInstrDesc &RegWriteOp = TII.getIndirectRegWriteMovRelPseudo(
2715 VecSize, ValSize, VecRB->getID() == AMDGPU::SGPRRegBankID);
2716 BuildMI(*BB, MI, DL, RegWriteOp, DstReg)
2717 .addReg(VecReg)
2718 .addReg(ValReg)
2719 .addImm(SubReg);
2720 MI.eraseFromParent();
2721 return true;
2722 }
2723
2724 const MCInstrDesc &GPRIDXDesc =
2725 TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), false);
2726 BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
2727 .addReg(VecReg)
2728 .addReg(ValReg)
2729 .addReg(IdxReg)
2730 .addImm(SubReg);
2731
2732 MI.eraseFromParent();
2733 return true;
2734}
2735
2736static bool isZeroOrUndef(int X) {
2737 return X == 0 || X == -1;
2738}
2739
2740static bool isOneOrUndef(int X) {
2741 return X == 1 || X == -1;
2742}
2743
2744static bool isZeroOrOneOrUndef(int X) {
2745 return X == 0 || X == 1 || X == -1;
2746}
2747
2748// Normalize a VOP3P shuffle mask to refer to the low/high half of a single
2749// 32-bit register.
2750static Register normalizeVOP3PMask(int NewMask[2], Register Src0, Register Src1,
2751 ArrayRef<int> Mask) {
2752 NewMask[0] = Mask[0];
2753 NewMask[1] = Mask[1];
2754 if (isZeroOrOneOrUndef(Mask[0]) && isZeroOrOneOrUndef(Mask[1]))
2755 return Src0;
2756
2757 assert(NewMask[0] == 2 || NewMask[0] == 3 || NewMask[0] == -1)((NewMask[0] == 2 || NewMask[0] == 3 || NewMask[0] == -1) ? static_cast
<void> (0) : __assert_fail ("NewMask[0] == 2 || NewMask[0] == 3 || NewMask[0] == -1"
, "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 2757, __PRETTY_FUNCTION__))
;
2758 assert(NewMask[1] == 2 || NewMask[1] == 3 || NewMask[1] == -1)((NewMask[1] == 2 || NewMask[1] == 3 || NewMask[1] == -1) ? static_cast
<void> (0) : __assert_fail ("NewMask[1] == 2 || NewMask[1] == 3 || NewMask[1] == -1"
, "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 2758, __PRETTY_FUNCTION__))
;
2759
2760 // Shift the mask inputs to be 0/1;
2761 NewMask[0] = NewMask[0] == -1 ? -1 : NewMask[0] - 2;
2762 NewMask[1] = NewMask[1] == -1 ? -1 : NewMask[1] - 2;
2763 return Src1;
2764}
2765
2766// This is only legal with VOP3P instructions as an aid to op_sel matching.
2767bool AMDGPUInstructionSelector::selectG_SHUFFLE_VECTOR(
2768 MachineInstr &MI) const {
2769 Register DstReg = MI.getOperand(0).getReg();
2770 Register Src0Reg = MI.getOperand(1).getReg();
2771 Register Src1Reg = MI.getOperand(2).getReg();
2772 ArrayRef<int> ShufMask = MI.getOperand(3).getShuffleMask();
2773
2774 const LLT V2S16 = LLT::vector(2, 16);
2775 if (MRI->getType(DstReg) != V2S16 || MRI->getType(Src0Reg) != V2S16)
2776 return false;
2777
2778 if (!AMDGPU::isLegalVOP3PShuffleMask(ShufMask))
2779 return false;
2780
2781 assert(ShufMask.size() == 2)((ShufMask.size() == 2) ? static_cast<void> (0) : __assert_fail
("ShufMask.size() == 2", "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 2781, __PRETTY_FUNCTION__))
;
2782 assert(STI.hasSDWA() && "no target has VOP3P but not SDWA")((STI.hasSDWA() && "no target has VOP3P but not SDWA"
) ? static_cast<void> (0) : __assert_fail ("STI.hasSDWA() && \"no target has VOP3P but not SDWA\""
, "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 2782, __PRETTY_FUNCTION__))
;
2783
2784 MachineBasicBlock *MBB = MI.getParent();
2785 const DebugLoc &DL = MI.getDebugLoc();
2786
2787 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2788 const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
2789 const TargetRegisterClass &RC = IsVALU ?
2790 AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2791
2792 // Handle the degenerate case which should have folded out.
2793 if (ShufMask[0] == -1 && ShufMask[1] == -1) {
2794 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::IMPLICIT_DEF), DstReg);
2795
2796 MI.eraseFromParent();
2797 return RBI.constrainGenericRegister(DstReg, RC, *MRI);
2798 }
2799
2800 // A legal VOP3P mask only reads one of the sources.
2801 int Mask[2];
2802 Register SrcVec = normalizeVOP3PMask(Mask, Src0Reg, Src1Reg, ShufMask);
2803
2804 if (!RBI.constrainGenericRegister(DstReg, RC, *MRI) ||
2805 !RBI.constrainGenericRegister(SrcVec, RC, *MRI))
2806 return false;
2807
2808 // TODO: This also should have been folded out
2809 if (isZeroOrUndef(Mask[0]) && isOneOrUndef(Mask[1])) {
2810 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::COPY), DstReg)
2811 .addReg(SrcVec);
2812
2813 MI.eraseFromParent();
2814 return true;
2815 }
2816
2817 if (Mask[0] == 1 && Mask[1] == -1) {
2818 if (IsVALU) {
2819 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
2820 .addImm(16)
2821 .addReg(SrcVec);
2822 } else {
2823 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
2824 .addReg(SrcVec)
2825 .addImm(16);
2826 }
2827 } else if (Mask[0] == -1 && Mask[1] == 0) {
2828 if (IsVALU) {
2829 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), DstReg)
2830 .addImm(16)
2831 .addReg(SrcVec);
2832 } else {
2833 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHL_B32), DstReg)
2834 .addReg(SrcVec)
2835 .addImm(16);
2836 }
2837 } else if (Mask[0] == 0 && Mask[1] == 0) {
2838 if (IsVALU) {
2839 // Write low half of the register into the high half.
2840 MachineInstr *MovSDWA =
2841 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2842 .addImm(0) // $src0_modifiers
2843 .addReg(SrcVec) // $src0
2844 .addImm(0) // $clamp
2845 .addImm(AMDGPU::SDWA::WORD_1) // $dst_sel
2846 .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2847 .addImm(AMDGPU::SDWA::WORD_0) // $src0_sel
2848 .addReg(SrcVec, RegState::Implicit);
2849 MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2850 } else {
2851 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2852 .addReg(SrcVec)
2853 .addReg(SrcVec);
2854 }
2855 } else if (Mask[0] == 1 && Mask[1] == 1) {
2856 if (IsVALU) {
2857 // Write high half of the register into the low half.
2858 MachineInstr *MovSDWA =
2859 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2860 .addImm(0) // $src0_modifiers
2861 .addReg(SrcVec) // $src0
2862 .addImm(0) // $clamp
2863 .addImm(AMDGPU::SDWA::WORD_0) // $dst_sel
2864 .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2865 .addImm(AMDGPU::SDWA::WORD_1) // $src0_sel
2866 .addReg(SrcVec, RegState::Implicit);
2867 MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2868 } else {
2869 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_HH_B32_B16), DstReg)
2870 .addReg(SrcVec)
2871 .addReg(SrcVec);
2872 }
2873 } else if (Mask[0] == 1 && Mask[1] == 0) {
2874 if (IsVALU) {
2875 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_ALIGNBIT_B32_e64), DstReg)
2876 .addReg(SrcVec)
2877 .addReg(SrcVec)
2878 .addImm(16);
2879 } else {
2880 Register TmpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2881 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), TmpReg)
2882 .addReg(SrcVec)
2883 .addImm(16);
2884 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2885 .addReg(TmpReg)
2886 .addReg(SrcVec);
2887 }
2888 } else
2889 llvm_unreachable("all shuffle masks should be handled")::llvm::llvm_unreachable_internal("all shuffle masks should be handled"
, "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 2889)
;
2890
2891 MI.eraseFromParent();
2892 return true;
2893}
2894
2895bool AMDGPUInstructionSelector::selectAMDGPU_BUFFER_ATOMIC_FADD(
2896 MachineInstr &MI) const {
2897
2898 MachineBasicBlock *MBB = MI.getParent();
2899 const DebugLoc &DL = MI.getDebugLoc();
2900
2901 if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) {
2902 Function &F = MBB->getParent()->getFunction();
2903 DiagnosticInfoUnsupported
2904 NoFpRet(F, "return versions of fp atomics not supported",
2905 MI.getDebugLoc(), DS_Error);
2906 F.getContext().diagnose(NoFpRet);
2907 return false;
2908 }
2909
2910 // FIXME: This is only needed because tablegen requires number of dst operands
2911 // in match and replace pattern to be the same. Otherwise patterns can be
2912 // exported from SDag path.
2913 MachineOperand &VDataIn = MI.getOperand(1);
2914 MachineOperand &VIndex = MI.getOperand(3);
2915 MachineOperand &VOffset = MI.getOperand(4);
2916 MachineOperand &SOffset = MI.getOperand(5);
2917 int16_t Offset = MI.getOperand(6).getImm();
2918
2919 bool HasVOffset = !isOperandImmEqual(VOffset, 0, *MRI);
2920 bool HasVIndex = !isOperandImmEqual(VIndex, 0, *MRI);
2921
2922 unsigned Opcode;
2923 if (HasVOffset) {
2924 Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN
2925 : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN;
2926 } else {
2927 Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN
2928 : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET;
2929 }
2930
2931 if (MRI->getType(VDataIn.getReg()).isVector()) {
2932 switch (Opcode) {
2933 case AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN:
2934 Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN;
2935 break;
2936 case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN:
2937 Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFEN;
2938 break;
2939 case AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN:
2940 Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_IDXEN;
2941 break;
2942 case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET:
2943 Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFSET;
2944 break;
2945 }
2946 }
2947
2948 auto I = BuildMI(*MBB, MI, DL, TII.get(Opcode));
2949 I.add(VDataIn);
2950
2951 if (Opcode == AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN ||
2952 Opcode == AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN) {
2953 Register IdxReg = MRI->createVirtualRegister(&AMDGPU::VReg_64RegClass);
2954 BuildMI(*MBB, &*I, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg)
2955 .addReg(VIndex.getReg())
2956 .addImm(AMDGPU::sub0)
2957 .addReg(VOffset.getReg())
2958 .addImm(AMDGPU::sub1);
2959
2960 I.addReg(IdxReg);
2961 } else if (HasVIndex) {
2962 I.add(VIndex);
2963 } else if (HasVOffset) {
2964 I.add(VOffset);
2965 }
2966
2967 I.add(MI.getOperand(2)); // rsrc
2968 I.add(SOffset);
2969 I.addImm(Offset);
2970 renderExtractSLC(I, MI, 7);
2971 I.cloneMemRefs(MI);
2972
2973 MI.eraseFromParent();
2974
2975 return true;
2976}
2977
2978bool AMDGPUInstructionSelector::selectGlobalAtomicFaddIntrinsic(
2979 MachineInstr &MI) const{
2980
2981 MachineBasicBlock *MBB = MI.getParent();
2982 const DebugLoc &DL = MI.getDebugLoc();
2983
2984 if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) {
2985 Function &F = MBB->getParent()->getFunction();
2986 DiagnosticInfoUnsupported
2987 NoFpRet(F, "return versions of fp atomics not supported",
2988 MI.getDebugLoc(), DS_Error);
2989 F.getContext().diagnose(NoFpRet);
2990 return false;
2991 }
2992
2993 // FIXME: This is only needed because tablegen requires number of dst operands
2994 // in match and replace pattern to be the same. Otherwise patterns can be
2995 // exported from SDag path.
2996 auto Addr = selectFlatOffsetImpl<true>(MI.getOperand(2));
2997
2998 Register Data = MI.getOperand(3).getReg();
2999 const unsigned Opc = MRI->getType(Data).isVector() ?
3000 AMDGPU::GLOBAL_ATOMIC_PK_ADD_F16 : AMDGPU::GLOBAL_ATOMIC_ADD_F32;
3001 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc))
3002 .addReg(Addr.first)
3003 .addReg(Data)
3004 .addImm(Addr.second)
3005 .addImm(0) // SLC
3006 .cloneMemRefs(MI);
3007
3008 MI.eraseFromParent();
3009 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3010}
3011
3012bool AMDGPUInstructionSelector::selectBVHIntrinsic(MachineInstr &MI) const{
3013 MI.setDesc(TII.get(MI.getOperand(1).getImm()));
3014 MI.RemoveOperand(1);
3015 MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
3016 return true;
3017}
3018
3019bool AMDGPUInstructionSelector::select(MachineInstr &I) {
3020 if (I.isPHI())
3021 return selectPHI(I);
3022
3023 if (!I.isPreISelOpcode()) {
3024 if (I.isCopy())
3025 return selectCOPY(I);
3026 return true;
3027 }
3028
3029 switch (I.getOpcode()) {
3030 case TargetOpcode::G_AND:
3031 case TargetOpcode::G_OR:
3032 case TargetOpcode::G_XOR:
3033 if (selectImpl(I, *CoverageInfo))
3034 return true;
3035 return selectG_AND_OR_XOR(I);
3036 case TargetOpcode::G_ADD:
3037 case TargetOpcode::G_SUB:
3038 if (selectImpl(I, *CoverageInfo))
3039 return true;
3040 return selectG_ADD_SUB(I);
3041 case TargetOpcode::G_UADDO:
3042 case TargetOpcode::G_USUBO:
3043 case TargetOpcode::G_UADDE:
3044 case TargetOpcode::G_USUBE:
3045 return selectG_UADDO_USUBO_UADDE_USUBE(I);
3046 case TargetOpcode::G_INTTOPTR:
3047 case TargetOpcode::G_BITCAST:
3048 case TargetOpcode::G_PTRTOINT:
3049 return selectCOPY(I);
3050 case TargetOpcode::G_CONSTANT:
3051 case TargetOpcode::G_FCONSTANT:
3052 return selectG_CONSTANT(I);
3053 case TargetOpcode::G_FNEG:
3054 if (selectImpl(I, *CoverageInfo))
3055 return true;
3056 return selectG_FNEG(I);
3057 case TargetOpcode::G_FABS:
3058 if (selectImpl(I, *CoverageInfo))
3059 return true;
3060 return selectG_FABS(I);
3061 case TargetOpcode::G_EXTRACT:
3062 return selectG_EXTRACT(I);
3063 case TargetOpcode::G_MERGE_VALUES:
3064 case TargetOpcode::G_BUILD_VECTOR:
3065 case TargetOpcode::G_CONCAT_VECTORS:
3066 return selectG_MERGE_VALUES(I);
3067 case TargetOpcode::G_UNMERGE_VALUES:
3068 return selectG_UNMERGE_VALUES(I);
3069 case TargetOpcode::G_BUILD_VECTOR_TRUNC:
3070 return selectG_BUILD_VECTOR_TRUNC(I);
3071 case TargetOpcode::G_PTR_ADD:
3072 return selectG_PTR_ADD(I);
3073 case TargetOpcode::G_IMPLICIT_DEF:
3074 return selectG_IMPLICIT_DEF(I);
3075 case TargetOpcode::G_FREEZE:
3076 return selectCOPY(I);
3077 case TargetOpcode::G_INSERT:
3078 return selectG_INSERT(I);
3079 case TargetOpcode::G_INTRINSIC:
3080 return selectG_INTRINSIC(I);
3081 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
3082 return selectG_INTRINSIC_W_SIDE_EFFECTS(I);
3083 case TargetOpcode::G_ICMP:
3084 if (selectG_ICMP(I))
3085 return true;
3086 return selectImpl(I, *CoverageInfo);
3087 case TargetOpcode::G_LOAD:
3088 case TargetOpcode::G_STORE:
3089 case TargetOpcode::G_ATOMIC_CMPXCHG:
3090 case TargetOpcode::G_ATOMICRMW_XCHG:
3091 case TargetOpcode::G_ATOMICRMW_ADD:
3092 case TargetOpcode::G_ATOMICRMW_SUB:
3093 case TargetOpcode::G_ATOMICRMW_AND:
3094 case TargetOpcode::G_ATOMICRMW_OR:
3095 case TargetOpcode::G_ATOMICRMW_XOR:
3096 case TargetOpcode::G_ATOMICRMW_MIN:
3097 case TargetOpcode::G_ATOMICRMW_MAX:
3098 case TargetOpcode::G_ATOMICRMW_UMIN:
3099 case TargetOpcode::G_ATOMICRMW_UMAX:
3100 case TargetOpcode::G_ATOMICRMW_FADD:
3101 case AMDGPU::G_AMDGPU_ATOMIC_INC:
3102 case AMDGPU::G_AMDGPU_ATOMIC_DEC:
3103 case AMDGPU::G_AMDGPU_ATOMIC_FMIN:
3104 case AMDGPU::G_AMDGPU_ATOMIC_FMAX:
3105 return selectG_LOAD_STORE_ATOMICRMW(I);
3106 case AMDGPU::G_AMDGPU_ATOMIC_CMPXCHG:
3107 return selectG_AMDGPU_ATOMIC_CMPXCHG(I);
3108 case TargetOpcode::G_SELECT:
3109 return selectG_SELECT(I);
3110 case TargetOpcode::G_TRUNC:
3111 return selectG_TRUNC(I);
3112 case TargetOpcode::G_SEXT:
3113 case TargetOpcode::G_ZEXT:
3114 case TargetOpcode::G_ANYEXT:
3115 case TargetOpcode::G_SEXT_INREG:
3116 if (selectImpl(I, *CoverageInfo))
3117 return true;
3118 return selectG_SZA_EXT(I);
3119 case TargetOpcode::G_BRCOND:
3120 return selectG_BRCOND(I);
3121 case TargetOpcode::G_GLOBAL_VALUE:
3122 return selectG_GLOBAL_VALUE(I);
3123 case TargetOpcode::G_PTRMASK:
3124 return selectG_PTRMASK(I);
3125 case TargetOpcode::G_EXTRACT_VECTOR_ELT:
3126 return selectG_EXTRACT_VECTOR_ELT(I);
3127 case TargetOpcode::G_INSERT_VECTOR_ELT:
3128 return selectG_INSERT_VECTOR_ELT(I);
3129 case TargetOpcode::G_SHUFFLE_VECTOR:
3130 return selectG_SHUFFLE_VECTOR(I);
3131 case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD:
3132 case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE: {
3133 const AMDGPU::ImageDimIntrinsicInfo *Intr
3134 = AMDGPU::getImageDimIntrinsicInfo(I.getIntrinsicID());
3135 assert(Intr && "not an image intrinsic with image pseudo")((Intr && "not an image intrinsic with image pseudo")
? static_cast<void> (0) : __assert_fail ("Intr && \"not an image intrinsic with image pseudo\""
, "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 3135, __PRETTY_FUNCTION__))
;
3136 return selectImageIntrinsic(I, Intr);
3137 }
3138 case AMDGPU::G_AMDGPU_INTRIN_BVH_INTERSECT_RAY:
3139 return selectBVHIntrinsic(I);
3140 case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD:
3141 return selectAMDGPU_BUFFER_ATOMIC_FADD(I);
3142 default:
3143 return selectImpl(I, *CoverageInfo);
3144 }
3145 return false;
3146}
3147
3148InstructionSelector::ComplexRendererFns
3149AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
3150 return {{
3151 [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3152 }};
3153
3154}
3155
3156std::pair<Register, unsigned>
3157AMDGPUInstructionSelector::selectVOP3ModsImpl(MachineOperand &Root,
3158 bool AllowAbs) const {
3159 Register Src = Root.getReg();
3160 Register OrigSrc = Src;
3161 unsigned Mods = 0;
3162 MachineInstr *MI = getDefIgnoringCopies(Src, *MRI);
3163
3164 if (MI && MI->getOpcode() == AMDGPU::G_FNEG) {
3165 Src = MI->getOperand(1).getReg();
3166 Mods |= SISrcMods::NEG;
3167 MI = getDefIgnoringCopies(Src, *MRI);
3168 }
3169
3170 if (AllowAbs && MI && MI->getOpcode() == AMDGPU::G_FABS) {
3171 Src = MI->getOperand(1).getReg();
3172 Mods |= SISrcMods::ABS;
3173 }
3174
3175 if (Mods != 0 &&
3176 RBI.getRegBank(Src, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) {
3177 MachineInstr *UseMI = Root.getParent();
3178
3179 // If we looked through copies to find source modifiers on an SGPR operand,
3180 // we now have an SGPR register source. To avoid potentially violating the
3181 // constant bus restriction, we need to insert a copy to a VGPR.
3182 Register VGPRSrc = MRI->cloneVirtualRegister(OrigSrc);
3183 BuildMI(*UseMI->getParent(), UseMI, UseMI->getDebugLoc(),
3184 TII.get(AMDGPU::COPY), VGPRSrc)
3185 .addReg(Src);
3186 Src = VGPRSrc;
3187 }
3188
3189 return std::make_pair(Src, Mods);
3190}
3191
3192///
3193/// This will select either an SGPR or VGPR operand and will save us from
3194/// having to write an extra tablegen pattern.
3195InstructionSelector::ComplexRendererFns
3196AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
3197 return {{
3198 [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3199 }};
3200}
3201
3202InstructionSelector::ComplexRendererFns
3203AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
3204 Register Src;
3205 unsigned Mods;
3206 std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3207
3208 return {{
3209 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3210 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3211 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
3212 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // omod
3213 }};
3214}
3215
3216InstructionSelector::ComplexRendererFns
3217AMDGPUInstructionSelector::selectVOP3BMods0(MachineOperand &Root) const {
3218 Register Src;
3219 unsigned Mods;
3220 std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);
3221
3222 return {{
3223 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3224 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3225 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
3226 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // omod
3227 }};
3228}
3229
3230InstructionSelector::ComplexRendererFns
3231AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
3232 return {{
3233 [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
3234 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
3235 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // omod
3236 }};
3237}
3238
3239InstructionSelector::ComplexRendererFns
3240AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
3241 Register Src;
3242 unsigned Mods;
3243 std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3244
3245 return {{
3246 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3247 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3248 }};
3249}
3250
3251InstructionSelector::ComplexRendererFns
3252AMDGPUInstructionSelector::selectVOP3BMods(MachineOperand &Root) const {
3253 Register Src;
3254 unsigned Mods;
3255 std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);
3256
3257 return {{
3258 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3259 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3260 }};
3261}
3262
3263InstructionSelector::ComplexRendererFns
3264AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const {
3265 Register Reg = Root.getReg();
3266 const MachineInstr *Def = getDefIgnoringCopies(Reg, *MRI);
3267 if (Def && (Def->getOpcode() == AMDGPU::G_FNEG ||
3268 Def->getOpcode() == AMDGPU::G_FABS))
3269 return {};
3270 return {{
3271 [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3272 }};
3273}
3274
3275std::pair<Register, unsigned>
3276AMDGPUInstructionSelector::selectVOP3PModsImpl(
3277 Register Src, const MachineRegisterInfo &MRI) const {
3278 unsigned Mods = 0;
3279 MachineInstr *MI = MRI.getVRegDef(Src);
3280
3281 if (MI && MI->getOpcode() == AMDGPU::G_FNEG &&
3282 // It's possible to see an f32 fneg here, but unlikely.
3283 // TODO: Treat f32 fneg as only high bit.
3284 MRI.getType(Src) == LLT::vector(2, 16)) {
3285 Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
3286 Src = MI->getOperand(1).getReg();
3287 MI = MRI.getVRegDef(Src);
Value stored to 'MI' is never read
3288 }
3289
3290 // TODO: Match op_sel through g_build_vector_trunc and g_shuffle_vector.
3291
3292 // Packed instructions do not have abs modifiers.
3293 Mods |= SISrcMods::OP_SEL_1;
3294
3295 return std::make_pair(Src, Mods);
3296}
3297
3298InstructionSelector::ComplexRendererFns
3299AMDGPUInstructionSelector::selectVOP3PMods(MachineOperand &Root) const {
3300 MachineRegisterInfo &MRI
3301 = Root.getParent()->getParent()->getParent()->getRegInfo();
3302
3303 Register Src;
3304 unsigned Mods;
3305 std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI);
3306
3307 return {{
3308 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3309 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3310 }};
3311}
3312
3313InstructionSelector::ComplexRendererFns
3314AMDGPUInstructionSelector::selectVOP3Mods_nnan(MachineOperand &Root) const {
3315 Register Src;
3316 unsigned Mods;
3317 std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3318 if (!isKnownNeverNaN(Src, *MRI))
3319 return None;
3320
3321 return {{
3322 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3323 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3324 }};
3325}
3326
3327InstructionSelector::ComplexRendererFns
3328AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const {
3329 // FIXME: Handle op_sel
3330 return {{
3331 [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
3332 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // src_mods
3333 }};
3334}
3335
3336InstructionSelector::ComplexRendererFns
3337AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
3338 SmallVector<GEPInfo, 4> AddrInfo;
3339 getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3340
3341 if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3342 return None;
3343
3344 const GEPInfo &GEPInfo = AddrInfo[0];
3345 Optional<int64_t> EncodedImm =
3346 AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm, false);
3347 if (!EncodedImm)
3348 return None;
3349
3350 unsigned PtrReg = GEPInfo.SgprParts[0];
3351 return {{
3352 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3353 [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3354 }};
3355}
3356
3357InstructionSelector::ComplexRendererFns
3358AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
3359 SmallVector<GEPInfo, 4> AddrInfo;
3360 getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3361
3362 if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3363 return None;
3364
3365 const GEPInfo &GEPInfo = AddrInfo[0];
3366 Register PtrReg = GEPInfo.SgprParts[0];
3367 Optional<int64_t> EncodedImm =
3368 AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm);
3369 if (!EncodedImm)
3370 return None;
3371
3372 return {{
3373 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3374 [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3375 }};
3376}
3377
3378InstructionSelector::ComplexRendererFns
3379AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
3380 MachineInstr *MI = Root.getParent();
3381 MachineBasicBlock *MBB = MI->getParent();
3382
3383 SmallVector<GEPInfo, 4> AddrInfo;
3384 getAddrModeInfo(*MI, *MRI, AddrInfo);
3385
3386 // FIXME: We should shrink the GEP if the offset is known to be <= 32-bits,
3387 // then we can select all ptr + 32-bit offsets not just immediate offsets.
3388 if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3389 return None;
3390
3391 const GEPInfo &GEPInfo = AddrInfo[0];
3392 // SGPR offset is unsigned.
3393 if (!GEPInfo.Imm || GEPInfo.Imm < 0 || !isUInt<32>(GEPInfo.Imm))
3394 return None;
3395
3396 // If we make it this far we have a load with an 32-bit immediate offset.
3397 // It is OK to select this using a sgpr offset, because we have already
3398 // failed trying to select this load into one of the _IMM variants since
3399 // the _IMM Patterns are considered before the _SGPR patterns.
3400 Register PtrReg = GEPInfo.SgprParts[0];
3401 Register OffsetReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3402 BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
3403 .addImm(GEPInfo.Imm);
3404 return {{
3405 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3406 [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }
3407 }};
3408}
3409
3410template <bool Signed>
3411std::pair<Register, int>
3412AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root) const {
3413 MachineInstr *MI = Root.getParent();
3414
3415 auto Default = std::make_pair(Root.getReg(), 0);
3416
3417 if (!STI.hasFlatInstOffsets())
3418 return Default;
3419
3420 Register PtrBase;
3421 int64_t ConstOffset;
3422 std::tie(PtrBase, ConstOffset) =
3423 getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3424 if (ConstOffset == 0)
3425 return Default;
3426
3427 unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
3428 if (!TII.isLegalFLATOffset(ConstOffset, AddrSpace, Signed))
3429 return Default;
3430
3431 return std::make_pair(PtrBase, ConstOffset);
3432}
3433
3434InstructionSelector::ComplexRendererFns
3435AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
3436 auto PtrWithOffset = selectFlatOffsetImpl<false>(Root);
3437
3438 return {{
3439 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3440 [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3441 }};
3442}
3443
3444InstructionSelector::ComplexRendererFns
3445AMDGPUInstructionSelector::selectFlatOffsetSigned(MachineOperand &Root) const {
3446 auto PtrWithOffset = selectFlatOffsetImpl<true>(Root);
3447
3448 return {{
3449 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3450 [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3451 }};
3452}
3453
3454/// Match a zero extend from a 32-bit value to 64-bits.
3455static Register matchZeroExtendFromS32(MachineRegisterInfo &MRI, Register Reg) {
3456 Register ZExtSrc;
3457 if (mi_match(Reg, MRI, m_GZExt(m_Reg(ZExtSrc))))
3458 return MRI.getType(ZExtSrc) == LLT::scalar(32) ? ZExtSrc : Register();
3459
3460 // Match legalized form %zext = G_MERGE_VALUES (s32 %x), (s32 0)
3461 const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
3462 if (Def->getOpcode() != AMDGPU::G_MERGE_VALUES)
3463 return false;
3464
3465 if (mi_match(Def->getOperand(2).getReg(), MRI, m_ZeroInt())) {
3466 return Def->getOperand(1).getReg();
3467 }
3468
3469 return Register();
3470}
3471
3472// Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset)
3473InstructionSelector::ComplexRendererFns
3474AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root) const {
3475 Register Addr = Root.getReg();
3476 Register PtrBase;
3477 int64_t ConstOffset;
3478 int64_t ImmOffset = 0;
3479
3480 // Match the immediate offset first, which canonically is moved as low as
3481 // possible.
3482 std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
3483
3484 if (ConstOffset != 0) {
3485 if (TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::GLOBAL_ADDRESS, true)) {
3486 Addr = PtrBase;
3487 ImmOffset = ConstOffset;
3488 } else if (ConstOffset > 0) {
3489 auto PtrBaseDef = getDefSrcRegIgnoringCopies(PtrBase, *MRI);
3490 if (!PtrBaseDef)
3491 return None;
3492
3493 if (isSGPR(PtrBaseDef->Reg)) {
3494 // Offset is too large.
3495 //
3496 // saddr + large_offset -> saddr + (voffset = large_offset & ~MaxOffset)
3497 // + (large_offset & MaxOffset);
3498 int64_t SplitImmOffset, RemainderOffset;
3499 std::tie(SplitImmOffset, RemainderOffset)
3500 = TII.splitFlatOffset(ConstOffset, AMDGPUAS::GLOBAL_ADDRESS, true);
3501
3502 if (isUInt<32>(RemainderOffset)) {
3503 MachineInstr *MI = Root.getParent();
3504 MachineBasicBlock *MBB = MI->getParent();
3505 Register HighBits
3506 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3507
3508 BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
3509 HighBits)
3510 .addImm(RemainderOffset);
3511
3512 return {{
3513 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrBase); }, // saddr
3514 [=](MachineInstrBuilder &MIB) { MIB.addReg(HighBits); }, // voffset
3515 [=](MachineInstrBuilder &MIB) { MIB.addImm(SplitImmOffset); },
3516 }};
3517 }
3518 }
3519 }
3520 }
3521
3522 auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3523 if (!AddrDef)
3524 return None;
3525
3526 // Match the variable offset.
3527 if (AddrDef->MI->getOpcode() != AMDGPU::G_PTR_ADD) {
3528 // FIXME: We should probably have folded COPY (G_IMPLICIT_DEF) earlier, and
3529 // drop this.
3530 if (AddrDef->MI->getOpcode() == AMDGPU::G_IMPLICIT_DEF ||
3531 AddrDef->MI->getOpcode() == AMDGPU::G_CONSTANT)
3532 return None;
3533
3534 // It's cheaper to materialize a single 32-bit zero for vaddr than the two
3535 // moves required to copy a 64-bit SGPR to VGPR.
3536 const Register SAddr = AddrDef->Reg;
3537 if (!isSGPR(SAddr))
3538 return None;
3539
3540 MachineInstr *MI = Root.getParent();
3541 MachineBasicBlock *MBB = MI->getParent();
3542 Register VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3543
3544 BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
3545 VOffset)
3546 .addImm(0);
3547
3548 return {{
3549 [=](MachineInstrBuilder &MIB) { MIB.addReg(SAddr); }, // saddr
3550 [=](MachineInstrBuilder &MIB) { MIB.addReg(VOffset); }, // voffset
3551 [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
3552 }};
3553 }
3554
3555 // Look through the SGPR->VGPR copy.
3556 Register SAddr =
3557 getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI);
3558 if (!SAddr || !isSGPR(SAddr))
3559 return None;
3560
3561 Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg();
3562
3563 // It's possible voffset is an SGPR here, but the copy to VGPR will be
3564 // inserted later.
3565 Register VOffset = matchZeroExtendFromS32(*MRI, PtrBaseOffset);
3566 if (!VOffset)
3567 return None;
3568
3569 return {{[=](MachineInstrBuilder &MIB) { // saddr
3570 MIB.addReg(SAddr);
3571 },
3572 [=](MachineInstrBuilder &MIB) { // voffset
3573 MIB.addReg(VOffset);
3574 },
3575 [=](MachineInstrBuilder &MIB) { // offset
3576 MIB.addImm(ImmOffset);
3577 }}};
3578}
3579
3580InstructionSelector::ComplexRendererFns
3581AMDGPUInstructionSelector::selectScratchSAddr(MachineOperand &Root) const {
3582 Register Addr = Root.getReg();
3583 Register PtrBase;
3584 int64_t ConstOffset;
3585 int64_t ImmOffset = 0;
3586
3587 // Match the immediate offset first, which canonically is moved as low as
3588 // possible.
3589 std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
3590
3591 if (ConstOffset != 0 &&
3592 TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS, true)) {
3593 Addr = PtrBase;
3594 ImmOffset = ConstOffset;
3595 }
3596
3597 auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3598 if (!AddrDef)
3599 return None;
3600
3601 if (AddrDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) {
3602 int FI = AddrDef->MI->getOperand(1).getIndex();
3603 return {{
3604 [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr
3605 [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
3606 }};
3607 }
3608
3609 Register SAddr = AddrDef->Reg;
3610
3611 if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
3612 Register LHS = AddrDef->MI->getOperand(1).getReg();
3613 Register RHS = AddrDef->MI->getOperand(2).getReg();
3614 auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI);
3615 auto RHSDef = getDefSrcRegIgnoringCopies(RHS, *MRI);
3616
3617 if (LHSDef && RHSDef &&
3618 LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX &&
3619 isSGPR(RHSDef->Reg)) {
3620 int FI = LHSDef->MI->getOperand(1).getIndex();
3621 MachineInstr &I = *Root.getParent();
3622 MachineBasicBlock *BB = I.getParent();
3623 const DebugLoc &DL = I.getDebugLoc();
3624 SAddr = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3625
3626 BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), SAddr)
3627 .addFrameIndex(FI)
3628 .addReg(RHSDef->Reg);
3629 }
3630 }
3631
3632 if (!isSGPR(SAddr))
3633 return None;
3634
3635 return {{
3636 [=](MachineInstrBuilder &MIB) { MIB.addReg(SAddr); }, // saddr
3637 [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
3638 }};
3639}
3640
3641static bool isStackPtrRelative(const MachinePointerInfo &PtrInfo) {
3642 auto PSV = PtrInfo.V.dyn_cast<const PseudoSourceValue *>();
3643 return PSV && PSV->isStack();
3644}
3645
3646InstructionSelector::ComplexRendererFns
3647AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
3648 MachineInstr *MI = Root.getParent();
3649 MachineBasicBlock *MBB = MI->getParent();
3650 MachineFunction *MF = MBB->getParent();
3651 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3652
3653 int64_t Offset = 0;
3654 if (mi_match(Root.getReg(), *MRI, m_ICst(Offset)) &&
3655 Offset != TM.getNullPointerValue(AMDGPUAS::PRIVATE_ADDRESS)) {
3656 Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3657
3658 // TODO: Should this be inside the render function? The iterator seems to
3659 // move.
3660 BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
3661 HighBits)
3662 .addImm(Offset & ~4095);
3663
3664 return {{[=](MachineInstrBuilder &MIB) { // rsrc
3665 MIB.addReg(Info->getScratchRSrcReg());
3666 },
3667 [=](MachineInstrBuilder &MIB) { // vaddr
3668 MIB.addReg(HighBits);
3669 },
3670 [=](MachineInstrBuilder &MIB) { // soffset
3671 const MachineMemOperand *MMO = *MI->memoperands_begin();
3672 const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
3673
3674 if (isStackPtrRelative(PtrInfo))
3675 MIB.addReg(Info->getStackPtrOffsetReg());
3676 else
3677 MIB.addImm(0);
3678 },
3679 [=](MachineInstrBuilder &MIB) { // offset
3680 MIB.addImm(Offset & 4095);
3681 }}};
3682 }
3683
3684 assert(Offset == 0 || Offset == -1)((Offset == 0 || Offset == -1) ? static_cast<void> (0) :
__assert_fail ("Offset == 0 || Offset == -1", "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 3684, __PRETTY_FUNCTION__))
;
3685
3686 // Try to fold a frame index directly into the MUBUF vaddr field, and any
3687 // offsets.
3688 Optional<int> FI;
3689 Register VAddr = Root.getReg();
3690 if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) {
3691 if (isBaseWithConstantOffset(Root, *MRI)) {
3692 const MachineOperand &LHS = RootDef->getOperand(1);
3693 const MachineOperand &RHS = RootDef->getOperand(2);
3694 const MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
3695 const MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());
3696 if (LHSDef && RHSDef) {
3697 int64_t PossibleOffset =
3698 RHSDef->getOperand(1).getCImm()->getSExtValue();
3699 if (SIInstrInfo::isLegalMUBUFImmOffset(PossibleOffset) &&
3700 (!STI.privateMemoryResourceIsRangeChecked() ||
3701 KnownBits->signBitIsZero(LHS.getReg()))) {
3702 if (LHSDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
3703 FI = LHSDef->getOperand(1).getIndex();
3704 else
3705 VAddr = LHS.getReg();
3706 Offset = PossibleOffset;
3707 }
3708 }
3709 } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
3710 FI = RootDef->getOperand(1).getIndex();
3711 }
3712 }
3713
3714 return {{[=](MachineInstrBuilder &MIB) { // rsrc
3715 MIB.addReg(Info->getScratchRSrcReg());
3716 },
3717 [=](MachineInstrBuilder &MIB) { // vaddr
3718 if (FI.hasValue())
3719 MIB.addFrameIndex(FI.getValue());
3720 else
3721 MIB.addReg(VAddr);
3722 },
3723 [=](MachineInstrBuilder &MIB) { // soffset
3724 // If we don't know this private access is a local stack object, it
3725 // needs to be relative to the entry point's scratch wave offset.
3726 // TODO: Should split large offsets that don't fit like above.
3727 // TODO: Don't use scratch wave offset just because the offset
3728 // didn't fit.
3729 if (!Info->isEntryFunction() && FI.hasValue())
3730 MIB.addReg(Info->getStackPtrOffsetReg());
3731 else
3732 MIB.addImm(0);
3733 },
3734 [=](MachineInstrBuilder &MIB) { // offset
3735 MIB.addImm(Offset);
3736 }}};
3737}
3738
3739bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base,
3740 int64_t Offset) const {
3741 if (!isUInt<16>(Offset))
3742 return false;
3743
3744 if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
3745 return true;
3746
3747 // On Southern Islands instruction with a negative base value and an offset
3748 // don't seem to work.
3749 return KnownBits->signBitIsZero(Base);
3750}
3751
3752bool AMDGPUInstructionSelector::isDSOffset2Legal(Register Base, int64_t Offset0,
3753 int64_t Offset1,
3754 unsigned Size) const {
3755 if (Offset0 % Size != 0 || Offset1 % Size != 0)
3756 return false;
3757 if (!isUInt<8>(Offset0 / Size) || !isUInt<8>(Offset1 / Size))
3758 return false;
3759
3760 if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
3761 return true;
3762
3763 // On Southern Islands instruction with a negative base value and an offset
3764 // don't seem to work.
3765 return KnownBits->signBitIsZero(Base);
3766}
3767
3768InstructionSelector::ComplexRendererFns
3769AMDGPUInstructionSelector::selectMUBUFScratchOffset(
3770 MachineOperand &Root) const {
3771 MachineInstr *MI = Root.getParent();
3772 MachineBasicBlock *MBB = MI->getParent();
3773
3774 int64_t Offset = 0;
3775 if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||
3776 !SIInstrInfo::isLegalMUBUFImmOffset(Offset))
3777 return {};
3778
3779 const MachineFunction *MF = MBB->getParent();
3780 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3781 const MachineMemOperand *MMO = *MI->memoperands_begin();
3782 const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
3783
3784 return {{
3785 [=](MachineInstrBuilder &MIB) { // rsrc
3786 MIB.addReg(Info->getScratchRSrcReg());
3787 },
3788 [=](MachineInstrBuilder &MIB) { // soffset
3789 if (isStackPtrRelative(PtrInfo))
3790 MIB.addReg(Info->getStackPtrOffsetReg());
3791 else
3792 MIB.addImm(0);
3793 },
3794 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
3795 }};
3796}
3797
3798std::pair<Register, unsigned>
3799AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const {
3800 const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
3801 if (!RootDef)
3802 return std::make_pair(Root.getReg(), 0);
3803
3804 int64_t ConstAddr = 0;
3805
3806 Register PtrBase;
3807 int64_t Offset;
3808 std::tie(PtrBase, Offset) =
3809 getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3810
3811 if (Offset) {
3812 if (isDSOffsetLegal(PtrBase, Offset)) {
3813 // (add n0, c0)
3814 return std::make_pair(PtrBase, Offset);
3815 }
3816 } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
3817 // TODO
3818
3819
3820 } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
3821 // TODO
3822
3823 }
3824
3825 return std::make_pair(Root.getReg(), 0);
3826}
3827
3828InstructionSelector::ComplexRendererFns
3829AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const {
3830 Register Reg;
3831 unsigned Offset;
3832 std::tie(Reg, Offset) = selectDS1Addr1OffsetImpl(Root);
3833 return {{
3834 [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3835 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }
3836 }};
3837}
3838
3839InstructionSelector::ComplexRendererFns
3840AMDGPUInstructionSelector::selectDS64Bit4ByteAligned(MachineOperand &Root) const {
3841 return selectDSReadWrite2(Root, 4);
3842}
3843
3844InstructionSelector::ComplexRendererFns
3845AMDGPUInstructionSelector::selectDS128Bit8ByteAligned(MachineOperand &Root) const {
3846 return selectDSReadWrite2(Root, 8);
3847}
3848
3849InstructionSelector::ComplexRendererFns
3850AMDGPUInstructionSelector::selectDSReadWrite2(MachineOperand &Root,
3851 unsigned Size) const {
3852 Register Reg;
3853 unsigned Offset;
3854 std::tie(Reg, Offset) = selectDSReadWrite2Impl(Root, Size);
3855 return {{
3856 [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3857 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); },
3858 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset+1); }
3859 }};
3860}
3861
3862std::pair<Register, unsigned>
3863AMDGPUInstructionSelector::selectDSReadWrite2Impl(MachineOperand &Root,
3864 unsigned Size) const {
3865 const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
3866 if (!RootDef)
3867 return std::make_pair(Root.getReg(), 0);
3868
3869 int64_t ConstAddr = 0;
3870
3871 Register PtrBase;
3872 int64_t Offset;
3873 std::tie(PtrBase, Offset) =
3874 getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3875
3876 if (Offset) {
3877 int64_t OffsetValue0 = Offset;
3878 int64_t OffsetValue1 = Offset + Size;
3879 if (isDSOffset2Legal(PtrBase, OffsetValue0, OffsetValue1, Size)) {
3880 // (add n0, c0)
3881 return std::make_pair(PtrBase, OffsetValue0 / Size);
3882 }
3883 } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
3884 // TODO
3885
3886 } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
3887 // TODO
3888
3889 }
3890
3891 return std::make_pair(Root.getReg(), 0);
3892}
3893
3894/// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return
3895/// the base value with the constant offset. There may be intervening copies
3896/// between \p Root and the identified constant. Returns \p Root, 0 if this does
3897/// not match the pattern.
3898std::pair<Register, int64_t>
3899AMDGPUInstructionSelector::getPtrBaseWithConstantOffset(
3900 Register Root, const MachineRegisterInfo &MRI) const {
3901 MachineInstr *RootI = getDefIgnoringCopies(Root, MRI);
3902 if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD)
3903 return {Root, 0};
3904
3905 MachineOperand &RHS = RootI->getOperand(2);
3906 Optional<ValueAndVReg> MaybeOffset
3907 = getConstantVRegValWithLookThrough(RHS.getReg(), MRI, true);
3908 if (!MaybeOffset)
3909 return {Root, 0};
3910 return {RootI->getOperand(1).getReg(), MaybeOffset->Value.getSExtValue()};
3911}
3912
3913static void addZeroImm(MachineInstrBuilder &MIB) {
3914 MIB.addImm(0);
3915}
3916
3917/// Return a resource descriptor for use with an arbitrary 64-bit pointer. If \p
3918/// BasePtr is not valid, a null base pointer will be used.
3919static Register buildRSRC(MachineIRBuilder &B, MachineRegisterInfo &MRI,
3920 uint32_t FormatLo, uint32_t FormatHi,
3921 Register BasePtr) {
3922 Register RSrc2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
3923 Register RSrc3 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
3924 Register RSrcHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
3925 Register RSrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
3926
3927 B.buildInstr(AMDGPU::S_MOV_B32)
3928 .addDef(RSrc2)
3929 .addImm(FormatLo);
3930 B.buildInstr(AMDGPU::S_MOV_B32)
3931 .addDef(RSrc3)
3932 .addImm(FormatHi);
3933
3934 // Build the half of the subregister with the constants before building the
3935 // full 128-bit register. If we are building multiple resource descriptors,
3936 // this will allow CSEing of the 2-component register.
3937 B.buildInstr(AMDGPU::REG_SEQUENCE)
3938 .addDef(RSrcHi)
3939 .addReg(RSrc2)
3940 .addImm(AMDGPU::sub0)
3941 .addReg(RSrc3)
3942 .addImm(AMDGPU::sub1);
3943
3944 Register RSrcLo = BasePtr;
3945 if (!BasePtr) {
3946 RSrcLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
3947 B.buildInstr(AMDGPU::S_MOV_B64)
3948 .addDef(RSrcLo)
3949 .addImm(0);
3950 }
3951
3952 B.buildInstr(AMDGPU::REG_SEQUENCE)
3953 .addDef(RSrc)
3954 .addReg(RSrcLo)
3955 .addImm(AMDGPU::sub0_sub1)
3956 .addReg(RSrcHi)
3957 .addImm(AMDGPU::sub2_sub3);
3958
3959 return RSrc;
3960}
3961
3962static Register buildAddr64RSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
3963 const SIInstrInfo &TII, Register BasePtr) {
3964 uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
3965
3966 // FIXME: Why are half the "default" bits ignored based on the addressing
3967 // mode?
3968 return buildRSRC(B, MRI, 0, Hi_32(DefaultFormat), BasePtr);
3969}
3970
3971static Register buildOffsetSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
3972 const SIInstrInfo &TII, Register BasePtr) {
3973 uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
3974
3975 // FIXME: Why are half the "default" bits ignored based on the addressing
3976 // mode?
3977 return buildRSRC(B, MRI, -1, Hi_32(DefaultFormat), BasePtr);
3978}
3979
3980AMDGPUInstructionSelector::MUBUFAddressData
3981AMDGPUInstructionSelector::parseMUBUFAddress(Register Src) const {
3982 MUBUFAddressData Data;
3983 Data.N0 = Src;
3984
3985 Register PtrBase;
3986 int64_t Offset;
3987
3988 std::tie(PtrBase, Offset) = getPtrBaseWithConstantOffset(Src, *MRI);
3989 if (isUInt<32>(Offset)) {
3990 Data.N0 = PtrBase;
3991 Data.Offset = Offset;
3992 }
3993
3994 if (MachineInstr *InputAdd
3995 = getOpcodeDef(TargetOpcode::G_PTR_ADD, Data.N0, *MRI)) {
3996 Data.N2 = InputAdd->getOperand(1).getReg();
3997 Data.N3 = InputAdd->getOperand(2).getReg();
3998
3999 // FIXME: Need to fix extra SGPR->VGPRcopies inserted
4000 // FIXME: Don't know this was defined by operand 0
4001 //
4002 // TODO: Remove this when we have copy folding optimizations after
4003 // RegBankSelect.
4004 Data.N2 = getDefIgnoringCopies(Data.N2, *MRI)->getOperand(0).getReg();
4005 Data.N3 = getDefIgnoringCopies(Data.N3, *MRI)->getOperand(0).getReg();
4006 }
4007
4008 return Data;
4009}
4010
4011/// Return if the addr64 mubuf mode should be used for the given address.
4012bool AMDGPUInstructionSelector::shouldUseAddr64(MUBUFAddressData Addr) const {
4013 // (ptr_add N2, N3) -> addr64, or
4014 // (ptr_add (ptr_add N2, N3), C1) -> addr64
4015 if (Addr.N2)
4016 return true;
4017
4018 const RegisterBank *N0Bank = RBI.getRegBank(Addr.N0, *MRI, TRI);
4019 return N0Bank->getID() == AMDGPU::VGPRRegBankID;
4020}
4021
4022/// Split an immediate offset \p ImmOffset depending on whether it fits in the
4023/// immediate field. Modifies \p ImmOffset and sets \p SOffset to the variable
4024/// component.
4025void AMDGPUInstructionSelector::splitIllegalMUBUFOffset(
4026 MachineIRBuilder &B, Register &SOffset, int64_t &ImmOffset) const {
4027 if (SIInstrInfo::isLegalMUBUFImmOffset(ImmOffset))
4028 return;
4029
4030 // Illegal offset, store it in soffset.
4031 SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
4032 B.buildInstr(AMDGPU::S_MOV_B32)
4033 .addDef(SOffset)
4034 .addImm(ImmOffset);
4035 ImmOffset = 0;
4036}
4037
4038bool AMDGPUInstructionSelector::selectMUBUFAddr64Impl(
4039 MachineOperand &Root, Register &VAddr, Register &RSrcReg,
4040 Register &SOffset, int64_t &Offset) const {
4041 // FIXME: Predicates should stop this from reaching here.
4042 // addr64 bit was removed for volcanic islands.
4043 if (!STI.hasAddr64() || STI.useFlatForGlobal())
4044 return false;
4045
4046 MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
4047 if (!shouldUseAddr64(AddrData))
4048 return false;
4049
4050 Register N0 = AddrData.N0;
4051 Register N2 = AddrData.N2;
4052 Register N3 = AddrData.N3;
4053 Offset = AddrData.Offset;
4054
4055 // Base pointer for the SRD.
4056 Register SRDPtr;
4057
4058 if (N2) {
4059 if (RBI.getRegBank(N2, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4060 assert(N3)((N3) ? static_cast<void> (0) : __assert_fail ("N3", "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 4060, __PRETTY_FUNCTION__))
;
4061 if (RBI.getRegBank(N3, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4062 // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
4063 // addr64, and construct the default resource from a 0 address.
4064 VAddr = N0;
4065 } else {
4066 SRDPtr = N3;
4067 VAddr = N2;
4068 }
4069 } else {
4070 // N2 is not divergent.
4071 SRDPtr = N2;
4072 VAddr = N3;
4073 }
4074 } else if (RBI.getRegBank(N0, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4075 // Use the default null pointer in the resource
4076 VAddr = N0;
4077 } else {
4078 // N0 -> offset, or
4079 // (N0 + C1) -> offset
4080 SRDPtr = N0;
4081 }
4082
4083 MachineIRBuilder B(*Root.getParent());
4084 RSrcReg = buildAddr64RSrc(B, *MRI, TII, SRDPtr);
4085 splitIllegalMUBUFOffset(B, SOffset, Offset);
4086 return true;
4087}
4088
4089bool AMDGPUInstructionSelector::selectMUBUFOffsetImpl(
4090 MachineOperand &Root, Register &RSrcReg, Register &SOffset,
4091 int64_t &Offset) const {
4092
4093 // FIXME: Pattern should not reach here.
4094 if (STI.useFlatForGlobal())
4095 return false;
4096
4097 MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
4098 if (shouldUseAddr64(AddrData))
4099 return false;
4100
4101 // N0 -> offset, or
4102 // (N0 + C1) -> offset
4103 Register SRDPtr = AddrData.N0;
4104 Offset = AddrData.Offset;
4105
4106 // TODO: Look through extensions for 32-bit soffset.
4107 MachineIRBuilder B(*Root.getParent());
4108
4109 RSrcReg = buildOffsetSrc(B, *MRI, TII, SRDPtr);
4110 splitIllegalMUBUFOffset(B, SOffset, Offset);
4111 return true;
4112}
4113
4114InstructionSelector::ComplexRendererFns
4115AMDGPUInstructionSelector::selectMUBUFAddr64(MachineOperand &Root) const {
4116 Register VAddr;
4117 Register RSrcReg;
4118 Register SOffset;
4119 int64_t Offset = 0;
4120
4121 if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
4122 return {};
4123
4124 // FIXME: Use defaulted operands for trailing 0s and remove from the complex
4125 // pattern.
4126 return {{
4127 [=](MachineInstrBuilder &MIB) { // rsrc
4128 MIB.addReg(RSrcReg);
4129 },
4130 [=](MachineInstrBuilder &MIB) { // vaddr
4131 MIB.addReg(VAddr);
4132 },
4133 [=](MachineInstrBuilder &MIB) { // soffset
4134 if (SOffset)
4135 MIB.addReg(SOffset);
4136 else
4137 MIB.addImm(0);
4138 },
4139 [=](MachineInstrBuilder &MIB) { // offset
4140 MIB.addImm(Offset);
4141 },
4142 addZeroImm, // glc
4143 addZeroImm, // slc
4144 addZeroImm, // tfe
4145 addZeroImm, // dlc
4146 addZeroImm // swz
4147 }};
4148}
4149
4150InstructionSelector::ComplexRendererFns
4151AMDGPUInstructionSelector::selectMUBUFOffset(MachineOperand &Root) const {
4152 Register RSrcReg;
4153 Register SOffset;
4154 int64_t Offset = 0;
4155
4156 if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
4157 return {};
4158
4159 return {{
4160 [=](MachineInstrBuilder &MIB) { // rsrc
4161 MIB.addReg(RSrcReg);
4162 },
4163 [=](MachineInstrBuilder &MIB) { // soffset
4164 if (SOffset)
4165 MIB.addReg(SOffset);
4166 else
4167 MIB.addImm(0);
4168 },
4169 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
4170 addZeroImm, // glc
4171 addZeroImm, // slc
4172 addZeroImm, // tfe
4173 addZeroImm, // dlc
4174 addZeroImm // swz
4175 }};
4176}
4177
4178InstructionSelector::ComplexRendererFns
4179AMDGPUInstructionSelector::selectMUBUFAddr64Atomic(MachineOperand &Root) const {
4180 Register VAddr;
4181 Register RSrcReg;
4182 Register SOffset;
4183 int64_t Offset = 0;
4184
4185 if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
4186 return {};
4187
4188 // FIXME: Use defaulted operands for trailing 0s and remove from the complex
4189 // pattern.
4190 return {{
4191 [=](MachineInstrBuilder &MIB) { // rsrc
4192 MIB.addReg(RSrcReg);
4193 },
4194 [=](MachineInstrBuilder &MIB) { // vaddr
4195 MIB.addReg(VAddr);
4196 },
4197 [=](MachineInstrBuilder &MIB) { // soffset
4198 if (SOffset)
4199 MIB.addReg(SOffset);
4200 else
4201 MIB.addImm(0);
4202 },
4203 [=](MachineInstrBuilder &MIB) { // offset
4204 MIB.addImm(Offset);
4205 },
4206 addZeroImm // slc
4207 }};
4208}
4209
4210InstructionSelector::ComplexRendererFns
4211AMDGPUInstructionSelector::selectMUBUFOffsetAtomic(MachineOperand &Root) const {
4212 Register RSrcReg;
4213 Register SOffset;
4214 int64_t Offset = 0;
4215
4216 if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
4217 return {};
4218
4219 return {{
4220 [=](MachineInstrBuilder &MIB) { // rsrc
4221 MIB.addReg(RSrcReg);
4222 },
4223 [=](MachineInstrBuilder &MIB) { // soffset
4224 if (SOffset)
4225 MIB.addReg(SOffset);
4226 else
4227 MIB.addImm(0);
4228 },
4229 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
4230 addZeroImm // slc
4231 }};
4232}
4233
4234/// Get an immediate that must be 32-bits, and treated as zero extended.
4235static Optional<uint64_t> getConstantZext32Val(Register Reg,
4236 const MachineRegisterInfo &MRI) {
4237 // getConstantVRegVal sexts any values, so see if that matters.
4238 Optional<int64_t> OffsetVal = getConstantVRegSExtVal(Reg, MRI);
4239 if (!OffsetVal || !isInt<32>(*OffsetVal))
4240 return None;
4241 return Lo_32(*OffsetVal);
4242}
4243
4244InstructionSelector::ComplexRendererFns
4245AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const {
4246 Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
4247 if (!OffsetVal)
4248 return {};
4249
4250 Optional<int64_t> EncodedImm =
4251 AMDGPU::getSMRDEncodedOffset(STI, *OffsetVal, true);
4252 if (!EncodedImm)
4253 return {};
4254
4255 return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); } }};
4256}
4257
4258InstructionSelector::ComplexRendererFns
4259AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const {
4260 assert(STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS)((STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS) ? static_cast
<void> (0) : __assert_fail ("STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS"
, "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 4260, __PRETTY_FUNCTION__))
;
4261
4262 Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
4263 if (!OffsetVal)
4264 return {};
4265
4266 Optional<int64_t> EncodedImm
4267 = AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal);
4268 if (!EncodedImm)
4269 return {};
4270
4271 return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); } }};
4272}
4273
4274void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB,
4275 const MachineInstr &MI,
4276 int OpIdx) const {
4277 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&((MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx
== -1 && "Expected G_CONSTANT") ? static_cast<void
> (0) : __assert_fail ("MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 && \"Expected G_CONSTANT\""
, "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 4278, __PRETTY_FUNCTION__))
4278 "Expected G_CONSTANT")((MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx
== -1 && "Expected G_CONSTANT") ? static_cast<void
> (0) : __assert_fail ("MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 && \"Expected G_CONSTANT\""
, "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 4278, __PRETTY_FUNCTION__))
;
4279 MIB.addImm(MI.getOperand(1).getCImm()->getSExtValue());
4280}
4281
4282void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB,
4283 const MachineInstr &MI,
4284 int OpIdx) const {
4285 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&((MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx
== -1 && "Expected G_CONSTANT") ? static_cast<void
> (0) : __assert_fail ("MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 && \"Expected G_CONSTANT\""
, "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 4286, __PRETTY_FUNCTION__))
4286 "Expected G_CONSTANT")((MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx
== -1 && "Expected G_CONSTANT") ? static_cast<void
> (0) : __assert_fail ("MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 && \"Expected G_CONSTANT\""
, "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 4286, __PRETTY_FUNCTION__))
;
4287 MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue());
4288}
4289
4290void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB,
4291 const MachineInstr &MI,
4292 int OpIdx) const {
4293 assert(OpIdx == -1)((OpIdx == -1) ? static_cast<void> (0) : __assert_fail (
"OpIdx == -1", "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 4293, __PRETTY_FUNCTION__))
;
4294
4295 const MachineOperand &Op = MI.getOperand(1);
4296 if (MI.getOpcode() == TargetOpcode::G_FCONSTANT)
4297 MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
4298 else {
4299 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT")((MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT"
) ? static_cast<void> (0) : __assert_fail ("MI.getOpcode() == TargetOpcode::G_CONSTANT && \"Expected G_CONSTANT\""
, "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 4299, __PRETTY_FUNCTION__))
;
4300 MIB.addImm(Op.getCImm()->getSExtValue());
4301 }
4302}
4303
4304void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB,
4305 const MachineInstr &MI,
4306 int OpIdx) const {
4307 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&((MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx
== -1 && "Expected G_CONSTANT") ? static_cast<void
> (0) : __assert_fail ("MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 && \"Expected G_CONSTANT\""
, "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 4308, __PRETTY_FUNCTION__))
4308 "Expected G_CONSTANT")((MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx
== -1 && "Expected G_CONSTANT") ? static_cast<void
> (0) : __assert_fail ("MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 && \"Expected G_CONSTANT\""
, "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 4308, __PRETTY_FUNCTION__))
;
4309 MIB.addImm(MI.getOperand(1).getCImm()->getValue().countPopulation());
4310}
4311
4312/// This only really exists to satisfy DAG type checking machinery, so is a
4313/// no-op here.
4314void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB,
4315 const MachineInstr &MI,
4316 int OpIdx) const {
4317 MIB.addImm(MI.getOperand(OpIdx).getImm());
4318}
4319
4320void AMDGPUInstructionSelector::renderExtractGLC(MachineInstrBuilder &MIB,
4321 const MachineInstr &MI,
4322 int OpIdx) const {
4323 assert(OpIdx >= 0 && "expected to match an immediate operand")((OpIdx >= 0 && "expected to match an immediate operand"
) ? static_cast<void> (0) : __assert_fail ("OpIdx >= 0 && \"expected to match an immediate operand\""
, "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 4323, __PRETTY_FUNCTION__))
;
4324 MIB.addImm(MI.getOperand(OpIdx).getImm() & 1);
4325}
4326
4327void AMDGPUInstructionSelector::renderExtractSLC(MachineInstrBuilder &MIB,
4328 const MachineInstr &MI,
4329 int OpIdx) const {
4330 assert(OpIdx >= 0 && "expected to match an immediate operand")((OpIdx >= 0 && "expected to match an immediate operand"
) ? static_cast<void> (0) : __assert_fail ("OpIdx >= 0 && \"expected to match an immediate operand\""
, "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 4330, __PRETTY_FUNCTION__))
;
4331 MIB.addImm((MI.getOperand(OpIdx).getImm() >> 1) & 1);
4332}
4333
4334void AMDGPUInstructionSelector::renderExtractDLC(MachineInstrBuilder &MIB,
4335 const MachineInstr &MI,
4336 int OpIdx) const {
4337 assert(OpIdx >= 0 && "expected to match an immediate operand")((OpIdx >= 0 && "expected to match an immediate operand"
) ? static_cast<void> (0) : __assert_fail ("OpIdx >= 0 && \"expected to match an immediate operand\""
, "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 4337, __PRETTY_FUNCTION__))
;
4338 MIB.addImm((MI.getOperand(OpIdx).getImm() >> 2) & 1);
4339}
4340
4341void AMDGPUInstructionSelector::renderExtractSWZ(MachineInstrBuilder &MIB,
4342 const MachineInstr &MI,
4343 int OpIdx) const {
4344 assert(OpIdx >= 0 && "expected to match an immediate operand")((OpIdx >= 0 && "expected to match an immediate operand"
) ? static_cast<void> (0) : __assert_fail ("OpIdx >= 0 && \"expected to match an immediate operand\""
, "/build/llvm-toolchain-snapshot-12~++20210115100614+a14c36fe27f5/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 4344, __PRETTY_FUNCTION__))
;
4345 MIB.addImm((MI.getOperand(OpIdx).getImm() >> 3) & 1);
4346}
4347
4348void AMDGPUInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
4349 const MachineInstr &MI,
4350 int OpIdx) const {
4351 MIB.addFrameIndex((MI.getOperand(1).getIndex()));
4352}
4353
4354bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const {
4355 return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm());
4356}
4357
4358bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const {
4359 return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm());
4360}
4361
4362bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const {
4363 return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm());
4364}
4365
4366bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const {
4367 return TII.isInlineConstant(Imm);
4368}