Bug Summary

File: llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
Warning: line 3263, column 5
Value stored to 'MI' is never read
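
The warning comes from the deadcode.DeadStores checker, enabled in the analyzer invocation below via -analyzer-checker=deadcode: a value is stored to 'MI' at line 3263 but is overwritten or discarded before it is ever read. Line 3263 falls outside the excerpt reproduced here, so the snippet below is a minimal, hypothetical sketch of the flagged pattern, not the actual code at that line:

  // Hypothetical illustration; 'selectOpcode' and 'Opc' are made-up names.
  // Running clang --analyze on this function reproduces the same diagnostic,
  // since deadcode.DeadStores is part of the default checker set.
  int selectOpcode(bool UseNew) {
    int Opc;
    Opc = 42; // warning: Value stored to 'Opc' is never read
    if (UseNew)
      Opc = 7; // every path overwrites the initial store...
    else
      Opc = 13;
    return Opc; // ...so 'Opc' is only read after reassignment
  }

The usual fix is to delete the dead store or move the declaration to the first meaningful assignment.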

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name AMDGPUInstructionSelector.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-12/lib/clang/12.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-12~++20201026111116+d3205bbca3e/build-llvm/lib/Target/AMDGPU -I /build/llvm-toolchain-snapshot-12~++20201026111116+d3205bbca3e/llvm/lib/Target/AMDGPU -I /build/llvm-toolchain-snapshot-12~++20201026111116+d3205bbca3e/build-llvm/include -I /build/llvm-toolchain-snapshot-12~++20201026111116+d3205bbca3e/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-12/lib/clang/12.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-12~++20201026111116+d3205bbca3e/build-llvm/lib/Target/AMDGPU -fdebug-prefix-map=/build/llvm-toolchain-snapshot-12~++20201026111116+d3205bbca3e=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2020-10-27-053609-25509-1 -x c++ /build/llvm-toolchain-snapshot-12~++20201026111116+d3205bbca3e/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
1//===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements the targeting of the InstructionSelector class for
10/// AMDGPU.
11/// \todo This should be generated by TableGen.
12//===----------------------------------------------------------------------===//
13
14#include "AMDGPUInstructionSelector.h"
15#include "AMDGPUInstrInfo.h"
16#include "AMDGPUGlobalISelUtils.h"
17#include "AMDGPURegisterBankInfo.h"
18#include "AMDGPUSubtarget.h"
19#include "AMDGPUTargetMachine.h"
20#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
21#include "SIMachineFunctionInfo.h"
22#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
23#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
24#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
25#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
26#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
27#include "llvm/CodeGen/GlobalISel/Utils.h"
28#include "llvm/CodeGen/MachineBasicBlock.h"
29#include "llvm/CodeGen/MachineFunction.h"
30#include "llvm/CodeGen/MachineInstr.h"
31#include "llvm/CodeGen/MachineInstrBuilder.h"
32#include "llvm/CodeGen/MachineRegisterInfo.h"
33#include "llvm/IR/DiagnosticInfo.h"
34#include "llvm/IR/Type.h"
35#include "llvm/Support/Debug.h"
36#include "llvm/Support/raw_ostream.h"
37
38#define DEBUG_TYPE "amdgpu-isel"
39
40using namespace llvm;
41using namespace MIPatternMatch;
42
43static cl::opt<bool> AllowRiskySelect(
44 "amdgpu-global-isel-risky-select",
45 cl::desc("Allow GlobalISel to select cases that are likely to not work yet"),
46 cl::init(false),
47 cl::ReallyHidden);
48
49#define GET_GLOBALISEL_IMPL
50#define AMDGPUSubtarget GCNSubtarget
51#include "AMDGPUGenGlobalISel.inc"
52#undef GET_GLOBALISEL_IMPL
53#undef AMDGPUSubtarget
54
55AMDGPUInstructionSelector::AMDGPUInstructionSelector(
56 const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
57 const AMDGPUTargetMachine &TM)
58 : InstructionSelector(), TII(*STI.getInstrInfo()),
59 TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
60 STI(STI),
61 EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
62#define GET_GLOBALISEL_PREDICATES_INIT
63#include "AMDGPUGenGlobalISel.inc"
64#undef GET_GLOBALISEL_PREDICATES_INIT
65#define GET_GLOBALISEL_TEMPORARIES_INIT
66#include "AMDGPUGenGlobalISel.inc"
67#undef GET_GLOBALISEL_TEMPORARIES_INIT
68{
69}
70
71const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }
72
73void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits &KB,
74 CodeGenCoverage &CoverageInfo) {
75 MRI = &MF.getRegInfo();
76 Subtarget = &MF.getSubtarget<GCNSubtarget>();
77 InstructionSelector::setupMF(MF, KB, CoverageInfo);
78}
79
80bool AMDGPUInstructionSelector::isVCC(Register Reg,
81 const MachineRegisterInfo &MRI) const {
82 // The verifier is oblivious to s1 being a valid value for wavesize registers.
83 if (Reg.isPhysical())
84 return false;
85
86 auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
87 const TargetRegisterClass *RC =
88 RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
89 if (RC) {
90 const LLT Ty = MRI.getType(Reg);
91 return RC->hasSuperClassEq(TRI.getBoolRC()) &&
92 Ty.isValid() && Ty.getSizeInBits() == 1;
93 }
94
95 const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
96 return RB->getID() == AMDGPU::VCCRegBankID;
97}
98
99bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI,
100 unsigned NewOpc) const {
101 MI.setDesc(TII.get(NewOpc));
102 MI.RemoveOperand(1); // Remove intrinsic ID.
103 MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
104
105 MachineOperand &Dst = MI.getOperand(0);
106 MachineOperand &Src = MI.getOperand(1);
107
108 // TODO: This should be legalized to s32 if needed
109 if (MRI->getType(Dst.getReg()) == LLT::scalar(1))
110 return false;
111
112 const TargetRegisterClass *DstRC
113 = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
114 const TargetRegisterClass *SrcRC
115 = TRI.getConstrainedRegClassForOperand(Src, *MRI);
116 if (!DstRC || DstRC != SrcRC)
117 return false;
118
119 return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) &&
120 RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI);
121}
122
123bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
124 const DebugLoc &DL = I.getDebugLoc();
125 MachineBasicBlock *BB = I.getParent();
126 I.setDesc(TII.get(TargetOpcode::COPY));
127
128 const MachineOperand &Src = I.getOperand(1);
129 MachineOperand &Dst = I.getOperand(0);
130 Register DstReg = Dst.getReg();
131 Register SrcReg = Src.getReg();
132
133 if (isVCC(DstReg, *MRI)) {
134 if (SrcReg == AMDGPU::SCC) {
135 const TargetRegisterClass *RC
136 = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
137 if (!RC)
138 return true;
139 return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
140 }
141
142 if (!isVCC(SrcReg, *MRI)) {
143 // TODO: Should probably leave the copy and let copyPhysReg expand it.
144 if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI))
145 return false;
146
147 const TargetRegisterClass *SrcRC
148 = TRI.getConstrainedRegClassForOperand(Src, *MRI);
149
150 Register MaskedReg = MRI->createVirtualRegister(SrcRC);
151
152 // We can't trust the high bits at this point, so clear them.
153
154 // TODO: Skip masking high bits if def is known boolean.
155
156 unsigned AndOpc = TRI.isSGPRClass(SrcRC) ?
157 AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32;
158 BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg)
159 .addImm(1)
160 .addReg(SrcReg);
161 BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
162 .addImm(0)
163 .addReg(MaskedReg);
164
165 if (!MRI->getRegClassOrNull(SrcReg))
166 MRI->setRegClass(SrcReg, SrcRC);
167 I.eraseFromParent();
168 return true;
169 }
170
171 const TargetRegisterClass *RC =
172 TRI.getConstrainedRegClassForOperand(Dst, *MRI);
173 if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
174 return false;
175
176 return true;
177 }
178
179 for (const MachineOperand &MO : I.operands()) {
180 if (MO.getReg().isPhysical())
181 continue;
182
183 const TargetRegisterClass *RC =
184 TRI.getConstrainedRegClassForOperand(MO, *MRI);
185 if (!RC)
186 continue;
187 RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
188 }
189 return true;
190}
191
192bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
193 const Register DefReg = I.getOperand(0).getReg();
194 const LLT DefTy = MRI->getType(DefReg);
195 if (DefTy == LLT::scalar(1)) {
196 if (!AllowRiskySelect) {
197      LLVM_DEBUG(dbgs() << "Skipping risky boolean phi\n");
198 return false;
199 }
200
201    LLVM_DEBUG(dbgs() << "Selecting risky boolean phi\n");
202 }
203
204 // TODO: Verify this doesn't have insane operands (i.e. VGPR to SGPR copy)
205
206 const RegClassOrRegBank &RegClassOrBank =
207 MRI->getRegClassOrRegBank(DefReg);
208
209 const TargetRegisterClass *DefRC
210 = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
211 if (!DefRC) {
212 if (!DefTy.isValid()) {
213      LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
214 return false;
215 }
216
217 const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
218 DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB, *MRI);
219 if (!DefRC) {
220      LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
221 return false;
222 }
223 }
224
225 // TODO: Verify that all registers have the same bank
226 I.setDesc(TII.get(TargetOpcode::PHI));
227 return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
228}
229
230MachineOperand
231AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
232 const TargetRegisterClass &SubRC,
233 unsigned SubIdx) const {
234
235 MachineInstr *MI = MO.getParent();
236 MachineBasicBlock *BB = MO.getParent()->getParent();
237 Register DstReg = MRI->createVirtualRegister(&SubRC);
238
239 if (MO.isReg()) {
240 unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
241 Register Reg = MO.getReg();
242 BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
243 .addReg(Reg, 0, ComposedSubIdx);
244
245 return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
246 MO.isKill(), MO.isDead(), MO.isUndef(),
247 MO.isEarlyClobber(), 0, MO.isDebug(),
248 MO.isInternalRead());
249 }
250
251  assert(MO.isImm());
252
253 APInt Imm(64, MO.getImm());
254
255 switch (SubIdx) {
256 default:
257    llvm_unreachable("do not know to split immediate with this sub index.");
258 case AMDGPU::sub0:
259 return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
260 case AMDGPU::sub1:
261 return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
262 }
263}
264
265static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {
266 switch (Opc) {
267 case AMDGPU::G_AND:
268 return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
269 case AMDGPU::G_OR:
270 return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32;
271 case AMDGPU::G_XOR:
272 return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32;
273 default:
274    llvm_unreachable("not a bit op");
275 }
276}
277
278bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {
279 Register DstReg = I.getOperand(0).getReg();
280 unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
281
282 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
283 if (DstRB->getID() != AMDGPU::SGPRRegBankID &&
284 DstRB->getID() != AMDGPU::VCCRegBankID)
285 return false;
286
287 bool Is64 = Size > 32 || (DstRB->getID() == AMDGPU::VCCRegBankID &&
288 STI.isWave64());
289 I.setDesc(TII.get(getLogicalBitOpcode(I.getOpcode(), Is64)));
290
291 // Dead implicit-def of scc
292 I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
293 true, // isImp
294 false, // isKill
295 true)); // isDead
296 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
297}
298
299bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {
300 MachineBasicBlock *BB = I.getParent();
301 MachineFunction *MF = BB->getParent();
302 Register DstReg = I.getOperand(0).getReg();
303 const DebugLoc &DL = I.getDebugLoc();
304 LLT Ty = MRI->getType(DstReg);
305 if (Ty.isVector())
306 return false;
307
308 unsigned Size = Ty.getSizeInBits();
309 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
310 const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;
311 const bool Sub = I.getOpcode() == TargetOpcode::G_SUB;
312
313 if (Size == 32) {
314 if (IsSALU) {
315 const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
316 MachineInstr *Add =
317 BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
318 .add(I.getOperand(1))
319 .add(I.getOperand(2));
320 I.eraseFromParent();
321 return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
322 }
323
324 if (STI.hasAddNoCarry()) {
325 const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64;
326 I.setDesc(TII.get(Opc));
327 I.addOperand(*MF, MachineOperand::CreateImm(0));
328 I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
329 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
330 }
331
332 const unsigned Opc = Sub ? AMDGPU::V_SUB_CO_U32_e64 : AMDGPU::V_ADD_CO_U32_e64;
333
334 Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass());
335 MachineInstr *Add
336 = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
337 .addDef(UnusedCarry, RegState::Dead)
338 .add(I.getOperand(1))
339 .add(I.getOperand(2))
340 .addImm(0);
341 I.eraseFromParent();
342 return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
343 }
344
345  assert(!Sub && "illegal sub should not reach here");
346
347 const TargetRegisterClass &RC
348 = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass;
349 const TargetRegisterClass &HalfRC
350 = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass;
351
352 MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0));
353 MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0));
354 MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1));
355 MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1));
356
357 Register DstLo = MRI->createVirtualRegister(&HalfRC);
358 Register DstHi = MRI->createVirtualRegister(&HalfRC);
359
360 if (IsSALU) {
361 BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
362 .add(Lo1)
363 .add(Lo2);
364 BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
365 .add(Hi1)
366 .add(Hi2);
367 } else {
368 const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass();
369 Register CarryReg = MRI->createVirtualRegister(CarryRC);
370 BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_CO_U32_e64), DstLo)
371 .addDef(CarryReg)
372 .add(Lo1)
373 .add(Lo2)
374 .addImm(0);
375 MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi)
376 .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead)
377 .add(Hi1)
378 .add(Hi2)
379 .addReg(CarryReg, RegState::Kill)
380 .addImm(0);
381
382 if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI))
383 return false;
384 }
385
386 BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
387 .addReg(DstLo)
388 .addImm(AMDGPU::sub0)
389 .addReg(DstHi)
390 .addImm(AMDGPU::sub1);
391
392
393 if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
394 return false;
395
396 I.eraseFromParent();
397 return true;
398}
399
400bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE(
401 MachineInstr &I) const {
402 MachineBasicBlock *BB = I.getParent();
403 MachineFunction *MF = BB->getParent();
404 const DebugLoc &DL = I.getDebugLoc();
405 Register Dst0Reg = I.getOperand(0).getReg();
406 Register Dst1Reg = I.getOperand(1).getReg();
407 const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO ||
408 I.getOpcode() == AMDGPU::G_UADDE;
409 const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE ||
410 I.getOpcode() == AMDGPU::G_USUBE;
411
412 if (isVCC(Dst1Reg, *MRI)) {
413 unsigned NoCarryOpc =
414 IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
415 unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
416 I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc));
417 I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
418 I.addOperand(*MF, MachineOperand::CreateImm(0));
419 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
420 }
421
422 Register Src0Reg = I.getOperand(2).getReg();
423 Register Src1Reg = I.getOperand(3).getReg();
424
425 if (HasCarryIn) {
426 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
427 .addReg(I.getOperand(4).getReg());
428 }
429
430 unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
431 unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
432
433 BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg)
434 .add(I.getOperand(2))
435 .add(I.getOperand(3));
436 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg)
437 .addReg(AMDGPU::SCC);
438
439 if (!MRI->getRegClassOrNull(Dst1Reg))
440 MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass);
441
442 if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
443 !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
444 !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI))
445 return false;
446
447 if (HasCarryIn &&
448 !RBI.constrainGenericRegister(I.getOperand(4).getReg(),
449 AMDGPU::SReg_32RegClass, *MRI))
450 return false;
451
452 I.eraseFromParent();
453 return true;
454}
455
456// TODO: We should probably legalize these to only using 32-bit results.
457bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
458 MachineBasicBlock *BB = I.getParent();
459 Register DstReg = I.getOperand(0).getReg();
460 Register SrcReg = I.getOperand(1).getReg();
461 LLT DstTy = MRI->getType(DstReg);
462 LLT SrcTy = MRI->getType(SrcReg);
463 const unsigned SrcSize = SrcTy.getSizeInBits();
464 unsigned DstSize = DstTy.getSizeInBits();
465
466 // TODO: Should handle any multiple of 32 offset.
467 unsigned Offset = I.getOperand(2).getImm();
468 if (Offset % 32 != 0 || DstSize > 128)
469 return false;
470
471 // 16-bit operations really use 32-bit registers.
472 // FIXME: Probably should not allow 16-bit G_EXTRACT results.
473 if (DstSize == 16)
474 DstSize = 32;
475
476 const TargetRegisterClass *DstRC =
477 TRI.getConstrainedRegClassForOperand(I.getOperand(0), *MRI);
478 if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
479 return false;
480
481 const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
482 const TargetRegisterClass *SrcRC =
483 TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
484 if (!SrcRC)
485 return false;
486 unsigned SubReg = SIRegisterInfo::getSubRegFromChannel(Offset / 32,
487 DstSize / 32);
488 SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubReg);
489 if (!SrcRC)
490 return false;
491
492 SrcReg = constrainOperandRegClass(*MF, TRI, *MRI, TII, RBI, I,
493 *SrcRC, I.getOperand(1));
494 const DebugLoc &DL = I.getDebugLoc();
495 BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg)
496 .addReg(SrcReg, 0, SubReg);
497
498 I.eraseFromParent();
499 return true;
500}
501
502bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {
503 MachineBasicBlock *BB = MI.getParent();
504 Register DstReg = MI.getOperand(0).getReg();
505 LLT DstTy = MRI->getType(DstReg);
506 LLT SrcTy = MRI->getType(MI.getOperand(1).getReg());
507
508 const unsigned SrcSize = SrcTy.getSizeInBits();
509 if (SrcSize < 32)
510 return selectImpl(MI, *CoverageInfo);
511
512 const DebugLoc &DL = MI.getDebugLoc();
513 const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
514 const unsigned DstSize = DstTy.getSizeInBits();
515 const TargetRegisterClass *DstRC =
516 TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
517 if (!DstRC)
518 return false;
519
520 ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8);
521 MachineInstrBuilder MIB =
522 BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg);
523 for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
524 MachineOperand &Src = MI.getOperand(I + 1);
525 MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));
526 MIB.addImm(SubRegs[I]);
527
528 const TargetRegisterClass *SrcRC
529 = TRI.getConstrainedRegClassForOperand(Src, *MRI);
530 if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI))
531 return false;
532 }
533
534 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
535 return false;
536
537 MI.eraseFromParent();
538 return true;
539}
540
541bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
542 MachineBasicBlock *BB = MI.getParent();
543 const int NumDst = MI.getNumOperands() - 1;
544
545 MachineOperand &Src = MI.getOperand(NumDst);
546
547 Register SrcReg = Src.getReg();
548 Register DstReg0 = MI.getOperand(0).getReg();
549 LLT DstTy = MRI->getType(DstReg0);
550 LLT SrcTy = MRI->getType(SrcReg);
551
552 const unsigned DstSize = DstTy.getSizeInBits();
553 const unsigned SrcSize = SrcTy.getSizeInBits();
554 const DebugLoc &DL = MI.getDebugLoc();
555 const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
556
557 const TargetRegisterClass *SrcRC =
558 TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
559 if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
560 return false;
561
562 // Note we could have mixed SGPR and VGPR destination banks for an SGPR
563 // source, and this relies on the fact that the same subregister indices are
564 // used for both.
565 ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
566 for (int I = 0, E = NumDst; I != E; ++I) {
567 MachineOperand &Dst = MI.getOperand(I);
568 BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
569 .addReg(SrcReg, 0, SubRegs[I]);
570
571 // Make sure the subregister index is valid for the source register.
572 SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegs[I]);
573 if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
574 return false;
575
576 const TargetRegisterClass *DstRC =
577 TRI.getConstrainedRegClassForOperand(Dst, *MRI);
578 if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI))
579 return false;
580 }
581
582 MI.eraseFromParent();
583 return true;
584}
585
586bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR_TRUNC(
587 MachineInstr &MI) const {
588 if (selectImpl(MI, *CoverageInfo))
589 return true;
590
591 const LLT S32 = LLT::scalar(32);
592 const LLT V2S16 = LLT::vector(2, 16);
593
594 Register Dst = MI.getOperand(0).getReg();
595 if (MRI->getType(Dst) != V2S16)
596 return false;
597
598 const RegisterBank *DstBank = RBI.getRegBank(Dst, *MRI, TRI);
599 if (DstBank->getID() != AMDGPU::SGPRRegBankID)
600 return false;
601
602 Register Src0 = MI.getOperand(1).getReg();
603 Register Src1 = MI.getOperand(2).getReg();
604 if (MRI->getType(Src0) != S32)
605 return false;
606
607 const DebugLoc &DL = MI.getDebugLoc();
608 MachineBasicBlock *BB = MI.getParent();
609
610 auto ConstSrc1 = getConstantVRegValWithLookThrough(Src1, *MRI, true, true);
611 if (ConstSrc1) {
612 auto ConstSrc0 = getConstantVRegValWithLookThrough(Src0, *MRI, true, true);
613 if (ConstSrc0) {
614 uint32_t Lo16 = static_cast<uint32_t>(ConstSrc0->Value) & 0xffff;
615 uint32_t Hi16 = static_cast<uint32_t>(ConstSrc1->Value) & 0xffff;
616
617 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), Dst)
618 .addImm(Lo16 | (Hi16 << 16));
619 MI.eraseFromParent();
620 return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI);
621 }
622 }
623
624 // TODO: This should probably be a combine somewhere
625  // (build_vector_trunc $src0, undef) -> copy $src0
626 MachineInstr *Src1Def = getDefIgnoringCopies(Src1, *MRI);
627 if (Src1Def && Src1Def->getOpcode() == AMDGPU::G_IMPLICIT_DEF) {
628 MI.setDesc(TII.get(AMDGPU::COPY));
629 MI.RemoveOperand(2);
630 return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI) &&
631 RBI.constrainGenericRegister(Src0, AMDGPU::SReg_32RegClass, *MRI);
632 }
633
634 Register ShiftSrc0;
635 Register ShiftSrc1;
636 int64_t ShiftAmt;
637
638 // With multiple uses of the shift, this will duplicate the shift and
639 // increase register pressure.
640 //
641 // (build_vector_trunc (lshr_oneuse $src0, 16), (lshr_oneuse $src1, 16)
642 // => (S_PACK_HH_B32_B16 $src0, $src1)
643 // (build_vector_trunc $src0, (lshr_oneuse SReg_32:$src1, 16))
644 // => (S_PACK_LH_B32_B16 $src0, $src1)
645 // (build_vector_trunc $src0, $src1)
646 // => (S_PACK_LL_B32_B16 $src0, $src1)
647
648 // FIXME: This is an inconvenient way to check a specific value
649 bool Shift0 = mi_match(
650 Src0, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc0), m_ICst(ShiftAmt)))) &&
651 ShiftAmt == 16;
652
653 bool Shift1 = mi_match(
654 Src1, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc1), m_ICst(ShiftAmt)))) &&
655 ShiftAmt == 16;
656
657 unsigned Opc = AMDGPU::S_PACK_LL_B32_B16;
658 if (Shift0 && Shift1) {
659 Opc = AMDGPU::S_PACK_HH_B32_B16;
660 MI.getOperand(1).setReg(ShiftSrc0);
661 MI.getOperand(2).setReg(ShiftSrc1);
662 } else if (Shift1) {
663 Opc = AMDGPU::S_PACK_LH_B32_B16;
664 MI.getOperand(2).setReg(ShiftSrc1);
665 } else if (Shift0 && ConstSrc1 && ConstSrc1->Value == 0) {
666 // build_vector_trunc (lshr $src0, 16), 0 -> s_lshr_b32 $src0, 16
667 auto MIB = BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_LSHR_B32), Dst)
668 .addReg(ShiftSrc0)
669 .addImm(16);
670
671 MI.eraseFromParent();
672 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
673 }
674
675 MI.setDesc(TII.get(Opc));
676 return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
677}
678
679bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const {
680 return selectG_ADD_SUB(I);
681}
682
683bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
684 const MachineOperand &MO = I.getOperand(0);
685
686 // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
687 // regbank check here is to know why getConstrainedRegClassForOperand failed.
688 const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI);
689 if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) ||
690 (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) {
691 I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
692 return true;
693 }
694
695 return false;
696}
697
698bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
699 MachineBasicBlock *BB = I.getParent();
700
701 Register DstReg = I.getOperand(0).getReg();
702 Register Src0Reg = I.getOperand(1).getReg();
703 Register Src1Reg = I.getOperand(2).getReg();
704 LLT Src1Ty = MRI->getType(Src1Reg);
705
706 unsigned DstSize = MRI->getType(DstReg).getSizeInBits();
707 unsigned InsSize = Src1Ty.getSizeInBits();
708
709 int64_t Offset = I.getOperand(3).getImm();
710
711 // FIXME: These cases should have been illegal and unnecessary to check here.
712 if (Offset % 32 != 0 || InsSize % 32 != 0)
713 return false;
714
715 // Currently not handled by getSubRegFromChannel.
716 if (InsSize > 128)
717 return false;
718
719 unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32);
720 if (SubReg == AMDGPU::NoSubRegister)
721 return false;
722
723 const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
724 const TargetRegisterClass *DstRC =
725 TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
726 if (!DstRC)
727 return false;
728
729 const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI);
730 const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI);
731 const TargetRegisterClass *Src0RC =
732 TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank, *MRI);
733 const TargetRegisterClass *Src1RC =
734 TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank, *MRI);
735
736 // Deal with weird cases where the class only partially supports the subreg
737 // index.
738 Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg);
739 if (!Src0RC || !Src1RC)
740 return false;
741
742 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
743 !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) ||
744 !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI))
745 return false;
746
747 const DebugLoc &DL = I.getDebugLoc();
748 BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg)
749 .addReg(Src0Reg)
750 .addReg(Src1Reg)
751 .addImm(SubReg);
752
753 I.eraseFromParent();
754 return true;
755}
756
757bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const {
758 if (STI.getLDSBankCount() != 16)
759 return selectImpl(MI, *CoverageInfo);
760
761 Register Dst = MI.getOperand(0).getReg();
762 Register Src0 = MI.getOperand(2).getReg();
763 Register M0Val = MI.getOperand(6).getReg();
764 if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) ||
765 !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) ||
766 !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI))
767 return false;
768
769 // This requires 2 instructions. It is possible to write a pattern to support
770 // this, but the generated isel emitter doesn't correctly deal with multiple
771 // output instructions using the same physical register input. The copy to m0
772 // is incorrectly placed before the second instruction.
773 //
774 // TODO: Match source modifiers.
775
776 Register InterpMov = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
777 const DebugLoc &DL = MI.getDebugLoc();
778 MachineBasicBlock *MBB = MI.getParent();
779
780 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
781 .addReg(M0Val);
782 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov)
783 .addImm(2)
784 .addImm(MI.getOperand(4).getImm()) // $attr
785 .addImm(MI.getOperand(3).getImm()); // $attrchan
786
787 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_P1LV_F16), Dst)
788 .addImm(0) // $src0_modifiers
789 .addReg(Src0) // $src0
790 .addImm(MI.getOperand(4).getImm()) // $attr
791 .addImm(MI.getOperand(3).getImm()) // $attrchan
792 .addImm(0) // $src2_modifiers
793 .addReg(InterpMov) // $src2 - 2 f16 values selected by high
794 .addImm(MI.getOperand(5).getImm()) // $high
795 .addImm(0) // $clamp
796 .addImm(0); // $omod
797
798 MI.eraseFromParent();
799 return true;
800}
801
802// Writelane is special in that it can use SGPR and M0 (which would normally
803// count as using the constant bus twice - but in this case it is allowed since
804// the lane selector doesn't count as a use of the constant bus). However, it is
805// still required to abide by the 1 SGPR rule. Fix this up if we might have
806// multiple SGPRs.
807bool AMDGPUInstructionSelector::selectWritelane(MachineInstr &MI) const {
808 // With a constant bus limit of at least 2, there's no issue.
809 if (STI.getConstantBusLimit(AMDGPU::V_WRITELANE_B32) > 1)
810 return selectImpl(MI, *CoverageInfo);
811
812 MachineBasicBlock *MBB = MI.getParent();
813 const DebugLoc &DL = MI.getDebugLoc();
814 Register VDst = MI.getOperand(0).getReg();
815 Register Val = MI.getOperand(2).getReg();
816 Register LaneSelect = MI.getOperand(3).getReg();
817 Register VDstIn = MI.getOperand(4).getReg();
818
819 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_WRITELANE_B32), VDst);
820
821 Optional<ValueAndVReg> ConstSelect =
822 getConstantVRegValWithLookThrough(LaneSelect, *MRI, true, true);
823 if (ConstSelect) {
824 // The selector has to be an inline immediate, so we can use whatever for
825 // the other operands.
826 MIB.addReg(Val);
827 MIB.addImm(ConstSelect->Value &
828 maskTrailingOnes<uint64_t>(STI.getWavefrontSizeLog2()));
829 } else {
830 Optional<ValueAndVReg> ConstVal =
831 getConstantVRegValWithLookThrough(Val, *MRI, true, true);
832
833 // If the value written is an inline immediate, we can get away without a
834 // copy to m0.
835 if (ConstVal && AMDGPU::isInlinableLiteral32(ConstVal->Value,
836 STI.hasInv2PiInlineImm())) {
837 MIB.addImm(ConstVal->Value);
838 MIB.addReg(LaneSelect);
839 } else {
840 MIB.addReg(Val);
841
842 // If the lane selector was originally in a VGPR and copied with
843 // readfirstlane, there's a hazard to read the same SGPR from the
844 // VALU. Constrain to a different SGPR to help avoid needing a nop later.
845 RBI.constrainGenericRegister(LaneSelect, AMDGPU::SReg_32_XM0RegClass, *MRI);
846
847 BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
848 .addReg(LaneSelect);
849 MIB.addReg(AMDGPU::M0);
850 }
851 }
852
853 MIB.addReg(VDstIn);
854
855 MI.eraseFromParent();
856 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
857}
858
859// We need to handle this here because tablegen doesn't support matching
860// instructions with multiple outputs.
861bool AMDGPUInstructionSelector::selectDivScale(MachineInstr &MI) const {
862 Register Dst0 = MI.getOperand(0).getReg();
863 Register Dst1 = MI.getOperand(1).getReg();
864
865 LLT Ty = MRI->getType(Dst0);
866 unsigned Opc;
867 if (Ty == LLT::scalar(32))
868 Opc = AMDGPU::V_DIV_SCALE_F32;
869 else if (Ty == LLT::scalar(64))
870 Opc = AMDGPU::V_DIV_SCALE_F64;
871 else
872 return false;
873
874 const DebugLoc &DL = MI.getDebugLoc();
875 MachineBasicBlock *MBB = MI.getParent();
876
877 Register Numer = MI.getOperand(3).getReg();
878 Register Denom = MI.getOperand(4).getReg();
879 unsigned ChooseDenom = MI.getOperand(5).getImm();
880
881 Register Src0 = ChooseDenom != 0 ? Numer : Denom;
882
883 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), Dst0)
884 .addDef(Dst1)
885 .addUse(Src0)
886 .addUse(Denom)
887 .addUse(Numer);
888
889 MI.eraseFromParent();
890 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
891}
892
893bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
894 unsigned IntrinsicID = I.getIntrinsicID();
895 switch (IntrinsicID) {
896 case Intrinsic::amdgcn_if_break: {
897 MachineBasicBlock *BB = I.getParent();
898
899    // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
900 // SelectionDAG uses for wave32 vs wave64.
901 BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK))
902 .add(I.getOperand(0))
903 .add(I.getOperand(2))
904 .add(I.getOperand(3));
905
906 Register DstReg = I.getOperand(0).getReg();
907 Register Src0Reg = I.getOperand(2).getReg();
908 Register Src1Reg = I.getOperand(3).getReg();
909
910 I.eraseFromParent();
911
912 for (Register Reg : { DstReg, Src0Reg, Src1Reg })
913 MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
914
915 return true;
916 }
917 case Intrinsic::amdgcn_interp_p1_f16:
918 return selectInterpP1F16(I);
919 case Intrinsic::amdgcn_wqm:
920 return constrainCopyLikeIntrin(I, AMDGPU::WQM);
921 case Intrinsic::amdgcn_softwqm:
922 return constrainCopyLikeIntrin(I, AMDGPU::SOFT_WQM);
923 case Intrinsic::amdgcn_wwm:
924 return constrainCopyLikeIntrin(I, AMDGPU::WWM);
925 case Intrinsic::amdgcn_writelane:
926 return selectWritelane(I);
927 case Intrinsic::amdgcn_div_scale:
928 return selectDivScale(I);
929 case Intrinsic::amdgcn_icmp:
930 return selectIntrinsicIcmp(I);
931 case Intrinsic::amdgcn_ballot:
932 return selectBallot(I);
933 case Intrinsic::amdgcn_reloc_constant:
934 return selectRelocConstant(I);
935 case Intrinsic::amdgcn_groupstaticsize:
936 return selectGroupStaticSize(I);
937 case Intrinsic::returnaddress:
938 return selectReturnAddress(I);
939 default:
940 return selectImpl(I, *CoverageInfo);
941 }
942}
943
944static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) {
945 if (Size != 32 && Size != 64)
946 return -1;
947 switch (P) {
948 default:
949    llvm_unreachable("Unknown condition code!");
950 case CmpInst::ICMP_NE:
951 return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64;
952 case CmpInst::ICMP_EQ:
953 return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64;
954 case CmpInst::ICMP_SGT:
955 return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64;
956 case CmpInst::ICMP_SGE:
957 return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64;
958 case CmpInst::ICMP_SLT:
959 return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64;
960 case CmpInst::ICMP_SLE:
961 return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64;
962 case CmpInst::ICMP_UGT:
963 return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64;
964 case CmpInst::ICMP_UGE:
965 return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64;
966 case CmpInst::ICMP_ULT:
967 return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64;
968 case CmpInst::ICMP_ULE:
969 return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64;
970 }
971}
972
973int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
974 unsigned Size) const {
975 if (Size == 64) {
976 if (!STI.hasScalarCompareEq64())
977 return -1;
978
979 switch (P) {
980 case CmpInst::ICMP_NE:
981 return AMDGPU::S_CMP_LG_U64;
982 case CmpInst::ICMP_EQ:
983 return AMDGPU::S_CMP_EQ_U64;
984 default:
985 return -1;
986 }
987 }
988
989 if (Size != 32)
990 return -1;
991
992 switch (P) {
993 case CmpInst::ICMP_NE:
994 return AMDGPU::S_CMP_LG_U32;
995 case CmpInst::ICMP_EQ:
996 return AMDGPU::S_CMP_EQ_U32;
997 case CmpInst::ICMP_SGT:
998 return AMDGPU::S_CMP_GT_I32;
999 case CmpInst::ICMP_SGE:
1000 return AMDGPU::S_CMP_GE_I32;
1001 case CmpInst::ICMP_SLT:
1002 return AMDGPU::S_CMP_LT_I32;
1003 case CmpInst::ICMP_SLE:
1004 return AMDGPU::S_CMP_LE_I32;
1005 case CmpInst::ICMP_UGT:
1006 return AMDGPU::S_CMP_GT_U32;
1007 case CmpInst::ICMP_UGE:
1008 return AMDGPU::S_CMP_GE_U32;
1009 case CmpInst::ICMP_ULT:
1010 return AMDGPU::S_CMP_LT_U32;
1011 case CmpInst::ICMP_ULE:
1012 return AMDGPU::S_CMP_LE_U32;
1013 default:
1014    llvm_unreachable("Unknown condition code!");
1015 }
1016}
1017
1018bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
1019 MachineBasicBlock *BB = I.getParent();
1020 const DebugLoc &DL = I.getDebugLoc();
1021
1022 Register SrcReg = I.getOperand(2).getReg();
1023 unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1024
1025 auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();
1026
1027 Register CCReg = I.getOperand(0).getReg();
1028 if (!isVCC(CCReg, *MRI)) {
1029 int Opcode = getS_CMPOpcode(Pred, Size);
1030 if (Opcode == -1)
1031 return false;
1032 MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
1033 .add(I.getOperand(2))
1034 .add(I.getOperand(3));
1035 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
1036 .addReg(AMDGPU::SCC);
1037 bool Ret =
1038 constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
1039 RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI);
1040 I.eraseFromParent();
1041 return Ret;
1042 }
1043
1044 int Opcode = getV_CMPOpcode(Pred, Size);
1045 if (Opcode == -1)
1046 return false;
1047
1048 MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
1049 I.getOperand(0).getReg())
1050 .add(I.getOperand(2))
1051 .add(I.getOperand(3));
1052 RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
1053 *TRI.getBoolRC(), *MRI);
1054 bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
1055 I.eraseFromParent();
1056 return Ret;
1057}
1058
1059bool AMDGPUInstructionSelector::selectIntrinsicIcmp(MachineInstr &I) const {
1060 Register Dst = I.getOperand(0).getReg();
1061 if (isVCC(Dst, *MRI))
1062 return false;
1063
1064 if (MRI->getType(Dst).getSizeInBits() != STI.getWavefrontSize())
1065 return false;
1066
1067 MachineBasicBlock *BB = I.getParent();
1068 const DebugLoc &DL = I.getDebugLoc();
1069 Register SrcReg = I.getOperand(2).getReg();
1070 unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1071 auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(4).getImm());
1072
1073 int Opcode = getV_CMPOpcode(Pred, Size);
1074 if (Opcode == -1)
1075 return false;
1076
1077 MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode), Dst)
1078 .add(I.getOperand(2))
1079 .add(I.getOperand(3));
1080 RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(), *TRI.getBoolRC(),
1081 *MRI);
1082 bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
1083 I.eraseFromParent();
1084 return Ret;
1085}
1086
1087bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const {
1088 MachineBasicBlock *BB = I.getParent();
1089 const DebugLoc &DL = I.getDebugLoc();
1090 Register DstReg = I.getOperand(0).getReg();
1091 const unsigned Size = MRI->getType(DstReg).getSizeInBits();
1092 const bool Is64 = Size == 64;
1093
1094 if (Size != STI.getWavefrontSize())
1095 return false;
1096
1097 Optional<ValueAndVReg> Arg =
1098 getConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI, true);
1099
1100 if (Arg.hasValue()) {
1101 const int64_t Value = Arg.getValue().Value;
1102 if (Value == 0) {
1103 unsigned Opcode = Is64 ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
1104 BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg).addImm(0);
1105 } else if (Value == -1) { // all ones
1106 Register SrcReg = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
1107 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
1108 } else
1109 return false;
1110 } else {
1111 Register SrcReg = I.getOperand(2).getReg();
1112 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
1113 }
1114
1115 I.eraseFromParent();
1116 return true;
1117}
1118
1119bool AMDGPUInstructionSelector::selectRelocConstant(MachineInstr &I) const {
1120 Register DstReg = I.getOperand(0).getReg();
1121 const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
1122 const TargetRegisterClass *DstRC =
1123 TRI.getRegClassForSizeOnBank(32, *DstBank, *MRI);
1124 if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
1125 return false;
1126
1127 const bool IsVALU = DstBank->getID() == AMDGPU::VGPRRegBankID;
1128
1129 Module *M = MF->getFunction().getParent();
1130 const MDNode *Metadata = I.getOperand(2).getMetadata();
1131 auto SymbolName = cast<MDString>(Metadata->getOperand(0))->getString();
1132 auto RelocSymbol = cast<GlobalVariable>(
1133 M->getOrInsertGlobal(SymbolName, Type::getInt32Ty(M->getContext())));
1134
1135 MachineBasicBlock *BB = I.getParent();
1136 BuildMI(*BB, &I, I.getDebugLoc(),
1137 TII.get(IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32), DstReg)
1138 .addGlobalAddress(RelocSymbol, 0, SIInstrInfo::MO_ABS32_LO);
1139
1140 I.eraseFromParent();
1141 return true;
1142}
1143
1144bool AMDGPUInstructionSelector::selectGroupStaticSize(MachineInstr &I) const {
1145 Triple::OSType OS = MF->getTarget().getTargetTriple().getOS();
1146
1147 Register DstReg = I.getOperand(0).getReg();
1148 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1149 unsigned Mov = DstRB->getID() == AMDGPU::SGPRRegBankID ?
1150 AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1151
1152 MachineBasicBlock *MBB = I.getParent();
1153 const DebugLoc &DL = I.getDebugLoc();
1154
1155 auto MIB = BuildMI(*MBB, &I, DL, TII.get(Mov), DstReg);
1156
1157 if (OS == Triple::AMDHSA || OS == Triple::AMDPAL) {
1158 const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1159 MIB.addImm(MFI->getLDSSize());
1160 } else {
1161 Module *M = MF->getFunction().getParent();
1162 const GlobalValue *GV
1163 = Intrinsic::getDeclaration(M, Intrinsic::amdgcn_groupstaticsize);
1164 MIB.addGlobalAddress(GV, 0, SIInstrInfo::MO_ABS32_LO);
1165 }
1166
1167 I.eraseFromParent();
1168 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1169}
1170
1171bool AMDGPUInstructionSelector::selectReturnAddress(MachineInstr &I) const {
1172 MachineBasicBlock *MBB = I.getParent();
1173 MachineFunction &MF = *MBB->getParent();
1174 const DebugLoc &DL = I.getDebugLoc();
1175
1176 MachineOperand &Dst = I.getOperand(0);
1177 Register DstReg = Dst.getReg();
1178 unsigned Depth = I.getOperand(2).getImm();
1179
1180 const TargetRegisterClass *RC
1181 = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
1182 if (!RC->hasSubClassEq(&AMDGPU::SGPR_64RegClass) ||
1183 !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
1184 return false;
1185
1186 // Check for kernel and shader functions
1187 if (Depth != 0 ||
1188 MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) {
1189 BuildMI(*MBB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
1190 .addImm(0);
1191 I.eraseFromParent();
1192 return true;
1193 }
1194
1195 MachineFrameInfo &MFI = MF.getFrameInfo();
1196 // There is a call to @llvm.returnaddress in this function
1197 MFI.setReturnAddressIsTaken(true);
1198
1199 // Get the return address reg and mark it as an implicit live-in
1200 Register ReturnAddrReg = TRI.getReturnAddressReg(MF);
1201 Register LiveIn = getFunctionLiveInPhysReg(MF, TII, ReturnAddrReg,
1202 AMDGPU::SReg_64RegClass);
1203 BuildMI(*MBB, &I, DL, TII.get(AMDGPU::COPY), DstReg)
1204 .addReg(LiveIn);
1205 I.eraseFromParent();
1206 return true;
1207}
1208
1209bool AMDGPUInstructionSelector::selectEndCfIntrinsic(MachineInstr &MI) const {
1210  // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
1211 // SelectionDAG uses for wave32 vs wave64.
1212 MachineBasicBlock *BB = MI.getParent();
1213 BuildMI(*BB, &MI, MI.getDebugLoc(), TII.get(AMDGPU::SI_END_CF))
1214 .add(MI.getOperand(1));
1215
1216 Register Reg = MI.getOperand(1).getReg();
1217 MI.eraseFromParent();
1218
1219 if (!MRI->getRegClassOrNull(Reg))
1220 MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
1221 return true;
1222}
1223
1224bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic(
1225 MachineInstr &MI, Intrinsic::ID IntrID) const {
1226 MachineBasicBlock *MBB = MI.getParent();
1227 MachineFunction *MF = MBB->getParent();
1228 const DebugLoc &DL = MI.getDebugLoc();
1229
1230 unsigned IndexOperand = MI.getOperand(7).getImm();
1231 bool WaveRelease = MI.getOperand(8).getImm() != 0;
1232 bool WaveDone = MI.getOperand(9).getImm() != 0;
1233
1234 if (WaveDone && !WaveRelease)
1235 report_fatal_error("ds_ordered_count: wave_done requires wave_release");
1236
1237 unsigned OrderedCountIndex = IndexOperand & 0x3f;
1238 IndexOperand &= ~0x3f;
1239 unsigned CountDw = 0;
1240
1241 if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) {
1242 CountDw = (IndexOperand >> 24) & 0xf;
1243 IndexOperand &= ~(0xf << 24);
1244
1245 if (CountDw < 1 || CountDw > 4) {
1246 report_fatal_error(
1247 "ds_ordered_count: dword count must be between 1 and 4");
1248 }
1249 }
1250
1251 if (IndexOperand)
1252 report_fatal_error("ds_ordered_count: bad index operand");
1253
1254 unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
1255 unsigned ShaderType = SIInstrInfo::getDSShaderTypeValue(*MF);
1256
1257 unsigned Offset0 = OrderedCountIndex << 2;
1258 unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |
1259 (Instruction << 4);
1260
1261 if (STI.getGeneration() >= AMDGPUSubtarget::GFX10)
1262 Offset1 |= (CountDw - 1) << 6;
1263
1264 unsigned Offset = Offset0 | (Offset1 << 8);
1265
1266 Register M0Val = MI.getOperand(2).getReg();
1267 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1268 .addReg(M0Val);
1269
1270 Register DstReg = MI.getOperand(0).getReg();
1271 Register ValReg = MI.getOperand(3).getReg();
1272 MachineInstrBuilder DS =
1273 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg)
1274 .addReg(ValReg)
1275 .addImm(Offset)
1276 .cloneMemRefs(MI);
1277
1278 if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI))
1279 return false;
1280
1281 bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI);
1282 MI.eraseFromParent();
1283 return Ret;
1284}
1285
1286static unsigned gwsIntrinToOpcode(unsigned IntrID) {
1287 switch (IntrID) {
1288 case Intrinsic::amdgcn_ds_gws_init:
1289 return AMDGPU::DS_GWS_INIT;
1290 case Intrinsic::amdgcn_ds_gws_barrier:
1291 return AMDGPU::DS_GWS_BARRIER;
1292 case Intrinsic::amdgcn_ds_gws_sema_v:
1293 return AMDGPU::DS_GWS_SEMA_V;
1294 case Intrinsic::amdgcn_ds_gws_sema_br:
1295 return AMDGPU::DS_GWS_SEMA_BR;
1296 case Intrinsic::amdgcn_ds_gws_sema_p:
1297 return AMDGPU::DS_GWS_SEMA_P;
1298 case Intrinsic::amdgcn_ds_gws_sema_release_all:
1299 return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
1300 default:
1301    llvm_unreachable("not a gws intrinsic");
1302 }
1303}
1304
1305bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI,
1306 Intrinsic::ID IID) const {
1307 if (IID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
1308 !STI.hasGWSSemaReleaseAll())
1309 return false;
1310
1311 // intrinsic ID, vsrc, offset
1312 const bool HasVSrc = MI.getNumOperands() == 3;
1313  assert(HasVSrc || MI.getNumOperands() == 2);
1314
1315 Register BaseOffset = MI.getOperand(HasVSrc ? 2 : 1).getReg();
1316 const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI);
1317 if (OffsetRB->getID() != AMDGPU::SGPRRegBankID)
1318 return false;
1319
1320 MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1321  assert(OffsetDef);
1322
1323 unsigned ImmOffset;
1324
1325 MachineBasicBlock *MBB = MI.getParent();
1326 const DebugLoc &DL = MI.getDebugLoc();
1327
1328 MachineInstr *Readfirstlane = nullptr;
1329
1330 // If we legalized the VGPR input, strip out the readfirstlane to analyze the
1331 // incoming offset, in case there's an add of a constant. We'll have to put it
1332 // back later.
1333 if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) {
1334 Readfirstlane = OffsetDef;
1335 BaseOffset = OffsetDef->getOperand(1).getReg();
1336 OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1337 }
1338
1339 if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) {
1340 // If we have a constant offset, try to use the 0 in m0 as the base.
1341 // TODO: Look into changing the default m0 initialization value. If the
1342 // default -1 only set the low 16-bits, we could leave it as-is and add 1 to
1343 // the immediate offset.
1344
1345 ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue();
1346 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
1347 .addImm(0);
1348 } else {
1349 std::tie(BaseOffset, ImmOffset, OffsetDef)
1350 = AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset);
1351
1352 if (Readfirstlane) {
1353 // We have the constant offset now, so put the readfirstlane back on the
1354 // variable component.
1355 if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI))
1356 return false;
1357
1358 Readfirstlane->getOperand(1).setReg(BaseOffset);
1359 BaseOffset = Readfirstlane->getOperand(0).getReg();
1360 } else {
1361 if (!RBI.constrainGenericRegister(BaseOffset,
1362 AMDGPU::SReg_32RegClass, *MRI))
1363 return false;
1364 }
1365
1366 Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1367 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base)
1368 .addReg(BaseOffset)
1369 .addImm(16);
1370
1371 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1372 .addReg(M0Base);
1373 }
1374
1375 // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
1376 // offset field) % 64. Some versions of the programming guide omit the m0
1377 // part, or claim it's from offset 0.
1378 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID)));
1379
1380 if (HasVSrc) {
1381 Register VSrc = MI.getOperand(1).getReg();
1382 MIB.addReg(VSrc);
1383 if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI))
1384 return false;
1385 }
1386
1387 MIB.addImm(ImmOffset)
1388 .addImm(-1) // $gds
1389 .cloneMemRefs(MI);
1390
1391 MI.eraseFromParent();
1392 return true;
1393}
1394
1395bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI,
1396 bool IsAppend) const {
1397 Register PtrBase = MI.getOperand(2).getReg();
1398 LLT PtrTy = MRI->getType(PtrBase);
1399 bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
1400
1401 unsigned Offset;
1402 std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2));
1403
1404 // TODO: Should this try to look through readfirstlane like GWS?
1405 if (!isDSOffsetLegal(PtrBase, Offset, 16)) {
1406 PtrBase = MI.getOperand(2).getReg();
1407 Offset = 0;
1408 }
1409
1410 MachineBasicBlock *MBB = MI.getParent();
1411 const DebugLoc &DL = MI.getDebugLoc();
1412 const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;
1413
1414 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1415 .addReg(PtrBase);
1416 if (!RBI.constrainGenericRegister(PtrBase, AMDGPU::SReg_32RegClass, *MRI))
1417 return false;
1418
1419 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg())
1420 .addImm(Offset)
1421 .addImm(IsGDS ? -1 : 0)
1422 .cloneMemRefs(MI);
1423 MI.eraseFromParent();
1424 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1425}
1426
1427bool AMDGPUInstructionSelector::selectSBarrier(MachineInstr &MI) const {
1428 if (TM.getOptLevel() > CodeGenOpt::None) {
1429 unsigned WGSize = STI.getFlatWorkGroupSizes(MF->getFunction()).second;
1430 if (WGSize <= STI.getWavefrontSize()) {
1431 MachineBasicBlock *MBB = MI.getParent();
1432 const DebugLoc &DL = MI.getDebugLoc();
1433 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::WAVE_BARRIER));
1434 MI.eraseFromParent();
1435 return true;
1436 }
1437 }
1438 return selectImpl(MI, *CoverageInfo);
1439}
1440
1441static bool parseTexFail(uint64_t TexFailCtrl, bool &TFE, bool &LWE,
1442 bool &IsTexFail) {
1443 if (TexFailCtrl)
1444 IsTexFail = true;
1445
1446 TFE = (TexFailCtrl & 0x1) ? 1 : 0;
1447 TexFailCtrl &= ~(uint64_t)0x1;
1448 LWE = (TexFailCtrl & 0x2) ? 1 : 0;
1449 TexFailCtrl &= ~(uint64_t)0x2;
1450
1451 return TexFailCtrl == 0;
1452}
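// Annotation (not in the source): a worked example of the bit decoding above.
// parseTexFail(3, TFE, LWE, IsTexFail) sets TFE = true, LWE = true,
// IsTexFail = true and returns true; parseTexFail(4, ...) returns false
// because bit 2 is not a recognized texfailctrl bit.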
1453
1454static bool parseCachePolicy(uint64_t Value,
1455 bool *GLC, bool *SLC, bool *DLC) {
1456 if (GLC) {
1457 *GLC = (Value & 0x1) ? 1 : 0;
1458 Value &= ~(uint64_t)0x1;
1459 }
1460 if (SLC) {
1461 *SLC = (Value & 0x2) ? 1 : 0;
1462 Value &= ~(uint64_t)0x2;
1463 }
1464 if (DLC) {
1465 *DLC = (Value & 0x4) ? 1 : 0;
1466 Value &= ~(uint64_t)0x4;
1467 }
1468
1469 return Value == 0;
1470}
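// Annotation (not in the source): e.g. parseCachePolicy(0x5, &GLC, &SLC,
// &DLC) yields GLC = true, SLC = false, DLC = true and returns true; with
// DLC == nullptr (pre-GFX10) the leftover 0x4 bit makes it return false,
// rejecting the dlc bit on targets without it.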
1471
1472bool AMDGPUInstructionSelector::selectImageIntrinsic(
1473 MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const {
1474 MachineBasicBlock *MBB = MI.getParent();
1475 const DebugLoc &DL = MI.getDebugLoc();
1476
1477 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
1478 AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
1479
1480 const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
1481 const AMDGPU::MIMGLZMappingInfo *LZMappingInfo =
1482 AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode);
1483 const AMDGPU::MIMGMIPMappingInfo *MIPMappingInfo =
1484 AMDGPU::getMIMGMIPMappingInfo(Intr->BaseOpcode);
1485 unsigned IntrOpcode = Intr->BaseOpcode;
1486 const bool IsGFX10 = STI.getGeneration() >= AMDGPUSubtarget::GFX10;
1487
1488 const unsigned ArgOffset = MI.getNumExplicitDefs() + 1;
1489
1490 Register VDataIn, VDataOut;
1491 LLT VDataTy;
1492 int NumVDataDwords = -1;
1493 bool IsD16 = false;
1494
1495 bool Unorm;
1496 if (!BaseOpcode->Sampler)
1497 Unorm = true;
1498 else
1499 Unorm = MI.getOperand(ArgOffset + Intr->UnormIndex).getImm() != 0;
1500
1501 bool TFE;
1502 bool LWE;
1503 bool IsTexFail = false;
1504 if (!parseTexFail(MI.getOperand(ArgOffset + Intr->TexFailCtrlIndex).getImm(),
1505 TFE, LWE, IsTexFail))
1506 return false;
1507
1508 const int Flags = MI.getOperand(ArgOffset + Intr->NumArgs).getImm();
1509 const bool IsA16 = (Flags & 1) != 0;
1510 const bool IsG16 = (Flags & 2) != 0;
1511
1512 // A16 implies 16-bit gradients
1513 if (IsA16 && !IsG16)
1514 return false;
1515
1516 unsigned DMask = 0;
1517 unsigned DMaskLanes = 0;
1518
1519 if (BaseOpcode->Atomic) {
1520 VDataOut = MI.getOperand(0).getReg();
1521 VDataIn = MI.getOperand(2).getReg();
1522 LLT Ty = MRI->getType(VDataIn);
1523
1524 // Be careful to allow atomic swap on 16-bit element vectors.
1525 const bool Is64Bit = BaseOpcode->AtomicX2 ?
1526 Ty.getSizeInBits() == 128 :
1527 Ty.getSizeInBits() == 64;
1528
1529 if (BaseOpcode->AtomicX2) {
1530 assert(MI.getOperand(3).getReg() == AMDGPU::NoRegister);
1531
1532 DMask = Is64Bit ? 0xf : 0x3;
1533 NumVDataDwords = Is64Bit ? 4 : 2;
1534 } else {
1535 DMask = Is64Bit ? 0x3 : 0x1;
1536 NumVDataDwords = Is64Bit ? 2 : 1;
1537 }
1538 } else {
1539 DMask = MI.getOperand(ArgOffset + Intr->DMaskIndex).getImm();
1540 DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);
1541
1542 // One memoperand is mandatory, except for getresinfo.
1543 // FIXME: Check this in verifier.
1544 if (!MI.memoperands_empty()) {
1545 const MachineMemOperand *MMO = *MI.memoperands_begin();
1546
1547 // Infer d16 from the memory size, as the register type will be mangled by
1548 // unpacked subtargets, or by TFE.
1549 IsD16 = ((8 * MMO->getSize()) / DMaskLanes) < 32;
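// Annotation (not in the source): e.g. an 8-byte access with DMaskLanes == 4
// is 16 bits per lane, so IsD16 is set (illustrative sizes).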
1550 }
1551
1552 if (BaseOpcode->Store) {
1553 VDataIn = MI.getOperand(1).getReg();
1554 VDataTy = MRI->getType(VDataIn);
1555 NumVDataDwords = (VDataTy.getSizeInBits() + 31) / 32;
1556 } else {
1557 VDataOut = MI.getOperand(0).getReg();
1558 VDataTy = MRI->getType(VDataOut);
1559 NumVDataDwords = DMaskLanes;
1560
1561 if (IsD16 && !STI.hasUnpackedD16VMem())
1562 NumVDataDwords = (DMaskLanes + 1) / 2;
1563 }
1564 }
1565
1566 // Optimize _L to _LZ when _L is zero
1567 if (LZMappingInfo) {
1568 // The legalizer replaced the register with an immediate 0 if we need to
1569 // change the opcode.
1570 const MachineOperand &Lod = MI.getOperand(ArgOffset + Intr->LodIndex);
1571 if (Lod.isImm()) {
1572 assert(Lod.getImm() == 0);
1573 IntrOpcode = LZMappingInfo->LZ; // set new opcode to _lz variant of _l
1574 }
1575 }
1576
1577 // Optimize _mip away when 'lod' is zero
1578 if (MIPMappingInfo) {
1579 const MachineOperand &Lod = MI.getOperand(ArgOffset + Intr->MipIndex);
1580 if (Lod.isImm()) {
1581 assert(Lod.getImm() == 0);
1582 IntrOpcode = MIPMappingInfo->NONMIP; // set new opcode to variant without _mip
1583 }
1584 }
1585
1586 // Set G16 opcode
1587 if (IsG16 && !IsA16) {
1588 const AMDGPU::MIMGG16MappingInfo *G16MappingInfo =
1589 AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode);
1590 assert(G16MappingInfo);
1591 IntrOpcode = G16MappingInfo->G16; // set opcode to variant with _g16
1592 }
1593
1594 // TODO: Check this in verifier.
1595 assert((!IsTexFail || DMaskLanes >= 1) && "should have legalized this");
1596
1597 bool GLC = false;
1598 bool SLC = false;
1599 bool DLC = false;
1600 if (BaseOpcode->Atomic) {
1601 GLC = true; // TODO no-return optimization
1602 if (!parseCachePolicy(
1603 MI.getOperand(ArgOffset + Intr->CachePolicyIndex).getImm(), nullptr,
1604 &SLC, IsGFX10 ? &DLC : nullptr))
1605 return false;
1606 } else {
1607 if (!parseCachePolicy(
1608 MI.getOperand(ArgOffset + Intr->CachePolicyIndex).getImm(), &GLC,
1609 &SLC, IsGFX10 ? &DLC : nullptr))
1610 return false;
1611 }
1612
1613 int NumVAddrRegs = 0;
1614 int NumVAddrDwords = 0;
1615 for (unsigned I = Intr->VAddrStart; I < Intr->VAddrEnd; I++) {
1616 // Skip the $noregs and 0s inserted during legalization.
1617 MachineOperand &AddrOp = MI.getOperand(ArgOffset + I);
1618 if (!AddrOp.isReg())
1619 continue; // XXX - Break?
1620
1621 Register Addr = AddrOp.getReg();
1622 if (!Addr)
1623 break;
1624
1625 ++NumVAddrRegs;
1626 NumVAddrDwords += (MRI->getType(Addr).getSizeInBits() + 31) / 32;
1627 }
1628
1629 // The legalizer preprocessed the intrinsic arguments. If we aren't using
1630 // NSA, these should have been packed into a single value in the first
1631 // address register.
1632 const bool UseNSA = NumVAddrRegs != 1 && NumVAddrDwords == NumVAddrRegs;
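// Annotation (not in the source): e.g. three 32-bit address registers give
// NumVAddrRegs == NumVAddrDwords == 3, selecting NSA; a single packed 96-bit
// register (NumVAddrRegs == 1) uses the default encoding (illustrative
// values).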
1633 if (UseNSA && !STI.hasFeature(AMDGPU::FeatureNSAEncoding)) {
1634 LLVM_DEBUG(dbgs() << "Trying to use NSA on non-NSA target\n");
1635 return false;
1636 }
1637
1638 if (IsTexFail)
1639 ++NumVDataDwords;
1640
1641 int Opcode = -1;
1642 if (IsGFX10) {
1643 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
1644 UseNSA ? AMDGPU::MIMGEncGfx10NSA
1645 : AMDGPU::MIMGEncGfx10Default,
1646 NumVDataDwords, NumVAddrDwords);
1647 } else {
1648 if (STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
1649 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
1650 NumVDataDwords, NumVAddrDwords);
1651 if (Opcode == -1)
1652 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
1653 NumVDataDwords, NumVAddrDwords);
1654 }
1655 assert(Opcode != -1);
1656
1657 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opcode))
1658 .cloneMemRefs(MI);
1659
1660 if (VDataOut) {
1661 if (BaseOpcode->AtomicX2) {
1662 const bool Is64 = MRI->getType(VDataOut).getSizeInBits() == 64;
1663
1664 Register TmpReg = MRI->createVirtualRegister(
1665 Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
1666 unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
1667
1668 MIB.addDef(TmpReg);
1669 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), VDataOut)
1670 .addReg(TmpReg, RegState::Kill, SubReg);
1671
1672 } else {
1673 MIB.addDef(VDataOut); // vdata output
1674 }
1675 }
1676
1677 if (VDataIn)
1678 MIB.addReg(VDataIn); // vdata input
1679
1680 for (int I = 0; I != NumVAddrRegs; ++I) {
1681 MachineOperand &SrcOp = MI.getOperand(ArgOffset + Intr->VAddrStart + I);
1682 if (SrcOp.isReg()) {
1683 assert(SrcOp.getReg() != 0);
1684 MIB.addReg(SrcOp.getReg());
1685 }
1686 }
1687
1688 MIB.addReg(MI.getOperand(ArgOffset + Intr->RsrcIndex).getReg());
1689 if (BaseOpcode->Sampler)
1690 MIB.addReg(MI.getOperand(ArgOffset + Intr->SampIndex).getReg());
1691
1692 MIB.addImm(DMask); // dmask
1693
1694 if (IsGFX10)
1695 MIB.addImm(DimInfo->Encoding);
1696 MIB.addImm(Unorm);
1697 if (IsGFX10)
1698 MIB.addImm(DLC);
1699
1700 MIB.addImm(GLC);
1701 MIB.addImm(SLC);
1702 MIB.addImm(IsA16 && // a16 or r128
1703 STI.hasFeature(AMDGPU::FeatureR128A16) ? -1 : 0);
1704 if (IsGFX10)
1705 MIB.addImm(IsA16 ? -1 : 0);
1706
1707 MIB.addImm(TFE); // tfe
1708 MIB.addImm(LWE); // lwe
1709 if (!IsGFX10)
1710 MIB.addImm(DimInfo->DA ? -1 : 0);
1711 if (BaseOpcode->HasD16)
1712 MIB.addImm(IsD16 ? -1 : 0);
1713
1714 MI.eraseFromParent();
1715 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1716}
1717
1718bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
1719 MachineInstr &I) const {
1720 unsigned IntrinsicID = I.getIntrinsicID();
1721 switch (IntrinsicID) {
1722 case Intrinsic::amdgcn_end_cf:
1723 return selectEndCfIntrinsic(I);
1724 case Intrinsic::amdgcn_ds_ordered_add:
1725 case Intrinsic::amdgcn_ds_ordered_swap:
1726 return selectDSOrderedIntrinsic(I, IntrinsicID);
1727 case Intrinsic::amdgcn_ds_gws_init:
1728 case Intrinsic::amdgcn_ds_gws_barrier:
1729 case Intrinsic::amdgcn_ds_gws_sema_v:
1730 case Intrinsic::amdgcn_ds_gws_sema_br:
1731 case Intrinsic::amdgcn_ds_gws_sema_p:
1732 case Intrinsic::amdgcn_ds_gws_sema_release_all:
1733 return selectDSGWSIntrinsic(I, IntrinsicID);
1734 case Intrinsic::amdgcn_ds_append:
1735 return selectDSAppendConsume(I, true);
1736 case Intrinsic::amdgcn_ds_consume:
1737 return selectDSAppendConsume(I, false);
1738 case Intrinsic::amdgcn_s_barrier:
1739 return selectSBarrier(I);
1740 case Intrinsic::amdgcn_global_atomic_fadd:
1741 return selectGlobalAtomicFaddIntrinsic(I);
1742 default: {
1743 return selectImpl(I, *CoverageInfo);
1744 }
1745 }
1746}
1747
1748bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
1749 if (selectImpl(I, *CoverageInfo))
1750 return true;
1751
1752 MachineBasicBlock *BB = I.getParent();
1753 const DebugLoc &DL = I.getDebugLoc();
1754
1755 Register DstReg = I.getOperand(0).getReg();
1756 unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
1757 assert(Size <= 32 || Size == 64);
1758 const MachineOperand &CCOp = I.getOperand(1);
1759 Register CCReg = CCOp.getReg();
1760 if (!isVCC(CCReg, *MRI)) {
1761 unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
1762 AMDGPU::S_CSELECT_B32;
1763 MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
1764 .addReg(CCReg);
1765
1766 // The generic constrainSelectedInstRegOperands doesn't work for the scc
1767 // register bank, because it does not cover the register class that we use to
1768 // represent it. So we need to set the register class manually here.
1769 if (!MRI->getRegClassOrNull(CCReg))
1770 MRI->setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, *MRI));
1771 MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
1772 .add(I.getOperand(2))
1773 .add(I.getOperand(3));
1774
1775 bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI) |
1776 constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
1777 I.eraseFromParent();
1778 return Ret;
1779 }
1780
1781 // Wide VGPR select should have been split in RegBankSelect.
1782 if (Size > 32)
1783 return false;
1784
1785 MachineInstr *Select =
1786 BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1787 .addImm(0)
1788 .add(I.getOperand(3))
1789 .addImm(0)
1790 .add(I.getOperand(2))
1791 .add(I.getOperand(1));
1792
1793 bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1794 I.eraseFromParent();
1795 return Ret;
1796}
1797
1798static int sizeToSubRegIndex(unsigned Size) {
1799 switch (Size) {
1800 case 32:
1801 return AMDGPU::sub0;
1802 case 64:
1803 return AMDGPU::sub0_sub1;
1804 case 96:
1805 return AMDGPU::sub0_sub1_sub2;
1806 case 128:
1807 return AMDGPU::sub0_sub1_sub2_sub3;
1808 case 256:
1809 return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
1810 default:
1811 if (Size < 32)
1812 return AMDGPU::sub0;
1813 if (Size > 256)
1814 return -1;
1815 return sizeToSubRegIndex(PowerOf2Ceil(Size));
1816 }
1817}
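// Annotation (not in the source): illustrative mappings for the helper above:
// Size == 16 returns sub0, Size == 48 rounds up to 64 and returns sub0_sub1,
// and Size == 512 returns -1.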
1818
1819bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
1820 Register DstReg = I.getOperand(0).getReg();
1821 Register SrcReg = I.getOperand(1).getReg();
1822 const LLT DstTy = MRI->getType(DstReg);
1823 const LLT SrcTy = MRI->getType(SrcReg);
1824 const LLT S1 = LLT::scalar(1);
1825
1826 const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1827 const RegisterBank *DstRB;
1828 if (DstTy == S1) {
1829 // This is a special case. We don't treat s1 for legalization artifacts as
1830 // vcc booleans.
1831 DstRB = SrcRB;
1832 } else {
1833 DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1834 if (SrcRB != DstRB)
1835 return false;
1836 }
1837
1838 const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
1839
1840 unsigned DstSize = DstTy.getSizeInBits();
1841 unsigned SrcSize = SrcTy.getSizeInBits();
1842
1843 const TargetRegisterClass *SrcRC
1844 = TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB, *MRI);
1845 const TargetRegisterClass *DstRC
1846 = TRI.getRegClassForSizeOnBank(DstSize, *DstRB, *MRI);
1847 if (!SrcRC || !DstRC)
1848 return false;
1849
1850 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
1851 !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
1852 LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
1853 return false;
1854 }
1855
1856 if (DstTy == LLT::vector(2, 16) && SrcTy == LLT::vector(2, 32)) {
1857 MachineBasicBlock *MBB = I.getParent();
1858 const DebugLoc &DL = I.getDebugLoc();
1859
1860 Register LoReg = MRI->createVirtualRegister(DstRC);
1861 Register HiReg = MRI->createVirtualRegister(DstRC);
1862 BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), LoReg)
1863 .addReg(SrcReg, 0, AMDGPU::sub0);
1864 BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), HiReg)
1865 .addReg(SrcReg, 0, AMDGPU::sub1);
1866
1867 if (IsVALU && STI.hasSDWA()) {
1868 // Write the low 16-bits of the high element into the high 16-bits of the
1869 // low element.
1870 MachineInstr *MovSDWA =
1871 BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
1872 .addImm(0) // $src0_modifiers
1873 .addReg(HiReg) // $src0
1874 .addImm(0) // $clamp
1875 .addImm(AMDGPU::SDWA::WORD_1) // $dst_sel
1876 .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
1877 .addImm(AMDGPU::SDWA::WORD_0) // $src0_sel
1878 .addReg(LoReg, RegState::Implicit);
1879 MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
1880 } else {
1881 Register TmpReg0 = MRI->createVirtualRegister(DstRC);
1882 Register TmpReg1 = MRI->createVirtualRegister(DstRC);
1883 Register ImmReg = MRI->createVirtualRegister(DstRC);
1884 if (IsVALU) {
1885 BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), TmpReg0)
1886 .addImm(16)
1887 .addReg(HiReg);
1888 } else {
1889 BuildMI(*MBB, I, DL, TII.get(AMDGPU::S_LSHL_B32), TmpReg0)
1890 .addReg(HiReg)
1891 .addImm(16);
1892 }
1893
1894 unsigned MovOpc = IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
1895 unsigned AndOpc = IsVALU ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
1896 unsigned OrOpc = IsVALU ? AMDGPU::V_OR_B32_e64 : AMDGPU::S_OR_B32;
1897
1898 BuildMI(*MBB, I, DL, TII.get(MovOpc), ImmReg)
1899 .addImm(0xffff);
1900 BuildMI(*MBB, I, DL, TII.get(AndOpc), TmpReg1)
1901 .addReg(LoReg)
1902 .addReg(ImmReg);
1903 BuildMI(*MBB, I, DL, TII.get(OrOpc), DstReg)
1904 .addReg(TmpReg0)
1905 .addReg(TmpReg1);
1906 }
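// Annotation (not in the source): either path computes
// Dst = (Hi << 16) | (Lo & 0xffff); e.g. Lo = 0xAAAA1234 and
// Hi = 0x5555ABCD pack to 0xABCD1234 (illustrative values).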
1907
1908 I.eraseFromParent();
1909 return true;
1910 }
1911
1912 if (!DstTy.isScalar())
1913 return false;
1914
1915 if (SrcSize > 32) {
1916 int SubRegIdx = sizeToSubRegIndex(DstSize);
1917 if (SubRegIdx == -1)
1918 return false;
1919
1920 // Deal with weird cases where the class only partially supports the subreg
1921 // index.
1922 const TargetRegisterClass *SrcWithSubRC
1923 = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx);
1924 if (!SrcWithSubRC)
1925 return false;
1926
1927 if (SrcWithSubRC != SrcRC) {
1928 if (!RBI.constrainGenericRegister(SrcReg, *SrcWithSubRC, *MRI))
1929 return false;
1930 }
1931
1932 I.getOperand(1).setSubReg(SubRegIdx);
1933 }
1934
1935 I.setDesc(TII.get(TargetOpcode::COPY));
1936 return true;
1937}
1938
1939/// \returns true if a bitmask for \p Size bits will be an inline immediate.
1940static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
1941 Mask = maskTrailingOnes<unsigned>(Size);
1942 int SignedMask = static_cast<int>(Mask);
1943 return SignedMask >= -16 && SignedMask <= 64;
1944}
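// Annotation (not in the source): e.g. Size == 4 gives Mask == 0xf (15), an
// inline immediate; Size == 16 gives 0xffff (65535), which is not. Size == 32
// gives 0xffffffff == -1 as a signed value, which is inline again
// (illustrative sizes).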
1945
1946// Like RegisterBankInfo::getRegBank, but don't assume vcc for s1.
1947const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank(
1948 Register Reg, const MachineRegisterInfo &MRI,
1949 const TargetRegisterInfo &TRI) const {
1950 const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
1951 if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>())
1952 return RB;
1953
1954 // Ignore the type, since we don't use vcc in artifacts.
1955 if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>())
1956 return &RBI.getRegBankFromRegClass(*RC, LLT());
1957 return nullptr;
1958}
1959
1960bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
1961 bool InReg = I.getOpcode() == AMDGPU::G_SEXT_INREG;
1962 bool Signed = I.getOpcode() == AMDGPU::G_SEXT || InReg;
1963 const DebugLoc &DL = I.getDebugLoc();
1964 MachineBasicBlock &MBB = *I.getParent();
1965 const Register DstReg = I.getOperand(0).getReg();
1966 const Register SrcReg = I.getOperand(1).getReg();
1967
1968 const LLT DstTy = MRI->getType(DstReg);
1969 const LLT SrcTy = MRI->getType(SrcReg);
1970 const unsigned SrcSize = I.getOpcode() == AMDGPU::G_SEXT_INREG ?
1971 I.getOperand(2).getImm() : SrcTy.getSizeInBits();
1972 const unsigned DstSize = DstTy.getSizeInBits();
1973 if (!DstTy.isScalar())
1974 return false;
1975
1976 // Artifact casts should never use vcc.
1977 const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI);
1978
1979 // FIXME: This should probably be illegal and split earlier.
1980 if (I.getOpcode() == AMDGPU::G_ANYEXT) {
1981 if (DstSize <= 32)
1982 return selectCOPY(I);
1983
1984 const TargetRegisterClass *SrcRC =
1985 TRI.getRegClassForTypeOnBank(SrcTy, *SrcBank, *MRI);
1986 const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
1987 const TargetRegisterClass *DstRC =
1988 TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
1989
1990 Register UndefReg = MRI->createVirtualRegister(SrcRC);
1991 BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
1992 BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
1993 .addReg(SrcReg)
1994 .addImm(AMDGPU::sub0)
1995 .addReg(UndefReg)
1996 .addImm(AMDGPU::sub1);
1997 I.eraseFromParent();
1998
1999 return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) &&
2000 RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI);
2001 }
2002
2003 if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
2004 // 64-bit should have been split up in RegBankSelect
2005
2006 // Try to use an and with a mask if it will save code size.
2007 unsigned Mask;
2008 if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2009 MachineInstr *ExtI =
2010 BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
2011 .addImm(Mask)
2012 .addReg(SrcReg);
2013 I.eraseFromParent();
2014 return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2015 }
2016
2017 const unsigned BFE = Signed ? AMDGPU::V_BFE_I32 : AMDGPU::V_BFE_U32;
2018 MachineInstr *ExtI =
2019 BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
2020 .addReg(SrcReg)
2021 .addImm(0) // Offset
2022 .addImm(SrcSize); // Width
2023 I.eraseFromParent();
2024 return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2025 }
2026
2027 if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) {
2028 const TargetRegisterClass &SrcRC = InReg && DstSize > 32 ?
2029 AMDGPU::SReg_64RegClass : AMDGPU::SReg_32RegClass;
2030 if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI))
2031 return false;
2032
2033 if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) {
2034 const unsigned SextOpc = SrcSize == 8 ?
2035 AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16;
2036 BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg)
2037 .addReg(SrcReg);
2038 I.eraseFromParent();
2039 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2040 }
2041
2042 const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64;
2043 const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
2044
2045 // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16] = width.
2046 if (DstSize > 32 && (SrcSize <= 32 || InReg)) {
2047 // We need a 64-bit register source, but the high bits don't matter.
2048 Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
2049 Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2050 unsigned SubReg = InReg ? AMDGPU::sub0 : 0;
2051
2052 BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2053 BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
2054 .addReg(SrcReg, 0, SubReg)
2055 .addImm(AMDGPU::sub0)
2056 .addReg(UndefReg)
2057 .addImm(AMDGPU::sub1);
2058
2059 BuildMI(MBB, I, DL, TII.get(BFE64), DstReg)
2060 .addReg(ExtReg)
2061 .addImm(SrcSize << 16);
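// Annotation (not in the source): e.g. SrcSize == 8 encodes as
// 8 << 16 == 0x80000, i.e. width 8 in bits [22:16] and offset 0 in
// bits [5:0] (illustrative value).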
2062
2063 I.eraseFromParent();
2064 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI);
2065 }
2066
2067 unsigned Mask;
2068 if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2069 BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
2070 .addReg(SrcReg)
2071 .addImm(Mask);
2072 } else {
2073 BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
2074 .addReg(SrcReg)
2075 .addImm(SrcSize << 16);
2076 }
2077
2078 I.eraseFromParent();
2079 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2080 }
2081
2082 return false;
2083}
2084
2085bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
2086 MachineBasicBlock *BB = I.getParent();
2087 MachineOperand &ImmOp = I.getOperand(1);
2088 Register DstReg = I.getOperand(0).getReg();
2089 unsigned Size = MRI->getType(DstReg).getSizeInBits();
2090
2091 // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
2092 if (ImmOp.isFPImm()) {
2093 const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
2094 ImmOp.ChangeToImmediate(Imm.getZExtValue());
2095 } else if (ImmOp.isCImm()) {
2096 ImmOp.ChangeToImmediate(ImmOp.getCImm()->getSExtValue());
2097 } else {
2098 llvm_unreachable("Not supported by g_constants")::llvm::llvm_unreachable_internal("Not supported by g_constants"
, "/build/llvm-toolchain-snapshot-12~++20201026111116+d3205bbca3e/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 2098)
;
2099 }
2100
2101 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2102 const bool IsSgpr = DstRB->getID() == AMDGPU::SGPRRegBankID;
2103
2104 unsigned Opcode;
2105 if (DstRB->getID() == AMDGPU::VCCRegBankID) {
2106 Opcode = STI.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
2107 } else {
2108 Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
2109
2110 // We should never produce s1 values on banks other than VCC. If the user of
2111 // this already constrained the register, we may incorrectly think it's VCC
2112 // if it wasn't originally.
2113 if (Size == 1)
2114 return false;
2115 }
2116
2117 if (Size != 64) {
2118 I.setDesc(TII.get(Opcode));
2119 I.addImplicitDefUseOperands(*MF);
2120 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2121 }
2122
2123 const DebugLoc &DL = I.getDebugLoc();
2124
2125 APInt Imm(Size, I.getOperand(1).getImm());
2126
2127 MachineInstr *ResInst;
2128 if (IsSgpr && TII.isInlineConstant(Imm)) {
2129 ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
2130 .addImm(I.getOperand(1).getImm());
2131 } else {
2132 const TargetRegisterClass *RC = IsSgpr ?
2133 &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass;
2134 Register LoReg = MRI->createVirtualRegister(RC);
2135 Register HiReg = MRI->createVirtualRegister(RC);
2136
2137 BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
2138 .addImm(Imm.trunc(32).getZExtValue());
2139
2140 BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
2141 .addImm(Imm.ashr(32).getZExtValue());
2142
2143 ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2144 .addReg(LoReg)
2145 .addImm(AMDGPU::sub0)
2146 .addReg(HiReg)
2147 .addImm(AMDGPU::sub1);
2148 }
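// Annotation (not in the source): e.g. Imm == 0x123456789ABCDEF0 splits into
// LoReg = 0x9ABCDEF0 (trunc) and HiReg = 0x12345678 (ashr 32) before the
// REG_SEQUENCE recombines them (illustrative value).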
2149
2150 // We can't call constrainSelectedInstRegOperands here, because it doesn't
2151 // work for target independent opcodes
2152 I.eraseFromParent();
2153 const TargetRegisterClass *DstRC =
2154 TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI);
2155 if (!DstRC)
2156 return true;
2157 return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);
2158}
2159
2160bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const {
2161 // Only manually handle the f64 SGPR case.
2162 //
2163 // FIXME: This is a workaround for 2.5 different tablegen problems. Because
2164 // the bit ops theoretically have a second result due to the implicit def of
2165 // SCC, the GlobalISelEmitter is overly conservative and rejects it. Fixing
2166 // that is easy by disabling the check. The result works, but uses a
2167 // nonsensical sreg32orlds_and_sreg_1 regclass.
2168 //
2169 // The DAG emitter is more problematic, and incorrectly adds both S_XOR_B32
2170 // results to the variadic REG_SEQUENCE operands.
2171
2172 Register Dst = MI.getOperand(0).getReg();
2173 const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2174 if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2175 MRI->getType(Dst) != LLT::scalar(64))
2176 return false;
2177
2178 Register Src = MI.getOperand(1).getReg();
2179 MachineInstr *Fabs = getOpcodeDef(TargetOpcode::G_FABS, Src, *MRI);
2180 if (Fabs)
2181 Src = Fabs->getOperand(1).getReg();
2182
2183 if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2184 !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2185 return false;
2186
2187 MachineBasicBlock *BB = MI.getParent();
2188 const DebugLoc &DL = MI.getDebugLoc();
2189 Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2190 Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2191 Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2192 Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2193
2194 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2195 .addReg(Src, 0, AMDGPU::sub0);
2196 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2197 .addReg(Src, 0, AMDGPU::sub1);
2198 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2199 .addImm(0x80000000);
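// Annotation (not in the source): bit 63 of the f64 value is bit 31 of the
// high 32-bit half, hence the 0x80000000 constant applied to HiReg only.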
2200
2201 // Set or toggle sign bit.
2202 unsigned Opc = Fabs ? AMDGPU::S_OR_B32 : AMDGPU::S_XOR_B32;
2203 BuildMI(*BB, &MI, DL, TII.get(Opc), OpReg)
2204 .addReg(HiReg)
2205 .addReg(ConstReg);
2206 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2207 .addReg(LoReg)
2208 .addImm(AMDGPU::sub0)
2209 .addReg(OpReg)
2210 .addImm(AMDGPU::sub1);
2211 MI.eraseFromParent();
2212 return true;
2213}
2214
2215// FIXME: This is a workaround for the same tablegen problems as G_FNEG
2216bool AMDGPUInstructionSelector::selectG_FABS(MachineInstr &MI) const {
2217 Register Dst = MI.getOperand(0).getReg();
2218 const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2219 if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2220 MRI->getType(Dst) != LLT::scalar(64))
2221 return false;
2222
2223 Register Src = MI.getOperand(1).getReg();
2224 MachineBasicBlock *BB = MI.getParent();
2225 const DebugLoc &DL = MI.getDebugLoc();
2226 Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2227 Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2228 Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2229 Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2230
2231 if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2232 !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2233 return false;
2234
2235 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2236 .addReg(Src, 0, AMDGPU::sub0);
2237 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2238 .addReg(Src, 0, AMDGPU::sub1);
2239 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2240 .addImm(0x7fffffff);
2241
2242 // Clear sign bit.
2243 // TODO: Should this use S_BITSET0_*?
2244 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_AND_B32), OpReg)
2245 .addReg(HiReg)
2246 .addReg(ConstReg);
2247 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2248 .addReg(LoReg)
2249 .addImm(AMDGPU::sub0)
2250 .addReg(OpReg)
2251 .addImm(AMDGPU::sub1);
2252
2253 MI.eraseFromParent();
2254 return true;
2255}
2256
2257static bool isConstant(const MachineInstr &MI) {
2258 return MI.getOpcode() == TargetOpcode::G_CONSTANT;
2259}
2260
2261void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
2262 const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {
2263
2264 const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());
2265
2266 assert(PtrMI);
2267
2268 if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD)
2269 return;
2270
2271 GEPInfo GEPInfo(*PtrMI);
2272
2273 for (unsigned i = 1; i != 3; ++i) {
2274 const MachineOperand &GEPOp = PtrMI->getOperand(i);
2275 const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
2276 assert(OpDef);
2277 if (i == 2 && isConstant(*OpDef)) {
2278 // TODO: Could handle constant base + variable offset, but a combine
2279 // probably should have commuted it.
2280 assert(GEPInfo.Imm == 0);
2281 GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
2282 continue;
2283 }
2284 const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
2285 if (OpBank->getID() == AMDGPU::SGPRRegBankID)
2286 GEPInfo.SgprParts.push_back(GEPOp.getReg());
2287 else
2288 GEPInfo.VgprParts.push_back(GEPOp.getReg());
2289 }
2290
2291 AddrInfo.push_back(GEPInfo);
2292 getAddrModeInfo(*PtrMI, MRI, AddrInfo);
2293}
2294
2295bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
2296 if (!MI.hasOneMemOperand())
2297 return false;
2298
2299 const MachineMemOperand *MMO = *MI.memoperands_begin();
2300 const Value *Ptr = MMO->getValue();
2301
2302 // UndefValue means this is a load of a kernel input. These are uniform.
2303 // Sometimes LDS instructions have constant pointers.
2304 // If Ptr is null, then that means this mem operand contains a
2305 // PseudoSourceValue like GOT.
2306 if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
2307 isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
2308 return true;
2309
2310 if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
2311 return true;
2312
2313 const Instruction *I = dyn_cast<Instruction>(Ptr);
2314 return I && I->getMetadata("amdgpu.uniform");
2315}
2316
2317bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
2318 for (const GEPInfo &GEPInfo : AddrInfo) {
2319 if (!GEPInfo.VgprParts.empty())
2320 return true;
2321 }
2322 return false;
2323}
2324
2325void AMDGPUInstructionSelector::initM0(MachineInstr &I) const {
2326 const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2327 unsigned AS = PtrTy.getAddressSpace();
2328 if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) &&
2329 STI.ldsRequiresM0Init()) {
2330 MachineBasicBlock *BB = I.getParent();
2331
2332 // If DS instructions require M0 initialization, insert it before selecting.
2333 BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2334 .addImm(-1);
2335 }
2336}
2337
2338bool AMDGPUInstructionSelector::selectG_LOAD_STORE_ATOMICRMW(
2339 MachineInstr &I) const {
2340 initM0(I);
2341 return selectImpl(I, *CoverageInfo);
2342}
2343
2344// TODO: No rtn optimization.
2345bool AMDGPUInstructionSelector::selectG_AMDGPU_ATOMIC_CMPXCHG(
2346 MachineInstr &MI) const {
2347 Register PtrReg = MI.getOperand(1).getReg();
2348 const LLT PtrTy = MRI->getType(PtrReg);
2349 if (PtrTy.getAddressSpace() == AMDGPUAS::FLAT_ADDRESS ||
2350 STI.useFlatForGlobal())
2351 return selectImpl(MI, *CoverageInfo);
2352
2353 Register DstReg = MI.getOperand(0).getReg();
2354 const LLT Ty = MRI->getType(DstReg);
2355 const bool Is64 = Ty.getSizeInBits() == 64;
2356 const unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
2357 Register TmpReg = MRI->createVirtualRegister(
2358 Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
2359
2360 const DebugLoc &DL = MI.getDebugLoc();
2361 MachineBasicBlock *BB = MI.getParent();
2362
2363 Register VAddr, RSrcReg, SOffset;
2364 int64_t Offset = 0;
2365
2366 unsigned Opcode;
2367 if (selectMUBUFOffsetImpl(MI.getOperand(1), RSrcReg, SOffset, Offset)) {
2368 Opcode = Is64 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_OFFSET_RTN :
2369 AMDGPU::BUFFER_ATOMIC_CMPSWAP_OFFSET_RTN;
2370 } else if (selectMUBUFAddr64Impl(MI.getOperand(1), VAddr,
2371 RSrcReg, SOffset, Offset)) {
2372 Opcode = Is64 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_ADDR64_RTN :
2373 AMDGPU::BUFFER_ATOMIC_CMPSWAP_ADDR64_RTN;
2374 } else
2375 return selectImpl(MI, *CoverageInfo);
2376
2377 auto MIB = BuildMI(*BB, &MI, DL, TII.get(Opcode), TmpReg)
2378 .addReg(MI.getOperand(2).getReg());
2379
2380 if (VAddr)
2381 MIB.addReg(VAddr);
2382
2383 MIB.addReg(RSrcReg);
2384 if (SOffset)
2385 MIB.addReg(SOffset);
2386 else
2387 MIB.addImm(0);
2388
2389 MIB.addImm(Offset);
2390 MIB.addImm(0); // slc
2391 MIB.cloneMemRefs(MI);
2392
2393 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), DstReg)
2394 .addReg(TmpReg, RegState::Kill, SubReg);
2395
2396 MI.eraseFromParent();
2397
2398 MRI->setRegClass(
2399 DstReg, Is64 ? &AMDGPU::VReg_64RegClass : &AMDGPU::VGPR_32RegClass);
2400 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
2401}
2402
2403bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
2404 MachineBasicBlock *BB = I.getParent();
2405 MachineOperand &CondOp = I.getOperand(0);
2406 Register CondReg = CondOp.getReg();
2407 const DebugLoc &DL = I.getDebugLoc();
2408
2409 unsigned BrOpcode;
2410 Register CondPhysReg;
2411 const TargetRegisterClass *ConstrainRC;
2412
2413 // In SelectionDAG, we inspect the IR block for uniformity metadata to decide
2414 // whether the branch is uniform when selecting the instruction. In
2415 // GlobalISel, we should push that decision into RegBankSelect. Assume for now
2416 // RegBankSelect knows what it's doing if the branch condition is scc, even
2417 // though it currently does not.
2418 if (!isVCC(CondReg, *MRI)) {
2419 if (MRI->getType(CondReg) != LLT::scalar(32))
2420 return false;
2421
2422 CondPhysReg = AMDGPU::SCC;
2423 BrOpcode = AMDGPU::S_CBRANCH_SCC1;
2424 ConstrainRC = &AMDGPU::SReg_32RegClass;
2425 } else {
2426 // FIXME: Do we have to insert an and with exec here, like in SelectionDAG?
2427 // We sort of know, based on the register bank, that a VCC producer ands
2428 // inactive lanes with 0. What if there was a logical operation with vcc
2429 // producers in different blocks/with different exec masks?
2430 // FIXME: Should scc->vcc copies and with exec?
2431 CondPhysReg = TRI.getVCC();
2432 BrOpcode = AMDGPU::S_CBRANCH_VCCNZ;
2433 ConstrainRC = TRI.getBoolRC();
2434 }
2435
2436 if (!MRI->getRegClassOrNull(CondReg))
2437 MRI->setRegClass(CondReg, ConstrainRC);
2438
2439 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
2440 .addReg(CondReg);
2441 BuildMI(*BB, &I, DL, TII.get(BrOpcode))
2442 .addMBB(I.getOperand(1).getMBB());
2443
2444 I.eraseFromParent();
2445 return true;
2446}
2447
2448bool AMDGPUInstructionSelector::selectG_GLOBAL_VALUE(
2449 MachineInstr &I) const {
2450 Register DstReg = I.getOperand(0).getReg();
2451 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2452 const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2453 I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32));
2454 if (IsVGPR)
2455 I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
2456
2457 return RBI.constrainGenericRegister(
2458 DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI);
2459}
2460
2461bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const {
2462 Register DstReg = I.getOperand(0).getReg();
2463 Register SrcReg = I.getOperand(1).getReg();
2464 Register MaskReg = I.getOperand(2).getReg();
2465 LLT Ty = MRI->getType(DstReg);
2466 LLT MaskTy = MRI->getType(MaskReg);
2467
2468 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2469 const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2470 const RegisterBank *MaskRB = RBI.getRegBank(MaskReg, *MRI, TRI);
2471 const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2472 if (DstRB != SrcRB) // Should only happen for hand written MIR.
2473 return false;
2474
2475 unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
2476 const TargetRegisterClass &RegRC
2477 = IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2478
2479 const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB,
2480 *MRI);
2481 const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB,
2482 *MRI);
2483 const TargetRegisterClass *MaskRC =
2484 TRI.getRegClassForTypeOnBank(MaskTy, *MaskRB, *MRI);
2485
2486 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2487 !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2488 !RBI.constrainGenericRegister(MaskReg, *MaskRC, *MRI))
2489 return false;
2490
2491 MachineBasicBlock *BB = I.getParent();
2492 const DebugLoc &DL = I.getDebugLoc();
2493 if (Ty.getSizeInBits() == 32) {
2494 assert(MaskTy.getSizeInBits() == 32 &&
2495        "ptrmask should have been narrowed during legalize");
2496
2497 BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg)
2498 .addReg(SrcReg)
2499 .addReg(MaskReg);
2500 I.eraseFromParent();
2501 return true;
2502 }
2503
2504 Register HiReg = MRI->createVirtualRegister(&RegRC);
2505 Register LoReg = MRI->createVirtualRegister(&RegRC);
2506
2507 // Extract the subregisters from the source pointer.
2508 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg)
2509 .addReg(SrcReg, 0, AMDGPU::sub0);
2510 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg)
2511 .addReg(SrcReg, 0, AMDGPU::sub1);
2512
2513 Register MaskedLo, MaskedHi;
2514
2515 // Try to avoid emitting a bit operation when we only need to touch half of
2516 // the 64-bit pointer.
2517 APInt MaskOnes = KnownBits->getKnownOnes(MaskReg).zextOrSelf(64);
2518
2519 const APInt MaskHi32 = APInt::getHighBitsSet(64, 32);
2520 const APInt MaskLo32 = APInt::getLowBitsSet(64, 32);
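// Annotation (not in the source): e.g. a constant mask of 0xffffffff00001000
// has all high-half bits known one, so the high half becomes a plain copy
// while the low half needs a real AND (illustrative mask).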
2521 if ((MaskOnes & MaskLo32) == MaskLo32) {
2522 // If all the bits in the low half are 1, we only need a copy for it.
2523 MaskedLo = LoReg;
2524 } else {
2525 // Extract the mask subregister and apply the and.
2526 Register MaskLo = MRI->createVirtualRegister(&RegRC);
2527 MaskedLo = MRI->createVirtualRegister(&RegRC);
2528
2529 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskLo)
2530 .addReg(MaskReg, 0, AMDGPU::sub0);
2531 BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedLo)
2532 .addReg(LoReg)
2533 .addReg(MaskLo);
2534 }
2535
2536 if ((MaskOnes & MaskHi32) == MaskHi32) {
2537 // If all the bits in the high half are 1, we only need a copy for it.
2538 MaskedHi = HiReg;
2539 } else {
2540 Register MaskHi = MRI->createVirtualRegister(&RegRC);
2541 MaskedHi = MRI->createVirtualRegister(&RegRC);
2542
2543 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskHi)
2544 .addReg(MaskReg, 0, AMDGPU::sub1);
2545 BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedHi)
2546 .addReg(HiReg)
2547 .addReg(MaskHi);
2548 }
2549
2550 BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2551 .addReg(MaskedLo)
2552 .addImm(AMDGPU::sub0)
2553 .addReg(MaskedHi)
2554 .addImm(AMDGPU::sub1);
2555 I.eraseFromParent();
2556 return true;
2557}
2558
2559/// Return the register to use for the index value, and the subregister to use
2560/// for the indirectly accessed register.
2561static std::pair<Register, unsigned>
2562computeIndirectRegIndex(MachineRegisterInfo &MRI,
2563 const SIRegisterInfo &TRI,
2564 const TargetRegisterClass *SuperRC,
2565 Register IdxReg,
2566 unsigned EltSize) {
2567 Register IdxBaseReg;
2568 int Offset;
2569 MachineInstr *Unused;
2570
2571 std::tie(IdxBaseReg, Offset, Unused)
2572 = AMDGPU::getBaseWithConstantOffset(MRI, IdxReg);
2573 if (IdxBaseReg == AMDGPU::NoRegister) {
2574 // This will happen if the index is a known constant. This should ordinarily
2575 // be legalized out, but handle it as a register just in case.
2576 assert(Offset == 0);
2577 IdxBaseReg = IdxReg;
2578 }
2579
2580 ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SuperRC, EltSize);
2581
2582 // Skip out of bounds offsets, or else we would end up using an undefined
2583 // register.
2584 if (static_cast<unsigned>(Offset) >= SubRegs.size())
2585 return std::make_pair(IdxReg, SubRegs[0]);
2586 return std::make_pair(IdxBaseReg, SubRegs[Offset]);
2587}
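// Annotation (not in the source): e.g. for a 4 x s32 source (EltSize == 4)
// with IdxReg defined as %base + 2, this returns {%base, sub2}, folding the
// constant offset into the subregister index (illustrative operands).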
2588
2589bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT(
2590 MachineInstr &MI) const {
2591 Register DstReg = MI.getOperand(0).getReg();
2592 Register SrcReg = MI.getOperand(1).getReg();
2593 Register IdxReg = MI.getOperand(2).getReg();
2594
2595 LLT DstTy = MRI->getType(DstReg);
2596 LLT SrcTy = MRI->getType(SrcReg);
2597
2598 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2599 const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2600 const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2601
2602 // The index must be scalar. If it wasn't RegBankSelect should have moved this
2603 // into a waterfall loop.
2604 if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2605 return false;
2606
2607 const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB,
2608 *MRI);
2609 const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(DstTy, *DstRB,
2610 *MRI);
2611 if (!SrcRC || !DstRC)
2612 return false;
2613 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2614 !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2615 !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2616 return false;
2617
2618 MachineBasicBlock *BB = MI.getParent();
2619 const DebugLoc &DL = MI.getDebugLoc();
2620 const bool Is64 = DstTy.getSizeInBits() == 64;
2621
2622 unsigned SubReg;
2623 std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, SrcRC, IdxReg,
2624 DstTy.getSizeInBits() / 8);
2625
2626 if (SrcRB->getID() == AMDGPU::SGPRRegBankID) {
2627 if (DstTy.getSizeInBits() != 32 && !Is64)
2628 return false;
2629
2630 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2631 .addReg(IdxReg);
2632
2633 unsigned Opc = Is64 ? AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32;
2634 BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg)
2635 .addReg(SrcReg, 0, SubReg)
2636 .addReg(SrcReg, RegState::Implicit);
2637 MI.eraseFromParent();
2638 return true;
2639 }
2640
2641 if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32)
2642 return false;
2643
2644 if (!STI.useVGPRIndexMode()) {
2645 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2646 .addReg(IdxReg);
2647 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg)
2648 .addReg(SrcReg, 0, SubReg)
2649 .addReg(SrcReg, RegState::Implicit);
2650 MI.eraseFromParent();
2651 return true;
2652 }
2653
2654 BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_ON))
2655 .addReg(IdxReg)
2656 .addImm(AMDGPU::VGPRIndexMode::SRC0_ENABLE);
2657 BuildMI(*BB, MI, DL, TII.get(AMDGPU::V_MOV_B32_e32), DstReg)
2658 .addReg(SrcReg, 0, SubReg)
2659 .addReg(SrcReg, RegState::Implicit)
2660 .addReg(AMDGPU::M0, RegState::Implicit);
2661 BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_OFF));
2662
2663 MI.eraseFromParent();
2664 return true;
2665}
2666
2667// TODO: Fold insert_vector_elt (extract_vector_elt) into movrelsd
2668bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT(
2669 MachineInstr &MI) const {
2670 Register DstReg = MI.getOperand(0).getReg();
2671 Register VecReg = MI.getOperand(1).getReg();
2672 Register ValReg = MI.getOperand(2).getReg();
2673 Register IdxReg = MI.getOperand(3).getReg();
2674
2675 LLT VecTy = MRI->getType(DstReg);
2676 LLT ValTy = MRI->getType(ValReg);
2677 unsigned VecSize = VecTy.getSizeInBits();
2678 unsigned ValSize = ValTy.getSizeInBits();
2679
2680 const RegisterBank *VecRB = RBI.getRegBank(VecReg, *MRI, TRI);
2681 const RegisterBank *ValRB = RBI.getRegBank(ValReg, *MRI, TRI);
2682 const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2683
2684 assert(VecTy.getElementType() == ValTy);
2685
2686 // The index must be scalar. If it wasn't RegBankSelect should have moved this
2687 // into a waterfall loop.
2688 if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2689 return false;
2690
2691 const TargetRegisterClass *VecRC = TRI.getRegClassForTypeOnBank(VecTy, *VecRB,
2692 *MRI);
2693 const TargetRegisterClass *ValRC = TRI.getRegClassForTypeOnBank(ValTy, *ValRB,
2694 *MRI);
2695
2696 if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) ||
2697 !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) ||
2698 !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) ||
2699 !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2700 return false;
2701
2702 if (VecRB->getID() == AMDGPU::VGPRRegBankID && ValSize != 32)
2703 return false;
2704
2705 unsigned SubReg;
2706 std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg,
2707 ValSize / 8);
2708
2709 const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID &&
2710 STI.useVGPRIndexMode();
2711
2712 MachineBasicBlock *BB = MI.getParent();
2713 const DebugLoc &DL = MI.getDebugLoc();
2714
2715 if (IndexMode) {
2716 BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_ON))
2717 .addReg(IdxReg)
2718 .addImm(AMDGPU::VGPRIndexMode::DST_ENABLE);
2719 } else {
2720 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2721 .addReg(IdxReg);
2722 }
2723
2724 const MCInstrDesc &RegWriteOp
2725 = TII.getIndirectRegWritePseudo(VecSize, ValSize,
2726 VecRB->getID() == AMDGPU::SGPRRegBankID);
2727 BuildMI(*BB, MI, DL, RegWriteOp, DstReg)
2728 .addReg(VecReg)
2729 .addReg(ValReg)
2730 .addImm(SubReg);
2731
2732 if (IndexMode)
2733 BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_OFF));
2734
2735 MI.eraseFromParent();
2736 return true;
2737}
2738
2739static bool isZeroOrUndef(int X) {
2740 return X == 0 || X == -1;
2741}
2742
2743static bool isOneOrUndef(int X) {
2744 return X == 1 || X == -1;
2745}
2746
2747static bool isZeroOrOneOrUndef(int X) {
2748 return X == 0 || X == 1 || X == -1;
2749}
2750
2751// Normalize a VOP3P shuffle mask to refer to the low/high half of a single
2752// 32-bit register.
2753static Register normalizeVOP3PMask(int NewMask[2], Register Src0, Register Src1,
2754 ArrayRef<int> Mask) {
2755 NewMask[0] = Mask[0];
2756 NewMask[1] = Mask[1];
2757 if (isZeroOrOneOrUndef(Mask[0]) && isZeroOrOneOrUndef(Mask[1]))
2758 return Src0;
2759
2760 assert(NewMask[0] == 2 || NewMask[0] == 3 || NewMask[0] == -1);
2761 assert(NewMask[1] == 2 || NewMask[1] == 3 || NewMask[1] == -1);
2762
2763 // Shift the mask inputs to be 0/1.
2764 NewMask[0] = NewMask[0] == -1 ? -1 : NewMask[0] - 2;
2765 NewMask[1] = NewMask[1] == -1 ? -1 : NewMask[1] - 2;
2766 return Src1;
2767}
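// Annotation (not in the source): e.g. Mask {3, 2} reads only Src1 and is
// rewritten to NewMask {1, 0}; Mask {0, 1} reads only Src0 and is returned
// unchanged (illustrative masks).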
2768
2769// This is only legal with VOP3P instructions as an aid to op_sel matching.
2770bool AMDGPUInstructionSelector::selectG_SHUFFLE_VECTOR(
2771 MachineInstr &MI) const {
2772 Register DstReg = MI.getOperand(0).getReg();
2773 Register Src0Reg = MI.getOperand(1).getReg();
2774 Register Src1Reg = MI.getOperand(2).getReg();
2775 ArrayRef<int> ShufMask = MI.getOperand(3).getShuffleMask();
2776
2777 const LLT V2S16 = LLT::vector(2, 16);
2778 if (MRI->getType(DstReg) != V2S16 || MRI->getType(Src0Reg) != V2S16)
2779 return false;
2780
2781 if (!AMDGPU::isLegalVOP3PShuffleMask(ShufMask))
2782 return false;
2783
2784 assert(ShufMask.size() == 2);
2785 assert(STI.hasSDWA() && "no target has VOP3P but not SDWA");
2786
2787 MachineBasicBlock *MBB = MI.getParent();
2788 const DebugLoc &DL = MI.getDebugLoc();
2789
2790 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2791 const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
2792 const TargetRegisterClass &RC = IsVALU ?
2793 AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2794
2795 // Handle the degenerate case which should have folded out.
2796 if (ShufMask[0] == -1 && ShufMask[1] == -1) {
2797 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::IMPLICIT_DEF), DstReg);
2798
2799 MI.eraseFromParent();
2800 return RBI.constrainGenericRegister(DstReg, RC, *MRI);
2801 }
2802
2803 // A legal VOP3P mask only reads one of the sources.
2804 int Mask[2];
2805 Register SrcVec = normalizeVOP3PMask(Mask, Src0Reg, Src1Reg, ShufMask);
2806
2807 if (!RBI.constrainGenericRegister(DstReg, RC, *MRI) ||
2808 !RBI.constrainGenericRegister(SrcVec, RC, *MRI))
2809 return false;
2810
2811 // TODO: This also should have been folded out
2812 if (isZeroOrUndef(Mask[0]) && isOneOrUndef(Mask[1])) {
2813 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::COPY), DstReg)
2814 .addReg(SrcVec);
2815
2816 MI.eraseFromParent();
2817 return true;
2818 }
2819
2820 if (Mask[0] == 1 && Mask[1] == -1) {
2821 if (IsVALU) {
2822 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
2823 .addImm(16)
2824 .addReg(SrcVec);
2825 } else {
2826 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
2827 .addReg(SrcVec)
2828 .addImm(16);
2829 }
2830 } else if (Mask[0] == -1 && Mask[1] == 0) {
2831 if (IsVALU) {
2832 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), DstReg)
2833 .addImm(16)
2834 .addReg(SrcVec);
2835 } else {
2836 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHL_B32), DstReg)
2837 .addReg(SrcVec)
2838 .addImm(16);
2839 }
2840 } else if (Mask[0] == 0 && Mask[1] == 0) {
2841 if (IsVALU) {
2842 // Write low half of the register into the high half.
2843 MachineInstr *MovSDWA =
2844 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2845 .addImm(0) // $src0_modifiers
2846 .addReg(SrcVec) // $src0
2847 .addImm(0) // $clamp
2848 .addImm(AMDGPU::SDWA::WORD_1) // $dst_sel
2849 .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2850 .addImm(AMDGPU::SDWA::WORD_0) // $src0_sel
2851 .addReg(SrcVec, RegState::Implicit);
2852 MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2853 } else {
2854 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2855 .addReg(SrcVec)
2856 .addReg(SrcVec);
2857 }
2858 } else if (Mask[0] == 1 && Mask[1] == 1) {
2859 if (IsVALU) {
2860 // Write high half of the register into the low half.
2861 MachineInstr *MovSDWA =
2862 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2863 .addImm(0) // $src0_modifiers
2864 .addReg(SrcVec) // $src0
2865 .addImm(0) // $clamp
2866 .addImm(AMDGPU::SDWA::WORD_0) // $dst_sel
2867 .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2868 .addImm(AMDGPU::SDWA::WORD_1) // $src0_sel
2869 .addReg(SrcVec, RegState::Implicit);
2870 MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2871 } else {
2872 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_HH_B32_B16), DstReg)
2873 .addReg(SrcVec)
2874 .addReg(SrcVec);
2875 }
2876 } else if (Mask[0] == 1 && Mask[1] == 0) {
2877 if (IsVALU) {
2878 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_ALIGNBIT_B32), DstReg)
2879 .addReg(SrcVec)
2880 .addReg(SrcVec)
2881 .addImm(16);
2882 } else {
2883 Register TmpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2884 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), TmpReg)
2885 .addReg(SrcVec)
2886 .addImm(16);
2887 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2888 .addReg(TmpReg)
2889 .addReg(SrcVec);
2890 }
2891 } else
2892 llvm_unreachable("all shuffle masks should be handled")::llvm::llvm_unreachable_internal("all shuffle masks should be handled"
, "/build/llvm-toolchain-snapshot-12~++20201026111116+d3205bbca3e/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp"
, 2892)
;
2893
2894 MI.eraseFromParent();
2895 return true;
2896}
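
A minimal standalone sketch of the mask dispatch above, assuming the mask entries are already normalized to -1/0/1 by normalizeVOP3PMask (selectV2I16Shuffle is a hypothetical name, not part of this file):

#include <string>

// Model of the case analysis above; -1 means an undef lane.
static std::string selectV2I16Shuffle(int M0, int M1, bool IsVALU) {
  if (M0 == -1 && M1 == -1)                           // fully undef
    return "IMPLICIT_DEF";
  if ((M0 == 0 || M0 == -1) && (M1 == 1 || M1 == -1)) // identity
    return "COPY";
  if (M0 == 1 && M1 == -1)                            // read high half
    return IsVALU ? "V_LSHRREV_B32 16" : "S_LSHR_B32 16";
  if (M0 == -1 && M1 == 0)                            // low half into high
    return IsVALU ? "V_LSHLREV_B32 16" : "S_LSHL_B32 16";
  if (M0 == 0 && M1 == 0)                             // broadcast low half
    return IsVALU ? "V_MOV_B32_sdwa WORD_1 <- WORD_0" : "S_PACK_LL_B32_B16";
  if (M0 == 1 && M1 == 1)                             // broadcast high half
    return IsVALU ? "V_MOV_B32_sdwa WORD_0 <- WORD_1" : "S_PACK_HH_B32_B16";
  if (M0 == 1 && M1 == 0)                             // swap halves
    return IsVALU ? "V_ALIGNBIT_B32 16" : "S_LSHR_B32 + S_PACK_LL_B32_B16";
  return "unreachable";                               // asserted above
}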
2897
2898bool AMDGPUInstructionSelector::selectAMDGPU_BUFFER_ATOMIC_FADD(
2899 MachineInstr &MI) const {
2900
2901 MachineBasicBlock *MBB = MI.getParent();
2902 const DebugLoc &DL = MI.getDebugLoc();
2903
2904 if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) {
2905 Function &F = MBB->getParent()->getFunction();
2906 DiagnosticInfoUnsupported
2907 NoFpRet(F, "return versions of fp atomics not supported",
2908 MI.getDebugLoc(), DS_Error);
2909 F.getContext().diagnose(NoFpRet);
2910 return false;
2911 }
2912
2913 // FIXME: This is only needed because tablegen requires the number of dst
2914 // operands in the match and replace patterns to be the same. Otherwise the
2915 // patterns could be exported from the SDag path.
2916 MachineOperand &VDataIn = MI.getOperand(1);
2917 MachineOperand &VIndex = MI.getOperand(3);
2918 MachineOperand &VOffset = MI.getOperand(4);
2919 MachineOperand &SOffset = MI.getOperand(5);
2920 int16_t Offset = MI.getOperand(6).getImm();
2921
2922 bool HasVOffset = !isOperandImmEqual(VOffset, 0, *MRI);
2923 bool HasVIndex = !isOperandImmEqual(VIndex, 0, *MRI);
2924
2925 unsigned Opcode;
2926 if (HasVOffset) {
2927 Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN
2928 : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN;
2929 } else {
2930 Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN
2931 : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET;
2932 }
2933
2934 if (MRI->getType(VDataIn.getReg()).isVector()) {
2935 switch (Opcode) {
2936 case AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN:
2937 Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN;
2938 break;
2939 case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN:
2940 Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFEN;
2941 break;
2942 case AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN:
2943 Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_IDXEN;
2944 break;
2945 case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET:
2946 Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFSET;
2947 break;
2948 }
2949 }
2950
2951 auto I = BuildMI(*MBB, MI, DL, TII.get(Opcode));
2952 I.add(VDataIn);
2953
2954 if (Opcode == AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN ||
2955 Opcode == AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN) {
2956 Register IdxReg = MRI->createVirtualRegister(&AMDGPU::VReg_64RegClass);
2957 BuildMI(*MBB, &*I, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg)
2958 .addReg(VIndex.getReg())
2959 .addImm(AMDGPU::sub0)
2960 .addReg(VOffset.getReg())
2961 .addImm(AMDGPU::sub1);
2962
2963 I.addReg(IdxReg);
2964 } else if (HasVIndex) {
2965 I.add(VIndex);
2966 } else if (HasVOffset) {
2967 I.add(VOffset);
2968 }
2969
2970 I.add(MI.getOperand(2)); // rsrc
2971 I.add(SOffset);
2972 I.addImm(Offset);
2973 renderExtractSLC(I, MI, 7);
2974 I.cloneMemRefs(MI);
2975
2976 MI.eraseFromParent();
2977
2978 return true;
2979}
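
The variant choice above reduces to two booleans; a standalone sketch (pickBufferAtomicVariant is a hypothetical name):

#include <string>

// Model of the BUFFER_ATOMIC_ADD_F32 addressing-variant selection above.
static std::string pickBufferAtomicVariant(bool HasVIndex, bool HasVOffset) {
  if (HasVOffset)
    return HasVIndex ? "BOTHEN"  // vindex + voffset, packed into a VReg_64
                     : "OFFEN";  // voffset only
  return HasVIndex ? "IDXEN"     // vindex only
                   : "OFFSET";   // neither; the immediate offset alone
}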
2980
2981bool AMDGPUInstructionSelector::selectGlobalAtomicFaddIntrinsic(
2982 MachineInstr &MI) const {
2983
2984 MachineBasicBlock *MBB = MI.getParent();
2985 const DebugLoc &DL = MI.getDebugLoc();
2986
2987 if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) {
2988 Function &F = MBB->getParent()->getFunction();
2989 DiagnosticInfoUnsupported
2990 NoFpRet(F, "return versions of fp atomics not supported",
2991 MI.getDebugLoc(), DS_Error);
2992 F.getContext().diagnose(NoFpRet);
2993 return false;
2994 }
2995
2996 // FIXME: This is only needed because tablegen requires the number of dst
2997 // operands in the match and replace patterns to be the same. Otherwise the
2998 // patterns could be exported from the SDag path.
2999 auto Addr = selectFlatOffsetImpl<true>(MI.getOperand(2));
3000
3001 Register Data = MI.getOperand(3).getReg();
3002 const unsigned Opc = MRI->getType(Data).isVector() ?
3003 AMDGPU::GLOBAL_ATOMIC_PK_ADD_F16 : AMDGPU::GLOBAL_ATOMIC_ADD_F32;
3004 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc))
3005 .addReg(Addr.first)
3006 .addReg(Data)
3007 .addImm(Addr.second)
3008 .addImm(0) // SLC
3009 .cloneMemRefs(MI);
3010
3011 MI.eraseFromParent();
3012 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3013}
3014
3015 bool AMDGPUInstructionSelector::selectBVHIntrinsic(MachineInstr &MI) const {
3016 MI.setDesc(TII.get(MI.getOperand(1).getImm()));
3017 MI.RemoveOperand(1);
3018 MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
3019 return true;
3020}
3021
3022bool AMDGPUInstructionSelector::select(MachineInstr &I) {
3023 if (I.isPHI())
3024 return selectPHI(I);
3025
3026 if (!I.isPreISelOpcode()) {
3027 if (I.isCopy())
3028 return selectCOPY(I);
3029 return true;
3030 }
3031
3032 switch (I.getOpcode()) {
3033 case TargetOpcode::G_AND:
3034 case TargetOpcode::G_OR:
3035 case TargetOpcode::G_XOR:
3036 if (selectImpl(I, *CoverageInfo))
3037 return true;
3038 return selectG_AND_OR_XOR(I);
3039 case TargetOpcode::G_ADD:
3040 case TargetOpcode::G_SUB:
3041 if (selectImpl(I, *CoverageInfo))
3042 return true;
3043 return selectG_ADD_SUB(I);
3044 case TargetOpcode::G_UADDO:
3045 case TargetOpcode::G_USUBO:
3046 case TargetOpcode::G_UADDE:
3047 case TargetOpcode::G_USUBE:
3048 return selectG_UADDO_USUBO_UADDE_USUBE(I);
3049 case TargetOpcode::G_INTTOPTR:
3050 case TargetOpcode::G_BITCAST:
3051 case TargetOpcode::G_PTRTOINT:
3052 return selectCOPY(I);
3053 case TargetOpcode::G_CONSTANT:
3054 case TargetOpcode::G_FCONSTANT:
3055 return selectG_CONSTANT(I);
3056 case TargetOpcode::G_FNEG:
3057 if (selectImpl(I, *CoverageInfo))
3058 return true;
3059 return selectG_FNEG(I);
3060 case TargetOpcode::G_FABS:
3061 if (selectImpl(I, *CoverageInfo))
3062 return true;
3063 return selectG_FABS(I);
3064 case TargetOpcode::G_EXTRACT:
3065 return selectG_EXTRACT(I);
3066 case TargetOpcode::G_MERGE_VALUES:
3067 case TargetOpcode::G_BUILD_VECTOR:
3068 case TargetOpcode::G_CONCAT_VECTORS:
3069 return selectG_MERGE_VALUES(I);
3070 case TargetOpcode::G_UNMERGE_VALUES:
3071 return selectG_UNMERGE_VALUES(I);
3072 case TargetOpcode::G_BUILD_VECTOR_TRUNC:
3073 return selectG_BUILD_VECTOR_TRUNC(I);
3074 case TargetOpcode::G_PTR_ADD:
3075 return selectG_PTR_ADD(I);
3076 case TargetOpcode::G_IMPLICIT_DEF:
3077 return selectG_IMPLICIT_DEF(I);
3078 case TargetOpcode::G_FREEZE:
3079 return selectCOPY(I);
3080 case TargetOpcode::G_INSERT:
3081 return selectG_INSERT(I);
3082 case TargetOpcode::G_INTRINSIC:
3083 return selectG_INTRINSIC(I);
3084 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
3085 return selectG_INTRINSIC_W_SIDE_EFFECTS(I);
3086 case TargetOpcode::G_ICMP:
3087 if (selectG_ICMP(I))
3088 return true;
3089 return selectImpl(I, *CoverageInfo);
3090 case TargetOpcode::G_LOAD:
3091 case TargetOpcode::G_STORE:
3092 case TargetOpcode::G_ATOMIC_CMPXCHG:
3093 case TargetOpcode::G_ATOMICRMW_XCHG:
3094 case TargetOpcode::G_ATOMICRMW_ADD:
3095 case TargetOpcode::G_ATOMICRMW_SUB:
3096 case TargetOpcode::G_ATOMICRMW_AND:
3097 case TargetOpcode::G_ATOMICRMW_OR:
3098 case TargetOpcode::G_ATOMICRMW_XOR:
3099 case TargetOpcode::G_ATOMICRMW_MIN:
3100 case TargetOpcode::G_ATOMICRMW_MAX:
3101 case TargetOpcode::G_ATOMICRMW_UMIN:
3102 case TargetOpcode::G_ATOMICRMW_UMAX:
3103 case TargetOpcode::G_ATOMICRMW_FADD:
3104 case AMDGPU::G_AMDGPU_ATOMIC_INC:
3105 case AMDGPU::G_AMDGPU_ATOMIC_DEC:
3106 case AMDGPU::G_AMDGPU_ATOMIC_FMIN:
3107 case AMDGPU::G_AMDGPU_ATOMIC_FMAX:
3108 return selectG_LOAD_STORE_ATOMICRMW(I);
3109 case AMDGPU::G_AMDGPU_ATOMIC_CMPXCHG:
3110 return selectG_AMDGPU_ATOMIC_CMPXCHG(I);
3111 case TargetOpcode::G_SELECT:
3112 return selectG_SELECT(I);
3113 case TargetOpcode::G_TRUNC:
3114 return selectG_TRUNC(I);
3115 case TargetOpcode::G_SEXT:
3116 case TargetOpcode::G_ZEXT:
3117 case TargetOpcode::G_ANYEXT:
3118 case TargetOpcode::G_SEXT_INREG:
3119 if (selectImpl(I, *CoverageInfo))
3120 return true;
3121 return selectG_SZA_EXT(I);
3122 case TargetOpcode::G_BRCOND:
3123 return selectG_BRCOND(I);
3124 case TargetOpcode::G_GLOBAL_VALUE:
3125 return selectG_GLOBAL_VALUE(I);
3126 case TargetOpcode::G_PTRMASK:
3127 return selectG_PTRMASK(I);
3128 case TargetOpcode::G_EXTRACT_VECTOR_ELT:
3129 return selectG_EXTRACT_VECTOR_ELT(I);
3130 case TargetOpcode::G_INSERT_VECTOR_ELT:
3131 return selectG_INSERT_VECTOR_ELT(I);
3132 case TargetOpcode::G_SHUFFLE_VECTOR:
3133 return selectG_SHUFFLE_VECTOR(I);
3134 case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD:
3135 case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE: {
3136 const AMDGPU::ImageDimIntrinsicInfo *Intr
3137 = AMDGPU::getImageDimIntrinsicInfo(I.getIntrinsicID());
3138 assert(Intr && "not an image intrinsic with image pseudo");
3139 return selectImageIntrinsic(I, Intr);
3140 }
3141 case AMDGPU::G_AMDGPU_INTRIN_BVH_INTERSECT_RAY:
3142 return selectBVHIntrinsic(I);
3143 case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD:
3144 return selectAMDGPU_BUFFER_ATOMIC_FADD(I);
3145 default:
3146 return selectImpl(I, *CoverageInfo);
3147 }
3148 return false;
3149}
3150
3151InstructionSelector::ComplexRendererFns
3152AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
3153 return {{
3154 [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3155 }};
3156
3157}
3158
3159std::pair<Register, unsigned>
3160AMDGPUInstructionSelector::selectVOP3ModsImpl(MachineOperand &Root) const {
3161 Register Src = Root.getReg();
3162 Register OrigSrc = Src;
3163 unsigned Mods = 0;
3164 MachineInstr *MI = getDefIgnoringCopies(Src, *MRI);
3165
3166 if (MI && MI->getOpcode() == AMDGPU::G_FNEG) {
3167 Src = MI->getOperand(1).getReg();
3168 Mods |= SISrcMods::NEG;
3169 MI = getDefIgnoringCopies(Src, *MRI);
3170 }
3171
3172 if (MI && MI->getOpcode() == AMDGPU::G_FABS) {
3173 Src = MI->getOperand(1).getReg();
3174 Mods |= SISrcMods::ABS;
3175 }
3176
3177 if (Mods != 0 &&
3178 RBI.getRegBank(Src, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) {
3179 MachineInstr *UseMI = Root.getParent();
3180
3181 // If we looked through copies to find source modifiers on an SGPR operand,
3182 // we now have an SGPR register source. To avoid potentially violating the
3183 // constant bus restriction, we need to insert a copy to a VGPR.
3184 Register VGPRSrc = MRI->cloneVirtualRegister(OrigSrc);
3185 BuildMI(*UseMI->getParent(), UseMI, UseMI->getDebugLoc(),
3186 TII.get(AMDGPU::COPY), VGPRSrc)
3187 .addReg(Src);
3188 Src = VGPRSrc;
3189 }
3190
3191 return std::make_pair(Src, Mods);
3192}
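
A standalone sketch of the modifier folding above on a toy expression tree, ignoring the copy look-through and the SGPR->VGPR fixup; the NEG/ABS flag values are placeholders, not the real SISrcMods encoding:

// Toy nodes standing in for G_FNEG/G_FABS defs.
enum class Op { FNeg, FAbs, Leaf };
struct Node { Op Kind; const Node *Src; };

// Fold at most one fneg, then one fabs, into modifier flags.
static unsigned foldVOP3Mods(const Node *&N) {
  const unsigned NEG = 1, ABS = 2; // placeholder flag values
  unsigned Mods = 0;
  if (N->Kind == Op::FNeg) { Mods |= NEG; N = N->Src; }
  if (N->Kind == Op::FAbs) { Mods |= ABS; N = N->Src; }
  return Mods; // N now points at the underlying source
}

As in the code above, only the fneg(fabs(x)) nesting folds both flags; a fabs at the root folds just ABS.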
3193
3194///
3195/// This will select either an SGPR or VGPR operand and will save us from
3196/// having to write an extra tablegen pattern.
3197InstructionSelector::ComplexRendererFns
3198AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
3199 return {{
3200 [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3201 }};
3202}
3203
3204InstructionSelector::ComplexRendererFns
3205AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
3206 Register Src;
3207 unsigned Mods;
3208 std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3209
3210 return {{
3211 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3212 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3213 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
3214 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // omod
3215 }};
3216}
3217
3218InstructionSelector::ComplexRendererFns
3219AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
3220 return {{
3221 [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
3222 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
3223 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // omod
3224 }};
3225}
3226
3227InstructionSelector::ComplexRendererFns
3228AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
3229 Register Src;
3230 unsigned Mods;
3231 std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3232
3233 return {{
3234 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3235 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3236 }};
3237}
3238
3239InstructionSelector::ComplexRendererFns
3240AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const {
3241 Register Reg = Root.getReg();
3242 const MachineInstr *Def = getDefIgnoringCopies(Reg, *MRI);
3243 if (Def && (Def->getOpcode() == AMDGPU::G_FNEG ||
3244 Def->getOpcode() == AMDGPU::G_FABS))
3245 return {};
3246 return {{
3247 [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3248 }};
3249}
3250
3251std::pair<Register, unsigned>
3252AMDGPUInstructionSelector::selectVOP3PModsImpl(
3253 Register Src, const MachineRegisterInfo &MRI) const {
3254 unsigned Mods = 0;
3255 MachineInstr *MI = MRI.getVRegDef(Src);
3256
3257 if (MI && MI->getOpcode() == AMDGPU::G_FNEG &&
3258 // It's possible to see an f32 fneg here, but unlikely.
3259 // TODO: Treat f32 fneg as only high bit.
3260 MRI.getType(Src) == LLT::vector(2, 16)) {
3261 Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
3262 Src = MI->getOperand(1).getReg();
3263 MI = MRI.getVRegDef(Src);
Value stored to 'MI' is never read
3264 }
3265
3266 // TODO: Match op_sel through g_build_vector_trunc and g_shuffle_vector.
3267
3268 // Packed instructions do not have abs modifiers.
3269 Mods |= SISrcMods::OP_SEL_1;
3270
3271 return std::make_pair(Src, Mods);
3272}
3273
3274InstructionSelector::ComplexRendererFns
3275AMDGPUInstructionSelector::selectVOP3PMods(MachineOperand &Root) const {
3276 MachineRegisterInfo &MRI
3277 = Root.getParent()->getParent()->getParent()->getRegInfo();
3278
3279 Register Src;
3280 unsigned Mods;
3281 std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI);
3282
3283 return {{
3284 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3285 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3286 }};
3287}
3288
3289InstructionSelector::ComplexRendererFns
3290AMDGPUInstructionSelector::selectVOP3Mods_nnan(MachineOperand &Root) const {
3291 Register Src;
3292 unsigned Mods;
3293 std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3294 if (!isKnownNeverNaN(Src, *MRI))
3295 return None;
3296
3297 return {{
3298 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3299 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3300 }};
3301}
3302
3303InstructionSelector::ComplexRendererFns
3304AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const {
3305 // FIXME: Handle op_sel
3306 return {{
3307 [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
3308 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // src_mods
3309 }};
3310}
3311
3312InstructionSelector::ComplexRendererFns
3313AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
3314 SmallVector<GEPInfo, 4> AddrInfo;
3315 getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3316
3317 if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3318 return None;
3319
3320 const GEPInfo &GEPInfo = AddrInfo[0];
3321 Optional<int64_t> EncodedImm =
3322 AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm, false);
3323 if (!EncodedImm)
3324 return None;
3325
3326 unsigned PtrReg = GEPInfo.SgprParts[0];
3327 return {{
3328 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3329 [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3330 }};
3331}
3332
3333InstructionSelector::ComplexRendererFns
3334AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
3335 SmallVector<GEPInfo, 4> AddrInfo;
3336 getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3337
3338 if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3339 return None;
3340
3341 const GEPInfo &GEPInfo = AddrInfo[0];
3342 Register PtrReg = GEPInfo.SgprParts[0];
3343 Optional<int64_t> EncodedImm =
3344 AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm);
3345 if (!EncodedImm)
3346 return None;
3347
3348 return {{
3349 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3350 [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3351 }};
3352}
3353
3354InstructionSelector::ComplexRendererFns
3355AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
3356 MachineInstr *MI = Root.getParent();
3357 MachineBasicBlock *MBB = MI->getParent();
3358
3359 SmallVector<GEPInfo, 4> AddrInfo;
3360 getAddrModeInfo(*MI, *MRI, AddrInfo);
3361
3362 // FIXME: We should shrink the GEP if the offset is known to be <= 32-bits,
3363 // then we can select all ptr + 32-bit offsets not just immediate offsets.
3364 if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3365 return None;
3366
3367 const GEPInfo &GEPInfo = AddrInfo[0];
3368 // SGPR offset is unsigned.
3369 if (!GEPInfo.Imm || GEPInfo.Imm < 0 || !isUInt<32>(GEPInfo.Imm))
3370 return None;
3371
3372 // If we make it this far, we have a load with a 32-bit immediate offset.
3373 // It is OK to select this using a sgpr offset, because we have already
3374 // failed trying to select this load into one of the _IMM variants since
3375 // the _IMM Patterns are considered before the _SGPR patterns.
3376 Register PtrReg = GEPInfo.SgprParts[0];
3377 Register OffsetReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3378 BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
3379 .addImm(GEPInfo.Imm);
3380 return {{
3381 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3382 [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }
3383 }};
3384}
3385
3386template <bool Signed>
3387std::pair<Register, int>
3388AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root) const {
3389 MachineInstr *MI = Root.getParent();
3390
3391 auto Default = std::make_pair(Root.getReg(), 0);
3392
3393 if (!STI.hasFlatInstOffsets())
3394 return Default;
3395
3396 const MachineInstr *OpDef = MRI->getVRegDef(Root.getReg());
3397 if (!OpDef || OpDef->getOpcode() != AMDGPU::G_PTR_ADD)
3398 return Default;
3399
3400 Optional<int64_t> Offset =
3401 getConstantVRegVal(OpDef->getOperand(2).getReg(), *MRI);
3402 if (!Offset.hasValue())
3403 return Default;
3404
3405 unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
3406 if (!TII.isLegalFLATOffset(Offset.getValue(), AddrSpace, Signed))
3407 return Default;
3408
3409 Register BasePtr = OpDef->getOperand(1).getReg();
3410
3411 return std::make_pair(BasePtr, Offset.getValue());
3412}
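
A standalone sketch of the decomposition performed above: peel a constant addend off the address when the target can encode it as an immediate. isLegalFLATOffsetModel stands in for TII.isLegalFLATOffset, and its 12-bit ranges are only an illustrative assumption; real widths vary by subtarget and address space:

#include <cstdint>
#include <utility>

// Illustrative stand-in for TII.isLegalFLATOffset.
static bool isLegalFLATOffsetModel(int64_t Off, bool Signed) {
  return Signed ? (Off >= -2048 && Off < 2048) : (Off >= 0 && Off < 4096);
}

// Split (Base + Const) into {base, imm}; fall back to {whole address, 0}.
static std::pair<int64_t, int64_t>
splitFlatAddress(int64_t Base, int64_t Const, bool Signed) {
  if (!isLegalFLATOffsetModel(Const, Signed))
    return {Base + Const, 0}; // keep the add; offset field stays 0
  return {Base, Const};       // encode the constant as the imm offset
}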
3413
3414InstructionSelector::ComplexRendererFns
3415AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
3416 auto PtrWithOffset = selectFlatOffsetImpl<false>(Root);
3417
3418 return {{
3419 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3420 [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3421 }};
3422}
3423
3424InstructionSelector::ComplexRendererFns
3425AMDGPUInstructionSelector::selectFlatOffsetSigned(MachineOperand &Root) const {
3426 auto PtrWithOffset = selectFlatOffsetImpl<true>(Root);
3427
3428 return {{
3429 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3430 [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3431 }};
3432}
3433
3434/// Match a zero extend from a 32-bit value to 64-bits.
3435static Register matchZeroExtendFromS32(MachineRegisterInfo &MRI, Register Reg) {
3436 Register ZExtSrc;
3437 if (mi_match(Reg, MRI, m_GZExt(m_Reg(ZExtSrc))))
3438 return MRI.getType(ZExtSrc) == LLT::scalar(32) ? ZExtSrc : Register();
3439
3440 // Match legalized form %zext = G_MERGE_VALUES (s32 %x), (s32 0)
3441 const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
3442 if (Def->getOpcode() != AMDGPU::G_MERGE_VALUES)
3443 return Register();
3444
3445 int64_t MergeRHS;
3446 if (mi_match(Def->getOperand(2).getReg(), MRI, m_ICst(MergeRHS)) &&
3447 MergeRHS == 0) {
3448 return Def->getOperand(1).getReg();
3449 }
3450
3451 return Register();
3452}
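
Both accepted shapes yield the same 32-bit source: an explicit G_ZEXT from s32, or its legalized spelling G_MERGE_VALUES(lo, 0). A sketch over toy value forms (all names hypothetical):

#include <cstdint>
#include <optional>

struct ZExt64 { uint32_t Src; };     // zext(s32) -> s64
struct Merge64 { uint32_t Lo, Hi; }; // merge_values(lo, hi)

static std::optional<uint32_t> matchZExt32(const ZExt64 &Z) {
  return Z.Src;                      // trivially the 32-bit source
}
static std::optional<uint32_t> matchZExt32(const Merge64 &M) {
  if (M.Hi == 0)                     // high half must be zero
    return M.Lo;
  return std::nullopt;
}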
3453
3454// Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset)
3455InstructionSelector::ComplexRendererFns
3456AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root) const {
3457 Register PtrBase;
3458 int64_t ImmOffset;
3459
3460 // Match the immediate offset first, which canonically is moved as low as
3461 // possible.
3462 std::tie(PtrBase, ImmOffset) = getPtrBaseWithConstantOffset(Root.getReg(),
3463 *MRI);
3464
3465 // TODO: Could split larger constant into VGPR offset.
3466 if (ImmOffset != 0 &&
3467 !TII.isLegalFLATOffset(ImmOffset, AMDGPUAS::GLOBAL_ADDRESS, true)) {
3468 PtrBase = Root.getReg();
3469 ImmOffset = 0;
3470 }
3471
3472 // Match the variable offset.
3473 const MachineInstr *PtrBaseDef = getDefIgnoringCopies(PtrBase, *MRI);
3474 if (PtrBaseDef->getOpcode() != AMDGPU::G_PTR_ADD)
3475 return None;
3476
3477 // Look through the SGPR->VGPR copy.
3478 Register PtrBaseSrc =
3479 getSrcRegIgnoringCopies(PtrBaseDef->getOperand(1).getReg(), *MRI);
3480 if (!PtrBaseSrc)
3481 return None;
3482
3483 const RegisterBank *BaseRB = RBI.getRegBank(PtrBaseSrc, *MRI, TRI);
3484 if (BaseRB->getID() != AMDGPU::SGPRRegBankID)
3485 return None;
3486
3487 Register SAddr = PtrBaseSrc;
3488 Register PtrBaseOffset = PtrBaseDef->getOperand(2).getReg();
3489
3490 // It's possible voffset is an SGPR here, but the copy to VGPR will be
3491 // inserted later.
3492 Register VOffset = matchZeroExtendFromS32(*MRI, PtrBaseOffset);
3493 if (!VOffset)
3494 return None;
3495
3496 return {{[=](MachineInstrBuilder &MIB) { // saddr
3497 MIB.addReg(SAddr);
3498 },
3499 [=](MachineInstrBuilder &MIB) { // voffset
3500 MIB.addReg(VOffset);
3501 },
3502 [=](MachineInstrBuilder &MIB) { // offset
3503 MIB.addImm(ImmOffset);
3504 }}};
3505}
3506
3507static bool isStackPtrRelative(const MachinePointerInfo &PtrInfo) {
3508 auto PSV = PtrInfo.V.dyn_cast<const PseudoSourceValue *>();
3509 return PSV && PSV->isStack();
3510}
3511
3512InstructionSelector::ComplexRendererFns
3513AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
3514 MachineInstr *MI = Root.getParent();
3515 MachineBasicBlock *MBB = MI->getParent();
3516 MachineFunction *MF = MBB->getParent();
3517 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3518
3519 int64_t Offset = 0;
3520 if (mi_match(Root.getReg(), *MRI, m_ICst(Offset)) &&
3521 Offset != TM.getNullPointerValue(AMDGPUAS::PRIVATE_ADDRESS)) {
3522 Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3523
3524 // TODO: Should this be inside the render function? The iterator seems to
3525 // move.
3526 BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
3527 HighBits)
3528 .addImm(Offset & ~4095);
3529
3530 return {{[=](MachineInstrBuilder &MIB) { // rsrc
3531 MIB.addReg(Info->getScratchRSrcReg());
3532 },
3533 [=](MachineInstrBuilder &MIB) { // vaddr
3534 MIB.addReg(HighBits);
3535 },
3536 [=](MachineInstrBuilder &MIB) { // soffset
3537 const MachineMemOperand *MMO = *MI->memoperands_begin();
3538 const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
3539
3540 if (isStackPtrRelative(PtrInfo))
3541 MIB.addReg(Info->getStackPtrOffsetReg());
3542 else
3543 MIB.addImm(0);
3544 },
3545 [=](MachineInstrBuilder &MIB) { // offset
3546 MIB.addImm(Offset & 4095);
3547 }}};
3548 }
3549
3550 assert(Offset == 0 || Offset == -1);
3551
3552 // Try to fold a frame index directly into the MUBUF vaddr field, and any
3553 // offsets.
3554 Optional<int> FI;
3555 Register VAddr = Root.getReg();
3556 if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) {
3557 if (isBaseWithConstantOffset(Root, *MRI)) {
3558 const MachineOperand &LHS = RootDef->getOperand(1);
3559 const MachineOperand &RHS = RootDef->getOperand(2);
3560 const MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
3561 const MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());
3562 if (LHSDef && RHSDef) {
3563 int64_t PossibleOffset =
3564 RHSDef->getOperand(1).getCImm()->getSExtValue();
3565 if (SIInstrInfo::isLegalMUBUFImmOffset(PossibleOffset) &&
3566 (!STI.privateMemoryResourceIsRangeChecked() ||
3567 KnownBits->signBitIsZero(LHS.getReg()))) {
3568 if (LHSDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
3569 FI = LHSDef->getOperand(1).getIndex();
3570 else
3571 VAddr = LHS.getReg();
3572 Offset = PossibleOffset;
3573 }
3574 }
3575 } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
3576 FI = RootDef->getOperand(1).getIndex();
3577 }
3578 }
3579
3580 return {{[=](MachineInstrBuilder &MIB) { // rsrc
3581 MIB.addReg(Info->getScratchRSrcReg());
3582 },
3583 [=](MachineInstrBuilder &MIB) { // vaddr
3584 if (FI.hasValue())
3585 MIB.addFrameIndex(FI.getValue());
3586 else
3587 MIB.addReg(VAddr);
3588 },
3589 [=](MachineInstrBuilder &MIB) { // soffset
3590 // If we don't know this private access is a local stack object, it
3591 // needs to be relative to the entry point's scratch wave offset.
3592 // TODO: Should split large offsets that don't fit like above.
3593 // TODO: Don't use scratch wave offset just because the offset
3594 // didn't fit.
3595 if (!Info->isEntryFunction() && FI.hasValue())
3596 MIB.addReg(Info->getStackPtrOffsetReg());
3597 else
3598 MIB.addImm(0);
3599 },
3600 [=](MachineInstrBuilder &MIB) { // offset
3601 MIB.addImm(Offset);
3602 }}};
3603}
3604
3605bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base,
3606 int64_t Offset,
3607 unsigned OffsetBits) const {
3608 if ((OffsetBits == 16 && !isUInt<16>(Offset)) ||
3609 (OffsetBits == 8 && !isUInt<8>(Offset)))
3610 return false;
3611
3612 if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
3613 return true;
3614
3615 // On Southern Islands, instructions with a negative base value and an
3616 // offset don't seem to work.
3617 return KnownBits->signBitIsZero(Base);
3618}
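
A standalone sketch of the legality rule above: the offset must fit the unsigned 16- or 8-bit field, and targets without a usable DS offset also need the base's sign bit known zero (the Southern Islands workaround). The unsafe-folding escape hatch is folded into the same boolean here:

#include <cstdint>

// Model of isDSOffsetLegal; the KnownBits query is a boolean input here.
static bool isDSOffsetLegalModel(int64_t Offset, unsigned OffsetBits,
                                 bool HasUsableDSOffset,
                                 bool BaseSignBitIsZero) {
  if (OffsetBits == 16 && (Offset < 0 || Offset > 0xFFFF))
    return false;
  if (OffsetBits == 8 && (Offset < 0 || Offset > 0xFF))
    return false;
  return HasUsableDSOffset || BaseSignBitIsZero;
}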
3619
3620InstructionSelector::ComplexRendererFns
3621AMDGPUInstructionSelector::selectMUBUFScratchOffset(
3622 MachineOperand &Root) const {
3623 MachineInstr *MI = Root.getParent();
3624 MachineBasicBlock *MBB = MI->getParent();
3625
3626 int64_t Offset = 0;
3627 if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||
3628 !SIInstrInfo::isLegalMUBUFImmOffset(Offset))
3629 return {};
3630
3631 const MachineFunction *MF = MBB->getParent();
3632 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3633 const MachineMemOperand *MMO = *MI->memoperands_begin();
3634 const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
3635
3636 return {{
3637 [=](MachineInstrBuilder &MIB) { // rsrc
3638 MIB.addReg(Info->getScratchRSrcReg());
3639 },
3640 [=](MachineInstrBuilder &MIB) { // soffset
3641 if (isStackPtrRelative(PtrInfo))
3642 MIB.addReg(Info->getStackPtrOffsetReg());
3643 else
3644 MIB.addImm(0);
3645 },
3646 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
3647 }};
3648}
3649
3650std::pair<Register, unsigned>
3651AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const {
3652 const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
3653 if (!RootDef)
3654 return std::make_pair(Root.getReg(), 0);
3655
3656 int64_t ConstAddr = 0;
3657
3658 Register PtrBase;
3659 int64_t Offset;
3660 std::tie(PtrBase, Offset) =
3661 getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3662
3663 if (Offset) {
3664 if (isDSOffsetLegal(PtrBase, Offset, 16)) {
3665 // (add n0, c0)
3666 return std::make_pair(PtrBase, Offset);
3667 }
3668 } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
3669 // TODO
3670
3671
3672 } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
3673 // TODO
3674
3675 }
3676
3677 return std::make_pair(Root.getReg(), 0);
3678}
3679
3680InstructionSelector::ComplexRendererFns
3681AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const {
3682 Register Reg;
3683 unsigned Offset;
3684 std::tie(Reg, Offset) = selectDS1Addr1OffsetImpl(Root);
3685 return {{
3686 [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3687 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }
3688 }};
3689}
3690
3691InstructionSelector::ComplexRendererFns
3692AMDGPUInstructionSelector::selectDS64Bit4ByteAligned(MachineOperand &Root) const {
3693 return selectDSReadWrite2(Root, false);
3694}
3695
3696InstructionSelector::ComplexRendererFns
3697AMDGPUInstructionSelector::selectDS128Bit8ByteAligned(MachineOperand &Root) const {
3698 return selectDSReadWrite2(Root, true);
3699}
3700
3701InstructionSelector::ComplexRendererFns
3702AMDGPUInstructionSelector::selectDSReadWrite2(MachineOperand &Root,
3703 bool IsDS128) const {
3704 Register Reg;
3705 unsigned Offset;
3706 std::tie(Reg, Offset) = selectDSReadWrite2Impl(Root, IsDS128);
3707 return {{
3708 [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3709 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); },
3710 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset+1); }
3711 }};
3712}
3713
3714std::pair<Register, unsigned>
3715AMDGPUInstructionSelector::selectDSReadWrite2Impl(MachineOperand &Root,
3716 bool IsDS128) const {
3717 const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
3718 if (!RootDef)
3719 return std::make_pair(Root.getReg(), 0);
3720
3721 int64_t ConstAddr = 0;
3722
3723 Register PtrBase;
3724 int64_t Offset;
3725 std::tie(PtrBase, Offset) =
3726 getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3727
3728 if (Offset) {
3729 int64_t OffsetValue0 = Offset / (IsDS128 ? 8 : 4);
3730 int64_t OffsetValue1 = OffsetValue0 + 1;
3731 if (isDSOffsetLegal(PtrBase, OffsetValue1, (IsDS128 ? 16 : 8))) {
3732 // (add n0, c0)
3733 return std::make_pair(PtrBase, OffsetValue0);
3734 }
3735 } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
3736 // TODO
3737
3738 } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
3739 // TODO
3740
3741 }
3742
3743 return std::make_pair(Root.getReg(), 0);
3744}
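
A sketch of the scaling above: ds_read2/ds_write2 encode two element-sized offsets, so the byte offset is divided by the element size and the pair (n, n+1) must both be encodable. The field widths follow the isDSOffsetLegal call above:

#include <cstdint>
#include <utility>

// Model of the offset handling in selectDSReadWrite2Impl. Returns
// {legal, offset0}; offset1 is offset0 + 1 by construction.
static std::pair<bool, int64_t> scaleDS2Offset(int64_t ByteOffset,
                                               bool IsDS128) {
  const int64_t EltSize = IsDS128 ? 8 : 4;      // bytes per slot
  const unsigned OffsetBits = IsDS128 ? 16 : 8; // as passed above
  int64_t Off0 = ByteOffset / EltSize;
  int64_t Off1 = Off0 + 1;                      // second slot
  bool Legal = Off0 >= 0 && Off1 < (int64_t(1) << OffsetBits);
  return {Legal, Off0};
}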
3745
3746/// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return
3747/// the base value with the constant offset. There may be intervening copies
3748/// between \p Root and the identified constant. Returns \p Root, 0 if this does
3749/// not match the pattern.
3750std::pair<Register, int64_t>
3751AMDGPUInstructionSelector::getPtrBaseWithConstantOffset(
3752 Register Root, const MachineRegisterInfo &MRI) const {
3753 MachineInstr *RootI = getDefIgnoringCopies(Root, MRI);
3754 if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD)
3755 return {Root, 0};
3756
3757 MachineOperand &RHS = RootI->getOperand(2);
3758 Optional<ValueAndVReg> MaybeOffset
3759 = getConstantVRegValWithLookThrough(RHS.getReg(), MRI, true);
3760 if (!MaybeOffset)
3761 return {Root, 0};
3762 return {RootI->getOperand(1).getReg(), MaybeOffset->Value};
3763}
3764
3765static void addZeroImm(MachineInstrBuilder &MIB) {
3766 MIB.addImm(0);
3767}
3768
3769/// Return a resource descriptor for use with an arbitrary 64-bit pointer. If \p
3770/// BasePtr is not valid, a null base pointer will be used.
3771static Register buildRSRC(MachineIRBuilder &B, MachineRegisterInfo &MRI,
3772 uint32_t FormatLo, uint32_t FormatHi,
3773 Register BasePtr) {
3774 Register RSrc2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
3775 Register RSrc3 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
3776 Register RSrcHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
3777 Register RSrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
3778
3779 B.buildInstr(AMDGPU::S_MOV_B32)
3780 .addDef(RSrc2)
3781 .addImm(FormatLo);
3782 B.buildInstr(AMDGPU::S_MOV_B32)
3783 .addDef(RSrc3)
3784 .addImm(FormatHi);
3785
3786 // Build the half of the subregister with the constants before building the
3787 // full 128-bit register. If we are building multiple resource descriptors,
3788 // this will allow CSEing of the 2-component register.
3789 B.buildInstr(AMDGPU::REG_SEQUENCE)
3790 .addDef(RSrcHi)
3791 .addReg(RSrc2)
3792 .addImm(AMDGPU::sub0)
3793 .addReg(RSrc3)
3794 .addImm(AMDGPU::sub1);
3795
3796 Register RSrcLo = BasePtr;
3797 if (!BasePtr) {
3798 RSrcLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
3799 B.buildInstr(AMDGPU::S_MOV_B64)
3800 .addDef(RSrcLo)
3801 .addImm(0);
3802 }
3803
3804 B.buildInstr(AMDGPU::REG_SEQUENCE)
3805 .addDef(RSrc)
3806 .addReg(RSrcLo)
3807 .addImm(AMDGPU::sub0_sub1)
3808 .addReg(RSrcHi)
3809 .addImm(AMDGPU::sub2_sub3);
3810
3811 return RSrc;
3812}
3813
3814static Register buildAddr64RSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
3815 const SIInstrInfo &TII, Register BasePtr) {
3816 uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
3817
3818 // FIXME: Why are half the "default" bits ignored based on the addressing
3819 // mode?
3820 return buildRSRC(B, MRI, 0, Hi_32(DefaultFormat), BasePtr);
3821}
3822
3823static Register buildOffsetSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
3824 const SIInstrInfo &TII, Register BasePtr) {
3825 uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
3826
3827 // FIXME: Why are half the "default" bits ignored based on the addressing
3828 // mode?
3829 return buildRSRC(B, MRI, -1, Hi_32(DefaultFormat), BasePtr);
3830}
3831
3832AMDGPUInstructionSelector::MUBUFAddressData
3833AMDGPUInstructionSelector::parseMUBUFAddress(Register Src) const {
3834 MUBUFAddressData Data;
3835 Data.N0 = Src;
3836
3837 Register PtrBase;
3838 int64_t Offset;
3839
3840 std::tie(PtrBase, Offset) = getPtrBaseWithConstantOffset(Src, *MRI);
3841 if (isUInt<32>(Offset)) {
3842 Data.N0 = PtrBase;
3843 Data.Offset = Offset;
3844 }
3845
3846 if (MachineInstr *InputAdd
3847 = getOpcodeDef(TargetOpcode::G_PTR_ADD, Data.N0, *MRI)) {
3848 Data.N2 = InputAdd->getOperand(1).getReg();
3849 Data.N3 = InputAdd->getOperand(2).getReg();
3850
3851 // FIXME: Need to fix extra SGPR->VGPR copies inserted
3852 // FIXME: Don't know this was defined by operand 0
3853 //
3854 // TODO: Remove this when we have copy folding optimizations after
3855 // RegBankSelect.
3856 Data.N2 = getDefIgnoringCopies(Data.N2, *MRI)->getOperand(0).getReg();
3857 Data.N3 = getDefIgnoringCopies(Data.N3, *MRI)->getOperand(0).getReg();
3858 }
3859
3860 return Data;
3861}
3862
3863 /// Return whether the addr64 mubuf mode should be used for the given address.
3864bool AMDGPUInstructionSelector::shouldUseAddr64(MUBUFAddressData Addr) const {
3865 // (ptr_add N2, N3) -> addr64, or
3866 // (ptr_add (ptr_add N2, N3), C1) -> addr64
3867 if (Addr.N2)
3868 return true;
3869
3870 const RegisterBank *N0Bank = RBI.getRegBank(Addr.N0, *MRI, TRI);
3871 return N0Bank->getID() == AMDGPU::VGPRRegBankID;
3872}
3873
3874/// Split an immediate offset \p ImmOffset depending on whether it fits in the
3875/// immediate field. Modifies \p ImmOffset and sets \p SOffset to the variable
3876/// component.
3877void AMDGPUInstructionSelector::splitIllegalMUBUFOffset(
3878 MachineIRBuilder &B, Register &SOffset, int64_t &ImmOffset) const {
3879 if (SIInstrInfo::isLegalMUBUFImmOffset(ImmOffset))
3880 return;
3881
3882 // Illegal offset, store it in soffset.
3883 SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3884 B.buildInstr(AMDGPU::S_MOV_B32)
3885 .addDef(SOffset)
3886 .addImm(ImmOffset);
3887 ImmOffset = 0;
3888}
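
A standalone sketch of the split above; the 0..4095 range models SIInstrInfo::isLegalMUBUFImmOffset (a 12-bit unsigned field) and is hard-coded here only for illustration:

#include <cstdint>

// If the immediate does not fit the MUBUF offset field, move it into a
// (virtual) soffset register and zero the immediate, as above.
static void splitMUBUFOffsetModel(int64_t &SOffset, bool &UsesSOffset,
                                  int64_t &ImmOffset) {
  if (ImmOffset >= 0 && ImmOffset <= 4095)
    return;               // already encodable; nothing to do
  SOffset = ImmOffset;    // stands in for the S_MOV_B32 built above
  UsesSOffset = true;
  ImmOffset = 0;
}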
3889
3890bool AMDGPUInstructionSelector::selectMUBUFAddr64Impl(
3891 MachineOperand &Root, Register &VAddr, Register &RSrcReg,
3892 Register &SOffset, int64_t &Offset) const {
3893 // FIXME: Predicates should stop this from reaching here.
3894 // addr64 bit was removed for volcanic islands.
3895 if (!STI.hasAddr64() || STI.useFlatForGlobal())
3896 return false;
3897
3898 MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
3899 if (!shouldUseAddr64(AddrData))
3900 return false;
3901
3902 Register N0 = AddrData.N0;
3903 Register N2 = AddrData.N2;
3904 Register N3 = AddrData.N3;
3905 Offset = AddrData.Offset;
3906
3907 // Base pointer for the SRD.
3908 Register SRDPtr;
3909
3910 if (N2) {
3911 if (RBI.getRegBank(N2, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
3912 assert(N3);
3913 if (RBI.getRegBank(N3, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
3914 // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
3915 // addr64, and construct the default resource from a 0 address.
3916 VAddr = N0;
3917 } else {
3918 SRDPtr = N3;
3919 VAddr = N2;
3920 }
3921 } else {
3922 // N2 is not divergent.
3923 SRDPtr = N2;
3924 VAddr = N3;
3925 }
3926 } else if (RBI.getRegBank(N0, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
3927 // Use the default null pointer in the resource
3928 VAddr = N0;
3929 } else {
3930 // N0 -> offset, or
3931 // (N0 + C1) -> offset
3932 SRDPtr = N0;
3933 }
3934
3935 MachineIRBuilder B(*Root.getParent());
3936 RSrcReg = buildAddr64RSrc(B, *MRI, TII, SRDPtr);
3937 splitIllegalMUBUFOffset(B, SOffset, Offset);
3938 return true;
3939}
3940
3941bool AMDGPUInstructionSelector::selectMUBUFOffsetImpl(
3942 MachineOperand &Root, Register &RSrcReg, Register &SOffset,
3943 int64_t &Offset) const {
3944
3945 // FIXME: Pattern should not reach here.
3946 if (STI.useFlatForGlobal())
3947 return false;
3948
3949 MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
3950 if (shouldUseAddr64(AddrData))
3951 return false;
3952
3953 // N0 -> offset, or
3954 // (N0 + C1) -> offset
3955 Register SRDPtr = AddrData.N0;
3956 Offset = AddrData.Offset;
3957
3958 // TODO: Look through extensions for 32-bit soffset.
3959 MachineIRBuilder B(*Root.getParent());
3960
3961 RSrcReg = buildOffsetSrc(B, *MRI, TII, SRDPtr);
3962 splitIllegalMUBUFOffset(B, SOffset, Offset);
3963 return true;
3964}
3965
3966InstructionSelector::ComplexRendererFns
3967AMDGPUInstructionSelector::selectMUBUFAddr64(MachineOperand &Root) const {
3968 Register VAddr;
3969 Register RSrcReg;
3970 Register SOffset;
3971 int64_t Offset = 0;
3972
3973 if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
3974 return {};
3975
3976 // FIXME: Use defaulted operands for trailing 0s and remove from the complex
3977 // pattern.
3978 return {{
3979 [=](MachineInstrBuilder &MIB) { // rsrc
3980 MIB.addReg(RSrcReg);
3981 },
3982 [=](MachineInstrBuilder &MIB) { // vaddr
3983 MIB.addReg(VAddr);
3984 },
3985 [=](MachineInstrBuilder &MIB) { // soffset
3986 if (SOffset)
3987 MIB.addReg(SOffset);
3988 else
3989 MIB.addImm(0);
3990 },
3991 [=](MachineInstrBuilder &MIB) { // offset
3992 MIB.addImm(Offset);
3993 },
3994 addZeroImm, // glc
3995 addZeroImm, // slc
3996 addZeroImm, // tfe
3997 addZeroImm, // dlc
3998 addZeroImm // swz
3999 }};
4000}
4001
4002InstructionSelector::ComplexRendererFns
4003AMDGPUInstructionSelector::selectMUBUFOffset(MachineOperand &Root) const {
4004 Register RSrcReg;
4005 Register SOffset;
4006 int64_t Offset = 0;
4007
4008 if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
4009 return {};
4010
4011 return {{
4012 [=](MachineInstrBuilder &MIB) { // rsrc
4013 MIB.addReg(RSrcReg);
4014 },
4015 [=](MachineInstrBuilder &MIB) { // soffset
4016 if (SOffset)
4017 MIB.addReg(SOffset);
4018 else
4019 MIB.addImm(0);
4020 },
4021 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
4022 addZeroImm, // glc
4023 addZeroImm, // slc
4024 addZeroImm, // tfe
4025 addZeroImm, // dlc
4026 addZeroImm // swz
4027 }};
4028}
4029
4030InstructionSelector::ComplexRendererFns
4031AMDGPUInstructionSelector::selectMUBUFAddr64Atomic(MachineOperand &Root) const {
4032 Register VAddr;
4033 Register RSrcReg;
4034 Register SOffset;
4035 int64_t Offset = 0;
4036
4037 if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
4038 return {};
4039
4040 // FIXME: Use defaulted operands for trailing 0s and remove from the complex
4041 // pattern.
4042 return {{
4043 [=](MachineInstrBuilder &MIB) { // rsrc
4044 MIB.addReg(RSrcReg);
4045 },
4046 [=](MachineInstrBuilder &MIB) { // vaddr
4047 MIB.addReg(VAddr);
4048 },
4049 [=](MachineInstrBuilder &MIB) { // soffset
4050 if (SOffset)
4051 MIB.addReg(SOffset);
4052 else
4053 MIB.addImm(0);
4054 },
4055 [=](MachineInstrBuilder &MIB) { // offset
4056 MIB.addImm(Offset);
4057 },
4058 addZeroImm // slc
4059 }};
4060}
4061
4062InstructionSelector::ComplexRendererFns
4063AMDGPUInstructionSelector::selectMUBUFOffsetAtomic(MachineOperand &Root) const {
4064 Register RSrcReg;
4065 Register SOffset;
4066 int64_t Offset = 0;
4067
4068 if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
4069 return {};
4070
4071 return {{
4072 [=](MachineInstrBuilder &MIB) { // rsrc
4073 MIB.addReg(RSrcReg);
4074 },
4075 [=](MachineInstrBuilder &MIB) { // soffset
4076 if (SOffset)
4077 MIB.addReg(SOffset);
4078 else
4079 MIB.addImm(0);
4080 },
4081 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
4082 addZeroImm // slc
4083 }};
4084}
4085
4086/// Get an immediate that must be 32-bits, and treated as zero extended.
4087static Optional<uint64_t> getConstantZext32Val(Register Reg,
4088 const MachineRegisterInfo &MRI) {
4089 // getConstantVRegVal sexts any values, so see if that matters.
4090 Optional<int64_t> OffsetVal = getConstantVRegVal(Reg, MRI);
4091 if (!OffsetVal || !isInt<32>(*OffsetVal))
4092 return None;
4093 return Lo_32(*OffsetVal);
4094}
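
A sketch of the sign handling above: the looked-up constant is sign-extended, so it is accepted only if it fits in 32 bits and is then truncated to its low half, matching the Lo_32 call:

#include <cstdint>
#include <optional>

// Model of getConstantZext32Val.
static std::optional<uint64_t> zext32ConstModel(int64_t SExtVal) {
  if (SExtVal < INT32_MIN || SExtVal > INT32_MAX)
    return std::nullopt;                 // does not fit 32 bits
  return static_cast<uint32_t>(SExtVal); // low 32 bits, zero-extended
}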
4095
4096InstructionSelector::ComplexRendererFns
4097AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const {
4098 Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
4099 if (!OffsetVal)
4100 return {};
4101
4102 Optional<int64_t> EncodedImm =
4103 AMDGPU::getSMRDEncodedOffset(STI, *OffsetVal, true);
4104 if (!EncodedImm)
4105 return {};
4106
4107 return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); } }};
4108}
4109
4110InstructionSelector::ComplexRendererFns
4111AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const {
4112 assert(STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);
4113
4114 Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
4115 if (!OffsetVal)
4116 return {};
4117
4118 Optional<int64_t> EncodedImm
4119 = AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal);
4120 if (!EncodedImm)
4121 return {};
4122
4123 return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); } }};
4124}
4125
4126void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB,
4127 const MachineInstr &MI,
4128 int OpIdx) const {
4129 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4130 "Expected G_CONSTANT");
4131 MIB.addImm(MI.getOperand(1).getCImm()->getSExtValue());
4132}
4133
4134void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB,
4135 const MachineInstr &MI,
4136 int OpIdx) const {
4137 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4138 "Expected G_CONSTANT");
4139 MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue());
4140}
4141
4142void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB,
4143 const MachineInstr &MI,
4144 int OpIdx) const {
4145 assert(OpIdx == -1);
4146
4147 const MachineOperand &Op = MI.getOperand(1);
4148 if (MI.getOpcode() == TargetOpcode::G_FCONSTANT)
4149 MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
4150 else {
4151 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
4152 MIB.addImm(Op.getCImm()->getSExtValue());
4153 }
4154}
4155
4156void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB,
4157 const MachineInstr &MI,
4158 int OpIdx) const {
4159 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4160 "Expected G_CONSTANT");
4161 MIB.addImm(MI.getOperand(1).getCImm()->getValue().countPopulation());
4162}
4163
4164/// This only really exists to satisfy DAG type checking machinery, so is a
4165/// no-op here.
4166void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB,
4167 const MachineInstr &MI,
4168 int OpIdx) const {
4169 MIB.addImm(MI.getOperand(OpIdx).getImm());
4170}
4171
4172void AMDGPUInstructionSelector::renderExtractGLC(MachineInstrBuilder &MIB,
4173 const MachineInstr &MI,
4174 int OpIdx) const {
4175 assert(OpIdx >= 0 && "expected to match an immediate operand");
4176 MIB.addImm(MI.getOperand(OpIdx).getImm() & 1);
4177}
4178
4179void AMDGPUInstructionSelector::renderExtractSLC(MachineInstrBuilder &MIB,
4180 const MachineInstr &MI,
4181 int OpIdx) const {
4182 assert(OpIdx >= 0 && "expected to match an immediate operand");
4183 MIB.addImm((MI.getOperand(OpIdx).getImm() >> 1) & 1);
4184}
4185
4186void AMDGPUInstructionSelector::renderExtractDLC(MachineInstrBuilder &MIB,
4187 const MachineInstr &MI,
4188 int OpIdx) const {
4189 assert(OpIdx >= 0 && "expected to match an immediate operand");
4190 MIB.addImm((MI.getOperand(OpIdx).getImm() >> 2) & 1);
4191}
4192
4193void AMDGPUInstructionSelector::renderExtractSWZ(MachineInstrBuilder &MIB,
4194 const MachineInstr &MI,
4195 int OpIdx) const {
4196 assert(OpIdx >= 0 && "expected to match an immediate operand");
4197 MIB.addImm((MI.getOperand(OpIdx).getImm() >> 3) & 1);
4198}
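
The four renderers above decode one packed cachepolicy immediate; a standalone sketch of the bit layout their shift amounts imply:

#include <cstdint>

struct CachePolicy { unsigned GLC, SLC, DLC, SWZ; };

// Unpack the immediate the same way renderExtractGLC/SLC/DLC/SWZ do.
static CachePolicy unpackCachePolicy(int64_t Imm) {
  return {unsigned(Imm & 1),         // glc: bit 0
          unsigned((Imm >> 1) & 1),  // slc: bit 1
          unsigned((Imm >> 2) & 1),  // dlc: bit 2
          unsigned((Imm >> 3) & 1)}; // swz: bit 3
}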
4199
4200void AMDGPUInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
4201 const MachineInstr &MI,
4202 int OpIdx) const {
4203 MIB.addFrameIndex((MI.getOperand(1).getIndex()));
4204}
4205
4206bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const {
4207 return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm());
4208}
4209
4210bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const {
4211 return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm());
4212}
4213
4214bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const {
4215 return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm());
4216}
4217
4218bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const {
4219 return TII.isInlineConstant(Imm);
4220}