Bug Summary

File: /build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
Warning: line 2221, column 15
Called C++ object pointer is uninitialized
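
For context, this diagnostic comes from the analyzer's core checkers: on some feasible path, a member function is called through a pointer that was never assigned. A minimal standalone sketch, not taken from SIInstrInfo.cpp (the type and names are illustrative only), that triggers the same warning:

struct Widget { void run() {} };

void example(bool Ready) {
  Widget *W;          // W is left uninitialized on the path where Ready is false
  if (Ready)
    W = new Widget();
  W->run();           // warning: Called C++ object pointer is uninitialized
}

Running clang --analyze over such a file reports the offending call site, just as line 2221 is reported here.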

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name SIInstrInfo.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm/tools/clang/stage2-bins -resource-dir /usr/lib/llvm-16/lib/clang/16.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Target/AMDGPU -I /build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/lib/Target/AMDGPU -I include -I /build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-16/lib/clang/16.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fmacro-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/= -O2 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-misleading-indentation -std=c++17 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/= -ferror-limit 19 -fvisibility=hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o 
/tmp/scan-build-2022-09-04-125545-48738-1 -x c++ /build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
1//===- SIInstrInfo.cpp - SI Instruction Information ----------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// SI Implementation of TargetInstrInfo.
11//
12//===----------------------------------------------------------------------===//
13
14#include "SIInstrInfo.h"
15#include "AMDGPU.h"
16#include "AMDGPUInstrInfo.h"
17#include "GCNHazardRecognizer.h"
18#include "GCNSubtarget.h"
19#include "SIMachineFunctionInfo.h"
20#include "llvm/Analysis/ValueTracking.h"
21#include "llvm/CodeGen/LiveIntervals.h"
22#include "llvm/CodeGen/LiveVariables.h"
23#include "llvm/CodeGen/MachineDominators.h"
24#include "llvm/CodeGen/MachineFrameInfo.h"
25#include "llvm/CodeGen/MachineScheduler.h"
26#include "llvm/CodeGen/RegisterScavenging.h"
27#include "llvm/CodeGen/ScheduleDAG.h"
28#include "llvm/IR/DiagnosticInfo.h"
29#include "llvm/IR/IntrinsicsAMDGPU.h"
30#include "llvm/MC/MCContext.h"
31#include "llvm/Support/CommandLine.h"
32#include "llvm/Target/TargetMachine.h"
33
34using namespace llvm;
35
36#define DEBUG_TYPE "si-instr-info"
37
38#define GET_INSTRINFO_CTOR_DTOR
39#include "AMDGPUGenInstrInfo.inc"
40
41namespace llvm {
42namespace AMDGPU {
43#define GET_D16ImageDimIntrinsics_IMPL
44#define GET_ImageDimIntrinsicTable_IMPL
45#define GET_RsrcIntrinsics_IMPL
46#include "AMDGPUGenSearchableTables.inc"
47}
48}
49
50
51// Must be at least 4 to be able to branch over minimum unconditional branch
52// code. This is only for making it possible to write reasonably small tests for
53// long branches.
54static cl::opt<unsigned>
55BranchOffsetBits("amdgpu-s-branch-bits", cl::ReallyHidden, cl::init(16),
56 cl::desc("Restrict range of branch instructions (DEBUG)"));
57
58static cl::opt<bool> Fix16BitCopies(
59 "amdgpu-fix-16-bit-physreg-copies",
60 cl::desc("Fix copies between 32 and 16 bit registers by extending to 32 bit"),
61 cl::init(true),
62 cl::ReallyHidden);
63
64SIInstrInfo::SIInstrInfo(const GCNSubtarget &ST)
65 : AMDGPUGenInstrInfo(AMDGPU::ADJCALLSTACKUP, AMDGPU::ADJCALLSTACKDOWN),
66 RI(ST), ST(ST) {
67 SchedModel.init(&ST);
68}
69
70//===----------------------------------------------------------------------===//
71// TargetInstrInfo callbacks
72//===----------------------------------------------------------------------===//
73
74static unsigned getNumOperandsNoGlue(SDNode *Node) {
75 unsigned N = Node->getNumOperands();
76 while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
77 --N;
78 return N;
79}
80
81/// Returns true if both nodes have the same value for the given
82/// operand \p Op, or if both nodes do not have this operand.
83static bool nodesHaveSameOperandValue(SDNode *N0, SDNode* N1, unsigned OpName) {
84 unsigned Opc0 = N0->getMachineOpcode();
85 unsigned Opc1 = N1->getMachineOpcode();
86
87 int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName);
88 int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName);
89
90 if (Op0Idx == -1 && Op1Idx == -1)
91 return true;
92
93
94 if ((Op0Idx == -1 && Op1Idx != -1) ||
95 (Op1Idx == -1 && Op0Idx != -1))
96 return false;
97
98 // getNamedOperandIdx returns the index for the MachineInstr's operands,
99 // which includes the result as the first operand. We are indexing into the
100 // MachineSDNode's operands, so we need to skip the result operand to get
101 // the real index.
102 --Op0Idx;
103 --Op1Idx;
104
105 return N0->getOperand(Op0Idx) == N1->getOperand(Op1Idx);
106}
107
108bool SIInstrInfo::isReallyTriviallyReMaterializable(
109 const MachineInstr &MI) const {
110 if (isVOP1(MI) || isVOP2(MI) || isVOP3(MI) || isSDWA(MI) || isSALU(MI)) {
111 // Normally VALU use of exec would block the rematerialization, but that
112 // is OK in this case to have an implicit exec read as all VALU do.
113 // We really want all of the generic logic for this except for this.
114
115 // Another potential implicit use is mode register. The core logic of
116 // the RA will not attempt rematerialization if mode is set anywhere
117 // in the function, otherwise it is safe since mode is not changed.
118
119 // This differs from the generic method, which does not allow
120 // rematerialization if there are virtual register uses. We allow this,
121 // therefore this method includes SOP instructions as well.
122 return !MI.hasImplicitDef() &&
123 MI.getNumImplicitOperands() == MI.getDesc().getNumImplicitUses() &&
124 !MI.mayRaiseFPException();
125 }
126
127 return false;
128}
129
130// Returns true if the scalar result of a VALU instruction depends on exec.
131static bool resultDependsOnExec(const MachineInstr &MI) {
132 // Ignore comparisons which are only used masked with exec.
133 // This allows some hoisting/sinking of VALU comparisons.
134 if (MI.isCompare()) {
135 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
136 Register DstReg = MI.getOperand(0).getReg();
137 if (!DstReg.isVirtual())
138 return true;
139 for (MachineInstr &Use : MRI.use_nodbg_instructions(DstReg)) {
140 switch (Use.getOpcode()) {
141 case AMDGPU::S_AND_SAVEEXEC_B32:
142 case AMDGPU::S_AND_SAVEEXEC_B64:
143 break;
144 case AMDGPU::S_AND_B32:
145 case AMDGPU::S_AND_B64:
146 if (!Use.readsRegister(AMDGPU::EXEC))
147 return true;
148 break;
149 default:
150 return true;
151 }
152 }
153 return false;
154 }
155
156 switch (MI.getOpcode()) {
157 default:
158 break;
159 case AMDGPU::V_READFIRSTLANE_B32:
160 return true;
161 }
162
163 return false;
164}
165
166bool SIInstrInfo::isIgnorableUse(const MachineOperand &MO) const {
167 // Any implicit use of exec by VALU is not a real register read.
168 return MO.getReg() == AMDGPU::EXEC && MO.isImplicit() &&
169 isVALU(*MO.getParent()) && !resultDependsOnExec(*MO.getParent());
170}
171
172bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1,
173 int64_t &Offset0,
174 int64_t &Offset1) const {
175 if (!Load0->isMachineOpcode() || !Load1->isMachineOpcode())
176 return false;
177
178 unsigned Opc0 = Load0->getMachineOpcode();
179 unsigned Opc1 = Load1->getMachineOpcode();
180
181 // Make sure both are actually loads.
182 if (!get(Opc0).mayLoad() || !get(Opc1).mayLoad())
183 return false;
184
185 if (isDS(Opc0) && isDS(Opc1)) {
186
187 // FIXME: Handle this case:
188 if (getNumOperandsNoGlue(Load0) != getNumOperandsNoGlue(Load1))
189 return false;
190
191 // Check base reg.
192 if (Load0->getOperand(0) != Load1->getOperand(0))
193 return false;
194
195 // Skip read2 / write2 variants for simplicity.
196 // TODO: We should report true if the used offsets are adjacent (excluded
197 // st64 versions).
198 int Offset0Idx = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
199 int Offset1Idx = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);
200 if (Offset0Idx == -1 || Offset1Idx == -1)
201 return false;
202
203 // XXX - be careful of dataless loads
204 // getNamedOperandIdx returns the index for MachineInstrs. Since they
205 // include the output in the operand list, but SDNodes don't, we need to
206 // subtract the index by one.
207 Offset0Idx -= get(Opc0).NumDefs;
208 Offset1Idx -= get(Opc1).NumDefs;
209 Offset0 = cast<ConstantSDNode>(Load0->getOperand(Offset0Idx))->getZExtValue();
210 Offset1 = cast<ConstantSDNode>(Load1->getOperand(Offset1Idx))->getZExtValue();
211 return true;
212 }
213
214 if (isSMRD(Opc0) && isSMRD(Opc1)) {
215 // Skip time and cache invalidation instructions.
216 if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::sbase) == -1 ||
217 AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::sbase) == -1)
218 return false;
219
220 unsigned NumOps = getNumOperandsNoGlue(Load0);
221 if (NumOps != getNumOperandsNoGlue(Load1))
222 return false;
223
224 // Check base reg.
225 if (Load0->getOperand(0) != Load1->getOperand(0))
226 return false;
227
228 // Match register offsets, if both register and immediate offsets present.
229 assert(NumOps == 4 || NumOps == 5);
230 if (NumOps == 5 && Load0->getOperand(1) != Load1->getOperand(1))
231 return false;
232
233 const ConstantSDNode *Load0Offset =
234 dyn_cast<ConstantSDNode>(Load0->getOperand(NumOps - 3));
235 const ConstantSDNode *Load1Offset =
236 dyn_cast<ConstantSDNode>(Load1->getOperand(NumOps - 3));
237
238 if (!Load0Offset || !Load1Offset)
239 return false;
240
241 Offset0 = Load0Offset->getZExtValue();
242 Offset1 = Load1Offset->getZExtValue();
243 return true;
244 }
245
246 // MUBUF and MTBUF can access the same addresses.
247 if ((isMUBUF(Opc0) || isMTBUF(Opc0)) && (isMUBUF(Opc1) || isMTBUF(Opc1))) {
248
249 // MUBUF and MTBUF have vaddr at different indices.
250 if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) ||
251 !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) ||
252 !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc))
253 return false;
254
255 int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
256 int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);
257
258 if (OffIdx0 == -1 || OffIdx1 == -1)
259 return false;
260
261 // getNamedOperandIdx returns the index for MachineInstrs. Since they
262 // include the output in the operand list, but SDNodes don't, we need to
263 // subtract the index by one.
264 OffIdx0 -= get(Opc0).NumDefs;
265 OffIdx1 -= get(Opc1).NumDefs;
266
267 SDValue Off0 = Load0->getOperand(OffIdx0);
268 SDValue Off1 = Load1->getOperand(OffIdx1);
269
270 // The offset might be a FrameIndexSDNode.
271 if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1))
272 return false;
273
274 Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue();
275 Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue();
276 return true;
277 }
278
279 return false;
280}
281
282static bool isStride64(unsigned Opc) {
283 switch (Opc) {
284 case AMDGPU::DS_READ2ST64_B32:
285 case AMDGPU::DS_READ2ST64_B64:
286 case AMDGPU::DS_WRITE2ST64_B32:
287 case AMDGPU::DS_WRITE2ST64_B64:
288 return true;
289 default:
290 return false;
291 }
292}
293
294bool SIInstrInfo::getMemOperandsWithOffsetWidth(
295 const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
296 int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
297 const TargetRegisterInfo *TRI) const {
298 if (!LdSt.mayLoadOrStore())
299 return false;
300
301 unsigned Opc = LdSt.getOpcode();
302 OffsetIsScalable = false;
303 const MachineOperand *BaseOp, *OffsetOp;
304 int DataOpIdx;
305
306 if (isDS(LdSt)) {
307 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::addr);
308 OffsetOp = getNamedOperand(LdSt, AMDGPU::OpName::offset);
309 if (OffsetOp) {
310 // Normal, single offset LDS instruction.
311 if (!BaseOp) {
312 // DS_CONSUME/DS_APPEND use M0 for the base address.
313 // TODO: find the implicit use operand for M0 and use that as BaseOp?
314 return false;
315 }
316 BaseOps.push_back(BaseOp);
317 Offset = OffsetOp->getImm();
318 // Get appropriate operand, and compute width accordingly.
319 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
320 if (DataOpIdx == -1)
321 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
322 Width = getOpSize(LdSt, DataOpIdx);
323 } else {
324 // The 2 offset instructions use offset0 and offset1 instead. We can treat
325 // these as a load with a single offset if the 2 offsets are consecutive.
326 // We will use this for some partially aligned loads.
327 const MachineOperand *Offset0Op =
328 getNamedOperand(LdSt, AMDGPU::OpName::offset0);
329 const MachineOperand *Offset1Op =
330 getNamedOperand(LdSt, AMDGPU::OpName::offset1);
331
332 unsigned Offset0 = Offset0Op->getImm();
333 unsigned Offset1 = Offset1Op->getImm();
334 if (Offset0 + 1 != Offset1)
335 return false;
336
337 // Each of these offsets is in element sized units, so we need to convert
338 // to bytes of the individual reads.
339
340 unsigned EltSize;
341 if (LdSt.mayLoad())
342 EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, 0)) / 16;
343 else {
344 assert(LdSt.mayStore());
345 int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
346 EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, Data0Idx)) / 8;
347 }
348
349 if (isStride64(Opc))
350 EltSize *= 64;
351
352 BaseOps.push_back(BaseOp);
353 Offset = EltSize * Offset0;
354 // Get appropriate operand(s), and compute width accordingly.
355 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
356 if (DataOpIdx == -1) {
357 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
358 Width = getOpSize(LdSt, DataOpIdx);
359 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data1);
360 Width += getOpSize(LdSt, DataOpIdx);
361 } else {
362 Width = getOpSize(LdSt, DataOpIdx);
363 }
364 }
365 return true;
366 }
367
368 if (isMUBUF(LdSt) || isMTBUF(LdSt)) {
369 const MachineOperand *RSrc = getNamedOperand(LdSt, AMDGPU::OpName::srsrc);
370 if (!RSrc) // e.g. BUFFER_WBINVL1_VOL
371 return false;
372 BaseOps.push_back(RSrc);
373 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
374 if (BaseOp && !BaseOp->isFI())
375 BaseOps.push_back(BaseOp);
376 const MachineOperand *OffsetImm =
377 getNamedOperand(LdSt, AMDGPU::OpName::offset);
378 Offset = OffsetImm->getImm();
379 const MachineOperand *SOffset =
380 getNamedOperand(LdSt, AMDGPU::OpName::soffset);
381 if (SOffset) {
382 if (SOffset->isReg())
383 BaseOps.push_back(SOffset);
384 else
385 Offset += SOffset->getImm();
386 }
387 // Get appropriate operand, and compute width accordingly.
388 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
389 if (DataOpIdx == -1)
390 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
391 if (DataOpIdx == -1) // LDS DMA
392 return false;
393 Width = getOpSize(LdSt, DataOpIdx);
394 return true;
395 }
396
397 if (isMIMG(LdSt)) {
398 int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc);
399 BaseOps.push_back(&LdSt.getOperand(SRsrcIdx));
400 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0);
401 if (VAddr0Idx >= 0) {
402 // GFX10 possible NSA encoding.
403 for (int I = VAddr0Idx; I < SRsrcIdx; ++I)
404 BaseOps.push_back(&LdSt.getOperand(I));
405 } else {
406 BaseOps.push_back(getNamedOperand(LdSt, AMDGPU::OpName::vaddr));
407 }
408 Offset = 0;
409 // Get appropriate operand, and compute width accordingly.
410 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
411 Width = getOpSize(LdSt, DataOpIdx);
412 return true;
413 }
414
415 if (isSMRD(LdSt)) {
416 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::sbase);
417 if (!BaseOp) // e.g. S_MEMTIME
418 return false;
419 BaseOps.push_back(BaseOp);
420 OffsetOp = getNamedOperand(LdSt, AMDGPU::OpName::offset);
421 Offset = OffsetOp ? OffsetOp->getImm() : 0;
422 // Get appropriate operand, and compute width accordingly.
423 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::sdst);
424 Width = getOpSize(LdSt, DataOpIdx);
425 return true;
426 }
427
428 if (isFLAT(LdSt)) {
429 // Instructions have either vaddr or saddr or both or none.
430 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
431 if (BaseOp)
432 BaseOps.push_back(BaseOp);
433 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::saddr);
434 if (BaseOp)
435 BaseOps.push_back(BaseOp);
436 Offset = getNamedOperand(LdSt, AMDGPU::OpName::offset)->getImm();
437 // Get appropriate operand, and compute width accordingly.
438 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
439 if (DataOpIdx == -1)
440 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
441 if (DataOpIdx == -1) // LDS DMA
442 return false;
443 Width = getOpSize(LdSt, DataOpIdx);
444 return true;
445 }
446
447 return false;
448}
449
450static bool memOpsHaveSameBasePtr(const MachineInstr &MI1,
451 ArrayRef<const MachineOperand *> BaseOps1,
452 const MachineInstr &MI2,
453 ArrayRef<const MachineOperand *> BaseOps2) {
454 // Only examine the first "base" operand of each instruction, on the
455 // assumption that it represents the real base address of the memory access.
456 // Other operands are typically offsets or indices from this base address.
457 if (BaseOps1.front()->isIdenticalTo(*BaseOps2.front()))
458 return true;
459
460 if (!MI1.hasOneMemOperand() || !MI2.hasOneMemOperand())
461 return false;
462
463 auto MO1 = *MI1.memoperands_begin();
464 auto MO2 = *MI2.memoperands_begin();
465 if (MO1->getAddrSpace() != MO2->getAddrSpace())
466 return false;
467
468 auto Base1 = MO1->getValue();
469 auto Base2 = MO2->getValue();
470 if (!Base1 || !Base2)
471 return false;
472 Base1 = getUnderlyingObject(Base1);
473 Base2 = getUnderlyingObject(Base2);
474
475 if (isa<UndefValue>(Base1) || isa<UndefValue>(Base2))
476 return false;
477
478 return Base1 == Base2;
479}
480
481bool SIInstrInfo::shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
482 ArrayRef<const MachineOperand *> BaseOps2,
483 unsigned NumLoads,
484 unsigned NumBytes) const {
485 // If the mem ops (to be clustered) do not have the same base ptr, then they
486 // should not be clustered
487 if (!BaseOps1.empty() && !BaseOps2.empty()) {
488 const MachineInstr &FirstLdSt = *BaseOps1.front()->getParent();
489 const MachineInstr &SecondLdSt = *BaseOps2.front()->getParent();
490 if (!memOpsHaveSameBasePtr(FirstLdSt, BaseOps1, SecondLdSt, BaseOps2))
491 return false;
492 } else if (!BaseOps1.empty() || !BaseOps2.empty()) {
493 // If only one base op is empty, they do not have the same base ptr
494 return false;
495 }
496
497 // In order to avoid register pressure, on an average, the number of DWORDS
498 // loaded together by all clustered mem ops should not exceed 8. This is an
499 // empirical value based on certain observations and performance related
500 // experiments.
501 // The good thing about this heuristic is - it avoids clustering of too many
502 // sub-word loads, and also avoids clustering of wide loads. Below is the
503 // brief summary of how the heuristic behaves for various `LoadSize`.
504 // (1) 1 <= LoadSize <= 4: cluster at max 8 mem ops
505 // (2) 5 <= LoadSize <= 8: cluster at max 4 mem ops
506 // (3) 9 <= LoadSize <= 12: cluster at max 2 mem ops
507 // (4) 13 <= LoadSize <= 16: cluster at max 2 mem ops
508 // (5) LoadSize >= 17: do not cluster
509 const unsigned LoadSize = NumBytes / NumLoads;
510 const unsigned NumDWORDs = ((LoadSize + 3) / 4) * NumLoads;
511 return NumDWORDs <= 8;
512}
513
514// FIXME: This behaves strangely. If, for example, you have 32 load + stores,
515// the first 16 loads will be interleaved with the stores, and the next 16 will
516// be clustered as expected. It should really split into 2 16 store batches.
517//
518// Loads are clustered until this returns false, rather than trying to schedule
519// groups of stores. This also means we have to deal with saying different
520// address space loads should be clustered, and ones which might cause bank
521// conflicts.
522//
523// This might be deprecated so it might not be worth that much effort to fix.
524bool SIInstrInfo::shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1,
525 int64_t Offset0, int64_t Offset1,
526 unsigned NumLoads) const {
527 assert(Offset1 > Offset0 &&
528 "Second offset should be larger than first offset!");
529 // If we have less than 16 loads in a row, and the offsets are within 64
530 // bytes, then schedule together.
531
532 // A cacheline is 64 bytes (for global memory).
533 return (NumLoads <= 16 && (Offset1 - Offset0) < 64);
534}
535
536static void reportIllegalCopy(const SIInstrInfo *TII, MachineBasicBlock &MBB,
537 MachineBasicBlock::iterator MI,
538 const DebugLoc &DL, MCRegister DestReg,
539 MCRegister SrcReg, bool KillSrc,
540 const char *Msg = "illegal SGPR to VGPR copy") {
541 MachineFunction *MF = MBB.getParent();
542 DiagnosticInfoUnsupported IllegalCopy(MF->getFunction(), Msg, DL, DS_Error);
543 LLVMContext &C = MF->getFunction().getContext();
544 C.diagnose(IllegalCopy);
545
546 BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_ILLEGAL_COPY), DestReg)
547 .addReg(SrcReg, getKillRegState(KillSrc));
548}
549
550/// Handle copying from SGPR to AGPR, or from AGPR to AGPR on GFX908. It is not
551/// possible to have a direct copy in these cases on GFX908, so an intermediate
552/// VGPR copy is required.
553static void indirectCopyToAGPR(const SIInstrInfo &TII,
554 MachineBasicBlock &MBB,
555 MachineBasicBlock::iterator MI,
556 const DebugLoc &DL, MCRegister DestReg,
557 MCRegister SrcReg, bool KillSrc,
558 RegScavenger &RS,
559 Register ImpDefSuperReg = Register(),
560 Register ImpUseSuperReg = Register()) {
561 assert((TII.getSubtarget().hasMAIInsts() &&
562 !TII.getSubtarget().hasGFX90AInsts()) &&
563 "Expected GFX908 subtarget.");
564
565 assert((AMDGPU::SReg_32RegClass.contains(SrcReg) ||
566 AMDGPU::AGPR_32RegClass.contains(SrcReg)) &&
567 "Source register of the copy should be either an SGPR or an AGPR.");
568
569 assert(AMDGPU::AGPR_32RegClass.contains(DestReg) &&
570 "Destination register of the copy should be an AGPR.");
571
572 const SIRegisterInfo &RI = TII.getRegisterInfo();
573
574 // First try to find defining accvgpr_write to avoid temporary registers.
575 for (auto Def = MI, E = MBB.begin(); Def != E; ) {
576 --Def;
577 if (!Def->definesRegister(SrcReg, &RI))
578 continue;
579 if (Def->getOpcode() != AMDGPU::V_ACCVGPR_WRITE_B32_e64)
580 break;
581
582 MachineOperand &DefOp = Def->getOperand(1);
583 assert(DefOp.isReg() || DefOp.isImm());
584
585 if (DefOp.isReg()) {
586 // Check that register source operand if not clobbered before MI.
587 // Immediate operands are always safe to propagate.
588 bool SafeToPropagate = true;
589 for (auto I = Def; I != MI && SafeToPropagate; ++I)
590 if (I->modifiesRegister(DefOp.getReg(), &RI))
591 SafeToPropagate = false;
592
593 if (!SafeToPropagate)
594 break;
595
596 DefOp.setIsKill(false);
597 }
598
599 MachineInstrBuilder Builder =
600 BuildMI(MBB, MI, DL, TII.get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), DestReg)
601 .add(DefOp);
602 if (ImpDefSuperReg)
603 Builder.addReg(ImpDefSuperReg, RegState::Define | RegState::Implicit);
604
605 if (ImpUseSuperReg) {
606 Builder.addReg(ImpUseSuperReg,
607 getKillRegState(KillSrc) | RegState::Implicit);
608 }
609
610 return;
611 }
612
613 RS.enterBasicBlock(MBB);
614 RS.forward(MI);
615
616 // Ideally we want to have three registers for a long reg_sequence copy
617 // to hide 2 waitstates between v_mov_b32 and accvgpr_write.
618 unsigned MaxVGPRs = RI.getRegPressureLimit(&AMDGPU::VGPR_32RegClass,
619 *MBB.getParent());
620
621 // Registers in the sequence are allocated contiguously so we can just
622 // use register number to pick one of three round-robin temps.
623 unsigned RegNo = (DestReg - AMDGPU::AGPR0) % 3;
624 Register Tmp =
625 MBB.getParent()->getInfo<SIMachineFunctionInfo>()->getVGPRForAGPRCopy();
626 assert(MBB.getParent()->getRegInfo().isReserved(Tmp) &&
627 "VGPR used for an intermediate copy should have been reserved.");
628
629 // Only loop through if there are any free registers left, otherwise
630 // scavenger may report a fatal error without emergency spill slot
631 // or spill with the slot.
632 while (RegNo-- && RS.FindUnusedReg(&AMDGPU::VGPR_32RegClass)) {
633 Register Tmp2 = RS.scavengeRegister(&AMDGPU::VGPR_32RegClass, 0);
634 if (!Tmp2 || RI.getHWRegIndex(Tmp2) >= MaxVGPRs)
635 break;
636 Tmp = Tmp2;
637 RS.setRegUsed(Tmp);
638 }
639
640 // Insert copy to temporary VGPR.
641 unsigned TmpCopyOp = AMDGPU::V_MOV_B32_e32;
642 if (AMDGPU::AGPR_32RegClass.contains(SrcReg)) {
643 TmpCopyOp = AMDGPU::V_ACCVGPR_READ_B32_e64;
644 } else {
645 assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
646 }
647
648 MachineInstrBuilder UseBuilder = BuildMI(MBB, MI, DL, TII.get(TmpCopyOp), Tmp)
649 .addReg(SrcReg, getKillRegState(KillSrc));
650 if (ImpUseSuperReg) {
651 UseBuilder.addReg(ImpUseSuperReg,
652 getKillRegState(KillSrc) | RegState::Implicit);
653 }
654
655 MachineInstrBuilder DefBuilder
656 = BuildMI(MBB, MI, DL, TII.get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), DestReg)
657 .addReg(Tmp, RegState::Kill);
658
659 if (ImpDefSuperReg)
660 DefBuilder.addReg(ImpDefSuperReg, RegState::Define | RegState::Implicit);
661}
662
663static void expandSGPRCopy(const SIInstrInfo &TII, MachineBasicBlock &MBB,
664 MachineBasicBlock::iterator MI, const DebugLoc &DL,
665 MCRegister DestReg, MCRegister SrcReg, bool KillSrc,
666 const TargetRegisterClass *RC, bool Forward) {
667 const SIRegisterInfo &RI = TII.getRegisterInfo();
668 ArrayRef<int16_t> BaseIndices = RI.getRegSplitParts(RC, 4);
669 MachineBasicBlock::iterator I = MI;
670 MachineInstr *FirstMI = nullptr, *LastMI = nullptr;
671
672 for (unsigned Idx = 0; Idx < BaseIndices.size(); ++Idx) {
673 int16_t SubIdx = BaseIndices[Idx];
674 Register Reg = RI.getSubReg(DestReg, SubIdx);
675 unsigned Opcode = AMDGPU::S_MOV_B32;
676
677 // Is SGPR aligned? If so try to combine with next.
678 Register Src = RI.getSubReg(SrcReg, SubIdx);
679 bool AlignedDest = ((Reg - AMDGPU::SGPR0) % 2) == 0;
680 bool AlignedSrc = ((Src - AMDGPU::SGPR0) % 2) == 0;
681 if (AlignedDest && AlignedSrc && (Idx + 1 < BaseIndices.size())) {
682 // Can use SGPR64 copy
683 unsigned Channel = RI.getChannelFromSubReg(SubIdx);
684 SubIdx = RI.getSubRegFromChannel(Channel, 2);
685 Opcode = AMDGPU::S_MOV_B64;
686 Idx++;
687 }
688
689 LastMI = BuildMI(MBB, I, DL, TII.get(Opcode), RI.getSubReg(DestReg, SubIdx))
690 .addReg(RI.getSubReg(SrcReg, SubIdx))
691 .addReg(SrcReg, RegState::Implicit);
692
693 if (!FirstMI)
694 FirstMI = LastMI;
695
696 if (!Forward)
697 I--;
698 }
699
700 assert(FirstMI && LastMI);
701 if (!Forward)
702 std::swap(FirstMI, LastMI);
703
704 FirstMI->addOperand(
705 MachineOperand::CreateReg(DestReg, true /*IsDef*/, true /*IsImp*/));
706
707 if (KillSrc)
708 LastMI->addRegisterKilled(SrcReg, &RI);
709}
710
711void SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
712 MachineBasicBlock::iterator MI,
713 const DebugLoc &DL, MCRegister DestReg,
714 MCRegister SrcReg, bool KillSrc) const {
715 const TargetRegisterClass *RC = RI.getPhysRegClass(DestReg);
716
717 // FIXME: This is hack to resolve copies between 16 bit and 32 bit
718 // registers until all patterns are fixed.
719 if (Fix16BitCopies &&
720 ((RI.getRegSizeInBits(*RC) == 16) ^
721 (RI.getRegSizeInBits(*RI.getPhysRegClass(SrcReg)) == 16))) {
722 MCRegister &RegToFix = (RI.getRegSizeInBits(*RC) == 16) ? DestReg : SrcReg;
723 MCRegister Super = RI.get32BitRegister(RegToFix);
724 assert(RI.getSubReg(Super, AMDGPU::lo16) == RegToFix);
725 RegToFix = Super;
726
727 if (DestReg == SrcReg) {
728 // Insert empty bundle since ExpandPostRA expects an instruction here.
729 BuildMI(MBB, MI, DL, get(AMDGPU::BUNDLE));
730 return;
731 }
732
733 RC = RI.getPhysRegClass(DestReg);
734 }
735
736 if (RC == &AMDGPU::VGPR_32RegClass) {
737 assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) ||
738 AMDGPU::SReg_32RegClass.contains(SrcReg) ||
739 AMDGPU::AGPR_32RegClass.contains(SrcReg));
740 unsigned Opc = AMDGPU::AGPR_32RegClass.contains(SrcReg) ?
741 AMDGPU::V_ACCVGPR_READ_B32_e64 : AMDGPU::V_MOV_B32_e32;
742 BuildMI(MBB, MI, DL, get(Opc), DestReg)
743 .addReg(SrcReg, getKillRegState(KillSrc));
744 return;
745 }
746
747 if (RC == &AMDGPU::SReg_32_XM0RegClass ||
748 RC == &AMDGPU::SReg_32RegClass) {
749 if (SrcReg == AMDGPU::SCC) {
750 BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B32), DestReg)
751 .addImm(1)
752 .addImm(0);
753 return;
754 }
755
756 if (DestReg == AMDGPU::VCC_LO) {
757 if (AMDGPU::SReg_32RegClass.contains(SrcReg)) {
758 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), AMDGPU::VCC_LO)
759 .addReg(SrcReg, getKillRegState(KillSrc));
760 } else {
761 // FIXME: Hack until VReg_1 removed.
762 assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
763 BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32))
764 .addImm(0)
765 .addReg(SrcReg, getKillRegState(KillSrc));
766 }
767
768 return;
769 }
770
771 if (!AMDGPU::SReg_32RegClass.contains(SrcReg)) {
772 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
773 return;
774 }
775
776 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
777 .addReg(SrcReg, getKillRegState(KillSrc));
778 return;
779 }
780
781 if (RC == &AMDGPU::SReg_64RegClass) {
782 if (SrcReg == AMDGPU::SCC) {
783 BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B64), DestReg)
784 .addImm(1)
785 .addImm(0);
786 return;
787 }
788
789 if (DestReg == AMDGPU::VCC) {
790 if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
791 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC)
792 .addReg(SrcReg, getKillRegState(KillSrc));
793 } else {
794 // FIXME: Hack until VReg_1 removed.
795 assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
796 BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32))
797 .addImm(0)
798 .addReg(SrcReg, getKillRegState(KillSrc));
799 }
800
801 return;
802 }
803
804 if (!AMDGPU::SReg_64RegClass.contains(SrcReg)) {
805 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
806 return;
807 }
808
809 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
810 .addReg(SrcReg, getKillRegState(KillSrc));
811 return;
812 }
813
814 if (DestReg == AMDGPU::SCC) {
815 // Copying 64-bit or 32-bit sources to SCC barely makes sense,
816 // but SelectionDAG emits such copies for i1 sources.
817 if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
818 // This copy can only be produced by patterns
819 // with explicit SCC, which are known to be enabled
820 // only for subtargets with S_CMP_LG_U64 present.
821 assert(ST.hasScalarCompareEq64());
822 BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U64))
823 .addReg(SrcReg, getKillRegState(KillSrc))
824 .addImm(0);
825 } else {
826 assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
827 BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U32))
828 .addReg(SrcReg, getKillRegState(KillSrc))
829 .addImm(0);
830 }
831
832 return;
833 }
834
835 if (RC == &AMDGPU::AGPR_32RegClass) {
836 if (AMDGPU::VGPR_32RegClass.contains(SrcReg) ||
837 (ST.hasGFX90AInsts() && AMDGPU::SReg_32RegClass.contains(SrcReg))) {
838 BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), DestReg)
839 .addReg(SrcReg, getKillRegState(KillSrc));
840 return;
841 }
842
843 if (AMDGPU::AGPR_32RegClass.contains(SrcReg) && ST.hasGFX90AInsts()) {
844 BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_MOV_B32), DestReg)
845 .addReg(SrcReg, getKillRegState(KillSrc));
846 return;
847 }
848
849 // FIXME: Pass should maintain scavenger to avoid scan through the block on
850 // every AGPR spill.
851 RegScavenger RS;
852 indirectCopyToAGPR(*this, MBB, MI, DL, DestReg, SrcReg, KillSrc, RS);
853 return;
854 }
855
856 const unsigned Size = RI.getRegSizeInBits(*RC);
857 if (Size == 16) {
858 assert(AMDGPU::VGPR_LO16RegClass.contains(SrcReg) ||
859 AMDGPU::VGPR_HI16RegClass.contains(SrcReg) ||
860 AMDGPU::SReg_LO16RegClass.contains(SrcReg) ||
861 AMDGPU::AGPR_LO16RegClass.contains(SrcReg));
862
863 bool IsSGPRDst = AMDGPU::SReg_LO16RegClass.contains(DestReg);
864 bool IsSGPRSrc = AMDGPU::SReg_LO16RegClass.contains(SrcReg);
865 bool IsAGPRDst = AMDGPU::AGPR_LO16RegClass.contains(DestReg);
866 bool IsAGPRSrc = AMDGPU::AGPR_LO16RegClass.contains(SrcReg);
867 bool DstLow = AMDGPU::VGPR_LO16RegClass.contains(DestReg) ||
868 AMDGPU::SReg_LO16RegClass.contains(DestReg) ||
869 AMDGPU::AGPR_LO16RegClass.contains(DestReg);
870 bool SrcLow = AMDGPU::VGPR_LO16RegClass.contains(SrcReg) ||
871 AMDGPU::SReg_LO16RegClass.contains(SrcReg) ||
872 AMDGPU::AGPR_LO16RegClass.contains(SrcReg);
873 MCRegister NewDestReg = RI.get32BitRegister(DestReg);
874 MCRegister NewSrcReg = RI.get32BitRegister(SrcReg);
875
876 if (IsSGPRDst) {
877 if (!IsSGPRSrc) {
878 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
879 return;
880 }
881
882 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), NewDestReg)
883 .addReg(NewSrcReg, getKillRegState(KillSrc));
884 return;
885 }
886
887 if (IsAGPRDst || IsAGPRSrc) {
888 if (!DstLow || !SrcLow) {
889 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc,
890 "Cannot use hi16 subreg with an AGPR!");
891 }
892
893 copyPhysReg(MBB, MI, DL, NewDestReg, NewSrcReg, KillSrc);
894 return;
895 }
896
897 if (IsSGPRSrc && !ST.hasSDWAScalar()) {
898 if (!DstLow || !SrcLow) {
899 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc,
900 "Cannot use hi16 subreg on VI!");
901 }
902
903 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), NewDestReg)
904 .addReg(NewSrcReg, getKillRegState(KillSrc));
905 return;
906 }
907
908 auto MIB = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_sdwa), NewDestReg)
909 .addImm(0) // src0_modifiers
910 .addReg(NewSrcReg)
911 .addImm(0) // clamp
912 .addImm(DstLow ? AMDGPU::SDWA::SdwaSel::WORD_0
913 : AMDGPU::SDWA::SdwaSel::WORD_1)
914 .addImm(AMDGPU::SDWA::DstUnused::UNUSED_PRESERVE)
915 .addImm(SrcLow ? AMDGPU::SDWA::SdwaSel::WORD_0
916 : AMDGPU::SDWA::SdwaSel::WORD_1)
917 .addReg(NewDestReg, RegState::Implicit | RegState::Undef);
918 // First implicit operand is $exec.
919 MIB->tieOperands(0, MIB->getNumOperands() - 1);
920 return;
921 }
922
923 const TargetRegisterClass *SrcRC = RI.getPhysRegClass(SrcReg);
924 if (RC == RI.getVGPR64Class() && (SrcRC == RC || RI.isSGPRClass(SrcRC))) {
925 if (ST.hasMovB64()) {
926 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_e32), DestReg)
927 .addReg(SrcReg, getKillRegState(KillSrc));
928 return;
929 }
930 if (ST.hasPackedFP32Ops()) {
931 BuildMI(MBB, MI, DL, get(AMDGPU::V_PK_MOV_B32), DestReg)
932 .addImm(SISrcMods::OP_SEL_1)
933 .addReg(SrcReg)
934 .addImm(SISrcMods::OP_SEL_0 | SISrcMods::OP_SEL_1)
935 .addReg(SrcReg)
936 .addImm(0) // op_sel_lo
937 .addImm(0) // op_sel_hi
938 .addImm(0) // neg_lo
939 .addImm(0) // neg_hi
940 .addImm(0) // clamp
941 .addReg(SrcReg, getKillRegState(KillSrc) | RegState::Implicit);
942 return;
943 }
944 }
945
946 const bool Forward = RI.getHWRegIndex(DestReg) <= RI.getHWRegIndex(SrcReg);
947 if (RI.isSGPRClass(RC)) {
948 if (!RI.isSGPRClass(SrcRC)) {
949 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
950 return;
951 }
952 const bool CanKillSuperReg = KillSrc && !RI.regsOverlap(SrcReg, DestReg);
953 expandSGPRCopy(*this, MBB, MI, DL, DestReg, SrcReg, CanKillSuperReg, RC,
954 Forward);
955 return;
956 }
957
958 unsigned EltSize = 4;
959 unsigned Opcode = AMDGPU::V_MOV_B32_e32;
960 if (RI.isAGPRClass(RC)) {
961 if (ST.hasGFX90AInsts() && RI.isAGPRClass(SrcRC))
962 Opcode = AMDGPU::V_ACCVGPR_MOV_B32;
963 else if (RI.hasVGPRs(SrcRC) ||
964 (ST.hasGFX90AInsts() && RI.isSGPRClass(SrcRC)))
965 Opcode = AMDGPU::V_ACCVGPR_WRITE_B32_e64;
966 else
967 Opcode = AMDGPU::INSTRUCTION_LIST_END;
968 } else if (RI.hasVGPRs(RC) && RI.isAGPRClass(SrcRC)) {
969 Opcode = AMDGPU::V_ACCVGPR_READ_B32_e64;
970 } else if ((Size % 64 == 0) && RI.hasVGPRs(RC) &&
971 (RI.isProperlyAlignedRC(*RC) &&
972 (SrcRC == RC || RI.isSGPRClass(SrcRC)))) {
973 // TODO: In 96-bit case, could do a 64-bit mov and then a 32-bit mov.
974 if (ST.hasMovB64()) {
975 Opcode = AMDGPU::V_MOV_B64_e32;
976 EltSize = 8;
977 } else if (ST.hasPackedFP32Ops()) {
978 Opcode = AMDGPU::V_PK_MOV_B32;
979 EltSize = 8;
980 }
981 }
982
983 // For the cases where we need an intermediate instruction/temporary register
984 // (destination is an AGPR), we need a scavenger.
985 //
986 // FIXME: The pass should maintain this for us so we don't have to re-scan the
987 // whole block for every handled copy.
988 std::unique_ptr<RegScavenger> RS;
989 if (Opcode == AMDGPU::INSTRUCTION_LIST_END)
990 RS.reset(new RegScavenger());
991
992 ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RC, EltSize);
993
994 // If there is an overlap, we can't kill the super-register on the last
995 // instruction, since it will also kill the components made live by this def.
996 const bool CanKillSuperReg = KillSrc && !RI.regsOverlap(SrcReg, DestReg);
997
998 for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
999 unsigned SubIdx;
1000 if (Forward)
1001 SubIdx = SubIndices[Idx];
1002 else
1003 SubIdx = SubIndices[SubIndices.size() - Idx - 1];
1004
1005 bool UseKill = CanKillSuperReg && Idx == SubIndices.size() - 1;
1006
1007 if (Opcode == AMDGPU::INSTRUCTION_LIST_END) {
1008 Register ImpDefSuper = Idx == 0 ? Register(DestReg) : Register();
1009 Register ImpUseSuper = SrcReg;
1010 indirectCopyToAGPR(*this, MBB, MI, DL, RI.getSubReg(DestReg, SubIdx),
1011 RI.getSubReg(SrcReg, SubIdx), UseKill, *RS,
1012 ImpDefSuper, ImpUseSuper);
1013 } else if (Opcode == AMDGPU::V_PK_MOV_B32) {
1014 Register DstSubReg = RI.getSubReg(DestReg, SubIdx);
1015 Register SrcSubReg = RI.getSubReg(SrcReg, SubIdx);
1016 MachineInstrBuilder MIB =
1017 BuildMI(MBB, MI, DL, get(AMDGPU::V_PK_MOV_B32), DstSubReg)
1018 .addImm(SISrcMods::OP_SEL_1)
1019 .addReg(SrcSubReg)
1020 .addImm(SISrcMods::OP_SEL_0 | SISrcMods::OP_SEL_1)
1021 .addReg(SrcSubReg)
1022 .addImm(0) // op_sel_lo
1023 .addImm(0) // op_sel_hi
1024 .addImm(0) // neg_lo
1025 .addImm(0) // neg_hi
1026 .addImm(0) // clamp
1027 .addReg(SrcReg, getKillRegState(UseKill) | RegState::Implicit);
1028 if (Idx == 0)
1029 MIB.addReg(DestReg, RegState::Define | RegState::Implicit);
1030 } else {
1031 MachineInstrBuilder Builder =
1032 BuildMI(MBB, MI, DL, get(Opcode), RI.getSubReg(DestReg, SubIdx))
1033 .addReg(RI.getSubReg(SrcReg, SubIdx));
1034 if (Idx == 0)
1035 Builder.addReg(DestReg, RegState::Define | RegState::Implicit);
1036
1037 Builder.addReg(SrcReg, getKillRegState(UseKill) | RegState::Implicit);
1038 }
1039 }
1040}
1041
1042int SIInstrInfo::commuteOpcode(unsigned Opcode) const {
1043 int NewOpc;
1044
1045 // Try to map original to commuted opcode
1046 NewOpc = AMDGPU::getCommuteRev(Opcode);
1047 if (NewOpc != -1)
1048 // Check if the commuted (REV) opcode exists on the target.
1049 return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;
1050
1051 // Try to map commuted to original opcode
1052 NewOpc = AMDGPU::getCommuteOrig(Opcode);
1053 if (NewOpc != -1)
1054 // Check if the original (non-REV) opcode exists on the target.
1055 return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;
1056
1057 return Opcode;
1058}
1059
1060void SIInstrInfo::materializeImmediate(MachineBasicBlock &MBB,
1061 MachineBasicBlock::iterator MI,
1062 const DebugLoc &DL, unsigned DestReg,
1063 int64_t Value) const {
1064 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
1065 const TargetRegisterClass *RegClass = MRI.getRegClass(DestReg);
1066 if (RegClass == &AMDGPU::SReg_32RegClass ||
1067 RegClass == &AMDGPU::SGPR_32RegClass ||
1068 RegClass == &AMDGPU::SReg_32_XM0RegClass ||
1069 RegClass == &AMDGPU::SReg_32_XM0_XEXECRegClass) {
1070 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
1071 .addImm(Value);
1072 return;
1073 }
1074
1075 if (RegClass == &AMDGPU::SReg_64RegClass ||
1076 RegClass == &AMDGPU::SGPR_64RegClass ||
1077 RegClass == &AMDGPU::SReg_64_XEXECRegClass) {
1078 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
1079 .addImm(Value);
1080 return;
1081 }
1082
1083 if (RegClass == &AMDGPU::VGPR_32RegClass) {
1084 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
1085 .addImm(Value);
1086 return;
1087 }
1088 if (RegClass->hasSuperClassEq(&AMDGPU::VReg_64RegClass)) {
1089 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO), DestReg)
1090 .addImm(Value);
1091 return;
1092 }
1093
1094 unsigned EltSize = 4;
1095 unsigned Opcode = AMDGPU::V_MOV_B32_e32;
1096 if (RI.isSGPRClass(RegClass)) {
1097 if (RI.getRegSizeInBits(*RegClass) > 32) {
1098 Opcode = AMDGPU::S_MOV_B64;
1099 EltSize = 8;
1100 } else {
1101 Opcode = AMDGPU::S_MOV_B32;
1102 EltSize = 4;
1103 }
1104 }
1105
1106 ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RegClass, EltSize);
1107 for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
1108 int64_t IdxValue = Idx == 0 ? Value : 0;
1109
1110 MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
1111 get(Opcode), RI.getSubReg(DestReg, SubIndices[Idx]));
1112 Builder.addImm(IdxValue);
1113 }
1114}
1115
1116const TargetRegisterClass *
1117SIInstrInfo::getPreferredSelectRegClass(unsigned Size) const {
1118 return &AMDGPU::VGPR_32RegClass;
1119}
1120
1121void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB,
1122 MachineBasicBlock::iterator I,
1123 const DebugLoc &DL, Register DstReg,
1124 ArrayRef<MachineOperand> Cond,
1125 Register TrueReg,
1126 Register FalseReg) const {
1127 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
1128 const TargetRegisterClass *BoolXExecRC =
1129 RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
1130 assert(MRI.getRegClass(DstReg) == &AMDGPU::VGPR_32RegClass &&
1131 "Not a VGPR32 reg");
1132
1133 if (Cond.size() == 1) {
1134 Register SReg = MRI.createVirtualRegister(BoolXExecRC);
1135 BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
1136 .add(Cond[0]);
1137 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1138 .addImm(0)
1139 .addReg(FalseReg)
1140 .addImm(0)
1141 .addReg(TrueReg)
1142 .addReg(SReg);
1143 } else if (Cond.size() == 2) {
1144 assert(Cond[0].isImm() && "Cond[0] is not an immediate");
1145 switch (Cond[0].getImm()) {
1146 case SIInstrInfo::SCC_TRUE: {
1147 Register SReg = MRI.createVirtualRegister(BoolXExecRC);
1148 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
1149 : AMDGPU::S_CSELECT_B64), SReg)
1150 .addImm(1)
1151 .addImm(0);
1152 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1153 .addImm(0)
1154 .addReg(FalseReg)
1155 .addImm(0)
1156 .addReg(TrueReg)
1157 .addReg(SReg);
1158 break;
1159 }
1160 case SIInstrInfo::SCC_FALSE: {
1161 Register SReg = MRI.createVirtualRegister(BoolXExecRC);
1162 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
1163 : AMDGPU::S_CSELECT_B64), SReg)
1164 .addImm(0)
1165 .addImm(1);
1166 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1167 .addImm(0)
1168 .addReg(FalseReg)
1169 .addImm(0)
1170 .addReg(TrueReg)
1171 .addReg(SReg);
1172 break;
1173 }
1174 case SIInstrInfo::VCCNZ: {
1175 MachineOperand RegOp = Cond[1];
1176 RegOp.setImplicit(false);
1177 Register SReg = MRI.createVirtualRegister(BoolXExecRC);
1178 BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
1179 .add(RegOp);
1180 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1181 .addImm(0)
1182 .addReg(FalseReg)
1183 .addImm(0)
1184 .addReg(TrueReg)
1185 .addReg(SReg);
1186 break;
1187 }
1188 case SIInstrInfo::VCCZ: {
1189 MachineOperand RegOp = Cond[1];
1190 RegOp.setImplicit(false);
1191 Register SReg = MRI.createVirtualRegister(BoolXExecRC);
1192 BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
1193 .add(RegOp);
1194 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1195 .addImm(0)
1196 .addReg(TrueReg)
1197 .addImm(0)
1198 .addReg(FalseReg)
1199 .addReg(SReg);
1200 break;
1201 }
1202 case SIInstrInfo::EXECNZ: {
1203 Register SReg = MRI.createVirtualRegister(BoolXExecRC);
1204 Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC());
1205 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32
1206 : AMDGPU::S_OR_SAVEEXEC_B64), SReg2)
1207 .addImm(0);
1208 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
1209 : AMDGPU::S_CSELECT_B64), SReg)
1210 .addImm(1)
1211 .addImm(0);
1212 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1213 .addImm(0)
1214 .addReg(FalseReg)
1215 .addImm(0)
1216 .addReg(TrueReg)
1217 .addReg(SReg);
1218 break;
1219 }
1220 case SIInstrInfo::EXECZ: {
1221 Register SReg = MRI.createVirtualRegister(BoolXExecRC);
1222 Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC());
1223 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32
1224 : AMDGPU::S_OR_SAVEEXEC_B64), SReg2)
1225 .addImm(0);
1226 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
1227 : AMDGPU::S_CSELECT_B64), SReg)
1228 .addImm(0)
1229 .addImm(1);
1230 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1231 .addImm(0)
1232 .addReg(FalseReg)
1233 .addImm(0)
1234 .addReg(TrueReg)
1235 .addReg(SReg);
1236 llvm_unreachable("Unhandled branch predicate EXECZ")::llvm::llvm_unreachable_internal("Unhandled branch predicate EXECZ"
, "llvm/lib/Target/AMDGPU/SIInstrInfo.cpp", 1236)
;
1237 break;
1238 }
1239 default:
1240 llvm_unreachable("invalid branch predicate")::llvm::llvm_unreachable_internal("invalid branch predicate",
"llvm/lib/Target/AMDGPU/SIInstrInfo.cpp", 1240)
;
1241 }
1242 } else {
1243 llvm_unreachable("Can only handle Cond size 1 or 2")::llvm::llvm_unreachable_internal("Can only handle Cond size 1 or 2"
, "llvm/lib/Target/AMDGPU/SIInstrInfo.cpp", 1243)
;
1244 }
1245}
1246
1247Register SIInstrInfo::insertEQ(MachineBasicBlock *MBB,
1248 MachineBasicBlock::iterator I,
1249 const DebugLoc &DL,
1250 Register SrcReg, int Value) const {
1251 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
1252 Register Reg = MRI.createVirtualRegister(RI.getBoolRC());
1253 BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_EQ_I32_e64), Reg)
1254 .addImm(Value)
1255 .addReg(SrcReg);
1256
1257 return Reg;
1258}
1259
1260Register SIInstrInfo::insertNE(MachineBasicBlock *MBB,
1261 MachineBasicBlock::iterator I,
1262 const DebugLoc &DL,
1263 Register SrcReg, int Value) const {
1264 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
1265 Register Reg = MRI.createVirtualRegister(RI.getBoolRC());
1266 BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_NE_I32_e64), Reg)
1267 .addImm(Value)
1268 .addReg(SrcReg);
1269
1270 return Reg;
1271}
1272
1273unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const {
1274
1275 if (RI.isAGPRClass(DstRC))
1276 return AMDGPU::COPY;
1277 if (RI.getRegSizeInBits(*DstRC) == 32) {
1278 return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1279 } else if (RI.getRegSizeInBits(*DstRC) == 64 && RI.isSGPRClass(DstRC)) {
1280 return AMDGPU::S_MOV_B64;
1281 } else if (RI.getRegSizeInBits(*DstRC) == 64 && !RI.isSGPRClass(DstRC)) {
1282 return AMDGPU::V_MOV_B64_PSEUDO;
1283 }
1284 return AMDGPU::COPY;
1285}
1286
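Note: getMovOpcode above selects a move opcode by register class and width;
AGPR destinations and any width it does not name directly fall back to
AMDGPU::COPY for later lowering. An illustrative (hypothetical) mapping,
mirroring the branches above:

    // getMovOpcode(&AMDGPU::SGPR_32RegClass) -> AMDGPU::S_MOV_B32
    // getMovOpcode(&AMDGPU::VGPR_32RegClass) -> AMDGPU::V_MOV_B32_e32
    // 64-bit SGPR class                      -> AMDGPU::S_MOV_B64
    // 64-bit VGPR class                      -> AMDGPU::V_MOV_B64_PSEUDO
    // any AGPR class, or any other width     -> AMDGPU::COPY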
1287const MCInstrDesc &
1288SIInstrInfo::getIndirectGPRIDXPseudo(unsigned VecSize,
1289 bool IsIndirectSrc) const {
1290 if (IsIndirectSrc) {
1291 if (VecSize <= 32) // 4 bytes
1292 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V1);
1293 if (VecSize <= 64) // 8 bytes
1294 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V2);
1295 if (VecSize <= 96) // 12 bytes
1296 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V3);
1297 if (VecSize <= 128) // 16 bytes
1298 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V4);
1299 if (VecSize <= 160) // 20 bytes
1300 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V5);
1301 if (VecSize <= 256) // 32 bytes
1302 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V8);
1303 if (VecSize <= 512) // 64 bytes
1304 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V16);
1305 if (VecSize <= 1024) // 128 bytes
1306 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V32);
1307
1308 llvm_unreachable("unsupported size for IndirectRegReadGPRIDX pseudos")::llvm::llvm_unreachable_internal("unsupported size for IndirectRegReadGPRIDX pseudos"
, "llvm/lib/Target/AMDGPU/SIInstrInfo.cpp", 1308)
;
1309 }
1310
1311 if (VecSize <= 32) // 4 bytes
1312 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V1);
1313 if (VecSize <= 64) // 8 bytes
1314 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V2);
1315 if (VecSize <= 96) // 12 bytes
1316 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V3);
1317 if (VecSize <= 128) // 16 bytes
1318 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V4);
1319 if (VecSize <= 160) // 20 bytes
1320 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V5);
1321 if (VecSize <= 256) // 32 bytes
1322 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V8);
1323 if (VecSize <= 512) // 64 bytes
1324 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V16);
1325 if (VecSize <= 1024) // 128 bytes
1326 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V32);
1327
1328 llvm_unreachable("unsupported size for IndirectRegWriteGPRIDX pseudos")::llvm::llvm_unreachable_internal("unsupported size for IndirectRegWriteGPRIDX pseudos"
, "llvm/lib/Target/AMDGPU/SIInstrInfo.cpp", 1328)
;
1329}
1330
1331static unsigned getIndirectVGPRWriteMovRelPseudoOpc(unsigned VecSize) {
1332 if (VecSize <= 32) // 4 bytes
1333 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V1;
1334 if (VecSize <= 64) // 8 bytes
1335 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V2;
1336 if (VecSize <= 96) // 12 bytes
1337 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V3;
1338 if (VecSize <= 128) // 16 bytes
1339 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V4;
1340 if (VecSize <= 160) // 20 bytes
1341 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V5;
1342 if (VecSize <= 256) // 32 bytes
1343 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V8;
1344 if (VecSize <= 512) // 64 bytes
1345 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V16;
1346 if (VecSize <= 1024) // 128 bytes
1347 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V32;
1348
1349 llvm_unreachable("unsupported size for IndirectRegWrite pseudos")::llvm::llvm_unreachable_internal("unsupported size for IndirectRegWrite pseudos"
, "llvm/lib/Target/AMDGPU/SIInstrInfo.cpp", 1349)
;
1350}
1351
1352static unsigned getIndirectSGPRWriteMovRelPseudo32(unsigned VecSize) {
1353 if (VecSize <= 32) // 4 bytes
1354 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V1;
1355 if (VecSize <= 64) // 8 bytes
1356 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V2;
1357 if (VecSize <= 96) // 12 bytes
1358 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V3;
1359 if (VecSize <= 128) // 16 bytes
1360 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V4;
1361 if (VecSize <= 160) // 20 bytes
1362 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V5;
1363 if (VecSize <= 256) // 32 bytes
1364 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V8;
1365 if (VecSize <= 512) // 64 bytes
1366 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V16;
1367 if (VecSize <= 1024) // 128 bytes
1368 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V32;
1369
1370 llvm_unreachable("unsupported size for IndirectRegWrite pseudos")::llvm::llvm_unreachable_internal("unsupported size for IndirectRegWrite pseudos"
, "llvm/lib/Target/AMDGPU/SIInstrInfo.cpp", 1370)
;
1371}
1372
1373static unsigned getIndirectSGPRWriteMovRelPseudo64(unsigned VecSize) {
1374 if (VecSize <= 64) // 8 bytes
1375 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V1;
1376 if (VecSize <= 128) // 16 bytes
1377 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V2;
1378 if (VecSize <= 256) // 32 bytes
1379 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V4;
1380 if (VecSize <= 512) // 64 bytes
1381 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V8;
1382 if (VecSize <= 1024) // 128 bytes
1383 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V16;
1384
1385 llvm_unreachable("unsupported size for IndirectRegWrite pseudos")::llvm::llvm_unreachable_internal("unsupported size for IndirectRegWrite pseudos"
, "llvm/lib/Target/AMDGPU/SIInstrInfo.cpp", 1385)
;
1386}
1387
1388const MCInstrDesc &
1389SIInstrInfo::getIndirectRegWriteMovRelPseudo(unsigned VecSize, unsigned EltSize,
1390 bool IsSGPR) const {
1391 if (IsSGPR) {
1392 switch (EltSize) {
1393 case 32:
1394 return get(getIndirectSGPRWriteMovRelPseudo32(VecSize));
1395 case 64:
1396 return get(getIndirectSGPRWriteMovRelPseudo64(VecSize));
1397 default:
1398 llvm_unreachable("invalid reg indexing elt size")::llvm::llvm_unreachable_internal("invalid reg indexing elt size"
, "llvm/lib/Target/AMDGPU/SIInstrInfo.cpp", 1398)
;
1399 }
1400 }
1401
1402 assert(EltSize == 32 && "invalid reg indexing elt size");
1403 return get(getIndirectVGPRWriteMovRelPseudoOpc(VecSize));
1404}
1405
1406static unsigned getSGPRSpillSaveOpcode(unsigned Size) {
1407 switch (Size) {
1408 case 4:
1409 return AMDGPU::SI_SPILL_S32_SAVE;
1410 case 8:
1411 return AMDGPU::SI_SPILL_S64_SAVE;
1412 case 12:
1413 return AMDGPU::SI_SPILL_S96_SAVE;
1414 case 16:
1415 return AMDGPU::SI_SPILL_S128_SAVE;
1416 case 20:
1417 return AMDGPU::SI_SPILL_S160_SAVE;
1418 case 24:
1419 return AMDGPU::SI_SPILL_S192_SAVE;
1420 case 28:
1421 return AMDGPU::SI_SPILL_S224_SAVE;
1422 case 32:
1423 return AMDGPU::SI_SPILL_S256_SAVE;
1424 case 64:
1425 return AMDGPU::SI_SPILL_S512_SAVE;
1426 case 128:
1427 return AMDGPU::SI_SPILL_S1024_SAVE;
1428 default:
1429 llvm_unreachable("unknown register size")::llvm::llvm_unreachable_internal("unknown register size", "llvm/lib/Target/AMDGPU/SIInstrInfo.cpp"
, 1429)
;
1430 }
1431}
1432
1433static unsigned getVGPRSpillSaveOpcode(unsigned Size) {
1434 switch (Size) {
1435 case 4:
1436 return AMDGPU::SI_SPILL_V32_SAVE;
1437 case 8:
1438 return AMDGPU::SI_SPILL_V64_SAVE;
1439 case 12:
1440 return AMDGPU::SI_SPILL_V96_SAVE;
1441 case 16:
1442 return AMDGPU::SI_SPILL_V128_SAVE;
1443 case 20:
1444 return AMDGPU::SI_SPILL_V160_SAVE;
1445 case 24:
1446 return AMDGPU::SI_SPILL_V192_SAVE;
1447 case 28:
1448 return AMDGPU::SI_SPILL_V224_SAVE;
1449 case 32:
1450 return AMDGPU::SI_SPILL_V256_SAVE;
1451 case 64:
1452 return AMDGPU::SI_SPILL_V512_SAVE;
1453 case 128:
1454 return AMDGPU::SI_SPILL_V1024_SAVE;
1455 default:
1456 llvm_unreachable("unknown register size")::llvm::llvm_unreachable_internal("unknown register size", "llvm/lib/Target/AMDGPU/SIInstrInfo.cpp"
, 1456)
;
1457 }
1458}
1459
1460static unsigned getAGPRSpillSaveOpcode(unsigned Size) {
1461 switch (Size) {
1462 case 4:
1463 return AMDGPU::SI_SPILL_A32_SAVE;
1464 case 8:
1465 return AMDGPU::SI_SPILL_A64_SAVE;
1466 case 12:
1467 return AMDGPU::SI_SPILL_A96_SAVE;
1468 case 16:
1469 return AMDGPU::SI_SPILL_A128_SAVE;
1470 case 20:
1471 return AMDGPU::SI_SPILL_A160_SAVE;
1472 case 24:
1473 return AMDGPU::SI_SPILL_A192_SAVE;
1474 case 28:
1475 return AMDGPU::SI_SPILL_A224_SAVE;
1476 case 32:
1477 return AMDGPU::SI_SPILL_A256_SAVE;
1478 case 64:
1479 return AMDGPU::SI_SPILL_A512_SAVE;
1480 case 128:
1481 return AMDGPU::SI_SPILL_A1024_SAVE;
1482 default:
1483 llvm_unreachable("unknown register size")::llvm::llvm_unreachable_internal("unknown register size", "llvm/lib/Target/AMDGPU/SIInstrInfo.cpp"
, 1483)
;
1484 }
1485}
1486
1487static unsigned getAVSpillSaveOpcode(unsigned Size) {
1488 switch (Size) {
1489 case 4:
1490 return AMDGPU::SI_SPILL_AV32_SAVE;
1491 case 8:
1492 return AMDGPU::SI_SPILL_AV64_SAVE;
1493 case 12:
1494 return AMDGPU::SI_SPILL_AV96_SAVE;
1495 case 16:
1496 return AMDGPU::SI_SPILL_AV128_SAVE;
1497 case 20:
1498 return AMDGPU::SI_SPILL_AV160_SAVE;
1499 case 24:
1500 return AMDGPU::SI_SPILL_AV192_SAVE;
1501 case 28:
1502 return AMDGPU::SI_SPILL_AV224_SAVE;
1503 case 32:
1504 return AMDGPU::SI_SPILL_AV256_SAVE;
1505 case 64:
1506 return AMDGPU::SI_SPILL_AV512_SAVE;
1507 case 128:
1508 return AMDGPU::SI_SPILL_AV1024_SAVE;
1509 default:
1510 llvm_unreachable("unknown register size")::llvm::llvm_unreachable_internal("unknown register size", "llvm/lib/Target/AMDGPU/SIInstrInfo.cpp"
, 1510)
;
1511 }
1512}
1513
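Note: the switch keys in the four spill-save helpers above are spill sizes in
bytes, and each maps to the pseudo named for Size * 8 register bits. A few
illustrative expected values (derived from the switches above):

    // Size (bytes) -> pseudo (bits = Size * 8), e.g.:
    //   getSGPRSpillSaveOpcode(4)   == AMDGPU::SI_SPILL_S32_SAVE
    //   getSGPRSpillSaveOpcode(16)  == AMDGPU::SI_SPILL_S128_SAVE
    //   getVGPRSpillSaveOpcode(128) == AMDGPU::SI_SPILL_V1024_SAVE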
1514void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
1515 MachineBasicBlock::iterator MI,
1516 Register SrcReg, bool isKill,
1517 int FrameIndex,
1518 const TargetRegisterClass *RC,
1519 const TargetRegisterInfo *TRI) const {
1520 MachineFunction *MF = MBB.getParent();
1521 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1522 MachineFrameInfo &FrameInfo = MF->getFrameInfo();
1523 const DebugLoc &DL = MBB.findDebugLoc(MI);
1524
1525 MachinePointerInfo PtrInfo
1526 = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
1527 MachineMemOperand *MMO = MF->getMachineMemOperand(
1528 PtrInfo, MachineMemOperand::MOStore, FrameInfo.getObjectSize(FrameIndex),
1529 FrameInfo.getObjectAlign(FrameIndex));
1530 unsigned SpillSize = TRI->getSpillSize(*RC);
1531
1532 MachineRegisterInfo &MRI = MF->getRegInfo();
1533 if (RI.isSGPRClass(RC)) {
1534 MFI->setHasSpilledSGPRs();
1535 assert(SrcReg != AMDGPU::M0 && "m0 should not be spilled");
1536 assert(SrcReg != AMDGPU::EXEC_LO && SrcReg != AMDGPU::EXEC_HI &&
1537        SrcReg != AMDGPU::EXEC && "exec should not be spilled");
1538
1539 // We are only allowed to create one new instruction when spilling
1540 // registers, so we need to use a pseudo instruction for spilling SGPRs.
1541 const MCInstrDesc &OpDesc = get(getSGPRSpillSaveOpcode(SpillSize));
1542
1543 // The SGPR spill/restore instructions only work on numbered SGPRs, so we need
1544 // to make sure we are using the correct register class.
1545 if (SrcReg.isVirtual() && SpillSize == 4) {
1546 MRI.constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0_XEXECRegClass);
1547 }
1548
1549 BuildMI(MBB, MI, DL, OpDesc)
1550 .addReg(SrcReg, getKillRegState(isKill)) // data
1551 .addFrameIndex(FrameIndex) // addr
1552 .addMemOperand(MMO)
1553 .addReg(MFI->getStackPtrOffsetReg(), RegState::Implicit);
1554
1555 if (RI.spillSGPRToVGPR())
1556 FrameInfo.setStackID(FrameIndex, TargetStackID::SGPRSpill);
1557 return;
1558 }
1559
1560 unsigned Opcode = RI.isVectorSuperClass(RC) ? getAVSpillSaveOpcode(SpillSize)
1561 : RI.isAGPRClass(RC) ? getAGPRSpillSaveOpcode(SpillSize)
1562 : getVGPRSpillSaveOpcode(SpillSize);
1563 MFI->setHasSpilledVGPRs();
1564
1565 BuildMI(MBB, MI, DL, get(Opcode))
1566 .addReg(SrcReg, getKillRegState(isKill)) // data
1567 .addFrameIndex(FrameIndex) // addr
1568 .addReg(MFI->getStackPtrOffsetReg()) // scratch_offset
1569 .addImm(0) // offset
1570 .addMemOperand(MMO);
1571}
1572
1573static unsigned getSGPRSpillRestoreOpcode(unsigned Size) {
1574 switch (Size) {
1575 case 4:
1576 return AMDGPU::SI_SPILL_S32_RESTORE;
1577 case 8:
1578 return AMDGPU::SI_SPILL_S64_RESTORE;
1579 case 12:
1580 return AMDGPU::SI_SPILL_S96_RESTORE;
1581 case 16:
1582 return AMDGPU::SI_SPILL_S128_RESTORE;
1583 case 20:
1584 return AMDGPU::SI_SPILL_S160_RESTORE;
1585 case 24:
1586 return AMDGPU::SI_SPILL_S192_RESTORE;
1587 case 28:
1588 return AMDGPU::SI_SPILL_S224_RESTORE;
1589 case 32:
1590 return AMDGPU::SI_SPILL_S256_RESTORE;
1591 case 64:
1592 return AMDGPU::SI_SPILL_S512_RESTORE;
1593 case 128:
1594 return AMDGPU::SI_SPILL_S1024_RESTORE;
1595 default:
1596 llvm_unreachable("unknown register size")::llvm::llvm_unreachable_internal("unknown register size", "llvm/lib/Target/AMDGPU/SIInstrInfo.cpp"
, 1596)
;
1597 }
1598}
1599
1600static unsigned getVGPRSpillRestoreOpcode(unsigned Size) {
1601 switch (Size) {
1602 case 4:
1603 return AMDGPU::SI_SPILL_V32_RESTORE;
1604 case 8:
1605 return AMDGPU::SI_SPILL_V64_RESTORE;
1606 case 12:
1607 return AMDGPU::SI_SPILL_V96_RESTORE;
1608 case 16:
1609 return AMDGPU::SI_SPILL_V128_RESTORE;
1610 case 20:
1611 return AMDGPU::SI_SPILL_V160_RESTORE;
1612 case 24:
1613 return AMDGPU::SI_SPILL_V192_RESTORE;
1614 case 28:
1615 return AMDGPU::SI_SPILL_V224_RESTORE;
1616 case 32:
1617 return AMDGPU::SI_SPILL_V256_RESTORE;
1618 case 64:
1619 return AMDGPU::SI_SPILL_V512_RESTORE;
1620 case 128:
1621 return AMDGPU::SI_SPILL_V1024_RESTORE;
1622 default:
1623 llvm_unreachable("unknown register size")::llvm::llvm_unreachable_internal("unknown register size", "llvm/lib/Target/AMDGPU/SIInstrInfo.cpp"
, 1623)
;
1624 }
1625}
1626
1627static unsigned getAGPRSpillRestoreOpcode(unsigned Size) {
1628 switch (Size) {
1629 case 4:
1630 return AMDGPU::SI_SPILL_A32_RESTORE;
1631 case 8:
1632 return AMDGPU::SI_SPILL_A64_RESTORE;
1633 case 12:
1634 return AMDGPU::SI_SPILL_A96_RESTORE;
1635 case 16:
1636 return AMDGPU::SI_SPILL_A128_RESTORE;
1637 case 20:
1638 return AMDGPU::SI_SPILL_A160_RESTORE;
1639 case 24:
1640 return AMDGPU::SI_SPILL_A192_RESTORE;
1641 case 28:
1642 return AMDGPU::SI_SPILL_A224_RESTORE;
1643 case 32:
1644 return AMDGPU::SI_SPILL_A256_RESTORE;
1645 case 64:
1646 return AMDGPU::SI_SPILL_A512_RESTORE;
1647 case 128:
1648 return AMDGPU::SI_SPILL_A1024_RESTORE;
1649 default:
1650 llvm_unreachable("unknown register size")::llvm::llvm_unreachable_internal("unknown register size", "llvm/lib/Target/AMDGPU/SIInstrInfo.cpp"
, 1650)
;
1651 }
1652}
1653
1654static unsigned getAVSpillRestoreOpcode(unsigned Size) {
1655 switch (Size) {
1656 case 4:
1657 return AMDGPU::SI_SPILL_AV32_RESTORE;
1658 case 8:
1659 return AMDGPU::SI_SPILL_AV64_RESTORE;
1660 case 12:
1661 return AMDGPU::SI_SPILL_AV96_RESTORE;
1662 case 16:
1663 return AMDGPU::SI_SPILL_AV128_RESTORE;
1664 case 20:
1665 return AMDGPU::SI_SPILL_AV160_RESTORE;
1666 case 24:
1667 return AMDGPU::SI_SPILL_AV192_RESTORE;
1668 case 28:
1669 return AMDGPU::SI_SPILL_AV224_RESTORE;
1670 case 32:
1671 return AMDGPU::SI_SPILL_AV256_RESTORE;
1672 case 64:
1673 return AMDGPU::SI_SPILL_AV512_RESTORE;
1674 case 128:
1675 return AMDGPU::SI_SPILL_AV1024_RESTORE;
1676 default:
1677 llvm_unreachable("unknown register size")::llvm::llvm_unreachable_internal("unknown register size", "llvm/lib/Target/AMDGPU/SIInstrInfo.cpp"
, 1677)
;
1678 }
1679}
1680
1681void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
1682 MachineBasicBlock::iterator MI,
1683 Register DestReg, int FrameIndex,
1684 const TargetRegisterClass *RC,
1685 const TargetRegisterInfo *TRI) const {
1686 MachineFunction *MF = MBB.getParent();
1687 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1688 MachineFrameInfo &FrameInfo = MF->getFrameInfo();
1689 const DebugLoc &DL = MBB.findDebugLoc(MI);
1690 unsigned SpillSize = TRI->getSpillSize(*RC);
1691
1692 MachinePointerInfo PtrInfo
1693 = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
1694
1695 MachineMemOperand *MMO = MF->getMachineMemOperand(
1696 PtrInfo, MachineMemOperand::MOLoad, FrameInfo.getObjectSize(FrameIndex),
1697 FrameInfo.getObjectAlign(FrameIndex));
1698
1699 if (RI.isSGPRClass(RC)) {
1700 MFI->setHasSpilledSGPRs();
1701 assert(DestReg != AMDGPU::M0 && "m0 should not be reloaded into");
1702 assert(DestReg != AMDGPU::EXEC_LO && DestReg != AMDGPU::EXEC_HI &&
1703        DestReg != AMDGPU::EXEC && "exec should not be spilled");
1704
1705 // FIXME: Maybe this should not include a memoperand because it will be
1706 // lowered to non-memory instructions.
1707 const MCInstrDesc &OpDesc = get(getSGPRSpillRestoreOpcode(SpillSize));
1708 if (DestReg.isVirtual() && SpillSize == 4) {
1709 MachineRegisterInfo &MRI = MF->getRegInfo();
1710 MRI.constrainRegClass(DestReg, &AMDGPU::SReg_32_XM0_XEXECRegClass);
1711 }
1712
1713 if (RI.spillSGPRToVGPR())
1714 FrameInfo.setStackID(FrameIndex, TargetStackID::SGPRSpill);
1715 BuildMI(MBB, MI, DL, OpDesc, DestReg)
1716 .addFrameIndex(FrameIndex) // addr
1717 .addMemOperand(MMO)
1718 .addReg(MFI->getStackPtrOffsetReg(), RegState::Implicit);
1719
1720 return;
1721 }
1722
1723 unsigned Opcode = RI.isVectorSuperClass(RC)
1724 ? getAVSpillRestoreOpcode(SpillSize)
1725 : RI.isAGPRClass(RC) ? getAGPRSpillRestoreOpcode(SpillSize)
1726 : getVGPRSpillRestoreOpcode(SpillSize);
1727 BuildMI(MBB, MI, DL, get(Opcode), DestReg)
1728 .addFrameIndex(FrameIndex) // vaddr
1729 .addReg(MFI->getStackPtrOffsetReg()) // scratch_offset
1730 .addImm(0) // offset
1731 .addMemOperand(MMO);
1732}
1733
1734void SIInstrInfo::insertNoop(MachineBasicBlock &MBB,
1735 MachineBasicBlock::iterator MI) const {
1736 insertNoops(MBB, MI, 1);
1737}
1738
1739void SIInstrInfo::insertNoops(MachineBasicBlock &MBB,
1740 MachineBasicBlock::iterator MI,
1741 unsigned Quantity) const {
1742 DebugLoc DL = MBB.findDebugLoc(MI);
1743 while (Quantity > 0) {
1744 unsigned Arg = std::min(Quantity, 8u);
1745 Quantity -= Arg;
1746 BuildMI(MBB, MI, DL, get(AMDGPU::S_NOP)).addImm(Arg - 1);
1747 }
1748}
1749
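Note: s_nop's immediate encodes the wait-state count minus one, and a single
s_nop covers at most 8 wait states, which is why the loop above clamps each
chunk to 8 and emits Arg - 1. A minimal sketch of the chunking under those
assumptions (emitNoops is a hypothetical stand-in for the BuildMI call):

    #include <algorithm>
    #include <cstdio>

    void emitNoops(unsigned Quantity) {
      while (Quantity > 0) {
        unsigned Arg = std::min(Quantity, 8u); // at most 8 wait states per s_nop
        Quantity -= Arg;
        std::printf("s_nop %u\n", Arg - 1);    // imm N yields N + 1 wait states
      }
    }

    // emitNoops(10) prints "s_nop 7" then "s_nop 1" (8 + 2 wait states).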
1750void SIInstrInfo::insertReturn(MachineBasicBlock &MBB) const {
1751 auto MF = MBB.getParent();
1752 SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
1753
1754 assert(Info->isEntryFunction());
1755
1756 if (MBB.succ_empty()) {
1757 bool HasNoTerminator = MBB.getFirstTerminator() == MBB.end();
1758 if (HasNoTerminator) {
1759 if (Info->returnsVoid()) {
1760 BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::S_ENDPGM)).addImm(0);
1761 } else {
1762 BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::SI_RETURN_TO_EPILOG));
1763 }
1764 }
1765 }
1766}
1767
1768unsigned SIInstrInfo::getNumWaitStates(const MachineInstr &MI) {
1769 switch (MI.getOpcode()) {
1770 default:
1771 if (MI.isMetaInstruction())
1772 return 0;
1773 return 1; // FIXME: Do wait states equal cycles?
1774
1775 case AMDGPU::S_NOP:
1776 return MI.getOperand(0).getImm() + 1;
1777 // SI_RETURN_TO_EPILOG is a fallthrough to code outside of the function. The
1778 // hazard, even if one exists, won't really be visible. Should we handle it?
1779 }
1780}
1781
1782bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
1783 const SIRegisterInfo *TRI = ST.getRegisterInfo();
1784 MachineBasicBlock &MBB = *MI.getParent();
1785 DebugLoc DL = MBB.findDebugLoc(MI);
1786 switch (MI.getOpcode()) {
1. Control jumps to 'case V_MOV_B64_DPP_PSEUDO:' at line 1908
1787 default: return TargetInstrInfo::expandPostRAPseudo(MI);
1788 case AMDGPU::S_MOV_B64_term:
1789 // This is only a terminator to get the correct spill code placement during
1790 // register allocation.
1791 MI.setDesc(get(AMDGPU::S_MOV_B64));
1792 break;
1793
1794 case AMDGPU::S_MOV_B32_term:
1795 // This is only a terminator to get the correct spill code placement during
1796 // register allocation.
1797 MI.setDesc(get(AMDGPU::S_MOV_B32));
1798 break;
1799
1800 case AMDGPU::S_XOR_B64_term:
1801 // This is only a terminator to get the correct spill code placement during
1802 // register allocation.
1803 MI.setDesc(get(AMDGPU::S_XOR_B64));
1804 break;
1805
1806 case AMDGPU::S_XOR_B32_term:
1807 // This is only a terminator to get the correct spill code placement during
1808 // register allocation.
1809 MI.setDesc(get(AMDGPU::S_XOR_B32));
1810 break;
1811 case AMDGPU::S_OR_B64_term:
1812 // This is only a terminator to get the correct spill code placement during
1813 // register allocation.
1814 MI.setDesc(get(AMDGPU::S_OR_B64));
1815 break;
1816 case AMDGPU::S_OR_B32_term:
1817 // This is only a terminator to get the correct spill code placement during
1818 // register allocation.
1819 MI.setDesc(get(AMDGPU::S_OR_B32));
1820 break;
1821
1822 case AMDGPU::S_ANDN2_B64_term:
1823 // This is only a terminator to get the correct spill code placement during
1824 // register allocation.
1825 MI.setDesc(get(AMDGPU::S_ANDN2_B64));
1826 break;
1827
1828 case AMDGPU::S_ANDN2_B32_term:
1829 // This is only a terminator to get the correct spill code placement during
1830 // register allocation.
1831 MI.setDesc(get(AMDGPU::S_ANDN2_B32));
1832 break;
1833
1834 case AMDGPU::S_AND_B64_term:
1835 // This is only a terminator to get the correct spill code placement during
1836 // register allocation.
1837 MI.setDesc(get(AMDGPU::S_AND_B64));
1838 break;
1839
1840 case AMDGPU::S_AND_B32_term:
1841 // This is only a terminator to get the correct spill code placement during
1842 // register allocation.
1843 MI.setDesc(get(AMDGPU::S_AND_B32));
1844 break;
1845
1846 case AMDGPU::V_MOV_B64_PSEUDO: {
1847 Register Dst = MI.getOperand(0).getReg();
1848 Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
1849 Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1);
1850
1851 const MachineOperand &SrcOp = MI.getOperand(1);
1852 // FIXME: Will this work for 64-bit floating point immediates?
1853 assert(!SrcOp.isFPImm());
1854 if (ST.hasMovB64()) {
1855 MI.setDesc(get(AMDGPU::V_MOV_B64_e32));
1856 if (!isLiteralConstant(MI, 1) || isUInt<32>(SrcOp.getImm()))
1857 break;
1858 }
1859 if (SrcOp.isImm()) {
1860 APInt Imm(64, SrcOp.getImm());
1861 APInt Lo(32, Imm.getLoBits(32).getZExtValue());
1862 APInt Hi(32, Imm.getHiBits(32).getZExtValue());
1863 if (ST.hasPackedFP32Ops() && Lo == Hi && isInlineConstant(Lo)) {
1864 BuildMI(MBB, MI, DL, get(AMDGPU::V_PK_MOV_B32), Dst)
1865 .addImm(SISrcMods::OP_SEL_1)
1866 .addImm(Lo.getSExtValue())
1867 .addImm(SISrcMods::OP_SEL_1)
1868 .addImm(Lo.getSExtValue())
1869 .addImm(0) // op_sel_lo
1870 .addImm(0) // op_sel_hi
1871 .addImm(0) // neg_lo
1872 .addImm(0) // neg_hi
1873 .addImm(0); // clamp
1874 } else {
1875 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
1876 .addImm(Lo.getSExtValue())
1877 .addReg(Dst, RegState::Implicit | RegState::Define);
1878 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
1879 .addImm(Hi.getSExtValue())
1880 .addReg(Dst, RegState::Implicit | RegState::Define);
1881 }
1882 } else {
1883 assert(SrcOp.isReg());
1884 if (ST.hasPackedFP32Ops() &&
1885 !RI.isAGPR(MBB.getParent()->getRegInfo(), SrcOp.getReg())) {
1886 BuildMI(MBB, MI, DL, get(AMDGPU::V_PK_MOV_B32), Dst)
1887 .addImm(SISrcMods::OP_SEL_1) // src0_mod
1888 .addReg(SrcOp.getReg())
1889 .addImm(SISrcMods::OP_SEL_0 | SISrcMods::OP_SEL_1) // src1_mod
1890 .addReg(SrcOp.getReg())
1891 .addImm(0) // op_sel_lo
1892 .addImm(0) // op_sel_hi
1893 .addImm(0) // neg_lo
1894 .addImm(0) // neg_hi
1895 .addImm(0); // clamp
1896 } else {
1897 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
1898 .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0))
1899 .addReg(Dst, RegState::Implicit | RegState::Define);
1900 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
1901 .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1))
1902 .addReg(Dst, RegState::Implicit | RegState::Define);
1903 }
1904 }
1905 MI.eraseFromParent();
1906 break;
1907 }
1908 case AMDGPU::V_MOV_B64_DPP_PSEUDO: {
1909 expandMovDPP64(MI);
2. Calling 'SIInstrInfo::expandMovDPP64'
1910 break;
1911 }
1912 case AMDGPU::S_MOV_B64_IMM_PSEUDO: {
1913 const MachineOperand &SrcOp = MI.getOperand(1);
1914 assert(!SrcOp.isFPImm());
1915 APInt Imm(64, SrcOp.getImm());
1916 if (Imm.isIntN(32) || isInlineConstant(Imm)) {
1917 MI.setDesc(get(AMDGPU::S_MOV_B64));
1918 break;
1919 }
1920
1921 Register Dst = MI.getOperand(0).getReg();
1922 Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
1923 Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1);
1924
1925 APInt Lo(32, Imm.getLoBits(32).getZExtValue());
1926 APInt Hi(32, Imm.getHiBits(32).getZExtValue());
1927 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DstLo)
1928 .addImm(Lo.getSExtValue())
1929 .addReg(Dst, RegState::Implicit | RegState::Define);
1930 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DstHi)
1931 .addImm(Hi.getSExtValue())
1932 .addReg(Dst, RegState::Implicit | RegState::Define);
1933 MI.eraseFromParent();
1934 break;
1935 }
1936 case AMDGPU::V_SET_INACTIVE_B32: {
1937 unsigned NotOpc = ST.isWave32() ? AMDGPU::S_NOT_B32 : AMDGPU::S_NOT_B64;
1938 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
1939 // FIXME: We may possibly optimize the COPY once we find ways to make LLVM
1940 // optimizations (mainly Register Coalescer) aware of WWM register liveness.
1941 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), MI.getOperand(0).getReg())
1942 .add(MI.getOperand(1));
1943 auto FirstNot = BuildMI(MBB, MI, DL, get(NotOpc), Exec).addReg(Exec);
1944 FirstNot->addRegisterDead(AMDGPU::SCC, TRI); // SCC is overwritten
1945 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), MI.getOperand(0).getReg())
1946 .add(MI.getOperand(2));
1947 BuildMI(MBB, MI, DL, get(NotOpc), Exec)
1948 .addReg(Exec);
1949 MI.eraseFromParent();
1950 break;
1951 }
1952 case AMDGPU::V_SET_INACTIVE_B64: {
1953 unsigned NotOpc = ST.isWave32() ? AMDGPU::S_NOT_B32 : AMDGPU::S_NOT_B64;
1954 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
1955 MachineInstr *Copy = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO),
1956 MI.getOperand(0).getReg())
1957 .add(MI.getOperand(1));
1958 expandPostRAPseudo(*Copy);
1959 auto FirstNot = BuildMI(MBB, MI, DL, get(NotOpc), Exec).addReg(Exec);
1960 FirstNot->addRegisterDead(AMDGPU::SCC, TRI); // SCC is overwritten
1961 Copy = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO),
1962 MI.getOperand(0).getReg())
1963 .add(MI.getOperand(2));
1964 expandPostRAPseudo(*Copy);
1965 BuildMI(MBB, MI, DL, get(NotOpc), Exec)
1966 .addReg(Exec);
1967 MI.eraseFromParent();
1968 break;
1969 }
1970 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V1:
1971 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V2:
1972 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V3:
1973 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V4:
1974 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V5:
1975 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V8:
1976 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V16:
1977 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V32:
1978 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V1:
1979 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V2:
1980 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V3:
1981 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V4:
1982 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V5:
1983 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V8:
1984 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V16:
1985 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V32:
1986 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V1:
1987 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V2:
1988 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V4:
1989 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V8:
1990 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V16: {
1991 const TargetRegisterClass *EltRC = getOpRegClass(MI, 2);
1992
1993 unsigned Opc;
1994 if (RI.hasVGPRs(EltRC)) {
1995 Opc = AMDGPU::V_MOVRELD_B32_e32;
1996 } else {
1997 Opc = RI.getRegSizeInBits(*EltRC) == 64 ? AMDGPU::S_MOVRELD_B64
1998 : AMDGPU::S_MOVRELD_B32;
1999 }
2000
2001 const MCInstrDesc &OpDesc = get(Opc);
2002 Register VecReg = MI.getOperand(0).getReg();
2003 bool IsUndef = MI.getOperand(1).isUndef();
2004 unsigned SubReg = MI.getOperand(3).getImm();
2005 assert(VecReg == MI.getOperand(1).getReg());
2006
2007 MachineInstrBuilder MIB =
2008 BuildMI(MBB, MI, DL, OpDesc)
2009 .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef)
2010 .add(MI.getOperand(2))
2011 .addReg(VecReg, RegState::ImplicitDefine)
2012 .addReg(VecReg, RegState::Implicit | (IsUndef ? RegState::Undef : 0));
2013
2014 const int ImpDefIdx =
2015 OpDesc.getNumOperands() + OpDesc.getNumImplicitUses();
2016 const int ImpUseIdx = ImpDefIdx + 1;
2017 MIB->tieOperands(ImpDefIdx, ImpUseIdx);
2018 MI.eraseFromParent();
2019 break;
2020 }
2021 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V1:
2022 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V2:
2023 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V3:
2024 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V4:
2025 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V5:
2026 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V8:
2027 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V16:
2028 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V32: {
2029 assert(ST.useVGPRIndexMode());
2030 Register VecReg = MI.getOperand(0).getReg();
2031 bool IsUndef = MI.getOperand(1).isUndef();
2032 Register Idx = MI.getOperand(3).getReg();
2033 Register SubReg = MI.getOperand(4).getImm();
2034
2035 MachineInstr *SetOn = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_ON))
2036 .addReg(Idx)
2037 .addImm(AMDGPU::VGPRIndexMode::DST_ENABLE);
2038 SetOn->getOperand(3).setIsUndef();
2039
2040 const MCInstrDesc &OpDesc = get(AMDGPU::V_MOV_B32_indirect_write);
2041 MachineInstrBuilder MIB =
2042 BuildMI(MBB, MI, DL, OpDesc)
2043 .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef)
2044 .add(MI.getOperand(2))
2045 .addReg(VecReg, RegState::ImplicitDefine)
2046 .addReg(VecReg,
2047 RegState::Implicit | (IsUndef ? RegState::Undef : 0));
2048
2049 const int ImpDefIdx = OpDesc.getNumOperands() + OpDesc.getNumImplicitUses();
2050 const int ImpUseIdx = ImpDefIdx + 1;
2051 MIB->tieOperands(ImpDefIdx, ImpUseIdx);
2052
2053 MachineInstr *SetOff = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_OFF));
2054
2055 finalizeBundle(MBB, SetOn->getIterator(), std::next(SetOff->getIterator()));
2056
2057 MI.eraseFromParent();
2058 break;
2059 }
2060 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V1:
2061 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V2:
2062 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V3:
2063 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V4:
2064 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V5:
2065 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V8:
2066 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V16:
2067 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V32: {
2068 assert(ST.useVGPRIndexMode());
2069 Register Dst = MI.getOperand(0).getReg();
2070 Register VecReg = MI.getOperand(1).getReg();
2071 bool IsUndef = MI.getOperand(1).isUndef();
2072 Register Idx = MI.getOperand(2).getReg();
2073 Register SubReg = MI.getOperand(3).getImm();
2074
2075 MachineInstr *SetOn = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_ON))
2076 .addReg(Idx)
2077 .addImm(AMDGPU::VGPRIndexMode::SRC0_ENABLE);
2078 SetOn->getOperand(3).setIsUndef();
2079
2080 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_indirect_read))
2081 .addDef(Dst)
2082 .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef)
2083 .addReg(VecReg, RegState::Implicit | (IsUndef ? RegState::Undef : 0));
2084
2085 MachineInstr *SetOff = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_OFF));
2086
2087 finalizeBundle(MBB, SetOn->getIterator(), std::next(SetOff->getIterator()));
2088
2089 MI.eraseFromParent();
2090 break;
2091 }
2092 case AMDGPU::SI_PC_ADD_REL_OFFSET: {
2093 MachineFunction &MF = *MBB.getParent();
2094 Register Reg = MI.getOperand(0).getReg();
2095 Register RegLo = RI.getSubReg(Reg, AMDGPU::sub0);
2096 Register RegHi = RI.getSubReg(Reg, AMDGPU::sub1);
2097
2098 // Create a bundle so these instructions won't be re-ordered by the
2099 // post-RA scheduler.
2100 MIBundleBuilder Bundler(MBB, MI);
2101 Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_GETPC_B64), Reg));
2102
2103 // Add 32-bit offset from this instruction to the start of the
2104 // constant data.
2105 Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADD_U32), RegLo)
2106 .addReg(RegLo)
2107 .add(MI.getOperand(1)));
2108
2109 MachineInstrBuilder MIB = BuildMI(MF, DL, get(AMDGPU::S_ADDC_U32), RegHi)
2110 .addReg(RegHi);
2111 MIB.add(MI.getOperand(2));
2112
2113 Bundler.append(MIB);
2114 finalizeBundle(MBB, Bundler.begin());
2115
2116 MI.eraseFromParent();
2117 break;
2118 }
2119 case AMDGPU::ENTER_STRICT_WWM: {
2120 // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when
2121 // Whole Wave Mode is entered.
2122 MI.setDesc(get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32
2123 : AMDGPU::S_OR_SAVEEXEC_B64));
2124 break;
2125 }
2126 case AMDGPU::ENTER_STRICT_WQM: {
2127 // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when
2128 // STRICT_WQM is entered.
2129 const unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
2130 const unsigned WQMOp = ST.isWave32() ? AMDGPU::S_WQM_B32 : AMDGPU::S_WQM_B64;
2131 const unsigned MovOp = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
2132 BuildMI(MBB, MI, DL, get(MovOp), MI.getOperand(0).getReg()).addReg(Exec);
2133 BuildMI(MBB, MI, DL, get(WQMOp), Exec).addReg(Exec);
2134
2135 MI.eraseFromParent();
2136 break;
2137 }
2138 case AMDGPU::EXIT_STRICT_WWM:
2139 case AMDGPU::EXIT_STRICT_WQM: {
2140 // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when
2141 // WWM/STRICT_WQM is exited.
2142 MI.setDesc(get(ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64));
2143 break;
2144 }
2145 case AMDGPU::SI_RETURN: {
2146 const MachineFunction *MF = MBB.getParent();
2147 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
2148 const SIRegisterInfo *TRI = ST.getRegisterInfo();
2149 // Hiding the return address use with SI_RETURN may lead to extra kills in
2150 // the function and missing live-ins. We are fine in practice because callee
2151 // saved register handling ensures the register value is restored before
2152 // RET, but we need the undef flag here to appease the MachineVerifier
2153 // liveness checks.
2154 MachineInstrBuilder MIB =
2155 BuildMI(MBB, MI, DL, get(AMDGPU::S_SETPC_B64_return))
2156 .addReg(TRI->getReturnAddressReg(*MF), RegState::Undef);
2157
2158 MIB.copyImplicitOps(MI);
2159 MI.eraseFromParent();
2160 break;
2161 }
2162 }
2163 return true;
2164}
2165
2166std::pair<MachineInstr*, MachineInstr*>
2167SIInstrInfo::expandMovDPP64(MachineInstr &MI) const {
2168 assert(MI.getOpcode() == AMDGPU::V_MOV_B64_DPP_PSEUDO);
3. '?' condition is true (the ternary inside the expanded assert macro)
2169
2170 if (ST.hasMovB64() &&
4. Assuming the condition is false
2171 AMDGPU::isLegal64BitDPPControl(
2172 getNamedOperand(MI, AMDGPU::OpName::dpp_ctrl)->getImm())) {
2173 MI.setDesc(get(AMDGPU::V_MOV_B64_dpp));
2174 return std::make_pair(&MI, nullptr);
2175 }
2176
2177 MachineBasicBlock &MBB = *MI.getParent();
2178 DebugLoc DL = MBB.findDebugLoc(MI);
2179 MachineFunction *MF = MBB.getParent();
2180 MachineRegisterInfo &MRI = MF->getRegInfo();
2181 Register Dst = MI.getOperand(0).getReg();
2182 unsigned Part = 0;
2183 MachineInstr *Split[2];
2184
2185 for (auto Sub : { AMDGPU::sub0, AMDGPU::sub1 }) {
5. Assuming '__begin1' is equal to '__end1', i.e. the range-based for loop body never executes
2186 auto MovDPP = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_dpp));
2187 if (Dst.isPhysical()) {
2188 MovDPP.addDef(RI.getSubReg(Dst, Sub));
2189 } else {
2190 assert(MRI.isSSA());
2191 auto Tmp = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2192 MovDPP.addDef(Tmp);
2193 }
2194
2195 for (unsigned I = 1; I <= 2; ++I) { // old and src operands.
2196 const MachineOperand &SrcOp = MI.getOperand(I);
2197 assert(!SrcOp.isFPImm());
2198 if (SrcOp.isImm()) {
2199 APInt Imm(64, SrcOp.getImm());
2200 Imm.ashrInPlace(Part * 32);
2201 MovDPP.addImm(Imm.getLoBits(32).getZExtValue());
2202 } else {
2203 assert(SrcOp.isReg());
2204 Register Src = SrcOp.getReg();
2205 if (Src.isPhysical())
2206 MovDPP.addReg(RI.getSubReg(Src, Sub));
2207 else
2208 MovDPP.addReg(Src, SrcOp.isUndef() ? RegState::Undef : 0, Sub);
2209 }
2210 }
2211
2212 for (const MachineOperand &MO : llvm::drop_begin(MI.explicit_operands(), 3))
2213 MovDPP.addImm(MO.getImm());
2214
2215 Split[Part] = MovDPP;
2216 ++Part;
2217 }
2218
2219 if (Dst.isVirtual())
6. Assuming the condition is true
7. Taking true branch
2220 BuildMI(MBB, MI, DL, get(AMDGPU::REG_SEQUENCE), Dst)
2221 .addReg(Split[0]->getOperand(0).getReg())
8. Called C++ object pointer is uninitialized
2222 .addImm(AMDGPU::sub0)
2223 .addReg(Split[1]->getOperand(0).getReg())
2224 .addImm(AMDGPU::sub1);
2225
2226 MI.eraseFromParent();
2227 return std::make_pair(Split[0], Split[1]);
2228}
2229
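Note: this function contains the reported defect. MachineInstr *Split[2] is
declared without an initializer at line 2183; the analyzer then assumes at
step 5 that the range-based for over { AMDGPU::sub0, AMDGPU::sub1 } runs zero
times, so neither element of Split has been assigned when Split[0] is
dereferenced at line 2221. The braced list always has two elements, so that
path looks infeasible in practice, but the checker only sees an uninitialized
array reaching a dereference. A minimal sketch of the pattern with one
illustrative hardening (zero-initializing Split plus an invariant assert;
this is a sketch, not necessarily the fix the LLVM authors chose):

    #include <cassert>

    struct MI { int Opcode; };

    MI *expandPattern(MI Storage[2]) {
      MI *Split[2] = {nullptr, nullptr}; // initialized, unlike the original
      unsigned Part = 0;
      for (int Sub : {0, 1}) {           // the list always has two elements
        (void)Sub;
        Split[Part] = &Storage[Part];
        ++Part;
      }
      assert(Part == 2 && "both 32-bit halves must have been emitted");
      return Split[0]; // defined on every path the analyzer can construct
    }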
2230bool SIInstrInfo::swapSourceModifiers(MachineInstr &MI,
2231 MachineOperand &Src0,
2232 unsigned Src0OpName,
2233 MachineOperand &Src1,
2234 unsigned Src1OpName) const {
2235 MachineOperand *Src0Mods = getNamedOperand(MI, Src0OpName);
2236 if (!Src0Mods)
2237 return false;
2238
2239 MachineOperand *Src1Mods = getNamedOperand(MI, Src1OpName);
2240 assert(Src1Mods &&
2241        "All commutable instructions have both src0 and src1 modifiers");
2242
2243 int Src0ModsVal = Src0Mods->getImm();
2244 int Src1ModsVal = Src1Mods->getImm();
2245
2246 Src1Mods->setImm(Src0ModsVal);
2247 Src0Mods->setImm(Src1ModsVal);
2248 return true;
2249}
2250
2251static MachineInstr *swapRegAndNonRegOperand(MachineInstr &MI,
2252 MachineOperand &RegOp,
2253 MachineOperand &NonRegOp) {
2254 Register Reg = RegOp.getReg();
2255 unsigned SubReg = RegOp.getSubReg();
2256 bool IsKill = RegOp.isKill();
2257 bool IsDead = RegOp.isDead();
2258 bool IsUndef = RegOp.isUndef();
2259 bool IsDebug = RegOp.isDebug();
2260
2261 if (NonRegOp.isImm())
2262 RegOp.ChangeToImmediate(NonRegOp.getImm());
2263 else if (NonRegOp.isFI())
2264 RegOp.ChangeToFrameIndex(NonRegOp.getIndex());
2265 else if (NonRegOp.isGlobal()) {
2266 RegOp.ChangeToGA(NonRegOp.getGlobal(), NonRegOp.getOffset(),
2267 NonRegOp.getTargetFlags());
2268 } else
2269 return nullptr;
2270
2271 // Make sure we don't reinterpret a subreg index in the target flags.
2272 RegOp.setTargetFlags(NonRegOp.getTargetFlags());
2273
2274 NonRegOp.ChangeToRegister(Reg, false, false, IsKill, IsDead, IsUndef, IsDebug);
2275 NonRegOp.setSubReg(SubReg);
2276
2277 return &MI;
2278}
2279
2280MachineInstr *SIInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
2281 unsigned Src0Idx,
2282 unsigned Src1Idx) const {
2283 assert(!NewMI && "this should never be used");
2284
2285 unsigned Opc = MI.getOpcode();
2286 int CommutedOpcode = commuteOpcode(Opc);
2287 if (CommutedOpcode == -1)
2288 return nullptr;
2289
2290 assert(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) ==
2291            static_cast<int>(Src0Idx) &&
2292        AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) ==
2293            static_cast<int>(Src1Idx) &&
2294        "inconsistency with findCommutedOpIndices");
2295
2296 MachineOperand &Src0 = MI.getOperand(Src0Idx);
2297 MachineOperand &Src1 = MI.getOperand(Src1Idx);
2298
2299 MachineInstr *CommutedMI = nullptr;
2300 if (Src0.isReg() && Src1.isReg()) {
2301 if (isOperandLegal(MI, Src1Idx, &Src0)) {
2302 // Be sure to copy the source modifiers to the right place.
2303 CommutedMI
2304 = TargetInstrInfo::commuteInstructionImpl(MI, NewMI, Src0Idx, Src1Idx);
2305 }
2306
2307 } else if (Src0.isReg() && !Src1.isReg()) {
2308 // src0 should always be able to support any operand type, so no need to
2309 // check operand legality.
2310 CommutedMI = swapRegAndNonRegOperand(MI, Src0, Src1);
2311 } else if (!Src0.isReg() && Src1.isReg()) {
2312 if (isOperandLegal(MI, Src1Idx, &Src0))
2313 CommutedMI = swapRegAndNonRegOperand(MI, Src1, Src0);
2314 } else {
2315 // FIXME: Found two non registers to commute. This does happen.
2316 return nullptr;
2317 }
2318
2319 if (CommutedMI) {
2320 swapSourceModifiers(MI, Src0, AMDGPU::OpName::src0_modifiers,
2321 Src1, AMDGPU::OpName::src1_modifiers);
2322
2323 CommutedMI->setDesc(get(CommutedOpcode));
2324 }
2325
2326 return CommutedMI;
2327}
2328
2329// This needs to be implemented because the source modifiers may be inserted
2330// between the true commutable operands, and the base
2331// TargetInstrInfo::commuteInstruction uses it.
2332bool SIInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
2333 unsigned &SrcOpIdx0,
2334 unsigned &SrcOpIdx1) const {
2335 return findCommutedOpIndices(MI.getDesc(), SrcOpIdx0, SrcOpIdx1);
2336}
2337
2338bool SIInstrInfo::findCommutedOpIndices(MCInstrDesc Desc, unsigned &SrcOpIdx0,
2339 unsigned &SrcOpIdx1) const {
2340 if (!Desc.isCommutable())
2341 return false;
2342
2343 unsigned Opc = Desc.getOpcode();
2344 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
2345 if (Src0Idx == -1)
2346 return false;
2347
2348 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
2349 if (Src1Idx == -1)
2350 return false;
2351
2352 return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx);
2353}
2354
2355bool SIInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
2356 int64_t BrOffset) const {
2357 // BranchRelaxation should never have to check s_setpc_b64 because its dest
2358 // block is unanalyzable.
2359 assert(BranchOp != AMDGPU::S_SETPC_B64);
2360
2361 // Convert to dwords.
2362 BrOffset /= 4;
2363
2364 // The branch instructions do PC += signext(SIMM16 * 4) + 4, so the offset is
2365 // from the next instruction.
2366 BrOffset -= 1;
2367
2368 return isIntN(BranchOffsetBits, BrOffset);
2369}
2370
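Note: a worked example of the arithmetic above. The hardware branch performs
PC += signext(SIMM16 * 4) + 4, so the byte offset is first scaled to dwords
and then reduced by one dword because PC has already advanced past the 4-byte
branch. A minimal sketch, assuming BranchOffsetBits is 16 (the width the
SIMM16 field suggests; the actual constant is defined elsewhere in the class):

    #include <cstdint>

    bool branchOffsetInRange(int64_t BrOffset, unsigned BranchOffsetBits = 16) {
      BrOffset /= 4; // bytes -> dwords: the SIMM16 field is scaled by 4
      BrOffset -= 1; // measured from the instruction after the branch
      int64_t Limit = int64_t(1) << (BranchOffsetBits - 1);
      return BrOffset >= -Limit && BrOffset < Limit; // equivalent of isIntN
    }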
2371MachineBasicBlock *SIInstrInfo::getBranchDestBlock(
2372 const MachineInstr &MI) const {
2373 if (MI.getOpcode() == AMDGPU::S_SETPC_B64) {
2374 // This would be a difficult analysis to perform, but can always be legal so
2375 // there's no need to analyze it.
2376 return nullptr;
2377 }
2378
2379 return MI.getOperand(0).getMBB();
2380}
2381
2382void SIInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
2383 MachineBasicBlock &DestBB,
2384 MachineBasicBlock &RestoreBB,
2385 const DebugLoc &DL, int64_t BrOffset,
2386 RegScavenger *RS) const {
2387 assert(RS && "RegScavenger required for long branching");
2388 assert(MBB.empty() &&
2389        "new block should be inserted for expanding unconditional branch");
2390 assert(MBB.pred_size() == 1);
2391 assert(RestoreBB.empty() &&
2392        "restore block should be inserted for restoring clobbered registers");
2393
2394 MachineFunction *MF = MBB.getParent();
2395 MachineRegisterInfo &MRI = MF->getRegInfo();
2396
2397 // FIXME: Virtual register workaround for RegScavenger not working with empty
2398 // blocks.
2399 Register PCReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2400
2401 auto I = MBB.end();
2402
2403 // We need to compute the offset relative to the instruction immediately after
2404 // s_getpc_b64. Insert the pc arithmetic code before the last terminator.
2405 MachineInstr *GetPC = BuildMI(MBB, I, DL, get(AMDGPU::S_GETPC_B64), PCReg);
2406
2407 auto &MCCtx = MF->getContext();
2408 MCSymbol *PostGetPCLabel =
2409 MCCtx.createTempSymbol("post_getpc", /*AlwaysAddSuffix=*/true);
2410 GetPC->setPostInstrSymbol(*MF, PostGetPCLabel);
2411
2412 MCSymbol *OffsetLo =
2413 MCCtx.createTempSymbol("offset_lo", /*AlwaysAddSuffix=*/true);
2414 MCSymbol *OffsetHi =
2415 MCCtx.createTempSymbol("offset_hi", /*AlwaysAddSuffix=*/true);
2416 BuildMI(MBB, I, DL, get(AMDGPU::S_ADD_U32))
2417 .addReg(PCReg, RegState::Define, AMDGPU::sub0)
2418 .addReg(PCReg, 0, AMDGPU::sub0)
2419 .addSym(OffsetLo, MO_FAR_BRANCH_OFFSET);
2420 BuildMI(MBB, I, DL, get(AMDGPU::S_ADDC_U32))
2421 .addReg(PCReg, RegState::Define, AMDGPU::sub1)
2422 .addReg(PCReg, 0, AMDGPU::sub1)
2423 .addSym(OffsetHi, MO_FAR_BRANCH_OFFSET);
2424
2425 // Insert the indirect branch after the other terminator.
2426 BuildMI(&MBB, DL, get(AMDGPU::S_SETPC_B64))
2427 .addReg(PCReg);
2428
2429 // FIXME: If spilling is necessary, this will fail because this scavenger has
2430 // no emergency stack slots. It is non-trivial to spill in this situation,
2431 // because the restore code needs to be specially placed after the
2432 // jump. BranchRelaxation then needs to be made aware of the newly inserted
2433 // block.
2434 //
2435 // If a spill is needed for the pc register pair, we need to insert a spill
2436 // restore block right before the destination block, and insert a short branch
2437 // into the old destination block's fallthrough predecessor.
2438 // e.g.:
2439 //
2440 // s_cbranch_scc0 skip_long_branch:
2441 //
2442 // long_branch_bb:
2443 // spill s[8:9]
2444 // s_getpc_b64 s[8:9]
2445 // s_add_u32 s8, s8, restore_bb
2446 // s_addc_u32 s9, s9, 0
2447 // s_setpc_b64 s[8:9]
2448 //
2449 // skip_long_branch:
2450 // foo;
2451 //
2452 // .....
2453 //
2454 // dest_bb_fallthrough_predecessor:
2455 // bar;
2456 // s_branch dest_bb
2457 //
2458 // restore_bb:
2459 // restore s[8:9]
2460 // fallthrough dest_bb
2461 //
2462 // dest_bb:
2463 // buzz;
2464
2465 RS->enterBasicBlockEnd(MBB);
2466 Register Scav = RS->scavengeRegisterBackwards(
2467 AMDGPU::SReg_64RegClass, MachineBasicBlock::iterator(GetPC),
2468 /* RestoreAfter */ false, 0, /* AllowSpill */ false);
2469 if (Scav) {
2470 RS->setRegUsed(Scav);
2471 MRI.replaceRegWith(PCReg, Scav);
2472 MRI.clearVirtRegs();
2473 } else {
2474 // Since spilling an SGPR requires a VGPR as a staging register, we reuse the
2475 // temporary VGPR's slot for the SGPR spill.
2476 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
2477 const SIRegisterInfo *TRI = ST.getRegisterInfo();
2478 TRI->spillEmergencySGPR(GetPC, RestoreBB, AMDGPU::SGPR0_SGPR1, RS);
2479 MRI.replaceRegWith(PCReg, AMDGPU::SGPR0_SGPR1);
2480 MRI.clearVirtRegs();
2481 }
2482
2483 MCSymbol *DestLabel = Scav ? DestBB.getSymbol() : RestoreBB.getSymbol();
2484 // Now the distance can be defined.
2485 auto *Offset = MCBinaryExpr::createSub(
2486 MCSymbolRefExpr::create(DestLabel, MCCtx),
2487 MCSymbolRefExpr::create(PostGetPCLabel, MCCtx), MCCtx);
2488 // Add offset assignments.
2489 auto *Mask = MCConstantExpr::create(0xFFFFFFFFULL, MCCtx);
2490 OffsetLo->setVariableValue(MCBinaryExpr::createAnd(Offset, Mask, MCCtx));
2491 auto *ShAmt = MCConstantExpr::create(32, MCCtx);
2492 OffsetHi->setVariableValue(MCBinaryExpr::createAShr(Offset, ShAmt, MCCtx));
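// For illustration (the numbers are hypothetical): if dest_bb resolves to
// 0x123456789 bytes past the post_getpc label, the fixups become
//   offset_lo = 0x23456789   // Offset & 0xFFFFFFFF
//   offset_hi = 0x1          // Offset >> 32 (arithmetic shift)
// The arithmetic shift keeps a backward (negative) offset sign-extended in
// the high half consumed by s_addc_u32.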
2493}
2494
2495unsigned SIInstrInfo::getBranchOpcode(SIInstrInfo::BranchPredicate Cond) {
2496 switch (Cond) {
2497 case SIInstrInfo::SCC_TRUE:
2498 return AMDGPU::S_CBRANCH_SCC1;
2499 case SIInstrInfo::SCC_FALSE:
2500 return AMDGPU::S_CBRANCH_SCC0;
2501 case SIInstrInfo::VCCNZ:
2502 return AMDGPU::S_CBRANCH_VCCNZ;
2503 case SIInstrInfo::VCCZ:
2504 return AMDGPU::S_CBRANCH_VCCZ;
2505 case SIInstrInfo::EXECNZ:
2506 return AMDGPU::S_CBRANCH_EXECNZ;
2507 case SIInstrInfo::EXECZ:
2508 return AMDGPU::S_CBRANCH_EXECZ;
2509 default:
2510 llvm_unreachable("invalid branch predicate")::llvm::llvm_unreachable_internal("invalid branch predicate",
"llvm/lib/Target/AMDGPU/SIInstrInfo.cpp", 2510)
;
2511 }
2512}
2513
2514SIInstrInfo::BranchPredicate SIInstrInfo::getBranchPredicate(unsigned Opcode) {
2515 switch (Opcode) {
2516 case AMDGPU::S_CBRANCH_SCC0:
2517 return SCC_FALSE;
2518 case AMDGPU::S_CBRANCH_SCC1:
2519 return SCC_TRUE;
2520 case AMDGPU::S_CBRANCH_VCCNZ:
2521 return VCCNZ;
2522 case AMDGPU::S_CBRANCH_VCCZ:
2523 return VCCZ;
2524 case AMDGPU::S_CBRANCH_EXECNZ:
2525 return EXECNZ;
2526 case AMDGPU::S_CBRANCH_EXECZ:
2527 return EXECZ;
2528 default:
2529 return INVALID_BR;
2530 }
2531}
2532
2533bool SIInstrInfo::analyzeBranchImpl(MachineBasicBlock &MBB,
2534 MachineBasicBlock::iterator I,
2535 MachineBasicBlock *&TBB,
2536 MachineBasicBlock *&FBB,
2537 SmallVectorImpl<MachineOperand> &Cond,
2538 bool AllowModify) const {
2539 if (I->getOpcode() == AMDGPU::S_BRANCH) {
2540 // Unconditional Branch
2541 TBB = I->getOperand(0).getMBB();
2542 return false;
2543 }
2544
2545 MachineBasicBlock *CondBB = nullptr;
2546
2547 if (I->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) {
2548 CondBB = I->getOperand(1).getMBB();
2549 Cond.push_back(I->getOperand(0));
2550 } else {
2551 BranchPredicate Pred = getBranchPredicate(I->getOpcode());
2552 if (Pred == INVALID_BR)
2553 return true;
2554
2555 CondBB = I->getOperand(0).getMBB();
2556 Cond.push_back(MachineOperand::CreateImm(Pred));
2557 Cond.push_back(I->getOperand(1)); // Save the branch register.
2558 }
2559 ++I;
2560
2561 if (I == MBB.end()) {
2562 // Conditional branch followed by fall-through.
2563 TBB = CondBB;
2564 return false;
2565 }
2566
2567 if (I->getOpcode() == AMDGPU::S_BRANCH) {
2568 TBB = CondBB;
2569 FBB = I->getOperand(0).getMBB();
2570 return false;
2571 }
2572
2573 return true;
2574}
2575
2576bool SIInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
2577 MachineBasicBlock *&FBB,
2578 SmallVectorImpl<MachineOperand> &Cond,
2579 bool AllowModify) const {
2580 MachineBasicBlock::iterator I = MBB.getFirstTerminator();
2581 auto E = MBB.end();
2582 if (I == E)
2583 return false;
2584
2585 // Skip over the instructions that are artificially terminators for special
2586 // exec management.
2587 while (I != E && !I->isBranch() && !I->isReturn()) {
2588 switch (I->getOpcode()) {
2589 case AMDGPU::S_MOV_B64_term:
2590 case AMDGPU::S_XOR_B64_term:
2591 case AMDGPU::S_OR_B64_term:
2592 case AMDGPU::S_ANDN2_B64_term:
2593 case AMDGPU::S_AND_B64_term:
2594 case AMDGPU::S_MOV_B32_term:
2595 case AMDGPU::S_XOR_B32_term:
2596 case AMDGPU::S_OR_B32_term:
2597 case AMDGPU::S_ANDN2_B32_term:
2598 case AMDGPU::S_AND_B32_term:
2599 break;
2600 case AMDGPU::SI_IF:
2601 case AMDGPU::SI_ELSE:
2602 case AMDGPU::SI_KILL_I1_TERMINATOR:
2603 case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
2604 // FIXME: It's messy that these need to be considered here at all.
2605 return true;
2606 default:
2607 llvm_unreachable("unexpected non-branch terminator inst")::llvm::llvm_unreachable_internal("unexpected non-branch terminator inst"
, "llvm/lib/Target/AMDGPU/SIInstrInfo.cpp", 2607)
;
2608 }
2609
2610 ++I;
2611 }
2612
2613 if (I == E)
2614 return false;
2615
2616 return analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify);
2617}
2618
2619unsigned SIInstrInfo::removeBranch(MachineBasicBlock &MBB,
2620 int *BytesRemoved) const {
2621 unsigned Count = 0;
2622 unsigned RemovedSize = 0;
2623 for (MachineInstr &MI : llvm::make_early_inc_range(MBB.terminators())) {
2624 // Skip over artificial terminators when removing instructions.
2625 if (MI.isBranch() || MI.isReturn()) {
2626 RemovedSize += getInstSizeInBytes(MI);
2627 MI.eraseFromParent();
2628 ++Count;
2629 }
2630 }
2631
2632 if (BytesRemoved)
2633 *BytesRemoved = RemovedSize;
2634
2635 return Count;
2636}
2637
2638// Copy the flags onto the implicit condition register operand.
2639static void preserveCondRegFlags(MachineOperand &CondReg,
2640 const MachineOperand &OrigCond) {
2641 CondReg.setIsUndef(OrigCond.isUndef());
2642 CondReg.setIsKill(OrigCond.isKill());
2643}
2644
2645unsigned SIInstrInfo::insertBranch(MachineBasicBlock &MBB,
2646 MachineBasicBlock *TBB,
2647 MachineBasicBlock *FBB,
2648 ArrayRef<MachineOperand> Cond,
2649 const DebugLoc &DL,
2650 int *BytesAdded) const {
2651 if (!FBB && Cond.empty()) {
2652 BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
2653 .addMBB(TBB);
2654 if (BytesAdded)
2655 *BytesAdded = ST.hasOffset3fBug() ? 8 : 4;
2656 return 1;
2657 }
2658
2659 if (Cond.size() == 1 && Cond[0].isReg()) {
2660 BuildMI(&MBB, DL, get(AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO))
2661 .add(Cond[0])
2662 .addMBB(TBB);
2663 return 1;
2664 }
2665
2666 assert(TBB && Cond[0].isImm());
2667
2668 unsigned Opcode
2669 = getBranchOpcode(static_cast<BranchPredicate>(Cond[0].getImm()));
2670
2671 if (!FBB) {
2672 Cond[1].isUndef(); // FIXME: dead call; the result is unused.
2673 MachineInstr *CondBr =
2674 BuildMI(&MBB, DL, get(Opcode))
2675 .addMBB(TBB);
2676
2677 // Copy the flags onto the implicit condition register operand.
2678 preserveCondRegFlags(CondBr->getOperand(1), Cond[1]);
2679 fixImplicitOperands(*CondBr);
2680
2681 if (BytesAdded)
2682 *BytesAdded = ST.hasOffset3fBug() ? 8 : 4;
2683 return 1;
2684 }
2685
2686 assert(TBB && FBB);
2687
2688 MachineInstr *CondBr =
2689 BuildMI(&MBB, DL, get(Opcode))
2690 .addMBB(TBB);
2691 fixImplicitOperands(*CondBr);
2692 BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
2693 .addMBB(FBB);
2694
2695 MachineOperand &CondReg = CondBr->getOperand(1);
2696 CondReg.setIsUndef(Cond[1].isUndef());
2697 CondReg.setIsKill(Cond[1].isKill());
2698
2699 if (BytesAdded)
2700 *BytesAdded = ST.hasOffset3fBug() ? 16 : 8;
2701
2702 return 2;
2703}
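// A sketch of what the main cases above emit (block names hypothetical):
//   Cond empty, no FBB:               s_branch %bb.tbb
//   Cond = {SCC_TRUE, $scc}, no FBB:  s_cbranch_scc1 %bb.tbb
//   Cond = {SCC_TRUE, $scc} with FBB: s_cbranch_scc1 %bb.tbb
//                                     s_branch %bb.fbb
// Each branch is counted as 4 bytes, or 8 on subtargets with the offset-3f
// hardware bug.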
2704
2705bool SIInstrInfo::reverseBranchCondition(
2706 SmallVectorImpl<MachineOperand> &Cond) const {
2707 if (Cond.size() != 2) {
2708 return true;
2709 }
2710
2711 if (Cond[0].isImm()) {
2712 Cond[0].setImm(-Cond[0].getImm());
2713 return false;
2714 }
2715
2716 return true;
2717}
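// This relies on the BranchPredicate enumerators being defined as signed
// +/- pairs (e.g. SCC_TRUE and SCC_FALSE are negations of each other), so
// negating the immediate maps each predicate to its inverse, e.g.:
//   Cond = {SCC_TRUE, $scc}  -->  Cond = {SCC_FALSE, $scc}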
2718
2719bool SIInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
2720 ArrayRef<MachineOperand> Cond,
2721 Register DstReg, Register TrueReg,
2722 Register FalseReg, int &CondCycles,
2723 int &TrueCycles, int &FalseCycles) const {
2724 switch (Cond[0].getImm()) {
2725 case VCCNZ:
2726 case VCCZ: {
2727 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
2728 const TargetRegisterClass *RC = MRI.getRegClass(TrueReg);
2729 if (MRI.getRegClass(FalseReg) != RC)
2730 return false;
2731
2732 int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32;
2733 CondCycles = TrueCycles = FalseCycles = NumInsts; // ???
2734
2735 // Limit to equal cost for branch vs. N v_cndmask_b32s.
2736 return RI.hasVGPRs(RC) && NumInsts <= 6;
2737 }
2738 case SCC_TRUE:
2739 case SCC_FALSE: {
2740 // FIXME: We could insert for VGPRs if we could replace the original compare
2741 // with a vector one.
2742 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
2743 const TargetRegisterClass *RC = MRI.getRegClass(TrueReg);
2744 if (MRI.getRegClass(FalseReg) != RC)
2745 return false;
2746
2747 int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32;
2748
2749 // Widths that are a multiple of 8 bytes (64 bits) can use s_cselect_b64.
2750 if (NumInsts % 2 == 0)
2751 NumInsts /= 2;
2752
2753 CondCycles = TrueCycles = FalseCycles = NumInsts; // ???
2754 return RI.isSGPRClass(RC);
2755 }
2756 default:
2757 return false;
2758 }
2759}
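// Worked examples of the cost model above (the cycle counts are just the
// NumInsts placeholders): a 64-bit VGPR select under VCCNZ costs two
// v_cndmask_b32 instructions; a 64-bit SGPR select under SCC_TRUE halves to
// a single s_cselect_b64; a 512-bit VGPR select would need 16 v_cndmask_b32
// and is rejected by the NumInsts <= 6 limit.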
2760
2761void SIInstrInfo::insertSelect(MachineBasicBlock &MBB,
2762 MachineBasicBlock::iterator I, const DebugLoc &DL,
2763 Register DstReg, ArrayRef<MachineOperand> Cond,
2764 Register TrueReg, Register FalseReg) const {
2765 BranchPredicate Pred = static_cast<BranchPredicate>(Cond[0].getImm());
2766 if (Pred == VCCZ || Pred == SCC_FALSE) {
2767 Pred = static_cast<BranchPredicate>(-Pred);
2768 std::swap(TrueReg, FalseReg);
2769 }
2770
2771 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
2772 const TargetRegisterClass *DstRC = MRI.getRegClass(DstReg);
2773 unsigned DstSize = RI.getRegSizeInBits(*DstRC);
2774
2775 if (DstSize == 32) {
2776 MachineInstr *Select;
2777 if (Pred == SCC_TRUE) {
2778 Select = BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B32), DstReg)
2779 .addReg(TrueReg)
2780 .addReg(FalseReg);
2781 } else {
2782 // Instruction's operands are backwards from what is expected.
2783 Select = BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e32), DstReg)
2784 .addReg(FalseReg)
2785 .addReg(TrueReg);
2786 }
2787
2788 preserveCondRegFlags(Select->getOperand(3), Cond[1]);
2789 return;
2790 }
2791
2792 if (DstSize == 64 && Pred == SCC_TRUE) {
2793 MachineInstr *Select =
2794 BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), DstReg)
2795 .addReg(TrueReg)
2796 .addReg(FalseReg);
2797
2798 preserveCondRegFlags(Select->getOperand(3), Cond[1]);
2799 return;
2800 }
2801
2802 static const int16_t Sub0_15[] = {
2803 AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
2804 AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
2805 AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
2806 AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15,
2807 };
2808
2809 static const int16_t Sub0_15_64[] = {
2810 AMDGPU::sub0_sub1, AMDGPU::sub2_sub3,
2811 AMDGPU::sub4_sub5, AMDGPU::sub6_sub7,
2812 AMDGPU::sub8_sub9, AMDGPU::sub10_sub11,
2813 AMDGPU::sub12_sub13, AMDGPU::sub14_sub15,
2814 };
2815
2816 unsigned SelOp = AMDGPU::V_CNDMASK_B32_e32;
2817 const TargetRegisterClass *EltRC = &AMDGPU::VGPR_32RegClass;
2818 const int16_t *SubIndices = Sub0_15;
2819 int NElts = DstSize / 32;
2820
2821 // 64-bit select is only available for SALU.
2822 // TODO: Split 96-bit into 64-bit and 32-bit, not 3x 32-bit.
2823 if (Pred == SCC_TRUE) {
2824 if (NElts % 2) {
2825 SelOp = AMDGPU::S_CSELECT_B32;
2826 EltRC = &AMDGPU::SGPR_32RegClass;
2827 } else {
2828 SelOp = AMDGPU::S_CSELECT_B64;
2829 EltRC = &AMDGPU::SGPR_64RegClass;
2830 SubIndices = Sub0_15_64;
2831 NElts /= 2;
2832 }
2833 }
2834
2835 MachineInstrBuilder MIB = BuildMI(
2836 MBB, I, DL, get(AMDGPU::REG_SEQUENCE), DstReg);
2837
2838 I = MIB->getIterator();
2839
2840 SmallVector<Register, 8> Regs;
2841 for (int Idx = 0; Idx != NElts; ++Idx) {
2842 Register DstElt = MRI.createVirtualRegister(EltRC);
2843 Regs.push_back(DstElt);
2844
2845 unsigned SubIdx = SubIndices[Idx];
2846
2847 MachineInstr *Select;
2848 if (SelOp == AMDGPU::V_CNDMASK_B32_e32) {
2849 Select =
2850 BuildMI(MBB, I, DL, get(SelOp), DstElt)
2851 .addReg(FalseReg, 0, SubIdx)
2852 .addReg(TrueReg, 0, SubIdx);
2853 } else {
2854 Select =
2855 BuildMI(MBB, I, DL, get(SelOp), DstElt)
2856 .addReg(TrueReg, 0, SubIdx)
2857 .addReg(FalseReg, 0, SubIdx);
2858 }
2859
2860 preserveCondRegFlags(Select->getOperand(3), Cond[1]);
2861 fixImplicitOperands(*Select);
2862
2863 MIB.addReg(DstElt)
2864 .addImm(SubIdx);
2865 }
2866}
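// A sketch of the expansion for a 64-bit VGPR select on VCCNZ (virtual
// register names hypothetical):
//   %lo:vgpr_32 = V_CNDMASK_B32_e32 %false.sub0, %true.sub0, implicit $vcc
//   %hi:vgpr_32 = V_CNDMASK_B32_e32 %false.sub1, %true.sub1, implicit $vcc
//   %dst:vreg_64 = REG_SEQUENCE %lo, %subreg.sub0, %hi, %subreg.sub1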
2867
2868bool SIInstrInfo::isFoldableCopy(const MachineInstr &MI) {
2869 switch (MI.getOpcode()) {
2870 case AMDGPU::V_MOV_B32_e32:
2871 case AMDGPU::V_MOV_B32_e64:
2872 case AMDGPU::V_MOV_B64_PSEUDO:
2873 case AMDGPU::V_MOV_B64_e32:
2874 case AMDGPU::V_MOV_B64_e64:
2875 case AMDGPU::S_MOV_B32:
2876 case AMDGPU::S_MOV_B64:
2877 case AMDGPU::COPY:
2878 case AMDGPU::V_ACCVGPR_WRITE_B32_e64:
2879 case AMDGPU::V_ACCVGPR_READ_B32_e64:
2880 case AMDGPU::V_ACCVGPR_MOV_B32:
2881 return true;
2882 default:
2883 return false;
2884 }
2885}
2886
2887static constexpr unsigned ModifierOpNames[] = {
2888 AMDGPU::OpName::src0_modifiers, AMDGPU::OpName::src1_modifiers,
2889 AMDGPU::OpName::src2_modifiers, AMDGPU::OpName::clamp,
2890 AMDGPU::OpName::omod};
2891
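// Removes the source-modifier, clamp, and omod operands from \p MI. The
// names are visited in reverse operand order because getNamedOperandIdx
// returns positions for the full operand list; erasing from the highest
// index down keeps the lower indices valid as operands are removed.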
2892void SIInstrInfo::removeModOperands(MachineInstr &MI) const {
2893 unsigned Opc = MI.getOpcode();
2894 for (unsigned Name : reverse(ModifierOpNames))
2895 MI.removeOperand(AMDGPU::getNamedOperandIdx(Opc, Name));
2896}
2897
2898bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
2899 Register Reg, MachineRegisterInfo *MRI) const {
2900 if (!MRI->hasOneNonDBGUse(Reg))
2901 return false;
2902
2903 switch (DefMI.getOpcode()) {
2904 default:
2905 return false;
2906 case AMDGPU::S_MOV_B64:
2907 // TODO: We could fold 64-bit immediates, but this gets complicated
2908 // when there are sub-registers.
2909 return false;
2910
2911 case AMDGPU::V_MOV_B32_e32:
2912 case AMDGPU::S_MOV_B32:
2913 case AMDGPU::V_ACCVGPR_WRITE_B32_e64:
2914 break;
2915 }
2916
2917 const MachineOperand *ImmOp = getNamedOperand(DefMI, AMDGPU::OpName::src0);
2918 assert(ImmOp);
2919 // FIXME: We could handle FrameIndex values here.
2920 if (!ImmOp->isImm())
2921 return false;
2922
2923 unsigned Opc = UseMI.getOpcode();
2924 if (Opc == AMDGPU::COPY) {
2925 Register DstReg = UseMI.getOperand(0).getReg();
2926 bool Is16Bit = getOpSize(UseMI, 0) == 2;
2927 bool isVGPRCopy = RI.isVGPR(*MRI, DstReg);
2928 unsigned NewOpc = isVGPRCopy ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
2929 APInt Imm(32, ImmOp->getImm());
2930
2931 if (UseMI.getOperand(1).getSubReg() == AMDGPU::hi16)
2932 Imm = Imm.ashr(16);
2933
2934 if (RI.isAGPR(*MRI, DstReg)) {
2935 if (!isInlineConstant(Imm))
2936 return false;
2937 NewOpc = AMDGPU::V_ACCVGPR_WRITE_B32_e64;
2938 }
2939
2940 if (Is16Bit) {
2941 if (isVGPRCopy)
2942 return false; // Do not clobber vgpr_hi16
2943
2944 if (DstReg.isVirtual() && UseMI.getOperand(0).getSubReg() != AMDGPU::lo16)
2945 return false;
2946
2947 UseMI.getOperand(0).setSubReg(0);
2948 if (DstReg.isPhysical()) {
2949 DstReg = RI.get32BitRegister(DstReg);
2950 UseMI.getOperand(0).setReg(DstReg);
2951 }
2952 assert(UseMI.getOperand(1).getReg().isVirtual());
2953 }
2954
2955 UseMI.setDesc(get(NewOpc));
2956 UseMI.getOperand(1).ChangeToImmediate(Imm.getSExtValue());
2957 UseMI.addImplicitDefUseOperands(*UseMI.getParent()->getParent());
2958 return true;
2959 }
2960
2961 if (Opc == AMDGPU::V_MAD_F32_e64 || Opc == AMDGPU::V_MAC_F32_e64 ||
2962 Opc == AMDGPU::V_MAD_F16_e64 || Opc == AMDGPU::V_MAC_F16_e64 ||
2963 Opc == AMDGPU::V_FMA_F32_e64 || Opc == AMDGPU::V_FMAC_F32_e64 ||
2964 Opc == AMDGPU::V_FMA_F16_e64 || Opc == AMDGPU::V_FMAC_F16_e64) {
2965 // Don't fold if we are using source or output modifiers. The new VOP2
2966 // instructions don't have them.
2967 if (hasAnyModifiersSet(UseMI))
2968 return false;
2969
2970 // If this is a free constant, there's no reason to do this.
2971 // TODO: We could fold this here instead of letting SIFoldOperands do it
2972 // later.
2973 MachineOperand *Src0 = getNamedOperand(UseMI, AMDGPU::OpName::src0);
2974
2975 // Any src operand can be used for the legality check.
2976 if (isInlineConstant(UseMI, *Src0, *ImmOp))
2977 return false;
2978
2979 bool IsF32 = Opc == AMDGPU::V_MAD_F32_e64 || Opc == AMDGPU::V_MAC_F32_e64 ||
2980 Opc == AMDGPU::V_FMA_F32_e64 || Opc == AMDGPU::V_FMAC_F32_e64;
2981 bool IsFMA = Opc == AMDGPU::V_FMA_F32_e64 || Opc == AMDGPU::V_FMAC_F32_e64 ||
2982 Opc == AMDGPU::V_FMA_F16_e64 || Opc == AMDGPU::V_FMAC_F16_e64;
2983 MachineOperand *Src1 = getNamedOperand(UseMI, AMDGPU::OpName::src1);
2984 MachineOperand *Src2 = getNamedOperand(UseMI, AMDGPU::OpName::src2);
2985
2986 // Multiplied part is the constant: Use v_madmk_{f16, f32}.
2987 // We should only expect these to be on src0 due to canonicalization.
2988 if (Src0->isReg() && Src0->getReg() == Reg) {
2989 if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg())))
2990 return false;
2991
2992 if (!Src2->isReg() || RI.isSGPRClass(MRI->getRegClass(Src2->getReg())))
2993 return false;
2994
2995 unsigned NewOpc =
2996 IsFMA ? (IsF32 ? AMDGPU::V_FMAMK_F32 : AMDGPU::V_FMAMK_F16)
2997 : (IsF32 ? AMDGPU::V_MADMK_F32 : AMDGPU::V_MADMK_F16);
2998 if (pseudoToMCOpcode(NewOpc) == -1)
2999 return false;
3000
3001 // We need to swap operands 0 and 1 since the madmk constant is at operand 1.
3002
3003 const int64_t Imm = ImmOp->getImm();
3004
3005 // FIXME: This would be a lot easier if we could return a new instruction
3006 // instead of having to modify in place.
3007
3008 Register Src1Reg = Src1->getReg();
3009 unsigned Src1SubReg = Src1->getSubReg();
3010 Src0->setReg(Src1Reg);
3011 Src0->setSubReg(Src1SubReg);
3012 Src0->setIsKill(Src1->isKill());
3013
3014 if (Opc == AMDGPU::V_MAC_F32_e64 ||
3015 Opc == AMDGPU::V_MAC_F16_e64 ||
3016 Opc == AMDGPU::V_FMAC_F32_e64 ||
3017 Opc == AMDGPU::V_FMAC_F16_e64)
3018 UseMI.untieRegOperand(
3019 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
3020
3021 Src1->ChangeToImmediate(Imm);
3022
3023 removeModOperands(UseMI);
3024 UseMI.setDesc(get(NewOpc));
3025
3026 bool DeleteDef = MRI->use_nodbg_empty(Reg);
3027 if (DeleteDef)
3028 DefMI.eraseFromParent();
3029
3030 return true;
3031 }
3032
3033 // Added part is the constant: Use v_madak_{f16, f32}.
3034 if (Src2->isReg() && Src2->getReg() == Reg) {
3035 // Not allowed to use constant bus for another operand.
3036 // We can however allow an inline immediate as src0.
3037 bool Src0Inlined = false;
3038 if (Src0->isReg()) {
3039 // Try to inline the constant if possible.
3040 // If the def is a move of an immediate and this is its only use,
3041 // we save a VGPR here.
3042 MachineInstr *Def = MRI->getUniqueVRegDef(Src0->getReg());
3043 if (Def && Def->isMoveImmediate() &&
3044 isInlineConstant(Def->getOperand(1)) &&
3045 MRI->hasOneUse(Src0->getReg())) {
3046 Src0->ChangeToImmediate(Def->getOperand(1).getImm());
3047 Src0Inlined = true;
3048 } else if ((Src0->getReg().isPhysical() &&
3049 (ST.getConstantBusLimit(Opc) <= 1 &&
3050 RI.isSGPRClass(RI.getPhysRegClass(Src0->getReg())))) ||
3051 (Src0->getReg().isVirtual() &&
3052 (ST.getConstantBusLimit(Opc) <= 1 &&
3053 RI.isSGPRClass(MRI->getRegClass(Src0->getReg())))))
3054 return false;
3055 // VGPR is okay as Src0 - fallthrough
3056 }
3057
3058 if (Src1->isReg() && !Src0Inlined) {
3059 // We have one slot for an inlinable constant so far - try to fill it.
3060 MachineInstr *Def = MRI->getUniqueVRegDef(Src1->getReg());
3061 if (Def && Def->isMoveImmediate() &&
3062 isInlineConstant(Def->getOperand(1)) &&
3063 MRI->hasOneUse(Src1->getReg()) &&
3064 commuteInstruction(UseMI)) {
3065 Src0->ChangeToImmediate(Def->getOperand(1).getImm());
3066 } else if ((Src1->getReg().isPhysical() &&
3067 RI.isSGPRClass(RI.getPhysRegClass(Src1->getReg()))) ||
3068 (Src1->getReg().isVirtual() &&
3069 RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))))
3070 return false;
3071 // VGPR is okay as Src1 - fallthrough
3072 }
3073
3074 unsigned NewOpc =
3075 IsFMA ? (IsF32 ? AMDGPU::V_FMAAK_F32 : AMDGPU::V_FMAAK_F16)
3076 : (IsF32 ? AMDGPU::V_MADAK_F32 : AMDGPU::V_MADAK_F16);
3077 if (pseudoToMCOpcode(NewOpc) == -1)
3078 return false;
3079
3080 const int64_t Imm = ImmOp->getImm();
3081
3082 // FIXME: This would be a lot easier if we could return a new instruction
3083 // instead of having to modify in place.
3084
3085 if (Opc == AMDGPU::V_MAC_F32_e64 ||
3086 Opc == AMDGPU::V_MAC_F16_e64 ||
3087 Opc == AMDGPU::V_FMAC_F32_e64 ||
3088 Opc == AMDGPU::V_FMAC_F16_e64)
3089 UseMI.untieRegOperand(
3090 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
3091
3092 // ChangeToImmediate adds Src2 back to the instruction.
3093 Src2->ChangeToImmediate(Imm);
3094
3095 // These come before src2.
3096 removeModOperands(UseMI);
3097 UseMI.setDesc(get(NewOpc));
3098 // UseMI may have been commuted, leaving an SGPR as src1. If so, the folded
3099 // constant and the SGPR would both need the constant bus, which is illegal,
3100 // so re-legalize the operands.
3101 legalizeOperands(UseMI);
3102
3103 bool DeleteDef = MRI->use_nodbg_empty(Reg);
3104 if (DeleteDef)
3105 DefMI.eraseFromParent();
3106
3107 return true;
3108 }
3109 }
3110
3111 return false;
3112}
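// A sketch of the madmk fold above (values hypothetical): given the
// single-use def
//   %k:vgpr_32 = V_MOV_B32_e32 1077936128      ; 3.0f, not inlinable
//   %d:vgpr_32 = V_MAC_F32_e64 %k, %v1, %d
// the multiplied constant is folded into the instruction itself:
//   %d:vgpr_32 = V_MADMK_F32 %v1, 1077936128, %d
// and the dead V_MOV is erased. The madak path is analogous, with the
// constant taken from src2 (the addend).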
3113
3114static bool
3115memOpsHaveSameBaseOperands(ArrayRef<const MachineOperand *> BaseOps1,
3116 ArrayRef<const MachineOperand *> BaseOps2) {
3117 if (BaseOps1.size() != BaseOps2.size())
3118 return false;
3119 for (size_t I = 0, E = BaseOps1.size(); I < E; ++I) {
3120 if (!BaseOps1[I]->isIdenticalTo(*BaseOps2[I]))
3121 return false;
3122 }
3123 return true;
3124}
3125
3126static bool offsetsDoNotOverlap(int WidthA, int OffsetA,
3127 int WidthB, int OffsetB) {
3128 int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
3129 int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
3130 int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
3131 return LowOffset + LowWidth <= HighOffset;
3132}
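// Worked example: two 4-byte accesses at offsets 0 and 4 do not overlap
// (LowOffset + LowWidth == 4 <= HighOffset == 4), while an 8-byte access at
// offset 0 and a 4-byte access at offset 4 do overlap (0 + 8 > 4).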
3133
3134bool SIInstrInfo::checkInstOffsetsDoNotOverlap(const MachineInstr &MIa,
3135 const MachineInstr &MIb) const {
3136 SmallVector<const MachineOperand *, 4> BaseOps0, BaseOps1;
3137 int64_t Offset0, Offset1;
3138 unsigned Dummy0, Dummy1;
3139 bool Offset0IsScalable, Offset1IsScalable;
3140 if (!getMemOperandsWithOffsetWidth(MIa, BaseOps0, Offset0, Offset0IsScalable,
3141 Dummy0, &RI) ||
3142 !getMemOperandsWithOffsetWidth(MIb, BaseOps1, Offset1, Offset1IsScalable,
3143 Dummy1, &RI))
3144 return false;
3145
3146 if (!memOpsHaveSameBaseOperands(BaseOps0, BaseOps1))
3147 return false;
3148
3149 if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand()) {
3150 // FIXME: Handle ds_read2 / ds_write2.
3151 return false;
3152 }
3153 unsigned Width0 = MIa.memoperands().front()->getSize();
3154 unsigned Width1 = MIb.memoperands().front()->getSize();
3155 return offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1);
3156}
3157
3158bool SIInstrInfo::areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
3159 const MachineInstr &MIb) const {
3160 assert(MIa.mayLoadOrStore() &&
3161 "MIa must load from or modify a memory location");
3162 assert(MIb.mayLoadOrStore() &&
3163 "MIb must load from or modify a memory location");
3164
3165 if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects())
3166 return false;
3167
3168 // XXX - Can we relax this between address spaces?
3169 if (MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
3170 return false;
3171
3172 // TODO: Should we check the address space from the MachineMemOperand? That
3173 // would allow us to distinguish objects we know don't alias based on the
3174 // underlying address space, even if it was lowered to a different one,
3175 // e.g. private accesses lowered to use MUBUF instructions on a scratch
3176 // buffer.
3177 if (isDS(MIa)) {
3178 if (isDS(MIb))
3179 return checkInstOffsetsDoNotOverlap(MIa, MIb);
3180
3181 return !isFLAT(MIb) || isSegmentSpecificFLAT(MIb);
3182 }
3183
3184 if (isMUBUF(MIa) || isMTBUF(MIa)) {
3185 if (isMUBUF(MIb) || isMTBUF(MIb))
3186 return checkInstOffsetsDoNotOverlap(MIa, MIb);
3187
3188 return !isFLAT(MIb) && !isSMRD(MIb);
3189 }
3190
3191 if (isSMRD(MIa)) {
3192 if (isSMRD(MIb))
3193 return checkInstOffsetsDoNotOverlap(MIa, MIb);
3194
3195 return !isFLAT(MIb) && !isMUBUF(MIb) && !isMTBUF(MIb);
3196 }
3197
3198 if (isFLAT(MIa)) {
3199 if (isFLAT(MIb))
3200 return checkInstOffsetsDoNotOverlap(MIa, MIb);
3201
3202 return false;
3203 }
3204
3205 return false;
3206}
3207
3208static bool getFoldableImm(Register Reg, const MachineRegisterInfo &MRI,
3209 int64_t &Imm, MachineInstr **DefMI = nullptr) {
3210 if (Reg.isPhysical())
3211 return false;
3212 auto *Def = MRI.getUniqueVRegDef(Reg);
3213 if (Def && SIInstrInfo::isFoldableCopy(*Def) && Def->getOperand(1).isImm()) {
3214 Imm = Def->getOperand(1).getImm();
3215 if (DefMI)
3216 *DefMI = Def;
3217 return true;
3218 }
3219 return false;
3220}
3221
3222static bool getFoldableImm(const MachineOperand *MO, int64_t &Imm,
3223 MachineInstr **DefMI = nullptr) {
3224 if (!MO->isReg())
3225 return false;
3226 const MachineFunction *MF = MO->getParent()->getParent()->getParent();
3227 const MachineRegisterInfo &MRI = MF->getRegInfo();
3228 return getFoldableImm(MO->getReg(), MRI, Imm, DefMI);
3229}
3230
3231static void updateLiveVariables(LiveVariables *LV, MachineInstr &MI,
3232 MachineInstr &NewMI) {
3233 if (LV) {
3234 unsigned NumOps = MI.getNumOperands();
3235 for (unsigned I = 1; I < NumOps; ++I) {
3236 MachineOperand &Op = MI.getOperand(I);
3237 if (Op.isReg() && Op.isKill())
3238 LV->replaceKillInstruction(Op.getReg(), MI, NewMI);
3239 }
3240 }
3241}
3242
3243MachineInstr *SIInstrInfo::convertToThreeAddress(MachineInstr &MI,
3244 LiveVariables *LV,
3245 LiveIntervals *LIS) const {
3246 MachineBasicBlock &MBB = *MI.getParent();
3247 unsigned Opc = MI.getOpcode();
3248
3249 // Handle MFMA.
3250 int NewMFMAOpc = AMDGPU::getMFMAEarlyClobberOp(Opc);
3251 if (NewMFMAOpc != -1) {
3252 MachineInstrBuilder MIB =
3253 BuildMI(MBB, MI, MI.getDebugLoc(), get(NewMFMAOpc));
3254 for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I)
3255 MIB.add(MI.getOperand(I));
3256 updateLiveVariables(LV, MI, *MIB);
3257 if (LIS)
3258 LIS->ReplaceMachineInstrInMaps(MI, *MIB);
3259 return MIB;
3260 }
3261
3262 if (SIInstrInfo::isWMMA(MI)) {
3263 unsigned NewOpc = AMDGPU::mapWMMA2AddrTo3AddrOpcode(MI.getOpcode());
3264 MachineInstrBuilder MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
3265 .setMIFlags(MI.getFlags());
3266 for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I)
3267 MIB->addOperand(MI.getOperand(I));
3268
3269 updateLiveVariables(LV, MI, *MIB);
3270 if (LIS)
3271 LIS->ReplaceMachineInstrInMaps(MI, *MIB);
3272
3273 return MIB;
3274 }
3275
3276 // Handle MAC/FMAC.
3277 bool IsF16 = Opc == AMDGPU::V_MAC_F16_e32 || Opc == AMDGPU::V_MAC_F16_e64 ||
3278 Opc == AMDGPU::V_FMAC_F16_e32 || Opc == AMDGPU::V_FMAC_F16_e64;
3279 bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e32 || Opc == AMDGPU::V_FMAC_F32_e64 ||
3280 Opc == AMDGPU::V_FMAC_LEGACY_F32_e32 ||
3281 Opc == AMDGPU::V_FMAC_LEGACY_F32_e64 ||
3282 Opc == AMDGPU::V_FMAC_F16_e32 || Opc == AMDGPU::V_FMAC_F16_e64 ||
3283 Opc == AMDGPU::V_FMAC_F64_e32 || Opc == AMDGPU::V_FMAC_F64_e64;
3284 bool IsF64 = Opc == AMDGPU::V_FMAC_F64_e32 || Opc == AMDGPU::V_FMAC_F64_e64;
3285 bool IsLegacy = Opc == AMDGPU::V_MAC_LEGACY_F32_e32 ||
3286 Opc == AMDGPU::V_MAC_LEGACY_F32_e64 ||
3287 Opc == AMDGPU::V_FMAC_LEGACY_F32_e32 ||
3288 Opc == AMDGPU::V_FMAC_LEGACY_F32_e64;
3289 bool Src0Literal = false;
3290
3291 switch (Opc) {
3292 default:
3293 return nullptr;
3294 case AMDGPU::V_MAC_F16_e64:
3295 case AMDGPU::V_FMAC_F16_e64:
3296 case AMDGPU::V_MAC_F32_e64:
3297 case AMDGPU::V_MAC_LEGACY_F32_e64:
3298 case AMDGPU::V_FMAC_F32_e64:
3299 case AMDGPU::V_FMAC_LEGACY_F32_e64:
3300 case AMDGPU::V_FMAC_F64_e64:
3301 break;
3302 case AMDGPU::V_MAC_F16_e32:
3303 case AMDGPU::V_FMAC_F16_e32:
3304 case AMDGPU::V_MAC_F32_e32:
3305 case AMDGPU::V_MAC_LEGACY_F32_e32:
3306 case AMDGPU::V_FMAC_F32_e32:
3307 case AMDGPU::V_FMAC_LEGACY_F32_e32:
3308 case AMDGPU::V_FMAC_F64_e32: {
3309 int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
3310 AMDGPU::OpName::src0);
3311 const MachineOperand *Src0 = &MI.getOperand(Src0Idx);
3312 if (!Src0->isReg() && !Src0->isImm())
3313 return nullptr;
3314
3315 if (Src0->isImm() && !isInlineConstant(MI, Src0Idx, *Src0))
3316 Src0Literal = true;
3317
3318 break;
3319 }
3320 }
3321
3322 MachineInstrBuilder MIB;
3323 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
3324 const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0);
3325 const MachineOperand *Src0Mods =
3326 getNamedOperand(MI, AMDGPU::OpName::src0_modifiers);
3327 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1);
3328 const MachineOperand *Src1Mods =
3329 getNamedOperand(MI, AMDGPU::OpName::src1_modifiers);
3330 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);
3331 const MachineOperand *Src2Mods =
3332 getNamedOperand(MI, AMDGPU::OpName::src2_modifiers);
3333 const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp);
3334 const MachineOperand *Omod = getNamedOperand(MI, AMDGPU::OpName::omod);
3335
3336 if (!Src0Mods && !Src1Mods && !Src2Mods && !Clamp && !Omod && !IsF64 &&
3337 !IsLegacy &&
3338 // If we have an SGPR input, we will violate the constant bus restriction.
3339 (ST.getConstantBusLimit(Opc) > 1 || !Src0->isReg() ||
3340 !RI.isSGPRReg(MBB.getParent()->getRegInfo(), Src0->getReg()))) {
3341 MachineInstr *DefMI;
3342 const auto killDef = [&]() -> void {
3343 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3344 // The only user is the instruction which will be killed.
3345 Register DefReg = DefMI->getOperand(0).getReg();
3346 if (!MRI.hasOneNonDBGUse(DefReg))
3347 return;
3348 // We cannot just remove DefMI here; the calling pass would crash.
3349 DefMI->setDesc(get(AMDGPU::IMPLICIT_DEF));
3350 for (unsigned I = DefMI->getNumOperands() - 1; I != 0; --I)
3351 DefMI->removeOperand(I);
3352 if (LV)
3353 LV->getVarInfo(DefReg).AliveBlocks.clear();
3354 };
3355
3356 int64_t Imm;
3357 if (!Src0Literal && getFoldableImm(Src2, Imm, &DefMI)) {
3358 unsigned NewOpc =
3359 IsFMA ? (IsF16 ? AMDGPU::V_FMAAK_F16 : AMDGPU::V_FMAAK_F32)
3360 : (IsF16 ? AMDGPU::V_MADAK_F16 : AMDGPU::V_MADAK_F32);
3361 if (pseudoToMCOpcode(NewOpc) != -1) {
3362 MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
3363 .add(*Dst)
3364 .add(*Src0)
3365 .add(*Src1)
3366 .addImm(Imm);
3367 updateLiveVariables(LV, MI, *MIB);
3368 if (LIS)
3369 LIS->ReplaceMachineInstrInMaps(MI, *MIB);
3370 killDef();
3371 return MIB;
3372 }
3373 }
3374 unsigned NewOpc = IsFMA
3375 ? (IsF16 ? AMDGPU::V_FMAMK_F16 : AMDGPU::V_FMAMK_F32)
3376 : (IsF16 ? AMDGPU::V_MADMK_F16 : AMDGPU::V_MADMK_F32);
3377 if (!Src0Literal && getFoldableImm(Src1, Imm, &DefMI)) {
3378 if (pseudoToMCOpcode(NewOpc) != -1) {
3379 MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
3380 .add(*Dst)
3381 .add(*Src0)
3382 .addImm(Imm)
3383 .add(*Src2);
3384 updateLiveVariables(LV, MI, *MIB);
3385 if (LIS)
3386 LIS->ReplaceMachineInstrInMaps(MI, *MIB);
3387 killDef();
3388 return MIB;
3389 }
3390 }
3391 if (Src0Literal || getFoldableImm(Src0, Imm, &DefMI)) {
3392 if (Src0Literal) {
3393 Imm = Src0->getImm();
3394 DefMI = nullptr;
3395 }
3396 if (pseudoToMCOpcode(NewOpc) != -1 &&
3397 isOperandLegal(
3398 MI, AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::src0),
3399 Src1)) {
3400 MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
3401 .add(*Dst)
3402 .add(*Src1)
3403 .addImm(Imm)
3404 .add(*Src2);
3405 updateLiveVariables(LV, MI, *MIB);
3406 if (LIS)
3407 LIS->ReplaceMachineInstrInMaps(MI, *MIB);
3408 if (DefMI)
3409 killDef();
3410 return MIB;
3411 }
3412 }
3413 }
3414
3415 // VOP2 mac/fmac with a literal operand cannot be converted to VOP3 mad/fma
3416 // if VOP3 does not allow a literal operand.
3417 if (Src0Literal && !ST.hasVOP3Literal())
3418 return nullptr;
3419
3420 unsigned NewOpc = IsFMA ? IsF16 ? AMDGPU::V_FMA_F16_gfx9_e64
3421 : IsF64 ? AMDGPU::V_FMA_F64_e64
3422 : IsLegacy
3423 ? AMDGPU::V_FMA_LEGACY_F32_e64
3424 : AMDGPU::V_FMA_F32_e64
3425 : IsF16 ? AMDGPU::V_MAD_F16_e64
3426 : IsLegacy ? AMDGPU::V_MAD_LEGACY_F32_e64
3427 : AMDGPU::V_MAD_F32_e64;
3428 if (pseudoToMCOpcode(NewOpc) == -1)
3429 return nullptr;
3430
3431 MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
3432 .add(*Dst)
3433 .addImm(Src0Mods ? Src0Mods->getImm() : 0)
3434 .add(*Src0)
3435 .addImm(Src1Mods ? Src1Mods->getImm() : 0)
3436 .add(*Src1)
3437 .addImm(Src2Mods ? Src2Mods->getImm() : 0)
3438 .add(*Src2)
3439 .addImm(Clamp ? Clamp->getImm() : 0)
3440 .addImm(Omod ? Omod->getImm() : 0);
3441 updateLiveVariables(LV, MI, *MIB);
3442 if (LIS)
3443 LIS->ReplaceMachineInstrInMaps(MI, *MIB);
3444 return MIB;
3445}
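// A sketch of the fallback conversion at the end (no modifiers or foldable
// immediates; register names hypothetical):
//   %d:vgpr_32 = V_FMAC_F32_e32 %a, %b, %d    ; two-address, %d tied
// becomes the untied three-address form
//   %d:vgpr_32 = V_FMA_F32_e64 0, %a, 0, %b, 0, %d, 0, 0
// where the interleaved zeros are the src modifiers, clamp, and omod.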
3446
3447 // It's not generally safe to move VALU instructions across these, since doing
3448 // so can change a direct register access into a base-indexed one.
3449// XXX - Why isn't hasSideEffects sufficient for these?
3450static bool changesVGPRIndexingMode(const MachineInstr &MI) {
3451 switch (MI.getOpcode()) {
3452 case AMDGPU::S_SET_GPR_IDX_ON:
3453 case AMDGPU::S_SET_GPR_IDX_MODE:
3454 case AMDGPU::S_SET_GPR_IDX_OFF:
3455 return true;
3456 default:
3457 return false;
3458 }
3459}
3460
3461bool SIInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
3462 const MachineBasicBlock *MBB,
3463 const MachineFunction &MF) const {
3464 // This skips the check for SP writes that the base implementation performs;
3465 // that check was apparently added only due to compile-time concerns.
3466 //
3467 // TODO: Do we really want this barrier? It triggers unnecessary hazard nops
3468 // but is probably avoidable.
3469
3470 // Copied from base implementation.
3471 // Terminators and labels can't be scheduled around.
3472 if (MI.isTerminator() || MI.isPosition())
3473 return true;
3474
3475 // INLINEASM_BR can jump to another block
3476 if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
3477 return true;
3478
3479 if (MI.getOpcode() == AMDGPU::SCHED_BARRIER && MI.getOperand(0).getImm() == 0)
3480 return true;
3481
3482 // Target-independent instructions do not have an implicit-use of EXEC, even
3483 // when they operate on VGPRs. Treating EXEC modifications as scheduling
3484 // boundaries prevents incorrect movements of such instructions.
3485 return MI.modifiesRegister(AMDGPU::EXEC, &RI) ||
3486 MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32 ||
3487 MI.getOpcode() == AMDGPU::S_SETREG_B32 ||
3488 MI.getOpcode() == AMDGPU::S_SETPRIO ||
3489 changesVGPRIndexingMode(MI);
3490}
3491
3492bool SIInstrInfo::isAlwaysGDS(uint16_t Opcode) const {
3493 return Opcode == AMDGPU::DS_ORDERED_COUNT ||
3494 Opcode == AMDGPU::DS_GWS_INIT ||
3495 Opcode == AMDGPU::DS_GWS_SEMA_V ||
3496 Opcode == AMDGPU::DS_GWS_SEMA_BR ||
3497 Opcode == AMDGPU::DS_GWS_SEMA_P ||
3498 Opcode == AMDGPU::DS_GWS_SEMA_RELEASE_ALL ||
3499 Opcode == AMDGPU::DS_GWS_BARRIER;
3500}
3501
3502bool SIInstrInfo::modifiesModeRegister(const MachineInstr &MI) {
3503 // Skip the full operand and register alias search modifiesRegister
3504 // does. Only a handful of instructions touch MODE, it is only ever an
3505 // implicit def, and it does not alias any other registers.
3506 if (const MCPhysReg *ImpDef = MI.getDesc().getImplicitDefs()) {
3507 for (; ImpDef && *ImpDef; ++ImpDef) {
3508 if (*ImpDef == AMDGPU::MODE)
3509 return true;
3510 }
3511 }
3512
3513 return false;
3514}
3515
3516bool SIInstrInfo::hasUnwantedEffectsWhenEXECEmpty(const MachineInstr &MI) const {
3517 unsigned Opcode = MI.getOpcode();
3518
3519 if (MI.mayStore() && isSMRD(MI))
3520 return true; // scalar store or atomic
3521
3522 // This will terminate the function when other lanes may need to continue.
3523 if (MI.isReturn())
3524 return true;
3525
3526 // These instructions cause shader I/O that may cause hardware lockups
3527 // when executed with an empty EXEC mask.
3528 //
3529 // Note: exp with VM = DONE = 0 is automatically skipped by hardware when
3530 // EXEC = 0, but checking for that case here seems not worth it
3531 // given the typical code patterns.
3532 if (Opcode == AMDGPU::S_SENDMSG || Opcode == AMDGPU::S_SENDMSGHALT ||
3533 isEXP(Opcode) ||
3534 Opcode == AMDGPU::DS_ORDERED_COUNT || Opcode == AMDGPU::S_TRAP ||
3535 Opcode == AMDGPU::DS_GWS_INIT || Opcode == AMDGPU::DS_GWS_BARRIER)
3536 return true;
3537
3538 if (MI.isCall() || MI.isInlineAsm())
3539 return true; // conservative assumption
3540
3541 // A mode change is a scalar operation that influences vector instructions.
3542 if (modifiesModeRegister(MI))
3543 return true;
3544
3545 // These are like SALU instructions in terms of effects, so it's questionable
3546 // whether we should return true for those.
3547 //
3548 // However, executing them with EXEC = 0 causes them to operate on undefined
3549 // data, which we avoid by returning true here.
3550 if (Opcode == AMDGPU::V_READFIRSTLANE_B32 ||
3551 Opcode == AMDGPU::V_READLANE_B32 || Opcode == AMDGPU::V_WRITELANE_B32)
3552 return true;
3553
3554 return false;
3555}
3556
3557bool SIInstrInfo::mayReadEXEC(const MachineRegisterInfo &MRI,
3558 const MachineInstr &MI) const {
3559 if (MI.isMetaInstruction())
3560 return false;
3561
3562 // This won't read exec if this is an SGPR->SGPR copy.
3563 if (MI.isCopyLike()) {
3564 if (!RI.isSGPRReg(MRI, MI.getOperand(0).getReg()))
3565 return true;
3566
3567 // Make sure this isn't copying exec as a normal operand
3568 return MI.readsRegister(AMDGPU::EXEC, &RI);
3569 }
3570
3571 // Make a conservative assumption about the callee.
3572 if (MI.isCall())
3573 return true;
3574
3575 // Be conservative with any unhandled generic opcodes.
3576 if (!isTargetSpecificOpcode(MI.getOpcode()))
3577 return true;
3578
3579 return !isSALU(MI) || MI.readsRegister(AMDGPU::EXEC, &RI);
3580}
3581
3582bool SIInstrInfo::isInlineConstant(const APInt &Imm) const {
3583 switch (Imm.getBitWidth()) {
3584 case 1: // This likely will be a condition code mask.
3585 return true;
3586
3587 case 32:
3588 return AMDGPU::isInlinableLiteral32(Imm.getSExtValue(),
3589 ST.hasInv2PiInlineImm());
3590 case 64:
3591 return AMDGPU::isInlinableLiteral64(Imm.getSExtValue(),
3592 ST.hasInv2PiInlineImm());
3593 case 16:
3594 return ST.has16BitInsts() &&
3595 AMDGPU::isInlinableLiteral16(Imm.getSExtValue(),
3596 ST.hasInv2PiInlineImm());
3597 default:
3598 llvm_unreachable("invalid bitwidth")::llvm::llvm_unreachable_internal("invalid bitwidth", "llvm/lib/Target/AMDGPU/SIInstrInfo.cpp"
, 3598)
;
3599 }
3600}
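// For reference, the inlinable set is roughly: the integers -16..64; the
// floats 0.0, +-0.5, +-1.0, +-2.0, +-4.0; and 1/(2*pi) on subtargets where
// hasInv2PiInlineImm() is true. Anything else must be encoded as a literal.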
3601
3602bool SIInstrInfo::isInlineConstant(const MachineOperand &MO,
3603 uint8_t OperandType) const {
3604 if (!MO.isImm() ||
3605 OperandType < AMDGPU::OPERAND_SRC_FIRST ||
3606 OperandType > AMDGPU::OPERAND_SRC_LAST)
3607 return false;
3608
3609 // MachineOperand provides no way to tell the true operand size, since it only
3610 // records a 64-bit value. We need to know the size to determine if a 32-bit
3611 // floating point immediate bit pattern is legal for an integer immediate. It
3612 // would be for any 32-bit integer operand, but would not be for a 64-bit one.
3613
3614 int64_t Imm = MO.getImm();
3615 switch (OperandType) {
3616 case AMDGPU::OPERAND_REG_IMM_INT32:
3617 case AMDGPU::OPERAND_REG_IMM_FP32:
3618 case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
3619 case AMDGPU::OPERAND_REG_INLINE_C_INT32:
3620 case AMDGPU::OPERAND_REG_INLINE_C_FP32:
3621 case AMDGPU::OPERAND_REG_IMM_V2FP32:
3622 case AMDGPU::OPERAND_REG_INLINE_C_V2FP32:
3623 case AMDGPU::OPERAND_REG_IMM_V2INT32:
3624 case AMDGPU::OPERAND_REG_INLINE_C_V2INT32:
3625 case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
3626 case AMDGPU::OPERAND_REG_INLINE_AC_FP32: {
3627 int32_t Trunc = static_cast<int32_t>(Imm);
3628 return AMDGPU::isInlinableLiteral32(Trunc, ST.hasInv2PiInlineImm());
3629 }
3630 case AMDGPU::OPERAND_REG_IMM_INT64:
3631 case AMDGPU::OPERAND_REG_IMM_FP64:
3632 case AMDGPU::OPERAND_REG_INLINE_C_INT64:
3633 case AMDGPU::OPERAND_REG_INLINE_C_FP64:
3634 case AMDGPU::OPERAND_REG_INLINE_AC_FP64:
3635 return AMDGPU::isInlinableLiteral64(MO.getImm(),
3636 ST.hasInv2PiInlineImm());
3637 case AMDGPU::OPERAND_REG_IMM_INT16:
3638 case AMDGPU::OPERAND_REG_INLINE_C_INT16:
3639 case AMDGPU::OPERAND_REG_INLINE_AC_INT16:
3640 // We would expect inline immediates to not be concerned with an integer/fp
3641 // distinction. However, in the case of 16-bit integer operations, the
3642 // "floating point" values appear to not work. It seems read the low 16-bits
3643 // of 32-bit immediates, which happens to always work for the integer
3644 // values.
3645 //
3646 // See llvm bugzilla 46302.
3647 //
3648 // TODO: Theoretically we could use op-sel to use the high bits of the
3649 // 32-bit FP values.
3650 return AMDGPU::isInlinableIntLiteral(Imm);
3651 case AMDGPU::OPERAND_REG_IMM_V2INT16:
3652 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
3653 case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
3654 // This suffers the same problem as the scalar 16-bit cases.
3655 return AMDGPU::isInlinableIntLiteralV216(Imm);
3656 case AMDGPU::OPERAND_REG_IMM_FP16:
3657 case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
3658 case AMDGPU::OPERAND_REG_INLINE_C_FP16:
3659 case AMDGPU::OPERAND_REG_INLINE_AC_FP16: {
3660 if (isInt<16>(Imm) || isUInt<16>(Imm)) {
3661 // A few special case instructions have 16-bit operands on subtargets
3662 // where 16-bit instructions are not legal.
3663 // TODO: Do the 32-bit immediates work? We shouldn't really need to handle
3664 // constants in these cases
3665 int16_t Trunc = static_cast<int16_t>(Imm);
3666 return ST.has16BitInsts() &&
3667 AMDGPU::isInlinableLiteral16(Trunc, ST.hasInv2PiInlineImm());
3668 }
3669
3670 return false;
3671 }
3672 case AMDGPU::OPERAND_REG_IMM_V2FP16:
3673 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
3674 case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16: {
3675 uint32_t Trunc = static_cast<uint32_t>(Imm);
3676 return AMDGPU::isInlinableLiteralV216(Trunc, ST.hasInv2PiInlineImm());
3677 }
3678 case AMDGPU::OPERAND_KIMM32:
3679 case AMDGPU::OPERAND_KIMM16:
3680 return false;
3681 default:
3682 llvm_unreachable("invalid bitwidth")::llvm::llvm_unreachable_internal("invalid bitwidth", "llvm/lib/Target/AMDGPU/SIInstrInfo.cpp"
, 3682)
;
3683 }
3684}
3685
3686bool SIInstrInfo::isLiteralConstantLike(const MachineOperand &MO,
3687 const MCOperandInfo &OpInfo) const {
3688 switch (MO.getType()) {
3689 case MachineOperand::MO_Register:
3690 return false;
3691 case MachineOperand::MO_Immediate:
3692 return !isInlineConstant(MO, OpInfo);
3693 case MachineOperand::MO_FrameIndex:
3694 case MachineOperand::MO_MachineBasicBlock:
3695 case MachineOperand::MO_ExternalSymbol:
3696 case MachineOperand::MO_GlobalAddress:
3697 case MachineOperand::MO_MCSymbol:
3698 return true;
3699 default:
3700 llvm_unreachable("unexpected operand type")::llvm::llvm_unreachable_internal("unexpected operand type", "llvm/lib/Target/AMDGPU/SIInstrInfo.cpp"
, 3700)
;
3701 }
3702}
3703
3704static bool compareMachineOp(const MachineOperand &Op0,
3705 const MachineOperand &Op1) {
3706 if (Op0.getType() != Op1.getType())
3707 return false;
3708
3709 switch (Op0.getType()) {
3710 case MachineOperand::MO_Register:
3711 return Op0.getReg() == Op1.getReg();
3712 case MachineOperand::MO_Immediate:
3713 return Op0.getImm() == Op1.getImm();
3714 default:
3715 llvm_unreachable("Didn't expect to be comparing these operand types")::llvm::llvm_unreachable_internal("Didn't expect to be comparing these operand types"
, "llvm/lib/Target/AMDGPU/SIInstrInfo.cpp", 3715)
;
3716 }
3717}
3718
3719bool SIInstrInfo::isImmOperandLegal(const MachineInstr &MI, unsigned OpNo,
3720 const MachineOperand &MO) const {
3721 const MCInstrDesc &InstDesc = MI.getDesc();
3722 const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo];
3723
3724 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI() || MO.isGlobal());
3725
3726 if (OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE)
3727 return true;
3728
3729 if (OpInfo.RegClass < 0)
3730 return false;
3731
3732 if (MO.isImm() && isInlineConstant(MO, OpInfo)) {
3733 if (isMAI(MI) && ST.hasMFMAInlineLiteralBug() &&
3734 OpNo == (unsigned)AMDGPU::getNamedOperandIdx(MI.getOpcode(),
3735 AMDGPU::OpName::src2))
3736 return false;
3737 return RI.opCanUseInlineConstant(OpInfo.OperandType);
3738 }
3739
3740 if (!RI.opCanUseLiteralConstant(OpInfo.OperandType))
3741 return false;
3742
3743 if (!isVOP3(MI) || !AMDGPU::isSISrcOperand(InstDesc, OpNo))
3744 return true;
3745
3746 return ST.hasVOP3Literal();
3747}
3748
3749bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const {
3750 // GFX90A does not have V_MUL_LEGACY_F32_e32.
3751 if (Opcode == AMDGPU::V_MUL_LEGACY_F32_e64 && ST.hasGFX90AInsts())
3752 return false;
3753
3754 int Op32 = AMDGPU::getVOPe32(Opcode);
3755 if (Op32 == -1)
3756 return false;
3757
3758 return pseudoToMCOpcode(Op32) != -1;
3759}
3760
3761bool SIInstrInfo::hasModifiers(unsigned Opcode) const {
3762 // The src0_modifier operand is present on all instructions
3763 // that have modifiers.
3764
3765 return AMDGPU::getNamedOperandIdx(Opcode,
3766 AMDGPU::OpName::src0_modifiers) != -1;
3767}
3768
3769bool SIInstrInfo::hasModifiersSet(const MachineInstr &MI,
3770 unsigned OpName) const {
3771 const MachineOperand *Mods = getNamedOperand(MI, OpName);
3772 return Mods && Mods->getImm();
3773}
3774
3775bool SIInstrInfo::hasAnyModifiersSet(const MachineInstr &MI) const {
3776 return any_of(ModifierOpNames,
3777 [&](unsigned Name) { return hasModifiersSet(MI, Name); });
3778}
3779
3780bool SIInstrInfo::canShrink(const MachineInstr &MI,
3781 const MachineRegisterInfo &MRI) const {
3782 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);
3783 // Can't shrink instruction with three operands.
3784 if (Src2) {
3785 switch (MI.getOpcode()) {
3786 default: return false;
3787
3788 case AMDGPU::V_ADDC_U32_e64:
3789 case AMDGPU::V_SUBB_U32_e64:
3790 case AMDGPU::V_SUBBREV_U32_e64: {
3791 const MachineOperand *Src1
3792 = getNamedOperand(MI, AMDGPU::OpName::src1);
3793 if (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()))
3794 return false;
3795 // Additional verification is needed for sdst/src2.
3796 return true;
3797 }
3798 case AMDGPU::V_MAC_F16_e64:
3799 case AMDGPU::V_MAC_F32_e64:
3800 case AMDGPU::V_MAC_LEGACY_F32_e64:
3801 case AMDGPU::V_FMAC_F16_e64:
3802 case AMDGPU::V_FMAC_F32_e64:
3803 case AMDGPU::V_FMAC_F64_e64:
3804 case AMDGPU::V_FMAC_LEGACY_F32_e64:
3805 if (!Src2->isReg() || !RI.isVGPR(MRI, Src2->getReg()) ||
3806 hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers))
3807 return false;
3808 break;
3809
3810 case AMDGPU::V_CNDMASK_B32_e64:
3811 break;
3812 }
3813 }
3814
3815 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1);
3816 if (Src1 && (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()) ||
3817 hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers)))
3818 return false;
3819
3820 // We don't need to check src0, all input types are legal, so just make sure
3821 // src0 isn't using any modifiers.
3822 if (hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers))
3823 return false;
3824
3825 // Can it be shrunk to a valid 32 bit opcode?
3826 if (!hasVALU32BitEncoding(MI.getOpcode()))
3827 return false;
3828
3829 // Check output modifiers
3830 return !hasModifiersSet(MI, AMDGPU::OpName::omod) &&
3831 !hasModifiersSet(MI, AMDGPU::OpName::clamp);
3832}
3833
3834// Set VCC operand with all flags from \p Orig, except for setting it as
3835// implicit.
3836static void copyFlagsToImplicitVCC(MachineInstr &MI,
3837 const MachineOperand &Orig) {
3838
3839 for (MachineOperand &Use : MI.implicit_operands()) {
3840 if (Use.isUse() &&
3841 (Use.getReg() == AMDGPU::VCC || Use.getReg() == AMDGPU::VCC_LO)) {
3842 Use.setIsUndef(Orig.isUndef());
3843 Use.setIsKill(Orig.isKill());
3844 return;
3845 }
3846 }
3847}
3848
3849MachineInstr *SIInstrInfo::buildShrunkInst(MachineInstr &MI,
3850 unsigned Op32) const {
3851 MachineBasicBlock *MBB = MI.getParent();
3852 MachineInstrBuilder Inst32 =
3853 BuildMI(*MBB, MI, MI.getDebugLoc(), get(Op32))
3854 .setMIFlags(MI.getFlags());
3855
3856 // Add the dst operand if the 32-bit encoding also has an explicit $vdst.
3857 // For VOPC instructions, this is replaced by an implicit def of vcc.
3858 if (AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::vdst) != -1) {
3859 // dst
3860 Inst32.add(MI.getOperand(0));
3861 } else if (AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::sdst) != -1) {
3862 // VOPCX instructions won't be writing to an explicit dst, so this should
3863 // not fail for these instructions.
3864 assert(((MI.getOperand(0).getReg() == AMDGPU::VCC) ||
3865 (MI.getOperand(0).getReg() == AMDGPU::VCC_LO)) &&
3866 "Unexpected case");
3867 }
3868
3869 Inst32.add(*getNamedOperand(MI, AMDGPU::OpName::src0));
3870
3871 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1);
3872 if (Src1)
3873 Inst32.add(*Src1);
3874
3875 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);
3876
3877 if (Src2) {
3878 int Op32Src2Idx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::src2);
3879 if (Op32Src2Idx != -1) {
3880 Inst32.add(*Src2);
3881 } else {
3882 // In the case of V_CNDMASK_B32_e32, the explicit operand src2 is
3883 // replaced with an implicit read of vcc or vcc_lo. The implicit read
3884 // of vcc was already added during the initial BuildMI, but we
3885 // 1) may need to change vcc to vcc_lo to preserve the original register
3886 // 2) have to preserve the original flags.
3887 fixImplicitOperands(*Inst32);
3888 copyFlagsToImplicitVCC(*Inst32, *Src2);
3889 }
3890 }
3891
3892 return Inst32;
3893}
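// A sketch of the shrink (register names hypothetical):
//   %d:vgpr_32 = V_ADD_F32_e64 0, %a, 0, %b, 0, 0
// becomes
//   %d:vgpr_32 = V_ADD_F32_e32 %a, %b
// and a VOPC compare like V_CMP_EQ_U32_e64 drops its explicit SGPR result in
// favor of the implicit vcc def of the _e32 encoding.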
3894
3895bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI,
3896 const MachineOperand &MO,
3897 const MCOperandInfo &OpInfo) const {
3898 // Literal constants use the constant bus.
3899 //if (isLiteralConstantLike(MO, OpInfo))
3900 // return true;
3901 if (MO.isImm())
3902 return !isInlineConstant(MO, OpInfo);
3903
3904 if (!MO.isReg())
3905 return true; // Misc other operands like FrameIndex
3906
3907 if (!MO.isUse())
3908 return false;
3909
3910 if (MO.getReg().isVirtual())
3911 return RI.isSGPRClass(MRI.getRegClass(MO.getReg()));
3912
3913 // Null is free
3914 if (MO.getReg() == AMDGPU::SGPR_NULL || MO.getReg() == AMDGPU::SGPR_NULL64)
3915 return false;
3916
3917 // SGPRs use the constant bus
3918 if (MO.isImplicit()) {
3919 return MO.getReg() == AMDGPU::M0 ||
3920 MO.getReg() == AMDGPU::VCC ||
3921 MO.getReg() == AMDGPU::VCC_LO;
3922 } else {
3923 return AMDGPU::SReg_32RegClass.contains(MO.getReg()) ||
3924 AMDGPU::SReg_64RegClass.contains(MO.getReg());
3925 }
3926}
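// Examples of the classification above: a non-inline literal such as
// 0x12345678, an SGPR source, and implicit reads of m0 or vcc all use the
// constant bus; a VGPR source, an inline constant like 1.0, and the null
// register do not.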
3927
3928static Register findImplicitSGPRRead(const MachineInstr &MI) {
3929 for (const MachineOperand &MO : MI.implicit_operands()) {
3930 // We only care about reads.
3931 if (MO.isDef())
3932 continue;
3933
3934 switch (MO.getReg()) {
3935 case AMDGPU::VCC:
3936 case AMDGPU::VCC_LO:
3937 case AMDGPU::VCC_HI:
3938 case AMDGPU::M0:
3939 case AMDGPU::FLAT_SCR:
3940 return MO.getReg();
3941
3942 default:
3943 break;
3944 }
3945 }
3946
3947 return AMDGPU::NoRegister;
3948}
3949
3950static bool shouldReadExec(const MachineInstr &MI) {
3951 if (SIInstrInfo::isVALU(MI)) {
3952 switch (MI.getOpcode()) {
3953 case AMDGPU::V_READLANE_B32:
3954 case AMDGPU::V_WRITELANE_B32:
3955 return false;
3956 }
3957
3958 return true;
3959 }
3960
3961 if (MI.isPreISelOpcode() ||
3962 SIInstrInfo::isGenericOpcode(MI.getOpcode()) ||
3963 SIInstrInfo::isSALU(MI) ||
3964 SIInstrInfo::isSMRD(MI))
3965 return false;
3966
3967 return true;
3968}
3969
3970static bool isSubRegOf(const SIRegisterInfo &TRI,
3971 const MachineOperand &SuperVec,
3972 const MachineOperand &SubReg) {
3973 if (SubReg.getReg().isPhysical())
3974 return TRI.isSubRegister(SuperVec.getReg(), SubReg.getReg());
3975
3976 return SubReg.getSubReg() != AMDGPU::NoSubRegister &&
3977 SubReg.getReg() == SuperVec.getReg();
3978}
3979
3980bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
3981 StringRef &ErrInfo) const {
3982 uint16_t Opcode = MI.getOpcode();
3983 if (SIInstrInfo::isGenericOpcode(MI.getOpcode()))
3984 return true;
3985
3986 const MachineFunction *MF = MI.getParent()->getParent();
3987 const MachineRegisterInfo &MRI = MF->getRegInfo();
3988
3989 int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
3990 int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
3991 int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
3992 int Src3Idx = -1;
3993 if (Src0Idx == -1) {
3994 // VOPD V_DUAL_* instructions use different operand names.
3995 Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0X);
3996 Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vsrc1X);
3997 Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0Y);
3998 Src3Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vsrc1Y);
3999 }
4000
4001 // Make sure the number of operands is correct.
4002 const MCInstrDesc &Desc = get(Opcode);
4003 if (!Desc.isVariadic() &&
4004 Desc.getNumOperands() != MI.getNumExplicitOperands()) {
4005 ErrInfo = "Instruction has wrong number of operands.";
4006 return false;
4007 }
4008
4009 if (MI.isInlineAsm()) {
4010 // Verify register classes for inlineasm constraints.
4011 for (unsigned I = InlineAsm::MIOp_FirstOperand, E = MI.getNumOperands();
4012 I != E; ++I) {
4013 const TargetRegisterClass *RC = MI.getRegClassConstraint(I, this, &RI);
4014 if (!RC)
4015 continue;
4016
4017 const MachineOperand &Op = MI.getOperand(I);
4018 if (!Op.isReg())
4019 continue;
4020
4021 Register Reg = Op.getReg();
4022 if (!Reg.isVirtual() && !RC->contains(Reg)) {
4023 ErrInfo = "inlineasm operand has incorrect register class.";
4024 return false;
4025 }
4026 }
4027
4028 return true;
4029 }
4030
4031 if (isMIMG(MI) && MI.memoperands_empty() && MI.mayLoadOrStore()) {
4032 ErrInfo = "missing memory operand from MIMG instruction.";
4033 return false;
4034 }
4035
4036 // Make sure the register classes are correct.
4037 for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) {
4038 const MachineOperand &MO = MI.getOperand(i);
4039 if (MO.isFPImm()) {
4040 ErrInfo = "FPImm Machine Operands are not supported. ISel should bitcast "
4041 "all fp values to integers.";
4042 return false;
4043 }
4044
4045 int RegClass = Desc.OpInfo[i].RegClass;
4046
4047 switch (Desc.OpInfo[i].OperandType) {
4048 case MCOI::OPERAND_REGISTER:
4049 if (MI.getOperand(i).isImm() || MI.getOperand(i).isGlobal()) {
4050 ErrInfo = "Illegal immediate value for operand.";
4051 return false;
4052 }
4053 break;
4054 case AMDGPU::OPERAND_REG_IMM_INT32:
4055 case AMDGPU::OPERAND_REG_IMM_FP32:
4056 case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
4057 case AMDGPU::OPERAND_REG_IMM_V2FP32:
4058 break;
4059 case AMDGPU::OPERAND_REG_INLINE_C_INT32:
4060 case AMDGPU::OPERAND_REG_INLINE_C_FP32:
4061 case AMDGPU::OPERAND_REG_INLINE_C_INT64:
4062 case AMDGPU::OPERAND_REG_INLINE_C_FP64:
4063 case AMDGPU::OPERAND_REG_INLINE_C_INT16:
4064 case AMDGPU::OPERAND_REG_INLINE_C_FP16:
4065 case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
4066 case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
4067 case AMDGPU::OPERAND_REG_INLINE_AC_INT16:
4068 case AMDGPU::OPERAND_REG_INLINE_AC_FP16:
4069 case AMDGPU::OPERAND_REG_INLINE_AC_FP64: {
4070 if (!MO.isReg() && (!MO.isImm() || !isInlineConstant(MI, i))) {
4071 ErrInfo = "Illegal immediate value for operand.";
4072 return false;
4073 }
4074 break;
4075 }
4076 case MCOI::OPERAND_IMMEDIATE:
4077 case AMDGPU::OPERAND_KIMM32:
4078 // Check if this operand is an immediate.
4079 // FrameIndex operands will be replaced by immediates, so they are
4080 // allowed.
4081 if (!MI.getOperand(i).isImm() && !MI.getOperand(i).isFI()) {
4082 ErrInfo = "Expected immediate, but got non-immediate";
4083 return false;
4084 }
4085 [[fallthrough]];
4086 default:
4087 continue;
4088 }
4089
4090 if (!MO.isReg())
4091 continue;
4092 Register Reg = MO.getReg();
4093 if (!Reg)
4094 continue;
4095
4096 // FIXME: Ideally we would have separate instruction definitions with the
4097 // aligned register constraint.
4098 // FIXME: We do not verify inline asm operands, but custom inline asm
4099 // verification is broken anyway
4100 if (ST.needsAlignedVGPRs()) {
4101 const TargetRegisterClass *RC = RI.getRegClassForReg(MRI, Reg);
4102 if (RI.hasVectorRegisters(RC) && MO.getSubReg()) {
4103 const TargetRegisterClass *SubRC =
4104 RI.getSubRegClass(RC, MO.getSubReg());
4105 RC = RI.getCompatibleSubRegClass(RC, SubRC, MO.getSubReg());
4106 if (RC)
4107 RC = SubRC;
4108 }
4109
4110 // Check that this is the aligned version of the class.
4111 if (!RC || !RI.isProperlyAlignedRC(*RC)) {
4112 ErrInfo = "Subtarget requires even aligned vector registers";
4113 return false;
4114 }
4115 }
4116
4117 if (RegClass != -1) {
4118 if (Reg.isVirtual())
4119 continue;
4120
4121 const TargetRegisterClass *RC = RI.getRegClass(RegClass);
4122 if (!RC->contains(Reg)) {
4123 ErrInfo = "Operand has incorrect register class.";
4124 return false;
4125 }
4126 }
4127 }
4128
4129 // Verify SDWA
4130 if (isSDWA(MI)) {
4131 if (!ST.hasSDWA()) {
4132 ErrInfo = "SDWA is not supported on this target";
4133 return false;
4134 }
4135
4136 int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst);
4137
4138 for (int OpIdx : {DstIdx, Src0Idx, Src1Idx, Src2Idx}) {
4139 if (OpIdx == -1)
4140 continue;
4141 const MachineOperand &MO = MI.getOperand(OpIdx);
4142
4143 if (!ST.hasSDWAScalar()) {
4144 // Only VGPRs on VI
4145 if (!MO.isReg() || !RI.hasVGPRs(RI.getRegClassForReg(MRI, MO.getReg()))) {
4146 ErrInfo = "Only VGPRs allowed as operands in SDWA instructions on VI";
4147 return false;
4148 }
4149 } else {
4150 // No immediates on GFX9
4151 if (!MO.isReg()) {
4152 ErrInfo =
4153 "Only reg allowed as operands in SDWA instructions on GFX9+";
4154 return false;
4155 }
4156 }
4157 }
4158
4159 if (!ST.hasSDWAOmod()) {
4160 // No omod allowed on VI
4161 const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod);
4162 if (OMod != nullptr &&
4163 (!OMod->isImm() || OMod->getImm() != 0)) {
4164 ErrInfo = "OMod not allowed in SDWA instructions on VI";
4165 return false;
4166 }
4167 }
4168
4169 uint16_t BasicOpcode = AMDGPU::getBasicFromSDWAOp(Opcode);
4170 if (isVOPC(BasicOpcode)) {
4171 if (!ST.hasSDWASdst() && DstIdx != -1) {
4172 // Only vcc allowed as dst on VI for VOPC
4173 const MachineOperand &Dst = MI.getOperand(DstIdx);
4174 if (!Dst.isReg() || Dst.getReg() != AMDGPU::VCC) {
4175 ErrInfo = "Only VCC allowed as dst in SDWA instructions on VI";
4176 return false;
4177 }
4178 } else if (!ST.hasSDWAOutModsVOPC()) {
4179 // No clamp allowed on GFX9 for VOPC
4180 const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp);
4181 if (Clamp && (!Clamp->isImm() || Clamp->getImm() != 0)) {
4182 ErrInfo = "Clamp not allowed in VOPC SDWA instructions on VI";
4183 return false;
4184 }
4185
4186 // No omod allowed on GFX9 for VOPC
4187 const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod);
4188 if (OMod && (!OMod->isImm() || OMod->getImm() != 0)) {
4189 ErrInfo = "OMod not allowed in VOPC SDWA instructions on VI";
4190 return false;
4191 }
4192 }
4193 }
4194
4195 const MachineOperand *DstUnused = getNamedOperand(MI, AMDGPU::OpName::dst_unused);
4196 if (DstUnused && DstUnused->isImm() &&
4197 DstUnused->getImm() == AMDGPU::SDWA::UNUSED_PRESERVE) {
4198 const MachineOperand &Dst = MI.getOperand(DstIdx);
4199 if (!Dst.isReg() || !Dst.isTied()) {
4200 ErrInfo = "Dst register should have tied register";
4201 return false;
4202 }
4203
4204 const MachineOperand &TiedMO =
4205 MI.getOperand(MI.findTiedOperandIdx(DstIdx));
4206 if (!TiedMO.isReg() || !TiedMO.isImplicit() || !TiedMO.isUse()) {
4207 ErrInfo =
4208 "Dst register should be tied to implicit use of preserved register";
4209 return false;
4210 } else if (TiedMO.getReg().isPhysical() &&
4211 Dst.getReg() != TiedMO.getReg()) {
4212 ErrInfo = "Dst register should use same physical register as preserved";
4213 return false;
4214 }
4215 }
4216 }
4217
4218 // Verify MIMG
4219 if (isMIMG(MI.getOpcode()) && !MI.mayStore()) {
4220 // Ensure that the return type used is large enough for all the options
4221 // being used. TFE/LWE require an extra result register.
4222 const MachineOperand *DMask = getNamedOperand(MI, AMDGPU::OpName::dmask);
4223 if (DMask) {
4224 uint64_t DMaskImm = DMask->getImm();
4225 uint32_t RegCount =
4226 isGather4(MI.getOpcode()) ? 4 : countPopulation(DMaskImm);
4227 const MachineOperand *TFE = getNamedOperand(MI, AMDGPU::OpName::tfe);
4228 const MachineOperand *LWE = getNamedOperand(MI, AMDGPU::OpName::lwe);
4229 const MachineOperand *D16 = getNamedOperand(MI, AMDGPU::OpName::d16);
4230
4231 // Adjust for packed 16 bit values
4232 if (D16 && D16->getImm() && !ST.hasUnpackedD16VMem())
4233 RegCount >>= 1;
4234
4235 // Adjust if using LWE or TFE
4236 if ((LWE && LWE->getImm()) || (TFE && TFE->getImm()))
4237 RegCount += 1;
4238
4239 const uint32_t DstIdx =
4240 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata);
4241 const MachineOperand &Dst = MI.getOperand(DstIdx);
4242 if (Dst.isReg()) {
4243 const TargetRegisterClass *DstRC = getOpRegClass(MI, DstIdx);
4244 uint32_t DstSize = RI.getRegSizeInBits(*DstRC) / 32;
4245 if (RegCount > DstSize) {
4246 ErrInfo = "MIMG instruction returns too many registers for dst "
4247 "register class";
4248 return false;
4249 }
4250 }
4251 }
4252 }
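
The dmask check just performed reduces to small arithmetic: one 32-bit result register per set dmask bit (a fixed four for gather4), halved when D16 results are packed, plus one when TFE or LWE requests the extra status word. A minimal standalone version of that computation (a sketch with assumed parameter names, not the LLVM helper):

#include <bit>
#include <cstdint>

// Expected number of 32-bit result registers for a MIMG load, mirroring the
// verifier's rules above. Assumes DMask only has its low four bits set.
uint32_t expectedRegCount(uint64_t DMask, bool IsGather4, bool D16Packed,
                          bool TFE, bool LWE) {
  uint32_t RegCount = IsGather4 ? 4 : std::popcount(DMask);
  if (D16Packed)
    RegCount >>= 1;  // two 16-bit components pack into each register
  if (TFE || LWE)
    RegCount += 1;   // extra register for the texture-fail / LOD-warn word
  return RegCount;
}

If expectedRegCount exceeds the destination class size in 32-bit units, the instruction is rejected, as in the code above.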
4253
4254 // Verify VOP*. Ignore multiple sgpr operands on writelane.
4255 if (isVALU(MI) && Desc.getOpcode() != AMDGPU::V_WRITELANE_B32) {
4256 unsigned ConstantBusCount = 0;
4257 bool UsesLiteral = false;
4258 const MachineOperand *LiteralVal = nullptr;
4259
4260 int ImmIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm);
4261 if (ImmIdx != -1) {
4262 ++ConstantBusCount;
4263 UsesLiteral = true;
4264 LiteralVal = &MI.getOperand(ImmIdx);
4265 }
4266
4267 SmallVector<Register, 2> SGPRsUsed;
4268 Register SGPRUsed;
4269
4270 // Only look at the true operands. Only a real operand can use the constant
4271 // bus, and we don't want to check pseudo-operands like the source modifier
4272 // flags.
4273 for (int OpIdx : {Src0Idx, Src1Idx, Src2Idx, Src3Idx}) {
4274 if (OpIdx == -1)
4275 continue;
4276 const MachineOperand &MO = MI.getOperand(OpIdx);
4277 if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) {
4278 if (MO.isReg()) {
4279 SGPRUsed = MO.getReg();
4280 if (!llvm::is_contained(SGPRsUsed, SGPRUsed)) {
4281 ++ConstantBusCount;
4282 SGPRsUsed.push_back(SGPRUsed);
4283 }
4284 } else {
4285 if (!UsesLiteral) {
4286 ++ConstantBusCount;
4287 UsesLiteral = true;
4288 LiteralVal = &MO;
4289 } else if (!MO.isIdenticalTo(*LiteralVal)) {
4290 assert(isVOP2(MI) || isVOP3(MI));
4291 ErrInfo = "VOP2/VOP3 instruction uses more than one literal";
4292 return false;
4293 }
4294 }
4295 }
4296 }
4297
4298 SGPRUsed = findImplicitSGPRRead(MI);
4299 if (SGPRUsed != AMDGPU::NoRegister) {
4300 // Implicit uses may safely overlap true operands
4301 if (llvm::all_of(SGPRsUsed, [this, SGPRUsed](unsigned SGPR) {
4302 return !RI.regsOverlap(SGPRUsed, SGPR);
4303 })) {
4304 ++ConstantBusCount;
4305 SGPRsUsed.push_back(SGPRUsed);
4306 }
4307 }
4308
4309 // v_writelane_b32 is an exception to the constant bus restriction:
4310 // vsrc0 can be an SGPR, a constant, or m0; the lane select can be an SGPR, m0, or an inline constant.
4311 if (ConstantBusCount > ST.getConstantBusLimit(Opcode) &&
4312 Opcode != AMDGPU::V_WRITELANE_B32) {
4313 ErrInfo = "VOP* instruction violates constant bus restriction";
4314 return false;
4315 }
4316
4317 if (isVOP3(MI) && UsesLiteral && !ST.hasVOP3Literal()) {
4318 ErrInfo = "VOP3 instruction uses literal";
4319 return false;
4320 }
4321 }
4322
4323 // Special case for writelane - this can exceed the usual constant bus limit,
4324 // but still can't use more than one SGPR register.
4325 if (Desc.getOpcode() == AMDGPU::V_WRITELANE_B32) {
4326 unsigned SGPRCount = 0;
4327 Register SGPRUsed = AMDGPU::NoRegister;
4328
4329 for (int OpIdx : {Src0Idx, Src1Idx}) {
4330 if (OpIdx == -1)
4331 break;
4332
4333 const MachineOperand &MO = MI.getOperand(OpIdx);
4334
4335 if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) {
4336 if (MO.isReg() && MO.getReg() != AMDGPU::M0) {
4337 if (MO.getReg() != SGPRUsed)
4338 ++SGPRCount;
4339 SGPRUsed = MO.getReg();
4340 }
4341 }
4342 if (SGPRCount > ST.getConstantBusLimit(Opcode)) {
4343 ErrInfo = "WRITELANE instruction violates constant bus restriction";
4344 return false;
4345 }
4346 }
4347 }
4348
4349 // Verify misc. restrictions on specific instructions.
4350 if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32_e64 ||
4351 Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64_e64) {
4352 const MachineOperand &Src0 = MI.getOperand(Src0Idx);
4353 const MachineOperand &Src1 = MI.getOperand(Src1Idx);
4354 const MachineOperand &Src2 = MI.getOperand(Src2Idx);
4355 if (Src0.isReg() && Src1.isReg() && Src2.isReg()) {
4356 if (!compareMachineOp(Src0, Src1) &&
4357 !compareMachineOp(Src0, Src2)) {
4358 ErrInfo = "v_div_scale_{f32|f64} require src0 = src1 or src2";
4359 return false;
4360 }
4361 }
4362 if ((getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm() &
4363 SISrcMods::ABS) ||
4364 (getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm() &
4365 SISrcMods::ABS) ||
4366 (getNamedOperand(MI, AMDGPU::OpName::src2_modifiers)->getImm() &
4367 SISrcMods::ABS)) {
4368 ErrInfo = "ABS not allowed in VOP3B instructions";
4369 return false;
4370 }
4371 }
4372
4373 if (isSOP2(MI) || isSOPC(MI)) {
4374 const MachineOperand &Src0 = MI.getOperand(Src0Idx);
4375 const MachineOperand &Src1 = MI.getOperand(Src1Idx);
4376
4377 if (!Src0.isReg() && !Src1.isReg() &&
4378 !isInlineConstant(Src0, Desc.OpInfo[Src0Idx].OperandType) &&
4379 !isInlineConstant(Src1, Desc.OpInfo[Src1Idx].OperandType) &&
4380 !Src0.isIdenticalTo(Src1)) {
4381 ErrInfo = "SOP2/SOPC instruction requires too many immediate constants";
4382 return false;
4383 }
4384 }
4385
4386 if (isSOPK(MI)) {
4387 auto Op = getNamedOperand(MI, AMDGPU::OpName::simm16);
4388 if (Desc.isBranch()) {
4389 if (!Op->isMBB()) {
4390 ErrInfo = "invalid branch target for SOPK instruction";
4391 return false;
4392 }
4393 } else {
4394 uint64_t Imm = Op->getImm();
4395 if (sopkIsZext(MI)) {
4396 if (!isUInt<16>(Imm)) {
4397 ErrInfo = "invalid immediate for SOPK instruction";
4398 return false;
4399 }
4400 } else {
4401 if (!isInt<16>(Imm)) {
4402 ErrInfo = "invalid immediate for SOPK instruction";
4403 return false;
4404 }
4405 }
4406 }
4407 }
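
Whether the SOPK immediate is legal depends on whether the hardware zero- or sign-extends the 16-bit field; LLVM tests this with isUInt<16> and isInt<16>. A minimal reimplementation of those predicates, for illustration only (the real ones live in llvm/Support/MathExtras.h):

#include <cstdint>

// A zero-extended 16-bit field holds 0..65535; a sign-extended one holds
// -32768..32767. N is kept below 64 so the shifts stay well-defined.
template <unsigned N> constexpr bool isUIntN(uint64_t V) {
  static_assert(N < 64, "sketch only handles narrow fields");
  return V < (UINT64_C(1) << N);
}

template <unsigned N> constexpr bool isIntN(int64_t V) {
  static_assert(N < 64, "sketch only handles narrow fields");
  return V >= -(INT64_C(1) << (N - 1)) && V < (INT64_C(1) << (N - 1));
}

static_assert(isUIntN<16>(65535) && !isUIntN<16>(65536));
static_assert(isIntN<16>(-32768) && !isIntN<16>(32768));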
4408
4409 if (Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e32 ||
4410 Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e64 ||
4411 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 ||
4412 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64) {
4413 const bool IsDst = Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 ||
4414 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64;
4415
4416 const unsigned StaticNumOps = Desc.getNumOperands() +
4417 Desc.getNumImplicitUses();
4418 const unsigned NumImplicitOps = IsDst ? 2 : 1;
4419
4420 // Allow additional implicit operands. This allows a fixup done by the
4421 // post-RA scheduler where the main implicit operand is killed and
4422 // implicit-defs are added for sub-registers that remain live after this instruction.
4423 if (MI.getNumOperands() < StaticNumOps + NumImplicitOps) {
4424 ErrInfo = "missing implicit register operands";
4425 return false;
4426 }
4427
4428 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
4429 if (IsDst) {
4430 if (!Dst->isUse()) {
4431 ErrInfo = "v_movreld_b32 vdst should be a use operand";
4432 return false;
4433 }
4434
4435 unsigned UseOpIdx;
4436 if (!MI.isRegTiedToUseOperand(StaticNumOps, &UseOpIdx) ||
4437 UseOpIdx != StaticNumOps + 1) {
4438 ErrInfo = "movrel implicit operands should be tied";
4439 return false;
4440 }
4441 }
4442
4443 const MachineOperand &Src0 = MI.getOperand(Src0Idx);
4444 const MachineOperand &ImpUse
4445 = MI.getOperand(StaticNumOps + NumImplicitOps - 1);
4446 if (!ImpUse.isReg() || !ImpUse.isUse() ||
4447 !isSubRegOf(RI, ImpUse, IsDst ? *Dst : Src0)) {
4448 ErrInfo = "src0 should be subreg of implicit vector use";
4449 return false;
4450 }
4451 }
4452
4453 // Make sure we aren't losing exec uses in the td files. This mostly requires
4454 // being careful when using let Uses to try to add other use registers.
4455 if (shouldReadExec(MI)) {
4456 if (!MI.hasRegisterImplicitUseOperand(AMDGPU::EXEC)) {
4457 ErrInfo = "VALU instruction does not implicitly read exec mask";
4458 return false;
4459 }
4460 }
4461
4462 if (isSMRD(MI)) {
4463 if (MI.mayStore() &&
4464 ST.getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS) {
4465 // The register offset form of scalar stores may only use m0 as the
4466 // soffset register.
4467 const MachineOperand *Soff = getNamedOperand(MI, AMDGPU::OpName::soffset);
4468 if (Soff && Soff->getReg() != AMDGPU::M0) {
4469 ErrInfo = "scalar stores must use m0 as offset register";
4470 return false;
4471 }
4472 }
4473 }
4474
4475 if (isFLAT(MI) && !ST.hasFlatInstOffsets()) {
4476 const MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset);
4477 if (Offset->getImm() != 0) {
4478 ErrInfo = "subtarget does not support offsets in flat instructions";
4479 return false;
4480 }
4481 }
4482
4483 if (isMIMG(MI)) {
4484 const MachineOperand *DimOp = getNamedOperand(MI, AMDGPU::OpName::dim);
4485 if (DimOp) {
4486 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opcode,
4487 AMDGPU::OpName::vaddr0);
4488 int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::srsrc);
4489 const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opcode);
4490 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
4491 AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode);
4492 const AMDGPU::MIMGDimInfo *Dim =
4493 AMDGPU::getMIMGDimInfoByEncoding(DimOp->getImm());
4494
4495 if (!Dim) {
4496 ErrInfo = "dim is out of range";
4497 return false;
4498 }
4499
4500 bool IsA16 = false;
4501 if (ST.hasR128A16()) {
4502 const MachineOperand *R128A16 = getNamedOperand(MI, AMDGPU::OpName::r128);
4503 IsA16 = R128A16->getImm() != 0;
4504 } else if (ST.hasGFX10A16()) {
4505 const MachineOperand *A16 = getNamedOperand(MI, AMDGPU::OpName::a16);
4506 IsA16 = A16->getImm() != 0;
4507 }
4508
4509 bool IsNSA = SRsrcIdx - VAddr0Idx > 1;
4510
4511 unsigned AddrWords =
4512 AMDGPU::getAddrSizeMIMGOp(BaseOpcode, Dim, IsA16, ST.hasG16());
4513
4514 unsigned VAddrWords;
4515 if (IsNSA) {
4516 VAddrWords = SRsrcIdx - VAddr0Idx;
4517 } else {
4518 const TargetRegisterClass *RC = getOpRegClass(MI, VAddr0Idx);
4519 VAddrWords = MRI.getTargetRegisterInfo()->getRegSizeInBits(*RC) / 32;
4520 if (AddrWords > 8)
4521 AddrWords = 16;
4522 }
4523
4524 if (VAddrWords != AddrWords) {
4525 LLVM_DEBUG(dbgs() << "bad vaddr size, expected " << AddrWords
4526 << " but got " << VAddrWords << "\n");
4527 ErrInfo = "bad vaddr size";
4528 return false;
4529 }
4530 }
4531 }
4532
4533 const MachineOperand *DppCt = getNamedOperand(MI, AMDGPU::OpName::dpp_ctrl);
4534 if (DppCt) {
4535 using namespace AMDGPU::DPP;
4536
4537 unsigned DC = DppCt->getImm();
4538 if (DC == DppCtrl::DPP_UNUSED1 || DC == DppCtrl::DPP_UNUSED2 ||
4539 DC == DppCtrl::DPP_UNUSED3 || DC > DppCtrl::DPP_LAST ||
4540 (DC >= DppCtrl::DPP_UNUSED4_FIRST && DC <= DppCtrl::DPP_UNUSED4_LAST) ||
4541 (DC >= DppCtrl::DPP_UNUSED5_FIRST && DC <= DppCtrl::DPP_UNUSED5_LAST) ||
4542 (DC >= DppCtrl::DPP_UNUSED6_FIRST && DC <= DppCtrl::DPP_UNUSED6_LAST) ||
4543 (DC >= DppCtrl::DPP_UNUSED7_FIRST && DC <= DppCtrl::DPP_UNUSED7_LAST) ||
4544 (DC >= DppCtrl::DPP_UNUSED8_FIRST && DC <= DppCtrl::DPP_UNUSED8_LAST)) {
4545 ErrInfo = "Invalid dpp_ctrl value";
4546 return false;
4547 }
4548 if (DC >= DppCtrl::WAVE_SHL1 && DC <= DppCtrl::WAVE_ROR1 &&
4549 ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
4550 ErrInfo = "Invalid dpp_ctrl value: "
4551 "wavefront shifts are not supported on GFX10+";
4552 return false;
4553 }
4554 if (DC >= DppCtrl::BCAST15 && DC <= DppCtrl::BCAST31 &&
4555 ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
4556 ErrInfo = "Invalid dpp_ctrl value: "
4557 "broadcasts are not supported on GFX10+";
4558 return false;
4559 }
4560 if (DC >= DppCtrl::ROW_SHARE_FIRST && DC <= DppCtrl::ROW_XMASK_LAST &&
4561 ST.getGeneration() < AMDGPUSubtarget::GFX10) {
4562 if (DC >= DppCtrl::ROW_NEWBCAST_FIRST &&
4563 DC <= DppCtrl::ROW_NEWBCAST_LAST &&
4564 !ST.hasGFX90AInsts()) {
4565 ErrInfo = "Invalid dpp_ctrl value: "
4566 "row_newbroadcast/row_share is not supported before "
4567 "GFX90A/GFX10";
4568 return false;
4569 } else if (DC > DppCtrl::ROW_NEWBCAST_LAST || !ST.hasGFX90AInsts()) {
4570 ErrInfo = "Invalid dpp_ctrl value: "
4571 "row_share and row_xmask are not supported before GFX10";
4572 return false;
4573 }
4574 }
4575
4576 int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst);
4577
4578 if (Opcode != AMDGPU::V_MOV_B64_DPP_PSEUDO &&
4579 ((DstIdx >= 0 &&
4580 (Desc.OpInfo[DstIdx].RegClass == AMDGPU::VReg_64RegClassID ||
4581 Desc.OpInfo[DstIdx].RegClass == AMDGPU::VReg_64_Align2RegClassID)) ||
4582 ((Src0Idx >= 0 &&
4583 (Desc.OpInfo[Src0Idx].RegClass == AMDGPU::VReg_64RegClassID ||
4584 Desc.OpInfo[Src0Idx].RegClass ==
4585 AMDGPU::VReg_64_Align2RegClassID)))) &&
4586 !AMDGPU::isLegal64BitDPPControl(DC)) {
4587 ErrInfo = "Invalid dpp_ctrl value: "
4588 "64 bit dpp only support row_newbcast";
4589 return false;
4590 }
4591 }
4592
4593 if ((MI.mayStore() || MI.mayLoad()) && !isVGPRSpill(MI)) {
4594 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
4595 uint16_t DataNameIdx = isDS(Opcode) ? AMDGPU::OpName::data0
4596 : AMDGPU::OpName::vdata;
4597 const MachineOperand *Data = getNamedOperand(MI, DataNameIdx);
4598 const MachineOperand *Data2 = getNamedOperand(MI, AMDGPU::OpName::data1);
4599 if (Data && !Data->isReg())
4600 Data = nullptr;
4601
4602 if (ST.hasGFX90AInsts()) {
4603 if (Dst && Data &&
4604 (RI.isAGPR(MRI, Dst->getReg()) != RI.isAGPR(MRI, Data->getReg()))) {
4605 ErrInfo = "Invalid register class: "
4606 "vdata and vdst should be both VGPR or AGPR";
4607 return false;
4608 }
4609 if (Data && Data2 &&
4610 (RI.isAGPR(MRI, Data->getReg()) != RI.isAGPR(MRI, Data2->getReg()))) {
4611 ErrInfo = "Invalid register class: "
4612 "both data operands should be VGPR or AGPR";
4613 return false;
4614 }
4615 } else {
4616 if ((Dst && RI.isAGPR(MRI, Dst->getReg())) ||
4617 (Data && RI.isAGPR(MRI, Data->getReg())) ||
4618 (Data2 && RI.isAGPR(MRI, Data2->getReg()))) {
4619 ErrInfo = "Invalid register class: "
4620 "agpr loads and stores not supported on this GPU";
4621 return false;
4622 }
4623 }
4624 }
4625
4626 if (ST.needsAlignedVGPRs()) {
4627 const auto isAlignedReg = [&MI, &MRI, this](unsigned OpName) -> bool {
4628 const MachineOperand *Op = getNamedOperand(MI, OpName);
4629 if (!Op)
4630 return true;
4631 Register Reg = Op->getReg();
4632 if (Reg.isPhysical())
4633 return !(RI.getHWRegIndex(Reg) & 1);
4634 const TargetRegisterClass &RC = *MRI.getRegClass(Reg);
4635 return RI.getRegSizeInBits(RC) > 32 && RI.isProperlyAlignedRC(RC) &&
4636 !(RI.getChannelFromSubReg(Op->getSubReg()) & 1);
4637 };
4638
4639 if (MI.getOpcode() == AMDGPU::DS_GWS_INIT ||
4640 MI.getOpcode() == AMDGPU::DS_GWS_SEMA_BR ||
4641 MI.getOpcode() == AMDGPU::DS_GWS_BARRIER) {
4642
4643 if (!isAlignedReg(AMDGPU::OpName::data0)) {
4644 ErrInfo = "Subtarget requires even aligned vector registers "
4645 "for DS_GWS instructions";
4646 return false;
4647 }
4648 }
4649
4650 if (isMIMG(MI)) {
4651 if (!isAlignedReg(AMDGPU::OpName::vaddr)) {
4652 ErrInfo = "Subtarget requires even aligned vector registers "
4653 "for vaddr operand of image instructions";
4654 return false;
4655 }
4656 }
4657 }
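
Every alignment test in this block reduces to checking the low bit of the tuple's first 32-bit lane: on subtargets with this requirement, a 64-bit or wider VGPR tuple must start at an even register. Trivially, as standalone code (a hypothetical helper, not the SIRegisterInfo API):

// v[2k..2k+1] is a legal 64-bit tuple; v[2k+1..2k+2] is not.
constexpr bool isEvenAligned(unsigned FirstLaneIndex) {
  return (FirstLaneIndex & 1) == 0;
}

static_assert(isEvenAligned(0) && isEvenAligned(2) && !isEvenAligned(3));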
4658
4659 if (MI.getOpcode() == AMDGPU::V_ACCVGPR_WRITE_B32_e64 &&
4660 !ST.hasGFX90AInsts()) {
4661 const MachineOperand *Src = getNamedOperand(MI, AMDGPU::OpName::src0);
4662 if (Src->isReg() && RI.isSGPRReg(MRI, Src->getReg())) {
4663 ErrInfo = "Invalid register class: "
4664 "v_accvgpr_write with an SGPR is not supported on this GPU";
4665 return false;
4666 }
4667 }
4668
4669 if (Desc.getOpcode() == AMDGPU::G_AMDGPU_WAVE_ADDRESS) {
4670 const MachineOperand &SrcOp = MI.getOperand(1);
4671 if (!SrcOp.isReg() || SrcOp.getReg().isVirtual()) {
4672 ErrInfo = "pseudo expects only physical SGPRs";
4673 return false;
4674 }
4675 }
4676
4677 return true;
4678}
4679
4680unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) const {
4681 switch (MI.getOpcode()) {
4682 default: return AMDGPU::INSTRUCTION_LIST_END;
4683 case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE;
4684 case AMDGPU::COPY: return AMDGPU::COPY;
4685 case AMDGPU::PHI: return AMDGPU::PHI;
4686 case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG;
4687 case AMDGPU::WQM: return AMDGPU::WQM;
4688 case AMDGPU::SOFT_WQM: return AMDGPU::SOFT_WQM;
4689 case AMDGPU::STRICT_WWM: return AMDGPU::STRICT_WWM;
4690 case AMDGPU::STRICT_WQM: return AMDGPU::STRICT_WQM;
4691 case AMDGPU::S_MOV_B32: {
4692 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
4693 return MI.getOperand(1).isReg() ||
4694 RI.isAGPR(MRI, MI.getOperand(0).getReg()) ?
4695 AMDGPU::COPY : AMDGPU::V_MOV_B32_e32;
4696 }
4697 case AMDGPU::S_ADD_I32:
4698 return ST.hasAddNoCarry() ? AMDGPU::V_ADD_U32_e64 : AMDGPU::V_ADD_CO_U32_e32;
4699 case AMDGPU::S_ADDC_U32:
4700 return AMDGPU::V_ADDC_U32_e32;
4701 case AMDGPU::S_SUB_I32:
4702 return ST.hasAddNoCarry() ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_SUB_CO_U32_e32;
4703 // FIXME: These are not consistently handled, and selected when the carry is
4704 // used.
4705 case AMDGPU::S_ADD_U32:
4706 return AMDGPU::V_ADD_CO_U32_e32;
4707 case AMDGPU::S_SUB_U32:
4708 return AMDGPU::V_SUB_CO_U32_e32;
4709 case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32;
4710 case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_U32_e64;
4711 case AMDGPU::S_MUL_HI_U32: return AMDGPU::V_MUL_HI_U32_e64;
4712 case AMDGPU::S_MUL_HI_I32: return AMDGPU::V_MUL_HI_I32_e64;
4713 case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e64;
4714 case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e64;
4715 case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e64;
4716 case AMDGPU::S_XNOR_B32:
4717 return ST.hasDLInsts() ? AMDGPU::V_XNOR_B32_e64 : AMDGPU::INSTRUCTION_LIST_END;
4718 case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e64;
4719 case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e64;
4720 case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e64;
4721 case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e64;
4722 case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32;
4723 case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64_e64;
4724 case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32;
4725 case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64_e64;
4726 case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32;
4727 case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64_e64;
4728 case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32_e64;
4729 case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32_e64;
4730 case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32_e64;
4731 case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32_e64;
4732 case AMDGPU::S_BFM_B32: return AMDGPU::V_BFM_B32_e64;
4733 case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32;
4734 case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32;
4735 case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32;
4736 case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e64;
4737 case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e64;
4738 case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e64;
4739 case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e64;
4740 case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e64;
4741 case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e64;
4742 case AMDGPU::S_CMP_EQ_U32: return AMDGPU::V_CMP_EQ_U32_e64;
4743 case AMDGPU::S_CMP_LG_U32: return AMDGPU::V_CMP_NE_U32_e64;
4744 case AMDGPU::S_CMP_GT_U32: return AMDGPU::V_CMP_GT_U32_e64;
4745 case AMDGPU::S_CMP_GE_U32: return AMDGPU::V_CMP_GE_U32_e64;
4746 case AMDGPU::S_CMP_LT_U32: return AMDGPU::V_CMP_LT_U32_e64;
4747 case AMDGPU::S_CMP_LE_U32: return AMDGPU::V_CMP_LE_U32_e64;
4748 case AMDGPU::S_CMP_EQ_U64: return AMDGPU::V_CMP_EQ_U64_e64;
4749 case AMDGPU::S_CMP_LG_U64: return AMDGPU::V_CMP_NE_U64_e64;
4750 case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64;
4751 case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32;
4752 case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32;
4753 case AMDGPU::S_FLBIT_I32: return AMDGPU::V_FFBH_I32_e64;
4754 case AMDGPU::S_CBRANCH_SCC0: return AMDGPU::S_CBRANCH_VCCZ;
4755 case AMDGPU::S_CBRANCH_SCC1: return AMDGPU::S_CBRANCH_VCCNZ;
4756 }
4757 llvm_unreachable(
4758 "Unexpected scalar opcode without corresponding vector one!");
4759}
4760
4761static const TargetRegisterClass *
4762adjustAllocatableRegClass(const GCNSubtarget &ST, const SIRegisterInfo &RI,
4763 const MachineRegisterInfo &MRI,
4764 const MCInstrDesc &TID, unsigned RCID,
4765 bool IsAllocatable) {
4766 if ((IsAllocatable || !ST.hasGFX90AInsts() || !MRI.reservedRegsFrozen()) &&
4767 (((TID.mayLoad() || TID.mayStore()) &&
4768 !(TID.TSFlags & SIInstrFlags::VGPRSpill)) ||
4769 (TID.TSFlags & (SIInstrFlags::DS | SIInstrFlags::MIMG)))) {
4770 switch (RCID) {
4771 case AMDGPU::AV_32RegClassID:
4772 RCID = AMDGPU::VGPR_32RegClassID;
4773 break;
4774 case AMDGPU::AV_64RegClassID:
4775 RCID = AMDGPU::VReg_64RegClassID;
4776 break;
4777 case AMDGPU::AV_96RegClassID:
4778 RCID = AMDGPU::VReg_96RegClassID;
4779 break;
4780 case AMDGPU::AV_128RegClassID:
4781 RCID = AMDGPU::VReg_128RegClassID;
4782 break;
4783 case AMDGPU::AV_160RegClassID:
4784 RCID = AMDGPU::VReg_160RegClassID;
4785 break;
4786 case AMDGPU::AV_512RegClassID:
4787 RCID = AMDGPU::VReg_512RegClassID;
4788 break;
4789 default:
4790 break;
4791 }
4792 }
4793
4794 return RI.getProperlyAlignedRC(RI.getRegClass(RCID));
4795}
4796
4797const TargetRegisterClass *SIInstrInfo::getRegClass(const MCInstrDesc &TID,
4798 unsigned OpNum, const TargetRegisterInfo *TRI,
4799 const MachineFunction &MF)
4800 const {
4801 if (OpNum >= TID.getNumOperands())
4802 return nullptr;
4803 auto RegClass = TID.OpInfo[OpNum].RegClass;
4804 bool IsAllocatable = false;
4805 if (TID.TSFlags & (SIInstrFlags::DS | SIInstrFlags::FLAT)) {
4806 // vdst and vdata should both be VGPR or AGPR; the same holds for DS instructions
4807 // with two data operands. Request a register class constrained to VGPR only
4808 // if both operands are present, as Machine Copy Propagation cannot check this
4809 // constraint, and possibly other passes cannot either.
4810 //
4811 // The check is limited to FLAT and DS because atomics in non-flat encoding
4812 // have their vdst and vdata tied to be the same register.
4813 const int VDstIdx = AMDGPU::getNamedOperandIdx(TID.Opcode,
4814 AMDGPU::OpName::vdst);
4815 const int DataIdx = AMDGPU::getNamedOperandIdx(TID.Opcode,
4816 (TID.TSFlags & SIInstrFlags::DS) ? AMDGPU::OpName::data0
4817 : AMDGPU::OpName::vdata);
4818 if (DataIdx != -1) {
4819 IsAllocatable = VDstIdx != -1 ||
4820 AMDGPU::getNamedOperandIdx(TID.Opcode,
4821 AMDGPU::OpName::data1) != -1;
4822 }
4823 }
4824 return adjustAllocatableRegClass(ST, RI, MF.getRegInfo(), TID, RegClass,
4825 IsAllocatable);
4826}
4827
4828const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI,
4829 unsigned OpNo) const {
4830 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
4831 const MCInstrDesc &Desc = get(MI.getOpcode());
4832 if (MI.isVariadic() || OpNo >= Desc.getNumOperands() ||
4833 Desc.OpInfo[OpNo].RegClass == -1) {
4834 Register Reg = MI.getOperand(OpNo).getReg();
4835
4836 if (Reg.isVirtual())
4837 return MRI.getRegClass(Reg);
4838 return RI.getPhysRegClass(Reg);
4839 }
4840
4841 unsigned RCID = Desc.OpInfo[OpNo].RegClass;
4842 return adjustAllocatableRegClass(ST, RI, MRI, Desc, RCID, true);
4843}
4844
4845void SIInstrInfo::legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const {
4846 MachineBasicBlock::iterator I = MI;
4847 MachineBasicBlock *MBB = MI.getParent();
4848 MachineOperand &MO = MI.getOperand(OpIdx);
4849 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
4850 unsigned RCID = get(MI.getOpcode()).OpInfo[OpIdx].RegClass;
4851 const TargetRegisterClass *RC = RI.getRegClass(RCID);
4852 unsigned Size = RI.getRegSizeInBits(*RC);
4853 unsigned Opcode = (Size == 64) ? AMDGPU::V_MOV_B64_PSEUDO : AMDGPU::V_MOV_B32_e32;
4854 if (MO.isReg())
4855 Opcode = AMDGPU::COPY;
4856 else if (RI.isSGPRClass(RC))
4857 Opcode = (Size == 64) ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
4858
4859 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC);
4860 const TargetRegisterClass *VRC64 = RI.getVGPR64Class();
4861 if (RI.getCommonSubClass(VRC64, VRC))
4862 VRC = VRC64;
4863 else
4864 VRC = &AMDGPU::VGPR_32RegClass;
4865
4866 Register Reg = MRI.createVirtualRegister(VRC);
4867 DebugLoc DL = MBB->findDebugLoc(I);
4868 BuildMI(*MI.getParent(), I, DL, get(Opcode), Reg).add(MO);
4869 MO.ChangeToRegister(Reg, false);
4870}
4871
4872unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI,
4873 MachineRegisterInfo &MRI,
4874 MachineOperand &SuperReg,
4875 const TargetRegisterClass *SuperRC,
4876 unsigned SubIdx,
4877 const TargetRegisterClass *SubRC)
4878 const {
4879 MachineBasicBlock *MBB = MI->getParent();
4880 DebugLoc DL = MI->getDebugLoc();
4881 Register SubReg = MRI.createVirtualRegister(SubRC);
4882
4883 if (SuperReg.getSubReg() == AMDGPU::NoSubRegister) {
4884 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
4885 .addReg(SuperReg.getReg(), 0, SubIdx);
4886 return SubReg;
4887 }
4888
4889 // Just in case the super register is itself a sub-register, copy it to a new
4890 // value so we don't need to worry about merging its subreg index with the
4891 // SubIdx passed to this function. The register coalescer should be able to
4892 // eliminate this extra copy.
4893 Register NewSuperReg = MRI.createVirtualRegister(SuperRC);
4894
4895 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), NewSuperReg)
4896 .addReg(SuperReg.getReg(), 0, SuperReg.getSubReg());
4897
4898 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
4899 .addReg(NewSuperReg, 0, SubIdx);
4900
4901 return SubReg;
4902}
4903
4904MachineOperand SIInstrInfo::buildExtractSubRegOrImm(
4905 MachineBasicBlock::iterator MII,
4906 MachineRegisterInfo &MRI,
4907 MachineOperand &Op,
4908 const TargetRegisterClass *SuperRC,
4909 unsigned SubIdx,
4910 const TargetRegisterClass *SubRC) const {
4911 if (Op.isImm()) {
4912 if (SubIdx == AMDGPU::sub0)
4913 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm()));
4914 if (SubIdx == AMDGPU::sub1)
4915 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm() >> 32));
4916
4917 llvm_unreachable("Unhandled register index for immediate");
4918 }
4919
4920 unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC,
4921 SubIdx, SubRC);
4922 return MachineOperand::CreateReg(SubReg, false);
4923}
4924
4925// Change the order of operands from (0, 1, 2) to (0, 2, 1)
4926void SIInstrInfo::swapOperands(MachineInstr &Inst) const {
4927 assert(Inst.getNumExplicitOperands() == 3);
4928 MachineOperand Op1 = Inst.getOperand(1);
4929 Inst.removeOperand(1);
4930 Inst.addOperand(Op1);
4931}
4932
4933bool SIInstrInfo::isLegalRegOperand(const MachineRegisterInfo &MRI,
4934 const MCOperandInfo &OpInfo,
4935 const MachineOperand &MO) const {
4936 if (!MO.isReg())
4937 return false;
4938
4939 Register Reg = MO.getReg();
4940
4941 const TargetRegisterClass *DRC = RI.getRegClass(OpInfo.RegClass);
4942 if (Reg.isPhysical())
4943 return DRC->contains(Reg);
4944
4945 const TargetRegisterClass *RC = MRI.getRegClass(Reg);
4946
4947 if (MO.getSubReg()) {
4948 const MachineFunction *MF = MO.getParent()->getParent()->getParent();
4949 const TargetRegisterClass *SuperRC = RI.getLargestLegalSuperClass(RC, *MF);
4950 if (!SuperRC)
4951 return false;
4952
4953 DRC = RI.getMatchingSuperRegClass(SuperRC, DRC, MO.getSubReg());
4954 if (!DRC)
4955 return false;
4956 }
4957 return RC->hasSuperClassEq(DRC);
4958}
4959
4960bool SIInstrInfo::isLegalVSrcOperand(const MachineRegisterInfo &MRI,
4961 const MCOperandInfo &OpInfo,
4962 const MachineOperand &MO) const {
4963 if (MO.isReg())
4964 return isLegalRegOperand(MRI, OpInfo, MO);
4965
4966 // Handle non-register types that are treated like immediates.
4967 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI() || MO.isGlobal());
4968 return true;
4969}
4970
4971bool SIInstrInfo::isOperandLegal(const MachineInstr &MI, unsigned OpIdx,
4972 const MachineOperand *MO) const {
4973 const MachineFunction &MF = *MI.getParent()->getParent();
4974 const MachineRegisterInfo &MRI = MF.getRegInfo();
4975 const MCInstrDesc &InstDesc = MI.getDesc();
4976 const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx];
4977 const TargetRegisterClass *DefinedRC =
4978 OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr;
4979 if (!MO)
4980 MO = &MI.getOperand(OpIdx);
4981
4982 int ConstantBusLimit = ST.getConstantBusLimit(MI.getOpcode());
4983 int LiteralLimit = !isVOP3(MI) || ST.hasVOP3Literal() ? 1 : 0;
4984 if (isVALU(MI) && usesConstantBus(MRI, *MO, OpInfo)) {
4985 if (isLiteralConstantLike(*MO, OpInfo) && !LiteralLimit--)
4986 return false;
4987
4988 SmallDenseSet<RegSubRegPair> SGPRsUsed;
4989 if (MO->isReg())
4990 SGPRsUsed.insert(RegSubRegPair(MO->getReg(), MO->getSubReg()));
4991
4992 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
4993 if (i == OpIdx)
4994 continue;
4995 const MachineOperand &Op = MI.getOperand(i);
4996 if (Op.isReg()) {
4997 RegSubRegPair SGPR(Op.getReg(), Op.getSubReg());
4998 if (!SGPRsUsed.count(SGPR) &&
4999 usesConstantBus(MRI, Op, InstDesc.OpInfo[i])) {
5000 if (--ConstantBusLimit <= 0)
5001 return false;
5002 SGPRsUsed.insert(SGPR);
5003 }
5004 } else if (InstDesc.OpInfo[i].OperandType == AMDGPU::OPERAND_KIMM32 ||
5005 (AMDGPU::isSISrcOperand(InstDesc, i) &&
5006 isLiteralConstantLike(Op, InstDesc.OpInfo[i]))) {
5007 if (!LiteralLimit--)
5008 return false;
5009 if (--ConstantBusLimit <= 0)
5010 return false;
5011 }
5012 }
5013 }
5014
5015 if (MO->isReg()) {
5016 if (!DefinedRC)
5017 return OpInfo.OperandType == MCOI::OPERAND_UNKNOWN;
5018 if (!isLegalRegOperand(MRI, OpInfo, *MO))
5019 return false;
5020 bool IsAGPR = RI.isAGPR(MRI, MO->getReg());
5021 if (IsAGPR && !ST.hasMAIInsts())
5022 return false;
5023 unsigned Opc = MI.getOpcode();
5024 if (IsAGPR &&
5025 (!ST.hasGFX90AInsts() || !MRI.reservedRegsFrozen()) &&
5026 (MI.mayLoad() || MI.mayStore() || isDS(Opc) || isMIMG(Opc)))
5027 return false;
5028 // Atomics should have both vdst and vdata either vgpr or agpr.
5029 const int VDstIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
5030 const int DataIdx = AMDGPU::getNamedOperandIdx(Opc,
5031 isDS(Opc) ? AMDGPU::OpName::data0 : AMDGPU::OpName::vdata);
5032 if ((int)OpIdx == VDstIdx && DataIdx != -1 &&
5033 MI.getOperand(DataIdx).isReg() &&
5034 RI.isAGPR(MRI, MI.getOperand(DataIdx).getReg()) != IsAGPR)
5035 return false;
5036 if ((int)OpIdx == DataIdx) {
5037 if (VDstIdx != -1 &&
5038 RI.isAGPR(MRI, MI.getOperand(VDstIdx).getReg()) != IsAGPR)
5039 return false;
5040 // DS instructions with 2 src operands also must have tied RC.
5041 const int Data1Idx = AMDGPU::getNamedOperandIdx(Opc,
5042 AMDGPU::OpName::data1);
5043 if (Data1Idx != -1 && MI.getOperand(Data1Idx).isReg() &&
5044 RI.isAGPR(MRI, MI.getOperand(Data1Idx).getReg()) != IsAGPR)
5045 return false;
5046 }
5047 if (Opc == AMDGPU::V_ACCVGPR_WRITE_B32_e64 && !ST.hasGFX90AInsts() &&
5048 (int)OpIdx == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) &&
5049 RI.isSGPRReg(MRI, MO->getReg()))
5050 return false;
5051 return true;
5052 }
5053
5054 // Handle non-register types that are treated like immediates.
5055 assert(MO->isImm() || MO->isTargetIndex() || MO->isFI() || MO->isGlobal());
5056
5057 if (!DefinedRC) {
5058 // This operand expects an immediate.
5059 return true;
5060 }
5061
5062 return isImmOperandLegal(MI, OpIdx, *MO);
5063}
5064
5065void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI,
5066 MachineInstr &MI) const {
5067 unsigned Opc = MI.getOpcode();
5068 const MCInstrDesc &InstrDesc = get(Opc);
5069
5070 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
5071 MachineOperand &Src0 = MI.getOperand(Src0Idx);
5072
5073 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
5074 MachineOperand &Src1 = MI.getOperand(Src1Idx);
5075
5076 // If there is an implicit SGPR use such as VCC use for v_addc_u32/v_subb_u32
5077 // we need to only have one constant bus use before GFX10.
5078 bool HasImplicitSGPR = findImplicitSGPRRead(MI) != AMDGPU::NoRegister;
5079 if (HasImplicitSGPR && ST.getConstantBusLimit(Opc) <= 1 &&
5080 Src0.isReg() && (RI.isSGPRReg(MRI, Src0.getReg()) ||
5081 isLiteralConstantLike(Src0, InstrDesc.OpInfo[Src0Idx])))
5082 legalizeOpWithMove(MI, Src0Idx);
5083
5084 // Special case: V_WRITELANE_B32 accepts only immediate or SGPR operands for
5085 // both the value to write (src0) and lane select (src1). Fix up non-SGPR
5086 // src0/src1 with V_READFIRSTLANE.
5087 if (Opc == AMDGPU::V_WRITELANE_B32) {
5088 const DebugLoc &DL = MI.getDebugLoc();
5089 if (Src0.isReg() && RI.isVGPR(MRI, Src0.getReg())) {
5090 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
5091 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
5092 .add(Src0);
5093 Src0.ChangeToRegister(Reg, false);
5094 }
5095 if (Src1.isReg() && RI.isVGPR(MRI, Src1.getReg())) {
5096 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
5097 const DebugLoc &DL = MI.getDebugLoc();
5098 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
5099 .add(Src1);
5100 Src1.ChangeToRegister(Reg, false);
5101 }
5102 return;
5103 }
5104
5105 // No VOP2 instructions support AGPRs.
5106 if (Src0.isReg() && RI.isAGPR(MRI, Src0.getReg()))
5107 legalizeOpWithMove(MI, Src0Idx);
5108
5109 if (Src1.isReg() && RI.isAGPR(MRI, Src1.getReg()))
5110 legalizeOpWithMove(MI, Src1Idx);
5111
5112 // VOP2 src0 operands accept all operand types, so src0 needs no legality
5113 // check. If src1 is already legal, we don't need to do anything.
5114 if (isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src1))
5115 return;
5116
5117 // Special case: V_READLANE_B32 accepts only immediate or SGPR operands for
5118 // lane select. Fix up using V_READFIRSTLANE, since we assume that the lane
5119 // select is uniform.
5120 if (Opc == AMDGPU::V_READLANE_B32 && Src1.isReg() &&
5121 RI.isVGPR(MRI, Src1.getReg())) {
5122 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
5123 const DebugLoc &DL = MI.getDebugLoc();
5124 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
5125 .add(Src1);
5126 Src1.ChangeToRegister(Reg, false);
5127 return;
5128 }
5129
5130 // We do not use commuteInstruction here because it is too aggressive and will
5131 // commute if it is possible. We only want to commute here if it improves
5132 // legality. This can be called a fairly large number of times so don't waste
5133 // compile time pointlessly swapping and checking legality again.
5134 if (HasImplicitSGPR || !MI.isCommutable()) {
5135 legalizeOpWithMove(MI, Src1Idx);
5136 return;
5137 }
5138
5139 // If src0 can be used as src1, commuting will make the operands legal.
5140 // Otherwise we have to give up and insert a move.
5141 //
5142 // TODO: Other immediate-like operand kinds could be commuted if there was a
5143 // MachineOperand::ChangeTo* for them.
5144 if ((!Src1.isImm() && !Src1.isReg()) ||
5145 !isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src0)) {
5146 legalizeOpWithMove(MI, Src1Idx);
5147 return;
5148 }
5149
5150 int CommutedOpc = commuteOpcode(MI);
5151 if (CommutedOpc == -1) {
5152 legalizeOpWithMove(MI, Src1Idx);
5153 return;
5154 }
5155
5156 MI.setDesc(get(CommutedOpc));
5157
5158 Register Src0Reg = Src0.getReg();
5159 unsigned Src0SubReg = Src0.getSubReg();
5160 bool Src0Kill = Src0.isKill();
5161
5162 if (Src1.isImm())
5163 Src0.ChangeToImmediate(Src1.getImm());
5164 else if (Src1.isReg()) {
5165 Src0.ChangeToRegister(Src1.getReg(), false, false, Src1.isKill());
5166 Src0.setSubReg(Src1.getSubReg());
5167 } else
5168 llvm_unreachable("Should only have register or immediate operands");
5169
5170 Src1.ChangeToRegister(Src0Reg, false, false, Src0Kill);
5171 Src1.setSubReg(Src0SubReg);
5172 fixImplicitOperands(MI);
5173}
5174
5175 // Legalize VOP3 operands. All operand types are supported for any operand,
5176 // but only one literal constant is allowed, and only starting from GFX10.
5177void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI,
5178 MachineInstr &MI) const {
5179 unsigned Opc = MI.getOpcode();
5180
5181 int VOP3Idx[3] = {
5182 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0),
5183 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1),
5184 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)
5185 };
5186
5187 if (Opc == AMDGPU::V_PERMLANE16_B32_e64 ||
5188 Opc == AMDGPU::V_PERMLANEX16_B32_e64) {
5189 // src1 and src2 must be scalar
5190 MachineOperand &Src1 = MI.getOperand(VOP3Idx[1]);
5191 MachineOperand &Src2 = MI.getOperand(VOP3Idx[2]);
5192 const DebugLoc &DL = MI.getDebugLoc();
5193 if (Src1.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src1.getReg()))) {
5194 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
5195 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
5196 .add(Src1);
5197 Src1.ChangeToRegister(Reg, false);
5198 }
5199 if (Src2.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src2.getReg()))) {
5200 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
5201 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
5202 .add(Src2);
5203 Src2.ChangeToRegister(Reg, false);
5204 }
5205 }
5206
5207 // Find the one SGPR operand we are allowed to use.
5208 int ConstantBusLimit = ST.getConstantBusLimit(Opc);
5209 int LiteralLimit = ST.hasVOP3Literal() ? 1 : 0;
5210 SmallDenseSet<unsigned> SGPRsUsed;
5211 Register SGPRReg = findUsedSGPR(MI, VOP3Idx);
5212 if (SGPRReg != AMDGPU::NoRegister) {
5213 SGPRsUsed.insert(SGPRReg);
5214 --ConstantBusLimit;
5215 }
5216
5217 for (int Idx : VOP3Idx) {
5218 if (Idx == -1)
5219 break;
5220 MachineOperand &MO = MI.getOperand(Idx);
5221
5222 if (!MO.isReg()) {
5223 if (!isLiteralConstantLike(MO, get(Opc).OpInfo[Idx]))
5224 continue;
5225
5226 if (LiteralLimit > 0 && ConstantBusLimit > 0) {
5227 --LiteralLimit;
5228 --ConstantBusLimit;
5229 continue;
5230 }
5231
5232 --LiteralLimit;
5233 --ConstantBusLimit;
5234 legalizeOpWithMove(MI, Idx);
5235 continue;
5236 }
5237
5238 if (RI.hasAGPRs(RI.getRegClassForReg(MRI, MO.getReg())) &&
5239 !isOperandLegal(MI, Idx, &MO)) {
5240 legalizeOpWithMove(MI, Idx);
5241 continue;
5242 }
5243
5244 if (!RI.isSGPRClass(RI.getRegClassForReg(MRI, MO.getReg())))
5245 continue; // VGPRs are legal
5246
5247 // We can use one SGPR in each VOP3 instruction prior to GFX10
5248 // and two starting from GFX10.
5249 if (SGPRsUsed.count(MO.getReg()))
5250 continue;
5251 if (ConstantBusLimit > 0) {
5252 SGPRsUsed.insert(MO.getReg());
5253 --ConstantBusLimit;
5254 continue;
5255 }
5256
5257 // If we make it this far, then the operand is not legal and we must
5258 // legalize it.
5259 legalizeOpWithMove(MI, Idx);
5260 }
5261}
5262
5263Register SIInstrInfo::readlaneVGPRToSGPR(Register SrcReg, MachineInstr &UseMI,
5264 MachineRegisterInfo &MRI) const {
5265 const TargetRegisterClass *VRC = MRI.getRegClass(SrcReg);
5266 const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC);
5267 Register DstReg = MRI.createVirtualRegister(SRC);
5268 unsigned SubRegs = RI.getRegSizeInBits(*VRC) / 32;
5269
5270 if (RI.hasAGPRs(VRC)) {
5271 VRC = RI.getEquivalentVGPRClass(VRC);
5272 Register NewSrcReg = MRI.createVirtualRegister(VRC);
5273 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
5274 get(TargetOpcode::COPY), NewSrcReg)
5275 .addReg(SrcReg);
5276 SrcReg = NewSrcReg;
5277 }
5278
5279 if (SubRegs == 1) {
5280 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
5281 get(AMDGPU::V_READFIRSTLANE_B32), DstReg)
5282 .addReg(SrcReg);
5283 return DstReg;
5284 }
5285
5286 SmallVector<unsigned, 8> SRegs;
5287 for (unsigned i = 0; i < SubRegs; ++i) {
5288 Register SGPR = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
5289 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
5290 get(AMDGPU::V_READFIRSTLANE_B32), SGPR)
5291 .addReg(SrcReg, 0, RI.getSubRegFromChannel(i));
5292 SRegs.push_back(SGPR);
5293 }
5294
5295 MachineInstrBuilder MIB =
5296 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
5297 get(AMDGPU::REG_SEQUENCE), DstReg);
5298 for (unsigned i = 0; i < SubRegs; ++i) {
5299 MIB.addReg(SRegs[i]);
5300 MIB.addImm(RI.getSubRegFromChannel(i));
5301 }
5302 return DstReg;
5303}
5304
5305void SIInstrInfo::legalizeOperandsSMRD(MachineRegisterInfo &MRI,
5306 MachineInstr &MI) const {
5307
5308 // If the pointer is stored in VGPRs, then we need to move it to
5309 // SGPRs using v_readfirstlane. This is safe because we only select
5310 // loads with uniform pointers for SMRD instructions, so we know the
5311 // pointer value is uniform.
5312 MachineOperand *SBase = getNamedOperand(MI, AMDGPU::OpName::sbase);
5313 if (SBase && !RI.isSGPRClass(MRI.getRegClass(SBase->getReg()))) {
5314 Register SGPR = readlaneVGPRToSGPR(SBase->getReg(), MI, MRI);
5315 SBase->setReg(SGPR);
5316 }
5317 MachineOperand *SOff = getNamedOperand(MI, AMDGPU::OpName::soffset);
5318 if (SOff && !RI.isSGPRClass(MRI.getRegClass(SOff->getReg()))) {
5319 Register SGPR = readlaneVGPRToSGPR(SOff->getReg(), MI, MRI);
5320 SOff->setReg(SGPR);
5321 }
5322}
5323
5324bool SIInstrInfo::moveFlatAddrToVGPR(MachineInstr &Inst) const {
5325 unsigned Opc = Inst.getOpcode();
5326 int OldSAddrIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::saddr);
5327 if (OldSAddrIdx < 0)
5328 return false;
5329
5330 assert(isSegmentSpecificFLAT(Inst));
5331
5332 int NewOpc = AMDGPU::getGlobalVaddrOp(Opc);
5333 if (NewOpc < 0)
5334 NewOpc = AMDGPU::getFlatScratchInstSVfromSS(Opc);
5335 if (NewOpc < 0)
5336 return false;
5337
5338 MachineRegisterInfo &MRI = Inst.getMF()->getRegInfo();
5339 MachineOperand &SAddr = Inst.getOperand(OldSAddrIdx);
5340 if (RI.isSGPRReg(MRI, SAddr.getReg()))
5341 return false;
5342
5343 int NewVAddrIdx = AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::vaddr);
5344 if (NewVAddrIdx < 0)
5345 return false;
5346
5347 int OldVAddrIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr);
5348
5349 // Check vaddr, it shall be zero or absent.
5350 MachineInstr *VAddrDef = nullptr;
5351 if (OldVAddrIdx >= 0) {
5352 MachineOperand &VAddr = Inst.getOperand(OldVAddrIdx);
5353 VAddrDef = MRI.getUniqueVRegDef(VAddr.getReg());
5354 if (!VAddrDef || VAddrDef->getOpcode() != AMDGPU::V_MOV_B32_e32 ||
5355 !VAddrDef->getOperand(1).isImm() ||
5356 VAddrDef->getOperand(1).getImm() != 0)
5357 return false;
5358 }
5359
5360 const MCInstrDesc &NewDesc = get(NewOpc);
5361 Inst.setDesc(NewDesc);
5362
5363 // Callers expect iterator to be valid after this call, so modify the
5364 // instruction in place.
5365 if (OldVAddrIdx == NewVAddrIdx) {
5366 MachineOperand &NewVAddr = Inst.getOperand(NewVAddrIdx);
5367 // Clear use list from the old vaddr holding a zero register.
5368 MRI.removeRegOperandFromUseList(&NewVAddr);
5369 MRI.moveOperands(&NewVAddr, &SAddr, 1);
5370 Inst.removeOperand(OldSAddrIdx);
5371 // Update the use list with the pointer we have just moved from vaddr to
5372 // saddr position. Otherwise the new vaddr will be missing from the use list.
5373 MRI.removeRegOperandFromUseList(&NewVAddr);
5374 MRI.addRegOperandToUseList(&NewVAddr);
5375 } else {
5376 assert(OldSAddrIdx == NewVAddrIdx);
5377
5378 if (OldVAddrIdx >= 0) {
5379 int NewVDstIn = AMDGPU::getNamedOperandIdx(NewOpc,
5380 AMDGPU::OpName::vdst_in);
5381
5382 // removeOperand doesn't try to fix up tied operand indexes as it goes, so
5383 // it asserts. Untie the operands for now and retie them afterwards.
5384 if (NewVDstIn != -1) {
5385 int OldVDstIn = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst_in);
5386 Inst.untieRegOperand(OldVDstIn);
5387 }
5388
5389 Inst.removeOperand(OldVAddrIdx);
5390
5391 if (NewVDstIn != -1) {
5392 int NewVDst = AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::vdst);
5393 Inst.tieOperands(NewVDst, NewVDstIn);
5394 }
5395 }
5396 }
5397
5398 if (VAddrDef && MRI.use_nodbg_empty(VAddrDef->getOperand(0).getReg()))
5399 VAddrDef->eraseFromParent();
5400
5401 return true;
5402}
5403
5404// FIXME: Remove this when SelectionDAG is obsoleted.
5405void SIInstrInfo::legalizeOperandsFLAT(MachineRegisterInfo &MRI,
5406 MachineInstr &MI) const {
5407 if (!isSegmentSpecificFLAT(MI))
5408 return;
5409
5410 // Fixup SGPR operands in VGPRs. We only select these when the DAG divergence
5411 // analysis thinks they are uniform, so a readfirstlane should be valid.
5412 MachineOperand *SAddr = getNamedOperand(MI, AMDGPU::OpName::saddr);
5413 if (!SAddr || RI.isSGPRClass(MRI.getRegClass(SAddr->getReg())))
5414 return;
5415
5416 if (moveFlatAddrToVGPR(MI))
5417 return;
5418
5419 Register ToSGPR = readlaneVGPRToSGPR(SAddr->getReg(), MI, MRI);
5420 SAddr->setReg(ToSGPR);
5421}
5422
5423void SIInstrInfo::legalizeGenericOperand(MachineBasicBlock &InsertMBB,
5424 MachineBasicBlock::iterator I,
5425 const TargetRegisterClass *DstRC,
5426 MachineOperand &Op,
5427 MachineRegisterInfo &MRI,
5428 const DebugLoc &DL) const {
5429 Register OpReg = Op.getReg();
5430 unsigned OpSubReg = Op.getSubReg();
5431
5432 const TargetRegisterClass *OpRC = RI.getSubClassWithSubReg(
5433 RI.getRegClassForReg(MRI, OpReg), OpSubReg);
5434
5435 // Check if operand is already the correct register class.
5436 if (DstRC == OpRC)
5437 return;
5438
5439 Register DstReg = MRI.createVirtualRegister(DstRC);
5440 auto Copy = BuildMI(InsertMBB, I, DL, get(AMDGPU::COPY), DstReg).add(Op);
5441
5442 Op.setReg(DstReg);
5443 Op.setSubReg(0);
5444
5445 MachineInstr *Def = MRI.getVRegDef(OpReg);
5446 if (!Def)
5447 return;
5448
5449 // Try to eliminate the copy if it is copying an immediate value.
5450 if (Def->isMoveImmediate() && DstRC != &AMDGPU::VReg_1RegClass)
5451 FoldImmediate(*Copy, *Def, OpReg, &MRI);
5452
5453 bool ImpDef = Def->isImplicitDef();
5454 while (!ImpDef && Def && Def->isCopy()) {
5455 if (Def->getOperand(1).getReg().isPhysical())
5456 break;
5457 Def = MRI.getUniqueVRegDef(Def->getOperand(1).getReg());
5458 ImpDef = Def && Def->isImplicitDef();
5459 }
5460 if (!RI.isSGPRClass(DstRC) && !Copy->readsRegister(AMDGPU::EXEC, &RI) &&
5461 !ImpDef)
5462 Copy.addReg(AMDGPU::EXEC, RegState::Implicit);
5463}
5464
5465// Emit the actual waterfall loop, executing the wrapped instruction for each
5466// unique value of \p Rsrc across all lanes. In the best case we execute 1
5467// iteration, in the worst case we execute 64 (once per lane).
5468static void
5469emitLoadSRsrcFromVGPRLoop(const SIInstrInfo &TII, MachineRegisterInfo &MRI,
5470 MachineBasicBlock &OrigBB, MachineBasicBlock &LoopBB,
5471 MachineBasicBlock &BodyBB, const DebugLoc &DL,
5472 MachineOperand &Rsrc) {
5473 MachineFunction &MF = *OrigBB.getParent();
5474 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
5475 const SIRegisterInfo *TRI = ST.getRegisterInfo();
5476 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
5477 unsigned SaveExecOpc =
5478 ST.isWave32() ? AMDGPU::S_AND_SAVEEXEC_B32 : AMDGPU::S_AND_SAVEEXEC_B64;
5479 unsigned XorTermOpc =
5480 ST.isWave32() ? AMDGPU::S_XOR_B32_term : AMDGPU::S_XOR_B64_term;
5481 unsigned AndOpc =
5482 ST.isWave32() ? AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64;
5483 const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
5484
5485 MachineBasicBlock::iterator I = LoopBB.begin();
5486
5487 SmallVector<Register, 8> ReadlanePieces;
5488 Register CondReg = AMDGPU::NoRegister;
5489
5490 Register VRsrc = Rsrc.getReg();
5491 unsigned VRsrcUndef = getUndefRegState(Rsrc.isUndef());
5492
5493 unsigned RegSize = TRI->getRegSizeInBits(Rsrc.getReg(), MRI);
5494 unsigned NumSubRegs = RegSize / 32;
5495 assert(NumSubRegs % 2 == 0 && NumSubRegs <= 32 && "Unhandled register size");
5496
5497 for (unsigned Idx = 0; Idx < NumSubRegs; Idx += 2) {
5498
5499 Register CurRegLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
5500 Register CurRegHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
5501
5502 // Read the next variant <- also loop target.
5503 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), CurRegLo)
5504 .addReg(VRsrc, VRsrcUndef, TRI->getSubRegFromChannel(Idx));
5505
5506 // Read the high half of the next variant.
5507 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), CurRegHi)
5508 .addReg(VRsrc, VRsrcUndef, TRI->getSubRegFromChannel(Idx + 1));
5509
5510 ReadlanePieces.push_back(CurRegLo);
5511 ReadlanePieces.push_back(CurRegHi);
5512
5513 // Comparison is to be done as 64-bit.
5514 Register CurReg = MRI.createVirtualRegister(&AMDGPU::SGPR_64RegClass);
5515 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), CurReg)
5516 .addReg(CurRegLo)
5517 .addImm(AMDGPU::sub0)
5518 .addReg(CurRegHi)
5519 .addImm(AMDGPU::sub1);
5520
5521 Register NewCondReg = MRI.createVirtualRegister(BoolXExecRC);
5522 auto Cmp =
5523 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U64_e64), NewCondReg)
5524 .addReg(CurReg);
5525 if (NumSubRegs <= 2)
5526 Cmp.addReg(VRsrc);
5527 else
5528 Cmp.addReg(VRsrc, VRsrcUndef, TRI->getSubRegFromChannel(Idx, 2));
5529
5530 // Combine the comparison results with AND.
5531 if (CondReg == AMDGPU::NoRegister) // First.
5532 CondReg = NewCondReg;
5533 else { // If not the first, we create an AND.
5534 Register AndReg = MRI.createVirtualRegister(BoolXExecRC);
5535 BuildMI(LoopBB, I, DL, TII.get(AndOpc), AndReg)
5536 .addReg(CondReg)
5537 .addReg(NewCondReg);
5538 CondReg = AndReg;
5539 }
5540 } // End for loop.
5541
5542 auto SRsrcRC = TRI->getEquivalentSGPRClass(MRI.getRegClass(VRsrc));
5543 Register SRsrc = MRI.createVirtualRegister(SRsrcRC);
5544
5545 // Build scalar Rsrc.
5546 auto Merge = BuildMI(LoopBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), SRsrc);
5547 unsigned Channel = 0;
5548 for (Register Piece : ReadlanePieces) {
5549 Merge.addReg(Piece)
5550 .addImm(TRI->getSubRegFromChannel(Channel++));
5551 }
5552
5553 // Update Rsrc operand to use the SGPR Rsrc.
5554 Rsrc.setReg(SRsrc);
5555 Rsrc.setIsKill(true);
5556
5557 Register SaveExec = MRI.createVirtualRegister(BoolXExecRC);
5558 MRI.setSimpleHint(SaveExec, CondReg);
5559
5560 // Update EXEC to matching lanes, saving original to SaveExec.
5561 BuildMI(LoopBB, I, DL, TII.get(SaveExecOpc), SaveExec)
5562 .addReg(CondReg, RegState::Kill);
5563
5564 // The original instruction is here; we insert the terminators after it.
5565 I = BodyBB.end();
5566
5567 // Update EXEC, switch all done bits to 0 and all todo bits to 1.
5568 BuildMI(BodyBB, I, DL, TII.get(XorTermOpc), Exec)
5569 .addReg(Exec)
5570 .addReg(SaveExec);
5571
5572 BuildMI(BodyBB, I, DL, TII.get(AMDGPU::SI_WATERFALL_LOOP)).addMBB(&LoopBB);
5573}
5574
5575// Build a waterfall loop around \p MI, replacing the VGPR \p Rsrc register
5576// with SGPRs by iterating over all unique values across all lanes.
5577 // Returns the basic block that now contains \p MI (the body block of the loop).
5578static MachineBasicBlock *
5579loadSRsrcFromVGPR(const SIInstrInfo &TII, MachineInstr &MI,
5580 MachineOperand &Rsrc, MachineDominatorTree *MDT,
5581 MachineBasicBlock::iterator Begin = nullptr,
5582 MachineBasicBlock::iterator End = nullptr) {
5583 MachineBasicBlock &MBB = *MI.getParent();
5584 MachineFunction &MF = *MBB.getParent();
5585 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
5586 const SIRegisterInfo *TRI = ST.getRegisterInfo();
5587 MachineRegisterInfo &MRI = MF.getRegInfo();
5588 if (!Begin.isValid())
5589 Begin = &MI;
5590 if (!End.isValid()) {
5591 End = &MI;
5592 ++End;
5593 }
5594 const DebugLoc &DL = MI.getDebugLoc();
5595 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
5596 unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
5597 const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
5598
5599 Register SaveExec = MRI.createVirtualRegister(BoolXExecRC);
5600
5601 // Save the EXEC mask
5602 BuildMI(MBB, Begin, DL, TII.get(MovExecOpc), SaveExec).addReg(Exec);
5603
5604 // Killed uses in the instruction we are waterfalling around will be
5605 // incorrect due to the added control-flow.
5606 MachineBasicBlock::iterator AfterMI = MI;
5607 ++AfterMI;
5608 for (auto I = Begin; I != AfterMI; I++) {
5609 for (auto &MO : I->uses()) {
5610 if (MO.isReg() && MO.isUse()) {
5611 MRI.clearKillFlags(MO.getReg());
5612 }
5613 }
5614 }
5615
5616 // To insert the loop we need to split the block. Move everything after this
5617 // point to a new block, and insert a new empty block between the two.
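// The resulting control flow is (illustrative):
//
//   MBB --> LoopBB --> BodyBB --> RemainderBB --> (old successors of MBB)
//              ^          |
//              +----------+  (back edge while unprocessed lanes remain)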
5618 MachineBasicBlock *LoopBB = MF.CreateMachineBasicBlock();
5619 MachineBasicBlock *BodyBB = MF.CreateMachineBasicBlock();
5620 MachineBasicBlock *RemainderBB = MF.CreateMachineBasicBlock();
5621 MachineFunction::iterator MBBI(MBB);
5622 ++MBBI;
5623
5624 MF.insert(MBBI, LoopBB);
5625 MF.insert(MBBI, BodyBB);
5626 MF.insert(MBBI, RemainderBB);
5627
5628 LoopBB->addSuccessor(BodyBB);
5629 BodyBB->addSuccessor(LoopBB);
5630 BodyBB->addSuccessor(RemainderBB);
5631
5632 // Move the instructions in the range [Begin, End) into BodyBB, and the
5633 // remainder of the block to RemainderBB.
5634 RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
5635 RemainderBB->splice(RemainderBB->begin(), &MBB, End, MBB.end());
5636 BodyBB->splice(BodyBB->begin(), &MBB, Begin, MBB.end());
5637
5638 MBB.addSuccessor(LoopBB);
5639
5640 // Update dominators. We know that MBB immediately dominates LoopBB, that
5641 // LoopBB immediately dominates BodyBB, and BodyBB immediately dominates
5642 // RemainderBB. RemainderBB immediately dominates all of the successors
5643 // transferred to it from MBB that MBB used to properly dominate.
5644 if (MDT) {
5645 MDT->addNewBlock(LoopBB, &MBB);
5646 MDT->addNewBlock(BodyBB, LoopBB);
5647 MDT->addNewBlock(RemainderBB, BodyBB);
5648 for (auto &Succ : RemainderBB->successors()) {
5649 if (MDT->properlyDominates(&MBB, Succ)) {
5650 MDT->changeImmediateDominator(Succ, RemainderBB);
5651 }
5652 }
5653 }
5654
5655 emitLoadSRsrcFromVGPRLoop(TII, MRI, MBB, *LoopBB, *BodyBB, DL, Rsrc);
5656
5657 // Restore the EXEC mask
5658 MachineBasicBlock::iterator First = RemainderBB->begin();
5659 BuildMI(*RemainderBB, First, DL, TII.get(MovExecOpc), Exec).addReg(SaveExec);
5660 return BodyBB;
5661}
5662
5663// Extract pointer from Rsrc and return a zero-value Rsrc replacement.
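// Illustratively, the replacement keeps only the default data format: the
// 64-bit base pointer is extracted and returned separately, and the new V#
// is assembled as
//
//   NewSRsrc.sub0_sub1 = 0                        ; base pointer zeroed out
//   NewSRsrc.sub2      = RSRC_DATA_FORMAT[31:0]
//   NewSRsrc.sub3      = RSRC_DATA_FORMAT[63:32]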
5664static std::tuple<unsigned, unsigned>
5665extractRsrcPtr(const SIInstrInfo &TII, MachineInstr &MI, MachineOperand &Rsrc) {
5666 MachineBasicBlock &MBB = *MI.getParent();
5667 MachineFunction &MF = *MBB.getParent();
5668 MachineRegisterInfo &MRI = MF.getRegInfo();
5669
5670 // Extract the ptr from the resource descriptor.
5671 unsigned RsrcPtr =
5672 TII.buildExtractSubReg(MI, MRI, Rsrc, &AMDGPU::VReg_128RegClass,
5673 AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass);
5674
5675 // Create an empty resource descriptor
5676 Register Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
5677 Register SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
5678 Register SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
5679 Register NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
5680 uint64_t RsrcDataFormat = TII.getDefaultRsrcDataFormat();
5681
5682 // Zero64 = 0
5683 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B64), Zero64)
5684 .addImm(0);
5685
5686 // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
5687 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatLo)
5688 .addImm(RsrcDataFormat & 0xFFFFFFFF);
5689
5690 // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
5691 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatHi)
5692 .addImm(RsrcDataFormat >> 32);
5693
5694 // NewSRsrc = {Zero64, SRsrcFormat}
5695 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::REG_SEQUENCE), NewSRsrc)
5696 .addReg(Zero64)
5697 .addImm(AMDGPU::sub0_sub1)
5698 .addReg(SRsrcFormatLo)
5699 .addImm(AMDGPU::sub2)
5700 .addReg(SRsrcFormatHi)
5701 .addImm(AMDGPU::sub3);
5702
5703 return std::make_tuple(RsrcPtr, NewSRsrc);
5704}
5705
5706MachineBasicBlock *
5707SIInstrInfo::legalizeOperands(MachineInstr &MI,
5708 MachineDominatorTree *MDT) const {
5709 MachineFunction &MF = *MI.getParent()->getParent();
5710 MachineRegisterInfo &MRI = MF.getRegInfo();
5711 MachineBasicBlock *CreatedBB = nullptr;
5712
5713 // Legalize VOP2
5714 if (isVOP2(MI) || isVOPC(MI)) {
5715 legalizeOperandsVOP2(MRI, MI);
5716 return CreatedBB;
5717 }
5718
5719 // Legalize VOP3
5720 if (isVOP3(MI)) {
5721 legalizeOperandsVOP3(MRI, MI);
5722 return CreatedBB;
5723 }
5724
5725 // Legalize SMRD
5726 if (isSMRD(MI)) {
5727 legalizeOperandsSMRD(MRI, MI);
5728 return CreatedBB;
5729 }
5730
5731 // Legalize FLAT
5732 if (isFLAT(MI)) {
5733 legalizeOperandsFLAT(MRI, MI);
5734 return CreatedBB;
5735 }
5736
5737 // Legalize REG_SEQUENCE and PHI
5738 // The register class of the operands must be the same type as the register
5739 // class of the output.
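// For example (illustrative, hypothetical registers): a PHI such as
//   %r:vgpr_32 = PHI %a:sgpr_32, %bb.0, %b:vgpr_32, %bb.1
// has %a copied into a VGPR at the end of %bb.0 so that every input agrees
// with the vector class chosen for the result.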
5740 if (MI.getOpcode() == AMDGPU::PHI) {
5741 const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr;
5742 for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
5743 if (!MI.getOperand(i).isReg() || !MI.getOperand(i).getReg().isVirtual())
5744 continue;
5745 const TargetRegisterClass *OpRC =
5746 MRI.getRegClass(MI.getOperand(i).getReg());
5747 if (RI.hasVectorRegisters(OpRC)) {
5748 VRC = OpRC;
5749 } else {
5750 SRC = OpRC;
5751 }
5752 }
5753
5754 // If any of the operands are VGPR registers, then they must all be
5755 // VGPRs, otherwise we will create illegal VGPR->SGPR copies when
5756 // legalizing them.
5757 if (VRC || !RI.isSGPRClass(getOpRegClass(MI, 0))) {
5758 if (!VRC) {
5759 assert(SRC);
5760 if (getOpRegClass(MI, 0) == &AMDGPU::VReg_1RegClass) {
5761 VRC = &AMDGPU::VReg_1RegClass;
5762 } else
5763 VRC = RI.isAGPRClass(getOpRegClass(MI, 0))
5764 ? RI.getEquivalentAGPRClass(SRC)
5765 : RI.getEquivalentVGPRClass(SRC);
5766 } else {
5767 VRC = RI.isAGPRClass(getOpRegClass(MI, 0))
5768 ? RI.getEquivalentAGPRClass(VRC)
5769 : RI.getEquivalentVGPRClass(VRC);
5770 }
5771 RC = VRC;
5772 } else {
5773 RC = SRC;
5774 }
5775
5776 // Update all the operands so they have the same type.
5777 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
5778 MachineOperand &Op = MI.getOperand(I);
5779 if (!Op.isReg() || !Op.getReg().isVirtual())
5780 continue;
5781
5782 // MI is a PHI instruction.
5783 MachineBasicBlock *InsertBB = MI.getOperand(I + 1).getMBB();
5784 MachineBasicBlock::iterator Insert = InsertBB->getFirstTerminator();
5785
5786 // Avoid creating no-op copies with the same src and dst reg class. These
5787 // confuse some of the machine passes.
5788 legalizeGenericOperand(*InsertBB, Insert, RC, Op, MRI, MI.getDebugLoc());
5789 }
5790 }
5791
5792 // REG_SEQUENCE doesn't really require operand legalization, but if one has a
5793 // VGPR dest type and SGPR sources, insert copies so all operands are
5794 // VGPRs. This seems to help operand folding / the register coalescer.
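// For example (illustrative): in
//   %d:vreg_128 = REG_SEQUENCE %s:sreg_64, %subreg.sub0_sub1,
//                              %v0:vgpr_32, %subreg.sub2,
//                              %v1:vgpr_32, %subreg.sub3
// the SGPR input %s is first copied into a vreg_64 so that every source
// operand is a VGPR.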
5795 if (MI.getOpcode() == AMDGPU::REG_SEQUENCE) {
5796 MachineBasicBlock *MBB = MI.getParent();
5797 const TargetRegisterClass *DstRC = getOpRegClass(MI, 0);
5798 if (RI.hasVGPRs(DstRC)) {
5799 // Update all the operands so they are VGPR register classes. These may
5800 // not be the same register class because REG_SEQUENCE supports mixing
5801 // subregister index types e.g. sub0_sub1 + sub2 + sub3
5802 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
5803 MachineOperand &Op = MI.getOperand(I);
5804 if (!Op.isReg() || !Op.getReg().isVirtual())
5805 continue;
5806
5807 const TargetRegisterClass *OpRC = MRI.getRegClass(Op.getReg());
5808 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(OpRC);
5809 if (VRC == OpRC)
5810 continue;
5811
5812 legalizeGenericOperand(*MBB, MI, VRC, Op, MRI, MI.getDebugLoc());
5813 Op.setIsKill();
5814 }
5815 }
5816
5817 return CreatedBB;
5818 }
5819
5820 // Legalize INSERT_SUBREG
5821 // src0 must have the same register class as dst
5822 if (MI.getOpcode() == AMDGPU::INSERT_SUBREG) {
5823 Register Dst = MI.getOperand(0).getReg();
5824 Register Src0 = MI.getOperand(1).getReg();
5825 const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
5826 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0);
5827 if (DstRC != Src0RC) {
5828 MachineBasicBlock *MBB = MI.getParent();
5829 MachineOperand &Op = MI.getOperand(1);
5830 legalizeGenericOperand(*MBB, MI, DstRC, Op, MRI, MI.getDebugLoc());
5831 }
5832 return CreatedBB;
5833 }
5834
5835 // Legalize SI_INIT_M0
5836 if (MI.getOpcode() == AMDGPU::SI_INIT_M0) {
5837 MachineOperand &Src = MI.getOperand(0);
5838 if (Src.isReg() && RI.hasVectorRegisters(MRI.getRegClass(Src.getReg())))
5839 Src.setReg(readlaneVGPRToSGPR(Src.getReg(), MI, MRI));
5840 return CreatedBB;
5841 }
5842
5843 // Legalize MIMG and MUBUF/MTBUF for shaders.
5844 //
5845 // Shaders only generate MUBUF/MTBUF instructions via intrinsics or via
5846 // scratch memory access. In both cases, the legalization never involves
5847 // conversion to the addr64 form.
5848 if (isMIMG(MI) || (AMDGPU::isGraphics(MF.getFunction().getCallingConv()) &&
5849 (isMUBUF(MI) || isMTBUF(MI)))) {
5850 MachineOperand *SRsrc = getNamedOperand(MI, AMDGPU::OpName::srsrc);
5851 if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg())))
5852 CreatedBB = loadSRsrcFromVGPR(*this, MI, *SRsrc, MDT);
5853
5854 MachineOperand *SSamp = getNamedOperand(MI, AMDGPU::OpName::ssamp);
5855 if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg())))
5856 CreatedBB = loadSRsrcFromVGPR(*this, MI, *SSamp, MDT);
5857
5858 return CreatedBB;
5859 }
5860
5861 // Legalize SI_CALL
5862 if (MI.getOpcode() == AMDGPU::SI_CALL_ISEL) {
5863 MachineOperand *Dest = &MI.getOperand(0);
5864 if (!RI.isSGPRClass(MRI.getRegClass(Dest->getReg()))) {
5865 // Everything between ADJCALLSTACKUP and ADJCALLSTACKDOWN must be moved
5866 // into the loop block, together with the copies from and to physical
5867 // registers and the copies of the return value that follow the call.
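// The range moved into the loop has roughly this shape (illustrative):
//
//   Start: ADJCALLSTACKUP ...
//          COPY %virt_arg to $phys_arg ...   ; argument setup copies
//          SI_CALL_ISEL %dest, ...
//          ADJCALLSTACKDOWN ...
//          %ret = COPY $phys_ret             ; return-value copies
//   End:   first instruction past the last such copy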
5868 unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
5869 unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();
5870
5871 // Also move the copies to physical registers into the loop block
5872 MachineBasicBlock &MBB = *MI.getParent();
5873 MachineBasicBlock::iterator Start(&MI);
5874 while (Start->getOpcode() != FrameSetupOpcode)
5875 --Start;
5876 MachineBasicBlock::iterator End(&MI);
5877 while (End->getOpcode() != FrameDestroyOpcode)
5878 ++End;
5879 // Also include following copies of the return value
5880 ++End;
5881 while (End != MBB.end() && End->isCopy() && End->getOperand(1).isReg() &&
5882 MI.definesRegister(End->getOperand(1).getReg()))
5883 ++End;
5884 CreatedBB = loadSRsrcFromVGPR(*this, MI, *Dest, MDT, Start, End);
5885 }
5886 }
5887
5888 // Legalize MUBUF* instructions.
5889 int RsrcIdx =
5890 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
5891 if (RsrcIdx != -1) {
5892 // We have an MUBUF instruction
5893 MachineOperand *Rsrc = &MI.getOperand(RsrcIdx);
5894 unsigned RsrcRC = get(MI.getOpcode()).OpInfo[RsrcIdx].RegClass;
5895 if (RI.getCommonSubClass(MRI.getRegClass(Rsrc->getReg()),
5896 RI.getRegClass(RsrcRC))) {
5897 // The operands are legal.
5898 // FIXME: We may need to legalize operands besides srsrc.
5899 return CreatedBB;
5900 }
5901
5902 // Legalize a VGPR Rsrc.
5903 //
5904 // If the instruction is _ADDR64, we can avoid a waterfall by extracting
5905 // the base pointer from the VGPR Rsrc, adding it to the VAddr, then using
5906 // a zero-value SRsrc.
5907 //
5908 // If the instruction is _OFFSET (both idxen and offen disabled), and we
5909 // support ADDR64 instructions, we can convert to ADDR64 and do the same as
5910 // above.
5911 //
5912 // Otherwise we are on non-ADDR64 hardware, and/or we have
5913 // idxen/offen/bothen and we fall back to a waterfall loop.
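// Illustrative outcome of the ADDR64 paths below (register names
// hypothetical):
//
//   NewVAddr = VAddr + RsrcPtr          ; 64-bit add via V_ADD_CO/V_ADDC
//   NewSRsrc = { 0, RSRC_DATA_FORMAT }  ; zero base, default format words
//
// The access becomes relative to the VGPR address, and the descriptor no
// longer carries a divergent base pointer.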
5914
5915 MachineBasicBlock &MBB = *MI.getParent();
5916
5917 MachineOperand *VAddr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
5918 if (VAddr && AMDGPU::getIfAddr64Inst(MI.getOpcode()) != -1) {
5919 // This is already an ADDR64 instruction so we need to add the pointer
5920 // extracted from the resource descriptor to the current value of VAddr.
5921 Register NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5922 Register NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5923 Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
5924
5925 const auto *BoolXExecRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
5926 Register CondReg0 = MRI.createVirtualRegister(BoolXExecRC);
5927 Register CondReg1 = MRI.createVirtualRegister(BoolXExecRC);
5928
5929 unsigned RsrcPtr, NewSRsrc;
5930 std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);
5931
5932 // NewVaddrLo = RsrcPtr:sub0 + VAddr:sub0
5933 const DebugLoc &DL = MI.getDebugLoc();
5934 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_CO_U32_e64), NewVAddrLo)
5935 .addDef(CondReg0)
5936 .addReg(RsrcPtr, 0, AMDGPU::sub0)
5937 .addReg(VAddr->getReg(), 0, AMDGPU::sub0)
5938 .addImm(0);
5939
5940 // NewVaddrHi = RsrcPtr:sub1 + VAddr:sub1
5941 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e64), NewVAddrHi)
5942 .addDef(CondReg1, RegState::Dead)
5943 .addReg(RsrcPtr, 0, AMDGPU::sub1)
5944 .addReg(VAddr->getReg(), 0, AMDGPU::sub1)
5945 .addReg(CondReg0, RegState::Kill)
5946 .addImm(0);
5947
5948 // NewVaddr = {NewVaddrHi, NewVaddrLo}
5949 BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
5950 .addReg(NewVAddrLo)
5951 .addImm(AMDGPU::sub0)
5952 .addReg(NewVAddrHi)
5953 .addImm(AMDGPU::sub1);
5954
5955 VAddr->setReg(NewVAddr);
5956 Rsrc->setReg(NewSRsrc);
5957 } else if (!VAddr && ST.hasAddr64()) {
5958 // This instruction is the _OFFSET variant, so we need to convert it to
5959 // ADDR64.
5960 assert(ST.getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS &&
5961 "FIXME: Need to emit flat atomics here");
5962
5963 unsigned RsrcPtr, NewSRsrc;
5964 std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);
5965
5966 Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
5967 MachineOperand *VData = getNamedOperand(MI, AMDGPU::OpName::vdata);
5968 MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset);
5969 MachineOperand *SOffset = getNamedOperand(MI, AMDGPU::OpName::soffset);
5970 unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI.getOpcode());
5971
5972 // Atomics with return have an additional tied operand and are
5973 // missing some of the special bits.
5974 MachineOperand *VDataIn = getNamedOperand(MI, AMDGPU::OpName::vdata_in);
5975 MachineInstr *Addr64;
5976
5977 if (!VDataIn) {
5978 // Regular buffer load / store.
5979 MachineInstrBuilder MIB =
5980 BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
5981 .add(*VData)
5982 .addReg(NewVAddr)
5983 .addReg(NewSRsrc)
5984 .add(*SOffset)
5985 .add(*Offset);
5986
5987 if (const MachineOperand *CPol =
5988 getNamedOperand(MI, AMDGPU::OpName::cpol)) {
5989 MIB.addImm(CPol->getImm());
5990 }
5991
5992 if (const MachineOperand *TFE =
5993 getNamedOperand(MI, AMDGPU::OpName::tfe)) {
5994 MIB.addImm(TFE->getImm());
5995 }
5996
5997 MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::swz));
5998
5999 MIB.cloneMemRefs(MI);
6000 Addr64 = MIB;
6001 } else {