Bug Summary

File: llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
Warning: line 2126, column 15
Called C++ object pointer is uninitialized

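This diagnostic comes from the analyzer's core checkers enabled in the command line below: a member function is called through a pointer that is never assigned on at least one path the analyzer explored. The following is a minimal, hypothetical C++ sketch of that defect class; the names are illustrative only and are not taken from SIInstrInfo.cpp.

#include <cstdio>

struct Operand {
  int getImm() const { return Imm; }
  int Imm = 4;
};

// Hypothetical reduction of the flagged pattern: Op is assigned only when
// Idx >= 0, so on the Idx < 0 path the call below goes through an
// uninitialized pointer -- the situation the analyzer reports.
int widthFromOperand(const Operand &Storage, int Idx) {
  const Operand *Op;   // declared, never given an initial value
  if (Idx >= 0)
    Op = &Storage;
  return Op->getImm(); // uninitialized when Idx < 0
}

int main() {
  Operand O;
  std::printf("%d\n", widthFromOperand(O, 0)); // fine: the pointer was assigned
  // widthFromOperand(O, -1) would exercise the path the checker flags.
  return 0;
}

The usual fix for this class of report is to initialize the pointer at its declaration or to bail out before the call on the path where nothing was assigned to it.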
Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name SIInstrInfo.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220118101002+ec47dba1c8a2/build-llvm/tools/clang/stage2-bins -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Target/AMDGPU -I /build/llvm-toolchain-snapshot-14~++20220118101002+ec47dba1c8a2/llvm/lib/Target/AMDGPU -I include -I /build/llvm-toolchain-snapshot-14~++20220118101002+ec47dba1c8a2/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220118101002+ec47dba1c8a2/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220118101002+ec47dba1c8a2/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220118101002+ec47dba1c8a2/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220118101002+ec47dba1c8a2/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220118101002+ec47dba1c8a2/build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220118101002+ec47dba1c8a2/build-llvm/tools/clang/stage2-bins=build-llvm/tools/clang/stage2-bins -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220118101002+ec47dba1c8a2/= -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o 
/tmp/scan-build-2022-01-19-001817-16337-1 -x c++ /build/llvm-toolchain-snapshot-14~++20220118101002+ec47dba1c8a2/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp

/build/llvm-toolchain-snapshot-14~++20220118101002+ec47dba1c8a2/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp

1//===- SIInstrInfo.cpp - SI Instruction Information ----------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// SI Implementation of TargetInstrInfo.
11//
12//===----------------------------------------------------------------------===//
13
14#include "SIInstrInfo.h"
15#include "AMDGPU.h"
16#include "AMDGPUInstrInfo.h"
17#include "GCNHazardRecognizer.h"
18#include "GCNSubtarget.h"
19#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
20#include "SIMachineFunctionInfo.h"
21#include "llvm/Analysis/ValueTracking.h"
22#include "llvm/CodeGen/LiveIntervals.h"
23#include "llvm/CodeGen/LiveVariables.h"
24#include "llvm/CodeGen/MachineDominators.h"
25#include "llvm/CodeGen/MachineScheduler.h"
26#include "llvm/CodeGen/RegisterScavenging.h"
27#include "llvm/CodeGen/ScheduleDAG.h"
28#include "llvm/IR/DiagnosticInfo.h"
29#include "llvm/IR/IntrinsicsAMDGPU.h"
30#include "llvm/MC/MCContext.h"
31#include "llvm/Support/CommandLine.h"
32#include "llvm/Target/TargetMachine.h"
33
34using namespace llvm;
35
36#define DEBUG_TYPE "si-instr-info"
37
38#define GET_INSTRINFO_CTOR_DTOR
39#include "AMDGPUGenInstrInfo.inc"
40
41namespace llvm {
42
43class AAResults;
44
45namespace AMDGPU {
46#define GET_D16ImageDimIntrinsics_IMPL
47#define GET_ImageDimIntrinsicTable_IMPL
48#define GET_RsrcIntrinsics_IMPL
49#include "AMDGPUGenSearchableTables.inc"
50}
51}
52
53
54// Must be at least 4 to be able to branch over minimum unconditional branch
55// code. This is only for making it possible to write reasonably small tests for
56// long branches.
57static cl::opt<unsigned>
58BranchOffsetBits("amdgpu-s-branch-bits", cl::ReallyHidden, cl::init(16),
59 cl::desc("Restrict range of branch instructions (DEBUG)"));
60
61static cl::opt<bool> Fix16BitCopies(
62 "amdgpu-fix-16-bit-physreg-copies",
63 cl::desc("Fix copies between 32 and 16 bit registers by extending to 32 bit"),
64 cl::init(true),
65 cl::ReallyHidden);
66
67SIInstrInfo::SIInstrInfo(const GCNSubtarget &ST)
68 : AMDGPUGenInstrInfo(AMDGPU::ADJCALLSTACKUP, AMDGPU::ADJCALLSTACKDOWN),
69 RI(ST), ST(ST) {
70 SchedModel.init(&ST);
71}
72
73//===----------------------------------------------------------------------===//
74// TargetInstrInfo callbacks
75//===----------------------------------------------------------------------===//
76
77static unsigned getNumOperandsNoGlue(SDNode *Node) {
78 unsigned N = Node->getNumOperands();
79 while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
80 --N;
81 return N;
82}
83
84/// Returns true if both nodes have the same value for the given
85/// operand \p Op, or if both nodes do not have this operand.
86static bool nodesHaveSameOperandValue(SDNode *N0, SDNode* N1, unsigned OpName) {
87 unsigned Opc0 = N0->getMachineOpcode();
88 unsigned Opc1 = N1->getMachineOpcode();
89
90 int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName);
91 int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName);
92
93 if (Op0Idx == -1 && Op1Idx == -1)
94 return true;
95
96
97 if ((Op0Idx == -1 && Op1Idx != -1) ||
98 (Op1Idx == -1 && Op0Idx != -1))
99 return false;
100
101 // getNamedOperandIdx returns the index for the MachineInstr's operands,
102 // which includes the result as the first operand. We are indexing into the
103 // MachineSDNode's operands, so we need to skip the result operand to get
104 // the real index.
105 --Op0Idx;
106 --Op1Idx;
107
108 return N0->getOperand(Op0Idx) == N1->getOperand(Op1Idx);
109}
110
111bool SIInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
112 AAResults *AA) const {
113 if (isVOP1(MI) || isVOP2(MI) || isVOP3(MI) || isSDWA(MI) || isSALU(MI)) {
114 // Normally VALU use of exec would block the rematerialization, but that
115 // is OK in this case to have an implicit exec read as all VALU do.
116 // We really want all of the generic logic for this except for this.
117
118 // Another potential implicit use is mode register. The core logic of
119 // the RA will not attempt rematerialization if mode is set anywhere
120 // in the function, otherwise it is safe since mode is not changed.
121
122 // There is a difference from the generic method, which does not allow
123 // rematerialization if there are virtual register uses. We allow this,
124 // therefore this method includes SOP instructions as well.
125 return !MI.hasImplicitDef() &&
126 MI.getNumImplicitOperands() == MI.getDesc().getNumImplicitUses() &&
127 !MI.mayRaiseFPException();
128 }
129
130 return false;
131}
132
133bool SIInstrInfo::isIgnorableUse(const MachineOperand &MO) const {
134 // Any implicit use of exec by VALU is not a real register read.
135 return MO.getReg() == AMDGPU::EXEC && MO.isImplicit() &&
136 isVALU(*MO.getParent());
137}
138
139bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1,
140 int64_t &Offset0,
141 int64_t &Offset1) const {
142 if (!Load0->isMachineOpcode() || !Load1->isMachineOpcode())
143 return false;
144
145 unsigned Opc0 = Load0->getMachineOpcode();
146 unsigned Opc1 = Load1->getMachineOpcode();
147
148 // Make sure both are actually loads.
149 if (!get(Opc0).mayLoad() || !get(Opc1).mayLoad())
150 return false;
151
152 if (isDS(Opc0) && isDS(Opc1)) {
153
154 // FIXME: Handle this case:
155 if (getNumOperandsNoGlue(Load0) != getNumOperandsNoGlue(Load1))
156 return false;
157
158 // Check base reg.
159 if (Load0->getOperand(0) != Load1->getOperand(0))
160 return false;
161
162 // Skip read2 / write2 variants for simplicity.
163 // TODO: We should report true if the used offsets are adjacent (excluded
164 // st64 versions).
165 int Offset0Idx = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
166 int Offset1Idx = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);
167 if (Offset0Idx == -1 || Offset1Idx == -1)
168 return false;
169
170 // XXX - be careful of dataless loads
171 // getNamedOperandIdx returns the index for MachineInstrs. Since they
172 // include the output in the operand list, but SDNodes don't, we need to
173 // subtract the index by one.
174 Offset0Idx -= get(Opc0).NumDefs;
175 Offset1Idx -= get(Opc1).NumDefs;
176 Offset0 = cast<ConstantSDNode>(Load0->getOperand(Offset0Idx))->getZExtValue();
177 Offset1 = cast<ConstantSDNode>(Load1->getOperand(Offset1Idx))->getZExtValue();
178 return true;
179 }
180
181 if (isSMRD(Opc0) && isSMRD(Opc1)) {
182 // Skip time and cache invalidation instructions.
183 if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::sbase) == -1 ||
184 AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::sbase) == -1)
185 return false;
186
187 assert(getNumOperandsNoGlue(Load0) == getNumOperandsNoGlue(Load1));
188
189 // Check base reg.
190 if (Load0->getOperand(0) != Load1->getOperand(0))
191 return false;
192
193 const ConstantSDNode *Load0Offset =
194 dyn_cast<ConstantSDNode>(Load0->getOperand(1));
195 const ConstantSDNode *Load1Offset =
196 dyn_cast<ConstantSDNode>(Load1->getOperand(1));
197
198 if (!Load0Offset || !Load1Offset)
199 return false;
200
201 Offset0 = Load0Offset->getZExtValue();
202 Offset1 = Load1Offset->getZExtValue();
203 return true;
204 }
205
206 // MUBUF and MTBUF can access the same addresses.
207 if ((isMUBUF(Opc0) || isMTBUF(Opc0)) && (isMUBUF(Opc1) || isMTBUF(Opc1))) {
208
209 // MUBUF and MTBUF have vaddr at different indices.
210 if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) ||
211 !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) ||
212 !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc))
213 return false;
214
215 int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
216 int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);
217
218 if (OffIdx0 == -1 || OffIdx1 == -1)
219 return false;
220
221 // getNamedOperandIdx returns the index for MachineInstrs. Since they
222 // include the output in the operand list, but SDNodes don't, we need to
223 // subtract the index by one.
224 OffIdx0 -= get(Opc0).NumDefs;
225 OffIdx1 -= get(Opc1).NumDefs;
226
227 SDValue Off0 = Load0->getOperand(OffIdx0);
228 SDValue Off1 = Load1->getOperand(OffIdx1);
229
230 // The offset might be a FrameIndexSDNode.
231 if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1))
232 return false;
233
234 Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue();
235 Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue();
236 return true;
237 }
238
239 return false;
240}
241
242static bool isStride64(unsigned Opc) {
243 switch (Opc) {
244 case AMDGPU::DS_READ2ST64_B32:
245 case AMDGPU::DS_READ2ST64_B64:
246 case AMDGPU::DS_WRITE2ST64_B32:
247 case AMDGPU::DS_WRITE2ST64_B64:
248 return true;
249 default:
250 return false;
251 }
252}
253
254bool SIInstrInfo::getMemOperandsWithOffsetWidth(
255 const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
256 int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
257 const TargetRegisterInfo *TRI) const {
258 if (!LdSt.mayLoadOrStore())
259 return false;
260
261 unsigned Opc = LdSt.getOpcode();
262 OffsetIsScalable = false;
263 const MachineOperand *BaseOp, *OffsetOp;
264 int DataOpIdx;
265
266 if (isDS(LdSt)) {
267 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::addr);
268 OffsetOp = getNamedOperand(LdSt, AMDGPU::OpName::offset);
269 if (OffsetOp) {
270 // Normal, single offset LDS instruction.
271 if (!BaseOp) {
272 // DS_CONSUME/DS_APPEND use M0 for the base address.
273 // TODO: find the implicit use operand for M0 and use that as BaseOp?
274 return false;
275 }
276 BaseOps.push_back(BaseOp);
277 Offset = OffsetOp->getImm();
278 // Get appropriate operand, and compute width accordingly.
279 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
280 if (DataOpIdx == -1)
281 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
282 Width = getOpSize(LdSt, DataOpIdx);
283 } else {
284 // The 2 offset instructions use offset0 and offset1 instead. We can treat
285 // these as a load with a single offset if the 2 offsets are consecutive.
286 // We will use this for some partially aligned loads.
287 const MachineOperand *Offset0Op =
288 getNamedOperand(LdSt, AMDGPU::OpName::offset0);
289 const MachineOperand *Offset1Op =
290 getNamedOperand(LdSt, AMDGPU::OpName::offset1);
291
292 unsigned Offset0 = Offset0Op->getImm();
293 unsigned Offset1 = Offset1Op->getImm();
294 if (Offset0 + 1 != Offset1)
295 return false;
296
297 // Each of these offsets is in element sized units, so we need to convert
298 // to bytes of the individual reads.
299
300 unsigned EltSize;
301 if (LdSt.mayLoad())
302 EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, 0)) / 16;
303 else {
304 assert(LdSt.mayStore());
305 int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
306 EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, Data0Idx)) / 8;
307 }
308
309 if (isStride64(Opc))
310 EltSize *= 64;
311
312 BaseOps.push_back(BaseOp);
313 Offset = EltSize * Offset0;
314 // Get appropriate operand(s), and compute width accordingly.
315 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
316 if (DataOpIdx == -1) {
317 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
318 Width = getOpSize(LdSt, DataOpIdx);
319 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data1);
320 Width += getOpSize(LdSt, DataOpIdx);
321 } else {
322 Width = getOpSize(LdSt, DataOpIdx);
323 }
324 }
325 return true;
326 }
327
328 if (isMUBUF(LdSt) || isMTBUF(LdSt)) {
329 const MachineOperand *RSrc = getNamedOperand(LdSt, AMDGPU::OpName::srsrc);
330 if (!RSrc) // e.g. BUFFER_WBINVL1_VOL
331 return false;
332 BaseOps.push_back(RSrc);
333 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
334 if (BaseOp && !BaseOp->isFI())
335 BaseOps.push_back(BaseOp);
336 const MachineOperand *OffsetImm =
337 getNamedOperand(LdSt, AMDGPU::OpName::offset);
338 Offset = OffsetImm->getImm();
339 const MachineOperand *SOffset =
340 getNamedOperand(LdSt, AMDGPU::OpName::soffset);
341 if (SOffset) {
342 if (SOffset->isReg())
343 BaseOps.push_back(SOffset);
344 else
345 Offset += SOffset->getImm();
346 }
347 // Get appropriate operand, and compute width accordingly.
348 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
349 if (DataOpIdx == -1)
350 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
351 Width = getOpSize(LdSt, DataOpIdx);
352 return true;
353 }
354
355 if (isMIMG(LdSt)) {
356 int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc);
357 BaseOps.push_back(&LdSt.getOperand(SRsrcIdx));
358 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0);
359 if (VAddr0Idx >= 0) {
360 // GFX10 possible NSA encoding.
361 for (int I = VAddr0Idx; I < SRsrcIdx; ++I)
362 BaseOps.push_back(&LdSt.getOperand(I));
363 } else {
364 BaseOps.push_back(getNamedOperand(LdSt, AMDGPU::OpName::vaddr));
365 }
366 Offset = 0;
367 // Get appropriate operand, and compute width accordingly.
368 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
369 Width = getOpSize(LdSt, DataOpIdx);
370 return true;
371 }
372
373 if (isSMRD(LdSt)) {
374 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::sbase);
375 if (!BaseOp) // e.g. S_MEMTIME
376 return false;
377 BaseOps.push_back(BaseOp);
378 OffsetOp = getNamedOperand(LdSt, AMDGPU::OpName::offset);
379 Offset = OffsetOp ? OffsetOp->getImm() : 0;
380 // Get appropriate operand, and compute width accordingly.
381 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::sdst);
382 Width = getOpSize(LdSt, DataOpIdx);
383 return true;
384 }
385
386 if (isFLAT(LdSt)) {
387 // Instructions have either vaddr or saddr or both or none.
388 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
389 if (BaseOp)
390 BaseOps.push_back(BaseOp);
391 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::saddr);
392 if (BaseOp)
393 BaseOps.push_back(BaseOp);
394 Offset = getNamedOperand(LdSt, AMDGPU::OpName::offset)->getImm();
395 // Get appropriate operand, and compute width accordingly.
396 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
397 if (DataOpIdx == -1)
398 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
399 Width = getOpSize(LdSt, DataOpIdx);
400 return true;
401 }
402
403 return false;
404}
405
406static bool memOpsHaveSameBasePtr(const MachineInstr &MI1,
407 ArrayRef<const MachineOperand *> BaseOps1,
408 const MachineInstr &MI2,
409 ArrayRef<const MachineOperand *> BaseOps2) {
410 // Only examine the first "base" operand of each instruction, on the
411 // assumption that it represents the real base address of the memory access.
412 // Other operands are typically offsets or indices from this base address.
413 if (BaseOps1.front()->isIdenticalTo(*BaseOps2.front()))
414 return true;
415
416 if (!MI1.hasOneMemOperand() || !MI2.hasOneMemOperand())
417 return false;
418
419 auto MO1 = *MI1.memoperands_begin();
420 auto MO2 = *MI2.memoperands_begin();
421 if (MO1->getAddrSpace() != MO2->getAddrSpace())
422 return false;
423
424 auto Base1 = MO1->getValue();
425 auto Base2 = MO2->getValue();
426 if (!Base1 || !Base2)
427 return false;
428 Base1 = getUnderlyingObject(Base1);
429 Base2 = getUnderlyingObject(Base2);
430
431 if (isa<UndefValue>(Base1) || isa<UndefValue>(Base2))
432 return false;
433
434 return Base1 == Base2;
435}
436
437bool SIInstrInfo::shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
438 ArrayRef<const MachineOperand *> BaseOps2,
439 unsigned NumLoads,
440 unsigned NumBytes) const {
441 // If the mem ops (to be clustered) do not have the same base ptr, then they
442 // should not be clustered
443 if (!BaseOps1.empty() && !BaseOps2.empty()) {
444 const MachineInstr &FirstLdSt = *BaseOps1.front()->getParent();
445 const MachineInstr &SecondLdSt = *BaseOps2.front()->getParent();
446 if (!memOpsHaveSameBasePtr(FirstLdSt, BaseOps1, SecondLdSt, BaseOps2))
447 return false;
448 } else if (!BaseOps1.empty() || !BaseOps2.empty()) {
449 // If only one base op is empty, they do not have the same base ptr
450 return false;
451 }
452
453 // In order to avoid register pressure, on average, the number of DWORDS
454 // loaded together by all clustered mem ops should not exceed 8. This is an
455 // empirical value based on certain observations and performance related
456 // experiments.
457 // The good thing about this heuristic is - it avoids clustering of too many
458 // sub-word loads, and also avoids clustering of wide loads. Below is the
459 // brief summary of how the heuristic behaves for various `LoadSize`.
460 // (1) 1 <= LoadSize <= 4: cluster at max 8 mem ops
461 // (2) 5 <= LoadSize <= 8: cluster at max 4 mem ops
462 // (3) 9 <= LoadSize <= 12: cluster at max 2 mem ops
463 // (4) 13 <= LoadSize <= 16: cluster at max 2 mem ops
464 // (5) LoadSize >= 17: do not cluster
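 // Worked example (hypothetical numbers, added for illustration): with
 // NumLoads = 4 and NumBytes = 32, LoadSize is 8 and NumDWORDs is
 // ((8 + 3) / 4) * 4 = 8, so the ops are clustered; with NumBytes = 36,
 // LoadSize is 9, NumDWORDs is 12 > 8, and they are not.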
465 const unsigned LoadSize = NumBytes / NumLoads;
466 const unsigned NumDWORDs = ((LoadSize + 3) / 4) * NumLoads;
467 return NumDWORDs <= 8;
468}
469
470// FIXME: This behaves strangely. If, for example, you have 32 load + stores,
471// the first 16 loads will be interleaved with the stores, and the next 16 will
472// be clustered as expected. It should really split into 2 16 store batches.
473//
474// Loads are clustered until this returns false, rather than trying to schedule
475// groups of stores. This also means we have to deal with saying different
476// address space loads should be clustered, and ones which might cause bank
477// conflicts.
478//
479// This might be deprecated so it might not be worth that much effort to fix.
480bool SIInstrInfo::shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1,
481 int64_t Offset0, int64_t Offset1,
482 unsigned NumLoads) const {
483 assert(Offset1 > Offset0 &&
484 "Second offset should be larger than first offset!");
485 // If we have less than 16 loads in a row, and the offsets are within 64
486 // bytes, then schedule together.
487
488 // A cacheline is 64 bytes (for global memory).
489 return (NumLoads <= 16 && (Offset1 - Offset0) < 64);
490}
491
492static void reportIllegalCopy(const SIInstrInfo *TII, MachineBasicBlock &MBB,
493 MachineBasicBlock::iterator MI,
494 const DebugLoc &DL, MCRegister DestReg,
495 MCRegister SrcReg, bool KillSrc,
496 const char *Msg = "illegal SGPR to VGPR copy") {
497 MachineFunction *MF = MBB.getParent();
498 DiagnosticInfoUnsupported IllegalCopy(MF->getFunction(), Msg, DL, DS_Error);
499 LLVMContext &C = MF->getFunction().getContext();
500 C.diagnose(IllegalCopy);
501
502 BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_ILLEGAL_COPY), DestReg)
503 .addReg(SrcReg, getKillRegState(KillSrc));
504}
505
506/// Handle copying from SGPR to AGPR, or from AGPR to AGPR. It is not possible
507/// to directly copy, so an intermediate VGPR needs to be used.
508static void indirectCopyToAGPR(const SIInstrInfo &TII,
509 MachineBasicBlock &MBB,
510 MachineBasicBlock::iterator MI,
511 const DebugLoc &DL, MCRegister DestReg,
512 MCRegister SrcReg, bool KillSrc,
513 RegScavenger &RS,
514 Register ImpDefSuperReg = Register(),
515 Register ImpUseSuperReg = Register()) {
516 const SIRegisterInfo &RI = TII.getRegisterInfo();
517
518 assert(AMDGPU::SReg_32RegClass.contains(SrcReg) ||
519 AMDGPU::AGPR_32RegClass.contains(SrcReg));
520
521 // First try to find defining accvgpr_write to avoid temporary registers.
522 for (auto Def = MI, E = MBB.begin(); Def != E; ) {
523 --Def;
524 if (!Def->definesRegister(SrcReg, &RI))
525 continue;
526 if (Def->getOpcode() != AMDGPU::V_ACCVGPR_WRITE_B32_e64)
527 break;
528
529 MachineOperand &DefOp = Def->getOperand(1);
530 assert(DefOp.isReg() || DefOp.isImm());
531
532 if (DefOp.isReg()) {
533 // Check that register source operand if not clobbered before MI.
534 // Immediate operands are always safe to propagate.
535 bool SafeToPropagate = true;
536 for (auto I = Def; I != MI && SafeToPropagate; ++I)
537 if (I->modifiesRegister(DefOp.getReg(), &RI))
538 SafeToPropagate = false;
539
540 if (!SafeToPropagate)
541 break;
542
543 DefOp.setIsKill(false);
544 }
545
546 MachineInstrBuilder Builder =
547 BuildMI(MBB, MI, DL, TII.get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), DestReg)
548 .add(DefOp);
549 if (ImpDefSuperReg)
550 Builder.addReg(ImpDefSuperReg, RegState::Define | RegState::Implicit);
551
552 if (ImpUseSuperReg) {
553 Builder.addReg(ImpUseSuperReg,
554 getKillRegState(KillSrc) | RegState::Implicit);
555 }
556
557 return;
558 }
559
560 RS.enterBasicBlock(MBB);
561 RS.forward(MI);
562
563 // Ideally we want to have three registers for a long reg_sequence copy
564 // to hide 2 waitstates between v_mov_b32 and accvgpr_write.
565 unsigned MaxVGPRs = RI.getRegPressureLimit(&AMDGPU::VGPR_32RegClass,
566 *MBB.getParent());
567
568 // Registers in the sequence are allocated contiguously so we can just
569 // use register number to pick one of three round-robin temps.
570 unsigned RegNo = DestReg % 3;
571 Register Tmp = RS.scavengeRegister(&AMDGPU::VGPR_32RegClass, 0);
572 if (!Tmp)
573 report_fatal_error("Cannot scavenge VGPR to copy to AGPR");
574 RS.setRegUsed(Tmp);
575
576 if (!TII.getSubtarget().hasGFX90AInsts()) {
577 // Only loop through if there are any free registers left, otherwise
578 // scavenger may report a fatal error without emergency spill slot
579 // or spill with the slot.
580 while (RegNo-- && RS.FindUnusedReg(&AMDGPU::VGPR_32RegClass)) {
581 Register Tmp2 = RS.scavengeRegister(&AMDGPU::VGPR_32RegClass, 0);
582 if (!Tmp2 || RI.getHWRegIndex(Tmp2) >= MaxVGPRs)
583 break;
584 Tmp = Tmp2;
585 RS.setRegUsed(Tmp);
586 }
587 }
588
589 // Insert copy to temporary VGPR.
590 unsigned TmpCopyOp = AMDGPU::V_MOV_B32_e32;
591 if (AMDGPU::AGPR_32RegClass.contains(SrcReg)) {
592 TmpCopyOp = AMDGPU::V_ACCVGPR_READ_B32_e64;
593 } else {
594 assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
595 }
596
597 MachineInstrBuilder UseBuilder = BuildMI(MBB, MI, DL, TII.get(TmpCopyOp), Tmp)
598 .addReg(SrcReg, getKillRegState(KillSrc));
599 if (ImpUseSuperReg) {
600 UseBuilder.addReg(ImpUseSuperReg,
601 getKillRegState(KillSrc) | RegState::Implicit);
602 }
603
604 MachineInstrBuilder DefBuilder
605 = BuildMI(MBB, MI, DL, TII.get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), DestReg)
606 .addReg(Tmp, RegState::Kill);
607
608 if (ImpDefSuperReg)
609 DefBuilder.addReg(ImpDefSuperReg, RegState::Define | RegState::Implicit);
610}
611
612static void expandSGPRCopy(const SIInstrInfo &TII, MachineBasicBlock &MBB,
613 MachineBasicBlock::iterator MI, const DebugLoc &DL,
614 MCRegister DestReg, MCRegister SrcReg, bool KillSrc,
615 const TargetRegisterClass *RC, bool Forward) {
616 const SIRegisterInfo &RI = TII.getRegisterInfo();
617 ArrayRef<int16_t> BaseIndices = RI.getRegSplitParts(RC, 4);
618 MachineBasicBlock::iterator I = MI;
619 MachineInstr *FirstMI = nullptr, *LastMI = nullptr;
620
621 for (unsigned Idx = 0; Idx < BaseIndices.size(); ++Idx) {
622 int16_t SubIdx = BaseIndices[Idx];
623 Register Reg = RI.getSubReg(DestReg, SubIdx);
624 unsigned Opcode = AMDGPU::S_MOV_B32;
625
626 // Is SGPR aligned? If so try to combine with next.
627 Register Src = RI.getSubReg(SrcReg, SubIdx);
628 bool AlignedDest = ((Reg - AMDGPU::SGPR0) % 2) == 0;
629 bool AlignedSrc = ((Src - AMDGPU::SGPR0) % 2) == 0;
630 if (AlignedDest && AlignedSrc && (Idx + 1 < BaseIndices.size())) {
631 // Can use SGPR64 copy
632 unsigned Channel = RI.getChannelFromSubReg(SubIdx);
633 SubIdx = RI.getSubRegFromChannel(Channel, 2);
634 Opcode = AMDGPU::S_MOV_B64;
635 Idx++;
636 }
637
638 LastMI = BuildMI(MBB, I, DL, TII.get(Opcode), RI.getSubReg(DestReg, SubIdx))
639 .addReg(RI.getSubReg(SrcReg, SubIdx))
640 .addReg(SrcReg, RegState::Implicit);
641
642 if (!FirstMI)
643 FirstMI = LastMI;
644
645 if (!Forward)
646 I--;
647 }
648
649 assert(FirstMI && LastMI);
650 if (!Forward)
651 std::swap(FirstMI, LastMI);
652
653 FirstMI->addOperand(
654 MachineOperand::CreateReg(DestReg, true /*IsDef*/, true /*IsImp*/));
655
656 if (KillSrc)
657 LastMI->addRegisterKilled(SrcReg, &RI);
658}
659
660void SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
661 MachineBasicBlock::iterator MI,
662 const DebugLoc &DL, MCRegister DestReg,
663 MCRegister SrcReg, bool KillSrc) const {
664 const TargetRegisterClass *RC = RI.getPhysRegClass(DestReg);
665
666 // FIXME: This is hack to resolve copies between 16 bit and 32 bit
667 // registers until all patterns are fixed.
668 if (Fix16BitCopies &&
669 ((RI.getRegSizeInBits(*RC) == 16) ^
670 (RI.getRegSizeInBits(*RI.getPhysRegClass(SrcReg)) == 16))) {
671 MCRegister &RegToFix = (RI.getRegSizeInBits(*RC) == 16) ? DestReg : SrcReg;
672 MCRegister Super = RI.get32BitRegister(RegToFix);
673 assert(RI.getSubReg(Super, AMDGPU::lo16) == RegToFix);
674 RegToFix = Super;
675
676 if (DestReg == SrcReg) {
677 // Insert empty bundle since ExpandPostRA expects an instruction here.
678 BuildMI(MBB, MI, DL, get(AMDGPU::BUNDLE));
679 return;
680 }
681
682 RC = RI.getPhysRegClass(DestReg);
683 }
684
685 if (RC == &AMDGPU::VGPR_32RegClass) {
686 assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) ||
687 AMDGPU::SReg_32RegClass.contains(SrcReg) ||
688 AMDGPU::AGPR_32RegClass.contains(SrcReg));
689 unsigned Opc = AMDGPU::AGPR_32RegClass.contains(SrcReg) ?
690 AMDGPU::V_ACCVGPR_READ_B32_e64 : AMDGPU::V_MOV_B32_e32;
691 BuildMI(MBB, MI, DL, get(Opc), DestReg)
692 .addReg(SrcReg, getKillRegState(KillSrc));
693 return;
694 }
695
696 if (RC == &AMDGPU::SReg_32_XM0RegClass ||
697 RC == &AMDGPU::SReg_32RegClass) {
698 if (SrcReg == AMDGPU::SCC) {
699 BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B32), DestReg)
700 .addImm(1)
701 .addImm(0);
702 return;
703 }
704
705 if (DestReg == AMDGPU::VCC_LO) {
706 if (AMDGPU::SReg_32RegClass.contains(SrcReg)) {
707 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), AMDGPU::VCC_LO)
708 .addReg(SrcReg, getKillRegState(KillSrc));
709 } else {
710 // FIXME: Hack until VReg_1 removed.
711 assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
712 BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32))
713 .addImm(0)
714 .addReg(SrcReg, getKillRegState(KillSrc));
715 }
716
717 return;
718 }
719
720 if (!AMDGPU::SReg_32RegClass.contains(SrcReg)) {
721 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
722 return;
723 }
724
725 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
726 .addReg(SrcReg, getKillRegState(KillSrc));
727 return;
728 }
729
730 if (RC == &AMDGPU::SReg_64RegClass) {
731 if (SrcReg == AMDGPU::SCC) {
732 BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B64), DestReg)
733 .addImm(1)
734 .addImm(0);
735 return;
736 }
737
738 if (DestReg == AMDGPU::VCC) {
739 if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
740 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC)
741 .addReg(SrcReg, getKillRegState(KillSrc));
742 } else {
743 // FIXME: Hack until VReg_1 removed.
744 assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
745 BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32))
746 .addImm(0)
747 .addReg(SrcReg, getKillRegState(KillSrc));
748 }
749
750 return;
751 }
752
753 if (!AMDGPU::SReg_64RegClass.contains(SrcReg)) {
754 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
755 return;
756 }
757
758 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
759 .addReg(SrcReg, getKillRegState(KillSrc));
760 return;
761 }
762
763 if (DestReg == AMDGPU::SCC) {
764 // Copying 64-bit or 32-bit sources to SCC barely makes sense,
765 // but SelectionDAG emits such copies for i1 sources.
766 if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
767 // This copy can only be produced by patterns
768 // with explicit SCC, which are known to be enabled
769 // only for subtargets with S_CMP_LG_U64 present.
770 assert(ST.hasScalarCompareEq64());
771 BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U64))
772 .addReg(SrcReg, getKillRegState(KillSrc))
773 .addImm(0);
774 } else {
775 assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
776 BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U32))
777 .addReg(SrcReg, getKillRegState(KillSrc))
778 .addImm(0);
779 }
780
781 return;
782 }
783
784 if (RC == &AMDGPU::AGPR_32RegClass) {
785 if (AMDGPU::VGPR_32RegClass.contains(SrcReg)) {
786 BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), DestReg)
787 .addReg(SrcReg, getKillRegState(KillSrc));
788 return;
789 }
790
791 if (AMDGPU::AGPR_32RegClass.contains(SrcReg) && ST.hasGFX90AInsts()) {
792 BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_MOV_B32), DestReg)
793 .addReg(SrcReg, getKillRegState(KillSrc));
794 return;
795 }
796
797 // FIXME: Pass should maintain scavenger to avoid scan through the block on
798 // every AGPR spill.
799 RegScavenger RS;
800 indirectCopyToAGPR(*this, MBB, MI, DL, DestReg, SrcReg, KillSrc, RS);
801 return;
802 }
803
804 const unsigned Size = RI.getRegSizeInBits(*RC);
805 if (Size == 16) {
806 assert(AMDGPU::VGPR_LO16RegClass.contains(SrcReg) ||
807 AMDGPU::VGPR_HI16RegClass.contains(SrcReg) ||
808 AMDGPU::SReg_LO16RegClass.contains(SrcReg) ||
809 AMDGPU::AGPR_LO16RegClass.contains(SrcReg));
810
811 bool IsSGPRDst = AMDGPU::SReg_LO16RegClass.contains(DestReg);
812 bool IsSGPRSrc = AMDGPU::SReg_LO16RegClass.contains(SrcReg);
813 bool IsAGPRDst = AMDGPU::AGPR_LO16RegClass.contains(DestReg);
814 bool IsAGPRSrc = AMDGPU::AGPR_LO16RegClass.contains(SrcReg);
815 bool DstLow = AMDGPU::VGPR_LO16RegClass.contains(DestReg) ||
816 AMDGPU::SReg_LO16RegClass.contains(DestReg) ||
817 AMDGPU::AGPR_LO16RegClass.contains(DestReg);
818 bool SrcLow = AMDGPU::VGPR_LO16RegClass.contains(SrcReg) ||
819 AMDGPU::SReg_LO16RegClass.contains(SrcReg) ||
820 AMDGPU::AGPR_LO16RegClass.contains(SrcReg);
821 MCRegister NewDestReg = RI.get32BitRegister(DestReg);
822 MCRegister NewSrcReg = RI.get32BitRegister(SrcReg);
823
824 if (IsSGPRDst) {
825 if (!IsSGPRSrc) {
826 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
827 return;
828 }
829
830 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), NewDestReg)
831 .addReg(NewSrcReg, getKillRegState(KillSrc));
832 return;
833 }
834
835 if (IsAGPRDst || IsAGPRSrc) {
836 if (!DstLow || !SrcLow) {
837 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc,
838 "Cannot use hi16 subreg with an AGPR!");
839 }
840
841 copyPhysReg(MBB, MI, DL, NewDestReg, NewSrcReg, KillSrc);
842 return;
843 }
844
845 if (IsSGPRSrc && !ST.hasSDWAScalar()) {
846 if (!DstLow || !SrcLow) {
847 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc,
848 "Cannot use hi16 subreg on VI!");
849 }
850
851 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), NewDestReg)
852 .addReg(NewSrcReg, getKillRegState(KillSrc));
853 return;
854 }
855
856 auto MIB = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_sdwa), NewDestReg)
857 .addImm(0) // src0_modifiers
858 .addReg(NewSrcReg)
859 .addImm(0) // clamp
860 .addImm(DstLow ? AMDGPU::SDWA::SdwaSel::WORD_0
861 : AMDGPU::SDWA::SdwaSel::WORD_1)
862 .addImm(AMDGPU::SDWA::DstUnused::UNUSED_PRESERVE)
863 .addImm(SrcLow ? AMDGPU::SDWA::SdwaSel::WORD_0
864 : AMDGPU::SDWA::SdwaSel::WORD_1)
865 .addReg(NewDestReg, RegState::Implicit | RegState::Undef);
866 // First implicit operand is $exec.
867 MIB->tieOperands(0, MIB->getNumOperands() - 1);
868 return;
869 }
870
871 const TargetRegisterClass *SrcRC = RI.getPhysRegClass(SrcReg);
872 if (RC == RI.getVGPR64Class() && (SrcRC == RC || RI.isSGPRClass(SrcRC))) {
873 if (ST.hasPackedFP32Ops()) {
874 BuildMI(MBB, MI, DL, get(AMDGPU::V_PK_MOV_B32), DestReg)
875 .addImm(SISrcMods::OP_SEL_1)
876 .addReg(SrcReg)
877 .addImm(SISrcMods::OP_SEL_0 | SISrcMods::OP_SEL_1)
878 .addReg(SrcReg)
879 .addImm(0) // op_sel_lo
880 .addImm(0) // op_sel_hi
881 .addImm(0) // neg_lo
882 .addImm(0) // neg_hi
883 .addImm(0) // clamp
884 .addReg(SrcReg, getKillRegState(KillSrc) | RegState::Implicit);
885 return;
886 }
887 }
888
889 const bool Forward = RI.getHWRegIndex(DestReg) <= RI.getHWRegIndex(SrcReg);
890 if (RI.isSGPRClass(RC)) {
891 if (!RI.isSGPRClass(SrcRC)) {
892 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
893 return;
894 }
895 expandSGPRCopy(*this, MBB, MI, DL, DestReg, SrcReg, KillSrc, RC, Forward);
896 return;
897 }
898
899 unsigned EltSize = 4;
900 unsigned Opcode = AMDGPU::V_MOV_B32_e32;
901 if (RI.isAGPRClass(RC)) {
902 if (ST.hasGFX90AInsts() && RI.isAGPRClass(SrcRC))
903 Opcode = AMDGPU::V_ACCVGPR_MOV_B32;
904 else if (RI.hasVGPRs(SrcRC))
905 Opcode = AMDGPU::V_ACCVGPR_WRITE_B32_e64;
906 else
907 Opcode = AMDGPU::INSTRUCTION_LIST_END;
908 } else if (RI.hasVGPRs(RC) && RI.isAGPRClass(SrcRC)) {
909 Opcode = AMDGPU::V_ACCVGPR_READ_B32_e64;
910 } else if ((Size % 64 == 0) && RI.hasVGPRs(RC) &&
911 (RI.isProperlyAlignedRC(*RC) &&
912 (SrcRC == RC || RI.isSGPRClass(SrcRC)))) {
913 // TODO: In 96-bit case, could do a 64-bit mov and then a 32-bit mov.
914 if (ST.hasPackedFP32Ops()) {
915 Opcode = AMDGPU::V_PK_MOV_B32;
916 EltSize = 8;
917 }
918 }
919
920 // For the cases where we need an intermediate instruction/temporary register
921 // (destination is an AGPR), we need a scavenger.
922 //
923 // FIXME: The pass should maintain this for us so we don't have to re-scan the
924 // whole block for every handled copy.
925 std::unique_ptr<RegScavenger> RS;
926 if (Opcode == AMDGPU::INSTRUCTION_LIST_END)
927 RS.reset(new RegScavenger());
928
929 ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RC, EltSize);
930
931 // If there is an overlap, we can't kill the super-register on the last
932 // instruction, since it will also kill the components made live by this def.
933 const bool CanKillSuperReg = KillSrc && !RI.regsOverlap(SrcReg, DestReg);
934
935 for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
936 unsigned SubIdx;
937 if (Forward)
938 SubIdx = SubIndices[Idx];
939 else
940 SubIdx = SubIndices[SubIndices.size() - Idx - 1];
941
942 bool UseKill = CanKillSuperReg && Idx == SubIndices.size() - 1;
943
944 if (Opcode == AMDGPU::INSTRUCTION_LIST_END) {
945 Register ImpDefSuper = Idx == 0 ? Register(DestReg) : Register();
946 Register ImpUseSuper = SrcReg;
947 indirectCopyToAGPR(*this, MBB, MI, DL, RI.getSubReg(DestReg, SubIdx),
948 RI.getSubReg(SrcReg, SubIdx), UseKill, *RS,
949 ImpDefSuper, ImpUseSuper);
950 } else if (Opcode == AMDGPU::V_PK_MOV_B32) {
951 Register DstSubReg = RI.getSubReg(DestReg, SubIdx);
952 Register SrcSubReg = RI.getSubReg(SrcReg, SubIdx);
953 MachineInstrBuilder MIB =
954 BuildMI(MBB, MI, DL, get(AMDGPU::V_PK_MOV_B32), DstSubReg)
955 .addImm(SISrcMods::OP_SEL_1)
956 .addReg(SrcSubReg)
957 .addImm(SISrcMods::OP_SEL_0 | SISrcMods::OP_SEL_1)
958 .addReg(SrcSubReg)
959 .addImm(0) // op_sel_lo
960 .addImm(0) // op_sel_hi
961 .addImm(0) // neg_lo
962 .addImm(0) // neg_hi
963 .addImm(0) // clamp
964 .addReg(SrcReg, getKillRegState(UseKill) | RegState::Implicit);
965 if (Idx == 0)
966 MIB.addReg(DestReg, RegState::Define | RegState::Implicit);
967 } else {
968 MachineInstrBuilder Builder =
969 BuildMI(MBB, MI, DL, get(Opcode), RI.getSubReg(DestReg, SubIdx))
970 .addReg(RI.getSubReg(SrcReg, SubIdx));
971 if (Idx == 0)
972 Builder.addReg(DestReg, RegState::Define | RegState::Implicit);
973
974 Builder.addReg(SrcReg, getKillRegState(UseKill) | RegState::Implicit);
975 }
976 }
977}
978
979int SIInstrInfo::commuteOpcode(unsigned Opcode) const {
980 int NewOpc;
981
982 // Try to map original to commuted opcode
983 NewOpc = AMDGPU::getCommuteRev(Opcode);
984 if (NewOpc != -1)
985 // Check if the commuted (REV) opcode exists on the target.
986 return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;
987
988 // Try to map commuted to original opcode
989 NewOpc = AMDGPU::getCommuteOrig(Opcode);
990 if (NewOpc != -1)
991 // Check if the original (non-REV) opcode exists on the target.
992 return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;
993
994 return Opcode;
995}
996
997void SIInstrInfo::materializeImmediate(MachineBasicBlock &MBB,
998 MachineBasicBlock::iterator MI,
999 const DebugLoc &DL, unsigned DestReg,
1000 int64_t Value) const {
1001 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
1002 const TargetRegisterClass *RegClass = MRI.getRegClass(DestReg);
1003 if (RegClass == &AMDGPU::SReg_32RegClass ||
1004 RegClass == &AMDGPU::SGPR_32RegClass ||
1005 RegClass == &AMDGPU::SReg_32_XM0RegClass ||
1006 RegClass == &AMDGPU::SReg_32_XM0_XEXECRegClass) {
1007 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
1008 .addImm(Value);
1009 return;
1010 }
1011
1012 if (RegClass == &AMDGPU::SReg_64RegClass ||
1013 RegClass == &AMDGPU::SGPR_64RegClass ||
1014 RegClass == &AMDGPU::SReg_64_XEXECRegClass) {
1015 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
1016 .addImm(Value);
1017 return;
1018 }
1019
1020 if (RegClass == &AMDGPU::VGPR_32RegClass) {
1021 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
1022 .addImm(Value);
1023 return;
1024 }
1025 if (RegClass->hasSuperClassEq(&AMDGPU::VReg_64RegClass)) {
1026 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO), DestReg)
1027 .addImm(Value);
1028 return;
1029 }
1030
1031 unsigned EltSize = 4;
1032 unsigned Opcode = AMDGPU::V_MOV_B32_e32;
1033 if (RI.isSGPRClass(RegClass)) {
1034 if (RI.getRegSizeInBits(*RegClass) > 32) {
1035 Opcode = AMDGPU::S_MOV_B64;
1036 EltSize = 8;
1037 } else {
1038 Opcode = AMDGPU::S_MOV_B32;
1039 EltSize = 4;
1040 }
1041 }
1042
1043 ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RegClass, EltSize);
1044 for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
1045 int64_t IdxValue = Idx == 0 ? Value : 0;
1046
1047 MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
1048 get(Opcode), RI.getSubReg(DestReg, SubIndices[Idx]));
1049 Builder.addImm(IdxValue);
1050 }
1051}
1052
1053const TargetRegisterClass *
1054SIInstrInfo::getPreferredSelectRegClass(unsigned Size) const {
1055 return &AMDGPU::VGPR_32RegClass;
1056}
1057
1058void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB,
1059 MachineBasicBlock::iterator I,
1060 const DebugLoc &DL, Register DstReg,
1061 ArrayRef<MachineOperand> Cond,
1062 Register TrueReg,
1063 Register FalseReg) const {
1064 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
1065 const TargetRegisterClass *BoolXExecRC =
1066 RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
1067 assert(MRI.getRegClass(DstReg) == &AMDGPU::VGPR_32RegClass &&
1068 "Not a VGPR32 reg");
1070 if (Cond.size() == 1) {
1071 Register SReg = MRI.createVirtualRegister(BoolXExecRC);
1072 BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
1073 .add(Cond[0]);
1074 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1075 .addImm(0)
1076 .addReg(FalseReg)
1077 .addImm(0)
1078 .addReg(TrueReg)
1079 .addReg(SReg);
1080 } else if (Cond.size() == 2) {
1081 assert(Cond[0].isImm() && "Cond[0] is not an immediate");
1082 switch (Cond[0].getImm()) {
1083 case SIInstrInfo::SCC_TRUE: {
1084 Register SReg = MRI.createVirtualRegister(BoolXExecRC);
1085 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
1086 : AMDGPU::S_CSELECT_B64), SReg)
1087 .addImm(1)
1088 .addImm(0);
1089 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1090 .addImm(0)
1091 .addReg(FalseReg)
1092 .addImm(0)
1093 .addReg(TrueReg)
1094 .addReg(SReg);
1095 break;
1096 }
1097 case SIInstrInfo::SCC_FALSE: {
1098 Register SReg = MRI.createVirtualRegister(BoolXExecRC);
1099 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
1100 : AMDGPU::S_CSELECT_B64), SReg)
1101 .addImm(0)
1102 .addImm(1);
1103 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1104 .addImm(0)
1105 .addReg(FalseReg)
1106 .addImm(0)
1107 .addReg(TrueReg)
1108 .addReg(SReg);
1109 break;
1110 }
1111 case SIInstrInfo::VCCNZ: {
1112 MachineOperand RegOp = Cond[1];
1113 RegOp.setImplicit(false);
1114 Register SReg = MRI.createVirtualRegister(BoolXExecRC);
1115 BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
1116 .add(RegOp);
1117 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1118 .addImm(0)
1119 .addReg(FalseReg)
1120 .addImm(0)
1121 .addReg(TrueReg)
1122 .addReg(SReg);
1123 break;
1124 }
1125 case SIInstrInfo::VCCZ: {
1126 MachineOperand RegOp = Cond[1];
1127 RegOp.setImplicit(false);
1128 Register SReg = MRI.createVirtualRegister(BoolXExecRC);
1129 BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
1130 .add(RegOp);
1131 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1132 .addImm(0)
1133 .addReg(TrueReg)
1134 .addImm(0)
1135 .addReg(FalseReg)
1136 .addReg(SReg);
1137 break;
1138 }
1139 case SIInstrInfo::EXECNZ: {
1140 Register SReg = MRI.createVirtualRegister(BoolXExecRC);
1141 Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC());
1142 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32
1143 : AMDGPU::S_OR_SAVEEXEC_B64), SReg2)
1144 .addImm(0);
1145 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
1146 : AMDGPU::S_CSELECT_B64), SReg)
1147 .addImm(1)
1148 .addImm(0);
1149 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1150 .addImm(0)
1151 .addReg(FalseReg)
1152 .addImm(0)
1153 .addReg(TrueReg)
1154 .addReg(SReg);
1155 break;
1156 }
1157 case SIInstrInfo::EXECZ: {
1158 Register SReg = MRI.createVirtualRegister(BoolXExecRC);
1159 Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC());
1160 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32
1161 : AMDGPU::S_OR_SAVEEXEC_B64), SReg2)
1162 .addImm(0);
1163 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
1164 : AMDGPU::S_CSELECT_B64), SReg)
1165 .addImm(0)
1166 .addImm(1);
1167 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1168 .addImm(0)
1169 .addReg(FalseReg)
1170 .addImm(0)
1171 .addReg(TrueReg)
1172 .addReg(SReg);
1173 llvm_unreachable("Unhandled branch predicate EXECZ")::llvm::llvm_unreachable_internal("Unhandled branch predicate EXECZ"
, "llvm/lib/Target/AMDGPU/SIInstrInfo.cpp", 1173)
;
1174 break;
1175 }
1176 default:
1177 llvm_unreachable("invalid branch predicate")::llvm::llvm_unreachable_internal("invalid branch predicate",
"llvm/lib/Target/AMDGPU/SIInstrInfo.cpp", 1177)
;
1178 }
1179 } else {
1180 llvm_unreachable("Can only handle Cond size 1 or 2")::llvm::llvm_unreachable_internal("Can only handle Cond size 1 or 2"
, "llvm/lib/Target/AMDGPU/SIInstrInfo.cpp", 1180)
;
1181 }
1182}
1183
1184Register SIInstrInfo::insertEQ(MachineBasicBlock *MBB,
1185 MachineBasicBlock::iterator I,
1186 const DebugLoc &DL,
1187 Register SrcReg, int Value) const {
1188 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
1189 Register Reg = MRI.createVirtualRegister(RI.getBoolRC());
1190 BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_EQ_I32_e64), Reg)
1191 .addImm(Value)
1192 .addReg(SrcReg);
1193
1194 return Reg;
1195}
1196
1197Register SIInstrInfo::insertNE(MachineBasicBlock *MBB,
1198 MachineBasicBlock::iterator I,
1199 const DebugLoc &DL,
1200 Register SrcReg, int Value) const {
1201 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
1202 Register Reg = MRI.createVirtualRegister(RI.getBoolRC());
1203 BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_NE_I32_e64), Reg)
1204 .addImm(Value)
1205 .addReg(SrcReg);
1206
1207 return Reg;
1208}
1209
1210unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const {
1211
1212 if (RI.isAGPRClass(DstRC))
1213 return AMDGPU::COPY;
1214 if (RI.getRegSizeInBits(*DstRC) == 32) {
1215 return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1216 } else if (RI.getRegSizeInBits(*DstRC) == 64 && RI.isSGPRClass(DstRC)) {
1217 return AMDGPU::S_MOV_B64;
1218 } else if (RI.getRegSizeInBits(*DstRC) == 64 && !RI.isSGPRClass(DstRC)) {
1219 return AMDGPU::V_MOV_B64_PSEUDO;
1220 }
1221 return AMDGPU::COPY;
1222}
1223
1224const MCInstrDesc &
1225SIInstrInfo::getIndirectGPRIDXPseudo(unsigned VecSize,
1226 bool IsIndirectSrc) const {
1227 if (IsIndirectSrc) {
1228 if (VecSize <= 32) // 4 bytes
1229 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V1);
1230 if (VecSize <= 64) // 8 bytes
1231 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V2);
1232 if (VecSize <= 96) // 12 bytes
1233 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V3);
1234 if (VecSize <= 128) // 16 bytes
1235 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V4);
1236 if (VecSize <= 160) // 20 bytes
1237 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V5);
1238 if (VecSize <= 256) // 32 bytes
1239 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V8);
1240 if (VecSize <= 512) // 64 bytes
1241 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V16);
1242 if (VecSize <= 1024) // 128 bytes
1243 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V32);
1244
1245 llvm_unreachable("unsupported size for IndirectRegReadGPRIDX pseudos")::llvm::llvm_unreachable_internal("unsupported size for IndirectRegReadGPRIDX pseudos"
, "llvm/lib/Target/AMDGPU/SIInstrInfo.cpp", 1245)
;
1246 }
1247
1248 if (VecSize <= 32) // 4 bytes
1249 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V1);
1250 if (VecSize <= 64) // 8 bytes
1251 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V2);
1252 if (VecSize <= 96) // 12 bytes
1253 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V3);
1254 if (VecSize <= 128) // 16 bytes
1255 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V4);
1256 if (VecSize <= 160) // 20 bytes
1257 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V5);
1258 if (VecSize <= 256) // 32 bytes
1259 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V8);
1260 if (VecSize <= 512) // 64 bytes
1261 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V16);
1262 if (VecSize <= 1024) // 128 bytes
1263 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V32);
1264
1265 llvm_unreachable("unsupported size for IndirectRegWriteGPRIDX pseudos")::llvm::llvm_unreachable_internal("unsupported size for IndirectRegWriteGPRIDX pseudos"
, "llvm/lib/Target/AMDGPU/SIInstrInfo.cpp", 1265)
;
1266}
1267
1268static unsigned getIndirectVGPRWriteMovRelPseudoOpc(unsigned VecSize) {
1269 if (VecSize <= 32) // 4 bytes
1270 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V1;
1271 if (VecSize <= 64) // 8 bytes
1272 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V2;
1273 if (VecSize <= 96) // 12 bytes
1274 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V3;
1275 if (VecSize <= 128) // 16 bytes
1276 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V4;
1277 if (VecSize <= 160) // 20 bytes
1278 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V5;
1279 if (VecSize <= 256) // 32 bytes
1280 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V8;
1281 if (VecSize <= 512) // 64 bytes
1282 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V16;
1283 if (VecSize <= 1024) // 128 bytes
1284 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V32;
1285
1286 llvm_unreachable("unsupported size for IndirectRegWrite pseudos");
1287}
1288
1289static unsigned getIndirectSGPRWriteMovRelPseudo32(unsigned VecSize) {
1290 if (VecSize <= 32) // 4 bytes
1291 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V1;
1292 if (VecSize <= 64) // 8 bytes
1293 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V2;
1294 if (VecSize <= 96) // 12 bytes
1295 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V3;
1296 if (VecSize <= 128) // 16 bytes
1297 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V4;
1298 if (VecSize <= 160) // 20 bytes
1299 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V5;
1300 if (VecSize <= 256) // 32 bytes
1301 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V8;
1302 if (VecSize <= 512) // 64 bytes
1303 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V16;
1304 if (VecSize <= 1024) // 128 bytes
1305 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V32;
1306
1307 llvm_unreachable("unsupported size for IndirectRegWrite pseudos");
1308}
1309
1310static unsigned getIndirectSGPRWriteMovRelPseudo64(unsigned VecSize) {
1311 if (VecSize <= 64) // 8 bytes
1312 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V1;
1313 if (VecSize <= 128) // 16 bytes
1314 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V2;
1315 if (VecSize <= 256) // 32 bytes
1316 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V4;
1317 if (VecSize <= 512) // 64 bytes
1318 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V8;
1319 if (VecSize <= 1024) // 128 bytes
1320 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V16;
1321
1322 llvm_unreachable("unsupported size for IndirectRegWrite pseudos");
1323}
1324
1325const MCInstrDesc &
1326SIInstrInfo::getIndirectRegWriteMovRelPseudo(unsigned VecSize, unsigned EltSize,
1327 bool IsSGPR) const {
1328 if (IsSGPR) {
1329 switch (EltSize) {
1330 case 32:
1331 return get(getIndirectSGPRWriteMovRelPseudo32(VecSize));
1332 case 64:
1333 return get(getIndirectSGPRWriteMovRelPseudo64(VecSize));
1334 default:
1335 llvm_unreachable("invalid reg indexing elt size");
1336 }
1337 }
1338
1339 assert(EltSize == 32 && "invalid reg indexing elt size");
1340 return get(getIndirectVGPRWriteMovRelPseudoOpc(VecSize));
1341}
1342
1343static unsigned getSGPRSpillSaveOpcode(unsigned Size) {
1344 switch (Size) {
1345 case 4:
1346 return AMDGPU::SI_SPILL_S32_SAVE;
1347 case 8:
1348 return AMDGPU::SI_SPILL_S64_SAVE;
1349 case 12:
1350 return AMDGPU::SI_SPILL_S96_SAVE;
1351 case 16:
1352 return AMDGPU::SI_SPILL_S128_SAVE;
1353 case 20:
1354 return AMDGPU::SI_SPILL_S160_SAVE;
1355 case 24:
1356 return AMDGPU::SI_SPILL_S192_SAVE;
1357 case 28:
1358 return AMDGPU::SI_SPILL_S224_SAVE;
1359 case 32:
1360 return AMDGPU::SI_SPILL_S256_SAVE;
1361 case 64:
1362 return AMDGPU::SI_SPILL_S512_SAVE;
1363 case 128:
1364 return AMDGPU::SI_SPILL_S1024_SAVE;
1365 default:
1366 llvm_unreachable("unknown register size");
1367 }
1368}
1369
1370static unsigned getVGPRSpillSaveOpcode(unsigned Size) {
1371 switch (Size) {
1372 case 4:
1373 return AMDGPU::SI_SPILL_V32_SAVE;
1374 case 8:
1375 return AMDGPU::SI_SPILL_V64_SAVE;
1376 case 12:
1377 return AMDGPU::SI_SPILL_V96_SAVE;
1378 case 16:
1379 return AMDGPU::SI_SPILL_V128_SAVE;
1380 case 20:
1381 return AMDGPU::SI_SPILL_V160_SAVE;
1382 case 24:
1383 return AMDGPU::SI_SPILL_V192_SAVE;
1384 case 28:
1385 return AMDGPU::SI_SPILL_V224_SAVE;
1386 case 32:
1387 return AMDGPU::SI_SPILL_V256_SAVE;
1388 case 64:
1389 return AMDGPU::SI_SPILL_V512_SAVE;
1390 case 128:
1391 return AMDGPU::SI_SPILL_V1024_SAVE;
1392 default:
1393 llvm_unreachable("unknown register size");
1394 }
1395}
1396
1397static unsigned getAGPRSpillSaveOpcode(unsigned Size) {
1398 switch (Size) {
1399 case 4:
1400 return AMDGPU::SI_SPILL_A32_SAVE;
1401 case 8:
1402 return AMDGPU::SI_SPILL_A64_SAVE;
1403 case 12:
1404 return AMDGPU::SI_SPILL_A96_SAVE;
1405 case 16:
1406 return AMDGPU::SI_SPILL_A128_SAVE;
1407 case 20:
1408 return AMDGPU::SI_SPILL_A160_SAVE;
1409 case 24:
1410 return AMDGPU::SI_SPILL_A192_SAVE;
1411 case 28:
1412 return AMDGPU::SI_SPILL_A224_SAVE;
1413 case 32:
1414 return AMDGPU::SI_SPILL_A256_SAVE;
1415 case 64:
1416 return AMDGPU::SI_SPILL_A512_SAVE;
1417 case 128:
1418 return AMDGPU::SI_SPILL_A1024_SAVE;
1419 default:
1420 llvm_unreachable("unknown register size");
1421 }
1422}
1423
1424static unsigned getAVSpillSaveOpcode(unsigned Size) {
1425 switch (Size) {
1426 case 4:
1427 return AMDGPU::SI_SPILL_AV32_SAVE;
1428 case 8:
1429 return AMDGPU::SI_SPILL_AV64_SAVE;
1430 case 12:
1431 return AMDGPU::SI_SPILL_AV96_SAVE;
1432 case 16:
1433 return AMDGPU::SI_SPILL_AV128_SAVE;
1434 case 20:
1435 return AMDGPU::SI_SPILL_AV160_SAVE;
1436 case 24:
1437 return AMDGPU::SI_SPILL_AV192_SAVE;
1438 case 28:
1439 return AMDGPU::SI_SPILL_AV224_SAVE;
1440 case 32:
1441 return AMDGPU::SI_SPILL_AV256_SAVE;
1442 case 64:
1443 return AMDGPU::SI_SPILL_AV512_SAVE;
1444 case 128:
1445 return AMDGPU::SI_SPILL_AV1024_SAVE;
1446 default:
1447 llvm_unreachable("unknown register size");
1448 }
1449}
1450
1451void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
1452 MachineBasicBlock::iterator MI,
1453 Register SrcReg, bool isKill,
1454 int FrameIndex,
1455 const TargetRegisterClass *RC,
1456 const TargetRegisterInfo *TRI) const {
1457 MachineFunction *MF = MBB.getParent();
1458 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1459 MachineFrameInfo &FrameInfo = MF->getFrameInfo();
1460 const DebugLoc &DL = MBB.findDebugLoc(MI);
1461
1462 MachinePointerInfo PtrInfo
1463 = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
1464 MachineMemOperand *MMO = MF->getMachineMemOperand(
1465 PtrInfo, MachineMemOperand::MOStore, FrameInfo.getObjectSize(FrameIndex),
1466 FrameInfo.getObjectAlign(FrameIndex));
1467 unsigned SpillSize = TRI->getSpillSize(*RC);
1468
1469 MachineRegisterInfo &MRI = MF->getRegInfo();
1470 if (RI.isSGPRClass(RC)) {
1471 MFI->setHasSpilledSGPRs();
1472 assert(SrcReg != AMDGPU::M0 && "m0 should not be spilled");
1473 assert(SrcReg != AMDGPU::EXEC_LO && SrcReg != AMDGPU::EXEC_HI &&
1474        SrcReg != AMDGPU::EXEC && "exec should not be spilled");
1475
1476 // We are only allowed to create one new instruction when spilling
1477 // registers, so we need to use a pseudo instruction for spilling SGPRs.
1478 const MCInstrDesc &OpDesc = get(getSGPRSpillSaveOpcode(SpillSize));
1479
1480 // The SGPR spill/restore instructions only work on numbered SGPRs, so we need
1481 // to make sure we are using the correct register class.
1482 if (SrcReg.isVirtual() && SpillSize == 4) {
1483 MRI.constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0_XEXECRegClass);
1484 }
1485
1486 BuildMI(MBB, MI, DL, OpDesc)
1487 .addReg(SrcReg, getKillRegState(isKill)) // data
1488 .addFrameIndex(FrameIndex) // addr
1489 .addMemOperand(MMO)
1490 .addReg(MFI->getStackPtrOffsetReg(), RegState::Implicit);
1491
1492 if (RI.spillSGPRToVGPR())
1493 FrameInfo.setStackID(FrameIndex, TargetStackID::SGPRSpill);
1494 return;
1495 }
1496
1497 unsigned Opcode = RI.isVectorSuperClass(RC) ? getAVSpillSaveOpcode(SpillSize)
1498 : RI.isAGPRClass(RC) ? getAGPRSpillSaveOpcode(SpillSize)
1499 : getVGPRSpillSaveOpcode(SpillSize);
1500 MFI->setHasSpilledVGPRs();
1501
1502 BuildMI(MBB, MI, DL, get(Opcode))
1503 .addReg(SrcReg, getKillRegState(isKill)) // data
1504 .addFrameIndex(FrameIndex) // addr
1505 .addReg(MFI->getStackPtrOffsetReg()) // scratch_offset
1506 .addImm(0) // offset
1507 .addMemOperand(MMO);
1508}
1509
1510static unsigned getSGPRSpillRestoreOpcode(unsigned Size) {
1511 switch (Size) {
1512 case 4:
1513 return AMDGPU::SI_SPILL_S32_RESTORE;
1514 case 8:
1515 return AMDGPU::SI_SPILL_S64_RESTORE;
1516 case 12:
1517 return AMDGPU::SI_SPILL_S96_RESTORE;
1518 case 16:
1519 return AMDGPU::SI_SPILL_S128_RESTORE;
1520 case 20:
1521 return AMDGPU::SI_SPILL_S160_RESTORE;
1522 case 24:
1523 return AMDGPU::SI_SPILL_S192_RESTORE;
1524 case 28:
1525 return AMDGPU::SI_SPILL_S224_RESTORE;
1526 case 32:
1527 return AMDGPU::SI_SPILL_S256_RESTORE;
1528 case 64:
1529 return AMDGPU::SI_SPILL_S512_RESTORE;
1530 case 128:
1531 return AMDGPU::SI_SPILL_S1024_RESTORE;
1532 default:
1533 llvm_unreachable("unknown register size");
1534 }
1535}
1536
1537static unsigned getVGPRSpillRestoreOpcode(unsigned Size) {
1538 switch (Size) {
1539 case 4:
1540 return AMDGPU::SI_SPILL_V32_RESTORE;
1541 case 8:
1542 return AMDGPU::SI_SPILL_V64_RESTORE;
1543 case 12:
1544 return AMDGPU::SI_SPILL_V96_RESTORE;
1545 case 16:
1546 return AMDGPU::SI_SPILL_V128_RESTORE;
1547 case 20:
1548 return AMDGPU::SI_SPILL_V160_RESTORE;
1549 case 24:
1550 return AMDGPU::SI_SPILL_V192_RESTORE;
1551 case 28:
1552 return AMDGPU::SI_SPILL_V224_RESTORE;
1553 case 32:
1554 return AMDGPU::SI_SPILL_V256_RESTORE;
1555 case 64:
1556 return AMDGPU::SI_SPILL_V512_RESTORE;
1557 case 128:
1558 return AMDGPU::SI_SPILL_V1024_RESTORE;
1559 default:
1560 llvm_unreachable("unknown register size");
1561 }
1562}
1563
1564static unsigned getAGPRSpillRestoreOpcode(unsigned Size) {
1565 switch (Size) {
1566 case 4:
1567 return AMDGPU::SI_SPILL_A32_RESTORE;
1568 case 8:
1569 return AMDGPU::SI_SPILL_A64_RESTORE;
1570 case 12:
1571 return AMDGPU::SI_SPILL_A96_RESTORE;
1572 case 16:
1573 return AMDGPU::SI_SPILL_A128_RESTORE;
1574 case 20:
1575 return AMDGPU::SI_SPILL_A160_RESTORE;
1576 case 24:
1577 return AMDGPU::SI_SPILL_A192_RESTORE;
1578 case 28:
1579 return AMDGPU::SI_SPILL_A224_RESTORE;
1580 case 32:
1581 return AMDGPU::SI_SPILL_A256_RESTORE;
1582 case 64:
1583 return AMDGPU::SI_SPILL_A512_RESTORE;
1584 case 128:
1585 return AMDGPU::SI_SPILL_A1024_RESTORE;
1586 default:
1587 llvm_unreachable("unknown register size");
1588 }
1589}
1590
1591static unsigned getAVSpillRestoreOpcode(unsigned Size) {
1592 switch (Size) {
1593 case 4:
1594 return AMDGPU::SI_SPILL_AV32_RESTORE;
1595 case 8:
1596 return AMDGPU::SI_SPILL_AV64_RESTORE;
1597 case 12:
1598 return AMDGPU::SI_SPILL_AV96_RESTORE;
1599 case 16:
1600 return AMDGPU::SI_SPILL_AV128_RESTORE;
1601 case 20:
1602 return AMDGPU::SI_SPILL_AV160_RESTORE;
1603 case 24:
1604 return AMDGPU::SI_SPILL_AV192_RESTORE;
1605 case 28:
1606 return AMDGPU::SI_SPILL_AV224_RESTORE;
1607 case 32:
1608 return AMDGPU::SI_SPILL_AV256_RESTORE;
1609 case 64:
1610 return AMDGPU::SI_SPILL_AV512_RESTORE;
1611 case 128:
1612 return AMDGPU::SI_SPILL_AV1024_RESTORE;
1613 default:
1614 llvm_unreachable("unknown register size");
1615 }
1616}
1617
1618void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
1619 MachineBasicBlock::iterator MI,
1620 Register DestReg, int FrameIndex,
1621 const TargetRegisterClass *RC,
1622 const TargetRegisterInfo *TRI) const {
1623 MachineFunction *MF = MBB.getParent();
1624 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1625 MachineFrameInfo &FrameInfo = MF->getFrameInfo();
1626 const DebugLoc &DL = MBB.findDebugLoc(MI);
1627 unsigned SpillSize = TRI->getSpillSize(*RC);
1628
1629 MachinePointerInfo PtrInfo
1630 = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
1631
1632 MachineMemOperand *MMO = MF->getMachineMemOperand(
1633 PtrInfo, MachineMemOperand::MOLoad, FrameInfo.getObjectSize(FrameIndex),
1634 FrameInfo.getObjectAlign(FrameIndex));
1635
1636 if (RI.isSGPRClass(RC)) {
1637 MFI->setHasSpilledSGPRs();
1638 assert(DestReg != AMDGPU::M0 && "m0 should not be reloaded into");
1639 assert(DestReg != AMDGPU::EXEC_LO && DestReg != AMDGPU::EXEC_HI &&
1640        DestReg != AMDGPU::EXEC && "exec should not be spilled");
1641
1642 // FIXME: Maybe this should not include a memoperand because it will be
1643 // lowered to non-memory instructions.
1644 const MCInstrDesc &OpDesc = get(getSGPRSpillRestoreOpcode(SpillSize));
1645 if (DestReg.isVirtual() && SpillSize == 4) {
1646 MachineRegisterInfo &MRI = MF->getRegInfo();
1647 MRI.constrainRegClass(DestReg, &AMDGPU::SReg_32_XM0_XEXECRegClass);
1648 }
1649
1650 if (RI.spillSGPRToVGPR())
1651 FrameInfo.setStackID(FrameIndex, TargetStackID::SGPRSpill);
1652 BuildMI(MBB, MI, DL, OpDesc, DestReg)
1653 .addFrameIndex(FrameIndex) // addr
1654 .addMemOperand(MMO)
1655 .addReg(MFI->getStackPtrOffsetReg(), RegState::Implicit);
1656
1657 return;
1658 }
1659
1660 unsigned Opcode = RI.isVectorSuperClass(RC)
1661 ? getAVSpillRestoreOpcode(SpillSize)
1662 : RI.isAGPRClass(RC) ? getAGPRSpillRestoreOpcode(SpillSize)
1663 : getVGPRSpillRestoreOpcode(SpillSize);
1664 BuildMI(MBB, MI, DL, get(Opcode), DestReg)
1665 .addFrameIndex(FrameIndex) // vaddr
1666 .addReg(MFI->getStackPtrOffsetReg()) // scratch_offset
1667 .addImm(0) // offset
1668 .addMemOperand(MMO);
1669}
1670
1671void SIInstrInfo::insertNoop(MachineBasicBlock &MBB,
1672 MachineBasicBlock::iterator MI) const {
1673 insertNoops(MBB, MI, 1);
1674}
1675
1676void SIInstrInfo::insertNoops(MachineBasicBlock &MBB,
1677 MachineBasicBlock::iterator MI,
1678 unsigned Quantity) const {
1679 DebugLoc DL = MBB.findDebugLoc(MI);
1680 while (Quantity > 0) {
1681 unsigned Arg = std::min(Quantity, 8u);
1682 Quantity -= Arg;
1683 BuildMI(MBB, MI, DL, get(AMDGPU::S_NOP)).addImm(Arg - 1);
1684 }
1685}
1686
1687void SIInstrInfo::insertReturn(MachineBasicBlock &MBB) const {
1688 auto MF = MBB.getParent();
1689 SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
1690
1691 assert(Info->isEntryFunction());
1692
1693 if (MBB.succ_empty()) {
1694 bool HasNoTerminator = MBB.getFirstTerminator() == MBB.end();
1695 if (HasNoTerminator) {
1696 if (Info->returnsVoid()) {
1697 BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::S_ENDPGM)).addImm(0);
1698 } else {
1699 BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::SI_RETURN_TO_EPILOG));
1700 }
1701 }
1702 }
1703}
1704
1705unsigned SIInstrInfo::getNumWaitStates(const MachineInstr &MI) {
1706 switch (MI.getOpcode()) {
1707 default:
1708 if (MI.isMetaInstruction())
1709 return 0;
1710 return 1; // FIXME: Do wait states equal cycles?
1711
1712 case AMDGPU::S_NOP:
1713 return MI.getOperand(0).getImm() + 1;
1714
1715 // FIXME: Any other pseudo instruction?
1716 // SI_RETURN_TO_EPILOG is a fallthrough to code outside of the function. The
1717 // hazard, even if one exists, won't really be visible. Should we handle it?
1718 case AMDGPU::SI_MASKED_UNREACHABLE:
1719 case AMDGPU::WAVE_BARRIER:
1720 return 0;
1721 }
1722}
1723
1724bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
1725 const SIRegisterInfo *TRI = ST.getRegisterInfo();
1726 MachineBasicBlock &MBB = *MI.getParent();
1727 DebugLoc DL = MBB.findDebugLoc(MI);
1728 switch (MI.getOpcode()) {
1
Control jumps to 'case V_MOV_B64_DPP_PSEUDO:' at line 1845
1729 default: return TargetInstrInfo::expandPostRAPseudo(MI);
1730 case AMDGPU::S_MOV_B64_term:
1731 // This is only a terminator to get the correct spill code placement during
1732 // register allocation.
1733 MI.setDesc(get(AMDGPU::S_MOV_B64));
1734 break;
1735
1736 case AMDGPU::S_MOV_B32_term:
1737 // This is only a terminator to get the correct spill code placement during
1738 // register allocation.
1739 MI.setDesc(get(AMDGPU::S_MOV_B32));
1740 break;
1741
1742 case AMDGPU::S_XOR_B64_term:
1743 // This is only a terminator to get the correct spill code placement during
1744 // register allocation.
1745 MI.setDesc(get(AMDGPU::S_XOR_B64));
1746 break;
1747
1748 case AMDGPU::S_XOR_B32_term:
1749 // This is only a terminator to get the correct spill code placement during
1750 // register allocation.
1751 MI.setDesc(get(AMDGPU::S_XOR_B32));
1752 break;
1753 case AMDGPU::S_OR_B64_term:
1754 // This is only a terminator to get the correct spill code placement during
1755 // register allocation.
1756 MI.setDesc(get(AMDGPU::S_OR_B64));
1757 break;
1758 case AMDGPU::S_OR_B32_term:
1759 // This is only a terminator to get the correct spill code placement during
1760 // register allocation.
1761 MI.setDesc(get(AMDGPU::S_OR_B32));
1762 break;
1763
1764 case AMDGPU::S_ANDN2_B64_term:
1765 // This is only a terminator to get the correct spill code placement during
1766 // register allocation.
1767 MI.setDesc(get(AMDGPU::S_ANDN2_B64));
1768 break;
1769
1770 case AMDGPU::S_ANDN2_B32_term:
1771 // This is only a terminator to get the correct spill code placement during
1772 // register allocation.
1773 MI.setDesc(get(AMDGPU::S_ANDN2_B32));
1774 break;
1775
1776 case AMDGPU::S_AND_B64_term:
1777 // This is only a terminator to get the correct spill code placement during
1778 // register allocation.
1779 MI.setDesc(get(AMDGPU::S_AND_B64));
1780 break;
1781
1782 case AMDGPU::S_AND_B32_term:
1783 // This is only a terminator to get the correct spill code placement during
1784 // register allocation.
1785 MI.setDesc(get(AMDGPU::S_AND_B32));
1786 break;
1787
1788 case AMDGPU::V_MOV_B64_PSEUDO: {
1789 Register Dst = MI.getOperand(0).getReg();
1790 Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
1791 Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1);
1792
1793 const MachineOperand &SrcOp = MI.getOperand(1);
1794 // FIXME: Will this work for 64-bit floating point immediates?
1795 assert(!SrcOp.isFPImm());
1796 if (SrcOp.isImm()) {
1797 APInt Imm(64, SrcOp.getImm());
1798 APInt Lo(32, Imm.getLoBits(32).getZExtValue());
1799 APInt Hi(32, Imm.getHiBits(32).getZExtValue());
1800 if (ST.hasPackedFP32Ops() && Lo == Hi && isInlineConstant(Lo)) {
1801 BuildMI(MBB, MI, DL, get(AMDGPU::V_PK_MOV_B32), Dst)
1802 .addImm(SISrcMods::OP_SEL_1)
1803 .addImm(Lo.getSExtValue())
1804 .addImm(SISrcMods::OP_SEL_1)
1805 .addImm(Lo.getSExtValue())
1806 .addImm(0) // op_sel_lo
1807 .addImm(0) // op_sel_hi
1808 .addImm(0) // neg_lo
1809 .addImm(0) // neg_hi
1810 .addImm(0); // clamp
1811 } else {
1812 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
1813 .addImm(Lo.getSExtValue())
1814 .addReg(Dst, RegState::Implicit | RegState::Define);
1815 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
1816 .addImm(Hi.getSExtValue())
1817 .addReg(Dst, RegState::Implicit | RegState::Define);
1818 }
1819 } else {
1820 assert(SrcOp.isReg());
1821 if (ST.hasPackedFP32Ops() &&
1822 !RI.isAGPR(MBB.getParent()->getRegInfo(), SrcOp.getReg())) {
1823 BuildMI(MBB, MI, DL, get(AMDGPU::V_PK_MOV_B32), Dst)
1824 .addImm(SISrcMods::OP_SEL_1) // src0_mod
1825 .addReg(SrcOp.getReg())
1826 .addImm(SISrcMods::OP_SEL_0 | SISrcMods::OP_SEL_1) // src1_mod
1827 .addReg(SrcOp.getReg())
1828 .addImm(0) // op_sel_lo
1829 .addImm(0) // op_sel_hi
1830 .addImm(0) // neg_lo
1831 .addImm(0) // neg_hi
1832 .addImm(0); // clamp
1833 } else {
1834 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
1835 .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0))
1836 .addReg(Dst, RegState::Implicit | RegState::Define);
1837 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
1838 .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1))
1839 .addReg(Dst, RegState::Implicit | RegState::Define);
1840 }
1841 }
1842 MI.eraseFromParent();
1843 break;
1844 }
1845 case AMDGPU::V_MOV_B64_DPP_PSEUDO: {
1846 expandMovDPP64(MI);
2
Calling 'SIInstrInfo::expandMovDPP64'
1847 break;
1848 }
1849 case AMDGPU::S_MOV_B64_IMM_PSEUDO: {
1850 const MachineOperand &SrcOp = MI.getOperand(1);
1851 assert(!SrcOp.isFPImm());
1852 APInt Imm(64, SrcOp.getImm());
1853 if (Imm.isIntN(32) || isInlineConstant(Imm)) {
1854 MI.setDesc(get(AMDGPU::S_MOV_B64));
1855 break;
1856 }
1857
1858 Register Dst = MI.getOperand(0).getReg();
1859 Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
1860 Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1);
1861
1862 APInt Lo(32, Imm.getLoBits(32).getZExtValue());
1863 APInt Hi(32, Imm.getHiBits(32).getZExtValue());
1864 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DstLo)
1865 .addImm(Lo.getSExtValue())
1866 .addReg(Dst, RegState::Implicit | RegState::Define);
1867 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DstHi)
1868 .addImm(Hi.getSExtValue())
1869 .addReg(Dst, RegState::Implicit | RegState::Define);
1870 MI.eraseFromParent();
1871 break;
1872 }
1873 case AMDGPU::V_SET_INACTIVE_B32: {
1874 unsigned NotOpc = ST.isWave32() ? AMDGPU::S_NOT_B32 : AMDGPU::S_NOT_B64;
1875 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
1876 auto FirstNot = BuildMI(MBB, MI, DL, get(NotOpc), Exec).addReg(Exec);
1877 FirstNot->addRegisterDead(AMDGPU::SCC, TRI); // SCC is overwritten
1878 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), MI.getOperand(0).getReg())
1879 .add(MI.getOperand(2));
1880 BuildMI(MBB, MI, DL, get(NotOpc), Exec)
1881 .addReg(Exec);
1882 MI.eraseFromParent();
1883 break;
1884 }
1885 case AMDGPU::V_SET_INACTIVE_B64: {
1886 unsigned NotOpc = ST.isWave32() ? AMDGPU::S_NOT_B32 : AMDGPU::S_NOT_B64;
1887 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
1888 auto FirstNot = BuildMI(MBB, MI, DL, get(NotOpc), Exec).addReg(Exec);
1889 FirstNot->addRegisterDead(AMDGPU::SCC, TRI); // SCC is overwritten
1890 MachineInstr *Copy = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO),
1891 MI.getOperand(0).getReg())
1892 .add(MI.getOperand(2));
1893 expandPostRAPseudo(*Copy);
1894 BuildMI(MBB, MI, DL, get(NotOpc), Exec)
1895 .addReg(Exec);
1896 MI.eraseFromParent();
1897 break;
1898 }
1899 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V1:
1900 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V2:
1901 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V3:
1902 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V4:
1903 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V5:
1904 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V8:
1905 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V16:
1906 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V32:
1907 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V1:
1908 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V2:
1909 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V3:
1910 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V4:
1911 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V5:
1912 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V8:
1913 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V16:
1914 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V32:
1915 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V1:
1916 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V2:
1917 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V4:
1918 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V8:
1919 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V16: {
1920 const TargetRegisterClass *EltRC = getOpRegClass(MI, 2);
1921
1922 unsigned Opc;
1923 if (RI.hasVGPRs(EltRC)) {
1924 Opc = AMDGPU::V_MOVRELD_B32_e32;
1925 } else {
1926 Opc = RI.getRegSizeInBits(*EltRC) == 64 ? AMDGPU::S_MOVRELD_B64
1927 : AMDGPU::S_MOVRELD_B32;
1928 }
1929
1930 const MCInstrDesc &OpDesc = get(Opc);
1931 Register VecReg = MI.getOperand(0).getReg();
1932 bool IsUndef = MI.getOperand(1).isUndef();
1933 unsigned SubReg = MI.getOperand(3).getImm();
1934 assert(VecReg == MI.getOperand(1).getReg());
1935
1936 MachineInstrBuilder MIB =
1937 BuildMI(MBB, MI, DL, OpDesc)
1938 .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef)
1939 .add(MI.getOperand(2))
1940 .addReg(VecReg, RegState::ImplicitDefine)
1941 .addReg(VecReg, RegState::Implicit | (IsUndef ? RegState::Undef : 0));
1942
1943 const int ImpDefIdx =
1944 OpDesc.getNumOperands() + OpDesc.getNumImplicitUses();
1945 const int ImpUseIdx = ImpDefIdx + 1;
1946 MIB->tieOperands(ImpDefIdx, ImpUseIdx);
1947 MI.eraseFromParent();
1948 break;
1949 }
1950 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V1:
1951 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V2:
1952 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V3:
1953 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V4:
1954 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V5:
1955 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V8:
1956 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V16:
1957 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V32: {
1958 assert(ST.useVGPRIndexMode());
1959 Register VecReg = MI.getOperand(0).getReg();
1960 bool IsUndef = MI.getOperand(1).isUndef();
1961 Register Idx = MI.getOperand(3).getReg();
1962 Register SubReg = MI.getOperand(4).getImm();
1963
1964 MachineInstr *SetOn = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_ON))
1965 .addReg(Idx)
1966 .addImm(AMDGPU::VGPRIndexMode::DST_ENABLE);
1967 SetOn->getOperand(3).setIsUndef();
1968
1969 const MCInstrDesc &OpDesc = get(AMDGPU::V_MOV_B32_indirect_write);
1970 MachineInstrBuilder MIB =
1971 BuildMI(MBB, MI, DL, OpDesc)
1972 .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef)
1973 .add(MI.getOperand(2))
1974 .addReg(VecReg, RegState::ImplicitDefine)
1975 .addReg(VecReg,
1976 RegState::Implicit | (IsUndef ? RegState::Undef : 0));
1977
1978 const int ImpDefIdx = OpDesc.getNumOperands() + OpDesc.getNumImplicitUses();
1979 const int ImpUseIdx = ImpDefIdx + 1;
1980 MIB->tieOperands(ImpDefIdx, ImpUseIdx);
1981
1982 MachineInstr *SetOff = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_OFF));
1983
1984 finalizeBundle(MBB, SetOn->getIterator(), std::next(SetOff->getIterator()));
1985
1986 MI.eraseFromParent();
1987 break;
1988 }
1989 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V1:
1990 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V2:
1991 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V3:
1992 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V4:
1993 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V5:
1994 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V8:
1995 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V16:
1996 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V32: {
1997 assert(ST.useVGPRIndexMode());
1998 Register Dst = MI.getOperand(0).getReg();
1999 Register VecReg = MI.getOperand(1).getReg();
2000 bool IsUndef = MI.getOperand(1).isUndef();
2001 Register Idx = MI.getOperand(2).getReg();
2002 Register SubReg = MI.getOperand(3).getImm();
2003
2004 MachineInstr *SetOn = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_ON))
2005 .addReg(Idx)
2006 .addImm(AMDGPU::VGPRIndexMode::SRC0_ENABLE);
2007 SetOn->getOperand(3).setIsUndef();
2008
2009 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_indirect_read))
2010 .addDef(Dst)
2011 .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef)
2012 .addReg(VecReg, RegState::Implicit | (IsUndef ? RegState::Undef : 0));
2013
2014 MachineInstr *SetOff = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_OFF));
2015
2016 finalizeBundle(MBB, SetOn->getIterator(), std::next(SetOff->getIterator()));
2017
2018 MI.eraseFromParent();
2019 break;
2020 }
2021 case AMDGPU::SI_PC_ADD_REL_OFFSET: {
2022 MachineFunction &MF = *MBB.getParent();
2023 Register Reg = MI.getOperand(0).getReg();
2024 Register RegLo = RI.getSubReg(Reg, AMDGPU::sub0);
2025 Register RegHi = RI.getSubReg(Reg, AMDGPU::sub1);
2026
2027 // Create a bundle so these instructions won't be re-ordered by the
2028 // post-RA scheduler.
2029 MIBundleBuilder Bundler(MBB, MI);
2030 Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_GETPC_B64), Reg));
2031
2032 // Add 32-bit offset from this instruction to the start of the
2033 // constant data.
2034 Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADD_U32), RegLo)
2035 .addReg(RegLo)
2036 .add(MI.getOperand(1)));
2037
2038 MachineInstrBuilder MIB = BuildMI(MF, DL, get(AMDGPU::S_ADDC_U32), RegHi)
2039 .addReg(RegHi);
2040 MIB.add(MI.getOperand(2));
2041
2042 Bundler.append(MIB);
2043 finalizeBundle(MBB, Bundler.begin());
2044
2045 MI.eraseFromParent();
2046 break;
2047 }
2048 case AMDGPU::ENTER_STRICT_WWM: {
2049 // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when
2050 // Whole Wave Mode is entered.
2051 MI.setDesc(get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32
2052 : AMDGPU::S_OR_SAVEEXEC_B64));
2053 break;
2054 }
2055 case AMDGPU::ENTER_STRICT_WQM: {
2056 // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when
2057 // STRICT_WQM is entered.
2058 const unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
2059 const unsigned WQMOp = ST.isWave32() ? AMDGPU::S_WQM_B32 : AMDGPU::S_WQM_B64;
2060 const unsigned MovOp = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
2061 BuildMI(MBB, MI, DL, get(MovOp), MI.getOperand(0).getReg()).addReg(Exec);
2062 BuildMI(MBB, MI, DL, get(WQMOp), Exec).addReg(Exec);
2063
2064 MI.eraseFromParent();
2065 break;
2066 }
2067 case AMDGPU::EXIT_STRICT_WWM:
2068 case AMDGPU::EXIT_STRICT_WQM: {
2069 // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when
2070 // WWM/STRICT_WQM is exited.
2071 MI.setDesc(get(ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64));
2072 break;
2073 }
2074 }
2075 return true;
2076}
2077
2078std::pair<MachineInstr*, MachineInstr*>
2079SIInstrInfo::expandMovDPP64(MachineInstr &MI) const {
2080 assert(MI.getOpcode() == AMDGPU::V_MOV_B64_DPP_PSEUDO);
3
'?' condition is true
2081
2082 MachineBasicBlock &MBB = *MI.getParent();
2083 DebugLoc DL = MBB.findDebugLoc(MI);
2084 MachineFunction *MF = MBB.getParent();
2085 MachineRegisterInfo &MRI = MF->getRegInfo();
2086 Register Dst = MI.getOperand(0).getReg();
2087 unsigned Part = 0;
2088 MachineInstr *Split[2];
2089
2090 for (auto Sub : { AMDGPU::sub0, AMDGPU::sub1 }) {
4
Assuming '__begin1' is equal to '__end1'
2091 auto MovDPP = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_dpp));
2092 if (Dst.isPhysical()) {
2093 MovDPP.addDef(RI.getSubReg(Dst, Sub));
2094 } else {
2095 assert(MRI.isSSA());
2096 auto Tmp = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2097 MovDPP.addDef(Tmp);
2098 }
2099
2100 for (unsigned I = 1; I <= 2; ++I) { // old and src operands.
2101 const MachineOperand &SrcOp = MI.getOperand(I);
2102 assert(!SrcOp.isFPImm());
2103 if (SrcOp.isImm()) {
2104 APInt Imm(64, SrcOp.getImm());
2105 Imm.ashrInPlace(Part * 32);
2106 MovDPP.addImm(Imm.getLoBits(32).getZExtValue());
2107 } else {
2108 assert(SrcOp.isReg());
2109 Register Src = SrcOp.getReg();
2110 if (Src.isPhysical())
2111 MovDPP.addReg(RI.getSubReg(Src, Sub));
2112 else
2113 MovDPP.addReg(Src, SrcOp.isUndef() ? RegState::Undef : 0, Sub);
2114 }
2115 }
2116
2117 for (unsigned I = 3; I < MI.getNumExplicitOperands(); ++I)
2118 MovDPP.addImm(MI.getOperand(I).getImm());
2119
2120 Split[Part] = MovDPP;
2121 ++Part;
2122 }
2123
2124 if (Dst.isVirtual())
5
Calling 'Register::isVirtual'
11
Returning from 'Register::isVirtual'
12
Taking true branch
2125 BuildMI(MBB, MI, DL, get(AMDGPU::REG_SEQUENCE), Dst)
2126 .addReg(Split[0]->getOperand(0).getReg())
13
Called C++ object pointer is uninitialized
2127 .addImm(AMDGPU::sub0)
2128 .addReg(Split[1]->getOperand(0).getReg())
2129 .addImm(AMDGPU::sub1);
2130
2131 MI.eraseFromParent();
2132 return std::make_pair(Split[0], Split[1]);
2133}
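
The path above hinges on the analyzer's assumption at line 2090 that the braced-init-list range { AMDGPU::sub0, AMDGPU::sub1 } is empty ('__begin1' equal to '__end1'), so neither Split[0] nor Split[1] is assigned before the dereference at line 2126. A minimal, self-contained sketch of the pattern the checker reports (hypothetical names, not taken from SIInstrInfo.cpp):

    #include <cstdio>

    struct Node { int Value; };

    // Sketch only: Out[] is written solely inside the loop, so on the path
    // where Count == 0 the pointers stay uninitialized and the final
    // dereference reads an indeterminate pointer value.
    static int pickFirst(const Node *Pool, unsigned Count) {
      const Node *Out[2];
      unsigned Part = 0;
      for (unsigned I = 0; I < Count && Part < 2; ++I)
        Out[Part++] = &Pool[I];
      return Out[0]->Value; // flagged when the loop body never runs
    }

    int main() {
      Node Pool[2] = {{1}, {2}};
      std::printf("%d\n", pickFirst(Pool, 2));
      return 0;
    }

In expandMovDPP64 itself the range-for over the two subregister indices always iterates twice, so the assumed empty-range path cannot occur at run time; the sketch only reproduces the shape the checker matches.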
2134
2135bool SIInstrInfo::swapSourceModifiers(MachineInstr &MI,
2136 MachineOperand &Src0,
2137 unsigned Src0OpName,
2138 MachineOperand &Src1,
2139 unsigned Src1OpName) const {
2140 MachineOperand *Src0Mods = getNamedOperand(MI, Src0OpName);
2141 if (!Src0Mods)
2142 return false;
2143
2144 MachineOperand *Src1Mods = getNamedOperand(MI, Src1OpName);
2145 assert(Src1Mods &&
2146        "All commutable instructions have both src0 and src1 modifiers");
2147
2148 int Src0ModsVal = Src0Mods->getImm();
2149 int Src1ModsVal = Src1Mods->getImm();
2150
2151 Src1Mods->setImm(Src0ModsVal);
2152 Src0Mods->setImm(Src1ModsVal);
2153 return true;
2154}
2155
2156static MachineInstr *swapRegAndNonRegOperand(MachineInstr &MI,
2157 MachineOperand &RegOp,
2158 MachineOperand &NonRegOp) {
2159 Register Reg = RegOp.getReg();
2160 unsigned SubReg = RegOp.getSubReg();
2161 bool IsKill = RegOp.isKill();
2162 bool IsDead = RegOp.isDead();
2163 bool IsUndef = RegOp.isUndef();
2164 bool IsDebug = RegOp.isDebug();
2165
2166 if (NonRegOp.isImm())
2167 RegOp.ChangeToImmediate(NonRegOp.getImm());
2168 else if (NonRegOp.isFI())
2169 RegOp.ChangeToFrameIndex(NonRegOp.getIndex());
2170 else if (NonRegOp.isGlobal()) {
2171 RegOp.ChangeToGA(NonRegOp.getGlobal(), NonRegOp.getOffset(),
2172 NonRegOp.getTargetFlags());
2173 } else
2174 return nullptr;
2175
2176 // Make sure we don't reinterpret a subreg index in the target flags.
2177 RegOp.setTargetFlags(NonRegOp.getTargetFlags());
2178
2179 NonRegOp.ChangeToRegister(Reg, false, false, IsKill, IsDead, IsUndef, IsDebug);
2180 NonRegOp.setSubReg(SubReg);
2181
2182 return &MI;
2183}
2184
2185MachineInstr *SIInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
2186 unsigned Src0Idx,
2187 unsigned Src1Idx) const {
2188 assert(!NewMI && "this should never be used");
2189
2190 unsigned Opc = MI.getOpcode();
2191 int CommutedOpcode = commuteOpcode(Opc);
2192 if (CommutedOpcode == -1)
2193 return nullptr;
2194
2195 assert(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) ==
2196            static_cast<int>(Src0Idx) &&
2197        AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) ==
2198            static_cast<int>(Src1Idx) &&
2199        "inconsistency with findCommutedOpIndices");
2200
2201 MachineOperand &Src0 = MI.getOperand(Src0Idx);
2202 MachineOperand &Src1 = MI.getOperand(Src1Idx);
2203
2204 MachineInstr *CommutedMI = nullptr;
2205 if (Src0.isReg() && Src1.isReg()) {
2206 if (isOperandLegal(MI, Src1Idx, &Src0)) {
2207 // Be sure to copy the source modifiers to the right place.
2208 CommutedMI
2209 = TargetInstrInfo::commuteInstructionImpl(MI, NewMI, Src0Idx, Src1Idx);
2210 }
2211
2212 } else if (Src0.isReg() && !Src1.isReg()) {
2213 // src0 should always be able to support any operand type, so no need to
2214 // check operand legality.
2215 CommutedMI = swapRegAndNonRegOperand(MI, Src0, Src1);
2216 } else if (!Src0.isReg() && Src1.isReg()) {
2217 if (isOperandLegal(MI, Src1Idx, &Src0))
2218 CommutedMI = swapRegAndNonRegOperand(MI, Src1, Src0);
2219 } else {
2220 // FIXME: Found two non registers to commute. This does happen.
2221 return nullptr;
2222 }
2223
2224 if (CommutedMI) {
2225 swapSourceModifiers(MI, Src0, AMDGPU::OpName::src0_modifiers,
2226 Src1, AMDGPU::OpName::src1_modifiers);
2227
2228 CommutedMI->setDesc(get(CommutedOpcode));
2229 }
2230
2231 return CommutedMI;
2232}
2233
2234// This needs to be implemented because the source modifiers may be inserted
2235// between the true commutable operands, and the base
2236// TargetInstrInfo::commuteInstruction uses it.
2237bool SIInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
2238 unsigned &SrcOpIdx0,
2239 unsigned &SrcOpIdx1) const {
2240 return findCommutedOpIndices(MI.getDesc(), SrcOpIdx0, SrcOpIdx1);
2241}
2242
2243bool SIInstrInfo::findCommutedOpIndices(MCInstrDesc Desc, unsigned &SrcOpIdx0,
2244 unsigned &SrcOpIdx1) const {
2245 if (!Desc.isCommutable())
2246 return false;
2247
2248 unsigned Opc = Desc.getOpcode();
2249 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
2250 if (Src0Idx == -1)
2251 return false;
2252
2253 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
2254 if (Src1Idx == -1)
2255 return false;
2256
2257 return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx);
2258}
2259
2260bool SIInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
2261 int64_t BrOffset) const {
2262 // BranchRelaxation should never have to check s_setpc_b64 because its dest
2263 // block is unanalyzable.
2264 assert(BranchOp != AMDGPU::S_SETPC_B64);
2265
2266 // Convert to dwords.
2267 BrOffset /= 4;
2268
2269 // The branch instructions do PC += signext(SIMM16 * 4) + 4, so the offset is
2270 // from the next instruction.
2271 BrOffset -= 1;
2272
2273 return isIntN(BranchOffsetBits, BrOffset);
2274}
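
A numeric check of the conversion above (a sketch under the assumption that BranchOffsetBits is the 16-bit signed SIMM16 width these branch instructions encode):

    #include <cstdint>
    #include <cstdio>

    // Mirrors the dword conversion and next-instruction adjustment:
    // the branch computes PC += signext(SIMM16 * 4) + 4.
    static bool offsetInRange(int64_t BrOffsetBytes) {
      int64_t Dwords = BrOffsetBytes / 4; // SIMM16 counts dwords
      Dwords -= 1;                        // offset is measured from the next instruction
      return Dwords >= INT16_MIN && Dwords <= INT16_MAX;
    }

    int main() {
      std::printf("%d\n", offsetInRange(0x20000)); // 32767 dwords, still in range
      std::printf("%d\n", offsetInRange(0x20004)); // 32768 dwords, out of range
      return 0;
    }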
2275
2276MachineBasicBlock *SIInstrInfo::getBranchDestBlock(
2277 const MachineInstr &MI) const {
2278 if (MI.getOpcode() == AMDGPU::S_SETPC_B64) {
2279 // This would be a difficult analysis to perform, but can always be legal so
2280 // there's no need to analyze it.
2281 return nullptr;
2282 }
2283
2284 return MI.getOperand(0).getMBB();
2285}
2286
2287void SIInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
2288 MachineBasicBlock &DestBB,
2289 MachineBasicBlock &RestoreBB,
2290 const DebugLoc &DL, int64_t BrOffset,
2291 RegScavenger *RS) const {
2292 assert(RS && "RegScavenger required for long branching");
2293 assert(MBB.empty() &&
2294        "new block should be inserted for expanding unconditional branch");
2295 assert(MBB.pred_size() == 1);
2296 assert(RestoreBB.empty() &&
2297        "restore block should be inserted for restoring clobbered registers");
2298
2299 MachineFunction *MF = MBB.getParent();
2300 MachineRegisterInfo &MRI = MF->getRegInfo();
2301
2302 // FIXME: Virtual register workaround for RegScavenger not working with empty
2303 // blocks.
2304 Register PCReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2305
2306 auto I = MBB.end();
2307
2308 // We need to compute the offset relative to the instruction immediately after
2309 // s_getpc_b64. Insert pc arithmetic code before last terminator.
2310 MachineInstr *GetPC = BuildMI(MBB, I, DL, get(AMDGPU::S_GETPC_B64), PCReg);
2311
2312 auto &MCCtx = MF->getContext();
2313 MCSymbol *PostGetPCLabel =
2314 MCCtx.createTempSymbol("post_getpc", /*AlwaysAddSuffix=*/true);
2315 GetPC->setPostInstrSymbol(*MF, PostGetPCLabel);
2316
2317 MCSymbol *OffsetLo =
2318 MCCtx.createTempSymbol("offset_lo", /*AlwaysAddSuffix=*/true);
2319 MCSymbol *OffsetHi =
2320 MCCtx.createTempSymbol("offset_hi", /*AlwaysAddSuffix=*/true);
2321 BuildMI(MBB, I, DL, get(AMDGPU::S_ADD_U32))
2322 .addReg(PCReg, RegState::Define, AMDGPU::sub0)
2323 .addReg(PCReg, 0, AMDGPU::sub0)
2324 .addSym(OffsetLo, MO_FAR_BRANCH_OFFSET);
2325 BuildMI(MBB, I, DL, get(AMDGPU::S_ADDC_U32))
2326 .addReg(PCReg, RegState::Define, AMDGPU::sub1)
2327 .addReg(PCReg, 0, AMDGPU::sub1)
2328 .addSym(OffsetHi, MO_FAR_BRANCH_OFFSET);
2329
2330 // Insert the indirect branch after the other terminator.
2331 BuildMI(&MBB, DL, get(AMDGPU::S_SETPC_B64))
2332 .addReg(PCReg);
2333
2334 // FIXME: If spilling is necessary, this will fail because this scavenger has
2335 // no emergency stack slots. It is non-trivial to spill in this situation,
2336 // because the restore code needs to be specially placed after the
2337 // jump. BranchRelaxation then needs to be made aware of the newly inserted
2338 // block.
2339 //
2340 // If a spill is needed for the pc register pair, we need to insert a spill
2341 // restore block right before the destination block, and insert a short branch
2342 // into the old destination block's fallthrough predecessor.
2343 // e.g.:
2344 //
2345 // s_cbranch_scc0 skip_long_branch:
2346 //
2347 // long_branch_bb:
2348 // spill s[8:9]
2349 // s_getpc_b64 s[8:9]
2350 // s_add_u32 s8, s8, restore_bb
2351 // s_addc_u32 s9, s9, 0
2352 // s_setpc_b64 s[8:9]
2353 //
2354 // skip_long_branch:
2355 // foo;
2356 //
2357 // .....
2358 //
2359 // dest_bb_fallthrough_predecessor:
2360 // bar;
2361 // s_branch dest_bb
2362 //
2363 // restore_bb:
2364 // restore s[8:9]
2365 // fallthrough dest_bb
2366 ///
2367 // dest_bb:
2368 // buzz;
2369
2370 RS->enterBasicBlockEnd(MBB);
2371 Register Scav = RS->scavengeRegisterBackwards(
2372 AMDGPU::SReg_64RegClass, MachineBasicBlock::iterator(GetPC),
2373 /* RestoreAfter */ false, 0, /* AllowSpill */ false);
2374 if (Scav) {
2375 RS->setRegUsed(Scav);
2376 MRI.replaceRegWith(PCReg, Scav);
2377 MRI.clearVirtRegs();
2378 } else {
2379 // As SGPR needs VGPR to be spilled, we reuse the slot of temporary VGPR for
2380 // SGPR spill.
2381 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
2382 const SIRegisterInfo *TRI = ST.getRegisterInfo();
2383 TRI->spillEmergencySGPR(GetPC, RestoreBB, AMDGPU::SGPR0_SGPR1, RS);
2384 MRI.replaceRegWith(PCReg, AMDGPU::SGPR0_SGPR1);
2385 MRI.clearVirtRegs();
2386 }
2387
2388 MCSymbol *DestLabel = Scav ? DestBB.getSymbol() : RestoreBB.getSymbol();
2389 // Now, the distance could be defined.
2390 auto *Offset = MCBinaryExpr::createSub(
2391 MCSymbolRefExpr::create(DestLabel, MCCtx),
2392 MCSymbolRefExpr::create(PostGetPCLabel, MCCtx), MCCtx);
2393 // Add offset assignments.
2394 auto *Mask = MCConstantExpr::create(0xFFFFFFFFULL, MCCtx);
2395 OffsetLo->setVariableValue(MCBinaryExpr::createAnd(Offset, Mask, MCCtx));
2396 auto *ShAmt = MCConstantExpr::create(32, MCCtx);
2397 OffsetHi->setVariableValue(MCBinaryExpr::createAShr(Offset, ShAmt, MCCtx));
2398}
2399
2400unsigned SIInstrInfo::getBranchOpcode(SIInstrInfo::BranchPredicate Cond) {
2401 switch (Cond) {
2402 case SIInstrInfo::SCC_TRUE:
2403 return AMDGPU::S_CBRANCH_SCC1;
2404 case SIInstrInfo::SCC_FALSE:
2405 return AMDGPU::S_CBRANCH_SCC0;
2406 case SIInstrInfo::VCCNZ:
2407 return AMDGPU::S_CBRANCH_VCCNZ;
2408 case SIInstrInfo::VCCZ:
2409 return AMDGPU::S_CBRANCH_VCCZ;
2410 case SIInstrInfo::EXECNZ:
2411 return AMDGPU::S_CBRANCH_EXECNZ;
2412 case SIInstrInfo::EXECZ:
2413 return AMDGPU::S_CBRANCH_EXECZ;
2414 default:
2415 llvm_unreachable("invalid branch predicate");
2416 }
2417}
2418
2419SIInstrInfo::BranchPredicate SIInstrInfo::getBranchPredicate(unsigned Opcode) {
2420 switch (Opcode) {
2421 case AMDGPU::S_CBRANCH_SCC0:
2422 return SCC_FALSE;
2423 case AMDGPU::S_CBRANCH_SCC1:
2424 return SCC_TRUE;
2425 case AMDGPU::S_CBRANCH_VCCNZ:
2426 return VCCNZ;
2427 case AMDGPU::S_CBRANCH_VCCZ:
2428 return VCCZ;
2429 case AMDGPU::S_CBRANCH_EXECNZ:
2430 return EXECNZ;
2431 case AMDGPU::S_CBRANCH_EXECZ:
2432 return EXECZ;
2433 default:
2434 return INVALID_BR;
2435 }
2436}
2437
2438bool SIInstrInfo::analyzeBranchImpl(MachineBasicBlock &MBB,
2439 MachineBasicBlock::iterator I,
2440 MachineBasicBlock *&TBB,
2441 MachineBasicBlock *&FBB,
2442 SmallVectorImpl<MachineOperand> &Cond,
2443 bool AllowModify) const {
2444 if (I->getOpcode() == AMDGPU::S_BRANCH) {
2445 // Unconditional Branch
2446 TBB = I->getOperand(0).getMBB();
2447 return false;
2448 }
2449
2450 MachineBasicBlock *CondBB = nullptr;
2451
2452 if (I->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) {
2453 CondBB = I->getOperand(1).getMBB();
2454 Cond.push_back(I->getOperand(0));
2455 } else {
2456 BranchPredicate Pred = getBranchPredicate(I->getOpcode());
2457 if (Pred == INVALID_BR)
2458 return true;
2459
2460 CondBB = I->getOperand(0).getMBB();
2461 Cond.push_back(MachineOperand::CreateImm(Pred));
2462 Cond.push_back(I->getOperand(1)); // Save the branch register.
2463 }
2464 ++I;
2465
2466 if (I == MBB.end()) {
2467 // Conditional branch followed by fall-through.
2468 TBB = CondBB;
2469 return false;
2470 }
2471
2472 if (I->getOpcode() == AMDGPU::S_BRANCH) {
2473 TBB = CondBB;
2474 FBB = I->getOperand(0).getMBB();
2475 return false;
2476 }
2477
2478 return true;
2479}
2480
2481bool SIInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
2482 MachineBasicBlock *&FBB,
2483 SmallVectorImpl<MachineOperand> &Cond,
2484 bool AllowModify) const {
2485 MachineBasicBlock::iterator I = MBB.getFirstTerminator();
2486 auto E = MBB.end();
2487 if (I == E)
2488 return false;
2489
2490 // Skip over the instructions that are artificially terminators for special
2491 // exec management.
2492 while (I != E && !I->isBranch() && !I->isReturn()) {
2493 switch (I->getOpcode()) {
2494 case AMDGPU::S_MOV_B64_term:
2495 case AMDGPU::S_XOR_B64_term:
2496 case AMDGPU::S_OR_B64_term:
2497 case AMDGPU::S_ANDN2_B64_term:
2498 case AMDGPU::S_AND_B64_term:
2499 case AMDGPU::S_MOV_B32_term:
2500 case AMDGPU::S_XOR_B32_term:
2501 case AMDGPU::S_OR_B32_term:
2502 case AMDGPU::S_ANDN2_B32_term:
2503 case AMDGPU::S_AND_B32_term:
2504 break;
2505 case AMDGPU::SI_IF:
2506 case AMDGPU::SI_ELSE:
2507 case AMDGPU::SI_KILL_I1_TERMINATOR:
2508 case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
2509 // FIXME: It's messy that these need to be considered here at all.
2510 return true;
2511 default:
2512 llvm_unreachable("unexpected non-branch terminator inst");
2513 }
2514
2515 ++I;
2516 }
2517
2518 if (I == E)
2519 return false;
2520
2521 return analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify);
2522}
2523
2524unsigned SIInstrInfo::removeBranch(MachineBasicBlock &MBB,
2525 int *BytesRemoved) const {
2526 unsigned Count = 0;
2527 unsigned RemovedSize = 0;
2528 for (MachineInstr &MI : llvm::make_early_inc_range(MBB.terminators())) {
2529 // Skip over artificial terminators when removing instructions.
2530 if (MI.isBranch() || MI.isReturn()) {
2531 RemovedSize += getInstSizeInBytes(MI);
2532 MI.eraseFromParent();
2533 ++Count;
2534 }
2535 }
2536
2537 if (BytesRemoved)
2538 *BytesRemoved = RemovedSize;
2539
2540 return Count;
2541}
2542
2543// Copy the flags onto the implicit condition register operand.
2544static void preserveCondRegFlags(MachineOperand &CondReg,
2545 const MachineOperand &OrigCond) {
2546 CondReg.setIsUndef(OrigCond.isUndef());
2547 CondReg.setIsKill(OrigCond.isKill());
2548}
2549
2550unsigned SIInstrInfo::insertBranch(MachineBasicBlock &MBB,
2551 MachineBasicBlock *TBB,
2552 MachineBasicBlock *FBB,
2553 ArrayRef<MachineOperand> Cond,
2554 const DebugLoc &DL,
2555 int *BytesAdded) const {
2556 if (!FBB && Cond.empty()) {
2557 BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
2558 .addMBB(TBB);
2559 if (BytesAdded)
2560 *BytesAdded = ST.hasOffset3fBug() ? 8 : 4;
2561 return 1;
2562 }
2563
2564 if(Cond.size() == 1 && Cond[0].isReg()) {
2565 BuildMI(&MBB, DL, get(AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO))
2566 .add(Cond[0])
2567 .addMBB(TBB);
2568 return 1;
2569 }
2570
2571 assert(TBB && Cond[0].isImm());
2572
2573 unsigned Opcode
2574 = getBranchOpcode(static_cast<BranchPredicate>(Cond[0].getImm()));
2575
2576 if (!FBB) {
2577 Cond[1].isUndef();
2578 MachineInstr *CondBr =
2579 BuildMI(&MBB, DL, get(Opcode))
2580 .addMBB(TBB);
2581
2582 // Copy the flags onto the implicit condition register operand.
2583 preserveCondRegFlags(CondBr->getOperand(1), Cond[1]);
2584 fixImplicitOperands(*CondBr);
2585
2586 if (BytesAdded)
2587 *BytesAdded = ST.hasOffset3fBug() ? 8 : 4;
2588 return 1;
2589 }
2590
2591 assert(TBB && FBB);
2592
2593 MachineInstr *CondBr =
2594 BuildMI(&MBB, DL, get(Opcode))
2595 .addMBB(TBB);
2596 fixImplicitOperands(*CondBr);
2597 BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
2598 .addMBB(FBB);
2599
2600 MachineOperand &CondReg = CondBr->getOperand(1);
2601 CondReg.setIsUndef(Cond[1].isUndef());
2602 CondReg.setIsKill(Cond[1].isKill());
2603
2604 if (BytesAdded)
2605 *BytesAdded = ST.hasOffset3fBug() ? 16 : 8;
2606
2607 return 2;
2608}
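// Illustrative sketch, assuming an SCC_TRUE condition and both blocks given:
// for a fully formed diamond the code above emits roughly
//   S_CBRANCH_SCC1 %bb.TBB
//   S_BRANCH %bb.FBB
// and reports two instructions (8 bytes, or 16 when the offset-3f bug padding
// applies).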
2609
2610bool SIInstrInfo::reverseBranchCondition(
2611 SmallVectorImpl<MachineOperand> &Cond) const {
2612 if (Cond.size() != 2) {
2613 return true;
2614 }
2615
2616 if (Cond[0].isImm()) {
2617 Cond[0].setImm(-Cond[0].getImm());
2618 return false;
2619 }
2620
2621 return true;
2622}
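// This works because the BranchPredicate values encode each predicate and its
// inverse as a +N/-N pair (for example SCC_TRUE and SCC_FALSE are +1/-1), so
// negating the immediate flips the condition. Sketch of the effect, assuming a
// two-operand SCC condition:
//   Cond = { Imm(SCC_TRUE), Reg(SCC) }  ->  Cond = { Imm(SCC_FALSE), Reg(SCC) }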
2623
2624bool SIInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
2625 ArrayRef<MachineOperand> Cond,
2626 Register DstReg, Register TrueReg,
2627 Register FalseReg, int &CondCycles,
2628 int &TrueCycles, int &FalseCycles) const {
2629 switch (Cond[0].getImm()) {
2630 case VCCNZ:
2631 case VCCZ: {
2632 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
2633 const TargetRegisterClass *RC = MRI.getRegClass(TrueReg);
2634 if (MRI.getRegClass(FalseReg) != RC)
2635 return false;
2636
2637 int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32;
2638 CondCycles = TrueCycles = FalseCycles = NumInsts; // ???
2639
2640 // Limit to equal cost for branch vs. N v_cndmask_b32s.
2641 return RI.hasVGPRs(RC) && NumInsts <= 6;
2642 }
2643 case SCC_TRUE:
2644 case SCC_FALSE: {
2645 // FIXME: We could insert for VGPRs if we could replace the original compare
2646 // with a vector one.
2647 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
2648 const TargetRegisterClass *RC = MRI.getRegClass(TrueReg);
2649 if (MRI.getRegClass(FalseReg) != RC)
2650 return false;
2651
2652 int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32;
2653
2654 // Widths that are multiples of 8 bytes (64 bits) can use s_cselect_b64.
2655 if (NumInsts % 2 == 0)
2656 NumInsts /= 2;
2657
2658 CondCycles = TrueCycles = FalseCycles = NumInsts; // ???
2659 return RI.isSGPRClass(RC);
2660 }
2661 default:
2662 return false;
2663 }
2664}
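// Worked example of the cost model above, with assumed widths: a 128-bit VGPR
// select under VCCNZ gives NumInsts = 128 / 32 = 4, within the <= 6 limit, so
// it is allowed; the same width in SGPRs under SCC_TRUE halves to 2 because
// each pair of 32-bit pieces can use one s_cselect_b64.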
2665
2666void SIInstrInfo::insertSelect(MachineBasicBlock &MBB,
2667 MachineBasicBlock::iterator I, const DebugLoc &DL,
2668 Register DstReg, ArrayRef<MachineOperand> Cond,
2669 Register TrueReg, Register FalseReg) const {
2670 BranchPredicate Pred = static_cast<BranchPredicate>(Cond[0].getImm());
2671 if (Pred == VCCZ || Pred == SCC_FALSE) {
2672 Pred = static_cast<BranchPredicate>(-Pred);
2673 std::swap(TrueReg, FalseReg);
2674 }
2675
2676 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
2677 const TargetRegisterClass *DstRC = MRI.getRegClass(DstReg);
2678 unsigned DstSize = RI.getRegSizeInBits(*DstRC);
2679
2680 if (DstSize == 32) {
2681 MachineInstr *Select;
2682 if (Pred == SCC_TRUE) {
2683 Select = BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B32), DstReg)
2684 .addReg(TrueReg)
2685 .addReg(FalseReg);
2686 } else {
2687 // Instruction's operands are backwards from what is expected.
2688 Select = BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e32), DstReg)
2689 .addReg(FalseReg)
2690 .addReg(TrueReg);
2691 }
2692
2693 preserveCondRegFlags(Select->getOperand(3), Cond[1]);
2694 return;
2695 }
2696
2697 if (DstSize == 64 && Pred == SCC_TRUE) {
2698 MachineInstr *Select =
2699 BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), DstReg)
2700 .addReg(TrueReg)
2701 .addReg(FalseReg);
2702
2703 preserveCondRegFlags(Select->getOperand(3), Cond[1]);
2704 return;
2705 }
2706
2707 static const int16_t Sub0_15[] = {
2708 AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
2709 AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
2710 AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
2711 AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15,
2712 };
2713
2714 static const int16_t Sub0_15_64[] = {
2715 AMDGPU::sub0_sub1, AMDGPU::sub2_sub3,
2716 AMDGPU::sub4_sub5, AMDGPU::sub6_sub7,
2717 AMDGPU::sub8_sub9, AMDGPU::sub10_sub11,
2718 AMDGPU::sub12_sub13, AMDGPU::sub14_sub15,
2719 };
2720
2721 unsigned SelOp = AMDGPU::V_CNDMASK_B32_e32;
2722 const TargetRegisterClass *EltRC = &AMDGPU::VGPR_32RegClass;
2723 const int16_t *SubIndices = Sub0_15;
2724 int NElts = DstSize / 32;
2725
2726 // 64-bit select is only available for SALU.
2727 // TODO: Split 96-bit into 64-bit and 32-bit, not 3x 32-bit.
2728 if (Pred == SCC_TRUE) {
2729 if (NElts % 2) {
2730 SelOp = AMDGPU::S_CSELECT_B32;
2731 EltRC = &AMDGPU::SGPR_32RegClass;
2732 } else {
2733 SelOp = AMDGPU::S_CSELECT_B64;
2734 EltRC = &AMDGPU::SGPR_64RegClass;
2735 SubIndices = Sub0_15_64;
2736 NElts /= 2;
2737 }
2738 }
2739
2740 MachineInstrBuilder MIB = BuildMI(
2741 MBB, I, DL, get(AMDGPU::REG_SEQUENCE), DstReg);
2742
2743 I = MIB->getIterator();
2744
2745 SmallVector<Register, 8> Regs;
2746 for (int Idx = 0; Idx != NElts; ++Idx) {
2747 Register DstElt = MRI.createVirtualRegister(EltRC);
2748 Regs.push_back(DstElt);
2749
2750 unsigned SubIdx = SubIndices[Idx];
2751
2752 MachineInstr *Select;
2753 if (SelOp == AMDGPU::V_CNDMASK_B32_e32) {
2754 Select =
2755 BuildMI(MBB, I, DL, get(SelOp), DstElt)
2756 .addReg(FalseReg, 0, SubIdx)
2757 .addReg(TrueReg, 0, SubIdx);
2758 } else {
2759 Select =
2760 BuildMI(MBB, I, DL, get(SelOp), DstElt)
2761 .addReg(TrueReg, 0, SubIdx)
2762 .addReg(FalseReg, 0, SubIdx);
2763 }
2764
2765 preserveCondRegFlags(Select->getOperand(3), Cond[1]);
2766 fixImplicitOperands(*Select);
2767
2768 MIB.addReg(DstElt)
2769 .addImm(SubIdx);
2770 }
2771}
2772
2773bool SIInstrInfo::isFoldableCopy(const MachineInstr &MI) {
2774 switch (MI.getOpcode()) {
2775 case AMDGPU::V_MOV_B32_e32:
2776 case AMDGPU::V_MOV_B32_e64:
2777 case AMDGPU::V_MOV_B64_PSEUDO:
2778 case AMDGPU::S_MOV_B32:
2779 case AMDGPU::S_MOV_B64:
2780 case AMDGPU::COPY:
2781 case AMDGPU::V_ACCVGPR_WRITE_B32_e64:
2782 case AMDGPU::V_ACCVGPR_READ_B32_e64:
2783 case AMDGPU::V_ACCVGPR_MOV_B32:
2784 return true;
2785 default:
2786 return false;
2787 }
2788}
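// For instance, a hypothetical def such as
//   %tmp:vgpr_32 = V_MOV_B32_e32 42
// counts as a foldable copy; FoldImmediate() below can then rewrite its single
// non-debug use to consume the immediate directly.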
2789
2790unsigned SIInstrInfo::getAddressSpaceForPseudoSourceKind(
2791 unsigned Kind) const {
2792 switch (Kind) {
2793 case PseudoSourceValue::Stack:
2794 case PseudoSourceValue::FixedStack:
2795 return AMDGPUAS::PRIVATE_ADDRESS;
2796 case PseudoSourceValue::ConstantPool:
2797 case PseudoSourceValue::GOT:
2798 case PseudoSourceValue::JumpTable:
2799 case PseudoSourceValue::GlobalValueCallEntry:
2800 case PseudoSourceValue::ExternalSymbolCallEntry:
2801 case PseudoSourceValue::TargetCustom:
2802 return AMDGPUAS::CONSTANT_ADDRESS;
2803 }
2804 return AMDGPUAS::FLAT_ADDRESS;
2805}
2806
2807static void removeModOperands(MachineInstr &MI) {
2808 unsigned Opc = MI.getOpcode();
2809 int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc,
2810 AMDGPU::OpName::src0_modifiers);
2811 int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc,
2812 AMDGPU::OpName::src1_modifiers);
2813 int Src2ModIdx = AMDGPU::getNamedOperandIdx(Opc,
2814 AMDGPU::OpName::src2_modifiers);
2815
2816 MI.RemoveOperand(Src2ModIdx);
2817 MI.RemoveOperand(Src1ModIdx);
2818 MI.RemoveOperand(Src0ModIdx);
2819}
2820
2821bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
2822 Register Reg, MachineRegisterInfo *MRI) const {
2823 if (!MRI->hasOneNonDBGUse(Reg))
2824 return false;
2825
2826 switch (DefMI.getOpcode()) {
2827 default:
2828 return false;
2829 case AMDGPU::S_MOV_B64:
2830 // TODO: We could fold 64-bit immediates, but this gets complicated
2831 // when there are sub-registers.
2832 return false;
2833
2834 case AMDGPU::V_MOV_B32_e32:
2835 case AMDGPU::S_MOV_B32:
2836 case AMDGPU::V_ACCVGPR_WRITE_B32_e64:
2837 break;
2838 }
2839
2840 const MachineOperand *ImmOp = getNamedOperand(DefMI, AMDGPU::OpName::src0);
2841 assert(ImmOp);
2842 // FIXME: We could handle FrameIndex values here.
2843 if (!ImmOp->isImm())
2844 return false;
2845
2846 unsigned Opc = UseMI.getOpcode();
2847 if (Opc == AMDGPU::COPY) {
2848 Register DstReg = UseMI.getOperand(0).getReg();
2849 bool Is16Bit = getOpSize(UseMI, 0) == 2;
2850 bool isVGPRCopy = RI.isVGPR(*MRI, DstReg);
2851 unsigned NewOpc = isVGPRCopy ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
2852 APInt Imm(32, ImmOp->getImm());
2853
2854 if (UseMI.getOperand(1).getSubReg() == AMDGPU::hi16)
2855 Imm = Imm.ashr(16);
2856
2857 if (RI.isAGPR(*MRI, DstReg)) {
2858 if (!isInlineConstant(Imm))
2859 return false;
2860 NewOpc = AMDGPU::V_ACCVGPR_WRITE_B32_e64;
2861 }
2862
2863 if (Is16Bit) {
2864 if (isVGPRCopy)
2865 return false; // Do not clobber vgpr_hi16
2866
2867 if (DstReg.isVirtual() && UseMI.getOperand(0).getSubReg() != AMDGPU::lo16)
2868 return false;
2869
2870 UseMI.getOperand(0).setSubReg(0);
2871 if (DstReg.isPhysical()) {
2872 DstReg = RI.get32BitRegister(DstReg);
2873 UseMI.getOperand(0).setReg(DstReg);
2874 }
2875 assert(UseMI.getOperand(1).getReg().isVirtual());
2876 }
2877
2878 UseMI.setDesc(get(NewOpc));
2879 UseMI.getOperand(1).ChangeToImmediate(Imm.getSExtValue());
2880 UseMI.addImplicitDefUseOperands(*UseMI.getParent()->getParent());
2881 return true;
2882 }
2883
2884 if (Opc == AMDGPU::V_MAD_F32_e64 || Opc == AMDGPU::V_MAC_F32_e64 ||
2885 Opc == AMDGPU::V_MAD_F16_e64 || Opc == AMDGPU::V_MAC_F16_e64 ||
2886 Opc == AMDGPU::V_FMA_F32_e64 || Opc == AMDGPU::V_FMAC_F32_e64 ||
2887 Opc == AMDGPU::V_FMA_F16_e64 || Opc == AMDGPU::V_FMAC_F16_e64) {
2888 // Don't fold if we are using source or output modifiers. The new VOP2
2889 // instructions don't have them.
2890 if (hasAnyModifiersSet(UseMI))
2891 return false;
2892
2893 // If this is a free constant, there's no reason to do this.
2894 // TODO: We could fold this here instead of letting SIFoldOperands do it
2895 // later.
2896 MachineOperand *Src0 = getNamedOperand(UseMI, AMDGPU::OpName::src0);
2897
2898 // Any src operand can be used for the legality check.
2899 if (isInlineConstant(UseMI, *Src0, *ImmOp))
2900 return false;
2901
2902 bool IsF32 = Opc == AMDGPU::V_MAD_F32_e64 || Opc == AMDGPU::V_MAC_F32_e64 ||
2903 Opc == AMDGPU::V_FMA_F32_e64 || Opc == AMDGPU::V_FMAC_F32_e64;
2904 bool IsFMA = Opc == AMDGPU::V_FMA_F32_e64 || Opc == AMDGPU::V_FMAC_F32_e64 ||
2905 Opc == AMDGPU::V_FMA_F16_e64 || Opc == AMDGPU::V_FMAC_F16_e64;
2906 MachineOperand *Src1 = getNamedOperand(UseMI, AMDGPU::OpName::src1);
2907 MachineOperand *Src2 = getNamedOperand(UseMI, AMDGPU::OpName::src2);
2908
2909 // Multiplied part is the constant: Use v_madmk_{f16, f32}.
2910 // We should only expect these to be on src0 due to canonicalizations.
2911 if (Src0->isReg() && Src0->getReg() == Reg) {
2912 if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg())))
2913 return false;
2914
2915 if (!Src2->isReg() || RI.isSGPRClass(MRI->getRegClass(Src2->getReg())))
2916 return false;
2917
2918 unsigned NewOpc =
2919 IsFMA ? (IsF32 ? AMDGPU::V_FMAMK_F32 : AMDGPU::V_FMAMK_F16)
2920 : (IsF32 ? AMDGPU::V_MADMK_F32 : AMDGPU::V_MADMK_F16);
2921 if (pseudoToMCOpcode(NewOpc) == -1)
2922 return false;
2923
2924 // We need to swap operands 0 and 1 since madmk constant is at operand 1.
2925
2926 const int64_t Imm = ImmOp->getImm();
2927
2928 // FIXME: This would be a lot easier if we could return a new instruction
2929 // instead of having to modify in place.
2930
2931 // Remove these first since they are at the end.
2932 UseMI.RemoveOperand(
2933 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod));
2934 UseMI.RemoveOperand(
2935 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp));
2936
2937 Register Src1Reg = Src1->getReg();
2938 unsigned Src1SubReg = Src1->getSubReg();
2939 Src0->setReg(Src1Reg);
2940 Src0->setSubReg(Src1SubReg);
2941 Src0->setIsKill(Src1->isKill());
2942
2943 if (Opc == AMDGPU::V_MAC_F32_e64 ||
2944 Opc == AMDGPU::V_MAC_F16_e64 ||
2945 Opc == AMDGPU::V_FMAC_F32_e64 ||
2946 Opc == AMDGPU::V_FMAC_F16_e64)
2947 UseMI.untieRegOperand(
2948 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
2949
2950 Src1->ChangeToImmediate(Imm);
2951
2952 removeModOperands(UseMI);
2953 UseMI.setDesc(get(NewOpc));
2954
2955 bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
2956 if (DeleteDef)
2957 DefMI.eraseFromParent();
2958
2959 return true;
2960 }
2961
2962 // Added part is the constant: Use v_madak_{f16, f32}.
2963 if (Src2->isReg() && Src2->getReg() == Reg) {
2964 // Not allowed to use constant bus for another operand.
2965 // We can however allow an inline immediate as src0.
2966 bool Src0Inlined = false;
2967 if (Src0->isReg()) {
2968 // Try to inline constant if possible.
2969 // If the def is a move of an immediate and this is its only use,
2970 // we save a VGPR here.
2971 MachineInstr *Def = MRI->getUniqueVRegDef(Src0->getReg());
2972 if (Def && Def->isMoveImmediate() &&
2973 isInlineConstant(Def->getOperand(1)) &&
2974 MRI->hasOneUse(Src0->getReg())) {
2975 Src0->ChangeToImmediate(Def->getOperand(1).getImm());
2976 Src0Inlined = true;
2977 } else if ((Src0->getReg().isPhysical() &&
2978 (ST.getConstantBusLimit(Opc) <= 1 &&
2979 RI.isSGPRClass(RI.getPhysRegClass(Src0->getReg())))) ||
2980 (Src0->getReg().isVirtual() &&
2981 (ST.getConstantBusLimit(Opc) <= 1 &&
2982 RI.isSGPRClass(MRI->getRegClass(Src0->getReg())))))
2983 return false;
2984 // VGPR is okay as Src0 - fallthrough
2985 }
2986
2987 if (Src1->isReg() && !Src0Inlined) {
2988 // We have one slot for inlinable constant so far - try to fill it
2989 MachineInstr *Def = MRI->getUniqueVRegDef(Src1->getReg());
2990 if (Def && Def->isMoveImmediate() &&
2991 isInlineConstant(Def->getOperand(1)) &&
2992 MRI->hasOneUse(Src1->getReg()) &&
2993 commuteInstruction(UseMI)) {
2994 Src0->ChangeToImmediate(Def->getOperand(1).getImm());
2995 } else if ((Src1->getReg().isPhysical() &&
2996 RI.isSGPRClass(RI.getPhysRegClass(Src1->getReg()))) ||
2997 (Src1->getReg().isVirtual() &&
2998 RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))))
2999 return false;
3000 // VGPR is okay as Src1 - fallthrough
3001 }
3002
3003 unsigned NewOpc =
3004 IsFMA ? (IsF32 ? AMDGPU::V_FMAAK_F32 : AMDGPU::V_FMAAK_F16)
3005 : (IsF32 ? AMDGPU::V_MADAK_F32 : AMDGPU::V_MADAK_F16);
3006 if (pseudoToMCOpcode(NewOpc) == -1)
3007 return false;
3008
3009 const int64_t Imm = ImmOp->getImm();
3010
3011 // FIXME: This would be a lot easier if we could return a new instruction
3012 // instead of having to modify in place.
3013
3014 // Remove these first since they are at the end.
3015 UseMI.RemoveOperand(
3016 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod));
3017 UseMI.RemoveOperand(
3018 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp));
3019
3020 if (Opc == AMDGPU::V_MAC_F32_e64 ||
3021 Opc == AMDGPU::V_MAC_F16_e64 ||
3022 Opc == AMDGPU::V_FMAC_F32_e64 ||
3023 Opc == AMDGPU::V_FMAC_F16_e64)
3024 UseMI.untieRegOperand(
3025 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
3026
3027 // ChangeToImmediate() adds Src2 back to the instruction.
3028 Src2->ChangeToImmediate(Imm);
3029
3030 // These come before src2.
3031 removeModOperands(UseMI);
3032 UseMI.setDesc(get(NewOpc));
3033 // UseMI may have been commuted above, leaving an SGPR as src1. In that
3034 // case an inline constant together with an SGPR would be illegal, so
3035 // relegalize the operands.
3036 legalizeOperands(UseMI);
3037
3038 bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
3039 if (DeleteDef)
3040 DefMI.eraseFromParent();
3041
3042 return true;
3043 }
3044 }
3045
3046 return false;
3047}
3048
3049static bool
3050memOpsHaveSameBaseOperands(ArrayRef<const MachineOperand *> BaseOps1,
3051 ArrayRef<const MachineOperand *> BaseOps2) {
3052 if (BaseOps1.size() != BaseOps2.size())
3053 return false;
3054 for (size_t I = 0, E = BaseOps1.size(); I < E; ++I) {
3055 if (!BaseOps1[I]->isIdenticalTo(*BaseOps2[I]))
3056 return false;
3057 }
3058 return true;
3059}
3060
3061static bool offsetsDoNotOverlap(int WidthA, int OffsetA,
3062 int WidthB, int OffsetB) {
3063 int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
3064 int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
3065 int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
3066 return LowOffset + LowWidth <= HighOffset;
3067}
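// Worked example with assumed values: two 4-byte accesses at offsets 0 and 8
// give LowOffset = 0, LowWidth = 4, and 0 + 4 <= 8, so they are reported as
// disjoint; with offsets 0 and 2 the check fails and they may overlap.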
3068
3069bool SIInstrInfo::checkInstOffsetsDoNotOverlap(const MachineInstr &MIa,
3070 const MachineInstr &MIb) const {
3071 SmallVector<const MachineOperand *, 4> BaseOps0, BaseOps1;
3072 int64_t Offset0, Offset1;
3073 unsigned Dummy0, Dummy1;
3074 bool Offset0IsScalable, Offset1IsScalable;
3075 if (!getMemOperandsWithOffsetWidth(MIa, BaseOps0, Offset0, Offset0IsScalable,
3076 Dummy0, &RI) ||
3077 !getMemOperandsWithOffsetWidth(MIb, BaseOps1, Offset1, Offset1IsScalable,
3078 Dummy1, &RI))
3079 return false;
3080
3081 if (!memOpsHaveSameBaseOperands(BaseOps0, BaseOps1))
3082 return false;
3083
3084 if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand()) {
3085 // FIXME: Handle ds_read2 / ds_write2.
3086 return false;
3087 }
3088 unsigned Width0 = MIa.memoperands().front()->getSize();
3089 unsigned Width1 = MIb.memoperands().front()->getSize();
3090 return offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1);
3091}
3092
3093bool SIInstrInfo::areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
3094 const MachineInstr &MIb) const {
3095 assert(MIa.mayLoadOrStore() &&
3096        "MIa must load from or modify a memory location");
3097 assert(MIb.mayLoadOrStore() &&
3098        "MIb must load from or modify a memory location");
3099
3100 if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects())
3101 return false;
3102
3103 // XXX - Can we relax this between address spaces?
3104 if (MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
3105 return false;
3106
3107 // TODO: Should we check the address space from the MachineMemOperand? That
3108 // would allow us to distinguish objects we know don't alias based on the
3109 // underlying address space, even if it was lowered to a different one,
3110 // e.g. private accesses lowered to use MUBUF instructions on a scratch
3111 // buffer.
3112 if (isDS(MIa)) {
3113 if (isDS(MIb))
3114 return checkInstOffsetsDoNotOverlap(MIa, MIb);
3115
3116 return !isFLAT(MIb) || isSegmentSpecificFLAT(MIb);
3117 }
3118
3119 if (isMUBUF(MIa) || isMTBUF(MIa)) {
3120 if (isMUBUF(MIb) || isMTBUF(MIb))
3121 return checkInstOffsetsDoNotOverlap(MIa, MIb);
3122
3123 return !isFLAT(MIb) && !isSMRD(MIb);
3124 }
3125
3126 if (isSMRD(MIa)) {
3127 if (isSMRD(MIb))
3128 return checkInstOffsetsDoNotOverlap(MIa, MIb);
3129
3130 return !isFLAT(MIb) && !isMUBUF(MIb) && !isMTBUF(MIb);
3131 }
3132
3133 if (isFLAT(MIa)) {
3134 if (isFLAT(MIb))
3135 return checkInstOffsetsDoNotOverlap(MIa, MIb);
3136
3137 return false;
3138 }
3139
3140 return false;
3141}
3142
3143static bool getFoldableImm(Register Reg, const MachineRegisterInfo &MRI,
3144 int64_t &Imm, MachineInstr **DefMI = nullptr) {
3145 if (Reg.isPhysical())
3146 return false;
3147 auto *Def = MRI.getUniqueVRegDef(Reg);
3148 if (Def && SIInstrInfo::isFoldableCopy(*Def) && Def->getOperand(1).isImm()) {
3149 Imm = Def->getOperand(1).getImm();
3150 if (DefMI)
3151 *DefMI = Def;
3152 return true;
3153 }
3154 return false;
3155}
3156
3157static bool getFoldableImm(const MachineOperand *MO, int64_t &Imm,
3158 MachineInstr **DefMI = nullptr) {
3159 if (!MO->isReg())
3160 return false;
3161 const MachineFunction *MF = MO->getParent()->getParent()->getParent();
3162 const MachineRegisterInfo &MRI = MF->getRegInfo();
3163 return getFoldableImm(MO->getReg(), MRI, Imm, DefMI);
3164}
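// For example, if the operand is a virtual register whose unique def is a
// hypothetical "%v = V_MOV_B32_e32 1065353216" (the bit pattern of 1.0f), Imm
// is set to that value and, when requested, *DefMI points at the V_MOV so
// convertToThreeAddress() below can retire it once the fold succeeds.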
3165
3166static void updateLiveVariables(LiveVariables *LV, MachineInstr &MI,
3167 MachineInstr &NewMI) {
3168 if (LV) {
3169 unsigned NumOps = MI.getNumOperands();
3170 for (unsigned I = 1; I < NumOps; ++I) {
3171 MachineOperand &Op = MI.getOperand(I);
3172 if (Op.isReg() && Op.isKill())
3173 LV->replaceKillInstruction(Op.getReg(), MI, NewMI);
3174 }
3175 }
3176}
3177
3178MachineInstr *SIInstrInfo::convertToThreeAddress(MachineInstr &MI,
3179 LiveVariables *LV,
3180 LiveIntervals *LIS) const {
3181 unsigned Opc = MI.getOpcode();
3182 bool IsF16 = false;
3183 bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e32 || Opc == AMDGPU::V_FMAC_F32_e64 ||
3184 Opc == AMDGPU::V_FMAC_F16_e32 || Opc == AMDGPU::V_FMAC_F16_e64 ||
3185 Opc == AMDGPU::V_FMAC_F64_e32 || Opc == AMDGPU::V_FMAC_F64_e64;
3186 bool IsF64 = Opc == AMDGPU::V_FMAC_F64_e32 || Opc == AMDGPU::V_FMAC_F64_e64;
3187
3188 switch (Opc) {
3189 default:
3190 return nullptr;
3191 case AMDGPU::V_MAC_F16_e64:
3192 case AMDGPU::V_FMAC_F16_e64:
3193 IsF16 = true;
3194 LLVM_FALLTHROUGH;
3195 case AMDGPU::V_MAC_F32_e64:
3196 case AMDGPU::V_FMAC_F32_e64:
3197 case AMDGPU::V_FMAC_F64_e64:
3198 break;
3199 case AMDGPU::V_MAC_F16_e32:
3200 case AMDGPU::V_FMAC_F16_e32:
3201 IsF16 = true;
3202 LLVM_FALLTHROUGH;
3203 case AMDGPU::V_MAC_F32_e32:
3204 case AMDGPU::V_FMAC_F32_e32:
3205 case AMDGPU::V_FMAC_F64_e32: {
3206 int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
3207 AMDGPU::OpName::src0);
3208 const MachineOperand *Src0 = &MI.getOperand(Src0Idx);
3209 if (!Src0->isReg() && !Src0->isImm())
3210 return nullptr;
3211
3212 if (Src0->isImm() && !isInlineConstant(MI, Src0Idx, *Src0))
3213 return nullptr;
3214
3215 break;
3216 }
3217 }
3218
3219 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
3220 const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0);
3221 const MachineOperand *Src0Mods =
3222 getNamedOperand(MI, AMDGPU::OpName::src0_modifiers);
3223 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1);
3224 const MachineOperand *Src1Mods =
3225 getNamedOperand(MI, AMDGPU::OpName::src1_modifiers);
3226 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);
3227 const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp);
3228 const MachineOperand *Omod = getNamedOperand(MI, AMDGPU::OpName::omod);
3229 MachineInstrBuilder MIB;
3230 MachineBasicBlock &MBB = *MI.getParent();
3231
3232 if (!Src0Mods && !Src1Mods && !Clamp && !Omod && !IsF64 &&
3233 // If we have an SGPR input, we will violate the constant bus restriction.
3234 (ST.getConstantBusLimit(Opc) > 1 || !Src0->isReg() ||
3235 !RI.isSGPRReg(MBB.getParent()->getRegInfo(), Src0->getReg()))) {
3236 MachineInstr *DefMI;
3237 const auto killDef = [&DefMI, &MBB, this]() -> void {
3238 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3239 // The only user is the instruction which will be killed.
3240 if (!MRI.hasOneNonDBGUse(DefMI->getOperand(0).getReg()))
3241 return;
3242 // We cannot just remove the DefMI here, calling pass will crash.
3243 DefMI->setDesc(get(AMDGPU::IMPLICIT_DEF));
3244 for (unsigned I = DefMI->getNumOperands() - 1; I != 0; --I)
3245 DefMI->RemoveOperand(I);
3246 };
3247
3248 int64_t Imm;
3249 if (getFoldableImm(Src2, Imm, &DefMI)) {
3250 unsigned NewOpc =
3251 IsFMA ? (IsF16 ? AMDGPU::V_FMAAK_F16 : AMDGPU::V_FMAAK_F32)
3252 : (IsF16 ? AMDGPU::V_MADAK_F16 : AMDGPU::V_MADAK_F32);
3253 if (pseudoToMCOpcode(NewOpc) != -1) {
3254 MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
3255 .add(*Dst)
3256 .add(*Src0)
3257 .add(*Src1)
3258 .addImm(Imm);
3259 updateLiveVariables(LV, MI, *MIB);
3260 if (LIS)
3261 LIS->ReplaceMachineInstrInMaps(MI, *MIB);
3262 killDef();
3263 return MIB;
3264 }
3265 }
3266 unsigned NewOpc = IsFMA
3267 ? (IsF16 ? AMDGPU::V_FMAMK_F16 : AMDGPU::V_FMAMK_F32)
3268 : (IsF16 ? AMDGPU::V_MADMK_F16 : AMDGPU::V_MADMK_F32);
3269 if (getFoldableImm(Src1, Imm, &DefMI)) {
3270 if (pseudoToMCOpcode(NewOpc) != -1) {
3271 MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
3272 .add(*Dst)
3273 .add(*Src0)
3274 .addImm(Imm)
3275 .add(*Src2);
3276 updateLiveVariables(LV, MI, *MIB);
3277 if (LIS)
3278 LIS->ReplaceMachineInstrInMaps(MI, *MIB);
3279 killDef();
3280 return MIB;
3281 }
3282 }
3283 if (getFoldableImm(Src0, Imm, &DefMI)) {
3284 if (pseudoToMCOpcode(NewOpc) != -1 &&
3285 isOperandLegal(
3286 MI, AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::src0),
3287 Src1)) {
3288 MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
3289 .add(*Dst)
3290 .add(*Src1)
3291 .addImm(Imm)
3292 .add(*Src2);
3293 updateLiveVariables(LV, MI, *MIB);
3294 if (LIS)
3295 LIS->ReplaceMachineInstrInMaps(MI, *MIB);
3296 killDef();
3297 return MIB;
3298 }
3299 }
3300 }
3301
3302 unsigned NewOpc = IsFMA ? (IsF16 ? AMDGPU::V_FMA_F16_gfx9_e64
3303 : IsF64 ? AMDGPU::V_FMA_F64_e64
3304 : AMDGPU::V_FMA_F32_e64)
3305 : (IsF16 ? AMDGPU::V_MAD_F16_e64 : AMDGPU::V_MAD_F32_e64);
3306 if (pseudoToMCOpcode(NewOpc) == -1)
3307 return nullptr;
3308
3309 MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
3310 .add(*Dst)
3311 .addImm(Src0Mods ? Src0Mods->getImm() : 0)
3312 .add(*Src0)
3313 .addImm(Src1Mods ? Src1Mods->getImm() : 0)
3314 .add(*Src1)
3315 .addImm(0) // Src mods
3316 .add(*Src2)
3317 .addImm(Clamp ? Clamp->getImm() : 0)
3318 .addImm(Omod ? Omod->getImm() : 0);
3319 updateLiveVariables(LV, MI, *MIB);
3320 if (LIS)
3321 LIS->ReplaceMachineInstrInMaps(MI, *MIB);
3322 return MIB;
3323}
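// Sketch of the common case, with assumed virtual registers: a two-address
//   %d = V_MAC_F32_e32 %a, %b, %d(tied)
// is rebuilt above as the three-address
//   %d = V_MAD_F32_e64 0, %a, 0, %b, 0, %d, clamp:0, omod:0
// while MAC/FMAC with a foldable immediate source prefers the MADAK/MADMK
// (or FMAAK/FMAMK) forms handled first.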
3324
3325// It's not generally safe to move VALU instructions across these since it will
3326// start using the register as a base index rather than directly.
3327// XXX - Why isn't hasSideEffects sufficient for these?
3328static bool changesVGPRIndexingMode(const MachineInstr &MI) {
3329 switch (MI.getOpcode()) {
3330 case AMDGPU::S_SET_GPR_IDX_ON:
3331 case AMDGPU::S_SET_GPR_IDX_MODE:
3332 case AMDGPU::S_SET_GPR_IDX_OFF:
3333 return true;
3334 default:
3335 return false;
3336 }
3337}
3338
3339bool SIInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
3340 const MachineBasicBlock *MBB,
3341 const MachineFunction &MF) const {
3342 // Skipping the check for SP writes in the base implementation. The reason it
3343 // was added was apparently due to compile time concerns.
3344 //
3345 // TODO: Do we really want this barrier? It triggers unnecessary hazard nops
3346 // but is probably avoidable.
3347
3348 // Copied from base implementation.
3349 // Terminators and labels can't be scheduled around.
3350 if (MI.isTerminator() || MI.isPosition())
3351 return true;
3352
3353 // INLINEASM_BR can jump to another block
3354 if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
3355 return true;
3356
3357 // Target-independent instructions do not have an implicit-use of EXEC, even
3358 // when they operate on VGPRs. Treating EXEC modifications as scheduling
3359 // boundaries prevents incorrect movements of such instructions.
3360 return MI.modifiesRegister(AMDGPU::EXEC, &RI) ||
3361 MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32 ||
3362 MI.getOpcode() == AMDGPU::S_SETREG_B32 ||
3363 changesVGPRIndexingMode(MI);
3364}
3365
3366bool SIInstrInfo::isAlwaysGDS(uint16_t Opcode) const {
3367 return Opcode == AMDGPU::DS_ORDERED_COUNT ||
3368 Opcode == AMDGPU::DS_GWS_INIT ||
3369 Opcode == AMDGPU::DS_GWS_SEMA_V ||
3370 Opcode == AMDGPU::DS_GWS_SEMA_BR ||
3371 Opcode == AMDGPU::DS_GWS_SEMA_P ||
3372 Opcode == AMDGPU::DS_GWS_SEMA_RELEASE_ALL ||
3373 Opcode == AMDGPU::DS_GWS_BARRIER;
3374}
3375
3376bool SIInstrInfo::modifiesModeRegister(const MachineInstr &MI) {
3377 // Skip the full operand and register alias search that modifiesRegister
3378 // does. Only a handful of instructions touch this register, it is only an
3379 // implicit def, and it doesn't alias any other registers.
3380 if (const MCPhysReg *ImpDef = MI.getDesc().getImplicitDefs()) {
3381 for (; ImpDef && *ImpDef; ++ImpDef) {
3382 if (*ImpDef == AMDGPU::MODE)
3383 return true;
3384 }
3385 }
3386
3387 return false;
3388}
3389
3390bool SIInstrInfo::hasUnwantedEffectsWhenEXECEmpty(const MachineInstr &MI) const {
3391 unsigned Opcode = MI.getOpcode();
3392
3393 if (MI.mayStore() && isSMRD(MI))
3394 return true; // scalar store or atomic
3395
3396 // This will terminate the function when other lanes may need to continue.
3397 if (MI.isReturn())
3398 return true;
3399
3400 // These instructions cause shader I/O that may cause hardware lockups
3401 // when executed with an empty EXEC mask.
3402 //
3403 // Note: exp with VM = DONE = 0 is automatically skipped by hardware when
3404 // EXEC = 0, but checking for that case here seems not worth it
3405 // given the typical code patterns.
3406 if (Opcode == AMDGPU::S_SENDMSG || Opcode == AMDGPU::S_SENDMSGHALT ||
3407 isEXP(Opcode) ||
3408 Opcode == AMDGPU::DS_ORDERED_COUNT || Opcode == AMDGPU::S_TRAP ||
3409 Opcode == AMDGPU::DS_GWS_INIT || Opcode == AMDGPU::DS_GWS_BARRIER)
3410 return true;
3411
3412 if (MI.isCall() || MI.isInlineAsm())
3413 return true; // conservative assumption
3414
3415 // A mode change is a scalar operation that influences vector instructions.
3416 if (modifiesModeRegister(MI))
3417 return true;
3418
3419 // These are like SALU instructions in terms of effects, so it's questionable
3420 // whether we should return true for those.
3421 //
3422 // However, executing them with EXEC = 0 causes them to operate on undefined
3423 // data, which we avoid by returning true here.
3424 if (Opcode == AMDGPU::V_READFIRSTLANE_B32 ||
3425 Opcode == AMDGPU::V_READLANE_B32 || Opcode == AMDGPU::V_WRITELANE_B32)
3426 return true;
3427
3428 return false;
3429}
3430
3431bool SIInstrInfo::mayReadEXEC(const MachineRegisterInfo &MRI,
3432 const MachineInstr &MI) const {
3433 if (MI.isMetaInstruction())
3434 return false;
3435
3436 // This won't read exec if this is an SGPR->SGPR copy.
3437 if (MI.isCopyLike()) {
3438 if (!RI.isSGPRReg(MRI, MI.getOperand(0).getReg()))
3439 return true;
3440
3441 // Make sure this isn't copying exec as a normal operand
3442 return MI.readsRegister(AMDGPU::EXEC, &RI);
3443 }
3444
3445 // Make a conservative assumption about the callee.
3446 if (MI.isCall())
3447 return true;
3448
3449 // Be conservative with any unhandled generic opcodes.
3450 if (!isTargetSpecificOpcode(MI.getOpcode()))
3451 return true;
3452
3453 return !isSALU(MI) || MI.readsRegister(AMDGPU::EXEC, &RI);
3454}
3455
3456bool SIInstrInfo::isInlineConstant(const APInt &Imm) const {
3457 switch (Imm.getBitWidth()) {
3458 case 1: // This likely will be a condition code mask.
3459 return true;
3460
3461 case 32:
3462 return AMDGPU::isInlinableLiteral32(Imm.getSExtValue(),
3463 ST.hasInv2PiInlineImm());
3464 case 64:
3465 return AMDGPU::isInlinableLiteral64(Imm.getSExtValue(),
3466 ST.hasInv2PiInlineImm());
3467 case 16:
3468 return ST.has16BitInsts() &&
3469 AMDGPU::isInlinableLiteral16(Imm.getSExtValue(),
3470 ST.hasInv2PiInlineImm());
3471 default:
3472 llvm_unreachable("invalid bitwidth");
3473 }
3474}
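// As a rough guide (assumed figures, not derived from this file): a 32-bit
// value of 64 is inlinable while 65 must be emitted as a literal, since the
// AMDGPU inline set covers small integers (-16..64) plus a few FP constants
// such as +-0.5, +-1.0, +-2.0, +-4.0 and, on subtargets reporting
// hasInv2PiInlineImm(), 1/(2*pi).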
3475
3476bool SIInstrInfo::isInlineConstant(const MachineOperand &MO,
3477 uint8_t OperandType) const {
3478 if (!MO.isImm() ||
3479 OperandType < AMDGPU::OPERAND_SRC_FIRST ||
3480 OperandType > AMDGPU::OPERAND_SRC_LAST)
3481 return false;
3482
3483 // MachineOperand provides no way to tell the true operand size, since it only
3484 // records a 64-bit value. We need to know the size to determine if a 32-bit
3485 // floating point immediate bit pattern is legal for an integer immediate. It
3486 // would be for any 32-bit integer operand, but would not be for a 64-bit one.
3487
3488 int64_t Imm = MO.getImm();
3489 switch (OperandType) {
3490 case AMDGPU::OPERAND_REG_IMM_INT32:
3491 case AMDGPU::OPERAND_REG_IMM_FP32:
3492 case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
3493 case AMDGPU::OPERAND_REG_INLINE_C_INT32:
3494 case AMDGPU::OPERAND_REG_INLINE_C_FP32:
3495 case AMDGPU::OPERAND_REG_IMM_V2FP32:
3496 case AMDGPU::OPERAND_REG_INLINE_C_V2FP32:
3497 case AMDGPU::OPERAND_REG_IMM_V2INT32:
3498 case AMDGPU::OPERAND_REG_INLINE_C_V2INT32:
3499 case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
3500 case AMDGPU::OPERAND_REG_INLINE_AC_FP32: {
3501 int32_t Trunc = static_cast<int32_t>(Imm);
3502 return AMDGPU::isInlinableLiteral32(Trunc, ST.hasInv2PiInlineImm());
3503 }
3504 case AMDGPU::OPERAND_REG_IMM_INT64:
3505 case AMDGPU::OPERAND_REG_IMM_FP64:
3506 case AMDGPU::OPERAND_REG_INLINE_C_INT64:
3507 case AMDGPU::OPERAND_REG_INLINE_C_FP64:
3508 case AMDGPU::OPERAND_REG_INLINE_AC_FP64:
3509 return AMDGPU::isInlinableLiteral64(MO.getImm(),
3510 ST.hasInv2PiInlineImm());
3511 case AMDGPU::OPERAND_REG_IMM_INT16:
3512 case AMDGPU::OPERAND_REG_INLINE_C_INT16:
3513 case AMDGPU::OPERAND_REG_INLINE_AC_INT16:
3514 // We would expect inline immediates to not be concerned with an integer/fp
3515 // distinction. However, in the case of 16-bit integer operations, the
3516 // "floating point" values appear to not work. It seems read the low 16-bits
3517 // of 32-bit immediates, which happens to always work for the integer
3518 // values.
3519 //
3520 // See llvm bugzilla 46302.
3521 //
3522 // TODO: Theoretically we could use op-sel to use the high bits of the
3523 // 32-bit FP values.
3524 return AMDGPU::isInlinableIntLiteral(Imm);
3525 case AMDGPU::OPERAND_REG_IMM_V2INT16:
3526 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
3527 case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
3528 // This suffers the same problem as the scalar 16-bit cases.
3529 return AMDGPU::isInlinableIntLiteralV216(Imm);
3530 case AMDGPU::OPERAND_REG_IMM_FP16:
3531 case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
3532 case AMDGPU::OPERAND_REG_INLINE_C_FP16:
3533 case AMDGPU::OPERAND_REG_INLINE_AC_FP16: {
3534 if (isInt<16>(Imm) || isUInt<16>(Imm)) {
3535 // A few special case instructions have 16-bit operands on subtargets
3536 // where 16-bit instructions are not legal.
3537 // TODO: Do the 32-bit immediates work? We shouldn't really need to handle
3538 // constants in these cases
3539 int16_t Trunc = static_cast<int16_t>(Imm);
3540 return ST.has16BitInsts() &&
3541 AMDGPU::isInlinableLiteral16(Trunc, ST.hasInv2PiInlineImm());
3542 }
3543
3544 return false;
3545 }
3546 case AMDGPU::OPERAND_REG_IMM_V2FP16:
3547 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
3548 case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16: {
3549 uint32_t Trunc = static_cast<uint32_t>(Imm);
3550 return AMDGPU::isInlinableLiteralV216(Trunc, ST.hasInv2PiInlineImm());
3551 }
3552 case AMDGPU::OPERAND_KIMM32:
3553 case AMDGPU::OPERAND_KIMM16:
3554 return false;
3555 default:
3556 llvm_unreachable("invalid bitwidth");
3557 }
3558}
3559
3560bool SIInstrInfo::isLiteralConstantLike(const MachineOperand &MO,
3561 const MCOperandInfo &OpInfo) const {
3562 switch (MO.getType()) {
3563 case MachineOperand::MO_Register:
3564 return false;
3565 case MachineOperand::MO_Immediate:
3566 return !isInlineConstant(MO, OpInfo);
3567 case MachineOperand::MO_FrameIndex:
3568 case MachineOperand::MO_MachineBasicBlock:
3569 case MachineOperand::MO_ExternalSymbol:
3570 case MachineOperand::MO_GlobalAddress:
3571 case MachineOperand::MO_MCSymbol:
3572 return true;
3573 default:
3574 llvm_unreachable("unexpected operand type");
3575 }
3576}
3577
3578static bool compareMachineOp(const MachineOperand &Op0,
3579 const MachineOperand &Op1) {
3580 if (Op0.getType() != Op1.getType())
3581 return false;
3582
3583 switch (Op0.getType()) {
3584 case MachineOperand::MO_Register:
3585 return Op0.getReg() == Op1.getReg();
3586 case MachineOperand::MO_Immediate:
3587 return Op0.getImm() == Op1.getImm();
3588 default:
3589 llvm_unreachable("Didn't expect to be comparing these operand types");
3590 }
3591}
3592
3593bool SIInstrInfo::isImmOperandLegal(const MachineInstr &MI, unsigned OpNo,
3594 const MachineOperand &MO) const {
3595 const MCInstrDesc &InstDesc = MI.getDesc();
3596 const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo];
3597
3598 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI() || MO.isGlobal());
3599
3600 if (OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE)
3601 return true;
3602
3603 if (OpInfo.RegClass < 0)
3604 return false;
3605
3606 if (MO.isImm() && isInlineConstant(MO, OpInfo)) {
3607 if (isMAI(MI) && ST.hasMFMAInlineLiteralBug() &&
3608 OpNo == (unsigned)AMDGPU::getNamedOperandIdx(MI.getOpcode(),
3609 AMDGPU::OpName::src2))
3610 return false;
3611 return RI.opCanUseInlineConstant(OpInfo.OperandType);
3612 }
3613
3614 if (!RI.opCanUseLiteralConstant(OpInfo.OperandType))
3615 return false;
3616
3617 if (!isVOP3(MI) || !AMDGPU::isSISrcOperand(InstDesc, OpNo))
3618 return true;
3619
3620 return ST.hasVOP3Literal();
3621}
3622
3623bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const {
3624 // GFX90A does not have V_MUL_LEGACY_F32_e32.
3625 if (Opcode == AMDGPU::V_MUL_LEGACY_F32_e64 && ST.hasGFX90AInsts())
3626 return false;
3627
3628 int Op32 = AMDGPU::getVOPe32(Opcode);
3629 if (Op32 == -1)
3630 return false;
3631
3632 return pseudoToMCOpcode(Op32) != -1;
3633}
3634
3635bool SIInstrInfo::hasModifiers(unsigned Opcode) const {
3636 // The src0_modifier operand is present on all instructions
3637 // that have modifiers.
3638
3639 return AMDGPU::getNamedOperandIdx(Opcode,
3640 AMDGPU::OpName::src0_modifiers) != -1;
3641}
3642
3643bool SIInstrInfo::hasModifiersSet(const MachineInstr &MI,
3644 unsigned OpName) const {
3645 const MachineOperand *Mods = getNamedOperand(MI, OpName);
3646 return Mods && Mods->getImm();
3647}
3648
3649bool SIInstrInfo::hasAnyModifiersSet(const MachineInstr &MI) const {
3650 return hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
3651 hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
3652 hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers) ||
3653 hasModifiersSet(MI, AMDGPU::OpName::clamp) ||
3654 hasModifiersSet(MI, AMDGPU::OpName::omod);
3655}
3656
3657bool SIInstrInfo::canShrink(const MachineInstr &MI,
3658 const MachineRegisterInfo &MRI) const {
3659 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);
3660 // Can't shrink instruction with three operands.
3661 if (Src2) {
3662 switch (MI.getOpcode()) {
3663 default: return false;
3664
3665 case AMDGPU::V_ADDC_U32_e64:
3666 case AMDGPU::V_SUBB_U32_e64:
3667 case AMDGPU::V_SUBBREV_U32_e64: {
3668 const MachineOperand *Src1
3669 = getNamedOperand(MI, AMDGPU::OpName::src1);
3670 if (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()))
3671 return false;
3672 // Additional verification is needed for sdst/src2.
3673 return true;
3674 }
3675 case AMDGPU::V_MAC_F16_e64:
3676 case AMDGPU::V_MAC_F32_e64:
3677 case AMDGPU::V_MAC_LEGACY_F32_e64:
3678 case AMDGPU::V_FMAC_F16_e64:
3679 case AMDGPU::V_FMAC_F32_e64:
3680 case AMDGPU::V_FMAC_F64_e64:
3681 case AMDGPU::V_FMAC_LEGACY_F32_e64:
3682 if (!Src2->isReg() || !RI.isVGPR(MRI, Src2->getReg()) ||
3683 hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers))
3684 return false;
3685 break;
3686
3687 case AMDGPU::V_CNDMASK_B32_e64:
3688 break;
3689 }
3690 }
3691
3692 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1);
3693 if (Src1 && (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()) ||
3694 hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers)))
3695 return false;
3696
3697 // We don't need to check src0, all input types are legal, so just make sure
3698 // src0 isn't using any modifiers.
3699 if (hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers))
3700 return false;
3701
3702 // Can it be shrunk to a valid 32 bit opcode?
3703 if (!hasVALU32BitEncoding(MI.getOpcode()))
3704 return false;
3705
3706 // Check output modifiers
3707 return !hasModifiersSet(MI, AMDGPU::OpName::omod) &&
3708 !hasModifiersSet(MI, AMDGPU::OpName::clamp);
3709}
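// A typical caller (e.g. the SIShrinkInstructions pass) pairs this check with
// buildShrunkInst() below; a minimal sketch:
//   if (TII->canShrink(MI, MRI)) {
//     int Op32 = AMDGPU::getVOPe32(MI.getOpcode());
//     MachineInstr *Inst32 = TII->buildShrunkInst(MI, Op32);
//     (void)Inst32; // the original MI still has to be erased by the caller
//   }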
3710
3711// Set VCC operand with all flags from \p Orig, except for setting it as
3712// implicit.
3713static void copyFlagsToImplicitVCC(MachineInstr &MI,
3714 const MachineOperand &Orig) {
3715
3716 for (MachineOperand &Use : MI.implicit_operands()) {
3717 if (Use.isUse() &&
3718 (Use.getReg() == AMDGPU::VCC || Use.getReg() == AMDGPU::VCC_LO)) {
3719 Use.setIsUndef(Orig.isUndef());
3720 Use.setIsKill(Orig.isKill());
3721 return;
3722 }
3723 }
3724}
3725
3726MachineInstr *SIInstrInfo::buildShrunkInst(MachineInstr &MI,
3727 unsigned Op32) const {
3728 MachineBasicBlock *MBB = MI.getParent();
3729 MachineInstrBuilder Inst32 =
3730 BuildMI(*MBB, MI, MI.getDebugLoc(), get(Op32))
3731 .setMIFlags(MI.getFlags());
3732
3733 // Add the dst operand if the 32-bit encoding also has an explicit $vdst.
3734 // For VOPC instructions, this is replaced by an implicit def of vcc.
3735 int Op32DstIdx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::vdst);
3736 if (Op32DstIdx != -1) {
3737 // dst
3738 Inst32.add(MI.getOperand(0));
3739 } else {
3740 assert(((MI.getOperand(0).getReg() == AMDGPU::VCC) ||
3741         (MI.getOperand(0).getReg() == AMDGPU::VCC_LO)) &&
3742        "Unexpected case");
3743 }
3744
3745 Inst32.add(*getNamedOperand(MI, AMDGPU::OpName::src0));
3746
3747 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1);
3748 if (Src1)
3749 Inst32.add(*Src1);
3750
3751 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);
3752
3753 if (Src2) {
3754 int Op32Src2Idx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::src2);
3755 if (Op32Src2Idx != -1) {
3756 Inst32.add(*Src2);
3757 } else {
3758 // In the case of V_CNDMASK_B32_e32, the explicit operand src2 is
3759 // replaced with an implicit read of vcc or vcc_lo. The implicit read
3760 // of vcc was already added during the initial BuildMI, but we
3761 // 1) may need to change vcc to vcc_lo to preserve the original register
3762 // 2) have to preserve the original flags.
3763 fixImplicitOperands(*Inst32);
3764 copyFlagsToImplicitVCC(*Inst32, *Src2);
3765 }
3766 }
3767
3768 return Inst32;
3769}
3770
3771bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI,
3772 const MachineOperand &MO,
3773 const MCOperandInfo &OpInfo) const {
3774 // Literal constants use the constant bus.
3775 //if (isLiteralConstantLike(MO, OpInfo))
3776 // return true;
3777 if (MO.isImm())
3778 return !isInlineConstant(MO, OpInfo);
3779
3780 if (!MO.isReg())
3781 return true; // Misc other operands like FrameIndex
3782
3783 if (!MO.isUse())
3784 return false;
3785
3786 if (MO.getReg().isVirtual())
3787 return RI.isSGPRClass(MRI.getRegClass(MO.getReg()));
3788
3789 // Null is free
3790 if (MO.getReg() == AMDGPU::SGPR_NULL)
3791 return false;
3792
3793 // SGPRs use the constant bus
3794 if (MO.isImplicit()) {
3795 return MO.getReg() == AMDGPU::M0 ||
3796 MO.getReg() == AMDGPU::VCC ||
3797 MO.getReg() == AMDGPU::VCC_LO;
3798 } else {
3799 return AMDGPU::SReg_32RegClass.contains(MO.getReg()) ||
3800 AMDGPU::SReg_64RegClass.contains(MO.getReg());
3801 }
3802}
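// Putting this together with the verifier below (assumed figures): on targets
// where ST.getConstantBusLimit() is 1, a VOP2 with one SGPR source and one
// non-inline literal already violates the restriction, while a limit of 2 on
// newer subtargets allows that combination for most VOP3 encodings.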
3803
3804static Register findImplicitSGPRRead(const MachineInstr &MI) {
3805 for (const MachineOperand &MO : MI.implicit_operands()) {
3806 // We only care about reads.
3807 if (MO.isDef())
3808 continue;
3809
3810 switch (MO.getReg()) {
3811 case AMDGPU::VCC:
3812 case AMDGPU::VCC_LO:
3813 case AMDGPU::VCC_HI:
3814 case AMDGPU::M0:
3815 case AMDGPU::FLAT_SCR:
3816 return MO.getReg();
3817
3818 default:
3819 break;
3820 }
3821 }
3822
3823 return AMDGPU::NoRegister;
3824}
3825
3826static bool shouldReadExec(const MachineInstr &MI) {
3827 if (SIInstrInfo::isVALU(MI)) {
3828 switch (MI.getOpcode()) {
3829 case AMDGPU::V_READLANE_B32:
3830 case AMDGPU::V_WRITELANE_B32:
3831 return false;
3832 }
3833
3834 return true;
3835 }
3836
3837 if (MI.isPreISelOpcode() ||
3838 SIInstrInfo::isGenericOpcode(MI.getOpcode()) ||
3839 SIInstrInfo::isSALU(MI) ||
3840 SIInstrInfo::isSMRD(MI))
3841 return false;
3842
3843 return true;
3844}
3845
3846static bool isSubRegOf(const SIRegisterInfo &TRI,
3847 const MachineOperand &SuperVec,
3848 const MachineOperand &SubReg) {
3849 if (SubReg.getReg().isPhysical())
3850 return TRI.isSubRegister(SuperVec.getReg(), SubReg.getReg());
3851
3852 return SubReg.getSubReg() != AMDGPU::NoSubRegister &&
3853 SubReg.getReg() == SuperVec.getReg();
3854}
3855
3856bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
3857 StringRef &ErrInfo) const {
3858 uint16_t Opcode = MI.getOpcode();
3859 if (SIInstrInfo::isGenericOpcode(MI.getOpcode()))
3860 return true;
3861
3862 const MachineFunction *MF = MI.getParent()->getParent();
3863 const MachineRegisterInfo &MRI = MF->getRegInfo();
3864
3865 int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
3866 int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
3867 int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
3868
3869 // Make sure the number of operands is correct.
3870 const MCInstrDesc &Desc = get(Opcode);
3871 if (!Desc.isVariadic() &&
3872 Desc.getNumOperands() != MI.getNumExplicitOperands()) {
3873 ErrInfo = "Instruction has wrong number of operands.";
3874 return false;
3875 }
3876
3877 if (MI.isInlineAsm()) {
3878 // Verify register classes for inlineasm constraints.
3879 for (unsigned I = InlineAsm::MIOp_FirstOperand, E = MI.getNumOperands();
3880 I != E; ++I) {
3881 const TargetRegisterClass *RC = MI.getRegClassConstraint(I, this, &RI);
3882 if (!RC)
3883 continue;
3884
3885 const MachineOperand &Op = MI.getOperand(I);
3886 if (!Op.isReg())
3887 continue;
3888
3889 Register Reg = Op.getReg();
3890 if (!Reg.isVirtual() && !RC->contains(Reg)) {
3891 ErrInfo = "inlineasm operand has incorrect register class.";
3892 return false;
3893 }
3894 }
3895
3896 return true;
3897 }
3898
3899 if (isMIMG(MI) && MI.memoperands_empty() && MI.mayLoadOrStore()) {
3900 ErrInfo = "missing memory operand from MIMG instruction.";
3901 return false;
3902 }
3903
3904 // Make sure the register classes are correct.
3905 for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) {
3906 const MachineOperand &MO = MI.getOperand(i);
3907 if (MO.isFPImm()) {
3908 ErrInfo = "FPImm Machine Operands are not supported. ISel should bitcast "
3909 "all fp values to integers.";
3910 return false;
3911 }
3912
3913 int RegClass = Desc.OpInfo[i].RegClass;
3914
3915 switch (Desc.OpInfo[i].OperandType) {
3916 case MCOI::OPERAND_REGISTER:
3917 if (MI.getOperand(i).isImm() || MI.getOperand(i).isGlobal()) {
3918 ErrInfo = "Illegal immediate value for operand.";
3919 return false;
3920 }
3921 break;
3922 case AMDGPU::OPERAND_REG_IMM_INT32:
3923 case AMDGPU::OPERAND_REG_IMM_FP32:
3924 case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
3925 break;
3926 case AMDGPU::OPERAND_REG_INLINE_C_INT32:
3927 case AMDGPU::OPERAND_REG_INLINE_C_FP32:
3928 case AMDGPU::OPERAND_REG_INLINE_C_INT64:
3929 case AMDGPU::OPERAND_REG_INLINE_C_FP64:
3930 case AMDGPU::OPERAND_REG_INLINE_C_INT16:
3931 case AMDGPU::OPERAND_REG_INLINE_C_FP16:
3932 case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
3933 case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
3934 case AMDGPU::OPERAND_REG_INLINE_AC_INT16:
3935 case AMDGPU::OPERAND_REG_INLINE_AC_FP16:
3936 case AMDGPU::OPERAND_REG_INLINE_AC_FP64: {
3937 if (!MO.isReg() && (!MO.isImm() || !isInlineConstant(MI, i))) {
3938 ErrInfo = "Illegal immediate value for operand.";
3939 return false;
3940 }
3941 break;
3942 }
3943 case MCOI::OPERAND_IMMEDIATE:
3944 case AMDGPU::OPERAND_KIMM32:
3945 // Check if this operand is an immediate.
3946 // FrameIndex operands will be replaced by immediates, so they are
3947 // allowed.
3948 if (!MI.getOperand(i).isImm() && !MI.getOperand(i).isFI()) {
3949 ErrInfo = "Expected immediate, but got non-immediate";
3950 return false;
3951 }
3952 LLVM_FALLTHROUGH;
3953 default:
3954 continue;
3955 }
3956
3957 if (!MO.isReg())
3958 continue;
3959 Register Reg = MO.getReg();
3960 if (!Reg)
3961 continue;
3962
3963 // FIXME: Ideally we would have separate instruction definitions with the
3964 // aligned register constraint.
3965 // FIXME: We do not verify inline asm operands, but custom inline asm
3966 // verification is broken anyway
3967 if (ST.needsAlignedVGPRs()) {
3968 const TargetRegisterClass *RC = RI.getRegClassForReg(MRI, Reg);
3969 if (RI.hasVectorRegisters(RC) && MO.getSubReg()) {
3970 const TargetRegisterClass *SubRC =
3971 RI.getSubRegClass(RC, MO.getSubReg());
3972 RC = RI.getCompatibleSubRegClass(RC, SubRC, MO.getSubReg());
3973 if (RC)
3974 RC = SubRC;
3975 }
3976
3977 // Check that this is the aligned version of the class.
3978 if (!RC || !RI.isProperlyAlignedRC(*RC)) {
3979 ErrInfo = "Subtarget requires even aligned vector registers";
3980 return false;
3981 }
3982 }
3983
3984 if (RegClass != -1) {
3985 if (Reg.isVirtual())
3986 continue;
3987
3988 const TargetRegisterClass *RC = RI.getRegClass(RegClass);
3989 if (!RC->contains(Reg)) {
3990 ErrInfo = "Operand has incorrect register class.";
3991 return false;
3992 }
3993 }
3994 }
3995
3996 // Verify SDWA
3997 if (isSDWA(MI)) {
3998 if (!ST.hasSDWA()) {
3999 ErrInfo = "SDWA is not supported on this target";
4000 return false;
4001 }
4002
4003 int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst);
4004
4005 const int OpIndicies[] = { DstIdx, Src0Idx, Src1Idx, Src2Idx };
4006
4007 for (int OpIdx: OpIndicies) {
4008 if (OpIdx == -1)
4009 continue;
4010 const MachineOperand &MO = MI.getOperand(OpIdx);
4011
4012 if (!ST.hasSDWAScalar()) {
4013 // Only VGPRS on VI
4014 if (!MO.isReg() || !RI.hasVGPRs(RI.getRegClassForReg(MRI, MO.getReg()))) {
4015 ErrInfo = "Only VGPRs allowed as operands in SDWA instructions on VI";
4016 return false;
4017 }
4018 } else {
4019 // No immediates on GFX9
4020 if (!MO.isReg()) {
4021 ErrInfo =
4022 "Only reg allowed as operands in SDWA instructions on GFX9+";
4023 return false;
4024 }
4025 }
4026 }
4027
4028 if (!ST.hasSDWAOmod()) {
4029 // No omod allowed on VI
4030 const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod);
4031 if (OMod != nullptr &&
4032 (!OMod->isImm() || OMod->getImm() != 0)) {
4033 ErrInfo = "OMod not allowed in SDWA instructions on VI";
4034 return false;
4035 }
4036 }
4037
4038 uint16_t BasicOpcode = AMDGPU::getBasicFromSDWAOp(Opcode);
4039 if (isVOPC(BasicOpcode)) {
4040 if (!ST.hasSDWASdst() && DstIdx != -1) {
4041 // Only vcc allowed as dst on VI for VOPC
4042 const MachineOperand &Dst = MI.getOperand(DstIdx);
4043 if (!Dst.isReg() || Dst.getReg() != AMDGPU::VCC) {
4044 ErrInfo = "Only VCC allowed as dst in SDWA instructions on VI";
4045 return false;
4046 }
4047 } else if (!ST.hasSDWAOutModsVOPC()) {
4048 // No clamp allowed on GFX9 for VOPC
4049 const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp);
4050 if (Clamp && (!Clamp->isImm() || Clamp->getImm() != 0)) {
4051 ErrInfo = "Clamp not allowed in VOPC SDWA instructions on VI";
4052 return false;
4053 }
4054
4055 // No omod allowed on GFX9 for VOPC
4056 const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod);
4057 if (OMod && (!OMod->isImm() || OMod->getImm() != 0)) {
4058 ErrInfo = "OMod not allowed in VOPC SDWA instructions on VI";
4059 return false;
4060 }
4061 }
4062 }
4063
4064 const MachineOperand *DstUnused = getNamedOperand(MI, AMDGPU::OpName::dst_unused);
4065 if (DstUnused && DstUnused->isImm() &&
4066 DstUnused->getImm() == AMDGPU::SDWA::UNUSED_PRESERVE) {
4067 const MachineOperand &Dst = MI.getOperand(DstIdx);
4068 if (!Dst.isReg() || !Dst.isTied()) {
4069 ErrInfo = "Dst register should have tied register";
4070 return false;
4071 }
4072
4073 const MachineOperand &TiedMO =
4074 MI.getOperand(MI.findTiedOperandIdx(DstIdx));
4075 if (!TiedMO.isReg() || !TiedMO.isImplicit() || !TiedMO.isUse()) {
4076 ErrInfo =
4077 "Dst register should be tied to implicit use of preserved register";
4078 return false;
4079 } else if (TiedMO.getReg().isPhysical() &&
4080 Dst.getReg() != TiedMO.getReg()) {
4081 ErrInfo = "Dst register should use same physical register as preserved";
4082 return false;
4083 }
4084 }
4085 }
4086
4087 // Verify MIMG
4088 if (isMIMG(MI.getOpcode()) && !MI.mayStore()) {
4089 // Ensure that the return type used is large enough for all the options
4090 // being used; TFE/LWE require an extra result register.
4091 const MachineOperand *DMask = getNamedOperand(MI, AMDGPU::OpName::dmask);
4092 if (DMask) {
4093 uint64_t DMaskImm = DMask->getImm();
4094 uint32_t RegCount =
4095 isGather4(MI.getOpcode()) ? 4 : countPopulation(DMaskImm);
4096 const MachineOperand *TFE = getNamedOperand(MI, AMDGPU::OpName::tfe);
4097 const MachineOperand *LWE = getNamedOperand(MI, AMDGPU::OpName::lwe);
4098 const MachineOperand *D16 = getNamedOperand(MI, AMDGPU::OpName::d16);
4099
4100 // Adjust for packed 16 bit values
4101 if (D16 && D16->getImm() && !ST.hasUnpackedD16VMem())
4102 RegCount >>= 1;
4103
4104 // Adjust if using LWE or TFE
4105 if ((LWE && LWE->getImm()) || (TFE && TFE->getImm()))
4106 RegCount += 1;
4107
4108 const uint32_t DstIdx =
4109 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata);
4110 const MachineOperand &Dst = MI.getOperand(DstIdx);
4111 if (Dst.isReg()) {
4112 const TargetRegisterClass *DstRC = getOpRegClass(MI, DstIdx);
4113 uint32_t DstSize = RI.getRegSizeInBits(*DstRC) / 32;
4114 if (RegCount > DstSize) {
4115 ErrInfo = "MIMG instruction returns too many registers for dst "
4116 "register class";
4117 return false;
4118 }
4119 }
4120 }
4121 }
4122
4123 // Verify VOP*. Ignore multiple sgpr operands on writelane.
4124 if (Desc.getOpcode() != AMDGPU::V_WRITELANE_B32
4125 && (isVOP1(MI) || isVOP2(MI) || isVOP3(MI) || isVOPC(MI) || isSDWA(MI))) {
4126 // Only look at the true operands. Only a real operand can use the constant
4127 // bus, and we don't want to check pseudo-operands like the source modifier
4128 // flags.
4129 const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx };
4130
4131 unsigned ConstantBusCount = 0;
4132 bool UsesLiteral = false;
4133 const MachineOperand *LiteralVal = nullptr;
4134
4135 if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1)
4136 ++ConstantBusCount;
4137
4138 SmallVector<Register, 2> SGPRsUsed;
4139 Register SGPRUsed;
4140
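// Each distinct SGPR operand and the first literal each count as one read of
// the constant bus; a second, different literal can only appear on VOP3
// encodings and is reported as an error below.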
4141 for (int OpIdx : OpIndices) {
4142 if (OpIdx == -1)
4143 break;
4144 const MachineOperand &MO = MI.getOperand(OpIdx);
4145 if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) {
4146 if (MO.isReg()) {
4147 SGPRUsed = MO.getReg();
4148 if (llvm::all_of(SGPRsUsed, [SGPRUsed](unsigned SGPR) {
4149 return SGPRUsed != SGPR;
4150 })) {
4151 ++ConstantBusCount;
4152 SGPRsUsed.push_back(SGPRUsed);
4153 }
4154 } else {
4155 if (!UsesLiteral) {
4156 ++ConstantBusCount;
4157 UsesLiteral = true;
4158 LiteralVal = &MO;
4159 } else if (!MO.isIdenticalTo(*LiteralVal)) {
4160 assert(isVOP3(MI));
4161 ErrInfo = "VOP3 instruction uses more than one literal";
4162 return false;
4163 }
4164 }
4165 }
4166 }
4167
4168 SGPRUsed = findImplicitSGPRRead(MI);
4169 if (SGPRUsed != AMDGPU::NoRegister) {
4170 // Implicit uses may safely overlap true operands
4171 if (llvm::all_of(SGPRsUsed, [this, SGPRUsed](unsigned SGPR) {
4172 return !RI.regsOverlap(SGPRUsed, SGPR);
4173 })) {
4174 ++ConstantBusCount;
4175 SGPRsUsed.push_back(SGPRUsed);
4176 }
4177 }
4178
4179 // v_writelane_b32 is an exception to the constant bus restriction:
4180 // vsrc0 can be an sgpr, const or m0, and the lane select an sgpr, m0 or inline-const
4181 if (ConstantBusCount > ST.getConstantBusLimit(Opcode) &&
4182 Opcode != AMDGPU::V_WRITELANE_B32) {
4183 ErrInfo = "VOP* instruction violates constant bus restriction";
4184 return false;
4185 }
4186
4187 if (isVOP3(MI) && UsesLiteral && !ST.hasVOP3Literal()) {
4188 ErrInfo = "VOP3 instruction uses literal";
4189 return false;
4190 }
4191 }
4192
4193 // Special case for writelane - this can break the multiple constant bus rule,
4194 // but still can't use more than one SGPR register
4195 if (Desc.getOpcode() == AMDGPU::V_WRITELANE_B32) {
4196 unsigned SGPRCount = 0;
4197 Register SGPRUsed = AMDGPU::NoRegister;
4198
4199 for (int OpIdx : {Src0Idx, Src1Idx, Src2Idx}) {
4200 if (OpIdx == -1)
4201 break;
4202
4203 const MachineOperand &MO = MI.getOperand(OpIdx);
4204
4205 if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) {
4206 if (MO.isReg() && MO.getReg() != AMDGPU::M0) {
4207 if (MO.getReg() != SGPRUsed)
4208 ++SGPRCount;
4209 SGPRUsed = MO.getReg();
4210 }
4211 }
4212 if (SGPRCount > ST.getConstantBusLimit(Opcode)) {
4213 ErrInfo = "WRITELANE instruction violates constant bus restriction";
4214 return false;
4215 }
4216 }
4217 }
4218
4219 // Verify misc. restrictions on specific instructions.
4220 if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32_e64 ||
4221 Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64_e64) {
4222 const MachineOperand &Src0 = MI.getOperand(Src0Idx);
4223 const MachineOperand &Src1 = MI.getOperand(Src1Idx);
4224 const MachineOperand &Src2 = MI.getOperand(Src2Idx);
4225 if (Src0.isReg() && Src1.isReg() && Src2.isReg()) {
4226 if (!compareMachineOp(Src0, Src1) &&
4227 !compareMachineOp(Src0, Src2)) {
4228 ErrInfo = "v_div_scale_{f32|f64} require src0 = src1 or src2";
4229 return false;
4230 }
4231 }
4232 if ((getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm() &
4233 SISrcMods::ABS) ||
4234 (getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm() &
4235 SISrcMods::ABS) ||
4236 (getNamedOperand(MI, AMDGPU::OpName::src2_modifiers)->getImm() &
4237 SISrcMods::ABS)) {
4238 ErrInfo = "ABS not allowed in VOP3B instructions";
4239 return false;
4240 }
4241 }
4242
4243 if (isSOP2(MI) || isSOPC(MI)) {
4244 const MachineOperand &Src0 = MI.getOperand(Src0Idx);
4245 const MachineOperand &Src1 = MI.getOperand(Src1Idx);
4246 unsigned Immediates = 0;
4247
4248 if (!Src0.isReg() &&
4249 !isInlineConstant(Src0, Desc.OpInfo[Src0Idx].OperandType))
4250 Immediates++;
4251 if (!Src1.isReg() &&
4252 !isInlineConstant(Src1, Desc.OpInfo[Src1Idx].OperandType))
4253 Immediates++;
4254
4255 if (Immediates > 1) {
4256 ErrInfo = "SOP2/SOPC instruction requires too many immediate constants";
4257 return false;
4258 }
4259 }
4260
4261 if (isSOPK(MI)) {
4262 auto Op = getNamedOperand(MI, AMDGPU::OpName::simm16);
4263 if (Desc.isBranch()) {
4264 if (!Op->isMBB()) {
4265 ErrInfo = "invalid branch target for SOPK instruction";
4266 return false;
4267 }
4268 } else {
4269 uint64_t Imm = Op->getImm();
4270 if (sopkIsZext(MI)) {
4271 if (!isUInt<16>(Imm)) {
4272 ErrInfo = "invalid immediate for SOPK instruction";
4273 return false;
4274 }
4275 } else {
4276 if (!isInt<16>(Imm)) {
4277 ErrInfo = "invalid immediate for SOPK instruction";
4278 return false;
4279 }
4280 }
4281 }
4282 }
4283
4284 if (Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e32 ||
4285 Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e64 ||
4286 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 ||
4287 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64) {
4288 const bool IsDst = Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 ||
4289 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64;
4290
4291 const unsigned StaticNumOps = Desc.getNumOperands() +
4292 Desc.getNumImplicitUses();
4293 const unsigned NumImplicitOps = IsDst ? 2 : 1;
4294
4295 // Allow additional implicit operands. This allows a fixup done by the post
4296 // RA scheduler where the main implicit operand is killed and implicit-defs
4297 // are added for sub-registers that remain live after this instruction.
4298 if (MI.getNumOperands() < StaticNumOps + NumImplicitOps) {
4299 ErrInfo = "missing implicit register operands";
4300 return false;
4301 }
4302
4303 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
4304 if (IsDst) {
4305 if (!Dst->isUse()) {
4306 ErrInfo = "v_movreld_b32 vdst should be a use operand";
4307 return false;
4308 }
4309
4310 unsigned UseOpIdx;
4311 if (!MI.isRegTiedToUseOperand(StaticNumOps, &UseOpIdx) ||
4312 UseOpIdx != StaticNumOps + 1) {
4313 ErrInfo = "movrel implicit operands should be tied";
4314 return false;
4315 }
4316 }
4317
4318 const MachineOperand &Src0 = MI.getOperand(Src0Idx);
4319 const MachineOperand &ImpUse
4320 = MI.getOperand(StaticNumOps + NumImplicitOps - 1);
4321 if (!ImpUse.isReg() || !ImpUse.isUse() ||
4322 !isSubRegOf(RI, ImpUse, IsDst ? *Dst : Src0)) {
4323 ErrInfo = "src0 should be subreg of implicit vector use";
4324 return false;
4325 }
4326 }
4327
4328 // Make sure we aren't losing exec uses in the td files. This mostly requires
4329 // being careful when using let Uses to try to add other use registers.
4330 if (shouldReadExec(MI)) {
4331 if (!MI.hasRegisterImplicitUseOperand(AMDGPU::EXEC)) {
4332 ErrInfo = "VALU instruction does not implicitly read exec mask";
4333 return false;
4334 }
4335 }
4336
4337 if (isSMRD(MI)) {
4338 if (MI.mayStore()) {
4339 // The register offset form of scalar stores may only use m0 as the
4340 // soffset register.
4341 const MachineOperand *Soff = getNamedOperand(MI, AMDGPU::OpName::soff);
4342 if (Soff && Soff->getReg() != AMDGPU::M0) {
4343 ErrInfo = "scalar stores must use m0 as offset register";
4344 return false;
4345 }
4346 }
4347 }
4348
4349 if (isFLAT(MI) && !ST.hasFlatInstOffsets()) {
4350 const MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset);
4351 if (Offset->getImm() != 0) {
4352 ErrInfo = "subtarget does not support offsets in flat instructions";
4353 return false;
4354 }
4355 }
4356
4357 if (isMIMG(MI)) {
4358 const MachineOperand *DimOp = getNamedOperand(MI, AMDGPU::OpName::dim);
4359 if (DimOp) {
4360 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opcode,
4361 AMDGPU::OpName::vaddr0);
4362 int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::srsrc);
4363 const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opcode);
4364 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
4365 AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode);
4366 const AMDGPU::MIMGDimInfo *Dim =
4367 AMDGPU::getMIMGDimInfoByEncoding(DimOp->getImm());
4368
4369 if (!Dim) {
4370 ErrInfo = "dim is out of range";
4371 return false;
4372 }
4373
4374 bool IsA16 = false;
4375 if (ST.hasR128A16()) {
4376 const MachineOperand *R128A16 = getNamedOperand(MI, AMDGPU::OpName::r128);
4377 IsA16 = R128A16->getImm() != 0;
4378 } else if (ST.hasGFX10A16()) {
4379 const MachineOperand *A16 = getNamedOperand(MI, AMDGPU::OpName::a16);
4380 IsA16 = A16->getImm() != 0;
4381 }
4382
4383 bool IsNSA = SRsrcIdx - VAddr0Idx > 1;
4384
4385 unsigned AddrWords =
4386 AMDGPU::getAddrSizeMIMGOp(BaseOpcode, Dim, IsA16, ST.hasG16());
4387
4388 unsigned VAddrWords;
4389 if (IsNSA) {
4390 VAddrWords = SRsrcIdx - VAddr0Idx;
4391 } else {
4392 const TargetRegisterClass *RC = getOpRegClass(MI, VAddr0Idx);
4393 VAddrWords = MRI.getTargetRegisterInfo()->getRegSizeInBits(*RC) / 32;
4394 if (AddrWords > 8)
4395 AddrWords = 16;
4396 }
4397
4398 if (VAddrWords != AddrWords) {
4399 LLVM_DEBUG(dbgs() << "bad vaddr size, expected " << AddrWords
4400 << " but got " << VAddrWords << "\n");
4401 ErrInfo = "bad vaddr size";
4402 return false;
4403 }
4404 }
4405 }
4406
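// Validate the dpp_ctrl immediate: reject encodings in the reserved/unused
// ranges, controls that this subtarget generation does not support, and
// 64-bit dst/src0 register classes combined with a control that is not legal
// for 64-bit DPP.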
4407 const MachineOperand *DppCt = getNamedOperand(MI, AMDGPU::OpName::dpp_ctrl);
4408 if (DppCt) {
4409 using namespace AMDGPU::DPP;
4410
4411 unsigned DC = DppCt->getImm();
4412 if (DC == DppCtrl::DPP_UNUSED1 || DC == DppCtrl::DPP_UNUSED2 ||
4413 DC == DppCtrl::DPP_UNUSED3 || DC > DppCtrl::DPP_LAST ||
4414 (DC >= DppCtrl::DPP_UNUSED4_FIRST && DC <= DppCtrl::DPP_UNUSED4_LAST) ||
4415 (DC >= DppCtrl::DPP_UNUSED5_FIRST && DC <= DppCtrl::DPP_UNUSED5_LAST) ||
4416 (DC >= DppCtrl::DPP_UNUSED6_FIRST && DC <= DppCtrl::DPP_UNUSED6_LAST) ||
4417 (DC >= DppCtrl::DPP_UNUSED7_FIRST && DC <= DppCtrl::DPP_UNUSED7_LAST) ||
4418 (DC >= DppCtrl::DPP_UNUSED8_FIRST && DC <= DppCtrl::DPP_UNUSED8_LAST)) {
4419 ErrInfo = "Invalid dpp_ctrl value";
4420 return false;
4421 }
4422 if (DC >= DppCtrl::WAVE_SHL1 && DC <= DppCtrl::WAVE_ROR1 &&
4423 ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
4424 ErrInfo = "Invalid dpp_ctrl value: "
4425 "wavefront shifts are not supported on GFX10+";
4426 return false;
4427 }
4428 if (DC >= DppCtrl::BCAST15 && DC <= DppCtrl::BCAST31 &&
4429 ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
4430 ErrInfo = "Invalid dpp_ctrl value: "
4431 "broadcasts are not supported on GFX10+";
4432 return false;
4433 }
4434 if (DC >= DppCtrl::ROW_SHARE_FIRST && DC <= DppCtrl::ROW_XMASK_LAST &&
4435 ST.getGeneration() < AMDGPUSubtarget::GFX10) {
4436 if (DC >= DppCtrl::ROW_NEWBCAST_FIRST &&
4437 DC <= DppCtrl::ROW_NEWBCAST_LAST &&
4438 !ST.hasGFX90AInsts()) {
4439 ErrInfo = "Invalid dpp_ctrl value: "
4440 "row_newbroadcast/row_share is not supported before "
4441 "GFX90A/GFX10";
4442 return false;
4443 } else if (DC > DppCtrl::ROW_NEWBCAST_LAST || !ST.hasGFX90AInsts()) {
4444 ErrInfo = "Invalid dpp_ctrl value: "
4445 "row_share and row_xmask are not supported before GFX10";
4446 return false;
4447 }
4448 }
4449
4450 int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst);
4451 int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
4452
4453 if (Opcode != AMDGPU::V_MOV_B64_DPP_PSEUDO &&
4454 ((DstIdx >= 0 &&
4455 (Desc.OpInfo[DstIdx].RegClass == AMDGPU::VReg_64RegClassID ||
4456 Desc.OpInfo[DstIdx].RegClass == AMDGPU::VReg_64_Align2RegClassID)) ||
4457 ((Src0Idx >= 0 &&
4458 (Desc.OpInfo[Src0Idx].RegClass == AMDGPU::VReg_64RegClassID ||
4459 Desc.OpInfo[Src0Idx].RegClass ==
4460 AMDGPU::VReg_64_Align2RegClassID)))) &&
4461 !AMDGPU::isLegal64BitDPPControl(DC)) {
4462 ErrInfo = "Invalid dpp_ctrl value: "
4463 "64 bit dpp only support row_newbcast";
4464 return false;
4465 }
4466 }
4467
4468 if ((MI.mayStore() || MI.mayLoad()) && !isVGPRSpill(MI)) {
4469 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
4470 uint16_t DataNameIdx = isDS(Opcode) ? AMDGPU::OpName::data0
4471 : AMDGPU::OpName::vdata;
4472 const MachineOperand *Data = getNamedOperand(MI, DataNameIdx);
4473 const MachineOperand *Data2 = getNamedOperand(MI, AMDGPU::OpName::data1);
4474 if (Data && !Data->isReg())
4475 Data = nullptr;
4476
4477 if (ST.hasGFX90AInsts()) {
4478 if (Dst && Data &&
4479 (RI.isAGPR(MRI, Dst->getReg()) != RI.isAGPR(MRI, Data->getReg()))) {
4480 ErrInfo = "Invalid register class: "
4481 "vdata and vdst should be both VGPR or AGPR";
4482 return false;
4483 }
4484 if (Data && Data2 &&
4485 (RI.isAGPR(MRI, Data->getReg()) != RI.isAGPR(MRI, Data2->getReg()))) {
4486 ErrInfo = "Invalid register class: "
4487 "both data operands should be VGPR or AGPR";
4488 return false;
4489 }
4490 } else {
4491 if ((Dst && RI.isAGPR(MRI, Dst->getReg())) ||
4492 (Data && RI.isAGPR(MRI, Data->getReg())) ||
4493 (Data2 && RI.isAGPR(MRI, Data2->getReg()))) {
4494 ErrInfo = "Invalid register class: "
4495 "agpr loads and stores not supported on this GPU";
4496 return false;
4497 }
4498 }
4499 }
4500
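// Subtargets that require even-aligned VGPR tuples (such as GFX90A) also
// require the data0 operand of DS_GWS_INIT/DS_GWS_SEMA_BR/DS_GWS_BARRIER to
// start at an even register.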
4501 if (ST.needsAlignedVGPRs() &&
4502 (MI.getOpcode() == AMDGPU::DS_GWS_INIT ||
4503 MI.getOpcode() == AMDGPU::DS_GWS_SEMA_BR ||
4504 MI.getOpcode() == AMDGPU::DS_GWS_BARRIER)) {
4505 const MachineOperand *Op = getNamedOperand(MI, AMDGPU::OpName::data0);
4506 Register Reg = Op->getReg();
4507 bool Aligned = true;
4508 if (Reg.isPhysical()) {
4509 Aligned = !(RI.getHWRegIndex(Reg) & 1);
4510 } else {
4511 const TargetRegisterClass &RC = *MRI.getRegClass(Reg);
4512 Aligned = RI.getRegSizeInBits(RC) > 32 && RI.isProperlyAlignedRC(RC) &&
4513 !(RI.getChannelFromSubReg(Op->getSubReg()) & 1);
4514 }
4515
4516 if (!Aligned) {
4517 ErrInfo = "Subtarget requires even aligned vector registers "
4518 "for DS_GWS instructions";
4519 return false;
4520 }
4521 }
4522
4523 return true;
4524}
4525
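// Map a scalar (SALU) opcode to the VALU opcode used when moving the
// instruction to the VALU, or INSTRUCTION_LIST_END if no direct equivalent
// exists.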
4526unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) const {
4527 switch (MI.getOpcode()) {
4528 default: return AMDGPU::INSTRUCTION_LIST_END;
4529 case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE;
4530 case AMDGPU::COPY: return AMDGPU::COPY;
4531 case AMDGPU::PHI: return AMDGPU::PHI;
4532 case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG;
4533 case AMDGPU::WQM: return AMDGPU::WQM;
4534 case AMDGPU::SOFT_WQM: return AMDGPU::SOFT_WQM;
4535 case AMDGPU::STRICT_WWM: return AMDGPU::STRICT_WWM;
4536 case AMDGPU::STRICT_WQM: return AMDGPU::STRICT_WQM;
4537 case AMDGPU::S_MOV_B32: {
4538 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
4539 return MI.getOperand(1).isReg() ||
4540 RI.isAGPR(MRI, MI.getOperand(0).getReg()) ?
4541 AMDGPU::COPY : AMDGPU::V_MOV_B32_e32;
4542 }
4543 case AMDGPU::S_ADD_I32:
4544 return ST.hasAddNoCarry() ? AMDGPU::V_ADD_U32_e64 : AMDGPU::V_ADD_CO_U32_e32;
4545 case AMDGPU::S_ADDC_U32:
4546 return AMDGPU::V_ADDC_U32_e32;
4547 case AMDGPU::S_SUB_I32:
4548 return ST.hasAddNoCarry() ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_SUB_CO_U32_e32;
4549 // FIXME: These are not consistently handled, and selected when the carry is
4550 // used.
4551 case AMDGPU::S_ADD_U32:
4552 return AMDGPU::V_ADD_CO_U32_e32;
4553 case AMDGPU::S_SUB_U32:
4554 return AMDGPU::V_SUB_CO_U32_e32;
4555 case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32;
4556 case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_U32_e64;
4557 case AMDGPU::S_MUL_HI_U32: return AMDGPU::V_MUL_HI_U32_e64;
4558 case AMDGPU::S_MUL_HI_I32: return AMDGPU::V_MUL_HI_I32_e64;
4559 case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e64;
4560 case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e64;
4561 case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e64;
4562 case AMDGPU::S_XNOR_B32:
4563 return ST.hasDLInsts() ? AMDGPU::V_XNOR_B32_e64 : AMDGPU::INSTRUCTION_LIST_END;
4564 case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e64;
4565 case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e64;
4566 case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e64;
4567 case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e64;
4568 case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32;
4569 case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64_e64;
4570 case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32;
4571 case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64_e64;
4572 case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32;
4573 case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64_e64;
4574 case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32_e64;
4575 case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32_e64;
4576 case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32_e64;
4577 case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32_e64;
4578 case AMDGPU::S_BFM_B32: return AMDGPU::V_BFM_B32_e64;
4579 case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32;
4580 case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32;
4581 case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32;
4582 case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e64;
4583 case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e64;
4584 case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e64;
4585 case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e64;
4586 case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e64;
4587 case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e64;
4588 case AMDGPU::S_CMP_EQ_U32: return AMDGPU::V_CMP_EQ_U32_e64;
4589 case AMDGPU::S_CMP_LG_U32: return AMDGPU::V_CMP_NE_U32_e64;
4590 case AMDGPU::S_CMP_GT_U32: return AMDGPU::V_CMP_GT_U32_e64;
4591 case AMDGPU::S_CMP_GE_U32: return AMDGPU::V_CMP_GE_U32_e64;
4592 case AMDGPU::S_CMP_LT_U32: return AMDGPU::V_CMP_LT_U32_e64;
4593 case AMDGPU::S_CMP_LE_U32: return AMDGPU::V_CMP_LE_U32_e64;
4594 case AMDGPU::S_CMP_EQ_U64: return AMDGPU::V_CMP_EQ_U64_e64;
4595 case AMDGPU::S_CMP_LG_U64: return AMDGPU::V_CMP_NE_U64_e64;
4596 case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64;
4597 case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32;
4598 case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32;
4599 case AMDGPU::S_FLBIT_I32: return AMDGPU::V_FFBH_I32_e64;
4600 case AMDGPU::S_CBRANCH_SCC0: return AMDGPU::S_CBRANCH_VCCZ;
4601 case AMDGPU::S_CBRANCH_SCC1: return AMDGPU::S_CBRANCH_VCCNZ;
4602 }
4603 llvm_unreachable(
4604 "Unexpected scalar opcode without corresponding vector one!");
4605}
4606
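// Narrow the combined AV_* (VGPR-or-AGPR) register class IDs used by
// load/store, DS and MIMG operands down to the corresponding plain VGPR
// classes; any other register class ID is returned unchanged.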
4607static unsigned adjustAllocatableRegClass(const GCNSubtarget &ST,
4608 const MachineRegisterInfo &MRI,
4609 const MCInstrDesc &TID,
4610 unsigned RCID,
4611 bool IsAllocatable) {
4612 if ((IsAllocatable || !ST.hasGFX90AInsts() || !MRI.reservedRegsFrozen()) &&
4613 (((TID.mayLoad() || TID.mayStore()) &&
4614 !(TID.TSFlags & SIInstrFlags::VGPRSpill)) ||
4615 (TID.TSFlags & (SIInstrFlags::DS | SIInstrFlags::MIMG)))) {
4616 switch (RCID) {
4617 case AMDGPU::AV_32RegClassID: return AMDGPU::VGPR_32RegClassID;
4618 case AMDGPU::AV_64RegClassID: return AMDGPU::VReg_64RegClassID;
4619 case AMDGPU::AV_96RegClassID: return AMDGPU::VReg_96RegClassID;
4620 case AMDGPU::AV_128RegClassID: return AMDGPU::VReg_128RegClassID;
4621 case AMDGPU::AV_160RegClassID: return AMDGPU::VReg_160RegClassID;
4622 default:
4623 break;
4624 }
4625 }
4626 return RCID;
4627}
4628
4629const TargetRegisterClass *SIInstrInfo::getRegClass(const MCInstrDesc &TID,
4630 unsigned OpNum, const TargetRegisterInfo *TRI,
4631 const MachineFunction &MF)
4632 const {
4633 if (OpNum >= TID.getNumOperands())
4634 return nullptr;
4635 auto RegClass = TID.OpInfo[OpNum].RegClass;
4636 bool IsAllocatable = false;
4637 if (TID.TSFlags & (SIInstrFlags::DS | SIInstrFlags::FLAT)) {
4638 // vdst and vdata should be both VGPR or AGPR, same for the DS instructions
4639 // with two data operands. Request a register class constrained to VGPR only
4640 // if both operands are present, as Machine Copy Propagation cannot check this
4641 // constraint, and possibly other passes cannot either.
4642 //
4643 // The check is limited to FLAT and DS because atomics in non-flat encoding
4644 // have their vdst and vdata tied to be the same register.
4645 const int VDstIdx = AMDGPU::getNamedOperandIdx(TID.Opcode,
4646 AMDGPU::OpName::vdst);
4647 const int DataIdx = AMDGPU::getNamedOperandIdx(TID.Opcode,
4648 (TID.TSFlags & SIInstrFlags::DS) ? AMDGPU::OpName::data0
4649 : AMDGPU::OpName::vdata);
4650 if (DataIdx != -1) {
4651 IsAllocatable = VDstIdx != -1 ||
4652 AMDGPU::getNamedOperandIdx(TID.Opcode,
4653 AMDGPU::OpName::data1) != -1;
4654 }
4655 }
4656 RegClass = adjustAllocatableRegClass(ST, MF.getRegInfo(), TID, RegClass,
4657 IsAllocatable);
4658 return RI.getRegClass(RegClass);
4659}
4660
4661const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI,
4662 unsigned OpNo) const {
4663 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
4664 const MCInstrDesc &Desc = get(MI.getOpcode());
4665 if (MI.isVariadic() || OpNo >= Desc.getNumOperands() ||
4666 Desc.OpInfo[OpNo].RegClass == -1) {
4667 Register Reg = MI.getOperand(OpNo).getReg();
4668
4669 if (Reg.isVirtual())
4670 return MRI.getRegClass(Reg);
4671 return RI.getPhysRegClass(Reg);
4672 }
4673
4674 unsigned RCID = Desc.OpInfo[OpNo].RegClass;
4675 RCID = adjustAllocatableRegClass(ST, MRI, Desc, RCID, true);
4676 return RI.getRegClass(RCID);
4677}
4678
4679void SIInstrInfo::legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const {
4680 MachineBasicBlock::iterator I = MI;
4681 MachineBasicBlock *MBB = MI.getParent();
4682 MachineOperand &MO = MI.getOperand(OpIdx);
4683 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
4684 unsigned RCID = get(MI.getOpcode()).OpInfo[OpIdx].RegClass;
4685 const TargetRegisterClass *RC = RI.getRegClass(RCID);
4686 unsigned Size = RI.getRegSizeInBits(*RC);
4687 unsigned Opcode = (Size == 64) ? AMDGPU::V_MOV_B64_PSEUDO : AMDGPU::V_MOV_B32_e32;
4688 if (MO.isReg())
4689 Opcode = AMDGPU::COPY;
4690 else if (RI.isSGPRClass(RC))
4691 Opcode = (Size == 64) ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
4692
4693 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC);
4694 const TargetRegisterClass *VRC64 = RI.getVGPR64Class();
4695 if (RI.getCommonSubClass(VRC64, VRC))
4696 VRC = VRC64;
4697 else
4698 VRC = &AMDGPU::VGPR_32RegClass;
4699
4700 Register Reg = MRI.createVirtualRegister(VRC);
4701 DebugLoc DL = MBB->findDebugLoc(I);
4702 BuildMI(*MI.getParent(), I, DL, get(Opcode), Reg).add(MO);
4703 MO.ChangeToRegister(Reg, false);
4704}
4705
4706unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI,
4707 MachineRegisterInfo &MRI,
4708 MachineOperand &SuperReg,
4709 const TargetRegisterClass *SuperRC,
4710 unsigned SubIdx,
4711 const TargetRegisterClass *SubRC)
4712 const {
4713 MachineBasicBlock *MBB = MI->getParent();
4714 DebugLoc DL = MI->getDebugLoc();
4715 Register SubReg = MRI.createVirtualRegister(SubRC);
4716
4717 if (SuperReg.getSubReg() == AMDGPU::NoSubRegister) {
4718 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
4719 .addReg(SuperReg.getReg(), 0, SubIdx);
4720 return SubReg;
4721 }
4722
4723 // Just in case the super register is itself a sub-register, copy it to a new
4724 // value so we don't need to worry about merging its subreg index with the
4725 // SubIdx passed to this function. The register coalescer should be able to
4726 // eliminate this extra copy.
4727 Register NewSuperReg = MRI.createVirtualRegister(SuperRC);
4728
4729 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), NewSuperReg)
4730 .addReg(SuperReg.getReg(), 0, SuperReg.getSubReg());
4731
4732 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
4733 .addReg(NewSuperReg, 0, SubIdx);
4734
4735 return SubReg;
4736}
4737
4738MachineOperand SIInstrInfo::buildExtractSubRegOrImm(
4739 MachineBasicBlock::iterator MII,
4740 MachineRegisterInfo &MRI,
4741 MachineOperand &Op,
4742 const TargetRegisterClass *SuperRC,
4743 unsigned SubIdx,
4744 const TargetRegisterClass *SubRC) const {
4745 if (Op.isImm()) {
4746 if (SubIdx == AMDGPU::sub0)
4747 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm()));
4748 if (SubIdx == AMDGPU::sub1)
4749 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm() >> 32));
4750
4751 llvm_unreachable("Unhandled register index for immediate");
4752 }
4753
4754 unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC,
4755 SubIdx, SubRC);
4756 return MachineOperand::CreateReg(SubReg, false);
4757}
4758
4759// Change the order of operands from (0, 1, 2) to (0, 2, 1)
4760void SIInstrInfo::swapOperands(MachineInstr &Inst) const {
4761 assert(Inst.getNumExplicitOperands() == 3);
4762 MachineOperand Op1 = Inst.getOperand(1);
4763 Inst.RemoveOperand(1);
4764 Inst.addOperand(Op1);
4765}
4766
4767bool SIInstrInfo::isLegalRegOperand(const MachineRegisterInfo &MRI,
4768 const MCOperandInfo &OpInfo,
4769 const MachineOperand &MO) const {
4770 if (!MO.isReg())
4771 return false;
4772
4773 Register Reg = MO.getReg();
4774
4775 const TargetRegisterClass *DRC = RI.getRegClass(OpInfo.RegClass);
4776 if (Reg.isPhysical())
4777 return DRC->contains(Reg);
4778
4779 const TargetRegisterClass *RC = MRI.getRegClass(Reg);
4780
4781 if (MO.getSubReg()) {
4782 const MachineFunction *MF = MO.getParent()->getParent()->getParent();
4783 const TargetRegisterClass *SuperRC = RI.getLargestLegalSuperClass(RC, *MF);
4784 if (!SuperRC)
4785 return false;
4786
4787 DRC = RI.getMatchingSuperRegClass(SuperRC, DRC, MO.getSubReg());
4788 if (!DRC)
4789 return false;
4790 }
4791 return RC->hasSuperClassEq(DRC);
4792}
4793
4794bool SIInstrInfo::isLegalVSrcOperand(const MachineRegisterInfo &MRI,
4795 const MCOperandInfo &OpInfo,
4796 const MachineOperand &MO) const {
4797 if (MO.isReg())
4798 return isLegalRegOperand(MRI, OpInfo, MO);
4799
4800 // Handle non-register types that are treated like immediates.
4801 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI() || MO.isGlobal());
4802 return true;
4803}
4804
4805bool SIInstrInfo::isOperandLegal(const MachineInstr &MI, unsigned OpIdx,
4806 const MachineOperand *MO) const {
4807 const MachineFunction &MF = *MI.getParent()->getParent();
4808 const MachineRegisterInfo &MRI = MF.getRegInfo();
4809 const MCInstrDesc &InstDesc = MI.getDesc();
4810 const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx];
4811 const TargetRegisterClass *DefinedRC =
4812 OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr;
4813 if (!MO)
4814 MO = &MI.getOperand(OpIdx);
4815
4816 int ConstantBusLimit = ST.getConstantBusLimit(MI.getOpcode());
4817 int VOP3LiteralLimit = ST.hasVOP3Literal() ? 1 : 0;
4818 if (isVALU(MI) && usesConstantBus(MRI, *MO, OpInfo)) {
4819 if (isVOP3(MI) && isLiteralConstantLike(*MO, OpInfo) && !VOP3LiteralLimit--)
4820 return false;
4821
4822 SmallDenseSet<RegSubRegPair> SGPRsUsed;
4823 if (MO->isReg())
4824 SGPRsUsed.insert(RegSubRegPair(MO->getReg(), MO->getSubReg()));
4825
4826 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
4827 if (i == OpIdx)
4828 continue;
4829 const MachineOperand &Op = MI.getOperand(i);
4830 if (Op.isReg()) {
4831 RegSubRegPair SGPR(Op.getReg(), Op.getSubReg());
4832 if (!SGPRsUsed.count(SGPR) &&
4833 usesConstantBus(MRI, Op, InstDesc.OpInfo[i])) {
4834 if (--ConstantBusLimit <= 0)
4835 return false;
4836 SGPRsUsed.insert(SGPR);
4837 }
4838 } else if (InstDesc.OpInfo[i].OperandType == AMDGPU::OPERAND_KIMM32) {
4839 if (--ConstantBusLimit <= 0)
4840 return false;
4841 } else if (isVOP3(MI) && AMDGPU::isSISrcOperand(InstDesc, i) &&
4842 isLiteralConstantLike(Op, InstDesc.OpInfo[i])) {
4843 if (!VOP3LiteralLimit--)
4844 return false;
4845 if (--ConstantBusLimit <= 0)
4846 return false;
4847 }
4848 }
4849 }
4850
4851 if (MO->isReg()) {
4852 assert(DefinedRC);
4853 if (!isLegalRegOperand(MRI, OpInfo, *MO))
4854 return false;
4855 bool IsAGPR = RI.isAGPR(MRI, MO->getReg());
4856 if (IsAGPR && !ST.hasMAIInsts())
4857 return false;
4858 unsigned Opc = MI.getOpcode();
4859 if (IsAGPR &&
4860 (!ST.hasGFX90AInsts() || !MRI.reservedRegsFrozen()) &&
4861 (MI.mayLoad() || MI.mayStore() || isDS(Opc) || isMIMG(Opc)))
4862 return false;
4863 // Atomics should have both vdst and vdata either vgpr or agpr.
4864 const int VDstIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
4865 const int DataIdx = AMDGPU::getNamedOperandIdx(Opc,
4866 isDS(Opc) ? AMDGPU::OpName::data0 : AMDGPU::OpName::vdata);
4867 if ((int)OpIdx == VDstIdx && DataIdx != -1 &&
4868 MI.getOperand(DataIdx).isReg() &&
4869 RI.isAGPR(MRI, MI.getOperand(DataIdx).getReg()) != IsAGPR)
4870 return false;
4871 if ((int)OpIdx == DataIdx) {
4872 if (VDstIdx != -1 &&
4873 RI.isAGPR(MRI, MI.getOperand(VDstIdx).getReg()) != IsAGPR)
4874 return false;
4875 // DS instructions with 2 src operands also must have tied RC.
4876 const int Data1Idx = AMDGPU::getNamedOperandIdx(Opc,
4877 AMDGPU::OpName::data1);
4878 if (Data1Idx != -1 && MI.getOperand(Data1Idx).isReg() &&
4879 RI.isAGPR(MRI, MI.getOperand(Data1Idx).getReg()) != IsAGPR)
4880 return false;
4881 }
4882 if (Opc == AMDGPU::V_ACCVGPR_WRITE_B32_e64 &&
4883 (int)OpIdx == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) &&
4884 RI.isSGPRReg(MRI, MO->getReg()))
4885 return false;
4886 return true;
4887 }
4888
4889 // Handle non-register types that are treated like immediates.
4890 assert(MO->isImm() || MO->isTargetIndex() || MO->isFI() || MO->isGlobal());
4891
4892 if (!DefinedRC) {
4893 // This operand expects an immediate.
4894 return true;
4895 }
4896
4897 return isImmOperandLegal(MI, OpIdx, *MO);
4898}
4899
4900void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI,
4901 MachineInstr &MI) const {
4902 unsigned Opc = MI.getOpcode();
4903 const MCInstrDesc &InstrDesc = get(Opc);
4904
4905 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
4906 MachineOperand &Src0 = MI.getOperand(Src0Idx);
4907
4908 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
4909 MachineOperand &Src1 = MI.getOperand(Src1Idx);
4910
4911 // If there is an implicit SGPR use such as VCC use for v_addc_u32/v_subb_u32,
4912 // we may have only one constant bus use before GFX10.
4913 bool HasImplicitSGPR = findImplicitSGPRRead(MI) != AMDGPU::NoRegister;
4914 if (HasImplicitSGPR && ST.getConstantBusLimit(Opc) <= 1 &&
4915 Src0.isReg() && (RI.isSGPRReg(MRI, Src0.getReg()) ||
4916 isLiteralConstantLike(Src0, InstrDesc.OpInfo[Src0Idx])))
4917 legalizeOpWithMove(MI, Src0Idx);
4918
4919 // Special case: V_WRITELANE_B32 accepts only immediate or SGPR operands for
4920 // both the value to write (src0) and lane select (src1). Fix up non-SGPR
4921 // src0/src1 with V_READFIRSTLANE.
4922 if (Opc == AMDGPU::V_WRITELANE_B32) {
4923 const DebugLoc &DL = MI.getDebugLoc();
4924 if (Src0.isReg() && RI.isVGPR(MRI, Src0.getReg())) {
4925 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
4926 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
4927 .add(Src0);
4928 Src0.ChangeToRegister(Reg, false);
4929 }
4930 if (Src1.isReg() && RI.isVGPR(MRI, Src1.getReg())) {
4931 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
4932 const DebugLoc &DL = MI.getDebugLoc();
4933 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
4934 .add(Src1);
4935 Src1.ChangeToRegister(Reg, false);
4936 }
4937 return;
4938 }
4939
4940 // No VOP2 instructions support AGPRs.
4941 if (Src0.isReg() && RI.isAGPR(MRI, Src0.getReg()))
4942 legalizeOpWithMove(MI, Src0Idx);
4943
4944 if (Src1.isReg() && RI.isAGPR(MRI, Src1.getReg()))
4945 legalizeOpWithMove(MI, Src1Idx);
4946
4947 // VOP2 instructions support all operand types for src0, so we don't need to
4948 // check its legality. If src1 is already legal, we don't need to do anything.
4949 if (isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src1))
4950 return;
4951
4952 // Special case: V_READLANE_B32 accepts only immediate or SGPR operands for
4953 // lane select. Fix up using V_READFIRSTLANE, since we assume that the lane
4954 // select is uniform.
4955 if (Opc == AMDGPU::V_READLANE_B32 && Src1.isReg() &&
4956 RI.isVGPR(MRI, Src1.getReg())) {
4957 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
4958 const DebugLoc &DL = MI.getDebugLoc();
4959 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
4960 .add(Src1);
4961 Src1.ChangeToRegister(Reg, false);
4962 return;
4963 }
4964
4965 // We do not use commuteInstruction here because it is too aggressive and will
4966 // commute if it is possible. We only want to commute here if it improves
4967 // legality. This can be called a fairly large number of times so don't waste
4968 // compile time pointlessly swapping and checking legality again.
4969 if (HasImplicitSGPR || !MI.isCommutable()) {
4970 legalizeOpWithMove(MI, Src1Idx);
4971 return;
4972 }
4973
4974 // If src0 can be used as src1, commuting will make the operands legal.
4975 // Otherwise we have to give up and insert a move.
4976 //
4977 // TODO: Other immediate-like operand kinds could be commuted if there was a
4978 // MachineOperand::ChangeTo* for them.
4979 if ((!Src1.isImm() && !Src1.isReg()) ||
4980 !isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src0)) {
4981 legalizeOpWithMove(MI, Src1Idx);
4982 return;
4983 }
4984
4985 int CommutedOpc = commuteOpcode(MI);
4986 if (CommutedOpc == -1) {
4987 legalizeOpWithMove(MI, Src1Idx);
4988 return;
4989 }
4990
4991 MI.setDesc(get(CommutedOpc));
4992
4993 Register Src0Reg = Src0.getReg();
4994 unsigned Src0SubReg = Src0.getSubReg();
4995 bool Src0Kill = Src0.isKill();
4996
4997 if (Src1.isImm())
4998 Src0.ChangeToImmediate(Src1.getImm());
4999 else if (Src1.isReg()) {
5000 Src0.ChangeToRegister(Src1.getReg(), false, false, Src1.isKill());
5001 Src0.setSubReg(Src1.getSubReg());
5002 } else
5003 llvm_unreachable("Should only have register or immediate operands");
5004
5005 Src1.ChangeToRegister(Src0Reg, false, false, Src0Kill);
5006 Src1.setSubReg(Src0SubReg);
5007 fixImplicitOperands(MI);
5008}
5009
5010 // Legalize VOP3 operands. All operand types are supported for any operand,
5011 // but only one literal constant is allowed, and only starting from GFX10.
5012void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI,
5013 MachineInstr &MI) const {
5014 unsigned Opc = MI.getOpcode();
5015
5016 int VOP3Idx[3] = {
5017 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0),
5018 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1),
5019 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)
5020 };
5021
5022 if (Opc == AMDGPU::V_PERMLANE16_B32_e64 ||
5023 Opc == AMDGPU::V_PERMLANEX16_B32_e64) {
5024 // src1 and src2 must be scalar
5025 MachineOperand &Src1 = MI.getOperand(VOP3Idx[1]);
5026 MachineOperand &Src2 = MI.getOperand(VOP3Idx[2]);
5027 const DebugLoc &DL = MI.getDebugLoc();
5028 if (Src1.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src1.getReg()))) {
5029 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
5030 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
5031 .add(Src1);
5032 Src1.ChangeToRegister(Reg, false);
5033 }
5034 if (Src2.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src2.getReg()))) {
5035 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
5036 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
5037 .add(Src2);
5038 Src2.ChangeToRegister(Reg, false);
5039 }
5040 }
5041
5042 // Find the one SGPR operand we are allowed to use.
5043 int ConstantBusLimit = ST.getConstantBusLimit(Opc);
5044 int LiteralLimit = ST.hasVOP3Literal() ? 1 : 0;
5045 SmallDenseSet<unsigned> SGPRsUsed;
5046 Register SGPRReg = findUsedSGPR(MI, VOP3Idx);
5047 if (SGPRReg != AMDGPU::NoRegister) {
5048 SGPRsUsed.insert(SGPRReg);
5049 --ConstantBusLimit;
5050 }
5051
5052 for (int Idx : VOP3Idx) {
5053 if (Idx == -1)
5054 break;
5055 MachineOperand &MO = MI.getOperand(Idx);
5056
5057 if (!MO.isReg()) {
5058 if (!isLiteralConstantLike(MO, get(Opc).OpInfo[Idx]))
5059 continue;
5060
5061 if (LiteralLimit > 0 && ConstantBusLimit > 0) {
5062 --LiteralLimit;
5063 --ConstantBusLimit;
5064 continue;
5065 }
5066
5067 --LiteralLimit;
5068 --ConstantBusLimit;
5069 legalizeOpWithMove(MI, Idx);
5070 continue;
5071 }
5072
5073 if (RI.hasAGPRs(RI.getRegClassForReg(MRI, MO.getReg())) &&
5074 !isOperandLegal(MI, Idx, &MO)) {
5075 legalizeOpWithMove(MI, Idx);
5076 continue;
5077 }
5078
5079 if (!RI.isSGPRClass(RI.getRegClassForReg(MRI, MO.getReg())))
5080 continue; // VGPRs are legal
5081
5082 // We can use one SGPR in each VOP3 instruction prior to GFX10
5083 // and two starting from GFX10.
5084 if (SGPRsUsed.count(MO.getReg()))
5085 continue;
5086 if (ConstantBusLimit > 0) {
5087 SGPRsUsed.insert(MO.getReg());
5088 --ConstantBusLimit;
5089 continue;
5090 }
5091
5092 // If we make it this far, then the operand is not legal and we must
5093 // legalize it.
5094 legalizeOpWithMove(MI, Idx);
5095 }
5096}
5097
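// Copy the value of a VGPR (or AGPR) into a freshly created SGPR of the
// equivalent class by emitting one V_READFIRSTLANE_B32 per 32-bit
// sub-register and reassembling the pieces with a REG_SEQUENCE.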
5098Register SIInstrInfo::readlaneVGPRToSGPR(Register SrcReg, MachineInstr &UseMI,
5099 MachineRegisterInfo &MRI) const {
5100 const TargetRegisterClass *VRC = MRI.getRegClass(SrcReg);
5101 const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC);
5102 Register DstReg = MRI.createVirtualRegister(SRC);
5103 unsigned SubRegs = RI.getRegSizeInBits(*VRC) / 32;
5104
5105 if (RI.hasAGPRs(VRC)) {
5106 VRC = RI.getEquivalentVGPRClass(VRC);
5107 Register NewSrcReg = MRI.createVirtualRegister(VRC);
5108 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
5109 get(TargetOpcode::COPY), NewSrcReg)
5110 .addReg(SrcReg);
5111 SrcReg = NewSrcReg;
5112 }
5113
5114 if (SubRegs == 1) {
5115 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
5116 get(AMDGPU::V_READFIRSTLANE_B32), DstReg)
5117 .addReg(SrcReg);
5118 return DstReg;
5119 }
5120
5121 SmallVector<unsigned, 8> SRegs;
5122 for (unsigned i = 0; i < SubRegs; ++i) {
5123 Register SGPR = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
5124 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
5125 get(AMDGPU::V_READFIRSTLANE_B32), SGPR)
5126 .addReg(SrcReg, 0, RI.getSubRegFromChannel(i));
5127 SRegs.push_back(SGPR);
5128 }
5129
5130 MachineInstrBuilder MIB =
5131 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
5132 get(AMDGPU::REG_SEQUENCE), DstReg);
5133 for (unsigned i = 0; i < SubRegs; ++i) {
5134 MIB.addReg(SRegs[i]);
5135 MIB.addImm(RI.getSubRegFromChannel(i));
5136 }
5137 return DstReg;
5138}
5139
5140void SIInstrInfo::legalizeOperandsSMRD(MachineRegisterInfo &MRI,
5141 MachineInstr &MI) const {
5142
5143 // If the pointer is stored in VGPRs, then we need to move it to
5144 // SGPRs using v_readfirstlane. This is safe because we only select
5145 // loads with uniform pointers to SMRD instructions, so we know the
5146 // pointer value is uniform.
5147 MachineOperand *SBase = getNamedOperand(MI, AMDGPU::OpName::sbase);
5148 if (SBase && !RI.isSGPRClass(MRI.getRegClass(SBase->getReg()))) {
5149 Register SGPR = readlaneVGPRToSGPR(SBase->getReg(), MI, MRI);
5150 SBase->setReg(SGPR);
5151 }
5152 MachineOperand *SOff = getNamedOperand(MI, AMDGPU::OpName::soff);
5153 if (SOff && !RI.isSGPRClass(MRI.getRegClass(SOff->getReg()))) {
5154 Register SGPR = readlaneVGPRToSGPR(SOff->getReg(), MI, MRI);
5155 SOff->setReg(SGPR);
5156 }
5157}
5158
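// Try to rewrite a segment-specific FLAT instruction whose saddr operand is
// not an SGPR into the vaddr form of the same opcode, moving the pointer from
// saddr to vaddr and erasing a now-unused zero vaddr definition. Returns false
// if no suitable opcode exists or the rewrite is not possible.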
5159bool SIInstrInfo::moveFlatAddrToVGPR(MachineInstr &Inst) const {
5160 unsigned Opc = Inst.getOpcode();
5161 int OldSAddrIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::saddr);
5162 if (OldSAddrIdx < 0)
5163 return false;
5164
5165 assert(isSegmentSpecificFLAT(Inst));
5166
5167 int NewOpc = AMDGPU::getGlobalVaddrOp(Opc);
5168 if (NewOpc < 0)
5169 NewOpc = AMDGPU::getFlatScratchInstSVfromSS(Opc);
5170 if (NewOpc < 0)
5171 return false;
5172
5173 MachineRegisterInfo &MRI = Inst.getMF()->getRegInfo();
5174 MachineOperand &SAddr = Inst.getOperand(OldSAddrIdx);
5175 if (RI.isSGPRReg(MRI, SAddr.getReg()))
5176 return false;
5177
5178 int NewVAddrIdx = AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::vaddr);
5179 if (NewVAddrIdx < 0)
5180 return false;
5181
5182 int OldVAddrIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr);
5183
5184 // Check vaddr, it shall be zero or absent.
5185 MachineInstr *VAddrDef = nullptr;
5186 if (OldVAddrIdx >= 0) {
5187 MachineOperand &VAddr = Inst.getOperand(OldVAddrIdx);
5188 VAddrDef = MRI.getUniqueVRegDef(VAddr.getReg());
5189 if (!VAddrDef || VAddrDef->getOpcode() != AMDGPU::V_MOV_B32_e32 ||
5190 !VAddrDef->getOperand(1).isImm() ||
5191 VAddrDef->getOperand(1).getImm() != 0)
5192 return false;
5193 }
5194
5195 const MCInstrDesc &NewDesc = get(NewOpc);
5196 Inst.setDesc(NewDesc);
5197
5198 // Callers expect the iterator to be valid after this call, so modify the
5199 // instruction in place.
5200 if (OldVAddrIdx == NewVAddrIdx) {
5201 MachineOperand &NewVAddr = Inst.getOperand(NewVAddrIdx);
5202 // Clear use list from the old vaddr holding a zero register.
5203 MRI.removeRegOperandFromUseList(&NewVAddr);
5204 MRI.moveOperands(&NewVAddr, &SAddr, 1);
5205 Inst.RemoveOperand(OldSAddrIdx);
5207 // Update the use list with the pointer we have just moved from vaddr to
5208 // the saddr position. Otherwise the new vaddr will be missing from the use list.
5208 MRI.removeRegOperandFromUseList(&NewVAddr);
5209 MRI.addRegOperandToUseList(&NewVAddr);
5210 } else {
5211 assert(OldSAddrIdx == NewVAddrIdx);
5212
5213 if (OldVAddrIdx >= 0) {
5214 int NewVDstIn = AMDGPU::getNamedOperandIdx(NewOpc,
5215 AMDGPU::OpName::vdst_in);
5216
5217 // RemoveOperand doesn't try to fix up tied operand indexes as it goes, so
5218 // it asserts. Untie the operands for now and retie them afterwards.
5219 if (NewVDstIn != -1) {
5220 int OldVDstIn = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst_in);
5221 Inst.untieRegOperand(OldVDstIn);
5222 }
5223
5224 Inst.RemoveOperand(OldVAddrIdx);
5225
5226 if (NewVDstIn != -1) {
5227 int NewVDst = AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::vdst);
5228 Inst.tieOperands(NewVDst, NewVDstIn);
5229 }
5230 }
5231 }
5232
5233 if (VAddrDef && MRI.use_nodbg_empty(VAddrDef->getOperand(0).getReg()))
5234 VAddrDef->eraseFromParent();
5235
5236 return true;
5237}
5238
5239// FIXME: Remove this when SelectionDAG is obsoleted.
5240void SIInstrInfo::legalizeOperandsFLAT(MachineRegisterInfo &MRI,
5241 MachineInstr &MI) const {
5242 if (!isSegmentSpecificFLAT(MI))
5243 return;
5244
5245 // Fix up SGPR operands in VGPRs. We only select these when the DAG divergence
5246 // analysis thinks they are uniform, so a readfirstlane should be valid.
5247 MachineOperand *SAddr = getNamedOperand(MI, AMDGPU::OpName::saddr);
5248 if (!SAddr || RI.isSGPRClass(MRI.getRegClass(SAddr->getReg())))
5249 return;
5250
5251 if (moveFlatAddrToVGPR(MI))
5252 return;
5253
5254 Register ToSGPR = readlaneVGPRToSGPR(SAddr->getReg(), MI, MRI);
5255 SAddr->setReg(ToSGPR);
5256}
5257
5258void SIInstrInfo::legalizeGenericOperand(MachineBasicBlock &InsertMBB,
5259 MachineBasicBlock::iterator I,
5260 const TargetRegisterClass *DstRC,
5261 MachineOperand &Op,
5262 MachineRegisterInfo &MRI,
5263 const DebugLoc &DL) const {
5264 Register OpReg = Op.getReg();
5265 unsigned OpSubReg = Op.getSubReg();
5266
5267 const TargetRegisterClass *OpRC = RI.getSubClassWithSubReg(
5268 RI.getRegClassForReg(MRI, OpReg), OpSubReg);
5269
5270 // Check if operand is already the correct register class.
5271 if (DstRC == OpRC)
5272 return;
5273
5274 Register DstReg = MRI.createVirtualRegister(DstRC);
5275 auto Copy = BuildMI(InsertMBB, I, DL, get(AMDGPU::COPY), DstReg).add(Op);
5276
5277 Op.setReg(DstReg);
5278 Op.setSubReg(0);
5279
5280 MachineInstr *Def = MRI.getVRegDef(OpReg);
5281 if (!Def)
5282 return;
5283
5284 // Try to eliminate the copy if it is copying an immediate value.
5285 if (Def->isMoveImmediate() && DstRC != &AMDGPU::VReg_1RegClass)
5286 FoldImmediate(*Copy, *Def, OpReg, &MRI);
5287
5288 bool ImpDef = Def->isImplicitDef();
5289 while (!ImpDef && Def && Def->isCopy()) {
5290 if (Def->getOperand(1).getReg().isPhysical())
5291 break;
5292 Def = MRI.getUniqueVRegDef(Def->getOperand(1).getReg());
5293 ImpDef = Def && Def->isImplicitDef();
5294 }
5295 if (!RI.isSGPRClass(DstRC) && !Copy->readsRegister(AMDGPU::EXEC, &RI) &&
5296 !ImpDef)
5297 Copy.addReg(AMDGPU::EXEC, RegState::Implicit);
5298}
5299
5300// Emit the actual waterfall loop, executing the wrapped instruction for each
5301// unique value of \p Rsrc across all lanes. In the best case we execute 1
5302// iteration, in the worst case we execute 64 (once per lane).
5303static void
5304emitLoadSRsrcFromVGPRLoop(const SIInstrInfo &TII, MachineRegisterInfo &MRI,
5305 MachineBasicBlock &OrigBB, MachineBasicBlock &LoopBB,
5306 const DebugLoc &DL, MachineOperand &Rsrc) {
5307 MachineFunction &MF = *OrigBB.getParent();
5308 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
5309 const SIRegisterInfo *TRI = ST.getRegisterInfo();
5310 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
5311 unsigned SaveExecOpc =
5312 ST.isWave32() ? AMDGPU::S_AND_SAVEEXEC_B32 : AMDGPU::S_AND_SAVEEXEC_B64;
5313 unsigned XorTermOpc =
5314 ST.isWave32() ? AMDGPU::S_XOR_B32_term : AMDGPU::S_XOR_B64_term;
5315 unsigned AndOpc =
5316 ST.isWave32() ? AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64;
5317 const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
5318
5319 MachineBasicBlock::iterator I = LoopBB.begin();
5320
5321 SmallVector<Register, 8> ReadlanePieces;
5322 Register CondReg = AMDGPU::NoRegister;
5323
5324 Register VRsrc = Rsrc.getReg();
5325 unsigned VRsrcUndef = getUndefRegState(Rsrc.isUndef());
5326
5327 unsigned RegSize = TRI->getRegSizeInBits(Rsrc.getReg(), MRI);
5328 unsigned NumSubRegs = RegSize / 32;
5329 assert(NumSubRegs % 2 == 0 && NumSubRegs <= 32 && "Unhandled register size");
5330
5331 for (unsigned Idx = 0; Idx < NumSubRegs; Idx += 2) {
5332
5333 Register CurRegLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
5334 Register CurRegHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
5335
5336 // Read the next variant <- also loop target.
5337 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), CurRegLo)
5338 .addReg(VRsrc, VRsrcUndef, TRI->getSubRegFromChannel(Idx));
5339
5340 // Read the next variant <- also loop target.
5341 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), CurRegHi)
5342 .addReg(VRsrc, VRsrcUndef, TRI->getSubRegFromChannel(Idx + 1));
5343
5344 ReadlanePieces.push_back(CurRegLo);
5345 ReadlanePieces.push_back(CurRegHi);
5346
5347 // Comparison is to be done as 64-bit.
5348 Register CurReg = MRI.createVirtualRegister(&AMDGPU::SGPR_64RegClass);
5349 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), CurReg)
5350 .addReg(CurRegLo)
5351 .addImm(AMDGPU::sub0)
5352 .addReg(CurRegHi)
5353 .addImm(AMDGPU::sub1);
5354
5355 Register NewCondReg = MRI.createVirtualRegister(BoolXExecRC);
5356 auto Cmp =
5357 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U64_e64), NewCondReg)
5358 .addReg(CurReg);
5359 if (NumSubRegs <= 2)
5360 Cmp.addReg(VRsrc);
5361 else
5362 Cmp.addReg(VRsrc, VRsrcUndef, TRI->getSubRegFromChannel(Idx, 2));
5363
5364 // Combine the comparison results with AND.
5365 if (CondReg == AMDGPU::NoRegister) // First.
5366 CondReg = NewCondReg;
5367 else { // If not the first, we create an AND.
5368 Register AndReg = MRI.createVirtualRegister(BoolXExecRC);
5369 BuildMI(LoopBB, I, DL, TII.get(AndOpc), AndReg)
5370 .addReg(CondReg)
5371 .addReg(NewCondReg);
5372 CondReg = AndReg;
5373 }
5374 } // End for loop.
5375
5376 auto SRsrcRC = TRI->getEquivalentSGPRClass(MRI.getRegClass(VRsrc));
5377 Register SRsrc = MRI.createVirtualRegister(SRsrcRC);
5378
5379 // Build scalar Rsrc.
5380 auto Merge = BuildMI(LoopBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), SRsrc);
5381 unsigned Channel = 0;
5382 for (Register Piece : ReadlanePieces) {
5383 Merge.addReg(Piece)
5384 .addImm(TRI->getSubRegFromChannel(Channel++));
5385 }
5386
5387 // Update Rsrc operand to use the SGPR Rsrc.
5388 Rsrc.setReg(SRsrc);
5389 Rsrc.setIsKill(true);
5390
5391 Register SaveExec = MRI.createVirtualRegister(BoolXExecRC);
5392 MRI.setSimpleHint(SaveExec, CondReg);
5393
5394 // Update EXEC to matching lanes, saving original to SaveExec.
5395 BuildMI(LoopBB, I, DL, TII.get(SaveExecOpc), SaveExec)
5396 .addReg(CondReg, RegState::Kill);
5397
5398 // The original instruction is here; we insert the terminators after it.
5399 I = LoopBB.end();
5400
5401 // Update EXEC, switch all done bits to 0 and all todo bits to 1.
5402 BuildMI(LoopBB, I, DL, TII.get(XorTermOpc), Exec)
5403 .addReg(Exec)
5404 .addReg(SaveExec);
5405
5406 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::SI_WATERFALL_LOOP)).addMBB(&LoopBB);
5407}
5408
5409// Build a waterfall loop around \p MI, replacing the VGPR \p Rsrc register
5410// with SGPRs by iterating over all unique values across all lanes.
5411// Returns the loop basic block that now contains \p MI.
5412static MachineBasicBlock *
5413loadSRsrcFromVGPR(const SIInstrInfo &TII, MachineInstr &MI,
5414 MachineOperand &Rsrc, MachineDominatorTree *MDT,
5415 MachineBasicBlock::iterator Begin = nullptr,
5416 MachineBasicBlock::iterator End = nullptr) {
5417 MachineBasicBlock &MBB = *MI.getParent();
5418 MachineFunction &MF = *MBB.getParent();
5419 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
5420 const SIRegisterInfo *TRI = ST.getRegisterInfo();
5421 MachineRegisterInfo &MRI = MF.getRegInfo();
5422 if (!Begin.isValid())
5423 Begin = &MI;
5424 if (!End.isValid()) {
5425 End = &MI;
5426 ++End;
5427 }
5428 const DebugLoc &DL = MI.getDebugLoc();
5429 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
5430 unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
5431 const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
5432
5433 Register SaveExec = MRI.createVirtualRegister(BoolXExecRC);
5434
5435 // Save the EXEC mask
5436 BuildMI(MBB, Begin, DL, TII.get(MovExecOpc), SaveExec).addReg(Exec);
5437
5438 // Killed uses in the instruction we are waterfalling around will be
5439 // incorrect due to the added control-flow.
5440 MachineBasicBlock::iterator AfterMI = MI;
5441 ++AfterMI;
5442 for (auto I = Begin; I != AfterMI; I++) {
5443 for (auto &MO : I->uses()) {
5444 if (MO.isReg() && MO.isUse()) {
5445 MRI.clearKillFlags(MO.getReg());
5446 }
5447 }
5448 }
5449
5450 // To insert the loop we need to split the block. Move everything after this
5451 // point to a new block, and insert a new empty block between the two.
5452 MachineBasicBlock *LoopBB = MF.CreateMachineBasicBlock();
5453 MachineBasicBlock *RemainderBB = MF.CreateMachineBasicBlock();
5454 MachineFunction::iterator MBBI(MBB);
5455 ++MBBI;
5456
5457 MF.insert(MBBI, LoopBB);
5458 MF.insert(MBBI, RemainderBB);
5459
5460 LoopBB->addSuccessor(LoopBB);
5461 LoopBB->addSuccessor(RemainderBB);
5462
5463 // Move the range from Begin to MI into the LoopBB, and the remainder of the
5464 // block to RemainderBB.
5465 RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
5466 RemainderBB->splice(RemainderBB->begin(), &MBB, End, MBB.end());
5467 LoopBB->splice(LoopBB->begin(), &MBB, Begin, MBB.end());
5468
5469 MBB.addSuccessor(LoopBB);
5470
5471 // Update dominators. We know that MBB immediately dominates LoopBB, that
5472 // LoopBB immediately dominates RemainderBB, and that RemainderBB immediately
5473 // dominates all of the successors transferred to it from MBB that MBB used
5474 // to properly dominate.
5475 if (MDT) {
5476 MDT->addNewBlock(LoopBB, &MBB);
5477 MDT->addNewBlock(RemainderBB, LoopBB);
5478 for (auto &Succ : RemainderBB->successors()) {
5479 if (MDT->properlyDominates(&MBB, Succ)) {
5480 MDT->changeImmediateDominator(Succ, RemainderBB);
5481 }
5482 }
5483 }
5484
5485 emitLoadSRsrcFromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, Rsrc);
5486
5487 // Restore the EXEC mask
5488 MachineBasicBlock::iterator First = RemainderBB->begin();
5489 BuildMI(*RemainderBB, First, DL, TII.get(MovExecOpc), Exec).addReg(SaveExec);
5490 return LoopBB;
5491}
5492
5493// Extract pointer from Rsrc and return a zero-value Rsrc replacement.
5494static std::tuple<unsigned, unsigned>
5495extractRsrcPtr(const SIInstrInfo &TII, MachineInstr &MI, MachineOperand &Rsrc) {
5496 MachineBasicBlock &MBB = *MI.getParent();
5497 MachineFunction &MF = *MBB.getParent();
5498 MachineRegisterInfo &MRI = MF.getRegInfo();
5499
5500 // Extract the ptr from the resource descriptor.
5501 unsigned RsrcPtr =
5502 TII.buildExtractSubReg(MI, MRI, Rsrc, &AMDGPU::VReg_128RegClass,
5503 AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass);
5504
5505 // Create an empty resource descriptor
5506 Register Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
5507 Register SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
5508 Register SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
5509 Register NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
5510 uint64_t RsrcDataFormat = TII.getDefaultRsrcDataFormat();
5511
5512 // Zero64 = 0
5513 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B64), Zero64)
5514 .addImm(0);
5515
5516 // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
5517 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatLo)
5518 .addImm(RsrcDataFormat & 0xFFFFFFFF);
5519
5520 // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
5521 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatHi)
5522 .addImm(RsrcDataFormat >> 32);
5523
5524 // NewSRsrc = {Zero64, SRsrcFormat}
5525 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::REG_SEQUENCE), NewSRsrc)
5526 .addReg(Zero64)
5527 .addImm(AMDGPU::sub0_sub1)
5528 .addReg(SRsrcFormatLo)
5529 .addImm(AMDGPU::sub2)
5530 .addReg(SRsrcFormatHi)
5531 .addImm(AMDGPU::sub3);
5532
5533 return std::make_tuple(RsrcPtr, NewSRsrc);
5534}
5535
5536MachineBasicBlock *
5537SIInstrInfo::legalizeOperands(MachineInstr &MI,
5538 MachineDominatorTree *MDT) const {
5539 MachineFunction &MF = *MI.getParent()->getParent();
5540 MachineRegisterInfo &MRI = MF.getRegInfo();
5541 MachineBasicBlock *CreatedBB = nullptr;
5542
5543 // Legalize VOP2
5544 if (isVOP2(MI) || isVOPC(MI)) {
5545 legalizeOperandsVOP2(MRI, MI);
5546 return CreatedBB;
5547 }
5548
5549 // Legalize VOP3
5550 if (isVOP3(MI)) {
5551 legalizeOperandsVOP3(MRI, MI);
5552 return CreatedBB;
5553 }
5554
5555 // Legalize SMRD
5556 if (isSMRD(MI)) {
5557 legalizeOperandsSMRD(MRI, MI);
5558 return CreatedBB;
5559 }
5560
5561 // Legalize FLAT
5562 if (isFLAT(MI)) {
5563 legalizeOperandsFLAT(MRI, MI);
5564 return CreatedBB;
5565 }
5566
5567 // Legalize REG_SEQUENCE and PHI
5568 // The register class of the operands must be the same type as the register
5569 // class of the output.
5570 if (MI.getOpcode() == AMDGPU::PHI) {
5571 const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr;
5572 for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
5573 if (!MI.getOperand(i).isReg() || !MI.getOperand(i).getReg().isVirtual())
5574 continue;
5575 const TargetRegisterClass *OpRC =
5576 MRI.getRegClass(MI.getOperand(i).getReg());
5577 if (RI.hasVectorRegisters(OpRC)) {
5578 VRC = OpRC;
5579 } else {
5580 SRC = OpRC;
5581 }
5582 }
5583
5584 // If any of the operands are VGPR registers, then they all must be;
5585 // otherwise we will create illegal VGPR->SGPR copies when legalizing
5586 // them.
5587 if (VRC || !RI.isSGPRClass(getOpRegClass(MI, 0))) {
5588 if (!VRC) {
5589 assert(SRC);
5590 if (getOpRegClass(MI, 0) == &AMDGPU::VReg_1RegClass) {
5591 VRC = &AMDGPU::VReg_1RegClass;
5592 } else
5593 VRC = RI.isAGPRClass(getOpRegClass(MI, 0))
5594 ? RI.getEquivalentAGPRClass(SRC)
5595 : RI.getEquivalentVGPRClass(SRC);
5596 } else {
5597 VRC = RI.isAGPRClass(getOpRegClass(MI, 0))
5598 ? RI.getEquivalentAGPRClass(VRC)
5599 : RI.getEquivalentVGPRClass(VRC);
5600 }
5601 RC = VRC;
5602 } else {
5603 RC = SRC;
5604 }
5605
5606 // Update all the operands so they have the same type.
5607 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
5608 MachineOperand &Op = MI.getOperand(I);
5609 if (!Op.isReg() || !Op.getReg().isVirtual())
5610 continue;
5611
5612 // MI is a PHI instruction.
5613 MachineBasicBlock *InsertBB = MI.getOperand(I + 1).getMBB();
5614 MachineBasicBlock::iterator Insert = InsertBB->getFirstTerminator();
5615
5616 // Avoid creating no-op copies with the same src and dst reg class. These
5617 // confuse some of the machine passes.
5618 legalizeGenericOperand(*InsertBB, Insert, RC, Op, MRI, MI.getDebugLoc());
5619 }
5620 }
5621
5622 // REG_SEQUENCE doesn't really require operand legalization, but if one has a
5623 // VGPR dest type and SGPR sources, insert copies so all operands are
5624 // VGPRs. This seems to help operand folding / the register coalescer.
5625 if (MI.getOpcode() == AMDGPU::REG_SEQUENCE) {
5626 MachineBasicBlock *MBB = MI.getParent();
5627 const TargetRegisterClass *DstRC = getOpRegClass(MI, 0);
5628 if (RI.hasVGPRs(DstRC)) {
5629 // Update all the operands so they are VGPR register classes. These may
5630 // not be the same register class because REG_SEQUENCE supports mixing
5631 // subregister index types e.g. sub0_sub1 + sub2 + sub3
5632 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
5633 MachineOperand &Op = MI.getOperand(I);
5634 if (!Op.isReg() || !Op.getReg().isVirtual())
5635 continue;
5636
5637 const TargetRegisterClass *OpRC = MRI.getRegClass(Op.getReg());
5638 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(OpRC);
5639 if (VRC == OpRC)
5640 continue;
5641
5642 legalizeGenericOperand(*MBB, MI, VRC, Op, MRI, MI.getDebugLoc());
5643 Op.setIsKill();
5644 }
5645 }
5646
5647 return CreatedBB;
5648 }
5649
5650 // Legalize INSERT_SUBREG
5651 // src0 must have the same register class as dst
5652 if (MI.getOpcode() == AMDGPU::INSERT_SUBREG) {
5653 Register Dst = MI.getOperand(0).getReg();
5654 Register Src0 = MI.getOperand(1).getReg();
5655 const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
5656 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0);
5657 if (DstRC != Src0RC) {
5658 MachineBasicBlock *MBB = MI.getParent();
5659 MachineOperand &Op = MI.getOperand(1);
5660 legalizeGenericOperand(*MBB, MI, DstRC, Op, MRI, MI.getDebugLoc());
5661 }
5662 return CreatedBB;
5663 }
5664
5665 // Legalize SI_INIT_M0
5666 if (MI.getOpcode() == AMDGPU::SI_INIT_M0) {
5667 MachineOperand &Src = MI.getOperand(0);
5668 if (Src.isReg() && RI.hasVectorRegisters(MRI.getRegClass(Src.getReg())))
5669 Src.setReg(readlaneVGPRToSGPR(Src.getReg(), MI, MRI));
5670 return CreatedBB;
5671 }
5672
5673 // Legalize MIMG and MUBUF/MTBUF for shaders.
5674 //
5675 // Shaders only generate MUBUF/MTBUF instructions via intrinsics or via
5676 // scratch memory access. In both cases, the legalization never involves
5677 // conversion to the addr64 form.
5678 if (isMIMG(MI) || (AMDGPU::isGraphics(MF.getFunction().getCallingConv()) &&
5679 (isMUBUF(MI) || isMTBUF(MI)))) {
5680 MachineOperand *SRsrc = getNamedOperand(MI, AMDGPU::OpName::srsrc);
5681 if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg())))
5682 CreatedBB = loadSRsrcFromVGPR(*this, MI, *SRsrc, MDT);
5683
5684 MachineOperand *SSamp = getNamedOperand(MI, AMDGPU::OpName::ssamp);
5685 if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg())))
5686 CreatedBB = loadSRsrcFromVGPR(*this, MI, *SSamp, MDT);
5687
5688 return CreatedBB;
5689 }
5690
5691 // Legalize SI_CALL
5692 if (MI.getOpcode() == AMDGPU::SI_CALL_ISEL) {
5693 MachineOperand *Dest = &MI.getOperand(0);
5694 if (!RI.isSGPRClass(MRI.getRegClass(Dest->getReg()))) {
5695 // Move everything between ADJCALLSTACKUP and ADJCALLSTACKDOWN, as well as
5696 // the following copies, into the loop block; copies from and to physical
5697 // registers also need to be moved there.
5698 unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
5699 unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();
5700
5701 // Also move the copies to physical registers into the loop block
5702 MachineBasicBlock &MBB = *MI.getParent();
5703 MachineBasicBlock::iterator Start(&MI);
5704 while (Start->getOpcode() != FrameSetupOpcode)
5705 --Start;
5706 MachineBasicBlock::iterator End(&MI);
5707 while (End->getOpcode() != FrameDestroyOpcode)
5708 ++End;
5709 // Also include following copies of the return value
5710 ++End;
5711 while (End != MBB.end() && End->isCopy() && End->getOperand(1).isReg() &&
5712 MI.definesRegister(End->getOperand(1).getReg()))
5713 ++End;
5714 CreatedBB = loadSRsrcFromVGPR(*this, MI, *Dest, MDT, Start, End);
5715 }
5716 }
5717
5718 // Legalize MUBUF* instructions.
5719 int RsrcIdx =
5720 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
5721 if (RsrcIdx != -1) {
5722 // We have an MUBUF instruction
5723 MachineOperand *Rsrc = &MI.getOperand(RsrcIdx);
5724 unsigned RsrcRC = get(MI.getOpcode()).OpInfo[RsrcIdx].RegClass;
5725 if (RI.getCommonSubClass(MRI.getRegClass(Rsrc->getReg()),
5726 RI.getRegClass(RsrcRC))) {
5727 // The operands are legal.
5728 // FIXME: We may need to legalize operands besides srsrc.
5729 return CreatedBB;
5730 }
5731
5732 // Legalize a VGPR Rsrc.
5733 //
5734 // If the instruction is _ADDR64, we can avoid a waterfall by extracting
5735 // the base pointer from the VGPR Rsrc, adding it to the VAddr, then using
5736 // a zero-value SRsrc.
5737 //
5738 // If the instruction is _OFFSET (both idxen and offen disabled), and we
5739 // support ADDR64 instructions, we can convert to ADDR64 and do the same as
5740 // above.
5741 //
5742 // Otherwise we are on non-ADDR64 hardware, and/or we have
5743 // idxen/offen/bothen and we fall back to a waterfall loop.
5744
5745 MachineBasicBlock &MBB = *MI.getParent();
5746
5747 MachineOperand *VAddr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
5748 if (VAddr && AMDGPU::getIfAddr64Inst(MI.getOpcode()) != -1) {
5749 // This is already an ADDR64 instruction so we need to add the pointer
5750 // extracted from the resource descriptor to the current value of VAddr.
5751 Register NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5752 Register NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5753 Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
5754
5755 const auto *BoolXExecRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
5756 Register CondReg0 = MRI.createVirtualRegister(BoolXExecRC);
5757 Register CondReg1 = MRI.createVirtualRegister(BoolXExecRC);
5758
5759 unsigned RsrcPtr, NewSRsrc;
5760 std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);
5761
5762 // NewVaddrLo = RsrcPtr:sub0 + VAddr:sub0
5763 const DebugLoc &DL = MI.getDebugLoc();
5764 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_CO_U32_e64), NewVAddrLo)
5765 .addDef(CondReg0)
5766 .addReg(RsrcPtr, 0, AMDGPU::sub0)
5767 .addReg(VAddr->getReg(), 0, AMDGPU::sub0)
5768 .addImm(0);
5769
5770 // NewVaddrHi = RsrcPtr:sub1 + VAddr:sub1
5771 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e64), NewVAddrHi)
5772 .addDef(CondReg1, RegState::Dead)
5773 .addReg(RsrcPtr, 0, AMDGPU::sub1)
5774 .addReg(VAddr->getReg(), 0, AMDGPU::sub1)
5775 .addReg(CondReg0, RegState::Kill)
5776 .addImm(0);
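// Together these form a 64-bit add: the low add writes its carry-out to
// CondReg0, which the high add consumes as carry-in.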
5777
5778 // NewVaddr = {NewVaddrHi, NewVaddrLo}
5779 BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
5780 .addReg(NewVAddrLo)
5781 .addImm(AMDGPU::sub0)
5782 .addReg(NewVAddrHi)
5783 .addImm(AMDGPU::sub1);
5784
5785 VAddr->setReg(NewVAddr);
5786 Rsrc->setReg(NewSRsrc);
5787 } else if (!VAddr && ST.hasAddr64()) {
5788 // This instruction is the _OFFSET variant, so we need to convert it to
5789 // ADDR64.
5790 assert(ST.getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS &&
5791 "FIXME: Need to emit flat atomics here");
5792
5793 unsigned RsrcPtr, NewSRsrc;
5794 std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);
5795
5796 Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
5797 MachineOperand *VData = getNamedOperand(MI, AMDGPU::OpName::vdata);
5798 MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset);
5799 MachineOperand *SOffset = getNamedOperand(MI, AMDGPU::OpName::soffset);
5800 unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI.getOpcode());
5801
5802 // Atomics with return have an additional tied operand and are
5803 // missing some of the special bits.
5804 MachineOperand *VDataIn = getNamedOperand(MI, AMDGPU::OpName::vdata_in);
5805 MachineInstr *Addr64;
5806
5807 if (!VDataIn) {
5808 // Regular buffer load / store.
5809 MachineInstrBuilder MIB =
5810 BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
5811 .add(*VData)
5812 .addReg(NewVAddr)
5813 .addReg(NewSRsrc)
5814 .add(*SOffset)
5815 .add(*Offset);
5816
5817 if (const MachineOperand *CPol =
5818 getNamedOperand(MI, AMDGPU::OpName::cpol)) {
5819 MIB.addImm(CPol->getImm());
5820 }
5821
5822 if (const MachineOperand *TFE =
5823 getNamedOperand(MI, AMDGPU::OpName::tfe)) {
5824 MIB.addImm(TFE->getImm());
5825 }
5826
5827 MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::swz));
5828
5829 MIB.cloneMemRefs(MI);
5830 Addr64 = MIB;
5831 } else {
5832 // Atomics with return.
5833 Addr64 = BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
5834 .add(*VData)
5835 .add(*VDataIn)
5836 .addReg(NewVAddr)
5837 .addReg(NewSRsrc)
5838 .add(*SOffset)
5839 .add(*Offset)
5840 .addImm(getNamedImmOperand(MI, AMDGPU::OpName::cpol))
5841 .cloneMemRefs(MI);
5842 }
5843
5844 MI.removeFromParent();
5845
5846 // NewVaddr = {NewVaddrHi, NewVaddrLo}
5847 BuildMI(MBB, Addr64, Addr64->getDebugLoc(), get(AMDGPU::REG_SEQUENCE),
5848 NewVAddr)
5849 .addReg(RsrcPtr, 0, AMDGPU::sub0)
5850 .addImm(AMDGPU::sub0)
5851 .addReg(RsrcPtr, 0, AMDGPU::sub1)
5852 .addImm(AMDGPU::sub1);
5853 } else {
5854 // This is another variant; legalize Rsrc with waterfall loop from VGPRs
5855 // to SGPRs.
5856 CreatedBB = loadSRsrcFromVGPR(*this, MI, *Rsrc, MDT);
5857 return CreatedBB;
5858 }
5859 }
5860 return CreatedBB;
5861}
5862
5863MachineBasicBlock *SIInstrInfo::moveToVALU(MachineInstr &TopInst,
5864 MachineDominatorTree *MDT) const {
5865 SetVectorType Worklist;
5866 Worklist.insert(&TopInst);
5867 MachineBasicBlock *CreatedBB = nullptr;
5868 MachineBasicBlock *CreatedBBTmp = nullptr;
5869
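// Iteratively pop scalar instructions off the worklist and rewrite each one
// to a VALU equivalent, special-casing opcodes that must be split into 32-bit
// halves or lowered through dedicated helper routines.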
5870 while (!Worklist.empty()) {
5871 MachineInstr &Inst = *Worklist.pop_back_val();
5872 MachineBasicBlock *MBB = Inst.getParent();
5873 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
5874
5875 unsigned Opcode = Inst.getOpcode();
5876 unsigned NewOpcode = getVALUOp(Inst);
5877
5878 // Handle some special cases
5879 switch (Opcode) {
5880 default:
5881 break;
5882 case AMDGPU::S_ADD_U64_PSEUDO:
5883 case AMDGPU::S_SUB_U64_PSEUDO:
5884 splitScalar64BitAddSub(Worklist, Inst, MDT);
5885 Inst.eraseFromParent();
5886 continue;
5887 case AMDGPU::S_ADD_I32:
5888 case AMDGPU::S_SUB_I32: {
5889 // FIXME: The u32 versions currently selected use the carry.
5890 bool Changed;
5891 std::tie(Changed, CreatedBBTmp) = moveScalarAddSub(Worklist, Inst, MDT);
5892 if (CreatedBBTmp && TopInst.getParent() == CreatedBBTmp)
5893 CreatedBB = CreatedBBTmp;
5894 if (Changed)
5895 continue;
5896
5897 // Default handling
5898 break;
5899 }
5900 case AMDGPU::S_AND_B64:
5901 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_AND_B32, MDT);
5902 Inst.eraseFromParent();
5903 continue;
5904
5905 case AMDGPU::S_OR_B64:
5906 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_OR_B32, MDT);
5907 Inst.eraseFromParent();
5908 continue;
5909
5910 case AMDGPU::S_XOR_B64:
5911 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XOR_B32, MDT);
5912 Inst.eraseFromParent();
5913 continue;
5914
5915 case AMDGPU::S_NAND_B64:
5916 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NAND_B32, MDT);
5917 Inst.eraseFromParent();
5918 continue;
5919
5920 case AMDGPU::S_NOR_B64:
5921 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NOR_B32, MDT);
5922 Inst.eraseFromParent();
5923 continue;
5924
5925 case AMDGPU::S_XNOR_B64:
5926 if (ST.hasDLInsts())
5927 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XNOR_B32, MDT);
5928 else
5929 splitScalar64BitXnor(Worklist, Inst, MDT);
5930 Inst.eraseFromParent();
5931 continue;
5932
5933 case AMDGPU::S_ANDN2_B64:
5934 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ANDN2_B32, MDT);
5935 Inst.eraseFromParent();
5936 continue;
5937
5938 case AMDGPU::S_ORN2_B64:
5939 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ORN2_B32, MDT);
5940 Inst.eraseFromParent();
5941 continue;
5942
5943 case AMDGPU::S_BREV_B64:
5944 splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::S_BREV_B32, true);
5945 Inst.eraseFromParent();
5946 continue;
5947
5948 case AMDGPU::S_NOT_B64:
5949 splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::S_NOT_B32);
5950 Inst.eraseFromParent();
5951 continue;
5952
5953 case AMDGPU::S_BCNT1_I32_B64:
5954 splitScalar64BitBCNT(Worklist, Inst);
5955 Inst.eraseFromParent();
5956 continue;
5957
5958 case AMDGPU::S_BFE_I64:
5959 splitScalar64BitBFE(Worklist, Inst);
5960 Inst.eraseFromParent();
5961 continue;
5962
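// The *REV shift variants take the shift amount in src0 and the value in
// src1, so the scalar operands are swapped when only the REV forms exist.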
5963 case AMDGPU::S_LSHL_B32:
5964 if (ST.hasOnlyRevVALUShifts()) {
5965 NewOpcode = AMDGPU::V_LSHLREV_B32_e64;
5966 swapOperands(Inst);
5967 }
5968 break;
5969 case AMDGPU::S_ASHR_I32:
5970 if (ST.hasOnlyRevVALUShifts()) {
5971 NewOpcode = AMDGPU::V_ASHRREV_I32_e64;
5972 swapOperands(Inst);
5973 }
5974 break;
5975 case AMDGPU::S_LSHR_B32:
5976 if (ST.hasOnlyRevVALUShifts()) {
5977 NewOpcode = AMDGPU::V_LSHRREV_B32_e64;
5978 swapOperands(Inst);
5979 }
5980 break;
5981 case AMDGPU::S_LSHL_B64:
5982 if (ST.hasOnlyRevVALUShifts()) {
5983 NewOpcode = AMDGPU::V_LSHLREV_B64_e64;
5984 swapOperands(Inst);
5985 }
5986 break;
5987 case AMDGPU::S_ASHR_I64:
5988 if (ST.hasOnlyRevVALUShifts()) {
5989 NewOpcode = AMDGPU::V_ASHRREV_I64_e64;
5990 swapOperands(Inst);
5991 }
5992 break;
5993 case AMDGPU::S_LSHR_B64:
5994 if (ST.hasOnlyRevVALUShifts()) {
5995 NewOpcode = AMDGPU::V_LSHRREV_B64_e64;
5996 swapOperands(Inst);
5997 }
5998 break;
5999
6000 case AMDGPU::S_ABS_I32:
6001 lowerScalarAbs(Worklist, Inst);
6002 Inst.eraseFromParent();
6003 continue;
6004
6005 case AMDGPU::S_CBRANCH_SCC0:
6006 case AMDGPU::S_CBRANCH_SCC1: {
6007 // Clear unused bits of vcc
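// (the condition is ANDed with EXEC into VCC so that bits belonging to
// inactive lanes cannot spuriously affect the branch)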
6008 Register CondReg = Inst.getOperand(1).getReg();
6009 bool IsSCC = CondReg == AMDGPU::SCC;
6010 Register VCC = RI.getVCC();
6011 Register EXEC = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
6012 unsigned Opc = ST.isWave32() ? AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64;
6013 BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(Opc), VCC)
6014 .addReg(EXEC)
6015 .addReg(IsSCC ? VCC : CondReg);
6016 Inst.RemoveOperand(1);
6017 }
6018 break;
6019
6020 case AMDGPU::S_BFE_U64:
6021 case AMDGPU::S_BFM_B64:
6022 llvm_unreachable("Moving this op to VALU not implemented");
6023
6024 case AMDGPU::S_PACK_LL_B32_B16:
6025 case AMDGPU::S_PACK_LH_B32_B16:
6026 case AMDGPU::S_PACK_HH_B32_B16:
6027 movePackToVALU(Worklist, MRI, Inst);
6028 Inst.eraseFromParent();
6029 continue;
6030
6031 case AMDGPU::S_XNOR_B32:
6032 lowerScalarXnor(Worklist, Inst);
6033 Inst.eraseFromParent();
6034 continue;
6035
6036 case AMDGPU::S_NAND_B32:
6037 splitScalarNotBinop(Worklist, Inst, AMDGPU::S_AND_B32);
6038 Inst.eraseFromParent();
6039 continue;
6040
6041 case