Bug Summary

File: llvm/lib/Target/AMDGPU/R600InstrInfo.cpp
Warning: line 547, column 10
3rd function call argument is an uninitialized value
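
The event path annotated below can be summarized as follows: 'ConstCount' is declared at line 522 without an initial value, and it is only ever written through the ExtractSrcs call inside the loop at line 524; if that loop runs zero times and isLastAluTrans is true, the value passed as the 3rd argument of isConstCompatible at line 547 is indeterminate. The sketch below reproduces the shape of that path together with one possible remedy (initializing the local at its declaration). It is an illustrative stand-in, not code from this file, and every name except ConstCount is hypothetical.

    #include <cstddef>
    #include <vector>

    // Stand-in for isConstCompatible: it only reads the count it is handed.
    static bool checkConstCountSketch(unsigned ConstCount) { return ConstCount <= 2; }

    // Shape of the flagged pattern: the local is written only inside a loop that
    // may execute zero times, then passed by value to another function.
    bool fitsSketch(const std::vector<int> &IG, bool IsLastAluTrans) {
      unsigned ConstCount = 0; // remedy sketch: give the local a definite value up front
      for (std::size_t i = 0, e = IG.size(); i < e; ++i)
        ConstCount += static_cast<unsigned>(IG[i] > 127); // stand-in for ExtractSrcs updating ConstCount
      if (!IsLastAluTrans)
        return true;
      // Without the initializer above, this read would be indeterminate whenever IG is empty.
      return checkConstCountSketch(ConstCount);
    }

An equivalent remedy, still only a sketch, would be to skip the isConstCompatible call when the instruction group is empty; either way the value read at line 547 is no longer uninitialized.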

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name R600InstrInfo.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/build-llvm/lib/Target/AMDGPU -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/build-llvm/lib/Target/AMDGPU -I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AMDGPU -I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/build-llvm/include -I /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/include -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/build-llvm/lib/Target/AMDGPU -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-08-28-193554-24367-1 -x c++ /build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp
1//===-- R600InstrInfo.cpp - R600 Instruction Information ------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// R600 Implementation of TargetInstrInfo.
11//
12//===----------------------------------------------------------------------===//
13
14#include "R600InstrInfo.h"
15#include "AMDGPU.h"
16#include "MCTargetDesc/R600MCTargetDesc.h"
17#include "R600.h"
18#include "R600Defines.h"
19#include "R600Subtarget.h"
20#include "llvm/ADT/SmallSet.h"
21
22using namespace llvm;
23
24#define GET_INSTRINFO_CTOR_DTOR
25#include "R600GenDFAPacketizer.inc"
26
27#define GET_INSTRINFO_CTOR_DTOR
28#define GET_INSTRMAP_INFO
29#define GET_INSTRINFO_NAMED_OPS
30#include "R600GenInstrInfo.inc"
31
32R600InstrInfo::R600InstrInfo(const R600Subtarget &ST)
33 : R600GenInstrInfo(-1, -1), RI(), ST(ST) {}
34
35bool R600InstrInfo::isVector(const MachineInstr &MI) const {
36 return get(MI.getOpcode()).TSFlags & R600_InstFlag::VECTOR;
37}
38
39void R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
40 MachineBasicBlock::iterator MI,
41 const DebugLoc &DL, MCRegister DestReg,
42 MCRegister SrcReg, bool KillSrc) const {
43 unsigned VectorComponents = 0;
44 if ((R600::R600_Reg128RegClass.contains(DestReg) ||
45 R600::R600_Reg128VerticalRegClass.contains(DestReg)) &&
46 (R600::R600_Reg128RegClass.contains(SrcReg) ||
47 R600::R600_Reg128VerticalRegClass.contains(SrcReg))) {
48 VectorComponents = 4;
49 } else if((R600::R600_Reg64RegClass.contains(DestReg) ||
50 R600::R600_Reg64VerticalRegClass.contains(DestReg)) &&
51 (R600::R600_Reg64RegClass.contains(SrcReg) ||
52 R600::R600_Reg64VerticalRegClass.contains(SrcReg))) {
53 VectorComponents = 2;
54 }
55
56 if (VectorComponents > 0) {
57 for (unsigned I = 0; I < VectorComponents; I++) {
58 unsigned SubRegIndex = R600RegisterInfo::getSubRegFromChannel(I);
59 buildDefaultInstruction(MBB, MI, R600::MOV,
60 RI.getSubReg(DestReg, SubRegIndex),
61 RI.getSubReg(SrcReg, SubRegIndex))
62 .addReg(DestReg,
63 RegState::Define | RegState::Implicit);
64 }
65 } else {
66 MachineInstr *NewMI = buildDefaultInstruction(MBB, MI, R600::MOV,
67 DestReg, SrcReg);
68 NewMI->getOperand(getOperandIdx(*NewMI, R600::OpName::src0))
69 .setIsKill(KillSrc);
70 }
71}
72
73/// \returns true if \p MBBI can be moved into a new basic block.
74bool R600InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
75 MachineBasicBlock::iterator MBBI) const {
76 for (MachineInstr::const_mop_iterator I = MBBI->operands_begin(),
77 E = MBBI->operands_end(); I != E; ++I) {
78 if (I->isReg() && !I->getReg().isVirtual() && I->isUse() &&
79 RI.isPhysRegLiveAcrossClauses(I->getReg()))
80 return false;
81 }
82 return true;
83}
84
85bool R600InstrInfo::isMov(unsigned Opcode) const {
86 switch(Opcode) {
87 default:
88 return false;
89 case R600::MOV:
90 case R600::MOV_IMM_F32:
91 case R600::MOV_IMM_I32:
92 return true;
93 }
94}
95
96bool R600InstrInfo::isReductionOp(unsigned Opcode) const {
97 return false;
98}
99
100bool R600InstrInfo::isCubeOp(unsigned Opcode) const {
101 switch(Opcode) {
102 default: return false;
103 case R600::CUBE_r600_pseudo:
104 case R600::CUBE_r600_real:
105 case R600::CUBE_eg_pseudo:
106 case R600::CUBE_eg_real:
107 return true;
108 }
109}
110
111bool R600InstrInfo::isALUInstr(unsigned Opcode) const {
112 unsigned TargetFlags = get(Opcode).TSFlags;
113
114 return (TargetFlags & R600_InstFlag::ALU_INST);
115}
116
117bool R600InstrInfo::hasInstrModifiers(unsigned Opcode) const {
118 unsigned TargetFlags = get(Opcode).TSFlags;
119
120 return ((TargetFlags & R600_InstFlag::OP1) |
121 (TargetFlags & R600_InstFlag::OP2) |
122 (TargetFlags & R600_InstFlag::OP3));
123}
124
125bool R600InstrInfo::isLDSInstr(unsigned Opcode) const {
126 unsigned TargetFlags = get(Opcode).TSFlags;
127
128 return ((TargetFlags & R600_InstFlag::LDS_1A) |
129 (TargetFlags & R600_InstFlag::LDS_1A1D) |
130 (TargetFlags & R600_InstFlag::LDS_1A2D));
131}
132
133bool R600InstrInfo::isLDSRetInstr(unsigned Opcode) const {
134 return isLDSInstr(Opcode) && getOperandIdx(Opcode, R600::OpName::dst) != -1;
135}
136
137bool R600InstrInfo::canBeConsideredALU(const MachineInstr &MI) const {
138 if (isALUInstr(MI.getOpcode()))
139 return true;
140 if (isVector(MI) || isCubeOp(MI.getOpcode()))
141 return true;
142 switch (MI.getOpcode()) {
143 case R600::PRED_X:
144 case R600::INTERP_PAIR_XY:
145 case R600::INTERP_PAIR_ZW:
146 case R600::INTERP_VEC_LOAD:
147 case R600::COPY:
148 case R600::DOT_4:
149 return true;
150 default:
151 return false;
152 }
153}
154
155bool R600InstrInfo::isTransOnly(unsigned Opcode) const {
156 if (ST.hasCaymanISA())
157 return false;
158 return (get(Opcode).getSchedClass() == R600::Sched::TransALU);
159}
160
161bool R600InstrInfo::isTransOnly(const MachineInstr &MI) const {
162 return isTransOnly(MI.getOpcode());
163}
164
165bool R600InstrInfo::isVectorOnly(unsigned Opcode) const {
166 return (get(Opcode).getSchedClass() == R600::Sched::VecALU);
167}
168
169bool R600InstrInfo::isVectorOnly(const MachineInstr &MI) const {
170 return isVectorOnly(MI.getOpcode());
171}
172
173bool R600InstrInfo::isExport(unsigned Opcode) const {
174 return (get(Opcode).TSFlags & R600_InstFlag::IS_EXPORT);
175}
176
177bool R600InstrInfo::usesVertexCache(unsigned Opcode) const {
178 return ST.hasVertexCache() && IS_VTX(get(Opcode));
179}
180
181bool R600InstrInfo::usesVertexCache(const MachineInstr &MI) const {
182 const MachineFunction *MF = MI.getParent()->getParent();
183 return !AMDGPU::isCompute(MF->getFunction().getCallingConv()) &&
184 usesVertexCache(MI.getOpcode());
185}
186
187bool R600InstrInfo::usesTextureCache(unsigned Opcode) const {
188 return (!ST.hasVertexCache() && IS_VTX(get(Opcode))) || IS_TEX(get(Opcode));
189}
190
191bool R600InstrInfo::usesTextureCache(const MachineInstr &MI) const {
192 const MachineFunction *MF = MI.getParent()->getParent();
193 return (AMDGPU::isCompute(MF->getFunction().getCallingConv()) &&
194 usesVertexCache(MI.getOpcode())) ||
195 usesTextureCache(MI.getOpcode());
196}
197
198bool R600InstrInfo::mustBeLastInClause(unsigned Opcode) const {
199 switch (Opcode) {
200 case R600::KILLGT:
201 case R600::GROUP_BARRIER:
202 return true;
203 default:
204 return false;
205 }
206}
207
208bool R600InstrInfo::usesAddressRegister(MachineInstr &MI) const {
209 return MI.findRegisterUseOperandIdx(R600::AR_X, false, &RI) != -1;
210}
211
212bool R600InstrInfo::definesAddressRegister(MachineInstr &MI) const {
213 return MI.findRegisterDefOperandIdx(R600::AR_X, false, false, &RI) != -1;
214}
215
216bool R600InstrInfo::readsLDSSrcReg(const MachineInstr &MI) const {
217 if (!isALUInstr(MI.getOpcode())) {
218 return false;
219 }
220 for (MachineInstr::const_mop_iterator I = MI.operands_begin(),
221 E = MI.operands_end();
222 I != E; ++I) {
223 if (!I->isReg() || !I->isUse() || I->getReg().isVirtual())
224 continue;
225
226 if (R600::R600_LDS_SRC_REGRegClass.contains(I->getReg()))
227 return true;
228 }
229 return false;
230}
231
232int R600InstrInfo::getSelIdx(unsigned Opcode, unsigned SrcIdx) const {
233 static const unsigned SrcSelTable[][2] = {
234 {R600::OpName::src0, R600::OpName::src0_sel},
235 {R600::OpName::src1, R600::OpName::src1_sel},
236 {R600::OpName::src2, R600::OpName::src2_sel},
237 {R600::OpName::src0_X, R600::OpName::src0_sel_X},
238 {R600::OpName::src0_Y, R600::OpName::src0_sel_Y},
239 {R600::OpName::src0_Z, R600::OpName::src0_sel_Z},
240 {R600::OpName::src0_W, R600::OpName::src0_sel_W},
241 {R600::OpName::src1_X, R600::OpName::src1_sel_X},
242 {R600::OpName::src1_Y, R600::OpName::src1_sel_Y},
243 {R600::OpName::src1_Z, R600::OpName::src1_sel_Z},
244 {R600::OpName::src1_W, R600::OpName::src1_sel_W}
245 };
246
247 for (const auto &Row : SrcSelTable) {
248 if (getOperandIdx(Opcode, Row[0]) == (int)SrcIdx) {
249 return getOperandIdx(Opcode, Row[1]);
250 }
251 }
252 return -1;
253}
254
255SmallVector<std::pair<MachineOperand *, int64_t>, 3>
256R600InstrInfo::getSrcs(MachineInstr &MI) const {
257 SmallVector<std::pair<MachineOperand *, int64_t>, 3> Result;
258
259 if (MI.getOpcode() == R600::DOT_4) {
260 static const unsigned OpTable[8][2] = {
261 {R600::OpName::src0_X, R600::OpName::src0_sel_X},
262 {R600::OpName::src0_Y, R600::OpName::src0_sel_Y},
263 {R600::OpName::src0_Z, R600::OpName::src0_sel_Z},
264 {R600::OpName::src0_W, R600::OpName::src0_sel_W},
265 {R600::OpName::src1_X, R600::OpName::src1_sel_X},
266 {R600::OpName::src1_Y, R600::OpName::src1_sel_Y},
267 {R600::OpName::src1_Z, R600::OpName::src1_sel_Z},
268 {R600::OpName::src1_W, R600::OpName::src1_sel_W},
269 };
270
271 for (unsigned j = 0; j < 8; j++) {
272 MachineOperand &MO =
273 MI.getOperand(getOperandIdx(MI.getOpcode(), OpTable[j][0]));
274 Register Reg = MO.getReg();
275 if (Reg == R600::ALU_CONST) {
276 MachineOperand &Sel =
277 MI.getOperand(getOperandIdx(MI.getOpcode(), OpTable[j][1]));
278 Result.push_back(std::make_pair(&MO, Sel.getImm()));
279 continue;
280 }
281
282 }
283 return Result;
284 }
285
286 static const unsigned OpTable[3][2] = {
287 {R600::OpName::src0, R600::OpName::src0_sel},
288 {R600::OpName::src1, R600::OpName::src1_sel},
289 {R600::OpName::src2, R600::OpName::src2_sel},
290 };
291
292 for (unsigned j = 0; j < 3; j++) {
293 int SrcIdx = getOperandIdx(MI.getOpcode(), OpTable[j][0]);
294 if (SrcIdx < 0)
295 break;
296 MachineOperand &MO = MI.getOperand(SrcIdx);
297 Register Reg = MO.getReg();
298 if (Reg == R600::ALU_CONST) {
299 MachineOperand &Sel =
300 MI.getOperand(getOperandIdx(MI.getOpcode(), OpTable[j][1]));
301 Result.push_back(std::make_pair(&MO, Sel.getImm()));
302 continue;
303 }
304 if (Reg == R600::ALU_LITERAL_X) {
305 MachineOperand &Operand =
306 MI.getOperand(getOperandIdx(MI.getOpcode(), R600::OpName::literal));
307 if (Operand.isImm()) {
308 Result.push_back(std::make_pair(&MO, Operand.getImm()));
309 continue;
310 }
311 assert(Operand.isGlobal());
312 }
313 Result.push_back(std::make_pair(&MO, 0));
314 }
315 return Result;
316}
317
318std::vector<std::pair<int, unsigned>>
319R600InstrInfo::ExtractSrcs(MachineInstr &MI,
320 const DenseMap<unsigned, unsigned> &PV,
321 unsigned &ConstCount) const {
322 ConstCount = 0;
323 const std::pair<int, unsigned> DummyPair(-1, 0);
324 std::vector<std::pair<int, unsigned>> Result;
325 unsigned i = 0;
326 for (const auto &Src : getSrcs(MI)) {
327 ++i;
328 Register Reg = Src.first->getReg();
329 int Index = RI.getEncodingValue(Reg) & 0xff;
330 if (Reg == R600::OQAP) {
331 Result.push_back(std::make_pair(Index, 0U));
332 }
333 if (PV.find(Reg) != PV.end()) {
334 // 255 is used to tell it's a PS/PV reg
335 Result.push_back(std::make_pair(255, 0U));
336 continue;
337 }
338 if (Index > 127) {
339 ConstCount++;
340 Result.push_back(DummyPair);
341 continue;
342 }
343 unsigned Chan = RI.getHWRegChan(Reg);
344 Result.push_back(std::make_pair(Index, Chan));
345 }
346 for (; i < 3; ++i)
347 Result.push_back(DummyPair);
348 return Result;
349}
350
351static std::vector<std::pair<int, unsigned>>
352Swizzle(std::vector<std::pair<int, unsigned>> Src,
353 R600InstrInfo::BankSwizzle Swz) {
354 if (Src[0] == Src[1])
355 Src[1].first = -1;
356 switch (Swz) {
357 case R600InstrInfo::ALU_VEC_012_SCL_210:
358 break;
359 case R600InstrInfo::ALU_VEC_021_SCL_122:
360 std::swap(Src[1], Src[2]);
361 break;
362 case R600InstrInfo::ALU_VEC_102_SCL_221:
363 std::swap(Src[0], Src[1]);
364 break;
365 case R600InstrInfo::ALU_VEC_120_SCL_212:
366 std::swap(Src[0], Src[1]);
367 std::swap(Src[0], Src[2]);
368 break;
369 case R600InstrInfo::ALU_VEC_201:
370 std::swap(Src[0], Src[2]);
371 std::swap(Src[0], Src[1]);
372 break;
373 case R600InstrInfo::ALU_VEC_210:
374 std::swap(Src[0], Src[2]);
375 break;
376 }
377 return Src;
378}
379
380static unsigned getTransSwizzle(R600InstrInfo::BankSwizzle Swz, unsigned Op) {
381 assert(Op < 3 && "Out of range swizzle index");
382 switch (Swz) {
383 case R600InstrInfo::ALU_VEC_012_SCL_210: {
384 unsigned Cycles[3] = { 2, 1, 0};
385 return Cycles[Op];
386 }
387 case R600InstrInfo::ALU_VEC_021_SCL_122: {
388 unsigned Cycles[3] = { 1, 2, 2};
389 return Cycles[Op];
390 }
391 case R600InstrInfo::ALU_VEC_120_SCL_212: {
392 unsigned Cycles[3] = { 2, 1, 2};
393 return Cycles[Op];
394 }
395 case R600InstrInfo::ALU_VEC_102_SCL_221: {
396 unsigned Cycles[3] = { 2, 2, 1};
397 return Cycles[Op];
398 }
399 default:
400 llvm_unreachable("Wrong Swizzle for Trans Slot");
401 }
402}
403
404/// returns how many MIs (whose inputs are represented by IGSrcs) can be packed
405/// in the same Instruction Group while meeting read port limitations given a
406/// Swz swizzle sequence.
407unsigned R600InstrInfo::isLegalUpTo(
408 const std::vector<std::vector<std::pair<int, unsigned>>> &IGSrcs,
409 const std::vector<R600InstrInfo::BankSwizzle> &Swz,
410 const std::vector<std::pair<int, unsigned>> &TransSrcs,
411 R600InstrInfo::BankSwizzle TransSwz) const {
412 int Vector[4][3];
413 memset(Vector, -1, sizeof(Vector));
414 for (unsigned i = 0, e = IGSrcs.size(); i < e; i++) {
415 const std::vector<std::pair<int, unsigned>> &Srcs =
416 Swizzle(IGSrcs[i], Swz[i]);
417 for (unsigned j = 0; j < 3; j++) {
418 const std::pair<int, unsigned> &Src = Srcs[j];
419 if (Src.first < 0 || Src.first == 255)
420 continue;
421 if (Src.first == GET_REG_INDEX(RI.getEncodingValue(R600::OQAP))) {
422 if (Swz[i] != R600InstrInfo::ALU_VEC_012_SCL_210 &&
423 Swz[i] != R600InstrInfo::ALU_VEC_021_SCL_122) {
424 // The value from output queue A (denoted by register OQAP) can
425 // only be fetched during the first cycle.
426 return false;
427 }
428 // OQAP does not count towards the normal read port restrictions
429 continue;
430 }
431 if (Vector[Src.second][j] < 0)
432 Vector[Src.second][j] = Src.first;
433 if (Vector[Src.second][j] != Src.first)
434 return i;
435 }
436 }
437 // Now check Trans Alu
438 for (unsigned i = 0, e = TransSrcs.size(); i < e; ++i) {
439 const std::pair<int, unsigned> &Src = TransSrcs[i];
440 unsigned Cycle = getTransSwizzle(TransSwz, i);
441 if (Src.first < 0)
442 continue;
443 if (Src.first == 255)
444 continue;
445 if (Vector[Src.second][Cycle] < 0)
446 Vector[Src.second][Cycle] = Src.first;
447 if (Vector[Src.second][Cycle] != Src.first)
448 return IGSrcs.size() - 1;
449 }
450 return IGSrcs.size();
451}
452
453/// Given a swizzle sequence SwzCandidate and an index Idx, returns the next
454/// (in lexicographic term) swizzle sequence assuming that all swizzles after
455/// Idx can be skipped
456static bool
457NextPossibleSolution(
458 std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
459 unsigned Idx) {
460 assert(Idx < SwzCandidate.size());
461 int ResetIdx = Idx;
462 while (ResetIdx > -1 && SwzCandidate[ResetIdx] == R600InstrInfo::ALU_VEC_210)
463 ResetIdx --;
464 for (unsigned i = ResetIdx + 1, e = SwzCandidate.size(); i < e; i++) {
465 SwzCandidate[i] = R600InstrInfo::ALU_VEC_012_SCL_210;
466 }
467 if (ResetIdx == -1)
468 return false;
469 int NextSwizzle = SwzCandidate[ResetIdx] + 1;
470 SwzCandidate[ResetIdx] = (R600InstrInfo::BankSwizzle)NextSwizzle;
471 return true;
472}
473
474/// Enumerate all possible Swizzle sequence to find one that can meet all
475/// read port requirements.
476bool R600InstrInfo::FindSwizzleForVectorSlot(
477 const std::vector<std::vector<std::pair<int, unsigned>>> &IGSrcs,
478 std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
479 const std::vector<std::pair<int, unsigned>> &TransSrcs,
480 R600InstrInfo::BankSwizzle TransSwz) const {
481 unsigned ValidUpTo = 0;
482 do {
483 ValidUpTo = isLegalUpTo(IGSrcs, SwzCandidate, TransSrcs, TransSwz);
484 if (ValidUpTo == IGSrcs.size())
485 return true;
486 } while (NextPossibleSolution(SwzCandidate, ValidUpTo));
487 return false;
488}
489
490/// Instructions in Trans slot can't read gpr at cycle 0 if they also read
491/// a const, and can't read a gpr at cycle 1 if they read 2 const.
492static bool
493isConstCompatible(R600InstrInfo::BankSwizzle TransSwz,
494 const std::vector<std::pair<int, unsigned>> &TransOps,
495 unsigned ConstCount) {
496 // TransALU can't read 3 constants
497 if (ConstCount > 2)
498 return false;
499 for (unsigned i = 0, e = TransOps.size(); i < e; ++i) {
500 const std::pair<int, unsigned> &Src = TransOps[i];
501 unsigned Cycle = getTransSwizzle(TransSwz, i);
502 if (Src.first < 0)
503 continue;
504 if (ConstCount > 0 && Cycle == 0)
505 return false;
506 if (ConstCount > 1 && Cycle == 1)
507 return false;
508 }
509 return true;
510}
511
512bool
513R600InstrInfo::fitsReadPortLimitations(const std::vector<MachineInstr *> &IG,
514 const DenseMap<unsigned, unsigned> &PV,
515 std::vector<BankSwizzle> &ValidSwizzle,
516 bool isLastAluTrans)
517 const {
518 // TODO: support shared src0 - src1 operand
519
520 std::vector<std::vector<std::pair<int, unsigned>>> IGSrcs;
521 ValidSwizzle.clear();
522 unsigned ConstCount;
[1] 'ConstCount' declared without an initial value
523 BankSwizzle TransBS = ALU_VEC_012_SCL_210;
524 for (unsigned i = 0, e = IG.size(); i < e; ++i) {
[2] Assuming 'i' is >= 'e'
[3] Loop condition is false. Execution continues on line 531
525 IGSrcs.push_back(ExtractSrcs(*IG[i], PV, ConstCount));
526 unsigned Op = getOperandIdx(IG[i]->getOpcode(),
527 R600::OpName::bank_swizzle);
528 ValidSwizzle.push_back( (R600InstrInfo::BankSwizzle)
529 IG[i]->getOperand(Op).getImm());
530 }
531 std::vector<std::pair<int, unsigned>> TransOps;
532 if (!isLastAluTrans)
[4] Assuming 'isLastAluTrans' is true
[5] Taking false branch
533 return FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps, TransBS);
534
535 TransOps = std::move(IGSrcs.back());
536 IGSrcs.pop_back();
537 ValidSwizzle.pop_back();
538
539 static const R600InstrInfo::BankSwizzle TransSwz[] = {
540 ALU_VEC_012_SCL_210,
541 ALU_VEC_021_SCL_122,
542 ALU_VEC_120_SCL_212,
543 ALU_VEC_102_SCL_221
544 };
545 for (unsigned i = 0; i < 4; i++) {
[6] Loop condition is true. Entering loop body
546 TransBS = TransSwz[i];
547 if (!isConstCompatible(TransBS, TransOps, ConstCount))
[7] 3rd function call argument is an uninitialized value
548 continue;
549 bool Result = FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps,
550 TransBS);
551 if (Result) {
552 ValidSwizzle.push_back(TransBS);
553 return true;
554 }
555 }
556
557 return false;
558}
559
560bool
561R600InstrInfo::fitsConstReadLimitations(const std::vector<unsigned> &Consts)
562 const {
563 assert (Consts.size() <= 12 && "Too many operands in instructions group");
564 unsigned Pair1 = 0, Pair2 = 0;
565 for (unsigned i = 0, n = Consts.size(); i < n; ++i) {
566 unsigned ReadConstHalf = Consts[i] & 2;
567 unsigned ReadConstIndex = Consts[i] & (~3);
568 unsigned ReadHalfConst = ReadConstIndex | ReadConstHalf;
569 if (!Pair1) {
570 Pair1 = ReadHalfConst;
571 continue;
572 }
573 if (Pair1 == ReadHalfConst)
574 continue;
575 if (!Pair2) {
576 Pair2 = ReadHalfConst;
577 continue;
578 }
579 if (Pair2 != ReadHalfConst)
580 return false;
581 }
582 return true;
583}
584
585bool
586R600InstrInfo::fitsConstReadLimitations(const std::vector<MachineInstr *> &MIs)
587 const {
588 std::vector<unsigned> Consts;
589 SmallSet<int64_t, 4> Literals;
590 for (unsigned i = 0, n = MIs.size(); i < n; i++) {
591 MachineInstr &MI = *MIs[i];
592 if (!isALUInstr(MI.getOpcode()))
593 continue;
594
595 for (const auto &Src : getSrcs(MI)) {
596 if (Src.first->getReg() == R600::ALU_LITERAL_X)
597 Literals.insert(Src.second);
598 if (Literals.size() > 4)
599 return false;
600 if (Src.first->getReg() == R600::ALU_CONST)
601 Consts.push_back(Src.second);
602 if (R600::R600_KC0RegClass.contains(Src.first->getReg()) ||
603 R600::R600_KC1RegClass.contains(Src.first->getReg())) {
604 unsigned Index = RI.getEncodingValue(Src.first->getReg()) & 0xff;
605 unsigned Chan = RI.getHWRegChan(Src.first->getReg());
606 Consts.push_back((Index << 2) | Chan);
607 }
608 }
609 }
610 return fitsConstReadLimitations(Consts);
611}
612
613DFAPacketizer *
614R600InstrInfo::CreateTargetScheduleState(const TargetSubtargetInfo &STI) const {
615 const InstrItineraryData *II = STI.getInstrItineraryData();
616 return static_cast<const R600Subtarget &>(STI).createDFAPacketizer(II);
617}
618
619static bool
620isPredicateSetter(unsigned Opcode) {
621 switch (Opcode) {
622 case R600::PRED_X:
623 return true;
624 default:
625 return false;
626 }
627}
628
629static MachineInstr *
630findFirstPredicateSetterFrom(MachineBasicBlock &MBB,
631 MachineBasicBlock::iterator I) {
632 while (I != MBB.begin()) {
633 --I;
634 MachineInstr &MI = *I;
635 if (isPredicateSetter(MI.getOpcode()))
636 return &MI;
637 }
638
639 return nullptr;
640}
641
642static
643bool isJump(unsigned Opcode) {
644 return Opcode == R600::JUMP || Opcode == R600::JUMP_COND;
645}
646
647static bool isBranch(unsigned Opcode) {
648 return Opcode == R600::BRANCH || Opcode == R600::BRANCH_COND_i32 ||
649 Opcode == R600::BRANCH_COND_f32;
650}
651
652bool R600InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
653 MachineBasicBlock *&TBB,
654 MachineBasicBlock *&FBB,
655 SmallVectorImpl<MachineOperand> &Cond,
656 bool AllowModify) const {
657 // Most of the following comes from the ARM implementation of analyzeBranch
658
659 // If the block has no terminators, it just falls into the block after it.
660 MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
661 if (I == MBB.end())
662 return false;
663
664 // R600::BRANCH* instructions are only available after isel and are not
665 // handled
666 if (isBranch(I->getOpcode()))
667 return true;
668 if (!isJump(I->getOpcode())) {
669 return false;
670 }
671
672 // Remove successive JUMP
673 while (I != MBB.begin() && std::prev(I)->getOpcode() == R600::JUMP) {
674 MachineBasicBlock::iterator PriorI = std::prev(I);
675 if (AllowModify)
676 I->removeFromParent();
677 I = PriorI;
678 }
679 MachineInstr &LastInst = *I;
680
681 // If there is only one terminator instruction, process it.
682 unsigned LastOpc = LastInst.getOpcode();
683 if (I == MBB.begin() || !isJump((--I)->getOpcode())) {
684 if (LastOpc == R600::JUMP) {
685 TBB = LastInst.getOperand(0).getMBB();
686 return false;
687 } else if (LastOpc == R600::JUMP_COND) {
688 auto predSet = I;
689 while (!isPredicateSetter(predSet->getOpcode())) {
690 predSet = --I;
691 }
692 TBB = LastInst.getOperand(0).getMBB();
693 Cond.push_back(predSet->getOperand(1));
694 Cond.push_back(predSet->getOperand(2));
695 Cond.push_back(MachineOperand::CreateReg(R600::PRED_SEL_ONE, false));
696 return false;
697 }
698 return true; // Can't handle indirect branch.
699 }
700
701 // Get the instruction before it if it is a terminator.
702 MachineInstr &SecondLastInst = *I;
703 unsigned SecondLastOpc = SecondLastInst.getOpcode();
704
705 // If the block ends with a B and a Bcc, handle it.
706 if (SecondLastOpc == R600::JUMP_COND && LastOpc == R600::JUMP) {
707 auto predSet = --I;
708 while (!isPredicateSetter(predSet->getOpcode())) {
709 predSet = --I;
710 }
711 TBB = SecondLastInst.getOperand(0).getMBB();
712 FBB = LastInst.getOperand(0).getMBB();
713 Cond.push_back(predSet->getOperand(1));
714 Cond.push_back(predSet->getOperand(2));
715 Cond.push_back(MachineOperand::CreateReg(R600::PRED_SEL_ONE, false));
716 return false;
717 }
718
719 // Otherwise, can't handle this.
720 return true;
721}
722
723static
724MachineBasicBlock::iterator FindLastAluClause(MachineBasicBlock &MBB) {
725 for (MachineBasicBlock::reverse_iterator It = MBB.rbegin(), E = MBB.rend();
726 It != E; ++It) {
727 if (It->getOpcode() == R600::CF_ALU ||
728 It->getOpcode() == R600::CF_ALU_PUSH_BEFORE)
729 return It.getReverse();
730 }
731 return MBB.end();
732}
733
734unsigned R600InstrInfo::insertBranch(MachineBasicBlock &MBB,
735 MachineBasicBlock *TBB,
736 MachineBasicBlock *FBB,
737 ArrayRef<MachineOperand> Cond,
738 const DebugLoc &DL,
739 int *BytesAdded) const {
740 assert(TBB && "insertBranch must not be told to insert a fallthrough");
741 assert(!BytesAdded && "code size not handled");
742
743 if (!FBB) {
744 if (Cond.empty()) {
745 BuildMI(&MBB, DL, get(R600::JUMP)).addMBB(TBB);
746 return 1;
747 } else {
748 MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
749 assert(PredSet && "No previous predicate !");
750 addFlag(*PredSet, 0, MO_FLAG_PUSH);
751 PredSet->getOperand(2).setImm(Cond[1].getImm());
752
753 BuildMI(&MBB, DL, get(R600::JUMP_COND))
754 .addMBB(TBB)
755 .addReg(R600::PREDICATE_BIT, RegState::Kill);
756 MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
757 if (CfAlu == MBB.end())
758 return 1;
759 assert (CfAlu->getOpcode() == R600::CF_ALU);
760 CfAlu->setDesc(get(R600::CF_ALU_PUSH_BEFORE));
761 return 1;
762 }
763 } else {
764 MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
765 assert(PredSet && "No previous predicate !");
766 addFlag(*PredSet, 0, MO_FLAG_PUSH);
767 PredSet->getOperand(2).setImm(Cond[1].getImm());
768 BuildMI(&MBB, DL, get(R600::JUMP_COND))
769 .addMBB(TBB)
770 .addReg(R600::PREDICATE_BIT, RegState::Kill);
771 BuildMI(&MBB, DL, get(R600::JUMP)).addMBB(FBB);
772 MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
773 if (CfAlu == MBB.end())
774 return 2;
775 assert (CfAlu->getOpcode() == R600::CF_ALU);
776 CfAlu->setDesc(get(R600::CF_ALU_PUSH_BEFORE));
777 return 2;
778 }
779}
780
781unsigned R600InstrInfo::removeBranch(MachineBasicBlock &MBB,
782 int *BytesRemoved) const {
783 assert(!BytesRemoved && "code size not handled");
784
785 // Note : we leave PRED* instructions there.
786 // They may be needed when predicating instructions.
787
788 MachineBasicBlock::iterator I = MBB.end();
789
790 if (I == MBB.begin()) {
791 return 0;
792 }
793 --I;
794 switch (I->getOpcode()) {
795 default:
796 return 0;
797 case R600::JUMP_COND: {
798 MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
799 clearFlag(*predSet, 0, MO_FLAG_PUSH);
800 I->eraseFromParent();
801 MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
802 if (CfAlu == MBB.end())
803 break;
804 assert (CfAlu->getOpcode() == R600::CF_ALU_PUSH_BEFORE);
805 CfAlu->setDesc(get(R600::CF_ALU));
806 break;
807 }
808 case R600::JUMP:
809 I->eraseFromParent();
810 break;
811 }
812 I = MBB.end();
813
814 if (I == MBB.begin()) {
815 return 1;
816 }
817 --I;
818 switch (I->getOpcode()) {
819 // FIXME: only one case??
820 default:
821 return 1;
822 case R600::JUMP_COND: {
823 MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
824 clearFlag(*predSet, 0, MO_FLAG_PUSH);
825 I->eraseFromParent();
826 MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
827 if (CfAlu == MBB.end())
828 break;
829 assert (CfAlu->getOpcode() == R600::CF_ALU_PUSH_BEFORE);
830 CfAlu->setDesc(get(R600::CF_ALU));
831 break;
832 }
833 case R600::JUMP:
834 I->eraseFromParent();
835 break;
836 }
837 return 2;
838}
839
840bool R600InstrInfo::isPredicated(const MachineInstr &MI) const {
841 int idx = MI.findFirstPredOperandIdx();
842 if (idx < 0)
843 return false;
844
845 Register Reg = MI.getOperand(idx).getReg();
846 switch (Reg) {
847 default: return false;
848 case R600::PRED_SEL_ONE:
849 case R600::PRED_SEL_ZERO:
850 case R600::PREDICATE_BIT:
851 return true;
852 }
853}
854
855bool R600InstrInfo::isPredicable(const MachineInstr &MI) const {
856 // XXX: KILL* instructions can be predicated, but they must be the last
857 // instruction in a clause, so this means any instructions after them cannot
858 // be predicated. Until we have proper support for instruction clauses in the
859 // backend, we will mark KILL* instructions as unpredicable.
860
861 if (MI.getOpcode() == R600::KILLGT) {
862 return false;
863 } else if (MI.getOpcode() == R600::CF_ALU) {
864 // If the clause starts in the middle of MBB then the MBB has more
865 // than a single clause, unable to predicate several clauses.
866 if (MI.getParent()->begin() != MachineBasicBlock::const_iterator(MI))
867 return false;
868 // TODO: We don't support KC merging atm
869 return MI.getOperand(3).getImm() == 0 && MI.getOperand(4).getImm() == 0;
870 } else if (isVector(MI)) {
871 return false;
872 } else {
873 return TargetInstrInfo::isPredicable(MI);
874 }
875}
876
877bool
878R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
879 unsigned NumCycles,
880 unsigned ExtraPredCycles,
881 BranchProbability Probability) const{
882 return true;
883}
884
885bool
886R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
887 unsigned NumTCycles,
888 unsigned ExtraTCycles,
889 MachineBasicBlock &FMBB,
890 unsigned NumFCycles,
891 unsigned ExtraFCycles,
892 BranchProbability Probability) const {
893 return true;
894}
895
896bool
897R600InstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
898 unsigned NumCycles,
899 BranchProbability Probability)
900 const {
901 return true;
902}
903
904bool
905R600InstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
906 MachineBasicBlock &FMBB) const {
907 return false;
908}
909
910bool
911R600InstrInfo::reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
912 MachineOperand &MO = Cond[1];
913 switch (MO.getImm()) {
914 case R600::PRED_SETE_INT:
915 MO.setImm(R600::PRED_SETNE_INT);
916 break;
917 case R600::PRED_SETNE_INT:
918 MO.setImm(R600::PRED_SETE_INT);
919 break;
920 case R600::PRED_SETE:
921 MO.setImm(R600::PRED_SETNE);
922 break;
923 case R600::PRED_SETNE:
924 MO.setImm(R600::PRED_SETE);
925 break;
926 default:
927 return true;
928 }
929
930 MachineOperand &MO2 = Cond[2];
931 switch (MO2.getReg()) {
932 case R600::PRED_SEL_ZERO:
933 MO2.setReg(R600::PRED_SEL_ONE);
934 break;
935 case R600::PRED_SEL_ONE:
936 MO2.setReg(R600::PRED_SEL_ZERO);
937 break;
938 default:
939 return true;
940 }
941 return false;
942}
943
944bool R600InstrInfo::ClobbersPredicate(MachineInstr &MI,
945 std::vector<MachineOperand> &Pred,
946 bool SkipDead) const {
947 return isPredicateSetter(MI.getOpcode());
948}
949
950bool R600InstrInfo::PredicateInstruction(MachineInstr &MI,
951 ArrayRef<MachineOperand> Pred) const {
952 int PIdx = MI.findFirstPredOperandIdx();
953
954 if (MI.getOpcode() == R600::CF_ALU) {
955 MI.getOperand(8).setImm(0);
956 return true;
957 }
958
959 if (MI.getOpcode() == R600::DOT_4) {
960 MI.getOperand(getOperandIdx(MI, R600::OpName::pred_sel_X))
961 .setReg(Pred[2].getReg());
962 MI.getOperand(getOperandIdx(MI, R600::OpName::pred_sel_Y))
963 .setReg(Pred[2].getReg());
964 MI.getOperand(getOperandIdx(MI, R600::OpName::pred_sel_Z))
965 .setReg(Pred[2].getReg());
966 MI.getOperand(getOperandIdx(MI, R600::OpName::pred_sel_W))
967 .setReg(Pred[2].getReg());
968 MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
969 MIB.addReg(R600::PREDICATE_BIT, RegState::Implicit);
970 return true;
971 }
972
973 if (PIdx != -1) {
974 MachineOperand &PMO = MI.getOperand(PIdx);
975 PMO.setReg(Pred[2].getReg());
976 MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
977 MIB.addReg(R600::PREDICATE_BIT, RegState::Implicit);
978 return true;
979 }
980
981 return false;
982}
983
984unsigned int R600InstrInfo::getPredicationCost(const MachineInstr &) const {
985 return 2;
986}
987
988unsigned int R600InstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
989 const MachineInstr &,
990 unsigned *PredCost) const {
991 if (PredCost)
992 *PredCost = 2;
993 return 2;
994}
995
996unsigned R600InstrInfo::calculateIndirectAddress(unsigned RegIndex,
997 unsigned Channel) const {
998 assert(Channel == 0);
999 return RegIndex;
1000}
1001
1002bool R600InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
1003 switch (MI.getOpcode()) {
1004 default: {
1005 MachineBasicBlock *MBB = MI.getParent();
1006 int OffsetOpIdx =
1007 R600::getNamedOperandIdx(MI.getOpcode(), R600::OpName::addr);
1008 // addr is a custom operand with multiple MI operands, and only the
1009 // first MI operand is given a name.
1010 int RegOpIdx = OffsetOpIdx + 1;
1011 int ChanOpIdx =
1012 R600::getNamedOperandIdx(MI.getOpcode(), R600::OpName::chan);
1013 if (isRegisterLoad(MI)) {
1014 int DstOpIdx =
1015 R600::getNamedOperandIdx(MI.getOpcode(), R600::OpName::dst);
1016 unsigned RegIndex = MI.getOperand(RegOpIdx).getImm();
1017 unsigned Channel = MI.getOperand(ChanOpIdx).getImm();
1018 unsigned Address = calculateIndirectAddress(RegIndex, Channel);
1019 Register OffsetReg = MI.getOperand(OffsetOpIdx).getReg();
1020 if (OffsetReg == R600::INDIRECT_BASE_ADDR) {
1021 buildMovInstr(MBB, MI, MI.getOperand(DstOpIdx).getReg(),
1022 getIndirectAddrRegClass()->getRegister(Address));
1023 } else {
1024 buildIndirectRead(MBB, MI, MI.getOperand(DstOpIdx).getReg(), Address,
1025 OffsetReg);
1026 }
1027 } else if (isRegisterStore(MI)) {
1028 int ValOpIdx =
1029 R600::getNamedOperandIdx(MI.getOpcode(), R600::OpName::val);
1030 unsigned RegIndex = MI.getOperand(RegOpIdx).getImm();
1031 unsigned Channel = MI.getOperand(ChanOpIdx).getImm();
1032 unsigned Address = calculateIndirectAddress(RegIndex, Channel);
1033 Register OffsetReg = MI.getOperand(OffsetOpIdx).getReg();
1034 if (OffsetReg == R600::INDIRECT_BASE_ADDR) {
1035 buildMovInstr(MBB, MI, getIndirectAddrRegClass()->getRegister(Address),
1036 MI.getOperand(ValOpIdx).getReg());
1037 } else {
1038 buildIndirectWrite(MBB, MI, MI.getOperand(ValOpIdx).getReg(),
1039 calculateIndirectAddress(RegIndex, Channel),
1040 OffsetReg);
1041 }
1042 } else {
1043 return false;
1044 }
1045
1046 MBB->erase(MI);
1047 return true;
1048 }
1049 case R600::R600_EXTRACT_ELT_V2:
1050 case R600::R600_EXTRACT_ELT_V4:
1051 buildIndirectRead(MI.getParent(), MI, MI.getOperand(0).getReg(),
1052 RI.getHWRegIndex(MI.getOperand(1).getReg()), // Address
1053 MI.getOperand(2).getReg(),
1054 RI.getHWRegChan(MI.getOperand(1).getReg()));
1055 break;
1056 case R600::R600_INSERT_ELT_V2:
1057 case R600::R600_INSERT_ELT_V4:
1058 buildIndirectWrite(MI.getParent(), MI, MI.getOperand(2).getReg(), // Value
1059 RI.getHWRegIndex(MI.getOperand(1).getReg()), // Address
1060 MI.getOperand(3).getReg(), // Offset
1061 RI.getHWRegChan(MI.getOperand(1).getReg())); // Channel
1062 break;
1063 }
1064 MI.eraseFromParent();
1065 return true;
1066}
1067
1068void R600InstrInfo::reserveIndirectRegisters(BitVector &Reserved,
1069 const MachineFunction &MF,
1070 const R600RegisterInfo &TRI) const {
1071 const R600Subtarget &ST = MF.getSubtarget<R600Subtarget>();
1072 const R600FrameLowering *TFL = ST.getFrameLowering();
1073
1074 unsigned StackWidth = TFL->getStackWidth(MF);
1075 int End = getIndirectIndexEnd(MF);
1076
1077 if (End == -1)
1078 return;
1079
1080 for (int Index = getIndirectIndexBegin(MF); Index <= End; ++Index) {
1081 for (unsigned Chan = 0; Chan < StackWidth; ++Chan) {
1082 unsigned Reg = R600::R600_TReg32RegClass.getRegister((4 * Index) + Chan);
1083 TRI.reserveRegisterTuples(Reserved, Reg);
1084 }
1085 }
1086}
1087
1088const TargetRegisterClass *R600InstrInfo::getIndirectAddrRegClass() const {
1089 return &R600::R600_TReg32_XRegClass;
1090}
1091
1092MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
1093 MachineBasicBlock::iterator I,
1094 unsigned ValueReg, unsigned Address,
1095 unsigned OffsetReg) const {
1096 return buildIndirectWrite(MBB, I, ValueReg, Address, OffsetReg, 0);
1097}
1098
1099MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
1100 MachineBasicBlock::iterator I,
1101 unsigned ValueReg, unsigned Address,
1102 unsigned OffsetReg,
1103 unsigned AddrChan) const {
1104 unsigned AddrReg;
1105 switch (AddrChan) {
1106 default: llvm_unreachable("Invalid Channel");
1107 case 0: AddrReg = R600::R600_AddrRegClass.getRegister(Address); break;
1108 case 1: AddrReg = R600::R600_Addr_YRegClass.getRegister(Address); break;
1109 case 2: AddrReg = R600::R600_Addr_ZRegClass.getRegister(Address); break;
1110 case 3: AddrReg = R600::R600_Addr_WRegClass.getRegister(Address); break;
1111 }
1112 MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, R600::MOVA_INT_eg,
1113 R600::AR_X, OffsetReg);
1114 setImmOperand(*MOVA, R600::OpName::write, 0);
1115
1116 MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, R600::MOV,
1117 AddrReg, ValueReg)
1118 .addReg(R600::AR_X,
1119 RegState::Implicit | RegState::Kill);
1120 setImmOperand(*Mov, R600::OpName::dst_rel, 1);
1121 return Mov;
1122}
1123
1124MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
1125 MachineBasicBlock::iterator I,
1126 unsigned ValueReg, unsigned Address,
1127 unsigned OffsetReg) const {
1128 return buildIndirectRead(MBB, I, ValueReg, Address, OffsetReg, 0);
1129}
1130
1131MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
1132 MachineBasicBlock::iterator I,
1133 unsigned ValueReg, unsigned Address,
1134 unsigned OffsetReg,
1135 unsigned AddrChan) const {
1136 unsigned AddrReg;
1137 switch (AddrChan) {
1138 default: llvm_unreachable("Invalid Channel");
1139 case 0: AddrReg = R600::R600_AddrRegClass.getRegister(Address); break;
1140 case 1: AddrReg = R600::R600_Addr_YRegClass.getRegister(Address); break;
1141 case 2: AddrReg = R600::R600_Addr_ZRegClass.getRegister(Address); break;
1142 case 3: AddrReg = R600::R600_Addr_WRegClass.getRegister(Address); break;
1143 }
1144 MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, R600::MOVA_INT_eg,
1145 R600::AR_X,
1146 OffsetReg);
1147 setImmOperand(*MOVA, R600::OpName::write, 0);
1148 MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, R600::MOV,
1149 ValueReg,
1150 AddrReg)
1151 .addReg(R600::AR_X,
1152 RegState::Implicit | RegState::Kill);
1153 setImmOperand(*Mov, R600::OpName::src0_rel, 1);
1154
1155 return Mov;
1156}
1157
1158int R600InstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
1159 const MachineRegisterInfo &MRI = MF.getRegInfo();
1160 const MachineFrameInfo &MFI = MF.getFrameInfo();
1161 int Offset = -1;
1162
1163 if (MFI.getNumObjects() == 0) {
1164 return -1;
1165 }
1166
1167 if (MRI.livein_empty()) {
1168 return 0;
1169 }
1170
1171 const TargetRegisterClass *IndirectRC = getIndirectAddrRegClass();
1172 for (std::pair<unsigned, unsigned> LI : MRI.liveins()) {
1173 Register Reg = LI.first;
1174 if (Reg.isVirtual() || !IndirectRC->contains(Reg))
1175 continue;
1176
1177 unsigned RegIndex;
1178 unsigned RegEnd;
1179 for (RegIndex = 0, RegEnd = IndirectRC->getNumRegs(); RegIndex != RegEnd;
1180 ++RegIndex) {
1181 if (IndirectRC->getRegister(RegIndex) == (unsigned)Reg)
1182 break;
1183 }
1184 Offset = std::max(Offset, (int)RegIndex);
1185 }
1186
1187 return Offset + 1;
1188}
1189
1190int R600InstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
1191 int Offset = 0;
1192 const MachineFrameInfo &MFI = MF.getFrameInfo();
1193
1194 // Variable sized objects are not supported
1195 if (MFI.hasVarSizedObjects()) {
1196 return -1;
1197 }
1198
1199 if (MFI.getNumObjects() == 0) {
1200 return -1;
1201 }
1202
1203 const R600Subtarget &ST = MF.getSubtarget<R600Subtarget>();
1204 const R600FrameLowering *TFL = ST.getFrameLowering();
1205
1206 Register IgnoredFrameReg;
1207 Offset = TFL->getFrameIndexReference(MF, -1, IgnoredFrameReg).getFixed();
1208
1209 return getIndirectIndexBegin(MF) + Offset;
1210}
1211
1212unsigned R600InstrInfo::getMaxAlusPerClause() const {
1213 return 115;
1214}
1215
1216MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MBB,
1217 MachineBasicBlock::iterator I,
1218 unsigned Opcode,
1219 unsigned DstReg,
1220 unsigned Src0Reg,
1221 unsigned Src1Reg) const {
1222 MachineInstrBuilder MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opcode),
1223 DstReg); // $dst
1224
1225 if (Src1Reg) {
1226 MIB.addImm(0) // $update_exec_mask
1227 .addImm(0); // $update_predicate
1228 }
1229 MIB.addImm(1) // $write
1230 .addImm(0) // $omod
1231 .addImm(0) // $dst_rel
1232 .addImm(0) // $dst_clamp
1233 .addReg(Src0Reg) // $src0
1234 .addImm(0) // $src0_neg
1235 .addImm(0) // $src0_rel
1236 .addImm(0) // $src0_abs
1237 .addImm(-1); // $src0_sel
1238
1239 if (Src1Reg) {
1240 MIB.addReg(Src1Reg) // $src1
1241 .addImm(0) // $src1_neg
1242 .addImm(0) // $src1_rel
1243 .addImm(0) // $src1_abs
1244 .addImm(-1); // $src1_sel
1245 }
1246
1247 //XXX: The r600g finalizer expects this to be 1, once we've moved the
1248 //scheduling to the backend, we can change the default to 0.
1249 MIB.addImm(1) // $last
1250 .addReg(R600::PRED_SEL_OFF) // $pred_sel
1251 .addImm(0) // $literal
1252 .addImm(0); // $bank_swizzle
1253
1254 return MIB;
1255}
1256
1257#define OPERAND_CASE(Label) \
1258 case Label: { \
1259 static const unsigned Ops[] = \
1260 { \
1261 Label##_X, \
1262 Label##_Y, \
1263 Label##_Z, \
1264 Label##_W \
1265 }; \
1266 return Ops[Slot]; \
1267 }
1268
1269static unsigned getSlotedOps(unsigned Op, unsigned Slot) {
1270 switch (Op) {
1271 OPERAND_CASE(R600::OpName::update_exec_mask)
1272 OPERAND_CASE(R600::OpName::update_pred)
1273 OPERAND_CASE(R600::OpName::write)
1274 OPERAND_CASE(R600::OpName::omod)
1275 OPERAND_CASE(R600::OpName::dst_rel)
1276 OPERAND_CASE(R600::OpName::clamp)
1277 OPERAND_CASE(R600::OpName::src0)
1278 OPERAND_CASE(R600::OpName::src0_neg)
1279 OPERAND_CASE(R600::OpName::src0_rel)
1280 OPERAND_CASE(R600::OpName::src0_abs)
1281 OPERAND_CASE(R600::OpName::src0_sel)
1282 OPERAND_CASE(R600::OpName::src1)
1283 OPERAND_CASE(R600::OpName::src1_neg)
1284 OPERAND_CASE(R600::OpName::src1_rel)
1285 OPERAND_CASE(R600::OpName::src1_abs)
1286 OPERAND_CASE(R600::OpName::src1_sel)
1287 OPERAND_CASE(R600::OpName::pred_sel)
1288 default:
1289 llvm_unreachable("Wrong Operand")::llvm::llvm_unreachable_internal("Wrong Operand", "/build/llvm-toolchain-snapshot-14~++20210828111110+16086d47c0d0/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp"
, 1289)
;
1290 }
1291}
1292
1293#undef OPERAND_CASE
1294
1295MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction(
1296 MachineBasicBlock &MBB, MachineInstr *MI, unsigned Slot, unsigned DstReg)
1297 const {
1298 assert (MI->getOpcode() == R600::DOT_4 && "Not Implemented");
1299 unsigned Opcode;
1300 if (ST.getGeneration() <= AMDGPUSubtarget::R700)
1301 Opcode = R600::DOT4_r600;
1302 else
1303 Opcode = R600::DOT4_eg;
1304 MachineBasicBlock::iterator I = MI;
1305 MachineOperand &Src0 = MI->getOperand(
1306 getOperandIdx(MI->getOpcode(), getSlotedOps(R600::OpName::src0, Slot)));
1307 MachineOperand &Src1 = MI->getOperand(
1308 getOperandIdx(MI->getOpcode(), getSlotedOps(R600::OpName::src1, Slot)));
1309 MachineInstr *MIB = buildDefaultInstruction(
1310 MBB, I, Opcode, DstReg, Src0.getReg(), Src1.getReg());
1311 static const unsigned Operands[14] = {
1312 R600::OpName::update_exec_mask,
1313 R600::OpName::update_pred,
1314 R600::OpName::write,
1315 R600::OpName::omod,
1316 R600::OpName::dst_rel,
1317 R600::OpName::clamp,
1318 R600::OpName::src0_neg,
1319 R600::OpName::src0_rel,
1320 R600::OpName::src0_abs,
1321 R600::OpName::src0_sel,
1322 R600::OpName::src1_neg,
1323 R600::OpName::src1_rel,
1324 R600::OpName::src1_abs,
1325 R600::OpName::src1_sel,
1326 };
1327
1328 MachineOperand &MO = MI->getOperand(getOperandIdx(MI->getOpcode(),
1329 getSlotedOps(R600::OpName::pred_sel, Slot)));
1330 MIB->getOperand(getOperandIdx(Opcode, R600::OpName::pred_sel))
1331 .setReg(MO.getReg());
1332
1333 for (unsigned i = 0; i < 14; i++) {
1334 MachineOperand &MO = MI->getOperand(
1335 getOperandIdx(MI->getOpcode(), getSlotedOps(Operands[i], Slot)));
1336 assert (MO.isImm());
1337 setImmOperand(*MIB, Operands[i], MO.getImm());
1338 }
1339 MIB->getOperand(20).setImm(0);
1340 return MIB;
1341}
1342
1343MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB,
1344 MachineBasicBlock::iterator I,
1345 unsigned DstReg,
1346 uint64_t Imm) const {
1347 MachineInstr *MovImm = buildDefaultInstruction(BB, I, R600::MOV, DstReg,
1348 R600::ALU_LITERAL_X);
1349 setImmOperand(*MovImm, R600::OpName::literal, Imm);
1350 return MovImm;
1351}
1352
1353MachineInstr *R600InstrInfo::buildMovInstr(MachineBasicBlock *MBB,
1354 MachineBasicBlock::iterator I,
1355 unsigned DstReg, unsigned SrcReg) const {
1356 return buildDefaultInstruction(*MBB, I, R600::MOV, DstReg, SrcReg);
1357}
1358
1359int R600InstrInfo::getOperandIdx(const MachineInstr &MI, unsigned Op) const {
1360 return getOperandIdx(MI.getOpcode(), Op);
1361}
1362
1363int R600InstrInfo::getOperandIdx(unsigned Opcode, unsigned Op) const {
1364 return R600::getNamedOperandIdx(Opcode, Op);
1365}
1366
1367void R600InstrInfo::setImmOperand(MachineInstr &MI, unsigned Op,
1368 int64_t Imm) const {
1369 int Idx = getOperandIdx(MI, Op);
1370 assert(Idx != -1 && "Operand not supported for this instruction.");
1371 assert(MI.getOperand(Idx).isImm());
1372 MI.getOperand(Idx).setImm(Imm);
1373}
1374
1375//===----------------------------------------------------------------------===//
1376// Instruction flag getters/setters
1377//===----------------------------------------------------------------------===//
1378
1379MachineOperand &R600InstrInfo::getFlagOp(MachineInstr &MI, unsigned SrcIdx,
1380 unsigned Flag) const {
1381 unsigned TargetFlags = get(MI.getOpcode()).TSFlags;
1382 int FlagIndex = 0;
1383 if (Flag != 0) {
1384 // If we pass something other than the default value of Flag to this
1385 // function, it means we want to set a flag on an instruction
1386 // that uses native encoding.
1387 assert(HAS_NATIVE_OPERANDS(TargetFlags));
1388 bool IsOP3 = (TargetFlags & R600_InstFlag::OP3) == R600_InstFlag::OP3;
1389 switch (Flag) {
1390 case MO_FLAG_CLAMP:
1391 FlagIndex = getOperandIdx(MI, R600::OpName::clamp);
1392 break;
1393 case MO_FLAG_MASK:
1394 FlagIndex = getOperandIdx(MI, R600::OpName::write);
1395 break;
1396 case MO_FLAG_NOT_LAST:
1397 case MO_FLAG_LAST:
1398 FlagIndex = getOperandIdx(MI, R600::OpName::last);
1399 break;
1400 case MO_FLAG_NEG:
1401 switch (SrcIdx) {
1402 case 0:
1403 FlagIndex = getOperandIdx(MI, R600::OpName::src0_neg);
1404 break;
1405 case 1:
1406 FlagIndex = getOperandIdx(MI, R600::OpName::src1_neg);
1407 break;
1408 case 2:
1409 FlagIndex = getOperandIdx(MI, R600::OpName::src2_neg);
1410 break;
1411 }
1412 break;
1413
1414 case MO_FLAG_ABS:
1415 assert(!IsOP3 && "Cannot set absolute value modifier for OP3 "
1416 "instructions.");
1417 (void)IsOP3;
1418 switch (SrcIdx) {
1419 case 0:
1420 FlagIndex = getOperandIdx(MI, R600::OpName::src0_abs);
1421 break;
1422 case 1:
1423 FlagIndex = getOperandIdx(MI, R600::OpName::src1_abs);
1424 break;
1425 }
1426 break;
1427
1428 default:
1429 FlagIndex = -1;
1430 break;
1431 }
1432 assert(FlagIndex != -1 && "Flag not supported for this instruction");
1433 } else {
1434 FlagIndex = GET_FLAG_OPERAND_IDX(TargetFlags);
1435 assert(FlagIndex != 0 &&
1436 "Instruction flags not supported for this instruction");
1437 }
1438
1439 MachineOperand &FlagOp = MI.getOperand(FlagIndex);
1440 assert(FlagOp.isImm());
1441 return FlagOp;
1442}
1443
1444void R600InstrInfo::addFlag(MachineInstr &MI, unsigned Operand,
1445 unsigned Flag) const {
1446 unsigned TargetFlags = get(MI.getOpcode()).TSFlags;
1447 if (Flag == 0) {
1448 return;
1449 }
1450 if (HAS_NATIVE_OPERANDS(TargetFlags)) {
1451 MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
1452 if (Flag == MO_FLAG_NOT_LAST) {
1453 clearFlag(MI, Operand, MO_FLAG_LAST);
1454 } else if (Flag == MO_FLAG_MASK) {
1455 clearFlag(MI, Operand, Flag);
1456 } else {
1457 FlagOp.setImm(1);
1458 }
1459 } else {
1460 MachineOperand &FlagOp = getFlagOp(MI, Operand);
1461 FlagOp.setImm(FlagOp.getImm() | (Flag << (NUM_MO_FLAGS * Operand)));
1462 }
1463}
1464
1465void R600InstrInfo::clearFlag(MachineInstr &MI, unsigned Operand,
1466 unsigned Flag) const {
1467 unsigned TargetFlags = get(MI.getOpcode()).TSFlags;
1468 if (HAS_NATIVE_OPERANDS(TargetFlags)) {
1469 MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
1470 FlagOp.setImm(0);
1471 } else {
1472 MachineOperand &FlagOp = getFlagOp(MI);
1473 unsigned InstFlags = FlagOp.getImm();
1474 InstFlags &= ~(Flag << (NUM_MO_FLAGS * Operand));
1475 FlagOp.setImm(InstFlags);
1476 }
1477}
1478
1479unsigned R600InstrInfo::getAddressSpaceForPseudoSourceKind(
1480 unsigned Kind) const {
1481 switch (Kind) {
1482 case PseudoSourceValue::Stack:
1483 case PseudoSourceValue::FixedStack:
1484 return AMDGPUAS::PRIVATE_ADDRESS;
1485 case PseudoSourceValue::ConstantPool:
1486 case PseudoSourceValue::GOT:
1487 case PseudoSourceValue::JumpTable:
1488 case PseudoSourceValue::GlobalValueCallEntry:
1489 case PseudoSourceValue::ExternalSymbolCallEntry:
1490 case PseudoSourceValue::TargetCustom:
1491 return AMDGPUAS::CONSTANT_ADDRESS;
1492 }
1493
1494 llvm_unreachable("Invalid pseudo source kind");
1495}