Bug Summary

File: lib/Target/AMDGPU/R600InstrInfo.cpp
Warning: line 570, column 10
3rd function call argument is an uninitialized value
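
The analyzer's path below shows 'ConstCount' being declared in R600InstrInfo::fitsReadPortLimitations without an initial value, the loop that would have written it (via ExtractSrcs) being skipped, and the variable then being passed as the third argument to isConstCompatible at line 570. A minimal, self-contained sketch of the same defect pattern follows; every name in it is hypothetical and only mirrors the shape of the flagged code:

  #include <vector>

  // Stand-in for ExtractSrcs: the only place the out-parameter is written.
  static void extractCount(int Value, unsigned &Count) {
    Count = Value > 0 ? 1u : 0u;
  }

  // Stand-in for isConstCompatible: reads its argument.
  static bool compatible(unsigned Count) { return Count <= 2; }

  int main() {
    std::vector<int> Group;   // empty, like an empty instruction group
    unsigned Count;           // declared without an initial value
    for (int V : Group)
      extractCount(V, Count); // loop body never runs, so Count is never written
    // Count is read while still uninitialized, mirroring the warning above.
    return compatible(Count) ? 0 : 1;
  }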

Annotated Source Code

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name R600InstrInfo.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-eagerly-assume -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-7/lib/clang/7.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-7~svn338205/build-llvm/lib/Target/AMDGPU -I /build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AMDGPU -I /build/llvm-toolchain-snapshot-7~svn338205/build-llvm/include -I /build/llvm-toolchain-snapshot-7~svn338205/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/c++/8 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/x86_64-linux-gnu/c++/8 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/x86_64-linux-gnu/c++/8 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/8/../../../../include/c++/8/backward -internal-isystem /usr/include/clang/7.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-7/lib/clang/7.0.0/include -internal-externc-isystem /usr/lib/gcc/x86_64-linux-gnu/8/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-7~svn338205/build-llvm/lib/Target/AMDGPU -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-07-29-043837-17923-1 -x c++ /build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AMDGPU/R600InstrInfo.cpp -faddrsig
1//===-- R600InstrInfo.cpp - R600 Instruction Information ------------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10/// \file
11/// R600 Implementation of TargetInstrInfo.
12//
13//===----------------------------------------------------------------------===//
14
15#include "R600InstrInfo.h"
16#include "AMDGPU.h"
17#include "AMDGPUInstrInfo.h"
18#include "AMDGPUSubtarget.h"
19#include "R600Defines.h"
20#include "R600FrameLowering.h"
21#include "R600RegisterInfo.h"
22#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
23#include "Utils/AMDGPUBaseInfo.h"
24#include "llvm/ADT/BitVector.h"
25#include "llvm/ADT/SmallSet.h"
26#include "llvm/ADT/SmallVector.h"
27#include "llvm/CodeGen/MachineBasicBlock.h"
28#include "llvm/CodeGen/MachineFrameInfo.h"
29#include "llvm/CodeGen/MachineFunction.h"
30#include "llvm/CodeGen/MachineInstr.h"
31#include "llvm/CodeGen/MachineInstrBuilder.h"
32#include "llvm/CodeGen/MachineOperand.h"
33#include "llvm/CodeGen/MachineRegisterInfo.h"
34#include "llvm/CodeGen/TargetRegisterInfo.h"
35#include "llvm/CodeGen/TargetSubtargetInfo.h"
36#include "llvm/Support/ErrorHandling.h"
37#include <algorithm>
38#include <cassert>
39#include <cstdint>
40#include <cstring>
41#include <iterator>
42#include <utility>
43#include <vector>
44
45using namespace llvm;
46
47#define GET_INSTRINFO_CTOR_DTOR
48#include "R600GenDFAPacketizer.inc"
49
50#define GET_INSTRINFO_CTOR_DTOR
51#define GET_INSTRMAP_INFO
52#define GET_INSTRINFO_NAMED_OPS
53#include "R600GenInstrInfo.inc"
54
55R600InstrInfo::R600InstrInfo(const R600Subtarget &ST)
56 : R600GenInstrInfo(-1, -1), RI(), ST(ST) {}
57
58bool R600InstrInfo::isVector(const MachineInstr &MI) const {
59 return get(MI.getOpcode()).TSFlags & R600_InstFlag::VECTOR;
60}
61
62void R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
63 MachineBasicBlock::iterator MI,
64 const DebugLoc &DL, unsigned DestReg,
65 unsigned SrcReg, bool KillSrc) const {
66 unsigned VectorComponents = 0;
67 if ((R600::R600_Reg128RegClass.contains(DestReg) ||
68 R600::R600_Reg128VerticalRegClass.contains(DestReg)) &&
69 (R600::R600_Reg128RegClass.contains(SrcReg) ||
70 R600::R600_Reg128VerticalRegClass.contains(SrcReg))) {
71 VectorComponents = 4;
72 } else if((R600::R600_Reg64RegClass.contains(DestReg) ||
73 R600::R600_Reg64VerticalRegClass.contains(DestReg)) &&
74 (R600::R600_Reg64RegClass.contains(SrcReg) ||
75 R600::R600_Reg64VerticalRegClass.contains(SrcReg))) {
76 VectorComponents = 2;
77 }
78
79 if (VectorComponents > 0) {
80 for (unsigned I = 0; I < VectorComponents; I++) {
81 unsigned SubRegIndex = AMDGPURegisterInfo::getSubRegFromChannel(I);
82 buildDefaultInstruction(MBB, MI, R600::MOV,
83 RI.getSubReg(DestReg, SubRegIndex),
84 RI.getSubReg(SrcReg, SubRegIndex))
85 .addReg(DestReg,
86 RegState::Define | RegState::Implicit);
87 }
88 } else {
89 MachineInstr *NewMI = buildDefaultInstruction(MBB, MI, R600::MOV,
90 DestReg, SrcReg);
91 NewMI->getOperand(getOperandIdx(*NewMI, R600::OpName::src0))
92 .setIsKill(KillSrc);
93 }
94}
95
96/// \returns true if \p MBBI can be moved into a new basic block.
97bool R600InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
98 MachineBasicBlock::iterator MBBI) const {
99 for (MachineInstr::const_mop_iterator I = MBBI->operands_begin(),
100 E = MBBI->operands_end(); I != E; ++I) {
101 if (I->isReg() && !TargetRegisterInfo::isVirtualRegister(I->getReg()) &&
102 I->isUse() && RI.isPhysRegLiveAcrossClauses(I->getReg()))
103 return false;
104 }
105 return true;
106}
107
108bool R600InstrInfo::isMov(unsigned Opcode) const {
109 switch(Opcode) {
110 default:
111 return false;
112 case R600::MOV:
113 case R600::MOV_IMM_F32:
114 case R600::MOV_IMM_I32:
115 return true;
116 }
117}
118
119bool R600InstrInfo::isReductionOp(unsigned Opcode) const {
120 return false;
121}
122
123bool R600InstrInfo::isCubeOp(unsigned Opcode) const {
124 switch(Opcode) {
125 default: return false;
126 case R600::CUBE_r600_pseudo:
127 case R600::CUBE_r600_real:
128 case R600::CUBE_eg_pseudo:
129 case R600::CUBE_eg_real:
130 return true;
131 }
132}
133
134bool R600InstrInfo::isALUInstr(unsigned Opcode) const {
135 unsigned TargetFlags = get(Opcode).TSFlags;
136
137 return (TargetFlags & R600_InstFlag::ALU_INST);
138}
139
140bool R600InstrInfo::hasInstrModifiers(unsigned Opcode) const {
141 unsigned TargetFlags = get(Opcode).TSFlags;
142
143 return ((TargetFlags & R600_InstFlag::OP1) |
144 (TargetFlags & R600_InstFlag::OP2) |
145 (TargetFlags & R600_InstFlag::OP3));
146}
147
148bool R600InstrInfo::isLDSInstr(unsigned Opcode) const {
149 unsigned TargetFlags = get(Opcode).TSFlags;
150
151 return ((TargetFlags & R600_InstFlag::LDS_1A) |
152 (TargetFlags & R600_InstFlag::LDS_1A1D) |
153 (TargetFlags & R600_InstFlag::LDS_1A2D));
154}
155
156bool R600InstrInfo::isLDSRetInstr(unsigned Opcode) const {
157 return isLDSInstr(Opcode) && getOperandIdx(Opcode, R600::OpName::dst) != -1;
158}
159
160bool R600InstrInfo::canBeConsideredALU(const MachineInstr &MI) const {
161 if (isALUInstr(MI.getOpcode()))
162 return true;
163 if (isVector(MI) || isCubeOp(MI.getOpcode()))
164 return true;
165 switch (MI.getOpcode()) {
166 case R600::PRED_X:
167 case R600::INTERP_PAIR_XY:
168 case R600::INTERP_PAIR_ZW:
169 case R600::INTERP_VEC_LOAD:
170 case R600::COPY:
171 case R600::DOT_4:
172 return true;
173 default:
174 return false;
175 }
176}
177
178bool R600InstrInfo::isTransOnly(unsigned Opcode) const {
179 if (ST.hasCaymanISA())
180 return false;
181 return (get(Opcode).getSchedClass() == R600::Sched::TransALU);
182}
183
184bool R600InstrInfo::isTransOnly(const MachineInstr &MI) const {
185 return isTransOnly(MI.getOpcode());
186}
187
188bool R600InstrInfo::isVectorOnly(unsigned Opcode) const {
189 return (get(Opcode).getSchedClass() == R600::Sched::VecALU);
190}
191
192bool R600InstrInfo::isVectorOnly(const MachineInstr &MI) const {
193 return isVectorOnly(MI.getOpcode());
194}
195
196bool R600InstrInfo::isExport(unsigned Opcode) const {
197 return (get(Opcode).TSFlags & R600_InstFlag::IS_EXPORT);
198}
199
200bool R600InstrInfo::usesVertexCache(unsigned Opcode) const {
201 return ST.hasVertexCache() && IS_VTX(get(Opcode));
202}
203
204bool R600InstrInfo::usesVertexCache(const MachineInstr &MI) const {
205 const MachineFunction *MF = MI.getParent()->getParent();
206 return !AMDGPU::isCompute(MF->getFunction().getCallingConv()) &&
207 usesVertexCache(MI.getOpcode());
208}
209
210bool R600InstrInfo::usesTextureCache(unsigned Opcode) const {
211 return (!ST.hasVertexCache() && IS_VTX(get(Opcode))) || IS_TEX(get(Opcode));
212}
213
214bool R600InstrInfo::usesTextureCache(const MachineInstr &MI) const {
215 const MachineFunction *MF = MI.getParent()->getParent();
216 return (AMDGPU::isCompute(MF->getFunction().getCallingConv()) &&
217 usesVertexCache(MI.getOpcode())) ||
218 usesTextureCache(MI.getOpcode());
219}
220
221bool R600InstrInfo::mustBeLastInClause(unsigned Opcode) const {
222 switch (Opcode) {
223 case R600::KILLGT:
224 case R600::GROUP_BARRIER:
225 return true;
226 default:
227 return false;
228 }
229}
230
231bool R600InstrInfo::usesAddressRegister(MachineInstr &MI) const {
232 return MI.findRegisterUseOperandIdx(R600::AR_X) != -1;
233}
234
235bool R600InstrInfo::definesAddressRegister(MachineInstr &MI) const {
236 return MI.findRegisterDefOperandIdx(R600::AR_X) != -1;
237}
238
239bool R600InstrInfo::readsLDSSrcReg(const MachineInstr &MI) const {
240 if (!isALUInstr(MI.getOpcode())) {
241 return false;
242 }
243 for (MachineInstr::const_mop_iterator I = MI.operands_begin(),
244 E = MI.operands_end();
245 I != E; ++I) {
246 if (!I->isReg() || !I->isUse() ||
247 TargetRegisterInfo::isVirtualRegister(I->getReg()))
248 continue;
249
250 if (R600::R600_LDS_SRC_REGRegClass.contains(I->getReg()))
251 return true;
252 }
253 return false;
254}
255
256int R600InstrInfo::getSelIdx(unsigned Opcode, unsigned SrcIdx) const {
257 static const unsigned SrcSelTable[][2] = {
258 {R600::OpName::src0, R600::OpName::src0_sel},
259 {R600::OpName::src1, R600::OpName::src1_sel},
260 {R600::OpName::src2, R600::OpName::src2_sel},
261 {R600::OpName::src0_X, R600::OpName::src0_sel_X},
262 {R600::OpName::src0_Y, R600::OpName::src0_sel_Y},
263 {R600::OpName::src0_Z, R600::OpName::src0_sel_Z},
264 {R600::OpName::src0_W, R600::OpName::src0_sel_W},
265 {R600::OpName::src1_X, R600::OpName::src1_sel_X},
266 {R600::OpName::src1_Y, R600::OpName::src1_sel_Y},
267 {R600::OpName::src1_Z, R600::OpName::src1_sel_Z},
268 {R600::OpName::src1_W, R600::OpName::src1_sel_W}
269 };
270
271 for (const auto &Row : SrcSelTable) {
272 if (getOperandIdx(Opcode, Row[0]) == (int)SrcIdx) {
273 return getOperandIdx(Opcode, Row[1]);
274 }
275 }
276 return -1;
277}
278
279SmallVector<std::pair<MachineOperand *, int64_t>, 3>
280R600InstrInfo::getSrcs(MachineInstr &MI) const {
281 SmallVector<std::pair<MachineOperand *, int64_t>, 3> Result;
282
283 if (MI.getOpcode() == R600::DOT_4) {
284 static const unsigned OpTable[8][2] = {
285 {R600::OpName::src0_X, R600::OpName::src0_sel_X},
286 {R600::OpName::src0_Y, R600::OpName::src0_sel_Y},
287 {R600::OpName::src0_Z, R600::OpName::src0_sel_Z},
288 {R600::OpName::src0_W, R600::OpName::src0_sel_W},
289 {R600::OpName::src1_X, R600::OpName::src1_sel_X},
290 {R600::OpName::src1_Y, R600::OpName::src1_sel_Y},
291 {R600::OpName::src1_Z, R600::OpName::src1_sel_Z},
292 {R600::OpName::src1_W, R600::OpName::src1_sel_W},
293 };
294
295 for (unsigned j = 0; j < 8; j++) {
296 MachineOperand &MO =
297 MI.getOperand(getOperandIdx(MI.getOpcode(), OpTable[j][0]));
298 unsigned Reg = MO.getReg();
299 if (Reg == R600::ALU_CONST) {
300 MachineOperand &Sel =
301 MI.getOperand(getOperandIdx(MI.getOpcode(), OpTable[j][1]));
302 Result.push_back(std::make_pair(&MO, Sel.getImm()));
303 continue;
304 }
305
306 }
307 return Result;
308 }
309
310 static const unsigned OpTable[3][2] = {
311 {R600::OpName::src0, R600::OpName::src0_sel},
312 {R600::OpName::src1, R600::OpName::src1_sel},
313 {R600::OpName::src2, R600::OpName::src2_sel},
314 };
315
316 for (unsigned j = 0; j < 3; j++) {
317 int SrcIdx = getOperandIdx(MI.getOpcode(), OpTable[j][0]);
318 if (SrcIdx < 0)
319 break;
320 MachineOperand &MO = MI.getOperand(SrcIdx);
321 unsigned Reg = MO.getReg();
322 if (Reg == R600::ALU_CONST) {
323 MachineOperand &Sel =
324 MI.getOperand(getOperandIdx(MI.getOpcode(), OpTable[j][1]));
325 Result.push_back(std::make_pair(&MO, Sel.getImm()));
326 continue;
327 }
328 if (Reg == R600::ALU_LITERAL_X) {
329 MachineOperand &Operand =
330 MI.getOperand(getOperandIdx(MI.getOpcode(), R600::OpName::literal));
331 if (Operand.isImm()) {
332 Result.push_back(std::make_pair(&MO, Operand.getImm()));
333 continue;
334 }
335 assert(Operand.isGlobal());
336 }
337 Result.push_back(std::make_pair(&MO, 0));
338 }
339 return Result;
340}
341
342std::vector<std::pair<int, unsigned>>
343R600InstrInfo::ExtractSrcs(MachineInstr &MI,
344 const DenseMap<unsigned, unsigned> &PV,
345 unsigned &ConstCount) const {
346 ConstCount = 0;
347 const std::pair<int, unsigned> DummyPair(-1, 0);
348 std::vector<std::pair<int, unsigned>> Result;
349 unsigned i = 0;
350 for (const auto &Src : getSrcs(MI)) {
351 ++i;
352 unsigned Reg = Src.first->getReg();
353 int Index = RI.getEncodingValue(Reg) & 0xff;
354 if (Reg == R600::OQAP) {
355 Result.push_back(std::make_pair(Index, 0U));
356 }
357 if (PV.find(Reg) != PV.end()) {
358 // 255 is used to tell it's a PS/PV reg
359 Result.push_back(std::make_pair(255, 0U));
360 continue;
361 }
362 if (Index > 127) {
363 ConstCount++;
364 Result.push_back(DummyPair);
365 continue;
366 }
367 unsigned Chan = RI.getHWRegChan(Reg);
368 Result.push_back(std::make_pair(Index, Chan));
369 }
370 for (; i < 3; ++i)
371 Result.push_back(DummyPair);
372 return Result;
373}
374
375static std::vector<std::pair<int, unsigned>>
376Swizzle(std::vector<std::pair<int, unsigned>> Src,
377 R600InstrInfo::BankSwizzle Swz) {
378 if (Src[0] == Src[1])
379 Src[1].first = -1;
380 switch (Swz) {
381 case R600InstrInfo::ALU_VEC_012_SCL_210:
382 break;
383 case R600InstrInfo::ALU_VEC_021_SCL_122:
384 std::swap(Src[1], Src[2]);
385 break;
386 case R600InstrInfo::ALU_VEC_102_SCL_221:
387 std::swap(Src[0], Src[1]);
388 break;
389 case R600InstrInfo::ALU_VEC_120_SCL_212:
390 std::swap(Src[0], Src[1]);
391 std::swap(Src[0], Src[2]);
392 break;
393 case R600InstrInfo::ALU_VEC_201:
394 std::swap(Src[0], Src[2]);
395 std::swap(Src[0], Src[1]);
396 break;
397 case R600InstrInfo::ALU_VEC_210:
398 std::swap(Src[0], Src[2]);
399 break;
400 }
401 return Src;
402}
403
404static unsigned getTransSwizzle(R600InstrInfo::BankSwizzle Swz, unsigned Op) {
405 switch (Swz) {
406 case R600InstrInfo::ALU_VEC_012_SCL_210: {
407 unsigned Cycles[3] = { 2, 1, 0};
408 return Cycles[Op];
409 }
410 case R600InstrInfo::ALU_VEC_021_SCL_122: {
411 unsigned Cycles[3] = { 1, 2, 2};
412 return Cycles[Op];
413 }
414 case R600InstrInfo::ALU_VEC_120_SCL_212: {
415 unsigned Cycles[3] = { 2, 1, 2};
416 return Cycles[Op];
417 }
418 case R600InstrInfo::ALU_VEC_102_SCL_221: {
419 unsigned Cycles[3] = { 2, 2, 1};
420 return Cycles[Op];
421 }
422 default:
423 llvm_unreachable("Wrong Swizzle for Trans Slot")::llvm::llvm_unreachable_internal("Wrong Swizzle for Trans Slot"
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AMDGPU/R600InstrInfo.cpp"
, 423)
;
424 }
425}
426
427/// returns how many MIs (whose inputs are represented by IGSrcs) can be packed
428/// in the same Instruction Group while meeting read port limitations given a
429/// Swz swizzle sequence.
430unsigned R600InstrInfo::isLegalUpTo(
431 const std::vector<std::vector<std::pair<int, unsigned>>> &IGSrcs,
432 const std::vector<R600InstrInfo::BankSwizzle> &Swz,
433 const std::vector<std::pair<int, unsigned>> &TransSrcs,
434 R600InstrInfo::BankSwizzle TransSwz) const {
435 int Vector[4][3];
436 memset(Vector, -1, sizeof(Vector));
437 for (unsigned i = 0, e = IGSrcs.size(); i < e; i++) {
438 const std::vector<std::pair<int, unsigned>> &Srcs =
439 Swizzle(IGSrcs[i], Swz[i]);
440 for (unsigned j = 0; j < 3; j++) {
441 const std::pair<int, unsigned> &Src = Srcs[j];
442 if (Src.first < 0 || Src.first == 255)
443 continue;
444 if (Src.first == GET_REG_INDEX(RI.getEncodingValue(R600::OQAP))) {
445 if (Swz[i] != R600InstrInfo::ALU_VEC_012_SCL_210 &&
446 Swz[i] != R600InstrInfo::ALU_VEC_021_SCL_122) {
447 // The value from output queue A (denoted by register OQAP) can
448 // only be fetched during the first cycle.
449 return false;
450 }
451 // OQAP does not count towards the normal read port restrictions
452 continue;
453 }
454 if (Vector[Src.second][j] < 0)
455 Vector[Src.second][j] = Src.first;
456 if (Vector[Src.second][j] != Src.first)
457 return i;
458 }
459 }
460 // Now check Trans Alu
461 for (unsigned i = 0, e = TransSrcs.size(); i < e; ++i) {
462 const std::pair<int, unsigned> &Src = TransSrcs[i];
463 unsigned Cycle = getTransSwizzle(TransSwz, i);
464 if (Src.first < 0)
465 continue;
466 if (Src.first == 255)
467 continue;
468 if (Vector[Src.second][Cycle] < 0)
469 Vector[Src.second][Cycle] = Src.first;
470 if (Vector[Src.second][Cycle] != Src.first)
471 return IGSrcs.size() - 1;
472 }
473 return IGSrcs.size();
474}
475
476/// Given a swizzle sequence SwzCandidate and an index Idx, returns the next
477/// (in lexicographic order) swizzle sequence, assuming that all swizzles after
478/// Idx can be skipped.
479static bool
480NextPossibleSolution(
481 std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
482 unsigned Idx) {
483 assert(Idx < SwzCandidate.size());
484 int ResetIdx = Idx;
485 while (ResetIdx > -1 && SwzCandidate[ResetIdx] == R600InstrInfo::ALU_VEC_210)
486 ResetIdx --;
487 for (unsigned i = ResetIdx + 1, e = SwzCandidate.size(); i < e; i++) {
488 SwzCandidate[i] = R600InstrInfo::ALU_VEC_012_SCL_210;
489 }
490 if (ResetIdx == -1)
491 return false;
492 int NextSwizzle = SwzCandidate[ResetIdx] + 1;
493 SwzCandidate[ResetIdx] = (R600InstrInfo::BankSwizzle)NextSwizzle;
494 return true;
495}
496
497/// Enumerate all possible Swizzle sequences to find one that can meet all
498/// read port requirements.
499bool R600InstrInfo::FindSwizzleForVectorSlot(
500 const std::vector<std::vector<std::pair<int, unsigned>>> &IGSrcs,
501 std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
502 const std::vector<std::pair<int, unsigned>> &TransSrcs,
503 R600InstrInfo::BankSwizzle TransSwz) const {
504 unsigned ValidUpTo = 0;
505 do {
506 ValidUpTo = isLegalUpTo(IGSrcs, SwzCandidate, TransSrcs, TransSwz);
507 if (ValidUpTo == IGSrcs.size())
508 return true;
509 } while (NextPossibleSolution(SwzCandidate, ValidUpTo));
510 return false;
511}
512
513/// Instructions in the Trans slot can't read a gpr at cycle 0 if they also read
514/// a const, and can't read a gpr at cycle 1 if they read 2 consts.
515static bool
516isConstCompatible(R600InstrInfo::BankSwizzle TransSwz,
517 const std::vector<std::pair<int, unsigned>> &TransOps,
518 unsigned ConstCount) {
519 // TransALU can't read 3 constants
520 if (ConstCount > 2)
521 return false;
522 for (unsigned i = 0, e = TransOps.size(); i < e; ++i) {
523 const std::pair<int, unsigned> &Src = TransOps[i];
524 unsigned Cycle = getTransSwizzle(TransSwz, i);
525 if (Src.first < 0)
526 continue;
527 if (ConstCount > 0 && Cycle == 0)
528 return false;
529 if (ConstCount > 1 && Cycle == 1)
530 return false;
531 }
532 return true;
533}
534
535bool
536R600InstrInfo::fitsReadPortLimitations(const std::vector<MachineInstr *> &IG,
537 const DenseMap<unsigned, unsigned> &PV,
538 std::vector<BankSwizzle> &ValidSwizzle,
539 bool isLastAluTrans)
540 const {
541 // TODO: support shared src0 - src1 operand
542
543 std::vector<std::vector<std::pair<int, unsigned>>> IGSrcs;
544 ValidSwizzle.clear();
545 unsigned ConstCount;
1. 'ConstCount' declared without an initial value
546 BankSwizzle TransBS = ALU_VEC_012_SCL_210;
547 for (unsigned i = 0, e = IG.size(); i < e; ++i) {
2. Assuming 'i' is >= 'e'
3. Loop condition is false. Execution continues on line 554
548 IGSrcs.push_back(ExtractSrcs(*IG[i], PV, ConstCount));
549 unsigned Op = getOperandIdx(IG[i]->getOpcode(),
550 R600::OpName::bank_swizzle);
551 ValidSwizzle.push_back( (R600InstrInfo::BankSwizzle)
552 IG[i]->getOperand(Op).getImm());
553 }
554 std::vector<std::pair<int, unsigned>> TransOps;
555 if (!isLastAluTrans)
4. Assuming 'isLastAluTrans' is not equal to 0
5. Taking false branch
556 return FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps, TransBS);
557
558 TransOps = std::move(IGSrcs.back());
559 IGSrcs.pop_back();
560 ValidSwizzle.pop_back();
561
562 static const R600InstrInfo::BankSwizzle TransSwz[] = {
563 ALU_VEC_012_SCL_210,
564 ALU_VEC_021_SCL_122,
565 ALU_VEC_120_SCL_212,
566 ALU_VEC_102_SCL_221
567 };
568 for (unsigned i = 0; i < 4; i++) {
6. Loop condition is true. Entering loop body
569 TransBS = TransSwz[i];
570 if (!isConstCompatible(TransBS, TransOps, ConstCount))
7. 3rd function call argument is an uninitialized value (a possible fix is sketched after this function)
571 continue;
572 bool Result = FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps,
573 TransBS);
574 if (Result) {
575 ValidSwizzle.push_back(TransBS);
576 return true;
577 }
578 }
579
580 return false;
581}
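
As referenced at step 7 above: a possible remedy (a sketch only, not necessarily how the upstream code was changed) is to give ConstCount a defined value at its declaration on line 545, so that the call to isConstCompatible at line 570 can never read an indeterminate value even when the instruction group IG is empty:

  unsigned ConstCount = 0;   // instead of: unsigned ConstCount;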
582
583bool
584R600InstrInfo::fitsConstReadLimitations(const std::vector<unsigned> &Consts)
585 const {
586 assert (Consts.size() <= 12 && "Too many operands in instructions group");
587 unsigned Pair1 = 0, Pair2 = 0;
588 for (unsigned i = 0, n = Consts.size(); i < n; ++i) {
589 unsigned ReadConstHalf = Consts[i] & 2;
590 unsigned ReadConstIndex = Consts[i] & (~3);
591 unsigned ReadHalfConst = ReadConstIndex | ReadConstHalf;
592 if (!Pair1) {
593 Pair1 = ReadHalfConst;
594 continue;
595 }
596 if (Pair1 == ReadHalfConst)
597 continue;
598 if (!Pair2) {
599 Pair2 = ReadHalfConst;
600 continue;
601 }
602 if (Pair2 != ReadHalfConst)
603 return false;
604 }
605 return true;
606}
607
608bool
609R600InstrInfo::fitsConstReadLimitations(const std::vector<MachineInstr *> &MIs)
610 const {
611 std::vector<unsigned> Consts;
612 SmallSet<int64_t, 4> Literals;
613 for (unsigned i = 0, n = MIs.size(); i < n; i++) {
614 MachineInstr &MI = *MIs[i];
615 if (!isALUInstr(MI.getOpcode()))
616 continue;
617
618 for (const auto &Src : getSrcs(MI)) {
619 if (Src.first->getReg() == R600::ALU_LITERAL_X)
620 Literals.insert(Src.second);
621 if (Literals.size() > 4)
622 return false;
623 if (Src.first->getReg() == R600::ALU_CONST)
624 Consts.push_back(Src.second);
625 if (R600::R600_KC0RegClass.contains(Src.first->getReg()) ||
626 R600::R600_KC1RegClass.contains(Src.first->getReg())) {
627 unsigned Index = RI.getEncodingValue(Src.first->getReg()) & 0xff;
628 unsigned Chan = RI.getHWRegChan(Src.first->getReg());
629 Consts.push_back((Index << 2) | Chan);
630 }
631 }
632 }
633 return fitsConstReadLimitations(Consts);
634}
635
636DFAPacketizer *
637R600InstrInfo::CreateTargetScheduleState(const TargetSubtargetInfo &STI) const {
638 const InstrItineraryData *II = STI.getInstrItineraryData();
639 return static_cast<const R600Subtarget &>(STI).createDFAPacketizer(II);
640}
641
642static bool
643isPredicateSetter(unsigned Opcode) {
644 switch (Opcode) {
645 case R600::PRED_X:
646 return true;
647 default:
648 return false;
649 }
650}
651
652static MachineInstr *
653findFirstPredicateSetterFrom(MachineBasicBlock &MBB,
654 MachineBasicBlock::iterator I) {
655 while (I != MBB.begin()) {
656 --I;
657 MachineInstr &MI = *I;
658 if (isPredicateSetter(MI.getOpcode()))
659 return &MI;
660 }
661
662 return nullptr;
663}
664
665static
666bool isJump(unsigned Opcode) {
667 return Opcode == R600::JUMP || Opcode == R600::JUMP_COND;
668}
669
670static bool isBranch(unsigned Opcode) {
671 return Opcode == R600::BRANCH || Opcode == R600::BRANCH_COND_i32 ||
672 Opcode == R600::BRANCH_COND_f32;
673}
674
675bool R600InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
676 MachineBasicBlock *&TBB,
677 MachineBasicBlock *&FBB,
678 SmallVectorImpl<MachineOperand> &Cond,
679 bool AllowModify) const {
680 // Most of the following comes from the ARM implementation of AnalyzeBranch
681
682 // If the block has no terminators, it just falls into the block after it.
683 MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
684 if (I == MBB.end())
685 return false;
686
687 // R600::BRANCH* instructions are only available after isel and are not
688 // handled
689 if (isBranch(I->getOpcode()))
690 return true;
691 if (!isJump(I->getOpcode())) {
692 return false;
693 }
694
695 // Remove successive JUMP
696 while (I != MBB.begin() && std::prev(I)->getOpcode() == R600::JUMP) {
697 MachineBasicBlock::iterator PriorI = std::prev(I);
698 if (AllowModify)
699 I->removeFromParent();
700 I = PriorI;
701 }
702 MachineInstr &LastInst = *I;
703
704 // If there is only one terminator instruction, process it.
705 unsigned LastOpc = LastInst.getOpcode();
706 if (I == MBB.begin() || !isJump((--I)->getOpcode())) {
707 if (LastOpc == R600::JUMP) {
708 TBB = LastInst.getOperand(0).getMBB();
709 return false;
710 } else if (LastOpc == R600::JUMP_COND) {
711 auto predSet = I;
712 while (!isPredicateSetter(predSet->getOpcode())) {
713 predSet = --I;
714 }
715 TBB = LastInst.getOperand(0).getMBB();
716 Cond.push_back(predSet->getOperand(1));
717 Cond.push_back(predSet->getOperand(2));
718 Cond.push_back(MachineOperand::CreateReg(R600::PRED_SEL_ONE, false));
719 return false;
720 }
721 return true; // Can't handle indirect branch.
722 }
723
724 // Get the instruction before it if it is a terminator.
725 MachineInstr &SecondLastInst = *I;
726 unsigned SecondLastOpc = SecondLastInst.getOpcode();
727
728 // If the block ends with a B and a Bcc, handle it.
729 if (SecondLastOpc == R600::JUMP_COND && LastOpc == R600::JUMP) {
730 auto predSet = --I;
731 while (!isPredicateSetter(predSet->getOpcode())) {
732 predSet = --I;
733 }
734 TBB = SecondLastInst.getOperand(0).getMBB();
735 FBB = LastInst.getOperand(0).getMBB();
736 Cond.push_back(predSet->getOperand(1));
737 Cond.push_back(predSet->getOperand(2));
738 Cond.push_back(MachineOperand::CreateReg(R600::PRED_SEL_ONE, false));
739 return false;
740 }
741
742 // Otherwise, can't handle this.
743 return true;
744}
745
746static
747MachineBasicBlock::iterator FindLastAluClause(MachineBasicBlock &MBB) {
748 for (MachineBasicBlock::reverse_iterator It = MBB.rbegin(), E = MBB.rend();
749 It != E; ++It) {
750 if (It->getOpcode() == R600::CF_ALU ||
751 It->getOpcode() == R600::CF_ALU_PUSH_BEFORE)
752 return It.getReverse();
753 }
754 return MBB.end();
755}
756
757unsigned R600InstrInfo::insertBranch(MachineBasicBlock &MBB,
758 MachineBasicBlock *TBB,
759 MachineBasicBlock *FBB,
760 ArrayRef<MachineOperand> Cond,
761 const DebugLoc &DL,
762 int *BytesAdded) const {
763 assert(TBB && "insertBranch must not be told to insert a fallthrough");
764 assert(!BytesAdded && "code size not handled");
765
766 if (!FBB) {
767 if (Cond.empty()) {
768 BuildMI(&MBB, DL, get(R600::JUMP)).addMBB(TBB);
769 return 1;
770 } else {
771 MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
772 assert(PredSet && "No previous predicate !");
773 addFlag(*PredSet, 0, MO_FLAG_PUSH);
774 PredSet->getOperand(2).setImm(Cond[1].getImm());
775
776 BuildMI(&MBB, DL, get(R600::JUMP_COND))
777 .addMBB(TBB)
778 .addReg(R600::PREDICATE_BIT, RegState::Kill);
779 MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
780 if (CfAlu == MBB.end())
781 return 1;
782 assert (CfAlu->getOpcode() == R600::CF_ALU);
783 CfAlu->setDesc(get(R600::CF_ALU_PUSH_BEFORE));
784 return 1;
785 }
786 } else {
787 MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
788 assert(PredSet && "No previous predicate !");
789 addFlag(*PredSet, 0, MO_FLAG_PUSH);
790 PredSet->getOperand(2).setImm(Cond[1].getImm());
791 BuildMI(&MBB, DL, get(R600::JUMP_COND))
792 .addMBB(TBB)
793 .addReg(R600::PREDICATE_BIT, RegState::Kill);
794 BuildMI(&MBB, DL, get(R600::JUMP)).addMBB(FBB);
795 MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
796 if (CfAlu == MBB.end())
797 return 2;
798 assert (CfAlu->getOpcode() == R600::CF_ALU);
799 CfAlu->setDesc(get(R600::CF_ALU_PUSH_BEFORE));
800 return 2;
801 }
802}
803
804unsigned R600InstrInfo::removeBranch(MachineBasicBlock &MBB,
805 int *BytesRemoved) const {
806 assert(!BytesRemoved && "code size not handled");
807
808 // Note : we leave PRED* instructions there.
809 // They may be needed when predicating instructions.
810
811 MachineBasicBlock::iterator I = MBB.end();
812
813 if (I == MBB.begin()) {
814 return 0;
815 }
816 --I;
817 switch (I->getOpcode()) {
818 default:
819 return 0;
820 case R600::JUMP_COND: {
821 MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
822 clearFlag(*predSet, 0, MO_FLAG_PUSH);
823 I->eraseFromParent();
824 MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
825 if (CfAlu == MBB.end())
826 break;
827 assert (CfAlu->getOpcode() == R600::CF_ALU_PUSH_BEFORE);
828 CfAlu->setDesc(get(R600::CF_ALU));
829 break;
830 }
831 case R600::JUMP:
832 I->eraseFromParent();
833 break;
834 }
835 I = MBB.end();
836
837 if (I == MBB.begin()) {
838 return 1;
839 }
840 --I;
841 switch (I->getOpcode()) {
842 // FIXME: only one case??
843 default:
844 return 1;
845 case R600::JUMP_COND: {
846 MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
847 clearFlag(*predSet, 0, MO_FLAG_PUSH);
848 I->eraseFromParent();
849 MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
850 if (CfAlu == MBB.end())
851 break;
852 assert (CfAlu->getOpcode() == R600::CF_ALU_PUSH_BEFORE);
853 CfAlu->setDesc(get(R600::CF_ALU));
854 break;
855 }
856 case R600::JUMP:
857 I->eraseFromParent();
858 break;
859 }
860 return 2;
861}
862
863bool R600InstrInfo::isPredicated(const MachineInstr &MI) const {
864 int idx = MI.findFirstPredOperandIdx();
865 if (idx < 0)
866 return false;
867
868 unsigned Reg = MI.getOperand(idx).getReg();
869 switch (Reg) {
870 default: return false;
871 case R600::PRED_SEL_ONE:
872 case R600::PRED_SEL_ZERO:
873 case R600::PREDICATE_BIT:
874 return true;
875 }
876}
877
878bool R600InstrInfo::isPredicable(const MachineInstr &MI) const {
879 // XXX: KILL* instructions can be predicated, but they must be the last
880 // instruction in a clause, so this means any instructions after them cannot
881 // be predicated. Until we have proper support for instruction clauses in the
882 // backend, we will mark KILL* instructions as unpredicable.
883
884 if (MI.getOpcode() == R600::KILLGT) {
885 return false;
886 } else if (MI.getOpcode() == R600::CF_ALU) {
887 // If the clause starts in the middle of the MBB then the MBB has more
888 // than a single clause; we are unable to predicate several clauses.
889 if (MI.getParent()->begin() != MachineBasicBlock::const_iterator(MI))
890 return false;
891 // TODO: We don't support KC merging atm
892 return MI.getOperand(3).getImm() == 0 && MI.getOperand(4).getImm() == 0;
893 } else if (isVector(MI)) {
894 return false;
895 } else {
896 return TargetInstrInfo::isPredicable(MI);
897 }
898}
899
900bool
901R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
902 unsigned NumCycles,
903 unsigned ExtraPredCycles,
904 BranchProbability Probability) const{
905 return true;
906}
907
908bool
909R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
910 unsigned NumTCycles,
911 unsigned ExtraTCycles,
912 MachineBasicBlock &FMBB,
913 unsigned NumFCycles,
914 unsigned ExtraFCycles,
915 BranchProbability Probability) const {
916 return true;
917}
918
919bool
920R600InstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
921 unsigned NumCycles,
922 BranchProbability Probability)
923 const {
924 return true;
925}
926
927bool
928R600InstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
929 MachineBasicBlock &FMBB) const {
930 return false;
931}
932
933bool
934R600InstrInfo::reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
935 MachineOperand &MO = Cond[1];
936 switch (MO.getImm()) {
937 case R600::PRED_SETE_INT:
938 MO.setImm(R600::PRED_SETNE_INT);
939 break;
940 case R600::PRED_SETNE_INT:
941 MO.setImm(R600::PRED_SETE_INT);
942 break;
943 case R600::PRED_SETE:
944 MO.setImm(R600::PRED_SETNE);
945 break;
946 case R600::PRED_SETNE:
947 MO.setImm(R600::PRED_SETE);
948 break;
949 default:
950 return true;
951 }
952
953 MachineOperand &MO2 = Cond[2];
954 switch (MO2.getReg()) {
955 case R600::PRED_SEL_ZERO:
956 MO2.setReg(R600::PRED_SEL_ONE);
957 break;
958 case R600::PRED_SEL_ONE:
959 MO2.setReg(R600::PRED_SEL_ZERO);
960 break;
961 default:
962 return true;
963 }
964 return false;
965}
966
967bool R600InstrInfo::DefinesPredicate(MachineInstr &MI,
968 std::vector<MachineOperand> &Pred) const {
969 return isPredicateSetter(MI.getOpcode());
970}
971
972bool R600InstrInfo::PredicateInstruction(MachineInstr &MI,
973 ArrayRef<MachineOperand> Pred) const {
974 int PIdx = MI.findFirstPredOperandIdx();
975
976 if (MI.getOpcode() == R600::CF_ALU) {
977 MI.getOperand(8).setImm(0);
978 return true;
979 }
980
981 if (MI.getOpcode() == R600::DOT_4) {
982 MI.getOperand(getOperandIdx(MI, R600::OpName::pred_sel_X))
983 .setReg(Pred[2].getReg());
984 MI.getOperand(getOperandIdx(MI, R600::OpName::pred_sel_Y))
985 .setReg(Pred[2].getReg());
986 MI.getOperand(getOperandIdx(MI, R600::OpName::pred_sel_Z))
987 .setReg(Pred[2].getReg());
988 MI.getOperand(getOperandIdx(MI, R600::OpName::pred_sel_W))
989 .setReg(Pred[2].getReg());
990 MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
991 MIB.addReg(R600::PREDICATE_BIT, RegState::Implicit);
992 return true;
993 }
994
995 if (PIdx != -1) {
996 MachineOperand &PMO = MI.getOperand(PIdx);
997 PMO.setReg(Pred[2].getReg());
998 MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
999 MIB.addReg(R600::PREDICATE_BIT, RegState::Implicit);
1000 return true;
1001 }
1002
1003 return false;
1004}
1005
1006unsigned int R600InstrInfo::getPredicationCost(const MachineInstr &) const {
1007 return 2;
1008}
1009
1010unsigned int R600InstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
1011 const MachineInstr &,
1012 unsigned *PredCost) const {
1013 if (PredCost)
1014 *PredCost = 2;
1015 return 2;
1016}
1017
1018unsigned R600InstrInfo::calculateIndirectAddress(unsigned RegIndex,
1019 unsigned Channel) const {
1020 assert(Channel == 0);
1021 return RegIndex;
1022}
1023
1024bool R600InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
1025 switch (MI.getOpcode()) {
1026 default: {
1027 MachineBasicBlock *MBB = MI.getParent();
1028 int OffsetOpIdx =
1029 R600::getNamedOperandIdx(MI.getOpcode(), R600::OpName::addr);
1030 // addr is a custom operand with multiple MI operands, and only the
1031 // first MI operand is given a name.
1032 int RegOpIdx = OffsetOpIdx + 1;
1033 int ChanOpIdx =
1034 R600::getNamedOperandIdx(MI.getOpcode(), R600::OpName::chan);
1035 if (isRegisterLoad(MI)) {
1036 int DstOpIdx =
1037 R600::getNamedOperandIdx(MI.getOpcode(), R600::OpName::dst);
1038 unsigned RegIndex = MI.getOperand(RegOpIdx).getImm();
1039 unsigned Channel = MI.getOperand(ChanOpIdx).getImm();
1040 unsigned Address = calculateIndirectAddress(RegIndex, Channel);
1041 unsigned OffsetReg = MI.getOperand(OffsetOpIdx).getReg();
1042 if (OffsetReg == R600::INDIRECT_BASE_ADDR) {
1043 buildMovInstr(MBB, MI, MI.getOperand(DstOpIdx).getReg(),
1044 getIndirectAddrRegClass()->getRegister(Address));
1045 } else {
1046 buildIndirectRead(MBB, MI, MI.getOperand(DstOpIdx).getReg(), Address,
1047 OffsetReg);
1048 }
1049 } else if (isRegisterStore(MI)) {
1050 int ValOpIdx =
1051 R600::getNamedOperandIdx(MI.getOpcode(), R600::OpName::val);
1052 unsigned RegIndex = MI.getOperand(RegOpIdx).getImm();
1053 unsigned Channel = MI.getOperand(ChanOpIdx).getImm();
1054 unsigned Address = calculateIndirectAddress(RegIndex, Channel);
1055 unsigned OffsetReg = MI.getOperand(OffsetOpIdx).getReg();
1056 if (OffsetReg == R600::INDIRECT_BASE_ADDR) {
1057 buildMovInstr(MBB, MI, getIndirectAddrRegClass()->getRegister(Address),
1058 MI.getOperand(ValOpIdx).getReg());
1059 } else {
1060 buildIndirectWrite(MBB, MI, MI.getOperand(ValOpIdx).getReg(),
1061 calculateIndirectAddress(RegIndex, Channel),
1062 OffsetReg);
1063 }
1064 } else {
1065 return false;
1066 }
1067
1068 MBB->erase(MI);
1069 return true;
1070 }
1071 case R600::R600_EXTRACT_ELT_V2:
1072 case R600::R600_EXTRACT_ELT_V4:
1073 buildIndirectRead(MI.getParent(), MI, MI.getOperand(0).getReg(),
1074 RI.getHWRegIndex(MI.getOperand(1).getReg()), // Address
1075 MI.getOperand(2).getReg(),
1076 RI.getHWRegChan(MI.getOperand(1).getReg()));
1077 break;
1078 case R600::R600_INSERT_ELT_V2:
1079 case R600::R600_INSERT_ELT_V4:
1080 buildIndirectWrite(MI.getParent(), MI, MI.getOperand(2).getReg(), // Value
1081 RI.getHWRegIndex(MI.getOperand(1).getReg()), // Address
1082 MI.getOperand(3).getReg(), // Offset
1083 RI.getHWRegChan(MI.getOperand(1).getReg())); // Channel
1084 break;
1085 }
1086 MI.eraseFromParent();
1087 return true;
1088}
1089
1090void R600InstrInfo::reserveIndirectRegisters(BitVector &Reserved,
1091 const MachineFunction &MF,
1092 const R600RegisterInfo &TRI) const {
1093 const R600Subtarget &ST = MF.getSubtarget<R600Subtarget>();
1094 const R600FrameLowering *TFL = ST.getFrameLowering();
1095
1096 unsigned StackWidth = TFL->getStackWidth(MF);
1097 int End = getIndirectIndexEnd(MF);
1098
1099 if (End == -1)
1100 return;
1101
1102 for (int Index = getIndirectIndexBegin(MF); Index <= End; ++Index) {
1103 for (unsigned Chan = 0; Chan < StackWidth; ++Chan) {
1104 unsigned Reg = R600::R600_TReg32RegClass.getRegister((4 * Index) + Chan);
1105 TRI.reserveRegisterTuples(Reserved, Reg);
1106 }
1107 }
1108}
1109
1110const TargetRegisterClass *R600InstrInfo::getIndirectAddrRegClass() const {
1111 return &R600::R600_TReg32_XRegClass;
1112}
1113
1114MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
1115 MachineBasicBlock::iterator I,
1116 unsigned ValueReg, unsigned Address,
1117 unsigned OffsetReg) const {
1118 return buildIndirectWrite(MBB, I, ValueReg, Address, OffsetReg, 0);
1119}
1120
1121MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
1122 MachineBasicBlock::iterator I,
1123 unsigned ValueReg, unsigned Address,
1124 unsigned OffsetReg,
1125 unsigned AddrChan) const {
1126 unsigned AddrReg;
1127 switch (AddrChan) {
1128 default: llvm_unreachable("Invalid Channel");
1129 case 0: AddrReg = R600::R600_AddrRegClass.getRegister(Address); break;
1130 case 1: AddrReg = R600::R600_Addr_YRegClass.getRegister(Address); break;
1131 case 2: AddrReg = R600::R600_Addr_ZRegClass.getRegister(Address); break;
1132 case 3: AddrReg = R600::R600_Addr_WRegClass.getRegister(Address); break;
1133 }
1134 MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, R600::MOVA_INT_eg,
1135 R600::AR_X, OffsetReg);
1136 setImmOperand(*MOVA, R600::OpName::write, 0);
1137
1138 MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, R600::MOV,
1139 AddrReg, ValueReg)
1140 .addReg(R600::AR_X,
1141 RegState::Implicit | RegState::Kill);
1142 setImmOperand(*Mov, R600::OpName::dst_rel, 1);
1143 return Mov;
1144}
1145
1146MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
1147 MachineBasicBlock::iterator I,
1148 unsigned ValueReg, unsigned Address,
1149 unsigned OffsetReg) const {
1150 return buildIndirectRead(MBB, I, ValueReg, Address, OffsetReg, 0);
1151}
1152
1153MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
1154 MachineBasicBlock::iterator I,
1155 unsigned ValueReg, unsigned Address,
1156 unsigned OffsetReg,
1157 unsigned AddrChan) const {
1158 unsigned AddrReg;
1159 switch (AddrChan) {
1160 default: llvm_unreachable("Invalid Channel");
1161 case 0: AddrReg = R600::R600_AddrRegClass.getRegister(Address); break;
1162 case 1: AddrReg = R600::R600_Addr_YRegClass.getRegister(Address); break;
1163 case 2: AddrReg = R600::R600_Addr_ZRegClass.getRegister(Address); break;
1164 case 3: AddrReg = R600::R600_Addr_WRegClass.getRegister(Address); break;
1165 }
1166 MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, R600::MOVA_INT_eg,
1167 R600::AR_X,
1168 OffsetReg);
1169 setImmOperand(*MOVA, R600::OpName::write, 0);
1170 MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, R600::MOV,
1171 ValueReg,
1172 AddrReg)
1173 .addReg(R600::AR_X,
1174 RegState::Implicit | RegState::Kill);
1175 setImmOperand(*Mov, R600::OpName::src0_rel, 1);
1176
1177 return Mov;
1178}
1179
1180int R600InstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
1181 const MachineRegisterInfo &MRI = MF.getRegInfo();
1182 const MachineFrameInfo &MFI = MF.getFrameInfo();
1183 int Offset = -1;
1184
1185 if (MFI.getNumObjects() == 0) {
1186 return -1;
1187 }
1188
1189 if (MRI.livein_empty()) {
1190 return 0;
1191 }
1192
1193 const TargetRegisterClass *IndirectRC = getIndirectAddrRegClass();
1194 for (std::pair<unsigned, unsigned> LI : MRI.liveins()) {
1195 unsigned Reg = LI.first;
1196 if (TargetRegisterInfo::isVirtualRegister(Reg) ||
1197 !IndirectRC->contains(Reg))
1198 continue;
1199
1200 unsigned RegIndex;
1201 unsigned RegEnd;
1202 for (RegIndex = 0, RegEnd = IndirectRC->getNumRegs(); RegIndex != RegEnd;
1203 ++RegIndex) {
1204 if (IndirectRC->getRegister(RegIndex) == Reg)
1205 break;
1206 }
1207 Offset = std::max(Offset, (int)RegIndex);
1208 }
1209
1210 return Offset + 1;
1211}
1212
1213int R600InstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
1214 int Offset = 0;
1215 const MachineFrameInfo &MFI = MF.getFrameInfo();
1216
1217 // Variable sized objects are not supported
1218 if (MFI.hasVarSizedObjects()) {
1219 return -1;
1220 }
1221
1222 if (MFI.getNumObjects() == 0) {
1223 return -1;
1224 }
1225
1226 const R600Subtarget &ST = MF.getSubtarget<R600Subtarget>();
1227 const R600FrameLowering *TFL = ST.getFrameLowering();
1228
1229 unsigned IgnoredFrameReg;
1230 Offset = TFL->getFrameIndexReference(MF, -1, IgnoredFrameReg);
1231
1232 return getIndirectIndexBegin(MF) + Offset;
1233}
1234
1235unsigned R600InstrInfo::getMaxAlusPerClause() const {
1236 return 115;
1237}
1238
1239MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MBB,
1240 MachineBasicBlock::iterator I,
1241 unsigned Opcode,
1242 unsigned DstReg,
1243 unsigned Src0Reg,
1244 unsigned Src1Reg) const {
1245 MachineInstrBuilder MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opcode),
1246 DstReg); // $dst
1247
1248 if (Src1Reg) {
1249 MIB.addImm(0) // $update_exec_mask
1250 .addImm(0); // $update_predicate
1251 }
1252 MIB.addImm(1) // $write
1253 .addImm(0) // $omod
1254 .addImm(0) // $dst_rel
1255 .addImm(0) // $dst_clamp
1256 .addReg(Src0Reg) // $src0
1257 .addImm(0) // $src0_neg
1258 .addImm(0) // $src0_rel
1259 .addImm(0) // $src0_abs
1260 .addImm(-1); // $src0_sel
1261
1262 if (Src1Reg) {
1263 MIB.addReg(Src1Reg) // $src1
1264 .addImm(0) // $src1_neg
1265 .addImm(0) // $src1_rel
1266 .addImm(0) // $src1_abs
1267 .addImm(-1); // $src1_sel
1268 }
1269
1270 // XXX: The r600g finalizer expects this to be 1; once we've moved the
1271 // scheduling to the backend, we can change the default to 0.
1272 MIB.addImm(1) // $last
1273 .addReg(R600::PRED_SEL_OFF) // $pred_sel
1274 .addImm(0) // $literal
1275 .addImm(0); // $bank_swizzle
1276
1277 return MIB;
1278}
1279
1280#define OPERAND_CASE(Label) \
1281 case Label: { \
1282 static const unsigned Ops[] = \
1283 { \
1284 Label##_X, \
1285 Label##_Y, \
1286 Label##_Z, \
1287 Label##_W \
1288 }; \
1289 return Ops[Slot]; \
1290 }
1291
1292static unsigned getSlotedOps(unsigned Op, unsigned Slot) {
1293 switch (Op) {
1294 OPERAND_CASE(R600::OpName::update_exec_mask)
1295 OPERAND_CASE(R600::OpName::update_pred)
1296 OPERAND_CASE(R600::OpName::write)
1297 OPERAND_CASE(R600::OpName::omod)
1298 OPERAND_CASE(R600::OpName::dst_rel)
1299 OPERAND_CASE(R600::OpName::clamp)
1300 OPERAND_CASE(R600::OpName::src0)
1301 OPERAND_CASE(R600::OpName::src0_neg)
1302 OPERAND_CASE(R600::OpName::src0_rel)
1303 OPERAND_CASE(R600::OpName::src0_abs)
1304 OPERAND_CASE(R600::OpName::src0_sel)
1305 OPERAND_CASE(R600::OpName::src1)
1306 OPERAND_CASE(R600::OpName::src1_neg)
1307 OPERAND_CASE(R600::OpName::src1_rel)
1308 OPERAND_CASE(R600::OpName::src1_abs)
1309 OPERAND_CASE(R600::OpName::src1_sel)
1310 OPERAND_CASE(R600::OpName::pred_sel)
1311 default:
1312 llvm_unreachable("Wrong Operand");
1313 }
1314}
1315
1316#undef OPERAND_CASE
1317
1318MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction(
1319 MachineBasicBlock &MBB, MachineInstr *MI, unsigned Slot, unsigned DstReg)
1320 const {
1321 assert (MI->getOpcode() == R600::DOT_4 && "Not Implemented");
1322 unsigned Opcode;
1323 if (ST.getGeneration() <= AMDGPUSubtarget::R700)
1324 Opcode = R600::DOT4_r600;
1325 else
1326 Opcode = R600::DOT4_eg;
1327 MachineBasicBlock::iterator I = MI;
1328 MachineOperand &Src0 = MI->getOperand(
1329 getOperandIdx(MI->getOpcode(), getSlotedOps(R600::OpName::src0, Slot)));
1330 MachineOperand &Src1 = MI->getOperand(
1331 getOperandIdx(MI->getOpcode(), getSlotedOps(R600::OpName::src1, Slot)));
1332 MachineInstr *MIB = buildDefaultInstruction(
1333 MBB, I, Opcode, DstReg, Src0.getReg(), Src1.getReg());
1334 static const unsigned Operands[14] = {
1335 R600::OpName::update_exec_mask,
1336 R600::OpName::update_pred,
1337 R600::OpName::write,
1338 R600::OpName::omod,
1339 R600::OpName::dst_rel,
1340 R600::OpName::clamp,
1341 R600::OpName::src0_neg,
1342 R600::OpName::src0_rel,
1343 R600::OpName::src0_abs,
1344 R600::OpName::src0_sel,
1345 R600::OpName::src1_neg,
1346 R600::OpName::src1_rel,
1347 R600::OpName::src1_abs,
1348 R600::OpName::src1_sel,
1349 };
1350
1351 MachineOperand &MO = MI->getOperand(getOperandIdx(MI->getOpcode(),
1352 getSlotedOps(R600::OpName::pred_sel, Slot)));
1353 MIB->getOperand(getOperandIdx(Opcode, R600::OpName::pred_sel))
1354 .setReg(MO.getReg());
1355
1356 for (unsigned i = 0; i < 14; i++) {
1357 MachineOperand &MO = MI->getOperand(
1358 getOperandIdx(MI->getOpcode(), getSlotedOps(Operands[i], Slot)));
1359 assert (MO.isImm());
1360 setImmOperand(*MIB, Operands[i], MO.getImm());
1361 }
1362 MIB->getOperand(20).setImm(0);
1363 return MIB;
1364}
1365
1366MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB,
1367 MachineBasicBlock::iterator I,
1368 unsigned DstReg,
1369 uint64_t Imm) const {
1370 MachineInstr *MovImm = buildDefaultInstruction(BB, I, R600::MOV, DstReg,
1371 R600::ALU_LITERAL_X);
1372 setImmOperand(*MovImm, R600::OpName::literal, Imm);
1373 return MovImm;
1374}
1375
1376MachineInstr *R600InstrInfo::buildMovInstr(MachineBasicBlock *MBB,
1377 MachineBasicBlock::iterator I,
1378 unsigned DstReg, unsigned SrcReg) const {
1379 return buildDefaultInstruction(*MBB, I, R600::MOV, DstReg, SrcReg);
1380}
1381
1382int R600InstrInfo::getOperandIdx(const MachineInstr &MI, unsigned Op) const {
1383 return getOperandIdx(MI.getOpcode(), Op);
1384}
1385
1386int R600InstrInfo::getOperandIdx(unsigned Opcode, unsigned Op) const {
1387 return R600::getNamedOperandIdx(Opcode, Op);
1388}
1389
1390void R600InstrInfo::setImmOperand(MachineInstr &MI, unsigned Op,
1391 int64_t Imm) const {
1392 int Idx = getOperandIdx(MI, Op);
1393 assert(Idx != -1 && "Operand not supported for this instruction.");
1394 assert(MI.getOperand(Idx).isImm());
1395 MI.getOperand(Idx).setImm(Imm);
1396}
1397
1398//===----------------------------------------------------------------------===//
1399// Instruction flag getters/setters
1400//===----------------------------------------------------------------------===//
1401
1402MachineOperand &R600InstrInfo::getFlagOp(MachineInstr &MI, unsigned SrcIdx,
1403 unsigned Flag) const {
1404 unsigned TargetFlags = get(MI.getOpcode()).TSFlags;
1405 int FlagIndex = 0;
1406 if (Flag != 0) {
1407 // If we pass something other than the default value of Flag to this
1408 // function, it means we want to set a flag on an instruction
1409 // that uses native encoding.
1410 assert(HAS_NATIVE_OPERANDS(TargetFlags));
1411 bool IsOP3 = (TargetFlags & R600_InstFlag::OP3) == R600_InstFlag::OP3;
1412 switch (Flag) {
1413 case MO_FLAG_CLAMP:
1414 FlagIndex = getOperandIdx(MI, R600::OpName::clamp);
1415 break;
1416 case MO_FLAG_MASK:
1417 FlagIndex = getOperandIdx(MI, R600::OpName::write);
1418 break;
1419 case MO_FLAG_NOT_LAST:
1420 case MO_FLAG_LAST:
1421 FlagIndex = getOperandIdx(MI, R600::OpName::last);
1422 break;
1423 case MO_FLAG_NEG:
1424 switch (SrcIdx) {
1425 case 0:
1426 FlagIndex = getOperandIdx(MI, R600::OpName::src0_neg);
1427 break;
1428 case 1:
1429 FlagIndex = getOperandIdx(MI, R600::OpName::src1_neg);
1430 break;
1431 case 2:
1432 FlagIndex = getOperandIdx(MI, R600::OpName::src2_neg);
1433 break;
1434 }
1435 break;
1436
1437 case MO_FLAG_ABS:
1438 assert(!IsOP3 && "Cannot set absolute value modifier for OP3 "
1439 "instructions.");
1440 (void)IsOP3;
1441 switch (SrcIdx) {
1442 case 0:
1443 FlagIndex = getOperandIdx(MI, R600::OpName::src0_abs);
1444 break;
1445 case 1:
1446 FlagIndex = getOperandIdx(MI, R600::OpName::src1_abs);
1447 break;
1448 }
1449 break;
1450
1451 default:
1452 FlagIndex = -1;
1453 break;
1454 }
1455 assert(FlagIndex != -1 && "Flag not supported for this instruction");
1456 } else {
1457 FlagIndex = GET_FLAG_OPERAND_IDX(TargetFlags);
1458 assert(FlagIndex != 0 &&
1459 "Instruction flags not supported for this instruction");
1460 }
1461
1462 MachineOperand &FlagOp = MI.getOperand(FlagIndex);
1463 assert(FlagOp.isImm());
1464 return FlagOp;
1465}
1466
1467void R600InstrInfo::addFlag(MachineInstr &MI, unsigned Operand,
1468 unsigned Flag) const {
1469 unsigned TargetFlags = get(MI.getOpcode()).TSFlags;
1470 if (Flag == 0) {
1471 return;
1472 }
1473 if (HAS_NATIVE_OPERANDS(TargetFlags)) {
1474 MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
1475 if (Flag == MO_FLAG_NOT_LAST) {
1476 clearFlag(MI, Operand, MO_FLAG_LAST);
1477 } else if (Flag == MO_FLAG_MASK) {
1478 clearFlag(MI, Operand, Flag);
1479 } else {
1480 FlagOp.setImm(1);
1481 }
1482 } else {
1483 MachineOperand &FlagOp = getFlagOp(MI, Operand);
1484 FlagOp.setImm(FlagOp.getImm() | (Flag << (NUM_MO_FLAGS * Operand)));
1485 }
1486}
1487
1488void R600InstrInfo::clearFlag(MachineInstr &MI, unsigned Operand,
1489 unsigned Flag) const {
1490 unsigned TargetFlags = get(MI.getOpcode()).TSFlags;
1491 if (HAS_NATIVE_OPERANDS(TargetFlags)) {
1492 MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
1493 FlagOp.setImm(0);
1494 } else {
1495 MachineOperand &FlagOp = getFlagOp(MI);
1496 unsigned InstFlags = FlagOp.getImm();
1497 InstFlags &= ~(Flag << (NUM_MO_FLAGS * Operand));
1498 FlagOp.setImm(InstFlags);
1499 }
1500}
1501
1502unsigned R600InstrInfo::getAddressSpaceForPseudoSourceKind(
1503 PseudoSourceValue::PSVKind Kind) const {
1504 switch (Kind) {
1505 case PseudoSourceValue::Stack:
1506 case PseudoSourceValue::FixedStack:
1507 return ST.getAMDGPUAS().PRIVATE_ADDRESS;
1508 case PseudoSourceValue::ConstantPool:
1509 case PseudoSourceValue::GOT:
1510 case PseudoSourceValue::JumpTable:
1511 case PseudoSourceValue::GlobalValueCallEntry:
1512 case PseudoSourceValue::ExternalSymbolCallEntry:
1513 case PseudoSourceValue::TargetCustom:
1514 return ST.getAMDGPUAS().CONSTANT_ADDRESS;
1515 }
1516 llvm_unreachable("Invalid pseudo source kind")::llvm::llvm_unreachable_internal("Invalid pseudo source kind"
, "/build/llvm-toolchain-snapshot-7~svn338205/lib/Target/AMDGPU/R600InstrInfo.cpp"
, 1516)
;
1517 return ST.getAMDGPUAS().PRIVATE_ADDRESS;
1518}