File: | lib/Target/AMDGPU/SIISelLowering.cpp |
Warning: | line 2059, column 5: Value stored to 'BR' is never read |
1 | //===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===// |
2 | // |
3 | // The LLVM Compiler Infrastructure |
4 | // |
5 | // This file is distributed under the University of Illinois Open Source |
6 | // License. See LICENSE.TXT for details. |
7 | // |
8 | //===----------------------------------------------------------------------===// |
9 | // |
10 | /// \file |
11 | /// \brief Custom DAG lowering for SI |
12 | // |
13 | //===----------------------------------------------------------------------===// |
14 | |
15 | #ifdef _MSC_VER |
16 | // Provide M_PI. |
17 | #define _USE_MATH_DEFINES |
18 | #include <cmath> |
19 | #endif |
20 | |
21 | #include "AMDGPU.h" |
22 | #include "AMDGPUIntrinsicInfo.h" |
23 | #include "AMDGPUSubtarget.h" |
24 | #include "SIDefines.h" |
25 | #include "SIISelLowering.h" |
26 | #include "SIInstrInfo.h" |
27 | #include "SIMachineFunctionInfo.h" |
28 | #include "SIRegisterInfo.h" |
29 | #include "llvm/ADT/BitVector.h" |
30 | #include "llvm/ADT/StringSwitch.h" |
31 | #include "llvm/CodeGen/CallingConvLower.h" |
32 | #include "llvm/CodeGen/MachineInstrBuilder.h" |
33 | #include "llvm/CodeGen/MachineRegisterInfo.h" |
34 | #include "llvm/CodeGen/SelectionDAG.h" |
35 | #include "llvm/CodeGen/Analysis.h" |
36 | #include "llvm/IR/DiagnosticInfo.h" |
37 | #include "llvm/IR/Function.h" |
38 | |
39 | using namespace llvm; |
40 | |
41 | static cl::opt<bool> EnableVGPRIndexMode( |
42 | "amdgpu-vgpr-index-mode", |
43 | cl::desc("Use GPR indexing mode instead of movrel for vector indexing"), |
44 | cl::init(false)); |
45 | |
46 | |
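| // Return the first SGPR that the calling convention state has not yet allocated. |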
47 | static unsigned findFirstFreeSGPR(CCState &CCInfo) { |
48 | unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs(); |
49 | for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) { |
50 | if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) { |
51 | return AMDGPU::SGPR0 + Reg; |
52 | } |
53 | } |
54 | llvm_unreachable("Cannot allocate sgpr"); |
55 | } |
56 | |
57 | SITargetLowering::SITargetLowering(const TargetMachine &TM, |
58 | const SISubtarget &STI) |
59 | : AMDGPUTargetLowering(TM, STI) { |
60 | addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass); |
61 | addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass); |
62 | |
63 | addRegisterClass(MVT::i32, &AMDGPU::SReg_32_XM0RegClass); |
64 | addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass); |
65 | |
66 | addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass); |
67 | addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass); |
68 | addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass); |
69 | |
70 | addRegisterClass(MVT::v2i64, &AMDGPU::SReg_128RegClass); |
71 | addRegisterClass(MVT::v2f64, &AMDGPU::SReg_128RegClass); |
72 | |
73 | addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass); |
74 | addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass); |
75 | |
76 | addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass); |
77 | addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass); |
78 | |
79 | addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass); |
80 | addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass); |
81 | |
82 | if (Subtarget->has16BitInsts()) { |
83 | addRegisterClass(MVT::i16, &AMDGPU::SReg_32_XM0RegClass); |
84 | addRegisterClass(MVT::f16, &AMDGPU::SReg_32_XM0RegClass); |
85 | } |
86 | |
87 | computeRegisterProperties(STI.getRegisterInfo()); |
88 | |
89 | // We need to custom lower vector stores from local memory |
90 | setOperationAction(ISD::LOAD, MVT::v2i32, Custom); |
91 | setOperationAction(ISD::LOAD, MVT::v4i32, Custom); |
92 | setOperationAction(ISD::LOAD, MVT::v8i32, Custom); |
93 | setOperationAction(ISD::LOAD, MVT::v16i32, Custom); |
94 | setOperationAction(ISD::LOAD, MVT::i1, Custom); |
95 | |
96 | setOperationAction(ISD::STORE, MVT::v2i32, Custom); |
97 | setOperationAction(ISD::STORE, MVT::v4i32, Custom); |
98 | setOperationAction(ISD::STORE, MVT::v8i32, Custom); |
99 | setOperationAction(ISD::STORE, MVT::v16i32, Custom); |
100 | setOperationAction(ISD::STORE, MVT::i1, Custom); |
101 | |
102 | setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); |
103 | setOperationAction(ISD::GlobalAddress, MVT::i64, Custom); |
104 | setOperationAction(ISD::ConstantPool, MVT::v2i64, Expand); |
105 | |
106 | setOperationAction(ISD::SELECT, MVT::i1, Promote); |
107 | setOperationAction(ISD::SELECT, MVT::i64, Custom); |
108 | setOperationAction(ISD::SELECT, MVT::f64, Promote); |
109 | AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64); |
110 | |
111 | setOperationAction(ISD::SELECT_CC, MVT::f32, Expand); |
112 | setOperationAction(ISD::SELECT_CC, MVT::i32, Expand); |
113 | setOperationAction(ISD::SELECT_CC, MVT::i64, Expand); |
114 | setOperationAction(ISD::SELECT_CC, MVT::f64, Expand); |
115 | setOperationAction(ISD::SELECT_CC, MVT::i1, Expand); |
116 | |
117 | setOperationAction(ISD::SETCC, MVT::i1, Promote); |
118 | setOperationAction(ISD::SETCC, MVT::v2i1, Expand); |
119 | setOperationAction(ISD::SETCC, MVT::v4i1, Expand); |
120 | AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32); |
121 | |
122 | setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand); |
123 | setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand); |
124 | |
125 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom); |
126 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom); |
127 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom); |
128 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom); |
129 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom); |
130 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom); |
131 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom); |
132 | |
133 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom); |
134 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom); |
135 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom); |
136 | |
137 | setOperationAction(ISD::BRCOND, MVT::Other, Custom); |
138 | setOperationAction(ISD::BR_CC, MVT::i1, Expand); |
139 | setOperationAction(ISD::BR_CC, MVT::i32, Expand); |
140 | setOperationAction(ISD::BR_CC, MVT::i64, Expand); |
141 | setOperationAction(ISD::BR_CC, MVT::f32, Expand); |
142 | setOperationAction(ISD::BR_CC, MVT::f64, Expand); |
143 | |
144 | // We only support LOAD/STORE and vector manipulation ops for vectors |
145 | // with > 4 elements. |
146 | for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32, MVT::v2i64, MVT::v2f64}) { |
147 | for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) { |
148 | switch (Op) { |
149 | case ISD::LOAD: |
150 | case ISD::STORE: |
151 | case ISD::BUILD_VECTOR: |
152 | case ISD::BITCAST: |
153 | case ISD::EXTRACT_VECTOR_ELT: |
154 | case ISD::INSERT_VECTOR_ELT: |
155 | case ISD::INSERT_SUBVECTOR: |
156 | case ISD::EXTRACT_SUBVECTOR: |
157 | case ISD::SCALAR_TO_VECTOR: |
158 | break; |
159 | case ISD::CONCAT_VECTORS: |
160 | setOperationAction(Op, VT, Custom); |
161 | break; |
162 | default: |
163 | setOperationAction(Op, VT, Expand); |
164 | break; |
165 | } |
166 | } |
167 | } |
168 | |
169 | // TODO: For dynamic 64-bit vector inserts/extracts, should emit a pseudo that |
170 | // is expanded to avoid having two separate loops in case the index is a VGPR. |
171 | |
172 | // Most operations are naturally 32-bit vector operations. We only support |
173 | // load and store of i64 vectors, so promote v2i64 vector operations to v4i32. |
174 | for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) { |
175 | setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote); |
176 | AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32); |
177 | |
178 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote); |
179 | AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32); |
180 | |
181 | setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote); |
182 | AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32); |
183 | |
184 | setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote); |
185 | AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32); |
186 | } |
187 | |
188 | setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand); |
189 | setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand); |
190 | setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand); |
191 | setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand); |
192 | |
193 | // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling, |
194 | // and output demarshalling |
195 | setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom); |
196 | setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom); |
197 | |
198 | // We can't return success/failure, only the old value; |
199 | // let LLVM add the comparison. |
200 | setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand); |
201 | setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand); |
202 | |
203 | if (getSubtarget()->hasFlatAddressSpace()) { |
204 | setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom); |
205 | setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom); |
206 | } |
207 | |
208 | setOperationAction(ISD::BSWAP, MVT::i32, Legal); |
209 | setOperationAction(ISD::BITREVERSE, MVT::i32, Legal); |
210 | |
211 | // This is s_memtime on SI and s_memrealtime on VI. |
212 | setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal); |
213 | setOperationAction(ISD::TRAP, MVT::Other, Custom); |
214 | |
215 | setOperationAction(ISD::FMINNUM, MVT::f64, Legal); |
216 | setOperationAction(ISD::FMAXNUM, MVT::f64, Legal); |
217 | |
218 | if (Subtarget->getGeneration() >= SISubtarget::SEA_ISLANDS) { |
219 | setOperationAction(ISD::FTRUNC, MVT::f64, Legal); |
220 | setOperationAction(ISD::FCEIL, MVT::f64, Legal); |
221 | setOperationAction(ISD::FRINT, MVT::f64, Legal); |
222 | } |
223 | |
224 | setOperationAction(ISD::FFLOOR, MVT::f64, Legal); |
225 | |
226 | setOperationAction(ISD::FSIN, MVT::f32, Custom); |
227 | setOperationAction(ISD::FCOS, MVT::f32, Custom); |
228 | setOperationAction(ISD::FDIV, MVT::f32, Custom); |
229 | setOperationAction(ISD::FDIV, MVT::f64, Custom); |
230 | |
231 | if (Subtarget->has16BitInsts()) { |
232 | setOperationAction(ISD::Constant, MVT::i16, Legal); |
233 | |
234 | setOperationAction(ISD::SMIN, MVT::i16, Legal); |
235 | setOperationAction(ISD::SMAX, MVT::i16, Legal); |
236 | |
237 | setOperationAction(ISD::UMIN, MVT::i16, Legal); |
238 | setOperationAction(ISD::UMAX, MVT::i16, Legal); |
239 | |
240 | setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Promote); |
241 | AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32); |
242 | |
243 | setOperationAction(ISD::ROTR, MVT::i16, Promote); |
244 | setOperationAction(ISD::ROTL, MVT::i16, Promote); |
245 | |
246 | setOperationAction(ISD::SDIV, MVT::i16, Promote); |
247 | setOperationAction(ISD::UDIV, MVT::i16, Promote); |
248 | setOperationAction(ISD::SREM, MVT::i16, Promote); |
249 | setOperationAction(ISD::UREM, MVT::i16, Promote); |
250 | |
251 | setOperationAction(ISD::BSWAP, MVT::i16, Promote); |
252 | setOperationAction(ISD::BITREVERSE, MVT::i16, Promote); |
253 | |
254 | setOperationAction(ISD::CTTZ, MVT::i16, Promote); |
255 | setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Promote); |
256 | setOperationAction(ISD::CTLZ, MVT::i16, Promote); |
257 | setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Promote); |
258 | |
259 | setOperationAction(ISD::SELECT_CC, MVT::i16, Expand); |
260 | |
261 | setOperationAction(ISD::BR_CC, MVT::i16, Expand); |
262 | |
263 | setOperationAction(ISD::LOAD, MVT::i16, Custom); |
264 | |
265 | setTruncStoreAction(MVT::i64, MVT::i16, Expand); |
266 | |
267 | setOperationAction(ISD::FP16_TO_FP, MVT::i16, Promote); |
268 | AddPromotedToType(ISD::FP16_TO_FP, MVT::i16, MVT::i32); |
269 | setOperationAction(ISD::FP_TO_FP16, MVT::i16, Promote); |
270 | AddPromotedToType(ISD::FP_TO_FP16, MVT::i16, MVT::i32); |
271 | |
272 | setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote); |
273 | setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote); |
274 | setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote); |
275 | setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote); |
276 | |
277 | // F16 - Constant Actions. |
278 | setOperationAction(ISD::ConstantFP, MVT::f16, Legal); |
279 | |
280 | // F16 - Load/Store Actions. |
281 | setOperationAction(ISD::LOAD, MVT::f16, Promote); |
282 | AddPromotedToType(ISD::LOAD, MVT::f16, MVT::i16); |
283 | setOperationAction(ISD::STORE, MVT::f16, Promote); |
284 | AddPromotedToType(ISD::STORE, MVT::f16, MVT::i16); |
285 | |
286 | // F16 - VOP1 Actions. |
287 | setOperationAction(ISD::FP_ROUND, MVT::f16, Custom); |
288 | setOperationAction(ISD::FCOS, MVT::f16, Promote); |
289 | setOperationAction(ISD::FSIN, MVT::f16, Promote); |
290 | setOperationAction(ISD::FP_TO_SINT, MVT::f16, Promote); |
291 | setOperationAction(ISD::FP_TO_UINT, MVT::f16, Promote); |
292 | setOperationAction(ISD::SINT_TO_FP, MVT::f16, Promote); |
293 | setOperationAction(ISD::UINT_TO_FP, MVT::f16, Promote); |
294 | |
295 | // F16 - VOP2 Actions. |
296 | setOperationAction(ISD::BR_CC, MVT::f16, Expand); |
297 | setOperationAction(ISD::SELECT_CC, MVT::f16, Expand); |
298 | setOperationAction(ISD::FMAXNUM, MVT::f16, Legal); |
299 | setOperationAction(ISD::FMINNUM, MVT::f16, Legal); |
300 | setOperationAction(ISD::FDIV, MVT::f16, Custom); |
301 | |
302 | // F16 - VOP3 Actions. |
303 | setOperationAction(ISD::FMA, MVT::f16, Legal); |
304 | if (!Subtarget->hasFP16Denormals()) |
305 | setOperationAction(ISD::FMAD, MVT::f16, Legal); |
306 | } |
307 | |
308 | setTargetDAGCombine(ISD::FADD); |
309 | setTargetDAGCombine(ISD::FSUB); |
310 | setTargetDAGCombine(ISD::FMINNUM); |
311 | setTargetDAGCombine(ISD::FMAXNUM); |
312 | setTargetDAGCombine(ISD::SMIN); |
313 | setTargetDAGCombine(ISD::SMAX); |
314 | setTargetDAGCombine(ISD::UMIN); |
315 | setTargetDAGCombine(ISD::UMAX); |
316 | setTargetDAGCombine(ISD::SETCC); |
317 | setTargetDAGCombine(ISD::AND); |
318 | setTargetDAGCombine(ISD::OR); |
319 | setTargetDAGCombine(ISD::XOR); |
320 | setTargetDAGCombine(ISD::SINT_TO_FP); |
321 | setTargetDAGCombine(ISD::UINT_TO_FP); |
322 | setTargetDAGCombine(ISD::FCANONICALIZE); |
323 | |
324 | // All memory operations. Some folding on the pointer operand is done to help |
325 | // match the constant offsets in the addressing modes. |
326 | setTargetDAGCombine(ISD::LOAD); |
327 | setTargetDAGCombine(ISD::STORE); |
328 | setTargetDAGCombine(ISD::ATOMIC_LOAD); |
329 | setTargetDAGCombine(ISD::ATOMIC_STORE); |
330 | setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP); |
331 | setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS); |
332 | setTargetDAGCombine(ISD::ATOMIC_SWAP); |
333 | setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD); |
334 | setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB); |
335 | setTargetDAGCombine(ISD::ATOMIC_LOAD_AND); |
336 | setTargetDAGCombine(ISD::ATOMIC_LOAD_OR); |
337 | setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR); |
338 | setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND); |
339 | setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN); |
340 | setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX); |
341 | setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN); |
342 | setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX); |
343 | |
344 | setSchedulingPreference(Sched::RegPressure); |
345 | } |
346 | |
347 | const SISubtarget *SITargetLowering::getSubtarget() const { |
348 | return static_cast<const SISubtarget *>(Subtarget); |
349 | } |
350 | |
351 | //===----------------------------------------------------------------------===// |
352 | // TargetLowering queries |
353 | //===----------------------------------------------------------------------===// |
354 | |
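| // Describe target intrinsics with memory side effects so SelectionDAG can attach the proper memory operands to them. |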
355 | bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, |
356 | const CallInst &CI, |
357 | unsigned IntrID) const { |
358 | switch (IntrID) { |
359 | case Intrinsic::amdgcn_atomic_inc: |
360 | case Intrinsic::amdgcn_atomic_dec: |
361 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
362 | Info.memVT = MVT::getVT(CI.getType()); |
363 | Info.ptrVal = CI.getOperand(0); |
364 | Info.align = 0; |
365 | Info.vol = false; |
366 | Info.readMem = true; |
367 | Info.writeMem = true; |
368 | return true; |
369 | default: |
370 | return false; |
371 | } |
372 | } |
373 | |
374 | bool SITargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &, |
375 | EVT) const { |
376 | // SI has some legal vector types, but no legal vector operations. Say no |
377 | // shuffles are legal in order to prefer scalarizing some vector operations. |
378 | return false; |
379 | } |
380 | |
381 | bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const { |
382 | // Flat instructions do not have offsets, and only have the register |
383 | // address. |
384 | return AM.BaseOffs == 0 && (AM.Scale == 0 || AM.Scale == 1); |
385 | } |
386 | |
387 | bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const { |
388 | // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and |
389 | // additionally can do r + r + i with addr64. 32-bit has more addressing |
390 | // mode options. Depending on the resource constant, it can also do |
391 | // (i64 r0) + (i32 r1) * (i14 i). |
392 | // |
393 | // Private arrays end up using a scratch buffer most of the time, so also |
394 | // assume those use MUBUF instructions. Scratch loads / stores are currently |
395 | // implemented as mubuf instructions with the offen bit set, so they are slightly |
396 | // different from the normal addr64. |
397 | if (!isUInt<12>(AM.BaseOffs)) |
398 | return false; |
399 | |
400 | // FIXME: Since we can split immediate into soffset and immediate offset, |
401 | // would it make sense to allow any immediate? |
402 | |
403 | switch (AM.Scale) { |
404 | case 0: // r + i or just i, depending on HasBaseReg. |
405 | return true; |
406 | case 1: |
407 | return true; // We have r + r or r + i. |
408 | case 2: |
409 | if (AM.HasBaseReg) { |
410 | // Reject 2 * r + r. |
411 | return false; |
412 | } |
413 | |
414 | // Allow 2 * r as r + r |
415 | // Or 2 * r + i is allowed as r + r + i. |
416 | return true; |
417 | default: // Don't allow n * r |
418 | return false; |
419 | } |
420 | } |
421 | |
422 | bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL, |
423 | const AddrMode &AM, Type *Ty, |
424 | unsigned AS) const { |
425 | // No global is ever allowed as a base. |
426 | if (AM.BaseGV) |
427 | return false; |
428 | |
429 | switch (AS) { |
430 | case AMDGPUAS::GLOBAL_ADDRESS: { |
431 | if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) { |
432 | // Assume that we will use FLAT for all global memory accesses |
433 | // on VI. |
434 | // FIXME: This assumption is currently wrong. On VI we still use |
435 | // MUBUF instructions for the r + i addressing mode. As currently |
436 | // implemented, the MUBUF instructions only work on buffer < 4GB. |
437 | // It may be possible to support > 4GB buffers with MUBUF instructions, |
438 | // by setting the stride value in the resource descriptor which would |
439 | // increase the size limit to (stride * 4GB). However, this is risky, |
440 | // because it has never been validated. |
441 | return isLegalFlatAddressingMode(AM); |
442 | } |
443 | |
444 | return isLegalMUBUFAddressingMode(AM); |
445 | } |
446 | case AMDGPUAS::CONSTANT_ADDRESS: { |
447 | // If the offset isn't a multiple of 4, it probably isn't going to be |
448 | // correctly aligned. |
449 | // FIXME: Can we get the real alignment here? |
450 | if (AM.BaseOffs % 4 != 0) |
451 | return isLegalMUBUFAddressingMode(AM); |
452 | |
453 | // There are no SMRD extloads, so if we have to do a small type access we |
454 | // will use a MUBUF load. |
455 | // FIXME?: We also need to do this if unaligned, but we don't know the |
456 | // alignment here. |
457 | if (DL.getTypeStoreSize(Ty) < 4) |
458 | return isLegalMUBUFAddressingMode(AM); |
459 | |
460 | if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS) { |
461 | // SMRD instructions have an 8-bit, dword offset on SI. |
462 | if (!isUInt<8>(AM.BaseOffs / 4)) |
463 | return false; |
464 | } else if (Subtarget->getGeneration() == SISubtarget::SEA_ISLANDS) { |
465 | // On CI+, this can also be a 32-bit literal constant offset. If it fits |
466 | // in 8 bits, it can use a smaller encoding. |
467 | if (!isUInt<32>(AM.BaseOffs / 4)) |
468 | return false; |
469 | } else if (Subtarget->getGeneration() == SISubtarget::VOLCANIC_ISLANDS) { |
470 | // On VI, these use the SMEM format and the offset is a 20-bit byte offset. |
471 | if (!isUInt<20>(AM.BaseOffs)) |
472 | return false; |
473 | } else |
474 | llvm_unreachable("unhandled generation"); |
475 | |
476 | if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg. |
477 | return true; |
478 | |
479 | if (AM.Scale == 1 && AM.HasBaseReg) |
480 | return true; |
481 | |
482 | return false; |
483 | } |
484 | |
485 | case AMDGPUAS::PRIVATE_ADDRESS: |
486 | return isLegalMUBUFAddressingMode(AM); |
487 | |
488 | case AMDGPUAS::LOCAL_ADDRESS: |
489 | case AMDGPUAS::REGION_ADDRESS: { |
490 | // Basic, single offset DS instructions allow a 16-bit unsigned immediate |
491 | // field. |
492 | // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have |
493 | // an 8-bit dword offset but we don't know the alignment here. |
494 | if (!isUInt<16>(AM.BaseOffs)) |
495 | return false; |
496 | |
497 | if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg. |
498 | return true; |
499 | |
500 | if (AM.Scale == 1 && AM.HasBaseReg) |
501 | return true; |
502 | |
503 | return false; |
504 | } |
505 | case AMDGPUAS::FLAT_ADDRESS: |
506 | case AMDGPUAS::UNKNOWN_ADDRESS_SPACE: |
507 | // For an unknown address space, this usually means that this is for some |
508 | // reason being used for pure arithmetic, and not based on some addressing |
509 | // computation. We don't have instructions that compute pointers with any |
510 | // addressing modes, so treat them as having no offset like flat |
511 | // instructions. |
512 | return isLegalFlatAddressingMode(AM); |
513 | |
514 | default: |
515 | llvm_unreachable("unhandled address space"); |
516 | } |
517 | } |
518 | |
519 | bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT, |
520 | unsigned AddrSpace, |
521 | unsigned Align, |
522 | bool *IsFast) const { |
523 | if (IsFast) |
524 | *IsFast = false; |
525 | |
526 | // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96, |
527 | // which isn't a simple VT. |
528 | // Until MVT is extended to handle this, simply check for the size and |
529 | // rely on the condition below: allow accesses if the size is a multiple of 4. |
530 | if (VT == MVT::Other || (VT != MVT::Other && VT.getSizeInBits() > 1024 && |
531 | VT.getStoreSize() > 16)) { |
532 | return false; |
533 | } |
534 | |
535 | if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS || |
536 | AddrSpace == AMDGPUAS::REGION_ADDRESS) { |
537 | // ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte |
538 | // aligned, 8 byte access in a single operation using ds_read2/write2_b32 |
539 | // with adjacent offsets. |
540 | bool AlignedBy4 = (Align % 4 == 0); |
541 | if (IsFast) |
542 | *IsFast = AlignedBy4; |
543 | |
544 | return AlignedBy4; |
545 | } |
546 | |
547 | // FIXME: We have to be conservative here and assume that flat operations |
548 | // will access scratch. If we had access to the IR function, then we |
549 | // could determine if any private memory was used in the function. |
550 | if (!Subtarget->hasUnalignedScratchAccess() && |
551 | (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS || |
552 | AddrSpace == AMDGPUAS::FLAT_ADDRESS)) { |
553 | return false; |
554 | } |
555 | |
556 | if (Subtarget->hasUnalignedBufferAccess()) { |
557 | // If we have a uniform constant load, it still requires using a slow |
558 | // buffer instruction if unaligned. |
559 | if (IsFast) { |
560 | *IsFast = (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS) ? |
561 | (Align % 4 == 0) : true; |
562 | } |
563 | |
564 | return true; |
565 | } |
566 | |
567 | // Accesses smaller than a dword must be aligned. |
568 | if (VT.bitsLT(MVT::i32)) |
569 | return false; |
570 | |
571 | // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the |
572 | // byte-address are ignored, thus forcing Dword alignment. |
573 | // This applies to private, global, and constant memory. |
574 | if (IsFast) |
575 | *IsFast = true; |
576 | |
577 | return VT.bitsGT(MVT::i32) && Align % 4 == 0; |
578 | } |
579 | |
580 | EVT SITargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign, |
581 | unsigned SrcAlign, bool IsMemset, |
582 | bool ZeroMemset, |
583 | bool MemcpyStrSrc, |
584 | MachineFunction &MF) const { |
585 | // FIXME: Should account for address space here. |
586 | |
587 | // The default fallback uses the private pointer size as a guess for a type to |
588 | // use. Make sure we switch these to 64-bit accesses. |
589 | |
590 | if (Size >= 16 && DstAlign >= 4) // XXX: Should only do for global |
591 | return MVT::v4i32; |
592 | |
593 | if (Size >= 8 && DstAlign >= 4) |
594 | return MVT::v2i32; |
595 | |
596 | // Use the default. |
597 | return MVT::Other; |
598 | } |
599 | |
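| // Global, flat, and constant pointers all address the same underlying 64-bit memory, so casts between these address spaces are no-ops. |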
600 | static bool isFlatGlobalAddrSpace(unsigned AS) { |
601 | return AS == AMDGPUAS::GLOBAL_ADDRESS || |
602 | AS == AMDGPUAS::FLAT_ADDRESS || |
603 | AS == AMDGPUAS::CONSTANT_ADDRESS; |
604 | } |
605 | |
606 | bool SITargetLowering::isNoopAddrSpaceCast(unsigned SrcAS, |
607 | unsigned DestAS) const { |
608 | return isFlatGlobalAddrSpace(SrcAS) && isFlatGlobalAddrSpace(DestAS); |
609 | } |
610 | |
611 | bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const { |
612 | const MemSDNode *MemNode = cast<MemSDNode>(N); |
613 | const Value *Ptr = MemNode->getMemOperand()->getValue(); |
614 | const Instruction *I = dyn_cast<Instruction>(Ptr); |
615 | return I && I->getMetadata("amdgpu.noclobber"); |
616 | } |
617 | |
618 | bool SITargetLowering::isCheapAddrSpaceCast(unsigned SrcAS, |
619 | unsigned DestAS) const { |
620 | // Flat -> private/local is a simple truncate. |
621 | // Flat -> global is no-op |
622 | if (SrcAS == AMDGPUAS::FLAT_ADDRESS) |
623 | return true; |
624 | |
625 | return isNoopAddrSpaceCast(SrcAS, DestAS); |
626 | } |
627 | |
628 | bool SITargetLowering::isMemOpUniform(const SDNode *N) const { |
629 | const MemSDNode *MemNode = cast<MemSDNode>(N); |
630 | const Value *Ptr = MemNode->getMemOperand()->getValue(); |
631 | |
632 | // UndefValue means this is a load of a kernel input. These are uniform. |
633 | // Sometimes LDS instructions have constant pointers. |
634 | // If Ptr is null, then that means this mem operand contains a |
635 | // PseudoSourceValue like GOT. |
636 | if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) || |
637 | isa<Constant>(Ptr) || isa<GlobalValue>(Ptr)) |
638 | return true; |
639 | |
640 | const Instruction *I = dyn_cast<Instruction>(Ptr); |
641 | return I && I->getMetadata("amdgpu.uniform"); |
642 | } |
643 | |
644 | TargetLoweringBase::LegalizeTypeAction |
645 | SITargetLowering::getPreferredVectorAction(EVT VT) const { |
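| // Prefer splitting vectors whose elements are 16 bits or narrower rather than widening or scalarizing them. |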
646 | if (VT.getVectorNumElements() != 1 && VT.getScalarType().bitsLE(MVT::i16)) |
647 | return TypeSplitVector; |
648 | |
649 | return TargetLoweringBase::getPreferredVectorAction(VT); |
650 | } |
651 | |
652 | bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, |
653 | Type *Ty) const { |
654 | // FIXME: Could be smarter if called for vector constants. |
655 | return true; |
656 | } |
657 | |
658 | bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const { |
659 | if (Subtarget->has16BitInsts() && VT == MVT::i16) { |
660 | switch (Op) { |
661 | case ISD::LOAD: |
662 | case ISD::STORE: |
663 | |
664 | // These operations are done with 32-bit instructions anyway. |
665 | case ISD::AND: |
666 | case ISD::OR: |
667 | case ISD::XOR: |
668 | case ISD::SELECT: |
669 | // TODO: Extensions? |
670 | return true; |
671 | default: |
672 | return false; |
673 | } |
674 | } |
675 | |
676 | // SimplifySetCC uses this function to determine whether or not it should |
677 | // create setcc with i1 operands. We don't have instructions for i1 setcc. |
678 | if (VT == MVT::i1 && Op == ISD::SETCC) |
679 | return false; |
680 | |
681 | return TargetLowering::isTypeDesirableForOp(Op, VT); |
682 | } |
683 | |
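| // Compute a constant-address-space pointer to the kernel argument at byte offset \p Offset from the kernarg segment pointer. |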
684 | SDValue SITargetLowering::LowerParameterPtr(SelectionDAG &DAG, |
685 | const SDLoc &SL, SDValue Chain, |
686 | unsigned Offset) const { |
687 | const DataLayout &DL = DAG.getDataLayout(); |
688 | MachineFunction &MF = DAG.getMachineFunction(); |
689 | const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); |
690 | unsigned InputPtrReg = TRI->getPreloadedValue(MF, SIRegisterInfo::KERNARG_SEGMENT_PTR); |
691 | |
692 | MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); |
693 | MVT PtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS); |
694 | SDValue BasePtr = DAG.getCopyFromReg(Chain, SL, |
695 | MRI.getLiveInVirtReg(InputPtrReg), PtrVT); |
696 | return DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr, |
697 | DAG.getConstant(Offset, SL, PtrVT)); |
698 | } |
699 | |
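| // Load the kernel argument at byte offset \p Offset as MemVT and extend or truncate it to VT, returning both the value and the load's chain. |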
700 | SDValue SITargetLowering::LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT, |
701 | const SDLoc &SL, SDValue Chain, |
702 | unsigned Offset, bool Signed) const { |
703 | const DataLayout &DL = DAG.getDataLayout(); |
704 | Type *Ty = MemVT.getTypeForEVT(*DAG.getContext()); |
705 | PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS); |
706 | MachinePointerInfo PtrInfo(UndefValue::get(PtrTy)); |
707 | |
708 | unsigned Align = DL.getABITypeAlignment(Ty); |
709 | |
710 | SDValue Ptr = LowerParameterPtr(DAG, SL, Chain, Offset); |
711 | SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Align, |
712 | MachineMemOperand::MONonTemporal | |
713 | MachineMemOperand::MODereferenceable | |
714 | MachineMemOperand::MOInvariant); |
715 | |
716 | SDValue Val; |
717 | if (MemVT.isFloatingPoint()) |
718 | Val = getFPExtOrFPTrunc(DAG, Load, SL, VT); |
719 | else if (Signed) |
720 | Val = DAG.getSExtOrTrunc(Load, SL, VT); |
721 | else |
722 | Val = DAG.getZExtOrTrunc(Load, SL, VT); |
723 | |
724 | SDValue Ops[] = { |
725 | Val, |
726 | Load.getValue(1) |
727 | }; |
728 | |
729 | return DAG.getMergeValues(Ops, SL); |
730 | } |
731 | |
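| // Lower incoming arguments: split shader vector arguments into scalars, allocate the preloaded user and system SGPRs/VGPRs, and load kernel arguments from the kernarg segment. |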
732 | SDValue SITargetLowering::LowerFormalArguments( |
733 | SDValue Chain, CallingConv::ID CallConv, bool isVarArg, |
734 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL, |
735 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { |
736 | const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); |
737 | |
738 | MachineFunction &MF = DAG.getMachineFunction(); |
739 | FunctionType *FType = MF.getFunction()->getFunctionType(); |
740 | SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
741 | const SISubtarget &ST = MF.getSubtarget<SISubtarget>(); |
742 | |
743 | if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) { |
744 | const Function *Fn = MF.getFunction(); |
745 | DiagnosticInfoUnsupported NoGraphicsHSA( |
746 | *Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc()); |
747 | DAG.getContext()->diagnose(NoGraphicsHSA); |
748 | return DAG.getEntryNode(); |
749 | } |
750 | |
751 | // Create stack objects that are used for emitting the debugger prologue if |
752 | // the "amdgpu-debugger-emit-prologue" attribute was specified. |
753 | if (ST.debuggerEmitPrologue()) |
754 | createDebuggerPrologueStackObjects(MF); |
755 | |
756 | SmallVector<ISD::InputArg, 16> Splits; |
757 | BitVector Skipped(Ins.size()); |
758 | |
759 | for (unsigned i = 0, e = Ins.size(), PSInputNum = 0; i != e; ++i) { |
760 | const ISD::InputArg &Arg = Ins[i]; |
761 | |
762 | // First check if it's a PS input addr |
763 | if (CallConv == CallingConv::AMDGPU_PS && !Arg.Flags.isInReg() && |
764 | !Arg.Flags.isByVal() && PSInputNum <= 15) { |
765 | |
766 | if (!Arg.Used && !Info->isPSInputAllocated(PSInputNum)) { |
767 | // We can safely skip PS inputs |
768 | Skipped.set(i); |
769 | ++PSInputNum; |
770 | continue; |
771 | } |
772 | |
773 | Info->markPSInputAllocated(PSInputNum); |
774 | if (Arg.Used) |
775 | Info->PSInputEna |= 1 << PSInputNum; |
776 | |
777 | ++PSInputNum; |
778 | } |
779 | |
780 | if (AMDGPU::isShader(CallConv)) { |
781 | // Second, split vertices into their elements. |
782 | if (Arg.VT.isVector()) { |
783 | ISD::InputArg NewArg = Arg; |
784 | NewArg.Flags.setSplit(); |
785 | NewArg.VT = Arg.VT.getVectorElementType(); |
786 | |
787 | // We REALLY want the ORIGINAL number of vertex elements here, e.g. a |
788 | // three or five element vertex only needs three or five registers, |
789 | // NOT four or eight. |
790 | Type *ParamType = FType->getParamType(Arg.getOrigArgIndex()); |
791 | unsigned NumElements = ParamType->getVectorNumElements(); |
792 | |
793 | for (unsigned j = 0; j != NumElements; ++j) { |
794 | Splits.push_back(NewArg); |
795 | NewArg.PartOffset += NewArg.VT.getStoreSize(); |
796 | } |
797 | } else { |
798 | Splits.push_back(Arg); |
799 | } |
800 | } |
801 | } |
802 | |
803 | SmallVector<CCValAssign, 16> ArgLocs; |
804 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, |
805 | *DAG.getContext()); |
806 | |
807 | // At least one interpolation mode must be enabled or else the GPU will hang. |
808 | // |
809 | // Check PSInputAddr instead of PSInputEna. The idea is that if the user set |
810 | // PSInputAddr, the user wants to enable some bits after the compilation |
811 | // based on run-time states. Since we can't know what the final PSInputEna |
812 | // will look like, so we shouldn't do anything here and the user should take |
813 | // responsibility for the correct programming. |
814 | // |
815 | // Otherwise, the following restrictions apply: |
816 | // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled. |
817 | // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be |
818 | // enabled too. |
819 | if (CallConv == CallingConv::AMDGPU_PS && |
820 | ((Info->getPSInputAddr() & 0x7F) == 0 || |
821 | ((Info->getPSInputAddr() & 0xF) == 0 && Info->isPSInputAllocated(11)))) { |
822 | CCInfo.AllocateReg(AMDGPU::VGPR0); |
823 | CCInfo.AllocateReg(AMDGPU::VGPR1); |
824 | Info->markPSInputAllocated(0); |
825 | Info->PSInputEna |= 1; |
826 | } |
827 | |
828 | if (!AMDGPU::isShader(CallConv)) { |
829 | assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX()); |
830 | } else { |
831 | assert(!Info->hasPrivateSegmentBuffer() && !Info->hasDispatchPtr() && |
832 | !Info->hasKernargSegmentPtr() && !Info->hasFlatScratchInit() && |
833 | !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() && |
834 | !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() && |
835 | !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() && |
836 | !Info->hasWorkItemIDZ()); |
837 | } |
838 | |
839 | // FIXME: How should these inputs interact with inreg / custom SGPR inputs? |
840 | if (Info->hasPrivateSegmentBuffer()) { |
841 | unsigned PrivateSegmentBufferReg = Info->addPrivateSegmentBuffer(*TRI); |
842 | MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SReg_128RegClass); |
843 | CCInfo.AllocateReg(PrivateSegmentBufferReg); |
844 | } |
845 | |
846 | if (Info->hasDispatchPtr()) { |
847 | unsigned DispatchPtrReg = Info->addDispatchPtr(*TRI); |
848 | MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass); |
849 | CCInfo.AllocateReg(DispatchPtrReg); |
850 | } |
851 | |
852 | if (Info->hasQueuePtr()) { |
853 | unsigned QueuePtrReg = Info->addQueuePtr(*TRI); |
854 | MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass); |
855 | CCInfo.AllocateReg(QueuePtrReg); |
856 | } |
857 | |
858 | if (Info->hasKernargSegmentPtr()) { |
859 | unsigned InputPtrReg = Info->addKernargSegmentPtr(*TRI); |
860 | MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass); |
861 | CCInfo.AllocateReg(InputPtrReg); |
862 | } |
863 | |
864 | if (Info->hasDispatchID()) { |
865 | unsigned DispatchIDReg = Info->addDispatchID(*TRI); |
866 | MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass); |
867 | CCInfo.AllocateReg(DispatchIDReg); |
868 | } |
869 | |
870 | if (Info->hasFlatScratchInit()) { |
871 | unsigned FlatScratchInitReg = Info->addFlatScratchInit(*TRI); |
872 | MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass); |
873 | CCInfo.AllocateReg(FlatScratchInitReg); |
874 | } |
875 | |
876 | if (!AMDGPU::isShader(CallConv)) |
877 | analyzeFormalArgumentsCompute(CCInfo, Ins); |
878 | else |
879 | AnalyzeFormalArguments(CCInfo, Splits); |
880 | |
881 | SmallVector<SDValue, 16> Chains; |
882 | |
883 | for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) { |
884 | |
885 | const ISD::InputArg &Arg = Ins[i]; |
886 | if (Skipped[i]) { |
887 | InVals.push_back(DAG.getUNDEF(Arg.VT)); |
888 | continue; |
889 | } |
890 | |
891 | CCValAssign &VA = ArgLocs[ArgIdx++]; |
892 | MVT VT = VA.getLocVT(); |
893 | |
894 | if (VA.isMemLoc()) { |
895 | VT = Ins[i].VT; |
896 | EVT MemVT = VA.getLocVT(); |
897 | const unsigned Offset = Subtarget->getExplicitKernelArgOffset() + |
898 | VA.getLocMemOffset(); |
899 | // The first 36 bytes of the input buffer contain information about |
900 | // thread group and global sizes. |
901 | SDValue Arg = LowerParameter(DAG, VT, MemVT, DL, Chain, |
902 | Offset, Ins[i].Flags.isSExt()); |
903 | Chains.push_back(Arg.getValue(1)); |
904 | |
905 | auto *ParamTy = |
906 | dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex())); |
907 | if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS && |
908 | ParamTy && ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) { |
909 | // On SI local pointers are just offsets into LDS, so they are always |
910 | // less than 16 bits. On CI and newer they could potentially be |
911 | // real pointers, so we can't guarantee their size. |
912 | Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg, |
913 | DAG.getValueType(MVT::i16)); |
914 | } |
915 | |
916 | InVals.push_back(Arg); |
917 | Info->setABIArgOffset(Offset + MemVT.getStoreSize()); |
918 | continue; |
919 | } |
920 | assert(VA.isRegLoc() && "Parameter must be in a register!"); |
921 | |
922 | unsigned Reg = VA.getLocReg(); |
923 | |
924 | if (VT == MVT::i64) { |
925 | // For now assume it is a pointer |
926 | Reg = TRI->getMatchingSuperReg(Reg, AMDGPU::sub0, |
927 | &AMDGPU::SGPR_64RegClass); |
928 | Reg = MF.addLiveIn(Reg, &AMDGPU::SGPR_64RegClass); |
929 | SDValue Copy = DAG.getCopyFromReg(Chain, DL, Reg, VT); |
930 | InVals.push_back(Copy); |
931 | continue; |
932 | } |
933 | |
934 | const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT); |
935 | |
936 | Reg = MF.addLiveIn(Reg, RC); |
937 | SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT); |
938 | |
939 | if (Arg.VT.isVector()) { |
940 | |
941 | // Build a vector from the registers |
942 | Type *ParamType = FType->getParamType(Arg.getOrigArgIndex()); |
943 | unsigned NumElements = ParamType->getVectorNumElements(); |
944 | |
945 | SmallVector<SDValue, 4> Regs; |
946 | Regs.push_back(Val); |
947 | for (unsigned j = 1; j != NumElements; ++j) { |
948 | Reg = ArgLocs[ArgIdx++].getLocReg(); |
949 | Reg = MF.addLiveIn(Reg, RC); |
950 | |
951 | SDValue Copy = DAG.getCopyFromReg(Chain, DL, Reg, VT); |
952 | Regs.push_back(Copy); |
953 | } |
954 | |
955 | // Fill up the missing vector elements |
956 | NumElements = Arg.VT.getVectorNumElements() - NumElements; |
957 | Regs.append(NumElements, DAG.getUNDEF(VT)); |
958 | |
959 | InVals.push_back(DAG.getBuildVector(Arg.VT, DL, Regs)); |
960 | continue; |
961 | } |
962 | |
963 | InVals.push_back(Val); |
964 | } |
965 | |
966 | // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read |
967 | // these from the dispatch pointer. |
968 | |
969 | // Start adding system SGPRs. |
970 | if (Info->hasWorkGroupIDX()) { |
971 | unsigned Reg = Info->addWorkGroupIDX(); |
972 | MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass); |
973 | CCInfo.AllocateReg(Reg); |
974 | } |
975 | |
976 | if (Info->hasWorkGroupIDY()) { |
977 | unsigned Reg = Info->addWorkGroupIDY(); |
978 | MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass); |
979 | CCInfo.AllocateReg(Reg); |
980 | } |
981 | |
982 | if (Info->hasWorkGroupIDZ()) { |
983 | unsigned Reg = Info->addWorkGroupIDZ(); |
984 | MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass); |
985 | CCInfo.AllocateReg(Reg); |
986 | } |
987 | |
988 | if (Info->hasWorkGroupInfo()) { |
989 | unsigned Reg = Info->addWorkGroupInfo(); |
990 | MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass); |
991 | CCInfo.AllocateReg(Reg); |
992 | } |
993 | |
994 | if (Info->hasPrivateSegmentWaveByteOffset()) { |
995 | // Scratch wave offset passed in system SGPR. |
996 | unsigned PrivateSegmentWaveByteOffsetReg; |
997 | |
998 | if (AMDGPU::isShader(CallConv)) { |
999 | PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo); |
1000 | Info->setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg); |
1001 | } else |
1002 | PrivateSegmentWaveByteOffsetReg = Info->addPrivateSegmentWaveByteOffset(); |
1003 | |
1004 | MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass); |
1005 | CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg); |
1006 | } |
1007 | |
1008 | // Now that we've figured out where the scratch register inputs are, see if |
1009 | // we should reserve the arguments and use them directly. |
1010 | bool HasStackObjects = MF.getFrameInfo().hasStackObjects(); |
1011 | // Record that we know we have non-spill stack objects so we don't need to |
1012 | // check all stack objects later. |
1013 | if (HasStackObjects) |
1014 | Info->setHasNonSpillStackObjects(true); |
1015 | |
1016 | // Everything live out of a block is spilled with fast regalloc, so it's |
1017 | // almost certain that spilling will be required. |
1018 | if (getTargetMachine().getOptLevel() == CodeGenOpt::None) |
1019 | HasStackObjects = true; |
1020 | |
1021 | if (ST.isAmdCodeObjectV2()) { |
1022 | if (HasStackObjects) { |
1023 | // If we have stack objects, we unquestionably need the private buffer |
1024 | // resource. For the Code Object V2 ABI, this will be the first 4 user |
1025 | // SGPR inputs. We can reserve those and use them directly. |
1026 | |
1027 | unsigned PrivateSegmentBufferReg = TRI->getPreloadedValue( |
1028 | MF, SIRegisterInfo::PRIVATE_SEGMENT_BUFFER); |
1029 | Info->setScratchRSrcReg(PrivateSegmentBufferReg); |
1030 | |
1031 | unsigned PrivateSegmentWaveByteOffsetReg = TRI->getPreloadedValue( |
1032 | MF, SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET); |
1033 | Info->setScratchWaveOffsetReg(PrivateSegmentWaveByteOffsetReg); |
1034 | } else { |
1035 | unsigned ReservedBufferReg |
1036 | = TRI->reservedPrivateSegmentBufferReg(MF); |
1037 | unsigned ReservedOffsetReg |
1038 | = TRI->reservedPrivateSegmentWaveByteOffsetReg(MF); |
1039 | |
1040 | // We tentatively reserve the last registers (skipping the last two |
1041 | // which may contain VCC). After register allocation, we'll replace |
1042 | // these with the ones immediately after those which were really |
1043 | // allocated. In the prologue, copies will be inserted from the argument |
1044 | // to these reserved registers. |
1045 | Info->setScratchRSrcReg(ReservedBufferReg); |
1046 | Info->setScratchWaveOffsetReg(ReservedOffsetReg); |
1047 | } |
1048 | } else { |
1049 | unsigned ReservedBufferReg = TRI->reservedPrivateSegmentBufferReg(MF); |
1050 | |
1051 | // Without HSA, relocations are used for the scratch pointer and the |
1052 | // buffer resource setup is always inserted in the prologue. Scratch wave |
1053 | // offset is still in an input SGPR. |
1054 | Info->setScratchRSrcReg(ReservedBufferReg); |
1055 | |
1056 | if (HasStackObjects) { |
1057 | unsigned ScratchWaveOffsetReg = TRI->getPreloadedValue( |
1058 | MF, SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET); |
1059 | Info->setScratchWaveOffsetReg(ScratchWaveOffsetReg); |
1060 | } else { |
1061 | unsigned ReservedOffsetReg |
1062 | = TRI->reservedPrivateSegmentWaveByteOffsetReg(MF); |
1063 | Info->setScratchWaveOffsetReg(ReservedOffsetReg); |
1064 | } |
1065 | } |
1066 | |
1067 | if (Info->hasWorkItemIDX()) { |
1068 | unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_X); |
1069 | MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass); |
1070 | CCInfo.AllocateReg(Reg); |
1071 | } |
1072 | |
1073 | if (Info->hasWorkItemIDY()) { |
1074 | unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Y); |
1075 | MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass); |
1076 | CCInfo.AllocateReg(Reg); |
1077 | } |
1078 | |
1079 | if (Info->hasWorkItemIDZ()) { |
1080 | unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Z); |
1081 | MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass); |
1082 | CCInfo.AllocateReg(Reg); |
1083 | } |
1084 | |
1085 | if (Chains.empty()) |
1086 | return Chain; |
1087 | |
1088 | return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains); |
1089 | } |
1090 | |
1091 | SDValue |
1092 | SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, |
1093 | bool isVarArg, |
1094 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
1095 | const SmallVectorImpl<SDValue> &OutVals, |
1096 | const SDLoc &DL, SelectionDAG &DAG) const { |
1097 | MachineFunction &MF = DAG.getMachineFunction(); |
1098 | SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
1099 | |
1100 | if (!AMDGPU::isShader(CallConv)) |
1101 | return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs, |
1102 | OutVals, DL, DAG); |
1103 | |
1104 | Info->setIfReturnsVoid(Outs.size() == 0); |
1105 | |
1106 | SmallVector<ISD::OutputArg, 48> Splits; |
1107 | SmallVector<SDValue, 48> SplitVals; |
1108 | |
1109 | // Split vectors into their elements. |
1110 | for (unsigned i = 0, e = Outs.size(); i != e; ++i) { |
1111 | const ISD::OutputArg &Out = Outs[i]; |
1112 | |
1113 | if (Out.VT.isVector()) { |
1114 | MVT VT = Out.VT.getVectorElementType(); |
1115 | ISD::OutputArg NewOut = Out; |
1116 | NewOut.Flags.setSplit(); |
1117 | NewOut.VT = VT; |
1118 | |
1119 | // We want the original number of vector elements here, e.g. |
1120 | // three or five, not four or eight. |
1121 | unsigned NumElements = Out.ArgVT.getVectorNumElements(); |
1122 | |
1123 | for (unsigned j = 0; j != NumElements; ++j) { |
1124 | SDValue Elem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, OutVals[i], |
1125 | DAG.getConstant(j, DL, MVT::i32)); |
1126 | SplitVals.push_back(Elem); |
1127 | Splits.push_back(NewOut); |
1128 | NewOut.PartOffset += NewOut.VT.getStoreSize(); |
1129 | } |
1130 | } else { |
1131 | SplitVals.push_back(OutVals[i]); |
1132 | Splits.push_back(Out); |
1133 | } |
1134 | } |
1135 | |
1136 | // CCValAssign - represents the assignment of the return value to a location. |
1137 | SmallVector<CCValAssign, 48> RVLocs; |
1138 | |
1139 | // CCState - Info about the registers and stack slots. |
1140 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, |
1141 | *DAG.getContext()); |
1142 | |
1143 | // Analyze outgoing return values. |
1144 | AnalyzeReturn(CCInfo, Splits); |
1145 | |
1146 | SDValue Flag; |
1147 | SmallVector<SDValue, 48> RetOps; |
1148 | RetOps.push_back(Chain); // Operand #0 = Chain (updated below) |
1149 | |
1150 | // Copy the result values into the output registers. |
1151 | for (unsigned i = 0, realRVLocIdx = 0; |
1152 | i != RVLocs.size(); |
1153 | ++i, ++realRVLocIdx) { |
1154 | CCValAssign &VA = RVLocs[i]; |
1155 | assert(VA.isRegLoc() && "Can only return in registers!"); |
1156 | |
1157 | SDValue Arg = SplitVals[realRVLocIdx]; |
1158 | |
1159 | // Copied from other backends. |
1160 | switch (VA.getLocInfo()) { |
1161 | default: llvm_unreachable("Unknown loc info!"); |
1162 | case CCValAssign::Full: |
1163 | break; |
1164 | case CCValAssign::BCvt: |
1165 | Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg); |
1166 | break; |
1167 | } |
1168 | |
1169 | Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag); |
1170 | Flag = Chain.getValue(1); |
1171 | RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); |
1172 | } |
1173 | |
1174 | // Update chain and glue. |
1175 | RetOps[0] = Chain; |
1176 | if (Flag.getNode()) |
1177 | RetOps.push_back(Flag); |
1178 | |
1179 | unsigned Opc = Info->returnsVoid() ? AMDGPUISD::ENDPGM : AMDGPUISD::RETURN; |
1180 | return DAG.getNode(Opc, DL, MVT::Other, RetOps); |
1181 | } |
1182 | |
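| // Resolve a named register (as used by llvm.read_register / llvm.write_register) to a physical register, rejecting names or value types the subtarget does not support. |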
1183 | unsigned SITargetLowering::getRegisterByName(const char* RegName, EVT VT, |
1184 | SelectionDAG &DAG) const { |
1185 | unsigned Reg = StringSwitch<unsigned>(RegName) |
1186 | .Case("m0", AMDGPU::M0) |
1187 | .Case("exec", AMDGPU::EXEC) |
1188 | .Case("exec_lo", AMDGPU::EXEC_LO) |
1189 | .Case("exec_hi", AMDGPU::EXEC_HI) |
1190 | .Case("flat_scratch", AMDGPU::FLAT_SCR) |
1191 | .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO) |
1192 | .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI) |
1193 | .Default(AMDGPU::NoRegister); |
1194 | |
1195 | if (Reg == AMDGPU::NoRegister) { |
1196 | report_fatal_error(Twine("invalid register name \"" |
1197 | + StringRef(RegName) + "\".")); |
1198 | |
1199 | } |
1200 | |
1201 | if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS && |
1202 | Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) { |
1203 | report_fatal_error(Twine("invalid register \"" |
1204 | + StringRef(RegName) + "\" for subtarget.")); |
1205 | } |
1206 | |
1207 | switch (Reg) { |
1208 | case AMDGPU::M0: |
1209 | case AMDGPU::EXEC_LO: |
1210 | case AMDGPU::EXEC_HI: |
1211 | case AMDGPU::FLAT_SCR_LO: |
1212 | case AMDGPU::FLAT_SCR_HI: |
1213 | if (VT.getSizeInBits() == 32) |
1214 | return Reg; |
1215 | break; |
1216 | case AMDGPU::EXEC: |
1217 | case AMDGPU::FLAT_SCR: |
1218 | if (VT.getSizeInBits() == 64) |
1219 | return Reg; |
1220 | break; |
1221 | default: |
1222 | llvm_unreachable("missing register type checking"); |
1223 | } |
1224 | |
1225 | report_fatal_error(Twine("invalid type for register \"" |
1226 | + StringRef(RegName) + "\".")); |
1227 | } |
1228 | |
1229 | // If kill is not the last instruction, split the block so kill is always a |
1230 | // proper terminator. |
1231 | MachineBasicBlock *SITargetLowering::splitKillBlock(MachineInstr &MI, |
1232 | MachineBasicBlock *BB) const { |
1233 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
1234 | |
1235 | MachineBasicBlock::iterator SplitPoint(&MI); |
1236 | ++SplitPoint; |
1237 | |
1238 | if (SplitPoint == BB->end()) { |
1239 | // Don't bother with a new block. |
1240 | MI.setDesc(TII->get(AMDGPU::SI_KILL_TERMINATOR)); |
1241 | return BB; |
1242 | } |
1243 | |
1244 | MachineFunction *MF = BB->getParent(); |
1245 | MachineBasicBlock *SplitBB |
1246 | = MF->CreateMachineBasicBlock(BB->getBasicBlock()); |
1247 | |
1248 | MF->insert(++MachineFunction::iterator(BB), SplitBB); |
1249 | SplitBB->splice(SplitBB->begin(), BB, SplitPoint, BB->end()); |
1250 | |
1251 | SplitBB->transferSuccessorsAndUpdatePHIs(BB); |
1252 | BB->addSuccessor(SplitBB); |
1253 | |
1254 | MI.setDesc(TII->get(AMDGPU::SI_KILL_TERMINATOR)); |
1255 | return SplitBB; |
1256 | } |
1257 | |
1258 | // Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in the |
1259 | // wavefront. If the value is uniform and just happens to be in a VGPR, this |
1260 | // will only do one iteration. In the worst case, this will loop 64 times. |
1261 | // |
1262 | // TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value. |
1263 | static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop( |
1264 | const SIInstrInfo *TII, |
1265 | MachineRegisterInfo &MRI, |
1266 | MachineBasicBlock &OrigBB, |
1267 | MachineBasicBlock &LoopBB, |
1268 | const DebugLoc &DL, |
1269 | const MachineOperand &IdxReg, |
1270 | unsigned InitReg, |
1271 | unsigned ResultReg, |
1272 | unsigned PhiReg, |
1273 | unsigned InitSaveExecReg, |
1274 | int Offset, |
1275 | bool UseGPRIdxMode) { |
1276 | MachineBasicBlock::iterator I = LoopBB.begin(); |
1277 | |
1278 | unsigned PhiExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); |
1279 | unsigned NewExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); |
1280 | unsigned CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); |
1281 | unsigned CondReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); |
1282 | |
1283 | BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg) |
1284 | .addReg(InitReg) |
1285 | .addMBB(&OrigBB) |
1286 | .addReg(ResultReg) |
1287 | .addMBB(&LoopBB); |
1288 | |
1289 | BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec) |
1290 | .addReg(InitSaveExecReg) |
1291 | .addMBB(&OrigBB) |
1292 | .addReg(NewExec) |
1293 | .addMBB(&LoopBB); |
1294 | |
1295 | // Read the next variant <- also loop target. |
1296 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg) |
1297 | .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef())); |
1298 | |
1299 | // Compare the just read M0 value to all possible Idx values. |
1300 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg) |
1301 | .addReg(CurrentIdxReg) |
1302 | .addReg(IdxReg.getReg(), 0, IdxReg.getSubReg()); |
1303 | |
1304 | if (UseGPRIdxMode) { |
1305 | unsigned IdxReg; |
1306 | if (Offset == 0) { |
1307 | IdxReg = CurrentIdxReg; |
1308 | } else { |
1309 | IdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); |
1310 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), IdxReg) |
1311 | .addReg(CurrentIdxReg, RegState::Kill) |
1312 | .addImm(Offset); |
1313 | } |
1314 | |
1315 | MachineInstr *SetIdx = |
1316 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_IDX)) |
1317 | .addReg(IdxReg, RegState::Kill); |
1318 | SetIdx->getOperand(2).setIsUndef(); |
1319 | } else { |
1320 | // Move index from VCC into M0 |
1321 | if (Offset == 0) { |
1322 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) |
1323 | .addReg(CurrentIdxReg, RegState::Kill); |
1324 | } else { |
1325 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0) |
1326 | .addReg(CurrentIdxReg, RegState::Kill) |
1327 | .addImm(Offset); |
1328 | } |
1329 | } |
1330 | |
1331 | // Update EXEC, save the original EXEC value to VCC. |
1332 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), NewExec) |
1333 | .addReg(CondReg, RegState::Kill); |
1334 | |
1335 | MRI.setSimpleHint(NewExec, CondReg); |
1336 | |
1337 | // Update EXEC, switch all done bits to 0 and all todo bits to 1. |
1338 | MachineInstr *InsertPt = |
1339 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC) |
1340 | .addReg(AMDGPU::EXEC) |
1341 | .addReg(NewExec); |
1342 | |
1343 | // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use |
1344 | // s_cbranch_scc0? |
1345 | |
1346 | // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover. |
1347 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ)) |
1348 | .addMBB(&LoopBB); |
1349 | |
1350 | return InsertPt->getIterator(); |
1351 | } |
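
The comment above describes the waterfall pattern this helper emits: pick the index of the first live lane, run the indexed move for every lane that happens to share that index, then clear those lanes from EXEC and branch back while any remain. A minimal scalar C++ model of one wavefront executing that loop is sketched below; the 64-lane size, the Idx[] contents, and all names are illustrative assumptions, not part of the backend.

#include <array>
#include <cstdint>
#include <cstdio>

// Hypothetical scalar model of the emitted waterfall loop, assuming a 64-lane
// wavefront whose per-lane index values live in Idx[].
int main() {
  constexpr unsigned WaveSize = 64;
  std::array<uint32_t, WaveSize> Idx{};
  for (unsigned L = 0; L < WaveSize; ++L)
    Idx[L] = L / 16;                                // four distinct index values

  uint64_t Exec = ~0ULL;                            // all lanes live
  unsigned Iterations = 0;
  while (Exec != 0) {
    unsigned FirstLane = __builtin_ctzll(Exec);     // v_readfirstlane_b32
    uint32_t CurrentIdx = Idx[FirstLane];

    uint64_t Match = 0;                             // v_cmp_eq_u32
    for (unsigned L = 0; L < WaveSize; ++L)
      if (((Exec >> L) & 1) && Idx[L] == CurrentIdx)
        Match |= 1ULL << L;

    // ... the indexed move runs here for the lanes in Match, with m0 (or the
    // GPR index register) set to CurrentIdx ...

    Exec &= ~Match;                                 // s_and_saveexec + s_xor
    ++Iterations;                                   // s_cbranch_execnz
  }
  std::printf("loop iterations: %u\n", Iterations); // 4 for this input
}

With a uniform index the loop runs exactly once, which is the "best case" the comment mentions; with 64 distinct per-lane indices it runs 64 times.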
1352 | |
1353 | // This has slightly sub-optimal regalloc when the source vector is killed by
1354 | // the read. The register allocator does not understand that the kill is
1355 | // per-workitem, so the vector is kept alive for the whole loop and we end up
1356 | // not re-using a subregister from it, using 1 more VGPR than necessary. This
1357 | // extra VGPR was not needed when this was expanded after register allocation.
1358 | static MachineBasicBlock::iterator loadM0FromVGPR(const SIInstrInfo *TII, |
1359 | MachineBasicBlock &MBB, |
1360 | MachineInstr &MI, |
1361 | unsigned InitResultReg, |
1362 | unsigned PhiReg, |
1363 | int Offset, |
1364 | bool UseGPRIdxMode) { |
1365 | MachineFunction *MF = MBB.getParent(); |
1366 | MachineRegisterInfo &MRI = MF->getRegInfo(); |
1367 | const DebugLoc &DL = MI.getDebugLoc(); |
1368 | MachineBasicBlock::iterator I(&MI); |
1369 | |
1370 | unsigned DstReg = MI.getOperand(0).getReg(); |
1371 | unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); |
1372 | unsigned TmpExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); |
1373 | |
1374 | BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec); |
1375 | |
1376 | // Save the EXEC mask |
1377 | BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64), SaveExec) |
1378 | .addReg(AMDGPU::EXEC); |
1379 | |
1380 | // To insert the loop we need to split the block. Move everything after this |
1381 | // point to a new block, and insert a new empty block between the two. |
1382 | MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock(); |
1383 | MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock(); |
1384 | MachineFunction::iterator MBBI(MBB); |
1385 | ++MBBI; |
1386 | |
1387 | MF->insert(MBBI, LoopBB); |
1388 | MF->insert(MBBI, RemainderBB); |
1389 | |
1390 | LoopBB->addSuccessor(LoopBB); |
1391 | LoopBB->addSuccessor(RemainderBB); |
1392 | |
1393 | // Move the rest of the block into a new block. |
1394 | RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB); |
1395 | RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end()); |
1396 | |
1397 | MBB.addSuccessor(LoopBB); |
1398 | |
1399 | const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); |
1400 | |
1401 | auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx, |
1402 | InitResultReg, DstReg, PhiReg, TmpExec, |
1403 | Offset, UseGPRIdxMode); |
1404 | |
1405 | MachineBasicBlock::iterator First = RemainderBB->begin(); |
1406 | BuildMI(*RemainderBB, First, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC) |
1407 | .addReg(SaveExec); |
1408 | |
1409 | return InsPt; |
1410 | } |
1411 | |
1412 | // Returns subreg index, offset |
1413 | static std::pair<unsigned, int> |
1414 | computeIndirectRegAndOffset(const SIRegisterInfo &TRI, |
1415 | const TargetRegisterClass *SuperRC, |
1416 | unsigned VecReg, |
1417 | int Offset) { |
1418 | int NumElts = SuperRC->getSize() / 4; |
1419 | |
1420 | // Skip out of bounds offsets, or else we would end up using an undefined |
1421 | // register. |
1422 | if (Offset >= NumElts || Offset < 0) |
1423 | return std::make_pair(AMDGPU::sub0, Offset); |
1424 | |
1425 | return std::make_pair(AMDGPU::sub0 + Offset, 0); |
1426 | } |
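
To make the split above concrete, here is a standalone sketch of the same mapping, assuming 4-byte elements and using a plain integer in place of AMDGPU::sub0; the other parameters are ignored, as in the function itself, and the helper name is made up for illustration.

#include <cassert>
#include <utility>

// Minimal model of computeIndirectRegAndOffset: an in-bounds constant offset
// is folded into the subregister index; anything else keeps a dynamic offset.
static std::pair<int, int> splitIndirectOffset(int SuperRCSizeInBytes,
                                               int Offset) {
  const int Sub0 = 0;                    // stand-in for AMDGPU::sub0
  int NumElts = SuperRCSizeInBytes / 4;
  if (Offset >= NumElts || Offset < 0)
    return {Sub0, Offset};               // out of bounds: keep the dynamic part
  return {Sub0 + Offset, 0};             // in bounds: fold into the subreg
}

int main() {
  assert(splitIndirectOffset(16, 2) == std::make_pair(2, 0)); // v4, elt 2 -> sub2
  assert(splitIndirectOffset(16, 5) == std::make_pair(0, 5)); // out of bounds
  assert(splitIndirectOffset(64, 9) == std::make_pair(9, 0)); // v16, elt 9 -> sub9
}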
1427 | |
1428 | // Return true if the index is an SGPR and was set. |
1429 | static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII, |
1430 | MachineRegisterInfo &MRI, |
1431 | MachineInstr &MI, |
1432 | int Offset, |
1433 | bool UseGPRIdxMode, |
1434 | bool IsIndirectSrc) { |
1435 | MachineBasicBlock *MBB = MI.getParent(); |
1436 | const DebugLoc &DL = MI.getDebugLoc(); |
1437 | MachineBasicBlock::iterator I(&MI); |
1438 | |
1439 | const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); |
1440 | const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg()); |
1441 | |
1442 | assert(Idx->getReg() != AMDGPU::NoRegister);
1443 | |
1444 | if (!TII->getRegisterInfo().isSGPRClass(IdxRC)) |
1445 | return false; |
1446 | |
1447 | if (UseGPRIdxMode) { |
1448 | unsigned IdxMode = IsIndirectSrc ? |
1449 | VGPRIndexMode::SRC0_ENABLE : VGPRIndexMode::DST_ENABLE; |
1450 | if (Offset == 0) { |
1451 | MachineInstr *SetOn = |
1452 | BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON)) |
1453 | .addOperand(*Idx) |
1454 | .addImm(IdxMode); |
1455 | |
1456 | SetOn->getOperand(3).setIsUndef(); |
1457 | } else { |
1458 | unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); |
1459 | BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp) |
1460 | .addOperand(*Idx) |
1461 | .addImm(Offset); |
1462 | MachineInstr *SetOn = |
1463 | BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON)) |
1464 | .addReg(Tmp, RegState::Kill) |
1465 | .addImm(IdxMode); |
1466 | |
1467 | SetOn->getOperand(3).setIsUndef(); |
1468 | } |
1469 | |
1470 | return true; |
1471 | } |
1472 | |
1473 | if (Offset == 0) { |
1474 | BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) |
1475 | .addOperand(*Idx); |
1476 | } else { |
1477 | BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0) |
1478 | .addOperand(*Idx) |
1479 | .addImm(Offset); |
1480 | } |
1481 | |
1482 | return true; |
1483 | } |
1484 | |
1485 | // Control flow needs to be inserted if indexing with a VGPR. |
1486 | static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI, |
1487 | MachineBasicBlock &MBB, |
1488 | const SISubtarget &ST) { |
1489 | const SIInstrInfo *TII = ST.getInstrInfo(); |
1490 | const SIRegisterInfo &TRI = TII->getRegisterInfo(); |
1491 | MachineFunction *MF = MBB.getParent(); |
1492 | MachineRegisterInfo &MRI = MF->getRegInfo(); |
1493 | |
1494 | unsigned Dst = MI.getOperand(0).getReg(); |
1495 | unsigned SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg(); |
1496 | int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm(); |
1497 | |
1498 | const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg); |
1499 | |
1500 | unsigned SubReg; |
1501 | std::tie(SubReg, Offset) |
1502 | = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset); |
1503 | |
1504 | bool UseGPRIdxMode = ST.hasVGPRIndexMode() && EnableVGPRIndexMode; |
1505 | |
1506 | if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, true)) { |
1507 | MachineBasicBlock::iterator I(&MI); |
1508 | const DebugLoc &DL = MI.getDebugLoc(); |
1509 | |
1510 | if (UseGPRIdxMode) { |
1511 | // TODO: Look at the uses to avoid the copy. This may require rescheduling |
1512 | // to avoid interfering with other uses, so probably requires a new |
1513 | // optimization pass. |
1514 | BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst) |
1515 | .addReg(SrcReg, RegState::Undef, SubReg) |
1516 | .addReg(SrcReg, RegState::Implicit) |
1517 | .addReg(AMDGPU::M0, RegState::Implicit); |
1518 | BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF)); |
1519 | } else { |
1520 | BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst) |
1521 | .addReg(SrcReg, RegState::Undef, SubReg) |
1522 | .addReg(SrcReg, RegState::Implicit); |
1523 | } |
1524 | |
1525 | MI.eraseFromParent(); |
1526 | |
1527 | return &MBB; |
1528 | } |
1529 | |
1530 | |
1531 | const DebugLoc &DL = MI.getDebugLoc(); |
1532 | MachineBasicBlock::iterator I(&MI); |
1533 | |
1534 | unsigned PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
1535 | unsigned InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
1536 | |
1537 | BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg); |
1538 | |
1539 | if (UseGPRIdxMode) { |
1540 | MachineInstr *SetOn = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON)) |
1541 | .addImm(0) // Reset inside loop. |
1542 | .addImm(VGPRIndexMode::SRC0_ENABLE); |
1543 | SetOn->getOperand(3).setIsUndef(); |
1544 | |
1545 | // Disable again after the loop. |
1546 | BuildMI(MBB, std::next(I), DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF)); |
1547 | } |
1548 | |
1549 | auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg, Offset, UseGPRIdxMode); |
1550 | MachineBasicBlock *LoopBB = InsPt->getParent(); |
1551 | |
1552 | if (UseGPRIdxMode) { |
1553 | BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst) |
1554 | .addReg(SrcReg, RegState::Undef, SubReg) |
1555 | .addReg(SrcReg, RegState::Implicit) |
1556 | .addReg(AMDGPU::M0, RegState::Implicit); |
1557 | } else { |
1558 | BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst) |
1559 | .addReg(SrcReg, RegState::Undef, SubReg) |
1560 | .addReg(SrcReg, RegState::Implicit); |
1561 | } |
1562 | |
1563 | MI.eraseFromParent(); |
1564 | |
1565 | return LoopBB; |
1566 | } |
1567 | |
1568 | static unsigned getMOVRELDPseudo(const TargetRegisterClass *VecRC) { |
1569 | switch (VecRC->getSize()) { |
1570 | case 4: |
1571 | return AMDGPU::V_MOVRELD_B32_V1; |
1572 | case 8: |
1573 | return AMDGPU::V_MOVRELD_B32_V2; |
1574 | case 16: |
1575 | return AMDGPU::V_MOVRELD_B32_V4; |
1576 | case 32: |
1577 | return AMDGPU::V_MOVRELD_B32_V8; |
1578 | case 64: |
1579 | return AMDGPU::V_MOVRELD_B32_V16; |
1580 | default: |
1581 | llvm_unreachable("unsupported size for MOVRELD pseudos")::llvm::llvm_unreachable_internal("unsupported size for MOVRELD pseudos" , "/tmp/buildd/llvm-toolchain-snapshot-4.0~svn290870/lib/Target/AMDGPU/SIISelLowering.cpp" , 1581); |
1582 | } |
1583 | } |
1584 | |
1585 | static MachineBasicBlock *emitIndirectDst(MachineInstr &MI, |
1586 | MachineBasicBlock &MBB, |
1587 | const SISubtarget &ST) { |
1588 | const SIInstrInfo *TII = ST.getInstrInfo(); |
1589 | const SIRegisterInfo &TRI = TII->getRegisterInfo(); |
1590 | MachineFunction *MF = MBB.getParent(); |
1591 | MachineRegisterInfo &MRI = MF->getRegInfo(); |
1592 | |
1593 | unsigned Dst = MI.getOperand(0).getReg(); |
1594 | const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src); |
1595 | const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); |
1596 | const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val); |
1597 | int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm(); |
1598 | const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg()); |
1599 | |
1600 | // This can be an immediate, but will be folded later. |
1601 | assert(Val->getReg());
1602 | |
1603 | unsigned SubReg; |
1604 | std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC, |
1605 | SrcVec->getReg(), |
1606 | Offset); |
1607 | bool UseGPRIdxMode = ST.hasVGPRIndexMode() && EnableVGPRIndexMode; |
1608 | |
1609 | if (Idx->getReg() == AMDGPU::NoRegister) { |
1610 | MachineBasicBlock::iterator I(&MI); |
1611 | const DebugLoc &DL = MI.getDebugLoc(); |
1612 | |
1613 | assert(Offset == 0);
1614 | |
1615 | BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst) |
1616 | .addOperand(*SrcVec) |
1617 | .addOperand(*Val) |
1618 | .addImm(SubReg); |
1619 | |
1620 | MI.eraseFromParent(); |
1621 | return &MBB; |
1622 | } |
1623 | |
1624 | if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, false)) { |
1625 | MachineBasicBlock::iterator I(&MI); |
1626 | const DebugLoc &DL = MI.getDebugLoc(); |
1627 | |
1628 | if (UseGPRIdxMode) { |
1629 | BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_indirect)) |
1630 | .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst |
1631 | .addOperand(*Val) |
1632 | .addReg(Dst, RegState::ImplicitDefine) |
1633 | .addReg(SrcVec->getReg(), RegState::Implicit) |
1634 | .addReg(AMDGPU::M0, RegState::Implicit); |
1635 | |
1636 | BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF)); |
1637 | } else { |
1638 | const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(VecRC)); |
1639 | |
1640 | BuildMI(MBB, I, DL, MovRelDesc) |
1641 | .addReg(Dst, RegState::Define) |
1642 | .addReg(SrcVec->getReg()) |
1643 | .addOperand(*Val) |
1644 | .addImm(SubReg - AMDGPU::sub0); |
1645 | } |
1646 | |
1647 | MI.eraseFromParent(); |
1648 | return &MBB; |
1649 | } |
1650 | |
1651 | if (Val->isReg()) |
1652 | MRI.clearKillFlags(Val->getReg()); |
1653 | |
1654 | const DebugLoc &DL = MI.getDebugLoc(); |
1655 | |
1656 | if (UseGPRIdxMode) { |
1657 | MachineBasicBlock::iterator I(&MI); |
1658 | |
1659 | MachineInstr *SetOn = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON)) |
1660 | .addImm(0) // Reset inside loop. |
1661 | .addImm(VGPRIndexMode::DST_ENABLE); |
1662 | SetOn->getOperand(3).setIsUndef(); |
1663 | |
1664 | // Disable again after the loop. |
1665 | BuildMI(MBB, std::next(I), DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF)); |
1666 | } |
1667 | |
1668 | unsigned PhiReg = MRI.createVirtualRegister(VecRC); |
1669 | |
1670 | auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg, |
1671 | Offset, UseGPRIdxMode); |
1672 | MachineBasicBlock *LoopBB = InsPt->getParent(); |
1673 | |
1674 | if (UseGPRIdxMode) { |
1675 | BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_indirect)) |
1676 | .addReg(PhiReg, RegState::Undef, SubReg) // vdst |
1677 | .addOperand(*Val) // src0 |
1678 | .addReg(Dst, RegState::ImplicitDefine) |
1679 | .addReg(PhiReg, RegState::Implicit) |
1680 | .addReg(AMDGPU::M0, RegState::Implicit); |
1681 | } else { |
1682 | const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(VecRC)); |
1683 | |
1684 | BuildMI(*LoopBB, InsPt, DL, MovRelDesc) |
1685 | .addReg(Dst, RegState::Define) |
1686 | .addReg(PhiReg) |
1687 | .addOperand(*Val) |
1688 | .addImm(SubReg - AMDGPU::sub0); |
1689 | } |
1690 | |
1691 | MI.eraseFromParent(); |
1692 | |
1693 | return LoopBB; |
1694 | } |
1695 | |
1696 | MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter( |
1697 | MachineInstr &MI, MachineBasicBlock *BB) const { |
1698 | |
1699 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
1700 | MachineFunction *MF = BB->getParent(); |
1701 | SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); |
1702 | |
1703 | if (TII->isMIMG(MI)) { |
1704 | if (!MI.memoperands_empty()) |
1705 | return BB; |
1706 | // Add a memoperand for mimg instructions so that they aren't assumed to |
1707 | // be ordered memory instructions.
1708 | |
1709 | MachinePointerInfo PtrInfo(MFI->getImagePSV()); |
1710 | MachineMemOperand::Flags Flags = MachineMemOperand::MODereferenceable; |
1711 | if (MI.mayStore()) |
1712 | Flags |= MachineMemOperand::MOStore; |
1713 | |
1714 | if (MI.mayLoad()) |
1715 | Flags |= MachineMemOperand::MOLoad; |
1716 | |
1717 | auto MMO = MF->getMachineMemOperand(PtrInfo, Flags, 0, 0); |
1718 | MI.addMemOperand(*MF, MMO); |
1719 | return BB; |
1720 | } |
1721 | |
1722 | switch (MI.getOpcode()) { |
1723 | case AMDGPU::SI_INIT_M0: { |
1724 | BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(), |
1725 | TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) |
1726 | .addOperand(MI.getOperand(0)); |
1727 | MI.eraseFromParent(); |
1728 | return BB; |
1729 | } |
1730 | case AMDGPU::GET_GROUPSTATICSIZE: { |
1731 | DebugLoc DL = MI.getDebugLoc(); |
1732 | BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32)) |
1733 | .addOperand(MI.getOperand(0)) |
1734 | .addImm(MFI->getLDSSize()); |
1735 | MI.eraseFromParent(); |
1736 | return BB; |
1737 | } |
1738 | case AMDGPU::SI_INDIRECT_SRC_V1: |
1739 | case AMDGPU::SI_INDIRECT_SRC_V2: |
1740 | case AMDGPU::SI_INDIRECT_SRC_V4: |
1741 | case AMDGPU::SI_INDIRECT_SRC_V8: |
1742 | case AMDGPU::SI_INDIRECT_SRC_V16: |
1743 | return emitIndirectSrc(MI, *BB, *getSubtarget()); |
1744 | case AMDGPU::SI_INDIRECT_DST_V1: |
1745 | case AMDGPU::SI_INDIRECT_DST_V2: |
1746 | case AMDGPU::SI_INDIRECT_DST_V4: |
1747 | case AMDGPU::SI_INDIRECT_DST_V8: |
1748 | case AMDGPU::SI_INDIRECT_DST_V16: |
1749 | return emitIndirectDst(MI, *BB, *getSubtarget()); |
1750 | case AMDGPU::SI_KILL: |
1751 | return splitKillBlock(MI, BB); |
1752 | case AMDGPU::V_CNDMASK_B64_PSEUDO: { |
1753 | MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); |
1754 | |
1755 | unsigned Dst = MI.getOperand(0).getReg(); |
1756 | unsigned Src0 = MI.getOperand(1).getReg(); |
1757 | unsigned Src1 = MI.getOperand(2).getReg(); |
1758 | const DebugLoc &DL = MI.getDebugLoc(); |
1759 | unsigned SrcCond = MI.getOperand(3).getReg(); |
1760 | |
1761 | unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
1762 | unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
1763 | |
1764 | BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo) |
1765 | .addReg(Src0, 0, AMDGPU::sub0) |
1766 | .addReg(Src1, 0, AMDGPU::sub0) |
1767 | .addReg(SrcCond); |
1768 | BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi) |
1769 | .addReg(Src0, 0, AMDGPU::sub1) |
1770 | .addReg(Src1, 0, AMDGPU::sub1) |
1771 | .addReg(SrcCond); |
1772 | |
1773 | BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst) |
1774 | .addReg(DstLo) |
1775 | .addImm(AMDGPU::sub0) |
1776 | .addReg(DstHi) |
1777 | .addImm(AMDGPU::sub1); |
1778 | MI.eraseFromParent(); |
1779 | return BB; |
1780 | } |
1781 | case AMDGPU::SI_BR_UNDEF: { |
1782 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
1783 | const DebugLoc &DL = MI.getDebugLoc(); |
1784 | MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1)) |
1785 | .addOperand(MI.getOperand(0)); |
1786 | Br->getOperand(1).setIsUndef(true); // read undef SCC |
1787 | MI.eraseFromParent(); |
1788 | return BB; |
1789 | } |
1790 | default: |
1791 | return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB); |
1792 | } |
1793 | } |
1794 | |
1795 | bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const { |
1796 | // This currently forces unfolding various combinations of fsub into fma with |
1797 | // free fneg'd operands. As long as we have fast FMA (controlled by |
1798 | // isFMAFasterThanFMulAndFAdd), we should perform these. |
1799 | |
1800 | // When fma is quarter rate, for f64 where add / sub are at best half rate, |
1801 | // most of these combines appear to be cycle neutral but save on instruction |
1802 | // count / code size. |
1803 | return true; |
1804 | } |
1805 | |
1806 | EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx, |
1807 | EVT VT) const { |
1808 | if (!VT.isVector()) { |
1809 | return MVT::i1; |
1810 | } |
1811 | return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements()); |
1812 | } |
1813 | |
1814 | MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const { |
1815 | // TODO: Should i16 be used always if legal? For now it would force VALU |
1816 | // shifts. |
1817 | return (VT == MVT::i16) ? MVT::i16 : MVT::i32; |
1818 | } |
1819 | |
1820 | // Answering this is somewhat tricky and depends on the specific device, since
1821 | // devices have different rates for fma and for f64 operations in general.
1822 | // |
1823 | // v_fma_f64 and v_mul_f64 always take the same number of cycles as each other |
1824 | // regardless of which device (although the number of cycles differs between |
1825 | // devices), so it is always profitable for f64. |
1826 | // |
1827 | // v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable |
1828 | // only on full rate devices. Normally, we should prefer selecting v_mad_f32 |
1829 | // which we can always do even without fused FP ops since it returns the same |
1830 | // result as the separate operations and since it is always full |
1831 | // rate. Therefore, we lie and report that it is not faster for f32. v_mad_f32 |
1832 | // however does not support denormals, so we do report fma as faster if we have |
1833 | // a fast fma device and require denormals. |
1834 | // |
1835 | bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const { |
1836 | VT = VT.getScalarType(); |
1837 | |
1838 | if (!VT.isSimple()) |
1839 | return false; |
1840 | |
1841 | switch (VT.getSimpleVT().SimpleTy) { |
1842 | case MVT::f32: |
1843 | // This is as fast on some subtargets. However, we always have full rate f32 |
1844 | // mad available which returns the same result as the separate operations |
1845 | // which we should prefer over fma. We can't use this if we want to support |
1846 | // denormals, so only report this in these cases. |
1847 | return Subtarget->hasFP32Denormals() && Subtarget->hasFastFMAF32(); |
1848 | case MVT::f64: |
1849 | return true; |
1850 | case MVT::f16: |
1851 | return Subtarget->has16BitInsts() && Subtarget->hasFP16Denormals(); |
1852 | default: |
1853 | break; |
1854 | } |
1855 | |
1856 | return false; |
1857 | } |
1858 | |
1859 | //===----------------------------------------------------------------------===// |
1860 | // Custom DAG Lowering Operations |
1861 | //===----------------------------------------------------------------------===// |
1862 | |
1863 | SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { |
1864 | switch (Op.getOpcode()) { |
1865 | default: return AMDGPUTargetLowering::LowerOperation(Op, DAG); |
1866 | case ISD::BRCOND: return LowerBRCOND(Op, DAG); |
1867 | case ISD::LOAD: { |
1868 | SDValue Result = LowerLOAD(Op, DAG); |
1869 | assert((!Result.getNode() ||
1870 |         Result.getNode()->getNumValues() == 2) &&
1871 |        "Load should return a value and a chain");
1872 | return Result; |
1873 | } |
1874 | |
1875 | case ISD::FSIN: |
1876 | case ISD::FCOS: |
1877 | return LowerTrig(Op, DAG); |
1878 | case ISD::SELECT: return LowerSELECT(Op, DAG); |
1879 | case ISD::FDIV: return LowerFDIV(Op, DAG); |
1880 | case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG); |
1881 | case ISD::STORE: return LowerSTORE(Op, DAG); |
1882 | case ISD::GlobalAddress: { |
1883 | MachineFunction &MF = DAG.getMachineFunction(); |
1884 | SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); |
1885 | return LowerGlobalAddress(MFI, Op, DAG); |
1886 | } |
1887 | case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); |
1888 | case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG); |
1889 | case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG); |
1890 | case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG); |
1891 | case ISD::TRAP: return lowerTRAP(Op, DAG); |
1892 | case ISD::FP_ROUND: |
1893 | return lowerFP_ROUND(Op, DAG); |
1894 | } |
1895 | return SDValue(); |
1896 | } |
1897 | |
1898 | /// \brief Helper function for LowerBRCOND |
1899 | static SDNode *findUser(SDValue Value, unsigned Opcode) { |
1900 | |
1901 | SDNode *Parent = Value.getNode(); |
1902 | for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end(); |
1903 | I != E; ++I) { |
1904 | |
1905 | if (I.getUse().get() != Value) |
1906 | continue; |
1907 | |
1908 | if (I->getOpcode() == Opcode) |
1909 | return *I; |
1910 | } |
1911 | return nullptr; |
1912 | } |
1913 | |
1914 | bool SITargetLowering::isCFIntrinsic(const SDNode *Intr) const { |
1915 | if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) { |
1916 | switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) { |
1917 | case AMDGPUIntrinsic::amdgcn_if: |
1918 | case AMDGPUIntrinsic::amdgcn_else: |
1919 | case AMDGPUIntrinsic::amdgcn_end_cf: |
1920 | case AMDGPUIntrinsic::amdgcn_loop: |
1921 | return true; |
1922 | default: |
1923 | return false; |
1924 | } |
1925 | } |
1926 | |
1927 | if (Intr->getOpcode() == ISD::INTRINSIC_WO_CHAIN) { |
1928 | switch (cast<ConstantSDNode>(Intr->getOperand(0))->getZExtValue()) { |
1929 | case AMDGPUIntrinsic::amdgcn_break: |
1930 | case AMDGPUIntrinsic::amdgcn_if_break: |
1931 | case AMDGPUIntrinsic::amdgcn_else_break: |
1932 | return true; |
1933 | default: |
1934 | return false; |
1935 | } |
1936 | } |
1937 | |
1938 | return false; |
1939 | } |
1940 | |
1941 | void SITargetLowering::createDebuggerPrologueStackObjects( |
1942 | MachineFunction &MF) const { |
1943 | // Create stack objects that are used for emitting debugger prologue. |
1944 | // |
1945 | // Debugger prologue writes work group IDs and work item IDs to scratch memory |
1946 | // at fixed location in the following format: |
1947 | // offset 0: work group ID x |
1948 | // offset 4: work group ID y |
1949 | // offset 8: work group ID z |
1950 | // offset 16: work item ID x |
1951 | // offset 20: work item ID y |
1952 | // offset 24: work item ID z |
1953 | SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
1954 | int ObjectIdx = 0; |
1955 | |
1956 | // For each dimension: |
1957 | for (unsigned i = 0; i < 3; ++i) { |
1958 | // Create fixed stack object for work group ID. |
1959 | ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4, true); |
1960 | Info->setDebuggerWorkGroupIDStackObjectIndex(i, ObjectIdx); |
1961 | // Create fixed stack object for work item ID. |
1962 | ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4 + 16, true); |
1963 | Info->setDebuggerWorkItemIDStackObjectIndex(i, ObjectIdx); |
1964 | } |
1965 | } |
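
As a sanity check on the offsets listed in the comment above (work group IDs at 0, 4, 8 and work item IDs at 16, 20, 24), here is a small standalone sketch of that scratch layout; the struct name and the explicit padding field are assumptions for illustration only.

#include <cstddef>
#include <cstdint>

// Assumed layout of the debugger prologue scratch area described above.
struct DebuggerPrologueScratch {
  uint32_t WorkGroupIDX; // offset 0
  uint32_t WorkGroupIDY; // offset 4
  uint32_t WorkGroupIDZ; // offset 8
  uint32_t Reserved;     // offset 12, not written by the prologue
  uint32_t WorkItemIDX;  // offset 16
  uint32_t WorkItemIDY;  // offset 20
  uint32_t WorkItemIDZ;  // offset 24
};

static_assert(offsetof(DebuggerPrologueScratch, WorkGroupIDZ) == 8, "layout");
static_assert(offsetof(DebuggerPrologueScratch, WorkItemIDX) == 16, "layout");
static_assert(offsetof(DebuggerPrologueScratch, WorkItemIDZ) == 24, "layout");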
1966 | |
1967 | bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const { |
1968 | const Triple &TT = getTargetMachine().getTargetTriple(); |
1969 | return GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS && |
1970 | AMDGPU::shouldEmitConstantsToTextSection(TT); |
1971 | } |
1972 | |
1973 | bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const { |
1974 | return (GV->getType()->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS || |
1975 | GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS) && |
1976 | !shouldEmitFixup(GV) && |
1977 | !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV); |
1978 | } |
1979 | |
1980 | bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const { |
1981 | return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV); |
1982 | } |
1983 | |
1984 | /// This transforms the control flow intrinsics to get the branch destination as
1985 | /// the last parameter, and also switches the branch target with BR if the need arises.
1986 | SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND, |
1987 | SelectionDAG &DAG) const { |
1988 | |
1989 | SDLoc DL(BRCOND); |
1990 | |
1991 | SDNode *Intr = BRCOND.getOperand(1).getNode(); |
1992 | SDValue Target = BRCOND.getOperand(2); |
1993 | SDNode *BR = nullptr; |
1994 | SDNode *SetCC = nullptr; |
1995 | |
1996 | if (Intr->getOpcode() == ISD::SETCC) { |
1997 | // As long as we negate the condition everything is fine |
1998 | SetCC = Intr; |
1999 | Intr = SetCC->getOperand(0).getNode(); |
2000 | |
2001 | } else { |
2002 | // Get the target from BR if we don't negate the condition |
2003 | BR = findUser(BRCOND, ISD::BR); |
2004 | Target = BR->getOperand(1); |
2005 | } |
2006 | |
2007 | // FIXME: This changes the types of the intrinsics instead of introducing new |
2008 | // nodes with the correct types. |
2009 | // e.g. llvm.amdgcn.loop |
2010 | |
2011 | // eg: i1,ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3 |
2012 | // => t9: ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3, BasicBlock:ch<bb1 0x7fee5286d088> |
2013 | |
2014 | if (!isCFIntrinsic(Intr)) { |
2015 | // This is a uniform branch so we don't need to legalize. |
2016 | return BRCOND; |
2017 | } |
2018 | |
2019 | bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID || |
2020 | Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN; |
2021 | |
2022 | assert(!SetCC ||
2023 |        (SetCC->getConstantOperandVal(1) == 1 &&
2024 |         cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() ==
2025 |         ISD::SETNE));
2026 | |
2027 | // operands of the new intrinsic call |
2028 | SmallVector<SDValue, 4> Ops; |
2029 | if (HaveChain) |
2030 | Ops.push_back(BRCOND.getOperand(0)); |
2031 | |
2032 | Ops.append(Intr->op_begin() + (HaveChain ? 1 : 0), Intr->op_end()); |
2033 | Ops.push_back(Target); |
2034 | |
2035 | ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end()); |
2036 | |
2037 | // build the new intrinsic call |
2038 | SDNode *Result = DAG.getNode( |
2039 | Res.size() > 1 ? ISD::INTRINSIC_W_CHAIN : ISD::INTRINSIC_VOID, DL, |
2040 | DAG.getVTList(Res), Ops).getNode(); |
2041 | |
2042 | if (!HaveChain) { |
2043 | SDValue Ops[] = { |
2044 | SDValue(Result, 0), |
2045 | BRCOND.getOperand(0) |
2046 | }; |
2047 | |
2048 | Result = DAG.getMergeValues(Ops, DL).getNode(); |
2049 | } |
2050 | |
2051 | if (BR) { |
2052 | // Give the branch instruction our target |
2053 | SDValue Ops[] = { |
2054 | BR->getOperand(0), |
2055 | BRCOND.getOperand(2) |
2056 | }; |
2057 | SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops); |
2058 | DAG.ReplaceAllUsesWith(BR, NewBR.getNode()); |
2059 | BR = NewBR.getNode(); |
Value stored to 'BR' is never read | |
2060 | } |
2061 | |
2062 | SDValue Chain = SDValue(Result, Result->getNumValues() - 1); |
2063 | |
2064 | // Copy the intrinsic results to registers |
2065 | for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) { |
2066 | SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg); |
2067 | if (!CopyToReg) |
2068 | continue; |
2069 | |
2070 | Chain = DAG.getCopyToReg( |
2071 | Chain, DL, |
2072 | CopyToReg->getOperand(1), |
2073 | SDValue(Result, i - 1), |
2074 | SDValue()); |
2075 | |
2076 | DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0)); |
2077 | } |
2078 | |
2079 | // Remove the old intrinsic from the chain |
2080 | DAG.ReplaceAllUsesOfValueWith( |
2081 | SDValue(Intr, Intr->getNumValues() - 1), |
2082 | Intr->getOperand(0)); |
2083 | |
2084 | return Chain; |
2085 | } |
2086 | |
2087 | SDValue SITargetLowering::getFPExtOrFPTrunc(SelectionDAG &DAG, |
2088 | SDValue Op, |
2089 | const SDLoc &DL, |
2090 | EVT VT) const { |
2091 | return Op.getValueType().bitsLE(VT) ? |
2092 | DAG.getNode(ISD::FP_EXTEND, DL, VT, Op) : |
2093 | DAG.getNode(ISD::FTRUNC, DL, VT, Op); |
2094 | } |
2095 | |
2096 | SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const { |
2097 | assert(Op.getValueType() == MVT::f16 &&
2098 |        "Do not know how to custom lower FP_ROUND for non-f16 type");
2099 | |
2100 | SDValue Src = Op.getOperand(0); |
2101 | EVT SrcVT = Src.getValueType(); |
2102 | if (SrcVT != MVT::f64) |
2103 | return Op; |
2104 | |
2105 | SDLoc DL(Op); |
2106 | |
2107 | SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src); |
2108 | SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16); |
2109 | return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc);
2110 | } |
2111 | |
2112 | SDValue SITargetLowering::getSegmentAperture(unsigned AS, |
2113 | SelectionDAG &DAG) const { |
2114 | SDLoc SL; |
2115 | MachineFunction &MF = DAG.getMachineFunction(); |
2116 | SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
2117 | unsigned UserSGPR = Info->getQueuePtrUserSGPR(); |
2118 | assert(UserSGPR != AMDGPU::NoRegister);
2119 | |
2120 | SDValue QueuePtr = CreateLiveInRegister( |
2121 | DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64); |
2122 | |
2123 | // Offset into amd_queue_t for group_segment_aperture_base_hi / |
2124 | // private_segment_aperture_base_hi. |
2125 | uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 0x40 : 0x44; |
2126 | |
2127 | SDValue Ptr = DAG.getNode(ISD::ADD, SL, MVT::i64, QueuePtr, |
2128 | DAG.getConstant(StructOffset, SL, MVT::i64)); |
2129 | |
2130 | // TODO: Use custom target PseudoSourceValue. |
2131 | // TODO: We should use the value from the IR intrinsic call, but it might not |
2132 | // be available and how do we get it? |
2133 | Value *V = UndefValue::get(PointerType::get(Type::getInt8Ty(*DAG.getContext()), |
2134 | AMDGPUAS::CONSTANT_ADDRESS)); |
2135 | |
2136 | MachinePointerInfo PtrInfo(V, StructOffset); |
2137 | return DAG.getLoad(MVT::i32, SL, QueuePtr.getValue(1), Ptr, PtrInfo, |
2138 | MinAlign(64, StructOffset), |
2139 | MachineMemOperand::MODereferenceable | |
2140 | MachineMemOperand::MOInvariant); |
2141 | } |
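
The two StructOffset values above refer to fields of amd_queue_t. A sketch of just those two fields, with everything before them collapsed into opaque padding, is shown below; this is an assumption for illustration, not the real amd_queue_t definition.

#include <cstddef>
#include <cstdint>

// Partial sketch of amd_queue_t: only the aperture high words matter here.
struct AmdQueueAperturesSketch {
  uint8_t  Leading[0x40];                // earlier amd_queue_t fields (opaque)
  uint32_t GroupSegmentApertureBaseHi;   // 0x40, used for LOCAL_ADDRESS
  uint32_t PrivateSegmentApertureBaseHi; // 0x44, used for PRIVATE_ADDRESS
};

static_assert(offsetof(AmdQueueAperturesSketch, GroupSegmentApertureBaseHi) == 0x40, "");
static_assert(offsetof(AmdQueueAperturesSketch, PrivateSegmentApertureBaseHi) == 0x44, "");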
2142 | |
2143 | SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op, |
2144 | SelectionDAG &DAG) const { |
2145 | SDLoc SL(Op); |
2146 | const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op); |
2147 | |
2148 | SDValue Src = ASC->getOperand(0); |
2149 | |
2150 | // FIXME: Really support non-0 null pointers. |
2151 | SDValue SegmentNullPtr = DAG.getConstant(-1, SL, MVT::i32); |
2152 | SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64); |
2153 | |
2154 | // flat -> local/private |
2155 | if (ASC->getSrcAddressSpace() == AMDGPUAS::FLAT_ADDRESS) { |
2156 | if (ASC->getDestAddressSpace() == AMDGPUAS::LOCAL_ADDRESS || |
2157 | ASC->getDestAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) { |
2158 | SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE); |
2159 | SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src); |
2160 | |
2161 | return DAG.getNode(ISD::SELECT, SL, MVT::i32, |
2162 | NonNull, Ptr, SegmentNullPtr); |
2163 | } |
2164 | } |
2165 | |
2166 | // local/private -> flat |
2167 | if (ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) { |
2168 | if (ASC->getSrcAddressSpace() == AMDGPUAS::LOCAL_ADDRESS || |
2169 | ASC->getSrcAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) { |
2170 | SDValue NonNull |
2171 | = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE); |
2172 | |
2173 | SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), DAG); |
2174 | SDValue CvtPtr |
2175 | = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture); |
2176 | |
2177 | return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull, |
2178 | DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr), |
2179 | FlatNullPtr); |
2180 | } |
2181 | } |
2182 | |
2183 | // global <-> flat are no-ops and never emitted. |
2184 | |
2185 | const MachineFunction &MF = DAG.getMachineFunction(); |
2186 | DiagnosticInfoUnsupported InvalidAddrSpaceCast( |
2187 | *MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc()); |
2188 | DAG.getContext()->diagnose(InvalidAddrSpaceCast); |
2189 | |
2190 | return DAG.getUNDEF(ASC->getValueType(0)); |
2191 | } |
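
A scalar model of the two conversions lowered above may help: flat to local/private truncates the pointer and maps flat null (0) to the 32-bit segment null (-1), while the reverse direction splices the aperture high word into the upper half and maps the segment null back to flat null. The helper names and the example aperture value are assumptions.

#include <cassert>
#include <cstdint>

// flat -> local/private: select between the truncated pointer and the 32-bit
// segment null (-1), depending on whether the flat pointer is null.
static uint32_t flatToSegment(uint64_t Flat) {
  return Flat == 0 ? UINT32_MAX : static_cast<uint32_t>(Flat);
}

// local/private -> flat: build {low = segment offset, high = aperture base hi},
// or produce the flat null pointer for the segment null.
static uint64_t segmentToFlat(uint32_t Seg, uint32_t ApertureHi) {
  return Seg == UINT32_MAX
             ? 0
             : (static_cast<uint64_t>(ApertureHi) << 32) | Seg;
}

int main() {
  const uint32_t ApertureHi = 0x00010000; // example value only
  assert(segmentToFlat(flatToSegment(0), ApertureHi) == 0);           // null round-trips
  assert(flatToSegment(segmentToFlat(0x1234, ApertureHi)) == 0x1234); // offsets survive
}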
2192 | |
2193 | bool |
2194 | SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { |
2195 | // We can fold offsets for anything that doesn't require a GOT relocation. |
2196 | return (GA->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS || |
2197 | GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS) && |
2198 | !shouldEmitGOTReloc(GA->getGlobal()); |
2199 | } |
2200 | |
2201 | static SDValue buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV, |
2202 | SDLoc DL, unsigned Offset, EVT PtrVT, |
2203 | unsigned GAFlags = SIInstrInfo::MO_NONE) { |
2204 | // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode is |
2205 | // lowered to the following code sequence: |
2206 | // |
2207 | // For constant address space: |
2208 | // s_getpc_b64 s[0:1] |
2209 | // s_add_u32 s0, s0, $symbol |
2210 | // s_addc_u32 s1, s1, 0 |
2211 | // |
2212 | // s_getpc_b64 returns the address of the s_add_u32 instruction and then |
2213 | // a fixup or relocation is emitted to replace $symbol with a literal |
2214 | // constant, which is a pc-relative offset from the encoding of the $symbol |
2215 | // operand to the global variable. |
2216 | // |
2217 | // For global address space: |
2218 | // s_getpc_b64 s[0:1] |
2219 | // s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo |
2220 | // s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi |
2221 | // |
2222 | // s_getpc_b64 returns the address of the s_add_u32 instruction and then |
2223 | // fixups or relocations are emitted to replace $symbol@*@lo and |
2224 | // $symbol@*@hi with lower 32 bits and higher 32 bits of a literal constant, |
2225 | // which is a 64-bit pc-relative offset from the encoding of the $symbol |
2226 | // operand to the global variable. |
2227 | // |
2228 | // What we want here is an offset from the value returned by s_getpc |
2229 | // (which is the address of the s_add_u32 instruction) to the global |
2230 | // variable, but since the encoding of $symbol starts 4 bytes after the start |
2231 | // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too |
2232 | // small. This requires us to add 4 to the global variable offset in order to |
2233 | // compute the correct address. |
2234 | SDValue PtrLo = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4, |
2235 | GAFlags); |
2236 | SDValue PtrHi = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4, |
2237 | GAFlags == SIInstrInfo::MO_NONE ? |
2238 | GAFlags : GAFlags + 1); |
2239 | return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi); |
2240 | } |
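
The +4 adjustment built into the target addresses above can be checked numerically. Below is a tiny standalone sketch, with made-up addresses, of how the pc-relative fixup described in the comment combines with the s_getpc_b64 result; the variable names and the exact way the fixup is applied are inferences from that comment, not taken from the MC layer.

#include <cassert>
#include <cstdint>

int main() {
  // s_getpc_b64 yields the address of the following s_add_u32; the literal
  // $symbol operand is encoded 4 bytes after that.
  const uint64_t GetPCResult = 0x1000;        // made-up address
  const uint64_t SymbolOperandAddr = GetPCResult + 4;
  const uint64_t GlobalVarAddr = 0x2000;      // made-up address
  const uint64_t RequestedOffset = 0;         // GSD->getOffset()

  // The fixup writes (target + addend) - (address of the $symbol operand),
  // where the addend is RequestedOffset + 4 as built above.
  int64_t Fixup = static_cast<int64_t>(GlobalVarAddr + RequestedOffset + 4) -
                  static_cast<int64_t>(SymbolOperandAddr);

  // Adding the fixup to the s_getpc_b64 result lands on the global variable.
  assert(GetPCResult + Fixup == GlobalVarAddr + RequestedOffset);
}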
2241 | |
2242 | SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI, |
2243 | SDValue Op, |
2244 | SelectionDAG &DAG) const { |
2245 | GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op); |
2246 | |
2247 | if (GSD->getAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS && |
2248 | GSD->getAddressSpace() != AMDGPUAS::GLOBAL_ADDRESS) |
2249 | return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG); |
2250 | |
2251 | SDLoc DL(GSD); |
2252 | const GlobalValue *GV = GSD->getGlobal(); |
2253 | EVT PtrVT = Op.getValueType(); |
2254 | |
2255 | if (shouldEmitFixup(GV)) |
2256 | return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT); |
2257 | else if (shouldEmitPCReloc(GV)) |
2258 | return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT, |
2259 | SIInstrInfo::MO_REL32); |
2260 | |
2261 | SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT, |
2262 | SIInstrInfo::MO_GOTPCREL32); |
2263 | |
2264 | Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext()); |
2265 | PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS); |
2266 | const DataLayout &DataLayout = DAG.getDataLayout(); |
2267 | unsigned Align = DataLayout.getABITypeAlignment(PtrTy); |
2268 | // FIXME: Use a PseudoSourceValue once those can be assigned an address space. |
2269 | MachinePointerInfo PtrInfo(UndefValue::get(PtrTy)); |
2270 | |
2271 | return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Align, |
2272 | MachineMemOperand::MODereferenceable | |
2273 | MachineMemOperand::MOInvariant); |
2274 | } |
2275 | |
2276 | SDValue SITargetLowering::lowerTRAP(SDValue Op, |
2277 | SelectionDAG &DAG) const { |
2278 | const MachineFunction &MF = DAG.getMachineFunction(); |
2279 | DiagnosticInfoUnsupported NoTrap(*MF.getFunction(), |
2280 | "trap handler not supported", |
2281 | Op.getDebugLoc(), |
2282 | DS_Warning); |
2283 | DAG.getContext()->diagnose(NoTrap); |
2284 | |
2285 | // Emit s_endpgm. |
2286 | |
2287 | // FIXME: This should really be selected to s_trap, but that requires |
2288 | // setting up the trap handler for it to do anything.
2289 | return DAG.getNode(AMDGPUISD::ENDPGM, SDLoc(Op), MVT::Other, |
2290 | Op.getOperand(0)); |
2291 | } |
2292 | |
2293 | SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain, |
2294 | const SDLoc &DL, SDValue V) const { |
2295 | // We can't use S_MOV_B32 directly, because there is no way to specify m0 as |
2296 | // the destination register. |
2297 | // |
2298 | // We can't use CopyToReg, because MachineCSE won't combine COPY instructions, |
2299 | // so we will end up with redundant moves to m0. |
2300 | // |
2301 | // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result. |
2302 | |
2303 | // A Null SDValue creates a glue result. |
2304 | SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue, |
2305 | V, Chain); |
2306 | return SDValue(M0, 0); |
2307 | } |
2308 | |
2309 | SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG, |
2310 | SDValue Op, |
2311 | MVT VT, |
2312 | unsigned Offset) const { |
2313 | SDLoc SL(Op); |
2314 | SDValue Param = LowerParameter(DAG, MVT::i32, MVT::i32, SL, |
2315 | DAG.getEntryNode(), Offset, false); |
2316 | // The local size values will have the hi 16-bits as zero. |
2317 | return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param, |
2318 | DAG.getValueType(VT)); |
2319 | } |
2320 | |
2321 | static SDValue emitNonHSAIntrinsicError(SelectionDAG& DAG, SDLoc DL, EVT VT) { |
2322 | DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(), |
2323 | "non-hsa intrinsic with hsa target", |
2324 | DL.getDebugLoc()); |
2325 | DAG.getContext()->diagnose(BadIntrin); |
2326 | return DAG.getUNDEF(VT); |
2327 | } |
2328 | |
2329 | static SDValue emitRemovedIntrinsicError(SelectionDAG& DAG, SDLoc DL, EVT VT) { |
2330 | DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(), |
2331 | "intrinsic not supported on subtarget", |
2332 | DL.getDebugLoc()); |
2333 | DAG.getContext()->diagnose(BadIntrin); |
2334 | return DAG.getUNDEF(VT); |
2335 | } |
2336 | |
2337 | SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, |
2338 | SelectionDAG &DAG) const { |
2339 | MachineFunction &MF = DAG.getMachineFunction(); |
2340 | auto MFI = MF.getInfo<SIMachineFunctionInfo>(); |
2341 | const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); |
2342 | |
2343 | EVT VT = Op.getValueType(); |
2344 | SDLoc DL(Op); |
2345 | unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); |
2346 | |
2347 | // TODO: Should this propagate fast-math-flags? |
2348 | |
2349 | switch (IntrinsicID) { |
2350 | case Intrinsic::amdgcn_dispatch_ptr: |
2351 | case Intrinsic::amdgcn_queue_ptr: { |
2352 | if (!Subtarget->isAmdCodeObjectV2()) { |
2353 | DiagnosticInfoUnsupported BadIntrin( |
2354 | *MF.getFunction(), "unsupported hsa intrinsic without hsa target", |
2355 | DL.getDebugLoc()); |
2356 | DAG.getContext()->diagnose(BadIntrin); |
2357 | return DAG.getUNDEF(VT); |
2358 | } |
2359 | |
2360 | auto Reg = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ? |
2361 | SIRegisterInfo::DISPATCH_PTR : SIRegisterInfo::QUEUE_PTR; |
2362 | return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass, |
2363 | TRI->getPreloadedValue(MF, Reg), VT); |
2364 | } |
2365 | case Intrinsic::amdgcn_implicitarg_ptr: { |
2366 | unsigned offset = getImplicitParameterOffset(MFI, FIRST_IMPLICIT); |
2367 | return LowerParameterPtr(DAG, DL, DAG.getEntryNode(), offset); |
2368 | } |
2369 | case Intrinsic::amdgcn_kernarg_segment_ptr: { |
2370 | unsigned Reg |
2371 | = TRI->getPreloadedValue(MF, SIRegisterInfo::KERNARG_SEGMENT_PTR); |
2372 | return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass, Reg, VT); |
2373 | } |
2374 | case Intrinsic::amdgcn_dispatch_id: { |
2375 | unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::DISPATCH_ID); |
2376 | return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass, Reg, VT); |
2377 | } |
2378 | case Intrinsic::amdgcn_rcp: |
2379 | return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1)); |
2380 | case Intrinsic::amdgcn_rsq: |
2381 | case AMDGPUIntrinsic::AMDGPU_rsq: // Legacy name |
2382 | return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1)); |
2383 | case Intrinsic::amdgcn_rsq_legacy: { |
2384 | if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) |
2385 | return emitRemovedIntrinsicError(DAG, DL, VT); |
2386 | |
2387 | return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1)); |
2388 | } |
2389 | case Intrinsic::amdgcn_rcp_legacy: { |
2390 | if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) |
2391 | return emitRemovedIntrinsicError(DAG, DL, VT); |
2392 | return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1)); |
2393 | } |
2394 | case Intrinsic::amdgcn_rsq_clamp: { |
2395 | if (Subtarget->getGeneration() < SISubtarget::VOLCANIC_ISLANDS) |
2396 | return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1)); |
2397 | |
2398 | Type *Type = VT.getTypeForEVT(*DAG.getContext()); |
2399 | APFloat Max = APFloat::getLargest(Type->getFltSemantics()); |
2400 | APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true); |
2401 | |
2402 | SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1)); |
2403 | SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq, |
2404 | DAG.getConstantFP(Max, DL, VT)); |
2405 | return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp, |
2406 | DAG.getConstantFP(Min, DL, VT)); |
2407 | } |
2408 | case Intrinsic::r600_read_ngroups_x: |
2409 | if (Subtarget->isAmdHsaOS()) |
2410 | return emitNonHSAIntrinsicError(DAG, DL, VT); |
2411 | |
2412 | return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
2413 | SI::KernelInputOffsets::NGROUPS_X, false); |
2414 | case Intrinsic::r600_read_ngroups_y: |
2415 | if (Subtarget->isAmdHsaOS()) |
2416 | return emitNonHSAIntrinsicError(DAG, DL, VT); |
2417 | |
2418 | return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
2419 | SI::KernelInputOffsets::NGROUPS_Y, false); |
2420 | case Intrinsic::r600_read_ngroups_z: |
2421 | if (Subtarget->isAmdHsaOS()) |
2422 | return emitNonHSAIntrinsicError(DAG, DL, VT); |
2423 | |
2424 | return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
2425 | SI::KernelInputOffsets::NGROUPS_Z, false); |
2426 | case Intrinsic::r600_read_global_size_x: |
2427 | if (Subtarget->isAmdHsaOS()) |
2428 | return emitNonHSAIntrinsicError(DAG, DL, VT); |
2429 | |
2430 | return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
2431 | SI::KernelInputOffsets::GLOBAL_SIZE_X, false); |
2432 | case Intrinsic::r600_read_global_size_y: |
2433 | if (Subtarget->isAmdHsaOS()) |
2434 | return emitNonHSAIntrinsicError(DAG, DL, VT); |
2435 | |
2436 | return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
2437 | SI::KernelInputOffsets::GLOBAL_SIZE_Y, false); |
2438 | case Intrinsic::r600_read_global_size_z: |
2439 | if (Subtarget->isAmdHsaOS()) |
2440 | return emitNonHSAIntrinsicError(DAG, DL, VT); |
2441 | |
2442 | return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
2443 | SI::KernelInputOffsets::GLOBAL_SIZE_Z, false); |
2444 | case Intrinsic::r600_read_local_size_x: |
2445 | if (Subtarget->isAmdHsaOS()) |
2446 | return emitNonHSAIntrinsicError(DAG, DL, VT); |
2447 | |
2448 | return lowerImplicitZextParam(DAG, Op, MVT::i16, |
2449 | SI::KernelInputOffsets::LOCAL_SIZE_X); |
2450 | case Intrinsic::r600_read_local_size_y: |
2451 | if (Subtarget->isAmdHsaOS()) |
2452 | return emitNonHSAIntrinsicError(DAG, DL, VT); |
2453 | |
2454 | return lowerImplicitZextParam(DAG, Op, MVT::i16, |
2455 | SI::KernelInputOffsets::LOCAL_SIZE_Y); |
2456 | case Intrinsic::r600_read_local_size_z: |
2457 | if (Subtarget->isAmdHsaOS()) |
2458 | return emitNonHSAIntrinsicError(DAG, DL, VT); |
2459 | |
2460 | return lowerImplicitZextParam(DAG, Op, MVT::i16, |
2461 | SI::KernelInputOffsets::LOCAL_SIZE_Z); |
2462 | case Intrinsic::amdgcn_workgroup_id_x: |
2463 | case Intrinsic::r600_read_tgid_x: |
2464 | return CreateLiveInRegister(DAG, &AMDGPU::SReg_32_XM0RegClass, |
2465 | TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_X), VT); |
2466 | case Intrinsic::amdgcn_workgroup_id_y: |
2467 | case Intrinsic::r600_read_tgid_y: |
2468 | return CreateLiveInRegister(DAG, &AMDGPU::SReg_32_XM0RegClass, |
2469 | TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_Y), VT); |
2470 | case Intrinsic::amdgcn_workgroup_id_z: |
2471 | case Intrinsic::r600_read_tgid_z: |
2472 | return CreateLiveInRegister(DAG, &AMDGPU::SReg_32_XM0RegClass, |
2473 | TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_Z), VT); |
2474 | case Intrinsic::amdgcn_workitem_id_x: |
2475 | case Intrinsic::r600_read_tidig_x: |
2476 | return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass, |
2477 | TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_X), VT); |
2478 | case Intrinsic::amdgcn_workitem_id_y: |
2479 | case Intrinsic::r600_read_tidig_y: |
2480 | return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass, |
2481 | TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Y), VT); |
2482 | case Intrinsic::amdgcn_workitem_id_z: |
2483 | case Intrinsic::r600_read_tidig_z: |
2484 | return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass, |
2485 | TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Z), VT); |
2486 | case AMDGPUIntrinsic::SI_load_const: { |
2487 | SDValue Ops[] = { |
2488 | Op.getOperand(1), |
2489 | Op.getOperand(2) |
2490 | }; |
2491 | |
2492 | MachineMemOperand *MMO = MF.getMachineMemOperand( |
2493 | MachinePointerInfo(), |
2494 | MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable | |
2495 | MachineMemOperand::MOInvariant, |
2496 | VT.getStoreSize(), 4); |
2497 | return DAG.getMemIntrinsicNode(AMDGPUISD::LOAD_CONSTANT, DL, |
2498 | Op->getVTList(), Ops, VT, MMO); |
2499 | } |
2500 | case AMDGPUIntrinsic::amdgcn_fdiv_fast: { |
2501 | return lowerFDIV_FAST(Op, DAG); |
2502 | } |
2503 | case AMDGPUIntrinsic::SI_vs_load_input: |
2504 | return DAG.getNode(AMDGPUISD::LOAD_INPUT, DL, VT, |
2505 | Op.getOperand(1), |
2506 | Op.getOperand(2), |
2507 | Op.getOperand(3)); |
2508 | |
2509 | case AMDGPUIntrinsic::SI_fs_constant: { |
2510 | SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(3)); |
2511 | SDValue Glue = M0.getValue(1); |
2512 | return DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32, |
2513 | DAG.getConstant(2, DL, MVT::i32), // P0 |
2514 | Op.getOperand(1), Op.getOperand(2), Glue); |
2515 | } |
2516 | case AMDGPUIntrinsic::SI_packf16: |
2517 | if (Op.getOperand(1).isUndef() && Op.getOperand(2).isUndef()) |
2518 | return DAG.getUNDEF(MVT::i32); |
2519 | return Op; |
2520 | case AMDGPUIntrinsic::SI_fs_interp: { |
2521 | SDValue IJ = Op.getOperand(4); |
2522 | SDValue I = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, IJ, |
2523 | DAG.getConstant(0, DL, MVT::i32)); |
2524 | SDValue J = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, IJ, |
2525 | DAG.getConstant(1, DL, MVT::i32)); |
2526 | I = DAG.getNode(ISD::BITCAST, DL, MVT::f32, I); |
2527 | J = DAG.getNode(ISD::BITCAST, DL, MVT::f32, J); |
2528 | SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(3)); |
2529 | SDValue Glue = M0.getValue(1); |
2530 | SDValue P1 = DAG.getNode(AMDGPUISD::INTERP_P1, DL, |
2531 | DAG.getVTList(MVT::f32, MVT::Glue), |
2532 | I, Op.getOperand(1), Op.getOperand(2), Glue); |
2533 | Glue = SDValue(P1.getNode(), 1); |
2534 | return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, P1, J, |
2535 | Op.getOperand(1), Op.getOperand(2), Glue); |
2536 | } |
2537 | case Intrinsic::amdgcn_interp_mov: { |
2538 | SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4)); |
2539 | SDValue Glue = M0.getValue(1); |
2540 | return DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32, Op.getOperand(1), |
2541 | Op.getOperand(2), Op.getOperand(3), Glue); |
2542 | } |
2543 | case Intrinsic::amdgcn_interp_p1: { |
2544 | SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4)); |
2545 | SDValue Glue = M0.getValue(1); |
2546 | return DAG.getNode(AMDGPUISD::INTERP_P1, DL, MVT::f32, Op.getOperand(1), |
2547 | Op.getOperand(2), Op.getOperand(3), Glue); |
2548 | } |
2549 | case Intrinsic::amdgcn_interp_p2: { |
2550 | SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5)); |
2551 | SDValue Glue = SDValue(M0.getNode(), 1); |
2552 | return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, Op.getOperand(1), |
2553 | Op.getOperand(2), Op.getOperand(3), Op.getOperand(4), |
2554 | Glue); |
2555 | } |
2556 | case Intrinsic::amdgcn_sin: |
2557 | return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1)); |
2558 | |
2559 | case Intrinsic::amdgcn_cos: |
2560 | return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1)); |
2561 | |
2562 | case Intrinsic::amdgcn_log_clamp: { |
2563 | if (Subtarget->getGeneration() < SISubtarget::VOLCANIC_ISLANDS) |
2564 | return SDValue(); |
2565 | |
2566 | DiagnosticInfoUnsupported BadIntrin( |
2567 | *MF.getFunction(), "intrinsic not supported on subtarget", |
2568 | DL.getDebugLoc()); |
2569 | DAG.getContext()->diagnose(BadIntrin); |
2570 | return DAG.getUNDEF(VT); |
2571 | } |
2572 | case Intrinsic::amdgcn_ldexp: |
2573 | return DAG.getNode(AMDGPUISD::LDEXP, DL, VT, |
2574 | Op.getOperand(1), Op.getOperand(2)); |
2575 | |
2576 | case Intrinsic::amdgcn_fract: |
2577 | return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1)); |
2578 | |
2579 | case Intrinsic::amdgcn_class: |
2580 | return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT, |
2581 | Op.getOperand(1), Op.getOperand(2)); |
2582 | case Intrinsic::amdgcn_div_fmas: |
2583 | return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT, |
2584 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3), |
2585 | Op.getOperand(4)); |
2586 | |
2587 | case Intrinsic::amdgcn_div_fixup: |
2588 | return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT, |
2589 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); |
2590 | |
2591 | case Intrinsic::amdgcn_trig_preop: |
2592 | return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT, |
2593 | Op.getOperand(1), Op.getOperand(2)); |
2594 | case Intrinsic::amdgcn_div_scale: { |
2595 | // 3rd parameter required to be a constant. |
2596 | const ConstantSDNode *Param = dyn_cast<ConstantSDNode>(Op.getOperand(3)); |
2597 | if (!Param) |
2598 | return DAG.getUNDEF(VT); |
2599 | |
2600 | // Translate to the operands expected by the machine instruction. The
2601 | // first source operand must be the same as one of the other source operands.
2602 | SDValue Numerator = Op.getOperand(1); |
2603 | SDValue Denominator = Op.getOperand(2); |
2604 | |
2605 | // Note this order is the opposite of the machine instruction's operands,
2606 | // which are s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The
2607 | // intrinsic has the numerator as the first operand to match a normal |
2608 | // division operation. |
2609 | |
2610 | SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator; |
2611 | |
2612 | return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0, |
2613 | Denominator, Numerator); |
2614 | } |
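     | // Illustrative sketch (annotation, not part of the upstream source): for a
     | // call such as
     | //   %r = llvm.amdgcn.div.scale.f32(%num, %den, i1 true)
     | // the node built above is DIV_SCALE(%num, %den, %num), while passing i1
     | // false selects DIV_SCALE(%den, %den, %num); the (den, num) order of the
     | // last two operands is the same in both cases.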
2615 | case Intrinsic::amdgcn_icmp: { |
2616 | const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3)); |
2617 | int CondCode = CD->getSExtValue(); |
2618 | |
2619 | if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE || |
2620 | CondCode >= ICmpInst::Predicate::BAD_ICMP_PREDICATE) |
2621 | return DAG.getUNDEF(VT); |
2622 | |
2623 | ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode); |
2624 | ISD::CondCode CCOpcode = getICmpCondCode(IcInput); |
2625 | return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1), |
2626 | Op.getOperand(2), DAG.getCondCode(CCOpcode)); |
2627 | } |
2628 | case Intrinsic::amdgcn_fcmp: { |
2629 | const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3)); |
2630 | int CondCode = CD->getSExtValue(); |
2631 | |
2632 | if (CondCode <= FCmpInst::Predicate::FCMP_FALSE || |
2633 | CondCode >= FCmpInst::Predicate::FCMP_TRUE) |
2634 | return DAG.getUNDEF(VT); |
2635 | |
2636 | FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode); |
2637 | ISD::CondCode CCOpcode = getFCmpCondCode(IcInput); |
2638 | return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1), |
2639 | Op.getOperand(2), DAG.getCondCode(CCOpcode)); |
2640 | } |
2641 | case Intrinsic::amdgcn_fmul_legacy: |
2642 | return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT, |
2643 | Op.getOperand(1), Op.getOperand(2)); |
2644 | case Intrinsic::amdgcn_sffbh: |
2645 | case AMDGPUIntrinsic::AMDGPU_flbit_i32: // Legacy name. |
2646 | return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1)); |
2647 | default: |
2648 | return AMDGPUTargetLowering::LowerOperation(Op, DAG); |
2649 | } |
2650 | } |
2651 | |
2652 | SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op, |
2653 | SelectionDAG &DAG) const { |
2654 | unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); |
2655 | SDLoc DL(Op); |
2656 | switch (IntrID) { |
2657 | case Intrinsic::amdgcn_atomic_inc: |
2658 | case Intrinsic::amdgcn_atomic_dec: { |
2659 | MemSDNode *M = cast<MemSDNode>(Op); |
2660 | unsigned Opc = (IntrID == Intrinsic::amdgcn_atomic_inc) ? |
2661 | AMDGPUISD::ATOMIC_INC : AMDGPUISD::ATOMIC_DEC; |
2662 | SDValue Ops[] = { |
2663 | M->getOperand(0), // Chain |
2664 | M->getOperand(2), // Ptr |
2665 | M->getOperand(3) // Value |
2666 | }; |
2667 | |
2668 | return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops, |
2669 | M->getMemoryVT(), M->getMemOperand()); |
2670 | } |
2671 | case Intrinsic::amdgcn_buffer_load: |
2672 | case Intrinsic::amdgcn_buffer_load_format: { |
2673 | SDValue Ops[] = { |
2674 | Op.getOperand(0), // Chain |
2675 | Op.getOperand(2), // rsrc |
2676 | Op.getOperand(3), // vindex |
2677 | Op.getOperand(4), // offset |
2678 | Op.getOperand(5), // glc |
2679 | Op.getOperand(6) // slc |
2680 | }; |
2681 | MachineFunction &MF = DAG.getMachineFunction(); |
2682 | SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); |
2683 | |
2684 | unsigned Opc = (IntrID == Intrinsic::amdgcn_buffer_load) ? |
2685 | AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT; |
2686 | EVT VT = Op.getValueType(); |
2687 | EVT IntVT = VT.changeTypeToInteger(); |
2688 | |
2689 | MachineMemOperand *MMO = MF.getMachineMemOperand( |
2690 | MachinePointerInfo(MFI->getBufferPSV()), |
2691 | MachineMemOperand::MOLoad, |
2692 | VT.getStoreSize(), VT.getStoreSize()); |
2693 | |
2694 | return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT, MMO); |
2695 | } |
2696 | default: |
2697 | return SDValue(); |
2698 | } |
2699 | } |
2700 | |
2701 | SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op, |
2702 | SelectionDAG &DAG) const { |
2703 | MachineFunction &MF = DAG.getMachineFunction(); |
2704 | SDLoc DL(Op); |
2705 | SDValue Chain = Op.getOperand(0); |
2706 | unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); |
2707 | |
2708 | switch (IntrinsicID) { |
2709 | case AMDGPUIntrinsic::SI_sendmsg: { |
2710 | Chain = copyToM0(DAG, Chain, DL, Op.getOperand(3)); |
2711 | SDValue Glue = Chain.getValue(1); |
2712 | return DAG.getNode(AMDGPUISD::SENDMSG, DL, MVT::Other, Chain, |
2713 | Op.getOperand(2), Glue); |
2714 | } |
2715 | case AMDGPUIntrinsic::SI_tbuffer_store: { |
2716 | SDValue Ops[] = { |
2717 | Chain, |
2718 | Op.getOperand(2), |
2719 | Op.getOperand(3), |
2720 | Op.getOperand(4), |
2721 | Op.getOperand(5), |
2722 | Op.getOperand(6), |
2723 | Op.getOperand(7), |
2724 | Op.getOperand(8), |
2725 | Op.getOperand(9), |
2726 | Op.getOperand(10), |
2727 | Op.getOperand(11), |
2728 | Op.getOperand(12), |
2729 | Op.getOperand(13), |
2730 | Op.getOperand(14) |
2731 | }; |
2732 | |
2733 | EVT VT = Op.getOperand(3).getValueType(); |
2734 | |
2735 | MachineMemOperand *MMO = MF.getMachineMemOperand( |
2736 | MachinePointerInfo(), |
2737 | MachineMemOperand::MOStore, |
2738 | VT.getStoreSize(), 4); |
2739 | return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_STORE_FORMAT, DL, |
2740 | Op->getVTList(), Ops, VT, MMO); |
2741 | } |
2742 | case AMDGPUIntrinsic::AMDGPU_kill: { |
2743 | SDValue Src = Op.getOperand(2); |
2744 | if (const ConstantFPSDNode *K = dyn_cast<ConstantFPSDNode>(Src)) { |
2745 | if (!K->isNegative()) |
2746 | return Chain; |
2747 | |
2748 | SDValue NegOne = DAG.getTargetConstant(FloatToBits(-1.0f), DL, MVT::i32); |
2749 | return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, NegOne); |
2750 | } |
2751 | |
2752 | SDValue Cast = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Src); |
2753 | return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, Cast); |
2754 | } |
2755 | case AMDGPUIntrinsic::SI_export: { |
2756 | const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(2)); |
2757 | const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(3)); |
2758 | const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(4)); |
2759 | const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(5)); |
2760 | const ConstantSDNode *Compr = cast<ConstantSDNode>(Op.getOperand(6)); |
2761 | |
2762 | const SDValue Ops[] = { |
2763 | Chain, |
2764 | DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8), |
2765 | DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1), |
2766 | DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), |
2767 | DAG.getTargetConstant(Compr->getZExtValue(), DL, MVT::i1), |
2768 | Op.getOperand(7), // src0 |
2769 | Op.getOperand(8), // src1 |
2770 | Op.getOperand(9), // src2 |
2771 | Op.getOperand(10) // src3 |
2772 | }; |
2773 | |
2774 | unsigned Opc = Done->isNullValue() ? |
2775 | AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE; |
2776 | return DAG.getNode(Opc, DL, Op->getVTList(), Ops); |
2777 | } |
2778 | default: |
2779 | return SDValue(); |
2780 | } |
2781 | } |
2782 | |
2783 | SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { |
2784 | SDLoc DL(Op); |
2785 | LoadSDNode *Load = cast<LoadSDNode>(Op); |
2786 | ISD::LoadExtType ExtType = Load->getExtensionType(); |
2787 | EVT MemVT = Load->getMemoryVT(); |
2788 | |
2789 | if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) { |
2790 | // FIXME: Copied from PPC |
2791 | // First, load into 32 bits, then truncate to 1 bit. |
2792 | |
2793 | SDValue Chain = Load->getChain(); |
2794 | SDValue BasePtr = Load->getBasePtr(); |
2795 | MachineMemOperand *MMO = Load->getMemOperand(); |
2796 | |
2797 | EVT RealMemVT = (MemVT == MVT::i1) ? MVT::i8 : MVT::i16; |
2798 | |
2799 | SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain, |
2800 | BasePtr, RealMemVT, MMO); |
2801 | |
2802 | SDValue Ops[] = { |
2803 | DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD), |
2804 | NewLD.getValue(1) |
2805 | }; |
2806 | |
2807 | return DAG.getMergeValues(Ops, DL); |
2808 | } |
2809 | |
2810 | if (!MemVT.isVector()) |
2811 | return SDValue(); |
2812 | |
2813 | assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
2814 | "Custom lowering for non-i32 vectors hasn't been implemented.");
2815 | |
2816 | unsigned AS = Load->getAddressSpace(); |
2817 | if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT, |
2818 | AS, Load->getAlignment())) { |
2819 | SDValue Ops[2]; |
2820 | std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG); |
2821 | return DAG.getMergeValues(Ops, DL); |
2822 | } |
2823 | |
2824 | MachineFunction &MF = DAG.getMachineFunction(); |
2825 | SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); |
2826 | // If there is a possibility that flat instructions access scratch memory
2827 | // then we need to use the same legalization rules we use for private. |
2828 | if (AS == AMDGPUAS::FLAT_ADDRESS) |
2829 | AS = MFI->hasFlatScratchInit() ? |
2830 | AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS; |
2831 | |
2832 | unsigned NumElements = MemVT.getVectorNumElements(); |
2833 | switch (AS) { |
2834 | case AMDGPUAS::CONSTANT_ADDRESS: |
2835 | if (isMemOpUniform(Load)) |
2836 | return SDValue(); |
2837 | // Non-uniform loads will be selected to MUBUF instructions, so they |
2838 | // have the same legalization requirements as global and private |
2839 | // loads. |
2840 | // |
2841 | LLVM_FALLTHROUGH;
2842 | case AMDGPUAS::GLOBAL_ADDRESS: { |
2843 | if (Subtarget->getScalarizeGlobalBehavior() && isMemOpUniform(Load) && |
2844 | isMemOpHasNoClobberedMemOperand(Load)) |
2845 | return SDValue(); |
2846 | // Non-uniform loads will be selected to MUBUF instructions, so they |
2847 | // have the same legalization requirements as global and private |
2848 | // loads. |
2849 | // |
2850 | } |
2851 | LLVM_FALLTHROUGH;
2852 | case AMDGPUAS::FLAT_ADDRESS: |
2853 | if (NumElements > 4) |
2854 | return SplitVectorLoad(Op, DAG); |
2855 | // v4 loads are supported for private and global memory. |
2856 | return SDValue(); |
2857 | case AMDGPUAS::PRIVATE_ADDRESS: { |
2858 | // Depending on the setting of the private_element_size field in the |
2859 | // resource descriptor, we can only make private accesses up to a certain |
2860 | // size. |
2861 | switch (Subtarget->getMaxPrivateElementSize()) { |
2862 | case 4: |
2863 | return scalarizeVectorLoad(Load, DAG); |
2864 | case 8: |
2865 | if (NumElements > 2) |
2866 | return SplitVectorLoad(Op, DAG); |
2867 | return SDValue(); |
2868 | case 16: |
2869 | // Same as global/flat |
2870 | if (NumElements > 4) |
2871 | return SplitVectorLoad(Op, DAG); |
2872 | return SDValue(); |
2873 | default: |
2874 | llvm_unreachable("unsupported private_element_size")::llvm::llvm_unreachable_internal("unsupported private_element_size" , "/tmp/buildd/llvm-toolchain-snapshot-4.0~svn290870/lib/Target/AMDGPU/SIISelLowering.cpp" , 2874); |
2875 | } |
2876 | } |
2877 | case AMDGPUAS::LOCAL_ADDRESS: { |
2878 | if (NumElements > 2) |
2879 | return SplitVectorLoad(Op, DAG); |
2880 | |
2881 | if (NumElements == 2) |
2882 | return SDValue(); |
2883 | |
2884 | // If properly aligned, splitting might let us use ds_read_b64.
2885 | return SplitVectorLoad(Op, DAG); |
2886 | } |
2887 | default: |
2888 | return SDValue(); |
2889 | } |
2890 | } |
2891 | |
2892 | SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { |
2893 | if (Op.getValueType() != MVT::i64) |
2894 | return SDValue(); |
2895 | |
2896 | SDLoc DL(Op); |
2897 | SDValue Cond = Op.getOperand(0); |
2898 | |
2899 | SDValue Zero = DAG.getConstant(0, DL, MVT::i32); |
2900 | SDValue One = DAG.getConstant(1, DL, MVT::i32); |
2901 | |
2902 | SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1)); |
2903 | SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2)); |
2904 | |
2905 | SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero); |
2906 | SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero); |
2907 | |
2908 | SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1); |
2909 | |
2910 | SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One); |
2911 | SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One); |
2912 | |
2913 | SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1); |
2914 | |
2915 | SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi}); |
2916 | return DAG.getNode(ISD::BITCAST, DL, MVT::i64, Res); |
2917 | } |
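     | // Worked example of the decomposition above (illustration only): an i64
     | //   select %c, i64 %a, i64 %b
     | // is lowered by bitcasting %a and %b to v2i32 and selecting each half:
     | //   lo = select %c, %a.lo, %b.lo
     | //   hi = select %c, %a.hi, %b.hi
     | //   result = bitcast (build_vector lo, hi) to i64
     | // so only 32-bit selects remain after this lowering.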
2918 | |
2919 | // Catch division cases where we can use shortcuts with rcp and rsq |
2920 | // instructions. |
2921 | SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op, |
2922 | SelectionDAG &DAG) const { |
2923 | SDLoc SL(Op); |
2924 | SDValue LHS = Op.getOperand(0); |
2925 | SDValue RHS = Op.getOperand(1); |
2926 | EVT VT = Op.getValueType(); |
2927 | bool Unsafe = DAG.getTarget().Options.UnsafeFPMath; |
2928 | |
2929 | if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) { |
2930 | if (Unsafe || (VT == MVT::f32 && !Subtarget->hasFP32Denormals()) || |
2931 | VT == MVT::f16) { |
2932 | if (CLHS->isExactlyValue(1.0)) { |
2933 | // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to |
2934 | // the CI documentation have a worst-case error of 1 ulp.
2935 | // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to |
2936 | // use it as long as we aren't trying to use denormals. |
2937 | // |
2938 | // v_rcp_f16 and v_rsq_f16 DO support denormals. |
2939 | |
2940 | // 1.0 / sqrt(x) -> rsq(x) |
2941 | |
2942 | // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP |
2943 | // error seems really high at 2^29 ULP. |
2944 | if (RHS.getOpcode() == ISD::FSQRT) |
2945 | return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0)); |
2946 | |
2947 | // 1.0 / x -> rcp(x) |
2948 | return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS); |
2949 | } |
2950 | |
2951 | // Same as for 1.0, but expand the sign out of the constant. |
2952 | if (CLHS->isExactlyValue(-1.0)) { |
2953 | // -1.0 / x -> rcp (fneg x) |
2954 | SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); |
2955 | return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS); |
2956 | } |
2957 | } |
2958 | } |
2959 | |
2960 | const SDNodeFlags *Flags = Op->getFlags(); |
2961 | |
2962 | if (Unsafe || Flags->hasAllowReciprocal()) { |
2963 | // Turn into multiply by the reciprocal. |
2964 | // x / y -> x * (1.0 / y) |
2965 | SDNodeFlags Flags; |
2966 | Flags.setUnsafeAlgebra(true); |
2967 | SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS); |
2968 | return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, &Flags); |
2969 | } |
2970 | |
2971 | return SDValue(); |
2972 | } |
2973 | |
2974 | static SDValue getFPBinOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL, |
2975 | EVT VT, SDValue A, SDValue B, SDValue GlueChain) { |
2976 | if (GlueChain->getNumValues() <= 1) { |
2977 | return DAG.getNode(Opcode, SL, VT, A, B); |
2978 | } |
2979 | |
2980 | assert(GlueChain->getNumValues() == 3);
2981 | |
2982 | SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue); |
2983 | switch (Opcode) { |
2984 | default: llvm_unreachable("no chain equivalent for opcode")::llvm::llvm_unreachable_internal("no chain equivalent for opcode" , "/tmp/buildd/llvm-toolchain-snapshot-4.0~svn290870/lib/Target/AMDGPU/SIISelLowering.cpp" , 2984); |
2985 | case ISD::FMUL: |
2986 | Opcode = AMDGPUISD::FMUL_W_CHAIN; |
2987 | break; |
2988 | } |
2989 | |
2990 | return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B, |
2991 | GlueChain.getValue(2)); |
2992 | } |
2993 | |
2994 | static SDValue getFPTernOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL, |
2995 | EVT VT, SDValue A, SDValue B, SDValue C, |
2996 | SDValue GlueChain) { |
2997 | if (GlueChain->getNumValues() <= 1) { |
2998 | return DAG.getNode(Opcode, SL, VT, A, B, C); |
2999 | } |
3000 | |
3001 | assert(GlueChain->getNumValues() == 3);
3002 | |
3003 | SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue); |
3004 | switch (Opcode) { |
3005 | default: llvm_unreachable("no chain equivalent for opcode")::llvm::llvm_unreachable_internal("no chain equivalent for opcode" , "/tmp/buildd/llvm-toolchain-snapshot-4.0~svn290870/lib/Target/AMDGPU/SIISelLowering.cpp" , 3005); |
3006 | case ISD::FMA: |
3007 | Opcode = AMDGPUISD::FMA_W_CHAIN; |
3008 | break; |
3009 | } |
3010 | |
3011 | return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B, C, |
3012 | GlueChain.getValue(2)); |
3013 | } |
3014 | |
3015 | SDValue SITargetLowering::LowerFDIV16(SDValue Op, SelectionDAG &DAG) const { |
3016 | if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG)) |
3017 | return FastLowered; |
3018 | |
3019 | SDLoc SL(Op); |
3020 | SDValue Src0 = Op.getOperand(0); |
3021 | SDValue Src1 = Op.getOperand(1); |
3022 | |
3023 | SDValue CvtSrc0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0); |
3024 | SDValue CvtSrc1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1); |
3025 | |
3026 | SDValue RcpSrc1 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, CvtSrc1); |
3027 | SDValue Quot = DAG.getNode(ISD::FMUL, SL, MVT::f32, CvtSrc0, RcpSrc1); |
3028 | |
3029 | SDValue FPRoundFlag = DAG.getTargetConstant(0, SL, MVT::i32); |
3030 | SDValue BestQuot = DAG.getNode(ISD::FP_ROUND, SL, MVT::f16, Quot, FPRoundFlag); |
3031 | |
3032 | return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f16, BestQuot, Src1, Src0); |
3033 | } |
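     | // Outline of the f16 path above (annotation, not text from this file): the
     | // operands are extended to f32, an approximate quotient
     | //   q = src0 * rcp(src1)
     | // is computed in f32 and rounded back to f16, and div_fixup then patches
     | // the special cases (infinities, zero denominators, NaNs) using the
     | // original f16 operands.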
3034 | |
3035 | // Faster 2.5 ULP division that does not support denormals. |
3036 | SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const { |
3037 | SDLoc SL(Op); |
3038 | SDValue LHS = Op.getOperand(1); |
3039 | SDValue RHS = Op.getOperand(2); |
3040 | |
3041 | SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS); |
3042 | |
3043 | const APFloat K0Val(BitsToFloat(0x6f800000)); |
3044 | const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32); |
3045 | |
3046 | const APFloat K1Val(BitsToFloat(0x2f800000)); |
3047 | const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32); |
3048 | |
3049 | const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32); |
3050 | |
3051 | EVT SetCCVT = |
3052 | getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32); |
3053 | |
3054 | SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT); |
3055 | |
3056 | SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One); |
3057 | |
3058 | // TODO: Should this propagate fast-math-flags? |
3059 | r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3); |
3060 | |
3061 | // rcp does not support denormals. |
3062 | SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1); |
3063 | |
3064 | SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0); |
3065 | |
3066 | return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul); |
3067 | } |
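     | // Sketch of the math above (illustration; constants read as IEEE-754 single
     | // precision bit patterns): K0 = 0x6f800000 is 2^96 and K1 = 0x2f800000 is
     | // 2^-32. When |rhs| > 2^96 the denominator is pre-scaled by 2^-32 so rcp of
     | // the product stays in the normal range, and the same factor r3 rescales
     | // the result:
     | //   r3  = |rhs| > K0 ? 2^-32 : 1.0
     | //   out = r3 * (lhs * rcp(rhs * r3))  ~=  lhs / rhs
     | // With r3 == 1.0 this degenerates to the plain lhs * rcp(rhs) fast path.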
3068 | |
3069 | SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const { |
3070 | if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG)) |
3071 | return FastLowered; |
3072 | |
3073 | SDLoc SL(Op); |
3074 | SDValue LHS = Op.getOperand(0); |
3075 | SDValue RHS = Op.getOperand(1); |
3076 | |
3077 | const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32); |
3078 | |
3079 | SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1); |
3080 | |
3081 | SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, |
3082 | RHS, RHS, LHS); |
3083 | SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, |
3084 | LHS, RHS, LHS); |
3085 | |
3086 | // Denominator is scaled to not be denormal, so using rcp is ok. |
3087 | SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, |
3088 | DenominatorScaled); |
3089 | SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32, |
3090 | DenominatorScaled); |
3091 | |
3092 | const unsigned Denorm32Reg = AMDGPU::Hwreg::ID_MODE | |
3093 | (4 << AMDGPU::Hwreg::OFFSET_SHIFT_) | |
3094 | (1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_); |
3095 | |
3096 | const SDValue BitField = DAG.getTargetConstant(Denorm32Reg, SL, MVT::i16); |
3097 | |
3098 | if (!Subtarget->hasFP32Denormals()) { |
3099 | SDVTList BindParamVTs = DAG.getVTList(MVT::Other, MVT::Glue); |
3100 | const SDValue EnableDenormValue = DAG.getConstant(FP_DENORM_FLUSH_NONE,
3101 | SL, MVT::i32); |
3102 | SDValue EnableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, BindParamVTs, |
3103 | DAG.getEntryNode(), |
3104 | EnableDenormValue, BitField); |
3105 | SDValue Ops[3] = { |
3106 | NegDivScale0, |
3107 | EnableDenorm.getValue(0), |
3108 | EnableDenorm.getValue(1) |
3109 | }; |
3110 | |
3111 | NegDivScale0 = DAG.getMergeValues(Ops, SL); |
3112 | } |
3113 | |
3114 | SDValue Fma0 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, |
3115 | ApproxRcp, One, NegDivScale0); |
3116 | |
3117 | SDValue Fma1 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp, |
3118 | ApproxRcp, Fma0); |
3119 | |
3120 | SDValue Mul = getFPBinOp(DAG, ISD::FMUL, SL, MVT::f32, NumeratorScaled, |
3121 | Fma1, Fma1); |
3122 | |
3123 | SDValue Fma2 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Mul, |
3124 | NumeratorScaled, Mul); |
3125 | |
3126 | SDValue Fma3 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma2, Fma1, Mul, Fma2);
3127 | |
3128 | SDValue Fma4 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3, |
3129 | NumeratorScaled, Fma3); |
3130 | |
3131 | if (!Subtarget->hasFP32Denormals()) { |
3132 | const SDValue DisableDenormValue = |
3133 | DAG.getConstant(FP_DENORM_FLUSH_IN_FLUSH_OUT, SL, MVT::i32);
3134 | SDValue DisableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, MVT::Other, |
3135 | Fma4.getValue(1), |
3136 | DisableDenormValue, |
3137 | BitField, |
3138 | Fma4.getValue(2)); |
3139 | |
3140 | SDValue OutputChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other, |
3141 | DisableDenorm, DAG.getRoot()); |
3142 | DAG.setRoot(OutputChain); |
3143 | } |
3144 | |
3145 | SDValue Scale = NumeratorScaled.getValue(1); |
3146 | SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32, |
3147 | Fma4, Fma1, Fma3, Scale); |
3148 | |
3149 | return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS); |
3150 | } |
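     | // Annotated derivation of the refinement above (sketch, not verbatim from
     | // the source): with d = DenominatorScaled, n = NumeratorScaled and
     | // r = ApproxRcp,
     | //   Fma0 = 1 - d*r          (error of the reciprocal estimate)
     | //   Fma1 = r + r*Fma0       (refined reciprocal)
     | //   Mul  = n * Fma1         (quotient estimate)
     | //   Fma2 = n - d*Mul        (residual)
     | //   Fma3 = Mul + Fma2*Fma1  (refined quotient)
     | //   Fma4 = n - d*Fma3       (final residual)
     | // div_fmas then combines Fma4, Fma1 and Fma3 under the scale condition,
     | // and div_fixup restores the special-case results for the original
     | // operands.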
3151 | |
3152 | SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const { |
3153 | if (DAG.getTarget().Options.UnsafeFPMath) |
3154 | return lowerFastUnsafeFDIV(Op, DAG); |
3155 | |
3156 | SDLoc SL(Op); |
3157 | SDValue X = Op.getOperand(0); |
3158 | SDValue Y = Op.getOperand(1); |
3159 | |
3160 | const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64); |
3161 | |
3162 | SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1); |
3163 | |
3164 | SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X); |
3165 | |
3166 | SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0); |
3167 | |
3168 | SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0); |
3169 | |
3170 | SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One); |
3171 | |
3172 | SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp); |
3173 | |
3174 | SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One); |
3175 | |
3176 | SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X); |
3177 | |
3178 | SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1); |
3179 | SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3); |
3180 | |
3181 | SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64, |
3182 | NegDivScale0, Mul, DivScale1); |
3183 | |
3184 | SDValue Scale; |
3185 | |
3186 | if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS) { |
3187 | // Workaround a hardware bug on SI where the condition output from div_scale |
3188 | // is not usable. |
3189 | |
3190 | const SDValue Hi = DAG.getConstant(1, SL, MVT::i32); |
3191 | |
3192 | // Figure out which scale to use for div_fmas.
3193 | SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X); |
3194 | SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y); |
3195 | SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0); |
3196 | SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1); |
3197 | |
3198 | SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi); |
3199 | SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi); |
3200 | |
3201 | SDValue Scale0Hi |
3202 | = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi); |
3203 | SDValue Scale1Hi |
3204 | = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi); |
3205 | |
3206 | SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ); |
3207 | SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ); |
3208 | Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen); |
3209 | } else { |
3210 | Scale = DivScale1.getValue(1); |
3211 | } |
3212 | |
3213 | SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64, |
3214 | Fma4, Fma3, Mul, Scale); |
3215 | |
3216 | return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X); |
3217 | } |
3218 | |
3219 | SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const { |
3220 | EVT VT = Op.getValueType(); |
3221 | |
3222 | if (VT == MVT::f32) |
3223 | return LowerFDIV32(Op, DAG); |
3224 | |
3225 | if (VT == MVT::f64) |
3226 | return LowerFDIV64(Op, DAG); |
3227 | |
3228 | if (VT == MVT::f16) |
3229 | return LowerFDIV16(Op, DAG); |
3230 | |
3231 | llvm_unreachable("Unexpected type for fdiv")::llvm::llvm_unreachable_internal("Unexpected type for fdiv", "/tmp/buildd/llvm-toolchain-snapshot-4.0~svn290870/lib/Target/AMDGPU/SIISelLowering.cpp" , 3231); |
3232 | } |
3233 | |
3234 | SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { |
3235 | SDLoc DL(Op); |
3236 | StoreSDNode *Store = cast<StoreSDNode>(Op); |
3237 | EVT VT = Store->getMemoryVT(); |
3238 | |
3239 | if (VT == MVT::i1) { |
3240 | return DAG.getTruncStore(Store->getChain(), DL, |
3241 | DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32), |
3242 | Store->getBasePtr(), MVT::i1, Store->getMemOperand()); |
3243 | } |
3244 | |
3245 | assert(VT.isVector() &&
3246 | Store->getValue().getValueType().getScalarType() == MVT::i32);
3247 | |
3248 | unsigned AS = Store->getAddressSpace(); |
3249 | if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT, |
3250 | AS, Store->getAlignment())) { |
3251 | return expandUnalignedStore(Store, DAG); |
3252 | } |
3253 | |
3254 | MachineFunction &MF = DAG.getMachineFunction(); |
3255 | SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); |
3256 | // If there is a possibility that flat instructions access scratch memory
3257 | // then we need to use the same legalization rules we use for private. |
3258 | if (AS == AMDGPUAS::FLAT_ADDRESS) |
3259 | AS = MFI->hasFlatScratchInit() ? |
3260 | AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS; |
3261 | |
3262 | unsigned NumElements = VT.getVectorNumElements(); |
3263 | switch (AS) { |
3264 | case AMDGPUAS::GLOBAL_ADDRESS: |
3265 | case AMDGPUAS::FLAT_ADDRESS: |
3266 | if (NumElements > 4) |
3267 | return SplitVectorStore(Op, DAG); |
3268 | return SDValue(); |
3269 | case AMDGPUAS::PRIVATE_ADDRESS: { |
3270 | switch (Subtarget->getMaxPrivateElementSize()) { |
3271 | case 4: |
3272 | return scalarizeVectorStore(Store, DAG); |
3273 | case 8: |
3274 | if (NumElements > 2) |
3275 | return SplitVectorStore(Op, DAG); |
3276 | return SDValue(); |
3277 | case 16: |
3278 | if (NumElements > 4) |
3279 | return SplitVectorStore(Op, DAG); |
3280 | return SDValue(); |
3281 | default: |
3282 | llvm_unreachable("unsupported private_element_size")::llvm::llvm_unreachable_internal("unsupported private_element_size" , "/tmp/buildd/llvm-toolchain-snapshot-4.0~svn290870/lib/Target/AMDGPU/SIISelLowering.cpp" , 3282); |
3283 | } |
3284 | } |
3285 | case AMDGPUAS::LOCAL_ADDRESS: { |
3286 | if (NumElements > 2) |
3287 | return SplitVectorStore(Op, DAG); |
3288 | |
3289 | if (NumElements == 2) |
3290 | return Op; |
3291 | |
3292 | // If properly aligned, splitting might let us use ds_write_b64.
3293 | return SplitVectorStore(Op, DAG); |
3294 | } |
3295 | default: |
3296 | llvm_unreachable("unhandled address space")::llvm::llvm_unreachable_internal("unhandled address space", "/tmp/buildd/llvm-toolchain-snapshot-4.0~svn290870/lib/Target/AMDGPU/SIISelLowering.cpp" , 3296); |
3297 | } |
3298 | } |
3299 | |
3300 | SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const { |
3301 | SDLoc DL(Op); |
3302 | EVT VT = Op.getValueType(); |
3303 | SDValue Arg = Op.getOperand(0); |
3304 | // TODO: Should this propagate fast-math-flags? |
3305 | SDValue FractPart = DAG.getNode(AMDGPUISD::FRACT, DL, VT, |
3306 | DAG.getNode(ISD::FMUL, DL, VT, Arg, |
3307 | DAG.getConstantFP(0.5 / M_PI, DL,
3308 | VT))); |
3309 | |
3310 | switch (Op.getOpcode()) { |
3311 | case ISD::FCOS: |
3312 | return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, FractPart); |
3313 | case ISD::FSIN: |
3314 | return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, FractPart); |
3315 | default: |
3316 | llvm_unreachable("Wrong trig opcode")::llvm::llvm_unreachable_internal("Wrong trig opcode", "/tmp/buildd/llvm-toolchain-snapshot-4.0~svn290870/lib/Target/AMDGPU/SIISelLowering.cpp" , 3316); |
3317 | } |
3318 | } |
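     | // Note on the scaling above (annotation inferred from the code, not text
     | // from this file): the hardware SIN_HW/COS_HW opcodes take their operand as
     | // a fraction of a full period, so the radian argument is multiplied by
     | // 0.5/M_PI = 1/(2*pi) and reduced to [0, 1) with FRACT before the hardware
     | // node is emitted.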
3319 | |
3320 | SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const { |
3321 | AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op); |
3322 | assert(AtomicNode->isCompareAndSwap());
3323 | unsigned AS = AtomicNode->getAddressSpace(); |
3324 | |
3325 | // No custom lowering required for local address space |
3326 | if (!isFlatGlobalAddrSpace(AS)) |
3327 | return Op; |
3328 | |
3329 | // Non-local address space requires custom lowering for atomic compare |
3330 | // and swap; cmp and swap should be in a v2i32 or v2i64 in case of _X2 |
3331 | SDLoc DL(Op); |
3332 | SDValue ChainIn = Op.getOperand(0); |
3333 | SDValue Addr = Op.getOperand(1); |
3334 | SDValue Old = Op.getOperand(2); |
3335 | SDValue New = Op.getOperand(3); |
3336 | EVT VT = Op.getValueType(); |
3337 | MVT SimpleVT = VT.getSimpleVT(); |
3338 | MVT VecType = MVT::getVectorVT(SimpleVT, 2); |
3339 | |
3340 | SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old}); |
3341 | SDValue Ops[] = { ChainIn, Addr, NewOld }; |
3342 | |
3343 | return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(), |
3344 | Ops, VT, AtomicNode->getMemOperand()); |
3345 | } |
3346 | |
3347 | //===----------------------------------------------------------------------===// |
3348 | // Custom DAG optimizations |
3349 | //===----------------------------------------------------------------------===// |
3350 | |
3351 | SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N, |
3352 | DAGCombinerInfo &DCI) const { |
3353 | EVT VT = N->getValueType(0); |
3354 | EVT ScalarVT = VT.getScalarType(); |
3355 | if (ScalarVT != MVT::f32) |
3356 | return SDValue(); |
3357 | |
3358 | SelectionDAG &DAG = DCI.DAG; |
3359 | SDLoc DL(N); |
3360 | |
3361 | SDValue Src = N->getOperand(0); |
3362 | EVT SrcVT = Src.getValueType(); |
3363 | |
3364 | // TODO: We could try to match extracting the higher bytes, which would be |
3365 | // easier if i8 vectors weren't promoted to i32 vectors, particularly after |
3366 | // types are legalized. v4i8 -> v4f32 is probably the only case to worry |
3367 | // about in practice. |
3368 | if (DCI.isAfterLegalizeVectorOps() && SrcVT == MVT::i32) { |
3369 | if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) { |
3370 | SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Src); |
3371 | DCI.AddToWorklist(Cvt.getNode()); |
3372 | return Cvt; |
3373 | } |
3374 | } |
3375 | |
3376 | return SDValue(); |
3377 | } |
3378 | |
3379 | /// \brief Return true if the given offset Size in bytes can be folded into |
3380 | /// the immediate offsets of a memory instruction for the given address space. |
3381 | static bool canFoldOffset(unsigned OffsetSize, unsigned AS, |
3382 | const SISubtarget &STI) { |
3383 | switch (AS) { |
3384 | case AMDGPUAS::GLOBAL_ADDRESS: { |
3385 | // MUBUF instructions have a 12-bit offset in bytes.
3386 | return isUInt<12>(OffsetSize); |
3387 | } |
3388 | case AMDGPUAS::CONSTANT_ADDRESS: { |
3389 | // SMRD instructions have an 8-bit offset in dwords on SI and |
3390 | // a 20-bit offset in bytes on VI. |
3391 | if (STI.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) |
3392 | return isUInt<20>(OffsetSize); |
3393 | else |
3394 | return (OffsetSize % 4 == 0) && isUInt<8>(OffsetSize / 4); |
3395 | } |
3396 | case AMDGPUAS::LOCAL_ADDRESS: |
3397 | case AMDGPUAS::REGION_ADDRESS: { |
3398 | // The single offset versions have a 16-bit offset in bytes. |
3399 | return isUInt<16>(OffsetSize); |
3400 | } |
3401 | case AMDGPUAS::PRIVATE_ADDRESS: |
3402 | // Indirect register addressing does not use any offsets. |
3403 | default: |
3404 | return false;
3405 | } |
3406 | } |
3407 | |
3408 | // (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2) |
3409 | |
3410 | // This is a variant of |
3411 | // (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2), |
3412 | // |
3413 | // The normal DAG combiner will do this, but only if the add has one use since |
3414 | // that would increase the number of instructions. |
3415 | // |
3416 | // This prevents us from seeing a constant offset that can be folded into a |
3417 | // memory instruction's addressing mode. If we know the resulting add offset of |
3418 | // a pointer can be folded into an addressing offset, we can replace the pointer |
3419 | // operand with the add of new constant offset. This eliminates one of the uses, |
3420 | // and may allow the remaining use to also be simplified. |
3421 | // |
3422 | SDValue SITargetLowering::performSHLPtrCombine(SDNode *N, |
3423 | unsigned AddrSpace, |
3424 | DAGCombinerInfo &DCI) const { |
3425 | SDValue N0 = N->getOperand(0); |
3426 | SDValue N1 = N->getOperand(1); |
3427 | |
3428 | if (N0.getOpcode() != ISD::ADD) |
3429 | return SDValue(); |
3430 | |
3431 | const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1); |
3432 | if (!CN1) |
3433 | return SDValue(); |
3434 | |
3435 | const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1)); |
3436 | if (!CAdd) |
3437 | return SDValue(); |
3438 | |
3439 | // If the resulting offset is too large, we can't fold it into the addressing |
3440 | // mode offset. |
3441 | APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue(); |
3442 | if (!canFoldOffset(Offset.getZExtValue(), AddrSpace, *getSubtarget())) |
3443 | return SDValue(); |
3444 | |
3445 | SelectionDAG &DAG = DCI.DAG; |
3446 | SDLoc SL(N); |
3447 | EVT VT = N->getValueType(0); |
3448 | |
3449 | SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1); |
3450 | SDValue COffset = DAG.getConstant(Offset, SL, MVT::i32); |
3451 | |
3452 | return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset); |
3453 | } |
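     | // Worked example of the combine above (illustration): for a DS access whose
     | // pointer is (shl (add %x, 16), 2), the shifted constant 16 << 2 = 64 fits
     | // the 16-bit DS immediate offset, so the pointer is rewritten to
     | // (add (shl %x, 2), 64) and the memory instruction can later fold the +64
     | // into its offset field.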
3454 | |
3455 | SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N, |
3456 | DAGCombinerInfo &DCI) const { |
3457 | SDValue Ptr = N->getBasePtr(); |
3458 | SelectionDAG &DAG = DCI.DAG; |
3459 | SDLoc SL(N); |
3460 | |
3461 | // TODO: We could also do this for multiplies. |
3462 | unsigned AS = N->getAddressSpace(); |
3463 | if (Ptr.getOpcode() == ISD::SHL && AS != AMDGPUAS::PRIVATE_ADDRESS) { |
3464 | SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), AS, DCI); |
3465 | if (NewPtr) { |
3466 | SmallVector<SDValue, 8> NewOps(N->op_begin(), N->op_end()); |
3467 | |
3468 | NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr; |
3469 | return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0); |
3470 | } |
3471 | } |
3472 | |
3473 | return SDValue(); |
3474 | } |
3475 | |
3476 | static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) { |
3477 | return (Opc == ISD::AND && (Val == 0 || Val == 0xffffffff)) || |
3478 | (Opc == ISD::OR && (Val == 0xffffffff || Val == 0)) || |
3479 | (Opc == ISD::XOR && Val == 0); |
3480 | } |
3481 | |
3482 | // Break up a 64-bit bit operation with a constant into two 32-bit and/or/xor. This
3483 | // will typically happen anyway for a VALU 64-bit and. This exposes other 32-bit |
3484 | // integer combine opportunities since most 64-bit operations are decomposed |
3485 | // this way. TODO: We won't want this for SALU especially if it is an inline |
3486 | // immediate. |
3487 | SDValue SITargetLowering::splitBinaryBitConstantOp( |
3488 | DAGCombinerInfo &DCI, |
3489 | const SDLoc &SL, |
3490 | unsigned Opc, SDValue LHS, |
3491 | const ConstantSDNode *CRHS) const { |
3492 | uint64_t Val = CRHS->getZExtValue(); |
3493 | uint32_t ValLo = Lo_32(Val); |
3494 | uint32_t ValHi = Hi_32(Val); |
3495 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
3496 | |
3497 | if ((bitOpWithConstantIsReducible(Opc, ValLo) || |
3498 | bitOpWithConstantIsReducible(Opc, ValHi)) || |
3499 | (CRHS->hasOneUse() && !TII->isInlineConstant(CRHS->getAPIntValue()))) { |
3500 | // If we need to materialize a 64-bit immediate, it will be split up later |
3501 | // anyway. Avoid creating the harder to understand 64-bit immediate |
3502 | // materialization. |
3503 | return splitBinaryBitConstantOpImpl(DCI, SL, Opc, LHS, ValLo, ValHi); |
3504 | } |
3505 | |
3506 | return SDValue(); |
3507 | } |
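     | // Illustrative example (not from the source): for (and i64 %x, 0xffffffff),
     | // ValLo = 0xffffffff and ValHi = 0, both reducible bit operations, so the
     | // node is split into (and lo(%x), 0xffffffff) and (and hi(%x), 0), which
     | // fold to lo(%x) and 0 respectively and avoid materializing a 64-bit mask.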
3508 | |
3509 | SDValue SITargetLowering::performAndCombine(SDNode *N, |
3510 | DAGCombinerInfo &DCI) const { |
3511 | if (DCI.isBeforeLegalize()) |
3512 | return SDValue(); |
3513 | |
3514 | SelectionDAG &DAG = DCI.DAG; |
3515 | EVT VT = N->getValueType(0); |
3516 | SDValue LHS = N->getOperand(0); |
3517 | SDValue RHS = N->getOperand(1); |
3518 | |
3519 | |
3520 | if (VT == MVT::i64) { |
3521 | const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS); |
3522 | if (CRHS) { |
3523 | if (SDValue Split |
3524 | = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS)) |
3525 | return Split; |
3526 | } |
3527 | } |
3528 | |
3529 | // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) -> |
3530 | // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity) |
3531 | if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == ISD::SETCC) { |
3532 | ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get(); |
3533 | ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get(); |
3534 | |
3535 | SDValue X = LHS.getOperand(0); |
3536 | SDValue Y = RHS.getOperand(0); |
3537 | if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X) |
3538 | return SDValue(); |
3539 | |
3540 | if (LCC == ISD::SETO) { |
3541 | if (X != LHS.getOperand(1)) |
3542 | return SDValue(); |
3543 | |
3544 | if (RCC == ISD::SETUNE) { |
3545 | const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1)); |
3546 | if (!C1 || !C1->isInfinity() || C1->isNegative()) |
3547 | return SDValue(); |
3548 | |
3549 | const uint32_t Mask = SIInstrFlags::N_NORMAL | |
3550 | SIInstrFlags::N_SUBNORMAL | |
3551 | SIInstrFlags::N_ZERO | |
3552 | SIInstrFlags::P_ZERO | |
3553 | SIInstrFlags::P_SUBNORMAL | |
3554 | SIInstrFlags::P_NORMAL; |
3555 | |
3556 | static_assert(((~(SIInstrFlags::S_NAN | |
3557 | SIInstrFlags::Q_NAN | |
3558 | SIInstrFlags::N_INFINITY | |
3559 | SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask, |
3560 | "mask not equal"); |
3561 | |
3562 | SDLoc DL(N); |
3563 | return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, |
3564 | X, DAG.getConstant(Mask, DL, MVT::i32)); |
3565 | } |
3566 | } |
3567 | } |
3568 | |
3569 | return SDValue(); |
3570 | } |
3571 | |
3572 | SDValue SITargetLowering::performOrCombine(SDNode *N, |
3573 | DAGCombinerInfo &DCI) const { |
3574 | SelectionDAG &DAG = DCI.DAG; |
3575 | SDValue LHS = N->getOperand(0); |
3576 | SDValue RHS = N->getOperand(1); |
3577 | |
3578 | EVT VT = N->getValueType(0); |
3579 | if (VT == MVT::i1) { |
3580 | // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2) |
3581 | if (LHS.getOpcode() == AMDGPUISD::FP_CLASS && |
3582 | RHS.getOpcode() == AMDGPUISD::FP_CLASS) { |
3583 | SDValue Src = LHS.getOperand(0); |
3584 | if (Src != RHS.getOperand(0)) |
3585 | return SDValue(); |
3586 | |
3587 | const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1)); |
3588 | const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1)); |
3589 | if (!CLHS || !CRHS) |
3590 | return SDValue(); |
3591 | |
3592 | // Only 10 bits are used. |
3593 | static const uint32_t MaxMask = 0x3ff; |
3594 | |
3595 | uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask; |
3596 | SDLoc DL(N); |
3597 | return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, |
3598 | Src, DAG.getConstant(NewMask, DL, MVT::i32)); |
3599 | } |
3600 | |
3601 | return SDValue(); |
3602 | } |
3603 | |
3604 | if (VT != MVT::i64) |
3605 | return SDValue(); |
3606 | |
3607 | // TODO: This could be a generic combine with a predicate for extracting the |
3608 | // high half of an integer being free. |
3609 | |
3610 | // (or i64:x, (zero_extend i32:y)) -> |
3611 | // i64 (bitcast (v2i32 build_vector (or i32:y, lo_32(x)), hi_32(x))) |
3612 | if (LHS.getOpcode() == ISD::ZERO_EXTEND && |
3613 | RHS.getOpcode() != ISD::ZERO_EXTEND) |
3614 | std::swap(LHS, RHS); |
3615 | |
3616 | if (RHS.getOpcode() == ISD::ZERO_EXTEND) { |
3617 | SDValue ExtSrc = RHS.getOperand(0); |
3618 | EVT SrcVT = ExtSrc.getValueType(); |
3619 | if (SrcVT == MVT::i32) { |
3620 | SDLoc SL(N); |
3621 | SDValue LowLHS, HiBits; |
3622 | std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG); |
3623 | SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc); |
3624 | |
3625 | DCI.AddToWorklist(LowOr.getNode()); |
3626 | DCI.AddToWorklist(HiBits.getNode()); |
3627 | |
3628 | SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, |
3629 | LowOr, HiBits); |
3630 | return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec); |
3631 | } |
3632 | } |
3633 | |
3634 | const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(N->getOperand(1)); |
3635 | if (CRHS) { |
3636 | if (SDValue Split |
3637 | = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR, LHS, CRHS)) |
3638 | return Split; |
3639 | } |
3640 | |
3641 | return SDValue(); |
3642 | } |
3643 | |
3644 | SDValue SITargetLowering::performXorCombine(SDNode *N, |
3645 | DAGCombinerInfo &DCI) const { |
3646 | EVT VT = N->getValueType(0); |
3647 | if (VT != MVT::i64) |
3648 | return SDValue(); |
3649 | |
3650 | SDValue LHS = N->getOperand(0); |
3651 | SDValue RHS = N->getOperand(1); |
3652 | |
3653 | const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS); |
3654 | if (CRHS) { |
3655 | if (SDValue Split |
3656 | = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS)) |
3657 | return Split; |
3658 | } |
3659 | |
3660 | return SDValue(); |
3661 | } |
3662 | |
3663 | SDValue SITargetLowering::performClassCombine(SDNode *N, |
3664 | DAGCombinerInfo &DCI) const { |
3665 | SelectionDAG &DAG = DCI.DAG; |
3666 | SDValue Mask = N->getOperand(1); |
3667 | |
3668 | // fp_class x, 0 -> false |
3669 | if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) { |
3670 | if (CMask->isNullValue()) |
3671 | return DAG.getConstant(0, SDLoc(N), MVT::i1); |
3672 | } |
3673 | |
3674 | if (N->getOperand(0).isUndef()) |
3675 | return DAG.getUNDEF(MVT::i1); |
3676 | |
3677 | return SDValue(); |
3678 | } |
3679 | |
3680 | // Constant fold canonicalize. |
3681 | SDValue SITargetLowering::performFCanonicalizeCombine( |
3682 | SDNode *N, |
3683 | DAGCombinerInfo &DCI) const { |
3684 | ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0)); |
3685 | if (!CFP) |
3686 | return SDValue(); |
3687 | |
3688 | SelectionDAG &DAG = DCI.DAG; |
3689 | const APFloat &C = CFP->getValueAPF(); |
3690 | |
3691 | // Flush denormals to 0 if not enabled. |
3692 | if (C.isDenormal()) { |
3693 | EVT VT = N->getValueType(0); |
3694 | if (VT == MVT::f32 && !Subtarget->hasFP32Denormals()) |
3695 | return DAG.getConstantFP(0.0, SDLoc(N), VT); |
3696 | |
3697 | if (VT == MVT::f64 && !Subtarget->hasFP64Denormals()) |
3698 | return DAG.getConstantFP(0.0, SDLoc(N), VT); |
3699 | |
3700 | if (VT == MVT::f16 && !Subtarget->hasFP16Denormals()) |
3701 | return DAG.getConstantFP(0.0, SDLoc(N), VT); |
3702 | } |
3703 | |
3704 | if (C.isNaN()) { |
3705 | EVT VT = N->getValueType(0); |
3706 | APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics()); |
3707 | if (C.isSignaling()) { |
3708 | // Quiet a signaling NaN. |
3709 | return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT); |
3710 | } |
3711 | |
3712 | // Make sure it is the canonical NaN bitpattern. |
3713 | // |
3714 | // TODO: Can we use -1 as the canonical NaN value since it's an inline |
3715 | // immediate? |
3716 | if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt()) |
3717 | return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT); |
3718 | } |
3719 | |
3720 | return SDValue(CFP, 0); |
3721 | } |
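     | // Example of the folding above (illustration, assuming IEEE-754 single
     | // precision bit patterns): with f32 denormals disabled, a denormal constant
     | // such as 0x00000001 folds to +0.0, and a signaling NaN such as 0x7f800001
     | // is replaced by the canonical quiet NaN 0x7fc00000 produced by
     | // APFloat::getQNaN.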
3722 | |
3723 | static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) { |
3724 | switch (Opc) { |
3725 | case ISD::FMAXNUM: |
3726 | return AMDGPUISD::FMAX3; |
3727 | case ISD::SMAX: |
3728 | return AMDGPUISD::SMAX3; |
3729 | case ISD::UMAX: |
3730 | return AMDGPUISD::UMAX3; |
3731 | case ISD::FMINNUM: |
3732 | return AMDGPUISD::FMIN3; |
3733 | case ISD::SMIN: |
3734 | return AMDGPUISD::SMIN3; |
3735 | case ISD::UMIN: |
3736 | return AMDGPUISD::UMIN3; |
3737 | default: |
3738 | llvm_unreachable("Not a min/max opcode")::llvm::llvm_unreachable_internal("Not a min/max opcode", "/tmp/buildd/llvm-toolchain-snapshot-4.0~svn290870/lib/Target/AMDGPU/SIISelLowering.cpp" , 3738); |
3739 | } |
3740 | } |
3741 | |
3742 | static SDValue performIntMed3ImmCombine(SelectionDAG &DAG, const SDLoc &SL, |
3743 | SDValue Op0, SDValue Op1, bool Signed) { |
3744 | ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1); |
3745 | if (!K1) |
3746 | return SDValue(); |
3747 | |
3748 | ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1)); |
3749 | if (!K0) |
3750 | return SDValue(); |
3751 | |
3752 | if (Signed) { |
3753 | if (K0->getAPIntValue().sge(K1->getAPIntValue())) |
3754 | return SDValue(); |
3755 | } else { |
3756 | if (K0->getAPIntValue().uge(K1->getAPIntValue())) |
3757 | return SDValue(); |
3758 | } |
3759 | |
3760 | EVT VT = K0->getValueType(0); |
3761 | |
3762 | MVT NVT = MVT::i32; |
3763 | unsigned ExtOp = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; |
3764 | |
3765 | SDValue Tmp1, Tmp2, Tmp3; |
3766 | Tmp1 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(0)); |
3767 | Tmp2 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(1)); |
3768 | Tmp3 = DAG.getNode(ExtOp, SL, NVT, Op1); |
3769 | |
3770 | if (VT == MVT::i16) { |
3771 | Tmp1 = DAG.getNode(Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3, SL, NVT, |
3772 | Tmp1, Tmp2, Tmp3); |
3773 | |
3774 | return DAG.getNode(ISD::TRUNCATE, SL, VT, Tmp1); |
3775 | } else |
3776 | return DAG.getNode(Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3, SL, VT, |
3777 | Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0)); |
3778 | } |
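     | // Worked example for the helper above (illustration): smin(smax(%x, -3), 7)
     | // with -3 < 7 becomes smed3(%x, -3, 7); for i16 operands the values are
     | // first sign- or zero-extended to i32, the 32-bit med3 is formed, and the
     | // result is truncated back to i16.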
3779 | |
3780 | static bool isKnownNeverSNan(SelectionDAG &DAG, SDValue Op) { |
3781 | if (!DAG.getTargetLoweringInfo().hasFloatingPointExceptions()) |
3782 | return true; |
3783 | |
3784 | return DAG.isKnownNeverNaN(Op); |
3785 | } |
3786 | |
3787 | static SDValue performFPMed3ImmCombine(SelectionDAG &DAG, const SDLoc &SL, |
3788 | SDValue Op0, SDValue Op1) { |
3789 | ConstantFPSDNode *K1 = dyn_cast<ConstantFPSDNode>(Op1); |
3790 | if (!K1) |
3791 | return SDValue(); |
3792 | |
3793 | ConstantFPSDNode *K0 = dyn_cast<ConstantFPSDNode>(Op0.getOperand(1)); |
3794 | if (!K0) |
3795 | return SDValue(); |
3796 | |
3797 | // Ordered >= (although NaN inputs should have folded away by now). |
3798 | APFloat::cmpResult Cmp = K0->getValueAPF().compare(K1->getValueAPF()); |
3799 | if (Cmp == APFloat::cmpGreaterThan) |
3800 | return SDValue(); |
3801 | |
3802 | // This isn't safe with signaling NaNs because in IEEE mode, min/max on a |
3803 | // signaling NaN gives a quiet NaN. The quiet NaN input to the min would then |
3804 | // give the other result, which is different from med3 with a NaN input. |
3805 | SDValue Var = Op0.getOperand(0); |
3806 | if (!isKnownNeverSNan(DAG, Var)) |
3807 | return SDValue(); |
3808 | |
3809 | return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0), |
3810 | Var, SDValue(K0, 0), SDValue(K1, 0)); |
3811 | } |
3812 | |
3813 | SDValue SITargetLowering::performMinMaxCombine(SDNode *N, |
3814 | DAGCombinerInfo &DCI) const { |
3815 | SelectionDAG &DAG = DCI.DAG; |
3816 | |
3817 | unsigned Opc = N->getOpcode(); |
3818 | SDValue Op0 = N->getOperand(0); |
3819 | SDValue Op1 = N->getOperand(1); |
3820 | |
3821 | // Only do this if the inner op has one use since this would just increase
3822 | // register pressure for no benefit. |
3823 | |
3824 | if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY) { |
3825 | // max(max(a, b), c) -> max3(a, b, c) |
3826 | // min(min(a, b), c) -> min3(a, b, c) |
3827 | if (Op0.getOpcode() == Opc && Op0.hasOneUse()) { |
3828 | SDLoc DL(N); |
3829 | return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc), |
3830 | DL, |
3831 | N->getValueType(0), |
3832 | Op0.getOperand(0), |
3833 | Op0.getOperand(1), |
3834 | Op1); |
3835 | } |
3836 | |
3837 | // Try commuted. |
3838 | // max(a, max(b, c)) -> max3(a, b, c) |
3839 | // min(a, min(b, c)) -> min3(a, b, c) |
3840 | if (Op1.getOpcode() == Opc && Op1.hasOneUse()) { |
3841 | SDLoc DL(N); |
3842 | return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc), |
3843 | DL, |
3844 | N->getValueType(0), |
3845 | Op0, |
3846 | Op1.getOperand(0), |
3847 | Op1.getOperand(1)); |
3848 | } |
3849 | } |
3850 | |
3851 | // min(max(x, K0), K1), K0 < K1 -> med3(x, K0, K1) |
3852 | if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) { |
3853 | if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true)) |
3854 | return Med3; |
3855 | } |
3856 | |
3857 | if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) { |
3858 | if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false)) |
3859 | return Med3; |
3860 | } |
3861 | |
3862 | // fminnum(fmaxnum(x, K0), K1), K0 < K1 && !is_snan(x) -> fmed3(x, K0, K1) |
3863 | if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) || |
3864 | (Opc == AMDGPUISD::FMIN_LEGACY && |
3865 | Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) && |
3866 | N->getValueType(0) == MVT::f32 && Op0.hasOneUse()) { |
3867 | if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1)) |
3868 | return Res; |
3869 | } |
3870 | |
3871 | return SDValue(); |
3872 | } |
3873 | |
3874 | unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG, |
3875 | const SDNode *N0, |
3876 | const SDNode *N1) const { |
3877 | EVT VT = N0->getValueType(0); |
3878 | |
3879 | // Only do this if we are not trying to support denormals. v_mad_f32 does not |
3880 | // support denormals ever. |
3881 | if ((VT == MVT::f32 && !Subtarget->hasFP32Denormals()) || |
3882 | (VT == MVT::f16 && !Subtarget->hasFP16Denormals())) |
3883 | return ISD::FMAD; |
3884 | |
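     | // Otherwise fall back to FMA, but only when FP contraction is allowed
     | // (fast fusion globally, unsafe math, or unsafe-algebra flags on both
     | // nodes) and an fma is considered faster than separate fmul/fadd.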
3885 | const TargetOptions &Options = DAG.getTarget().Options; |
3886 | if ((Options.AllowFPOpFusion == FPOpFusion::Fast || |
3887 | Options.UnsafeFPMath || |
3888 | (cast<BinaryWithFlagsSDNode>(N0)->Flags.hasUnsafeAlgebra() && |
3889 | cast<BinaryWithFlagsSDNode>(N1)->Flags.hasUnsafeAlgebra())) && |
3890 | isFMAFasterThanFMulAndFAdd(VT)) { |
3891 | return ISD::FMA; |
3892 | } |
3893 | |
3894 | return 0; |
3895 | } |
3896 | |
3897 | SDValue SITargetLowering::performFAddCombine(SDNode *N, |
3898 | DAGCombinerInfo &DCI) const { |
3899 | if (DCI.getDAGCombineLevel() < AfterLegalizeDAG) |
3900 | return SDValue(); |
3901 | |
3902 | SelectionDAG &DAG = DCI.DAG; |
3903 | EVT VT = N->getValueType(0); |
3904 | assert(!VT.isVector());
3905 | |
3906 | SDLoc SL(N); |
3907 | SDValue LHS = N->getOperand(0); |
3908 | SDValue RHS = N->getOperand(1); |
3909 | |
3910 | // These should really be instruction patterns, but writing patterns with |
3911 | // source modifiers is a pain.
3912 | |
3913 | // fadd (fadd (a, a), b) -> mad 2.0, a, b |
3914 | if (LHS.getOpcode() == ISD::FADD) { |
3915 | SDValue A = LHS.getOperand(0); |
3916 | if (A == LHS.getOperand(1)) { |
3917 | unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode()); |
3918 | if (FusedOp != 0) { |
3919 | const SDValue Two = DAG.getConstantFP(2.0, SL, VT); |
3920 | return DAG.getNode(FusedOp, SL, VT, A, Two, RHS); |
3921 | } |
3922 | } |
3923 | } |
3924 | |
3925 | // fadd (b, fadd (a, a)) -> mad 2.0, a, b |
3926 | if (RHS.getOpcode() == ISD::FADD) { |
3927 | SDValue A = RHS.getOperand(0); |
3928 | if (A == RHS.getOperand(1)) { |
3929 | unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode()); |
3930 | if (FusedOp != 0) { |
3931 | const SDValue Two = DAG.getConstantFP(2.0, SL, VT); |
3932 | return DAG.getNode(FusedOp, SL, VT, A, Two, LHS); |
3933 | } |
3934 | } |
3935 | } |
3936 | |
3937 | return SDValue(); |
3938 | } |
3939 | |
3940 | SDValue SITargetLowering::performFSubCombine(SDNode *N, |
3941 | DAGCombinerInfo &DCI) const { |
3942 | if (DCI.getDAGCombineLevel() < AfterLegalizeDAG) |
3943 | return SDValue(); |
3944 | |
3945 | SelectionDAG &DAG = DCI.DAG; |
3946 | SDLoc SL(N); |
3947 | EVT VT = N->getValueType(0); |
3948 | assert(!VT.isVector());
3949 | |
3950 | // Try to get the fneg to fold into the source modifier. This undoes generic |
3951 | // DAG combines and folds them into the mad. |
3952 | // |
3953 | // Only do this if we are not trying to support denormals. v_mad_f32 does |
3954 | // not support denormals ever. |
3955 | SDValue LHS = N->getOperand(0); |
3956 | SDValue RHS = N->getOperand(1); |
3957 | if (LHS.getOpcode() == ISD::FADD) { |
3958 | // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c) |
3959 | SDValue A = LHS.getOperand(0); |
3960 | if (A == LHS.getOperand(1)) { |
3961 | unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode()); |
3962 | if (FusedOp != 0) {
3963 | const SDValue Two = DAG.getConstantFP(2.0, SL, VT); |
3964 | SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); |
3965 | |
3966 | return DAG.getNode(FusedOp, SL, VT, A, Two, NegRHS); |
3967 | } |
3968 | } |
3969 | } |
3970 | |
3971 | if (RHS.getOpcode() == ISD::FADD) { |
3972 | // (fsub c, (fadd a, a)) -> mad -2.0, a, c |
3973 | |
3974 | SDValue A = RHS.getOperand(0); |
3975 | if (A == RHS.getOperand(1)) { |
3976 | unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode()); |
3977 | if (FusedOp != 0) {
3978 | const SDValue NegTwo = DAG.getConstantFP(-2.0, SL, VT); |
3979 | return DAG.getNode(FusedOp, SL, VT, A, NegTwo, LHS); |
3980 | } |
3981 | } |
3982 | } |
3983 | |
3984 | return SDValue(); |
3985 | } |
3986 | |
3987 | SDValue SITargetLowering::performSetCCCombine(SDNode *N, |
3988 | DAGCombinerInfo &DCI) const { |
3989 | SelectionDAG &DAG = DCI.DAG; |
3990 | SDLoc SL(N); |
3991 | |
3992 | SDValue LHS = N->getOperand(0); |
3993 | SDValue RHS = N->getOperand(1); |
3994 | EVT VT = LHS.getValueType(); |
3995 | |
3996 | if (VT != MVT::f32 && VT != MVT::f64 &&
3997 | (!Subtarget->has16BitInsts() || VT != MVT::f16))
3998 | return SDValue(); |
3999 | |
4000 | // Match isinf pattern |
4001 | // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity)) |
4002 | ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); |
4003 | if (CC == ISD::SETOEQ && LHS.getOpcode() == ISD::FABS) { |
4004 | const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS); |
4005 | if (!CRHS) |
4006 | return SDValue(); |
4007 | |
4008 | const APFloat &APF = CRHS->getValueAPF(); |
4009 | if (APF.isInfinity() && !APF.isNegative()) { |
4010 | unsigned Mask = SIInstrFlags::P_INFINITY | SIInstrFlags::N_INFINITY; |
4011 | return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0), |
4012 | DAG.getConstant(Mask, SL, MVT::i32)); |
4013 | } |
4014 | } |
4015 | |
4016 | return SDValue(); |
4017 | } |
4018 | |
4019 | SDValue SITargetLowering::performCvtF32UByteNCombine(SDNode *N, |
4020 | DAGCombinerInfo &DCI) const { |
4021 | SelectionDAG &DAG = DCI.DAG; |
4022 | SDLoc SL(N); |
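     | // The CVT_F32_UBYTE0..3 opcodes are assumed consecutive, so the difference
     | // from the base opcode gives which source byte (0-3) is being converted.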
4023 | unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0; |
4024 | |
4025 | SDValue Src = N->getOperand(0); |
4026 | SDValue Srl = N->getOperand(0); |
4027 | if (Srl.getOpcode() == ISD::ZERO_EXTEND) |
4028 | Srl = Srl.getOperand(0); |
4029 | |
4030 | // TODO: Handle (or x, (srl y, 8)) pattern when known bits are zero. |
4031 | if (Srl.getOpcode() == ISD::SRL) { |
4032 | // cvt_f32_ubyte0 (srl x, 16) -> cvt_f32_ubyte2 x |
4033 | // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x |
4034 | // cvt_f32_ubyte0 (srl x, 8) -> cvt_f32_ubyte1 x |
4035 | |
4036 | if (const ConstantSDNode *C = |
4037 | dyn_cast<ConstantSDNode>(Srl.getOperand(1))) { |
4038 | Srl = DAG.getZExtOrTrunc(Srl.getOperand(0), SDLoc(Srl.getOperand(0)), |
4039 | EVT(MVT::i32)); |
4040 | |
4041 | unsigned SrcOffset = C->getZExtValue() + 8 * Offset; |
4042 | if (SrcOffset < 32 && SrcOffset % 8 == 0) { |
4043 | return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + SrcOffset / 8, SL, |
4044 | MVT::f32, Srl); |
4045 | } |
4046 | } |
4047 | } |
4048 | |
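     | // Only the byte selected by this conversion is demanded from the source;
     | // try to simplify the source operand based on that.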
4049 | APInt Demanded = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8); |
4050 | |
4051 | APInt KnownZero, KnownOne; |
4052 | TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), |
4053 | !DCI.isBeforeLegalizeOps()); |
4054 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
4055 | if (TLO.ShrinkDemandedConstant(Src, Demanded) || |
4056 | TLI.SimplifyDemandedBits(Src, Demanded, KnownZero, KnownOne, TLO)) { |
4057 | DCI.CommitTargetLoweringOpt(TLO); |
4058 | } |
4059 | |
4060 | return SDValue(); |
4061 | } |
4062 | |
4063 | SDValue SITargetLowering::PerformDAGCombine(SDNode *N, |
4064 | DAGCombinerInfo &DCI) const { |
4065 | switch (N->getOpcode()) { |
4066 | default: |
4067 | return AMDGPUTargetLowering::PerformDAGCombine(N, DCI); |
4068 | case ISD::FADD: |
4069 | return performFAddCombine(N, DCI); |
4070 | case ISD::FSUB: |
4071 | return performFSubCombine(N, DCI); |
4072 | case ISD::SETCC: |
4073 | return performSetCCCombine(N, DCI); |
4074 | case ISD::FMAXNUM: |
4075 | case ISD::FMINNUM: |
4076 | case ISD::SMAX: |
4077 | case ISD::SMIN: |
4078 | case ISD::UMAX: |
4079 | case ISD::UMIN: |
4080 | case AMDGPUISD::FMIN_LEGACY: |
4081 | case AMDGPUISD::FMAX_LEGACY: { |
4082 | if (DCI.getDAGCombineLevel() >= AfterLegalizeDAG && |
4083 | N->getValueType(0) != MVT::f64 && |
4084 | getTargetMachine().getOptLevel() > CodeGenOpt::None) |
4085 | return performMinMaxCombine(N, DCI); |
4086 | break; |
4087 | } |
4088 | case ISD::LOAD: |
4089 | case ISD::STORE: |
4090 | case ISD::ATOMIC_LOAD: |
4091 | case ISD::ATOMIC_STORE: |
4092 | case ISD::ATOMIC_CMP_SWAP: |
4093 | case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: |
4094 | case ISD::ATOMIC_SWAP: |
4095 | case ISD::ATOMIC_LOAD_ADD: |
4096 | case ISD::ATOMIC_LOAD_SUB: |
4097 | case ISD::ATOMIC_LOAD_AND: |
4098 | case ISD::ATOMIC_LOAD_OR: |
4099 | case ISD::ATOMIC_LOAD_XOR: |
4100 | case ISD::ATOMIC_LOAD_NAND: |
4101 | case ISD::ATOMIC_LOAD_MIN: |
4102 | case ISD::ATOMIC_LOAD_MAX: |
4103 | case ISD::ATOMIC_LOAD_UMIN: |
4104 | case ISD::ATOMIC_LOAD_UMAX: |
4105 | case AMDGPUISD::ATOMIC_INC: |
4106 | case AMDGPUISD::ATOMIC_DEC: { // TODO: Target mem intrinsics. |
4107 | if (DCI.isBeforeLegalize()) |
4108 | break; |
4109 | return performMemSDNodeCombine(cast<MemSDNode>(N), DCI); |
4110 | } |
4111 | case ISD::AND: |
4112 | return performAndCombine(N, DCI); |
4113 | case ISD::OR: |
4114 | return performOrCombine(N, DCI); |
4115 | case ISD::XOR: |
4116 | return performXorCombine(N, DCI); |
4117 | case AMDGPUISD::FP_CLASS: |
4118 | return performClassCombine(N, DCI); |
4119 | case ISD::FCANONICALIZE: |
4120 | return performFCanonicalizeCombine(N, DCI); |
4121 | case AMDGPUISD::FRACT: |
4122 | case AMDGPUISD::RCP: |
4123 | case AMDGPUISD::RSQ: |
4124 | case AMDGPUISD::RCP_LEGACY: |
4125 | case AMDGPUISD::RSQ_LEGACY: |
4126 | case AMDGPUISD::RSQ_CLAMP: |
4127 | case AMDGPUISD::LDEXP: { |
4128 | SDValue Src = N->getOperand(0); |
4129 | if (Src.isUndef()) |
4130 | return Src; |
4131 | break; |
4132 | } |
4133 | case ISD::SINT_TO_FP: |
4134 | case ISD::UINT_TO_FP: |
4135 | return performUCharToFloatCombine(N, DCI); |
4136 | case AMDGPUISD::CVT_F32_UBYTE0: |
4137 | case AMDGPUISD::CVT_F32_UBYTE1: |
4138 | case AMDGPUISD::CVT_F32_UBYTE2: |
4139 | case AMDGPUISD::CVT_F32_UBYTE3: |
4140 | return performCvtF32UByteNCombine(N, DCI); |
4141 | } |
4142 | return AMDGPUTargetLowering::PerformDAGCombine(N, DCI); |
4143 | } |
4144 | |
4145 | /// \brief Helper function for adjustWritemask |
4146 | static unsigned SubIdx2Lane(unsigned Idx) { |
4147 | switch (Idx) { |
4148 | default: return 0; |
4149 | case AMDGPU::sub0: return 0; |
4150 | case AMDGPU::sub1: return 1; |
4151 | case AMDGPU::sub2: return 2; |
4152 | case AMDGPU::sub3: return 3; |
4153 | } |
4154 | } |
4155 | |
4156 | /// \brief Adjust the writemask of MIMG instructions |
4157 | void SITargetLowering::adjustWritemask(MachineSDNode *&Node, |
4158 | SelectionDAG &DAG) const { |
4159 | SDNode *Users[4] = { }; |
4160 | unsigned Lane = 0; |
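     | // Locate the dmask operand; its index depends on which MIMG operand layout
     | // this node uses, distinguished here by the operand/result counts.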
4161 | unsigned DmaskIdx = (Node->getNumOperands() - Node->getNumValues() == 9) ? 2 : 3; |
4162 | unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx); |
4163 | unsigned NewDmask = 0; |
4164 | |
4165 | // Try to figure out the used register components |
4166 | for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end(); |
4167 | I != E; ++I) { |
4168 | |
4169 | // Abort if we can't understand the usage |
4170 | if (!I->isMachineOpcode() || |
4171 | I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG) |
4172 | return; |
4173 | |
4174 | // Lane means which subreg of %VGPRa_VGPRb_VGPRc_VGPRd is used. |
4175 | // Note that subregs are packed, i.e. Lane==0 is the first bit set |
4176 | // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit |
4177 | // set, etc. |
4178 | Lane = SubIdx2Lane(I->getConstantOperandVal(1)); |
4179 | |
4180 | // Set which texture component corresponds to the lane. |
4181 | unsigned Comp; |
4182 | for (unsigned i = 0, Dmask = OldDmask; i <= Lane; i++) { |
4183 | assert(Dmask);
4184 | Comp = countTrailingZeros(Dmask); |
4185 | Dmask &= ~(1 << Comp); |
4186 | } |
4187 | |
4188 | // Abort if we have more than one user per component |
4189 | if (Users[Lane]) |
4190 | return; |
4191 | |
4192 | Users[Lane] = *I; |
4193 | NewDmask |= 1 << Comp; |
4194 | } |
4195 | |
4196 | // Abort if there's no change |
4197 | if (NewDmask == OldDmask) |
4198 | return; |
4199 | |
4200 | // Adjust the writemask in the node |
4201 | std::vector<SDValue> Ops; |
4202 | Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx); |
4203 | Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32)); |
4204 | Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end()); |
4205 | Node = (MachineSDNode*)DAG.UpdateNodeOperands(Node, Ops); |
4206 | |
4207 | // If we only got one lane, replace it with a copy |
4208 | // (if NewDmask has only one bit set...) |
4209 | if (NewDmask && (NewDmask & (NewDmask-1)) == 0) { |
4210 | SDValue RC = DAG.getTargetConstant(AMDGPU::VGPR_32RegClassID, SDLoc(), |
4211 | MVT::i32); |
4212 | SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS, |
4213 | SDLoc(), Users[Lane]->getValueType(0), |
4214 | SDValue(Node, 0), RC); |
4215 | DAG.ReplaceAllUsesWith(Users[Lane], Copy); |
4216 | return; |
4217 | } |
4218 | |
4219 | // Update the users of the node with the new indices |
4220 | for (unsigned i = 0, Idx = AMDGPU::sub0; i < 4; ++i) { |
4221 | |
4222 | SDNode *User = Users[i]; |
4223 | if (!User) |
4224 | continue; |
4225 | |
4226 | SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32); |
4227 | DAG.UpdateNodeOperands(User, User->getOperand(0), Op); |
4228 | |
4229 | switch (Idx) { |
4230 | default: break; |
4231 | case AMDGPU::sub0: Idx = AMDGPU::sub1; break; |
4232 | case AMDGPU::sub1: Idx = AMDGPU::sub2; break; |
4233 | case AMDGPU::sub2: Idx = AMDGPU::sub3; break; |
4234 | } |
4235 | } |
4236 | } |
4237 | |
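     | // A frame index may be wrapped in an AssertZext; look through it before
     | // testing for FrameIndexSDNode.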
4238 | static bool isFrameIndexOp(SDValue Op) { |
4239 | if (Op.getOpcode() == ISD::AssertZext) |
4240 | Op = Op.getOperand(0); |
4241 | |
4242 | return isa<FrameIndexSDNode>(Op); |
4243 | } |
4244 | |
4245 | /// \brief Legalize target independent instructions (e.g. INSERT_SUBREG) |
4246 | /// with frame index operands. |
4247 | /// LLVM assumes that inputs to these instructions are registers.
4248 | void SITargetLowering::legalizeTargetIndependentNode(SDNode *Node, |
4249 | SelectionDAG &DAG) const { |
4250 | |
4251 | SmallVector<SDValue, 8> Ops; |
4252 | for (unsigned i = 0; i < Node->getNumOperands(); ++i) { |
4253 | if (!isFrameIndexOp(Node->getOperand(i))) { |
4254 | Ops.push_back(Node->getOperand(i)); |
4255 | continue; |
4256 | } |
4257 | |
4258 | SDLoc DL(Node); |
4259 | Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, |
4260 | Node->getOperand(i).getValueType(), |
4261 | Node->getOperand(i)), 0)); |
4262 | } |
4263 | |
4264 | DAG.UpdateNodeOperands(Node, Ops); |
4265 | } |
4266 | |
4267 | /// \brief Fold the instructions after selecting them. |
4268 | SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node, |
4269 | SelectionDAG &DAG) const { |
4270 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
4271 | unsigned Opcode = Node->getMachineOpcode(); |
4272 | |
4273 | if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() && |
4274 | !TII->isGather4(Opcode)) |
4275 | adjustWritemask(Node, DAG); |
4276 | |
4277 | if (Opcode == AMDGPU::INSERT_SUBREG || |
4278 | Opcode == AMDGPU::REG_SEQUENCE) { |
4279 | legalizeTargetIndependentNode(Node, DAG); |
4280 | return Node; |
4281 | } |
4282 | return Node; |
4283 | } |
4284 | |
4285 | /// \brief Assign the register class depending on the number of |
4286 | /// bits set in the writemask |
4287 | void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI, |
4288 | SDNode *Node) const { |
4289 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
4290 | |
4291 | MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); |
4292 | |
4293 | if (TII->isVOP3(MI.getOpcode())) { |
4294 | // Make sure constant bus requirements are respected. |
4295 | TII->legalizeOperandsVOP3(MRI, MI); |
4296 | return; |
4297 | } |
4298 | |
4299 | if (TII->isMIMG(MI)) { |
4300 | unsigned VReg = MI.getOperand(0).getReg(); |
4301 | const TargetRegisterClass *RC = MRI.getRegClass(VReg); |
4302 | // TODO: Need mapping tables to handle other cases (register classes). |
4303 | if (RC != &AMDGPU::VReg_128RegClass) |
4304 | return; |
4305 | |
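     | // Count how many components the dmask actually writes, then shrink the
     | // destination register class and opcode to match.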
4306 | unsigned DmaskIdx = MI.getNumOperands() == 12 ? 3 : 4; |
4307 | unsigned Writemask = MI.getOperand(DmaskIdx).getImm(); |
4308 | unsigned BitsSet = 0; |
4309 | for (unsigned i = 0; i < 4; ++i) |
4310 | BitsSet += Writemask & (1 << i) ? 1 : 0; |
4311 | switch (BitsSet) { |
4312 | default: return; |
4313 | case 1: RC = &AMDGPU::VGPR_32RegClass; break; |
4314 | case 2: RC = &AMDGPU::VReg_64RegClass; break; |
4315 | case 3: RC = &AMDGPU::VReg_96RegClass; break; |
4316 | } |
4317 | |
4318 | unsigned NewOpcode = TII->getMaskedMIMGOp(MI.getOpcode(), BitsSet); |
4319 | MI.setDesc(TII->get(NewOpcode)); |
4320 | MRI.setRegClass(VReg, RC); |
4321 | return; |
4322 | } |
4323 | |
4324 | // Replace unused atomics with the no return version. |
4325 | int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI.getOpcode()); |
4326 | if (NoRetAtomicOp != -1) { |
4327 | if (!Node->hasAnyUseOfValue(0)) { |
4328 | MI.setDesc(TII->get(NoRetAtomicOp)); |
4329 | MI.RemoveOperand(0); |
4330 | return; |
4331 | } |
4332 | |
4333 | // For mubuf_atomic_cmpswap, we need to have tablegen use an extract_subreg |
4334 | // instruction, because the return type of these instructions is a vec2 of |
4335 | // the memory type, so it can be tied to the input operand. |
4336 | // This means these instructions always have a use, so we need to add a |
4337 | // special case to check if the atomic has only one extract_subreg use, |
4338 | // which itself has no uses. |
4339 | if ((Node->hasNUsesOfValue(1, 0) && |
4340 | Node->use_begin()->isMachineOpcode() && |
4341 | Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG && |
4342 | !Node->use_begin()->hasAnyUseOfValue(0))) { |
4343 | unsigned Def = MI.getOperand(0).getReg(); |
4344 | |
4345 | // Change this into a noret atomic. |
4346 | MI.setDesc(TII->get(NoRetAtomicOp)); |
4347 | MI.RemoveOperand(0); |
4348 | |
4349 | // If we only remove the def operand from the atomic instruction, the |
4350 | // extract_subreg will be left with a use of a vreg without a def. |
4351 | // So we need to insert an implicit_def to avoid machine verifier |
4352 | // errors. |
4353 | BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), |
4354 | TII->get(AMDGPU::IMPLICIT_DEF), Def); |
4355 | } |
4356 | return; |
4357 | } |
4358 | } |
4359 | |
4360 | static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL, |
4361 | uint64_t Val) { |
4362 | SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32); |
4363 | return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0); |
4364 | } |
4365 | |
4366 | MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG, |
4367 | const SDLoc &DL, |
4368 | SDValue Ptr) const { |
4369 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
4370 | |
4371 | // Build the half of the register that holds the constants before building
4372 | // the full 128-bit register. If we are building multiple resource
4373 | // descriptors, this will allow CSEing of the 2-component register.
4374 | const SDValue Ops0[] = { |
4375 | DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32), |
4376 | buildSMovImm32(DAG, DL, 0), |
4377 | DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32), |
4378 | buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32), |
4379 | DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32) |
4380 | }; |
4381 | |
4382 | SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, |
4383 | MVT::v2i32, Ops0), 0); |
4384 | |
4385 | // Combine the constants and the pointer. |
4386 | const SDValue Ops1[] = { |
4387 | DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32), |
4388 | Ptr, |
4389 | DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32), |
4390 | SubRegHi, |
4391 | DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32) |
4392 | }; |
4393 | |
4394 | return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1); |
4395 | } |
4396 | |
4397 | /// \brief Return a resource descriptor with the 'Add TID' bit enabled |
4398 | /// The TID (Thread ID) is multiplied by the stride value (bits [61:48] |
4399 | /// of the resource descriptor) to create an offset, which is added to |
4400 | /// the resource pointer. |
4401 | MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL, |
4402 | SDValue Ptr, uint32_t RsrcDword1, |
4403 | uint64_t RsrcDword2And3) const { |
4404 | SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr); |
4405 | SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr); |
4406 | if (RsrcDword1) { |
4407 | PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi, |
4408 | DAG.getConstant(RsrcDword1, DL, MVT::i32)), |
4409 | 0); |
4410 | } |
4411 | |
4412 | SDValue DataLo = buildSMovImm32(DAG, DL, |
4413 | RsrcDword2And3 & UINT64_C(0xFFFFFFFF));
4414 | SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32); |
4415 | |
4416 | const SDValue Ops[] = { |
4417 | DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32), |
4418 | PtrLo, |
4419 | DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32), |
4420 | PtrHi, |
4421 | DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32), |
4422 | DataLo, |
4423 | DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32), |
4424 | DataHi, |
4425 | DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32) |
4426 | }; |
4427 | |
4428 | return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops); |
4429 | } |
4430 | |
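     | // Create the live-in with the generic AMDGPU implementation, then read it
     | // back through a CopyFromReg anchored at the function entry.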
4431 | SDValue SITargetLowering::CreateLiveInRegister(SelectionDAG &DAG, |
4432 | const TargetRegisterClass *RC, |
4433 | unsigned Reg, EVT VT) const { |
4434 | SDValue VReg = AMDGPUTargetLowering::CreateLiveInRegister(DAG, RC, Reg, VT); |
4435 | |
4436 | return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(DAG.getEntryNode()), |
4437 | cast<RegisterSDNode>(VReg)->getReg(), VT); |
4438 | } |
4439 | |
4440 | //===----------------------------------------------------------------------===// |
4441 | // SI Inline Assembly Support |
4442 | //===----------------------------------------------------------------------===// |
4443 | |
4444 | std::pair<unsigned, const TargetRegisterClass *> |
4445 | SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, |
4446 | StringRef Constraint, |
4447 | MVT VT) const { |
4448 | if (!isTypeLegal(VT)) |
4449 | return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); |
4450 | |
4451 | if (Constraint.size() == 1) { |
4452 | switch (Constraint[0]) { |
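     | // 's' and 'r' request a scalar (SGPR) register class; 'v' requests a
     | // vector (VGPR) register class, each sized to match the operand type.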
4453 | case 's': |
4454 | case 'r': |
4455 | switch (VT.getSizeInBits()) { |
4456 | default: |
4457 | return std::make_pair(0U, nullptr); |
4458 | case 32: |
4459 | case 16: |
4460 | return std::make_pair(0U, &AMDGPU::SReg_32_XM0RegClass); |
4461 | case 64: |
4462 | return std::make_pair(0U, &AMDGPU::SGPR_64RegClass); |
4463 | case 128: |
4464 | return std::make_pair(0U, &AMDGPU::SReg_128RegClass); |
4465 | case 256: |
4466 | return std::make_pair(0U, &AMDGPU::SReg_256RegClass); |
4467 | } |
4468 | |
4469 | case 'v': |
4470 | switch (VT.getSizeInBits()) { |
4471 | default: |
4472 | return std::make_pair(0U, nullptr); |
4473 | case 32: |
4474 | case 16: |
4475 | return std::make_pair(0U, &AMDGPU::VGPR_32RegClass); |
4476 | case 64: |
4477 | return std::make_pair(0U, &AMDGPU::VReg_64RegClass); |
4478 | case 96: |
4479 | return std::make_pair(0U, &AMDGPU::VReg_96RegClass); |
4480 | case 128: |
4481 | return std::make_pair(0U, &AMDGPU::VReg_128RegClass); |
4482 | case 256: |
4483 | return std::make_pair(0U, &AMDGPU::VReg_256RegClass); |
4484 | case 512: |
4485 | return std::make_pair(0U, &AMDGPU::VReg_512RegClass); |
4486 | } |
4487 | } |
4488 | } |
4489 | |
4490 | if (Constraint.size() > 1) { |
4491 | const TargetRegisterClass *RC = nullptr; |
4492 | if (Constraint[1] == 'v') { |
4493 | RC = &AMDGPU::VGPR_32RegClass; |
4494 | } else if (Constraint[1] == 's') { |
4495 | RC = &AMDGPU::SGPR_32RegClass; |
4496 | } |
4497 | |
4498 | if (RC) { |
4499 | uint32_t Idx; |
4500 | bool Failed = Constraint.substr(2).getAsInteger(10, Idx); |
4501 | if (!Failed && Idx < RC->getNumRegs()) |
4502 | return std::make_pair(RC->getRegister(Idx), RC); |
4503 | } |
4504 | } |
4505 | return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); |
4506 | } |
4507 | |
4508 | SITargetLowering::ConstraintType |
4509 | SITargetLowering::getConstraintType(StringRef Constraint) const { |
4510 | if (Constraint.size() == 1) { |
4511 | switch (Constraint[0]) { |
4512 | default: break; |
4513 | case 's': |
4514 | case 'v': |
4515 | return C_RegisterClass; |
4516 | } |
4517 | } |
4518 | return TargetLowering::getConstraintType(Constraint); |
4519 | } |