File: | lib/Target/R600/SIISelLowering.cpp |
Location: | line 804, column 5 |
Description: | Value stored to 'BR' is never read |
1 | //===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===// |
2 | // |
3 | // The LLVM Compiler Infrastructure |
4 | // |
5 | // This file is distributed under the University of Illinois Open Source |
6 | // License. See LICENSE.TXT for details. |
7 | // |
8 | //===----------------------------------------------------------------------===// |
9 | // |
10 | /// \file |
11 | /// \brief Custom DAG lowering for SI |
12 | // |
13 | //===----------------------------------------------------------------------===// |
14 | |
15 | #ifdef _MSC_VER |
16 | // Provide M_PI. |
17 | #define _USE_MATH_DEFINES |
18 | #include <cmath> |
19 | #endif |
20 | |
21 | #include "SIISelLowering.h" |
22 | #include "AMDGPU.h" |
23 | #include "AMDGPUIntrinsicInfo.h" |
24 | #include "AMDGPUSubtarget.h" |
25 | #include "SIInstrInfo.h" |
26 | #include "SIMachineFunctionInfo.h" |
27 | #include "SIRegisterInfo.h" |
28 | #include "llvm/ADT/BitVector.h" |
29 | #include "llvm/CodeGen/CallingConvLower.h" |
30 | #include "llvm/CodeGen/MachineInstrBuilder.h" |
31 | #include "llvm/CodeGen/MachineRegisterInfo.h" |
32 | #include "llvm/CodeGen/SelectionDAG.h" |
33 | #include "llvm/IR/Function.h" |
34 | #include "llvm/ADT/SmallString.h" |
35 | |
36 | using namespace llvm; |
37 | |
38 | SITargetLowering::SITargetLowering(TargetMachine &TM, |
39 | const AMDGPUSubtarget &STI) |
40 | : AMDGPUTargetLowering(TM, STI) { |
41 | addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass); |
42 | addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass); |
43 | |
44 | addRegisterClass(MVT::v32i8, &AMDGPU::SReg_256RegClass); |
45 | addRegisterClass(MVT::v64i8, &AMDGPU::SReg_512RegClass); |
46 | |
47 | addRegisterClass(MVT::i32, &AMDGPU::SReg_32RegClass); |
48 | addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass); |
49 | |
50 | addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass); |
51 | addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass); |
52 | addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass); |
53 | |
54 | addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass); |
55 | addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass); |
56 | |
57 | addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass); |
58 | addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass); |
59 | |
60 | addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass); |
61 | addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass); |
62 | |
63 | computeRegisterProperties(); |
64 | |
65 | setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand); |
66 | setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand); |
67 | setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand); |
68 | setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand); |
69 | |
70 | setOperationAction(ISD::ADD, MVT::i32, Legal); |
71 | setOperationAction(ISD::ADDC, MVT::i32, Legal); |
72 | setOperationAction(ISD::ADDE, MVT::i32, Legal); |
73 | setOperationAction(ISD::SUBC, MVT::i32, Legal); |
74 | setOperationAction(ISD::SUBE, MVT::i32, Legal); |
75 | |
76 | setOperationAction(ISD::FSIN, MVT::f32, Custom); |
77 | setOperationAction(ISD::FCOS, MVT::f32, Custom); |
78 | |
79 | setOperationAction(ISD::FMINNUM, MVT::f32, Legal); |
80 | setOperationAction(ISD::FMAXNUM, MVT::f32, Legal); |
81 | setOperationAction(ISD::FMINNUM, MVT::f64, Legal); |
82 | setOperationAction(ISD::FMAXNUM, MVT::f64, Legal); |
83 | |
84 | // We need to custom lower vector loads and stores from local memory |
85 | setOperationAction(ISD::LOAD, MVT::v4i32, Custom); |
86 | setOperationAction(ISD::LOAD, MVT::v8i32, Custom); |
87 | setOperationAction(ISD::LOAD, MVT::v16i32, Custom); |
88 | |
89 | setOperationAction(ISD::STORE, MVT::v8i32, Custom); |
90 | setOperationAction(ISD::STORE, MVT::v16i32, Custom); |
91 | |
92 | setOperationAction(ISD::STORE, MVT::i1, Custom); |
93 | setOperationAction(ISD::STORE, MVT::v4i32, Custom); |
94 | |
95 | setOperationAction(ISD::SELECT, MVT::i64, Custom); |
96 | setOperationAction(ISD::SELECT, MVT::f64, Promote); |
97 | AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64); |
98 | |
99 | setOperationAction(ISD::SELECT_CC, MVT::f32, Expand); |
100 | setOperationAction(ISD::SELECT_CC, MVT::i32, Expand); |
101 | setOperationAction(ISD::SELECT_CC, MVT::i64, Expand); |
102 | setOperationAction(ISD::SELECT_CC, MVT::f64, Expand); |
103 | |
104 | setOperationAction(ISD::SETCC, MVT::v2i1, Expand); |
105 | setOperationAction(ISD::SETCC, MVT::v4i1, Expand); |
106 | |
107 | setOperationAction(ISD::BSWAP, MVT::i32, Legal); |
108 | |
109 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Legal); |
110 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom); |
111 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom); |
112 | |
113 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Legal); |
114 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom); |
115 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom); |
116 | |
117 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal); |
118 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom); |
119 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom); |
120 | |
121 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal); |
122 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom); |
123 | |
124 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); |
125 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom); |
126 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v16i8, Custom); |
127 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom); |
128 | |
129 | setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom); |
130 | setOperationAction(ISD::BRCOND, MVT::Other, Custom); |
131 | |
132 | for (MVT VT : MVT::integer_valuetypes()) { |
133 | if (VT == MVT::i64) |
134 | continue; |
135 | |
136 | setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote); |
137 | setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Legal); |
138 | setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Legal); |
139 | setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand); |
140 | |
141 | setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote); |
142 | setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i8, Legal); |
143 | setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Legal); |
144 | setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i32, Expand); |
145 | |
146 | setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote); |
147 | setLoadExtAction(ISD::EXTLOAD, VT, MVT::i8, Legal); |
148 | setLoadExtAction(ISD::EXTLOAD, VT, MVT::i16, Legal); |
149 | setLoadExtAction(ISD::EXTLOAD, VT, MVT::i32, Expand); |
150 | } |
151 | |
152 | for (MVT VT : MVT::integer_vector_valuetypes()) { |
153 | setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v8i16, Expand); |
154 | setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v16i16, Expand); |
155 | } |
156 | |
157 | for (MVT VT : MVT::fp_valuetypes()) |
158 | setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand); |
159 | |
160 | setTruncStoreAction(MVT::f64, MVT::f32, Expand); |
161 | setTruncStoreAction(MVT::i64, MVT::i32, Expand); |
162 | setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand); |
163 | setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand); |
164 | |
165 | setOperationAction(ISD::LOAD, MVT::i1, Custom); |
166 | |
167 | setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); |
168 | setOperationAction(ISD::GlobalAddress, MVT::i64, Custom); |
169 | setOperationAction(ISD::FrameIndex, MVT::i32, Custom); |
170 | |
171 | // These should use UDIVREM, so set them to expand |
172 | setOperationAction(ISD::UDIV, MVT::i64, Expand); |
173 | setOperationAction(ISD::UREM, MVT::i64, Expand); |
174 | |
175 | // We only support LOAD/STORE and vector manipulation ops for vectors |
176 | // with > 4 elements. |
177 | MVT VecTypes[] = { |
178 | MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32 |
179 | }; |
180 | |
181 | setOperationAction(ISD::SELECT_CC, MVT::i1, Expand); |
182 | setOperationAction(ISD::SELECT, MVT::i1, Promote); |
183 | |
184 | for (MVT VT : VecTypes) { |
185 | for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) { |
186 | switch(Op) { |
187 | case ISD::LOAD: |
188 | case ISD::STORE: |
189 | case ISD::BUILD_VECTOR: |
190 | case ISD::BITCAST: |
191 | case ISD::EXTRACT_VECTOR_ELT: |
192 | case ISD::INSERT_VECTOR_ELT: |
193 | case ISD::INSERT_SUBVECTOR: |
194 | case ISD::EXTRACT_SUBVECTOR: |
195 | break; |
196 | case ISD::CONCAT_VECTORS: |
197 | setOperationAction(Op, VT, Custom); |
198 | break; |
199 | default: |
200 | setOperationAction(Op, VT, Expand); |
201 | break; |
202 | } |
203 | } |
204 | } |
205 | |
206 | if (Subtarget->getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS) { |
207 | setOperationAction(ISD::FTRUNC, MVT::f64, Legal); |
208 | setOperationAction(ISD::FCEIL, MVT::f64, Legal); |
209 | setOperationAction(ISD::FFLOOR, MVT::f64, Legal); |
210 | setOperationAction(ISD::FRINT, MVT::f64, Legal); |
211 | } |
212 | |
213 | setOperationAction(ISD::FDIV, MVT::f32, Custom); |
214 | |
215 | setTargetDAGCombine(ISD::FADD); |
216 | setTargetDAGCombine(ISD::FSUB); |
217 | setTargetDAGCombine(ISD::FMINNUM); |
218 | setTargetDAGCombine(ISD::FMAXNUM); |
219 | setTargetDAGCombine(ISD::SELECT_CC); |
220 | setTargetDAGCombine(ISD::SETCC); |
221 | setTargetDAGCombine(ISD::AND); |
222 | setTargetDAGCombine(ISD::OR); |
223 | setTargetDAGCombine(ISD::UINT_TO_FP); |
224 | |
225 | // All memory operations. Some folding on the pointer operand is done to help |
226 | // match the constant offsets in the addressing modes. |
227 | setTargetDAGCombine(ISD::LOAD); |
228 | setTargetDAGCombine(ISD::STORE); |
229 | setTargetDAGCombine(ISD::ATOMIC_LOAD); |
230 | setTargetDAGCombine(ISD::ATOMIC_STORE); |
231 | setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP); |
232 | setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS); |
233 | setTargetDAGCombine(ISD::ATOMIC_SWAP); |
234 | setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD); |
235 | setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB); |
236 | setTargetDAGCombine(ISD::ATOMIC_LOAD_AND); |
237 | setTargetDAGCombine(ISD::ATOMIC_LOAD_OR); |
238 | setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR); |
239 | setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND); |
240 | setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN); |
241 | setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX); |
242 | setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN); |
243 | setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX); |
244 | |
245 | setSchedulingPreference(Sched::RegPressure); |
246 | } |
247 | |
248 | //===----------------------------------------------------------------------===// |
249 | // TargetLowering queries |
250 | //===----------------------------------------------------------------------===// |
251 | |
252 | bool SITargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &, |
253 | EVT) const { |
254 | // SI has some legal vector types, but no legal vector operations. Say no |
255 | // shuffles are legal in order to prefer scalarizing some vector operations. |
256 | return false; |
257 | } |
258 | |
259 | // FIXME: This really needs an address space argument. The immediate offset |
260 | // size is different for different sets of memory instruction sets. |
261 | |
262 | // The single offset DS instructions have a 16-bit unsigned byte offset. |
263 | // |
264 | // MUBUF / MTBUF have a 12-bit unsigned byte offset, and additionally can do r + |
265 | // r + i with addr64. 32-bit has more addressing mode options. Depending on the |
266 | // resource constant, it can also do (i64 r0) + (i32 r1) * (i14 i). |
267 | // |
268 | // SMRD instructions have an 8-bit, dword offset. |
269 | // |
270 | bool SITargetLowering::isLegalAddressingMode(const AddrMode &AM, |
271 | Type *Ty) const { |
272 | // No global is ever allowed as a base. |
273 | if (AM.BaseGV) |
274 | return false; |
275 | |
276 | // Allow a 16-bit unsigned immediate field, since this is what DS instructions |
277 | // use. |
278 | if (!isUInt<16>(AM.BaseOffs)) |
279 | return false; |
280 | |
281 | // Only support r+r, |
282 | switch (AM.Scale) { |
283 | case 0: // "r+i" or just "i", depending on HasBaseReg. |
284 | break; |
285 | case 1: |
286 | if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed. |
287 | return false; |
288 | // Otherwise we have r+r or r+i. |
289 | break; |
290 | case 2: |
291 | if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed. |
292 | return false; |
293 | // Allow 2*r as r+r. |
294 | break; |
295 | default: // Don't allow n * r |
296 | return false; |
297 | } |
298 | |
299 | return true; |
300 | } |
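// Worked example (illustrative, not from this file): AM.Scale == 0 with |
// AM.BaseOffs == 0xFFFF is accepted (a 16-bit DS-style immediate), while |
// AM.BaseOffs == 0x10000 fails the isUInt<16> check. AM.Scale == 1 with both |
// a base register and a nonzero offset is rejected, since that would require |
// the unsupported r+r+i form. |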
301 | |
302 | bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT, |
303 | unsigned AddrSpace, |
304 | unsigned Align, |
305 | bool *IsFast) const { |
306 | if (IsFast) |
307 | *IsFast = false; |
308 | |
309 | // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96, |
310 | // which isn't a simple VT. |
311 | if (!VT.isSimple() || VT == MVT::Other) |
312 | return false; |
313 | |
314 | // TODO - CI+ supports unaligned memory accesses, but this requires driver |
315 | // support. |
316 | |
317 | // XXX - The only mention I see of this in the ISA manual is for LDS direct |
318 | // reads, where the byte address "must be dword aligned". Is it also true for |
319 | // the normal loads and stores? |
320 | if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS) { |
321 | // ds_read/write_b64 require 8-byte alignment, but we can do a 4-byte |
322 | // aligned, 8-byte access in a single operation using ds_read2/write2_b32 |
323 | // with adjacent offsets. |
324 | return Align % 4 == 0; |
325 | } |
326 | |
327 | // Values smaller than a dword must be aligned. |
328 | // FIXME: This should be allowed on CI+ |
329 | if (VT.bitsLT(MVT::i32)) |
330 | return false; |
331 | |
332 | // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the |
333 | // byte-address are ignored, thus forcing Dword alignment. |
334 | // This applies to private, global, and constant memory. |
335 | if (IsFast) |
336 | *IsFast = true; |
337 | |
338 | return VT.bitsGT(MVT::i32) && Align % 4 == 0; |
339 | } |
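// Illustrative consequence of the rules above (a sketch, not exhaustive): an |
// 8-byte LDS access with only 4-byte alignment is still reported legal, since |
// it can be emitted as ds_read2_b32 / ds_write2_b32 over two adjacent dwords, |
// while a sub-dword access (e.g. i16) in any other address space is rejected. |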
340 | |
341 | EVT SITargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign, |
342 | unsigned SrcAlign, bool IsMemset, |
343 | bool ZeroMemset, |
344 | bool MemcpyStrSrc, |
345 | MachineFunction &MF) const { |
346 | // FIXME: Should account for address space here. |
347 | |
348 | // The default fallback uses the private pointer size as a guess for a type to |
349 | // use. Make sure we switch these to 64-bit accesses. |
350 | |
351 | if (Size >= 16 && DstAlign >= 4) // XXX: Should only do for global |
352 | return MVT::v4i32; |
353 | |
354 | if (Size >= 8 && DstAlign >= 4) |
355 | return MVT::v2i32; |
356 | |
357 | // Use the default. |
358 | return MVT::Other; |
359 | } |
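// Example of the thresholds above: a 16-byte memcpy with a dword-aligned |
// destination is widened to v4i32 accesses, an 8-byte one to v2i32, and |
// anything smaller or less aligned falls back to the generic choice |
// (MVT::Other). |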
360 | |
361 | TargetLoweringBase::LegalizeTypeAction |
362 | SITargetLowering::getPreferredVectorAction(EVT VT) const { |
363 | if (VT.getVectorNumElements() != 1 && VT.getScalarType().bitsLE(MVT::i16)) |
364 | return TypeSplitVector; |
365 | |
366 | return TargetLoweringBase::getPreferredVectorAction(VT); |
367 | } |
368 | |
369 | bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, |
370 | Type *Ty) const { |
371 | const SIInstrInfo *TII = |
372 | static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo()); |
373 | return TII->isInlineConstant(Imm); |
374 | } |
375 | |
376 | SDValue SITargetLowering::LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT, |
377 | SDLoc SL, SDValue Chain, |
378 | unsigned Offset, bool Signed) const { |
379 | const DataLayout *DL = getDataLayout(); |
380 | MachineFunction &MF = DAG.getMachineFunction(); |
381 | const SIRegisterInfo *TRI = |
382 | static_cast<const SIRegisterInfo*>(Subtarget->getRegisterInfo()); |
383 | unsigned InputPtrReg = TRI->getPreloadedValue(MF, SIRegisterInfo::INPUT_PTR); |
384 | |
385 | Type *Ty = VT.getTypeForEVT(*DAG.getContext()); |
386 | |
387 | MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); |
388 | PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS); |
389 | SDValue BasePtr = DAG.getCopyFromReg(Chain, SL, |
390 | MRI.getLiveInVirtReg(InputPtrReg), MVT::i64); |
391 | SDValue Ptr = DAG.getNode(ISD::ADD, SL, MVT::i64, BasePtr, |
392 | DAG.getConstant(Offset, MVT::i64)); |
393 | SDValue PtrOffset = DAG.getUNDEF(getPointerTy(AMDGPUAS::CONSTANT_ADDRESS)); |
394 | MachinePointerInfo PtrInfo(UndefValue::get(PtrTy)); |
395 | |
396 | return DAG.getLoad(ISD::UNINDEXED, Signed ? ISD::SEXTLOAD : ISD::ZEXTLOAD, |
397 | VT, SL, Chain, Ptr, PtrOffset, PtrInfo, MemVT, |
398 | false, // isVolatile |
399 | true, // isNonTemporal |
400 | true, // isInvariant |
401 | DL->getABITypeAlignment(Ty)); // Alignment |
402 | } |
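// Net effect (a sketch of the emitted DAG): a kernel argument at byte offset |
// 'Offset' becomes an invariant extending load from the constant address |
// space at INPUT_PTR + Offset, sign- or zero-extended according to 'Signed'. |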
403 | |
404 | SDValue SITargetLowering::LowerFormalArguments( |
405 | SDValue Chain, CallingConv::ID CallConv, bool isVarArg, |
406 | const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL, SelectionDAG &DAG, |
407 | SmallVectorImpl<SDValue> &InVals) const { |
408 | const SIRegisterInfo *TRI = |
409 | static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo()); |
410 | |
411 | MachineFunction &MF = DAG.getMachineFunction(); |
412 | FunctionType *FType = MF.getFunction()->getFunctionType(); |
413 | SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
414 | |
415 | assert(CallConv == CallingConv::C); |
416 | |
417 | SmallVector<ISD::InputArg, 16> Splits; |
418 | BitVector Skipped(Ins.size()); |
419 | |
420 | for (unsigned i = 0, e = Ins.size(), PSInputNum = 0; i != e; ++i) { |
421 | const ISD::InputArg &Arg = Ins[i]; |
422 | |
423 | // First, check if it's a PS input addr |
424 | if (Info->getShaderType() == ShaderType::PIXEL && !Arg.Flags.isInReg() && |
425 | !Arg.Flags.isByVal()) { |
426 | |
427 | assert((PSInputNum <= 15) && "Too many PS inputs!"); |
428 | |
429 | if (!Arg.Used) { |
430 | // We can safely skip PS inputs |
431 | Skipped.set(i); |
432 | ++PSInputNum; |
433 | continue; |
434 | } |
435 | |
436 | Info->PSInputAddr |= 1 << PSInputNum++; |
437 | } |
438 | |
439 | // Second, split vertices into their elements |
440 | if (Info->getShaderType() != ShaderType::COMPUTE && Arg.VT.isVector()) { |
441 | ISD::InputArg NewArg = Arg; |
442 | NewArg.Flags.setSplit(); |
443 | NewArg.VT = Arg.VT.getVectorElementType(); |
444 | |
445 | // We REALLY want the ORIGINAL number of vertex elements here, e.g. a |
446 | // three or five element vertex only needs three or five registers, |
447 | // NOT four or eight. |
448 | Type *ParamType = FType->getParamType(Arg.OrigArgIndex); |
449 | unsigned NumElements = ParamType->getVectorNumElements(); |
450 | |
451 | for (unsigned j = 0; j != NumElements; ++j) { |
452 | Splits.push_back(NewArg); |
453 | NewArg.PartOffset += NewArg.VT.getStoreSize(); |
454 | } |
455 | |
456 | } else if (Info->getShaderType() != ShaderType::COMPUTE) { |
457 | Splits.push_back(Arg); |
458 | } |
459 | } |
460 | |
461 | SmallVector<CCValAssign, 16> ArgLocs; |
462 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, |
463 | *DAG.getContext()); |
464 | |
465 | // At least one interpolation mode must be enabled or else the GPU will hang. |
466 | if (Info->getShaderType() == ShaderType::PIXEL && |
467 | (Info->PSInputAddr & 0x7F) == 0) { |
468 | Info->PSInputAddr |= 1; |
469 | CCInfo.AllocateReg(AMDGPU::VGPR0); |
470 | CCInfo.AllocateReg(AMDGPU::VGPR1); |
471 | } |
472 | |
473 | // The pointer to the list of arguments is stored in SGPR0, SGPR1 |
474 | // The pointer to the scratch buffer is stored in SGPR2, SGPR3 |
475 | if (Info->getShaderType() == ShaderType::COMPUTE) { |
476 | if (Subtarget->isAmdHsaOS()) |
477 | Info->NumUserSGPRs = 2; // FIXME: Need to support scratch buffers. |
478 | else |
479 | Info->NumUserSGPRs = 4; |
480 | |
481 | unsigned InputPtrReg = |
482 | TRI->getPreloadedValue(MF, SIRegisterInfo::INPUT_PTR); |
483 | unsigned InputPtrRegLo = |
484 | TRI->getPhysRegSubReg(InputPtrReg, &AMDGPU::SReg_32RegClass, 0); |
485 | unsigned InputPtrRegHi = |
486 | TRI->getPhysRegSubReg(InputPtrReg, &AMDGPU::SReg_32RegClass, 1); |
487 | |
488 | unsigned ScratchPtrReg = |
489 | TRI->getPreloadedValue(MF, SIRegisterInfo::SCRATCH_PTR); |
490 | unsigned ScratchPtrRegLo = |
491 | TRI->getPhysRegSubReg(ScratchPtrReg, &AMDGPU::SReg_32RegClass, 0); |
492 | unsigned ScratchPtrRegHi = |
493 | TRI->getPhysRegSubReg(ScratchPtrReg, &AMDGPU::SReg_32RegClass, 1); |
494 | |
495 | CCInfo.AllocateReg(InputPtrRegLo); |
496 | CCInfo.AllocateReg(InputPtrRegHi); |
497 | CCInfo.AllocateReg(ScratchPtrRegLo); |
498 | CCInfo.AllocateReg(ScratchPtrRegHi); |
499 | MF.addLiveIn(InputPtrReg, &AMDGPU::SReg_64RegClass); |
500 | MF.addLiveIn(ScratchPtrReg, &AMDGPU::SReg_64RegClass); |
501 | } |
502 | |
503 | if (Info->getShaderType() == ShaderType::COMPUTE) { |
504 | getOriginalFunctionArgs(DAG, DAG.getMachineFunction().getFunction(), Ins, |
505 | Splits); |
506 | } |
507 | |
508 | AnalyzeFormalArguments(CCInfo, Splits); |
509 | |
510 | for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) { |
511 | |
512 | const ISD::InputArg &Arg = Ins[i]; |
513 | if (Skipped[i]) { |
514 | InVals.push_back(DAG.getUNDEF(Arg.VT)); |
515 | continue; |
516 | } |
517 | |
518 | CCValAssign &VA = ArgLocs[ArgIdx++]; |
519 | MVT VT = VA.getLocVT(); |
520 | |
521 | if (VA.isMemLoc()) { |
522 | VT = Ins[i].VT; |
523 | EVT MemVT = Splits[i].VT; |
524 | const unsigned Offset = 36 + VA.getLocMemOffset(); |
525 | // The first 36 bytes of the input buffer contain information about |
526 | // thread group and global sizes. |
527 | SDValue Arg = LowerParameter(DAG, VT, MemVT, DL, DAG.getRoot(), |
528 | Offset, Ins[i].Flags.isSExt()); |
529 | |
530 | const PointerType *ParamTy = |
531 | dyn_cast<PointerType>(FType->getParamType(Ins[i].OrigArgIndex)); |
532 | if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS && |
533 | ParamTy && ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) { |
534 | // On SI local pointers are just offsets into LDS, so they are always |
535 | // less than 16-bits. On CI and newer they could potentially be |
536 | // real pointers, so we can't guarantee their size. |
537 | Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg, |
538 | DAG.getValueType(MVT::i16)); |
539 | } |
540 | |
541 | InVals.push_back(Arg); |
542 | Info->ABIArgOffset = Offset + MemVT.getStoreSize(); |
543 | continue; |
544 | } |
545 | assert(VA.isRegLoc() && "Parameter must be in a register!"); |
546 | |
547 | unsigned Reg = VA.getLocReg(); |
548 | |
549 | if (VT == MVT::i64) { |
550 | // For now assume it is a pointer |
551 | Reg = TRI->getMatchingSuperReg(Reg, AMDGPU::sub0, |
552 | &AMDGPU::SReg_64RegClass); |
553 | Reg = MF.addLiveIn(Reg, &AMDGPU::SReg_64RegClass); |
554 | InVals.push_back(DAG.getCopyFromReg(Chain, DL, Reg, VT)); |
555 | continue; |
556 | } |
557 | |
558 | const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT); |
559 | |
560 | Reg = MF.addLiveIn(Reg, RC); |
561 | SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT); |
562 | |
563 | if (Arg.VT.isVector()) { |
564 | |
565 | // Build a vector from the registers |
566 | Type *ParamType = FType->getParamType(Arg.OrigArgIndex); |
567 | unsigned NumElements = ParamType->getVectorNumElements(); |
568 | |
569 | SmallVector<SDValue, 4> Regs; |
570 | Regs.push_back(Val); |
571 | for (unsigned j = 1; j != NumElements; ++j) { |
572 | Reg = ArgLocs[ArgIdx++].getLocReg(); |
573 | Reg = MF.addLiveIn(Reg, RC); |
574 | Regs.push_back(DAG.getCopyFromReg(Chain, DL, Reg, VT)); |
575 | } |
576 | |
577 | // Fill up the missing vector elements |
578 | NumElements = Arg.VT.getVectorNumElements() - NumElements; |
579 | for (unsigned j = 0; j != NumElements; ++j) |
580 | Regs.push_back(DAG.getUNDEF(VT)); |
581 | |
582 | InVals.push_back(DAG.getNode(ISD::BUILD_VECTOR, DL, Arg.VT, Regs)); |
583 | continue; |
584 | } |
585 | |
586 | InVals.push_back(Val); |
587 | } |
588 | |
589 | if (Info->getShaderType() != ShaderType::COMPUTE) { |
590 | unsigned ScratchIdx = CCInfo.getFirstUnallocated( |
591 | AMDGPU::SGPR_32RegClass.begin(), AMDGPU::SGPR_32RegClass.getNumRegs()); |
592 | Info->ScratchOffsetReg = AMDGPU::SGPR_32RegClass.getRegister(ScratchIdx); |
593 | } |
594 | return Chain; |
595 | } |
596 | |
597 | MachineBasicBlock * SITargetLowering::EmitInstrWithCustomInserter( |
598 | MachineInstr * MI, MachineBasicBlock * BB) const { |
599 | |
600 | MachineBasicBlock::iterator I = *MI; |
601 | const SIInstrInfo *TII = |
602 | static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo()); |
603 | |
604 | switch (MI->getOpcode()) { |
605 | default: |
606 | return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB); |
607 | case AMDGPU::BRANCH: return BB; |
608 | case AMDGPU::V_SUB_F64: { |
609 | unsigned DestReg = MI->getOperand(0).getReg(); |
610 | BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::V_ADD_F64), DestReg) |
611 | .addImm(0) // SRC0 modifiers |
612 | .addReg(MI->getOperand(1).getReg()) |
613 | .addImm(1) // SRC1 modifiers |
614 | .addReg(MI->getOperand(2).getReg()) |
615 | .addImm(0) // CLAMP |
616 | .addImm(0); // OMOD |
617 | MI->eraseFromParent(); |
618 | break; |
619 | } |
620 | case AMDGPU::SI_RegisterStorePseudo: { |
621 | MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); |
622 | unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); |
623 | MachineInstrBuilder MIB = |
624 | BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::SI_RegisterStore), |
625 | Reg); |
626 | for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) |
627 | MIB.addOperand(MI->getOperand(i)); |
628 | |
629 | MI->eraseFromParent(); |
630 | break; |
631 | } |
632 | } |
633 | return BB; |
634 | } |
635 | |
636 | bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const { |
637 | // This currently forces unfolding various combinations of fsub into fma with |
638 | // free fneg'd operands. As long as we have fast FMA (controlled by |
639 | // isFMAFasterThanFMulAndFAdd), we should perform these. |
640 | |
641 | // When fma is quarter rate, for f64 where add / sub are at best half rate, |
642 | // most of these combines appear to be cycle neutral but save on instruction |
643 | // count / code size. |
644 | return true; |
645 | } |
646 | |
647 | EVT SITargetLowering::getSetCCResultType(LLVMContext &Ctx, EVT VT) const { |
648 | if (!VT.isVector()) { |
649 | return MVT::i1; |
650 | } |
651 | return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements()); |
652 | } |
653 | |
654 | MVT SITargetLowering::getScalarShiftAmountTy(EVT VT) const { |
655 | return MVT::i32; |
656 | } |
657 | |
658 | // Answering this is somewhat tricky and depends on the specific device, as |
659 | // different devices have different rates for fma or all f64 operations. |
660 | // |
661 | // v_fma_f64 and v_mul_f64 always take the same number of cycles as each other |
662 | // regardless of which device (although the number of cycles differs between |
663 | // devices), so it is always profitable for f64. |
664 | // |
665 | // v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable |
666 | // only on full rate devices. Normally, we should prefer selecting v_mad_f32 |
667 | // which we can always do even without fused FP ops since it returns the same |
668 | // result as the separate operations and since it is always full |
669 | // rate. Therefore, we lie and report that it is not faster for f32. v_mad_f32 |
670 | // however does not support denormals, so we do report fma as faster if we have |
671 | // a fast fma device and require denormals. |
672 | // |
673 | bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const { |
674 | VT = VT.getScalarType(); |
675 | |
676 | if (!VT.isSimple()) |
677 | return false; |
678 | |
679 | switch (VT.getSimpleVT().SimpleTy) { |
680 | case MVT::f32: |
681 | // This is as fast on some subtargets. However, we always have full rate f32 |
682 | // mad available, which returns the same result as the separate operations, |
683 | // so we should prefer it over fma. |
684 | return false; |
685 | case MVT::f64: |
686 | return true; |
687 | default: |
688 | break; |
689 | } |
690 | |
691 | return false; |
692 | } |
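// In practice this means fma is only reported faster for f64, e.g. |
// isFMAFasterThanFMulAndFAdd(MVT::f64) == true but f32 returns false, so |
// full rate v_mad_f32 keeps being selected for f32. |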
693 | |
694 | //===----------------------------------------------------------------------===// |
695 | // Custom DAG Lowering Operations |
696 | //===----------------------------------------------------------------------===// |
697 | |
698 | SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { |
699 | switch (Op.getOpcode()) { |
700 | default: return AMDGPUTargetLowering::LowerOperation(Op, DAG); |
701 | case ISD::FrameIndex: return LowerFrameIndex(Op, DAG); |
702 | case ISD::BRCOND: return LowerBRCOND(Op, DAG); |
703 | case ISD::LOAD: { |
704 | SDValue Result = LowerLOAD(Op, DAG); |
705 | assert((!Result.getNode() || |
706 | Result.getNode()->getNumValues() == 2) && |
707 | "Load should return a value and a chain"); |
708 | return Result; |
709 | } |
710 | |
711 | case ISD::FSIN: |
712 | case ISD::FCOS: |
713 | return LowerTrig(Op, DAG); |
714 | case ISD::SELECT: return LowerSELECT(Op, DAG); |
715 | case ISD::FDIV: return LowerFDIV(Op, DAG); |
716 | case ISD::STORE: return LowerSTORE(Op, DAG); |
717 | case ISD::GlobalAddress: { |
718 | MachineFunction &MF = DAG.getMachineFunction(); |
719 | SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); |
720 | return LowerGlobalAddress(MFI, Op, DAG); |
721 | } |
722 | case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); |
723 | case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG); |
724 | } |
725 | return SDValue(); |
726 | } |
727 | |
728 | /// \brief Helper function for LowerBRCOND |
729 | static SDNode *findUser(SDValue Value, unsigned Opcode) { |
730 | |
731 | SDNode *Parent = Value.getNode(); |
732 | for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end(); |
733 | I != E; ++I) { |
734 | |
735 | if (I.getUse().get() != Value) |
736 | continue; |
737 | |
738 | if (I->getOpcode() == Opcode) |
739 | return *I; |
740 | } |
741 | return nullptr; |
742 | } |
743 | |
744 | SDValue SITargetLowering::LowerFrameIndex(SDValue Op, SelectionDAG &DAG) const { |
745 | |
746 | FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Op); |
747 | unsigned FrameIndex = FINode->getIndex(); |
748 | |
749 | return DAG.getTargetFrameIndex(FrameIndex, MVT::i32); |
750 | } |
751 | |
752 | /// This transforms the control flow intrinsics to get the branch destination as |
753 | /// last parameter; it also switches the branch target with BR if the need arises |
754 | SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND, |
755 | SelectionDAG &DAG) const { |
756 | |
757 | SDLoc DL(BRCOND); |
758 | |
759 | SDNode *Intr = BRCOND.getOperand(1).getNode(); |
760 | SDValue Target = BRCOND.getOperand(2); |
761 | SDNode *BR = nullptr; |
762 | |
763 | if (Intr->getOpcode() == ISD::SETCC) { |
764 | // As long as we negate the condition everything is fine |
765 | SDNode *SetCC = Intr; |
766 | assert(SetCC->getConstantOperandVal(1) == 1); |
767 | assert(cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() == |
768 | ISD::SETNE); |
769 | Intr = SetCC->getOperand(0).getNode(); |
770 | |
771 | } else { |
772 | // Get the target from BR if we don't negate the condition |
773 | BR = findUser(BRCOND, ISD::BR); |
774 | Target = BR->getOperand(1); |
775 | } |
776 | |
777 | assert(Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN); |
778 | |
779 | // Build the list of result value types |
780 | SmallVector<EVT, 4> Res; |
781 | for (unsigned i = 1, e = Intr->getNumValues(); i != e; ++i) |
782 | Res.push_back(Intr->getValueType(i)); |
783 | |
784 | // operands of the new intrinsic call |
785 | SmallVector<SDValue, 4> Ops; |
786 | Ops.push_back(BRCOND.getOperand(0)); |
787 | for (unsigned i = 1, e = Intr->getNumOperands(); i != e; ++i) |
788 | Ops.push_back(Intr->getOperand(i)); |
789 | Ops.push_back(Target); |
790 | |
791 | // build the new intrinsic call |
792 | SDNode *Result = DAG.getNode( |
793 | Res.size() > 1 ? ISD::INTRINSIC_W_CHAIN : ISD::INTRINSIC_VOID, DL, |
794 | DAG.getVTList(Res), Ops).getNode(); |
795 | |
796 | if (BR) { |
797 | // Give the branch instruction our target |
798 | SDValue Ops[] = { |
799 | BR->getOperand(0), |
800 | BRCOND.getOperand(2) |
801 | }; |
802 | SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops); |
803 | DAG.ReplaceAllUsesWith(BR, NewBR.getNode()); |
804 | BR = NewBR.getNode(); |
Value stored to 'BR' is never read | |
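// As the checker notes, this assignment is dead: BR is not read again after |
// this point, so the update could simply be dropped without changing |
// behavior. |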
805 | } |
806 | |
807 | SDValue Chain = SDValue(Result, Result->getNumValues() - 1); |
808 | |
809 | // Copy the intrinsic results to registers |
810 | for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) { |
811 | SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg); |
812 | if (!CopyToReg) |
813 | continue; |
814 | |
815 | Chain = DAG.getCopyToReg( |
816 | Chain, DL, |
817 | CopyToReg->getOperand(1), |
818 | SDValue(Result, i - 1), |
819 | SDValue()); |
820 | |
821 | DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0)); |
822 | } |
823 | |
824 | // Remove the old intrinsic from the chain |
825 | DAG.ReplaceAllUsesOfValueWith( |
826 | SDValue(Intr, Intr->getNumValues() - 1), |
827 | Intr->getOperand(0)); |
828 | |
829 | return Chain; |
830 | } |
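// Overall effect (sketch): the intrinsic feeding the BRCOND is rebuilt with |
// the branch destination appended as its last operand, CopyToReg users of |
// its old results are rewired onto the new node, and the old intrinsic is |
// unhooked from the chain. |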
831 | |
832 | SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI, |
833 | SDValue Op, |
834 | SelectionDAG &DAG) const { |
835 | GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op); |
836 | |
837 | if (GSD->getAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS) |
838 | return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG); |
839 | |
840 | SDLoc DL(GSD); |
841 | const GlobalValue *GV = GSD->getGlobal(); |
842 | MVT PtrVT = getPointerTy(GSD->getAddressSpace()); |
843 | |
844 | SDValue Ptr = DAG.getNode(AMDGPUISD::CONST_DATA_PTR, DL, PtrVT); |
845 | SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32); |
846 | |
847 | SDValue PtrLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Ptr, |
848 | DAG.getConstant(0, MVT::i32)); |
849 | SDValue PtrHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Ptr, |
850 | DAG.getConstant(1, MVT::i32)); |
851 | |
852 | SDValue Lo = DAG.getNode(ISD::ADDC, DL, DAG.getVTList(MVT::i32, MVT::Glue), |
853 | PtrLo, GA); |
854 | SDValue Hi = DAG.getNode(ISD::ADDE, DL, DAG.getVTList(MVT::i32, MVT::Glue), |
855 | PtrHi, DAG.getConstant(0, MVT::i32), |
856 | SDValue(Lo.getNode(), 1)); |
857 | return DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Lo, Hi); |
858 | } |
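// Shape of the emitted DAG (sketch): the 64-bit constant-data base pointer is |
// split into i32 halves; the 32-bit global address is added to the low half |
// with ADDC, the carry is propagated into the high half with ADDE, and the |
// halves are repacked into an i64 with BUILD_PAIR. |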
859 | |
860 | SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, |
861 | SelectionDAG &DAG) const { |
862 | MachineFunction &MF = DAG.getMachineFunction(); |
863 | const SIRegisterInfo *TRI = |
864 | static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo()); |
865 | |
866 | EVT VT = Op.getValueType(); |
867 | SDLoc DL(Op); |
868 | unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); |
869 | |
870 | switch (IntrinsicID) { |
871 | case Intrinsic::r600_read_ngroups_x: |
872 | return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
873 | SI::KernelInputOffsets::NGROUPS_X, false); |
874 | case Intrinsic::r600_read_ngroups_y: |
875 | return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
876 | SI::KernelInputOffsets::NGROUPS_Y, false); |
877 | case Intrinsic::r600_read_ngroups_z: |
878 | return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
879 | SI::KernelInputOffsets::NGROUPS_Z, false); |
880 | case Intrinsic::r600_read_global_size_x: |
881 | return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
882 | SI::KernelInputOffsets::GLOBAL_SIZE_X, false); |
883 | case Intrinsic::r600_read_global_size_y: |
884 | return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
885 | SI::KernelInputOffsets::GLOBAL_SIZE_Y, false); |
886 | case Intrinsic::r600_read_global_size_z: |
887 | return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
888 | SI::KernelInputOffsets::GLOBAL_SIZE_Z, false); |
889 | case Intrinsic::r600_read_local_size_x: |
890 | return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
891 | SI::KernelInputOffsets::LOCAL_SIZE_X, false); |
892 | case Intrinsic::r600_read_local_size_y: |
893 | return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
894 | SI::KernelInputOffsets::LOCAL_SIZE_Y, false); |
895 | case Intrinsic::r600_read_local_size_z: |
896 | return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
897 | SI::KernelInputOffsets::LOCAL_SIZE_Z, false); |
898 | |
899 | case Intrinsic::AMDGPU_read_workdim: |
900 | return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
901 | MF.getInfo<SIMachineFunctionInfo>()->ABIArgOffset, |
902 | false); |
903 | |
904 | case Intrinsic::r600_read_tgid_x: |
905 | return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass, |
906 | TRI->getPreloadedValue(MF, SIRegisterInfo::TGID_X), VT); |
907 | case Intrinsic::r600_read_tgid_y: |
908 | return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass, |
909 | TRI->getPreloadedValue(MF, SIRegisterInfo::TGID_Y), VT); |
910 | case Intrinsic::r600_read_tgid_z: |
911 | return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass, |
912 | TRI->getPreloadedValue(MF, SIRegisterInfo::TGID_Z), VT); |
913 | case Intrinsic::r600_read_tidig_x: |
914 | return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass, |
915 | TRI->getPreloadedValue(MF, SIRegisterInfo::TIDIG_X), VT); |
916 | case Intrinsic::r600_read_tidig_y: |
917 | return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass, |
918 | TRI->getPreloadedValue(MF, SIRegisterInfo::TIDIG_Y), VT); |
919 | case Intrinsic::r600_read_tidig_z: |
920 | return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass, |
921 | TRI->getPreloadedValue(MF, SIRegisterInfo::TIDIG_Z), VT); |
922 | case AMDGPUIntrinsic::SI_load_const: { |
923 | SDValue Ops[] = { |
924 | Op.getOperand(1), |
925 | Op.getOperand(2) |
926 | }; |
927 | |
928 | MachineMemOperand *MMO = MF.getMachineMemOperand( |
929 | MachinePointerInfo(), |
930 | MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, |
931 | VT.getStoreSize(), 4); |
932 | return DAG.getMemIntrinsicNode(AMDGPUISD::LOAD_CONSTANT, DL, |
933 | Op->getVTList(), Ops, VT, MMO); |
934 | } |
935 | case AMDGPUIntrinsic::SI_sample: |
936 | return LowerSampleIntrinsic(AMDGPUISD::SAMPLE, Op, DAG); |
937 | case AMDGPUIntrinsic::SI_sampleb: |
938 | return LowerSampleIntrinsic(AMDGPUISD::SAMPLEB, Op, DAG); |
939 | case AMDGPUIntrinsic::SI_sampled: |
940 | return LowerSampleIntrinsic(AMDGPUISD::SAMPLED, Op, DAG); |
941 | case AMDGPUIntrinsic::SI_samplel: |
942 | return LowerSampleIntrinsic(AMDGPUISD::SAMPLEL, Op, DAG); |
943 | case AMDGPUIntrinsic::SI_vs_load_input: |
944 | return DAG.getNode(AMDGPUISD::LOAD_INPUT, DL, VT, |
945 | Op.getOperand(1), |
946 | Op.getOperand(2), |
947 | Op.getOperand(3)); |
948 | default: |
949 | return AMDGPUTargetLowering::LowerOperation(Op, DAG); |
950 | } |
951 | } |
952 | |
953 | SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op, |
954 | SelectionDAG &DAG) const { |
955 | MachineFunction &MF = DAG.getMachineFunction(); |
956 | SDValue Chain = Op.getOperand(0); |
957 | unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); |
958 | |
959 | switch (IntrinsicID) { |
960 | case AMDGPUIntrinsic::SI_tbuffer_store: { |
961 | SDLoc DL(Op); |
962 | SDValue Ops[] = { |
963 | Chain, |
964 | Op.getOperand(2), |
965 | Op.getOperand(3), |
966 | Op.getOperand(4), |
967 | Op.getOperand(5), |
968 | Op.getOperand(6), |
969 | Op.getOperand(7), |
970 | Op.getOperand(8), |
971 | Op.getOperand(9), |
972 | Op.getOperand(10), |
973 | Op.getOperand(11), |
974 | Op.getOperand(12), |
975 | Op.getOperand(13), |
976 | Op.getOperand(14) |
977 | }; |
978 | |
979 | EVT VT = Op.getOperand(3).getValueType(); |
980 | |
981 | MachineMemOperand *MMO = MF.getMachineMemOperand( |
982 | MachinePointerInfo(), |
983 | MachineMemOperand::MOStore, |
984 | VT.getStoreSize(), 4); |
985 | return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_STORE_FORMAT, DL, |
986 | Op->getVTList(), Ops, VT, MMO); |
987 | } |
988 | default: |
989 | return SDValue(); |
990 | } |
991 | } |
992 | |
993 | SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { |
994 | SDLoc DL(Op); |
995 | LoadSDNode *Load = cast<LoadSDNode>(Op); |
996 | |
997 | if (Op.getValueType().isVector()) { |
998 | assert(Op.getValueType().getVectorElementType() == MVT::i32 && |
999 | "Custom lowering for non-i32 vectors hasn't been implemented."); |
1000 | unsigned NumElements = Op.getValueType().getVectorNumElements(); |
1001 | assert(NumElements != 2 && "v2 loads are supported for all address spaces."); |
1002 | switch (Load->getAddressSpace()) { |
1003 | default: break; |
1004 | case AMDGPUAS::GLOBAL_ADDRESS: |
1005 | case AMDGPUAS::PRIVATE_ADDRESS: |
1006 | // v4 loads are supported for private and global memory. |
1007 | if (NumElements <= 4) |
1008 | break; |
1009 | // fall-through |
1010 | case AMDGPUAS::LOCAL_ADDRESS: |
1011 | return ScalarizeVectorLoad(Op, DAG); |
1012 | } |
1013 | } |
1014 | |
1015 | return AMDGPUTargetLowering::LowerLOAD(Op, DAG); |
1016 | } |
1017 | |
1018 | SDValue SITargetLowering::LowerSampleIntrinsic(unsigned Opcode, |
1019 | const SDValue &Op, |
1020 | SelectionDAG &DAG) const { |
1021 | return DAG.getNode(Opcode, SDLoc(Op), Op.getValueType(), Op.getOperand(1), |
1022 | Op.getOperand(2), |
1023 | Op.getOperand(3), |
1024 | Op.getOperand(4)); |
1025 | } |
1026 | |
1027 | SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { |
1028 | if (Op.getValueType() != MVT::i64) |
1029 | return SDValue(); |
1030 | |
1031 | SDLoc DL(Op); |
1032 | SDValue Cond = Op.getOperand(0); |
1033 | |
1034 | SDValue Zero = DAG.getConstant(0, MVT::i32); |
1035 | SDValue One = DAG.getConstant(1, MVT::i32); |
1036 | |
1037 | SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1)); |
1038 | SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2)); |
1039 | |
1040 | SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero); |
1041 | SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero); |
1042 | |
1043 | SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1); |
1044 | |
1045 | SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One); |
1046 | SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One); |
1047 | |
1048 | SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1); |
1049 | |
1050 | SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2i32, Lo, Hi); |
1051 | return DAG.getNode(ISD::BITCAST, DL, MVT::i64, Res); |
1052 | } |
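// Pattern used here (sketch): an i64 select is split into two i32 selects on |
// the bitcast halves, |
//   lo = select cond, lhs.lo, rhs.lo;  hi = select cond, lhs.hi, rhs.hi |
// and the result is rebuilt with BUILD_VECTOR + BITCAST back to i64. |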
1053 | |
1054 | // Catch division cases where we can use shortcuts with rcp and rsq |
1055 | // instructions. |
1056 | SDValue SITargetLowering::LowerFastFDIV(SDValue Op, SelectionDAG &DAG) const { |
1057 | SDLoc SL(Op); |
1058 | SDValue LHS = Op.getOperand(0); |
1059 | SDValue RHS = Op.getOperand(1); |
1060 | EVT VT = Op.getValueType(); |
1061 | bool Unsafe = DAG.getTarget().Options.UnsafeFPMath; |
1062 | |
1063 | if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) { |
1064 | if ((Unsafe || (VT == MVT::f32 && !Subtarget->hasFP32Denormals())) && |
1065 | CLHS->isExactlyValue(1.0)) { |
1066 | // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to |
1067 | // the CI documentation have a worst case error of 1 ulp. |
1068 | // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to |
1069 | // use it as long as we aren't trying to use denormals. |
1070 | |
1071 | // 1.0 / sqrt(x) -> rsq(x) |
1072 | // |
1073 | // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP |
1074 | // error seems really high at 2^29 ULP. |
1075 | if (RHS.getOpcode() == ISD::FSQRT) |
1076 | return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0)); |
1077 | |
1078 | // 1.0 / x -> rcp(x) |
1079 | return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS); |
1080 | } |
1081 | } |
1082 | |
1083 | if (Unsafe) { |
1084 | // Turn into multiply by the reciprocal. |
1085 | // x / y -> x * (1.0 / y) |
1086 | SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS); |
1087 | return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip); |
1088 | } |
1089 | |
1090 | return SDValue(); |
1091 | } |
1092 | |
1093 | SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const { |
1094 | SDValue FastLowered = LowerFastFDIV(Op, DAG); |
1095 | if (FastLowered.getNode()) |
1096 | return FastLowered; |
1097 | |
1098 | // This uses v_rcp_f32 which does not handle denormals. Let this hit a |
1099 | // selection error for now rather than do something incorrect. |
1100 | if (Subtarget->hasFP32Denormals()) |
1101 | return SDValue(); |
1102 | |
1103 | SDLoc SL(Op); |
1104 | SDValue LHS = Op.getOperand(0); |
1105 | SDValue RHS = Op.getOperand(1); |
1106 | |
1107 | SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS); |
1108 | |
1109 | const APFloat K0Val(BitsToFloat(0x6f800000)); |
1110 | const SDValue K0 = DAG.getConstantFP(K0Val, MVT::f32); |
1111 | |
1112 | const APFloat K1Val(BitsToFloat(0x2f800000)); |
1113 | const SDValue K1 = DAG.getConstantFP(K1Val, MVT::f32); |
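// (Assumed from the bit patterns: K0 is 2^96 and K1 is 2^-32 as f32 values. |
// When |rhs| exceeds 2^96, rhs is pre-scaled by 2^-32 so the reciprocal stays |
// in the normal f32 range, since v_rcp_f32 does not handle denormals; the |
// final multiply by r3 applies the same factor to the quotient, cancelling |
// the scaling.) |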
1114 | |
1115 | const SDValue One = DAG.getConstantFP(1.0, MVT::f32); |
1116 | |
1117 | EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::f32); |
1118 | |
1119 | SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT); |
1120 | |
1121 | SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One); |
1122 | |
1123 | r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3); |
1124 | |
1125 | SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1); |
1126 | |
1127 | SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0); |
1128 | |
1129 | return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul); |
1130 | } |
1131 | |
1132 | SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const { |
1133 | return SDValue(); |
1134 | } |
1135 | |
1136 | SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const { |
1137 | EVT VT = Op.getValueType(); |
1138 | |
1139 | if (VT == MVT::f32) |
1140 | return LowerFDIV32(Op, DAG); |
1141 | |
1142 | if (VT == MVT::f64) |
1143 | return LowerFDIV64(Op, DAG); |
1144 | |
1145 | llvm_unreachable("Unexpected type for fdiv")::llvm::llvm_unreachable_internal("Unexpected type for fdiv", "/tmp/buildd/llvm-toolchain-snapshot-3.7~svn228917/lib/Target/R600/SIISelLowering.cpp" , 1145); |
1146 | } |
1147 | |
1148 | SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { |
1149 | SDLoc DL(Op); |
1150 | StoreSDNode *Store = cast<StoreSDNode>(Op); |
1151 | EVT VT = Store->getMemoryVT(); |
1152 | |
1153 | // These stores are legal. |
1154 | if (Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) { |
1155 | if (VT.isVector() && VT.getVectorNumElements() > 4) |
1156 | return ScalarizeVectorStore(Op, DAG); |
1157 | return SDValue(); |
1158 | } |
1159 | |
1160 | SDValue Ret = AMDGPUTargetLowering::LowerSTORE(Op, DAG); |
1161 | if (Ret.getNode()) |
1162 | return Ret; |
1163 | |
1164 | if (VT.isVector() && VT.getVectorNumElements() >= 8) |
1165 | return ScalarizeVectorStore(Op, DAG); |
1166 | |
1167 | if (VT == MVT::i1) |
1168 | return DAG.getTruncStore(Store->getChain(), DL, |
1169 | DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32), |
1170 | Store->getBasePtr(), MVT::i1, Store->getMemOperand()); |
1171 | |
1172 | return SDValue(); |
1173 | } |
1174 | |
1175 | SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const { |
1176 | EVT VT = Op.getValueType(); |
1177 | SDValue Arg = Op.getOperand(0); |
1178 | SDValue FractPart = DAG.getNode(AMDGPUISD::FRACT, SDLoc(Op), VT, |
1179 | DAG.getNode(ISD::FMUL, SDLoc(Op), VT, Arg, |
1180 | DAG.getConstantFP(0.5 / M_PI, VT))); |
1181 | |
1182 | switch (Op.getOpcode()) { |
1183 | case ISD::FCOS: |
1184 | return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, FractPart); |
1185 | case ISD::FSIN: |
1186 | return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, FractPart); |
1187 | default: |
1188 | llvm_unreachable("Wrong trig opcode")::llvm::llvm_unreachable_internal("Wrong trig opcode", "/tmp/buildd/llvm-toolchain-snapshot-3.7~svn228917/lib/Target/R600/SIISelLowering.cpp" , 1188); |
1189 | } |
1190 | } |
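// Rationale (inferred from the 0.5 / M_PI factor): the SIN_HW / COS_HW nodes |
// take their operand as a fraction of a full period rather than in radians, |
// so the angle is multiplied by 1 / (2 * pi) and reduced into [0, 1) with |
// FRACT first; e.g. sin(pi) reaches the hardware op as 0.5. |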
1191 | |
1192 | //===----------------------------------------------------------------------===// |
1193 | // Custom DAG optimizations |
1194 | //===----------------------------------------------------------------------===// |
1195 | |
1196 | SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N, |
1197 | DAGCombinerInfo &DCI) const { |
1198 | EVT VT = N->getValueType(0); |
1199 | EVT ScalarVT = VT.getScalarType(); |
1200 | if (ScalarVT != MVT::f32) |
1201 | return SDValue(); |
1202 | |
1203 | SelectionDAG &DAG = DCI.DAG; |
1204 | SDLoc DL(N); |
1205 | |
1206 | SDValue Src = N->getOperand(0); |
1207 | EVT SrcVT = Src.getValueType(); |
1208 | |
1209 | // TODO: We could try to match extracting the higher bytes, which would be |
1210 | // easier if i8 vectors weren't promoted to i32 vectors, particularly after |
1211 | // types are legalized. v4i8 -> v4f32 is probably the only case to worry |
1212 | // about in practice. |
1213 | if (DCI.isAfterLegalizeVectorOps() && SrcVT == MVT::i32) { |
1214 | if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) { |
1215 | SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Src); |
1216 | DCI.AddToWorklist(Cvt.getNode()); |
1217 | return Cvt; |
1218 | } |
1219 | } |
1220 | |
1221 | // We are primarily trying to catch operations on illegal vector types |
1222 | // before they are expanded. |
1223 | // For scalars, we can use the more flexible method of checking masked bits |
1224 | // after legalization. |
1225 | if (!DCI.isBeforeLegalize() || |
1226 | !SrcVT.isVector() || |
1227 | SrcVT.getVectorElementType() != MVT::i8) { |
1228 | return SDValue(); |
1229 | } |
1230 | |
1231 | assert(DCI.isBeforeLegalize() && "Unexpected legal type"); |
1232 | |
1233 | // Weird-sized vectors are a pain to handle, but we know 3 is really the same |
1234 | // size as 4. |
1235 | unsigned NElts = SrcVT.getVectorNumElements(); |
1236 | if (!SrcVT.isSimple() && NElts != 3) |
1237 | return SDValue(); |
1238 | |
1239 | // Handle v4i8 -> v4f32 extload. Replace the v4i8 with a legal i32 load to |
1240 | // prevent a mess from expanding to v4i32 and repacking. |
1241 | if (ISD::isNormalLoad(Src.getNode()) && Src.hasOneUse()) { |
1242 | EVT LoadVT = getEquivalentMemType(*DAG.getContext(), SrcVT); |
1243 | EVT RegVT = getEquivalentLoadRegType(*DAG.getContext(), SrcVT); |
1244 | EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f32, NElts); |
1245 | LoadSDNode *Load = cast<LoadSDNode>(Src); |
1246 | |
1247 | unsigned AS = Load->getAddressSpace(); |
1248 | unsigned Align = Load->getAlignment(); |
1249 | Type *Ty = LoadVT.getTypeForEVT(*DAG.getContext()); |
1250 | unsigned ABIAlignment = getDataLayout()->getABITypeAlignment(Ty); |
1251 | |
1252 | // Don't try to replace the load if we have to expand it due to alignment |
1253 | // problems. Otherwise we will end up scalarizing the load, and trying to |
1254 | // repack into the vector for no real reason. |
1255 | if (Align < ABIAlignment && |
1256 | !allowsMisalignedMemoryAccesses(LoadVT, AS, Align, nullptr)) { |
1257 | return SDValue(); |
1258 | } |
1259 | |
1260 | SDValue NewLoad = DAG.getExtLoad(ISD::ZEXTLOAD, DL, RegVT, |
1261 | Load->getChain(), |
1262 | Load->getBasePtr(), |
1263 | LoadVT, |
1264 | Load->getMemOperand()); |
1265 | |
1266 | // Make sure successors of the original load stay after it by updating |
1267 | // them to use the new Chain. |
1268 | DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), NewLoad.getValue(1)); |
1269 | |
1270 | SmallVector<SDValue, 4> Elts; |
1271 | if (RegVT.isVector()) |
1272 | DAG.ExtractVectorElements(NewLoad, Elts); |
1273 | else |
1274 | Elts.push_back(NewLoad); |
1275 | |
1276 | SmallVector<SDValue, 4> Ops; |
1277 | |
1278 | unsigned EltIdx = 0; |
1279 | for (SDValue Elt : Elts) { |
1280 | unsigned ComponentsInElt = std::min(4u, NElts - 4 * EltIdx); |
1281 | for (unsigned I = 0; I < ComponentsInElt; ++I) { |
1282 | unsigned Opc = AMDGPUISD::CVT_F32_UBYTE0 + I; |
1283 | SDValue Cvt = DAG.getNode(Opc, DL, MVT::f32, Elt); |
1284 | DCI.AddToWorklist(Cvt.getNode()); |
1285 | Ops.push_back(Cvt); |
1286 | } |
1287 | |
1288 | ++EltIdx; |
1289 | } |
1290 | |
1291 | assert(Ops.size() == NElts); |
1292 | |
1293 | return DAG.getNode(ISD::BUILD_VECTOR, DL, FloatVT, Ops); |
1294 | } |
1295 | |
1296 | return SDValue(); |
1297 | } |
1298 | |
1299 | // (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2) |
1300 | |
1301 | // This is a variant of |
1302 | // (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2), |
1303 | // |
1304 | // The normal DAG combiner will do this, but only if the add has one use,
1305 | // since doing it otherwise would increase the number of instructions.
1306 | // |
1307 | // This prevents us from seeing a constant offset that can be folded into a |
1308 | // memory instruction's addressing mode. If we know the resulting add offset of |
1309 | // a pointer can be folded into an addressing offset, we can replace the
1310 | // pointer operand with the add of the new constant offset. This eliminates
1311 | // one of the uses, and may allow the remaining use to also be simplified.
1312 | // |
1313 | SDValue SITargetLowering::performSHLPtrCombine(SDNode *N, |
1314 | unsigned AddrSpace, |
1315 | DAGCombinerInfo &DCI) const { |
1316 | SDValue N0 = N->getOperand(0); |
1317 | SDValue N1 = N->getOperand(1); |
1318 | |
1319 | if (N0.getOpcode() != ISD::ADD) |
1320 | return SDValue(); |
1321 | |
1322 | const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1); |
1323 | if (!CN1) |
1324 | return SDValue(); |
1325 | |
1326 | const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1)); |
1327 | if (!CAdd) |
1328 | return SDValue(); |
1329 | |
1330 | const SIInstrInfo *TII = |
1331 | static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo()); |
1332 | |
1333 | // If the resulting offset is too large, we can't fold it into the addressing |
1334 | // mode offset. |
1335 | APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue(); |
1336 | if (!TII->canFoldOffset(Offset.getZExtValue(), AddrSpace)) |
1337 | return SDValue(); |
1338 | |
1339 | SelectionDAG &DAG = DCI.DAG; |
1340 | SDLoc SL(N); |
1341 | EVT VT = N->getValueType(0); |
1342 | |
1343 | SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1); |
1344 | SDValue COffset = DAG.getConstant(Offset, MVT::i32); |
1345 | |
1346 | return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset); |
1347 | } |
1348 | |
1349 | SDValue SITargetLowering::performAndCombine(SDNode *N, |
1350 | DAGCombinerInfo &DCI) const { |
1351 | if (DCI.isBeforeLegalize()) |
1352 | return SDValue(); |
1353 | |
1354 | SelectionDAG &DAG = DCI.DAG; |
1355 | |
1356 | // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) -> |
1357 | // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity) |
1358 | SDValue LHS = N->getOperand(0); |
1359 | SDValue RHS = N->getOperand(1); |
1360 | |
1361 | if (LHS.getOpcode() == ISD::SETCC && |
1362 | RHS.getOpcode() == ISD::SETCC) { |
1363 | ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get(); |
1364 | ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get(); |
1365 | |
1366 | SDValue X = LHS.getOperand(0); |
1367 | SDValue Y = RHS.getOperand(0); |
1368 | if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X) |
1369 | return SDValue(); |
1370 | |
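| // Annotation (not in the original source): (fcmp ord x, x) is true exactly
| // when x is not a NaN, and (fcmp une (fabs x), inf) additionally rules out
| // both infinities, leaving only the finite class bits.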
1371 | if (LCC == ISD::SETO) { |
1372 | if (X != LHS.getOperand(1)) |
1373 | return SDValue(); |
1374 | |
1375 | if (RCC == ISD::SETUNE) { |
1376 | const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1)); |
1377 | if (!C1 || !C1->isInfinity() || C1->isNegative()) |
1378 | return SDValue(); |
1379 | |
1380 | const uint32_t Mask = SIInstrFlags::N_NORMAL | |
1381 | SIInstrFlags::N_SUBNORMAL | |
1382 | SIInstrFlags::N_ZERO | |
1383 | SIInstrFlags::P_ZERO | |
1384 | SIInstrFlags::P_SUBNORMAL | |
1385 | SIInstrFlags::P_NORMAL; |
1386 | |
1387 | static_assert(((~(SIInstrFlags::S_NAN | |
1388 | SIInstrFlags::Q_NAN | |
1389 | SIInstrFlags::N_INFINITY | |
1390 | SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask, |
1391 | "mask not equal"); |
1392 | |
1393 | return DAG.getNode(AMDGPUISD::FP_CLASS, SDLoc(N), MVT::i1, |
1394 | X, DAG.getConstant(Mask, MVT::i32)); |
1395 | } |
1396 | } |
1397 | } |
1398 | |
1399 | return SDValue(); |
1400 | } |
1401 | |
1402 | SDValue SITargetLowering::performOrCombine(SDNode *N, |
1403 | DAGCombinerInfo &DCI) const { |
1404 | SelectionDAG &DAG = DCI.DAG; |
1405 | SDValue LHS = N->getOperand(0); |
1406 | SDValue RHS = N->getOperand(1); |
1407 | |
1408 | // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2) |
1409 | if (LHS.getOpcode() == AMDGPUISD::FP_CLASS && |
1410 | RHS.getOpcode() == AMDGPUISD::FP_CLASS) { |
1411 | SDValue Src = LHS.getOperand(0); |
1412 | if (Src != RHS.getOperand(0)) |
1413 | return SDValue(); |
1414 | |
1415 | const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1)); |
1416 | const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1)); |
1417 | if (!CLHS || !CRHS) |
1418 | return SDValue(); |
1419 | |
1420 | // Only 10 bits are used. |
1421 | static const uint32_t MaxMask = 0x3ff; |
1422 | |
1423 | uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask; |
1424 | return DAG.getNode(AMDGPUISD::FP_CLASS, SDLoc(N), MVT::i1, |
1425 | Src, DAG.getConstant(NewMask, MVT::i32)); |
1426 | } |
1427 | |
1428 | return SDValue(); |
1429 | } |
1430 | |
1431 | SDValue SITargetLowering::performClassCombine(SDNode *N, |
1432 | DAGCombinerInfo &DCI) const { |
1433 | SelectionDAG &DAG = DCI.DAG; |
1434 | SDValue Mask = N->getOperand(1); |
1435 | |
1436 | // fp_class x, 0 -> false |
1437 | if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) { |
1438 | if (CMask->isNullValue()) |
1439 | return DAG.getConstant(0, MVT::i1); |
1440 | } |
1441 | |
1442 | return SDValue(); |
1443 | } |
1444 | |
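| /// \brief Map a two-operand min/max opcode to its three-operand
| /// min3/max3 counterpart. (Annotation added; not in the original source.)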
1445 | static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) { |
1446 | switch (Opc) { |
1447 | case ISD::FMAXNUM: |
1448 | return AMDGPUISD::FMAX3; |
1449 | case AMDGPUISD::SMAX: |
1450 | return AMDGPUISD::SMAX3; |
1451 | case AMDGPUISD::UMAX: |
1452 | return AMDGPUISD::UMAX3; |
1453 | case ISD::FMINNUM: |
1454 | return AMDGPUISD::FMIN3; |
1455 | case AMDGPUISD::SMIN: |
1456 | return AMDGPUISD::SMIN3; |
1457 | case AMDGPUISD::UMIN: |
1458 | return AMDGPUISD::UMIN3; |
1459 | default: |
1460 | llvm_unreachable("Not a min/max opcode");
1461 | } |
1462 | } |
1463 | |
1464 | SDValue SITargetLowering::performMin3Max3Combine(SDNode *N, |
1465 | DAGCombinerInfo &DCI) const { |
1466 | SelectionDAG &DAG = DCI.DAG; |
1467 | |
1468 | unsigned Opc = N->getOpcode(); |
1469 | SDValue Op0 = N->getOperand(0); |
1470 | SDValue Op1 = N->getOperand(1); |
1471 | |
1472 | // Only do this if the inner op has one use, since this would otherwise just
1473 | // increase register pressure for no benefit.
1474 | |
1475 | // max(max(a, b), c) |
1476 | if (Op0.getOpcode() == Opc && Op0.hasOneUse()) { |
1477 | SDLoc DL(N); |
1478 | return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc), |
1479 | DL, |
1480 | N->getValueType(0), |
1481 | Op0.getOperand(0), |
1482 | Op0.getOperand(1), |
1483 | Op1); |
1484 | } |
1485 | |
1486 | // max(a, max(b, c)) |
1487 | if (Op1.getOpcode() == Opc && Op1.hasOneUse()) { |
1488 | SDLoc DL(N); |
1489 | return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc), |
1490 | DL, |
1491 | N->getValueType(0), |
1492 | Op0, |
1493 | Op1.getOperand(0), |
1494 | Op1.getOperand(1)); |
1495 | } |
1496 | |
1497 | return SDValue(); |
1498 | } |
1499 | |
1500 | SDValue SITargetLowering::performSetCCCombine(SDNode *N, |
1501 | DAGCombinerInfo &DCI) const { |
1502 | SelectionDAG &DAG = DCI.DAG; |
1503 | SDLoc SL(N); |
1504 | |
1505 | SDValue LHS = N->getOperand(0); |
1506 | SDValue RHS = N->getOperand(1); |
1507 | EVT VT = LHS.getValueType(); |
1508 | |
1509 | if (VT != MVT::f32 && VT != MVT::f64) |
1510 | return SDValue(); |
1511 | |
1512 | // Match isinf pattern |
1513 | // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity)) |
1514 | ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); |
1515 | if (CC == ISD::SETOEQ && LHS.getOpcode() == ISD::FABS) { |
1516 | const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS); |
1517 | if (!CRHS) |
1518 | return SDValue(); |
1519 | |
1520 | const APFloat &APF = CRHS->getValueAPF(); |
1521 | if (APF.isInfinity() && !APF.isNegative()) { |
1522 | unsigned Mask = SIInstrFlags::P_INFINITY | SIInstrFlags::N_INFINITY; |
1523 | return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, |
1524 | LHS.getOperand(0), DAG.getConstant(Mask, MVT::i32)); |
1525 | } |
1526 | } |
1527 | |
1528 | return SDValue(); |
1529 | } |
1530 | |
1531 | SDValue SITargetLowering::PerformDAGCombine(SDNode *N, |
1532 | DAGCombinerInfo &DCI) const { |
1533 | SelectionDAG &DAG = DCI.DAG; |
1534 | SDLoc DL(N); |
1535 | |
1536 | switch (N->getOpcode()) { |
1537 | default: |
1538 | return AMDGPUTargetLowering::PerformDAGCombine(N, DCI); |
1539 | case ISD::SETCC: |
1540 | return performSetCCCombine(N, DCI); |
1541 | case ISD::FMAXNUM: // TODO: What about fmax_legacy? |
1542 | case ISD::FMINNUM: |
1543 | case AMDGPUISD::SMAX: |
1544 | case AMDGPUISD::SMIN: |
1545 | case AMDGPUISD::UMAX: |
1546 | case AMDGPUISD::UMIN: { |
1547 | if (DCI.getDAGCombineLevel() >= AfterLegalizeDAG && |
1548 | getTargetMachine().getOptLevel() > CodeGenOpt::None) |
1549 | return performMin3Max3Combine(N, DCI); |
1550 | break; |
1551 | } |
1552 | |
1553 | case AMDGPUISD::CVT_F32_UBYTE0: |
1554 | case AMDGPUISD::CVT_F32_UBYTE1: |
1555 | case AMDGPUISD::CVT_F32_UBYTE2: |
1556 | case AMDGPUISD::CVT_F32_UBYTE3: { |
1557 | unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0; |
1558 | |
1559 | SDValue Src = N->getOperand(0); |
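| // Annotation (not in the original source): each CVT_F32_UBYTEn variant reads
| // exactly one byte of the source, so only those eight bits are demanded.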
1560 | APInt Demanded = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8); |
1561 | |
1562 | APInt KnownZero, KnownOne; |
1563 | TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), |
1564 | !DCI.isBeforeLegalizeOps()); |
1565 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
1566 | if (TLO.ShrinkDemandedConstant(Src, Demanded) || |
1567 | TLI.SimplifyDemandedBits(Src, Demanded, KnownZero, KnownOne, TLO)) { |
1568 | DCI.CommitTargetLoweringOpt(TLO); |
1569 | } |
1570 | |
1571 | break; |
1572 | } |
1573 | |
1574 | case ISD::UINT_TO_FP:
1575 | return performUCharToFloatCombine(N, DCI); |
1576 | |
1577 | case ISD::FADD: { |
1578 | if (DCI.getDAGCombineLevel() < AfterLegalizeDAG) |
1579 | break; |
1580 | |
1581 | EVT VT = N->getValueType(0); |
1582 | if (VT != MVT::f32) |
1583 | break; |
1584 | |
1585 | SDValue LHS = N->getOperand(0); |
1586 | SDValue RHS = N->getOperand(1); |
1587 | |
1588 | // These should really be instruction patterns, but writing patterns with |
1589 | // source modifiers is a pain.
1590 | |
1591 | // fadd (fadd (a, a), b) -> mad 2.0, a, b |
1592 | if (LHS.getOpcode() == ISD::FADD) { |
1593 | SDValue A = LHS.getOperand(0); |
1594 | if (A == LHS.getOperand(1)) { |
1595 | const SDValue Two = DAG.getConstantFP(2.0, MVT::f32); |
1596 | return DAG.getNode(AMDGPUISD::MAD, DL, VT, Two, A, RHS); |
1597 | } |
1598 | } |
1599 | |
1600 | // fadd (b, fadd (a, a)) -> mad 2.0, a, b |
1601 | if (RHS.getOpcode() == ISD::FADD) { |
1602 | SDValue A = RHS.getOperand(0); |
1603 | if (A == RHS.getOperand(1)) { |
1604 | const SDValue Two = DAG.getConstantFP(2.0, MVT::f32); |
1605 | return DAG.getNode(AMDGPUISD::MAD, DL, VT, Two, A, LHS); |
1606 | } |
1607 | } |
1608 | |
1609 | break; |
1610 | } |
1611 | case ISD::FSUB: { |
1612 | if (DCI.getDAGCombineLevel() < AfterLegalizeDAG) |
1613 | break; |
1614 | |
1615 | EVT VT = N->getValueType(0); |
1616 | |
1617 | // Try to get the fneg to fold into the source modifier. This undoes generic |
1618 | // DAG combines and folds them into the mad. |
1619 | if (VT == MVT::f32) { |
1620 | SDValue LHS = N->getOperand(0); |
1621 | SDValue RHS = N->getOperand(1); |
1622 | |
1623 | if (LHS.getOpcode() == ISD::FMUL) { |
1624 | // (fsub (fmul a, b), c) -> mad a, b, (fneg c) |
1625 | |
1626 | SDValue A = LHS.getOperand(0); |
1627 | SDValue B = LHS.getOperand(1); |
1628 | SDValue C = DAG.getNode(ISD::FNEG, DL, VT, RHS); |
1629 | |
1630 | return DAG.getNode(AMDGPUISD::MAD, DL, VT, A, B, C); |
1631 | } |
1632 | |
1633 | if (RHS.getOpcode() == ISD::FMUL) { |
1634 | // (fsub c, (fmul a, b)) -> mad (fneg a), b, c |
1635 | |
1636 | SDValue A = DAG.getNode(ISD::FNEG, DL, VT, RHS.getOperand(0)); |
1637 | SDValue B = RHS.getOperand(1); |
1638 | SDValue C = LHS; |
1639 | |
1640 | return DAG.getNode(AMDGPUISD::MAD, DL, VT, A, B, C); |
1641 | } |
1642 | |
1643 | if (LHS.getOpcode() == ISD::FADD) { |
1644 | // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c) |
1645 | |
1646 | SDValue A = LHS.getOperand(0); |
1647 | if (A == LHS.getOperand(1)) { |
1648 | const SDValue Two = DAG.getConstantFP(2.0, MVT::f32); |
1649 | SDValue NegRHS = DAG.getNode(ISD::FNEG, DL, VT, RHS); |
1650 | |
1651 | return DAG.getNode(AMDGPUISD::MAD, DL, VT, Two, A, NegRHS); |
1652 | } |
1653 | } |
1654 | |
1655 | if (RHS.getOpcode() == ISD::FADD) { |
1656 | // (fsub c, (fadd a, a)) -> mad -2.0, a, c |
1657 | |
1658 | SDValue A = RHS.getOperand(0); |
1659 | if (A == RHS.getOperand(1)) { |
1660 | const SDValue NegTwo = DAG.getConstantFP(-2.0, MVT::f32); |
1661 | return DAG.getNode(AMDGPUISD::MAD, DL, VT, NegTwo, A, LHS); |
1662 | } |
1663 | } |
1664 | } |
1665 | |
1666 | break; |
1667 | } |
1668 |
1669 | case ISD::LOAD: |
1670 | case ISD::STORE: |
1671 | case ISD::ATOMIC_LOAD: |
1672 | case ISD::ATOMIC_STORE: |
1673 | case ISD::ATOMIC_CMP_SWAP: |
1674 | case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: |
1675 | case ISD::ATOMIC_SWAP: |
1676 | case ISD::ATOMIC_LOAD_ADD: |
1677 | case ISD::ATOMIC_LOAD_SUB: |
1678 | case ISD::ATOMIC_LOAD_AND: |
1679 | case ISD::ATOMIC_LOAD_OR: |
1680 | case ISD::ATOMIC_LOAD_XOR: |
1681 | case ISD::ATOMIC_LOAD_NAND: |
1682 | case ISD::ATOMIC_LOAD_MIN: |
1683 | case ISD::ATOMIC_LOAD_MAX: |
1684 | case ISD::ATOMIC_LOAD_UMIN: |
1685 | case ISD::ATOMIC_LOAD_UMAX: { // TODO: Target mem intrinsics. |
1686 | if (DCI.isBeforeLegalize()) |
1687 | break; |
1688 | |
1689 | MemSDNode *MemNode = cast<MemSDNode>(N); |
1690 | SDValue Ptr = MemNode->getBasePtr(); |
1691 | |
1692 | // TODO: We could also do this for multiplies. |
1693 | unsigned AS = MemNode->getAddressSpace(); |
1694 | if (Ptr.getOpcode() == ISD::SHL && AS != AMDGPUAS::PRIVATE_ADDRESS) { |
1695 | SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), AS, DCI); |
1696 | if (NewPtr) { |
1697 | SmallVector<SDValue, 8> NewOps; |
1698 | for (unsigned I = 0, E = MemNode->getNumOperands(); I != E; ++I) |
1699 | NewOps.push_back(MemNode->getOperand(I)); |
1700 | |
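| // Annotation (not in the original source): loads and atomics carry the
| // pointer in operand 1; stores carry the value in operand 1 and the pointer
| // in operand 2.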
1701 | NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr; |
1702 | return SDValue(DAG.UpdateNodeOperands(MemNode, NewOps), 0); |
1703 | } |
1704 | } |
1705 | break; |
1706 | } |
1707 | case ISD::AND: |
1708 | return performAndCombine(N, DCI); |
1709 | case ISD::OR: |
1710 | return performOrCombine(N, DCI); |
1711 | case AMDGPUISD::FP_CLASS: |
1712 | return performClassCombine(N, DCI); |
1713 | } |
1714 | return AMDGPUTargetLowering::PerformDAGCombine(N, DCI); |
1715 | } |
1716 | |
1717 | /// \brief Test if RegClass is one of the VSrc classes |
1718 | static bool isVSrc(unsigned RegClass) { |
1719 | switch(RegClass) { |
1720 | default: return false; |
1721 | case AMDGPU::VS_32RegClassID: |
1722 | case AMDGPU::VS_64RegClassID: |
1723 | return true; |
1724 | } |
1725 | } |
1726 | |
1727 | /// \brief Analyze the possible immediate value Op |
1728 | /// |
1729 | /// Returns -1 if it isn't an immediate, 0 if it's an inline immediate,
1730 | /// and the immediate value if it's a literal immediate.
1731 | int32_t SITargetLowering::analyzeImmediate(const SDNode *N) const { |
1732 | |
1733 | const SIInstrInfo *TII = |
1734 | static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo()); |
1735 | |
1736 | if (const ConstantSDNode *Node = dyn_cast<ConstantSDNode>(N)) { |
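| // Annotation (not in the original source): literal immediates are encoded in
| // 32 bits, so any value with bits set above bit 31 can't be used as one.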
1737 | if (Node->getZExtValue() >> 32) |
1738 | return -1; |
1739 | |
1740 | if (TII->isInlineConstant(Node->getAPIntValue())) |
1741 | return 0; |
1742 | |
1743 | return Node->getZExtValue(); |
1744 | } |
1745 | |
1746 | if (const ConstantFPSDNode *Node = dyn_cast<ConstantFPSDNode>(N)) { |
1747 | if (TII->isInlineConstant(Node->getValueAPF().bitcastToAPInt())) |
1748 | return 0; |
1749 | |
1750 | if (Node->getValueType(0) == MVT::f32) |
1751 | return FloatToBits(Node->getValueAPF().convertToFloat()); |
1752 | |
1753 | return -1; |
1754 | } |
1755 | |
1756 | return -1; |
1757 | } |
1758 | |
1759 | const TargetRegisterClass * |
1760 | SITargetLowering::getRegClassForNode(SelectionDAG &DAG, |
1761 | const SDValue &Op) const { |
1762 | const SIInstrInfo *TII = |
1763 | static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo()); |
1764 | const SIRegisterInfo &TRI = TII->getRegisterInfo(); |
1765 | |
1766 | if (!Op->isMachineOpcode()) { |
1767 | switch(Op->getOpcode()) { |
1768 | case ISD::CopyFromReg: { |
1769 | MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); |
1770 | unsigned Reg = cast<RegisterSDNode>(Op->getOperand(1))->getReg(); |
1771 | if (TargetRegisterInfo::isVirtualRegister(Reg)) { |
1772 | return MRI.getRegClass(Reg); |
1773 | } |
1774 | return TRI.getPhysRegClass(Reg); |
1775 | } |
1776 | default: return nullptr; |
1777 | } |
1778 | } |
1779 | const MCInstrDesc &Desc = TII->get(Op->getMachineOpcode()); |
1780 | int OpClassID = Desc.OpInfo[Op.getResNo()].RegClass; |
1781 | if (OpClassID != -1) { |
1782 | return TRI.getRegClass(OpClassID); |
1783 | } |
1784 | switch(Op.getMachineOpcode()) { |
1785 | case AMDGPU::COPY_TO_REGCLASS: |
1786 | // Operand 1 is the register class id for COPY_TO_REGCLASS instructions. |
1787 | OpClassID = cast<ConstantSDNode>(Op->getOperand(1))->getZExtValue(); |
1788 | |
1789 | // If the COPY_TO_REGCLASS instruction is copying to a VSrc register
1790 | // class, the value could live in either a VReg or an SReg. Recurse on the
1791 | // source of the copy to get a more accurate register class.
1792 | if (isVSrc(OpClassID)) |
1793 | return getRegClassForNode(DAG, Op.getOperand(0)); |
1794 | |
1795 | return TRI.getRegClass(OpClassID); |
1796 | case AMDGPU::EXTRACT_SUBREG: { |
1797 | int SubIdx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); |
1798 | const TargetRegisterClass *SuperClass = |
1799 | getRegClassForNode(DAG, Op.getOperand(0)); |
1800 | return TRI.getSubClassWithSubReg(SuperClass, SubIdx); |
1801 | } |
1802 | case AMDGPU::REG_SEQUENCE: |
1803 | // Operand 0 is the register class id for REG_SEQUENCE instructions. |
1804 | return TRI.getRegClass( |
1805 | cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()); |
1806 | default: |
1807 | return getRegClassFor(Op.getSimpleValueType()); |
1808 | } |
1809 | } |
1810 | |
1811 | /// \brief Does "Op" fit into register class "RegClass" ? |
1812 | bool SITargetLowering::fitsRegClass(SelectionDAG &DAG, const SDValue &Op, |
1813 | unsigned RegClass) const { |
1814 | const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
1815 | const TargetRegisterClass *RC = getRegClassForNode(DAG, Op); |
1816 | if (!RC) { |
1817 | return false; |
1818 | } |
1819 | return TRI->getRegClass(RegClass)->hasSubClassEq(RC); |
1820 | } |
1821 | |
1822 | /// \brief Helper function for adjustWritemask |
1823 | static unsigned SubIdx2Lane(unsigned Idx) { |
1824 | switch (Idx) { |
1825 | default: return 0; |
1826 | case AMDGPU::sub0: return 0; |
1827 | case AMDGPU::sub1: return 1; |
1828 | case AMDGPU::sub2: return 2; |
1829 | case AMDGPU::sub3: return 3; |
1830 | } |
1831 | } |
1832 | |
1833 | /// \brief Adjust the writemask of MIMG instructions |
1834 | void SITargetLowering::adjustWritemask(MachineSDNode *&Node, |
1835 | SelectionDAG &DAG) const { |
1836 | SDNode *Users[4] = { }; |
1837 | unsigned Lane = 0; |
1838 | unsigned OldDmask = Node->getConstantOperandVal(0); |
1839 | unsigned NewDmask = 0; |
1840 | |
1841 | // Try to figure out the used register components |
1842 | for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end(); |
1843 | I != E; ++I) { |
1844 | |
1845 | // Abort if we can't understand the usage |
1846 | if (!I->isMachineOpcode() || |
1847 | I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG) |
1848 | return; |
1849 | |
1850 | // Lane means which subreg of %VGPRa_VGPRb_VGPRc_VGPRd is used. |
1851 | // Note that subregs are packed, i.e. Lane==0 is the first bit set |
1852 | // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit |
1853 | // set, etc. |
1854 | Lane = SubIdx2Lane(I->getConstantOperandVal(1)); |
1855 | |
1856 | // Set which texture component corresponds to the lane. |
1857 | unsigned Comp; |
1858 | for (unsigned i = 0, Dmask = OldDmask; i <= Lane; i++) { |
1859 | assert(Dmask);
1860 | Comp = countTrailingZeros(Dmask); |
1861 | Dmask &= ~(1 << Comp); |
1862 | } |
1863 | |
1864 | // Abort if we have more than one user per component |
1865 | if (Users[Lane]) |
1866 | return; |
1867 | |
1868 | Users[Lane] = *I; |
1869 | NewDmask |= 1 << Comp; |
1870 | } |
1871 | |
1872 | // Abort if there's no change |
1873 | if (NewDmask == OldDmask) |
1874 | return; |
1875 | |
1876 | // Adjust the writemask in the node |
1877 | std::vector<SDValue> Ops; |
1878 | Ops.push_back(DAG.getTargetConstant(NewDmask, MVT::i32)); |
1879 | for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i) |
1880 | Ops.push_back(Node->getOperand(i)); |
1881 | Node = (MachineSDNode*)DAG.UpdateNodeOperands(Node, Ops); |
1882 | |
1883 | // If we only got one lane, replace it with a copy |
1884 | // (if NewDmask has only one bit set...) |
1885 | if (NewDmask && (NewDmask & (NewDmask-1)) == 0) { |
1886 | SDValue RC = DAG.getTargetConstant(AMDGPU::VGPR_32RegClassID, MVT::i32); |
1887 | SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS, |
1888 | SDLoc(), Users[Lane]->getValueType(0), |
1889 | SDValue(Node, 0), RC); |
1890 | DAG.ReplaceAllUsesWith(Users[Lane], Copy); |
1891 | return; |
1892 | } |
1893 | |
1894 | // Update the users of the node with the new indices |
1895 | for (unsigned i = 0, Idx = AMDGPU::sub0; i < 4; ++i) { |
1896 | |
1897 | SDNode *User = Users[i]; |
1898 | if (!User) |
1899 | continue; |
1900 | |
1901 | SDValue Op = DAG.getTargetConstant(Idx, MVT::i32); |
1902 | DAG.UpdateNodeOperands(User, User->getOperand(0), Op); |
1903 | |
1904 | switch (Idx) { |
1905 | default: break; |
1906 | case AMDGPU::sub0: Idx = AMDGPU::sub1; break; |
1907 | case AMDGPU::sub1: Idx = AMDGPU::sub2; break; |
1908 | case AMDGPU::sub2: Idx = AMDGPU::sub3; break; |
1909 | } |
1910 | } |
1911 | } |
1912 | |
1913 | /// \brief Legalize target independent instructions (e.g. INSERT_SUBREG) |
1914 | /// with frame index operands. |
1915 | /// LLVM assumes that inputs to these instructions are registers.
1916 | void SITargetLowering::legalizeTargetIndependentNode(SDNode *Node, |
1917 | SelectionDAG &DAG) const { |
1918 | |
1919 | SmallVector<SDValue, 8> Ops; |
1920 | for (unsigned i = 0; i < Node->getNumOperands(); ++i) { |
1921 | if (!isa<FrameIndexSDNode>(Node->getOperand(i))) { |
1922 | Ops.push_back(Node->getOperand(i)); |
1923 | continue; |
1924 | } |
1925 | |
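| // Annotation (not in the original source): frame indices can't be encoded
| // directly as register operands, so materialize the index into an SGPR with
| // S_MOV_B32 first.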
1926 | SDLoc DL(Node); |
1927 | Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, |
1928 | Node->getOperand(i).getValueType(), |
1929 | Node->getOperand(i)), 0)); |
1930 | } |
1931 | |
1932 | DAG.UpdateNodeOperands(Node, Ops); |
1933 | } |
1934 | |
1935 | /// \brief Fold the instructions after selecting them. |
1936 | SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node, |
1937 | SelectionDAG &DAG) const { |
1938 | const SIInstrInfo *TII = |
1939 | static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo()); |
1940 | Node = AdjustRegClass(Node, DAG); |
1941 | |
1942 | if (TII->isMIMG(Node->getMachineOpcode())) |
1943 | adjustWritemask(Node, DAG); |
1944 | |
1945 | if (Node->getMachineOpcode() == AMDGPU::INSERT_SUBREG || |
1946 | Node->getMachineOpcode() == AMDGPU::REG_SEQUENCE) { |
1947 | legalizeTargetIndependentNode(Node, DAG); |
1948 | return Node; |
1949 | } |
1950 | return Node; |
1951 | } |
1952 | |
1953 | /// \brief Assign the register class depending on the number of |
1954 | /// bits set in the writemask |
1955 | void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI, |
1956 | SDNode *Node) const { |
1957 | const SIInstrInfo *TII = |
1958 | static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo()); |
1959 | |
1960 | MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo(); |
1961 | TII->legalizeOperands(MI); |
1962 | |
1963 | if (TII->isMIMG(MI->getOpcode())) { |
1964 | unsigned VReg = MI->getOperand(0).getReg(); |
1965 | unsigned Writemask = MI->getOperand(1).getImm(); |
1966 | unsigned BitsSet = 0; |
1967 | for (unsigned i = 0; i < 4; ++i) |
1968 | BitsSet += Writemask & (1 << i) ? 1 : 0; |
1969 | |
1970 | const TargetRegisterClass *RC; |
1971 | switch (BitsSet) { |
1972 | default: return; |
1973 | case 1: RC = &AMDGPU::VGPR_32RegClass; break; |
1974 | case 2: RC = &AMDGPU::VReg_64RegClass; break; |
1975 | case 3: RC = &AMDGPU::VReg_96RegClass; break; |
1976 | } |
1977 | |
1978 | unsigned NewOpcode = TII->getMaskedMIMGOp(MI->getOpcode(), BitsSet); |
1979 | MI->setDesc(TII->get(NewOpcode)); |
1980 | MRI.setRegClass(VReg, RC); |
1981 | return; |
1982 | } |
1983 | |
1984 | // Replace unused atomics with the no return version. |
1985 | int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI->getOpcode()); |
1986 | if (NoRetAtomicOp != -1) { |
1987 | if (!Node->hasAnyUseOfValue(0)) { |
1988 | MI->setDesc(TII->get(NoRetAtomicOp)); |
1989 | MI->RemoveOperand(0); |
1990 | } |
1991 | |
1992 | return; |
1993 | } |
1994 | } |
1995 | |
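| /// \brief Materialize a 32-bit immediate into an SGPR with S_MOV_B32.
| /// (Annotation added; not in the original source.)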
1996 | static SDValue buildSMovImm32(SelectionDAG &DAG, SDLoc DL, uint64_t Val) { |
1997 | SDValue K = DAG.getTargetConstant(Val, MVT::i32); |
1998 | return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0); |
1999 | } |
2000 | |
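| /// \brief Wrap a 64-bit pointer into a 128-bit resource descriptor, placing
| /// the pointer in the low two dwords and the default data format in the high
| /// two. (Annotation added; not in the original source.)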
2001 | MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG, |
2002 | SDLoc DL, |
2003 | SDValue Ptr) const { |
2004 | const SIInstrInfo *TII = |
2005 | static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo()); |
2006 | #if 1 |
2007 | // XXX - Workaround for moveToVALU not handling different register class |
2008 | // inserts for REG_SEQUENCE. |
2009 | |
2010 | // Build the half of the subregister with the constants. |
2011 | const SDValue Ops0[] = { |
2012 | DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, MVT::i32), |
2013 | buildSMovImm32(DAG, DL, 0), |
2014 | DAG.getTargetConstant(AMDGPU::sub0, MVT::i32), |
2015 | buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32), |
2016 | DAG.getTargetConstant(AMDGPU::sub1, MVT::i32) |
2017 | }; |
2018 | |
2019 | SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, |
2020 | MVT::v2i32, Ops0), 0); |
2021 | |
2022 | // Combine the constants and the pointer. |
2023 | const SDValue Ops1[] = { |
2024 | DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, MVT::i32), |
2025 | Ptr, |
2026 | DAG.getTargetConstant(AMDGPU::sub0_sub1, MVT::i32), |
2027 | SubRegHi, |
2028 | DAG.getTargetConstant(AMDGPU::sub2_sub3, MVT::i32) |
2029 | }; |
2030 | |
2031 | return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1); |
2032 | #else |
2033 | const SDValue Ops[] = { |
2034 | DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, MVT::i32), |
2035 | Ptr, |
2036 | DAG.getTargetConstant(AMDGPU::sub0_sub1, MVT::i32), |
2037 | buildSMovImm32(DAG, DL, 0), |
2038 | DAG.getTargetConstant(AMDGPU::sub2, MVT::i32), |
2039 | buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32),
2040 | DAG.getTargetConstant(AMDGPU::sub3, MVT::i32) |
2041 | }; |
2042 | |
2043 | return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops); |
2044 | |
2045 | #endif |
2046 | } |
2047 | |
2048 | /// \brief Return a resource descriptor with the 'Add TID' bit enabled |
2049 | /// The TID (Thread ID) is multiplied by the stride value (bits [61:48]
2050 | /// of the resource descriptor) to create an offset, which is added to the
2051 | /// resource pointer.
2052 | MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, |
2053 | SDLoc DL, |
2054 | SDValue Ptr, |
2055 | uint32_t RsrcDword1, |
2056 | uint64_t RsrcDword2And3) const { |
2057 | SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr); |
2058 | SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr); |
2059 | if (RsrcDword1) { |
2060 | PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi, |
2061 | DAG.getConstant(RsrcDword1, MVT::i32)), 0); |
2062 | } |
2063 | |
2064 | SDValue DataLo = buildSMovImm32(DAG, DL, |
2065 | RsrcDword2And3 & UINT64_C(0xFFFFFFFF));
2066 | SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32); |
2067 | |
2068 | const SDValue Ops[] = { |
2069 | DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, MVT::i32), |
2070 | PtrLo, |
2071 | DAG.getTargetConstant(AMDGPU::sub0, MVT::i32), |
2072 | PtrHi, |
2073 | DAG.getTargetConstant(AMDGPU::sub1, MVT::i32), |
2074 | DataLo, |
2075 | DAG.getTargetConstant(AMDGPU::sub2, MVT::i32), |
2076 | DataHi, |
2077 | DAG.getTargetConstant(AMDGPU::sub3, MVT::i32) |
2078 | }; |
2079 | |
2080 | return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops); |
2081 | } |
2082 | |
2083 | MachineSDNode *SITargetLowering::buildScratchRSRC(SelectionDAG &DAG, |
2084 | SDLoc DL, |
2085 | SDValue Ptr) const { |
2086 | const SIInstrInfo *TII = |
2087 | static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo()); |
2088 | uint64_t Rsrc = TII->getDefaultRsrcDataFormat() | AMDGPU::RSRC_TID_ENABLE | |
2089 | 0xffffffff; // Size |
2090 | |
2091 | return buildRSRC(DAG, DL, Ptr, 0, Rsrc); |
2092 | } |
2093 | |
2094 | MachineSDNode *SITargetLowering::AdjustRegClass(MachineSDNode *N, |
2095 | SelectionDAG &DAG) const { |
2096 | |
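| // Annotation (not in the original source): SMRD loads need their base
| // pointer in SGPRs. If the pointer doesn't fit in an SReg_64, rewrite the
| // load as the equivalent MUBUF _ADDR64 access below.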
2097 | SDLoc DL(N); |
2098 | unsigned NewOpcode = N->getMachineOpcode(); |
2099 | |
2100 | switch (N->getMachineOpcode()) { |
2101 | default: return N; |
2102 | case AMDGPU::S_LOAD_DWORD_IMM: |
2103 | NewOpcode = AMDGPU::BUFFER_LOAD_DWORD_ADDR64; |
2104 | // Fall-through |
2105 | case AMDGPU::S_LOAD_DWORDX2_SGPR: |
2106 | if (NewOpcode == N->getMachineOpcode()) { |
2107 | NewOpcode = AMDGPU::BUFFER_LOAD_DWORDX2_ADDR64; |
2108 | } |
2109 | // Fall-through |
2110 | case AMDGPU::S_LOAD_DWORDX4_IMM: |
2111 | case AMDGPU::S_LOAD_DWORDX4_SGPR: { |
2112 | if (NewOpcode == N->getMachineOpcode()) { |
2113 | NewOpcode = AMDGPU::BUFFER_LOAD_DWORDX4_ADDR64; |
2114 | } |
2115 | if (fitsRegClass(DAG, N->getOperand(0), AMDGPU::SReg_64RegClassID)) { |
2116 | return N; |
2117 | } |
2118 | ConstantSDNode *Offset = cast<ConstantSDNode>(N->getOperand(1)); |
2119 | |
2120 | const SDValue Zero64 = DAG.getTargetConstant(0, MVT::i64); |
2121 | SDValue Ptr(DAG.getMachineNode(AMDGPU::S_MOV_B64, DL, MVT::i64, Zero64), 0); |
2122 | MachineSDNode *RSrc = wrapAddr64Rsrc(DAG, DL, Ptr); |
2123 | |
2124 | SmallVector<SDValue, 8> Ops; |
2125 | Ops.push_back(SDValue(RSrc, 0)); |
2126 | Ops.push_back(N->getOperand(0)); |
2127 | Ops.push_back(DAG.getTargetConstant(0, MVT::i32)); // soffset |
2128 | |
2129 | // The immediate offset is in dwords on SI and in bytes on VI. |
2130 | if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) |
2131 | Ops.push_back(DAG.getTargetConstant(Offset->getSExtValue(), MVT::i32)); |
2132 | else |
2133 | Ops.push_back(DAG.getTargetConstant(Offset->getSExtValue() << 2, MVT::i32)); |
2134 | |
2135 | // Copy remaining operands so we keep any chain and glue nodes that follow |
2136 | // the normal operands. |
2137 | for (unsigned I = 2, E = N->getNumOperands(); I != E; ++I) |
2138 | Ops.push_back(N->getOperand(I)); |
2139 | |
2140 | return DAG.getMachineNode(NewOpcode, DL, N->getVTList(), Ops); |
2141 | } |
2142 | } |
2143 | } |
2144 | |
2145 | SDValue SITargetLowering::CreateLiveInRegister(SelectionDAG &DAG, |
2146 | const TargetRegisterClass *RC, |
2147 | unsigned Reg, EVT VT) const { |
2148 | SDValue VReg = AMDGPUTargetLowering::CreateLiveInRegister(DAG, RC, Reg, VT); |
2149 | |
2150 | return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(DAG.getEntryNode()), |
2151 | cast<RegisterSDNode>(VReg)->getReg(), VT); |
2152 | } |