File: | build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/lib/Target/AMDGPU/SIISelLowering.cpp |
Warning: | line 11403, column 52: Called C++ object pointer is null |
1 | //===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===// | |||
2 | // | |||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
4 | // See https://llvm.org/LICENSE.txt for license information. | |||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
6 | // | |||
7 | //===----------------------------------------------------------------------===// | |||
8 | // | |||
9 | /// \file | |||
10 | /// Custom DAG lowering for SI | |||
11 | // | |||
12 | //===----------------------------------------------------------------------===// | |||
13 | ||||
14 | #include "SIISelLowering.h" | |||
15 | #include "AMDGPU.h" | |||
16 | #include "AMDGPUInstrInfo.h" | |||
17 | #include "AMDGPUTargetMachine.h" | |||
18 | #include "SIMachineFunctionInfo.h" | |||
19 | #include "SIRegisterInfo.h" | |||
20 | #include "llvm/ADT/FloatingPointMode.h" | |||
21 | #include "llvm/ADT/Statistic.h" | |||
22 | #include "llvm/Analysis/LegacyDivergenceAnalysis.h" | |||
23 | #include "llvm/Analysis/OptimizationRemarkEmitter.h" | |||
24 | #include "llvm/BinaryFormat/ELF.h" | |||
25 | #include "llvm/CodeGen/Analysis.h" | |||
26 | #include "llvm/CodeGen/FunctionLoweringInfo.h" | |||
27 | #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h" | |||
28 | #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h" | |||
29 | #include "llvm/CodeGen/MachineFrameInfo.h" | |||
30 | #include "llvm/CodeGen/MachineFunction.h" | |||
31 | #include "llvm/CodeGen/MachineLoopInfo.h" | |||
32 | #include "llvm/IR/DiagnosticInfo.h" | |||
33 | #include "llvm/IR/IntrinsicInst.h" | |||
34 | #include "llvm/IR/IntrinsicsAMDGPU.h" | |||
35 | #include "llvm/IR/IntrinsicsR600.h" | |||
36 | #include "llvm/Support/CommandLine.h" | |||
37 | #include "llvm/Support/KnownBits.h" | |||
38 | ||||
39 | using namespace llvm; | |||
40 | ||||
41 | #define DEBUG_TYPE "si-lower" | |||
42 | ||||
43 | STATISTIC(NumTailCalls, "Number of tail calls"); | |||
44 | ||||
45 | static cl::opt<bool> DisableLoopAlignment( | |||
46 | "amdgpu-disable-loop-alignment", | |||
47 | cl::desc("Do not align and prefetch loops"), | |||
48 | cl::init(false)); | |||
49 | ||||
50 | static cl::opt<bool> UseDivergentRegisterIndexing( | |||
51 | "amdgpu-use-divergent-register-indexing", | |||
52 | cl::Hidden, | |||
53 | cl::desc("Use indirect register addressing for divergent indexes"), | |||
54 | cl::init(false)); | |||
55 | ||||
56 | static bool hasFP32Denormals(const MachineFunction &MF) { | |||
57 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); | |||
58 | return Info->getMode().allFP32Denormals(); | |||
59 | } | |||
60 | ||||
61 | static bool hasFP64FP16Denormals(const MachineFunction &MF) { | |||
62 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); | |||
63 | return Info->getMode().allFP64FP16Denormals(); | |||
64 | } | |||
65 | ||||
66 | static unsigned findFirstFreeSGPR(CCState &CCInfo) { | |||
67 | unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs(); | |||
68 | for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) { | |||
69 | if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) { | |||
70 | return AMDGPU::SGPR0 + Reg; | |||
71 | } | |||
72 | } | |||
73 | llvm_unreachable("Cannot allocate sgpr")::llvm::llvm_unreachable_internal("Cannot allocate sgpr", "llvm/lib/Target/AMDGPU/SIISelLowering.cpp" , 73); | |||
74 | } | |||
75 | ||||
76 | SITargetLowering::SITargetLowering(const TargetMachine &TM, | |||
77 | const GCNSubtarget &STI) | |||
78 | : AMDGPUTargetLowering(TM, STI), | |||
79 | Subtarget(&STI) { | |||
80 | addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass); | |||
81 | addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass); | |||
82 | ||||
83 | addRegisterClass(MVT::i32, &AMDGPU::SReg_32RegClass); | |||
84 | addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass); | |||
85 | ||||
86 | addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass); | |||
87 | ||||
88 | const SIRegisterInfo *TRI = STI.getRegisterInfo(); | |||
89 | const TargetRegisterClass *V64RegClass = TRI->getVGPR64Class(); | |||
90 | ||||
91 | addRegisterClass(MVT::f64, V64RegClass); | |||
92 | addRegisterClass(MVT::v2f32, V64RegClass); | |||
93 | ||||
94 | addRegisterClass(MVT::v3i32, &AMDGPU::SGPR_96RegClass); | |||
95 | addRegisterClass(MVT::v3f32, TRI->getVGPRClassForBitWidth(96)); | |||
96 | ||||
97 | addRegisterClass(MVT::v2i64, &AMDGPU::SGPR_128RegClass); | |||
98 | addRegisterClass(MVT::v2f64, &AMDGPU::SGPR_128RegClass); | |||
99 | ||||
100 | addRegisterClass(MVT::v4i32, &AMDGPU::SGPR_128RegClass); | |||
101 | addRegisterClass(MVT::v4f32, TRI->getVGPRClassForBitWidth(128)); | |||
102 | ||||
103 | addRegisterClass(MVT::v5i32, &AMDGPU::SGPR_160RegClass); | |||
104 | addRegisterClass(MVT::v5f32, TRI->getVGPRClassForBitWidth(160)); | |||
105 | ||||
106 | addRegisterClass(MVT::v6i32, &AMDGPU::SGPR_192RegClass); | |||
107 | addRegisterClass(MVT::v6f32, TRI->getVGPRClassForBitWidth(192)); | |||
108 | ||||
109 | addRegisterClass(MVT::v3i64, &AMDGPU::SGPR_192RegClass); | |||
110 | addRegisterClass(MVT::v3f64, TRI->getVGPRClassForBitWidth(192)); | |||
111 | ||||
112 | addRegisterClass(MVT::v7i32, &AMDGPU::SGPR_224RegClass); | |||
113 | addRegisterClass(MVT::v7f32, TRI->getVGPRClassForBitWidth(224)); | |||
114 | ||||
115 | addRegisterClass(MVT::v8i32, &AMDGPU::SGPR_256RegClass); | |||
116 | addRegisterClass(MVT::v8f32, TRI->getVGPRClassForBitWidth(256)); | |||
117 | ||||
118 | addRegisterClass(MVT::v4i64, &AMDGPU::SGPR_256RegClass); | |||
119 | addRegisterClass(MVT::v4f64, TRI->getVGPRClassForBitWidth(256)); | |||
120 | ||||
121 | addRegisterClass(MVT::v16i32, &AMDGPU::SGPR_512RegClass); | |||
122 | addRegisterClass(MVT::v16f32, TRI->getVGPRClassForBitWidth(512)); | |||
123 | ||||
124 | addRegisterClass(MVT::v8i64, &AMDGPU::SGPR_512RegClass); | |||
125 | addRegisterClass(MVT::v8f64, TRI->getVGPRClassForBitWidth(512)); | |||
126 | ||||
127 | addRegisterClass(MVT::v16i64, &AMDGPU::SGPR_1024RegClass); | |||
128 | addRegisterClass(MVT::v16f64, TRI->getVGPRClassForBitWidth(1024)); | |||
129 | ||||
130 | if (Subtarget->has16BitInsts()) { | |||
131 | addRegisterClass(MVT::i16, &AMDGPU::SReg_32RegClass); | |||
132 | addRegisterClass(MVT::f16, &AMDGPU::SReg_32RegClass); | |||
133 | ||||
134 | // Unless there are also VOP3P operations, these operations are not really legal. | |||
135 | addRegisterClass(MVT::v2i16, &AMDGPU::SReg_32RegClass); | |||
136 | addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32RegClass); | |||
137 | addRegisterClass(MVT::v4i16, &AMDGPU::SReg_64RegClass); | |||
138 | addRegisterClass(MVT::v4f16, &AMDGPU::SReg_64RegClass); | |||
139 | addRegisterClass(MVT::v8i16, &AMDGPU::SGPR_128RegClass); | |||
140 | addRegisterClass(MVT::v8f16, &AMDGPU::SGPR_128RegClass); | |||
141 | } | |||
142 | ||||
143 | addRegisterClass(MVT::v32i32, &AMDGPU::VReg_1024RegClass); | |||
144 | addRegisterClass(MVT::v32f32, TRI->getVGPRClassForBitWidth(1024)); | |||
145 | ||||
146 | computeRegisterProperties(Subtarget->getRegisterInfo()); | |||
147 | ||||
148 | // The boolean content concept here is too inflexible. Compares only ever | |||
149 | // really produce a 1-bit result. Any copy/extend from these will turn into a | |||
150 | // select, and zext/1 or sext/-1 are equally cheap. Arbitrarily choose 0/1, as | |||
151 | // it's what most targets use. | |||
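 | // For illustration (added note, not in the original source): with | |||
 | // ZeroOrOneBooleanContent, a pattern such as (zext (setcc a, b, cc)) lowers | |||
 | // to (select (setcc a, b, cc), 1, 0), so the extension can stay a single | |||
 | // conditional move (v_cndmask_b32) rather than a real zero-extension. | |||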
152 | setBooleanContents(ZeroOrOneBooleanContent); | |||
153 | setBooleanVectorContents(ZeroOrOneBooleanContent); | |||
154 | ||||
155 | // We need to custom lower vector stores from local memory | |||
156 | setOperationAction(ISD::LOAD, MVT::v2i32, Custom); | |||
157 | setOperationAction(ISD::LOAD, MVT::v3i32, Custom); | |||
158 | setOperationAction(ISD::LOAD, MVT::v4i32, Custom); | |||
159 | setOperationAction(ISD::LOAD, MVT::v5i32, Custom); | |||
160 | setOperationAction(ISD::LOAD, MVT::v6i32, Custom); | |||
161 | setOperationAction(ISD::LOAD, MVT::v7i32, Custom); | |||
162 | setOperationAction(ISD::LOAD, MVT::v8i32, Custom); | |||
163 | setOperationAction(ISD::LOAD, MVT::v16i32, Custom); | |||
164 | setOperationAction(ISD::LOAD, MVT::i1, Custom); | |||
165 | setOperationAction(ISD::LOAD, MVT::v32i32, Custom); | |||
166 | ||||
167 | setOperationAction(ISD::STORE, MVT::v2i32, Custom); | |||
168 | setOperationAction(ISD::STORE, MVT::v3i32, Custom); | |||
169 | setOperationAction(ISD::STORE, MVT::v4i32, Custom); | |||
170 | setOperationAction(ISD::STORE, MVT::v5i32, Custom); | |||
171 | setOperationAction(ISD::STORE, MVT::v6i32, Custom); | |||
172 | setOperationAction(ISD::STORE, MVT::v7i32, Custom); | |||
173 | setOperationAction(ISD::STORE, MVT::v8i32, Custom); | |||
174 | setOperationAction(ISD::STORE, MVT::v16i32, Custom); | |||
175 | setOperationAction(ISD::STORE, MVT::i1, Custom); | |||
176 | setOperationAction(ISD::STORE, MVT::v32i32, Custom); | |||
177 | ||||
178 | setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand); | |||
179 | setTruncStoreAction(MVT::v3i32, MVT::v3i16, Expand); | |||
180 | setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand); | |||
181 | setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand); | |||
182 | setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand); | |||
183 | setTruncStoreAction(MVT::v32i32, MVT::v32i16, Expand); | |||
184 | setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand); | |||
185 | setTruncStoreAction(MVT::v4i32, MVT::v4i8, Expand); | |||
186 | setTruncStoreAction(MVT::v8i32, MVT::v8i8, Expand); | |||
187 | setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand); | |||
188 | setTruncStoreAction(MVT::v32i32, MVT::v32i8, Expand); | |||
189 | setTruncStoreAction(MVT::v2i16, MVT::v2i8, Expand); | |||
190 | setTruncStoreAction(MVT::v4i16, MVT::v4i8, Expand); | |||
191 | setTruncStoreAction(MVT::v8i16, MVT::v8i8, Expand); | |||
192 | setTruncStoreAction(MVT::v16i16, MVT::v16i8, Expand); | |||
193 | setTruncStoreAction(MVT::v32i16, MVT::v32i8, Expand); | |||
194 | ||||
195 | setTruncStoreAction(MVT::v3i64, MVT::v3i16, Expand); | |||
196 | setTruncStoreAction(MVT::v3i64, MVT::v3i32, Expand); | |||
197 | setTruncStoreAction(MVT::v4i64, MVT::v4i8, Expand); | |||
198 | setTruncStoreAction(MVT::v8i64, MVT::v8i8, Expand); | |||
199 | setTruncStoreAction(MVT::v8i64, MVT::v8i16, Expand); | |||
200 | setTruncStoreAction(MVT::v8i64, MVT::v8i32, Expand); | |||
201 | setTruncStoreAction(MVT::v16i64, MVT::v16i32, Expand); | |||
202 | ||||
203 | setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); | |||
204 | setOperationAction(ISD::GlobalAddress, MVT::i64, Custom); | |||
205 | ||||
206 | setOperationAction(ISD::SELECT, MVT::i1, Promote); | |||
207 | setOperationAction(ISD::SELECT, MVT::i64, Custom); | |||
208 | setOperationAction(ISD::SELECT, MVT::f64, Promote); | |||
209 | AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64); | |||
210 | ||||
211 | setOperationAction(ISD::SELECT_CC, MVT::f32, Expand); | |||
212 | setOperationAction(ISD::SELECT_CC, MVT::i32, Expand); | |||
213 | setOperationAction(ISD::SELECT_CC, MVT::i64, Expand); | |||
214 | setOperationAction(ISD::SELECT_CC, MVT::f64, Expand); | |||
215 | setOperationAction(ISD::SELECT_CC, MVT::i1, Expand); | |||
216 | ||||
217 | setOperationAction(ISD::SETCC, MVT::i1, Promote); | |||
218 | setOperationAction(ISD::SETCC, MVT::v2i1, Expand); | |||
219 | setOperationAction(ISD::SETCC, MVT::v4i1, Expand); | |||
220 | AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32); | |||
221 | ||||
222 | setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand); | |||
223 | setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand); | |||
224 | setOperationAction(ISD::TRUNCATE, MVT::v3i32, Expand); | |||
225 | setOperationAction(ISD::FP_ROUND, MVT::v3f32, Expand); | |||
226 | setOperationAction(ISD::TRUNCATE, MVT::v4i32, Expand); | |||
227 | setOperationAction(ISD::FP_ROUND, MVT::v4f32, Expand); | |||
228 | setOperationAction(ISD::TRUNCATE, MVT::v5i32, Expand); | |||
229 | setOperationAction(ISD::FP_ROUND, MVT::v5f32, Expand); | |||
230 | setOperationAction(ISD::TRUNCATE, MVT::v6i32, Expand); | |||
231 | setOperationAction(ISD::FP_ROUND, MVT::v6f32, Expand); | |||
232 | setOperationAction(ISD::TRUNCATE, MVT::v7i32, Expand); | |||
233 | setOperationAction(ISD::FP_ROUND, MVT::v7f32, Expand); | |||
234 | setOperationAction(ISD::TRUNCATE, MVT::v8i32, Expand); | |||
235 | setOperationAction(ISD::FP_ROUND, MVT::v8f32, Expand); | |||
236 | setOperationAction(ISD::TRUNCATE, MVT::v16i32, Expand); | |||
237 | setOperationAction(ISD::FP_ROUND, MVT::v16f32, Expand); | |||
238 | ||||
239 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom); | |||
240 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom); | |||
241 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom); | |||
242 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom); | |||
243 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom); | |||
244 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v3i16, Custom); | |||
245 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom); | |||
246 | setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom); | |||
247 | ||||
248 | setOperationAction(ISD::BRCOND, MVT::Other, Custom); | |||
249 | setOperationAction(ISD::BR_CC, MVT::i1, Expand); | |||
250 | setOperationAction(ISD::BR_CC, MVT::i32, Expand); | |||
251 | setOperationAction(ISD::BR_CC, MVT::i64, Expand); | |||
252 | setOperationAction(ISD::BR_CC, MVT::f32, Expand); | |||
253 | setOperationAction(ISD::BR_CC, MVT::f64, Expand); | |||
254 | ||||
255 | setOperationAction(ISD::UADDO, MVT::i32, Legal); | |||
256 | setOperationAction(ISD::USUBO, MVT::i32, Legal); | |||
257 | ||||
258 | setOperationAction(ISD::ADDCARRY, MVT::i32, Legal); | |||
259 | setOperationAction(ISD::SUBCARRY, MVT::i32, Legal); | |||
260 | ||||
261 | setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand); | |||
262 | setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand); | |||
263 | setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand); | |||
264 | ||||
265 | #if 0 | |||
266 | setOperationAction(ISD::ADDCARRY, MVT::i64, Legal); | |||
267 | setOperationAction(ISD::SUBCARRY, MVT::i64, Legal); | |||
268 | #endif | |||
269 | ||||
270 | // We only support LOAD/STORE and vector manipulation ops for vectors | |||
271 | // with > 4 elements. | |||
272 | for (MVT VT : { MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32, | |||
273 | MVT::v2i64, MVT::v2f64, MVT::v4i16, MVT::v4f16, | |||
274 | MVT::v3i64, MVT::v3f64, MVT::v6i32, MVT::v6f32, | |||
275 | MVT::v4i64, MVT::v4f64, MVT::v8i64, MVT::v8f64, | |||
276 | MVT::v8i16, MVT::v8f16, MVT::v16i64, MVT::v16f64, | |||
277 | MVT::v32i32, MVT::v32f32 }) { | |||
278 | for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) { | |||
279 | switch (Op) { | |||
280 | case ISD::LOAD: | |||
281 | case ISD::STORE: | |||
282 | case ISD::BUILD_VECTOR: | |||
283 | case ISD::BITCAST: | |||
284 | case ISD::EXTRACT_VECTOR_ELT: | |||
285 | case ISD::INSERT_VECTOR_ELT: | |||
286 | case ISD::EXTRACT_SUBVECTOR: | |||
287 | case ISD::SCALAR_TO_VECTOR: | |||
288 | break; | |||
289 | case ISD::INSERT_SUBVECTOR: | |||
290 | case ISD::CONCAT_VECTORS: | |||
291 | setOperationAction(Op, VT, Custom); | |||
292 | break; | |||
293 | default: | |||
294 | setOperationAction(Op, VT, Expand); | |||
295 | break; | |||
296 | } | |||
297 | } | |||
298 | } | |||
299 | ||||
300 | setOperationAction(ISD::FP_EXTEND, MVT::v4f32, Expand); | |||
301 | ||||
302 | // TODO: For dynamic 64-bit vector inserts/extracts, should emit a pseudo that | |||
303 | // is expanded to avoid having two separate loops in case the index is a VGPR. | |||
304 | ||||
305 | // Most operations are naturally 32-bit vector operations. We only support | |||
306 | // load and store of i64 vectors, so promote v2i64 vector operations to v4i32. | |||
307 | for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) { | |||
308 | setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote); | |||
309 | AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32); | |||
310 | ||||
311 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote); | |||
312 | AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32); | |||
313 | ||||
314 | setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote); | |||
315 | AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32); | |||
316 | ||||
317 | setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote); | |||
318 | AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32); | |||
319 | } | |||
320 | ||||
321 | for (MVT Vec64 : { MVT::v3i64, MVT::v3f64 }) { | |||
322 | setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote); | |||
323 | AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v6i32); | |||
324 | ||||
325 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote); | |||
326 | AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v6i32); | |||
327 | ||||
328 | setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote); | |||
329 | AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v6i32); | |||
330 | ||||
331 | setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote); | |||
332 | AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v6i32); | |||
333 | } | |||
334 | ||||
335 | for (MVT Vec64 : { MVT::v4i64, MVT::v4f64 }) { | |||
336 | setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote); | |||
337 | AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v8i32); | |||
338 | ||||
339 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote); | |||
340 | AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v8i32); | |||
341 | ||||
342 | setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote); | |||
343 | AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v8i32); | |||
344 | ||||
345 | setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote); | |||
346 | AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v8i32); | |||
347 | } | |||
348 | ||||
349 | for (MVT Vec64 : { MVT::v8i64, MVT::v8f64 }) { | |||
350 | setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote); | |||
351 | AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v16i32); | |||
352 | ||||
353 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote); | |||
354 | AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v16i32); | |||
355 | ||||
356 | setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote); | |||
357 | AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v16i32); | |||
358 | ||||
359 | setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote); | |||
360 | AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v16i32); | |||
361 | } | |||
362 | ||||
363 | for (MVT Vec64 : { MVT::v16i64, MVT::v16f64 }) { | |||
364 | setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote); | |||
365 | AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v32i32); | |||
366 | ||||
367 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote); | |||
368 | AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v32i32); | |||
369 | ||||
370 | setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote); | |||
371 | AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v32i32); | |||
372 | ||||
373 | setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote); | |||
374 | AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v32i32); | |||
375 | } | |||
376 | ||||
377 | setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand); | |||
378 | setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand); | |||
379 | setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand); | |||
380 | setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand); | |||
381 | ||||
382 | setOperationAction(ISD::BUILD_VECTOR, MVT::v4f16, Custom); | |||
383 | setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom); | |||
384 | ||||
385 | // Avoid stack access for these. | |||
386 | // TODO: Generalize to more vector types. | |||
387 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom); | |||
388 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom); | |||
389 | setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i16, Custom); | |||
390 | setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f16, Custom); | |||
391 | ||||
392 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i8, Custom); | |||
393 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i8, Custom); | |||
394 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i8, Custom); | |||
395 | setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i8, Custom); | |||
396 | setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i8, Custom); | |||
397 | setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i8, Custom); | |||
398 | ||||
399 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i16, Custom); | |||
400 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f16, Custom); | |||
401 | setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom); | |||
402 | setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom); | |||
403 | ||||
404 | // Deal with vec3 vector operations when widened to vec4. | |||
405 | setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3i32, Custom); | |||
406 | setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3f32, Custom); | |||
407 | setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4i32, Custom); | |||
408 | setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4f32, Custom); | |||
409 | ||||
410 | // Deal with vec5/6/7 vector operations when widened to vec8. | |||
411 | setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5i32, Custom); | |||
412 | setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5f32, Custom); | |||
413 | setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v6i32, Custom); | |||
414 | setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v6f32, Custom); | |||
415 | setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v7i32, Custom); | |||
416 | setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v7f32, Custom); | |||
417 | setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i32, Custom); | |||
418 | setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8f32, Custom); | |||
419 | ||||
420 | // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling, | |||
421 | // and output demarshalling | |||
422 | setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom); | |||
423 | setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom); | |||
424 | ||||
425 | // We can't return success/failure, only the old value; | |||
426 | // let LLVM add the comparison. | |||
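 | // For illustration (added sketch): expanding ATOMIC_CMP_SWAP_WITH_SUCCESS | |||
 | // produces roughly | |||
 | //   old     = ATOMIC_CMP_SWAP ptr, cmp, new | |||
 | //   success = SETCC old, cmp, seteq | |||
 | // so only the old-value form below needs target support. | |||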
427 | setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand); | |||
428 | setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand); | |||
429 | ||||
430 | if (Subtarget->hasFlatAddressSpace()) { | |||
431 | setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom); | |||
432 | setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom); | |||
433 | } | |||
434 | ||||
435 | setOperationAction(ISD::BITREVERSE, MVT::i32, Legal); | |||
436 | setOperationAction(ISD::BITREVERSE, MVT::i64, Legal); | |||
437 | ||||
438 | // FIXME: This should be narrowed to i32, but that only happens if i64 is | |||
439 | // illegal. | |||
440 | // FIXME: Should lower sub-i32 bswaps to bit-ops without v_perm_b32. | |||
441 | setOperationAction(ISD::BSWAP, MVT::i64, Legal); | |||
442 | setOperationAction(ISD::BSWAP, MVT::i32, Legal); | |||
443 | ||||
444 | // This is s_memtime on SI and s_memrealtime on VI. | |||
445 | setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal); | |||
446 | setOperationAction(ISD::TRAP, MVT::Other, Custom); | |||
447 | setOperationAction(ISD::DEBUGTRAP, MVT::Other, Custom); | |||
448 | ||||
449 | if (Subtarget->has16BitInsts()) { | |||
450 | setOperationAction(ISD::FPOW, MVT::f16, Promote); | |||
451 | setOperationAction(ISD::FPOWI, MVT::f16, Promote); | |||
452 | setOperationAction(ISD::FLOG, MVT::f16, Custom); | |||
453 | setOperationAction(ISD::FEXP, MVT::f16, Custom); | |||
454 | setOperationAction(ISD::FLOG10, MVT::f16, Custom); | |||
455 | } | |||
456 | ||||
457 | if (Subtarget->hasMadMacF32Insts()) | |||
458 | setOperationAction(ISD::FMAD, MVT::f32, Legal); | |||
459 | ||||
460 | if (!Subtarget->hasBFI()) { | |||
461 | // fcopysign can be done in a single instruction with BFI. | |||
462 | setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand); | |||
463 | setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); | |||
464 | } | |||
465 | ||||
466 | if (!Subtarget->hasBCNT(32)) | |||
467 | setOperationAction(ISD::CTPOP, MVT::i32, Expand); | |||
468 | ||||
469 | if (!Subtarget->hasBCNT(64)) | |||
470 | setOperationAction(ISD::CTPOP, MVT::i64, Expand); | |||
471 | ||||
472 | if (Subtarget->hasFFBH()) { | |||
473 | setOperationAction(ISD::CTLZ, MVT::i32, Custom); | |||
474 | setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom); | |||
475 | } | |||
476 | ||||
477 | if (Subtarget->hasFFBL()) { | |||
478 | setOperationAction(ISD::CTTZ, MVT::i32, Custom); | |||
479 | setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom); | |||
480 | } | |||
481 | ||||
482 | // We only really have 32-bit BFE instructions (and 16-bit on VI). | |||
483 | // | |||
484 | // On SI+ there are 64-bit BFEs, but they are scalar only and there isn't any | |||
485 | // effort to match them now. We want this to be false for i64 cases when the | |||
486 | // extraction isn't restricted to the upper or lower half. Ideally we would | |||
487 | // have some pass reduce 64-bit extracts to 32-bit if possible. Extracts that | |||
488 | // span the midpoint are probably relatively rare, so don't worry about them | |||
489 | // for now. | |||
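 | // For illustration (added sketch; encoding as per the ISA docs): extracting | |||
 | // 8 bits at offset 8, | |||
 | //   (srl (and x, 0xff00), 8)  ->  s_bfe_u32 x, (8 | (8 << 16)) | |||
 | // where src1 packs the offset in bits [4:0] and the width in bits [22:16]. | |||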
490 | if (Subtarget->hasBFE()) | |||
491 | setHasExtractBitsInsn(true); | |||
492 | ||||
493 | // Clamp modifier on add/sub | |||
494 | if (Subtarget->hasIntClamp()) { | |||
495 | setOperationAction(ISD::UADDSAT, MVT::i32, Legal); | |||
496 | setOperationAction(ISD::USUBSAT, MVT::i32, Legal); | |||
497 | } | |||
498 | ||||
499 | if (Subtarget->hasAddNoCarry()) { | |||
500 | setOperationAction(ISD::SADDSAT, MVT::i16, Legal); | |||
501 | setOperationAction(ISD::SSUBSAT, MVT::i16, Legal); | |||
502 | setOperationAction(ISD::SADDSAT, MVT::i32, Legal); | |||
503 | setOperationAction(ISD::SSUBSAT, MVT::i32, Legal); | |||
504 | } | |||
505 | ||||
506 | setOperationAction(ISD::FMINNUM, MVT::f32, Custom); | |||
507 | setOperationAction(ISD::FMAXNUM, MVT::f32, Custom); | |||
508 | setOperationAction(ISD::FMINNUM, MVT::f64, Custom); | |||
509 | setOperationAction(ISD::FMAXNUM, MVT::f64, Custom); | |||
510 | ||||
511 | ||||
512 | // These are really only legal for ieee_mode functions. We should be avoiding | |||
513 | // them for functions that don't have ieee_mode enabled, so just say they are | |||
514 | // legal. | |||
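 | // For illustration (added note): in IEEE mode, v_min_f32/v_max_f32 quiet | |||
 | // signaling NaNs, matching the IEEE-754 minNum/maxNum semantics of | |||
 | // FMINNUM_IEEE/FMAXNUM_IEEE; outside IEEE mode that guarantee is lost, | |||
 | // hence the caveat above. | |||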
515 | setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal); | |||
516 | setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal); | |||
517 | setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal); | |||
518 | setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal); | |||
519 | ||||
520 | ||||
521 | if (Subtarget->haveRoundOpsF64()) { | |||
522 | setOperationAction(ISD::FTRUNC, MVT::f64, Legal); | |||
523 | setOperationAction(ISD::FCEIL, MVT::f64, Legal); | |||
524 | setOperationAction(ISD::FRINT, MVT::f64, Legal); | |||
525 | } else { | |||
526 | setOperationAction(ISD::FCEIL, MVT::f64, Custom); | |||
527 | setOperationAction(ISD::FTRUNC, MVT::f64, Custom); | |||
528 | setOperationAction(ISD::FRINT, MVT::f64, Custom); | |||
529 | setOperationAction(ISD::FFLOOR, MVT::f64, Custom); | |||
530 | } | |||
531 | ||||
532 | setOperationAction(ISD::FFLOOR, MVT::f64, Legal); | |||
533 | ||||
534 | setOperationAction(ISD::FSIN, MVT::f32, Custom); | |||
535 | setOperationAction(ISD::FCOS, MVT::f32, Custom); | |||
536 | setOperationAction(ISD::FDIV, MVT::f32, Custom); | |||
537 | setOperationAction(ISD::FDIV, MVT::f64, Custom); | |||
538 | ||||
539 | if (Subtarget->has16BitInsts()) { | |||
540 | setOperationAction(ISD::Constant, MVT::i16, Legal); | |||
541 | ||||
542 | setOperationAction(ISD::SMIN, MVT::i16, Legal); | |||
543 | setOperationAction(ISD::SMAX, MVT::i16, Legal); | |||
544 | ||||
545 | setOperationAction(ISD::UMIN, MVT::i16, Legal); | |||
546 | setOperationAction(ISD::UMAX, MVT::i16, Legal); | |||
547 | ||||
548 | setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Promote); | |||
549 | AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32); | |||
550 | ||||
551 | setOperationAction(ISD::ROTR, MVT::i16, Expand); | |||
552 | setOperationAction(ISD::ROTL, MVT::i16, Expand); | |||
553 | ||||
554 | setOperationAction(ISD::SDIV, MVT::i16, Promote); | |||
555 | setOperationAction(ISD::UDIV, MVT::i16, Promote); | |||
556 | setOperationAction(ISD::SREM, MVT::i16, Promote); | |||
557 | setOperationAction(ISD::UREM, MVT::i16, Promote); | |||
558 | setOperationAction(ISD::UADDSAT, MVT::i16, Legal); | |||
559 | setOperationAction(ISD::USUBSAT, MVT::i16, Legal); | |||
560 | ||||
561 | setOperationAction(ISD::BITREVERSE, MVT::i16, Promote); | |||
562 | ||||
563 | setOperationAction(ISD::CTTZ, MVT::i16, Promote); | |||
564 | setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Promote); | |||
565 | setOperationAction(ISD::CTLZ, MVT::i16, Promote); | |||
566 | setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Promote); | |||
567 | setOperationAction(ISD::CTPOP, MVT::i16, Promote); | |||
568 | ||||
569 | setOperationAction(ISD::SELECT_CC, MVT::i16, Expand); | |||
570 | ||||
571 | setOperationAction(ISD::BR_CC, MVT::i16, Expand); | |||
572 | ||||
573 | setOperationAction(ISD::LOAD, MVT::i16, Custom); | |||
574 | ||||
575 | setTruncStoreAction(MVT::i64, MVT::i16, Expand); | |||
576 | ||||
577 | setOperationAction(ISD::FP16_TO_FP, MVT::i16, Promote); | |||
578 | AddPromotedToType(ISD::FP16_TO_FP, MVT::i16, MVT::i32); | |||
579 | setOperationAction(ISD::FP_TO_FP16, MVT::i16, Promote); | |||
580 | AddPromotedToType(ISD::FP_TO_FP16, MVT::i16, MVT::i32); | |||
581 | ||||
582 | setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom); | |||
583 | setOperationAction(ISD::FP_TO_UINT, MVT::i16, Custom); | |||
584 | ||||
585 | // F16 - Constant Actions. | |||
586 | setOperationAction(ISD::ConstantFP, MVT::f16, Legal); | |||
587 | ||||
588 | // F16 - Load/Store Actions. | |||
589 | setOperationAction(ISD::LOAD, MVT::f16, Promote); | |||
590 | AddPromotedToType(ISD::LOAD, MVT::f16, MVT::i16); | |||
591 | setOperationAction(ISD::STORE, MVT::f16, Promote); | |||
592 | AddPromotedToType(ISD::STORE, MVT::f16, MVT::i16); | |||
593 | ||||
594 | // F16 - VOP1 Actions. | |||
595 | setOperationAction(ISD::FP_ROUND, MVT::f16, Custom); | |||
596 | setOperationAction(ISD::FCOS, MVT::f16, Custom); | |||
597 | setOperationAction(ISD::FSIN, MVT::f16, Custom); | |||
598 | ||||
599 | setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom); | |||
600 | setOperationAction(ISD::UINT_TO_FP, MVT::i16, Custom); | |||
601 | ||||
602 | setOperationAction(ISD::FP_TO_SINT, MVT::f16, Promote); | |||
603 | setOperationAction(ISD::FP_TO_UINT, MVT::f16, Promote); | |||
604 | setOperationAction(ISD::SINT_TO_FP, MVT::f16, Promote); | |||
605 | setOperationAction(ISD::UINT_TO_FP, MVT::f16, Promote); | |||
606 | setOperationAction(ISD::FROUND, MVT::f16, Custom); | |||
607 | setOperationAction(ISD::FPTRUNC_ROUND, MVT::f16, Custom); | |||
608 | ||||
609 | // F16 - VOP2 Actions. | |||
610 | setOperationAction(ISD::BR_CC, MVT::f16, Expand); | |||
611 | setOperationAction(ISD::SELECT_CC, MVT::f16, Expand); | |||
612 | ||||
613 | setOperationAction(ISD::FDIV, MVT::f16, Custom); | |||
614 | ||||
615 | // F16 - VOP3 Actions. | |||
616 | setOperationAction(ISD::FMA, MVT::f16, Legal); | |||
617 | if (STI.hasMadF16()) | |||
618 | setOperationAction(ISD::FMAD, MVT::f16, Legal); | |||
619 | ||||
620 | for (MVT VT : {MVT::v2i16, MVT::v2f16, MVT::v4i16, MVT::v4f16, MVT::v8i16, | |||
621 | MVT::v8f16}) { | |||
622 | for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) { | |||
623 | switch (Op) { | |||
624 | case ISD::LOAD: | |||
625 | case ISD::STORE: | |||
626 | case ISD::BUILD_VECTOR: | |||
627 | case ISD::BITCAST: | |||
628 | case ISD::EXTRACT_VECTOR_ELT: | |||
629 | case ISD::INSERT_VECTOR_ELT: | |||
630 | case ISD::INSERT_SUBVECTOR: | |||
631 | case ISD::EXTRACT_SUBVECTOR: | |||
632 | case ISD::SCALAR_TO_VECTOR: | |||
633 | break; | |||
634 | case ISD::CONCAT_VECTORS: | |||
635 | setOperationAction(Op, VT, Custom); | |||
636 | break; | |||
637 | default: | |||
638 | setOperationAction(Op, VT, Expand); | |||
639 | break; | |||
640 | } | |||
641 | } | |||
642 | } | |||
643 | ||||
644 | // v_perm_b32 can handle either of these. | |||
645 | setOperationAction(ISD::BSWAP, MVT::i16, Legal); | |||
646 | setOperationAction(ISD::BSWAP, MVT::v2i16, Legal); | |||
647 | setOperationAction(ISD::BSWAP, MVT::v4i16, Custom); | |||
648 | ||||
649 | // XXX - Do these do anything? Vector constants turn into build_vector. | |||
650 | setOperationAction(ISD::Constant, MVT::v2i16, Legal); | |||
651 | setOperationAction(ISD::ConstantFP, MVT::v2f16, Legal); | |||
652 | ||||
653 | setOperationAction(ISD::UNDEF, MVT::v2i16, Legal); | |||
654 | setOperationAction(ISD::UNDEF, MVT::v2f16, Legal); | |||
655 | ||||
656 | setOperationAction(ISD::STORE, MVT::v2i16, Promote); | |||
657 | AddPromotedToType(ISD::STORE, MVT::v2i16, MVT::i32); | |||
658 | setOperationAction(ISD::STORE, MVT::v2f16, Promote); | |||
659 | AddPromotedToType(ISD::STORE, MVT::v2f16, MVT::i32); | |||
660 | ||||
661 | setOperationAction(ISD::LOAD, MVT::v2i16, Promote); | |||
662 | AddPromotedToType(ISD::LOAD, MVT::v2i16, MVT::i32); | |||
663 | setOperationAction(ISD::LOAD, MVT::v2f16, Promote); | |||
664 | AddPromotedToType(ISD::LOAD, MVT::v2f16, MVT::i32); | |||
665 | ||||
666 | setOperationAction(ISD::AND, MVT::v2i16, Promote); | |||
667 | AddPromotedToType(ISD::AND, MVT::v2i16, MVT::i32); | |||
668 | setOperationAction(ISD::OR, MVT::v2i16, Promote); | |||
669 | AddPromotedToType(ISD::OR, MVT::v2i16, MVT::i32); | |||
670 | setOperationAction(ISD::XOR, MVT::v2i16, Promote); | |||
671 | AddPromotedToType(ISD::XOR, MVT::v2i16, MVT::i32); | |||
672 | ||||
673 | setOperationAction(ISD::LOAD, MVT::v4i16, Promote); | |||
674 | AddPromotedToType(ISD::LOAD, MVT::v4i16, MVT::v2i32); | |||
675 | setOperationAction(ISD::LOAD, MVT::v4f16, Promote); | |||
676 | AddPromotedToType(ISD::LOAD, MVT::v4f16, MVT::v2i32); | |||
677 | ||||
678 | setOperationAction(ISD::STORE, MVT::v4i16, Promote); | |||
679 | AddPromotedToType(ISD::STORE, MVT::v4i16, MVT::v2i32); | |||
680 | setOperationAction(ISD::STORE, MVT::v4f16, Promote); | |||
681 | AddPromotedToType(ISD::STORE, MVT::v4f16, MVT::v2i32); | |||
682 | ||||
683 | setOperationAction(ISD::LOAD, MVT::v8i16, Promote); | |||
684 | AddPromotedToType(ISD::LOAD, MVT::v8i16, MVT::v4i32); | |||
685 | setOperationAction(ISD::LOAD, MVT::v8f16, Promote); | |||
686 | AddPromotedToType(ISD::LOAD, MVT::v8f16, MVT::v4i32); | |||
687 | ||||
688 | setOperationAction(ISD::STORE, MVT::v4i16, Promote); | |||
689 | AddPromotedToType(ISD::STORE, MVT::v4i16, MVT::v2i32); | |||
690 | setOperationAction(ISD::STORE, MVT::v4f16, Promote); | |||
691 | AddPromotedToType(ISD::STORE, MVT::v4f16, MVT::v2i32); | |||
692 | ||||
693 | setOperationAction(ISD::STORE, MVT::v8i16, Promote); | |||
694 | AddPromotedToType(ISD::STORE, MVT::v8i16, MVT::v4i32); | |||
695 | setOperationAction(ISD::STORE, MVT::v8f16, Promote); | |||
696 | AddPromotedToType(ISD::STORE, MVT::v8f16, MVT::v4i32); | |||
697 | ||||
698 | setOperationAction(ISD::ANY_EXTEND, MVT::v2i32, Expand); | |||
699 | setOperationAction(ISD::ZERO_EXTEND, MVT::v2i32, Expand); | |||
700 | setOperationAction(ISD::SIGN_EXTEND, MVT::v2i32, Expand); | |||
701 | setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand); | |||
702 | ||||
703 | setOperationAction(ISD::ANY_EXTEND, MVT::v4i32, Expand); | |||
704 | setOperationAction(ISD::ZERO_EXTEND, MVT::v4i32, Expand); | |||
705 | setOperationAction(ISD::SIGN_EXTEND, MVT::v4i32, Expand); | |||
706 | ||||
707 | setOperationAction(ISD::ANY_EXTEND, MVT::v8i32, Expand); | |||
708 | setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Expand); | |||
709 | setOperationAction(ISD::SIGN_EXTEND, MVT::v8i32, Expand); | |||
710 | ||||
711 | if (!Subtarget->hasVOP3PInsts()) { | |||
712 | setOperationAction(ISD::BUILD_VECTOR, MVT::v2i16, Custom); | |||
713 | setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom); | |||
714 | } | |||
715 | ||||
716 | setOperationAction(ISD::FNEG, MVT::v2f16, Legal); | |||
717 | // This isn't really legal, but this avoids the legalizer unrolling it (and | |||
718 | // allows matching fneg (fabs x) patterns) | |||
719 | setOperationAction(ISD::FABS, MVT::v2f16, Legal); | |||
720 | ||||
721 | setOperationAction(ISD::FMAXNUM, MVT::f16, Custom); | |||
722 | setOperationAction(ISD::FMINNUM, MVT::f16, Custom); | |||
723 | setOperationAction(ISD::FMAXNUM_IEEE, MVT::f16, Legal); | |||
724 | setOperationAction(ISD::FMINNUM_IEEE, MVT::f16, Legal); | |||
725 | ||||
726 | setOperationAction(ISD::FMINNUM_IEEE, MVT::v4f16, Custom); | |||
727 | setOperationAction(ISD::FMAXNUM_IEEE, MVT::v4f16, Custom); | |||
728 | setOperationAction(ISD::FMINNUM_IEEE, MVT::v8f16, Custom); | |||
729 | setOperationAction(ISD::FMAXNUM_IEEE, MVT::v8f16, Custom); | |||
730 | ||||
731 | setOperationAction(ISD::FMINNUM, MVT::v4f16, Expand); | |||
732 | setOperationAction(ISD::FMAXNUM, MVT::v4f16, Expand); | |||
733 | setOperationAction(ISD::FMINNUM, MVT::v8f16, Expand); | |||
734 | setOperationAction(ISD::FMAXNUM, MVT::v8f16, Expand); | |||
735 | ||||
736 | for (MVT Vec16 : { MVT::v8i16, MVT::v8f16 }) { | |||
737 | setOperationAction(ISD::BUILD_VECTOR, Vec16, Custom); | |||
738 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec16, Custom); | |||
739 | setOperationAction(ISD::INSERT_VECTOR_ELT, Vec16, Expand); | |||
740 | setOperationAction(ISD::SCALAR_TO_VECTOR, Vec16, Expand); | |||
741 | } | |||
742 | } | |||
743 | ||||
744 | if (Subtarget->hasVOP3PInsts()) { | |||
745 | setOperationAction(ISD::ADD, MVT::v2i16, Legal); | |||
746 | setOperationAction(ISD::SUB, MVT::v2i16, Legal); | |||
747 | setOperationAction(ISD::MUL, MVT::v2i16, Legal); | |||
748 | setOperationAction(ISD::SHL, MVT::v2i16, Legal); | |||
749 | setOperationAction(ISD::SRL, MVT::v2i16, Legal); | |||
750 | setOperationAction(ISD::SRA, MVT::v2i16, Legal); | |||
751 | setOperationAction(ISD::SMIN, MVT::v2i16, Legal); | |||
752 | setOperationAction(ISD::UMIN, MVT::v2i16, Legal); | |||
753 | setOperationAction(ISD::SMAX, MVT::v2i16, Legal); | |||
754 | setOperationAction(ISD::UMAX, MVT::v2i16, Legal); | |||
755 | ||||
756 | setOperationAction(ISD::UADDSAT, MVT::v2i16, Legal); | |||
757 | setOperationAction(ISD::USUBSAT, MVT::v2i16, Legal); | |||
758 | setOperationAction(ISD::SADDSAT, MVT::v2i16, Legal); | |||
759 | setOperationAction(ISD::SSUBSAT, MVT::v2i16, Legal); | |||
760 | ||||
761 | setOperationAction(ISD::FADD, MVT::v2f16, Legal); | |||
762 | setOperationAction(ISD::FMUL, MVT::v2f16, Legal); | |||
763 | setOperationAction(ISD::FMA, MVT::v2f16, Legal); | |||
764 | ||||
765 | setOperationAction(ISD::FMINNUM_IEEE, MVT::v2f16, Legal); | |||
766 | setOperationAction(ISD::FMAXNUM_IEEE, MVT::v2f16, Legal); | |||
767 | ||||
768 | setOperationAction(ISD::FCANONICALIZE, MVT::v2f16, Legal); | |||
769 | ||||
770 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom); | |||
771 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom); | |||
772 | ||||
773 | setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f16, Custom); | |||
774 | setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom); | |||
775 | setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f16, Custom); | |||
776 | setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i16, Custom); | |||
777 | ||||
778 | for (MVT VT : { MVT::v4i16, MVT::v8i16 }) { | |||
779 | // Split vector operations. | |||
780 | setOperationAction(ISD::SHL, VT, Custom); | |||
781 | setOperationAction(ISD::SRA, VT, Custom); | |||
782 | setOperationAction(ISD::SRL, VT, Custom); | |||
783 | setOperationAction(ISD::ADD, VT, Custom); | |||
784 | setOperationAction(ISD::SUB, VT, Custom); | |||
785 | setOperationAction(ISD::MUL, VT, Custom); | |||
786 | ||||
787 | setOperationAction(ISD::SMIN, VT, Custom); | |||
788 | setOperationAction(ISD::SMAX, VT, Custom); | |||
789 | setOperationAction(ISD::UMIN, VT, Custom); | |||
790 | setOperationAction(ISD::UMAX, VT, Custom); | |||
791 | ||||
792 | setOperationAction(ISD::UADDSAT, VT, Custom); | |||
793 | setOperationAction(ISD::SADDSAT, VT, Custom); | |||
794 | setOperationAction(ISD::USUBSAT, VT, Custom); | |||
795 | setOperationAction(ISD::SSUBSAT, VT, Custom); | |||
796 | } | |||
797 | ||||
798 | for (MVT VT : { MVT::v4f16, MVT::v8f16 }) { | |||
799 | // Split vector operations. | |||
800 | setOperationAction(ISD::FADD, VT, Custom); | |||
801 | setOperationAction(ISD::FMUL, VT, Custom); | |||
802 | setOperationAction(ISD::FMA, VT, Custom); | |||
803 | setOperationAction(ISD::FCANONICALIZE, VT, Custom); | |||
804 | } | |||
805 | ||||
806 | setOperationAction(ISD::FMAXNUM, MVT::v2f16, Custom); | |||
807 | setOperationAction(ISD::FMINNUM, MVT::v2f16, Custom); | |||
808 | ||||
809 | setOperationAction(ISD::FMINNUM, MVT::v4f16, Custom); | |||
810 | setOperationAction(ISD::FMAXNUM, MVT::v4f16, Custom); | |||
811 | ||||
812 | setOperationAction(ISD::FEXP, MVT::v2f16, Custom); | |||
813 | setOperationAction(ISD::SELECT, MVT::v4i16, Custom); | |||
814 | setOperationAction(ISD::SELECT, MVT::v4f16, Custom); | |||
815 | ||||
816 | if (Subtarget->hasPackedFP32Ops()) { | |||
817 | setOperationAction(ISD::FADD, MVT::v2f32, Legal); | |||
818 | setOperationAction(ISD::FMUL, MVT::v2f32, Legal); | |||
819 | setOperationAction(ISD::FMA, MVT::v2f32, Legal); | |||
820 | setOperationAction(ISD::FNEG, MVT::v2f32, Legal); | |||
821 | ||||
822 | for (MVT VT : { MVT::v4f32, MVT::v8f32, MVT::v16f32, MVT::v32f32 }) { | |||
823 | setOperationAction(ISD::FADD, VT, Custom); | |||
824 | setOperationAction(ISD::FMUL, VT, Custom); | |||
825 | setOperationAction(ISD::FMA, VT, Custom); | |||
826 | } | |||
827 | } | |||
828 | } | |||
829 | ||||
830 | setOperationAction(ISD::FNEG, MVT::v4f16, Custom); | |||
831 | setOperationAction(ISD::FABS, MVT::v4f16, Custom); | |||
832 | ||||
833 | if (Subtarget->has16BitInsts()) { | |||
834 | setOperationAction(ISD::SELECT, MVT::v2i16, Promote); | |||
835 | AddPromotedToType(ISD::SELECT, MVT::v2i16, MVT::i32); | |||
836 | setOperationAction(ISD::SELECT, MVT::v2f16, Promote); | |||
837 | AddPromotedToType(ISD::SELECT, MVT::v2f16, MVT::i32); | |||
838 | } else { | |||
839 | // Legalization hack. | |||
840 | setOperationAction(ISD::SELECT, MVT::v2i16, Custom); | |||
841 | setOperationAction(ISD::SELECT, MVT::v2f16, Custom); | |||
842 | ||||
843 | setOperationAction(ISD::FNEG, MVT::v2f16, Custom); | |||
844 | setOperationAction(ISD::FABS, MVT::v2f16, Custom); | |||
845 | } | |||
846 | ||||
847 | for (MVT VT : { MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8, | |||
848 | MVT::v8i16, MVT::v8f16 }) { | |||
849 | setOperationAction(ISD::SELECT, VT, Custom); | |||
850 | } | |||
851 | ||||
852 | setOperationAction(ISD::SMULO, MVT::i64, Custom); | |||
853 | setOperationAction(ISD::UMULO, MVT::i64, Custom); | |||
854 | ||||
855 | if (Subtarget->hasMad64_32()) { | |||
856 | setOperationAction(ISD::SMUL_LOHI, MVT::i32, Custom); | |||
857 | setOperationAction(ISD::UMUL_LOHI, MVT::i32, Custom); | |||
858 | } | |||
859 | ||||
860 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); | |||
861 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom); | |||
862 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom); | |||
863 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom); | |||
864 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f16, Custom); | |||
865 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2i16, Custom); | |||
866 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f16, Custom); | |||
867 | ||||
868 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v2f16, Custom); | |||
869 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v2i16, Custom); | |||
870 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v3f16, Custom); | |||
871 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v3i16, Custom); | |||
872 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v4f16, Custom); | |||
873 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v4i16, Custom); | |||
874 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v8f16, Custom); | |||
875 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom); | |||
876 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::f16, Custom); | |||
877 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom); | |||
878 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom); | |||
879 | ||||
880 | setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom); | |||
881 | setOperationAction(ISD::INTRINSIC_VOID, MVT::v2i16, Custom); | |||
882 | setOperationAction(ISD::INTRINSIC_VOID, MVT::v2f16, Custom); | |||
883 | setOperationAction(ISD::INTRINSIC_VOID, MVT::v3i16, Custom); | |||
884 | setOperationAction(ISD::INTRINSIC_VOID, MVT::v3f16, Custom); | |||
885 | setOperationAction(ISD::INTRINSIC_VOID, MVT::v4f16, Custom); | |||
886 | setOperationAction(ISD::INTRINSIC_VOID, MVT::v4i16, Custom); | |||
887 | setOperationAction(ISD::INTRINSIC_VOID, MVT::f16, Custom); | |||
888 | setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom); | |||
889 | setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom); | |||
890 | ||||
891 | setTargetDAGCombine({ISD::ADD, | |||
892 | ISD::ADDCARRY, | |||
893 | ISD::SUB, | |||
894 | ISD::SUBCARRY, | |||
895 | ISD::FADD, | |||
896 | ISD::FSUB, | |||
897 | ISD::FMINNUM, | |||
898 | ISD::FMAXNUM, | |||
899 | ISD::FMINNUM_IEEE, | |||
900 | ISD::FMAXNUM_IEEE, | |||
901 | ISD::FMA, | |||
902 | ISD::SMIN, | |||
903 | ISD::SMAX, | |||
904 | ISD::UMIN, | |||
905 | ISD::UMAX, | |||
906 | ISD::SETCC, | |||
907 | ISD::AND, | |||
908 | ISD::OR, | |||
909 | ISD::XOR, | |||
910 | ISD::SINT_TO_FP, | |||
911 | ISD::UINT_TO_FP, | |||
912 | ISD::FCANONICALIZE, | |||
913 | ISD::SCALAR_TO_VECTOR, | |||
914 | ISD::ZERO_EXTEND, | |||
915 | ISD::SIGN_EXTEND_INREG, | |||
916 | ISD::EXTRACT_VECTOR_ELT, | |||
917 | ISD::INSERT_VECTOR_ELT}); | |||
918 | ||||
919 | // All memory operations. Some folding on the pointer operand is done to help | |||
920 | // match the constant offsets in the addressing modes. | |||
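 | // For illustration (added sketch): this lets the combines fold, e.g., | |||
 | //   (load (add ptr, 4095))  ->  load ptr, offset:4095 | |||
 | // moving the constant into the 12-bit immediate offset field of the MUBUF | |||
 | // addressing mode instead of materializing the add. | |||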
921 | setTargetDAGCombine({ISD::LOAD, | |||
922 | ISD::STORE, | |||
923 | ISD::ATOMIC_LOAD, | |||
924 | ISD::ATOMIC_STORE, | |||
925 | ISD::ATOMIC_CMP_SWAP, | |||
926 | ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, | |||
927 | ISD::ATOMIC_SWAP, | |||
928 | ISD::ATOMIC_LOAD_ADD, | |||
929 | ISD::ATOMIC_LOAD_SUB, | |||
930 | ISD::ATOMIC_LOAD_AND, | |||
931 | ISD::ATOMIC_LOAD_OR, | |||
932 | ISD::ATOMIC_LOAD_XOR, | |||
933 | ISD::ATOMIC_LOAD_NAND, | |||
934 | ISD::ATOMIC_LOAD_MIN, | |||
935 | ISD::ATOMIC_LOAD_MAX, | |||
936 | ISD::ATOMIC_LOAD_UMIN, | |||
937 | ISD::ATOMIC_LOAD_UMAX, | |||
938 | ISD::ATOMIC_LOAD_FADD, | |||
939 | ISD::INTRINSIC_VOID, | |||
940 | ISD::INTRINSIC_W_CHAIN}); | |||
941 | ||||
942 | // FIXME: In other contexts we pretend this is a per-function property. | |||
943 | setStackPointerRegisterToSaveRestore(AMDGPU::SGPR32); | |||
944 | ||||
945 | setSchedulingPreference(Sched::RegPressure); | |||
946 | } | |||
947 | ||||
948 | const GCNSubtarget *SITargetLowering::getSubtarget() const { | |||
949 | return Subtarget; | |||
950 | } | |||
951 | ||||
952 | //===----------------------------------------------------------------------===// | |||
953 | // TargetLowering queries | |||
954 | //===----------------------------------------------------------------------===// | |||
955 | ||||
956 | // v_mad_mix* support a conversion from f16 to f32. | |||
957 | // | |||
958 | // There is only one special case when denormals are enabled that we don't | |||
959 | // currently handle, where this would be OK to use. | |||
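 | // For illustration (added sketch): with the mix instructions this enables | |||
 | // folds such as | |||
 | //   (fma (fpext f16:a), (fpext f16:b), f32:c) -> v_fma_mix_f32 a, b, c | |||
 | // (or v_mad_mix_f32), provided FP32 denormals are flushed (checked below). | |||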
960 | bool SITargetLowering::isFPExtFoldable(const SelectionDAG &DAG, unsigned Opcode, | |||
961 | EVT DestVT, EVT SrcVT) const { | |||
962 | return ((Opcode == ISD::FMAD && Subtarget->hasMadMixInsts()) || | |||
963 | (Opcode == ISD::FMA && Subtarget->hasFmaMixInsts())) && | |||
964 | DestVT.getScalarType() == MVT::f32 && | |||
965 | SrcVT.getScalarType() == MVT::f16 && | |||
966 | // TODO: This probably only requires no input flushing? | |||
967 | !hasFP32Denormals(DAG.getMachineFunction()); | |||
968 | } | |||
969 | ||||
970 | bool SITargetLowering::isFPExtFoldable(const MachineInstr &MI, unsigned Opcode, | |||
971 | LLT DestTy, LLT SrcTy) const { | |||
972 | return ((Opcode == TargetOpcode::G_FMAD && Subtarget->hasMadMixInsts()) || | |||
973 | (Opcode == TargetOpcode::G_FMA && Subtarget->hasFmaMixInsts())) && | |||
974 | DestTy.getScalarSizeInBits() == 32 && | |||
975 | SrcTy.getScalarSizeInBits() == 16 && | |||
976 | // TODO: This probably only requires no input flushing? | |||
977 | !hasFP32Denormals(*MI.getMF()); | |||
978 | } | |||
979 | ||||
980 | bool SITargetLowering::isShuffleMaskLegal(ArrayRef<int>, EVT) const { | |||
981 | // SI has some legal vector types, but no legal vector operations. Say no | |||
982 | // shuffles are legal in order to prefer scalarizing some vector operations. | |||
983 | return false; | |||
984 | } | |||
985 | ||||
986 | MVT SITargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context, | |||
987 | CallingConv::ID CC, | |||
988 | EVT VT) const { | |||
989 | if (CC == CallingConv::AMDGPU_KERNEL) | |||
990 | return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT); | |||
991 | ||||
992 | if (VT.isVector()) { | |||
993 | EVT ScalarVT = VT.getScalarType(); | |||
994 | unsigned Size = ScalarVT.getSizeInBits(); | |||
995 | if (Size == 16) { | |||
996 | if (Subtarget->has16BitInsts()) | |||
997 | return VT.isInteger() ? MVT::v2i16 : MVT::v2f16; | |||
998 | return VT.isInteger() ? MVT::i32 : MVT::f32; | |||
999 | } | |||
1000 | ||||
1001 | if (Size < 16) | |||
1002 | return Subtarget->has16BitInsts() ? MVT::i16 : MVT::i32; | |||
1003 | return Size == 32 ? ScalarVT.getSimpleVT() : MVT::i32; | |||
1004 | } | |||
1005 | ||||
1006 | if (VT.getSizeInBits() > 32) | |||
1007 | return MVT::i32; | |||
1008 | ||||
1009 | return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT); | |||
1010 | } | |||
1011 | ||||
1012 | unsigned SITargetLowering::getNumRegistersForCallingConv(LLVMContext &Context, | |||
1013 | CallingConv::ID CC, | |||
1014 | EVT VT) const { | |||
1015 | if (CC == CallingConv::AMDGPU_KERNEL) | |||
1016 | return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT); | |||
1017 | ||||
1018 | if (VT.isVector()) { | |||
1019 | unsigned NumElts = VT.getVectorNumElements(); | |||
1020 | EVT ScalarVT = VT.getScalarType(); | |||
1021 | unsigned Size = ScalarVT.getSizeInBits(); | |||
1022 | ||||
1023 | // FIXME: Should probably promote 8-bit vectors to i16. | |||
1024 | if (Size == 16 && Subtarget->has16BitInsts()) | |||
1025 | return (NumElts + 1) / 2; | |||
1026 | ||||
1027 | if (Size <= 32) | |||
1028 | return NumElts; | |||
1029 | ||||
1030 | if (Size > 32) | |||
1031 | return NumElts * ((Size + 31) / 32); | |||
1032 | } else if (VT.getSizeInBits() > 32) | |||
1033 | return (VT.getSizeInBits() + 31) / 32; | |||
1034 | ||||
1035 | return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT); | |||
1036 | } | |||
1037 | ||||
1038 | unsigned SITargetLowering::getVectorTypeBreakdownForCallingConv( | |||
1039 | LLVMContext &Context, CallingConv::ID CC, | |||
1040 | EVT VT, EVT &IntermediateVT, | |||
1041 | unsigned &NumIntermediates, MVT &RegisterVT) const { | |||
1042 | if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) { | |||
1043 | unsigned NumElts = VT.getVectorNumElements(); | |||
1044 | EVT ScalarVT = VT.getScalarType(); | |||
1045 | unsigned Size = ScalarVT.getSizeInBits(); | |||
1046 | // FIXME: We should fix the ABI to be the same on targets without 16-bit | |||
1047 | // support, but unless we can properly handle 3-vectors, it will still be | |||
1048 | // inconsistent. | |||
1049 | if (Size == 16 && Subtarget->has16BitInsts()) { | |||
1050 | RegisterVT = VT.isInteger() ? MVT::v2i16 : MVT::v2f16; | |||
1051 | IntermediateVT = RegisterVT; | |||
1052 | NumIntermediates = (NumElts + 1) / 2; | |||
1053 | return NumIntermediates; | |||
1054 | } | |||
1055 | ||||
1056 | if (Size == 32) { | |||
1057 | RegisterVT = ScalarVT.getSimpleVT(); | |||
1058 | IntermediateVT = RegisterVT; | |||
1059 | NumIntermediates = NumElts; | |||
1060 | return NumIntermediates; | |||
1061 | } | |||
1062 | ||||
1063 | if (Size < 16 && Subtarget->has16BitInsts()) { | |||
1064 | // FIXME: Should probably form v2i16 pieces | |||
1065 | RegisterVT = MVT::i16; | |||
1066 | IntermediateVT = ScalarVT; | |||
1067 | NumIntermediates = NumElts; | |||
1068 | return NumIntermediates; | |||
1069 | } | |||
1070 | ||||
1071 | ||||
1072 | if (Size != 16 && Size <= 32) { | |||
1073 | RegisterVT = MVT::i32; | |||
1074 | IntermediateVT = ScalarVT; | |||
1075 | NumIntermediates = NumElts; | |||
1076 | return NumIntermediates; | |||
1077 | } | |||
1078 | ||||
1079 | if (Size > 32) { | |||
1080 | RegisterVT = MVT::i32; | |||
1081 | IntermediateVT = RegisterVT; | |||
1082 | NumIntermediates = NumElts * ((Size + 31) / 32); | |||
1083 | return NumIntermediates; | |||
1084 | } | |||
1085 | } | |||
1086 | ||||
1087 | return TargetLowering::getVectorTypeBreakdownForCallingConv( | |||
1088 | Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT); | |||
1089 | } | |||
1090 | ||||
1091 | static EVT memVTFromImageData(Type *Ty, unsigned DMaskLanes) { | |||
1092 | assert(DMaskLanes != 0); | |||
1093 | ||||
1094 | if (auto *VT = dyn_cast<FixedVectorType>(Ty)) { | |||
1095 | unsigned NumElts = std::min(DMaskLanes, VT->getNumElements()); | |||
1096 | return EVT::getVectorVT(Ty->getContext(), | |||
1097 | EVT::getEVT(VT->getElementType()), | |||
1098 | NumElts); | |||
1099 | } | |||
1100 | ||||
1101 | return EVT::getEVT(Ty); | |||
1102 | } | |||
1103 | ||||
1104 | // Peek through TFE struct returns to only use the data size. | |||
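 | // For illustration (added note): an image load with TFE enabled returns, | |||
 | // e.g., { <4 x float>, i32 }, where the trailing i32 is the texture-fail | |||
 | // status word; only the data member determines the memory VT. | |||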
1105 | static EVT memVTFromImageReturn(Type *Ty, unsigned DMaskLanes) { | |||
1106 | auto *ST = dyn_cast<StructType>(Ty); | |||
1107 | if (!ST) | |||
1108 | return memVTFromImageData(Ty, DMaskLanes); | |||
1109 | ||||
1110 | // Some intrinsics return an aggregate type - special case to work out the | |||
1111 | // correct memVT. | |||
1112 | // | |||
1113 | // Only limited forms of aggregate type are currently expected. | |||
1114 | if (ST->getNumContainedTypes() != 2 || | |||
1115 | !ST->getContainedType(1)->isIntegerTy(32)) | |||
1116 | return EVT(); | |||
1117 | return memVTFromImageData(ST->getContainedType(0), DMaskLanes); | |||
1118 | } | |||
1119 | ||||
1120 | bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, | |||
1121 | const CallInst &CI, | |||
1122 | MachineFunction &MF, | |||
1123 | unsigned IntrID) const { | |||
1124 | Info.flags = MachineMemOperand::MONone; | |||
1125 | if (CI.hasMetadata(LLVMContext::MD_invariant_load)) | |||
1126 | Info.flags |= MachineMemOperand::MOInvariant; | |||
1127 | ||||
1128 | if (const AMDGPU::RsrcIntrinsic *RsrcIntr = | |||
1129 | AMDGPU::lookupRsrcIntrinsic(IntrID)) { | |||
1130 | AttributeList Attr = Intrinsic::getAttributes(CI.getContext(), | |||
1131 | (Intrinsic::ID)IntrID); | |||
1132 | if (Attr.hasFnAttr(Attribute::ReadNone)) | |||
1133 | return false; | |||
1134 | ||||
1135 | SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); | |||
1136 | ||||
1137 | if (RsrcIntr->IsImage) { | |||
1138 | Info.ptrVal = | |||
1139 | MFI->getImagePSV(*MF.getSubtarget<GCNSubtarget>().getInstrInfo()); | |||
1140 | Info.align.reset(); | |||
1141 | } else { | |||
1142 | Info.ptrVal = | |||
1143 | MFI->getBufferPSV(*MF.getSubtarget<GCNSubtarget>().getInstrInfo()); | |||
1144 | } | |||
1145 | ||||
1146 | Info.flags |= MachineMemOperand::MODereferenceable; | |||
1147 | if (Attr.hasFnAttr(Attribute::ReadOnly)) { | |||
1148 | unsigned DMaskLanes = 4; | |||
1149 | ||||
1150 | if (RsrcIntr->IsImage) { | |||
1151 | const AMDGPU::ImageDimIntrinsicInfo *Intr | |||
1152 | = AMDGPU::getImageDimIntrinsicInfo(IntrID); | |||
1153 | const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode = | |||
1154 | AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode); | |||
1155 | ||||
1156 | if (!BaseOpcode->Gather4) { | |||
1157 | // If this isn't a gather, we may have excess loaded elements in the | |||
1158 | // IR type. Check the dmask for the real number of elements loaded. | |||
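 | // For illustration (added note): a dmask of 0b0101 loads only two of the | |||
 | // four channels declared in the IR return type, so the memory VT is | |||
 | // narrowed accordingly. | |||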
1159 | unsigned DMask | |||
1160 | = cast<ConstantInt>(CI.getArgOperand(0))->getZExtValue(); | |||
1161 | DMaskLanes = DMask == 0 ? 1 : countPopulation(DMask); | |||
1162 | } | |||
1163 | ||||
1164 | Info.memVT = memVTFromImageReturn(CI.getType(), DMaskLanes); | |||
1165 | } else | |||
1166 | Info.memVT = EVT::getEVT(CI.getType()); | |||
1167 | ||||
1168 | // FIXME: What does alignment mean for an image? | |||
1169 | Info.opc = ISD::INTRINSIC_W_CHAIN; | |||
1170 | Info.flags |= MachineMemOperand::MOLoad; | |||
1171 | } else if (Attr.hasFnAttr(Attribute::WriteOnly)) { | |||
1172 | Info.opc = ISD::INTRINSIC_VOID; | |||
1173 | ||||
1174 | Type *DataTy = CI.getArgOperand(0)->getType(); | |||
1175 | if (RsrcIntr->IsImage) { | |||
1176 | unsigned DMask = cast<ConstantInt>(CI.getArgOperand(1))->getZExtValue(); | |||
1177 | unsigned DMaskLanes = DMask == 0 ? 1 : countPopulation(DMask); | |||
1178 | Info.memVT = memVTFromImageData(DataTy, DMaskLanes); | |||
1179 | } else | |||
1180 | Info.memVT = EVT::getEVT(DataTy); | |||
1181 | ||||
1182 | Info.flags |= MachineMemOperand::MOStore; | |||
1183 | } else { | |||
1184 | // Atomic | |||
1185 | Info.opc = CI.getType()->isVoidTy() ? ISD::INTRINSIC_VOID : | |||
1186 | ISD::INTRINSIC_W_CHAIN; | |||
1187 | Info.memVT = MVT::getVT(CI.getArgOperand(0)->getType()); | |||
1188 | Info.flags |= MachineMemOperand::MOLoad | | |||
1189 | MachineMemOperand::MOStore | | |||
1190 | MachineMemOperand::MODereferenceable; | |||
1191 | ||||
1192 | // XXX - Should this be volatile without known ordering? | |||
1193 | Info.flags |= MachineMemOperand::MOVolatile; | |||
1194 | } | |||
1195 | return true; | |||
1196 | } | |||
1197 | ||||
1198 | switch (IntrID) { | |||
1199 | case Intrinsic::amdgcn_atomic_inc: | |||
1200 | case Intrinsic::amdgcn_atomic_dec: | |||
1201 | case Intrinsic::amdgcn_ds_ordered_add: | |||
1202 | case Intrinsic::amdgcn_ds_ordered_swap: | |||
1203 | case Intrinsic::amdgcn_ds_fadd: | |||
1204 | case Intrinsic::amdgcn_ds_fmin: | |||
1205 | case Intrinsic::amdgcn_ds_fmax: { | |||
1206 | Info.opc = ISD::INTRINSIC_W_CHAIN; | |||
1207 | Info.memVT = MVT::getVT(CI.getType()); | |||
1208 | Info.ptrVal = CI.getOperand(0); | |||
1209 | Info.align.reset(); | |||
1210 | Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore; | |||
1211 | ||||
1212 | const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(4)); | |||
1213 | if (!Vol->isZero()) | |||
1214 | Info.flags |= MachineMemOperand::MOVolatile; | |||
1215 | ||||
1216 | return true; | |||
1217 | } | |||
1218 | case Intrinsic::amdgcn_buffer_atomic_fadd: { | |||
1219 | SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); | |||
1220 | ||||
1221 | Info.opc = ISD::INTRINSIC_W_CHAIN; | |||
1222 | Info.memVT = MVT::getVT(CI.getOperand(0)->getType()); | |||
1223 | Info.ptrVal = | |||
1224 | MFI->getBufferPSV(*MF.getSubtarget<GCNSubtarget>().getInstrInfo()); | |||
1225 | Info.align.reset(); | |||
1226 | Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore; | |||
1227 | ||||
1228 | const ConstantInt *Vol = dyn_cast<ConstantInt>(CI.getOperand(4)); | |||
1229 | if (!Vol || !Vol->isZero()) | |||
1230 | Info.flags |= MachineMemOperand::MOVolatile; | |||
1231 | ||||
1232 | return true; | |||
1233 | } | |||
1234 | case Intrinsic::amdgcn_ds_append: | |||
1235 | case Intrinsic::amdgcn_ds_consume: { | |||
1236 | Info.opc = ISD::INTRINSIC_W_CHAIN; | |||
1237 | Info.memVT = MVT::getVT(CI.getType()); | |||
1238 | Info.ptrVal = CI.getOperand(0); | |||
1239 | Info.align.reset(); | |||
1240 | Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore; | |||
1241 | ||||
1242 | const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(1)); | |||
1243 | if (!Vol->isZero()) | |||
1244 | Info.flags |= MachineMemOperand::MOVolatile; | |||
1245 | ||||
1246 | return true; | |||
1247 | } | |||
1248 | case Intrinsic::amdgcn_global_atomic_csub: { | |||
1249 | Info.opc = ISD::INTRINSIC_W_CHAIN; | |||
1250 | Info.memVT = MVT::getVT(CI.getType()); | |||
1251 | Info.ptrVal = CI.getOperand(0); | |||
1252 | Info.align.reset(); | |||
1253 | Info.flags |= MachineMemOperand::MOLoad | | |||
1254 | MachineMemOperand::MOStore | | |||
1255 | MachineMemOperand::MOVolatile; | |||
1256 | return true; | |||
1257 | } | |||
1258 | case Intrinsic::amdgcn_image_bvh_intersect_ray: { | |||
1259 | SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); | |||
1260 | Info.opc = ISD::INTRINSIC_W_CHAIN; | |||
1261 | Info.memVT = MVT::getVT(CI.getType()); // XXX: what is correct VT? | |||
1262 | Info.ptrVal = | |||
1263 | MFI->getImagePSV(*MF.getSubtarget<GCNSubtarget>().getInstrInfo()); | |||
1264 | Info.align.reset(); | |||
1265 | Info.flags |= MachineMemOperand::MOLoad | | |||
1266 | MachineMemOperand::MODereferenceable; | |||
1267 | return true; | |||
1268 | } | |||
1269 | case Intrinsic::amdgcn_global_atomic_fadd: | |||
1270 | case Intrinsic::amdgcn_global_atomic_fmin: | |||
1271 | case Intrinsic::amdgcn_global_atomic_fmax: | |||
1272 | case Intrinsic::amdgcn_flat_atomic_fadd: | |||
1273 | case Intrinsic::amdgcn_flat_atomic_fmin: | |||
1274 | case Intrinsic::amdgcn_flat_atomic_fmax: | |||
1275 | case Intrinsic::amdgcn_global_atomic_fadd_v2bf16: | |||
1276 | case Intrinsic::amdgcn_flat_atomic_fadd_v2bf16: { | |||
1277 | Info.opc = ISD::INTRINSIC_W_CHAIN; | |||
1278 | Info.memVT = MVT::getVT(CI.getType()); | |||
1279 | Info.ptrVal = CI.getOperand(0); | |||
1280 | Info.align.reset(); | |||
1281 | Info.flags |= MachineMemOperand::MOLoad | | |||
1282 | MachineMemOperand::MOStore | | |||
1283 | MachineMemOperand::MODereferenceable | | |||
1284 | MachineMemOperand::MOVolatile; | |||
1285 | return true; | |||
1286 | } | |||
1287 | case Intrinsic::amdgcn_ds_gws_init: | |||
1288 | case Intrinsic::amdgcn_ds_gws_barrier: | |||
1289 | case Intrinsic::amdgcn_ds_gws_sema_v: | |||
1290 | case Intrinsic::amdgcn_ds_gws_sema_br: | |||
1291 | case Intrinsic::amdgcn_ds_gws_sema_p: | |||
1292 | case Intrinsic::amdgcn_ds_gws_sema_release_all: { | |||
1293 | Info.opc = ISD::INTRINSIC_VOID; | |||
1294 | ||||
1295 | SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); | |||
1296 | Info.ptrVal = | |||
1297 | MFI->getGWSPSV(*MF.getSubtarget<GCNSubtarget>().getInstrInfo()); | |||
1298 | ||||
1299 | // This is an abstract access, but we need to specify a type and size. | |||
1300 | Info.memVT = MVT::i32; | |||
1301 | Info.size = 4; | |||
1302 | Info.align = Align(4); | |||
1303 | ||||
1304 | if (IntrID == Intrinsic::amdgcn_ds_gws_barrier) | |||
1305 | Info.flags |= MachineMemOperand::MOLoad; | |||
1306 | else | |||
1307 | Info.flags |= MachineMemOperand::MOStore; | |||
1308 | return true; | |||
1309 | } | |||
1310 | default: | |||
1311 | return false; | |||
1312 | } | |||
1313 | } | |||
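
// A minimal sketch of what the switch above produces, assuming a call to
// llvm.amdgcn.ds.append(ptr, i1 immarg vol):
//
//   Info.opc    = ISD::INTRINSIC_W_CHAIN
//   Info.memVT  = MVT::i32            // the appended counter value
//   Info.ptrVal = the LDS/GDS pointer operand
//   Info.flags  = MOLoad | MOStore    // plus MOVolatile when vol is nonzero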
1314 | ||||
1315 | bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II, | |||
1316 | SmallVectorImpl<Value*> &Ops, | |||
1317 | Type *&AccessTy) const { | |||
1318 | switch (II->getIntrinsicID()) { | |||
1319 | case Intrinsic::amdgcn_atomic_inc: | |||
1320 | case Intrinsic::amdgcn_atomic_dec: | |||
1321 | case Intrinsic::amdgcn_ds_ordered_add: | |||
1322 | case Intrinsic::amdgcn_ds_ordered_swap: | |||
1323 | case Intrinsic::amdgcn_ds_append: | |||
1324 | case Intrinsic::amdgcn_ds_consume: | |||
1325 | case Intrinsic::amdgcn_ds_fadd: | |||
1326 | case Intrinsic::amdgcn_ds_fmin: | |||
1327 | case Intrinsic::amdgcn_ds_fmax: | |||
1328 | case Intrinsic::amdgcn_global_atomic_fadd: | |||
1329 | case Intrinsic::amdgcn_flat_atomic_fadd: | |||
1330 | case Intrinsic::amdgcn_flat_atomic_fmin: | |||
1331 | case Intrinsic::amdgcn_flat_atomic_fmax: | |||
1332 | case Intrinsic::amdgcn_global_atomic_fadd_v2bf16: | |||
1333 | case Intrinsic::amdgcn_flat_atomic_fadd_v2bf16: | |||
1334 | case Intrinsic::amdgcn_global_atomic_csub: { | |||
1335 | Value *Ptr = II->getArgOperand(0); | |||
1336 | AccessTy = II->getType(); | |||
1337 | Ops.push_back(Ptr); | |||
1338 | return true; | |||
1339 | } | |||
1340 | default: | |||
1341 | return false; | |||
1342 | } | |||
1343 | } | |||
1344 | ||||
1345 | bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const { | |||
1346 | if (!Subtarget->hasFlatInstOffsets()) { | |||
1347 | // Flat instructions do not have offsets, and only have the register | |||
1348 | // address. | |||
1349 | return AM.BaseOffs == 0 && AM.Scale == 0; | |||
1350 | } | |||
1351 | ||||
1352 | return AM.Scale == 0 && | |||
1353 | (AM.BaseOffs == 0 || | |||
1354 | Subtarget->getInstrInfo()->isLegalFLATOffset( | |||
1355 | AM.BaseOffs, AMDGPUAS::FLAT_ADDRESS, SIInstrFlags::FLAT)); | |||
1356 | } | |||
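
// A minimal sketch of which addressing modes the flat check above accepts,
// assuming a subtarget with hasFlatInstOffsets():
//
//   Scale 0, BaseOffs 0 : plain register address -> legal
//   Scale 0, BaseOffs N : legal iff isLegalFLATOffset(N, FLAT_ADDRESS, FLAT)
//   Scale != 0          : scaled index           -> never legal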
1357 | ||||
1358 | bool SITargetLowering::isLegalGlobalAddressingMode(const AddrMode &AM) const { | |||
1359 | if (Subtarget->hasFlatGlobalInsts()) | |||
1360 | return AM.Scale == 0 && | |||
1361 | (AM.BaseOffs == 0 || Subtarget->getInstrInfo()->isLegalFLATOffset( | |||
1362 | AM.BaseOffs, AMDGPUAS::GLOBAL_ADDRESS, | |||
1363 | SIInstrFlags::FlatGlobal)); | |||
1364 | ||||
1365 | if (!Subtarget->hasAddr64() || Subtarget->useFlatForGlobal()) { | |||
1366 |     // Assume that we will use FLAT for all global memory accesses | |||
1367 | // on VI. | |||
1368 | // FIXME: This assumption is currently wrong. On VI we still use | |||
1369 | // MUBUF instructions for the r + i addressing mode. As currently | |||
1370 | // implemented, the MUBUF instructions only work on buffer < 4GB. | |||
1371 | // It may be possible to support > 4GB buffers with MUBUF instructions, | |||
1372 | // by setting the stride value in the resource descriptor which would | |||
1373 | // increase the size limit to (stride * 4GB). However, this is risky, | |||
1374 | // because it has never been validated. | |||
1375 | return isLegalFlatAddressingMode(AM); | |||
1376 | } | |||
1377 | ||||
1378 | return isLegalMUBUFAddressingMode(AM); | |||
1379 | } | |||
1380 | ||||
1381 | bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const { | |||
1382 | // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and | |||
1383 | // additionally can do r + r + i with addr64. 32-bit has more addressing | |||
1384 | // mode options. Depending on the resource constant, it can also do | |||
1385 | // (i64 r0) + (i32 r1) * (i14 i). | |||
1386 | // | |||
1387 | // Private arrays end up using a scratch buffer most of the time, so also | |||
1388 | // assume those use MUBUF instructions. Scratch loads / stores are currently | |||
1389 |   // implemented as mubuf instructions with the offen bit set, so they are | |||
1390 |   // slightly different from the normal addr64. | |||
1391 | if (!SIInstrInfo::isLegalMUBUFImmOffset(AM.BaseOffs)) | |||
1392 | return false; | |||
1393 | ||||
1394 | // FIXME: Since we can split immediate into soffset and immediate offset, | |||
1395 | // would it make sense to allow any immediate? | |||
1396 | ||||
1397 | switch (AM.Scale) { | |||
1398 | case 0: // r + i or just i, depending on HasBaseReg. | |||
1399 | return true; | |||
1400 | case 1: | |||
1401 | return true; // We have r + r or r + i. | |||
1402 | case 2: | |||
1403 | if (AM.HasBaseReg) { | |||
1404 | // Reject 2 * r + r. | |||
1405 | return false; | |||
1406 | } | |||
1407 | ||||
1408 | // Allow 2 * r as r + r | |||
1409 | // Or 2 * r + i is allowed as r + r + i. | |||
1410 | return true; | |||
1411 | default: // Don't allow n * r | |||
1412 | return false; | |||
1413 | } | |||
1414 | } | |||
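
// A minimal sketch of the AM.Scale cases above (assuming AM.BaseOffs already
// passed isLegalMUBUFImmOffset):
//
//   Scale 0              : i or r + i         -> legal
//   Scale 1              : r + r or r + r + i -> legal
//   Scale 2, HasBaseReg  : 2*r + r            -> rejected
//   Scale 2, no base reg : 2*r (+ i)          -> legal, folded to r + r (+ i)
//   Scale >= 3           : n*r                -> rejected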
1415 | ||||
1416 | bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL, | |||
1417 | const AddrMode &AM, Type *Ty, | |||
1418 | unsigned AS, Instruction *I) const { | |||
1419 | // No global is ever allowed as a base. | |||
1420 | if (AM.BaseGV) | |||
1421 | return false; | |||
1422 | ||||
1423 | if (AS == AMDGPUAS::GLOBAL_ADDRESS) | |||
1424 | return isLegalGlobalAddressingMode(AM); | |||
1425 | ||||
1426 | if (AS == AMDGPUAS::CONSTANT_ADDRESS || | |||
1427 | AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT || | |||
1428 | AS == AMDGPUAS::BUFFER_FAT_POINTER) { | |||
1429 | // If the offset isn't a multiple of 4, it probably isn't going to be | |||
1430 | // correctly aligned. | |||
1431 | // FIXME: Can we get the real alignment here? | |||
1432 | if (AM.BaseOffs % 4 != 0) | |||
1433 | return isLegalMUBUFAddressingMode(AM); | |||
1434 | ||||
1435 | // There are no SMRD extloads, so if we have to do a small type access we | |||
1436 | // will use a MUBUF load. | |||
1437 | // FIXME?: We also need to do this if unaligned, but we don't know the | |||
1438 | // alignment here. | |||
1439 | if (Ty->isSized() && DL.getTypeStoreSize(Ty) < 4) | |||
1440 | return isLegalGlobalAddressingMode(AM); | |||
1441 | ||||
1442 | if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) { | |||
1443 | // SMRD instructions have an 8-bit, dword offset on SI. | |||
1444 | if (!isUInt<8>(AM.BaseOffs / 4)) | |||
1445 | return false; | |||
1446 | } else if (Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS) { | |||
1447 | // On CI+, this can also be a 32-bit literal constant offset. If it fits | |||
1448 |       // in 8 bits, it can use a smaller encoding. | |||
1449 | if (!isUInt<32>(AM.BaseOffs / 4)) | |||
1450 | return false; | |||
1451 | } else if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) { | |||
1452 | // On VI, these use the SMEM format and the offset is 20-bit in bytes. | |||
1453 | if (!isUInt<20>(AM.BaseOffs)) | |||
1454 | return false; | |||
1455 | } else | |||
1456 |       llvm_unreachable("unhandled generation"); | |||
1457 | ||||
1458 | if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg. | |||
1459 | return true; | |||
1460 | ||||
1461 | if (AM.Scale == 1 && AM.HasBaseReg) | |||
1462 | return true; | |||
1463 | ||||
1464 | return false; | |||
1465 | ||||
1466 | } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) { | |||
1467 | return isLegalMUBUFAddressingMode(AM); | |||
1468 | } else if (AS == AMDGPUAS::LOCAL_ADDRESS || | |||
1469 | AS == AMDGPUAS::REGION_ADDRESS) { | |||
1470 | // Basic, single offset DS instructions allow a 16-bit unsigned immediate | |||
1471 | // field. | |||
1472 | // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have | |||
1473 | // an 8-bit dword offset but we don't know the alignment here. | |||
1474 | if (!isUInt<16>(AM.BaseOffs)) | |||
1475 | return false; | |||
1476 | ||||
1477 | if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg. | |||
1478 | return true; | |||
1479 | ||||
1480 | if (AM.Scale == 1 && AM.HasBaseReg) | |||
1481 | return true; | |||
1482 | ||||
1483 | return false; | |||
1484 | } else if (AS == AMDGPUAS::FLAT_ADDRESS || | |||
1485 | AS == AMDGPUAS::UNKNOWN_ADDRESS_SPACE) { | |||
1486 | // For an unknown address space, this usually means that this is for some | |||
1487 | // reason being used for pure arithmetic, and not based on some addressing | |||
1488 | // computation. We don't have instructions that compute pointers with any | |||
1489 | // addressing modes, so treat them as having no offset like flat | |||
1490 | // instructions. | |||
1491 | return isLegalFlatAddressingMode(AM); | |||
1492 | } | |||
1493 | ||||
1494 | // Assume a user alias of global for unknown address spaces. | |||
1495 | return isLegalGlobalAddressingMode(AM); | |||
1496 | } | |||
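
// A minimal sketch of the per-generation SMRD/SMEM offset limits enforced
// above for constant-address loads (BaseOffs is a byte offset):
//
//   SI  (SOUTHERN_ISLANDS) : 8-bit dword offset  -> BaseOffs/4 must fit 8 bits
//   CI  (SEA_ISLANDS)      : 32-bit literal      -> BaseOffs/4 must fit 32 bits
//   VI+ (VOLCANIC_ISLANDS) : 20-bit byte offset  -> BaseOffs must fit 20 bits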
1497 | ||||
1498 | bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT, | |||
1499 | const MachineFunction &MF) const { | |||
1500 | if (AS == AMDGPUAS::GLOBAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS) { | |||
1501 | return (MemVT.getSizeInBits() <= 4 * 32); | |||
1502 | } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) { | |||
1503 | unsigned MaxPrivateBits = 8 * getSubtarget()->getMaxPrivateElementSize(); | |||
1504 | return (MemVT.getSizeInBits() <= MaxPrivateBits); | |||
1505 | } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) { | |||
1506 | return (MemVT.getSizeInBits() <= 2 * 32); | |||
1507 | } | |||
1508 | return true; | |||
1509 | } | |||
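
// A minimal sketch of the store-merging limits above, assuming a 4-byte
// maximum private element size:
//
//   global/flat  : merge up to 128 bits (a dwordx4, e.g. one v4i32 store)
//   private      : merge up to 8 * getMaxPrivateElementSize() = 32 bits
//   local/region : merge up to 64 bits (two dwords, matching ds_write_b64)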
1510 | ||||
1511 | bool SITargetLowering::allowsMisalignedMemoryAccessesImpl( | |||
1512 | unsigned Size, unsigned AddrSpace, Align Alignment, | |||
1513 | MachineMemOperand::Flags Flags, bool *IsFast) const { | |||
1514 | if (IsFast) | |||
1515 | *IsFast = false; | |||
1516 | ||||
1517 | if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS || | |||
1518 | AddrSpace == AMDGPUAS::REGION_ADDRESS) { | |||
1519 | // Check if alignment requirements for ds_read/write instructions are | |||
1520 | // disabled. | |||
1521 | if (!Subtarget->hasUnalignedDSAccessEnabled() && Alignment < Align(4)) | |||
1522 | return false; | |||
1523 | ||||
1524 | Align RequiredAlignment(PowerOf2Ceil(Size/8)); // Natural alignment. | |||
1525 | if (Subtarget->hasLDSMisalignedBug() && Size > 32 && | |||
1526 | Alignment < RequiredAlignment) | |||
1527 | return false; | |||
1528 | ||||
1529 |     // Either the alignment requirements are "enabled", or there is an | |||
1530 |     // unaligned-LDS-access-related hardware bug even though the alignment | |||
1531 |     // requirements are "disabled". In either case, we need to check for | |||
1532 |     // proper alignment requirements. | |||
1533 | // | |||
1534 | switch (Size) { | |||
1535 | case 64: | |||
1536 | // SI has a hardware bug in the LDS / GDS bounds checking: if the base | |||
1537 | // address is negative, then the instruction is incorrectly treated as | |||
1538 | // out-of-bounds even if base + offsets is in bounds. Split vectorized | |||
1539 | // loads here to avoid emitting ds_read2_b32. We may re-combine the | |||
1540 | // load later in the SILoadStoreOptimizer. | |||
1541 | if (!Subtarget->hasUsableDSOffset() && Alignment < Align(8)) | |||
1542 | return false; | |||
1543 | ||||
1544 |       // An 8-byte access via ds_read/write_b64 requires 8-byte alignment, but | |||
1545 |       // we can do a 4-byte aligned, 8-byte access in a single operation using | |||
1546 |       // ds_read2/write2_b32 with adjacent offsets. | |||
1547 | RequiredAlignment = Align(4); | |||
1548 | break; | |||
1549 | case 96: | |||
1550 | if (!Subtarget->hasDS96AndDS128()) | |||
1551 | return false; | |||
1552 | ||||
1553 |       // A 12-byte access via ds_read/write_b96 requires 16-byte alignment on | |||
1554 |       // gfx8 and older. | |||
1555 | ||||
1556 | if (Subtarget->hasUnalignedDSAccessEnabled()) { | |||
1557 |         // Naturally aligned access is fastest. However, also report it as | |||
1558 |         // Fast if memory is aligned to less than a DWORD. A narrow load or | |||
1559 |         // store will be just as slow as a single ds_read_b96/ds_write_b96, | |||
1560 |         // but there will be more of them, so overall we pay less of a penalty | |||
1561 |         // issuing a single instruction. | |||
1562 | if (IsFast) | |||
1563 | *IsFast = Alignment >= RequiredAlignment || Alignment < Align(4); | |||
1564 | return true; | |||
1565 | } | |||
1566 | ||||
1567 | break; | |||
1568 | case 128: | |||
1569 | if (!Subtarget->hasDS96AndDS128() || !Subtarget->useDS128()) | |||
1570 | return false; | |||
1571 | ||||
1572 |       // A 16-byte access via ds_read/write_b128 requires 16-byte alignment on | |||
1573 |       // gfx8 and older, but we can do an 8-byte aligned, 16-byte access in a | |||
1574 |       // single operation using ds_read2/write2_b64. | |||
1575 | RequiredAlignment = Align(8); | |||
1576 | ||||
1577 | if (Subtarget->hasUnalignedDSAccessEnabled()) { | |||
1578 |         // Naturally aligned access is fastest. However, also report it as | |||
1579 |         // Fast if memory is aligned to less than a DWORD. A narrow load or | |||
1580 |         // store will be just as slow as a single ds_read_b128/ds_write_b128, | |||
1581 |         // but there will be more of them, so overall we pay less of a penalty | |||
1582 |         // issuing a single instruction. | |||
1583 | if (IsFast) | |||
1584 | *IsFast = Alignment >= RequiredAlignment || Alignment < Align(4); | |||
1585 | return true; | |||
1586 | } | |||
1587 | ||||
1588 | break; | |||
1589 | default: | |||
1590 | if (Size > 32) | |||
1591 | return false; | |||
1592 | ||||
1593 | break; | |||
1594 | } | |||
1595 | ||||
1596 | if (IsFast) { | |||
1597 |       // FIXME: Pretend it is fast if +unaligned-access-mode is passed so | |||
1598 |       // that DS accesses get vectorized. Do this only for sizes below 96, as | |||
1599 |       // the b96 and b128 cases are already properly handled. | |||
1600 |       // Remove the Subtarget check once all sizes are properly handled. | |||
1601 | *IsFast = Alignment >= RequiredAlignment || | |||
1602 | (Subtarget->hasUnalignedDSAccessEnabled() && Size < 96); | |||
1603 | } | |||
1604 | ||||
1605 | return Alignment >= RequiredAlignment || | |||
1606 | Subtarget->hasUnalignedDSAccessEnabled(); | |||
1607 | } | |||
1608 | ||||
1609 | if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS) { | |||
1610 | bool AlignedBy4 = Alignment >= Align(4); | |||
1611 | if (IsFast) | |||
1612 | *IsFast = AlignedBy4; | |||
1613 | ||||
1614 | return AlignedBy4 || | |||
1615 | Subtarget->enableFlatScratch() || | |||
1616 | Subtarget->hasUnalignedScratchAccess(); | |||
1617 | } | |||
1618 | ||||
1619 | // FIXME: We have to be conservative here and assume that flat operations | |||
1620 | // will access scratch. If we had access to the IR function, then we | |||
1621 | // could determine if any private memory was used in the function. | |||
1622 | if (AddrSpace == AMDGPUAS::FLAT_ADDRESS && | |||
1623 | !Subtarget->hasUnalignedScratchAccess()) { | |||
1624 | bool AlignedBy4 = Alignment >= Align(4); | |||
1625 | if (IsFast) | |||
1626 | *IsFast = AlignedBy4; | |||
1627 | ||||
1628 | return AlignedBy4; | |||
1629 | } | |||
1630 | ||||
1631 | if (Subtarget->hasUnalignedBufferAccessEnabled()) { | |||
1632 | // If we have a uniform constant load, it still requires using a slow | |||
1633 | // buffer instruction if unaligned. | |||
1634 | if (IsFast) { | |||
1635 | // Accesses can really be issued as 1-byte aligned or 4-byte aligned, so | |||
1636 | // 2-byte alignment is worse than 1 unless doing a 2-byte access. | |||
1637 | *IsFast = (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS || | |||
1638 | AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT) ? | |||
1639 | Alignment >= Align(4) : Alignment != Align(2); | |||
1640 | } | |||
1641 | ||||
1642 | return true; | |||
1643 | } | |||
1644 | ||||
1645 |   // Values smaller than a dword must be aligned. | |||
1646 | if (Size < 32) | |||
1647 | return false; | |||
1648 | ||||
1649 | // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the | |||
1650 | // byte-address are ignored, thus forcing Dword alignment. | |||
1651 | // This applies to private, global, and constant memory. | |||
1652 | if (IsFast) | |||
1653 | *IsFast = true; | |||
1654 | ||||
1655 | return Size >= 32 && Alignment >= Align(4); | |||
1656 | } | |||
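
// A minimal sketch, assuming LDS accesses with unaligned DS access disabled,
// of how the RequiredAlignment relaxation above plays out:
//
//   b64  load, align 8 : ds_read_b64                     -> allowed
//   b64  load, align 4 : ds_read2_b32 (adjacent offsets) -> allowed
//   b128 load, align 8 : ds_read2_b64                    -> allowed
//   b64  load, align 2 : below the relaxed requirement   -> rejected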
1657 | ||||
1658 | bool SITargetLowering::allowsMisalignedMemoryAccesses( | |||
1659 | EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags, | |||
1660 | bool *IsFast) const { | |||
1661 | if (IsFast) | |||
1662 | *IsFast = false; | |||
1663 | ||||
1664 | // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96, | |||
1665 | // which isn't a simple VT. | |||
1666 | // Until MVT is extended to handle this, simply check for the size and | |||
1667 | // rely on the condition below: allow accesses if the size is a multiple of 4. | |||
1668 |   if (VT == MVT::Other || | |||
1669 |       (VT.getSizeInBits() > 1024 && VT.getStoreSize() > 16)) { | |||
1670 | return false; | |||
1671 | } | |||
1672 | ||||
1673 | bool Allow = allowsMisalignedMemoryAccessesImpl(VT.getSizeInBits(), AddrSpace, | |||
1674 | Alignment, Flags, IsFast); | |||
1675 | ||||
1676 | if (Allow && IsFast && Subtarget->hasUnalignedDSAccessEnabled() && | |||
1677 | (AddrSpace == AMDGPUAS::LOCAL_ADDRESS || | |||
1678 | AddrSpace == AMDGPUAS::REGION_ADDRESS)) { | |||
1679 |     // Pretend it is fast if +unaligned-access-mode is passed so that DS | |||
1680 |     // accesses get vectorized. We could use ds_read2_b*/ds_write2_b* | |||
1681 |     // instructions on misaligned data, which is faster than a pair of | |||
1682 |     // ds_read_b*/ds_write_b* that would be equally misaligned. | |||
1683 |     // This is only used by the common passes; selection always calls the | |||
1684 |     // allowsMisalignedMemoryAccessesImpl version. | |||
1685 | *IsFast = true; | |||
1686 | } | |||
1687 | ||||
1688 | return Allow; | |||
1689 | } | |||
1690 | ||||
1691 | EVT SITargetLowering::getOptimalMemOpType( | |||
1692 | const MemOp &Op, const AttributeList &FuncAttributes) const { | |||
1693 | // FIXME: Should account for address space here. | |||
1694 | ||||
1695 | // The default fallback uses the private pointer size as a guess for a type to | |||
1696 | // use. Make sure we switch these to 64-bit accesses. | |||
1697 | ||||
1698 | if (Op.size() >= 16 && | |||
1699 | Op.isDstAligned(Align(4))) // XXX: Should only do for global | |||
1700 | return MVT::v4i32; | |||
1701 | ||||
1702 | if (Op.size() >= 8 && Op.isDstAligned(Align(4))) | |||
1703 | return MVT::v2i32; | |||
1704 | ||||
1705 | // Use the default. | |||
1706 | return MVT::Other; | |||
1707 | } | |||
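
// A minimal sketch of the memcpy/memset type selection above:
//
//   size >= 16 bytes, dst 4-byte aligned : v4i32 chunks
//   size >= 8 bytes,  dst 4-byte aligned : v2i32 chunks
//   anything smaller or under-aligned    : MVT::Other (generic choice)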
1708 | ||||
1709 | bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const { | |||
1710 | const MemSDNode *MemNode = cast<MemSDNode>(N); | |||
1711 | return MemNode->getMemOperand()->getFlags() & MONoClobber; | |||
1712 | } | |||
1713 | ||||
1714 | bool SITargetLowering::isNonGlobalAddrSpace(unsigned AS) { | |||
1715 | return AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS || | |||
1716 | AS == AMDGPUAS::PRIVATE_ADDRESS; | |||
1717 | } | |||
1718 | ||||
1719 | bool SITargetLowering::isFreeAddrSpaceCast(unsigned SrcAS, | |||
1720 | unsigned DestAS) const { | |||
1721 | // Flat -> private/local is a simple truncate. | |||
1722 | // Flat -> global is no-op | |||
1723 | if (SrcAS == AMDGPUAS::FLAT_ADDRESS) | |||
1724 | return true; | |||
1725 | ||||
1726 | const GCNTargetMachine &TM = | |||
1727 | static_cast<const GCNTargetMachine &>(getTargetMachine()); | |||
1728 | return TM.isNoopAddrSpaceCast(SrcAS, DestAS); | |||
1729 | } | |||
1730 | ||||
1731 | bool SITargetLowering::isMemOpUniform(const SDNode *N) const { | |||
1732 | const MemSDNode *MemNode = cast<MemSDNode>(N); | |||
1733 | ||||
1734 | return AMDGPUInstrInfo::isUniformMMO(MemNode->getMemOperand()); | |||
1735 | } | |||
1736 | ||||
1737 | TargetLoweringBase::LegalizeTypeAction | |||
1738 | SITargetLowering::getPreferredVectorAction(MVT VT) const { | |||
1739 | if (!VT.isScalableVector() && VT.getVectorNumElements() != 1 && | |||
1740 | VT.getScalarType().bitsLE(MVT::i16)) | |||
1741 | return VT.isPow2VectorType() ? TypeSplitVector : TypeWidenVector; | |||
1742 | return TargetLoweringBase::getPreferredVectorAction(VT); | |||
1743 | } | |||
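
// A minimal sketch of the preferred action above for vectors with sub-dword
// elements:
//
//   v2i16, v4f16 (power-of-2)     : TypeSplitVector
//   v3i16, v5f16 (non-power-of-2) : TypeWidenVector
//   i32 or wider elements         : default TargetLoweringBase action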
1744 | ||||
1745 | bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, | |||
1746 | Type *Ty) const { | |||
1747 | // FIXME: Could be smarter if called for vector constants. | |||
1748 | return true; | |||
1749 | } | |||
1750 | ||||
1751 | bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const { | |||
1752 | if (Subtarget->has16BitInsts() && VT == MVT::i16) { | |||
1753 | switch (Op) { | |||
1754 | case ISD::LOAD: | |||
1755 | case ISD::STORE: | |||
1756 | ||||
1757 | // These operations are done with 32-bit instructions anyway. | |||
1758 | case ISD::AND: | |||
1759 | case ISD::OR: | |||
1760 | case ISD::XOR: | |||
1761 | case ISD::SELECT: | |||
1762 | // TODO: Extensions? | |||
1763 | return true; | |||
1764 | default: | |||
1765 | return false; | |||
1766 | } | |||
1767 | } | |||
1768 | ||||
1769 | // SimplifySetCC uses this function to determine whether or not it should | |||
1770 | // create setcc with i1 operands. We don't have instructions for i1 setcc. | |||
1771 | if (VT == MVT::i1 && Op == ISD::SETCC) | |||
1772 | return false; | |||
1773 | ||||
1774 | return TargetLowering::isTypeDesirableForOp(Op, VT); | |||
1775 | } | |||
1776 | ||||
1777 | SDValue SITargetLowering::lowerKernArgParameterPtr(SelectionDAG &DAG, | |||
1778 | const SDLoc &SL, | |||
1779 | SDValue Chain, | |||
1780 | uint64_t Offset) const { | |||
1781 | const DataLayout &DL = DAG.getDataLayout(); | |||
1782 | MachineFunction &MF = DAG.getMachineFunction(); | |||
1783 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); | |||
1784 | ||||
1785 | const ArgDescriptor *InputPtrReg; | |||
1786 | const TargetRegisterClass *RC; | |||
1787 | LLT ArgTy; | |||
1788 | MVT PtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS); | |||
1789 | ||||
1790 | std::tie(InputPtrReg, RC, ArgTy) = | |||
1791 | Info->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR); | |||
1792 | ||||
1793 | // We may not have the kernarg segment argument if we have no kernel | |||
1794 | // arguments. | |||
1795 | if (!InputPtrReg) | |||
1796 | return DAG.getConstant(0, SL, PtrVT); | |||
1797 | ||||
1798 | MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); | |||
1799 | SDValue BasePtr = DAG.getCopyFromReg(Chain, SL, | |||
1800 | MRI.getLiveInVirtReg(InputPtrReg->getRegister()), PtrVT); | |||
1801 | ||||
1802 | return DAG.getObjectPtrOffset(SL, BasePtr, TypeSize::Fixed(Offset)); | |||
1803 | } | |||
1804 | ||||
1805 | SDValue SITargetLowering::getImplicitArgPtr(SelectionDAG &DAG, | |||
1806 | const SDLoc &SL) const { | |||
1807 | uint64_t Offset = getImplicitParameterOffset(DAG.getMachineFunction(), | |||
1808 | FIRST_IMPLICIT); | |||
1809 | return lowerKernArgParameterPtr(DAG, SL, DAG.getEntryNode(), Offset); | |||
1810 | } | |||
1811 | ||||
1812 | SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT, | |||
1813 | const SDLoc &SL, SDValue Val, | |||
1814 | bool Signed, | |||
1815 | const ISD::InputArg *Arg) const { | |||
1816 | // First, if it is a widened vector, narrow it. | |||
1817 | if (VT.isVector() && | |||
1818 | VT.getVectorNumElements() != MemVT.getVectorNumElements()) { | |||
1819 | EVT NarrowedVT = | |||
1820 | EVT::getVectorVT(*DAG.getContext(), MemVT.getVectorElementType(), | |||
1821 | VT.getVectorNumElements()); | |||
1822 | Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL, NarrowedVT, Val, | |||
1823 | DAG.getConstant(0, SL, MVT::i32)); | |||
1824 | } | |||
1825 | ||||
1826 | // Then convert the vector elements or scalar value. | |||
1827 | if (Arg && (Arg->Flags.isSExt() || Arg->Flags.isZExt()) && | |||
1828 | VT.bitsLT(MemVT)) { | |||
1829 | unsigned Opc = Arg->Flags.isZExt() ? ISD::AssertZext : ISD::AssertSext; | |||
1830 | Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT)); | |||
1831 | } | |||
1832 | ||||
1833 | if (MemVT.isFloatingPoint()) | |||
1834 | Val = getFPExtOrFPRound(DAG, Val, SL, VT); | |||
1835 | else if (Signed) | |||
1836 | Val = DAG.getSExtOrTrunc(Val, SL, VT); | |||
1837 | else | |||
1838 | Val = DAG.getZExtOrTrunc(Val, SL, VT); | |||
1839 | ||||
1840 | return Val; | |||
1841 | } | |||
1842 | ||||
1843 | SDValue SITargetLowering::lowerKernargMemParameter( | |||
1844 | SelectionDAG &DAG, EVT VT, EVT MemVT, const SDLoc &SL, SDValue Chain, | |||
1845 | uint64_t Offset, Align Alignment, bool Signed, | |||
1846 | const ISD::InputArg *Arg) const { | |||
1847 | MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS); | |||
1848 | ||||
1849 | // Try to avoid using an extload by loading earlier than the argument address, | |||
1850 | // and extracting the relevant bits. The load should hopefully be merged with | |||
1851 | // the previous argument. | |||
1852 | if (MemVT.getStoreSize() < 4 && Alignment < 4) { | |||
1853 | // TODO: Handle align < 4 and size >= 4 (can happen with packed structs). | |||
1854 | int64_t AlignDownOffset = alignDown(Offset, 4); | |||
1855 | int64_t OffsetDiff = Offset - AlignDownOffset; | |||
1856 | ||||
1857 | EVT IntVT = MemVT.changeTypeToInteger(); | |||
1858 | ||||
1859 | // TODO: If we passed in the base kernel offset we could have a better | |||
1860 | // alignment than 4, but we don't really need it. | |||
1861 | SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, AlignDownOffset); | |||
1862 | SDValue Load = DAG.getLoad(MVT::i32, SL, Chain, Ptr, PtrInfo, Align(4), | |||
1863 | MachineMemOperand::MODereferenceable | | |||
1864 | MachineMemOperand::MOInvariant); | |||
1865 | ||||
1866 | SDValue ShiftAmt = DAG.getConstant(OffsetDiff * 8, SL, MVT::i32); | |||
1867 | SDValue Extract = DAG.getNode(ISD::SRL, SL, MVT::i32, Load, ShiftAmt); | |||
1868 | ||||
1869 | SDValue ArgVal = DAG.getNode(ISD::TRUNCATE, SL, IntVT, Extract); | |||
1870 | ArgVal = DAG.getNode(ISD::BITCAST, SL, MemVT, ArgVal); | |||
1871 | ArgVal = convertArgType(DAG, VT, MemVT, SL, ArgVal, Signed, Arg); | |||
1872 | ||||
1873 | ||||
1874 | return DAG.getMergeValues({ ArgVal, Load.getValue(1) }, SL); | |||
1875 | } | |||
1876 | ||||
1877 | SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, Offset); | |||
1878 | SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Alignment, | |||
1879 | MachineMemOperand::MODereferenceable | | |||
1880 | MachineMemOperand::MOInvariant); | |||
1881 | ||||
1882 | SDValue Val = convertArgType(DAG, VT, MemVT, SL, Load, Signed, Arg); | |||
1883 | return DAG.getMergeValues({ Val, Load.getValue(1) }, SL); | |||
1884 | } | |||
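
// A minimal sketch, assuming an i16 kernel argument at byte offset 2 with
// only 2-byte alignment (the sub-dword path above), in pseudo-IR:
//
//   ; AlignDownOffset = alignDown(2, 4) = 0, OffsetDiff = 2
//   %dword = load i32, kernarg + 0, align 4   ; no extload needed
//   %shift = lshr i32 %dword, 16              ; OffsetDiff * 8
//   %arg   = trunc i32 %shift to i16          ; then convertArgType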
1885 | ||||
1886 | SDValue SITargetLowering::lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA, | |||
1887 | const SDLoc &SL, SDValue Chain, | |||
1888 | const ISD::InputArg &Arg) const { | |||
1889 | MachineFunction &MF = DAG.getMachineFunction(); | |||
1890 | MachineFrameInfo &MFI = MF.getFrameInfo(); | |||
1891 | ||||
1892 | if (Arg.Flags.isByVal()) { | |||
1893 | unsigned Size = Arg.Flags.getByValSize(); | |||
1894 | int FrameIdx = MFI.CreateFixedObject(Size, VA.getLocMemOffset(), false); | |||
1895 | return DAG.getFrameIndex(FrameIdx, MVT::i32); | |||
1896 | } | |||
1897 | ||||
1898 | unsigned ArgOffset = VA.getLocMemOffset(); | |||
1899 | unsigned ArgSize = VA.getValVT().getStoreSize(); | |||
1900 | ||||
1901 | int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, true); | |||
1902 | ||||
1903 | // Create load nodes to retrieve arguments from the stack. | |||
1904 | SDValue FIN = DAG.getFrameIndex(FI, MVT::i32); | |||
1905 | SDValue ArgValue; | |||
1906 | ||||
1907 |   // For NON_EXTLOAD, generic code in getLoad asserts that ValVT == MemVT. | |||
1908 | ISD::LoadExtType ExtType = ISD::NON_EXTLOAD; | |||
1909 | MVT MemVT = VA.getValVT(); | |||
1910 | ||||
1911 | switch (VA.getLocInfo()) { | |||
1912 | default: | |||
1913 | break; | |||
1914 | case CCValAssign::BCvt: | |||
1915 | MemVT = VA.getLocVT(); | |||
1916 | break; | |||
1917 | case CCValAssign::SExt: | |||
1918 | ExtType = ISD::SEXTLOAD; | |||
1919 | break; | |||
1920 | case CCValAssign::ZExt: | |||
1921 | ExtType = ISD::ZEXTLOAD; | |||
1922 | break; | |||
1923 | case CCValAssign::AExt: | |||
1924 | ExtType = ISD::EXTLOAD; | |||
1925 | break; | |||
1926 | } | |||
1927 | ||||
1928 | ArgValue = DAG.getExtLoad( | |||
1929 | ExtType, SL, VA.getLocVT(), Chain, FIN, | |||
1930 | MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), | |||
1931 | MemVT); | |||
1932 | return ArgValue; | |||
1933 | } | |||
1934 | ||||
1935 | SDValue SITargetLowering::getPreloadedValue(SelectionDAG &DAG, | |||
1936 | const SIMachineFunctionInfo &MFI, | |||
1937 | EVT VT, | |||
1938 | AMDGPUFunctionArgInfo::PreloadedValue PVID) const { | |||
1939 | const ArgDescriptor *Reg; | |||
1940 | const TargetRegisterClass *RC; | |||
1941 | LLT Ty; | |||
1942 | ||||
1943 | std::tie(Reg, RC, Ty) = MFI.getPreloadedValue(PVID); | |||
1944 | if (!Reg) { | |||
1945 | if (PVID == AMDGPUFunctionArgInfo::PreloadedValue::KERNARG_SEGMENT_PTR) { | |||
1946 | // It's possible for a kernarg intrinsic call to appear in a kernel with | |||
1947 | // no allocated segment, in which case we do not add the user sgpr | |||
1948 | // argument, so just return null. | |||
1949 | return DAG.getConstant(0, SDLoc(), VT); | |||
1950 | } | |||
1951 | ||||
1952 | // It's undefined behavior if a function marked with the amdgpu-no-* | |||
1953 | // attributes uses the corresponding intrinsic. | |||
1954 | return DAG.getUNDEF(VT); | |||
1955 | } | |||
1956 | ||||
1957 | return CreateLiveInRegister(DAG, RC, Reg->getRegister(), VT); | |||
1958 | } | |||
1959 | ||||
1960 | static void processPSInputArgs(SmallVectorImpl<ISD::InputArg> &Splits, | |||
1961 | CallingConv::ID CallConv, | |||
1962 | ArrayRef<ISD::InputArg> Ins, BitVector &Skipped, | |||
1963 | FunctionType *FType, | |||
1964 | SIMachineFunctionInfo *Info) { | |||
1965 | for (unsigned I = 0, E = Ins.size(), PSInputNum = 0; I != E; ++I) { | |||
1966 | const ISD::InputArg *Arg = &Ins[I]; | |||
1967 | ||||
1968 |     assert((!Arg->VT.isVector() || Arg->VT.getScalarSizeInBits() == 16) && | |||
1969 |            "vector type argument should have been split"); | |||
1970 | ||||
1971 | // First check if it's a PS input addr. | |||
1972 | if (CallConv == CallingConv::AMDGPU_PS && | |||
1973 | !Arg->Flags.isInReg() && PSInputNum <= 15) { | |||
1974 | bool SkipArg = !Arg->Used && !Info->isPSInputAllocated(PSInputNum); | |||
1975 | ||||
1976 | // Inconveniently only the first part of the split is marked as isSplit, | |||
1977 | // so skip to the end. We only want to increment PSInputNum once for the | |||
1978 | // entire split argument. | |||
1979 | if (Arg->Flags.isSplit()) { | |||
1980 | while (!Arg->Flags.isSplitEnd()) { | |||
1981 |           assert((!Arg->VT.isVector() || | |||
1982 |                   Arg->VT.getScalarSizeInBits() == 16) && | |||
1983 |                  "unexpected vector split in ps argument type"); | |||
1984 | if (!SkipArg) | |||
1985 | Splits.push_back(*Arg); | |||
1986 | Arg = &Ins[++I]; | |||
1987 | } | |||
1988 | } | |||
1989 | ||||
1990 | if (SkipArg) { | |||
1991 | // We can safely skip PS inputs. | |||
1992 | Skipped.set(Arg->getOrigArgIndex()); | |||
1993 | ++PSInputNum; | |||
1994 | continue; | |||
1995 | } | |||
1996 | ||||
1997 | Info->markPSInputAllocated(PSInputNum); | |||
1998 | if (Arg->Used) | |||
1999 | Info->markPSInputEnabled(PSInputNum); | |||
2000 | ||||
2001 | ++PSInputNum; | |||
2002 | } | |||
2003 | ||||
2004 | Splits.push_back(*Arg); | |||
2005 | } | |||
2006 | } | |||
2007 | ||||
2008 | // Allocate special inputs passed in VGPRs. | |||
2009 | void SITargetLowering::allocateSpecialEntryInputVGPRs(CCState &CCInfo, | |||
2010 | MachineFunction &MF, | |||
2011 | const SIRegisterInfo &TRI, | |||
2012 | SIMachineFunctionInfo &Info) const { | |||
2013 | const LLT S32 = LLT::scalar(32); | |||
2014 | MachineRegisterInfo &MRI = MF.getRegInfo(); | |||
2015 | ||||
2016 | if (Info.hasWorkItemIDX()) { | |||
2017 | Register Reg = AMDGPU::VGPR0; | |||
2018 | MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32); | |||
2019 | ||||
2020 | CCInfo.AllocateReg(Reg); | |||
2021 | unsigned Mask = (Subtarget->hasPackedTID() && | |||
2022 | Info.hasWorkItemIDY()) ? 0x3ff : ~0u; | |||
2023 | Info.setWorkItemIDX(ArgDescriptor::createRegister(Reg, Mask)); | |||
2024 | } | |||
2025 | ||||
2026 | if (Info.hasWorkItemIDY()) { | |||
2027 |     assert(Info.hasWorkItemIDX()); | |||
2028 | if (Subtarget->hasPackedTID()) { | |||
2029 | Info.setWorkItemIDY(ArgDescriptor::createRegister(AMDGPU::VGPR0, | |||
2030 | 0x3ff << 10)); | |||
2031 | } else { | |||
2032 | unsigned Reg = AMDGPU::VGPR1; | |||
2033 | MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32); | |||
2034 | ||||
2035 | CCInfo.AllocateReg(Reg); | |||
2036 | Info.setWorkItemIDY(ArgDescriptor::createRegister(Reg)); | |||
2037 | } | |||
2038 | } | |||
2039 | ||||
2040 | if (Info.hasWorkItemIDZ()) { | |||
2041 |     assert(Info.hasWorkItemIDX() && Info.hasWorkItemIDY()); | |||
2042 | if (Subtarget->hasPackedTID()) { | |||
2043 | Info.setWorkItemIDZ(ArgDescriptor::createRegister(AMDGPU::VGPR0, | |||
2044 | 0x3ff << 20)); | |||
2045 | } else { | |||
2046 | unsigned Reg = AMDGPU::VGPR2; | |||
2047 | MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32); | |||
2048 | ||||
2049 | CCInfo.AllocateReg(Reg); | |||
2050 | Info.setWorkItemIDZ(ArgDescriptor::createRegister(Reg)); | |||
2051 | } | |||
2052 | } | |||
2053 | } | |||
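
// A minimal sketch of the packed work-item ID layout used above when the
// subtarget has packed TIDs (all three IDs arrive in VGPR0):
//
//   id.x =  v0        & 0x3ff
//   id.y = (v0 >> 10) & 0x3ff
//   id.z = (v0 >> 20) & 0x3ff
//
// Without packed TIDs, the IDs arrive in VGPR0, VGPR1 and VGPR2 instead.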
2054 | ||||
2055 | // Try to allocate a VGPR at the end of the argument list, or, if no argument | |||
2056 | // VGPRs are left, allocate a stack slot instead. | |||
2057 | // If \p Mask is given, it indicates the bitfield position in the register. | |||
2058 | // If \p Arg is given, reuse it with the new \p Mask instead of allocating anew. | |||
2059 | static ArgDescriptor allocateVGPR32Input(CCState &CCInfo, unsigned Mask = ~0u, | |||
2060 | ArgDescriptor Arg = ArgDescriptor()) { | |||
2061 | if (Arg.isSet()) | |||
2062 | return ArgDescriptor::createArg(Arg, Mask); | |||
2063 | ||||
2064 | ArrayRef<MCPhysReg> ArgVGPRs | |||
2065 | = makeArrayRef(AMDGPU::VGPR_32RegClass.begin(), 32); | |||
2066 | unsigned RegIdx = CCInfo.getFirstUnallocated(ArgVGPRs); | |||
2067 | if (RegIdx == ArgVGPRs.size()) { | |||
2068 | // Spill to stack required. | |||
2069 | int64_t Offset = CCInfo.AllocateStack(4, Align(4)); | |||
2070 | ||||
2071 | return ArgDescriptor::createStack(Offset, Mask); | |||
2072 | } | |||
2073 | ||||
2074 | unsigned Reg = ArgVGPRs[RegIdx]; | |||
2075 | Reg = CCInfo.AllocateReg(Reg); | |||
2076 |   assert(Reg != AMDGPU::NoRegister); | |||
2077 | ||||
2078 | MachineFunction &MF = CCInfo.getMachineFunction(); | |||
2079 | Register LiveInVReg = MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass); | |||
2080 | MF.getRegInfo().setType(LiveInVReg, LLT::scalar(32)); | |||
2081 | return ArgDescriptor::createRegister(Reg, Mask); | |||
2082 | } | |||
2083 | ||||
2084 | static ArgDescriptor allocateSGPR32InputImpl(CCState &CCInfo, | |||
2085 | const TargetRegisterClass *RC, | |||
2086 | unsigned NumArgRegs) { | |||
2087 | ArrayRef<MCPhysReg> ArgSGPRs = makeArrayRef(RC->begin(), 32); | |||
2088 | unsigned RegIdx = CCInfo.getFirstUnallocated(ArgSGPRs); | |||
2089 | if (RegIdx == ArgSGPRs.size()) | |||
2090 | report_fatal_error("ran out of SGPRs for arguments"); | |||
2091 | ||||
2092 | unsigned Reg = ArgSGPRs[RegIdx]; | |||
2093 | Reg = CCInfo.AllocateReg(Reg); | |||
2094 |   assert(Reg != AMDGPU::NoRegister); | |||
2095 | ||||
2096 | MachineFunction &MF = CCInfo.getMachineFunction(); | |||
2097 | MF.addLiveIn(Reg, RC); | |||
2098 | return ArgDescriptor::createRegister(Reg); | |||
2099 | } | |||
2100 | ||||
2101 | // If this has a fixed position, we still should allocate the register in the | |||
2102 | // CCInfo state. Technically we could get away with this for values passed | |||
2103 | // outside of the normal argument range. | |||
2104 | static void allocateFixedSGPRInputImpl(CCState &CCInfo, | |||
2105 | const TargetRegisterClass *RC, | |||
2106 | MCRegister Reg) { | |||
2107 | Reg = CCInfo.AllocateReg(Reg); | |||
2108 |   assert(Reg != AMDGPU::NoRegister); | |||
2109 | MachineFunction &MF = CCInfo.getMachineFunction(); | |||
2110 | MF.addLiveIn(Reg, RC); | |||
2111 | } | |||
2112 | ||||
2113 | static void allocateSGPR32Input(CCState &CCInfo, ArgDescriptor &Arg) { | |||
2114 | if (Arg) { | |||
2115 | allocateFixedSGPRInputImpl(CCInfo, &AMDGPU::SGPR_32RegClass, | |||
2116 | Arg.getRegister()); | |||
2117 | } else | |||
2118 | Arg = allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_32RegClass, 32); | |||
2119 | } | |||
2120 | ||||
2121 | static void allocateSGPR64Input(CCState &CCInfo, ArgDescriptor &Arg) { | |||
2122 | if (Arg) { | |||
2123 | allocateFixedSGPRInputImpl(CCInfo, &AMDGPU::SGPR_64RegClass, | |||
2124 | Arg.getRegister()); | |||
2125 | } else | |||
2126 | Arg = allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_64RegClass, 16); | |||
2127 | } | |||
2128 | ||||
2129 | /// Allocate implicit function VGPR arguments at the end of allocated user | |||
2130 | /// arguments. | |||
2131 | void SITargetLowering::allocateSpecialInputVGPRs( | |||
2132 | CCState &CCInfo, MachineFunction &MF, | |||
2133 | const SIRegisterInfo &TRI, SIMachineFunctionInfo &Info) const { | |||
2134 | const unsigned Mask = 0x3ff; | |||
2135 | ArgDescriptor Arg; | |||
2136 | ||||
2137 | if (Info.hasWorkItemIDX()) { | |||
2138 | Arg = allocateVGPR32Input(CCInfo, Mask); | |||
2139 | Info.setWorkItemIDX(Arg); | |||
2140 | } | |||
2141 | ||||
2142 | if (Info.hasWorkItemIDY()) { | |||
2143 | Arg = allocateVGPR32Input(CCInfo, Mask << 10, Arg); | |||
2144 | Info.setWorkItemIDY(Arg); | |||
2145 | } | |||
2146 | ||||
2147 | if (Info.hasWorkItemIDZ()) | |||
2148 | Info.setWorkItemIDZ(allocateVGPR32Input(CCInfo, Mask << 20, Arg)); | |||
2149 | } | |||
2150 | ||||
2151 | /// Allocate implicit function VGPR arguments in fixed registers. | |||
2152 | void SITargetLowering::allocateSpecialInputVGPRsFixed( | |||
2153 | CCState &CCInfo, MachineFunction &MF, | |||
2154 | const SIRegisterInfo &TRI, SIMachineFunctionInfo &Info) const { | |||
2155 | Register Reg = CCInfo.AllocateReg(AMDGPU::VGPR31); | |||
2156 | if (!Reg) | |||
2157 |     report_fatal_error("failed to allocate VGPR for implicit arguments"); | |||
2158 | ||||
2159 | const unsigned Mask = 0x3ff; | |||
2160 | Info.setWorkItemIDX(ArgDescriptor::createRegister(Reg, Mask)); | |||
2161 | Info.setWorkItemIDY(ArgDescriptor::createRegister(Reg, Mask << 10)); | |||
2162 | Info.setWorkItemIDZ(ArgDescriptor::createRegister(Reg, Mask << 20)); | |||
2163 | } | |||
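
// A minimal sketch, assuming the fixed convention above: in a callee, a use
// of llvm.amdgcn.workitem.id.y() becomes a 10-bit field extract from VGPR31
// (roughly (v31 >> 10) & 0x3ff), so no per-call argument shuffling is needed.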
2164 | ||||
2165 | void SITargetLowering::allocateSpecialInputSGPRs( | |||
2166 | CCState &CCInfo, | |||
2167 | MachineFunction &MF, | |||
2168 | const SIRegisterInfo &TRI, | |||
2169 | SIMachineFunctionInfo &Info) const { | |||
2170 | auto &ArgInfo = Info.getArgInfo(); | |||
2171 | ||||
2172 | // TODO: Unify handling with private memory pointers. | |||
2173 | if (Info.hasDispatchPtr()) | |||
2174 | allocateSGPR64Input(CCInfo, ArgInfo.DispatchPtr); | |||
2175 | ||||
2176 | if (Info.hasQueuePtr() && AMDGPU::getAmdhsaCodeObjectVersion() < 5) | |||
2177 | allocateSGPR64Input(CCInfo, ArgInfo.QueuePtr); | |||
2178 | ||||
2179 | // Implicit arg ptr takes the place of the kernarg segment pointer. This is a | |||
2180 | // constant offset from the kernarg segment. | |||
2181 | if (Info.hasImplicitArgPtr()) | |||
2182 | allocateSGPR64Input(CCInfo, ArgInfo.ImplicitArgPtr); | |||
2183 | ||||
2184 | if (Info.hasDispatchID()) | |||
2185 | allocateSGPR64Input(CCInfo, ArgInfo.DispatchID); | |||
2186 | ||||
2187 | // flat_scratch_init is not applicable for non-kernel functions. | |||
2188 | ||||
2189 | if (Info.hasWorkGroupIDX()) | |||
2190 | allocateSGPR32Input(CCInfo, ArgInfo.WorkGroupIDX); | |||
2191 | ||||
2192 | if (Info.hasWorkGroupIDY()) | |||
2193 | allocateSGPR32Input(CCInfo, ArgInfo.WorkGroupIDY); | |||
2194 | ||||
2195 | if (Info.hasWorkGroupIDZ()) | |||
2196 | allocateSGPR32Input(CCInfo, ArgInfo.WorkGroupIDZ); | |||
2197 | } | |||
2198 | ||||
2199 | // Allocate special inputs passed in user SGPRs. | |||
2200 | void SITargetLowering::allocateHSAUserSGPRs(CCState &CCInfo, | |||
2201 | MachineFunction &MF, | |||
2202 | const SIRegisterInfo &TRI, | |||
2203 | SIMachineFunctionInfo &Info) const { | |||
2204 | if (Info.hasImplicitBufferPtr()) { | |||
2205 | Register ImplicitBufferPtrReg = Info.addImplicitBufferPtr(TRI); | |||
2206 | MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass); | |||
2207 | CCInfo.AllocateReg(ImplicitBufferPtrReg); | |||
2208 | } | |||
2209 | ||||
2210 | // FIXME: How should these inputs interact with inreg / custom SGPR inputs? | |||
2211 | if (Info.hasPrivateSegmentBuffer()) { | |||
2212 | Register PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI); | |||
2213 | MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass); | |||
2214 | CCInfo.AllocateReg(PrivateSegmentBufferReg); | |||
2215 | } | |||
2216 | ||||
2217 | if (Info.hasDispatchPtr()) { | |||
2218 | Register DispatchPtrReg = Info.addDispatchPtr(TRI); | |||
2219 | MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass); | |||
2220 | CCInfo.AllocateReg(DispatchPtrReg); | |||
2221 | } | |||
2222 | ||||
2223 | if (Info.hasQueuePtr() && AMDGPU::getAmdhsaCodeObjectVersion() < 5) { | |||
2224 | Register QueuePtrReg = Info.addQueuePtr(TRI); | |||
2225 | MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass); | |||
2226 | CCInfo.AllocateReg(QueuePtrReg); | |||
2227 | } | |||
2228 | ||||
2229 | if (Info.hasKernargSegmentPtr()) { | |||
2230 | MachineRegisterInfo &MRI = MF.getRegInfo(); | |||
2231 | Register InputPtrReg = Info.addKernargSegmentPtr(TRI); | |||
2232 | CCInfo.AllocateReg(InputPtrReg); | |||
2233 | ||||
2234 | Register VReg = MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass); | |||
2235 | MRI.setType(VReg, LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64)); | |||
2236 | } | |||
2237 | ||||
2238 | if (Info.hasDispatchID()) { | |||
2239 | Register DispatchIDReg = Info.addDispatchID(TRI); | |||
2240 | MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass); | |||
2241 | CCInfo.AllocateReg(DispatchIDReg); | |||
2242 | } | |||
2243 | ||||
2244 | if (Info.hasFlatScratchInit() && !getSubtarget()->isAmdPalOS()) { | |||
2245 | Register FlatScratchInitReg = Info.addFlatScratchInit(TRI); | |||
2246 | MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass); | |||
2247 | CCInfo.AllocateReg(FlatScratchInitReg); | |||
2248 | } | |||
2249 | ||||
2250 | // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read | |||
2251 | // these from the dispatch pointer. | |||
2252 | } | |||
2253 | ||||
2254 | // Allocate special input registers that are initialized per-wave. | |||
2255 | void SITargetLowering::allocateSystemSGPRs(CCState &CCInfo, | |||
2256 | MachineFunction &MF, | |||
2257 | SIMachineFunctionInfo &Info, | |||
2258 | CallingConv::ID CallConv, | |||
2259 | bool IsShader) const { | |||
2260 | if (Info.hasWorkGroupIDX()) { | |||
2261 | Register Reg = Info.addWorkGroupIDX(); | |||
2262 | MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass); | |||
2263 | CCInfo.AllocateReg(Reg); | |||
2264 | } | |||
2265 | ||||
2266 | if (Info.hasWorkGroupIDY()) { | |||
2267 | Register Reg = Info.addWorkGroupIDY(); | |||
2268 | MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass); | |||
2269 | CCInfo.AllocateReg(Reg); | |||
2270 | } | |||
2271 | ||||
2272 | if (Info.hasWorkGroupIDZ()) { | |||
2273 | Register Reg = Info.addWorkGroupIDZ(); | |||
2274 | MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass); | |||
2275 | CCInfo.AllocateReg(Reg); | |||
2276 | } | |||
2277 | ||||
2278 | if (Info.hasWorkGroupInfo()) { | |||
2279 | Register Reg = Info.addWorkGroupInfo(); | |||
2280 | MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass); | |||
2281 | CCInfo.AllocateReg(Reg); | |||
2282 | } | |||
2283 | ||||
2284 | if (Info.hasPrivateSegmentWaveByteOffset()) { | |||
2285 | // Scratch wave offset passed in system SGPR. | |||
2286 | unsigned PrivateSegmentWaveByteOffsetReg; | |||
2287 | ||||
2288 | if (IsShader) { | |||
2289 | PrivateSegmentWaveByteOffsetReg = | |||
2290 | Info.getPrivateSegmentWaveByteOffsetSystemSGPR(); | |||
2291 | ||||
2292 | // This is true if the scratch wave byte offset doesn't have a fixed | |||
2293 | // location. | |||
2294 | if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) { | |||
2295 | PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo); | |||
2296 | Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg); | |||
2297 | } | |||
2298 | } else | |||
2299 | PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset(); | |||
2300 | ||||
2301 | MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass); | |||
2302 | CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg); | |||
2303 | } | |||
2304 | } | |||
2305 | ||||
2306 | static void reservePrivateMemoryRegs(const TargetMachine &TM, | |||
2307 | MachineFunction &MF, | |||
2308 | const SIRegisterInfo &TRI, | |||
2309 | SIMachineFunctionInfo &Info) { | |||
2310 | // Now that we've figured out where the scratch register inputs are, see if | |||
2311 |   // we should reserve the arguments and use them directly. | |||
2312 | MachineFrameInfo &MFI = MF.getFrameInfo(); | |||
2313 | bool HasStackObjects = MFI.hasStackObjects(); | |||
2314 | const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); | |||
2315 | ||||
2316 | // Record that we know we have non-spill stack objects so we don't need to | |||
2317 | // check all stack objects later. | |||
2318 | if (HasStackObjects) | |||
2319 | Info.setHasNonSpillStackObjects(true); | |||
2320 | ||||
2321 | // Everything live out of a block is spilled with fast regalloc, so it's | |||
2322 | // almost certain that spilling will be required. | |||
2323 | if (TM.getOptLevel() == CodeGenOpt::None) | |||
2324 | HasStackObjects = true; | |||
2325 | ||||
2326 | // For now assume stack access is needed in any callee functions, so we need | |||
2327 | // the scratch registers to pass in. | |||
2328 | bool RequiresStackAccess = HasStackObjects || MFI.hasCalls(); | |||
2329 | ||||
2330 | if (!ST.enableFlatScratch()) { | |||
2331 | if (RequiresStackAccess && ST.isAmdHsaOrMesa(MF.getFunction())) { | |||
2332 | // If we have stack objects, we unquestionably need the private buffer | |||
2333 | // resource. For the Code Object V2 ABI, this will be the first 4 user | |||
2334 | // SGPR inputs. We can reserve those and use them directly. | |||
2335 | ||||
2336 | Register PrivateSegmentBufferReg = | |||
2337 | Info.getPreloadedReg(AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER); | |||
2338 | Info.setScratchRSrcReg(PrivateSegmentBufferReg); | |||
2339 | } else { | |||
2340 | unsigned ReservedBufferReg = TRI.reservedPrivateSegmentBufferReg(MF); | |||
2341 |       // We tentatively reserve the last registers (skipping those that may | |||
2342 |       // contain VCC, FLAT_SCR, and XNACK). After register allocation, we'll | |||
2343 |       // replace these with the ones immediately after those which were really | |||
2344 |       // allocated. In the prologue, copies will be inserted from the argument | |||
2345 |       // to these reserved registers. | |||
2346 | ||||
2347 | // Without HSA, relocations are used for the scratch pointer and the | |||
2348 | // buffer resource setup is always inserted in the prologue. Scratch wave | |||
2349 | // offset is still in an input SGPR. | |||
2350 | Info.setScratchRSrcReg(ReservedBufferReg); | |||
2351 | } | |||
2352 | } | |||
2353 | ||||
2354 | MachineRegisterInfo &MRI = MF.getRegInfo(); | |||
2355 | ||||
2356 | // For entry functions we have to set up the stack pointer if we use it, | |||
2357 | // whereas non-entry functions get this "for free". This means there is no | |||
2358 | // intrinsic advantage to using S32 over S34 in cases where we do not have | |||
2359 | // calls but do need a frame pointer (i.e. if we are requested to have one | |||
2360 | // because frame pointer elimination is disabled). To keep things simple we | |||
2361 | // only ever use S32 as the call ABI stack pointer, and so using it does not | |||
2362 | // imply we need a separate frame pointer. | |||
2363 | // | |||
2364 | // Try to use s32 as the SP, but move it if it would interfere with input | |||
2365 | // arguments. This won't work with calls though. | |||
2366 | // | |||
2367 | // FIXME: Move SP to avoid any possible inputs, or find a way to spill input | |||
2368 | // registers. | |||
2369 | if (!MRI.isLiveIn(AMDGPU::SGPR32)) { | |||
2370 | Info.setStackPtrOffsetReg(AMDGPU::SGPR32); | |||
2371 | } else { | |||
2372 | assert(AMDGPU::isShader(MF.getFunction().getCallingConv())); | |||
2373 | ||||
2374 | if (MFI.hasCalls()) | |||
2375 | report_fatal_error("call in graphics shader with too many input SGPRs"); | |||
2376 | ||||
2377 | for (unsigned Reg : AMDGPU::SGPR_32RegClass) { | |||
2378 | if (!MRI.isLiveIn(Reg)) { | |||
2379 | Info.setStackPtrOffsetReg(Reg); | |||
2380 | break; | |||
2381 | } | |||
2382 | } | |||
2383 | ||||
2384 | if (Info.getStackPtrOffsetReg() == AMDGPU::SP_REG) | |||
2385 | report_fatal_error("failed to find register for SP"); | |||
2386 | } | |||
2387 | ||||
2388 | // hasFP should be accurate for entry functions even before the frame is | |||
2389 | // finalized, because it does not rely on the known stack size, only | |||
2390 | // properties like whether variable sized objects are present. | |||
2391 | if (ST.getFrameLowering()->hasFP(MF)) { | |||
2392 | Info.setFrameOffsetReg(AMDGPU::SGPR33); | |||
2393 | } | |||
2394 | } | |||
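| // (editor note) To summarize the assignments above: the scratch resource | |||
| // descriptor lives in an SGPR tuple (s[0:3] in the HSA case, per the copy | |||
| // in LowerCall below), the stack pointer defaults to s32, and the frame | |||
| // pointer, when one is required, is s33. | |||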
2395 | ||||
2396 | bool SITargetLowering::supportSplitCSR(MachineFunction *MF) const { | |||
2397 | const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>(); | |||
2398 | return !Info->isEntryFunction(); | |||
2399 | } | |||
2400 | ||||
2401 | void SITargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const { | |||
2402 | ||||
2403 | } | |||
2404 | ||||
2405 | void SITargetLowering::insertCopiesSplitCSR( | |||
2406 | MachineBasicBlock *Entry, | |||
2407 | const SmallVectorImpl<MachineBasicBlock *> &Exits) const { | |||
2408 | const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); | |||
2409 | ||||
2410 | const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent()); | |||
2411 | if (!IStart) | |||
2412 | return; | |||
2413 | ||||
2414 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); | |||
2415 | MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo(); | |||
2416 | MachineBasicBlock::iterator MBBI = Entry->begin(); | |||
2417 | for (const MCPhysReg *I = IStart; *I; ++I) { | |||
2418 | const TargetRegisterClass *RC = nullptr; | |||
2419 | if (AMDGPU::SReg_64RegClass.contains(*I)) | |||
2420 | RC = &AMDGPU::SGPR_64RegClass; | |||
2421 | else if (AMDGPU::SReg_32RegClass.contains(*I)) | |||
2422 | RC = &AMDGPU::SGPR_32RegClass; | |||
2423 | else | |||
2424 | llvm_unreachable("Unexpected register class in CSRsViaCopy!"); | |||
2425 | ||||
2426 | Register NewVR = MRI->createVirtualRegister(RC); | |||
2427 | // Create copy from CSR to a virtual register. | |||
2428 | Entry->addLiveIn(*I); | |||
2429 | BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR) | |||
2430 | .addReg(*I); | |||
2431 | ||||
2432 | // Insert the copy-back instructions right before the terminator. | |||
2433 | for (auto *Exit : Exits) | |||
2434 | BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(), | |||
2435 | TII->get(TargetOpcode::COPY), *I) | |||
2436 | .addReg(NewVR); | |||
2437 | } | |||
2438 | } | |||
2439 | ||||
2440 | SDValue SITargetLowering::LowerFormalArguments( | |||
2441 | SDValue Chain, CallingConv::ID CallConv, bool isVarArg, | |||
2442 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL, | |||
2443 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { | |||
2444 | const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); | |||
2445 | ||||
2446 | MachineFunction &MF = DAG.getMachineFunction(); | |||
2447 | const Function &Fn = MF.getFunction(); | |||
2448 | FunctionType *FType = MF.getFunction().getFunctionType(); | |||
2449 | SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); | |||
2450 | ||||
2451 | if (Subtarget->isAmdHsaOS() && AMDGPU::isGraphics(CallConv)) { | |||
2452 | DiagnosticInfoUnsupported NoGraphicsHSA( | |||
2453 | Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc()); | |||
2454 | DAG.getContext()->diagnose(NoGraphicsHSA); | |||
2455 | return DAG.getEntryNode(); | |||
2456 | } | |||
2457 | ||||
2458 | Info->allocateModuleLDSGlobal(Fn.getParent()); | |||
2459 | ||||
2460 | SmallVector<ISD::InputArg, 16> Splits; | |||
2461 | SmallVector<CCValAssign, 16> ArgLocs; | |||
2462 | BitVector Skipped(Ins.size()); | |||
2463 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, | |||
2464 | *DAG.getContext()); | |||
2465 | ||||
2466 | bool IsGraphics = AMDGPU::isGraphics(CallConv); | |||
2467 | bool IsKernel = AMDGPU::isKernel(CallConv); | |||
2468 | bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CallConv); | |||
2469 | ||||
2470 | if (IsGraphics) { | |||
2471 | assert(!Info->hasDispatchPtr() && !Info->hasKernargSegmentPtr() && | |||
2472 | (!Info->hasFlatScratchInit() || Subtarget->enableFlatScratch()) && | |||
2473 | !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() && | |||
2474 | !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() && | |||
2475 | !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() && | |||
2476 | !Info->hasWorkItemIDZ()); | |||
2477 | } | |||
2478 | ||||
2479 | if (CallConv == CallingConv::AMDGPU_PS) { | |||
2480 | processPSInputArgs(Splits, CallConv, Ins, Skipped, FType, Info); | |||
2481 | ||||
2482 | // At least one interpolation mode must be enabled or else the GPU will | |||
2483 | // hang. | |||
2484 | // | |||
2485 | // Check PSInputAddr instead of PSInputEnable. The idea is that if the user | |||
2486 | // set PSInputAddr, the user wants to enable some bits after compilation | |||
2487 | // based on run-time states. Since we can't know what the final PSInputEna | |||
2488 | // will look like, we shouldn't do anything here, and the user should take | |||
2489 | // responsibility for the correct programming. | |||
2490 | // | |||
2491 | // Otherwise, the following restrictions apply: | |||
2492 | // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled. | |||
2493 | // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be | |||
2494 | // enabled too. | |||
2495 | if ((Info->getPSInputAddr() & 0x7F) == 0 || | |||
2496 | ((Info->getPSInputAddr() & 0xF) == 0 && Info->isPSInputAllocated(11))) { | |||
2497 | CCInfo.AllocateReg(AMDGPU::VGPR0); | |||
2498 | CCInfo.AllocateReg(AMDGPU::VGPR1); | |||
2499 | Info->markPSInputAllocated(0); | |||
2500 | Info->markPSInputEnabled(0); | |||
2501 | } | |||
2502 | if (Subtarget->isAmdPalOS()) { | |||
2503 | // For isAmdPalOS, the user does not enable some bits after compilation | |||
2504 | // based on run-time states; the register values being generated here are | |||
2505 | // the final ones set in hardware. Therefore we need to apply the | |||
2506 | // workaround to PSInputAddr and PSInputEnable together. (The case where | |||
2507 | // a bit is set in PSInputAddr but not PSInputEnable is where the | |||
2508 | // frontend set up an input arg for a particular interpolation mode, but | |||
2509 | // nothing uses that input arg. Really we should have an earlier pass | |||
2510 | // that removes such an arg.) | |||
2511 | unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable(); | |||
2512 | if ((PsInputBits & 0x7F) == 0 || | |||
2513 | ((PsInputBits & 0xF) == 0 && (PsInputBits >> 11 & 1))) | |||
2514 | Info->markPSInputEnabled( | |||
2515 | countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined)); | |||
2516 | } | |||
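| // (editor note) A worked example of the PAL workaround above: suppose the | |||
| // frontend set PSInputAddr = 0x1 (PERSP_SAMPLE) but nothing uses the input, | |||
| // so PSInputEnable = 0. Then PsInputBits == 0, (PsInputBits & 0x7F) == 0, | |||
| // and countTrailingZeros(PSInputAddr) == 0, so bit 0 is re-enabled and at | |||
| // least one interpolation mode reaches the hardware. | |||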
2517 | } else if (IsKernel) { | |||
2518 | assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX()); | |||
2519 | } else { | |||
2520 | Splits.append(Ins.begin(), Ins.end()); | |||
2521 | } | |||
2522 | ||||
2523 | if (IsEntryFunc) { | |||
2524 | allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info); | |||
2525 | allocateHSAUserSGPRs(CCInfo, MF, *TRI, *Info); | |||
2526 | } else if (!IsGraphics) { | |||
2527 | // For the fixed ABI, pass workitem IDs in the last argument register. | |||
2528 | allocateSpecialInputVGPRsFixed(CCInfo, MF, *TRI, *Info); | |||
2529 | } | |||
2530 | ||||
2531 | if (IsKernel) { | |||
2532 | analyzeFormalArgumentsCompute(CCInfo, Ins); | |||
2533 | } else { | |||
2534 | CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, isVarArg); | |||
2535 | CCInfo.AnalyzeFormalArguments(Splits, AssignFn); | |||
2536 | } | |||
2537 | ||||
2538 | SmallVector<SDValue, 16> Chains; | |||
2539 | ||||
2540 | // FIXME: This is the minimum kernel argument alignment. We should improve | |||
2541 | // this to the maximum alignment of the arguments. | |||
2542 | // | |||
2543 | // FIXME: Alignment of explicit arguments totally broken with non-0 explicit | |||
2544 | // kern arg offset. | |||
2545 | const Align KernelArgBaseAlign = Align(16); | |||
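| // (editor note) commonAlignment(Align(16), Offset) is the largest power of | |||
| // two guaranteed for base + Offset given a 16-byte-aligned base: e.g. | |||
| // Offset 32 yields Align(16), while Offset 36 yields Align(4). | |||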
2546 | ||||
2547 | for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) { | |||
2548 | const ISD::InputArg &Arg = Ins[i]; | |||
2549 | if (Arg.isOrigArg() && Skipped[Arg.getOrigArgIndex()]) { | |||
2550 | InVals.push_back(DAG.getUNDEF(Arg.VT)); | |||
2551 | continue; | |||
2552 | } | |||
2553 | ||||
2554 | CCValAssign &VA = ArgLocs[ArgIdx++]; | |||
2555 | MVT VT = VA.getLocVT(); | |||
2556 | ||||
2557 | if (IsEntryFunc && VA.isMemLoc()) { | |||
2558 | VT = Ins[i].VT; | |||
2559 | EVT MemVT = VA.getLocVT(); | |||
2560 | ||||
2561 | const uint64_t Offset = VA.getLocMemOffset(); | |||
2562 | Align Alignment = commonAlignment(KernelArgBaseAlign, Offset); | |||
2563 | ||||
2564 | if (Arg.Flags.isByRef()) { | |||
2565 | SDValue Ptr = lowerKernArgParameterPtr(DAG, DL, Chain, Offset); | |||
2566 | ||||
2567 | const GCNTargetMachine &TM = | |||
2568 | static_cast<const GCNTargetMachine &>(getTargetMachine()); | |||
2569 | if (!TM.isNoopAddrSpaceCast(AMDGPUAS::CONSTANT_ADDRESS, | |||
2570 | Arg.Flags.getPointerAddrSpace())) { | |||
2571 | Ptr = DAG.getAddrSpaceCast(DL, VT, Ptr, AMDGPUAS::CONSTANT_ADDRESS, | |||
2572 | Arg.Flags.getPointerAddrSpace()); | |||
2573 | } | |||
2574 | ||||
2575 | InVals.push_back(Ptr); | |||
2576 | continue; | |||
2577 | } | |||
2578 | ||||
2579 | SDValue Arg = lowerKernargMemParameter( | |||
2580 | DAG, VT, MemVT, DL, Chain, Offset, Alignment, Ins[i].Flags.isSExt(), &Ins[i]); | |||
2581 | Chains.push_back(Arg.getValue(1)); | |||
2582 | ||||
2583 | auto *ParamTy = | |||
2584 | dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex())); | |||
2585 | if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS && | |||
2586 | ParamTy && (ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS || | |||
2587 | ParamTy->getAddressSpace() == AMDGPUAS::REGION_ADDRESS)) { | |||
2588 | // On SI, local pointers are just offsets into LDS, so they are always | |||
2589 | // less than 16 bits. On CI and newer they could potentially be | |||
2590 | // real pointers, so we can't guarantee their size. | |||
2591 | Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg, | |||
2592 | DAG.getValueType(MVT::i16)); | |||
2593 | } | |||
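| // (editor note) AssertZext emits no instructions; it only records for later | |||
| // DAG combines that bits above bit 15 are known zero here. | |||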
2594 | ||||
2595 | InVals.push_back(Arg); | |||
2596 | continue; | |||
2597 | } else if (!IsEntryFunc && VA.isMemLoc()) { | |||
2598 | SDValue Val = lowerStackParameter(DAG, VA, DL, Chain, Arg); | |||
2599 | InVals.push_back(Val); | |||
2600 | if (!Arg.Flags.isByVal()) | |||
2601 | Chains.push_back(Val.getValue(1)); | |||
2602 | continue; | |||
2603 | } | |||
2604 | ||||
2605 | assert(VA.isRegLoc() && "Parameter must be in a register!"); | |||
2606 | ||||
2607 | Register Reg = VA.getLocReg(); | |||
2608 | const TargetRegisterClass *RC = nullptr; | |||
2609 | if (AMDGPU::VGPR_32RegClass.contains(Reg)) | |||
2610 | RC = &AMDGPU::VGPR_32RegClass; | |||
2611 | else if (AMDGPU::SGPR_32RegClass.contains(Reg)) | |||
2612 | RC = &AMDGPU::SGPR_32RegClass; | |||
2613 | else | |||
2614 | llvm_unreachable("Unexpected register class in LowerFormalArguments!"); | |||
2615 | EVT ValVT = VA.getValVT(); | |||
2616 | ||||
2617 | Reg = MF.addLiveIn(Reg, RC); | |||
2618 | SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT); | |||
2619 | ||||
2620 | if (Arg.Flags.isSRet()) { | |||
2621 | // The return object should be reasonably addressable. | |||
2622 | ||||
2623 | // FIXME: This helps when the return is a real sret. If it is an | |||
2624 | // automatically inserted sret (i.e. CanLowerReturn returns false), an | |||
2625 | // extra copy is inserted in SelectionDAGBuilder which obscures this. | |||
2626 | unsigned NumBits | |||
2627 | = 32 - getSubtarget()->getKnownHighZeroBitsForFrameIndex(); | |||
2628 | Val = DAG.getNode(ISD::AssertZext, DL, VT, Val, | |||
2629 | DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), NumBits))); | |||
2630 | } | |||
2631 | ||||
2632 | // If this is an 8- or 16-bit value, it is really passed promoted | |||
2633 | // to 32 bits. Insert an assert[sz]ext to capture this, then | |||
2634 | // truncate to the right size. | |||
2635 | switch (VA.getLocInfo()) { | |||
2636 | case CCValAssign::Full: | |||
2637 | break; | |||
2638 | case CCValAssign::BCvt: | |||
2639 | Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val); | |||
2640 | break; | |||
2641 | case CCValAssign::SExt: | |||
2642 | Val = DAG.getNode(ISD::AssertSext, DL, VT, Val, | |||
2643 | DAG.getValueType(ValVT)); | |||
2644 | Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val); | |||
2645 | break; | |||
2646 | case CCValAssign::ZExt: | |||
2647 | Val = DAG.getNode(ISD::AssertZext, DL, VT, Val, | |||
2648 | DAG.getValueType(ValVT)); | |||
2649 | Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val); | |||
2650 | break; | |||
2651 | case CCValAssign::AExt: | |||
2652 | Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val); | |||
2653 | break; | |||
2654 | default: | |||
2655 | llvm_unreachable("Unknown loc info!"); | |||
2656 | } | |||
2657 | ||||
2658 | InVals.push_back(Val); | |||
2659 | } | |||
2660 | ||||
2661 | // Start adding system SGPRs. | |||
2662 | if (IsEntryFunc) { | |||
2663 | allocateSystemSGPRs(CCInfo, MF, *Info, CallConv, IsGraphics); | |||
2664 | } else { | |||
2665 | CCInfo.AllocateReg(Info->getScratchRSrcReg()); | |||
2666 | if (!IsGraphics) | |||
2667 | allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info); | |||
2668 | } | |||
2669 | ||||
2670 | auto &ArgUsageInfo = | |||
2671 | DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>(); | |||
2672 | ArgUsageInfo.setFuncArgInfo(Fn, Info->getArgInfo()); | |||
2673 | ||||
2674 | unsigned StackArgSize = CCInfo.getNextStackOffset(); | |||
2675 | Info->setBytesInStackArgArea(StackArgSize); | |||
2676 | ||||
2677 | return Chains.empty() ? Chain : | |||
2678 | DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains); | |||
2679 | } | |||
2680 | ||||
2681 | // TODO: If return values can't fit in registers, we should return as many as | |||
2682 | // possible in registers before passing on stack. | |||
2683 | bool SITargetLowering::CanLowerReturn( | |||
2684 | CallingConv::ID CallConv, | |||
2685 | MachineFunction &MF, bool IsVarArg, | |||
2686 | const SmallVectorImpl<ISD::OutputArg> &Outs, | |||
2687 | LLVMContext &Context) const { | |||
2688 | // Replacing returns with sret/stack usage doesn't make sense for shaders. | |||
2689 | // FIXME: Also sort of a workaround for custom vector splitting in LowerReturn | |||
2690 | // for shaders. Vector types should be explicitly handled by CC. | |||
2691 | if (AMDGPU::isEntryFunctionCC(CallConv)) | |||
2692 | return true; | |||
2693 | ||||
2694 | SmallVector<CCValAssign, 16> RVLocs; | |||
2695 | CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context); | |||
2696 | return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, IsVarArg)); | |||
2697 | } | |||
2698 | ||||
2699 | SDValue | |||
2700 | SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, | |||
2701 | bool isVarArg, | |||
2702 | const SmallVectorImpl<ISD::OutputArg> &Outs, | |||
2703 | const SmallVectorImpl<SDValue> &OutVals, | |||
2704 | const SDLoc &DL, SelectionDAG &DAG) const { | |||
2705 | MachineFunction &MF = DAG.getMachineFunction(); | |||
2706 | SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); | |||
2707 | ||||
2708 | if (AMDGPU::isKernel(CallConv)) { | |||
2709 | return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs, | |||
2710 | OutVals, DL, DAG); | |||
2711 | } | |||
2712 | ||||
2713 | bool IsShader = AMDGPU::isShader(CallConv); | |||
2714 | ||||
2715 | Info->setIfReturnsVoid(Outs.empty()); | |||
2716 | bool IsWaveEnd = Info->returnsVoid() && IsShader; | |||
2717 | ||||
2718 | // CCValAssign - represent the assignment of the return value to a location. | |||
2719 | SmallVector<CCValAssign, 48> RVLocs; | |||
2720 | SmallVector<ISD::OutputArg, 48> Splits; | |||
2721 | ||||
2722 | // CCState - Info about the registers and stack slots. | |||
2723 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, | |||
2724 | *DAG.getContext()); | |||
2725 | ||||
2726 | // Analyze outgoing return values. | |||
2727 | CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg)); | |||
2728 | ||||
2729 | SDValue Flag; | |||
2730 | SmallVector<SDValue, 48> RetOps; | |||
2731 | RetOps.push_back(Chain); // Operand #0 = Chain (updated below) | |||
2732 | ||||
2733 | // Copy the result values into the output registers. | |||
2734 | for (unsigned I = 0, RealRVLocIdx = 0, E = RVLocs.size(); I != E; | |||
2735 | ++I, ++RealRVLocIdx) { | |||
2736 | CCValAssign &VA = RVLocs[I]; | |||
2737 | assert(VA.isRegLoc() && "Can only return in registers!"); | |||
2738 | // TODO: Partially return in registers if return values don't fit. | |||
2739 | SDValue Arg = OutVals[RealRVLocIdx]; | |||
2740 | ||||
2741 | // Copied from other backends. | |||
2742 | switch (VA.getLocInfo()) { | |||
2743 | case CCValAssign::Full: | |||
2744 | break; | |||
2745 | case CCValAssign::BCvt: | |||
2746 | Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg); | |||
2747 | break; | |||
2748 | case CCValAssign::SExt: | |||
2749 | Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg); | |||
2750 | break; | |||
2751 | case CCValAssign::ZExt: | |||
2752 | Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg); | |||
2753 | break; | |||
2754 | case CCValAssign::AExt: | |||
2755 | Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg); | |||
2756 | break; | |||
2757 | default: | |||
2758 | llvm_unreachable("Unknown loc info!"); | |||
2759 | } | |||
2760 | ||||
2761 | Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag); | |||
2762 | Flag = Chain.getValue(1); | |||
2763 | RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); | |||
2764 | } | |||
2765 | ||||
2766 | // FIXME: Does sret work properly? | |||
2767 | if (!Info->isEntryFunction()) { | |||
2768 | const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); | |||
2769 | const MCPhysReg *I = | |||
2770 | TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction()); | |||
2771 | if (I) { | |||
2772 | for (; *I; ++I) { | |||
2773 | if (AMDGPU::SReg_64RegClass.contains(*I)) | |||
2774 | RetOps.push_back(DAG.getRegister(*I, MVT::i64)); | |||
2775 | else if (AMDGPU::SReg_32RegClass.contains(*I)) | |||
2776 | RetOps.push_back(DAG.getRegister(*I, MVT::i32)); | |||
2777 | else | |||
2778 | llvm_unreachable("Unexpected register class in CSRsViaCopy!"); | |||
2779 | } | |||
2780 | } | |||
2781 | } | |||
2782 | ||||
2783 | // Update chain and glue. | |||
2784 | RetOps[0] = Chain; | |||
2785 | if (Flag.getNode()) | |||
2786 | RetOps.push_back(Flag); | |||
2787 | ||||
2788 | unsigned Opc = AMDGPUISD::ENDPGM; | |||
2789 | if (!IsWaveEnd) | |||
2790 | Opc = IsShader ? AMDGPUISD::RETURN_TO_EPILOG : AMDGPUISD::RET_FLAG; | |||
2791 | return DAG.getNode(Opc, DL, MVT::Other, RetOps); | |||
2792 | } | |||
2793 | ||||
2794 | SDValue SITargetLowering::LowerCallResult( | |||
2795 | SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg, | |||
2796 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL, | |||
2797 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool IsThisReturn, | |||
2798 | SDValue ThisVal) const { | |||
2799 | CCAssignFn *RetCC = CCAssignFnForReturn(CallConv, IsVarArg); | |||
2800 | ||||
2801 | // Assign locations to each value returned by this call. | |||
2802 | SmallVector<CCValAssign, 16> RVLocs; | |||
2803 | CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs, | |||
2804 | *DAG.getContext()); | |||
2805 | CCInfo.AnalyzeCallResult(Ins, RetCC); | |||
2806 | ||||
2807 | // Copy all of the result registers out of their specified physreg. | |||
2808 | for (unsigned i = 0; i != RVLocs.size(); ++i) { | |||
2809 | CCValAssign VA = RVLocs[i]; | |||
2810 | SDValue Val; | |||
2811 | ||||
2812 | if (VA.isRegLoc()) { | |||
2813 | Val = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag); | |||
2814 | Chain = Val.getValue(1); | |||
2815 | InFlag = Val.getValue(2); | |||
2816 | } else if (VA.isMemLoc()) { | |||
2817 | report_fatal_error("TODO: return values in memory"); | |||
2818 | } else | |||
2819 | llvm_unreachable("unknown argument location type"); | |||
2820 | ||||
2821 | switch (VA.getLocInfo()) { | |||
2822 | case CCValAssign::Full: | |||
2823 | break; | |||
2824 | case CCValAssign::BCvt: | |||
2825 | Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val); | |||
2826 | break; | |||
2827 | case CCValAssign::ZExt: | |||
2828 | Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val, | |||
2829 | DAG.getValueType(VA.getValVT())); | |||
2830 | Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val); | |||
2831 | break; | |||
2832 | case CCValAssign::SExt: | |||
2833 | Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val, | |||
2834 | DAG.getValueType(VA.getValVT())); | |||
2835 | Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val); | |||
2836 | break; | |||
2837 | case CCValAssign::AExt: | |||
2838 | Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val); | |||
2839 | break; | |||
2840 | default: | |||
2841 | llvm_unreachable("Unknown loc info!"); | |||
2842 | } | |||
2843 | ||||
2844 | InVals.push_back(Val); | |||
2845 | } | |||
2846 | ||||
2847 | return Chain; | |||
2848 | } | |||
2849 | ||||
2850 | // Add code to pass the special inputs required by the features in use, | |||
2851 | // separate from the explicit user arguments present in the IR. | |||
2852 | void SITargetLowering::passSpecialInputs( | |||
2853 | CallLoweringInfo &CLI, | |||
2854 | CCState &CCInfo, | |||
2855 | const SIMachineFunctionInfo &Info, | |||
2856 | SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass, | |||
2857 | SmallVectorImpl<SDValue> &MemOpChains, | |||
2858 | SDValue Chain) const { | |||
2859 | // If we don't have a call site, this was a call inserted by | |||
2860 | // legalization. These can never use special inputs. | |||
2861 | if (!CLI.CB) | |||
2862 | return; | |||
2863 | ||||
2864 | SelectionDAG &DAG = CLI.DAG; | |||
2865 | const SDLoc &DL = CLI.DL; | |||
2866 | const Function &F = DAG.getMachineFunction().getFunction(); | |||
2867 | ||||
2868 | const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); | |||
2869 | const AMDGPUFunctionArgInfo &CallerArgInfo = Info.getArgInfo(); | |||
2870 | ||||
2871 | const AMDGPUFunctionArgInfo *CalleeArgInfo | |||
2872 | = &AMDGPUArgumentUsageInfo::FixedABIFunctionInfo; | |||
2873 | if (const Function *CalleeFunc = CLI.CB->getCalledFunction()) { | |||
2874 | auto &ArgUsageInfo = | |||
2875 | DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>(); | |||
2876 | CalleeArgInfo = &ArgUsageInfo.lookupFuncArgInfo(*CalleeFunc); | |||
2877 | } | |||
2878 | ||||
2879 | // TODO: Unify with private memory register handling. This is complicated by | |||
2880 | // the fact that at least in kernels, the input argument is not necessarily | |||
2881 | // in the same location as the input. | |||
2882 | static constexpr std::pair<AMDGPUFunctionArgInfo::PreloadedValue, | |||
2883 | StringLiteral> ImplicitAttrs[] = { | |||
2884 | {AMDGPUFunctionArgInfo::DISPATCH_PTR, "amdgpu-no-dispatch-ptr"}, | |||
2885 | {AMDGPUFunctionArgInfo::QUEUE_PTR, "amdgpu-no-queue-ptr" }, | |||
2886 | {AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR, "amdgpu-no-implicitarg-ptr"}, | |||
2887 | {AMDGPUFunctionArgInfo::DISPATCH_ID, "amdgpu-no-dispatch-id"}, | |||
2888 | {AMDGPUFunctionArgInfo::WORKGROUP_ID_X, "amdgpu-no-workgroup-id-x"}, | |||
2889 | {AMDGPUFunctionArgInfo::WORKGROUP_ID_Y, "amdgpu-no-workgroup-id-y"}, | |||
2890 | {AMDGPUFunctionArgInfo::WORKGROUP_ID_Z, "amdgpu-no-workgroup-id-z"} | |||
2891 | }; | |||
2892 | ||||
2893 | for (auto Attr : ImplicitAttrs) { | |||
2894 | const ArgDescriptor *OutgoingArg; | |||
2895 | const TargetRegisterClass *ArgRC; | |||
2896 | LLT ArgTy; | |||
2897 | ||||
2898 | AMDGPUFunctionArgInfo::PreloadedValue InputID = Attr.first; | |||
2899 | ||||
2900 | // If the callee does not use the attribute value, skip copying the value. | |||
2901 | if (CLI.CB->hasFnAttr(Attr.second)) | |||
2902 | continue; | |||
2903 | ||||
2904 | std::tie(OutgoingArg, ArgRC, ArgTy) = | |||
2905 | CalleeArgInfo->getPreloadedValue(InputID); | |||
2906 | if (!OutgoingArg) | |||
2907 | continue; | |||
2908 | ||||
2909 | const ArgDescriptor *IncomingArg; | |||
2910 | const TargetRegisterClass *IncomingArgRC; | |||
2911 | LLT Ty; | |||
2912 | std::tie(IncomingArg, IncomingArgRC, Ty) = | |||
2913 | CallerArgInfo.getPreloadedValue(InputID); | |||
2914 | assert(IncomingArgRC == ArgRC); | |||
2915 | ||||
2916 | // All special arguments are ints for now. | |||
2917 | EVT ArgVT = TRI->getSpillSize(*ArgRC) == 8 ? MVT::i64 : MVT::i32; | |||
2918 | SDValue InputReg; | |||
2919 | ||||
2920 | if (IncomingArg) { | |||
2921 | InputReg = loadInputValue(DAG, ArgRC, ArgVT, DL, *IncomingArg); | |||
2922 | } else if (InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR) { | |||
2923 | // The implicit arg ptr is special because it doesn't have a corresponding | |||
2924 | // input for kernels, and is computed from the kernarg segment pointer. | |||
2925 | InputReg = getImplicitArgPtr(DAG, DL); | |||
2926 | } else { | |||
2927 | // We may have proven the input wasn't needed, although the ABI still | |||
2928 | // requires it. We just need to allocate the register appropriately. | |||
2929 | InputReg = DAG.getUNDEF(ArgVT); | |||
2930 | } | |||
2931 | ||||
2932 | if (OutgoingArg->isRegister()) { | |||
2933 | RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg); | |||
2934 | if (!CCInfo.AllocateReg(OutgoingArg->getRegister())) | |||
2935 | report_fatal_error("failed to allocate implicit input argument"); | |||
2936 | } else { | |||
2937 | unsigned SpecialArgOffset = | |||
2938 | CCInfo.AllocateStack(ArgVT.getStoreSize(), Align(4)); | |||
2939 | SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg, | |||
2940 | SpecialArgOffset); | |||
2941 | MemOpChains.push_back(ArgStore); | |||
2942 | } | |||
2943 | } | |||
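| // (editor note) For example, for a call site not marked | |||
| // "amdgpu-no-dispatch-ptr", the loop above forwards the caller's incoming | |||
| // DISPATCH_PTR to the callee's expected register or stack slot, and passes | |||
| // undef when the caller has no incoming value but the ABI still reserves a | |||
| // location for it. | |||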
2944 | ||||
2945 | // Pack workitem IDs into a single register, or pass them as-is if | |||
2946 | // already packed. | |||
2947 | const ArgDescriptor *OutgoingArg; | |||
2948 | const TargetRegisterClass *ArgRC; | |||
2949 | LLT Ty; | |||
2950 | ||||
2951 | std::tie(OutgoingArg, ArgRC, Ty) = | |||
2952 | CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X); | |||
2953 | if (!OutgoingArg) | |||
2954 | std::tie(OutgoingArg, ArgRC, Ty) = | |||
2955 | CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y); | |||
2956 | if (!OutgoingArg) | |||
2957 | std::tie(OutgoingArg, ArgRC, Ty) = | |||
2958 | CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z); | |||
2959 | if (!OutgoingArg) | |||
2960 | return; | |||
2961 | ||||
2962 | const ArgDescriptor *IncomingArgX = std::get<0>( | |||
2963 | CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X)); | |||
2964 | const ArgDescriptor *IncomingArgY = std::get<0>( | |||
2965 | CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y)); | |||
2966 | const ArgDescriptor *IncomingArgZ = std::get<0>( | |||
2967 | CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z)); | |||
2968 | ||||
2969 | SDValue InputReg; | |||
2970 | SDLoc SL; | |||
2971 | ||||
2972 | const bool NeedWorkItemIDX = !CLI.CB->hasFnAttr("amdgpu-no-workitem-id-x"); | |||
2973 | const bool NeedWorkItemIDY = !CLI.CB->hasFnAttr("amdgpu-no-workitem-id-y"); | |||
2974 | const bool NeedWorkItemIDZ = !CLI.CB->hasFnAttr("amdgpu-no-workitem-id-z"); | |||
2975 | ||||
2976 | // If the incoming IDs are not packed, we need to pack them. | |||
2977 | if (IncomingArgX && !IncomingArgX->isMasked() && CalleeArgInfo->WorkItemIDX && | |||
2978 | NeedWorkItemIDX) { | |||
2979 | if (Subtarget->getMaxWorkitemID(F, 0) != 0) { | |||
2980 | InputReg = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgX); | |||
2981 | } else { | |||
2982 | InputReg = DAG.getConstant(0, DL, MVT::i32); | |||
2983 | } | |||
2984 | } | |||
2985 | ||||
2986 | if (IncomingArgY && !IncomingArgY->isMasked() && CalleeArgInfo->WorkItemIDY && | |||
2987 | NeedWorkItemIDY && Subtarget->getMaxWorkitemID(F, 1) != 0) { | |||
2988 | SDValue Y = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgY); | |||
2989 | Y = DAG.getNode(ISD::SHL, SL, MVT::i32, Y, | |||
2990 | DAG.getShiftAmountConstant(10, MVT::i32, SL)); | |||
2991 | InputReg = InputReg.getNode() ? | |||
2992 | DAG.getNode(ISD::OR, SL, MVT::i32, InputReg, Y) : Y; | |||
2993 | } | |||
2994 | ||||
2995 | if (IncomingArgZ && !IncomingArgZ->isMasked() && CalleeArgInfo->WorkItemIDZ && | |||
2996 | NeedWorkItemIDZ && Subtarget->getMaxWorkitemID(F, 2) != 0) { | |||
2997 | SDValue Z = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgZ); | |||
2998 | Z = DAG.getNode(ISD::SHL, SL, MVT::i32, Z, | |||
2999 | DAG.getShiftAmountConstant(20, MVT::i32, SL)); | |||
3000 | InputReg = InputReg.getNode() ? | |||
3001 | DAG.getNode(ISD::OR, SL, MVT::i32, InputReg, Z) : Z; | |||
3002 | } | |||
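| // (editor note) After the shifts above, a fully packed workitem ID register | |||
| // has bits [9:0] = X, [19:10] = Y, and [29:20] = Z, matching the shift | |||
| // amounts of 10 and 20 used for Y and Z. | |||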
3003 | ||||
3004 | if (!InputReg && (NeedWorkItemIDX || NeedWorkItemIDY || NeedWorkItemIDZ)) { | |||
3005 | if (!IncomingArgX && !IncomingArgY && !IncomingArgZ) { | |||
3006 | // We're in a situation where the outgoing function requires the workitem | |||
3007 | // ID, but the calling function does not have it (e.g. a graphics function | |||
3008 | // calling a C calling convention function). This is illegal, but we need | |||
3009 | // to produce something. | |||
3010 | InputReg = DAG.getUNDEF(MVT::i32); | |||
3011 | } else { | |||
3012 | // Workitem IDs are already packed, so any present incoming argument | |||
3013 | // will carry all required fields. | |||
3014 | ArgDescriptor IncomingArg = ArgDescriptor::createArg( | |||
3015 | IncomingArgX ? *IncomingArgX : | |||
3016 | IncomingArgY ? *IncomingArgY : | |||
3017 | *IncomingArgZ, ~0u); | |||
3018 | InputReg = loadInputValue(DAG, ArgRC, MVT::i32, DL, IncomingArg); | |||
3019 | } | |||
3020 | } | |||
3021 | ||||
3022 | if (OutgoingArg->isRegister()) { | |||
3023 | if (InputReg) | |||
3024 | RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg); | |||
3025 | ||||
3026 | CCInfo.AllocateReg(OutgoingArg->getRegister()); | |||
3027 | } else { | |||
3028 | unsigned SpecialArgOffset = CCInfo.AllocateStack(4, Align(4)); | |||
3029 | if (InputReg) { | |||
3030 | SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg, | |||
3031 | SpecialArgOffset); | |||
3032 | MemOpChains.push_back(ArgStore); | |||
3033 | } | |||
3034 | } | |||
3035 | } | |||
3036 | ||||
3037 | static bool canGuaranteeTCO(CallingConv::ID CC) { | |||
3038 | return CC == CallingConv::Fast; | |||
3039 | } | |||
3040 | ||||
3041 | /// Return true if we might ever do TCO for calls with this calling convention. | |||
3042 | static bool mayTailCallThisCC(CallingConv::ID CC) { | |||
3043 | switch (CC) { | |||
3044 | case CallingConv::C: | |||
3045 | case CallingConv::AMDGPU_Gfx: | |||
3046 | return true; | |||
3047 | default: | |||
3048 | return canGuaranteeTCO(CC); | |||
3049 | } | |||
3050 | } | |||
3051 | ||||
3052 | bool SITargetLowering::isEligibleForTailCallOptimization( | |||
3053 | SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg, | |||
3054 | const SmallVectorImpl<ISD::OutputArg> &Outs, | |||
3055 | const SmallVectorImpl<SDValue> &OutVals, | |||
3056 | const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const { | |||
3057 | if (!mayTailCallThisCC(CalleeCC)) | |||
3058 | return false; | |||
3059 | ||||
3060 | // For a divergent call target, we need to do a waterfall loop over the | |||
3061 | // possible callees, which precludes us from using a simple jump. | |||
3062 | if (Callee->isDivergent()) | |||
3063 | return false; | |||
3064 | ||||
3065 | MachineFunction &MF = DAG.getMachineFunction(); | |||
3066 | const Function &CallerF = MF.getFunction(); | |||
3067 | CallingConv::ID CallerCC = CallerF.getCallingConv(); | |||
3068 | const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); | |||
3069 | const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); | |||
3070 | ||||
3071 | // Kernels aren't callable, and don't have a live-in return address, so it | |||
3072 | // doesn't make sense to do a tail call with entry functions. | |||
3073 | if (!CallerPreserved) | |||
3074 | return false; | |||
3075 | ||||
3076 | bool CCMatch = CallerCC == CalleeCC; | |||
3077 | ||||
3078 | if (DAG.getTarget().Options.GuaranteedTailCallOpt) { | |||
3079 | if (canGuaranteeTCO(CalleeCC) && CCMatch) | |||
3080 | return true; | |||
3081 | return false; | |||
3082 | } | |||
3083 | ||||
3084 | // TODO: Can we handle var args? | |||
3085 | if (IsVarArg) | |||
3086 | return false; | |||
3087 | ||||
3088 | for (const Argument &Arg : CallerF.args()) { | |||
3089 | if (Arg.hasByValAttr()) | |||
3090 | return false; | |||
3091 | } | |||
3092 | ||||
3093 | LLVMContext &Ctx = *DAG.getContext(); | |||
3094 | ||||
3095 | // Check that the call results are passed in the same way. | |||
3096 | if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, Ctx, Ins, | |||
3097 | CCAssignFnForCall(CalleeCC, IsVarArg), | |||
3098 | CCAssignFnForCall(CallerCC, IsVarArg))) | |||
3099 | return false; | |||
3100 | ||||
3101 | // The callee has to preserve all registers the caller needs to preserve. | |||
3102 | if (!CCMatch) { | |||
3103 | const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC); | |||
3104 | if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved)) | |||
3105 | return false; | |||
3106 | } | |||
3107 | ||||
3108 | // Nothing more to check if the callee is taking no arguments. | |||
3109 | if (Outs.empty()) | |||
3110 | return true; | |||
3111 | ||||
3112 | SmallVector<CCValAssign, 16> ArgLocs; | |||
3113 | CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, Ctx); | |||
3114 | ||||
3115 | CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, IsVarArg)); | |||
3116 | ||||
3117 | const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>(); | |||
3118 | // If the stack arguments for this call do not fit into our own save area, | |||
3119 | // then the call cannot be made a tail call. | |||
3120 | // TODO: Is this really necessary? | |||
3121 | if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea()) | |||
3122 | return false; | |||
3123 | ||||
3124 | const MachineRegisterInfo &MRI = MF.getRegInfo(); | |||
3125 | return parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals); | |||
3126 | } | |||
3127 | ||||
3128 | bool SITargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { | |||
3129 | if (!CI->isTailCall()) | |||
3130 | return false; | |||
3131 | ||||
3132 | const Function *ParentFn = CI->getParent()->getParent(); | |||
3133 | if (AMDGPU::isEntryFunctionCC(ParentFn->getCallingConv())) | |||
3134 | return false; | |||
3135 | return true; | |||
3136 | } | |||
3137 | ||||
3138 | // The wave scratch offset register is used as the global base pointer. | |||
3139 | SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI, | |||
3140 | SmallVectorImpl<SDValue> &InVals) const { | |||
3141 | SelectionDAG &DAG = CLI.DAG; | |||
3142 | const SDLoc &DL = CLI.DL; | |||
3143 | SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; | |||
3144 | SmallVector<SDValue, 32> &OutVals = CLI.OutVals; | |||
3145 | SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; | |||
3146 | SDValue Chain = CLI.Chain; | |||
3147 | SDValue Callee = CLI.Callee; | |||
3148 | bool &IsTailCall = CLI.IsTailCall; | |||
3149 | CallingConv::ID CallConv = CLI.CallConv; | |||
3150 | bool IsVarArg = CLI.IsVarArg; | |||
3151 | bool IsSibCall = false; | |||
3152 | bool IsThisReturn = false; | |||
3153 | MachineFunction &MF = DAG.getMachineFunction(); | |||
3154 | ||||
3155 | if (Callee.isUndef() || isNullConstant(Callee)) { | |||
3156 | if (!CLI.IsTailCall) { | |||
3157 | for (unsigned I = 0, E = CLI.Ins.size(); I != E; ++I) | |||
3158 | InVals.push_back(DAG.getUNDEF(CLI.Ins[I].VT)); | |||
3159 | } | |||
3160 | ||||
3161 | return Chain; | |||
3162 | } | |||
3163 | ||||
3164 | if (IsVarArg) { | |||
3165 | return lowerUnhandledCall(CLI, InVals, | |||
3166 | "unsupported call to variadic function "); | |||
3167 | } | |||
3168 | ||||
3169 | if (!CLI.CB) | |||
3170 | report_fatal_error("unsupported libcall legalization"); | |||
3171 | ||||
3172 | if (IsTailCall && MF.getTarget().Options.GuaranteedTailCallOpt) { | |||
3173 | return lowerUnhandledCall(CLI, InVals, | |||
3174 | "unsupported required tail call to function "); | |||
3175 | } | |||
3176 | ||||
3177 | if (AMDGPU::isShader(CallConv)) { | |||
3178 | // Note the issue is with the CC of the called function, not of the call | |||
3179 | // itself. | |||
3180 | return lowerUnhandledCall(CLI, InVals, | |||
3181 | "unsupported call to a shader function "); | |||
3182 | } | |||
3183 | ||||
3184 | if (AMDGPU::isShader(MF.getFunction().getCallingConv()) && | |||
3185 | CallConv != CallingConv::AMDGPU_Gfx) { | |||
3186 | // Only allow calls with specific calling conventions. | |||
3187 | return lowerUnhandledCall(CLI, InVals, | |||
3188 | "unsupported calling convention for call from " | |||
3189 | "graphics shader of function "); | |||
3190 | } | |||
3191 | ||||
3192 | if (IsTailCall) { | |||
3193 | IsTailCall = isEligibleForTailCallOptimization( | |||
3194 | Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG); | |||
3195 | if (!IsTailCall && CLI.CB && CLI.CB->isMustTailCall()) { | |||
3196 | report_fatal_error("failed to perform tail call elimination on a call " | |||
3197 | "site marked musttail"); | |||
3198 | } | |||
3199 | ||||
3200 | bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt; | |||
3201 | ||||
3202 | // A sibling call is one where we're under the usual C ABI and not planning | |||
3203 | // to change that but can still do a tail call: | |||
3204 | if (!TailCallOpt && IsTailCall) | |||
3205 | IsSibCall = true; | |||
3206 | ||||
3207 | if (IsTailCall) | |||
3208 | ++NumTailCalls; | |||
3209 | } | |||
3210 | ||||
3211 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); | |||
3212 | SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; | |||
3213 | SmallVector<SDValue, 8> MemOpChains; | |||
3214 | ||||
3215 | // Analyze operands of the call, assigning locations to each operand. | |||
3216 | SmallVector<CCValAssign, 16> ArgLocs; | |||
3217 | CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext()); | |||
3218 | CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, IsVarArg); | |||
3219 | ||||
3220 | if (CallConv != CallingConv::AMDGPU_Gfx) { | |||
3221 | // With a fixed ABI, allocate fixed registers before user arguments. | |||
3222 | passSpecialInputs(CLI, CCInfo, *Info, RegsToPass, MemOpChains, Chain); | |||
3223 | } | |||
3224 | ||||
3225 | CCInfo.AnalyzeCallOperands(Outs, AssignFn); | |||
3226 | ||||
3227 | // Get a count of how many bytes are to be pushed on the stack. | |||
3228 | unsigned NumBytes = CCInfo.getNextStackOffset(); | |||
3229 | ||||
3230 | if (IsSibCall) { | |||
3231 | // Since we're not changing the ABI to make this a tail call, the memory | |||
3232 | // operands are already available in the caller's incoming argument space. | |||
3233 | NumBytes = 0; | |||
3234 | } | |||
3235 | ||||
3236 | // FPDiff is the byte offset of the call's argument area from the callee's. | |||
3237 | // Stores to callee stack arguments will be placed in FixedStackSlots offset | |||
3238 | // by this amount for a tail call. In a sibling call it must be 0 because the | |||
3239 | // caller will deallocate the entire stack and the callee still expects its | |||
3240 | // arguments to begin at SP+0. Completely unused for non-tail calls. | |||
3241 | int32_t FPDiff = 0; | |||
3242 | MachineFrameInfo &MFI = MF.getFrameInfo(); | |||
3243 | ||||
3244 | // Adjust the stack pointer for the new arguments... | |||
3245 | // These operations are automatically eliminated by the prolog/epilog pass | |||
3246 | if (!IsSibCall) { | |||
3247 | Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL); | |||
3248 | ||||
3249 | if (!Subtarget->enableFlatScratch()) { | |||
3250 | SmallVector<SDValue, 4> CopyFromChains; | |||
3251 | ||||
3252 | // In the HSA case, this should be an identity copy. | |||
3253 | SDValue ScratchRSrcReg | |||
3254 | = DAG.getCopyFromReg(Chain, DL, Info->getScratchRSrcReg(), MVT::v4i32); | |||
3255 | RegsToPass.emplace_back(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg); | |||
3256 | CopyFromChains.push_back(ScratchRSrcReg.getValue(1)); | |||
3257 | Chain = DAG.getTokenFactor(DL, CopyFromChains); | |||
3258 | } | |||
3259 | } | |||
3260 | ||||
3261 | MVT PtrVT = MVT::i32; | |||
3262 | ||||
3263 | // Walk the register/memloc assignments, inserting copies/loads. | |||
3264 | for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { | |||
3265 | CCValAssign &VA = ArgLocs[i]; | |||
3266 | SDValue Arg = OutVals[i]; | |||
3267 | ||||
3268 | // Promote the value if needed. | |||
3269 | switch (VA.getLocInfo()) { | |||
3270 | case CCValAssign::Full: | |||
3271 | break; | |||
3272 | case CCValAssign::BCvt: | |||
3273 | Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg); | |||
3274 | break; | |||
3275 | case CCValAssign::ZExt: | |||
3276 | Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg); | |||
3277 | break; | |||
3278 | case CCValAssign::SExt: | |||
3279 | Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg); | |||
3280 | break; | |||
3281 | case CCValAssign::AExt: | |||
3282 | Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg); | |||
3283 | break; | |||
3284 | case CCValAssign::FPExt: | |||
3285 | Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg); | |||
3286 | break; | |||
3287 | default: | |||
3288 | llvm_unreachable("Unknown loc info!"); | |||
3289 | } | |||
3290 | ||||
3291 | if (VA.isRegLoc()) { | |||
3292 | RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); | |||
3293 | } else { | |||
3294 | assert(VA.isMemLoc()); | |||
3295 | ||||
3296 | SDValue DstAddr; | |||
3297 | MachinePointerInfo DstInfo; | |||
3298 | ||||
3299 | unsigned LocMemOffset = VA.getLocMemOffset(); | |||
3300 | int32_t Offset = LocMemOffset; | |||
3301 | ||||
3302 | SDValue PtrOff = DAG.getConstant(Offset, DL, PtrVT); | |||
3303 | MaybeAlign Alignment; | |||
3304 | ||||
3305 | if (IsTailCall) { | |||
3306 | ISD::ArgFlagsTy Flags = Outs[i].Flags; | |||
3307 | unsigned OpSize = Flags.isByVal() ? | |||
3308 | Flags.getByValSize() : VA.getValVT().getStoreSize(); | |||
3309 | ||||
3310 | // FIXME: We can have better than the minimum byval required alignment. | |||
3311 | Alignment = | |||
3312 | Flags.isByVal() | |||
3313 | ? Flags.getNonZeroByValAlign() | |||
3314 | : commonAlignment(Subtarget->getStackAlignment(), Offset); | |||
3315 | ||||
3316 | Offset = Offset + FPDiff; | |||
3317 | int FI = MFI.CreateFixedObject(OpSize, Offset, true); | |||
3318 | ||||
3319 | DstAddr = DAG.getFrameIndex(FI, PtrVT); | |||
3320 | DstInfo = MachinePointerInfo::getFixedStack(MF, FI); | |||
3321 | ||||
3322 | // Make sure any stack arguments overlapping with where we're storing | |||
3323 | // are loaded before this eventual operation. Otherwise they'll be | |||
3324 | // clobbered. | |||
3325 | ||||
3326 | // FIXME: Why is this really necessary? This seems to just result in a | |||
3327 | // lot of code to copy the stack arguments and write them back to the | |||
3328 | // same locations, which are supposed to be immutable? | |||
3329 | Chain = addTokenForArgument(Chain, DAG, MFI, FI); | |||
3330 | } else { | |||
3331 | // Stores to the argument stack area are relative to the stack pointer. | |||
3332 | SDValue SP = DAG.getCopyFromReg(Chain, DL, Info->getStackPtrOffsetReg(), | |||
3333 | MVT::i32); | |||
3334 | DstAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, SP, PtrOff); | |||
3335 | DstInfo = MachinePointerInfo::getStack(MF, LocMemOffset); | |||
3336 | Alignment = | |||
3337 | commonAlignment(Subtarget->getStackAlignment(), LocMemOffset); | |||
3338 | } | |||
3339 | ||||
3340 | if (Outs[i].Flags.isByVal()) { | |||
3341 | SDValue SizeNode = | |||
3342 | DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i32); | |||
3343 | SDValue Cpy = | |||
3344 | DAG.getMemcpy(Chain, DL, DstAddr, Arg, SizeNode, | |||
3345 | Outs[i].Flags.getNonZeroByValAlign(), | |||
3346 | /*isVol = */ false, /*AlwaysInline = */ true, | |||
3347 | /*isTailCall = */ false, DstInfo, | |||
3348 | MachinePointerInfo(AMDGPUAS::PRIVATE_ADDRESS)); | |||
3349 | ||||
3350 | MemOpChains.push_back(Cpy); | |||
3351 | } else { | |||
3352 | SDValue Store = | |||
3353 | DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo, Alignment); | |||
3354 | MemOpChains.push_back(Store); | |||
3355 | } | |||
3356 | } | |||
3357 | } | |||
3358 | ||||
3359 | if (!MemOpChains.empty()) | |||
3360 | Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains); | |||
3361 | ||||
3362 | // Build a sequence of copy-to-reg nodes chained together with token chain | |||
3363 | // and flag operands which copy the outgoing args into the appropriate regs. | |||
3364 | SDValue InFlag; | |||
3365 | for (auto &RegToPass : RegsToPass) { | |||
3366 | Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first, | |||
3367 | RegToPass.second, InFlag); | |||
3368 | InFlag = Chain.getValue(1); | |||
3369 | } | |||
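| // (editor note) Threading InFlag (glue) through each CopyToReg keeps the | |||
| // argument copies pinned to the call node during scheduling, so the | |||
| // argument registers stay live into the call. | |||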
3370 | ||||
3371 | ||||
3372 | // We don't usually want to end the call-sequence here because we would tidy | |||
3373 | // the frame up *after* the call; however, in the ABI-changing tail-call case | |||
3374 | // we've carefully laid out the parameters so that when SP is reset they'll be | |||
3375 | // in the correct location. | |||
3376 | if (IsTailCall && !IsSibCall) { | |||
3377 | Chain = DAG.getCALLSEQ_END(Chain, | |||
3378 | DAG.getTargetConstant(NumBytes, DL, MVT::i32), | |||
3379 | DAG.getTargetConstant(0, DL, MVT::i32), | |||
3380 | InFlag, DL); | |||
3381 | InFlag = Chain.getValue(1); | |||
3382 | } | |||
3383 | ||||
3384 | std::vector<SDValue> Ops; | |||
3385 | Ops.push_back(Chain); | |||
3386 | Ops.push_back(Callee); | |||
3387 | // Add a redundant copy of the callee global which will not be legalized, as | |||
3388 | // we need direct access to the callee later. | |||
3389 | if (GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(Callee)) { | |||
3390 | const GlobalValue *GV = GSD->getGlobal(); | |||
3391 | Ops.push_back(DAG.getTargetGlobalAddress(GV, DL, MVT::i64)); | |||
3392 | } else { | |||
3393 | Ops.push_back(DAG.getTargetConstant(0, DL, MVT::i64)); | |||
3394 | } | |||
3395 | ||||
3396 | if (IsTailCall) { | |||
3397 | // Each tail call may have to adjust the stack by a different amount, so | |||
3398 | // this information must travel along with the operation for eventual | |||
3399 | // consumption by emitEpilogue. | |||
3400 | Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32)); | |||
3401 | } | |||
3402 | ||||
3403 | // Add argument registers to the end of the list so that they are known live | |||
3404 | // into the call. | |||
3405 | for (auto &RegToPass : RegsToPass) { | |||
3406 | Ops.push_back(DAG.getRegister(RegToPass.first, | |||
3407 | RegToPass.second.getValueType())); | |||
3408 | } | |||
3409 | ||||
3410 | // Add a register mask operand representing the call-preserved registers. | |||
3411 | ||||
3412 | auto *TRI = static_cast<const SIRegisterInfo*>(Subtarget->getRegisterInfo()); | |||
3413 | const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv); | |||
3414 | assert(Mask && "Missing call preserved mask for calling convention"); | |||
3415 | Ops.push_back(DAG.getRegisterMask(Mask)); | |||
3416 | ||||
3417 | if (InFlag.getNode()) | |||
3418 | Ops.push_back(InFlag); | |||
3419 | ||||
3420 | SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); | |||
3421 | ||||
3422 | // If we're doing a tail call, use a TC_RETURN here rather than an | |||
3423 | // actual call instruction. | |||
3424 | if (IsTailCall) { | |||
3425 | MFI.setHasTailCall(); | |||
3426 | return DAG.getNode(AMDGPUISD::TC_RETURN, DL, NodeTys, Ops); | |||
3427 | } | |||
3428 | ||||
3429 | // Returns a chain and a flag for retval copy to use. | |||
3430 | SDValue Call = DAG.getNode(AMDGPUISD::CALL, DL, NodeTys, Ops); | |||
3431 | Chain = Call.getValue(0); | |||
3432 | InFlag = Call.getValue(1); | |||
3433 | ||||
3434 | uint64_t CalleePopBytes = NumBytes; | |||
3435 | Chain = DAG.getCALLSEQ_END(Chain, DAG.getTargetConstant(0, DL, MVT::i32), | |||
3436 | DAG.getTargetConstant(CalleePopBytes, DL, MVT::i32), | |||
3437 | InFlag, DL); | |||
3438 | if (!Ins.empty()) | |||
3439 | InFlag = Chain.getValue(1); | |||
3440 | ||||
3441 | // Handle result values, copying them out of physregs into vregs that we | |||
3442 | // return. | |||
3443 | return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG, | |||
3444 | InVals, IsThisReturn, | |||
3445 | IsThisReturn ? OutVals[0] : SDValue()); | |||
3446 | } | |||
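| // Net effect of the two paths above (sketch): a tail call is emitted as a | |||
| // single TC_RETURN terminator carrying the callee and FPDiff, while a normal | |||
| // call is bracketed as | |||
| //   callseq_start ... AMDGPUISD::CALL ... callseq_end | |||
| // with the results then copied out of their physregs by LowerCallResult. | |||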
3447 | ||||
3448 | // This is identical to the default implementation in ExpandDYNAMIC_STACKALLOC, | |||
3449 | // except for applying the wave size scale to the increment amount. | |||
3450 | SDValue SITargetLowering::lowerDYNAMIC_STACKALLOCImpl( | |||
3451 | SDValue Op, SelectionDAG &DAG) const { | |||
3452 | const MachineFunction &MF = DAG.getMachineFunction(); | |||
3453 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); | |||
3454 | ||||
3455 | SDLoc dl(Op); | |||
3456 | EVT VT = Op.getValueType(); | |||
3457 | SDValue Tmp1 = Op; | |||
3458 | SDValue Tmp2 = Op.getValue(1); | |||
3459 | SDValue Tmp3 = Op.getOperand(2); | |||
3460 | SDValue Chain = Tmp1.getOperand(0); | |||
3461 | ||||
3462 | Register SPReg = Info->getStackPtrOffsetReg(); | |||
3463 | ||||
3464 | // Chain the dynamic stack allocation so that it doesn't modify the stack | |||
3465 | // pointer when other instructions are using the stack. | |||
3466 | Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl); | |||
3467 | ||||
3468 | SDValue Size = Tmp2.getOperand(1); | |||
3469 | SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT); | |||
3470 | Chain = SP.getValue(1); | |||
3471 | MaybeAlign Alignment = cast<ConstantSDNode>(Tmp3)->getMaybeAlignValue(); | |||
3472 | const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); | |||
3473 | const TargetFrameLowering *TFL = ST.getFrameLowering(); | |||
3474 | unsigned Opc = | |||
3475 | TFL->getStackGrowthDirection() == TargetFrameLowering::StackGrowsUp ? | |||
3476 | ISD::ADD : ISD::SUB; | |||
3477 | ||||
3478 | SDValue ScaledSize = DAG.getNode( | |||
3479 | ISD::SHL, dl, VT, Size, | |||
3480 | DAG.getConstant(ST.getWavefrontSizeLog2(), dl, MVT::i32)); | |||
3481 | ||||
3482 | Align StackAlign = TFL->getStackAlign(); | |||
3483 | Tmp1 = DAG.getNode(Opc, dl, VT, SP, ScaledSize); // Value | |||
3484 | if (Alignment && *Alignment > StackAlign) { | |||
3485 | Tmp1 = DAG.getNode(ISD::AND, dl, VT, Tmp1, | |||
3486 | DAG.getConstant(-(uint64_t)Alignment->value() | |||
3487 | << ST.getWavefrontSizeLog2(), | |||
3488 | dl, VT)); | |||
3489 | } | |||
3490 | ||||
3491 | Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain | |||
3492 | Tmp2 = DAG.getCALLSEQ_END( | |||
3493 | Chain, DAG.getIntPtrConstant(0, dl, true), | |||
3494 | DAG.getIntPtrConstant(0, dl, true), SDValue(), dl); | |||
3495 | ||||
3496 | return DAG.getMergeValues({Tmp1, Tmp2}, dl); | |||
3497 | } | |||
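| // Worked example (sketch): on a wave64 subtarget, a per-lane allocation of | |||
| // 8 bytes is scaled by wavefrontSizeLog2 = 6 before adjusting SP: | |||
| //   ScaledSize = 8 << 6 = 512 bytes of scratch for the whole wave. | |||
| // Any extra alignment is scaled the same way, which is why the AND mask | |||
| // above shifts -Alignment left by getWavefrontSizeLog2() too. | |||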
3498 | ||||
3499 | SDValue SITargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, | |||
3500 | SelectionDAG &DAG) const { | |||
3501 | // We only handle constant sizes here to allow non-entry block, static sized | |||
3502 | // allocas. A truly dynamic value is more difficult to support because we | |||
3503 | // don't know if the size value is uniform or not. If the size isn't uniform, | |||
3504 | // we would need to do a wave reduction to get the maximum size to know how | |||
3505 | // much to increment the uniform stack pointer. | |||
3506 | SDValue Size = Op.getOperand(1); | |||
3507 | if (isa<ConstantSDNode>(Size)) | |||
3508 | return lowerDYNAMIC_STACKALLOCImpl(Op, DAG); // Use "generic" expansion. | |||
3509 | ||||
3510 | return AMDGPUTargetLowering::LowerDYNAMIC_STACKALLOC(Op, DAG); | |||
3511 | } | |||
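| // For illustration (hypothetical IR, both in a non-entry block): | |||
| //   %a = alloca [16 x i32], addrspace(5)   ; constant size, handled above | |||
| //   %b = alloca i32, i32 %n, addrspace(5)  ; dynamic size, falls back to | |||
| //                                          ; AMDGPUTargetLowering's handler | |||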
3512 | ||||
3513 | Register SITargetLowering::getRegisterByName(const char* RegName, LLT VT, | |||
3514 | const MachineFunction &MF) const { | |||
3515 | Register Reg = StringSwitch<Register>(RegName) | |||
3516 | .Case("m0", AMDGPU::M0) | |||
3517 | .Case("exec", AMDGPU::EXEC) | |||
3518 | .Case("exec_lo", AMDGPU::EXEC_LO) | |||
3519 | .Case("exec_hi", AMDGPU::EXEC_HI) | |||
3520 | .Case("flat_scratch", AMDGPU::FLAT_SCR) | |||
3521 | .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO) | |||
3522 | .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI) | |||
3523 | .Default(Register()); | |||
3524 | ||||
3525 | if (Reg == AMDGPU::NoRegister) { | |||
3526 | report_fatal_error(Twine("invalid register name \"" | |||
3527 | + StringRef(RegName) + "\".")); | |||
3528 | ||||
3529 | } | |||
3530 | ||||
3531 | if (!Subtarget->hasFlatScrRegister() && | |||
3532 | Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) { | |||
3533 | report_fatal_error(Twine("invalid register \"" | |||
3534 | + StringRef(RegName) + "\" for subtarget.")); | |||
3535 | } | |||
3536 | ||||
3537 | switch (Reg) { | |||
3538 | case AMDGPU::M0: | |||
3539 | case AMDGPU::EXEC_LO: | |||
3540 | case AMDGPU::EXEC_HI: | |||
3541 | case AMDGPU::FLAT_SCR_LO: | |||
3542 | case AMDGPU::FLAT_SCR_HI: | |||
3543 | if (VT.getSizeInBits() == 32) | |||
3544 | return Reg; | |||
3545 | break; | |||
3546 | case AMDGPU::EXEC: | |||
3547 | case AMDGPU::FLAT_SCR: | |||
3548 | if (VT.getSizeInBits() == 64) | |||
3549 | return Reg; | |||
3550 | break; | |||
3551 | default: | |||
3552 | llvm_unreachable("missing register type checking"); | |||
3553 | } | |||
3554 | ||||
3555 | report_fatal_error(Twine("invalid type for register \"" | |||
3556 | + StringRef(RegName) + "\".")); | |||
3557 | } | |||
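| // Illustrative use (hypothetical IR): named registers reach this hook via | |||
| // the read/write_register intrinsics, e.g. | |||
| //   %e = call i64 @llvm.read_register.i64(metadata !0) | |||
| //   !0 = !{!"exec"} | |||
| // Reading "exec" as i32 would instead fail the size check above and be | |||
| // reported as an invalid type for the register. | |||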
3558 | ||||
3559 | // If kill is not the last instruction, split the block so kill is always a | |||
3560 | // proper terminator. | |||
3561 | MachineBasicBlock * | |||
3562 | SITargetLowering::splitKillBlock(MachineInstr &MI, | |||
3563 | MachineBasicBlock *BB) const { | |||
3564 | MachineBasicBlock *SplitBB = BB->splitAt(MI, false /*UpdateLiveIns*/); | |||
3565 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); | |||
3566 | MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode())); | |||
3567 | return SplitBB; | |||
3568 | } | |||
3569 | ||||
3570 | // Split block \p MBB at \p MI, so as to insert a loop. If \p InstInLoop is true, | |||
3571 | // \p MI will be the only instruction in the loop body block. Otherwise, it will | |||
3572 | // be the first instruction in the remainder block. | |||
3573 | // | |||
3574 | /// \returns { LoopBody, Remainder } | |||
3575 | static std::pair<MachineBasicBlock *, MachineBasicBlock *> | |||
3576 | splitBlockForLoop(MachineInstr &MI, MachineBasicBlock &MBB, bool InstInLoop) { | |||
3577 | MachineFunction *MF = MBB.getParent(); | |||
3578 | MachineBasicBlock::iterator I(&MI); | |||
3579 | ||||
3580 | // To insert the loop we need to split the block. Move everything after this | |||
3581 | // point to a new block, and insert a new empty block between the two. | |||
3582 | MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock(); | |||
3583 | MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock(); | |||
3584 | MachineFunction::iterator MBBI(MBB); | |||
3585 | ++MBBI; | |||
3586 | ||||
3587 | MF->insert(MBBI, LoopBB); | |||
3588 | MF->insert(MBBI, RemainderBB); | |||
3589 | ||||
3590 | LoopBB->addSuccessor(LoopBB); | |||
3591 | LoopBB->addSuccessor(RemainderBB); | |||
3592 | ||||
3593 | // Move the rest of the block into a new block. | |||
3594 | RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB); | |||
3595 | ||||
3596 | if (InstInLoop) { | |||
3597 | auto Next = std::next(I); | |||
3598 | ||||
3599 | // Move instruction to loop body. | |||
3600 | LoopBB->splice(LoopBB->begin(), &MBB, I, Next); | |||
3601 | ||||
3602 | // Move the rest of the block. | |||
3603 | RemainderBB->splice(RemainderBB->begin(), &MBB, Next, MBB.end()); | |||
3604 | } else { | |||
3605 | RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end()); | |||
3606 | } | |||
3607 | ||||
3608 | MBB.addSuccessor(LoopBB); | |||
3609 | ||||
3610 | return std::make_pair(LoopBB, RemainderBB); | |||
3611 | } | |||
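| // Resulting CFG (sketch), starting from MBB = [ ...; MI; ... ]: | |||
| //   MBB -> LoopBB -> RemainderBB, plus the LoopBB -> LoopBB back edge. | |||
| // With InstInLoop, MI is the only instruction in LoopBB; otherwise it | |||
| // becomes the first instruction of RemainderBB. The caller inserts the | |||
| // actual loop condition and branch. | |||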
3612 | ||||
3613 | /// Insert \p MI into a BUNDLE with an S_WAITCNT 0 immediately following it. | |||
3614 | void SITargetLowering::bundleInstWithWaitcnt(MachineInstr &MI) const { | |||
3615 | MachineBasicBlock *MBB = MI.getParent(); | |||
3616 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); | |||
3617 | auto I = MI.getIterator(); | |||
3618 | auto E = std::next(I); | |||
3619 | ||||
3620 | BuildMI(*MBB, E, MI.getDebugLoc(), TII->get(AMDGPU::S_WAITCNT)) | |||
3621 | .addImm(0); | |||
3622 | ||||
3623 | MIBundleBuilder Bundler(*MBB, I, E); | |||
3624 | finalizeBundle(*MBB, Bundler.begin()); | |||
3625 | } | |||
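| // Resulting MIR shape (sketch): | |||
| //   BUNDLE { | |||
| //     <MI> | |||
| //     S_WAITCNT 0 | |||
| //   } | |||
| // Bundling prevents later passes from scheduling anything between MI and | |||
| // its mandatory wait. | |||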
3626 | ||||
3627 | MachineBasicBlock * | |||
3628 | SITargetLowering::emitGWSMemViolTestLoop(MachineInstr &MI, | |||
3629 | MachineBasicBlock *BB) const { | |||
3630 | const DebugLoc &DL = MI.getDebugLoc(); | |||
3631 | ||||
3632 | MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); | |||
3633 | ||||
3634 | MachineBasicBlock *LoopBB; | |||
3635 | MachineBasicBlock *RemainderBB; | |||
3636 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); | |||
3637 | ||||
3638 | // Apparently kill flags are only valid if the def is in the same block? | |||
3639 | if (MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::data0)) | |||
3640 | Src->setIsKill(false); | |||
3641 | ||||
3642 | std::tie(LoopBB, RemainderBB) = splitBlockForLoop(MI, *BB, true); | |||
3643 | ||||
3644 | MachineBasicBlock::iterator I = LoopBB->end(); | |||
3645 | ||||
3646 | const unsigned EncodedReg = AMDGPU::Hwreg::encodeHwreg( | |||
3647 | AMDGPU::Hwreg::ID_TRAPSTS, AMDGPU::Hwreg::OFFSET_MEM_VIOL, 1); | |||
3648 | ||||
3649 | // Clear TRAP_STS.MEM_VIOL | |||
3650 | BuildMI(*LoopBB, LoopBB->begin(), DL, TII->get(AMDGPU::S_SETREG_IMM32_B32)) | |||
3651 | .addImm(0) | |||
3652 | .addImm(EncodedReg); | |||
3653 | ||||
3654 | bundleInstWithWaitcnt(MI); | |||
3655 | ||||
3656 | Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); | |||
3657 | ||||
3658 | // Load and check TRAP_STS.MEM_VIOL | |||
3659 | BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_GETREG_B32), Reg) | |||
3660 | .addImm(EncodedReg); | |||
3661 | ||||
3662 | // FIXME: Do we need to use an isel pseudo that may clobber scc? | |||
3663 | BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_CMP_LG_U32)) | |||
3664 | .addReg(Reg, RegState::Kill) | |||
3665 | .addImm(0); | |||
3666 | BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_SCC1)) | |||
3667 | .addMBB(LoopBB); | |||
3668 | ||||
3669 | return RemainderBB; | |||
3670 | } | |||
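| // Emitted loop (sketch of the final code): | |||
| // LoopBB: | |||
| //   s_setreg_imm32_b32 hwreg(TRAPSTS, MEM_VIOL, 1), 0 ; clear MEM_VIOL | |||
| //   <GWS op bundled with s_waitcnt 0> | |||
| //   s_getreg_b32 sN, hwreg(TRAPSTS, MEM_VIOL, 1) | |||
| //   s_cmp_lg_u32 sN, 0 | |||
| //   s_cbranch_scc1 LoopBB                             ; retry on violation | |||
| // RemainderBB: | |||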
3671 | ||||
3672 | // Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in the | |||
3673 | // wavefront. If the value is uniform and just happens to be in a VGPR, this | |||
3674 | // will only do one iteration. In the worst case, this will loop 64 times. | |||
3675 | // | |||
3676 | // TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value. | |||
3677 | static MachineBasicBlock::iterator | |||
3678 | emitLoadM0FromVGPRLoop(const SIInstrInfo *TII, MachineRegisterInfo &MRI, | |||
3679 | MachineBasicBlock &OrigBB, MachineBasicBlock &LoopBB, | |||
3680 | const DebugLoc &DL, const MachineOperand &Idx, | |||
3681 | unsigned InitReg, unsigned ResultReg, unsigned PhiReg, | |||
3682 | unsigned InitSaveExecReg, int Offset, bool UseGPRIdxMode, | |||
3683 | Register &SGPRIdxReg) { | |||
3684 | ||||
3685 | MachineFunction *MF = OrigBB.getParent(); | |||
3686 | const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); | |||
3687 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); | |||
3688 | MachineBasicBlock::iterator I = LoopBB.begin(); | |||
3689 | ||||
3690 | const TargetRegisterClass *BoolRC = TRI->getBoolRC(); | |||
3691 | Register PhiExec = MRI.createVirtualRegister(BoolRC); | |||
3692 | Register NewExec = MRI.createVirtualRegister(BoolRC); | |||
3693 | Register CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); | |||
3694 | Register CondReg = MRI.createVirtualRegister(BoolRC); | |||
3695 | ||||
3696 | BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg) | |||
3697 | .addReg(InitReg) | |||
3698 | .addMBB(&OrigBB) | |||
3699 | .addReg(ResultReg) | |||
3700 | .addMBB(&LoopBB); | |||
3701 | ||||
3702 | BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec) | |||
3703 | .addReg(InitSaveExecReg) | |||
3704 | .addMBB(&OrigBB) | |||
3705 | .addReg(NewExec) | |||
3706 | .addMBB(&LoopBB); | |||
3707 | ||||
3708 | // Read the next variant <- also loop target. | |||
3709 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg) | |||
3710 | .addReg(Idx.getReg(), getUndefRegState(Idx.isUndef())); | |||
3711 | ||||
3712 | // Compare the just read M0 value to all possible Idx values. | |||
3713 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg) | |||
3714 | .addReg(CurrentIdxReg) | |||
3715 | .addReg(Idx.getReg(), 0, Idx.getSubReg()); | |||
3716 | ||||
3717 | // Update EXEC, saving the original EXEC value to NewExec. | |||
3718 | BuildMI(LoopBB, I, DL, TII->get(ST.isWave32() ? AMDGPU::S_AND_SAVEEXEC_B32 | |||
3719 | : AMDGPU::S_AND_SAVEEXEC_B64), | |||
3720 | NewExec) | |||
3721 | .addReg(CondReg, RegState::Kill); | |||
3722 | ||||
3723 | MRI.setSimpleHint(NewExec, CondReg); | |||
3724 | ||||
3725 | if (UseGPRIdxMode) { | |||
3726 | if (Offset == 0) { | |||
3727 | SGPRIdxReg = CurrentIdxReg; | |||
3728 | } else { | |||
3729 | SGPRIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); | |||
3730 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), SGPRIdxReg) | |||
3731 | .addReg(CurrentIdxReg, RegState::Kill) | |||
3732 | .addImm(Offset); | |||
3733 | } | |||
3734 | } else { | |||
3735 | // Move index from VCC into M0 | |||
3736 | if (Offset == 0) { | |||
3737 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) | |||
3738 | .addReg(CurrentIdxReg, RegState::Kill); | |||
3739 | } else { | |||
3740 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0) | |||
3741 | .addReg(CurrentIdxReg, RegState::Kill) | |||
3742 | .addImm(Offset); | |||
3743 | } | |||
3744 | } | |||
3745 | ||||
3746 | // Update EXEC, switch all done bits to 0 and all todo bits to 1. | |||
3747 | unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; | |||
3748 | MachineInstr *InsertPt = | |||
3749 | BuildMI(LoopBB, I, DL, TII->get(ST.isWave32() ? AMDGPU::S_XOR_B32_term | |||
3750 | : AMDGPU::S_XOR_B64_term), Exec) | |||
3751 | .addReg(Exec) | |||
3752 | .addReg(NewExec); | |||
3753 | ||||
3754 | // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use | |||
3755 | // s_cbranch_scc0? | |||
3756 | ||||
3757 | // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover. | |||
3758 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ)) | |||
3759 | .addMBB(&LoopBB); | |||
3760 | ||||
3761 | return InsertPt->getIterator(); | |||
3762 | } | |||
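| // The generated waterfall loop, roughly (wave64 names, sketch): | |||
| // Loop: | |||
| //   v_readfirstlane_b32 s_idx, v_idx | |||
| //   v_cmp_eq_u32_e64 s[cc:cc+1], s_idx, v_idx | |||
| //   s_and_saveexec_b64 s[new:new+1], s[cc:cc+1]  ; run matching lanes only | |||
| //   s_mov_b32 m0, s_idx                          ; or s_add_i32 with Offset | |||
| //   <indexed access, inserted by the caller at the returned point> | |||
| //   s_xor_b64 exec, exec, s[new:new+1]           ; retire handled lanes | |||
| //   s_cbranch_execnz Loop | |||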
3763 | ||||
3764 | // This has slightly sub-optimal regalloc when the source vector is killed by | |||
3765 | // the read. The register allocator does not understand that the kill is | |||
3766 | // per-workitem, so the vector is kept live for the whole loop; we therefore | |||
3767 | // cannot reuse a subregister from it, using one more VGPR than necessary. | |||
3768 | // That VGPR was saved back when this was expanded after register allocation. | |||
3769 | static MachineBasicBlock::iterator | |||
3770 | loadM0FromVGPR(const SIInstrInfo *TII, MachineBasicBlock &MBB, MachineInstr &MI, | |||
3771 | unsigned InitResultReg, unsigned PhiReg, int Offset, | |||
3772 | bool UseGPRIdxMode, Register &SGPRIdxReg) { | |||
3773 | MachineFunction *MF = MBB.getParent(); | |||
3774 | const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); | |||
3775 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); | |||
3776 | MachineRegisterInfo &MRI = MF->getRegInfo(); | |||
3777 | const DebugLoc &DL = MI.getDebugLoc(); | |||
3778 | MachineBasicBlock::iterator I(&MI); | |||
3779 | ||||
3780 | const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); | |||
3781 | Register DstReg = MI.getOperand(0).getReg(); | |||
3782 | Register SaveExec = MRI.createVirtualRegister(BoolXExecRC); | |||
3783 | Register TmpExec = MRI.createVirtualRegister(BoolXExecRC); | |||
3784 | unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; | |||
3785 | unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64; | |||
3786 | ||||
3787 | BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec); | |||
3788 | ||||
3789 | // Save the EXEC mask | |||
3790 | BuildMI(MBB, I, DL, TII->get(MovExecOpc), SaveExec) | |||
3791 | .addReg(Exec); | |||
3792 | ||||
3793 | MachineBasicBlock *LoopBB; | |||
3794 | MachineBasicBlock *RemainderBB; | |||
3795 | std::tie(LoopBB, RemainderBB) = splitBlockForLoop(MI, MBB, false); | |||
3796 | ||||
3797 | const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); | |||
3798 | ||||
3799 | auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx, | |||
3800 | InitResultReg, DstReg, PhiReg, TmpExec, | |||
3801 | Offset, UseGPRIdxMode, SGPRIdxReg); | |||
3802 | ||||
3803 | MachineBasicBlock* LandingPad = MF->CreateMachineBasicBlock(); | |||
3804 | MachineFunction::iterator MBBI(LoopBB); | |||
3805 | ++MBBI; | |||
3806 | MF->insert(MBBI, LandingPad); | |||
3807 | LoopBB->removeSuccessor(RemainderBB); | |||
3808 | LandingPad->addSuccessor(RemainderBB); | |||
3809 | LoopBB->addSuccessor(LandingPad); | |||
3810 | MachineBasicBlock::iterator First = LandingPad->begin(); | |||
3811 | BuildMI(*LandingPad, First, DL, TII->get(MovExecOpc), Exec) | |||
3812 | .addReg(SaveExec); | |||
3813 | ||||
3814 | return InsPt; | |||
3815 | } | |||
3816 | ||||
3817 | // Returns subreg index, offset | |||
3818 | static std::pair<unsigned, int> | |||
3819 | computeIndirectRegAndOffset(const SIRegisterInfo &TRI, | |||
3820 | const TargetRegisterClass *SuperRC, | |||
3821 | unsigned VecReg, | |||
3822 | int Offset) { | |||
3823 | int NumElts = TRI.getRegSizeInBits(*SuperRC) / 32; | |||
3824 | ||||
3825 | // Skip out-of-bounds offsets, or else we would end up using an undefined | |||
3826 | // register. | |||
3827 | if (Offset >= NumElts || Offset < 0) | |||
3828 | return std::make_pair(AMDGPU::sub0, Offset); | |||
3829 | ||||
3830 | return std::make_pair(SIRegisterInfo::getSubRegFromChannel(Offset), 0); | |||
3831 | } | |||
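| // Examples for a VReg_128 source (NumElts = 128 / 32 = 4): | |||
| //   Offset 2  -> { sub2, 0 }  ; in range, folded into the subregister | |||
| //   Offset 5  -> { sub0, 5 }  ; out of range, left as a runtime offset | |||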
3832 | ||||
3833 | static void setM0ToIndexFromSGPR(const SIInstrInfo *TII, | |||
3834 | MachineRegisterInfo &MRI, MachineInstr &MI, | |||
3835 | int Offset) { | |||
3836 | MachineBasicBlock *MBB = MI.getParent(); | |||
3837 | const DebugLoc &DL = MI.getDebugLoc(); | |||
3838 | MachineBasicBlock::iterator I(&MI); | |||
3839 | ||||
3840 | const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); | |||
3841 | ||||
3842 | assert(Idx->getReg() != AMDGPU::NoRegister); | |||
3843 | ||||
3844 | if (Offset == 0) { | |||
3845 | BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0).add(*Idx); | |||
3846 | } else { | |||
3847 | BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0) | |||
3848 | .add(*Idx) | |||
3849 | .addImm(Offset); | |||
3850 | } | |||
3851 | } | |||
3852 | ||||
3853 | static Register getIndirectSGPRIdx(const SIInstrInfo *TII, | |||
3854 | MachineRegisterInfo &MRI, MachineInstr &MI, | |||
3855 | int Offset) { | |||
3856 | MachineBasicBlock *MBB = MI.getParent(); | |||
3857 | const DebugLoc &DL = MI.getDebugLoc(); | |||
3858 | MachineBasicBlock::iterator I(&MI); | |||
3859 | ||||
3860 | const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); | |||
3861 | ||||
3862 | if (Offset == 0) | |||
3863 | return Idx->getReg(); | |||
3864 | ||||
3865 | Register Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); | |||
3866 | BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp) | |||
3867 | .add(*Idx) | |||
3868 | .addImm(Offset); | |||
3869 | return Tmp; | |||
3870 | } | |||
3871 | ||||
3872 | static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI, | |||
3873 | MachineBasicBlock &MBB, | |||
3874 | const GCNSubtarget &ST) { | |||
3875 | const SIInstrInfo *TII = ST.getInstrInfo(); | |||
3876 | const SIRegisterInfo &TRI = TII->getRegisterInfo(); | |||
3877 | MachineFunction *MF = MBB.getParent(); | |||
3878 | MachineRegisterInfo &MRI = MF->getRegInfo(); | |||
3879 | ||||
3880 | Register Dst = MI.getOperand(0).getReg(); | |||
3881 | const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); | |||
3882 | Register SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg(); | |||
3883 | int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm(); | |||
3884 | ||||
3885 | const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg); | |||
3886 | const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg()); | |||
3887 | ||||
3888 | unsigned SubReg; | |||
3889 | std::tie(SubReg, Offset) | |||
3890 | = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset); | |||
3891 | ||||
3892 | const bool UseGPRIdxMode = ST.useVGPRIndexMode(); | |||
3893 | ||||
3894 | // Check for an SGPR index. | |||
3895 | if (TII->getRegisterInfo().isSGPRClass(IdxRC)) { | |||
3896 | MachineBasicBlock::iterator I(&MI); | |||
3897 | const DebugLoc &DL = MI.getDebugLoc(); | |||
3898 | ||||
3899 | if (UseGPRIdxMode) { | |||
3900 | // TODO: Look at the uses to avoid the copy. This may require rescheduling | |||
3901 | // to avoid interfering with other uses, so probably requires a new | |||
3902 | // optimization pass. | |||
3903 | Register Idx = getIndirectSGPRIdx(TII, MRI, MI, Offset); | |||
3904 | ||||
3905 | const MCInstrDesc &GPRIDXDesc = | |||
3906 | TII->getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), true); | |||
3907 | BuildMI(MBB, I, DL, GPRIDXDesc, Dst) | |||
3908 | .addReg(SrcReg) | |||
3909 | .addReg(Idx) | |||
3910 | .addImm(SubReg); | |||
3911 | } else { | |||
3912 | setM0ToIndexFromSGPR(TII, MRI, MI, Offset); | |||
3913 | ||||
3914 | BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst) | |||
3915 | .addReg(SrcReg, 0, SubReg) | |||
3916 | .addReg(SrcReg, RegState::Implicit); | |||
3917 | } | |||
3918 | ||||
3919 | MI.eraseFromParent(); | |||
3920 | ||||
3921 | return &MBB; | |||
3922 | } | |||
3923 | ||||
3924 | // Control flow needs to be inserted if indexing with a VGPR. | |||
3925 | const DebugLoc &DL = MI.getDebugLoc(); | |||
3926 | MachineBasicBlock::iterator I(&MI); | |||
3927 | ||||
3928 | Register PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); | |||
3929 | Register InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); | |||
3930 | ||||
3931 | BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg); | |||
3932 | ||||
3933 | Register SGPRIdxReg; | |||
3934 | auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg, Offset, | |||
3935 | UseGPRIdxMode, SGPRIdxReg); | |||
3936 | ||||
3937 | MachineBasicBlock *LoopBB = InsPt->getParent(); | |||
3938 | ||||
3939 | if (UseGPRIdxMode) { | |||
3940 | const MCInstrDesc &GPRIDXDesc = | |||
3941 | TII->getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), true); | |||
3942 | ||||
3943 | BuildMI(*LoopBB, InsPt, DL, GPRIDXDesc, Dst) | |||
3944 | .addReg(SrcReg) | |||
3945 | .addReg(SGPRIdxReg) | |||
3946 | .addImm(SubReg); | |||
3947 | } else { | |||
3948 | BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst) | |||
3949 | .addReg(SrcReg, 0, SubReg) | |||
3950 | .addReg(SrcReg, RegState::Implicit); | |||
3951 | } | |||
3952 | ||||
3953 | MI.eraseFromParent(); | |||
3954 | ||||
3955 | return LoopBB; | |||
3956 | } | |||
3957 | ||||
3958 | static MachineBasicBlock *emitIndirectDst(MachineInstr &MI, | |||
3959 | MachineBasicBlock &MBB, | |||
3960 | const GCNSubtarget &ST) { | |||
3961 | const SIInstrInfo *TII = ST.getInstrInfo(); | |||
3962 | const SIRegisterInfo &TRI = TII->getRegisterInfo(); | |||
3963 | MachineFunction *MF = MBB.getParent(); | |||
3964 | MachineRegisterInfo &MRI = MF->getRegInfo(); | |||
3965 | ||||
3966 | Register Dst = MI.getOperand(0).getReg(); | |||
3967 | const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src); | |||
3968 | const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); | |||
3969 | const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val); | |||
3970 | int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm(); | |||
3971 | const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg()); | |||
3972 | const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg()); | |||
3973 | ||||
3974 | // This can be an immediate, but will be folded later. | |||
3975 | assert(Val->getReg()); | |||
3976 | ||||
3977 | unsigned SubReg; | |||
3978 | std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC, | |||
3979 | SrcVec->getReg(), | |||
3980 | Offset); | |||
3981 | const bool UseGPRIdxMode = ST.useVGPRIndexMode(); | |||
3982 | ||||
3983 | if (Idx->getReg() == AMDGPU::NoRegister) { | |||
3984 | MachineBasicBlock::iterator I(&MI); | |||
3985 | const DebugLoc &DL = MI.getDebugLoc(); | |||
3986 | ||||
3987 | assert(Offset == 0); | |||
3988 | ||||
3989 | BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst) | |||
3990 | .add(*SrcVec) | |||
3991 | .add(*Val) | |||
3992 | .addImm(SubReg); | |||
3993 | ||||
3994 | MI.eraseFromParent(); | |||
3995 | return &MBB; | |||
3996 | } | |||
3997 | ||||
3998 | // Check for an SGPR index. | |||
3999 | if (TII->getRegisterInfo().isSGPRClass(IdxRC)) { | |||
4000 | MachineBasicBlock::iterator I(&MI); | |||
4001 | const DebugLoc &DL = MI.getDebugLoc(); | |||
4002 | ||||
4003 | if (UseGPRIdxMode) { | |||
4004 | Register Idx = getIndirectSGPRIdx(TII, MRI, MI, Offset); | |||
4005 | ||||
4006 | const MCInstrDesc &GPRIDXDesc = | |||
4007 | TII->getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), false); | |||
4008 | BuildMI(MBB, I, DL, GPRIDXDesc, Dst) | |||
4009 | .addReg(SrcVec->getReg()) | |||
4010 | .add(*Val) | |||
4011 | .addReg(Idx) | |||
4012 | .addImm(SubReg); | |||
4013 | } else { | |||
4014 | setM0ToIndexFromSGPR(TII, MRI, MI, Offset); | |||
4015 | ||||
4016 | const MCInstrDesc &MovRelDesc = TII->getIndirectRegWriteMovRelPseudo( | |||
4017 | TRI.getRegSizeInBits(*VecRC), 32, false); | |||
4018 | BuildMI(MBB, I, DL, MovRelDesc, Dst) | |||
4019 | .addReg(SrcVec->getReg()) | |||
4020 | .add(*Val) | |||
4021 | .addImm(SubReg); | |||
4022 | } | |||
4023 | MI.eraseFromParent(); | |||
4024 | return &MBB; | |||
4025 | } | |||
4026 | ||||
4027 | // Control flow needs to be inserted if indexing with a VGPR. | |||
4028 | if (Val->isReg()) | |||
4029 | MRI.clearKillFlags(Val->getReg()); | |||
4030 | ||||
4031 | const DebugLoc &DL = MI.getDebugLoc(); | |||
4032 | ||||
4033 | Register PhiReg = MRI.createVirtualRegister(VecRC); | |||
4034 | ||||
4035 | Register SGPRIdxReg; | |||
4036 | auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg, Offset, | |||
4037 | UseGPRIdxMode, SGPRIdxReg); | |||
4038 | MachineBasicBlock *LoopBB = InsPt->getParent(); | |||
4039 | ||||
4040 | if (UseGPRIdxMode) { | |||
4041 | const MCInstrDesc &GPRIDXDesc = | |||
4042 | TII->getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), false); | |||
4043 | ||||
4044 | BuildMI(*LoopBB, InsPt, DL, GPRIDXDesc, Dst) | |||
4045 | .addReg(PhiReg) | |||
4046 | .add(*Val) | |||
4047 | .addReg(SGPRIdxReg) | |||
4048 | .addImm(AMDGPU::sub0); | |||
4049 | } else { | |||
4050 | const MCInstrDesc &MovRelDesc = TII->getIndirectRegWriteMovRelPseudo( | |||
4051 | TRI.getRegSizeInBits(*VecRC), 32, false); | |||
4052 | BuildMI(*LoopBB, InsPt, DL, MovRelDesc, Dst) | |||
4053 | .addReg(PhiReg) | |||
4054 | .add(*Val) | |||
4055 | .addImm(AMDGPU::sub0); | |||
4056 | } | |||
4057 | ||||
4058 | MI.eraseFromParent(); | |||
4059 | return LoopBB; | |||
4060 | } | |||
4061 | ||||
4062 | MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter( | |||
4063 | MachineInstr &MI, MachineBasicBlock *BB) const { | |||
4064 | ||||
4065 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); | |||
4066 | MachineFunction *MF = BB->getParent(); | |||
4067 | SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); | |||
4068 | ||||
4069 | switch (MI.getOpcode()) { | |||
4070 | case AMDGPU::S_UADDO_PSEUDO: | |||
4071 | case AMDGPU::S_USUBO_PSEUDO: { | |||
4072 | const DebugLoc &DL = MI.getDebugLoc(); | |||
4073 | MachineOperand &Dest0 = MI.getOperand(0); | |||
4074 | MachineOperand &Dest1 = MI.getOperand(1); | |||
4075 | MachineOperand &Src0 = MI.getOperand(2); | |||
4076 | MachineOperand &Src1 = MI.getOperand(3); | |||
4077 | ||||
4078 | unsigned Opc = (MI.getOpcode() == AMDGPU::S_UADDO_PSEUDO) | |||
4079 | ? AMDGPU::S_ADD_I32 | |||
4080 | : AMDGPU::S_SUB_I32; | |||
4081 | BuildMI(*BB, MI, DL, TII->get(Opc), Dest0.getReg()).add(Src0).add(Src1); | |||
4082 | ||||
4083 | BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CSELECT_B64), Dest1.getReg()) | |||
4084 | .addImm(1) | |||
4085 | .addImm(0); | |||
4086 | ||||
4087 | MI.eraseFromParent(); | |||
4088 | return BB; | |||
4089 | } | |||
4090 | case AMDGPU::S_ADD_U64_PSEUDO: | |||
4091 | case AMDGPU::S_SUB_U64_PSEUDO: { | |||
4092 | MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); | |||
4093 | const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); | |||
4094 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); | |||
4095 | const TargetRegisterClass *BoolRC = TRI->getBoolRC(); | |||
4096 | const DebugLoc &DL = MI.getDebugLoc(); | |||
4097 | ||||
4098 | MachineOperand &Dest = MI.getOperand(0); | |||
4099 | MachineOperand &Src0 = MI.getOperand(1); | |||
4100 | MachineOperand &Src1 = MI.getOperand(2); | |||
4101 | ||||
4102 | Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); | |||
4103 | Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); | |||
4104 | ||||
4105 | MachineOperand Src0Sub0 = TII->buildExtractSubRegOrImm( | |||
4106 | MI, MRI, Src0, BoolRC, AMDGPU::sub0, &AMDGPU::SReg_32RegClass); | |||
4107 | MachineOperand Src0Sub1 = TII->buildExtractSubRegOrImm( | |||
4108 | MI, MRI, Src0, BoolRC, AMDGPU::sub1, &AMDGPU::SReg_32RegClass); | |||
4109 | ||||
4110 | MachineOperand Src1Sub0 = TII->buildExtractSubRegOrImm( | |||
4111 | MI, MRI, Src1, BoolRC, AMDGPU::sub0, &AMDGPU::SReg_32RegClass); | |||
4112 | MachineOperand Src1Sub1 = TII->buildExtractSubRegOrImm( | |||
4113 | MI, MRI, Src1, BoolRC, AMDGPU::sub1, &AMDGPU::SReg_32RegClass); | |||
4114 | ||||
4115 | bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO); | |||
4116 | ||||
4117 | unsigned LoOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32; | |||
4118 | unsigned HiOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32; | |||
4119 | BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0).add(Src0Sub0).add(Src1Sub0); | |||
4120 | BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1).add(Src0Sub1).add(Src1Sub1); | |||
4121 | BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg()) | |||
4122 | .addReg(DestSub0) | |||
4123 | .addImm(AMDGPU::sub0) | |||
4124 | .addReg(DestSub1) | |||
4125 | .addImm(AMDGPU::sub1); | |||
4126 | MI.eraseFromParent(); | |||
4127 | return BB; | |||
4128 | } | |||
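| // For example, S_ADD_U64_PSEUDO expands to (sketch): | |||
| //   s_add_u32  dst.sub0, src0.sub0, src1.sub0  ; sets SCC to the carry | |||
| //   s_addc_u32 dst.sub1, src0.sub1, src1.sub1  ; consumes SCC | |||
| // with the halves recombined into the 64-bit result by REG_SEQUENCE. | |||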
4129 | case AMDGPU::V_ADD_U64_PSEUDO: | |||
4130 | case AMDGPU::V_SUB_U64_PSEUDO: { | |||
4131 | MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); | |||
4132 | const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); | |||
4133 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); | |||
4134 | const DebugLoc &DL = MI.getDebugLoc(); | |||
4135 | ||||
4136 | bool IsAdd = (MI.getOpcode() == AMDGPU::V_ADD_U64_PSEUDO); | |||
4137 | ||||
4138 | MachineOperand &Dest = MI.getOperand(0); | |||
4139 | MachineOperand &Src0 = MI.getOperand(1); | |||
4140 | MachineOperand &Src1 = MI.getOperand(2); | |||
4141 | ||||
4142 | if (IsAdd && ST.hasLshlAddB64()) { | |||
4143 | auto Add = BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_LSHL_ADD_U64_e64), | |||
4144 | Dest.getReg()) | |||
4145 | .add(Src0) | |||
4146 | .addImm(0) | |||
4147 | .add(Src1); | |||
4148 | TII->legalizeOperands(*Add); | |||
4149 | MI.eraseFromParent(); | |||
4150 | return BB; | |||
4151 | } | |||
4152 | ||||
4153 | const auto *CarryRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); | |||
4154 | ||||
4155 | Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); | |||
4156 | Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); | |||
4157 | ||||
4158 | Register CarryReg = MRI.createVirtualRegister(CarryRC); | |||
4159 | Register DeadCarryReg = MRI.createVirtualRegister(CarryRC); | |||
4160 | ||||
4161 | const TargetRegisterClass *Src0RC = Src0.isReg() | |||
4162 | ? MRI.getRegClass(Src0.getReg()) | |||
4163 | : &AMDGPU::VReg_64RegClass; | |||
4164 | const TargetRegisterClass *Src1RC = Src1.isReg() | |||
4165 | ? MRI.getRegClass(Src1.getReg()) | |||
4166 | : &AMDGPU::VReg_64RegClass; | |||
4167 | ||||
4168 | const TargetRegisterClass *Src0SubRC = | |||
4169 | TRI->getSubRegClass(Src0RC, AMDGPU::sub0); | |||
4170 | const TargetRegisterClass *Src1SubRC = | |||
4171 | TRI->getSubRegClass(Src1RC, AMDGPU::sub1); | |||
4172 | ||||
4173 | MachineOperand SrcReg0Sub0 = TII->buildExtractSubRegOrImm( | |||
4174 | MI, MRI, Src0, Src0RC, AMDGPU::sub0, Src0SubRC); | |||
4175 | MachineOperand SrcReg1Sub0 = TII->buildExtractSubRegOrImm( | |||
4176 | MI, MRI, Src1, Src1RC, AMDGPU::sub0, Src1SubRC); | |||
4177 | ||||
4178 | MachineOperand SrcReg0Sub1 = TII->buildExtractSubRegOrImm( | |||
4179 | MI, MRI, Src0, Src0RC, AMDGPU::sub1, Src0SubRC); | |||
4180 | MachineOperand SrcReg1Sub1 = TII->buildExtractSubRegOrImm( | |||
4181 | MI, MRI, Src1, Src1RC, AMDGPU::sub1, Src1SubRC); | |||
4182 | ||||
4183 | unsigned LoOpc = IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64; | |||
4184 | MachineInstr *LoHalf = BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0) | |||
4185 | .addReg(CarryReg, RegState::Define) | |||
4186 | .add(SrcReg0Sub0) | |||
4187 | .add(SrcReg1Sub0) | |||
4188 | .addImm(0); // clamp bit | |||
4189 | ||||
4190 | unsigned HiOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64; | |||
4191 | MachineInstr *HiHalf = | |||
4192 | BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1) | |||
4193 | .addReg(DeadCarryReg, RegState::Define | RegState::Dead) | |||
4194 | .add(SrcReg0Sub1) | |||
4195 | .add(SrcReg1Sub1) | |||
4196 | .addReg(CarryReg, RegState::Kill) | |||
4197 | .addImm(0); // clamp bit | |||
4198 | ||||
4199 | BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg()) | |||
4200 | .addReg(DestSub0) | |||
4201 | .addImm(AMDGPU::sub0) | |||
4202 | .addReg(DestSub1) | |||
4203 | .addImm(AMDGPU::sub1); | |||
4204 | TII->legalizeOperands(*LoHalf); | |||
4205 | TII->legalizeOperands(*HiHalf); | |||
4206 | MI.eraseFromParent(); | |||
4207 | return BB; | |||
4208 | } | |||
4209 | case AMDGPU::S_ADD_CO_PSEUDO: | |||
4210 | case AMDGPU::S_SUB_CO_PSEUDO: { | |||
4211 | // This pseudo can only be selected from a uniform | |||
4212 | // add/subcarry node, so all of its VGPR operands are | |||
4213 | // assumed to be splat vectors. | |||
4214 | MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); | |||
4215 | const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); | |||
4216 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); | |||
4217 | MachineBasicBlock::iterator MII = MI; | |||
4218 | const DebugLoc &DL = MI.getDebugLoc(); | |||
4219 | MachineOperand &Dest = MI.getOperand(0); | |||
4220 | MachineOperand &CarryDest = MI.getOperand(1); | |||
4221 | MachineOperand &Src0 = MI.getOperand(2); | |||
4222 | MachineOperand &Src1 = MI.getOperand(3); | |||
4223 | MachineOperand &Src2 = MI.getOperand(4); | |||
4224 | unsigned Opc = (MI.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO) | |||
4225 | ? AMDGPU::S_ADDC_U32 | |||
4226 | : AMDGPU::S_SUBB_U32; | |||
4227 | if (Src0.isReg() && TRI->isVectorRegister(MRI, Src0.getReg())) { | |||
4228 | Register RegOp0 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); | |||
4229 | BuildMI(*BB, MII, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), RegOp0) | |||
4230 | .addReg(Src0.getReg()); | |||
4231 | Src0.setReg(RegOp0); | |||
4232 | } | |||
4233 | if (Src1.isReg() && TRI->isVectorRegister(MRI, Src1.getReg())) { | |||
4234 | Register RegOp1 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); | |||
4235 | BuildMI(*BB, MII, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), RegOp1) | |||
4236 | .addReg(Src1.getReg()); | |||
4237 | Src1.setReg(RegOp1); | |||
4238 | } | |||
4239 | Register RegOp2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); | |||
4240 | if (TRI->isVectorRegister(MRI, Src2.getReg())) { | |||
4241 | BuildMI(*BB, MII, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), RegOp2) | |||
4242 | .addReg(Src2.getReg()); | |||
4243 | Src2.setReg(RegOp2); | |||
4244 | } | |||
4245 | ||||
4246 | const TargetRegisterClass *Src2RC = MRI.getRegClass(Src2.getReg()); | |||
4247 | unsigned WaveSize = TRI->getRegSizeInBits(*Src2RC); | |||
4248 | assert(WaveSize == 64 || WaveSize == 32); | |||
4249 | ||||
4250 | if (WaveSize == 64) { | |||
4251 | if (ST.hasScalarCompareEq64()) { | |||
4252 | BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMP_LG_U64)) | |||
4253 | .addReg(Src2.getReg()) | |||
4254 | .addImm(0); | |||
4255 | } else { | |||
4256 | const TargetRegisterClass *SubRC = | |||
4257 | TRI->getSubRegClass(Src2RC, AMDGPU::sub0); | |||
4258 | MachineOperand Src2Sub0 = TII->buildExtractSubRegOrImm( | |||
4259 | MII, MRI, Src2, Src2RC, AMDGPU::sub0, SubRC); | |||
4260 | MachineOperand Src2Sub1 = TII->buildExtractSubRegOrImm( | |||
4261 | MII, MRI, Src2, Src2RC, AMDGPU::sub1, SubRC); | |||
4262 | Register Src2_32 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); | |||
4263 | ||||
4264 | BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_OR_B32), Src2_32) | |||
4265 | .add(Src2Sub0) | |||
4266 | .add(Src2Sub1); | |||
4267 | ||||
4268 | BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMP_LG_U32)) | |||
4269 | .addReg(Src2_32, RegState::Kill) | |||
4270 | .addImm(0); | |||
4271 | } | |||
4272 | } else { | |||
4273 | BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMPK_LG_U32)) | |||
4274 | .addReg(Src2.getReg()) | |||
4275 | .addImm(0); | |||
4276 | } | |||
4277 | ||||
4278 | BuildMI(*BB, MII, DL, TII->get(Opc), Dest.getReg()).add(Src0).add(Src1); | |||
4279 | ||||
4280 | unsigned SelOpc = | |||
4281 | (WaveSize == 64) ? AMDGPU::S_CSELECT_B64 : AMDGPU::S_CSELECT_B32; | |||
4282 | ||||
4283 | BuildMI(*BB, MII, DL, TII->get(SelOpc), CarryDest.getReg()) | |||
4284 | .addImm(-1) | |||
4285 | .addImm(0); | |||
4286 | ||||
4287 | MI.eraseFromParent(); | |||
4288 | return BB; | |||
4289 | } | |||
4290 | case AMDGPU::SI_INIT_M0: { | |||
4291 | BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(), | |||
4292 | TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) | |||
4293 | .add(MI.getOperand(0)); | |||
4294 | MI.eraseFromParent(); | |||
4295 | return BB; | |||
4296 | } | |||
4297 | case AMDGPU::GET_GROUPSTATICSIZE: { | |||
4298 | assert(getTargetMachine().getTargetTriple().getOS() == Triple::AMDHSA || | |||
4299 | getTargetMachine().getTargetTriple().getOS() == Triple::AMDPAL); | |||
4300 | DebugLoc DL = MI.getDebugLoc(); | |||
4301 | BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32)) | |||
4302 | .add(MI.getOperand(0)) | |||
4303 | .addImm(MFI->getLDSSize()); | |||
4304 | MI.eraseFromParent(); | |||
4305 | return BB; | |||
4306 | } | |||
4307 | case AMDGPU::SI_INDIRECT_SRC_V1: | |||
4308 | case AMDGPU::SI_INDIRECT_SRC_V2: | |||
4309 | case AMDGPU::SI_INDIRECT_SRC_V4: | |||
4310 | case AMDGPU::SI_INDIRECT_SRC_V8: | |||
4311 | case AMDGPU::SI_INDIRECT_SRC_V16: | |||
4312 | case AMDGPU::SI_INDIRECT_SRC_V32: | |||
4313 | return emitIndirectSrc(MI, *BB, *getSubtarget()); | |||
4314 | case AMDGPU::SI_INDIRECT_DST_V1: | |||
4315 | case AMDGPU::SI_INDIRECT_DST_V2: | |||
4316 | case AMDGPU::SI_INDIRECT_DST_V4: | |||
4317 | case AMDGPU::SI_INDIRECT_DST_V8: | |||
4318 | case AMDGPU::SI_INDIRECT_DST_V16: | |||
4319 | case AMDGPU::SI_INDIRECT_DST_V32: | |||
4320 | return emitIndirectDst(MI, *BB, *getSubtarget()); | |||
4321 | case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO: | |||
4322 | case AMDGPU::SI_KILL_I1_PSEUDO: | |||
4323 | return splitKillBlock(MI, BB); | |||
4324 | case AMDGPU::V_CNDMASK_B64_PSEUDO: { | |||
4325 | MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); | |||
4326 | const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); | |||
4327 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); | |||
4328 | ||||
4329 | Register Dst = MI.getOperand(0).getReg(); | |||
4330 | Register Src0 = MI.getOperand(1).getReg(); | |||
4331 | Register Src1 = MI.getOperand(2).getReg(); | |||
4332 | const DebugLoc &DL = MI.getDebugLoc(); | |||
4333 | Register SrcCond = MI.getOperand(3).getReg(); | |||
4334 | ||||
4335 | Register DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); | |||
4336 | Register DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); | |||
4337 | const auto *CondRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); | |||
4338 | Register SrcCondCopy = MRI.createVirtualRegister(CondRC); | |||
4339 | ||||
4340 | BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), SrcCondCopy) | |||
4341 | .addReg(SrcCond); | |||
4342 | BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo) | |||
4343 | .addImm(0) | |||
4344 | .addReg(Src0, 0, AMDGPU::sub0) | |||
4345 | .addImm(0) | |||
4346 | .addReg(Src1, 0, AMDGPU::sub0) | |||
4347 | .addReg(SrcCondCopy); | |||
4348 | BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi) | |||
4349 | .addImm(0) | |||
4350 | .addReg(Src0, 0, AMDGPU::sub1) | |||
4351 | .addImm(0) | |||
4352 | .addReg(Src1, 0, AMDGPU::sub1) | |||
4353 | .addReg(SrcCondCopy); | |||
4354 | ||||
4355 | BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst) | |||
4356 | .addReg(DstLo) | |||
4357 | .addImm(AMDGPU::sub0) | |||
4358 | .addReg(DstHi) | |||
4359 | .addImm(AMDGPU::sub1); | |||
4360 | MI.eraseFromParent(); | |||
4361 | return BB; | |||
4362 | } | |||
4363 | case AMDGPU::SI_BR_UNDEF: { | |||
4364 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); | |||
4365 | const DebugLoc &DL = MI.getDebugLoc(); | |||
4366 | MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1)) | |||
4367 | .add(MI.getOperand(0)); | |||
4368 | Br->getOperand(1).setIsUndef(true); // read undef SCC | |||
4369 | MI.eraseFromParent(); | |||
4370 | return BB; | |||
4371 | } | |||
4372 | case AMDGPU::ADJCALLSTACKUP: | |||
4373 | case AMDGPU::ADJCALLSTACKDOWN: { | |||
4374 | const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>(); | |||
4375 | MachineInstrBuilder MIB(*MF, &MI); | |||
4376 | MIB.addReg(Info->getStackPtrOffsetReg(), RegState::ImplicitDefine) | |||
4377 | .addReg(Info->getStackPtrOffsetReg(), RegState::Implicit); | |||
4378 | return BB; | |||
4379 | } | |||
4380 | case AMDGPU::SI_CALL_ISEL: { | |||
4381 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); | |||
4382 | const DebugLoc &DL = MI.getDebugLoc(); | |||
4383 | ||||
4384 | unsigned ReturnAddrReg = TII->getRegisterInfo().getReturnAddressReg(*MF); | |||
4385 | ||||
4386 | MachineInstrBuilder MIB; | |||
4387 | MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_CALL), ReturnAddrReg); | |||
4388 | ||||
4389 | for (const MachineOperand &MO : MI.operands()) | |||
4390 | MIB.add(MO); | |||
4391 | ||||
4392 | MIB.cloneMemRefs(MI); | |||
4393 | MI.eraseFromParent(); | |||
4394 | return BB; | |||
4395 | } | |||
4396 | case AMDGPU::V_ADD_CO_U32_e32: | |||
4397 | case AMDGPU::V_SUB_CO_U32_e32: | |||
4398 | case AMDGPU::V_SUBREV_CO_U32_e32: { | |||
4399 | // TODO: Define distinct V_*_I32_Pseudo instructions instead. | |||
4400 | const DebugLoc &DL = MI.getDebugLoc(); | |||
4401 | unsigned Opc = MI.getOpcode(); | |||
4402 | ||||
4403 | bool NeedClampOperand = false; | |||
4404 | if (TII->pseudoToMCOpcode(Opc) == -1) { | |||
4405 | Opc = AMDGPU::getVOPe64(Opc); | |||
4406 | NeedClampOperand = true; | |||
4407 | } | |||
4408 | ||||
4409 | auto I = BuildMI(*BB, MI, DL, TII->get(Opc), MI.getOperand(0).getReg()); | |||
4410 | if (TII->isVOP3(*I)) { | |||
4411 | const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); | |||
4412 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); | |||
4413 | I.addReg(TRI->getVCC(), RegState::Define); | |||
4414 | } | |||
4415 | I.add(MI.getOperand(1)) | |||
4416 | .add(MI.getOperand(2)); | |||
4417 | if (NeedClampOperand) | |||
4418 | I.addImm(0); // clamp bit for e64 encoding | |||
4419 | ||||
4420 | TII->legalizeOperands(*I); | |||
4421 | ||||
4422 | MI.eraseFromParent(); | |||
4423 | return BB; | |||
4424 | } | |||
4425 | case AMDGPU::V_ADDC_U32_e32: | |||
4426 | case AMDGPU::V_SUBB_U32_e32: | |||
4427 | case AMDGPU::V_SUBBREV_U32_e32: | |||
4428 | // These instructions have an implicit use of vcc which counts towards the | |||
4429 | // constant bus limit. | |||
4430 | TII->legalizeOperands(MI); | |||
4431 | return BB; | |||
4432 | case AMDGPU::DS_GWS_INIT: | |||
4433 | case AMDGPU::DS_GWS_SEMA_BR: | |||
4434 | case AMDGPU::DS_GWS_BARRIER: | |||
4435 | if (Subtarget->needsAlignedVGPRs()) { | |||
4436 | // Add implicit aligned super-reg to force alignment on the data operand. | |||
4437 | const DebugLoc &DL = MI.getDebugLoc(); | |||
4438 | MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); | |||
4439 | const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); | |||
4440 | MachineOperand *Op = TII->getNamedOperand(MI, AMDGPU::OpName::data0); | |||
4441 | Register DataReg = Op->getReg(); | |||
4442 | bool IsAGPR = TRI->isAGPR(MRI, DataReg); | |||
4443 | Register Undef = MRI.createVirtualRegister( | |||
4444 | IsAGPR ? &AMDGPU::AGPR_32RegClass : &AMDGPU::VGPR_32RegClass); | |||
4445 | BuildMI(*BB, MI, DL, TII->get(AMDGPU::IMPLICIT_DEF), Undef); | |||
4446 | Register NewVR = | |||
4447 | MRI.createVirtualRegister(IsAGPR ? &AMDGPU::AReg_64_Align2RegClass | |||
4448 | : &AMDGPU::VReg_64_Align2RegClass); | |||
4449 | BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), NewVR) | |||
4450 | .addReg(DataReg, 0, Op->getSubReg()) | |||
4451 | .addImm(AMDGPU::sub0) | |||
4452 | .addReg(Undef) | |||
4453 | .addImm(AMDGPU::sub1); | |||
4454 | Op->setReg(NewVR); | |||
4455 | Op->setSubReg(AMDGPU::sub0); | |||
4456 | MI.addOperand(MachineOperand::CreateReg(NewVR, false, true)); | |||
4457 | } | |||
4458 | LLVM_FALLTHROUGH; | |||
4459 | case AMDGPU::DS_GWS_SEMA_V: | |||
4460 | case AMDGPU::DS_GWS_SEMA_P: | |||
4461 | case AMDGPU::DS_GWS_SEMA_RELEASE_ALL: | |||
4462 | // An s_waitcnt 0 is required to be the instruction immediately following. | |||
4463 | if (getSubtarget()->hasGWSAutoReplay()) { | |||
4464 | bundleInstWithWaitcnt(MI); | |||
4465 | return BB; | |||
4466 | } | |||
4467 | ||||
4468 | return emitGWSMemViolTestLoop(MI, BB); | |||
4469 | case AMDGPU::S_SETREG_B32: { | |||
4470 | // Try to optimize cases that only set the denormal mode or rounding mode. | |||
4471 | // | |||
4472 | // If the s_setreg_b32 fully sets all of the bits in the rounding mode or | |||
4473 | // denormal mode to a constant, we can use s_round_mode or s_denorm_mode | |||
4474 | // instead. | |||
4475 | // | |||
4476 | // FIXME: This could be predicated on the immediate, but tablegen doesn't | |||
4477 | // allow you to have a no-side-effect instruction in the output of a | |||
4478 | // side-effecting pattern. | |||
4479 | unsigned ID, Offset, Width; | |||
4480 | AMDGPU::Hwreg::decodeHwreg(MI.getOperand(1).getImm(), ID, Offset, Width); | |||
4481 | if (ID != AMDGPU::Hwreg::ID_MODE) | |||
4482 | return BB; | |||
4483 | ||||
4484 | const unsigned WidthMask = maskTrailingOnes<unsigned>(Width); | |||
4485 | const unsigned SetMask = WidthMask << Offset; | |||
4486 | ||||
4487 | if (getSubtarget()->hasDenormModeInst()) { | |||
4488 | unsigned SetDenormOp = 0; | |||
4489 | unsigned SetRoundOp = 0; | |||
4490 | ||||
4491 | // The dedicated instructions can only set the whole denorm or round mode | |||
4492 | // at once, not a subset of bits in either. | |||
4493 | if (SetMask == | |||
4494 | (AMDGPU::Hwreg::FP_ROUND_MASK | AMDGPU::Hwreg::FP_DENORM_MASK)) { | |||
4495 | // If this fully sets both the round and denorm mode, emit the two | |||
4496 | // dedicated instructions for these. | |||
4497 | SetRoundOp = AMDGPU::S_ROUND_MODE; | |||
4498 | SetDenormOp = AMDGPU::S_DENORM_MODE; | |||
4499 | } else if (SetMask == AMDGPU::Hwreg::FP_ROUND_MASK) { | |||
4500 | SetRoundOp = AMDGPU::S_ROUND_MODE; | |||
4501 | } else if (SetMask == AMDGPU::Hwreg::FP_DENORM_MASK) { | |||
4502 | SetDenormOp = AMDGPU::S_DENORM_MODE; | |||
4503 | } | |||
4504 | ||||
4505 | if (SetRoundOp || SetDenormOp) { | |||
4506 | MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); | |||
4507 | MachineInstr *Def = MRI.getVRegDef(MI.getOperand(0).getReg()); | |||
4508 | if (Def && Def->isMoveImmediate() && Def->getOperand(1).isImm()) { | |||
4509 | unsigned ImmVal = Def->getOperand(1).getImm(); | |||
4510 | if (SetRoundOp) { | |||
4511 | BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(SetRoundOp)) | |||
4512 | .addImm(ImmVal & 0xf); | |||
4513 | ||||
4514 | // If we also have the denorm mode, get just the denorm mode bits. | |||
4515 | ImmVal >>= 4; | |||
4516 | } | |||
4517 | ||||
4518 | if (SetDenormOp) { | |||
4519 | BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(SetDenormOp)) | |||
4520 | .addImm(ImmVal & 0xf); | |||
4521 | } | |||
4522 | ||||
4523 | MI.eraseFromParent(); | |||
4524 | return BB; | |||
4525 | } | |||
4526 | } | |||
4527 | } | |||
4528 | ||||
4529 | // If only FP bits are touched, use the no-side-effects pseudo. | |||
4530 | if ((SetMask & (AMDGPU::Hwreg::FP_ROUND_MASK | | |||
4531 | AMDGPU::Hwreg::FP_DENORM_MASK)) == SetMask) | |||
4532 | MI.setDesc(TII->get(AMDGPU::S_SETREG_B32_mode)); | |||
4533 | ||||
4534 | return BB; | |||
4535 | } | |||
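| // Example of the rewrite (sketch): with s_round_mode available and the | |||
| // source operand defined by a move of the immediate 1, | |||
| //   s_setreg_b32 hwreg(HW_REG_MODE, 0, 4), s0 | |||
| // becomes | |||
| //   s_round_mode 0x1 | |||
| // If the FP bits are set from a non-constant value, the instruction is | |||
| // merely retargeted to the side-effect-free S_SETREG_B32_mode form. | |||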
4536 | default: | |||
4537 | return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB); | |||
4538 | } | |||
4539 | } | |||
4540 | ||||
4541 | bool SITargetLowering::hasBitPreservingFPLogic(EVT VT) const { | |||
4542 | return isTypeLegal(VT.getScalarType()); | |||
4543 | } | |||
4544 | ||||
4545 | bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const { | |||
4546 | // This currently forces unfolding various combinations of fsub into fma with | |||
4547 | // free fneg'd operands. As long as we have fast FMA (controlled by | |||
4548 | // isFMAFasterThanFMulAndFAdd), we should perform these. | |||
4549 | ||||
4550 | // When fma is quarter rate, for f64 where add / sub are at best half rate, | |||
4551 | // most of these combines appear to be cycle neutral but save on instruction | |||
4552 | // count / code size. | |||
4553 | return true; | |||
4554 | } | |||
4555 | ||||
4556 | bool SITargetLowering::enableAggressiveFMAFusion(LLT Ty) const { return true; } | |||
4557 | ||||
4558 | EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx, | |||
4559 | EVT VT) const { | |||
4560 | if (!VT.isVector()) { | |||
4561 | return MVT::i1; | |||
4562 | } | |||
4563 | return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements()); | |||
4564 | } | |||
4565 | ||||
4566 | MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const { | |||
4567 | // TODO: Should i16 be used always if legal? For now it would force VALU | |||
4568 | // shifts. | |||
4569 | return (VT == MVT::i16) ? MVT::i16 : MVT::i32; | |||
4570 | } | |||
4571 | ||||
4572 | LLT SITargetLowering::getPreferredShiftAmountTy(LLT Ty) const { | |||
4573 | return (Ty.getScalarSizeInBits() <= 16 && Subtarget->has16BitInsts()) | |||
4574 | ? Ty.changeElementSize(16) | |||
4575 | : Ty.changeElementSize(32); | |||
4576 | } | |||
4577 | ||||
4578 | // Answering this is somewhat tricky, and depends on the specific device, since | |||
4579 | // different devices have different rates for fma and for f64 operations generally. | |||
4580 | // | |||
4581 | // v_fma_f64 and v_mul_f64 always take the same number of cycles as each other | |||
4582 | // regardless of which device (although the number of cycles differs between | |||
4583 | // devices), so it is always profitable for f64. | |||
4584 | // | |||
4585 | // v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable | |||
4586 | // only on full rate devices. Normally, we should prefer selecting v_mad_f32 | |||
4587 | // which we can always do even without fused FP ops since it returns the same | |||
4588 | // result as the separate operations and since it is always full | |||
4589 | // rate. Therefore, we lie and report that it is not faster for f32. v_mad_f32 | |||
4590 | // however does not support denormals, so we do report fma as faster if we have | |||
4591 | // a fast fma device and require denormals. | |||
4592 | // | |||
4593 | bool SITargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, | |||
4594 | EVT VT) const { | |||
4595 | VT = VT.getScalarType(); | |||
4596 | ||||
4597 | switch (VT.getSimpleVT().SimpleTy) { | |||
4598 | case MVT::f32: { | |||
4599 | // If mad is not available, this depends only on whether f32 fma is full rate. | |||
4600 | if (!Subtarget->hasMadMacF32Insts()) | |||
4601 | return Subtarget->hasFastFMAF32(); | |||
4602 | ||||
4603 | // Otherwise f32 mad is always full rate and returns the same result as | |||
4604 | // the separate operations, so it should be preferred over fma. | |||
4605 | // However, mad does not support denormals. | |||
4606 | if (hasFP32Denormals(MF)) | |||
4607 | return Subtarget->hasFastFMAF32() || Subtarget->hasDLInsts(); | |||
4608 | ||||
4609 | // If the subtarget has v_fmac_f32, that's just as good as v_mac_f32. | |||
4610 | return Subtarget->hasFastFMAF32() && Subtarget->hasDLInsts(); | |||
4611 | } | |||
4612 | case MVT::f64: | |||
4613 | return true; | |||
4614 | case MVT::f16: | |||
4615 | return Subtarget->has16BitInsts() && hasFP64FP16Denormals(MF); | |||
4616 | default: | |||
4617 | break; | |||
4618 | } | |||
4619 | ||||
4620 | return false; | |||
4621 | } | |||
4622 | ||||
4623 | bool SITargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, | |||
4624 | LLT Ty) const { | |||
4625 | switch (Ty.getScalarSizeInBits()) { | |||
4626 | case 16: | |||
4627 | return isFMAFasterThanFMulAndFAdd(MF, MVT::f16); | |||
4628 | case 32: | |||
4629 | return isFMAFasterThanFMulAndFAdd(MF, MVT::f32); | |||
4630 | case 64: | |||
4631 | return isFMAFasterThanFMulAndFAdd(MF, MVT::f64); | |||
4632 | default: | |||
4633 | break; | |||
4634 | } | |||
4635 | ||||
4636 | return false; | |||
4637 | } | |||
4638 | ||||
bool SITargetLowering::isFMADLegal(const MachineInstr &MI, LLT Ty) const {
  if (!Ty.isScalar())
    return false;

  if (Ty.getScalarSizeInBits() == 16)
    return Subtarget->hasMadF16() && !hasFP64FP16Denormals(*MI.getMF());
  if (Ty.getScalarSizeInBits() == 32)
    return Subtarget->hasMadMacF32Insts() && !hasFP32Denormals(*MI.getMF());

  return false;
}

bool SITargetLowering::isFMADLegal(const SelectionDAG &DAG,
                                   const SDNode *N) const {
  // TODO: Check future ftz flag
  // v_mad_f32/v_mac_f32 do not support denormals.
  EVT VT = N->getValueType(0);
  if (VT == MVT::f32)
    return Subtarget->hasMadMacF32Insts() &&
           !hasFP32Denormals(DAG.getMachineFunction());
  if (VT == MVT::f16) {
    return Subtarget->hasMadF16() &&
           !hasFP64FP16Denormals(DAG.getMachineFunction());
  }

  return false;
}

//===----------------------------------------------------------------------===//
// Custom DAG Lowering Operations
//===----------------------------------------------------------------------===//

// Work around LegalizeDAG doing the wrong thing and fully scalarizing if the
// wider vector type is legal.
SDValue SITargetLowering::splitUnaryVectorOp(SDValue Op,
                                             SelectionDAG &DAG) const {
  unsigned Opc = Op.getOpcode();
  EVT VT = Op.getValueType();
  assert(VT == MVT::v4f16 || VT == MVT::v4i16);

  SDValue Lo, Hi;
  std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);

  SDLoc SL(Op);
  SDValue OpLo = DAG.getNode(Opc, SL, Lo.getValueType(), Lo,
                             Op->getFlags());
  SDValue OpHi = DAG.getNode(Opc, SL, Hi.getValueType(), Hi,
                             Op->getFlags());

  return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
}

// Work around LegalizeDAG doing the wrong thing and fully scalarizing if the
// wider vector type is legal.
SDValue SITargetLowering::splitBinaryVectorOp(SDValue Op,
                                              SelectionDAG &DAG) const {
  unsigned Opc = Op.getOpcode();
  EVT VT = Op.getValueType();
  assert(VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4f32 ||
         VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v8f32 ||
         VT == MVT::v16f32 || VT == MVT::v32f32);

  SDValue Lo0, Hi0;
  std::tie(Lo0, Hi0) = DAG.SplitVectorOperand(Op.getNode(), 0);
  SDValue Lo1, Hi1;
  std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1);

  SDLoc SL(Op);

  SDValue OpLo = DAG.getNode(Opc, SL, Lo0.getValueType(), Lo0, Lo1,
                             Op->getFlags());
  SDValue OpHi = DAG.getNode(Opc, SL, Hi0.getValueType(), Hi0, Hi1,
                             Op->getFlags());

  return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
}

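// Work around LegalizeDAG doing the wrong thing and fully scalarizing if the
// wider vector type is legal. Unlike the unary and binary cases, operand 0 may
// be a scalar (e.g. the condition of a select), in which case it is reused
// unsplit by both halves.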
SDValue SITargetLowering::splitTernaryVectorOp(SDValue Op,
                                               SelectionDAG &DAG) const {
  unsigned Opc = Op.getOpcode();
  EVT VT = Op.getValueType();
  assert(VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v8i16 ||
         VT == MVT::v8f16 || VT == MVT::v4f32 || VT == MVT::v8f32 ||
         VT == MVT::v16f32 || VT == MVT::v32f32);

  SDValue Lo0, Hi0;
  SDValue Op0 = Op.getOperand(0);
  std::tie(Lo0, Hi0) = Op0.getValueType().isVector()
                           ? DAG.SplitVectorOperand(Op.getNode(), 0)
                           : std::make_pair(Op0, Op0);
  SDValue Lo1, Hi1;
  std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1);
  SDValue Lo2, Hi2;
  std::tie(Lo2, Hi2) = DAG.SplitVectorOperand(Op.getNode(), 2);

  SDLoc SL(Op);
  auto ResVT = DAG.GetSplitDestVTs(VT);

  SDValue OpLo = DAG.getNode(Opc, SL, ResVT.first, Lo0, Lo1, Lo2,
                             Op->getFlags());
  SDValue OpHi = DAG.getNode(Opc, SL, ResVT.second, Hi0, Hi1, Hi2,
                             Op->getFlags());

  return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
}

SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
  case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
  case ISD::LOAD: {
    SDValue Result = LowerLOAD(Op, DAG);
    assert((!Result.getNode() ||
            Result.getNode()->getNumValues() == 2) &&
           "Load should return a value and a chain");
    return Result;
  }

  case ISD::FSIN:
  case ISD::FCOS:
    return LowerTrig(Op, DAG);
  case ISD::SELECT: return LowerSELECT(Op, DAG);
  case ISD::FDIV: return LowerFDIV(Op, DAG);
  case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG);
  case ISD::STORE: return LowerSTORE(Op, DAG);
  case ISD::GlobalAddress: {
    MachineFunction &MF = DAG.getMachineFunction();
    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    return LowerGlobalAddress(MFI, Op, DAG);
  }
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);
  case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
  case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG);
  case ISD::INSERT_SUBVECTOR:
    return lowerINSERT_SUBVECTOR(Op, DAG);
  case ISD::INSERT_VECTOR_ELT:
    return lowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT:
    return lowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::VECTOR_SHUFFLE:
    return lowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::BUILD_VECTOR:
    return lowerBUILD_VECTOR(Op, DAG);
  case ISD::FP_ROUND:
    return lowerFP_ROUND(Op, DAG);
  case ISD::FPTRUNC_ROUND: {
    unsigned Opc;
    SDLoc DL(Op);

    if (Op.getOperand(0)->getValueType(0) != MVT::f32)
      return SDValue();

    // Get the rounding mode from the last operand.
    int RoundMode = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    if (RoundMode == (int)RoundingMode::TowardPositive)
      Opc = AMDGPUISD::FPTRUNC_ROUND_UPWARD;
    else if (RoundMode == (int)RoundingMode::TowardNegative)
      Opc = AMDGPUISD::FPTRUNC_ROUND_DOWNWARD;
    else
      return SDValue();

    return DAG.getNode(Opc, DL, Op.getNode()->getVTList(), Op->getOperand(0));
  }
  case ISD::TRAP:
    return lowerTRAP(Op, DAG);
  case ISD::DEBUGTRAP:
    return lowerDEBUGTRAP(Op, DAG);
  case ISD::FABS:
  case ISD::FNEG:
  case ISD::FCANONICALIZE:
  case ISD::BSWAP:
    return splitUnaryVectorOp(Op, DAG);
  case ISD::FMINNUM:
  case ISD::FMAXNUM:
    return lowerFMINNUM_FMAXNUM(Op, DAG);
  case ISD::FMA:
    return splitTernaryVectorOp(Op, DAG);
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    return LowerFP_TO_INT(Op, DAG);
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
  case ISD::ADD:
  case ISD::SUB:
  case ISD::MUL:
  case ISD::SMIN:
  case ISD::SMAX:
  case ISD::UMIN:
  case ISD::UMAX:
  case ISD::FADD:
  case ISD::FMUL:
  case ISD::FMINNUM_IEEE:
  case ISD::FMAXNUM_IEEE:
  case ISD::UADDSAT:
  case ISD::USUBSAT:
  case ISD::SADDSAT:
  case ISD::SSUBSAT:
    return splitBinaryVectorOp(Op, DAG);
  case ISD::SMULO:
  case ISD::UMULO:
    return lowerXMULO(Op, DAG);
  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:
    return lowerXMUL_LOHI(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC:
    return LowerDYNAMIC_STACKALLOC(Op, DAG);
  }
  return SDValue();
}

// Used for D16: Casts the result of an instruction into the right vector and
// packs values if loads return unpacked values.
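// For example, with unpacked D16 a v3f16 load arrives as v3i32; each element
// is truncated to i16, an undef element is appended to reach the legal v4i16,
// and the result is bitcast back to the f16 vector type.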
static SDValue adjustLoadValueTypeImpl(SDValue Result, EVT LoadVT,
                                       const SDLoc &DL,
                                       SelectionDAG &DAG, bool Unpacked) {
  if (!LoadVT.isVector())
    return Result;

  // Cast back to the original packed type or to a larger type that is a
  // multiple of 32 bits for D16. Widening the return type is required for
  // legalization.
  EVT FittingLoadVT = LoadVT;
  if ((LoadVT.getVectorNumElements() % 2) == 1) {
    FittingLoadVT =
        EVT::getVectorVT(*DAG.getContext(), LoadVT.getVectorElementType(),
                         LoadVT.getVectorNumElements() + 1);
  }

  if (Unpacked) { // From v2i32/v4i32 back to v2f16/v4f16.
    // Truncate to v2i16/v4i16.
    EVT IntLoadVT = FittingLoadVT.changeTypeToInteger();

    // Work around the legalizer not scalarizing truncate after vector op
    // legalization and not creating an intermediate vector trunc.
    SmallVector<SDValue, 4> Elts;
    DAG.ExtractVectorElements(Result, Elts);
    for (SDValue &Elt : Elts)
      Elt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Elt);

    // Pad illegal v1i16/v3i16 to v4i16.
    if ((LoadVT.getVectorNumElements() % 2) == 1)
      Elts.push_back(DAG.getUNDEF(MVT::i16));

    Result = DAG.getBuildVector(IntLoadVT, DL, Elts);

    // Bitcast to original type (v2f16/v4f16).
    return DAG.getNode(ISD::BITCAST, DL, FittingLoadVT, Result);
  }

  // Cast back to the original packed type.
  return DAG.getNode(ISD::BITCAST, DL, FittingLoadVT, Result);
}

SDValue SITargetLowering::adjustLoadValueType(unsigned Opcode,
                                              MemSDNode *M,
                                              SelectionDAG &DAG,
                                              ArrayRef<SDValue> Ops,
                                              bool IsIntrinsic) const {
  SDLoc DL(M);

  bool Unpacked = Subtarget->hasUnpackedD16VMem();
  EVT LoadVT = M->getValueType(0);

  EVT EquivLoadVT = LoadVT;
  if (LoadVT.isVector()) {
    if (Unpacked) {
      EquivLoadVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
                                     LoadVT.getVectorNumElements());
    } else if ((LoadVT.getVectorNumElements() % 2) == 1) {
      // Widen v3f16 to a legal type.
      EquivLoadVT =
          EVT::getVectorVT(*DAG.getContext(), LoadVT.getVectorElementType(),
                           LoadVT.getVectorNumElements() + 1);
    }
  }

  // Change from v4f16/v2f16 to EquivLoadVT.
  SDVTList VTList = DAG.getVTList(EquivLoadVT, MVT::Other);

  SDValue Load
    = DAG.getMemIntrinsicNode(
      IsIntrinsic ? (unsigned)ISD::INTRINSIC_W_CHAIN : Opcode, DL,
      VTList, Ops, M->getMemoryVT(),
      M->getMemOperand());

  SDValue Adjusted = adjustLoadValueTypeImpl(Load, LoadVT, DL, DAG, Unpacked);

  return DAG.getMergeValues({ Adjusted, Load.getValue(1) }, DL);
}

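// Lower a buffer load intrinsic. D16 (16-bit element) results go through
// adjustLoadValueType; sub-dword scalar loads use the byte/short buffer load
// nodes; loads of an illegal vector type are performed as an equivalent legal
// type and bitcast back.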
SDValue SITargetLowering::lowerIntrinsicLoad(MemSDNode *M, bool IsFormat,
                                             SelectionDAG &DAG,
                                             ArrayRef<SDValue> Ops) const {
  SDLoc DL(M);
  EVT LoadVT = M->getValueType(0);
  EVT EltType = LoadVT.getScalarType();
  EVT IntVT = LoadVT.changeTypeToInteger();

  bool IsD16 = IsFormat && (EltType.getSizeInBits() == 16);

  unsigned Opc =
      IsFormat ? AMDGPUISD::BUFFER_LOAD_FORMAT : AMDGPUISD::BUFFER_LOAD;

  if (IsD16) {
    return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16, M, DAG, Ops);
  }

  // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics.
  if (!IsD16 && !LoadVT.isVector() && EltType.getSizeInBits() < 32)
    return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M);

  if (isTypeLegal(LoadVT)) {
    return getMemIntrinsicNode(Opc, DL, M->getVTList(), Ops, IntVT,
                               M->getMemOperand(), DAG);
  }

  EVT CastVT = getEquivalentMemType(*DAG.getContext(), LoadVT);
  SDVTList VTList = DAG.getVTList(CastVT, MVT::Other);
  SDValue MemNode = getMemIntrinsicNode(Opc, DL, VTList, Ops, CastVT,
                                        M->getMemOperand(), DAG);
  return DAG.getMergeValues(
      {DAG.getNode(ISD::BITCAST, DL, LoadVT, MemNode), MemNode.getValue(1)},
      DL);
}

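// Lower the llvm.amdgcn.icmp intrinsic: compare operands 1 and 2 using the
// integer predicate encoded in operand 3 and produce a wavefront-sized lane
// mask, zero-extended or truncated to the result type. Illegal i16 operands
// are promoted to i32 first.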
static SDValue lowerICMPIntrinsic(const SITargetLowering &TLI,
                                  SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);
  const auto *CD = cast<ConstantSDNode>(N->getOperand(3));
  unsigned CondCode = CD->getZExtValue();
  if (!ICmpInst::isIntPredicate(static_cast<ICmpInst::Predicate>(CondCode)))
    return DAG.getUNDEF(VT);

  ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode);

  SDValue LHS = N->getOperand(1);
  SDValue RHS = N->getOperand(2);

  SDLoc DL(N);

  EVT CmpVT = LHS.getValueType();
  if (CmpVT == MVT::i16 && !TLI.isTypeLegal(MVT::i16)) {
    unsigned PromoteOp = ICmpInst::isSigned(IcInput) ?
      ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    LHS = DAG.getNode(PromoteOp, DL, MVT::i32, LHS);
    RHS = DAG.getNode(PromoteOp, DL, MVT::i32, RHS);
  }

  ISD::CondCode CCOpcode = getICmpCondCode(IcInput);

  unsigned WavefrontSize = TLI.getSubtarget()->getWavefrontSize();
  EVT CCVT = EVT::getIntegerVT(*DAG.getContext(), WavefrontSize);

  SDValue SetCC = DAG.getNode(AMDGPUISD::SETCC, DL, CCVT, LHS, RHS,
                              DAG.getCondCode(CCOpcode));
  if (VT.bitsEq(CCVT))
    return SetCC;
  return DAG.getZExtOrTrunc(SetCC, DL, VT);
}

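// The llvm.amdgcn.fcmp analogue of the above; f16 operands are extended to
// f32 first when f16 is not a legal type.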
static SDValue lowerFCMPIntrinsic(const SITargetLowering &TLI,
                                  SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);
  const auto *CD = cast<ConstantSDNode>(N->getOperand(3));

  unsigned CondCode = CD->getZExtValue();
  if (!FCmpInst::isFPPredicate(static_cast<FCmpInst::Predicate>(CondCode)))
    return DAG.getUNDEF(VT);

  SDValue Src0 = N->getOperand(1);
  SDValue Src1 = N->getOperand(2);
  EVT CmpVT = Src0.getValueType();
  SDLoc SL(N);

  if (CmpVT == MVT::f16 && !TLI.isTypeLegal(CmpVT)) {
    Src0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
    Src1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);
  }

  FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode);
  ISD::CondCode CCOpcode = getFCmpCondCode(IcInput);
  unsigned WavefrontSize = TLI.getSubtarget()->getWavefrontSize();
  EVT CCVT = EVT::getIntegerVT(*DAG.getContext(), WavefrontSize);
  SDValue SetCC = DAG.getNode(AMDGPUISD::SETCC, SL, CCVT, Src0,
                              Src1, DAG.getCondCode(CCOpcode));
  if (VT.bitsEq(CCVT))
    return SetCC;
  return DAG.getZExtOrTrunc(SetCC, SL, VT);
}

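// Lower llvm.amdgcn.ballot to AMDGPUISD::SETCC, folding the constant cases:
// (ballot 0) is 0 and (ballot 1) is a copy of the exec mask.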
static SDValue lowerBALLOTIntrinsic(const SITargetLowering &TLI, SDNode *N,
                                    SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);
  SDValue Src = N->getOperand(1);
  SDLoc SL(N);

  if (Src.getOpcode() == ISD::SETCC) {
    // (ballot (ISD::SETCC ...)) -> (AMDGPUISD::SETCC ...)
    return DAG.getNode(AMDGPUISD::SETCC, SL, VT, Src.getOperand(0),
                       Src.getOperand(1), Src.getOperand(2));
  }
  if (const ConstantSDNode *Arg = dyn_cast<ConstantSDNode>(Src)) {
    // (ballot 0) -> 0
    if (Arg->isZero())
      return DAG.getConstant(0, SL, VT);

    // (ballot 1) -> EXEC/EXEC_LO
    if (Arg->isOne()) {
      Register Exec;
      if (VT.getScalarSizeInBits() == 32)
        Exec = AMDGPU::EXEC_LO;
      else if (VT.getScalarSizeInBits() == 64)
        Exec = AMDGPU::EXEC;
      else
        return SDValue();

      return DAG.getCopyFromReg(DAG.getEntryNode(), SL, Exec, VT);
    }
  }

  // (ballot (i1 $src)) -> (AMDGPUISD::SETCC (i32 (zext $src)) (i32 0)
  // ISD::SETNE)
  return DAG.getNode(
      AMDGPUISD::SETCC, SL, VT, DAG.getZExtOrTrunc(Src, SL, MVT::i32),
      DAG.getConstant(0, SL, MVT::i32), DAG.getCondCode(ISD::SETNE));
}

void SITargetLowering::ReplaceNodeResults(SDNode *N,
                                          SmallVectorImpl<SDValue> &Results,
                                          SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::INSERT_VECTOR_ELT: {
    if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG))
      Results.push_back(Res);
    return;
  }
  case ISD::EXTRACT_VECTOR_ELT: {
    if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG))
      Results.push_back(Res);
    return;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    switch (IID) {
    case Intrinsic::amdgcn_cvt_pkrtz: {
      SDValue Src0 = N->getOperand(1);
      SDValue Src1 = N->getOperand(2);
      SDLoc SL(N);
      SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, SL, MVT::i32,
                                Src0, Src1);
      Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Cvt));
      return;
    }
    case Intrinsic::amdgcn_cvt_pknorm_i16:
    case Intrinsic::amdgcn_cvt_pknorm_u16:
    case Intrinsic::amdgcn_cvt_pk_i16:
    case Intrinsic::amdgcn_cvt_pk_u16: {
      SDValue Src0 = N->getOperand(1);
      SDValue Src1 = N->getOperand(2);
      SDLoc SL(N);
      unsigned Opcode;

      if (IID == Intrinsic::amdgcn_cvt_pknorm_i16)
        Opcode = AMDGPUISD::CVT_PKNORM_I16_F32;
      else if (IID == Intrinsic::amdgcn_cvt_pknorm_u16)
        Opcode = AMDGPUISD::CVT_PKNORM_U16_F32;
      else if (IID == Intrinsic::amdgcn_cvt_pk_i16)
        Opcode = AMDGPUISD::CVT_PK_I16_I32;
      else
        Opcode = AMDGPUISD::CVT_PK_U16_U32;

      EVT VT = N->getValueType(0);
      if (isTypeLegal(VT))
        Results.push_back(DAG.getNode(Opcode, SL, VT, Src0, Src1));
      else {
        SDValue Cvt = DAG.getNode(Opcode, SL, MVT::i32, Src0, Src1);
        Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, Cvt));
      }
      return;
    }
    }
    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    if (SDValue Res = LowerINTRINSIC_W_CHAIN(SDValue(N, 0), DAG)) {
      if (Res.getOpcode() == ISD::MERGE_VALUES) {
        // FIXME: Hacky
        for (unsigned I = 0; I < Res.getNumOperands(); I++) {
          Results.push_back(Res.getOperand(I));
        }
      } else {
        Results.push_back(Res);
        Results.push_back(Res.getValue(1));
      }
      return;
    }

    break;
  }
  case ISD::SELECT: {
    SDLoc SL(N);
    EVT VT = N->getValueType(0);
    EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
    SDValue LHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(1));
    SDValue RHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(2));

    EVT SelectVT = NewVT;
    if (NewVT.bitsLT(MVT::i32)) {
      LHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, LHS);
      RHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, RHS);
      SelectVT = MVT::i32;
    }

    SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, SelectVT,
                                    N->getOperand(0), LHS, RHS);

    if (NewVT != SelectVT)
      NewSelect = DAG.getNode(ISD::TRUNCATE, SL, NewVT, NewSelect);
    Results.push_back(DAG.getNode(ISD::BITCAST, SL, VT, NewSelect));
    return;
  }
  case ISD::FNEG: {
    if (N->getValueType(0) != MVT::v2f16)
      break;

    SDLoc SL(N);
    SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0));

    SDValue Op = DAG.getNode(ISD::XOR, SL, MVT::i32,
                             BC,
                             DAG.getConstant(0x80008000, SL, MVT::i32));
    Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op));
    return;
  }
  case ISD::FABS: {
    if (N->getValueType(0) != MVT::v2f16)
      break;

    SDLoc SL(N);
    SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0));

    SDValue Op = DAG.getNode(ISD::AND, SL, MVT::i32,
                             BC,
                             DAG.getConstant(0x7fff7fff, SL, MVT::i32));
    Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op));
    return;
  }
  default:
    break;
  }
}

/// Helper function for LowerBRCOND
static SDNode *findUser(SDValue Value, unsigned Opcode) {
  SDNode *Parent = Value.getNode();
  for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end();
       I != E; ++I) {
    if (I.getUse().get() != Value)
      continue;

    if (I->getOpcode() == Opcode)
      return *I;
  }
  return nullptr;
}

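// Map a structured control-flow intrinsic to the corresponding AMDGPUISD
// node, or return 0 if this is not a control-flow intrinsic.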
unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const {
  if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
    switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) {
    case Intrinsic::amdgcn_if:
      return AMDGPUISD::IF;
    case Intrinsic::amdgcn_else:
      return AMDGPUISD::ELSE;
    case Intrinsic::amdgcn_loop:
      return AMDGPUISD::LOOP;
    case Intrinsic::amdgcn_end_cf:
      llvm_unreachable("should not occur");
    default:
      return 0;
    }
  }

  // break, if_break, else_break are all only used as inputs to loop, not
  // directly as branch conditions.
  return 0;
}

bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const {
  const Triple &TT = getTargetMachine().getTargetTriple();
  return (GV->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
          GV->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
         AMDGPU::shouldEmitConstantsToTextSection(TT);
}

bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const {
  // FIXME: Either avoid relying on address space here or change the default
  // address space for functions to avoid the explicit check.
  return (GV->getValueType()->isFunctionTy() ||
          !isNonGlobalAddrSpace(GV->getAddressSpace())) &&
         !shouldEmitFixup(GV) &&
         !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
}

bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const {
  return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV);
}

bool SITargetLowering::shouldUseLDSConstAddress(const GlobalValue *GV) const {
  if (!GV->hasExternalLinkage())
    return true;

  const auto OS = getTargetMachine().getTargetTriple().getOS();
  return OS == Triple::AMDHSA || OS == Triple::AMDPAL;
}

/// This transforms the control flow intrinsics to get the branch destination
/// as the last parameter. It also switches the branch target with BR if the
/// need arises.
SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
                                      SelectionDAG &DAG) const {
  SDLoc DL(BRCOND);

  SDNode *Intr = BRCOND.getOperand(1).getNode();
  SDValue Target = BRCOND.getOperand(2);
  SDNode *BR = nullptr;
  SDNode *SetCC = nullptr;

  if (Intr->getOpcode() == ISD::SETCC) {
    // As long as we negate the condition everything is fine
    SetCC = Intr;
    Intr = SetCC->getOperand(0).getNode();
  } else {
    // Get the target from BR if we don't negate the condition
    BR = findUser(BRCOND, ISD::BR);
    assert(BR && "brcond missing unconditional branch user");
    Target = BR->getOperand(1);
  }

  unsigned CFNode = isCFIntrinsic(Intr);
  if (CFNode == 0) {
    // This is a uniform branch so we don't need to legalize.
    return BRCOND;
  }

  bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID ||
                   Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN;

  assert(!SetCC ||
         (SetCC->getConstantOperandVal(1) == 1 &&
          cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() ==
              ISD::SETNE));

  // operands of the new intrinsic call
  SmallVector<SDValue, 4> Ops;
  if (HaveChain)
    Ops.push_back(BRCOND.getOperand(0));

  Ops.append(Intr->op_begin() + (HaveChain ? 2 : 1), Intr->op_end());
  Ops.push_back(Target);

  ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end());

  // build the new intrinsic call
  SDNode *Result = DAG.getNode(CFNode, DL, DAG.getVTList(Res), Ops).getNode();

  if (!HaveChain) {
    SDValue Ops[] = {
      SDValue(Result, 0),
      BRCOND.getOperand(0)
    };

    Result = DAG.getMergeValues(Ops, DL).getNode();
  }

  if (BR) {
    // Give the branch instruction our target
    SDValue Ops[] = {
      BR->getOperand(0),
      BRCOND.getOperand(2)
    };
    SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops);
    DAG.ReplaceAllUsesWith(BR, NewBR.getNode());
  }

  SDValue Chain = SDValue(Result, Result->getNumValues() - 1);

  // Copy the intrinsic results to registers
  for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) {
    SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg);
    if (!CopyToReg)
      continue;

    Chain = DAG.getCopyToReg(
        Chain, DL,
        CopyToReg->getOperand(1),
        SDValue(Result, i - 1),
        SDValue());

    DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0));
  }

  // Remove the old intrinsic from the chain
  DAG.ReplaceAllUsesOfValueWith(
      SDValue(Intr, Intr->getNumValues() - 1),
      Intr->getOperand(0));

  return Chain;
}

SDValue SITargetLowering::LowerRETURNADDR(SDValue Op,
                                          SelectionDAG &DAG) const {
  MVT VT = Op.getSimpleValueType();
  SDLoc DL(Op);
  // Check the depth.
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0)
    return DAG.getConstant(0, DL, VT);

  MachineFunction &MF = DAG.getMachineFunction();
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  // Check for kernel and shader functions.
  if (Info->isEntryFunction())
    return DAG.getConstant(0, DL, VT);

  MachineFrameInfo &MFI = MF.getFrameInfo();
  // There is a call to @llvm.returnaddress in this function.
  MFI.setReturnAddressIsTaken(true);

  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
  // Get the return address reg and mark it as an implicit live-in.
  Register Reg = MF.addLiveIn(TRI->getReturnAddressReg(MF),
                              getRegClassFor(VT, Op.getNode()->isDivergent()));

  return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, VT);
}

SDValue SITargetLowering::getFPExtOrFPRound(SelectionDAG &DAG,
                                            SDValue Op,
                                            const SDLoc &DL,
                                            EVT VT) const {
  return Op.getValueType().bitsLE(VT) ?
      DAG.getNode(ISD::FP_EXTEND, DL, VT, Op) :
      DAG.getNode(ISD::FP_ROUND, DL, VT, Op,
                  DAG.getTargetConstant(0, DL, MVT::i32));
}

SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
  assert(Op.getValueType() == MVT::f16 &&
         "Do not know how to custom lower FP_ROUND for non-f16 type");

  SDValue Src = Op.getOperand(0);
  EVT SrcVT = Src.getValueType();
  if (SrcVT != MVT::f64)
    return Op;

  SDLoc DL(Op);

  SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src);
  SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16);
  return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc);
}

SDValue SITargetLowering::lowerFMINNUM_FMAXNUM(SDValue Op,
                                               SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  const MachineFunction &MF = DAG.getMachineFunction();
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  bool IsIEEEMode = Info->getMode().IEEE;

  // FIXME: Assert during selection that this is only selected for
  // ieee_mode. Currently a combine can produce the ieee version for non-ieee
  // mode functions, but this happens to be OK since it's only done in cases
  // where there is known no sNaN.
  if (IsIEEEMode)
    return expandFMINNUM_FMAXNUM(Op.getNode(), DAG);

  if (VT == MVT::v4f16 || VT == MVT::v8f16)
    return splitBinaryVectorOp(Op, DAG);
  return Op;
}

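// Lower [su]mulo. For a power-of-2 constant RHS the multiply becomes a shift,
// with overflow detected by shifting back and comparing (e.g. umulo(x, 16) ->
// { x << 4, ((x << 4) >> 4) != x }); otherwise overflow is detected by
// comparing the high half of the product against the expected sign/zero bits.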
SDValue SITargetLowering::lowerXMULO(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc SL(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  bool isSigned = Op.getOpcode() == ISD::SMULO;

  if (ConstantSDNode *RHSC = isConstOrConstSplat(RHS)) {
    const APInt &C = RHSC->getAPIntValue();
    // mulo(X, 1 << S) -> { X << S, (X << S) >> S != X }
    if (C.isPowerOf2()) {
      // smulo(x, signed_min) is the same as umulo(x, signed_min).
      bool UseArithShift = isSigned && !C.isMinSignedValue();
      SDValue ShiftAmt = DAG.getConstant(C.logBase2(), SL, MVT::i32);
      SDValue Result = DAG.getNode(ISD::SHL, SL, VT, LHS, ShiftAmt);
      SDValue Overflow = DAG.getSetCC(SL, MVT::i1,
          DAG.getNode(UseArithShift ? ISD::SRA : ISD::SRL,
                      SL, VT, Result, ShiftAmt),
          LHS, ISD::SETNE);
      return DAG.getMergeValues({ Result, Overflow }, SL);
    }
  }

  SDValue Result = DAG.getNode(ISD::MUL, SL, VT, LHS, RHS);
  SDValue Top = DAG.getNode(isSigned ? ISD::MULHS : ISD::MULHU,
                            SL, VT, LHS, RHS);

  SDValue Sign = isSigned
    ? DAG.getNode(ISD::SRA, SL, VT, Result,
                  DAG.getConstant(VT.getScalarSizeInBits() - 1, SL, MVT::i32))
    : DAG.getConstant(0, SL, VT);
  SDValue Overflow = DAG.getSetCC(SL, MVT::i1, Top, Sign, ISD::SETNE);

  return DAG.getMergeValues({ Result, Overflow }, SL);
}

SDValue SITargetLowering::lowerXMUL_LOHI(SDValue Op, SelectionDAG &DAG) const {
  if (Op->isDivergent()) {
    // Select to V_MAD_[IU]64_[IU]32.
    return Op;
  }
  if (Subtarget->hasSMulHi()) {
    // Expand to S_MUL_I32 + S_MUL_HI_[IU]32.
    return SDValue();
  }
  // The multiply is uniform but we would have to use V_MUL_HI_[IU]32 to
  // calculate the high part, so we might as well do the whole thing with
  // V_MAD_[IU]64_[IU]32.
  return Op;
}

SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const {
  if (!Subtarget->isTrapHandlerEnabled() ||
      Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbi::AMDHSA)
    return lowerTrapEndpgm(Op, DAG);

  if (Optional<uint8_t> HsaAbiVer = AMDGPU::getHsaAbiVersion(Subtarget)) {
    switch (*HsaAbiVer) {
    case ELF::ELFABIVERSION_AMDGPU_HSA_V2:
    case ELF::ELFABIVERSION_AMDGPU_HSA_V3:
      return lowerTrapHsaQueuePtr(Op, DAG);
    case ELF::ELFABIVERSION_AMDGPU_HSA_V4:
    case ELF::ELFABIVERSION_AMDGPU_HSA_V5:
      return Subtarget->supportsGetDoorbellID() ?
          lowerTrapHsa(Op, DAG) : lowerTrapHsaQueuePtr(Op, DAG);
    }
  }

  llvm_unreachable("Unknown trap handler");
}

SDValue SITargetLowering::lowerTrapEndpgm(
    SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Chain = Op.getOperand(0);
  return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain);
}

SDValue SITargetLowering::loadImplicitKernelArgument(SelectionDAG &DAG, MVT VT,
    const SDLoc &DL, Align Alignment, ImplicitParameter Param) const {
  MachineFunction &MF = DAG.getMachineFunction();
  uint64_t Offset = getImplicitParameterOffset(MF, Param);
  SDValue Ptr = lowerKernArgParameterPtr(DAG, DL, DAG.getEntryNode(), Offset);
  MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
  return DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, PtrInfo, Alignment,
                     MachineMemOperand::MODereferenceable |
                         MachineMemOperand::MOInvariant);
}

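// Trap lowering for HSA ABIs where the trap handler expects the queue pointer
// in SGPR0:SGPR1.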
SDValue SITargetLowering::lowerTrapHsaQueuePtr(
    SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Chain = Op.getOperand(0);

  SDValue QueuePtr;
  // For code object version 5, QueuePtr is passed through implicit kernarg.
  if (AMDGPU::getAmdhsaCodeObjectVersion() == 5) {
    QueuePtr =
        loadImplicitKernelArgument(DAG, MVT::i64, SL, Align(8), QUEUE_PTR);
  } else {
    MachineFunction &MF = DAG.getMachineFunction();
    SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
    Register UserSGPR = Info->getQueuePtrUserSGPR();

    if (UserSGPR == AMDGPU::NoRegister) {
      // We probably are in a function incorrectly marked with
      // amdgpu-no-queue-ptr. This is undefined. We don't want to delete the
      // trap, so just use a null pointer.
      QueuePtr = DAG.getConstant(0, SL, MVT::i64);
    } else {
      QueuePtr = CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass, UserSGPR,
                                      MVT::i64);
    }
  }

  SDValue SGPR01 = DAG.getRegister(AMDGPU::SGPR0_SGPR1, MVT::i64);
  SDValue ToReg = DAG.getCopyToReg(Chain, SL, SGPR01,
                                   QueuePtr, SDValue());

  uint64_t TrapID =
      static_cast<uint64_t>(GCNSubtarget::TrapID::LLVMAMDHSATrap);
  SDValue Ops[] = {
    ToReg,
    DAG.getTargetConstant(TrapID, SL, MVT::i16),
    SGPR01,
    ToReg.getValue(1)
  };
  return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
}

SDValue SITargetLowering::lowerTrapHsa(
    SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Chain = Op.getOperand(0);

  uint64_t TrapID =
      static_cast<uint64_t>(GCNSubtarget::TrapID::LLVMAMDHSATrap);
  SDValue Ops[] = {
    Chain,
    DAG.getTargetConstant(TrapID, SL, MVT::i16)
  };
  return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
}

SDValue SITargetLowering::lowerDEBUGTRAP(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Chain = Op.getOperand(0);
  MachineFunction &MF = DAG.getMachineFunction();

  if (!Subtarget->isTrapHandlerEnabled() ||
      Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbi::AMDHSA) {
    DiagnosticInfoUnsupported NoTrap(MF.getFunction(),
                                     "debugtrap handler not supported",
                                     Op.getDebugLoc(),
                                     DS_Warning);
    LLVMContext &Ctx = MF.getFunction().getContext();
    Ctx.diagnose(NoTrap);
    return Chain;
  }

  uint64_t TrapID =
      static_cast<uint64_t>(GCNSubtarget::TrapID::LLVMAMDHSADebugTrap);
  SDValue Ops[] = {
    Chain,
    DAG.getTargetConstant(TrapID, SL, MVT::i16)
  };
  return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
}

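// Return the 32-bit aperture (the high half of a 64-bit flat address) for the
// local or private segment, reading it from the aperture registers, an
// implicit kernel argument, or the queue pointer, depending on the subtarget
// and code object version.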
SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL,
                                             SelectionDAG &DAG) const {
  // FIXME: Use inline constants (src_{shared, private}_base) instead.
  if (Subtarget->hasApertureRegs()) {
    unsigned Offset = AS == AMDGPUAS::LOCAL_ADDRESS ?
        AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE :
        AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE;
    unsigned WidthM1 = AS == AMDGPUAS::LOCAL_ADDRESS ?
        AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE :
        AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE;
    unsigned Encoding =
        AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ |
        Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ |
        WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_;

    SDValue EncodingImm = DAG.getTargetConstant(Encoding, DL, MVT::i16);
    SDValue ApertureReg = SDValue(
        DAG.getMachineNode(AMDGPU::S_GETREG_B32, DL, MVT::i32, EncodingImm), 0);
    SDValue ShiftAmount = DAG.getTargetConstant(WidthM1 + 1, DL, MVT::i32);
    return DAG.getNode(ISD::SHL, DL, MVT::i32, ApertureReg, ShiftAmount);
  }

  // For code object version 5, private_base and shared_base are passed through
  // implicit kernargs.
  if (AMDGPU::getAmdhsaCodeObjectVersion() == 5) {
    ImplicitParameter Param =
        (AS == AMDGPUAS::LOCAL_ADDRESS) ? SHARED_BASE : PRIVATE_BASE;
    return loadImplicitKernelArgument(DAG, MVT::i32, DL, Align(4), Param);
  }

  MachineFunction &MF = DAG.getMachineFunction();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  Register UserSGPR = Info->getQueuePtrUserSGPR();
  if (UserSGPR == AMDGPU::NoRegister) {
    // We probably are in a function incorrectly marked with
    // amdgpu-no-queue-ptr. This is undefined.
    return DAG.getUNDEF(MVT::i32);
  }

  SDValue QueuePtr = CreateLiveInRegister(
      DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);

  // Offset into amd_queue_t for group_segment_aperture_base_hi /
  // private_segment_aperture_base_hi.
  uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 0x40 : 0x44;

  SDValue Ptr =
      DAG.getObjectPtrOffset(DL, QueuePtr, TypeSize::Fixed(StructOffset));

  // TODO: Use custom target PseudoSourceValue.
  // TODO: We should use the value from the IR intrinsic call, but it might
  // not be available, and how would we get it?
  MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
  return DAG.getLoad(MVT::i32, DL, QueuePtr.getValue(1), Ptr, PtrInfo,
                     commonAlignment(Align(64), StructOffset),
                     MachineMemOperand::MODereferenceable |
                         MachineMemOperand::MOInvariant);
}

/// Return true if the value is a known valid address, such that a null check
/// is not necessary.
static bool isKnownNonNull(SDValue Val, SelectionDAG &DAG,
                           const AMDGPUTargetMachine &TM, unsigned AddrSpace) {
  if (isa<FrameIndexSDNode>(Val) || isa<GlobalAddressSDNode>(Val) ||
      isa<BasicBlockSDNode>(Val))
    return true;

  if (auto *ConstVal = dyn_cast<ConstantSDNode>(Val))
    return ConstVal->getSExtValue() != TM.getNullPointerValue(AddrSpace);

  // TODO: Search through arithmetic, handle arguments and loads
  // marked nonnull.
  return false;
}

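// Lower an addrspacecast. flat -> local/private truncates the pointer to 32
// bits; local/private -> flat rebuilds the 64-bit flat address from the
// 32-bit offset and the segment aperture. Both directions select the
// destination null value when the source is null, unless the source is known
// non-null.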
SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
                                             SelectionDAG &DAG) const {
  SDLoc SL(Op);
  const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op);

  SDValue Src = ASC->getOperand(0);
  SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64);
  unsigned SrcAS = ASC->getSrcAddressSpace();

  const AMDGPUTargetMachine &TM =
      static_cast<const AMDGPUTargetMachine &>(getTargetMachine());

  // flat -> local/private
  if (SrcAS == AMDGPUAS::FLAT_ADDRESS) {
    unsigned DestAS = ASC->getDestAddressSpace();

    if (DestAS == AMDGPUAS::LOCAL_ADDRESS ||
        DestAS == AMDGPUAS::PRIVATE_ADDRESS) {
      SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);

      if (isKnownNonNull(Src, DAG, TM, SrcAS))
        return Ptr;

      unsigned NullVal = TM.getNullPointerValue(DestAS);
      SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
      SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE);

      return DAG.getNode(ISD::SELECT, SL, MVT::i32, NonNull, Ptr,
                         SegmentNullPtr);
    }
  }

  // local/private -> flat
  if (ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
    if (SrcAS == AMDGPUAS::LOCAL_ADDRESS ||
        SrcAS == AMDGPUAS::PRIVATE_ADDRESS) {

      SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG);
      SDValue CvtPtr =
          DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture);
      CvtPtr = DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr);

      if (isKnownNonNull(Src, DAG, TM, SrcAS))
        return CvtPtr;

      unsigned NullVal = TM.getNullPointerValue(SrcAS);
      SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);

      SDValue NonNull
        = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE);

      return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull, CvtPtr,
                         FlatNullPtr);
    }
  }

  if (SrcAS == AMDGPUAS::CONSTANT_ADDRESS_32BIT &&
      Op.getValueType() == MVT::i64) {
    const SIMachineFunctionInfo *Info =
        DAG.getMachineFunction().getInfo<SIMachineFunctionInfo>();
    SDValue Hi = DAG.getConstant(Info->get32BitAddressHighBits(), SL, MVT::i32);
    SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Hi);
    return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
  }

  if (ASC->getDestAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT &&
      Src.getValueType() == MVT::i64)
    return DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);

  // global <-> flat are no-ops and never emitted.

  const MachineFunction &MF = DAG.getMachineFunction();
  DiagnosticInfoUnsupported InvalidAddrSpaceCast(
      MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
  DAG.getContext()->diagnose(InvalidAddrSpaceCast);

  return DAG.getUNDEF(ASC->getValueType(0));
}

// This lowers an INSERT_SUBVECTOR by extracting the individual elements from
// the small vector and inserting them into the big vector. That is better than
// the default expansion of doing it via a stack slot. Even though the use of
// the stack slot would be optimized away afterwards, the stack slot itself
// remains.
SDValue SITargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDValue Vec = Op.getOperand(0);
  SDValue Ins = Op.getOperand(1);
  SDValue Idx = Op.getOperand(2);
  EVT VecVT = Vec.getValueType();
  EVT InsVT = Ins.getValueType();
  EVT EltVT = VecVT.getVectorElementType();
  unsigned InsNumElts = InsVT.getVectorNumElements();
  unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
  SDLoc SL(Op);

  for (unsigned I = 0; I != InsNumElts; ++I) {
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Ins,
                              DAG.getConstant(I, SL, MVT::i32));
    Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, VecVT, Vec, Elt,
                      DAG.getConstant(IdxVal + I, SL, MVT::i32));
  }
  return Vec;
}

SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDValue Vec = Op.getOperand(0);
  SDValue InsVal = Op.getOperand(1);
  SDValue Idx = Op.getOperand(2);
  EVT VecVT = Vec.getValueType();
  EVT EltVT = VecVT.getVectorElementType();
  unsigned VecSize = VecVT.getSizeInBits();
  unsigned EltSize = EltVT.getSizeInBits();

  assert(VecSize <= 64);

  unsigned NumElts = VecVT.getVectorNumElements();
  SDLoc SL(Op);
  auto KIdx = dyn_cast<ConstantSDNode>(Idx);

  if (NumElts == 4 && EltSize == 16 && KIdx) {
    SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Vec);

    SDValue LoHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec,
                                 DAG.getConstant(0, SL, MVT::i32));
    SDValue HiHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec,
                                 DAG.getConstant(1, SL, MVT::i32));

    SDValue LoVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, LoHalf);
    SDValue HiVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, HiHalf);

    unsigned Idx = KIdx->getZExtValue();
    bool InsertLo = Idx < 2;
    SDValue InsHalf = DAG.getNode(
        ISD::INSERT_VECTOR_ELT, SL, MVT::v2i16,
        InsertLo ? LoVec : HiVec,
        DAG.getNode(ISD::BITCAST, SL, MVT::i16, InsVal),
        DAG.getConstant(InsertLo ? Idx : (Idx - 2), SL, MVT::i32));

    InsHalf = DAG.getNode(ISD::BITCAST, SL, MVT::i32, InsHalf);

    SDValue Concat = InsertLo ?
        DAG.getBuildVector(MVT::v2i32, SL, { InsHalf, HiHalf }) :
        DAG.getBuildVector(MVT::v2i32, SL, { LoHalf, InsHalf });

    return DAG.getNode(ISD::BITCAST, SL, VecVT, Concat);
  }

  if (isa<ConstantSDNode>(Idx))
    return SDValue();

  MVT IntVT = MVT::getIntegerVT(VecSize);

  // Avoid stack access for dynamic indexing.
  // v_bfi_b32 (v_bfm_b32 16, (shl idx, 16)), val, vec
5821 | ||||
5822 | // Create a congruent vector with the target value in each element so that | |||
5823 | // the required element can be masked and ORed into the target vector. | |||
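// Worked example (v4i16, so EltSize = 16, with dynamic index Idx):
//   ScaledIdx = Idx << 4                // element index -> bit index
//   BFM       = 0xffff << ScaledIdx     // mask selecting the target element
//   Result    = (BFM & splat(InsVal)) | (~BFM & Vec)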
5824 | SDValue ExtVal = DAG.getNode(ISD::BITCAST, SL, IntVT, | |||
5825 | DAG.getSplatBuildVector(VecVT, SL, InsVal)); | |||
5826 | ||||
5827 | assert(isPowerOf2_32(EltSize));
5828 | SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32); | |||
5829 | ||||
5830 | // Convert vector index to bit-index. | |||
5831 | SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor); | |||
5832 | ||||
5833 | SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec); | |||
5834 | SDValue BFM = DAG.getNode(ISD::SHL, SL, IntVT, | |||
5835 | DAG.getConstant(0xffff, SL, IntVT), | |||
5836 | ScaledIdx); | |||
5837 | ||||
5838 | SDValue LHS = DAG.getNode(ISD::AND, SL, IntVT, BFM, ExtVal); | |||
5839 | SDValue RHS = DAG.getNode(ISD::AND, SL, IntVT, | |||
5840 | DAG.getNOT(SL, BFM, IntVT), BCVec); | |||
5841 | ||||
5842 | SDValue BFI = DAG.getNode(ISD::OR, SL, IntVT, LHS, RHS); | |||
5843 | return DAG.getNode(ISD::BITCAST, SL, VecVT, BFI); | |||
5844 | } | |||
5845 | ||||
5846 | SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op, | |||
5847 | SelectionDAG &DAG) const { | |||
5848 | SDLoc SL(Op); | |||
5849 | ||||
5850 | EVT ResultVT = Op.getValueType(); | |||
5851 | SDValue Vec = Op.getOperand(0); | |||
5852 | SDValue Idx = Op.getOperand(1); | |||
5853 | EVT VecVT = Vec.getValueType(); | |||
5854 | unsigned VecSize = VecVT.getSizeInBits(); | |||
5855 | EVT EltVT = VecVT.getVectorElementType(); | |||
5856 | ||||
5857 | DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr); | |||
5858 | ||||
5859 | // Make sure we do any optimizations that will make it easier to fold
5860 | // source modifiers before obscuring the value with bit operations.
5861 | ||||
5862 | // XXX - Why doesn't this get called when vector_shuffle is expanded? | |||
5863 | if (SDValue Combined = performExtractVectorEltCombine(Op.getNode(), DCI)) | |||
5864 | return Combined; | |||
5865 | ||||
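// For 128-bit vectors, split into two 64-bit halves, select the half that
// holds the element, and extract within that half. E.g. for v4i32 with
// dynamic index Idx: Half = (Idx > 1) ? Hi : Lo, NewIdx = Idx & 1.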
5866 | if (VecSize == 128) { | |||
5867 | SDValue Lo, Hi; | |||
5868 | EVT LoVT, HiVT; | |||
5869 | SDValue V2 = DAG.getBitcast(MVT::v2i64, Vec); | |||
5870 | std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT); | |||
5871 | Lo = | |||
5872 | DAG.getBitcast(LoVT, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i64, | |||
5873 | V2, DAG.getConstant(0, SL, MVT::i32))); | |||
5874 | Hi = | |||
5875 | DAG.getBitcast(HiVT, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i64, | |||
5876 | V2, DAG.getConstant(1, SL, MVT::i32))); | |||
5877 | EVT IdxVT = Idx.getValueType(); | |||
5878 | unsigned NElem = VecVT.getVectorNumElements(); | |||
5879 | assert(isPowerOf2_32(NElem));
5880 | SDValue IdxMask = DAG.getConstant(NElem / 2 - 1, SL, IdxVT); | |||
5881 | SDValue NewIdx = DAG.getNode(ISD::AND, SL, IdxVT, Idx, IdxMask); | |||
5882 | SDValue Half = DAG.getSelectCC(SL, Idx, IdxMask, Hi, Lo, ISD::SETUGT); | |||
5883 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Half, NewIdx); | |||
5884 | } | |||
5885 | ||||
5886 | assert(VecSize <= 64);
5887 | ||||
5888 | unsigned EltSize = EltVT.getSizeInBits(); | |||
5889 | assert(isPowerOf2_32(EltSize));
5890 | ||||
5891 | MVT IntVT = MVT::getIntegerVT(VecSize); | |||
5892 | SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32); | |||
5893 | ||||
5894 | // Convert vector index to bit-index (* EltSize) | |||
5895 | SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor); | |||
5896 | ||||
5897 | SDValue BC = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec); | |||
5898 | SDValue Elt = DAG.getNode(ISD::SRL, SL, IntVT, BC, ScaledIdx); | |||
5899 | ||||
5900 | if (ResultVT == MVT::f16) { | |||
5901 | SDValue Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Elt); | |||
5902 | return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result); | |||
5903 | } | |||
5904 | ||||
5905 | return DAG.getAnyExtOrTrunc(Elt, SL, ResultVT); | |||
5906 | } | |||
5907 | ||||
5908 | static bool elementPairIsContiguous(ArrayRef<int> Mask, int Elt) { | |||
5909 | assert(Elt % 2 == 0);
5910 | return Mask[Elt + 1] == Mask[Elt] + 1 && (Mask[Elt] % 2 == 0); | |||
5911 | } | |||
5912 | ||||
5913 | SDValue SITargetLowering::lowerVECTOR_SHUFFLE(SDValue Op, | |||
5914 | SelectionDAG &DAG) const { | |||
5915 | SDLoc SL(Op); | |||
5916 | EVT ResultVT = Op.getValueType(); | |||
5917 | ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op); | |||
5918 | ||||
5919 | EVT PackVT = ResultVT.isInteger() ? MVT::v2i16 : MVT::v2f16; | |||
5920 | EVT EltVT = PackVT.getVectorElementType(); | |||
5921 | int SrcNumElts = Op.getOperand(0).getValueType().getVectorNumElements(); | |||
5922 | ||||
5923 | // vector_shuffle <0,1,6,7> lhs, rhs | |||
5924 | // -> concat_vectors (extract_subvector lhs, 0), (extract_subvector rhs, 2) | |||
5925 | // | |||
5926 | // vector_shuffle <6,7,2,3> lhs, rhs | |||
5927 | // -> concat_vectors (extract_subvector rhs, 2), (extract_subvector lhs, 2) | |||
5928 | // | |||
5929 | // vector_shuffle <6,7,0,1> lhs, rhs | |||
5930 | // -> concat_vectors (extract_subvector rhs, 2), (extract_subvector lhs, 0) | |||
5931 | ||||
5932 | // Avoid scalarizing when both halves are reading from consecutive elements. | |||
5933 | SmallVector<SDValue, 4> Pieces; | |||
5934 | for (int I = 0, N = ResultVT.getVectorNumElements(); I != N; I += 2) { | |||
5935 | if (elementPairIsContiguous(SVN->getMask(), I)) { | |||
5936 | const int Idx = SVN->getMaskElt(I); | |||
5937 | int VecIdx = Idx < SrcNumElts ? 0 : 1; | |||
5938 | int EltIdx = Idx < SrcNumElts ? Idx : Idx - SrcNumElts; | |||
5939 | SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL, | |||
5940 | PackVT, SVN->getOperand(VecIdx), | |||
5941 | DAG.getConstant(EltIdx, SL, MVT::i32)); | |||
5942 | Pieces.push_back(SubVec); | |||
5943 | } else { | |||
5944 | const int Idx0 = SVN->getMaskElt(I); | |||
5945 | const int Idx1 = SVN->getMaskElt(I + 1); | |||
5946 | int VecIdx0 = Idx0 < SrcNumElts ? 0 : 1; | |||
5947 | int VecIdx1 = Idx1 < SrcNumElts ? 0 : 1; | |||
5948 | int EltIdx0 = Idx0 < SrcNumElts ? Idx0 : Idx0 - SrcNumElts; | |||
5949 | int EltIdx1 = Idx1 < SrcNumElts ? Idx1 : Idx1 - SrcNumElts; | |||
5950 | ||||
5951 | SDValue Vec0 = SVN->getOperand(VecIdx0); | |||
5952 | SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, | |||
5953 | Vec0, DAG.getConstant(EltIdx0, SL, MVT::i32)); | |||
5954 | ||||
5955 | SDValue Vec1 = SVN->getOperand(VecIdx1); | |||
5956 | SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, | |||
5957 | Vec1, DAG.getConstant(EltIdx1, SL, MVT::i32)); | |||
5958 | Pieces.push_back(DAG.getBuildVector(PackVT, SL, { Elt0, Elt1 })); | |||
5959 | } | |||
5960 | } | |||
5961 | ||||
5962 | return DAG.getNode(ISD::CONCAT_VECTORS, SL, ResultVT, Pieces); | |||
5963 | } | |||
5964 | ||||
5965 | SDValue SITargetLowering::lowerBUILD_VECTOR(SDValue Op, | |||
5966 | SelectionDAG &DAG) const { | |||
5967 | SDLoc SL(Op); | |||
5968 | EVT VT = Op.getValueType(); | |||
5969 | ||||
5970 | if (VT == MVT::v4i16 || VT == MVT::v4f16 || | |||
5971 | VT == MVT::v8i16 || VT == MVT::v8f16) { | |||
5972 | EVT HalfVT = MVT::getVectorVT(VT.getVectorElementType().getSimpleVT(), | |||
5973 | VT.getVectorNumElements() / 2); | |||
5974 | MVT HalfIntVT = MVT::getIntegerVT(HalfVT.getSizeInBits()); | |||
5975 | ||||
5976 | // Turn into pair of packed build_vectors. | |||
5977 | // TODO: Special case for constants that can be materialized with s_mov_b64. | |||
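// E.g. (v4f16 (build_vector a, b, c, d)) becomes
//   (v4f16 (bitcast (v2i32 (build_vector
//       (i32 (bitcast (v2f16 (build_vector a, b)))),
//       (i32 (bitcast (v2f16 (build_vector c, d))))))))
// so each half can be handled as a packed 32-bit value.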
5978 | SmallVector<SDValue, 4> LoOps, HiOps; | |||
5979 | for (unsigned I = 0, E = VT.getVectorNumElements() / 2; I != E; ++I) { | |||
5980 | LoOps.push_back(Op.getOperand(I)); | |||
5981 | HiOps.push_back(Op.getOperand(I + E)); | |||
5982 | } | |||
5983 | SDValue Lo = DAG.getBuildVector(HalfVT, SL, LoOps); | |||
5984 | SDValue Hi = DAG.getBuildVector(HalfVT, SL, HiOps); | |||
5985 | ||||
5986 | SDValue CastLo = DAG.getNode(ISD::BITCAST, SL, HalfIntVT, Lo); | |||
5987 | SDValue CastHi = DAG.getNode(ISD::BITCAST, SL, HalfIntVT, Hi); | |||
5988 | ||||
5989 | SDValue Blend = DAG.getBuildVector(MVT::getVectorVT(HalfIntVT, 2), SL, | |||
5990 | { CastLo, CastHi }); | |||
5991 | return DAG.getNode(ISD::BITCAST, SL, VT, Blend); | |||
5992 | } | |||
5993 | ||||
5994 | assert(VT == MVT::v2f16 || VT == MVT::v2i16);
5995 | assert(!Subtarget->hasVOP3PInsts() && "this should be legal");
5996 | ||||
5997 | SDValue Lo = Op.getOperand(0); | |||
5998 | SDValue Hi = Op.getOperand(1); | |||
5999 | ||||
6000 | // Avoid adding defined bits with the zero_extend. | |||
6001 | if (Hi.isUndef()) { | |||
6002 | Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo); | |||
6003 | SDValue ExtLo = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Lo); | |||
6004 | return DAG.getNode(ISD::BITCAST, SL, VT, ExtLo); | |||
6005 | } | |||
6006 | ||||
6007 | Hi = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Hi); | |||
6008 | Hi = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Hi); | |||
6009 | ||||
6010 | SDValue ShlHi = DAG.getNode(ISD::SHL, SL, MVT::i32, Hi, | |||
6011 | DAG.getConstant(16, SL, MVT::i32)); | |||
6012 | if (Lo.isUndef()) | |||
6013 | return DAG.getNode(ISD::BITCAST, SL, VT, ShlHi); | |||
6014 | ||||
6015 | Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo); | |||
6016 | Lo = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Lo); | |||
6017 | ||||
6018 | SDValue Or = DAG.getNode(ISD::OR, SL, MVT::i32, Lo, ShlHi); | |||
6019 | return DAG.getNode(ISD::BITCAST, SL, VT, Or); | |||
6020 | } | |||
6021 | ||||
6022 | bool | |||
6023 | SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { | |||
6024 | // We can fold offsets for anything that doesn't require a GOT relocation. | |||
6025 | return (GA->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS || | |||
6026 | GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS || | |||
6027 | GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) && | |||
6028 | !shouldEmitGOTReloc(GA->getGlobal()); | |||
6029 | } | |||
6030 | ||||
6031 | static SDValue | |||
6032 | buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV, | |||
6033 | const SDLoc &DL, int64_t Offset, EVT PtrVT, | |||
6034 | unsigned GAFlags = SIInstrInfo::MO_NONE) { | |||
6035 | assert(isInt<32>(Offset + 4) && "32-bit offset is expected!");
6036 | // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode is | |||
6037 | // lowered to the following code sequence: | |||
6038 | // | |||
6039 | // For constant address space: | |||
6040 | // s_getpc_b64 s[0:1] | |||
6041 | // s_add_u32 s0, s0, $symbol | |||
6042 | // s_addc_u32 s1, s1, 0 | |||
6043 | // | |||
6044 | // s_getpc_b64 returns the address of the s_add_u32 instruction and then | |||
6045 | // a fixup or relocation is emitted to replace $symbol with a literal | |||
6046 | // constant, which is a pc-relative offset from the encoding of the $symbol | |||
6047 | // operand to the global variable. | |||
6048 | // | |||
6049 | // For global address space: | |||
6050 | // s_getpc_b64 s[0:1] | |||
6051 | // s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo | |||
6052 | // s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi | |||
6053 | // | |||
6054 | // s_getpc_b64 returns the address of the s_add_u32 instruction and then | |||
6055 | // fixups or relocations are emitted to replace $symbol@*@lo and | |||
6056 | // $symbol@*@hi with lower 32 bits and higher 32 bits of a literal constant, | |||
6057 | // which is a 64-bit pc-relative offset from the encoding of the $symbol | |||
6058 | // operand to the global variable. | |||
6059 | // | |||
6060 | // What we want here is an offset from the value returned by s_getpc | |||
6061 | // (which is the address of the s_add_u32 instruction) to the global | |||
6062 | // variable, but since the encoding of $symbol starts 4 bytes after the start | |||
6063 | // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too | |||
6064 | // small. This requires us to add 4 to the global variable offset in order to | |||
6065 | // compute the correct address. Similarly for the s_addc_u32 instruction, the | |||
6066 | // encoding of $symbol starts 12 bytes after the start of the s_add_u32 | |||
6067 | // instruction. | |||
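// Worked example: with Offset = 0 and GAFlags = MO_REL32, PtrLo carries
// the symbol with addend 4 ($symbol@rel32@lo) and PtrHi with addend 12
// ($symbol@rel32@hi), matching the byte distance from the s_getpc_b64
// result to each literal operand.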
6068 | SDValue PtrLo = | |||
6069 | DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4, GAFlags); | |||
6070 | SDValue PtrHi; | |||
6071 | if (GAFlags == SIInstrInfo::MO_NONE) { | |||
6072 | PtrHi = DAG.getTargetConstant(0, DL, MVT::i32); | |||
6073 | } else { | |||
6074 | PtrHi = | |||
6075 | DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 12, GAFlags + 1); | |||
6076 | } | |||
6077 | return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi); | |||
6078 | } | |||
6079 | ||||
6080 | SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI, | |||
6081 | SDValue Op, | |||
6082 | SelectionDAG &DAG) const { | |||
6083 | GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op); | |||
6084 | SDLoc DL(GSD); | |||
6085 | EVT PtrVT = Op.getValueType(); | |||
6086 | ||||
6087 | const GlobalValue *GV = GSD->getGlobal(); | |||
6088 | if ((GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS && | |||
6089 | shouldUseLDSConstAddress(GV)) || | |||
6090 | GSD->getAddressSpace() == AMDGPUAS::REGION_ADDRESS || | |||
6091 | GSD->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) { | |||
6092 | if (GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS && | |||
6093 | GV->hasExternalLinkage()) { | |||
6094 | Type *Ty = GV->getValueType(); | |||
6095 | // HIP uses an unsized array `extern __shared__ T s[]`, or a similar
6096 | // zero-sized type in other languages, to declare dynamic shared
6097 | // memory whose size is not known at compile time. It is allocated
6098 | // by the runtime and placed directly after the statically allocated
6099 | // ones. They all share the same offset.
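// E.g. in HIP (illustrative declaration):
//   extern __shared__ float dynLDS[];   // TypeAllocSize == 0
// lowers to GET_GROUPSTATICSIZE, i.e. the byte offset just past the
// statically allocated LDS.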
6100 | if (DAG.getDataLayout().getTypeAllocSize(Ty).isZero()) { | |||
6101 | assert(PtrVT == MVT::i32 && "32-bit pointer is expected.");
6102 | // Adjust alignment for that dynamic shared memory array. | |||
6103 | MFI->setDynLDSAlign(DAG.getDataLayout(), *cast<GlobalVariable>(GV)); | |||
6104 | return SDValue( | |||
6105 | DAG.getMachineNode(AMDGPU::GET_GROUPSTATICSIZE, DL, PtrVT), 0); | |||
6106 | } | |||
6107 | } | |||
6108 | return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG); | |||
6109 | } | |||
6110 | ||||
6111 | if (GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) { | |||
6112 | SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, GSD->getOffset(), | |||
6113 | SIInstrInfo::MO_ABS32_LO); | |||
6114 | return DAG.getNode(AMDGPUISD::LDS, DL, MVT::i32, GA); | |||
6115 | } | |||
6116 | ||||
6117 | if (shouldEmitFixup(GV)) | |||
6118 | return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT); | |||
6119 | else if (shouldEmitPCReloc(GV)) | |||
6120 | return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT, | |||
6121 | SIInstrInfo::MO_REL32); | |||
6122 | ||||
6123 | SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT, | |||
6124 | SIInstrInfo::MO_GOTPCREL32); | |||
6125 | ||||
6126 | Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext()); | |||
6127 | PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS); | |||
6128 | const DataLayout &DataLayout = DAG.getDataLayout(); | |||
6129 | Align Alignment = DataLayout.getABITypeAlign(PtrTy); | |||
6130 | MachinePointerInfo PtrInfo | |||
6131 | = MachinePointerInfo::getGOT(DAG.getMachineFunction()); | |||
6132 | ||||
6133 | return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Alignment, | |||
6134 | MachineMemOperand::MODereferenceable | | |||
6135 | MachineMemOperand::MOInvariant); | |||
6136 | } | |||
6137 | ||||
6138 | SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain, | |||
6139 | const SDLoc &DL, SDValue V) const { | |||
6140 | // We can't use S_MOV_B32 directly, because there is no way to specify m0 as | |||
6141 | // the destination register. | |||
6142 | // | |||
6143 | // We can't use CopyToReg, because MachineCSE won't combine COPY instructions, | |||
6144 | // so we will end up with redundant moves to m0. | |||
6145 | // | |||
6146 | // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result. | |||
6147 | ||||
6148 | // A Null SDValue creates a glue result. | |||
6149 | SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue, | |||
6150 | V, Chain); | |||
6151 | return SDValue(M0, 0); | |||
6152 | } | |||
6153 | ||||
6154 | SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG, | |||
6155 | SDValue Op, | |||
6156 | MVT VT, | |||
6157 | unsigned Offset) const { | |||
6158 | SDLoc SL(Op); | |||
6159 | SDValue Param = lowerKernargMemParameter( | |||
6160 | DAG, MVT::i32, MVT::i32, SL, DAG.getEntryNode(), Offset, Align(4), false); | |||
6161 | // The local size values will have the high 16 bits as zero.
6162 | return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param, | |||
6163 | DAG.getValueType(VT)); | |||
6164 | } | |||
6165 | ||||
6166 | static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL, | |||
6167 | EVT VT) { | |||
6168 | DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(), | |||
6169 | "non-hsa intrinsic with hsa target", | |||
6170 | DL.getDebugLoc()); | |||
6171 | DAG.getContext()->diagnose(BadIntrin); | |||
6172 | return DAG.getUNDEF(VT); | |||
6173 | } | |||
6174 | ||||
6175 | static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL, | |||
6176 | EVT VT) { | |||
6177 | DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(), | |||
6178 | "intrinsic not supported on subtarget", | |||
6179 | DL.getDebugLoc()); | |||
6180 | DAG.getContext()->diagnose(BadIntrin); | |||
6181 | return DAG.getUNDEF(VT); | |||
6182 | } | |||
6183 | ||||
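// Builds a single f32 value or an f32 vector from dword-sized inputs,
// bitcasting non-f32 elements to f32; more than eight elements are padded
// with undef up to v16f32.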
6184 | static SDValue getBuildDwordsVector(SelectionDAG &DAG, SDLoc DL, | |||
6185 | ArrayRef<SDValue> Elts) { | |||
6186 | assert(!Elts.empty());
6187 | MVT Type; | |||
6188 | unsigned NumElts = Elts.size(); | |||
6189 | ||||
6190 | if (NumElts <= 8) { | |||
6191 | Type = MVT::getVectorVT(MVT::f32, NumElts); | |||
6192 | } else { | |||
6193 | assert(Elts.size() <= 16);
6194 | Type = MVT::v16f32; | |||
6195 | NumElts = 16; | |||
6196 | } | |||
6197 | ||||
6198 | SmallVector<SDValue, 16> VecElts(NumElts); | |||
6199 | for (unsigned i = 0; i < Elts.size(); ++i) { | |||
6200 | SDValue Elt = Elts[i]; | |||
6201 | if (Elt.getValueType() != MVT::f32) | |||
6202 | Elt = DAG.getBitcast(MVT::f32, Elt); | |||
6203 | VecElts[i] = Elt; | |||
6204 | } | |||
6205 | for (unsigned i = Elts.size(); i < NumElts; ++i) | |||
6206 | VecElts[i] = DAG.getUNDEF(MVT::f32); | |||
6207 | ||||
6208 | if (NumElts == 1) | |||
6209 | return VecElts[0]; | |||
6210 | return DAG.getBuildVector(Type, DL, VecElts); | |||
6211 | } | |||
6212 | ||||
6213 | static SDValue padEltsToUndef(SelectionDAG &DAG, const SDLoc &DL, EVT CastVT, | |||
6214 | SDValue Src, int ExtraElts) { | |||
6215 | EVT SrcVT = Src.getValueType(); | |||
6216 | ||||
6217 | SmallVector<SDValue, 8> Elts; | |||
6218 | ||||
6219 | if (SrcVT.isVector()) | |||
6220 | DAG.ExtractVectorElements(Src, Elts); | |||
6221 | else | |||
6222 | Elts.push_back(Src); | |||
6223 | ||||
6224 | SDValue Undef = DAG.getUNDEF(SrcVT.getScalarType()); | |||
6225 | while (ExtraElts--) | |||
6226 | Elts.push_back(Undef); | |||
6227 | ||||
6228 | return DAG.getBuildVector(CastVT, DL, Elts); | |||
6229 | } | |||
6230 | ||||
6231 | // Re-construct the required return value for an image load intrinsic.
6232 | // This is more complicated due to the optional use of TexFailCtrl,
6233 | // which means the required return type is an aggregate.
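// E.g. a packed D16 load (IsD16 && !Unpacked) with ReqRetVT = v3f16 and
// DMaskPop = 3 yields NumDataDwords = MaskPopDwords = 2: the three half
// values travel in two dwords.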
6234 | static SDValue constructRetValue(SelectionDAG &DAG, | |||
6235 | MachineSDNode *Result, | |||
6236 | ArrayRef<EVT> ResultTypes, | |||
6237 | bool IsTexFail, bool Unpacked, bool IsD16, | |||
6238 | int DMaskPop, int NumVDataDwords, | |||
6239 | const SDLoc &DL) { | |||
6240 | // Determine the required return type. This is the same regardless of the IsTexFail flag.
6241 | EVT ReqRetVT = ResultTypes[0]; | |||
6242 | int ReqRetNumElts = ReqRetVT.isVector() ? ReqRetVT.getVectorNumElements() : 1; | |||
6243 | int NumDataDwords = (!IsD16 || (IsD16 && Unpacked)) ? | |||
6244 | ReqRetNumElts : (ReqRetNumElts + 1) / 2; | |||
6245 | ||||
6246 | int MaskPopDwords = (!IsD16 || (IsD16 && Unpacked)) ? | |||
6247 | DMaskPop : (DMaskPop + 1) / 2; | |||
6248 | ||||
6249 | MVT DataDwordVT = NumDataDwords == 1 ? | |||
6250 | MVT::i32 : MVT::getVectorVT(MVT::i32, NumDataDwords); | |||
6251 | ||||
6252 | MVT MaskPopVT = MaskPopDwords == 1 ? | |||
6253 | MVT::i32 : MVT::getVectorVT(MVT::i32, MaskPopDwords); | |||
6254 | ||||
6255 | SDValue Data(Result, 0); | |||
6256 | SDValue TexFail; | |||
6257 | ||||
6258 | if (DMaskPop > 0 && Data.getValueType() != MaskPopVT) { | |||
6259 | SDValue ZeroIdx = DAG.getConstant(0, DL, MVT::i32); | |||
6260 | if (MaskPopVT.isVector()) { | |||
6261 | Data = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MaskPopVT, | |||
6262 | SDValue(Result, 0), ZeroIdx); | |||
6263 | } else { | |||
6264 | Data = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MaskPopVT, | |||
6265 | SDValue(Result, 0), ZeroIdx); | |||
6266 | } | |||
6267 | } | |||
6268 | ||||
6269 | if (DataDwordVT.isVector()) | |||
6270 | Data = padEltsToUndef(DAG, DL, DataDwordVT, Data, | |||
6271 | NumDataDwords - MaskPopDwords); | |||
6272 | ||||
6273 | if (IsD16) | |||
6274 | Data = adjustLoadValueTypeImpl(Data, ReqRetVT, DL, DAG, Unpacked); | |||
6275 | ||||
6276 | EVT LegalReqRetVT = ReqRetVT; | |||
6277 | if (!ReqRetVT.isVector()) { | |||
6278 | if (!Data.getValueType().isInteger()) | |||
6279 | Data = DAG.getNode(ISD::BITCAST, DL, | |||
6280 | Data.getValueType().changeTypeToInteger(), Data); | |||
6281 | Data = DAG.getNode(ISD::TRUNCATE, DL, ReqRetVT.changeTypeToInteger(), Data); | |||
6282 | } else { | |||
6283 | // We need to widen the return vector to a legal type | |||
6284 | if ((ReqRetVT.getVectorNumElements() % 2) == 1 && | |||
6285 | ReqRetVT.getVectorElementType().getSizeInBits() == 16) { | |||
6286 | LegalReqRetVT = | |||
6287 | EVT::getVectorVT(*DAG.getContext(), ReqRetVT.getVectorElementType(), | |||
6288 | ReqRetVT.getVectorNumElements() + 1); | |||
6289 | } | |||
6290 | } | |||
6291 | Data = DAG.getNode(ISD::BITCAST, DL, LegalReqRetVT, Data); | |||
6292 | ||||
6293 | if (IsTexFail) { | |||
6294 | TexFail = | |||
6295 | DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, SDValue(Result, 0), | |||
6296 | DAG.getConstant(MaskPopDwords, DL, MVT::i32)); | |||
6297 | ||||
6298 | return DAG.getMergeValues({Data, TexFail, SDValue(Result, 1)}, DL); | |||
6299 | } | |||
6300 | ||||
6301 | if (Result->getNumValues() == 1) | |||
6302 | return Data; | |||
6303 | ||||
6304 | return DAG.getMergeValues({Data, SDValue(Result, 1)}, DL); | |||
6305 | } | |||
6306 | ||||
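// TexFailCtrl is a 2-bit immediate: bit 0 enables TFE (texture fail
// enable) and bit 1 enables LWE (LOD warning enable). Any other set bit is
// invalid and is reported by returning false.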
6307 | static bool parseTexFail(SDValue TexFailCtrl, SelectionDAG &DAG, SDValue *TFE, | |||
6308 | SDValue *LWE, bool &IsTexFail) { | |||
6309 | auto TexFailCtrlConst = cast<ConstantSDNode>(TexFailCtrl.getNode()); | |||
6310 | ||||
6311 | uint64_t Value = TexFailCtrlConst->getZExtValue(); | |||
6312 | if (Value) { | |||
6313 | IsTexFail = true; | |||
6314 | } | |||
6315 | ||||
6316 | SDLoc DL(TexFailCtrlConst); | |||
6317 | *TFE = DAG.getTargetConstant((Value & 0x1) ? 1 : 0, DL, MVT::i32); | |||
6318 | Value &= ~(uint64_t)0x1; | |||
6319 | *LWE = DAG.getTargetConstant((Value & 0x2) ? 1 : 0, DL, MVT::i32); | |||
6320 | Value &= ~(uint64_t)0x2; | |||
6321 | ||||
6322 | return Value == 0; | |||
6323 | } | |||
6324 | ||||
6325 | static void packImage16bitOpsToDwords(SelectionDAG &DAG, SDValue Op, | |||
6326 | MVT PackVectorVT, | |||
6327 | SmallVectorImpl<SDValue> &PackedAddrs, | |||
6328 | unsigned DimIdx, unsigned EndIdx, | |||
6329 | unsigned NumGradients) { | |||
6330 | SDLoc DL(Op); | |||
6331 | for (unsigned I = DimIdx; I < EndIdx; I++) { | |||
6332 | SDValue Addr = Op.getOperand(I); | |||
6333 | ||||
6334 | // Gradients are packed with undef for each coordinate. | |||
6335 | // In <hi 16 bit>,<lo 16 bit> notation, the registers look like this: | |||
6336 | // 1D: undef,dx/dh; undef,dx/dv | |||
6337 | // 2D: dy/dh,dx/dh; dy/dv,dx/dv | |||
6338 | // 3D: dy/dh,dx/dh; undef,dz/dh; dy/dv,dx/dv; undef,dz/dv | |||
6339 | if (((I + 1) >= EndIdx) || | |||
6340 | ((NumGradients / 2) % 2 == 1 && (I == DimIdx + (NumGradients / 2) - 1 || | |||
6341 | I == DimIdx + NumGradients - 1))) { | |||
6342 | if (Addr.getValueType() != MVT::i16) | |||
6343 | Addr = DAG.getBitcast(MVT::i16, Addr); | |||
6344 | Addr = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Addr); | |||
6345 | } else { | |||
6346 | Addr = DAG.getBuildVector(PackVectorVT, DL, {Addr, Op.getOperand(I + 1)}); | |||
6347 | I++; | |||
6348 | } | |||
6349 | Addr = DAG.getBitcast(MVT::f32, Addr); | |||
6350 | PackedAddrs.push_back(Addr); | |||
6351 | } | |||
6352 | } | |||
6353 | ||||
6354 | SDValue SITargetLowering::lowerImage(SDValue Op, | |||
6355 | const AMDGPU::ImageDimIntrinsicInfo *Intr, | |||
6356 | SelectionDAG &DAG, bool WithChain) const { | |||
6357 | SDLoc DL(Op); | |||
6358 | MachineFunction &MF = DAG.getMachineFunction(); | |||
6359 | const GCNSubtarget* ST = &MF.getSubtarget<GCNSubtarget>(); | |||
6360 | const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode = | |||
6361 | AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode); | |||
6362 | const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim); | |||
6363 | unsigned IntrOpcode = Intr->BaseOpcode; | |||
6364 | bool IsGFX10Plus = AMDGPU::isGFX10Plus(*Subtarget); | |||
6365 | ||||
6366 | SmallVector<EVT, 3> ResultTypes(Op->values()); | |||
6367 | SmallVector<EVT, 3> OrigResultTypes(Op->values()); | |||
6368 | bool IsD16 = false; | |||
6369 | bool IsG16 = false; | |||
6370 | bool IsA16 = false; | |||
6371 | SDValue VData; | |||
6372 | int NumVDataDwords; | |||
6373 | bool AdjustRetType = false; | |||
6374 | ||||
6375 | // Offset of intrinsic arguments | |||
6376 | const unsigned ArgOffset = WithChain ? 2 : 1; | |||
6377 | ||||
6378 | unsigned DMask; | |||
6379 | unsigned DMaskLanes = 0; | |||
6380 | ||||
6381 | if (BaseOpcode->Atomic) { | |||
6382 | VData = Op.getOperand(2); | |||
6383 | ||||
6384 | bool Is64Bit = VData.getValueType() == MVT::i64; | |||
6385 | if (BaseOpcode->AtomicX2) { | |||
6386 | SDValue VData2 = Op.getOperand(3); | |||
6387 | VData = DAG.getBuildVector(Is64Bit ? MVT::v2i64 : MVT::v2i32, DL, | |||
6388 | {VData, VData2}); | |||
6389 | if (Is64Bit) | |||
6390 | VData = DAG.getBitcast(MVT::v4i32, VData); | |||
6391 | ||||
6392 | ResultTypes[0] = Is64Bit ? MVT::v2i64 : MVT::v2i32; | |||
6393 | DMask = Is64Bit ? 0xf : 0x3; | |||
6394 | NumVDataDwords = Is64Bit ? 4 : 2; | |||
6395 | } else { | |||
6396 | DMask = Is64Bit ? 0x3 : 0x1; | |||
6397 | NumVDataDwords = Is64Bit ? 2 : 1; | |||
6398 | } | |||
6399 | } else { | |||
6400 | auto *DMaskConst = | |||
6401 | cast<ConstantSDNode>(Op.getOperand(ArgOffset + Intr->DMaskIndex)); | |||
6402 | DMask = DMaskConst->getZExtValue(); | |||
6403 | DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask); | |||
6404 | ||||
6405 | if (BaseOpcode->Store) { | |||
6406 | VData = Op.getOperand(2); | |||
6407 | ||||
6408 | MVT StoreVT = VData.getSimpleValueType(); | |||
6409 | if (StoreVT.getScalarType() == MVT::f16) { | |||
6410 | if (!Subtarget->hasD16Images() || !BaseOpcode->HasD16) | |||
6411 | return Op; // D16 is unsupported for this instruction | |||
6412 | ||||
6413 | IsD16 = true; | |||
6414 | VData = handleD16VData(VData, DAG, true); | |||
6415 | } | |||
6416 | ||||
6417 | NumVDataDwords = (VData.getValueType().getSizeInBits() + 31) / 32; | |||
6418 | } else { | |||
6419 | // Work out the number of dwords based on the dmask popcount and the
6420 | // underlying type, and whether packing is supported.
6421 | MVT LoadVT = ResultTypes[0].getSimpleVT(); | |||
6422 | if (LoadVT.getScalarType() == MVT::f16) { | |||
6423 | if (!Subtarget->hasD16Images() || !BaseOpcode->HasD16) | |||
6424 | return Op; // D16 is unsupported for this instruction | |||
6425 | ||||
6426 | IsD16 = true; | |||
6427 | } | |||
6428 | ||||
6429 | // Confirm that the return type is large enough for the dmask specified | |||
6430 | if ((LoadVT.isVector() && LoadVT.getVectorNumElements() < DMaskLanes) || | |||
6431 | (!LoadVT.isVector() && DMaskLanes > 1)) | |||
6432 | return Op; | |||
6433 | ||||
6434 | // The sq block of gfx8 and gfx9 does not estimate register use
6435 | // correctly for d16 image_gather4, image_gather4_l, and
6436 | // image_gather4_lz instructions.
6437 | if (IsD16 && !Subtarget->hasUnpackedD16VMem() && | |||
6438 | !(BaseOpcode->Gather4 && Subtarget->hasImageGather4D16Bug())) | |||
6439 | NumVDataDwords = (DMaskLanes + 1) / 2; | |||
6440 | else | |||
6441 | NumVDataDwords = DMaskLanes; | |||
6442 | ||||
6443 | AdjustRetType = true; | |||
6444 | } | |||
6445 | } | |||
6446 | ||||
6447 | unsigned VAddrEnd = ArgOffset + Intr->VAddrEnd; | |||
6448 | SmallVector<SDValue, 4> VAddrs; | |||
6449 | ||||
6450 | // Check for 16-bit addresses or derivatives and pack them if present.
6451 | MVT VAddrVT = | |||
6452 | Op.getOperand(ArgOffset + Intr->GradientStart).getSimpleValueType(); | |||
6453 | MVT VAddrScalarVT = VAddrVT.getScalarType(); | |||
6454 | MVT GradPackVectorVT = VAddrScalarVT == MVT::f16 ? MVT::v2f16 : MVT::v2i16; | |||
6455 | IsG16 = VAddrScalarVT == MVT::f16 || VAddrScalarVT == MVT::i16; | |||
6456 | ||||
6457 | VAddrVT = Op.getOperand(ArgOffset + Intr->CoordStart).getSimpleValueType(); | |||
6458 | VAddrScalarVT = VAddrVT.getScalarType(); | |||
6459 | MVT AddrPackVectorVT = VAddrScalarVT == MVT::f16 ? MVT::v2f16 : MVT::v2i16; | |||
6460 | IsA16 = VAddrScalarVT == MVT::f16 || VAddrScalarVT == MVT::i16; | |||
6461 | ||||
6462 | // Push back extra arguments. | |||
6463 | for (unsigned I = Intr->VAddrStart; I < Intr->GradientStart; I++) { | |||
6464 | if (IsA16 && (Op.getOperand(ArgOffset + I).getValueType() == MVT::f16)) { | |||
6465 | assert(I == Intr->BiasIndex && "Got unexpected 16-bit extra argument");
6466 | // Special handling of bias when A16 is on. Bias is of type half but | |||
6467 | // occupies a full 32-bit slot.
6468 | SDValue Bias = DAG.getBuildVector( | |||
6469 | MVT::v2f16, DL, | |||
6470 | {Op.getOperand(ArgOffset + I), DAG.getUNDEF(MVT::f16)}); | |||
6471 | VAddrs.push_back(Bias); | |||
6472 | } else { | |||
6473 | assert((!IsA16 || Intr->NumBiasArgs == 0 || I != Intr->BiasIndex) &&
6474 | "Bias needs to be converted to 16 bit in A16 mode");
6475 | VAddrs.push_back(Op.getOperand(ArgOffset + I)); | |||
6476 | } | |||
6477 | } | |||
6478 | ||||
6479 | if (BaseOpcode->Gradients && !ST->hasG16() && (IsA16 != IsG16)) { | |||
6480 | // 16-bit gradients are supported, but they are tied to the A16
6481 | // control, so both gradients and addresses must be 16-bit.
6482 | LLVM_DEBUG(
6483 | dbgs() << "Failed to lower image intrinsic: 16 bit addresses "
6484 | "require 16 bit args for both gradients and addresses");
6485 | return Op; | |||
6486 | } | |||
6487 | ||||
6488 | if (IsA16) { | |||
6489 | if (!ST->hasA16()) { | |||
6490 | LLVM_DEBUG(dbgs() << "Failed to lower image intrinsic: Target does not "
6491 | "support 16 bit addresses\n");
6492 | return Op; | |||
6493 | } | |||
6494 | } | |||
6495 | ||||
6496 | // We've dealt with any incorrect input, so we know that if IsA16 or
6497 | // IsG16 is set then we have to compress/pack the operands (addresses,
6498 | // gradients, or both).
6499 | // In the case where A16 and gradients are tied (no G16 support), we
6500 | // have already verified that both IsA16 and IsG16 are true.
6501 | if (BaseOpcode->Gradients && IsG16 && ST->hasG16()) { | |||
6502 | // Activate g16 | |||
6503 | const AMDGPU::MIMGG16MappingInfo *G16MappingInfo = | |||
6504 | AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode); | |||
6505 | IntrOpcode = G16MappingInfo->G16; // set new opcode to variant with _g16 | |||
6506 | } | |||
6507 | ||||
6508 | // Add gradients (packed or unpacked) | |||
6509 | if (IsG16) { | |||
6510 | // Pack the gradients | |||
6511 | // const int PackEndIdx = IsA16 ? VAddrEnd : (ArgOffset + Intr->CoordStart); | |||
6512 | packImage16bitOpsToDwords(DAG, Op, GradPackVectorVT, VAddrs, | |||
6513 | ArgOffset + Intr->GradientStart, | |||
6514 | ArgOffset + Intr->CoordStart, Intr->NumGradients); | |||
6515 | } else { | |||
6516 | for (unsigned I = ArgOffset + Intr->GradientStart; | |||
6517 | I < ArgOffset + Intr->CoordStart; I++) | |||
6518 | VAddrs.push_back(Op.getOperand(I)); | |||
6519 | } | |||
6520 | ||||
6521 | // Add addresses (packed or unpacked) | |||
6522 | if (IsA16) { | |||
6523 | packImage16bitOpsToDwords(DAG, Op, AddrPackVectorVT, VAddrs, | |||
6524 | ArgOffset + Intr->CoordStart, VAddrEnd, | |||
6525 | 0 /* No gradients */); | |||
6526 | } else { | |||
6527 | // Add uncompressed address | |||
6528 | for (unsigned I = ArgOffset + Intr->CoordStart; I < VAddrEnd; I++) | |||
6529 | VAddrs.push_back(Op.getOperand(I)); | |||
6530 | } | |||
6531 | ||||
6532 | // If the register allocator cannot place the address registers
6533 | // contiguously without introducing moves, then using the non-sequential
6534 | // address encoding is always preferable, since it saves VALU
6535 | // instructions and is usually a wash or better in terms of code size.
6536 | // | |||
6537 | // However, we currently have no way of hinting to the register allocator that | |||
6538 | // MIMG addresses should be placed contiguously when it is possible to do so, | |||
6539 | // so force non-NSA for the common 2-address case as a heuristic. | |||
6540 | // | |||
6541 | // SIShrinkInstructions will convert NSA encodings to non-NSA after register | |||
6542 | // allocation when possible. | |||
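// E.g. a plain 2D sample with two address dwords stays non-NSA under this
// heuristic, while three or more address dwords (up to the subtarget's NSA
// limit) use the NSA encoding.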
6543 | bool UseNSA = ST->hasFeature(AMDGPU::FeatureNSAEncoding) && | |||
6544 | VAddrs.size() >= 3 && | |||
6545 | VAddrs.size() <= (unsigned)ST->getNSAMaxSize(); | |||
6546 | SDValue VAddr; | |||
6547 | if (!UseNSA) | |||
6548 | VAddr = getBuildDwordsVector(DAG, DL, VAddrs); | |||
6549 | ||||
6550 | SDValue True = DAG.getTargetConstant(1, DL, MVT::i1); | |||
6551 | SDValue False = DAG.getTargetConstant(0, DL, MVT::i1); | |||
6552 | SDValue Unorm; | |||
6553 | if (!BaseOpcode->Sampler) { | |||
6554 | Unorm = True; | |||
6555 | } else { | |||
6556 | auto UnormConst = | |||
6557 | cast<ConstantSDNode>(Op.getOperand(ArgOffset + Intr->UnormIndex)); | |||
6558 | ||||
6559 | Unorm = UnormConst->getZExtValue() ? True : False; | |||
6560 | } | |||
6561 | ||||
6562 | SDValue TFE; | |||
6563 | SDValue LWE; | |||
6564 | SDValue TexFail = Op.getOperand(ArgOffset + Intr->TexFailCtrlIndex); | |||
6565 | bool IsTexFail = false; | |||
6566 | if (!parseTexFail(TexFail, DAG, &TFE, &LWE, IsTexFail)) | |||
6567 | return Op; | |||
6568 | ||||
6569 | if (IsTexFail) { | |||
6570 | if (!DMaskLanes) { | |||
6571 | // Expecting to get an error flag since TFC is on, and dmask is 0.
6572 | // Force dmask to be at least 1, otherwise the instruction will fail.
6573 | DMask = 0x1; | |||
6574 | DMaskLanes = 1; | |||
6575 | NumVDataDwords = 1; | |||
6576 | } | |||
6577 | NumVDataDwords += 1; | |||
6578 | AdjustRetType = true; | |||
6579 | } | |||
6580 | ||||
6581 | // Something earlier tagged that the return type needs adjusting.
6582 | // This happens if the instruction is a load or has TexFailCtrl flags set.
6583 | if (AdjustRetType) { | |||
6584 | // NumVDataDwords reflects the true number of dwords required in the return type | |||
6585 | if (DMaskLanes == 0 && !BaseOpcode->Store) { | |||
6586 | // This is a no-op load. This can be eliminated | |||
6587 | SDValue Undef = DAG.getUNDEF(Op.getValueType()); | |||
6588 | if (isa<MemSDNode>(Op)) | |||
6589 | return DAG.getMergeValues({Undef, Op.getOperand(0)}, DL); | |||
6590 | return Undef; | |||
6591 | } | |||
6592 | ||||
6593 | EVT NewVT = NumVDataDwords > 1 ? | |||
6594 | EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumVDataDwords) | |||
6595 | : MVT::i32; | |||
6596 | ||||
6597 | ResultTypes[0] = NewVT; | |||
6598 | if (ResultTypes.size() == 3) { | |||
6599 | // The original result was an aggregate type used for the TexFailCtrl
6600 | // result. The actual instruction returns a vector type, which has now
6601 | // been created. Remove the aggregate result.
6602 | ResultTypes.erase(&ResultTypes[1]); | |||
6603 | } | |||
6604 | } | |||
6605 | ||||
6606 | unsigned CPol = cast<ConstantSDNode>( | |||
6607 | Op.getOperand(ArgOffset + Intr->CachePolicyIndex))->getZExtValue(); | |||
6608 | if (BaseOpcode->Atomic) | |||
6609 | CPol |= AMDGPU::CPol::GLC; // TODO no-return optimization | |||
6610 | if (CPol & ~AMDGPU::CPol::ALL) | |||
6611 | return Op; | |||
6612 | ||||
6613 | SmallVector<SDValue, 26> Ops; | |||
6614 | if (BaseOpcode->Store || BaseOpcode->Atomic) | |||
6615 | Ops.push_back(VData); // vdata | |||
6616 | if (UseNSA) | |||
6617 | append_range(Ops, VAddrs); | |||
6618 | else | |||
6619 | Ops.push_back(VAddr); | |||
6620 | Ops.push_back(Op.getOperand(ArgOffset + Intr->RsrcIndex)); | |||
6621 | if (BaseOpcode->Sampler) | |||
6622 | Ops.push_back(Op.getOperand(ArgOffset + Intr->SampIndex)); | |||
6623 | Ops.push_back(DAG.getTargetConstant(DMask, DL, MVT::i32)); | |||
6624 | if (IsGFX10Plus) | |||
6625 | Ops.push_back(DAG.getTargetConstant(DimInfo->Encoding, DL, MVT::i32)); | |||
6626 | Ops.push_back(Unorm); | |||
6627 | Ops.push_back(DAG.getTargetConstant(CPol, DL, MVT::i32)); | |||
6628 | Ops.push_back(IsA16 && // r128, a16 for gfx9 | |||
6629 | ST->hasFeature(AMDGPU::FeatureR128A16) ? True : False); | |||
6630 | if (IsGFX10Plus) | |||
6631 | Ops.push_back(IsA16 ? True : False); | |||
6632 | if (!Subtarget->hasGFX90AInsts()) { | |||
6633 | Ops.push_back(TFE); //tfe | |||
6634 | } else if (cast<ConstantSDNode>(TFE)->getZExtValue()) { | |||
6635 | report_fatal_error("TFE is not supported on this GPU"); | |||
6636 | } | |||
6637 | Ops.push_back(LWE); // lwe | |||
6638 | if (!IsGFX10Plus) | |||
6639 | Ops.push_back(DimInfo->DA ? True : False); | |||
6640 | if (BaseOpcode->HasD16) | |||
6641 | Ops.push_back(IsD16 ? True : False); | |||
6642 | if (isa<MemSDNode>(Op)) | |||
6643 | Ops.push_back(Op.getOperand(0)); // chain | |||
6644 | ||||
6645 | int NumVAddrDwords = | |||
6646 | UseNSA ? VAddrs.size() : VAddr.getValueType().getSizeInBits() / 32; | |||
6647 | int Opcode = -1; | |||
6648 | ||||
6649 | if (IsGFX10Plus) { | |||
6650 | Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, | |||
6651 | UseNSA ? AMDGPU::MIMGEncGfx10NSA | |||
6652 | : AMDGPU::MIMGEncGfx10Default, | |||
6653 | NumVDataDwords, NumVAddrDwords); | |||
6654 | } else { | |||
6655 | if (Subtarget->hasGFX90AInsts()) { | |||
6656 | Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx90a, | |||
6657 | NumVDataDwords, NumVAddrDwords); | |||
6658 | if (Opcode == -1) | |||
6659 | report_fatal_error( | |||
6660 | "requested image instruction is not supported on this GPU"); | |||
6661 | } | |||
6662 | if (Opcode == -1 && | |||
6663 | Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) | |||
6664 | Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8, | |||
6665 | NumVDataDwords, NumVAddrDwords); | |||
6666 | if (Opcode == -1) | |||
6667 | Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6, | |||
6668 | NumVDataDwords, NumVAddrDwords); | |||
6669 | } | |||
6670 | assert(Opcode != -1);
6671 | ||||
6672 | MachineSDNode *NewNode = DAG.getMachineNode(Opcode, DL, ResultTypes, Ops); | |||
6673 | if (auto MemOp = dyn_cast<MemSDNode>(Op)) { | |||
6674 | MachineMemOperand *MemRef = MemOp->getMemOperand(); | |||
6675 | DAG.setNodeMemRefs(NewNode, {MemRef}); | |||
6676 | } | |||
6677 | ||||
6678 | if (BaseOpcode->AtomicX2) { | |||
6679 | SmallVector<SDValue, 1> Elt; | |||
6680 | DAG.ExtractVectorElements(SDValue(NewNode, 0), Elt, 0, 1); | |||
6681 | return DAG.getMergeValues({Elt[0], SDValue(NewNode, 1)}, DL); | |||
6682 | } | |||
6683 | if (BaseOpcode->Store) | |||
6684 | return SDValue(NewNode, 0); | |||
6685 | return constructRetValue(DAG, NewNode, | |||
6686 | OrigResultTypes, IsTexFail, | |||
6687 | Subtarget->hasUnpackedD16VMem(), IsD16, | |||
6688 | DMaskLanes, NumVDataDwords, DL); | |||
6689 | } | |||
6690 | ||||
6691 | SDValue SITargetLowering::lowerSBuffer(EVT VT, SDLoc DL, SDValue Rsrc, | |||
6692 | SDValue Offset, SDValue CachePolicy, | |||
6693 | SelectionDAG &DAG) const { | |||
6694 | MachineFunction &MF = DAG.getMachineFunction(); | |||
6695 | ||||
6696 | const DataLayout &DataLayout = DAG.getDataLayout(); | |||
6697 | Align Alignment = | |||
6698 | DataLayout.getABITypeAlign(VT.getTypeForEVT(*DAG.getContext())); | |||
6699 | ||||
6700 | MachineMemOperand *MMO = MF.getMachineMemOperand( | |||
6701 | MachinePointerInfo(), | |||
6702 | MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable | | |||
6703 | MachineMemOperand::MOInvariant, | |||
6704 | VT.getStoreSize(), Alignment); | |||
6705 | ||||
6706 | if (!Offset->isDivergent()) { | |||
6707 | SDValue Ops[] = { | |||
6708 | Rsrc, | |||
6709 | Offset, // Offset | |||
6710 | CachePolicy | |||
6711 | }; | |||
6712 | ||||
6713 | // Widen vec3 load to vec4. | |||
6714 | if (VT.isVector() && VT.getVectorNumElements() == 3) { | |||
6715 | EVT WidenedVT = | |||
6716 | EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), 4); | |||
6717 | auto WidenedOp = DAG.getMemIntrinsicNode( | |||
6718 | AMDGPUISD::SBUFFER_LOAD, DL, DAG.getVTList(WidenedVT), Ops, WidenedVT, | |||
6719 | MF.getMachineMemOperand(MMO, 0, WidenedVT.getStoreSize())); | |||
6720 | auto Subvector = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, WidenedOp, | |||
6721 | DAG.getVectorIdxConstant(0, DL)); | |||
6722 | return Subvector; | |||
6723 | } | |||
6724 | ||||
6725 | return DAG.getMemIntrinsicNode(AMDGPUISD::SBUFFER_LOAD, DL, | |||
6726 | DAG.getVTList(VT), Ops, VT, MMO); | |||
6727 | } | |||
6728 | ||||
6729 | // We have a divergent offset. Emit a MUBUF buffer load instead. We can | |||
6730 | // assume that the buffer is unswizzled. | |||
6731 | SmallVector<SDValue, 4> Loads; | |||
6732 | unsigned NumLoads = 1; | |||
6733 | MVT LoadVT = VT.getSimpleVT(); | |||
6734 | unsigned NumElts = LoadVT.isVector() ? LoadVT.getVectorNumElements() : 1; | |||
6735 | assert((LoadVT.getScalarType() == MVT::i32 ||
6736 | LoadVT.getScalarType() == MVT::f32));
6737 | ||||
6738 | if (NumElts == 8 || NumElts == 16) { | |||
6739 | NumLoads = NumElts / 4; | |||
6740 | LoadVT = MVT::getVectorVT(LoadVT.getScalarType(), 4); | |||
6741 | } | |||
6742 | ||||
6743 | SDVTList VTList = DAG.getVTList({LoadVT, MVT::Glue}); | |||
6744 | SDValue Ops[] = { | |||
6745 | DAG.getEntryNode(), // Chain | |||
6746 | Rsrc, // rsrc | |||
6747 | DAG.getConstant(0, DL, MVT::i32), // vindex | |||
6748 | {}, // voffset | |||
6749 | {}, // soffset | |||
6750 | {}, // offset | |||
6751 | CachePolicy, // cachepolicy | |||
6752 | DAG.getTargetConstant(0, DL, MVT::i1), // idxen | |||
6753 | }; | |||
6754 | ||||
6755 | // Use the alignment to ensure that the required offsets will fit into
6756 | // the immediate offset field.
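// E.g. a divergent v8f32 load splits into two v4f32 BUFFER_LOADs at
// InstOffset + 0 and InstOffset + 16; requesting Align(32) keeps both
// immediate offsets representable.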
6757 | setBufferOffsets(Offset, DAG, &Ops[3], | |||
6758 | NumLoads > 1 ? Align(16 * NumLoads) : Align(4)); | |||
6759 | ||||
6760 | uint64_t InstOffset = cast<ConstantSDNode>(Ops[5])->getZExtValue(); | |||
6761 | for (unsigned i = 0; i < NumLoads; ++i) { | |||
6762 | Ops[5] = DAG.getTargetConstant(InstOffset + 16 * i, DL, MVT::i32); | |||
6763 | Loads.push_back(getMemIntrinsicNode(AMDGPUISD::BUFFER_LOAD, DL, VTList, Ops, | |||
6764 | LoadVT, MMO, DAG)); | |||
6765 | } | |||
6766 | ||||
6767 | if (NumElts == 8 || NumElts == 16) | |||
6768 | return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Loads); | |||
6769 | ||||
6770 | return Loads[0]; | |||
6771 | } | |||
6772 | ||||
6773 | SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, | |||
6774 | SelectionDAG &DAG) const { | |||
6775 | MachineFunction &MF = DAG.getMachineFunction(); | |||
6776 | auto MFI = MF.getInfo<SIMachineFunctionInfo>(); | |||
6777 | ||||
6778 | EVT VT = Op.getValueType(); | |||
6779 | SDLoc DL(Op); | |||
6780 | unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); | |||
6781 | ||||
6782 | // TODO: Should this propagate fast-math-flags? | |||
6783 | ||||
6784 | switch (IntrinsicID) { | |||
6785 | case Intrinsic::amdgcn_implicit_buffer_ptr: { | |||
6786 | if (getSubtarget()->isAmdHsaOrMesa(MF.getFunction())) | |||
6787 | return emitNonHSAIntrinsicError(DAG, DL, VT); | |||
6788 | return getPreloadedValue(DAG, *MFI, VT, | |||
6789 | AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR); | |||
6790 | } | |||
6791 | case Intrinsic::amdgcn_dispatch_ptr: | |||
6792 | case Intrinsic::amdgcn_queue_ptr: { | |||
6793 | if (!Subtarget->isAmdHsaOrMesa(MF.getFunction())) { | |||
6794 | DiagnosticInfoUnsupported BadIntrin( | |||
6795 | MF.getFunction(), "unsupported hsa intrinsic without hsa target", | |||
6796 | DL.getDebugLoc()); | |||
6797 | DAG.getContext()->diagnose(BadIntrin); | |||
6798 | return DAG.getUNDEF(VT); | |||
6799 | } | |||
6800 | ||||
6801 | auto RegID = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ? | |||
6802 | AMDGPUFunctionArgInfo::DISPATCH_PTR : AMDGPUFunctionArgInfo::QUEUE_PTR; | |||
6803 | return getPreloadedValue(DAG, *MFI, VT, RegID); | |||
6804 | } | |||
6805 | case Intrinsic::amdgcn_implicitarg_ptr: { | |||
6806 | if (MFI->isEntryFunction()) | |||
6807 | return getImplicitArgPtr(DAG, DL); | |||
6808 | return getPreloadedValue(DAG, *MFI, VT, | |||
6809 | AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR); | |||
6810 | } | |||
6811 | case Intrinsic::amdgcn_kernarg_segment_ptr: { | |||
6812 | if (!AMDGPU::isKernel(MF.getFunction().getCallingConv())) { | |||
6813 | // This only makes sense to call in a kernel, so just lower to null. | |||
6814 | return DAG.getConstant(0, DL, VT); | |||
6815 | } | |||
6816 | ||||
6817 | return getPreloadedValue(DAG, *MFI, VT, | |||
6818 | AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR); | |||
6819 | } | |||
6820 | case Intrinsic::amdgcn_dispatch_id: { | |||
6821 | return getPreloadedValue(DAG, *MFI, VT, AMDGPUFunctionArgInfo::DISPATCH_ID); | |||
6822 | } | |||
6823 | case Intrinsic::amdgcn_rcp: | |||
6824 | return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1)); | |||
6825 | case Intrinsic::amdgcn_rsq: | |||
6826 | return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1)); | |||
6827 | case Intrinsic::amdgcn_rsq_legacy: | |||
6828 | if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) | |||
6829 | return emitRemovedIntrinsicError(DAG, DL, VT); | |||
6830 | return SDValue(); | |||
6831 | case Intrinsic::amdgcn_rcp_legacy: | |||
6832 | if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) | |||
6833 | return emitRemovedIntrinsicError(DAG, DL, VT); | |||
6834 | return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1)); | |||
6835 | case Intrinsic::amdgcn_rsq_clamp: { | |||
6836 | if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS) | |||
6837 | return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1)); | |||
6838 | ||||
6839 | Type *Type = VT.getTypeForEVT(*DAG.getContext()); | |||
6840 | APFloat Max = APFloat::getLargest(Type->getFltSemantics()); | |||
6841 | APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true); | |||
6842 | ||||
6843 | SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1)); | |||
6844 | SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq, | |||
6845 | DAG.getConstantFP(Max, DL, VT)); | |||
6846 | return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp, | |||
6847 | DAG.getConstantFP(Min, DL, VT)); | |||
6848 | } | |||
6849 | case Intrinsic::r600_read_ngroups_x: | |||
6850 | if (Subtarget->isAmdHsaOS()) | |||
6851 | return emitNonHSAIntrinsicError(DAG, DL, VT); | |||
6852 | ||||
6853 | return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), | |||
6854 | SI::KernelInputOffsets::NGROUPS_X, Align(4), | |||
6855 | false); | |||
6856 | case Intrinsic::r600_read_ngroups_y: | |||
6857 | if (Subtarget->isAmdHsaOS()) | |||
6858 | return emitNonHSAIntrinsicError(DAG, DL, VT); | |||
6859 | ||||
6860 | return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), | |||
6861 | SI::KernelInputOffsets::NGROUPS_Y, Align(4), | |||
6862 | false); | |||
6863 | case Intrinsic::r600_read_ngroups_z: | |||
6864 | if (Subtarget->isAmdHsaOS()) | |||
6865 | return emitNonHSAIntrinsicError(DAG, DL, VT); | |||
6866 | ||||
6867 | return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), | |||
6868 | SI::KernelInputOffsets::NGROUPS_Z, Align(4), | |||
6869 | false); | |||
6870 | case Intrinsic::r600_read_global_size_x: | |||
6871 | if (Subtarget->isAmdHsaOS()) | |||
6872 | return emitNonHSAIntrinsicError(DAG, DL, VT); | |||
6873 | ||||
6874 | return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), | |||
6875 | SI::KernelInputOffsets::GLOBAL_SIZE_X, | |||
6876 | Align(4), false); | |||
6877 | case Intrinsic::r600_read_global_size_y: | |||
6878 | if (Subtarget->isAmdHsaOS()) | |||
6879 | return emitNonHSAIntrinsicError(DAG, DL, VT); | |||
6880 | ||||
6881 | return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), | |||
6882 | SI::KernelInputOffsets::GLOBAL_SIZE_Y, | |||
6883 | Align(4), false); | |||
6884 | case Intrinsic::r600_read_global_size_z: | |||
6885 | if (Subtarget->isAmdHsaOS()) | |||
6886 | return emitNonHSAIntrinsicError(DAG, DL, VT); | |||
6887 | ||||
6888 | return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), | |||
6889 | SI::KernelInputOffsets::GLOBAL_SIZE_Z, | |||
6890 | Align(4), false); | |||
6891 | case Intrinsic::r600_read_local_size_x: | |||
6892 | if (Subtarget->isAmdHsaOS()) | |||
6893 | return emitNonHSAIntrinsicError(DAG, DL, VT); | |||
6894 | ||||
6895 | return lowerImplicitZextParam(DAG, Op, MVT::i16, | |||
6896 | SI::KernelInputOffsets::LOCAL_SIZE_X); | |||
6897 | case Intrinsic::r600_read_local_size_y: | |||
6898 | if (Subtarget->isAmdHsaOS()) | |||
6899 | return emitNonHSAIntrinsicError(DAG, DL, VT); | |||
6900 | ||||
6901 | return lowerImplicitZextParam(DAG, Op, MVT::i16, | |||
6902 | SI::KernelInputOffsets::LOCAL_SIZE_Y); | |||
6903 | case Intrinsic::r600_read_local_size_z: | |||
6904 | if (Subtarget->isAmdHsaOS()) | |||
6905 | return emitNonHSAIntrinsicError(DAG, DL, VT); | |||
6906 | ||||
6907 | return lowerImplicitZextParam(DAG, Op, MVT::i16, | |||
6908 | SI::KernelInputOffsets::LOCAL_SIZE_Z); | |||
6909 | case Intrinsic::amdgcn_workgroup_id_x: | |||
6910 | return getPreloadedValue(DAG, *MFI, VT, | |||
6911 | AMDGPUFunctionArgInfo::WORKGROUP_ID_X); | |||
6912 | case Intrinsic::amdgcn_workgroup_id_y: | |||
6913 | return getPreloadedValue(DAG, *MFI, VT, | |||
6914 | AMDGPUFunctionArgInfo::WORKGROUP_ID_Y); | |||
6915 | case Intrinsic::amdgcn_workgroup_id_z: | |||
6916 | return getPreloadedValue(DAG, *MFI, VT, | |||
6917 | AMDGPUFunctionArgInfo::WORKGROUP_ID_Z); | |||
6918 | case Intrinsic::amdgcn_workitem_id_x: | |||
6919 | if (Subtarget->getMaxWorkitemID(MF.getFunction(), 0) == 0) | |||
6920 | return DAG.getConstant(0, DL, MVT::i32); | |||
6921 | ||||
6922 | return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32, | |||
6923 | SDLoc(DAG.getEntryNode()), | |||
6924 | MFI->getArgInfo().WorkItemIDX); | |||
6925 | case Intrinsic::amdgcn_workitem_id_y: | |||
6926 | if (Subtarget->getMaxWorkitemID(MF.getFunction(), 1) == 0) | |||
6927 | return DAG.getConstant(0, DL, MVT::i32); | |||
6928 | ||||
6929 | return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32, | |||
6930 | SDLoc(DAG.getEntryNode()), | |||
6931 | MFI->getArgInfo().WorkItemIDY); | |||
6932 | case Intrinsic::amdgcn_workitem_id_z: | |||
6933 | if (Subtarget->getMaxWorkitemID(MF.getFunction(), 2) == 0) | |||
6934 | return DAG.getConstant(0, DL, MVT::i32); | |||
6935 | ||||
6936 | return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32, | |||
6937 | SDLoc(DAG.getEntryNode()), | |||
6938 | MFI->getArgInfo().WorkItemIDZ); | |||
6939 | case Intrinsic::amdgcn_wavefrontsize: | |||
6940 | return DAG.getConstant(MF.getSubtarget<GCNSubtarget>().getWavefrontSize(), | |||
6941 | SDLoc(Op), MVT::i32); | |||
6942 | case Intrinsic::amdgcn_s_buffer_load: { | |||
6943 | unsigned CPol = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue(); | |||
6944 | if (CPol & ~AMDGPU::CPol::ALL) | |||
6945 | return Op; | |||
6946 | return lowerSBuffer(VT, DL, Op.getOperand(1), Op.getOperand(2), Op.getOperand(3), | |||
6947 | DAG); | |||
6948 | } | |||
6949 | case Intrinsic::amdgcn_fdiv_fast: | |||
6950 | return lowerFDIV_FAST(Op, DAG); | |||
6951 | case Intrinsic::amdgcn_sin: | |||
6952 | return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1)); | |||
6953 | ||||
6954 | case Intrinsic::amdgcn_cos: | |||
6955 | return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1)); | |||
6956 | ||||
6957 | case Intrinsic::amdgcn_mul_u24: | |||
6958 | return DAG.getNode(AMDGPUISD::MUL_U24, DL, VT, Op.getOperand(1), Op.getOperand(2)); | |||
6959 | case Intrinsic::amdgcn_mul_i24: | |||
6960 | return DAG.getNode(AMDGPUISD::MUL_I24, DL, VT, Op.getOperand(1), Op.getOperand(2)); | |||
6961 | ||||
6962 | case Intrinsic::amdgcn_log_clamp: { | |||
6963 | if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS) | |||
6964 | return SDValue(); | |||
6965 | ||||
6966 | return emitRemovedIntrinsicError(DAG, DL, VT); | |||
6967 | } | |||
6968 | case Intrinsic::amdgcn_ldexp: | |||
6969 | return DAG.getNode(AMDGPUISD::LDEXP, DL, VT, | |||
6970 | Op.getOperand(1), Op.getOperand(2)); | |||
6971 | ||||
6972 | case Intrinsic::amdgcn_fract: | |||
6973 | return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1)); | |||
6974 | ||||
6975 | case Intrinsic::amdgcn_class: | |||
6976 | return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT, | |||
6977 | Op.getOperand(1), Op.getOperand(2)); | |||
6978 | case Intrinsic::amdgcn_div_fmas: | |||
6979 | return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT, | |||
6980 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3), | |||
6981 | Op.getOperand(4)); | |||
6982 | ||||
6983 | case Intrinsic::amdgcn_div_fixup: | |||
6984 | return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT, | |||
6985 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); | |||
6986 | ||||
6987 | case Intrinsic::amdgcn_div_scale: { | |||
6988 | const ConstantSDNode *Param = cast<ConstantSDNode>(Op.getOperand(3)); | |||
6989 | ||||
6990 | // Translate to the operands expected by the machine instruction. The | |||
6991 | // first operand must match either the numerator or the denominator. | |||
6992 | SDValue Numerator = Op.getOperand(1); | |||
6993 | SDValue Denominator = Op.getOperand(2); | |||
6994 | ||||
6995 | // Note this order is the opposite of the machine instruction's operands, | |||
6996 | // which are s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The | |||
6997 | // intrinsic has the numerator as the first operand to match a normal | |||
6998 | // division operation. | |||
6999 | ||||
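     | // The i1 operand selects which source DIV_SCALE scales: all-ones (true) | |||
     | // picks the numerator, zero picks the denominator. | |||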
7000 | SDValue Src0 = Param->isAllOnes() ? Numerator : Denominator; | |||
7001 | ||||
7002 | return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0, | |||
7003 | Denominator, Numerator); | |||
7004 | } | |||
7005 | case Intrinsic::amdgcn_icmp: { | |||
7006 | // There is a Pat that handles this variant, so return it as-is. | |||
7007 | if (Op.getOperand(1).getValueType() == MVT::i1 && | |||
7008 | Op.getConstantOperandVal(2) == 0 && | |||
7009 | Op.getConstantOperandVal(3) == ICmpInst::Predicate::ICMP_NE) | |||
7010 | return Op; | |||
7011 | return lowerICMPIntrinsic(*this, Op.getNode(), DAG); | |||
7012 | } | |||
7013 | case Intrinsic::amdgcn_fcmp: { | |||
7014 | return lowerFCMPIntrinsic(*this, Op.getNode(), DAG); | |||
7015 | } | |||
7016 | case Intrinsic::amdgcn_ballot: | |||
7017 | return lowerBALLOTIntrinsic(*this, Op.getNode(), DAG); | |||
7018 | case Intrinsic::amdgcn_fmed3: | |||
7019 | return DAG.getNode(AMDGPUISD::FMED3, DL, VT, | |||
7020 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); | |||
7021 | case Intrinsic::amdgcn_fdot2: | |||
7022 | return DAG.getNode(AMDGPUISD::FDOT2, DL, VT, | |||
7023 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3), | |||
7024 | Op.getOperand(4)); | |||
7025 | case Intrinsic::amdgcn_fmul_legacy: | |||
7026 | return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT, | |||
7027 | Op.getOperand(1), Op.getOperand(2)); | |||
7028 | case Intrinsic::amdgcn_sffbh: | |||
7029 | return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1)); | |||
7030 | case Intrinsic::amdgcn_sbfe: | |||
7031 | return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT, | |||
7032 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); | |||
7033 | case Intrinsic::amdgcn_ubfe: | |||
7034 | return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT, | |||
7035 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); | |||
7036 | case Intrinsic::amdgcn_cvt_pkrtz: | |||
7037 | case Intrinsic::amdgcn_cvt_pknorm_i16: | |||
7038 | case Intrinsic::amdgcn_cvt_pknorm_u16: | |||
7039 | case Intrinsic::amdgcn_cvt_pk_i16: | |||
7040 | case Intrinsic::amdgcn_cvt_pk_u16: { | |||
7041 | // FIXME: Stop adding cast if v2f16/v2i16 are legal. | |||
7042 | EVT VT = Op.getValueType(); | |||
7043 | unsigned Opcode; | |||
7044 | ||||
7045 | if (IntrinsicID == Intrinsic::amdgcn_cvt_pkrtz) | |||
7046 | Opcode = AMDGPUISD::CVT_PKRTZ_F16_F32; | |||
7047 | else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_i16) | |||
7048 | Opcode = AMDGPUISD::CVT_PKNORM_I16_F32; | |||
7049 | else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_u16) | |||
7050 | Opcode = AMDGPUISD::CVT_PKNORM_U16_F32; | |||
7051 | else if (IntrinsicID == Intrinsic::amdgcn_cvt_pk_i16) | |||
7052 | Opcode = AMDGPUISD::CVT_PK_I16_I32; | |||
7053 | else | |||
7054 | Opcode = AMDGPUISD::CVT_PK_U16_U32; | |||
7055 | ||||
7056 | if (isTypeLegal(VT)) | |||
7057 | return DAG.getNode(Opcode, DL, VT, Op.getOperand(1), Op.getOperand(2)); | |||
7058 | ||||
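     | // v2f16/v2i16 are not legal here, so build the packed result as an i32 | |||
     | // and bitcast it to the expected vector type. | |||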
7059 | SDValue Node = DAG.getNode(Opcode, DL, MVT::i32, | |||
7060 | Op.getOperand(1), Op.getOperand(2)); | |||
7061 | return DAG.getNode(ISD::BITCAST, DL, VT, Node); | |||
7062 | } | |||
7063 | case Intrinsic::amdgcn_fmad_ftz: | |||
7064 | return DAG.getNode(AMDGPUISD::FMAD_FTZ, DL, VT, Op.getOperand(1), | |||
7065 | Op.getOperand(2), Op.getOperand(3)); | |||
7066 | ||||
7067 | case Intrinsic::amdgcn_if_break: | |||
7068 | return SDValue(DAG.getMachineNode(AMDGPU::SI_IF_BREAK, DL, VT, | |||
7069 | Op->getOperand(1), Op->getOperand(2)), 0); | |||
7070 | ||||
7071 | case Intrinsic::amdgcn_groupstaticsize: { | |||
7072 | Triple::OSType OS = getTargetMachine().getTargetTriple().getOS(); | |||
7073 | if (OS == Triple::AMDHSA || OS == Triple::AMDPAL) | |||
7074 | return Op; | |||
7075 | ||||
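     | // On other OSes the LDS size is presumably not known until link time, so | |||
     | // emit an absolute 32-bit relocation against the intrinsic's named symbol | |||
     | // (MO_ABS32_LO) and let the linker fill it in. | |||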
7076 | const Module *M = MF.getFunction().getParent(); | |||
7077 | const GlobalValue *GV = | |||
7078 | M->getNamedValue(Intrinsic::getName(Intrinsic::amdgcn_groupstaticsize)); | |||
7079 | SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, 0, | |||
7080 | SIInstrInfo::MO_ABS32_LO); | |||
7081 | return {DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, GA), 0}; | |||
7082 | } | |||
7083 | case Intrinsic::amdgcn_is_shared: | |||
7084 | case Intrinsic::amdgcn_is_private: { | |||
7085 | SDLoc SL(Op); | |||
7086 | unsigned AS = (IntrinsicID == Intrinsic::amdgcn_is_shared) ? | |||
7087 | AMDGPUAS::LOCAL_ADDRESS : AMDGPUAS::PRIVATE_ADDRESS; | |||
7088 | SDValue Aperture = getSegmentAperture(AS, SL, DAG); | |||
7089 | SDValue SrcVec = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, | |||
7090 | Op.getOperand(1)); | |||
7091 | ||||
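     | // A flat pointer lies in the chosen segment iff the high 32 bits of the | |||
     | // address equal that segment's aperture base, so compare only the high half. | |||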
7092 | SDValue SrcHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, SrcVec, | |||
7093 | DAG.getConstant(1, SL, MVT::i32)); | |||
7094 | return DAG.getSetCC(SL, MVT::i1, SrcHi, Aperture, ISD::SETEQ); | |||
7095 | } | |||
7096 | case Intrinsic::amdgcn_perm: | |||
7097 | return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, Op.getOperand(1), | |||
7098 | Op.getOperand(2), Op.getOperand(3)); | |||
7099 | case Intrinsic::amdgcn_reloc_constant: { | |||
7100 | Module *M = const_cast<Module *>(MF.getFunction().getParent()); | |||
7101 | const MDNode *Metadata = cast<MDNodeSDNode>(Op.getOperand(1))->getMD(); | |||
7102 | auto SymbolName = cast<MDString>(Metadata->getOperand(0))->getString(); | |||
7103 | auto RelocSymbol = cast<GlobalVariable>( | |||
7104 | M->getOrInsertGlobal(SymbolName, Type::getInt32Ty(M->getContext()))); | |||
7105 | SDValue GA = DAG.getTargetGlobalAddress(RelocSymbol, DL, MVT::i32, 0, | |||
7106 | SIInstrInfo::MO_ABS32_LO); | |||
7107 | return {DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, GA), 0}; | |||
7108 | } | |||
7109 | default: | |||
7110 | if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr = | |||
7111 | AMDGPU::getImageDimIntrinsicInfo(IntrinsicID)) | |||
7112 | return lowerImage(Op, ImageDimIntr, DAG, false); | |||
7113 | ||||
7114 | return Op; | |||
7115 | } | |||
7116 | } | |||
7117 | ||||
7118 | /// Update \p MMO based on the offset inputs to an intrinsic. | |||
7119 | static void updateBufferMMO(MachineMemOperand *MMO, SDValue VOffset, | |||
7120 | SDValue SOffset, SDValue Offset, | |||
7121 | SDValue VIndex = SDValue()) { | |||
7122 | if (!isa<ConstantSDNode>(VOffset) || !isa<ConstantSDNode>(SOffset) || | |||
7123 | !isa<ConstantSDNode>(Offset)) { | |||
7124 | // The combined offset is not known to be constant, so we cannot represent | |||
7125 | // it in the MMO. Give up. | |||
7126 | MMO->setValue((Value *)nullptr); | |||
7127 | return; | |||
7128 | } | |||
7129 | ||||
7130 | if (VIndex && (!isa<ConstantSDNode>(VIndex) || | |||
7131 | !cast<ConstantSDNode>(VIndex)->isZero())) { | |||
7132 | // The strided index component of the address is not known to be zero, so we | |||
7133 | // cannot represent it in the MMO. Give up. | |||
7134 | MMO->setValue((Value *)nullptr); | |||
7135 | return; | |||
7136 | } | |||
7137 | ||||
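     | // All three components are known constants, so fold them into a single byte | |||
     | // offset. For example (hypothetical values), voffset = 16, soffset = 4 and | |||
     | // offset = 8 combine to an MMO offset of 28. | |||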
7138 | MMO->setOffset(cast<ConstantSDNode>(VOffset)->getSExtValue() + | |||
7139 | cast<ConstantSDNode>(SOffset)->getSExtValue() + | |||
7140 | cast<ConstantSDNode>(Offset)->getSExtValue()); | |||
7141 | } | |||
7142 | ||||
7143 | SDValue SITargetLowering::lowerRawBufferAtomicIntrin(SDValue Op, | |||
7144 | SelectionDAG &DAG, | |||
7145 | unsigned NewOpcode) const { | |||
7146 | SDLoc DL(Op); | |||
7147 | ||||
7148 | SDValue VData = Op.getOperand(2); | |||
7149 | auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG); | |||
7150 | SDValue Ops[] = { | |||
7151 | Op.getOperand(0), // Chain | |||
7152 | VData, // vdata | |||
7153 | Op.getOperand(3), // rsrc | |||
7154 | DAG.getConstant(0, DL, MVT::i32), // vindex | |||
7155 | Offsets.first, // voffset | |||
7156 | Op.getOperand(5), // soffset | |||
7157 | Offsets.second, // offset | |||
7158 | Op.getOperand(6), // cachepolicy | |||
7159 | DAG.getTargetConstant(0, DL, MVT::i1), // idxen | |||
7160 | }; | |||
7161 | ||||
7162 | auto *M = cast<MemSDNode>(Op); | |||
7163 | updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6]); | |||
7164 | ||||
7165 | EVT MemVT = VData.getValueType(); | |||
7166 | return DAG.getMemIntrinsicNode(NewOpcode, DL, Op->getVTList(), Ops, MemVT, | |||
7167 | M->getMemOperand()); | |||
7168 | } | |||
7169 | ||||
7170 | // Return a value to use for the idxen operand by examining the vindex operand. | |||
7171 | static unsigned getIdxEn(SDValue VIndex) { | |||
7172 | if (auto VIndexC = dyn_cast<ConstantSDNode>(VIndex)) | |||
7173 | // No need to set idxen if vindex is known to be zero. | |||
7174 | return VIndexC->getZExtValue() != 0; | |||
7175 | return 1; | |||
7176 | } | |||
7177 | ||||
7178 | SDValue | |||
7179 | SITargetLowering::lowerStructBufferAtomicIntrin(SDValue Op, SelectionDAG &DAG, | |||
7180 | unsigned NewOpcode) const { | |||
7181 | SDLoc DL(Op); | |||
7182 | ||||
7183 | SDValue VData = Op.getOperand(2); | |||
7184 | auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG); | |||
7185 | SDValue Ops[] = { | |||
7186 | Op.getOperand(0), // Chain | |||
7187 | VData, // vdata | |||
7188 | Op.getOperand(3), // rsrc | |||
7189 | Op.getOperand(4), // vindex | |||
7190 | Offsets.first, // voffset | |||
7191 | Op.getOperand(6), // soffset | |||
7192 | Offsets.second, // offset | |||
7193 | Op.getOperand(7), // cachepolicy | |||
7194 | DAG.getTargetConstant(1, DL, MVT::i1), // idxen | |||
7195 | }; | |||
7196 | ||||
7197 | auto *M = cast<MemSDNode>(Op); | |||
7198 | updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6], Ops[3]); | |||
7199 | ||||
7200 | EVT MemVT = VData.getValueType(); | |||
7201 | return DAG.getMemIntrinsicNode(NewOpcode, DL, Op->getVTList(), Ops, MemVT, | |||
7202 | M->getMemOperand()); | |||
7203 | } | |||
7204 | ||||
7205 | SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op, | |||
7206 | SelectionDAG &DAG) const { | |||
7207 | unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); | |||
7208 | SDLoc DL(Op); | |||
7209 | ||||
7210 | switch (IntrID) { | |||
7211 | case Intrinsic::amdgcn_ds_ordered_add: | |||
7212 | case Intrinsic::amdgcn_ds_ordered_swap: { | |||
7213 | MemSDNode *M = cast<MemSDNode>(Op); | |||
7214 | SDValue Chain = M->getOperand(0); | |||
7215 | SDValue M0 = M->getOperand(2); | |||
7216 | SDValue Value = M->getOperand(3); | |||
7217 | unsigned IndexOperand = M->getConstantOperandVal(7); | |||
7218 | unsigned WaveRelease = M->getConstantOperandVal(8); | |||
7219 | unsigned WaveDone = M->getConstantOperandVal(9); | |||
7220 | ||||
7221 | unsigned OrderedCountIndex = IndexOperand & 0x3f; | |||
7222 | IndexOperand &= ~0x3f; | |||
7223 | unsigned CountDw = 0; | |||
7224 | ||||
7225 | if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10) { | |||
7226 | CountDw = (IndexOperand >> 24) & 0xf; | |||
7227 | IndexOperand &= ~(0xf << 24); | |||
7228 | ||||
7229 | if (CountDw < 1 || CountDw > 4) { | |||
7230 | report_fatal_error( | |||
7231 | "ds_ordered_count: dword count must be between 1 and 4"); | |||
7232 | } | |||
7233 | } | |||
7234 | ||||
7235 | if (IndexOperand) | |||
7236 | report_fatal_error("ds_ordered_count: bad index operand"); | |||
7237 | ||||
7238 | if (WaveDone && !WaveRelease) | |||
7239 | report_fatal_error("ds_ordered_count: wave_done requires wave_release"); | |||
7240 | ||||
7241 | unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1; | |||
7242 | unsigned ShaderType = | |||
7243 | SIInstrInfo::getDSShaderTypeValue(DAG.getMachineFunction()); | |||
7244 | unsigned Offset0 = OrderedCountIndex << 2; | |||
7245 | unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) | | |||
7246 | (Instruction << 4); | |||
7247 | ||||
7248 | if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10) | |||
7249 | Offset1 |= (CountDw - 1) << 6; | |||
7250 | ||||
7251 | unsigned Offset = Offset0 | (Offset1 << 8); | |||
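     | // Resulting 16-bit offset layout, per the shifts above: bits [7:0] hold | |||
     | // index*4, bit 8 wave_release, bit 9 wave_done, bits [11:10] shader type, | |||
     | // bit 12 instruction (add=0, swap=1), bits [15:14] dword count - 1 (gfx10+). | |||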
7252 | ||||
7253 | SDValue Ops[] = { | |||
7254 | Chain, | |||
7255 | Value, | |||
7256 | DAG.getTargetConstant(Offset, DL, MVT::i16), | |||
7257 | copyToM0(DAG, Chain, DL, M0).getValue(1), // Glue | |||
7258 | }; | |||
7259 | return DAG.getMemIntrinsicNode(AMDGPUISD::DS_ORDERED_COUNT, DL, | |||
7260 | M->getVTList(), Ops, M->getMemoryVT(), | |||
7261 | M->getMemOperand()); | |||
7262 | } | |||
7263 | case Intrinsic::amdgcn_ds_fadd: { | |||
7264 | MemSDNode *M = cast<MemSDNode>(Op); | |||
7265 | unsigned Opc; | |||
7266 | switch (IntrID) { | |||
7267 | case Intrinsic::amdgcn_ds_fadd: | |||
7268 | Opc = ISD::ATOMIC_LOAD_FADD; | |||
7269 | break; | |||
7270 | } | |||
7271 | ||||
7272 | return DAG.getAtomic(Opc, SDLoc(Op), M->getMemoryVT(), | |||
7273 | M->getOperand(0), M->getOperand(2), M->getOperand(3), | |||
7274 | M->getMemOperand()); | |||
7275 | } | |||
7276 | case Intrinsic::amdgcn_atomic_inc: | |||
7277 | case Intrinsic::amdgcn_atomic_dec: | |||
7278 | case Intrinsic::amdgcn_ds_fmin: | |||
7279 | case Intrinsic::amdgcn_ds_fmax: { | |||
7280 | MemSDNode *M = cast<MemSDNode>(Op); | |||
7281 | unsigned Opc; | |||
7282 | switch (IntrID) { | |||
7283 | case Intrinsic::amdgcn_atomic_inc: | |||
7284 | Opc = AMDGPUISD::ATOMIC_INC; | |||
7285 | break; | |||
7286 | case Intrinsic::amdgcn_atomic_dec: | |||
7287 | Opc = AMDGPUISD::ATOMIC_DEC; | |||
7288 | break; | |||
7289 | case Intrinsic::amdgcn_ds_fmin: | |||
7290 | Opc = AMDGPUISD::ATOMIC_LOAD_FMIN; | |||
7291 | break; | |||
7292 | case Intrinsic::amdgcn_ds_fmax: | |||
7293 | Opc = AMDGPUISD::ATOMIC_LOAD_FMAX; | |||
7294 | break; | |||
7295 | default: | |||
7296 | llvm_unreachable("Unknown intrinsic!"); | |||
7297 | } | |||
7298 | SDValue Ops[] = { | |||
7299 | M->getOperand(0), // Chain | |||
7300 | M->getOperand(2), // Ptr | |||
7301 | M->getOperand(3) // Value | |||
7302 | }; | |||
7303 | ||||
7304 | return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops, | |||
7305 | M->getMemoryVT(), M->getMemOperand()); | |||
7306 | } | |||
7307 | case Intrinsic::amdgcn_buffer_load: | |||
7308 | case Intrinsic::amdgcn_buffer_load_format: { | |||
7309 | unsigned Glc = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue(); | |||
7310 | unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue(); | |||
7311 | unsigned IdxEn = getIdxEn(Op.getOperand(3)); | |||
7312 | SDValue Ops[] = { | |||
7313 | Op.getOperand(0), // Chain | |||
7314 | Op.getOperand(2), // rsrc | |||
7315 | Op.getOperand(3), // vindex | |||
7316 | SDValue(), // voffset -- will be set by setBufferOffsets | |||
7317 | SDValue(), // soffset -- will be set by setBufferOffsets | |||
7318 | SDValue(), // offset -- will be set by setBufferOffsets | |||
7319 | DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy | |||
7320 | DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen | |||
7321 | }; | |||
7322 | setBufferOffsets(Op.getOperand(4), DAG, &Ops[3]); | |||
7323 | ||||
7324 | unsigned Opc = (IntrID == Intrinsic::amdgcn_buffer_load) ? | |||
7325 | AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT; | |||
7326 | ||||
7327 | EVT VT = Op.getValueType(); | |||
7328 | EVT IntVT = VT.changeTypeToInteger(); | |||
7329 | auto *M = cast<MemSDNode>(Op); | |||
7330 | updateBufferMMO(M->getMemOperand(), Ops[3], Ops[4], Ops[5], Ops[2]); | |||
7331 | EVT LoadVT = Op.getValueType(); | |||
7332 | ||||
7333 | if (LoadVT.getScalarType() == MVT::f16) | |||
7334 | return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16, | |||
7335 | M, DAG, Ops); | |||
7336 | ||||
7337 | // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics | |||
7338 | if (LoadVT.getScalarType() == MVT::i8 || | |||
7339 | LoadVT.getScalarType() == MVT::i16) | |||
7340 | return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M); | |||
7341 | ||||
7342 | return getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT, | |||
7343 | M->getMemOperand(), DAG); | |||
7344 | } | |||
7345 | case Intrinsic::amdgcn_raw_buffer_load: | |||
7346 | case Intrinsic::amdgcn_raw_buffer_load_format: { | |||
7347 | const bool IsFormat = IntrID == Intrinsic::amdgcn_raw_buffer_load_format; | |||
7348 | ||||
7349 | auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG); | |||
7350 | SDValue Ops[] = { | |||
7351 | Op.getOperand(0), // Chain | |||
7352 | Op.getOperand(2), // rsrc | |||
7353 | DAG.getConstant(0, DL, MVT::i32), // vindex | |||
7354 | Offsets.first, // voffset | |||
7355 | Op.getOperand(4), // soffset | |||
7356 | Offsets.second, // offset | |||
7357 | Op.getOperand(5), // cachepolicy, swizzled buffer | |||
7358 | DAG.getTargetConstant(0, DL, MVT::i1), // idxen | |||
7359 | }; | |||
7360 | ||||
7361 | auto *M = cast<MemSDNode>(Op); | |||
7362 | updateBufferMMO(M->getMemOperand(), Ops[3], Ops[4], Ops[5]); | |||
7363 | return lowerIntrinsicLoad(M, IsFormat, DAG, Ops); | |||
7364 | } | |||
7365 | case Intrinsic::amdgcn_struct_buffer_load: | |||
7366 | case Intrinsic::amdgcn_struct_buffer_load_format: { | |||
7367 | const bool IsFormat = IntrID == Intrinsic::amdgcn_struct_buffer_load_format; | |||
7368 | ||||
7369 | auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG); | |||
7370 | SDValue Ops[] = { | |||
7371 | Op.getOperand(0), // Chain | |||
7372 | Op.getOperand(2), // rsrc | |||
7373 | Op.getOperand(3), // vindex | |||
7374 | Offsets.first, // voffset | |||
7375 | Op.getOperand(5), // soffset | |||
7376 | Offsets.second, // offset | |||
7377 | Op.getOperand(6), // cachepolicy, swizzled buffer | |||
7378 | DAG.getTargetConstant(1, DL, MVT::i1), // idxen | |||
7379 | }; | |||
7380 | ||||
7381 | auto *M = cast<MemSDNode>(Op); | |||
7382 | updateBufferMMO(M->getMemOperand(), Ops[3], Ops[4], Ops[5], Ops[2]); | |||
7383 | return lowerIntrinsicLoad(cast<MemSDNode>(Op), IsFormat, DAG, Ops); | |||
7384 | } | |||
7385 | case Intrinsic::amdgcn_tbuffer_load: { | |||
7386 | MemSDNode *M = cast<MemSDNode>(Op); | |||
7387 | EVT LoadVT = Op.getValueType(); | |||
7388 | ||||
7389 | unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue(); | |||
7390 | unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue(); | |||
7391 | unsigned Glc = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue(); | |||
7392 | unsigned Slc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue(); | |||
7393 | unsigned IdxEn = getIdxEn(Op.getOperand(3)); | |||
7394 | SDValue Ops[] = { | |||
7395 | Op.getOperand(0), // Chain | |||
7396 | Op.getOperand(2), // rsrc | |||
7397 | Op.getOperand(3), // vindex | |||
7398 | Op.getOperand(4), // voffset | |||
7399 | Op.getOperand(5), // soffset | |||
7400 | Op.getOperand(6), // offset | |||
7401 | DAG.getTargetConstant(Dfmt | (Nfmt << 4), DL, MVT::i32), // format | |||
7402 | DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy | |||
7403 | DAG.getTargetConstant(IdxEn, DL, MVT::i1) // idxen | |||
7404 | }; | |||
7405 | ||||
7406 | if (LoadVT.getScalarType() == MVT::f16) | |||
7407 | return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16, | |||
7408 | M, DAG, Ops); | |||
7409 | return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL, | |||
7410 | Op->getVTList(), Ops, LoadVT, M->getMemOperand(), | |||
7411 | DAG); | |||
7412 | } | |||
7413 | case Intrinsic::amdgcn_raw_tbuffer_load: { | |||
7414 | MemSDNode *M = cast<MemSDNode>(Op); | |||
7415 | EVT LoadVT = Op.getValueType(); | |||
7416 | auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG); | |||
7417 | ||||
7418 | SDValue Ops[] = { | |||
7419 | Op.getOperand(0), // Chain | |||
7420 | Op.getOperand(2), // rsrc | |||
7421 | DAG.getConstant(0, DL, MVT::i32), // vindex | |||
7422 | Offsets.first, // voffset | |||
7423 | Op.getOperand(4), // soffset | |||
7424 | Offsets.second, // offset | |||
7425 | Op.getOperand(5), // format | |||
7426 | Op.getOperand(6), // cachepolicy, swizzled buffer | |||
7427 | DAG.getTargetConstant(0, DL, MVT::i1), // idxen | |||
7428 | }; | |||
7429 | ||||
7430 | if (LoadVT.getScalarType() == MVT::f16) | |||
7431 | return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16, | |||
7432 | M, DAG, Ops); | |||
7433 | return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL, | |||
7434 | Op->getVTList(), Ops, LoadVT, M->getMemOperand(), | |||
7435 | DAG); | |||
7436 | } | |||
7437 | case Intrinsic::amdgcn_struct_tbuffer_load: { | |||
7438 | MemSDNode *M = cast<MemSDNode>(Op); | |||
7439 | EVT LoadVT = Op.getValueType(); | |||
7440 | auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG); | |||
7441 | ||||
7442 | SDValue Ops[] = { | |||
7443 | Op.getOperand(0), // Chain | |||
7444 | Op.getOperand(2), // rsrc | |||
7445 | Op.getOperand(3), // vindex | |||
7446 | Offsets.first, // voffset | |||
7447 | Op.getOperand(5), // soffset | |||
7448 | Offsets.second, // offset | |||
7449 | Op.getOperand(6), // format | |||
7450 | Op.getOperand(7), // cachepolicy, swizzled buffer | |||
7451 | DAG.getTargetConstant(1, DL, MVT::i1), // idxen | |||
7452 | }; | |||
7453 | ||||
7454 | if (LoadVT.getScalarType() == MVT::f16) | |||
7455 | return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16, | |||
7456 | M, DAG, Ops); | |||
7457 | return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL, | |||
7458 | Op->getVTList(), Ops, LoadVT, M->getMemOperand(), | |||
7459 | DAG); | |||
7460 | } | |||
7461 | case Intrinsic::amdgcn_buffer_atomic_swap: | |||
7462 | case Intrinsic::amdgcn_buffer_atomic_add: | |||
7463 | case Intrinsic::amdgcn_buffer_atomic_sub: | |||
7464 | case Intrinsic::amdgcn_buffer_atomic_csub: | |||
7465 | case Intrinsic::amdgcn_buffer_atomic_smin: | |||
7466 | case Intrinsic::amdgcn_buffer_atomic_umin: | |||
7467 | case Intrinsic::amdgcn_buffer_atomic_smax: | |||
7468 | case Intrinsic::amdgcn_buffer_atomic_umax: | |||
7469 | case Intrinsic::amdgcn_buffer_atomic_and: | |||
7470 | case Intrinsic::amdgcn_buffer_atomic_or: | |||
7471 | case Intrinsic::amdgcn_buffer_atomic_xor: | |||
7472 | case Intrinsic::amdgcn_buffer_atomic_fadd: { | |||
7473 | unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue(); | |||
7474 | unsigned IdxEn = getIdxEn(Op.getOperand(4)); | |||
7475 | SDValue Ops[] = { | |||
7476 | Op.getOperand(0), // Chain | |||
7477 | Op.getOperand(2), // vdata | |||
7478 | Op.getOperand(3), // rsrc | |||
7479 | Op.getOperand(4), // vindex | |||
7480 | SDValue(), // voffset -- will be set by setBufferOffsets | |||
7481 | SDValue(), // soffset -- will be set by setBufferOffsets | |||
7482 | SDValue(), // offset -- will be set by setBufferOffsets | |||
7483 | DAG.getTargetConstant(Slc << 1, DL, MVT::i32), // cachepolicy | |||
7484 | DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen | |||
7485 | }; | |||
7486 | setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]); | |||
7487 | ||||
7488 | EVT VT = Op.getValueType(); | |||
7489 | ||||
7490 | auto *M = cast<MemSDNode>(Op); | |||
7491 | updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6], Ops[3]); | |||
7492 | unsigned Opcode = 0; | |||
7493 | ||||
7494 | switch (IntrID) { | |||
7495 | case Intrinsic::amdgcn_buffer_atomic_swap: | |||
7496 | Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP; | |||
7497 | break; | |||
7498 | case Intrinsic::amdgcn_buffer_atomic_add: | |||
7499 | Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD; | |||
7500 | break; | |||
7501 | case Intrinsic::amdgcn_buffer_atomic_sub: | |||
7502 | Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB; | |||
7503 | break; | |||
7504 | case Intrinsic::amdgcn_buffer_atomic_csub: | |||
7505 | Opcode = AMDGPUISD::BUFFER_ATOMIC_CSUB; | |||
7506 | break; | |||
7507 | case Intrinsic::amdgcn_buffer_atomic_smin: | |||
7508 | Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN; | |||
7509 | break; | |||
7510 | case Intrinsic::amdgcn_buffer_atomic_umin: | |||
7511 | Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN; | |||
7512 | break; | |||
7513 | case Intrinsic::amdgcn_buffer_atomic_smax: | |||
7514 | Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX; | |||
7515 | break; | |||
7516 | case Intrinsic::amdgcn_buffer_atomic_umax: | |||
7517 | Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX; | |||
7518 | break; | |||
7519 | case Intrinsic::amdgcn_buffer_atomic_and: | |||
7520 | Opcode = AMDGPUISD::BUFFER_ATOMIC_AND; | |||
7521 | break; | |||
7522 | case Intrinsic::amdgcn_buffer_atomic_or: | |||
7523 | Opcode = AMDGPUISD::BUFFER_ATOMIC_OR; | |||
7524 | break; | |||
7525 | case Intrinsic::amdgcn_buffer_atomic_xor: | |||
7526 | Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR; | |||
7527 | break; | |||
7528 | case Intrinsic::amdgcn_buffer_atomic_fadd: | |||
7529 | if (!Op.getValue(0).use_empty() && !Subtarget->hasGFX90AInsts()) { | |||
7530 | DiagnosticInfoUnsupported | |||
7531 | NoFpRet(DAG.getMachineFunction().getFunction(), | |||
7532 | "return versions of fp atomics not supported", | |||
7533 | DL.getDebugLoc(), DS_Error); | |||
7534 | DAG.getContext()->diagnose(NoFpRet); | |||
7535 | return SDValue(); | |||
7536 | } | |||
7537 | Opcode = AMDGPUISD::BUFFER_ATOMIC_FADD; | |||
7538 | break; | |||
7539 | default: | |||
7540 | llvm_unreachable("unhandled atomic opcode"); | |||
7541 | } | |||
7542 | ||||
7543 | return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT, | |||
7544 | M->getMemOperand()); | |||
7545 | } | |||
7546 | case Intrinsic::amdgcn_raw_buffer_atomic_fadd: | |||
7547 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_FADD); | |||
7548 | case Intrinsic::amdgcn_struct_buffer_atomic_fadd: | |||
7549 | return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_FADD); | |||
7550 | case Intrinsic::amdgcn_raw_buffer_atomic_fmin: | |||
7551 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_FMIN); | |||
7552 | case Intrinsic::amdgcn_struct_buffer_atomic_fmin: | |||
7553 | return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_FMIN); | |||
7554 | case Intrinsic::amdgcn_raw_buffer_atomic_fmax: | |||
7555 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_FMAX); | |||
7556 | case Intrinsic::amdgcn_struct_buffer_atomic_fmax: | |||
7557 | return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_FMAX); | |||
7558 | case Intrinsic::amdgcn_raw_buffer_atomic_swap: | |||
7559 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_SWAP); | |||
7560 | case Intrinsic::amdgcn_raw_buffer_atomic_add: | |||
7561 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_ADD); | |||
7562 | case Intrinsic::amdgcn_raw_buffer_atomic_sub: | |||
7563 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_SUB); | |||
7564 | case Intrinsic::amdgcn_raw_buffer_atomic_smin: | |||
7565 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_SMIN); | |||
7566 | case Intrinsic::amdgcn_raw_buffer_atomic_umin: | |||
7567 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_UMIN); | |||
7568 | case Intrinsic::amdgcn_raw_buffer_atomic_smax: | |||
7569 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_SMAX); | |||
7570 | case Intrinsic::amdgcn_raw_buffer_atomic_umax: | |||
7571 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_UMAX); | |||
7572 | case Intrinsic::amdgcn_raw_buffer_atomic_and: | |||
7573 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_AND); | |||
7574 | case Intrinsic::amdgcn_raw_buffer_atomic_or: | |||
7575 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_OR); | |||
7576 | case Intrinsic::amdgcn_raw_buffer_atomic_xor: | |||
7577 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_XOR); | |||
7578 | case Intrinsic::amdgcn_raw_buffer_atomic_inc: | |||
7579 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_INC); | |||
7580 | case Intrinsic::amdgcn_raw_buffer_atomic_dec: | |||
7581 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_DEC); | |||
7582 | case Intrinsic::amdgcn_struct_buffer_atomic_swap: | |||
7583 | return lowerStructBufferAtomicIntrin(Op, DAG, | |||
7584 | AMDGPUISD::BUFFER_ATOMIC_SWAP); | |||
7585 | case Intrinsic::amdgcn_struct_buffer_atomic_add: | |||
7586 | return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_ADD); | |||
7587 | case Intrinsic::amdgcn_struct_buffer_atomic_sub: | |||
7588 | return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_SUB); | |||
7589 | case Intrinsic::amdgcn_struct_buffer_atomic_smin: | |||
7590 | return lowerStructBufferAtomicIntrin(Op, DAG, | |||
7591 | AMDGPUISD::BUFFER_ATOMIC_SMIN); | |||
7592 | case Intrinsic::amdgcn_struct_buffer_atomic_umin: | |||
7593 | return lowerStructBufferAtomicIntrin(Op, DAG, | |||
7594 | AMDGPUISD::BUFFER_ATOMIC_UMIN); | |||
7595 | case Intrinsic::amdgcn_struct_buffer_atomic_smax: | |||
7596 | return lowerStructBufferAtomicIntrin(Op, DAG, | |||
7597 | AMDGPUISD::BUFFER_ATOMIC_SMAX); | |||
7598 | case Intrinsic::amdgcn_struct_buffer_atomic_umax: | |||
7599 | return lowerStructBufferAtomicIntrin(Op, DAG, | |||
7600 | AMDGPUISD::BUFFER_ATOMIC_UMAX); | |||
7601 | case Intrinsic::amdgcn_struct_buffer_atomic_and: | |||
7602 | return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_AND); | |||
7603 | case Intrinsic::amdgcn_struct_buffer_atomic_or: | |||
7604 | return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_OR); | |||
7605 | case Intrinsic::amdgcn_struct_buffer_atomic_xor: | |||
7606 | return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_XOR); | |||
7607 | case Intrinsic::amdgcn_struct_buffer_atomic_inc: | |||
7608 | return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_INC); | |||
7609 | case Intrinsic::amdgcn_struct_buffer_atomic_dec: | |||
7610 | return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_DEC); | |||
7611 | ||||
7612 | case Intrinsic::amdgcn_buffer_atomic_cmpswap: { | |||
7613 | unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue(); | |||
7614 | unsigned IdxEn = getIdxEn(Op.getOperand(5)); | |||
7615 | SDValue Ops[] = { | |||
7616 | Op.getOperand(0), // Chain | |||
7617 | Op.getOperand(2), // src | |||
7618 | Op.getOperand(3), // cmp | |||
7619 | Op.getOperand(4), // rsrc | |||
7620 | Op.getOperand(5), // vindex | |||
7621 | SDValue(), // voffset -- will be set by setBufferOffsets | |||
7622 | SDValue(), // soffset -- will be set by setBufferOffsets | |||
7623 | SDValue(), // offset -- will be set by setBufferOffsets | |||
7624 | DAG.getTargetConstant(Slc << 1, DL, MVT::i32), // cachepolicy | |||
7625 | DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen | |||
7626 | }; | |||
7627 | setBufferOffsets(Op.getOperand(6), DAG, &Ops[5]); | |||
7628 | ||||
7629 | EVT VT = Op.getValueType(); | |||
7630 | auto *M = cast<MemSDNode>(Op); | |||
7631 | updateBufferMMO(M->getMemOperand(), Ops[5], Ops[6], Ops[7], Ops[4]); | |||
7632 | ||||
7633 | return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL, | |||
7634 | Op->getVTList(), Ops, VT, M->getMemOperand()); | |||
7635 | } | |||
7636 | case Intrinsic::amdgcn_raw_buffer_atomic_cmpswap: { | |||
7637 | auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG); | |||
7638 | SDValue Ops[] = { | |||
7639 | Op.getOperand(0), // Chain | |||
7640 | Op.getOperand(2), // src | |||
7641 | Op.getOperand(3), // cmp | |||
7642 | Op.getOperand(4), // rsrc | |||
7643 | DAG.getConstant(0, DL, MVT::i32), // vindex | |||
7644 | Offsets.first, // voffset | |||
7645 | Op.getOperand(6), // soffset | |||
7646 | Offsets.second, // offset | |||
7647 | Op.getOperand(7), // cachepolicy | |||
7648 | DAG.getTargetConstant(0, DL, MVT::i1), // idxen | |||
7649 | }; | |||
7650 | EVT VT = Op.getValueType(); | |||
7651 | auto *M = cast<MemSDNode>(Op); | |||
7652 | updateBufferMMO(M->getMemOperand(), Ops[5], Ops[6], Ops[7]); | |||
7653 | ||||
7654 | return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL, | |||
7655 | Op->getVTList(), Ops, VT, M->getMemOperand()); | |||
7656 | } | |||
7657 | case Intrinsic::amdgcn_struct_buffer_atomic_cmpswap: { | |||
7658 | auto Offsets = splitBufferOffsets(Op.getOperand(6), DAG); | |||
7659 | SDValue Ops[] = { | |||
7660 | Op.getOperand(0), // Chain | |||
7661 | Op.getOperand(2), // src | |||
7662 | Op.getOperand(3), // cmp | |||
7663 | Op.getOperand(4), // rsrc | |||
7664 | Op.getOperand(5), // vindex | |||
7665 | Offsets.first, // voffset | |||
7666 | Op.getOperand(7), // soffset | |||
7667 | Offsets.second, // offset | |||
7668 | Op.getOperand(8), // cachepolicy | |||
7669 | DAG.getTargetConstant(1, DL, MVT::i1), // idxen | |||
7670 | }; | |||
7671 | EVT VT = Op.getValueType(); | |||
7672 | auto *M = cast<MemSDNode>(Op); | |||
7673 | updateBufferMMO(M->getMemOperand(), Ops[5], Ops[6], Ops[7], Ops[4]); | |||
7674 | ||||
7675 | return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL, | |||
7676 | Op->getVTList(), Ops, VT, M->getMemOperand()); | |||
7677 | } | |||
7678 | case Intrinsic::amdgcn_image_bvh_intersect_ray: { | |||
7679 | MemSDNode *M = cast<MemSDNode>(Op); | |||
7680 | SDValue NodePtr = M->getOperand(2); | |||
7681 | SDValue RayExtent = M->getOperand(3); | |||
7682 | SDValue RayOrigin = M->getOperand(4); | |||
7683 | SDValue RayDir = M->getOperand(5); | |||
7684 | SDValue RayInvDir = M->getOperand(6); | |||
7685 | SDValue TDescr = M->getOperand(7); | |||
7686 | ||||
7687 | assert(NodePtr.getValueType() == MVT::i32 || | |||
7688 | NodePtr.getValueType() == MVT::i64); | |||
7689 | assert(RayDir.getValueType() == MVT::v3f16 || | |||
7690 | RayDir.getValueType() == MVT::v3f32); | |||
7691 | ||||
7692 | if (!Subtarget->hasGFX10_AEncoding()) { | |||
7693 | emitRemovedIntrinsicError(DAG, DL, Op.getValueType()); | |||
7694 | return SDValue(); | |||
7695 | } | |||
7696 | ||||
7697 | const bool IsA16 = RayDir.getValueType().getVectorElementType() == MVT::f16; | |||
7698 | const bool Is64 = NodePtr.getValueType() == MVT::i64; | |||
7699 | const unsigned NumVDataDwords = 4; | |||
7700 | const unsigned NumVAddrDwords = IsA16 ? (Is64 ? 9 : 8) : (Is64 ? 12 : 11); | |||
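     | // Address dword accounting: node pointer (1 or 2) + ray extent (1) + | |||
     | // origin (3 x f32) + direction and inverse direction (3 + 3 dwords as f32, | |||
     | // or 3 dwords total when packed as f16 pairs below). | |||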
7701 | const bool UseNSA = Subtarget->hasNSAEncoding() && | |||
7702 | NumVAddrDwords <= Subtarget->getNSAMaxSize(); | |||
7703 | const unsigned BaseOpcodes[2][2] = { | |||
7704 | {AMDGPU::IMAGE_BVH_INTERSECT_RAY, AMDGPU::IMAGE_BVH_INTERSECT_RAY_a16}, | |||
7705 | {AMDGPU::IMAGE_BVH64_INTERSECT_RAY, | |||
7706 | AMDGPU::IMAGE_BVH64_INTERSECT_RAY_a16}}; | |||
7707 | int Opcode; | |||
7708 | if (UseNSA) { | |||
7709 | Opcode = AMDGPU::getMIMGOpcode(BaseOpcodes[Is64][IsA16], | |||
7710 | AMDGPU::MIMGEncGfx10NSA, NumVDataDwords, | |||
7711 | NumVAddrDwords); | |||
7712 | } else { | |||
7713 | Opcode = AMDGPU::getMIMGOpcode( | |||
7714 | BaseOpcodes[Is64][IsA16], AMDGPU::MIMGEncGfx10Default, NumVDataDwords, | |||
7715 | PowerOf2Ceil(NumVAddrDwords)); | |||
7716 | } | |||
7717 | assert(Opcode != -1); | |||
7718 | ||||
7719 | SmallVector<SDValue, 16> Ops; | |||
7720 | ||||
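     | // packLanes pushes a 3-component vector as i32 address dwords: f32 lanes | |||
     | // are bitcast one per dword, while f16 lanes are packed two per dword. When | |||
     | // IsAligned is false, the dangling half left by the previous aligned call | |||
     | // is popped and merged with the incoming lanes. | |||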
7721 | auto packLanes = [&DAG, &Ops, &DL] (SDValue Op, bool IsAligned) { | |||
7722 | SmallVector<SDValue, 3> Lanes; | |||
7723 | DAG.ExtractVectorElements(Op, Lanes, 0, 3); | |||
7724 | if (Lanes[0].getValueSizeInBits() == 32) { | |||
7725 | for (unsigned I = 0; I < 3; ++I) | |||
7726 | Ops.push_back(DAG.getBitcast(MVT::i32, Lanes[I])); | |||
7727 | } else { | |||
7728 | if (IsAligned) { | |||
7729 | Ops.push_back( | |||
7730 | DAG.getBitcast(MVT::i32, | |||
7731 | DAG.getBuildVector(MVT::v2f16, DL, | |||
7732 | { Lanes[0], Lanes[1] }))); | |||
7733 | Ops.push_back(Lanes[2]); | |||
7734 | } else { | |||
7735 | SDValue Elt0 = Ops.pop_back_val(); | |||
7736 | Ops.push_back( | |||
7737 | DAG.getBitcast(MVT::i32, | |||
7738 | DAG.getBuildVector(MVT::v2f16, DL, | |||
7739 | { Elt0, Lanes[0] }))); | |||
7740 | Ops.push_back( | |||
7741 | DAG.getBitcast(MVT::i32, | |||
7742 | DAG.getBuildVector(MVT::v2f16, DL, | |||
7743 | { Lanes[1], Lanes[2] }))); | |||
7744 | } | |||
7745 | } | |||
7746 | }; | |||
7747 | ||||
7748 | if (Is64) | |||
7749 | DAG.ExtractVectorElements(DAG.getBitcast(MVT::v2i32, NodePtr), Ops, 0, 2); | |||
7750 | else | |||
7751 | Ops.push_back(NodePtr); | |||
7752 | ||||
7753 | Ops.push_back(DAG.getBitcast(MVT::i32, RayExtent)); | |||
7754 | packLanes(RayOrigin, true); | |||
7755 | packLanes(RayDir, true); | |||
7756 | packLanes(RayInvDir, false); | |||
7757 | ||||
7758 | if (!UseNSA) { | |||
7759 | // Build a single vector containing all the operands prepared so far. | |||
7760 | if (NumVAddrDwords > 8) { | |||
7761 | SDValue Undef = DAG.getUNDEF(MVT::i32); | |||
7762 | Ops.append(16 - Ops.size(), Undef); | |||
7763 | } | |||
7764 | assert(Ops.size() == 8 || Ops.size() == 16); | |||
7765 | SDValue MergedOps = DAG.getBuildVector( | |||
7766 | Ops.size() == 16 ? MVT::v16i32 : MVT::v8i32, DL, Ops); | |||
7767 | Ops.clear(); | |||
7768 | Ops.push_back(MergedOps); | |||
7769 | } | |||
7770 | ||||
7771 | Ops.push_back(TDescr); | |||
7772 | if (IsA16) | |||
7773 | Ops.push_back(DAG.getTargetConstant(1, DL, MVT::i1)); | |||
7774 | Ops.push_back(M->getChain()); | |||
7775 | ||||
7776 | auto *NewNode = DAG.getMachineNode(Opcode, DL, M->getVTList(), Ops); | |||
7777 | MachineMemOperand *MemRef = M->getMemOperand(); | |||
7778 | DAG.setNodeMemRefs(NewNode, {MemRef}); | |||
7779 | return SDValue(NewNode, 0); | |||
7780 | } | |||
7781 | case Intrinsic::amdgcn_global_atomic_fadd: | |||
7782 | if (!Op.getValue(0).use_empty() && !Subtarget->hasGFX90AInsts()) { | |||
7783 | DiagnosticInfoUnsupported | |||
7784 | NoFpRet(DAG.getMachineFunction().getFunction(), | |||
7785 | "return versions of fp atomics not supported", | |||
7786 | DL.getDebugLoc(), DS_Error); | |||
7787 | DAG.getContext()->diagnose(NoFpRet); | |||
7788 | return SDValue(); | |||
7789 | } | |||
7790 | LLVM_FALLTHROUGH; | |||
7791 | case Intrinsic::amdgcn_global_atomic_fmin: | |||
7792 | case Intrinsic::amdgcn_global_atomic_fmax: | |||
7793 | case Intrinsic::amdgcn_flat_atomic_fadd: | |||
7794 | case Intrinsic::amdgcn_flat_atomic_fmin: | |||
7795 | case Intrinsic::amdgcn_flat_atomic_fmax: { | |||
7796 | MemSDNode *M = cast<MemSDNode>(Op); | |||
7797 | SDValue Ops[] = { | |||
7798 | M->getOperand(0), // Chain | |||
7799 | M->getOperand(2), // Ptr | |||
7800 | M->getOperand(3) // Value | |||
7801 | }; | |||
7802 | unsigned Opcode = 0; | |||
7803 | switch (IntrID) { | |||
7804 | case Intrinsic::amdgcn_global_atomic_fadd: | |||
7805 | case Intrinsic::amdgcn_flat_atomic_fadd: { | |||
7806 | EVT VT = Op.getOperand(3).getValueType(); | |||
7807 | return DAG.getAtomic(ISD::ATOMIC_LOAD_FADD, DL, VT, | |||
7808 | DAG.getVTList(VT, MVT::Other), Ops, | |||
7809 | M->getMemOperand()); | |||
7810 | } | |||
7811 | case Intrinsic::amdgcn_global_atomic_fmin: | |||
7812 | case Intrinsic::amdgcn_flat_atomic_fmin: { | |||
7813 | Opcode = AMDGPUISD::ATOMIC_LOAD_FMIN; | |||
7814 | break; | |||
7815 | } | |||
7816 | case Intrinsic::amdgcn_global_atomic_fmax: | |||
7817 | case Intrinsic::amdgcn_flat_atomic_fmax: { | |||
7818 | Opcode = AMDGPUISD::ATOMIC_LOAD_FMAX; | |||
7819 | break; | |||
7820 | } | |||
7821 | default: | |||
7822 | llvm_unreachable("unhandled atomic opcode"); | |||
7823 | } | |||
7824 | return DAG.getMemIntrinsicNode(Opcode, SDLoc(Op), | |||
7825 | M->getVTList(), Ops, M->getMemoryVT(), | |||
7826 | M->getMemOperand()); | |||
7827 | } | |||
7828 | default: | |||
7829 | ||||
7830 | if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr = | |||
7831 | AMDGPU::getImageDimIntrinsicInfo(IntrID)) | |||
7832 | return lowerImage(Op, ImageDimIntr, DAG, true); | |||
7833 | ||||
7834 | return SDValue(); | |||
7835 | } | |||
7836 | } | |||
7837 | ||||
7838 | // Call DAG.getMemIntrinsicNode for a load, but first widen a dwordx3 type to | |||
7839 | // dwordx4 if on SI. | |||
7840 | SDValue SITargetLowering::getMemIntrinsicNode(unsigned Opcode, const SDLoc &DL, | |||
7841 | SDVTList VTList, | |||
7842 | ArrayRef<SDValue> Ops, EVT MemVT, | |||
7843 | MachineMemOperand *MMO, | |||
7844 | SelectionDAG &DAG) const { | |||
7845 | EVT VT = VTList.VTs[0]; | |||
7846 | EVT WidenedVT = VT; | |||
7847 | EVT WidenedMemVT = MemVT; | |||
7848 | if (!Subtarget->hasDwordx3LoadStores() && | |||
7849 | (WidenedVT == MVT::v3i32 || WidenedVT == MVT::v3f32)) { | |||
7850 | WidenedVT = EVT::getVectorVT(*DAG.getContext(), | |||
7851 | WidenedVT.getVectorElementType(), 4); | |||
7852 | WidenedMemVT = EVT::getVectorVT(*DAG.getContext(), | |||
7853 | WidenedMemVT.getVectorElementType(), 4); | |||
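     | // The widened load may access up to 16 bytes, so widen the MMO to match. | |||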
7854 | MMO = DAG.getMachineFunction().getMachineMemOperand(MMO, 0, 16); | |||
7855 | } | |||
7856 | ||||
7857 | assert(VTList.NumVTs == 2); | |||
7858 | SDVTList WidenedVTList = DAG.getVTList(WidenedVT, VTList.VTs[1]); | |||
7859 | ||||
7860 | auto NewOp = DAG.getMemIntrinsicNode(Opcode, DL, WidenedVTList, Ops, | |||
7861 | WidenedMemVT, MMO); | |||
7862 | if (WidenedVT != VT) { | |||
7863 | auto Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, NewOp, | |||
7864 | DAG.getVectorIdxConstant(0, DL)); | |||
7865 | NewOp = DAG.getMergeValues({ Extract, SDValue(NewOp.getNode(), 1) }, DL); | |||
7866 | } | |||
7867 | return NewOp; | |||
7868 | } | |||
7869 | ||||
7870 | SDValue SITargetLowering::handleD16VData(SDValue VData, SelectionDAG &DAG, | |||
7871 | bool ImageStore) const { | |||
7872 | EVT StoreVT = VData.getValueType(); | |||
7873 | ||||
7874 | // No change for f16 and legal vector D16 types. | |||
7875 | if (!StoreVT.isVector()) | |||
7876 | return VData; | |||
7877 | ||||
7878 | SDLoc DL(VData); | |||
7879 | unsigned NumElements = StoreVT.getVectorNumElements(); | |||
7880 | ||||
7881 | if (Subtarget->hasUnpackedD16VMem()) { | |||
7882 | // We need to unpack the packed data to store. | |||
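     | // e.g. a v2f16 value becomes two i32 dwords, each holding one 16-bit | |||
     | // element zero-extended to 32 bits. | |||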
7883 | EVT IntStoreVT = StoreVT.changeTypeToInteger(); | |||
7884 | SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData); | |||
7885 | ||||
7886 | EVT EquivStoreVT = | |||
7887 | EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElements); | |||
7888 | SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, EquivStoreVT, IntVData); | |||
7889 | return DAG.UnrollVectorOp(ZExt.getNode()); | |||
7890 | } | |||
7891 | ||||
7892 | // The sq block of gfx8.1 does not estimate register use correctly for d16 | |||
7893 | // image store instructions. The data operand is computed as if it were not a | |||
7894 | // d16 image instruction. | |||
7895 | if (ImageStore && Subtarget->hasImageStoreD16Bug()) { | |||
7896 | // Bitcast to i16 | |||
7897 | EVT IntStoreVT = StoreVT.changeTypeToInteger(); | |||
7898 | SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData); | |||
7899 | ||||
7900 | // Decompose into scalars | |||
7901 | SmallVector<SDValue, 4> Elts; | |||
7902 | DAG.ExtractVectorElements(IntVData, Elts); | |||
7903 | ||||
7904 | // Group pairs of i16 into v2i16 and bitcast to i32 | |||
7905 | SmallVector<SDValue, 4> PackedElts; | |||
7906 | for (unsigned I = 0; I < Elts.size() / 2; I += 1) { | |||
7907 | SDValue Pair = | |||
7908 | DAG.getBuildVector(MVT::v2i16, DL, {Elts[I * 2], Elts[I * 2 + 1]}); | |||
7909 | SDValue IntPair = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Pair); | |||
7910 | PackedElts.push_back(IntPair); | |||
7911 | } | |||
7912 | if ((NumElements % 2) == 1) { | |||
7913 | // Handle v3i16 | |||
7914 | unsigned I = Elts.size() / 2; | |||
7915 | SDValue Pair = DAG.getBuildVector(MVT::v2i16, DL, | |||
7916 | {Elts[I * 2], DAG.getUNDEF(MVT::i16)}); | |||
7917 | SDValue IntPair = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Pair); | |||
7918 | PackedElts.push_back(IntPair); | |||
7919 | } | |||
7920 | ||||
7921 | // Pad using UNDEF | |||
7922 | PackedElts.resize(Elts.size(), DAG.getUNDEF(MVT::i32)); | |||
7923 | ||||
7924 | // Build final vector | |||
7925 | EVT VecVT = | |||
7926 | EVT::getVectorVT(*DAG.getContext(), MVT::i32, PackedElts.size()); | |||
7927 | return DAG.getBuildVector(VecVT, DL, PackedElts); | |||
7928 | } | |||
7929 | ||||
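     | // Widen an odd-sized (v3) D16 vector to v4: bitcast to an integer, | |||
     | // zero-extend, and bitcast back, leaving zero in the pad element. | |||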
7930 | if (NumElements == 3) { | |||
7931 | EVT IntStoreVT = | |||
7932 | EVT::getIntegerVT(*DAG.getContext(), StoreVT.getStoreSizeInBits()); | |||
7933 | SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData); | |||
7934 | ||||
7935 | EVT WidenedStoreVT = EVT::getVectorVT( | |||
7936 | *DAG.getContext(), StoreVT.getVectorElementType(), NumElements + 1); | |||
7937 | EVT WidenedIntVT = EVT::getIntegerVT(*DAG.getContext(), | |||
7938 | WidenedStoreVT.getStoreSizeInBits()); | |||
7939 | SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, WidenedIntVT, IntVData); | |||
7940 | return DAG.getNode(ISD::BITCAST, DL, WidenedStoreVT, ZExt); | |||
7941 | } | |||
7942 | ||||
7943 | assert(isTypeLegal(StoreVT)); | |||
7944 | return VData; | |||
7945 | } | |||
7946 | ||||
7947 | SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op, | |||
7948 | SelectionDAG &DAG) const { | |||
7949 | SDLoc DL(Op); | |||
7950 | SDValue Chain = Op.getOperand(0); | |||
7951 | unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); | |||
7952 | MachineFunction &MF = DAG.getMachineFunction(); | |||
7953 | ||||
7954 | switch (IntrinsicID) { | |||
7955 | case Intrinsic::amdgcn_exp_compr: { | |||
7956 | SDValue Src0 = Op.getOperand(4); | |||
7957 | SDValue Src1 = Op.getOperand(5); | |||
7958 | // Hack around illegal type on SI by directly selecting it. | |||
7959 | if (isTypeLegal(Src0.getValueType())) | |||
7960 | return SDValue(); | |||
7961 | ||||
7962 | const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(6)); | |||
7963 | SDValue Undef = DAG.getUNDEF(MVT::f32); | |||
7964 | const SDValue Ops[] = { | |||
7965 | Op.getOperand(2), // tgt | |||
7966 | DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src0), // src0 | |||
7967 | DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src1), // src1 | |||
7968 | Undef, // src2 | |||
7969 | Undef, // src3 | |||
7970 | Op.getOperand(7), // vm | |||
7971 | DAG.getTargetConstant(1, DL, MVT::i1), // compr | |||
7972 | Op.getOperand(3), // en | |||
7973 | Op.getOperand(0) // Chain | |||
7974 | }; | |||
7975 | ||||
7976 | unsigned Opc = Done->isZero() ? AMDGPU::EXP : AMDGPU::EXP_DONE; | |||
7977 | return SDValue(DAG.getMachineNode(Opc, DL, Op->getVTList(), Ops), 0); | |||
7978 | } | |||
7979 | case Intrinsic::amdgcn_s_barrier: { | |||
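     | // If the whole workgroup fits in a single wave, the hardware barrier is | |||
     | // unnecessary; a wave_barrier (a scheduling fence only) suffices. | |||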
7980 | if (getTargetMachine().getOptLevel() > CodeGenOpt::None) { | |||
7981 | const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); | |||
7982 | unsigned WGSize = ST.getFlatWorkGroupSizes(MF.getFunction()).second; | |||
7983 | if (WGSize <= ST.getWavefrontSize()) | |||
7984 | return SDValue(DAG.getMachineNode(AMDGPU::WAVE_BARRIER, DL, MVT::Other, | |||
7985 | Op.getOperand(0)), 0); | |||
7986 | } | |||
7987 | return SDValue(); | |||
7988 | } | |||
7989 | case Intrinsic::amdgcn_tbuffer_store: { | |||
7990 | SDValue VData = Op.getOperand(2); | |||
7991 | bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16); | |||
7992 | if (IsD16) | |||
7993 | VData = handleD16VData(VData, DAG); | |||
7994 | unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue(); | |||
7995 | unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue(); | |||
7996 | unsigned Glc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue(); | |||
7997 | unsigned Slc = cast<ConstantSDNode>(Op.getOperand(11))->getZExtValue(); | |||
7998 | unsigned IdxEn = getIdxEn(Op.getOperand(4)); | |||
7999 | SDValue Ops[] = { | |||
8000 | Chain, | |||
8001 | VData, // vdata | |||
8002 | Op.getOperand(3), // rsrc | |||
8003 | Op.getOperand(4), // vindex | |||
8004 | Op.getOperand(5), // voffset | |||
8005 | Op.getOperand(6), // soffset | |||
8006 | Op.getOperand(7), // offset | |||
8007 | DAG.getTargetConstant(Dfmt | (Nfmt << 4), DL, MVT::i32), // format | |||
8008 | DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy | |||
8009 | DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen | |||
8010 | }; | |||
8011 | unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 : | |||
8012 | AMDGPUISD::TBUFFER_STORE_FORMAT; | |||
8013 | MemSDNode *M = cast<MemSDNode>(Op); | |||
8014 | return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, | |||
8015 | M->getMemoryVT(), M->getMemOperand()); | |||
8016 | } | |||
8017 | ||||
8018 | case Intrinsic::amdgcn_struct_tbuffer_store: { | |||
8019 | SDValue VData = Op.getOperand(2); | |||
8020 | bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16); | |||
8021 | if (IsD16) | |||
8022 | VData = handleD16VData(VData, DAG); | |||
8023 | auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG); | |||
8024 | SDValue Ops[] = { | |||
8025 | Chain, | |||
8026 | VData, // vdata | |||
8027 | Op.getOperand(3), // rsrc | |||
8028 | Op.getOperand(4), // vindex | |||
8029 | Offsets.first, // voffset | |||
8030 | Op.getOperand(6), // soffset | |||
8031 | Offsets.second, // offset | |||
8032 | Op.getOperand(7), // format | |||
8033 | Op.getOperand(8), // cachepolicy, swizzled buffer | |||
8034 | DAG.getTargetConstant(1, DL, MVT::i1), // idxen | |||
8035 | }; | |||
8036 | unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 : | |||
8037 | AMDGPUISD::TBUFFER_STORE_FORMAT; | |||
8038 | MemSDNode *M = cast<MemSDNode>(Op); | |||
8039 | return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, | |||
8040 | M->getMemoryVT(), M->getMemOperand()); | |||
8041 | } | |||
8042 | ||||
8043 | case Intrinsic::amdgcn_raw_tbuffer_store: { | |||
8044 | SDValue VData = Op.getOperand(2); | |||
8045 | bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16); | |||
8046 | if (IsD16) | |||
8047 | VData = handleD16VData(VData, DAG); | |||
8048 | auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG); | |||
8049 | SDValue Ops[] = { | |||
8050 | Chain, | |||
8051 | VData, // vdata | |||
8052 | Op.getOperand(3), // rsrc | |||
8053 | DAG.getConstant(0, DL, MVT::i32), // vindex | |||
8054 | Offsets.first, // voffset | |||
8055 | Op.getOperand(5), // soffset | |||
8056 | Offsets.second, // offset | |||
8057 | Op.getOperand(6), // format | |||
8058 | Op.getOperand(7), // cachepolicy, swizzled buffer | |||
8059 | DAG.getTargetConstant(0, DL, MVT::i1), // idxen | |||
8060 | }; | |||
8061 | unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 : | |||
8062 | AMDGPUISD::TBUFFER_STORE_FORMAT; | |||
8063 | MemSDNode *M = cast<MemSDNode>(Op); | |||
8064 | return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, | |||
8065 | M->getMemoryVT(), M->getMemOperand()); | |||
8066 | } | |||
8067 | ||||
8068 | case Intrinsic::amdgcn_buffer_store: | |||
8069 | case Intrinsic::amdgcn_buffer_store_format: { | |||
8070 | SDValue VData = Op.getOperand(2); | |||
8071 | bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16); | |||
8072 | if (IsD16) | |||
8073 | VData = handleD16VData(VData, DAG); | |||
8074 | unsigned Glc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue(); | |||
8075 | unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue(); | |||
8076 | unsigned IdxEn = getIdxEn(Op.getOperand(4)); | |||
8077 | SDValue Ops[] = { | |||
8078 | Chain, | |||
8079 | VData, | |||
8080 | Op.getOperand(3), // rsrc | |||
8081 | Op.getOperand(4), // vindex | |||
8082 | SDValue(), // voffset -- will be set by setBufferOffsets | |||
8083 | SDValue(), // soffset -- will be set by setBufferOffsets | |||
8084 | SDValue(), // offset -- will be set by setBufferOffsets | |||
8085 | DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy | |||
8086 | DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen | |||
8087 | }; | |||
8088 | setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]); | |||
8089 | ||||
8090 | unsigned Opc = IntrinsicID == Intrinsic::amdgcn_buffer_store ? | |||
8091 | AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT; | |||
8092 | Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc; | |||
8093 | MemSDNode *M = cast<MemSDNode>(Op); | |||
8094 | updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6], Ops[3]); | |||
8095 | ||||
8096 | // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics | |||
8097 | EVT VDataType = VData.getValueType().getScalarType(); | |||
8098 | if (VDataType == MVT::i8 || VDataType == MVT::i16) | |||
8099 | return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M); | |||
8100 | ||||
8101 | return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, | |||
8102 | M->getMemoryVT(), M->getMemOperand()); | |||
8103 | } | |||
8104 | ||||
8105 | case Intrinsic::amdgcn_raw_buffer_store: | |||
8106 | case Intrinsic::amdgcn_raw_buffer_store_format: { | |||
8107 | const bool IsFormat = | |||
8108 | IntrinsicID == Intrinsic::amdgcn_raw_buffer_store_format; | |||
8109 | ||||
8110 | SDValue VData = Op.getOperand(2); | |||
8111 | EVT VDataVT = VData.getValueType(); | |||
8112 | EVT EltType = VDataVT.getScalarType(); | |||
8113 | bool IsD16 = IsFormat && (EltType.getSizeInBits() == 16); | |||
8114 | if (IsD16) { | |||
8115 | VData = handleD16VData(VData, DAG); | |||
8116 | VDataVT = VData.getValueType(); | |||
8117 | } | |||
8118 | ||||
8119 | if (!isTypeLegal(VDataVT)) { | |||
8120 | VData = | |||
8121 | DAG.getNode(ISD::BITCAST, DL, | |||
8122 | getEquivalentMemType(*DAG.getContext(), VDataVT), VData); | |||
8123 | } | |||
8124 | ||||
8125 | auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG); | |||
8126 | SDValue Ops[] = { | |||
8127 | Chain, | |||
8128 | VData, | |||
8129 | Op.getOperand(3), // rsrc | |||
8130 | DAG.getConstant(0, DL, MVT::i32), // vindex | |||
8131 | Offsets.first, // voffset | |||
8132 | Op.getOperand(5), // soffset | |||
8133 | Offsets.second, // offset | |||
8134 | Op.getOperand(6), // cachepolicy, swizzled buffer | |||
8135 | DAG.getTargetConstant(0, DL, MVT::i1), // idxen | |||
8136 | }; | |||
8137 | unsigned Opc = | |||
8138 | IsFormat ? AMDGPUISD::BUFFER_STORE_FORMAT : AMDGPUISD::BUFFER_STORE; | |||
8139 | Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc; | |||
8140 | MemSDNode *M = cast<MemSDNode>(Op); | |||
8141 | updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6]); | |||
8142 | ||||
8143 | // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics | |||
8144 | if (!IsD16 && !VDataVT.isVector() && EltType.getSizeInBits() < 32) | |||
8145 | return handleByteShortBufferStores(DAG, VDataVT, DL, Ops, M); | |||
8146 | ||||
8147 | return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, | |||
8148 | M->getMemoryVT(), M->getMemOperand()); | |||
8149 | } | |||
8150 | ||||
8151 | case Intrinsic::amdgcn_struct_buffer_store: | |||
8152 | case Intrinsic::amdgcn_struct_buffer_store_format: { | |||
8153 | const bool IsFormat = | |||
8154 | IntrinsicID == Intrinsic::amdgcn_struct_buffer_store_format; | |||
8155 | ||||
8156 | SDValue VData = Op.getOperand(2); | |||
8157 | EVT VDataVT = VData.getValueType(); | |||
8158 | EVT EltType = VDataVT.getScalarType(); | |||
8159 | bool IsD16 = IsFormat && (EltType.getSizeInBits() == 16); | |||
8160 | ||||
8161 | if (IsD16) { | |||
8162 | VData = handleD16VData(VData, DAG); | |||
8163 | VDataVT = VData.getValueType(); | |||
8164 | } | |||
8165 | ||||
8166 | if (!isTypeLegal(VDataVT)) { | |||
8167 | VData = | |||
8168 | DAG.getNode(ISD::BITCAST, DL, | |||
8169 | getEquivalentMemType(*DAG.getContext(), VDataVT), VData); | |||
8170 | } | |||
8171 | ||||
8172 | auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG); | |||
8173 | SDValue Ops[] = { | |||
8174 | Chain, | |||
8175 | VData, | |||
8176 | Op.getOperand(3), // rsrc | |||
8177 | Op.getOperand(4), // vindex | |||
8178 | Offsets.first, // voffset | |||
8179 | Op.getOperand(6), // soffset | |||
8180 | Offsets.second, // offset | |||
8181 | Op.getOperand(7), // cachepolicy, swizzled buffer | |||
8182 | DAG.getTargetConstant(1, DL, MVT::i1), // idxen | |||
8183 | }; | |||
8184 | unsigned Opc = IntrinsicID == Intrinsic::amdgcn_struct_buffer_store ? | |||
8185 | AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT; | |||
8186 | Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc; | |||
8187 | MemSDNode *M = cast<MemSDNode>(Op); | |||
8188 | updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6], Ops[3]); | |||
8189 | ||||
8190 | // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics | |||
8191 | EVT VDataType = VData.getValueType().getScalarType(); | |||
8192 | if (!IsD16 && !VDataVT.isVector() && EltType.getSizeInBits() < 32) | |||
8193 | return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M); | |||
8194 | ||||
8195 | return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, | |||
8196 | M->getMemoryVT(), M->getMemOperand()); | |||
8197 | } | |||
8198 | case Intrinsic::amdgcn_end_cf: | |||
8199 | return SDValue(DAG.getMachineNode(AMDGPU::SI_END_CF, DL, MVT::Other, | |||
8200 | Op->getOperand(2), Chain), 0); | |||
8201 | ||||
8202 | default: { | |||
8203 | if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr = | |||
8204 | AMDGPU::getImageDimIntrinsicInfo(IntrinsicID)) | |||
8205 | return lowerImage(Op, ImageDimIntr, DAG, true); | |||
8206 | ||||
8207 | return Op; | |||
8208 | } | |||
8209 | } | |||
8210 | } | |||
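// Editor's note (illustrative, not part of the original file): every buffer
// and tbuffer store case above funnels into one operand layout,
//   {chain, vdata, rsrc, vindex, voffset, soffset, offset[, format],
//    cachepolicy, idxen}
// where the raw variants pin vindex to 0 with idxen = 0 and the struct
// variants pass the caller's vindex with idxen = 1.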
8211 | ||||
8212 | // The raw.(t)buffer and struct.(t)buffer intrinsics have two offset args: | |||
8213 | // offset (the offset that is included in bounds checking and swizzling, to be | |||
8214 | // split between the instruction's voffset and immoffset fields) and soffset | |||
8215 | // (the offset that is excluded from bounds checking and swizzling, to go in | |||
8216 | // the instruction's soffset field). This function takes the first kind of | |||
8217 | // offset and figures out how to split it between voffset and immoffset. | |||
8218 | std::pair<SDValue, SDValue> SITargetLowering::splitBufferOffsets( | |||
8219 | SDValue Offset, SelectionDAG &DAG) const { | |||
8220 | SDLoc DL(Offset); | |||
8221 | const unsigned MaxImm = 4095; | |||
8222 | SDValue N0 = Offset; | |||
8223 | ConstantSDNode *C1 = nullptr; | |||
8224 | ||||
8225 | if ((C1 = dyn_cast<ConstantSDNode>(N0))) | |||
8226 | N0 = SDValue(); | |||
8227 | else if (DAG.isBaseWithConstantOffset(N0)) { | |||
8228 | C1 = cast<ConstantSDNode>(N0.getOperand(1)); | |||
8229 | N0 = N0.getOperand(0); | |||
8230 | } | |||
8231 | ||||
8232 | if (C1) { | |||
8233 | unsigned ImmOffset = C1->getZExtValue(); | |||
8234 | // If the immediate value is too big for the immoffset field, keep only the | |||
8235 | // low bits (value & 4095) in the immoffset field, so that the value that is | |||
8236 | // copied/added for the voffset field is a multiple of 4096 and stands more | |||
8237 | // chance of being CSEd with the copy/add for another similar load/store. | |||
8238 | // However, do not do that rounding down to a multiple of 4096 if it yields | |||
8239 | // a negative number, as it appears to be illegal to have a negative offset | |||
8240 | // in the vgpr, even if adding the immediate offset makes it positive. | |||
8241 | unsigned Overflow = ImmOffset & ~MaxImm; | |||
8242 | ImmOffset -= Overflow; | |||
8243 | if ((int32_t)Overflow < 0) { | |||
8244 | Overflow += ImmOffset; | |||
8245 | ImmOffset = 0; | |||
8246 | } | |||
8247 | C1 = cast<ConstantSDNode>(DAG.getTargetConstant(ImmOffset, DL, MVT::i32)); | |||
8248 | if (Overflow) { | |||
8249 | auto OverflowVal = DAG.getConstant(Overflow, DL, MVT::i32); | |||
8250 | if (!N0) | |||
8251 | N0 = OverflowVal; | |||
8252 | else { | |||
8253 | SDValue Ops[] = { N0, OverflowVal }; | |||
8254 | N0 = DAG.getNode(ISD::ADD, DL, MVT::i32, Ops); | |||
8255 | } | |||
8256 | } | |||
8257 | } | |||
8258 | if (!N0) | |||
8259 | N0 = DAG.getConstant(0, DL, MVT::i32); | |||
8260 | if (!C1) | |||
8261 | C1 = cast<ConstantSDNode>(DAG.getTargetConstant(0, DL, MVT::i32)); | |||
8262 | return {N0, SDValue(C1, 0)}; | |||
8263 | } | |||
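// Worked example for the split above (editor's sketch, values illustrative):
// with MaxImm = 4095, a combined constant offset of 4100 yields
//   Overflow  = 4100 & ~4095 = 4096   // moved into the voffset
//   ImmOffset = 4100 - 4096  = 4      // fits the 12-bit immoffset field
// while a non-constant offset such as (add N, 8192) keeps ImmOffset = 0 and
// leaves the whole add in the voffset, since 8192 is already a 4096-multiple.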
8264 | ||||
8265 | // Analyze a combined offset from an amdgcn_buffer_ intrinsic and store the | |||
8266 | // three offsets (voffset, soffset and instoffset) into the SDValue[3] array | |||
8267 | // pointed to by Offsets. | |||
8268 | void SITargetLowering::setBufferOffsets(SDValue CombinedOffset, | |||
8269 | SelectionDAG &DAG, SDValue *Offsets, | |||
8270 | Align Alignment) const { | |||
8271 | SDLoc DL(CombinedOffset); | |||
8272 | if (auto C = dyn_cast<ConstantSDNode>(CombinedOffset)) { | |||
8273 | uint32_t Imm = C->getZExtValue(); | |||
8274 | uint32_t SOffset, ImmOffset; | |||
8275 | if (AMDGPU::splitMUBUFOffset(Imm, SOffset, ImmOffset, Subtarget, | |||
8276 | Alignment)) { | |||
8277 | Offsets[0] = DAG.getConstant(0, DL, MVT::i32); | |||
8278 | Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32); | |||
8279 | Offsets[2] = DAG.getTargetConstant(ImmOffset, DL, MVT::i32); | |||
8280 | return; | |||
8281 | } | |||
8282 | } | |||
8283 | if (DAG.isBaseWithConstantOffset(CombinedOffset)) { | |||
8284 | SDValue N0 = CombinedOffset.getOperand(0); | |||
8285 | SDValue N1 = CombinedOffset.getOperand(1); | |||
8286 | uint32_t SOffset, ImmOffset; | |||
8287 | int Offset = cast<ConstantSDNode>(N1)->getSExtValue(); | |||
8288 | if (Offset >= 0 && AMDGPU::splitMUBUFOffset(Offset, SOffset, ImmOffset, | |||
8289 | Subtarget, Alignment)) { | |||
8290 | Offsets[0] = N0; | |||
8291 | Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32); | |||
8292 | Offsets[2] = DAG.getTargetConstant(ImmOffset, DL, MVT::i32); | |||
8293 | return; | |||
8294 | } | |||
8295 | } | |||
8296 | Offsets[0] = CombinedOffset; | |||
8297 | Offsets[1] = DAG.getConstant(0, DL, MVT::i32); | |||
8298 | Offsets[2] = DAG.getTargetConstant(0, DL, MVT::i32); | |||
8299 | } | |||
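// Editor's note (illustrative, subtarget-dependent): for a constant combined
// offset that is too large for the instruction's immediate field,
// AMDGPU::splitMUBUFOffset may, for example, turn 5000 into soffset = 4096
// and instoffset = 904; when no such split succeeds, the whole combined
// offset is simply placed in the voffset with soffset and instoffset zero.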
8300 | ||||
8301 | // Handle 8 bit and 16 bit buffer loads | |||
8302 | SDValue SITargetLowering::handleByteShortBufferLoads(SelectionDAG &DAG, | |||
8303 | EVT LoadVT, SDLoc DL, | |||
8304 | ArrayRef<SDValue> Ops, | |||
8305 | MemSDNode *M) const { | |||
8306 | EVT IntVT = LoadVT.changeTypeToInteger(); | |||
8307 | unsigned Opc = (LoadVT.getScalarType() == MVT::i8) ? | |||
8308 | AMDGPUISD::BUFFER_LOAD_UBYTE : AMDGPUISD::BUFFER_LOAD_USHORT; | |||
8309 | ||||
8310 | SDVTList ResList = DAG.getVTList(MVT::i32, MVT::Other); | |||
8311 | SDValue BufferLoad = DAG.getMemIntrinsicNode(Opc, DL, ResList, | |||
8312 | Ops, IntVT, | |||
8313 | M->getMemOperand()); | |||
8314 | SDValue LoadVal = DAG.getNode(ISD::TRUNCATE, DL, IntVT, BufferLoad); | |||
8315 | LoadVal = DAG.getNode(ISD::BITCAST, DL, LoadVT, LoadVal); | |||
8316 | ||||
8317 | return DAG.getMergeValues({LoadVal, BufferLoad.getValue(1)}, DL); | |||
8318 | } | |||
8319 | ||||
8320 | // Handle 8 bit and 16 bit buffer stores | |||
8321 | SDValue SITargetLowering::handleByteShortBufferStores(SelectionDAG &DAG, | |||
8322 | EVT VDataType, SDLoc DL, | |||
8323 | SDValue Ops[], | |||
8324 | MemSDNode *M) const { | |||
8325 | if (VDataType == MVT::f16) | |||
8326 | Ops[1] = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Ops[1]); | |||
8327 | ||||
8328 | SDValue BufferStoreExt = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Ops[1]); | |||
8329 | Ops[1] = BufferStoreExt; | |||
8330 | unsigned Opc = (VDataType == MVT::i8) ? AMDGPUISD::BUFFER_STORE_BYTE : | |||
8331 | AMDGPUISD::BUFFER_STORE_SHORT; | |||
8332 | ArrayRef<SDValue> OpsRef = makeArrayRef(&Ops[0], 9); | |||
8333 | return DAG.getMemIntrinsicNode(Opc, DL, M->getVTList(), OpsRef, VDataType, | |||
8334 | M->getMemOperand()); | |||
8335 | } | |||
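// Illustrative example (editor's sketch): an i8 store is any-extended to i32
// and emitted as BUFFER_STORE_BYTE, which writes only the low 8 bits; f16
// data is first bitcast to i16 so that the extension stays integral.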
8336 | ||||
8337 | static SDValue getLoadExtOrTrunc(SelectionDAG &DAG, | |||
8338 | ISD::LoadExtType ExtType, SDValue Op, | |||
8339 | const SDLoc &SL, EVT VT) { | |||
8340 | if (VT.bitsLT(Op.getValueType())) | |||
8341 | return DAG.getNode(ISD::TRUNCATE, SL, VT, Op); | |||
8342 | ||||
8343 | switch (ExtType) { | |||
8344 | case ISD::SEXTLOAD: | |||
8345 | return DAG.getNode(ISD::SIGN_EXTEND, SL, VT, Op); | |||
8346 | case ISD::ZEXTLOAD: | |||
8347 | return DAG.getNode(ISD::ZERO_EXTEND, SL, VT, Op); | |||
8348 | case ISD::EXTLOAD: | |||
8349 | return DAG.getNode(ISD::ANY_EXTEND, SL, VT, Op); | |||
8350 | case ISD::NON_EXTLOAD: | |||
8351 | return Op; | |||
8352 | } | |||
8353 | ||||
8354 | llvm_unreachable("invalid ext type")::llvm::llvm_unreachable_internal("invalid ext type", "llvm/lib/Target/AMDGPU/SIISelLowering.cpp" , 8354); | |||
8355 | } | |||
8356 | ||||
8357 | SDValue SITargetLowering::widenLoad(LoadSDNode *Ld, DAGCombinerInfo &DCI) const { | |||
8358 | SelectionDAG &DAG = DCI.DAG; | |||
8359 | if (Ld->getAlignment() < 4 || Ld->isDivergent()) | |||
8360 | return SDValue(); | |||
8361 | ||||
8362 | // FIXME: Constant loads should all be marked invariant. | |||
8363 | unsigned AS = Ld->getAddressSpace(); | |||
8364 | if (AS != AMDGPUAS::CONSTANT_ADDRESS && | |||
8365 | AS != AMDGPUAS::CONSTANT_ADDRESS_32BIT && | |||
8366 | (AS != AMDGPUAS::GLOBAL_ADDRESS || !Ld->isInvariant())) | |||
8367 | return SDValue(); | |||
8368 | ||||
8369 | // Don't do this early, since it may interfere with adjacent load merging for | |||
8370 | // illegal types. We can avoid losing alignment information for exotic types | |||
8371 | // pre-legalize. | |||
8372 | EVT MemVT = Ld->getMemoryVT(); | |||
8373 | if ((MemVT.isSimple() && !DCI.isAfterLegalizeDAG()) || | |||
8374 | MemVT.getSizeInBits() >= 32) | |||
8375 | return SDValue(); | |||
8376 | ||||
8377 | SDLoc SL(Ld); | |||
8378 | ||||
8379 | assert((!MemVT.isVector() || Ld->getExtensionType() == ISD::NON_EXTLOAD) && | |||
8380 | "unexpected vector extload"); | |||
8381 | ||||
8382 | // TODO: Drop only high part of range. | |||
8383 | SDValue Ptr = Ld->getBasePtr(); | |||
8384 | SDValue NewLoad = DAG.getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, | |||
8385 | MVT::i32, SL, Ld->getChain(), Ptr, | |||
8386 | Ld->getOffset(), | |||
8387 | Ld->getPointerInfo(), MVT::i32, | |||
8388 | Ld->getAlignment(), | |||
8389 | Ld->getMemOperand()->getFlags(), | |||
8390 | Ld->getAAInfo(), | |||
8391 | nullptr); // Drop ranges | |||
8392 | ||||
8393 | EVT TruncVT = EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits()); | |||
8394 | if (MemVT.isFloatingPoint()) { | |||
8395 | assert(Ld->getExtensionType() == ISD::NON_EXTLOAD && | |||
8396 | "unexpected fp extload"); | |||
8397 | TruncVT = MemVT.changeTypeToInteger(); | |||
8398 | } | |||
8399 | ||||
8400 | SDValue Cvt = NewLoad; | |||
8401 | if (Ld->getExtensionType() == ISD::SEXTLOAD) { | |||
8402 | Cvt = DAG.getNode(ISD::SIGN_EXTEND_INREG, SL, MVT::i32, NewLoad, | |||
8403 | DAG.getValueType(TruncVT)); | |||
8404 | } else if (Ld->getExtensionType() == ISD::ZEXTLOAD || | |||
8405 | Ld->getExtensionType() == ISD::NON_EXTLOAD) { | |||
8406 | Cvt = DAG.getZeroExtendInReg(NewLoad, SL, TruncVT); | |||
8407 | } else { | |||
8408 | assert(Ld->getExtensionType() == ISD::EXTLOAD); | |||
8409 | } | |||
8410 | ||||
8411 | EVT VT = Ld->getValueType(0); | |||
8412 | EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits()); | |||
8413 | ||||
8414 | DCI.AddToWorklist(Cvt.getNode()); | |||
8415 | ||||
8416 | // We may need to handle exotic cases, such as i16->i64 extloads, so insert | |||
8417 | // the appropriate extension from the 32-bit load. | |||
8418 | Cvt = getLoadExtOrTrunc(DAG, Ld->getExtensionType(), Cvt, SL, IntVT); | |||
8419 | DCI.AddToWorklist(Cvt.getNode()); | |||
8420 | ||||
8421 | // Handle conversion back to floating point if necessary. | |||
8422 | Cvt = DAG.getNode(ISD::BITCAST, SL, VT, Cvt); | |||
8423 | ||||
8424 | return DAG.getMergeValues({ Cvt, NewLoad.getValue(1) }, SL); | |||
8425 | } | |||
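// Illustrative example (editor's sketch): after legalization, a uniform
// constant-address zextload of i8 becomes a full 32-bit scalar load plus an
// in-register zero-extension,
//   (zextload i8, %p) -> (and (load i32, %p), 0xff)
// with a final trunc/extend and bitcast when the result type is not i32.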
8426 | ||||
8427 | SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { | |||
8428 | SDLoc DL(Op); | |||
8429 | LoadSDNode *Load = cast<LoadSDNode>(Op); | |||
8430 | ISD::LoadExtType ExtType = Load->getExtensionType(); | |||
8431 | EVT MemVT = Load->getMemoryVT(); | |||
8432 | ||||
8433 | if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) { | |||
8434 | if (MemVT == MVT::i16 && isTypeLegal(MVT::i16)) | |||
8435 | return SDValue(); | |||
8436 | ||||
8437 | // FIXME: Copied from PPC | |||
8438 | // First, load into 32 bits, then truncate to 1 bit. | |||
8439 | ||||
8440 | SDValue Chain = Load->getChain(); | |||
8441 | SDValue BasePtr = Load->getBasePtr(); | |||
8442 | MachineMemOperand *MMO = Load->getMemOperand(); | |||
8443 | ||||
8444 | EVT RealMemVT = (MemVT == MVT::i1) ? MVT::i8 : MVT::i16; | |||
8445 | ||||
8446 | SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain, | |||
8447 | BasePtr, RealMemVT, MMO); | |||
8448 | ||||
8449 | if (!MemVT.isVector()) { | |||
8450 | SDValue Ops[] = { | |||
8451 | DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD), | |||
8452 | NewLD.getValue(1) | |||
8453 | }; | |||
8454 | ||||
8455 | return DAG.getMergeValues(Ops, DL); | |||
8456 | } | |||
8457 | ||||
8458 | SmallVector<SDValue, 3> Elts; | |||
8459 | for (unsigned I = 0, N = MemVT.getVectorNumElements(); I != N; ++I) { | |||
8460 | SDValue Elt = DAG.getNode(ISD::SRL, DL, MVT::i32, NewLD, | |||
8461 | DAG.getConstant(I, DL, MVT::i32)); | |||
8462 | ||||
8463 | Elts.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Elt)); | |||
8464 | } | |||
8465 | ||||
8466 | SDValue Ops[] = { | |||
8467 | DAG.getBuildVector(MemVT, DL, Elts), | |||
8468 | NewLD.getValue(1) | |||
8469 | }; | |||
8470 | ||||
8471 | return DAG.getMergeValues(Ops, DL); | |||
8472 | } | |||
8473 | ||||
8474 | if (!MemVT.isVector()) | |||
8475 | return SDValue(); | |||
8476 | ||||
8477 | assert(Op.getValueType().getVectorElementType() == MVT::i32 && | |||
8478 | "Custom lowering for non-i32 vectors hasn't been implemented."); | |||
8479 | ||||
8480 | unsigned Alignment = Load->getAlignment(); | |||
8481 | unsigned AS = Load->getAddressSpace(); | |||
8482 | if (Subtarget->hasLDSMisalignedBug() && | |||
8483 | AS == AMDGPUAS::FLAT_ADDRESS && | |||
8484 | Alignment < MemVT.getStoreSize() && MemVT.getSizeInBits() > 32) { | |||
8485 | return SplitVectorLoad(Op, DAG); | |||
8486 | } | |||
8487 | ||||
8488 | MachineFunction &MF = DAG.getMachineFunction(); | |||
8489 | SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); | |||
8490 | // If there is a possibility that flat instructions access scratch memory | |||
8491 | // then we need to use the same legalization rules we use for private. | |||
8492 | if (AS == AMDGPUAS::FLAT_ADDRESS && | |||
8493 | !Subtarget->hasMultiDwordFlatScratchAddressing()) | |||
8494 | AS = MFI->hasFlatScratchInit() ? | |||
8495 | AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS; | |||
8496 | ||||
8497 | unsigned NumElements = MemVT.getVectorNumElements(); | |||
8498 | ||||
8499 | if (AS == AMDGPUAS::CONSTANT_ADDRESS || | |||
8500 | AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT) { | |||
8501 | if (!Op->isDivergent() && Alignment >= 4 && NumElements < 32) { | |||
8502 | if (MemVT.isPow2VectorType()) | |||
8503 | return SDValue(); | |||
8504 | return WidenOrSplitVectorLoad(Op, DAG); | |||
8505 | } | |||
8506 | // Non-uniform loads will be selected to MUBUF instructions, so they | |||
8507 | // have the same legalization requirements as global and private | |||
8508 | // loads. | |||
8509 | // | |||
8510 | } | |||
8511 | ||||
8512 | if (AS == AMDGPUAS::CONSTANT_ADDRESS || | |||
8513 | AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT || | |||
8514 | AS == AMDGPUAS::GLOBAL_ADDRESS) { | |||
8515 | if (Subtarget->getScalarizeGlobalBehavior() && !Op->isDivergent() && | |||
8516 | Load->isSimple() && isMemOpHasNoClobberedMemOperand(Load) && | |||
8517 | Alignment >= 4 && NumElements < 32) { | |||
8518 | if (MemVT.isPow2VectorType()) | |||
8519 | return SDValue(); | |||
8520 | return WidenOrSplitVectorLoad(Op, DAG); | |||
8521 | } | |||
8522 | // Non-uniform loads will be selected to MUBUF instructions, so they | |||
8523 | // have the same legalization requirements as global and private | |||
8524 | // loads. | |||
8525 | // | |||
8526 | } | |||
8527 | if (AS == AMDGPUAS::CONSTANT_ADDRESS || | |||
8528 | AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT || | |||
8529 | AS == AMDGPUAS::GLOBAL_ADDRESS || | |||
8530 | AS == AMDGPUAS::FLAT_ADDRESS) { | |||
8531 | if (NumElements > 4) | |||
8532 | return SplitVectorLoad(Op, DAG); | |||
8533 | // v3 loads not supported on SI. | |||
8534 | if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores()) | |||
8535 | return WidenOrSplitVectorLoad(Op, DAG); | |||
8536 | ||||
8537 | // v3 and v4 loads are supported for private and global memory. | |||
8538 | return SDValue(); | |||
8539 | } | |||
8540 | if (AS == AMDGPUAS::PRIVATE_ADDRESS) { | |||
8541 | // Depending on the setting of the private_element_size field in the | |||
8542 | // resource descriptor, we can only make private accesses up to a certain | |||
8543 | // size. | |||
8544 | switch (Subtarget->getMaxPrivateElementSize()) { | |||
8545 | case 4: { | |||
8546 | SDValue Ops[2]; | |||
8547 | std::tie(Ops[0], Ops[1]) = scalarizeVectorLoad(Load, DAG); | |||
8548 | return DAG.getMergeValues(Ops, DL); | |||
8549 | } | |||
8550 | case 8: | |||
8551 | if (NumElements > 2) | |||
8552 | return SplitVectorLoad(Op, DAG); | |||
8553 | return SDValue(); | |||
8554 | case 16: | |||
8555 | // Same as global/flat | |||
8556 | if (NumElements > 4) | |||
8557 | return SplitVectorLoad(Op, DAG); | |||
8558 | // v3 loads not supported on SI. | |||
8559 | if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores()) | |||
8560 | return WidenOrSplitVectorLoad(Op, DAG); | |||
8561 | ||||
8562 | return SDValue(); | |||
8563 | default: | |||
8564 | llvm_unreachable("unsupported private_element_size")::llvm::llvm_unreachable_internal("unsupported private_element_size" , "llvm/lib/Target/AMDGPU/SIISelLowering.cpp", 8564); | |||
8565 | } | |||
8566 | } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) { | |||
8567 | bool Fast = false; | |||
8568 | auto Flags = Load->getMemOperand()->getFlags(); | |||
8569 | if (allowsMisalignedMemoryAccessesImpl(MemVT.getSizeInBits(), AS, | |||
8570 | Load->getAlign(), Flags, &Fast) && | |||
8571 | Fast) | |||
8572 | return SDValue(); | |||
8573 | ||||
8574 | if (MemVT.isVector()) | |||
8575 | return SplitVectorLoad(Op, DAG); | |||
8576 | } | |||
8577 | ||||
8578 | if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(), | |||
8579 | MemVT, *Load->getMemOperand())) { | |||
8580 | SDValue Ops[2]; | |||
8581 | std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG); | |||
8582 | return DAG.getMergeValues(Ops, DL); | |||
8583 | } | |||
8584 | ||||
8585 | return SDValue(); | |||
8586 | } | |||
8587 | ||||
8588 | SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { | |||
8589 | EVT VT = Op.getValueType(); | |||
8590 | if (VT.getSizeInBits() == 128) | |||
8591 | return splitTernaryVectorOp(Op, DAG); | |||
8592 | ||||
8593 | assert(VT.getSizeInBits() == 64); | |||
8594 | ||||
8595 | SDLoc DL(Op); | |||
8596 | SDValue Cond = Op.getOperand(0); | |||
8597 | ||||
8598 | SDValue Zero = DAG.getConstant(0, DL, MVT::i32); | |||
8599 | SDValue One = DAG.getConstant(1, DL, MVT::i32); | |||
8600 | ||||
8601 | SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1)); | |||
8602 | SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2)); | |||
8603 | ||||
8604 | SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero); | |||
8605 | SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero); | |||
8606 | ||||
8607 | SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1); | |||
8608 | ||||
8609 | SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One); | |||
8610 | SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One); | |||
8611 | ||||
8612 | SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1); | |||
8613 | ||||
8614 | SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi}); | |||
8615 | return DAG.getNode(ISD::BITCAST, DL, VT, Res); | |||
8616 | } | |||
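// Illustrative example (editor's sketch): a 64-bit select is decomposed into
// two 32-bit selects over the bitcast halves,
//   (select c, x, y) -> build_vector (select c, x.lo, y.lo),
//                                    (select c, x.hi, y.hi)
// which can then map onto a pair of v_cndmask_b32 instructions.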
8617 | ||||
8618 | // Catch division cases where we can use shortcuts with rcp and rsq | |||
8619 | // instructions. | |||
8620 | SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op, | |||
8621 | SelectionDAG &DAG) const { | |||
8622 | SDLoc SL(Op); | |||
8623 | SDValue LHS = Op.getOperand(0); | |||
8624 | SDValue RHS = Op.getOperand(1); | |||
8625 | EVT VT = Op.getValueType(); | |||
8626 | const SDNodeFlags Flags = Op->getFlags(); | |||
8627 | ||||
8628 | bool AllowInaccurateRcp = Flags.hasApproximateFuncs(); | |||
8629 | ||||
8630 | // Without !fpmath accuracy information, we can't do more because we don't | |||
8631 | // know exactly whether rcp is accurate enough to meet the !fpmath requirement. | |||
8632 | if (!AllowInaccurateRcp) | |||
8633 | return SDValue(); | |||
8634 | ||||
8635 | if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) { | |||
8636 | if (CLHS->isExactlyValue(1.0)) { | |||
8637 | // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to | |||
8638 | // the CI documentation, have a worst-case error of 1 ulp. | |||
8639 | // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to | |||
8640 | // use it as long as we aren't trying to use denormals. | |||
8641 | // | |||
8642 | // v_rcp_f16 and v_rsq_f16 DO support denormals. | |||
8643 | ||||
8644 | // 1.0 / sqrt(x) -> rsq(x) | |||
8645 | ||||
8646 | // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP | |||
8647 | // error seems really high at 2^29 ULP. | |||
8648 | if (RHS.getOpcode() == ISD::FSQRT) | |||
8649 | return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0)); | |||
8650 | ||||
8651 | // 1.0 / x -> rcp(x) | |||
8652 | return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS); | |||
8653 | } | |||
8654 | ||||
8655 | // Same as for 1.0, but expand the sign out of the constant. | |||
8656 | if (CLHS->isExactlyValue(-1.0)) { | |||
8657 | // -1.0 / x -> rcp (fneg x) | |||
8658 | SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); | |||
8659 | return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS); | |||
8660 | } | |||
8661 | } | |||
8662 | ||||
8663 | // Turn into multiply by the reciprocal. | |||
8664 | // x / y -> x * (1.0 / y) | |||
8665 | SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS); | |||
8666 | return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, Flags); | |||
8667 | } | |||
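// Editor's summary of the shortcuts above (illustrative):
//   1.0 / sqrt(x) -> rsq(x)
//   1.0 / x       -> rcp(x)
//  -1.0 / x       -> rcp(fneg x)
//   a / b         -> a * rcp(b)
// all of which require the approximate-functions fast-math flag.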
8668 | ||||
8669 | SDValue SITargetLowering::lowerFastUnsafeFDIV64(SDValue Op, | |||
8670 | SelectionDAG &DAG) const { | |||
8671 | SDLoc SL(Op); | |||
8672 | SDValue X = Op.getOperand(0); | |||
8673 | SDValue Y = Op.getOperand(1); | |||
8674 | EVT VT = Op.getValueType(); | |||
8675 | const SDNodeFlags Flags = Op->getFlags(); | |||
8676 | ||||
8677 | bool AllowInaccurateDiv = Flags.hasApproximateFuncs() || | |||
8678 | DAG.getTarget().Options.UnsafeFPMath; | |||
8679 | if (!AllowInaccurateDiv) | |||
8680 | return SDValue(); | |||
8681 | ||||
8682 | SDValue NegY = DAG.getNode(ISD::FNEG, SL, VT, Y); | |||
8683 | SDValue One = DAG.getConstantFP(1.0, SL, VT); | |||
8684 | ||||
8685 | SDValue R = DAG.getNode(AMDGPUISD::RCP, SL, VT, Y); | |||
8686 | SDValue Tmp0 = DAG.getNode(ISD::FMA, SL, VT, NegY, R, One); | |||
8687 | ||||
8688 | R = DAG.getNode(ISD::FMA, SL, VT, Tmp0, R, R); | |||
8689 | SDValue Tmp1 = DAG.getNode(ISD::FMA, SL, VT, NegY, R, One); | |||
8690 | R = DAG.getNode(ISD::FMA, SL, VT, Tmp1, R, R); | |||
8691 | SDValue Ret = DAG.getNode(ISD::FMUL, SL, VT, X, R); | |||
8692 | SDValue Tmp2 = DAG.getNode(ISD::FMA, SL, VT, NegY, Ret, X); | |||
8693 | return DAG.getNode(ISD::FMA, SL, VT, Tmp2, R, Ret); | |||
8694 | } | |||
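// Editor's note on the math above (a sketch, not authoritative): with the
// hardware estimate r ~= 1/y, each pair
//   e = fma(-y, r, 1.0)   // residual 1 - y*r
//   r = fma(e, r, r)      // Newton-Raphson step, roughly doubles precision
// refines the reciprocal; the final fma(x - y*q, r, q) applies one residual
// correction to the quotient q = x*r before it is returned.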
8695 | ||||
8696 | static SDValue getFPBinOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL, | |||
8697 | EVT VT, SDValue A, SDValue B, SDValue GlueChain, | |||
8698 | SDNodeFlags Flags) { | |||
8699 | if (GlueChain->getNumValues() <= 1) { | |||
8700 | return DAG.getNode(Opcode, SL, VT, A, B, Flags); | |||
8701 | } | |||
8702 | ||||
8703 | assert(GlueChain->getNumValues() == 3); | |||
8704 | ||||
8705 | SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue); | |||
8706 | switch (Opcode) { | |||
8707 | default: llvm_unreachable("no chain equivalent for opcode")::llvm::llvm_unreachable_internal("no chain equivalent for opcode" , "llvm/lib/Target/AMDGPU/SIISelLowering.cpp", 8707); | |||
8708 | case ISD::FMUL: | |||
8709 | Opcode = AMDGPUISD::FMUL_W_CHAIN; | |||
8710 | break; | |||
8711 | } | |||
8712 | ||||
8713 | return DAG.getNode(Opcode, SL, VTList, | |||
8714 | {GlueChain.getValue(1), A, B, GlueChain.getValue(2)}, | |||
8715 | Flags); | |||
8716 | } | |||
8717 | ||||
8718 | static SDValue getFPTernOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL, | |||
8719 | EVT VT, SDValue A, SDValue B, SDValue C, | |||
8720 | SDValue GlueChain, SDNodeFlags Flags) { | |||
8721 | if (GlueChain->getNumValues() <= 1) { | |||
8722 | return DAG.getNode(Opcode, SL, VT, {A, B, C}, Flags); | |||
8723 | } | |||
8724 | ||||
8725 | assert(GlueChain->getNumValues() == 3); | |||
8726 | ||||
8727 | SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue); | |||
8728 | switch (Opcode) { | |||
8729 | default: llvm_unreachable("no chain equivalent for opcode")::llvm::llvm_unreachable_internal("no chain equivalent for opcode" , "llvm/lib/Target/AMDGPU/SIISelLowering.cpp", 8729); | |||
8730 | case ISD::FMA: | |||
8731 | Opcode = AMDGPUISD::FMA_W_CHAIN; | |||
8732 | break; | |||
8733 | } | |||
8734 | ||||
8735 | return DAG.getNode(Opcode, SL, VTList, | |||
8736 | {GlueChain.getValue(1), A, B, C, GlueChain.getValue(2)}, | |||
8737 | Flags); | |||
8738 | } | |||
8739 | ||||
8740 | SDValue SITargetLowering::LowerFDIV16(SDValue Op, SelectionDAG &DAG) const { | |||
8741 | if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG)) | |||
8742 | return FastLowered; | |||
8743 | ||||
8744 | SDLoc SL(Op); | |||
8745 | SDValue Src0 = Op.getOperand(0); | |||
8746 | SDValue Src1 = Op.getOperand(1); | |||
8747 | ||||
8748 | SDValue CvtSrc0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0); | |||
8749 | SDValue CvtSrc1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1); | |||
8750 | ||||
8751 | SDValue RcpSrc1 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, CvtSrc1); | |||
8752 | SDValue Quot = DAG.getNode(ISD::FMUL, SL, MVT::f32, CvtSrc0, RcpSrc1); | |||
8753 | ||||
8754 | SDValue FPRoundFlag = DAG.getTargetConstant(0, SL, MVT::i32); | |||
8755 | SDValue BestQuot = DAG.getNode(ISD::FP_ROUND, SL, MVT::f16, Quot, FPRoundFlag); | |||
8756 | ||||
8757 | return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f16, BestQuot, Src1, Src0); | |||
8758 | } | |||
8759 | ||||
8760 | // Faster 2.5 ULP division that does not support denormals. | |||
8761 | SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const { | |||
8762 | SDLoc SL(Op); | |||
8763 | SDValue LHS = Op.getOperand(1); | |||
8764 | SDValue RHS = Op.getOperand(2); | |||
8765 | ||||
8766 | SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS); | |||
8767 | ||||
8768 | const APFloat K0Val(BitsToFloat(0x6f800000)); | |||
8769 | const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32); | |||
8770 | ||||
8771 | const APFloat K1Val(BitsToFloat(0x2f800000)); | |||
8772 | const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32); | |||
8773 | ||||
8774 | const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32); | |||
8775 | ||||
8776 | EVT SetCCVT = | |||
8777 | getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32); | |||
8778 | ||||
8779 | SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT); | |||
8780 | ||||
8781 | SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One); | |||
8782 | ||||
8783 | // TODO: Should this propagate fast-math-flags? | |||
8784 | r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3); | |||
8785 | ||||
8786 | // rcp does not support denormals. | |||
8787 | SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1); | |||
8788 | ||||
8789 | SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0); | |||
8790 | ||||
8791 | return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul); | |||
8792 | } | |||
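// Editor's note (illustrative): 0x6f800000 is 2^96 and 0x2f800000 is 2^-32
// as f32 bit patterns. When |rhs| > 2^96 the denominator is pre-scaled by
// 2^-32 so rcp cannot flush to zero, and the same factor r3 is re-applied to
// the product, keeping the overall value unchanged:
//   lhs / rhs == r3 * (lhs * rcp(rhs * r3))   for r3 in {1.0, 2^-32}.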
8793 | ||||
8794 | // Returns immediate value for setting the F32 denorm mode when using the | |||
8795 | // S_DENORM_MODE instruction. | |||
8796 | static SDValue getSPDenormModeValue(int SPDenormMode, SelectionDAG &DAG, | |||
8797 | const SDLoc &SL, const GCNSubtarget *ST) { | |||
8798 | assert(ST->hasDenormModeInst() && "Requires S_DENORM_MODE"); | |||
8799 | int DPDenormModeDefault = hasFP64FP16Denormals(DAG.getMachineFunction()) | |||
8800 | ? FP_DENORM_FLUSH_NONE | |||
8801 | : FP_DENORM_FLUSH_IN_FLUSH_OUT; | |||
8802 | ||||
8803 | int Mode = SPDenormMode | (DPDenormModeDefault << 2); | |||
8804 | return DAG.getTargetConstant(Mode, SL, MVT::i32); | |||
8805 | } | |||
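// Editor's note (illustrative): the immediate packs the FP32 denorm mode in
// bits [1:0] and the FP64/FP16 mode in bits [3:2]; with FP_DENORM_FLUSH_NONE
// = 3 and FP_DENORM_FLUSH_IN_FLUSH_OUT = 0, enabling denormals everywhere
// encodes as 0b1111 and flushing everywhere as 0b0000.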
8806 | ||||
8807 | SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const { | |||
8808 | if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG)) | |||
8809 | return FastLowered; | |||
8810 | ||||
8811 | // The selection matcher assumes that anything with a chain selects to a | |||
8812 | // mayRaiseFPException machine instruction. Since we're introducing a chain | |||
8813 | // here, we need to explicitly report nofpexcept for the regular fdiv | |||
8814 | // lowering. | |||
8815 | SDNodeFlags Flags = Op->getFlags(); | |||
8816 | Flags.setNoFPExcept(true); | |||
8817 | ||||
8818 | SDLoc SL(Op); | |||
8819 | SDValue LHS = Op.getOperand(0); | |||
8820 | SDValue RHS = Op.getOperand(1); | |||
8821 | ||||
8822 | const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32); | |||
8823 | ||||
8824 | SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1); | |||
8825 | ||||
8826 | SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, | |||
8827 | {RHS, RHS, LHS}, Flags); | |||
8828 | SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, | |||
8829 | {LHS, RHS, LHS}, Flags); | |||
8830 | ||||
8831 | // Denominator is scaled to not be denormal, so using rcp is ok. | |||
8832 | SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, | |||
8833 | DenominatorScaled, Flags); | |||
8834 | SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32, | |||
8835 | DenominatorScaled, Flags); | |||
8836 | ||||
8837 | const unsigned Denorm32Reg = AMDGPU::Hwreg::ID_MODE | | |||
8838 | (4 << AMDGPU::Hwreg::OFFSET_SHIFT_) | | |||
8839 | (1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_); | |||
8840 | const SDValue BitField = DAG.getTargetConstant(Denorm32Reg, SL, MVT::i32); | |||
8841 | ||||
8842 | const bool HasFP32Denormals = hasFP32Denormals(DAG.getMachineFunction()); | |||
8843 | ||||
8844 | if (!HasFP32Denormals) { | |||
8845 | // Note we can't use the STRICT_FMA/STRICT_FMUL for the non-strict FDIV | |||
8846 | // lowering. The chain dependence is insufficient, and we need glue. We do | |||
8847 | // not need the glue variants in a strictfp function. | |||
8848 | ||||
8849 | SDVTList BindParamVTs = DAG.getVTList(MVT::Other, MVT::Glue); | |||
8850 | ||||
8851 | SDNode *EnableDenorm; | |||
8852 | if (Subtarget->hasDenormModeInst()) { | |||
8853 | const SDValue EnableDenormValue = | |||
8854 | getSPDenormModeValue(FP_DENORM_FLUSH_NONE, DAG, SL, Subtarget); | |||
8855 | ||||
8856 | EnableDenorm = DAG.getNode(AMDGPUISD::DENORM_MODE, SL, BindParamVTs, | |||
8857 | DAG.getEntryNode(), EnableDenormValue).getNode(); | |||
8858 | } else { | |||
8859 | const SDValue EnableDenormValue = DAG.getConstant(FP_DENORM_FLUSH_NONE, | |||
8860 | SL, MVT::i32); | |||
8861 | EnableDenorm = | |||
8862 | DAG.getMachineNode(AMDGPU::S_SETREG_B32, SL, BindParamVTs, | |||
8863 | {EnableDenormValue, BitField, DAG.getEntryNode()}); | |||
8864 | } | |||
8865 | ||||
8866 | SDValue Ops[3] = { | |||
8867 | NegDivScale0, | |||
8868 | SDValue(EnableDenorm, 0), | |||
8869 | SDValue(EnableDenorm, 1) | |||
8870 | }; | |||
8871 | ||||
8872 | NegDivScale0 = DAG.getMergeValues(Ops, SL); | |||
8873 | } | |||
8874 | ||||
8875 | SDValue Fma0 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, | |||
8876 | ApproxRcp, One, NegDivScale0, Flags); | |||
8877 | ||||
8878 | SDValue Fma1 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp, | |||
8879 | ApproxRcp, Fma0, Flags); | |||
8880 | ||||
8881 | SDValue Mul = getFPBinOp(DAG, ISD::FMUL, SL, MVT::f32, NumeratorScaled, | |||
8882 | Fma1, Fma1, Flags); | |||
8883 | ||||
8884 | SDValue Fma2 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Mul, | |||
8885 | NumeratorScaled, Mul, Flags); | |||
8886 | ||||
8887 | SDValue Fma3 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, | |||
8888 | Fma2, Fma1, Mul, Fma2, Flags); | |||
8889 | ||||
8890 | SDValue Fma4 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3, | |||
8891 | NumeratorScaled, Fma3, Flags); | |||
8892 | ||||
8893 | if (!HasFP32Denormals) { | |||
8894 | SDNode *DisableDenorm; | |||
8895 | if (Subtarget->hasDenormModeInst()) { | |||
8896 | const SDValue DisableDenormValue = | |||
8897 | getSPDenormModeValue(FP_DENORM_FLUSH_IN_FLUSH_OUT, DAG, SL, Subtarget); | |||
8898 | ||||
8899 | DisableDenorm = DAG.getNode(AMDGPUISD::DENORM_MODE, SL, MVT::Other, | |||
8900 | Fma4.getValue(1), DisableDenormValue, | |||
8901 | Fma4.getValue(2)).getNode(); | |||
8902 | } else { | |||
8903 | const SDValue DisableDenormValue = | |||
8904 | DAG.getConstant(FP_DENORM_FLUSH_IN_FLUSH_OUT, SL, MVT::i32); | |||
8905 | ||||
8906 | DisableDenorm = DAG.getMachineNode( | |||
8907 | AMDGPU::S_SETREG_B32, SL, MVT::Other, | |||
8908 | {DisableDenormValue, BitField, Fma4.getValue(1), Fma4.getValue(2)}); | |||
8909 | } | |||
8910 | ||||
8911 | SDValue OutputChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other, | |||
8912 | SDValue(DisableDenorm, 0), DAG.getRoot()); | |||
8913 | DAG.setRoot(OutputChain); | |||
8914 | } | |||
8915 | ||||
8916 | SDValue Scale = NumeratorScaled.getValue(1); | |||
8917 | SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32, | |||
8918 | {Fma4, Fma1, Fma3, Scale}, Flags); | |||
8919 | ||||
8920 | return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS, Flags); | |||
8921 | } | |||
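// Editor's summary (sketch): the full-precision path above is the standard
// div_scale / rcp / Newton-Raphson / div_fmas / div_fixup recipe, with FP32
// denormals temporarily enabled around the refinement when the current mode
// flushes them, because the scaled intermediates may themselves be denormal.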
8922 | ||||
8923 | SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const { | |||
8924 | if (SDValue FastLowered = lowerFastUnsafeFDIV64(Op, DAG)) | |||
8925 | return FastLowered; | |||
8926 | ||||
8927 | SDLoc SL(Op); | |||
8928 | SDValue X = Op.getOperand(0); | |||
8929 | SDValue Y = Op.getOperand(1); | |||
8930 | ||||
8931 | const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64); | |||
8932 | ||||
8933 | SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1); | |||
8934 | ||||
8935 | SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X); | |||
8936 | ||||
8937 | SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0); | |||
8938 | ||||
8939 | SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0); | |||
8940 | ||||
8941 | SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One); | |||
8942 | ||||
8943 | SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp); | |||
8944 | ||||
8945 | SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One); | |||
8946 | ||||
8947 | SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X); | |||
8948 | ||||
8949 | SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1); | |||
8950 | SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3); | |||
8951 | ||||
8952 | SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64, | |||
8953 | NegDivScale0, Mul, DivScale1); | |||
8954 | ||||
8955 | SDValue Scale; | |||
8956 | ||||
8957 | if (!Subtarget->hasUsableDivScaleConditionOutput()) { | |||
8958 | // Work around a hardware bug on SI where the condition output from div_scale | |||
8959 | // is not usable. | |||
8960 | ||||
8961 | const SDValue Hi = DAG.getConstant(1, SL, MVT::i32); | |||
8962 | ||||
8963 | // Figure out which scale to use for div_fmas. | |||
8964 | SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X); | |||
8965 | SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y); | |||
8966 | SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0); | |||
8967 | SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1); | |||
8968 | ||||
8969 | SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi); | |||
8970 | SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi); | |||
8971 | ||||
8972 | SDValue Scale0Hi | |||
8973 | = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi); | |||
8974 | SDValue Scale1Hi | |||
8975 | = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi); | |||
8976 | ||||
8977 | SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ); | |||
8978 | SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ); | |||
8979 | Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen); | |||
8980 | } else { | |||
8981 | Scale = DivScale1.getValue(1); | |||
8982 | } | |||
8983 | ||||
8984 | SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64, | |||
8985 | Fma4, Fma3, Mul, Scale); | |||
8986 | ||||
8987 | return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X); | |||
8988 | } | |||
8989 | ||||
8990 | SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const { | |||
8991 | EVT VT = Op.getValueType(); | |||
8992 | ||||
8993 | if (VT == MVT::f32) | |||
8994 | return LowerFDIV32(Op, DAG); | |||
8995 | ||||
8996 | if (VT == MVT::f64) | |||
8997 | return LowerFDIV64(Op, DAG); | |||
8998 | ||||
8999 | if (VT == MVT::f16) | |||
9000 | return LowerFDIV16(Op, DAG); | |||
9001 | ||||
9002 | llvm_unreachable("Unexpected type for fdiv")::llvm::llvm_unreachable_internal("Unexpected type for fdiv", "llvm/lib/Target/AMDGPU/SIISelLowering.cpp", 9002); | |||
9003 | } | |||
9004 | ||||
9005 | SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { | |||
9006 | SDLoc DL(Op); | |||
9007 | StoreSDNode *Store = cast<StoreSDNode>(Op); | |||
9008 | EVT VT = Store->getMemoryVT(); | |||
9009 | ||||
9010 | if (VT == MVT::i1) { | |||
9011 | return DAG.getTruncStore(Store->getChain(), DL, | |||
9012 | DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32), | |||
9013 | Store->getBasePtr(), MVT::i1, Store->getMemOperand()); | |||
9014 | } | |||
9015 | ||||
9016 | assert(VT.isVector() && | |||
9017 | Store->getValue().getValueType().getScalarType() == MVT::i32); | |||
9018 | ||||
9019 | unsigned AS = Store->getAddressSpace(); | |||
9020 | if (Subtarget->hasLDSMisalignedBug() && | |||
9021 | AS == AMDGPUAS::FLAT_ADDRESS && | |||
9022 | Store->getAlignment() < VT.getStoreSize() && VT.getSizeInBits() > 32) { | |||
9023 | return SplitVectorStore(Op, DAG); | |||
9024 | } | |||
9025 | ||||
9026 | MachineFunction &MF = DAG.getMachineFunction(); | |||
9027 | SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); | |||
9028 | // If there is a possibility that flat instructions access scratch memory | |||
9029 | // then we need to use the same legalization rules we use for private. | |||
9030 | if (AS == AMDGPUAS::FLAT_ADDRESS && | |||
9031 | !Subtarget->hasMultiDwordFlatScratchAddressing()) | |||
9032 | AS = MFI->hasFlatScratchInit() ? | |||
9033 | AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS; | |||
9034 | ||||
9035 | unsigned NumElements = VT.getVectorNumElements(); | |||
9036 | if (AS == AMDGPUAS::GLOBAL_ADDRESS || | |||
9037 | AS == AMDGPUAS::FLAT_ADDRESS) { | |||
9038 | if (NumElements > 4) | |||
9039 | return SplitVectorStore(Op, DAG); | |||
9040 | // v3 stores not supported on SI. | |||
9041 | if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores()) | |||
9042 | return SplitVectorStore(Op, DAG); | |||
9043 | ||||
9044 | if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(), | |||
9045 | VT, *Store->getMemOperand())) | |||
9046 | return expandUnalignedStore(Store, DAG); | |||
9047 | ||||
9048 | return SDValue(); | |||
9049 | } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) { | |||
9050 | switch (Subtarget->getMaxPrivateElementSize()) { | |||
9051 | case 4: | |||
9052 | return scalarizeVectorStore(Store, DAG); | |||
9053 | case 8: | |||
9054 | if (NumElements > 2) | |||
9055 | return SplitVectorStore(Op, DAG); | |||
9056 | return SDValue(); | |||
9057 | case 16: | |||
9058 | if (NumElements > 4 || | |||
9059 | (NumElements == 3 && !Subtarget->enableFlatScratch())) | |||
9060 | return SplitVectorStore(Op, DAG); | |||
9061 | return SDValue(); | |||
9062 | default: | |||
9063 | llvm_unreachable("unsupported private_element_size")::llvm::llvm_unreachable_internal("unsupported private_element_size" , "llvm/lib/Target/AMDGPU/SIISelLowering.cpp", 9063); | |||
9064 | } | |||
9065 | } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) { | |||
9066 | bool Fast = false; | |||
9067 | auto Flags = Store->getMemOperand()->getFlags(); | |||
9068 | if (allowsMisalignedMemoryAccessesImpl(VT.getSizeInBits(), AS, | |||
9069 | Store->getAlign(), Flags, &Fast) && | |||
9070 | Fast) | |||
9071 | return SDValue(); | |||
9072 | ||||
9073 | if (VT.isVector()) | |||
9074 | return SplitVectorStore(Op, DAG); | |||
9075 | ||||
9076 | return expandUnalignedStore(Store, DAG); | |||
9077 | } | |||
9078 | ||||
9079 | // Probably an invalid store. If so we'll end up emitting a selection error. | |||
9080 | return SDValue(); | |||
9081 | } | |||
9082 | ||||
9083 | SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const { | |||
9084 | SDLoc DL(Op); | |||
9085 | EVT VT = Op.getValueType(); | |||
9086 | SDValue Arg = Op.getOperand(0); | |||
9087 | SDValue TrigVal; | |||
9088 | ||||
9089 | // Propagate fast-math flags so that the multiply we introduce can be folded | |||
9090 | // if Arg is already the result of a multiply by constant. | |||
9091 | auto Flags = Op->getFlags(); | |||
9092 | ||||
9093 | SDValue OneOver2Pi = DAG.getConstantFP(0.5 * numbers::inv_pi, DL, VT); | |||
9094 | ||||
9095 | if (Subtarget->hasTrigReducedRange()) { | |||
9096 | SDValue MulVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi, Flags); | |||
9097 | TrigVal = DAG.getNode(AMDGPUISD::FRACT, DL, VT, MulVal, Flags); | |||
9098 | } else { | |||
9099 | TrigVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi, Flags); | |||
9100 | } | |||
9101 | ||||
9102 | switch (Op.getOpcode()) { | |||
9103 | case ISD::FCOS: | |||
9104 | return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, TrigVal, Flags); | |||
9105 | case ISD::FSIN: | |||
9106 | return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, TrigVal, Flags); | |||
9107 | default: | |||
9108 | llvm_unreachable("Wrong trig opcode")::llvm::llvm_unreachable_internal("Wrong trig opcode", "llvm/lib/Target/AMDGPU/SIISelLowering.cpp" , 9108); | |||
9109 | } | |||
9110 | } | |||
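// Illustrative example (editor's sketch): the hardware SIN/COS operate on
// revolutions rather than radians, so sin(x) lowers to
//   SIN_HW(fract(x * 1/(2*pi)))  when the subtarget needs range reduction,
//   SIN_HW(x * 1/(2*pi))         otherwise.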
9111 | ||||
9112 | SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const { | |||
9113 | AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op); | |||
9114 | assert(AtomicNode->isCompareAndSwap()); | |||
9115 | unsigned AS = AtomicNode->getAddressSpace(); | |||
9116 | ||||
9117 | // No custom lowering required for local address space | |||
9118 | if (!AMDGPU::isFlatGlobalAddrSpace(AS)) | |||
9119 | return Op; | |||
9120 | ||||
9121 | // Non-local address space requires custom lowering for atomic compare | |||
9122 | // and swap; cmp and swap should be in a v2i32 or v2i64 in case of _X2 | |||
9123 | SDLoc DL(Op); | |||
9124 | SDValue ChainIn = Op.getOperand(0); | |||
9125 | SDValue Addr = Op.getOperand(1); | |||
9126 | SDValue Old = Op.getOperand(2); | |||
9127 | SDValue New = Op.getOperand(3); | |||
9128 | EVT VT = Op.getValueType(); | |||
9129 | MVT SimpleVT = VT.getSimpleVT(); | |||
9130 | MVT VecType = MVT::getVectorVT(SimpleVT, 2); | |||
9131 | ||||
9132 | SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old}); | |||
9133 | SDValue Ops[] = { ChainIn, Addr, NewOld }; | |||
9134 | ||||
9135 | return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(), | |||
9136 | Ops, VT, AtomicNode->getMemOperand()); | |||
9137 | } | |||
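// Illustrative example (editor's sketch): a global i32 cmpxchg is rewritten
// so the data operands travel as a single vector,
//   atomic_cmp_swap %ptr, %old, %new
//     -> AMDGPUISD::ATOMIC_CMP_SWAP %ptr, (build_vector %new, %old)
// matching the operand order the flat/global atomic instructions expect.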
9138 | ||||
9139 | //===----------------------------------------------------------------------===// | |||
9140 | // Custom DAG optimizations | |||
9141 | //===----------------------------------------------------------------------===// | |||
9142 | ||||
9143 | SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N, | |||
9144 | DAGCombinerInfo &DCI) const { | |||
9145 | EVT VT = N->getValueType(0); | |||
9146 | EVT ScalarVT = VT.getScalarType(); | |||
9147 | if (ScalarVT != MVT::f32 && ScalarVT != MVT::f16) | |||
9148 | return SDValue(); | |||
9149 | ||||
9150 | SelectionDAG &DAG = DCI.DAG; | |||
9151 | SDLoc DL(N); | |||
9152 | ||||
9153 | SDValue Src = N->getOperand(0); | |||
9154 | EVT SrcVT = Src.getValueType(); | |||
9155 | ||||
9156 | // TODO: We could try to match extracting the higher bytes, which would be | |||
9157 | // easier if i8 vectors weren't promoted to i32 vectors, particularly after | |||
9158 | // types are legalized. v4i8 -> v4f32 is probably the only case to worry | |||
9159 | // about in practice. | |||
9160 | if (DCI.isAfterLegalizeDAG() && SrcVT == MVT::i32) { | |||
9161 | if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) { | |||
9162 | SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, MVT::f32, Src); | |||
9163 | DCI.AddToWorklist(Cvt.getNode()); | |||
9164 | ||||
9165 | // For the f16 case, fold to a cast to f32 and then cast back to f16. | |||
9166 | if (ScalarVT != MVT::f32) { | |||
9167 | Cvt = DAG.getNode(ISD::FP_ROUND, DL, VT, Cvt, | |||
9168 | DAG.getTargetConstant(0, DL, MVT::i32)); | |||
9169 | } | |||
9170 | return Cvt; | |||
9171 | } | |||
9172 | } | |||
9173 | ||||
9174 | return SDValue(); | |||
9175 | } | |||
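// Illustrative example (editor's sketch): when the high 24 bits of the i32
// source are known zero,
//   (uint_to_fp (and x, 0xff)) -> (CVT_F32_UBYTE0 x)
// and an f16 result is obtained by fp_rounding the f32 conversion.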
9176 | ||||
9177 | // (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2) | |||
9178 | ||||
9179 | // This is a variant of | |||
9180 | // (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2), | |||
9181 | // | |||
9182 | // The normal DAG combiner will do this, but only if the add has one use, | |||
9183 | // since otherwise it would increase the number of instructions. | |||
9184 | // | |||
9185 | // This prevents us from seeing a constant offset that can be folded into a | |||
9186 | // memory instruction's addressing mode. If we know the resulting add offset of | |||
9187 | // a pointer can be folded into an addressing offset, we can replace the pointer | |||
9188 | // operand with the add of the new constant offset. This eliminates one of the uses, | |||
9189 | // and may allow the remaining use to also be simplified. | |||
9190 | // | |||
9191 | SDValue SITargetLowering::performSHLPtrCombine(SDNode *N, | |||
9192 | unsigned AddrSpace, | |||
9193 | EVT MemVT, | |||
9194 | DAGCombinerInfo &DCI) const { | |||
9195 | SDValue N0 = N->getOperand(0); | |||
9196 | SDValue N1 = N->getOperand(1); | |||
9197 | ||||
9198 | // We only do this to handle cases where it's profitable when there are | |||
9199 | // multiple uses of the add, so defer to the standard combine. | |||
9200 | if ((N0.getOpcode() != ISD::ADD && N0.getOpcode() != ISD::OR) || | |||
9201 | N0->hasOneUse()) | |||
9202 | return SDValue(); | |||
9203 | ||||
9204 | const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1); | |||
9205 | if (!CN1) | |||
9206 | return SDValue(); | |||
9207 | ||||
9208 | const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1)); | |||
9209 | if (!CAdd) | |||
9210 | return SDValue(); | |||
9211 | ||||
9212 | // If the resulting offset is too large, we can't fold it into the addressing | |||
9213 | // mode offset. | |||
9214 | APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue(); | |||
9215 | Type *Ty = MemVT.getTypeForEVT(*DCI.DAG.getContext()); | |||
9216 | ||||
9217 | AddrMode AM; | |||
9218 | AM.HasBaseReg = true; | |||
9219 | AM.BaseOffs = Offset.getSExtValue(); | |||
9220 | if (!isLegalAddressingMode(DCI.DAG.getDataLayout(), AM, Ty, AddrSpace)) | |||
9221 | return SDValue(); | |||
9222 | ||||
9223 | SelectionDAG &DAG = DCI.DAG; | |||
9224 | SDLoc SL(N); | |||
9225 | EVT VT = N->getValueType(0); | |||
9226 | ||||
9227 | SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1); | |||
9228 | SDValue COffset = DAG.getConstant(Offset, SL, VT); | |||
9229 | ||||
9230 | SDNodeFlags Flags; | |||
9231 | Flags.setNoUnsignedWrap(N->getFlags().hasNoUnsignedWrap() && | |||
9232 | (N0.getOpcode() == ISD::OR || | |||
9233 | N0->getFlags().hasNoUnsignedWrap())); | |||
9234 | ||||
9235 | return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset, Flags); | |||
9236 | } | |||
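// Worked example of the combine above (an illustrative sketch, not part of
// the original source): for (shl (add x, 16), 2) the folded offset is
// 16 << 2 == 64, so the node becomes (add (shl x, 2), 64), provided a
// base-register + 64 addressing mode is legal for the access.
static_assert((16u << 2u) == 64u, "shl-of-add offset folding example");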
9237 | ||||
9238 | /// MemSDNode::getBasePtr() does not work for intrinsics, which need to be | |||
9239 | /// offset by the chain and intrinsic ID. Theoretically we would also need to | |||
9240 | /// check the specific intrinsic, but they all place the pointer operand first. | |||
9241 | static unsigned getBasePtrIndex(const MemSDNode *N) { | |||
9242 | switch (N->getOpcode()) { | |||
9243 | case ISD::STORE: | |||
9244 | case ISD::INTRINSIC_W_CHAIN: | |||
9245 | case ISD::INTRINSIC_VOID: | |||
9246 | return 2; | |||
9247 | default: | |||
9248 | return 1; | |||
9249 | } | |||
9250 | } | |||
9251 | ||||
9252 | SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N, | |||
9253 | DAGCombinerInfo &DCI) const { | |||
9254 | SelectionDAG &DAG = DCI.DAG; | |||
9255 | SDLoc SL(N); | |||
9256 | ||||
9257 | unsigned PtrIdx = getBasePtrIndex(N); | |||
9258 | SDValue Ptr = N->getOperand(PtrIdx); | |||
9259 | ||||
9260 | // TODO: We could also do this for multiplies. | |||
9261 | if (Ptr.getOpcode() == ISD::SHL) { | |||
9262 | SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), N->getAddressSpace(), | |||
9263 | N->getMemoryVT(), DCI); | |||
9264 | if (NewPtr) { | |||
9265 | SmallVector<SDValue, 8> NewOps(N->op_begin(), N->op_end()); | |||
9266 | ||||
9267 | NewOps[PtrIdx] = NewPtr; | |||
9268 | return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0); | |||
9269 | } | |||
9270 | } | |||
9271 | ||||
9272 | return SDValue(); | |||
9273 | } | |||
9274 | ||||
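// A bitwise op with such a constant folds away entirely:
// (and x, 0) -> 0, (and x, -1) -> x, (or x, -1) -> -1, (or x, 0) -> x,
// and (xor x, 0) -> x.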
9275 | static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) { | |||
9276 | return (Opc == ISD::AND && (Val == 0 || Val == 0xffffffff)) || | |||
9277 | (Opc == ISD::OR && (Val == 0xffffffff || Val == 0)) || | |||
9278 | (Opc == ISD::XOR && Val == 0); | |||
9279 | } | |||
9280 | ||||
9281 | // Break up a 64-bit bitwise operation with a constant into two 32-bit | |||
9282 | // and/or/xor operations. This will typically happen anyway for a VALU 64-bit | |||
9283 | // and, and it exposes other 32-bit integer combine opportunities, since most | |||
9284 | // 64-bit operations are decomposed this way. TODO: We won't want this for | |||
9285 | // SALU, especially if the constant is an inline immediate. | |||
9286 | SDValue SITargetLowering::splitBinaryBitConstantOp( | |||
9287 | DAGCombinerInfo &DCI, | |||
9288 | const SDLoc &SL, | |||
9289 | unsigned Opc, SDValue LHS, | |||
9290 | const ConstantSDNode *CRHS) const { | |||
9291 | uint64_t Val = CRHS->getZExtValue(); | |||
9292 | uint32_t ValLo = Lo_32(Val); | |||
9293 | uint32_t ValHi = Hi_32(Val); | |||
9294 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); | |||
9295 | ||||
9296 | if ((bitOpWithConstantIsReducible(Opc, ValLo) || | |||
9297 | bitOpWithConstantIsReducible(Opc, ValHi)) || | |||
9298 | (CRHS->hasOneUse() && !TII->isInlineConstant(CRHS->getAPIntValue()))) { | |||
9299 | // If we need to materialize a 64-bit immediate, it will be split up later | |||
9300 | // anyway. Avoid creating the harder to understand 64-bit immediate | |||
9301 | // materialization. | |||
9302 | return splitBinaryBitConstantOpImpl(DCI, SL, Opc, LHS, ValLo, ValHi); | |||
9303 | } | |||
9304 | ||||
9305 | return SDValue(); | |||
9306 | } | |||
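// Illustrative example (not from the original source): for
// (and i64:x, 0x00000000ffffffff), ValLo == 0xffffffff and ValHi == 0 are
// both reducible, so the 64-bit and splits into (and lo(x), -1) -> lo(x)
// and (and hi(x), 0) -> 0, leaving no 32-bit bit operations at all.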
9307 | ||||
9308 | // Returns true if the argument is a boolean value that is not serialized | |||
9309 | // into memory or an argument, and needs no v_cndmask_b32 to be deserialized. | |||
9310 | static bool isBoolSGPR(SDValue V) { | |||
9311 | if (V.getValueType() != MVT::i1) | |||
9312 | return false; | |||
9313 | switch (V.getOpcode()) { | |||
9314 | default: | |||
9315 | break; | |||
9316 | case ISD::SETCC: | |||
9317 | case AMDGPUISD::FP_CLASS: | |||
9318 | return true; | |||
9319 | case ISD::AND: | |||
9320 | case ISD::OR: | |||
9321 | case ISD::XOR: | |||
9322 | return isBoolSGPR(V.getOperand(0)) && isBoolSGPR(V.getOperand(1)); | |||
9323 | } | |||
9324 | return false; | |||
9325 | } | |||
9326 | ||||
9327 | // If each byte of a constant is either all zeroes or all ones, return the | |||
9328 | // constant. Otherwise return 0. | |||
9329 | static uint32_t getConstantPermuteMask(uint32_t C) { | |||
9330 | // 0xff for any zero byte in the mask | |||
9331 | uint32_t ZeroByteMask = 0; | |||
9332 | if (!(C & 0x000000ff)) ZeroByteMask |= 0x000000ff; | |||
9333 | if (!(C & 0x0000ff00)) ZeroByteMask |= 0x0000ff00; | |||
9334 | if (!(C & 0x00ff0000)) ZeroByteMask |= 0x00ff0000; | |||
9335 | if (!(C & 0xff000000)) ZeroByteMask |= 0xff000000; | |||
9336 | uint32_t NonZeroByteMask = ~ZeroByteMask; // 0xff for any non-zero byte | |||
9337 | if ((NonZeroByteMask & C) != NonZeroByteMask) | |||
9338 | return 0; // Partial bytes selected. | |||
9339 | return C; | |||
9340 | } | |||
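// Examples (illustrative, not from the original source):
//   getConstantPermuteMask(0x00ff00ff) == 0x00ff00ff  (whole bytes selected)
//   getConstantPermuteMask(0x00000f00) == 0           (partial byte selected)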
9341 | ||||
9342 | // Check if a node selects whole bytes from its operand 0 starting at a byte | |||
9343 | // boundary while masking the rest. Returns the select mask as used by | |||
9344 | // v_perm_b32, or ~0 if it does not. | |||
9345 | // Note byte select encoding: | |||
9346 | // value 0-3 selects corresponding source byte; | |||
9347 | // value 0xc selects zero; | |||
9348 | // value 0xff selects 0xff. | |||
9349 | static uint32_t getPermuteMask(SelectionDAG &DAG, SDValue V) { | |||
9350 | assert(V.getValueSizeInBits() == 32); | |||
9351 | ||||
9352 | if (V.getNumOperands() != 2) | |||
9353 | return ~0; | |||
9354 | ||||
9355 | ConstantSDNode *N1 = dyn_cast<ConstantSDNode>(V.getOperand(1)); | |||
9356 | if (!N1) | |||
9357 | return ~0; | |||
9358 | ||||
9359 | uint32_t C = N1->getZExtValue(); | |||
9360 | ||||
9361 | switch (V.getOpcode()) { | |||
9362 | default: | |||
9363 | break; | |||
9364 | case ISD::AND: | |||
9365 | if (uint32_t ConstMask = getConstantPermuteMask(C)) { | |||
9366 | return (0x03020100 & ConstMask) | (0x0c0c0c0c & ~ConstMask); | |||
9367 | } | |||
9368 | break; | |||
9369 | ||||
9370 | case ISD::OR: | |||
9371 | if (uint32_t ConstMask = getConstantPermuteMask(C)) { | |||
9372 | return (0x03020100 & ~ConstMask) | ConstMask; | |||
9373 | } | |||
9374 | break; | |||
9375 | ||||
9376 | case ISD::SHL: | |||
9377 | if (C % 8) | |||
9378 | return ~0; | |||
9379 | ||||
9380 | return uint32_t((0x030201000c0c0c0cull << C) >> 32); | |||
9381 | ||||
9382 | case ISD::SRL: | |||
9383 | if (C % 8) | |||
9384 | return ~0; | |||
9385 | ||||
9386 | return uint32_t(0x0c0c0c0c03020100ull >> C); | |||
9387 | } | |||
9388 | ||||
9389 | return ~0; | |||
9390 | } | |||
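// Worked examples for the SHL/SRL cases above (illustrative static_asserts,
// not part of the original source):
static_assert(uint32_t((0x030201000c0c0c0cull << 8) >> 32) == 0x0201000c,
              "shl by 8: lane 0 reads zero (0xc), lanes 1-3 take bytes 0-2");
static_assert(uint32_t(0x0c0c0c0c03020100ull >> 16) == 0x0c0c0302,
              "srl by 16: lanes 0-1 take bytes 2-3, lanes 2-3 read zero");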
9391 | ||||
9392 | SDValue SITargetLowering::performAndCombine(SDNode *N, | |||
9393 | DAGCombinerInfo &DCI) const { | |||
9394 | if (DCI.isBeforeLegalize()) | |||
9395 | return SDValue(); | |||
9396 | ||||
9397 | SelectionDAG &DAG = DCI.DAG; | |||
9398 | EVT VT = N->getValueType(0); | |||
9399 | SDValue LHS = N->getOperand(0); | |||
9400 | SDValue RHS = N->getOperand(1); | |||
9401 | ||||
9402 | ||||
9403 | const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS); | |||
9404 | if (VT == MVT::i64 && CRHS) { | |||
9405 | if (SDValue Split | |||
9406 | = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS)) | |||
9407 | return Split; | |||
9408 | } | |||
9409 | ||||
9410 | if (CRHS && VT == MVT::i32) { | |||
9411 | // and (srl x, c), mask => shl (bfe x, nb + c, mask >> nb), nb | |||
9412 | // nb = number of trailing zeroes in mask | |||
9413 | // It can be optimized out using SDWA for GFX8+ in the SDWA peephole pass, | |||
9414 | // given that we are selecting 8 or 16 bit fields starting at a byte boundary. | |||
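// Worked example (not in the original source): (and (srl x, 8), 0xff00)
// becomes (shl (bfe x, 16, 8), 8); the bfe extracts bits [23:16] of x and
// the shl places them back at bits [15:8].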
9415 | uint64_t Mask = CRHS->getZExtValue(); | |||
9416 | unsigned Bits = countPopulation(Mask); | |||
9417 | if (getSubtarget()->hasSDWA() && LHS->getOpcode() == ISD::SRL && | |||
9418 | (Bits == 8 || Bits == 16) && isShiftedMask_64(Mask) && !(Mask & 1)) { | |||
9419 | if (auto *CShift = dyn_cast<ConstantSDNode>(LHS->getOperand(1))) { | |||
9420 | unsigned Shift = CShift->getZExtValue(); | |||
9421 | unsigned NB = CRHS->getAPIntValue().countTrailingZeros(); | |||
9422 | unsigned Offset = NB + Shift; | |||
9423 | if ((Offset & (Bits - 1)) == 0) { // Starts at a byte or word boundary. | |||
9424 | SDLoc SL(N); | |||
9425 | SDValue BFE = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32, | |||
9426 | LHS->getOperand(0), | |||
9427 | DAG.getConstant(Offset, SL, MVT::i32), | |||
9428 | DAG.getConstant(Bits, SL, MVT::i32)); | |||
9429 | EVT NarrowVT = EVT::getIntegerVT(*DAG.getContext(), Bits); | |||
9430 | SDValue Ext = DAG.getNode(ISD::AssertZext, SL, VT, BFE, | |||
9431 | DAG.getValueType(NarrowVT)); | |||
9432 | SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(LHS), VT, Ext, | |||
9433 | DAG.getConstant(NB, SDLoc(CRHS), MVT::i32)); | |||
9434 | return Shl; | |||
9435 | } | |||
9436 | } | |||
9437 | } | |||
9438 | ||||
9439 | // and (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2) | |||
9440 | if (LHS.hasOneUse() && LHS.getOpcode() == AMDGPUISD::PERM && | |||
9441 | isa<ConstantSDNode>(LHS.getOperand(2))) { | |||
9442 | uint32_t Sel = getConstantPermuteMask(Mask); | |||
9443 | if (!Sel) | |||
9444 | return SDValue(); | |||
9445 | ||||
9446 | // Select 0xc for all zero bytes | |||
9447 | Sel = (LHS.getConstantOperandVal(2) & Sel) | (~Sel & 0x0c0c0c0c); | |||
9448 | SDLoc DL(N); | |||
9449 | return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0), | |||
9450 | LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32)); | |||
9451 | } | |||
9452 | } | |||
9453 | ||||
9454 | // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) -> | |||
9455 | // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity) | |||
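// For reference (assumed v_cmp_class bit layout from the ISA documentation,
// not spelled out here): bit 0 = sNaN, 1 = qNaN, 2 = -inf, 3 = -normal,
// 4 = -subnormal, 5 = -0, 6 = +0, 7 = +subnormal, 8 = +normal, 9 = +inf;
// hence only 10 mask bits are meaningful.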
9456 | if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == ISD::SETCC) { | |||
9457 | ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get(); | |||
9458 | ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get(); | |||
9459 | ||||
9460 | SDValue X = LHS.getOperand(0); | |||
9461 | SDValue Y = RHS.getOperand(0); | |||
9462 | if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X) | |||
9463 | return SDValue(); | |||
9464 | ||||
9465 | if (LCC == ISD::SETO) { | |||
9466 | if (X != LHS.getOperand(1)) | |||
9467 | return SDValue(); | |||
9468 | ||||
9469 | if (RCC == ISD::SETUNE) { | |||
9470 | const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1)); | |||
9471 | if (!C1 || !C1->isInfinity() || C1->isNegative()) | |||
9472 | return SDValue(); | |||
9473 | ||||
9474 | const uint32_t Mask = SIInstrFlags::N_NORMAL | | |||
9475 | SIInstrFlags::N_SUBNORMAL | | |||
9476 | SIInstrFlags::N_ZERO | | |||
9477 | SIInstrFlags::P_ZERO | | |||
9478 | SIInstrFlags::P_SUBNORMAL | | |||
9479 | SIInstrFlags::P_NORMAL; | |||
9480 | ||||
9481 | static_assert(((~(SIInstrFlags::S_NAN | | |||
9482 | SIInstrFlags::Q_NAN | | |||
9483 | SIInstrFlags::N_INFINITY | | |||
9484 | SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask, | |||
9485 | "mask not equal"); | |||
9486 | ||||
9487 | SDLoc DL(N); | |||
9488 | return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, | |||
9489 | X, DAG.getConstant(Mask, DL, MVT::i32)); | |||
9490 | } | |||
9491 | } | |||
9492 | } | |||
9493 | ||||
9494 | if (RHS.getOpcode() == ISD::SETCC && LHS.getOpcode() == AMDGPUISD::FP_CLASS) | |||
9495 | std::swap(LHS, RHS); | |||
9496 | ||||
9497 | if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == AMDGPUISD::FP_CLASS && | |||
9498 | RHS.hasOneUse()) { | |||
9499 | ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get(); | |||
9500 | // and (fcmp seto), (fp_class x, mask) -> fp_class x, mask & ~(p_nan | n_nan) | |||
9501 | // and (fcmp setuo), (fp_class x, mask) -> fp_class x, mask & (p_nan | n_nan) | |||
9502 | const ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(RHS.getOperand(1)); | |||
9503 | if ((LCC == ISD::SETO || LCC == ISD::SETUO) && Mask && | |||
9504 | (RHS.getOperand(0) == LHS.getOperand(0) && | |||
9505 | LHS.getOperand(0) == LHS.getOperand(1))) { | |||
9506 | const unsigned OrdMask = SIInstrFlags::S_NAN | SIInstrFlags::Q_NAN; | |||
9507 | unsigned NewMask = LCC == ISD::SETO ? | |||
9508 | Mask->getZExtValue() & ~OrdMask : | |||
9509 | Mask->getZExtValue() & OrdMask; | |||
9510 | ||||
9511 | SDLoc DL(N); | |||
9512 | return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, RHS.getOperand(0), | |||
9513 | DAG.getConstant(NewMask, DL, MVT::i32)); | |||
9514 | } | |||
9515 | } | |||
9516 | ||||
9517 | if (VT == MVT::i32 && | |||
9518 | (RHS.getOpcode() == ISD::SIGN_EXTEND || LHS.getOpcode() == ISD::SIGN_EXTEND)) { | |||
9519 | // and x, (sext cc from i1) => select cc, x, 0 | |||
9520 | if (RHS.getOpcode() != ISD::SIGN_EXTEND) | |||
9521 | std::swap(LHS, RHS); | |||
9522 | if (isBoolSGPR(RHS.getOperand(0))) | |||
9523 | return DAG.getSelect(SDLoc(N), MVT::i32, RHS.getOperand(0), | |||
9524 | LHS, DAG.getConstant(0, SDLoc(N), MVT::i32)); | |||
9525 | } | |||
9526 | ||||
9527 | // and (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2) | |||
9528 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); | |||
9529 | if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() && | |||
9530 | N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32_e64) != -1) { | |||
9531 | uint32_t LHSMask = getPermuteMask(DAG, LHS); | |||
9532 | uint32_t RHSMask = getPermuteMask(DAG, RHS); | |||
9533 | if (LHSMask != ~0u && RHSMask != ~0u) { | |||
9534 | // Canonicalize the expression in an attempt to have fewer unique masks | |||
9535 | // and therefore fewer registers used to hold the masks. | |||
9536 | if (LHSMask > RHSMask) { | |||
9537 | std::swap(LHSMask, RHSMask); | |||
9538 | std::swap(LHS, RHS); | |||
9539 | } | |||
9540 | ||||
9541 | // Select 0xc for each lane used from a source operand. Zero bytes have the | |||
9542 | // 0xc mask set, 0xff bytes have 0xff in the mask; actual lanes are 0-3. | |||
9543 | uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c; | |||
9544 | uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c; | |||
9545 | ||||
9546 | // Check if we need to combine values from two sources within a byte. | |||
9547 | if (!(LHSUsedLanes & RHSUsedLanes) && | |||
9548 | // If we select high and lower word keep it for SDWA. | |||
9549 | // TODO: teach SDWA to work with v_perm_b32 and remove the check. | |||
9550 | !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) { | |||
9551 | // Each byte of each mask is either a selector in the 0-3 range, or has | |||
9552 | // higher bits set: 0xff for a 0xff byte, or 0x0c for a zero byte. If 0x0c | |||
9553 | // appears in either mask, the result byte shall always be 0x0c. Otherwise | |||
9554 | // the mask that is not 0xff wins. ANDing both masks gives a correct result, | |||
9555 | // except that bytes which should read zero must be corrected back to 0x0c. | |||
9556 | uint32_t Mask = LHSMask & RHSMask; | |||
9557 | for (unsigned I = 0; I < 32; I += 8) { | |||
9558 | uint32_t ByteSel = 0xffu << I; | |||
9559 | if ((LHSMask & ByteSel) == (0x0cu << I) || (RHSMask & ByteSel) == (0x0cu << I)) | |||
9560 | Mask = (Mask & ~ByteSel) | (0x0cu << I); | |||
9561 | } | |||
9562 | ||||
9563 | // Add 4 to each active LHS lane. It will not affect any existing 0xff | |||
9564 | // or 0x0c. | |||
9565 | uint32_t Sel = Mask | (LHSUsedLanes & 0x04040404); | |||
9566 | SDLoc DL(N); | |||
9567 | ||||
9568 | return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, | |||
9569 | LHS.getOperand(0), RHS.getOperand(0), | |||
9570 | DAG.getConstant(Sel, DL, MVT::i32)); | |||
9571 | } | |||
9572 | } | |||
9573 | } | |||
9574 | ||||
9575 | return SDValue(); | |||
9576 | } | |||
9577 | ||||
9578 | SDValue SITargetLowering::performOrCombine(SDNode *N, | |||
9579 | DAGCombinerInfo &DCI) const { | |||
9580 | SelectionDAG &DAG = DCI.DAG; | |||
9581 | SDValue LHS = N->getOperand(0); | |||
9582 | SDValue RHS = N->getOperand(1); | |||
9583 | ||||
9584 | EVT VT = N->getValueType(0); | |||
9585 | if (VT == MVT::i1) { | |||
9586 | // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2) | |||
9587 | if (LHS.getOpcode() == AMDGPUISD::FP_CLASS && | |||
9588 | RHS.getOpcode() == AMDGPUISD::FP_CLASS) { | |||
9589 | SDValue Src = LHS.getOperand(0); | |||
9590 | if (Src != RHS.getOperand(0)) | |||
9591 | return SDValue(); | |||
9592 | ||||
9593 | const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1)); | |||
9594 | const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1)); | |||
9595 | if (!CLHS || !CRHS) | |||
9596 | return SDValue(); | |||
9597 | ||||
9598 | // Only 10 bits are used. | |||
9599 | static const uint32_t MaxMask = 0x3ff; | |||
9600 | ||||
9601 | uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask; | |||
9602 | SDLoc DL(N); | |||
9603 | return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, | |||
9604 | Src, DAG.getConstant(NewMask, DL, MVT::i32)); | |||
9605 | } | |||
9606 | ||||
9607 | return SDValue(); | |||
9608 | } | |||
9609 | ||||
9610 | // or (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2) | |||
9611 | if (isa<ConstantSDNode>(RHS) && LHS.hasOneUse() && | |||
9612 | LHS.getOpcode() == AMDGPUISD::PERM && | |||
9613 | isa<ConstantSDNode>(LHS.getOperand(2))) { | |||
9614 | uint32_t Sel = getConstantPermuteMask(N->getConstantOperandVal(1)); | |||
9615 | if (!Sel) | |||
9616 | return SDValue(); | |||
9617 | ||||
9618 | Sel |= LHS.getConstantOperandVal(2); | |||
9619 | SDLoc DL(N); | |||
9620 | return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0), | |||
9621 | LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32)); | |||
9622 | } | |||
9623 | ||||
9624 | // or (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2) | |||
9625 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); | |||
9626 | if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() && | |||
9627 | N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32_e64) != -1) { | |||
9628 | uint32_t LHSMask = getPermuteMask(DAG, LHS); | |||
9629 | uint32_t RHSMask = getPermuteMask(DAG, RHS); | |||
9630 | if (LHSMask != ~0u && RHSMask != ~0u) { | |||
9631 | // Canonicalize the expression in an attempt to have fewer unique masks | |||
9632 | // and therefore fewer registers used to hold the masks. | |||
9633 | if (LHSMask > RHSMask) { | |||
9634 | std::swap(LHSMask, RHSMask); | |||
9635 | std::swap(LHS, RHS); | |||
9636 | } | |||
9637 | ||||
9638 | // Select 0xc for each lane used from a source operand. Zero bytes have the | |||
9639 | // 0xc mask set, 0xff bytes have 0xff in the mask; actual lanes are 0-3. | |||
9640 | uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c; | |||
9641 | uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c; | |||
9642 | ||||
9643 | // Check if we need to combine values from two sources within a byte. | |||
9644 | if (!(LHSUsedLanes & RHSUsedLanes) && | |||
9645 | // If we select high and lower word keep it for SDWA. | |||
9646 | // TODO: teach SDWA to work with v_perm_b32 and remove the check. | |||
9647 | !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) { | |||
9648 | // Kill zero bytes selected by the other mask. The zero selector is 0xc. | |||
9649 | LHSMask &= ~RHSUsedLanes; | |||
9650 | RHSMask &= ~LHSUsedLanes; | |||
9651 | // Add 4 to each active LHS lane | |||
9652 | LHSMask |= LHSUsedLanes & 0x04040404; | |||
9653 | // Combine masks | |||
9654 | uint32_t Sel = LHSMask | RHSMask; | |||
9655 | SDLoc DL(N); | |||
9656 | ||||
9657 | return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, | |||
9658 | LHS.getOperand(0), RHS.getOperand(0), | |||
9659 | DAG.getConstant(Sel, DL, MVT::i32)); | |||
9660 | } | |||
9661 | } | |||
9662 | } | |||
9663 | ||||
9664 | if (VT != MVT::i64 || DCI.isBeforeLegalizeOps()) | |||
9665 | return SDValue(); | |||
9666 | ||||
9667 | // TODO: This could be a generic combine with a predicate for extracting the | |||
9668 | // high half of an integer being free. | |||
9669 | ||||
9670 | // (or i64:x, (zero_extend i32:y)) -> | |||
9671 | // i64 (bitcast (v2i32 build_vector (or i32:y, lo_32(x)), hi_32(x))) | |||
9672 | if (LHS.getOpcode() == ISD::ZERO_EXTEND && | |||
9673 | RHS.getOpcode() != ISD::ZERO_EXTEND) | |||
9674 | std::swap(LHS, RHS); | |||
9675 | ||||
9676 | if (RHS.getOpcode() == ISD::ZERO_EXTEND) { | |||
9677 | SDValue ExtSrc = RHS.getOperand(0); | |||
9678 | EVT SrcVT = ExtSrc.getValueType(); | |||
9679 | if (SrcVT == MVT::i32) { | |||
9680 | SDLoc SL(N); | |||
9681 | SDValue LowLHS, HiBits; | |||
9682 | std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG); | |||
9683 | SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc); | |||
9684 | ||||
9685 | DCI.AddToWorklist(LowOr.getNode()); | |||
9686 | DCI.AddToWorklist(HiBits.getNode()); | |||
9687 | ||||
9688 | SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, | |||
9689 | LowOr, HiBits); | |||
9690 | return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec); | |||
9691 | } | |||
9692 | } | |||
9693 | ||||
9694 | const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(N->getOperand(1)); | |||
9695 | if (CRHS) { | |||
9696 | if (SDValue Split | |||
9697 | = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR, | |||
9698 | N->getOperand(0), CRHS)) | |||
9699 | return Split; | |||
9700 | } | |||
9701 | ||||
9702 | return SDValue(); | |||
9703 | } | |||
9704 | ||||
9705 | SDValue SITargetLowering::performXorCombine(SDNode *N, | |||
9706 | DAGCombinerInfo &DCI) const { | |||
9707 | if (SDValue RV = reassociateScalarOps(N, DCI.DAG)) | |||
9708 | return RV; | |||
9709 | ||||
9710 | EVT VT = N->getValueType(0); | |||
9711 | if (VT != MVT::i64) | |||
9712 | return SDValue(); | |||
9713 | ||||
9714 | SDValue LHS = N->getOperand(0); | |||
9715 | SDValue RHS = N->getOperand(1); | |||
9716 | ||||
9717 | const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS); | |||
9718 | if (CRHS) { | |||
9719 | if (SDValue Split | |||
9720 | = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS)) | |||
9721 | return Split; | |||
9722 | } | |||
9723 | ||||
9724 | return SDValue(); | |||
9725 | } | |||
9726 | ||||
9727 | SDValue SITargetLowering::performZeroExtendCombine(SDNode *N, | |||
9728 | DAGCombinerInfo &DCI) const { | |||
9729 | if (!Subtarget->has16BitInsts() || | |||
9730 | DCI.getDAGCombineLevel() < AfterLegalizeDAG) | |||
9731 | return SDValue(); | |||
9732 | ||||
9733 | EVT VT = N->getValueType(0); | |||
9734 | if (VT != MVT::i32) | |||
9735 | return SDValue(); | |||
9736 | ||||
9737 | SDValue Src = N->getOperand(0); | |||
9738 | if (Src.getValueType() != MVT::i16) | |||
9739 | return SDValue(); | |||
9740 | ||||
9741 | return SDValue(); | |||
9742 | } | |||
9743 | ||||
9744 | SDValue SITargetLowering::performSignExtendInRegCombine(SDNode *N, | |||
9745 | DAGCombinerInfo &DCI) | |||
9746 | const { | |||
9747 | SDValue Src = N->getOperand(0); | |||
9748 | auto *VTSign = cast<VTSDNode>(N->getOperand(1)); | |||
9749 | ||||
9750 | if (((Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_UBYTE && | |||
9751 | VTSign->getVT() == MVT::i8) || | |||
9752 | (Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_USHORT && | |||
9753 | VTSign->getVT() == MVT::i16)) && | |||
9754 | Src.hasOneUse()) { | |||
9755 | auto *M = cast<MemSDNode>(Src); | |||
9756 | SDValue Ops[] = { | |||
9757 | Src.getOperand(0), // Chain | |||
9758 | Src.getOperand(1), // rsrc | |||
9759 | Src.getOperand(2), // vindex | |||
9760 | Src.getOperand(3), // voffset | |||
9761 | Src.getOperand(4), // soffset | |||
9762 | Src.getOperand(5), // offset | |||
9763 | Src.getOperand(6), // cachepolicy | |||
9764 | Src.getOperand(7) // idxen | |||
9765 | }; | |||
9766 | // replace with BUFFER_LOAD_BYTE/SHORT | |||
9767 | SDVTList ResList = DCI.DAG.getVTList(MVT::i32, | |||
9768 | Src.getOperand(0).getValueType()); | |||
9769 | unsigned Opc = (Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_UBYTE) ? | |||
9770 | AMDGPUISD::BUFFER_LOAD_BYTE : AMDGPUISD::BUFFER_LOAD_SHORT; | |||
9771 | SDValue BufferLoadSignExt = DCI.DAG.getMemIntrinsicNode(Opc, SDLoc(N), | |||
9772 | ResList, | |||
9773 | Ops, M->getMemoryVT(), | |||
9774 | M->getMemOperand()); | |||
9775 | return DCI.DAG.getMergeValues({BufferLoadSignExt, | |||
9776 | BufferLoadSignExt.getValue(1)}, SDLoc(N)); | |||
9777 | } | |||
9778 | return SDValue(); | |||
9779 | } | |||
9780 | ||||
9781 | SDValue SITargetLowering::performClassCombine(SDNode *N, | |||
9782 | DAGCombinerInfo &DCI) const { | |||
9783 | SelectionDAG &DAG = DCI.DAG; | |||
9784 | SDValue Mask = N->getOperand(1); | |||
9785 | ||||
9786 | // fp_class x, 0 -> false | |||
9787 | if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) { | |||
9788 | if (CMask->isZero()) | |||
9789 | return DAG.getConstant(0, SDLoc(N), MVT::i1); | |||
9790 | } | |||
9791 | ||||
9792 | if (N->getOperand(0).isUndef()) | |||
9793 | return DAG.getUNDEF(MVT::i1); | |||
9794 | ||||
9795 | return SDValue(); | |||
9796 | } | |||
9797 | ||||
9798 | SDValue SITargetLowering::performRcpCombine(SDNode *N, | |||
9799 | DAGCombinerInfo &DCI) const { | |||
9800 | EVT VT = N->getValueType(0); | |||
9801 | SDValue N0 = N->getOperand(0); | |||
9802 | ||||
9803 | if (N0.isUndef()) | |||
9804 | return N0; | |||
9805 | ||||
9806 | if (VT == MVT::f32 && (N0.getOpcode() == ISD::UINT_TO_FP || | |||
9807 | N0.getOpcode() == ISD::SINT_TO_FP)) { | |||
9808 | return DCI.DAG.getNode(AMDGPUISD::RCP_IFLAG, SDLoc(N), VT, N0, | |||
9809 | N->getFlags()); | |||
9810 | } | |||
9811 | ||||
9812 | if ((VT == MVT::f32 || VT == MVT::f16) && N0.getOpcode() == ISD::FSQRT) { | |||
9813 | return DCI.DAG.getNode(AMDGPUISD::RSQ, SDLoc(N), VT, | |||
9814 | N0.getOperand(0), N->getFlags()); | |||
9815 | } | |||
9816 | ||||
9817 | return AMDGPUTargetLowering::performRcpCombine(N, DCI); | |||
9818 | } | |||
9819 | ||||
9820 | bool SITargetLowering::isCanonicalized(SelectionDAG &DAG, SDValue Op, | |||
9821 | unsigned MaxDepth) const { | |||
9822 | unsigned Opcode = Op.getOpcode(); | |||
9823 | if (Opcode == ISD::FCANONICALIZE) | |||
9824 | return true; | |||
9825 | ||||
9826 | if (auto *CFP = dyn_cast<ConstantFPSDNode>(Op)) { | |||
9827 | auto F = CFP->getValueAPF(); | |||
9828 | if (F.isNaN() && F.isSignaling()) | |||
9829 | return false; | |||
9830 | return !F.isDenormal() || denormalsEnabledForType(DAG, Op.getValueType()); | |||
9831 | } | |||
9832 | ||||
9833 | // If source is a result of another standard FP operation it is already in | |||
9834 | // canonical form. | |||
9835 | if (MaxDepth == 0) | |||
9836 | return false; | |||
9837 | ||||
9838 | switch (Opcode) { | |||
9839 | // These will flush denorms if required. | |||
9840 | case ISD::FADD: | |||
9841 | case ISD::FSUB: | |||
9842 | case ISD::FMUL: | |||
9843 | case ISD::FCEIL: | |||
9844 | case ISD::FFLOOR: | |||
9845 | case ISD::FMA: | |||
9846 | case ISD::FMAD: | |||
9847 | case ISD::FSQRT: | |||
9848 | case ISD::FDIV: | |||
9849 | case ISD::FREM: | |||
9850 | case ISD::FP_ROUND: | |||
9851 | case ISD::FP_EXTEND: | |||
9852 | case AMDGPUISD::FMUL_LEGACY: | |||
9853 | case AMDGPUISD::FMAD_FTZ: | |||
9854 | case AMDGPUISD::RCP: | |||
9855 | case AMDGPUISD::RSQ: | |||
9856 | case AMDGPUISD::RSQ_CLAMP: | |||
9857 | case AMDGPUISD::RCP_LEGACY: | |||
9858 | case AMDGPUISD::RCP_IFLAG: | |||
9859 | case AMDGPUISD::DIV_SCALE: | |||
9860 | case AMDGPUISD::DIV_FMAS: | |||
9861 | case AMDGPUISD::DIV_FIXUP: | |||
9862 | case AMDGPUISD::FRACT: | |||
9863 | case AMDGPUISD::LDEXP: | |||
9864 | case AMDGPUISD::CVT_PKRTZ_F16_F32: | |||
9865 | case AMDGPUISD::CVT_F32_UBYTE0: | |||
9866 | case AMDGPUISD::CVT_F32_UBYTE1: | |||
9867 | case AMDGPUISD::CVT_F32_UBYTE2: | |||
9868 | case AMDGPUISD::CVT_F32_UBYTE3: | |||
9869 | return true; | |||
9870 | ||||
9871 | // These can/will be lowered or combined as bit operations, so we need to | |||
9872 | // check their inputs recursively to handle them. | |||
9873 | case ISD::FNEG: | |||
9874 | case ISD::FABS: | |||
9875 | case ISD::FCOPYSIGN: | |||
9876 | return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1); | |||
9877 | ||||
9878 | case ISD::FSIN: | |||
9879 | case ISD::FCOS: | |||
9880 | case ISD::FSINCOS: | |||
9881 | return Op.getValueType().getScalarType() != MVT::f16; | |||
9882 | ||||
9883 | case ISD::FMINNUM: | |||
9884 | case ISD::FMAXNUM: | |||
9885 | case ISD::FMINNUM_IEEE: | |||
9886 | case ISD::FMAXNUM_IEEE: | |||
9887 | case AMDGPUISD::CLAMP: | |||
9888 | case AMDGPUISD::FMED3: | |||
9889 | case AMDGPUISD::FMAX3: | |||
9890 | case AMDGPUISD::FMIN3: { | |||
9891 | // FIXME: Shouldn't treat the generic operations differently based on these. | |||
9892 | // However, we aren't really required to flush the result from | |||
9893 | // minnum/maxnum. | |||
9894 | ||||
9895 | // snans will be quieted, so we only need to worry about denormals. | |||
9896 | if (Subtarget->supportsMinMaxDenormModes() || | |||
9897 | denormalsEnabledForType(DAG, Op.getValueType())) | |||
9898 | return true; | |||
9899 | ||||
9900 | // Flushing may be required. | |||
9901 | // On pre-GFX9 targets, V_MIN_F32 and others do not flush denorms. For such | |||
9902 | // targets we need to check their inputs recursively. | |||
9903 | ||||
9904 | // FIXME: Does this apply with clamp? It's implemented with max. | |||
9905 | for (unsigned I = 0, E = Op.getNumOperands(); I != E; ++I) { | |||
9906 | if (!isCanonicalized(DAG, Op.getOperand(I), MaxDepth - 1)) | |||
9907 | return false; | |||
9908 | } | |||
9909 | ||||
9910 | return true; | |||
9911 | } | |||
9912 | case ISD::SELECT: { | |||
9913 | return isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1) && | |||
9914 | isCanonicalized(DAG, Op.getOperand(2), MaxDepth - 1); | |||
9915 | } | |||
9916 | case ISD::BUILD_VECTOR: { | |||
9917 | for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) { | |||
9918 | SDValue SrcOp = Op.getOperand(i); | |||
9919 | if (!isCanonicalized(DAG, SrcOp, MaxDepth - 1)) | |||
9920 | return false; | |||
9921 | } | |||
9922 | ||||
9923 | return true; | |||
9924 | } | |||
9925 | case ISD::EXTRACT_VECTOR_ELT: | |||
9926 | case ISD::EXTRACT_SUBVECTOR: { | |||
9927 | return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1); | |||
9928 | } | |||
9929 | case ISD::INSERT_VECTOR_ELT: { | |||
9930 | return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1) && | |||
9931 | isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1); | |||
9932 | } | |||
9933 | case ISD::UNDEF: | |||
9934 | // Could be anything. | |||
9935 | return false; | |||
9936 | ||||
9937 | case ISD::BITCAST: | |||
9938 | return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1); | |||
9939 | case ISD::TRUNCATE: { | |||
9940 | // Hack around the mess we make when legalizing extract_vector_elt. | |||
9941 | if (Op.getValueType() == MVT::i16) { | |||
9942 | SDValue TruncSrc = Op.getOperand(0); | |||
9943 | if (TruncSrc.getValueType() == MVT::i32 && | |||
9944 | TruncSrc.getOpcode() == ISD::BITCAST && | |||
9945 | TruncSrc.getOperand(0).getValueType() == MVT::v2f16) { | |||
9946 | return isCanonicalized(DAG, TruncSrc.getOperand(0), MaxDepth - 1); | |||
9947 | } | |||
9948 | } | |||
9949 | return false; | |||
9950 | } | |||
9951 | case ISD::INTRINSIC_WO_CHAIN: { | |||
9952 | unsigned IntrinsicID | |||
9953 | = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); | |||
9954 | // TODO: Handle more intrinsics | |||
9955 | switch (IntrinsicID) { | |||
9956 | case Intrinsic::amdgcn_cvt_pkrtz: | |||
9957 | case Intrinsic::amdgcn_cubeid: | |||
9958 | case Intrinsic::amdgcn_frexp_mant: | |||
9959 | case Intrinsic::amdgcn_fdot2: | |||
9960 | case Intrinsic::amdgcn_rcp: | |||
9961 | case Intrinsic::amdgcn_rsq: | |||
9962 | case Intrinsic::amdgcn_rsq_clamp: | |||
9963 | case Intrinsic::amdgcn_rcp_legacy: | |||
9964 | case Intrinsic::amdgcn_rsq_legacy: | |||
9965 | case Intrinsic::amdgcn_trig_preop: | |||
9966 | return true; | |||
9967 | default: | |||
9968 | break; | |||
9969 | } | |||
9970 | ||||
9971 | LLVM_FALLTHROUGH; | |||
9972 | } | |||
9973 | default: | |||
9974 | return denormalsEnabledForType(DAG, Op.getValueType()) && | |||
9975 | DAG.isKnownNeverSNaN(Op); | |||
9976 | } | |||
9977 | ||||
9978 | llvm_unreachable("invalid operation"); | |||
9979 | } | |||
9980 | ||||
9981 | bool SITargetLowering::isCanonicalized(Register Reg, MachineFunction &MF, | |||
9982 | unsigned MaxDepth) const { | |||
9983 | MachineRegisterInfo &MRI = MF.getRegInfo(); | |||
9984 | MachineInstr *MI = MRI.getVRegDef(Reg); | |||
9985 | unsigned Opcode = MI->getOpcode(); | |||
9986 | ||||
9987 | if (Opcode == AMDGPU::G_FCANONICALIZE) | |||
9988 | return true; | |||
9989 | ||||
9990 | Optional<FPValueAndVReg> FCR; | |||
9991 | // Constant splat (can be padded with undef) or scalar constant. | |||
9992 | if (mi_match(Reg, MRI, MIPatternMatch::m_GFCstOrSplat(FCR))) { | |||
9993 | if (FCR->Value.isSignaling()) | |||
9994 | return false; | |||
9995 | return !FCR->Value.isDenormal() || | |||
9996 | denormalsEnabledForType(MRI.getType(FCR->VReg), MF); | |||
9997 | } | |||
9998 | ||||
9999 | if (MaxDepth == 0) | |||
10000 | return false; | |||
10001 | ||||
10002 | switch (Opcode) { | |||
10003 | case AMDGPU::G_FMINNUM_IEEE: | |||
10004 | case AMDGPU::G_FMAXNUM_IEEE: { | |||
10005 | if (Subtarget->supportsMinMaxDenormModes() || | |||
10006 | denormalsEnabledForType(MRI.getType(Reg), MF)) | |||
10007 | return true; | |||
10008 | for (const MachineOperand &MO : llvm::drop_begin(MI->operands())) | |||
10009 | if (!isCanonicalized(MO.getReg(), MF, MaxDepth - 1)) | |||
10010 | return false; | |||
10011 | return true; | |||
10012 | } | |||
10013 | default: | |||
10014 | return denormalsEnabledForType(MRI.getType(Reg), MF) && | |||
10015 | isKnownNeverSNaN(Reg, MRI); | |||
10016 | } | |||
10017 | ||||
10018 | llvm_unreachable("invalid operation"); | |||
10019 | } | |||
10020 | ||||
10021 | // Constant fold canonicalize. | |||
10022 | SDValue SITargetLowering::getCanonicalConstantFP( | |||
10023 | SelectionDAG &DAG, const SDLoc &SL, EVT VT, const APFloat &C) const { | |||
10024 | // Flush denormals to 0 if not enabled. | |||
10025 | if (C.isDenormal() && !denormalsEnabledForType(DAG, VT)) | |||
10026 | return DAG.getConstantFP(0.0, SL, VT); | |||
10027 | ||||
10028 | if (C.isNaN()) { | |||
10029 | APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics()); | |||
10030 | if (C.isSignaling()) { | |||
10031 | // Quiet a signaling NaN. | |||
10032 | // FIXME: Is this supposed to preserve payload bits? | |||
10033 | return DAG.getConstantFP(CanonicalQNaN, SL, VT); | |||
10034 | } | |||
10035 | ||||
10036 | // Make sure it is the canonical NaN bitpattern. | |||
10037 | // | |||
10038 | // TODO: Can we use -1 as the canonical NaN value since it's an inline | |||
10039 | // immediate? | |||
10040 | if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt()) | |||
10041 | return DAG.getConstantFP(CanonicalQNaN, SL, VT); | |||
10042 | } | |||
10043 | ||||
10044 | // Already canonical. | |||
10045 | return DAG.getConstantFP(C, SL, VT); | |||
10046 | } | |||
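// Illustrative f32 values (not from the original source): the canonical qNaN
// has the bit pattern 0x7fc00000, so the signaling NaN 0x7f800001 is quieted
// to 0x7fc00000, and the denormal 0x00000001 becomes +0.0 unless f32
// denormals are enabled.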
10047 | ||||
10048 | static bool vectorEltWillFoldAway(SDValue Op) { | |||
10049 | return Op.isUndef() || isa<ConstantFPSDNode>(Op); | |||
10050 | } | |||
10051 | ||||
10052 | SDValue SITargetLowering::performFCanonicalizeCombine( | |||
10053 | SDNode *N, | |||
10054 | DAGCombinerInfo &DCI) const { | |||
10055 | SelectionDAG &DAG = DCI.DAG; | |||
10056 | SDValue N0 = N->getOperand(0); | |||
10057 | EVT VT = N->getValueType(0); | |||
10058 | ||||
10059 | // fcanonicalize undef -> qnan | |||
10060 | if (N0.isUndef()) { | |||
10061 | APFloat QNaN = APFloat::getQNaN(SelectionDAG::EVTToAPFloatSemantics(VT)); | |||
10062 | return DAG.getConstantFP(QNaN, SDLoc(N), VT); | |||
10063 | } | |||
10064 | ||||
10065 | if (ConstantFPSDNode *CFP = isConstOrConstSplatFP(N0)) { | |||
10066 | EVT VT = N->getValueType(0); | |||
10067 | return getCanonicalConstantFP(DAG, SDLoc(N), VT, CFP->getValueAPF()); | |||
10068 | } | |||
10069 | ||||
10070 | // fcanonicalize (build_vector x, k) -> build_vector (fcanonicalize x), | |||
10071 | // (fcanonicalize k) | |||
10072 | // | |||
10073 | // fcanonicalize (build_vector x, undef) -> build_vector (fcanonicalize x), 0 | |||
10074 | ||||
10075 | // TODO: This could be better with wider vectors that will be split to v2f16, | |||
10076 | // and to consider uses since there aren't that many packed operations. | |||
10077 | if (N0.getOpcode() == ISD::BUILD_VECTOR && VT == MVT::v2f16 && | |||
10078 | isTypeLegal(MVT::v2f16)) { | |||
10079 | SDLoc SL(N); | |||
10080 | SDValue NewElts[2]; | |||
10081 | SDValue Lo = N0.getOperand(0); | |||
10082 | SDValue Hi = N0.getOperand(1); | |||
10083 | EVT EltVT = Lo.getValueType(); | |||
10084 | ||||
10085 | if (vectorEltWillFoldAway(Lo) || vectorEltWillFoldAway(Hi)) { | |||
10086 | for (unsigned I = 0; I != 2; ++I) { | |||
10087 | SDValue Op = N0.getOperand(I); | |||
10088 | if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) { | |||
10089 | NewElts[I] = getCanonicalConstantFP(DAG, SL, EltVT, | |||
10090 | CFP->getValueAPF()); | |||
10091 | } else if (Op.isUndef()) { | |||
10092 | // Handled below based on what the other operand is. | |||
10093 | NewElts[I] = Op; | |||
10094 | } else { | |||
10095 | NewElts[I] = DAG.getNode(ISD::FCANONICALIZE, SL, EltVT, Op); | |||
10096 | } | |||
10097 | } | |||
10098 | ||||
10099 | // If one half is undef, and one is constant, prefer a splat vector rather | |||
10100 | // than the normal qNaN. If it's a register, prefer 0.0 since that's | |||
10101 | // cheaper to use and may be free with a packed operation. | |||
10102 | if (NewElts[0].isUndef()) { | |||
10103 | // Splat the constant from the other half, if there is one. | |||
10104 | if (isa<ConstantFPSDNode>(NewElts[1])) | |||
10105 | NewElts[0] = NewElts[1]; | |||
10106 | } | |||
10107 | ||||
10108 | if (NewElts[1].isUndef()) { | |||
10109 | NewElts[1] = isa<ConstantFPSDNode>(NewElts[0]) ? | |||
10110 | NewElts[0] : DAG.getConstantFP(0.0f, SL, EltVT); | |||
10111 | } | |||
10112 | ||||
10113 | return DAG.getBuildVector(VT, SL, NewElts); | |||
10114 | } | |||
10115 | } | |||
10116 | ||||
10117 | unsigned SrcOpc = N0.getOpcode(); | |||
10118 | ||||
10119 | // If it's free to do so, push canonicalizes further up the source, which may | |||
10120 | // find a canonical source. | |||
10121 | // | |||
10122 | // TODO: More opcodes. Note this is unsafe for the _ieee minnum/maxnum for | |||
10123 | // sNaNs. | |||
10124 | if (SrcOpc == ISD::FMINNUM || SrcOpc == ISD::FMAXNUM) { | |||
10125 | auto *CRHS = dyn_cast<ConstantFPSDNode>(N0.getOperand(1)); | |||
10126 | if (CRHS && N0.hasOneUse()) { | |||
10127 | SDLoc SL(N); | |||
10128 | SDValue Canon0 = DAG.getNode(ISD::FCANONICALIZE, SL, VT, | |||
10129 | N0.getOperand(0)); | |||
10130 | SDValue Canon1 = getCanonicalConstantFP(DAG, SL, VT, CRHS->getValueAPF()); | |||
10131 | DCI.AddToWorklist(Canon0.getNode()); | |||
10132 | ||||
10133 | return DAG.getNode(N0.getOpcode(), SL, VT, Canon0, Canon1); | |||
10134 | } | |||
10135 | } | |||
10136 | ||||
10137 | return isCanonicalized(DAG, N0) ? N0 : SDValue(); | |||
10138 | } | |||
10139 | ||||
10140 | static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) { | |||
10141 | switch (Opc) { | |||
10142 | case ISD::FMAXNUM: | |||
10143 | case ISD::FMAXNUM_IEEE: | |||
10144 | return AMDGPUISD::FMAX3; | |||
10145 | case ISD::SMAX: | |||
10146 | return AMDGPUISD::SMAX3; | |||
10147 | case ISD::UMAX: | |||
10148 | return AMDGPUISD::UMAX3; | |||
10149 | case ISD::FMINNUM: | |||
10150 | case ISD::FMINNUM_IEEE: | |||
10151 | return AMDGPUISD::FMIN3; | |||
10152 | case ISD::SMIN: | |||
10153 | return AMDGPUISD::SMIN3; | |||
10154 | case ISD::UMIN: | |||
10155 | return AMDGPUISD::UMIN3; | |||
10156 | default: | |||
10157 | llvm_unreachable("Not a min/max opcode"); | |||
10158 | } | |||
10159 | } | |||
10160 | ||||
10161 | SDValue SITargetLowering::performIntMed3ImmCombine( | |||
10162 | SelectionDAG &DAG, const SDLoc &SL, | |||
10163 | SDValue Op0, SDValue Op1, bool Signed) const { | |||
10164 | ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1); | |||
10165 | if (!K1) | |||
10166 | return SDValue(); | |||
10167 | ||||
10168 | ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1)); | |||
10169 | if (!K0) | |||
10170 | return SDValue(); | |||
10171 | ||||
10172 | if (Signed) { | |||
10173 | if (K0->getAPIntValue().sge(K1->getAPIntValue())) | |||
10174 | return SDValue(); | |||
10175 | } else { | |||
10176 | if (K0->getAPIntValue().uge(K1->getAPIntValue())) | |||
10177 | return SDValue(); | |||
10178 | } | |||
10179 | ||||
10180 | EVT VT = K0->getValueType(0); | |||
10181 | unsigned Med3Opc = Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3; | |||
10182 | if (VT == MVT::i32 || (VT == MVT::i16 && Subtarget->hasMed3_16())) { | |||
10183 | return DAG.getNode(Med3Opc, SL, VT, | |||
10184 | Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0)); | |||
10185 | } | |||
10186 | ||||
10187 | // If there isn't a 16-bit med3 operation, convert to 32-bit. | |||
10188 | if (VT == MVT::i16) { | |||
10189 | MVT NVT = MVT::i32; | |||
10190 | unsigned ExtOp = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; | |||
10191 | ||||
10192 | SDValue Tmp1 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(0)); | |||
10193 | SDValue Tmp2 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(1)); | |||
10194 | SDValue Tmp3 = DAG.getNode(ExtOp, SL, NVT, Op1); | |||
10195 | ||||
10196 | SDValue Med3 = DAG.getNode(Med3Opc, SL, NVT, Tmp1, Tmp2, Tmp3); | |||
10197 | return DAG.getNode(ISD::TRUNCATE, SL, VT, Med3); | |||
10198 | } | |||
10199 | ||||
10200 | return SDValue(); | |||
10201 | } | |||
10202 | ||||
10203 | static ConstantFPSDNode *getSplatConstantFP(SDValue Op) { | |||
10204 | if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) | |||
10205 | return C; | |||
10206 | ||||
10207 | if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op)) { | |||
10208 | if (ConstantFPSDNode *C = BV->getConstantFPSplatNode()) | |||
10209 | return C; | |||
10210 | } | |||
10211 | ||||
10212 | return nullptr; | |||
10213 | } | |||
10214 | ||||
10215 | SDValue SITargetLowering::performFPMed3ImmCombine(SelectionDAG &DAG, | |||
10216 | const SDLoc &SL, | |||
10217 | SDValue Op0, | |||
10218 | SDValue Op1) const { | |||
10219 | ConstantFPSDNode *K1 = getSplatConstantFP(Op1); | |||
10220 | if (!K1) | |||
10221 | return SDValue(); | |||
10222 | ||||
10223 | ConstantFPSDNode *K0 = getSplatConstantFP(Op0.getOperand(1)); | |||
10224 | if (!K0) | |||
10225 | return SDValue(); | |||
10226 | ||||
10227 | // Ordered >= (although NaN inputs should have folded away by now). | |||
10228 | if (K0->getValueAPF() > K1->getValueAPF()) | |||
10229 | return SDValue(); | |||
10230 | ||||
10231 | const MachineFunction &MF = DAG.getMachineFunction(); | |||
10232 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); | |||
10233 | ||||
10234 | // TODO: Check IEEE bit enabled? | |||
10235 | EVT VT = Op0.getValueType(); | |||
10236 | if (Info->getMode().DX10Clamp) { | |||
10237 | // If dx10_clamp is enabled, NaNs clamp to 0.0. This is the same as the | |||
10238 | // hardware fmed3 behavior converting to a min. | |||
10239 | // FIXME: Should this be allowing -0.0? | |||
10240 | if (K1->isExactlyValue(1.0) && K0->isExactlyValue(0.0)) | |||
10241 | return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Op0.getOperand(0)); | |||
10242 | } | |||
10243 | ||||
10244 | // med3 for f16 is only available on gfx9+, and not available for v2f16. | |||
10245 | if (VT == MVT::f32 || (VT == MVT::f16 && Subtarget->hasMed3_16())) { | |||
10246 | // This isn't safe with signaling NaNs because in IEEE mode, min/max on a | |||
10247 | // signaling NaN gives a quiet NaN. The quiet NaN input to the min would | |||
10248 | // then give the other result, which is different from med3 with a NaN | |||
10249 | // input. | |||
10250 | SDValue Var = Op0.getOperand(0); | |||
10251 | if (!DAG.isKnownNeverSNaN(Var)) | |||
10252 | return SDValue(); | |||
10253 | ||||
10254 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); | |||
10255 | ||||
10256 | if ((!K0->hasOneUse() || | |||
10257 | TII->isInlineConstant(K0->getValueAPF().bitcastToAPInt())) && | |||
10258 | (!K1->hasOneUse() || | |||
10259 | TII->isInlineConstant(K1->getValueAPF().bitcastToAPInt()))) { | |||
10260 | return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0), | |||
10261 | Var, SDValue(K0, 0), SDValue(K1, 0)); | |||
10262 | } | |||
10263 | } | |||
10264 | ||||
10265 | return SDValue(); | |||
10266 | } | |||
10267 | ||||
10268 | SDValue SITargetLowering::performMinMaxCombine(SDNode *N, | |||
10269 | DAGCombinerInfo &DCI) const { | |||
10270 | SelectionDAG &DAG = DCI.DAG; | |||
10271 | ||||
10272 | EVT VT = N->getValueType(0); | |||
10273 | unsigned Opc = N->getOpcode(); | |||
10274 | SDValue Op0 = N->getOperand(0); | |||
10275 | SDValue Op1 = N->getOperand(1); | |||
10276 | ||||
10277 | // Only do this if the inner op has one use, since otherwise this just | |||
10278 | // increases register pressure for no benefit. | |||
10279 | ||||
10280 | if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY && | |||
10281 | !VT.isVector() && | |||
10282 | (VT == MVT::i32 || VT == MVT::f32 || | |||
10283 | ((VT == MVT::f16 || VT == MVT::i16) && Subtarget->hasMin3Max3_16()))) { | |||
10284 | // max(max(a, b), c) -> max3(a, b, c) | |||
10285 | // min(min(a, b), c) -> min3(a, b, c) | |||
10286 | if (Op0.getOpcode() == Opc && Op0.hasOneUse()) { | |||
10287 | SDLoc DL(N); | |||
10288 | return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc), | |||
10289 | DL, | |||
10290 | N->getValueType(0), | |||
10291 | Op0.getOperand(0), | |||
10292 | Op0.getOperand(1), | |||
10293 | Op1); | |||
10294 | } | |||
10295 | ||||
10296 | // Try commuted. | |||
10297 | // max(a, max(b, c)) -> max3(a, b, c) | |||
10298 | // min(a, min(b, c)) -> min3(a, b, c) | |||
10299 | if (Op1.getOpcode() == Opc && Op1.hasOneUse()) { | |||
10300 | SDLoc DL(N); | |||
10301 | return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc), | |||
10302 | DL, | |||
10303 | N->getValueType(0), | |||
10304 | Op0, | |||
10305 | Op1.getOperand(0), | |||
10306 | Op1.getOperand(1)); | |||
10307 | } | |||
10308 | } | |||
10309 | ||||
10310 | // min(max(x, K0), K1), K0 < K1 -> med3(x, K0, K1) | |||
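// e.g. smin(smax(x, -4), 7) -> smed3(x, -4, 7). The K0 < K1 check matters:
// if K0 >= K1 the expression is just the constant K1, which other combines
// handle (worked example, not in the original source).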
10311 | if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) { | |||
10312 | if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true)) | |||
10313 | return Med3; | |||
10314 | } | |||
10315 | ||||
10316 | if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) { | |||
10317 | if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false)) | |||
10318 | return Med3; | |||
10319 | } | |||
10320 | ||||
10321 | // fminnum(fmaxnum(x, K0), K1), K0 < K1 && !is_snan(x) -> fmed3(x, K0, K1) | |||
10322 | if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) || | |||
10323 | (Opc == ISD::FMINNUM_IEEE && Op0.getOpcode() == ISD::FMAXNUM_IEEE) || | |||
10324 | (Opc == AMDGPUISD::FMIN_LEGACY && | |||
10325 | Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) && | |||
10326 | (VT == MVT::f32 || VT == MVT::f64 || | |||
10327 | (VT == MVT::f16 && Subtarget->has16BitInsts()) || | |||
10328 | (VT == MVT::v2f16 && Subtarget->hasVOP3PInsts())) && | |||
10329 | Op0.hasOneUse()) { | |||
10330 | if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1)) | |||
10331 | return Res; | |||
10332 | } | |||
10333 | ||||
10334 | return SDValue(); | |||
10335 | } | |||
10336 | ||||
10337 | static bool isClampZeroToOne(SDValue A, SDValue B) { | |||
10338 | if (ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) { | |||
10339 | if (ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) { | |||
10340 | // FIXME: Should this be allowing -0.0? | |||
10341 | return (CA->isExactlyValue(0.0) && CB->isExactlyValue(1.0)) || | |||
10342 | (CA->isExactlyValue(1.0) && CB->isExactlyValue(0.0)); | |||
10343 | } | |||
10344 | } | |||
10345 | ||||
10346 | return false; | |||
10347 | } | |||
10348 | ||||
10349 | // FIXME: Should only worry about snans for the version with a chain. | |||
10350 | SDValue SITargetLowering::performFMed3Combine(SDNode *N, | |||
10351 | DAGCombinerInfo &DCI) const { | |||
10352 | EVT VT = N->getValueType(0); | |||
10353 | // v_med3_f32 and v_max_f32 behave identically wrt denorms, exceptions and | |||
10354 | // NaNs. With a NaN input, the order of the operands may change the result. | |||
10355 | ||||
10356 | SelectionDAG &DAG = DCI.DAG; | |||
10357 | SDLoc SL(N); | |||
10358 | ||||
10359 | SDValue Src0 = N->getOperand(0); | |||
10360 | SDValue Src1 = N->getOperand(1); | |||
10361 | SDValue Src2 = N->getOperand(2); | |||
10362 | ||||
10363 | if (isClampZeroToOne(Src0, Src1)) { | |||
10364 | // const_a, const_b, x -> clamp is safe in all cases including signaling | |||
10365 | // nans. | |||
10366 | // FIXME: Should this be allowing -0.0? | |||
10367 | return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src2); | |||
10368 | } | |||
10369 | ||||
10370 | const MachineFunction &MF = DAG.getMachineFunction(); | |||
10371 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); | |||
10372 | ||||
10373 | // FIXME: dx10_clamp behavior assumed in instcombine. Should we really bother | |||
10374 | // handling no dx10-clamp? | |||
10375 | if (Info->getMode().DX10Clamp) { | |||
10376 | // If NaNs are clamped to 0, we are free to reorder the inputs. | |||
10377 | ||||
10378 | if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1)) | |||
10379 | std::swap(Src0, Src1); | |||
10380 | ||||
10381 | if (isa<ConstantFPSDNode>(Src1) && !isa<ConstantFPSDNode>(Src2)) | |||
10382 | std::swap(Src1, Src2); | |||
10383 | ||||
10384 | if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1)) | |||
10385 | std::swap(Src0, Src1); | |||
10386 | ||||
10387 | if (isClampZeroToOne(Src1, Src2)) | |||
10388 | return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src0); | |||
10389 | } | |||
10390 | ||||
10391 | return SDValue(); | |||
10392 | } | |||
10393 | ||||
10394 | SDValue SITargetLowering::performCvtPkRTZCombine(SDNode *N, | |||
10395 | DAGCombinerInfo &DCI) const { | |||
10396 | SDValue Src0 = N->getOperand(0); | |||
10397 | SDValue Src1 = N->getOperand(1); | |||
10398 | if (Src0.isUndef() && Src1.isUndef()) | |||
10399 | return DCI.DAG.getUNDEF(N->getValueType(0)); | |||
10400 | return SDValue(); | |||
10401 | } | |||
10402 | ||||
10403 | // Check if EXTRACT_VECTOR_ELT/INSERT_VECTOR_ELT (<n x e>, var-idx) should be | |||
10404 | // expanded into a set of cmp/select instructions. | |||
10405 | bool SITargetLowering::shouldExpandVectorDynExt(unsigned EltSize, | |||
10406 | unsigned NumElem, | |||
10407 | bool IsDivergentIdx) { | |||
10408 | if (UseDivergentRegisterIndexing) | |||
10409 | return false; | |||
10410 | ||||
10411 | unsigned VecSize = EltSize * NumElem; | |||
10412 | ||||
10413 | // Sub-dword vectors of two dwords or less have a better implementation. | |||
10414 | if (VecSize <= 64 && EltSize < 32) | |||
10415 | return false; | |||
10416 | ||||
10417 | // Always expand the rest of the sub-dword instructions, otherwise they will | |||
10418 | // be lowered via memory. | |||
10419 | if (EltSize < 32) | |||
10420 | return true; | |||
10421 | ||||
10422 | // Always do this if var-idx is divergent, otherwise it will become a loop. | |||
10423 | if (IsDivergentIdx) | |||
10424 | return true; | |||
10425 | ||||
10426 | // Large vectors would yield too many compares and v_cndmask_b32 instructions. | |||
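// Worked arithmetic (not in the original source): v8i32 costs 8 compares +
// 8 cndmasks = 16, so it is expanded; v8i64 costs 8 + 2*8 = 24 > 16, so the
// expansion is skipped.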
10427 | unsigned NumInsts = NumElem /* Number of compares */ + | |||
10428 | ((EltSize + 31) / 32) * NumElem /* Number of cndmasks */; | |||
10429 | return NumInsts <= 16; | |||
10430 | } | |||
10431 | ||||
10432 | static bool shouldExpandVectorDynExt(SDNode *N) { | |||
10433 | SDValue Idx = N->getOperand(N->getNumOperands() - 1); | |||
10434 | if (isa<ConstantSDNode>(Idx)) | |||
10435 | return false; | |||
10436 | ||||
10437 | SDValue Vec = N->getOperand(0); | |||
10438 | EVT VecVT = Vec.getValueType(); | |||
10439 | EVT EltVT = VecVT.getVectorElementType(); | |||
10440 | unsigned EltSize = EltVT.getSizeInBits(); | |||
10441 | unsigned NumElem = VecVT.getVectorNumElements(); | |||
10442 | ||||
10443 | return SITargetLowering::shouldExpandVectorDynExt(EltSize, NumElem, | |||
10444 | Idx->isDivergent()); | |||
10445 | } | |||
10446 | ||||
10447 | SDValue SITargetLowering::performExtractVectorEltCombine( | |||
10448 | SDNode *N, DAGCombinerInfo &DCI) const { | |||
10449 | SDValue Vec = N->getOperand(0); | |||
10450 | SelectionDAG &DAG = DCI.DAG; | |||
10451 | ||||
10452 | EVT VecVT = Vec.getValueType(); | |||
10453 | EVT EltVT = VecVT.getVectorElementType(); | |||
10454 | ||||
10455 | if ((Vec.getOpcode() == ISD::FNEG || | |||
10456 | Vec.getOpcode() == ISD::FABS) && allUsesHaveSourceMods(N)) { | |||
10457 | SDLoc SL(N); | |||
10458 | EVT EltVT = N->getValueType(0); | |||
10459 | SDValue Idx = N->getOperand(1); | |||
10460 | SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, | |||
10461 | Vec.getOperand(0), Idx); | |||
10462 | return DAG.getNode(Vec.getOpcode(), SL, EltVT, Elt); | |||
10463 | } | |||
10464 | ||||
10465 | // ScalarRes = EXTRACT_VECTOR_ELT ((vector-BINOP Vec1, Vec2), Idx) | |||
10466 | // => | |||
10467 | // Vec1Elt = EXTRACT_VECTOR_ELT(Vec1, Idx) | |||
10468 | // Vec2Elt = EXTRACT_VECTOR_ELT(Vec2, Idx) | |||
10469 | // ScalarRes = scalar-BINOP Vec1Elt, Vec2Elt | |||
10470 | if (Vec.hasOneUse() && DCI.isBeforeLegalize()) { | |||
10471 | SDLoc SL(N); | |||
10472 | EVT EltVT = N->getValueType(0); | |||
10473 | SDValue Idx = N->getOperand(1); | |||
10474 | unsigned Opc = Vec.getOpcode(); | |||
10475 | ||||
10476 | switch(Opc) { | |||
10477 | default: | |||
10478 | break; | |||
10479 | // TODO: Support other binary operations. | |||
10480 | case ISD::FADD: | |||
10481 | case ISD::FSUB: | |||
10482 | case ISD::FMUL: | |||
10483 | case ISD::ADD: | |||
10484 | case ISD::UMIN: | |||
10485 | case ISD::UMAX: | |||
10486 | case ISD::SMIN: | |||
10487 | case ISD::SMAX: | |||
10488 | case ISD::FMAXNUM: | |||
10489 | case ISD::FMINNUM: | |||
10490 | case ISD::FMAXNUM_IEEE: | |||
10491 | case ISD::FMINNUM_IEEE: { | |||
10492 | SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, | |||
10493 | Vec.getOperand(0), Idx); | |||
10494 | SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, | |||
10495 | Vec.getOperand(1), Idx); | |||
10496 | ||||
10497 | DCI.AddToWorklist(Elt0.getNode()); | |||
10498 | DCI.AddToWorklist(Elt1.getNode()); | |||
10499 | return DAG.getNode(Opc, SL, EltVT, Elt0, Elt1, Vec->getFlags()); | |||
10500 | } | |||
10501 | } | |||
10502 | } | |||
10503 | ||||
10504 | unsigned VecSize = VecVT.getSizeInBits(); | |||
10505 | unsigned EltSize = EltVT.getSizeInBits(); | |||
10506 | ||||
10507 | // EXTRACT_VECTOR_ELT (<n x e>, var-idx) => n x select (e, const-idx) | |||
10508 | if (::shouldExpandVectorDynExt(N)) { | |||
10509 | SDLoc SL(N); | |||
10510 | SDValue Idx = N->getOperand(1); | |||
10511 | SDValue V; | |||
10512 | for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) { | |||
10513 | SDValue IC = DAG.getVectorIdxConstant(I, SL); | |||
10514 | SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC); | |||
10515 | if (I == 0) | |||
10516 | V = Elt; | |||
10517 | else | |||
10518 | V = DAG.getSelectCC(SL, Idx, IC, Elt, V, ISD::SETEQ); | |||
10519 | } | |||
10520 | return V; | |||
10521 | } | |||
10522 | ||||
10523 | if (!DCI.isBeforeLegalize()) | |||
10524 | return SDValue(); | |||
10525 | ||||
10526 | // Try to turn sub-dword accesses of vectors into accesses of the same 32-bit | |||
10527 | // elements. This exposes more load reduction opportunities by replacing | |||
10528 | // multiple small extract_vector_elements with a single 32-bit extract. | |||
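// Worked example (not in the original source): extracting element 5 of a
// loaded v8i8 gives BitIndex = 40, EltIdx = 1, LeftoverBitIdx = 8, so it
// becomes (trunc (srl (extractelt (bitcast Vec to v2i32), 1), 8)).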
10529 | auto *Idx = dyn_cast<ConstantSDNode>(N->getOperand(1)); | |||
10530 | if (isa<MemSDNode>(Vec) && | |||
10531 | EltSize <= 16 && | |||
10532 | EltVT.isByteSized() && | |||
10533 | VecSize > 32 && | |||
10534 | VecSize % 32 == 0 && | |||
10535 | Idx) { | |||
10536 | EVT NewVT = getEquivalentMemType(*DAG.getContext(), VecVT); | |||
10537 | ||||
10538 | unsigned BitIndex = Idx->getZExtValue() * EltSize; | |||
10539 | unsigned EltIdx = BitIndex / 32; | |||
10540 | unsigned LeftoverBitIdx = BitIndex % 32; | |||
10541 | SDLoc SL(N); | |||
10542 | ||||
10543 | SDValue Cast = DAG.getNode(ISD::BITCAST, SL, NewVT, Vec); | |||
10544 | DCI.AddToWorklist(Cast.getNode()); | |||
10545 | ||||
10546 | SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Cast, | |||
10547 | DAG.getConstant(EltIdx, SL, MVT::i32)); | |||
10548 | DCI.AddToWorklist(Elt.getNode()); | |||
10549 | SDValue Srl = DAG.getNode(ISD::SRL, SL, MVT::i32, Elt, | |||
10550 | DAG.getConstant(LeftoverBitIdx, SL, MVT::i32)); | |||
10551 | DCI.AddToWorklist(Srl.getNode()); | |||
10552 | ||||
10553 | SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, EltVT.changeTypeToInteger(), Srl); | |||
10554 | DCI.AddToWorklist(Trunc.getNode()); | |||
10555 | return DAG.getNode(ISD::BITCAST, SL, EltVT, Trunc); | |||
10556 | } | |||
10557 | ||||
10558 | return SDValue(); | |||
10559 | } | |||
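
// Illustrative sketch (hypothetical helper, not in the upstream file): a
// scalar model of the sub-dword extract rewrite above. The combine bitcasts
// the vector to 32-bit words, extracts one word, then shifts and truncates.
// Assumes <cstdint> types and EltSize <= 16, as checked by the combine.
static inline uint16_t subDwordExtractModel(const uint32_t *Words,
                                            unsigned Idx, unsigned EltSize) {
  unsigned BitIndex = Idx * EltSize;       // absolute bit position of element
  unsigned EltIdx = BitIndex / 32;         // which 32-bit word holds it
  unsigned LeftoverBitIdx = BitIndex % 32; // SRL amount within that word
  return uint16_t((Words[EltIdx] >> LeftoverBitIdx) &
                  ((1u << EltSize) - 1));  // TRUNCATE to the element width
}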
10560 | ||||
10561 | SDValue | |||
10562 | SITargetLowering::performInsertVectorEltCombine(SDNode *N, | |||
10563 | DAGCombinerInfo &DCI) const { | |||
10564 | SDValue Vec = N->getOperand(0); | |||
10565 | SDValue Idx = N->getOperand(2); | |||
10566 | EVT VecVT = Vec.getValueType(); | |||
10567 | EVT EltVT = VecVT.getVectorElementType(); | |||
10568 | ||||
10569 | // INSERT_VECTOR_ELT (<n x e>, var-idx) | |||
10570 | // => BUILD_VECTOR n x select (e, const-idx) | |||
10571 | if (!::shouldExpandVectorDynExt(N)) | |||
10572 | return SDValue(); | |||
10573 | ||||
10574 | SelectionDAG &DAG = DCI.DAG; | |||
10575 | SDLoc SL(N); | |||
10576 | SDValue Ins = N->getOperand(1); | |||
10577 | EVT IdxVT = Idx.getValueType(); | |||
10578 | ||||
10579 | SmallVector<SDValue, 16> Ops; | |||
10580 | for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) { | |||
10581 | SDValue IC = DAG.getConstant(I, SL, IdxVT); | |||
10582 | SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC); | |||
10583 | SDValue V = DAG.getSelectCC(SL, Idx, IC, Ins, Elt, ISD::SETEQ); | |||
10584 | Ops.push_back(V); | |||
10585 | } | |||
10586 | ||||
10587 | return DAG.getBuildVector(VecVT, SL, Ops); | |||
10588 | } | |||
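
// Illustrative sketch (hypothetical helper, not in the upstream file): a
// scalar model of the dynamic INSERT_VECTOR_ELT expansion above. Each output
// element is a compare+select against the variable index, which is what the
// BUILD_VECTOR of getSelectCC nodes encodes. A 4-element vector is assumed.
static inline void insertDynModel(int Vec[4], unsigned Idx, int Ins) {
  for (unsigned I = 0; I < 4; ++I)
    Vec[I] = (Idx == I) ? Ins : Vec[I]; // one select per element
}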
10589 | ||||
10590 | unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG, | |||
10591 | const SDNode *N0, | |||
10592 | const SDNode *N1) const { | |||
10593 | EVT VT = N0->getValueType(0); | |||
10594 | ||||
10595 | // Only do this if we are not trying to support denormals. v_mad_f32 does not | |||
10596 | // support denormals ever. | |||
10597 | if (((VT == MVT::f32 && !hasFP32Denormals(DAG.getMachineFunction())) || | |||
10598 | (VT == MVT::f16 && !hasFP64FP16Denormals(DAG.getMachineFunction()) && | |||
10599 | getSubtarget()->hasMadF16())) && | |||
10600 | isOperationLegal(ISD::FMAD, VT)) | |||
10601 | return ISD::FMAD; | |||
10602 | ||||
10603 | const TargetOptions &Options = DAG.getTarget().Options; | |||
10604 | if ((Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath || | |||
10605 | (N0->getFlags().hasAllowContract() && | |||
10606 | N1->getFlags().hasAllowContract())) && | |||
10607 | isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) { | |||
10608 | return ISD::FMA; | |||
10609 | } | |||
10610 | ||||
10611 | return 0; | |||
10612 | } | |||
10613 | ||||
10614 | // For a reassociatable opcode perform: | |||
10615 | // op x, (op y, z) -> op (op x, z), y, if x and z are uniform | |||
10616 | SDValue SITargetLowering::reassociateScalarOps(SDNode *N, | |||
10617 | SelectionDAG &DAG) const { | |||
10618 | EVT VT = N->getValueType(0); | |||
10619 | if (VT != MVT::i32 && VT != MVT::i64) | |||
10620 | return SDValue(); | |||
10621 | ||||
10622 | if (DAG.isBaseWithConstantOffset(SDValue(N, 0))) | |||
10623 | return SDValue(); | |||
10624 | ||||
10625 | unsigned Opc = N->getOpcode(); | |||
10626 | SDValue Op0 = N->getOperand(0); | |||
10627 | SDValue Op1 = N->getOperand(1); | |||
10628 | ||||
10629 | if (!(Op0->isDivergent() ^ Op1->isDivergent())) | |||
10630 | return SDValue(); | |||
10631 | ||||
10632 | if (Op0->isDivergent()) | |||
10633 | std::swap(Op0, Op1); | |||
10634 | ||||
10635 | if (Op1.getOpcode() != Opc || !Op1.hasOneUse()) | |||
10636 | return SDValue(); | |||
10637 | ||||
10638 | SDValue Op2 = Op1.getOperand(1); | |||
10639 | Op1 = Op1.getOperand(0); | |||
10640 | if (!(Op1->isDivergent() ^ Op2->isDivergent())) | |||
10641 | return SDValue(); | |||
10642 | ||||
10643 | if (Op1->isDivergent()) | |||
10644 | std::swap(Op1, Op2); | |||
10645 | ||||
10646 | SDLoc SL(N); | |||
10647 | SDValue Add1 = DAG.getNode(Opc, SL, VT, Op0, Op1); | |||
10648 | return DAG.getNode(Opc, SL, VT, Add1, Op2); | |||
10649 | } | |||
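
// Illustrative sketch (hypothetical helper, not in the upstream file): the
// integer identity behind the reassociation above. With x and z uniform and
// y divergent, (x + z) can be computed once on the scalar unit, leaving a
// single vector add with y instead of two.
static inline long long reassocModel(long long X, long long Y, long long Z) {
  return (X + Z) + Y; // == X + (Y + Z) for integer add
}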
10650 | ||||
10651 | static SDValue getMad64_32(SelectionDAG &DAG, const SDLoc &SL, | |||
10652 | EVT VT, | |||
10653 | SDValue N0, SDValue N1, SDValue N2, | |||
10654 | bool Signed) { | |||
10655 | unsigned MadOpc = Signed ? AMDGPUISD::MAD_I64_I32 : AMDGPUISD::MAD_U64_U32; | |||
10656 | SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i1); | |||
10657 | SDValue Mad = DAG.getNode(MadOpc, SL, VTs, N0, N1, N2); | |||
10658 | return DAG.getNode(ISD::TRUNCATE, SL, VT, Mad); | |||
10659 | } | |||
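
// Illustrative sketch (hypothetical helper, not in the upstream file): the
// semantics of the MAD_I64_I32/MAD_U64_U32 node built above, a fused
// 32x32->64 multiply plus 64-bit add (unsigned flavor shown).
static inline uint64_t madU64U32Model(uint32_t A, uint32_t B, uint64_t C) {
  return uint64_t(A) * uint64_t(B) + C; // one v_mad_u64_u32 when available
}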
10660 | ||||
10661 | SDValue SITargetLowering::performAddCombine(SDNode *N, | |||
10662 | DAGCombinerInfo &DCI) const { | |||
10663 | SelectionDAG &DAG = DCI.DAG; | |||
10664 | EVT VT = N->getValueType(0); | |||
10665 | SDLoc SL(N); | |||
10666 | SDValue LHS = N->getOperand(0); | |||
10667 | SDValue RHS = N->getOperand(1); | |||
10668 | ||||
10669 | if ((LHS.getOpcode() == ISD::MUL || RHS.getOpcode() == ISD::MUL) | |||
10670 | && Subtarget->hasMad64_32() && | |||
10671 | !VT.isVector() && VT.getScalarSizeInBits() > 32 && | |||
10672 | VT.getScalarSizeInBits() <= 64) { | |||
10673 | if (LHS.getOpcode() != ISD::MUL) | |||
10674 | std::swap(LHS, RHS); | |||
10675 | ||||
10676 | SDValue MulLHS = LHS.getOperand(0); | |||
10677 | SDValue MulRHS = LHS.getOperand(1); | |||
10678 | SDValue AddRHS = RHS; | |||
10679 | ||||
10680 | // TODO: Maybe restrict if SGPR inputs. | |||
10681 | if (numBitsUnsigned(MulLHS, DAG) <= 32 && | |||
10682 | numBitsUnsigned(MulRHS, DAG) <= 32) { | |||
10683 | MulLHS = DAG.getZExtOrTrunc(MulLHS, SL, MVT::i32); | |||
10684 | MulRHS = DAG.getZExtOrTrunc(MulRHS, SL, MVT::i32); | |||
10685 | AddRHS = DAG.getZExtOrTrunc(AddRHS, SL, MVT::i64); | |||
10686 | return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, false); | |||
10687 | } | |||
10688 | ||||
10689 | if (numBitsSigned(MulLHS, DAG) <= 32 && numBitsSigned(MulRHS, DAG) <= 32) { | |||
10690 | MulLHS = DAG.getSExtOrTrunc(MulLHS, SL, MVT::i32); | |||
10691 | MulRHS = DAG.getSExtOrTrunc(MulRHS, SL, MVT::i32); | |||
10692 | AddRHS = DAG.getSExtOrTrunc(AddRHS, SL, MVT::i64); | |||
10693 | return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, true); | |||
10694 | } | |||
10695 | ||||
10696 | return SDValue(); | |||
10697 | } | |||
10698 | ||||
10699 | if (SDValue V = reassociateScalarOps(N, DAG)) { | |||
10700 | return V; | |||
10701 | } | |||
10702 | ||||
10703 | if (VT != MVT::i32 || !DCI.isAfterLegalizeDAG()) | |||
10704 | return SDValue(); | |||
10705 | ||||
10706 | // add x, zext (setcc) => addcarry x, 0, setcc | |||
10707 | // add x, sext (setcc) => subcarry x, 0, setcc | |||
10708 | unsigned Opc = LHS.getOpcode(); | |||
10709 | if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND || | |||
10710 | Opc == ISD::ANY_EXTEND || Opc == ISD::ADDCARRY) | |||
10711 | std::swap(RHS, LHS); | |||
10712 | ||||
10713 | Opc = RHS.getOpcode(); | |||
10714 | switch (Opc) { | |||
10715 | default: break; | |||
10716 | case ISD::ZERO_EXTEND: | |||
10717 | case ISD::SIGN_EXTEND: | |||
10718 | case ISD::ANY_EXTEND: { | |||
10719 | auto Cond = RHS.getOperand(0); | |||
10720 | // If this won't be a real VOPC output, we would still need to insert an | |||
10721 | // extra instruction anyway. | |||
10722 | if (!isBoolSGPR(Cond)) | |||
10723 | break; | |||
10724 | SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1); | |||
10725 | SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond }; | |||
10726 | Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::SUBCARRY : ISD::ADDCARRY; | |||
10727 | return DAG.getNode(Opc, SL, VTList, Args); | |||
10728 | } | |||
10729 | case ISD::ADDCARRY: { | |||
10730 | // add x, (addcarry y, 0, cc) => addcarry x, y, cc | |||
10731 | auto C = dyn_cast<ConstantSDNode>(RHS.getOperand(1)); | |||
10732 | if (!C || C->getZExtValue() != 0) break; | |||
10733 | SDValue Args[] = { LHS, RHS.getOperand(0), RHS.getOperand(2) }; | |||
10734 | return DAG.getNode(ISD::ADDCARRY, SDLoc(N), RHS->getVTList(), Args); | |||
10735 | } | |||
10736 | } | |||
10737 | return SDValue(); | |||
10738 | } | |||
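
// Illustrative sketch (hypothetical helper, not in the upstream file): the
// zext(setcc) rewrite above in scalar form. A zero-extended boolean that is
// added in is the same as feeding the boolean into the carry input of an
// add-with-carry of zero.
static inline uint32_t addZextSetccModel(uint32_t X, bool CC) {
  return X + (CC ? 1u : 0u); // add x, (zext cc) == addcarry x, 0, cc
}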
10739 | ||||
10740 | SDValue SITargetLowering::performSubCombine(SDNode *N, | |||
10741 | DAGCombinerInfo &DCI) const { | |||
10742 | SelectionDAG &DAG = DCI.DAG; | |||
10743 | EVT VT = N->getValueType(0); | |||
10744 | ||||
10745 | if (VT != MVT::i32) | |||
10746 | return SDValue(); | |||
10747 | ||||
10748 | SDLoc SL(N); | |||
10749 | SDValue LHS = N->getOperand(0); | |||
10750 | SDValue RHS = N->getOperand(1); | |||
10751 | ||||
10752 | // sub x, zext (setcc) => subcarry x, 0, setcc | |||
10753 | // sub x, sext (setcc) => addcarry x, 0, setcc | |||
10754 | unsigned Opc = RHS.getOpcode(); | |||
10755 | switch (Opc) { | |||
10756 | default: break; | |||
10757 | case ISD::ZERO_EXTEND: | |||
10758 | case ISD::SIGN_EXTEND: | |||
10759 | case ISD::ANY_EXTEND: { | |||
10760 | auto Cond = RHS.getOperand(0); | |||
10761 | // If this won't be a real VOPC output, we would still need to insert an | |||
10762 | // extra instruction anyway. | |||
10763 | if (!isBoolSGPR(Cond)) | |||
10764 | break; | |||
10765 | SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1); | |||
10766 | SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond }; | |||
10767 | Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::ADDCARRY : ISD::SUBCARRY; | |||
10768 | return DAG.getNode(Opc, SL, VTList, Args); | |||
10769 | } | |||
10770 | } | |||
10771 | ||||
10772 | if (LHS.getOpcode() == ISD::SUBCARRY) { | |||
10773 | // sub (subcarry x, 0, cc), y => subcarry x, y, cc | |||
10774 | auto C = dyn_cast<ConstantSDNode>(LHS.getOperand(1)); | |||
10775 | if (!C || !C->isZero()) | |||
10776 | return SDValue(); | |||
10777 | SDValue Args[] = { LHS.getOperand(0), RHS, LHS.getOperand(2) }; | |||
10778 | return DAG.getNode(ISD::SUBCARRY, SDLoc(N), LHS->getVTList(), Args); | |||
10779 | } | |||
10780 | return SDValue(); | |||
10781 | } | |||
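
// Illustrative sketch (hypothetical helper, not in the upstream file): the
// sext(setcc) case above. sext(true) is -1, so subtracting it adds 1 --
// hence sub x, (sext cc) becomes ADDCARRY x, 0, cc.
static inline uint32_t subSextSetccModel(uint32_t X, bool CC) {
  return X - (CC ? ~0u : 0u); // == X + (CC ? 1 : 0), modulo 2^32
}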
10782 | ||||
10783 | SDValue SITargetLowering::performAddCarrySubCarryCombine(SDNode *N, | |||
10784 | DAGCombinerInfo &DCI) const { | |||
10785 | ||||
10786 | if (N->getValueType(0) != MVT::i32) | |||
10787 | return SDValue(); | |||
10788 | ||||
10789 | auto C = dyn_cast<ConstantSDNode>(N->getOperand(1)); | |||
10790 | if (!C || C->getZExtValue() != 0) | |||
10791 | return SDValue(); | |||
10792 | ||||
10793 | SelectionDAG &DAG = DCI.DAG; | |||
10794 | SDValue LHS = N->getOperand(0); | |||
10795 | ||||
10796 | // addcarry (add x, y), 0, cc => addcarry x, y, cc | |||
10797 | // subcarry (sub x, y), 0, cc => subcarry x, y, cc | |||
10798 | unsigned LHSOpc = LHS.getOpcode(); | |||
10799 | unsigned Opc = N->getOpcode(); | |||
10800 | if ((LHSOpc == ISD::ADD && Opc == ISD::ADDCARRY) || | |||
10801 | (LHSOpc == ISD::SUB && Opc == ISD::SUBCARRY)) { | |||
10802 | SDValue Args[] = { LHS.getOperand(0), LHS.getOperand(1), N->getOperand(2) }; | |||
10803 | return DAG.getNode(Opc, SDLoc(N), N->getVTList(), Args); | |||
10804 | } | |||
10805 | return SDValue(); | |||
10806 | } | |||
10807 | ||||
10808 | SDValue SITargetLowering::performFAddCombine(SDNode *N, | |||
10809 | DAGCombinerInfo &DCI) const { | |||
10810 | if (DCI.getDAGCombineLevel() < AfterLegalizeDAG) | |||
10811 | return SDValue(); | |||
10812 | ||||
10813 | SelectionDAG &DAG = DCI.DAG; | |||
10814 | EVT VT = N->getValueType(0); | |||
10815 | ||||
10816 | SDLoc SL(N); | |||
10817 | SDValue LHS = N->getOperand(0); | |||
10818 | SDValue RHS = N->getOperand(1); | |||
10819 | ||||
10820 | // These should really be instruction patterns, but writing patterns with | |||
10821 | // source modifiers is a pain. | |||
10822 | ||||
10823 | // fadd (fadd (a, a), b) -> mad 2.0, a, b | |||
10824 | if (LHS.getOpcode() == ISD::FADD) { | |||
10825 | SDValue A = LHS.getOperand(0); | |||
10826 | if (A == LHS.getOperand(1)) { | |||
10827 | unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode()); | |||
10828 | if (FusedOp != 0) { | |||
10829 | const SDValue Two = DAG.getConstantFP(2.0, SL, VT); | |||
10830 | return DAG.getNode(FusedOp, SL, VT, A, Two, RHS); | |||
10831 | } | |||
10832 | } | |||
10833 | } | |||
10834 | ||||
10835 | // fadd (b, fadd (a, a)) -> mad 2.0, a, b | |||
10836 | if (RHS.getOpcode() == ISD::FADD) { | |||
10837 | SDValue A = RHS.getOperand(0); | |||
10838 | if (A == RHS.getOperand(1)) { | |||
10839 | unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode()); | |||
10840 | if (FusedOp != 0) { | |||
10841 | const SDValue Two = DAG.getConstantFP(2.0, SL, VT); | |||
10842 | return DAG.getNode(FusedOp, SL, VT, A, Two, LHS); | |||
10843 | } | |||
10844 | } | |||
10845 | } | |||
10846 | ||||
10847 | return SDValue(); | |||
10848 | } | |||
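
// Illustrative sketch (hypothetical helper, not in the upstream file): the
// value computed by the (a + a) + b rewrite above. The constant 2.0 becomes
// a multiplicand of the fused multiply-add, so no separate add is needed.
static inline float fadd2Model(float A, float B) {
  return 2.0f * A + B; // folds to mad/fma 2.0, a, b
}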
10849 | ||||
10850 | SDValue SITargetLowering::performFSubCombine(SDNode *N, | |||
10851 | DAGCombinerInfo &DCI) const { | |||
10852 | if (DCI.getDAGCombineLevel() < AfterLegalizeDAG) | |||
10853 | return SDValue(); | |||
10854 | ||||
10855 | SelectionDAG &DAG = DCI.DAG; | |||
10856 | SDLoc SL(N); | |||
10857 | EVT VT = N->getValueType(0); | |||
10858 | assert(!VT.isVector()); | |||
10859 | ||||
10860 | // Try to get the fneg to fold into the source modifier. This undoes generic | |||
10861 | // DAG combines and folds them into the mad. | |||
10862 | // | |||
10863 | // Only do this if we are not trying to support denormals. v_mad_f32 does | |||
10864 | // not support denormals ever. | |||
10865 | SDValue LHS = N->getOperand(0); | |||
10866 | SDValue RHS = N->getOperand(1); | |||
10867 | if (LHS.getOpcode() == ISD::FADD) { | |||
10868 | // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c) | |||
10869 | SDValue A = LHS.getOperand(0); | |||
10870 | if (A == LHS.getOperand(1)) { | |||
10871 | unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode()); | |||
10872 | if (FusedOp != 0){ | |||
10873 | const SDValue Two = DAG.getConstantFP(2.0, SL, VT); | |||
10874 | SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); | |||
10875 | ||||
10876 | return DAG.getNode(FusedOp, SL, VT, A, Two, NegRHS); | |||
10877 | } | |||
10878 | } | |||
10879 | } | |||
10880 | ||||
10881 | if (RHS.getOpcode() == ISD::FADD) { | |||
10882 | // (fsub c, (fadd a, a)) -> mad -2.0, a, c | |||
10883 | ||||
10884 | SDValue A = RHS.getOperand(0); | |||
10885 | if (A == RHS.getOperand(1)) { | |||
10886 | unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode()); | |||
10887 | if (FusedOp != 0){ | |||
10888 | const SDValue NegTwo = DAG.getConstantFP(-2.0, SL, VT); | |||
10889 | return DAG.getNode(FusedOp, SL, VT, A, NegTwo, LHS); | |||
10890 | } | |||
10891 | } | |||
10892 | } | |||
10893 | ||||
10894 | return SDValue(); | |||
10895 | } | |||
10896 | ||||
10897 | SDValue SITargetLowering::performFMACombine(SDNode *N, | |||
10898 | DAGCombinerInfo &DCI) const { | |||
10899 | SelectionDAG &DAG = DCI.DAG; | |||
10900 | EVT VT = N->getValueType(0); | |||
10901 | SDLoc SL(N); | |||
10902 | ||||
10903 | if (!Subtarget->hasDot7Insts() || VT != MVT::f32) | |||
10904 | return SDValue(); | |||
10905 | ||||
10906 | // FMA((F32)S0.x, (F32)S1.x, FMA((F32)S0.y, (F32)S1.y, (F32)z)) -> | |||
10907 | // FDOT2((V2F16)S0, (V2F16)S1, (F32)z) | |||
10908 | SDValue Op1 = N->getOperand(0); | |||
10909 | SDValue Op2 = N->getOperand(1); | |||
10910 | SDValue FMA = N->getOperand(2); | |||
10911 | ||||
10912 | if (FMA.getOpcode() != ISD::FMA || | |||
10913 | Op1.getOpcode() != ISD::FP_EXTEND || | |||
10914 | Op2.getOpcode() != ISD::FP_EXTEND) | |||
10915 | return SDValue(); | |||
10916 | ||||
10917 | // fdot2_f32_f16 always flushes fp32 denormal operand and output to zero, | |||
10918 | // regardless of the denorm mode setting. Therefore, | |||
10919 | // unsafe-fp-math/fp-contract is sufficient to allow generating fdot2. | |||
10920 | const TargetOptions &Options = DAG.getTarget().Options; | |||
10921 | if (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath || | |||
10922 | (N->getFlags().hasAllowContract() && | |||
10923 | FMA->getFlags().hasAllowContract())) { | |||
10924 | Op1 = Op1.getOperand(0); | |||
10925 | Op2 = Op2.getOperand(0); | |||
10926 | if (Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT || | |||
10927 | Op2.getOpcode() != ISD::EXTRACT_VECTOR_ELT) | |||
10928 | return SDValue(); | |||
10929 | ||||
10930 | SDValue Vec1 = Op1.getOperand(0); | |||
10931 | SDValue Idx1 = Op1.getOperand(1); | |||
10932 | SDValue Vec2 = Op2.getOperand(0); | |||
10933 | ||||
10934 | SDValue FMAOp1 = FMA.getOperand(0); | |||
10935 | SDValue FMAOp2 = FMA.getOperand(1); | |||
10936 | SDValue FMAAcc = FMA.getOperand(2); | |||
10937 | ||||
10938 | if (FMAOp1.getOpcode() != ISD::FP_EXTEND || | |||
10939 | FMAOp2.getOpcode() != ISD::FP_EXTEND) | |||
10940 | return SDValue(); | |||
10941 | ||||
10942 | FMAOp1 = FMAOp1.getOperand(0); | |||
10943 | FMAOp2 = FMAOp2.getOperand(0); | |||
10944 | if (FMAOp1.getOpcode() != ISD::EXTRACT_VECTOR_ELT || | |||
10945 | FMAOp2.getOpcode() != ISD::EXTRACT_VECTOR_ELT) | |||
10946 | return SDValue(); | |||
10947 | ||||
10948 | SDValue Vec3 = FMAOp1.getOperand(0); | |||
10949 | SDValue Vec4 = FMAOp2.getOperand(0); | |||
10950 | SDValue Idx2 = FMAOp1.getOperand(1); | |||
10951 | ||||
10952 | if (Idx1 != Op2.getOperand(1) || Idx2 != FMAOp2.getOperand(1) || | |||
10953 | // Idx1 and Idx2 cannot be the same. | |||
10954 | Idx1 == Idx2) | |||
10955 | return SDValue(); | |||
10956 | ||||
10957 | if (Vec1 == Vec2 || Vec3 == Vec4) | |||
10958 | return SDValue(); | |||
10959 | ||||
10960 | if (Vec1.getValueType() != MVT::v2f16 || Vec2.getValueType() != MVT::v2f16) | |||
10961 | return SDValue(); | |||
10962 | ||||
10963 | if ((Vec1 == Vec3 && Vec2 == Vec4) || | |||
10964 | (Vec1 == Vec4 && Vec2 == Vec3)) { | |||
10965 | return DAG.getNode(AMDGPUISD::FDOT2, SL, MVT::f32, Vec1, Vec2, FMAAcc, | |||
10966 | DAG.getTargetConstant(0, SL, MVT::i1)); | |||
10967 | } | |||
10968 | } | |||
10969 | return SDValue(); | |||
10970 | } | |||
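
// Illustrative sketch (hypothetical helper, not in the upstream file): the
// dot-product shape matched above, with the two v2f16 vectors modeled as
// float pairs. FDOT2 evaluates the whole expression in one instruction.
static inline float fdot2Model(const float S0[2], const float S1[2],
                               float Acc) {
  // FMA(S0.x, S1.x, FMA(S0.y, S1.y, Acc)) -> FDOT2(S0, S1, Acc)
  return S0[0] * S1[0] + (S0[1] * S1[1] + Acc);
}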
10971 | ||||
10972 | SDValue SITargetLowering::performSetCCCombine(SDNode *N, | |||
10973 | DAGCombinerInfo &DCI) const { | |||
10974 | SelectionDAG &DAG = DCI.DAG; | |||
10975 | SDLoc SL(N); | |||
10976 | ||||
10977 | SDValue LHS = N->getOperand(0); | |||
10978 | SDValue RHS = N->getOperand(1); | |||
10979 | EVT VT = LHS.getValueType(); | |||
10980 | ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); | |||
10981 | ||||
10982 | auto CRHS = dyn_cast<ConstantSDNode>(RHS); | |||
10983 | if (!CRHS) { | |||
10984 | CRHS = dyn_cast<ConstantSDNode>(LHS); | |||
10985 | if (CRHS) { | |||
10986 | std::swap(LHS, RHS); | |||
10987 | CC = getSetCCSwappedOperands(CC); | |||
10988 | } | |||
10989 | } | |||
10990 | ||||
10991 | if (CRHS) { | |||
10992 | if (VT == MVT::i32 && LHS.getOpcode() == ISD::SIGN_EXTEND && | |||
10993 | isBoolSGPR(LHS.getOperand(0))) { | |||
10994 | // setcc (sext from i1 cc), -1, ne|sgt|ult) => not cc => xor cc, -1 | |||
10995 | // setcc (sext from i1 cc), -1, eq|sle|uge) => cc | |||
10996 | // setcc (sext from i1 cc), 0, eq|sge|ule) => not cc => xor cc, -1 | |||
10997 | // setcc (sext from i1 cc), 0, ne|ugt|slt) => cc | |||
10998 | if ((CRHS->isAllOnes() && | |||
10999 | (CC == ISD::SETNE || CC == ISD::SETGT || CC == ISD::SETULT)) || | |||
11000 | (CRHS->isZero() && | |||
11001 | (CC == ISD::SETEQ || CC == ISD::SETGE || CC == ISD::SETULE))) | |||
11002 | return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0), | |||
11003 | DAG.getConstant(-1, SL, MVT::i1)); | |||
11004 | if ((CRHS->isAllOnes() && | |||
11005 | (CC == ISD::SETEQ || CC == ISD::SETLE || CC == ISD::SETUGE)) || | |||
11006 | (CRHS->isZero() && | |||
11007 | (CC == ISD::SETNE || CC == ISD::SETUGT || CC == ISD::SETLT))) | |||
11008 | return LHS.getOperand(0); | |||
11009 | } | |||
11010 | ||||
11011 | const APInt &CRHSVal = CRHS->getAPIntValue(); | |||
11012 | if ((CC == ISD::SETEQ || CC == ISD::SETNE) && | |||
11013 | LHS.getOpcode() == ISD::SELECT && | |||
11014 | isa<ConstantSDNode>(LHS.getOperand(1)) && | |||
11015 | isa<ConstantSDNode>(LHS.getOperand(2)) && | |||
11016 | LHS.getConstantOperandVal(1) != LHS.getConstantOperandVal(2) && | |||
11017 | isBoolSGPR(LHS.getOperand(0))) { | |||
11018 | // Given CT != FT: | |||
11019 | // setcc (select cc, CT, CF), CF, eq => xor cc, -1 | |||
11020 | // setcc (select cc, CT, CF), CF, ne => cc | |||
11021 | // setcc (select cc, CT, CF), CT, ne => xor cc, -1 | |||
11022 | // setcc (select cc, CT, CF), CT, eq => cc | |||
11023 | const APInt &CT = LHS.getConstantOperandAPInt(1); | |||
11024 | const APInt &CF = LHS.getConstantOperandAPInt(2); | |||
11025 | ||||
11026 | if ((CF == CRHSVal && CC == ISD::SETEQ) || | |||
11027 | (CT == CRHSVal && CC == ISD::SETNE)) | |||
11028 | return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0), | |||
11029 | DAG.getConstant(-1, SL, MVT::i1)); | |||
11030 | if ((CF == CRHSVal && CC == ISD::SETNE) || | |||
11031 | (CT == CRHSVal && CC == ISD::SETEQ)) | |||
11032 | return LHS.getOperand(0); | |||
11033 | } | |||
11034 | } | |||
11035 | ||||
11036 | if (VT != MVT::f32 && VT != MVT::f64 && (Subtarget->has16BitInsts() && | |||
11037 | VT != MVT::f16)) | |||
11038 | return SDValue(); | |||
11039 | ||||
11040 | // Match isinf/isfinite pattern | |||
11041 | // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity)) | |||
11042 | // (fcmp one (fabs x), inf) -> (fp_class x, | |||
11043 | // (p_normal | n_normal | p_subnormal | n_subnormal | p_zero | n_zero) | |||
11044 | if ((CC == ISD::SETOEQ || CC == ISD::SETONE) && LHS.getOpcode() == ISD::FABS) { | |||
11045 | const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS); | |||
11046 | if (!CRHS) | |||
11047 | return SDValue(); | |||
11048 | ||||
11049 | const APFloat &APF = CRHS->getValueAPF(); | |||
11050 | if (APF.isInfinity() && !APF.isNegative()) { | |||
11051 | const unsigned IsInfMask = SIInstrFlags::P_INFINITY | | |||
11052 | SIInstrFlags::N_INFINITY; | |||
11053 | const unsigned IsFiniteMask = SIInstrFlags::N_ZERO | | |||
11054 | SIInstrFlags::P_ZERO | | |||
11055 | SIInstrFlags::N_NORMAL | | |||
11056 | SIInstrFlags::P_NORMAL | | |||
11057 | SIInstrFlags::N_SUBNORMAL | | |||
11058 | SIInstrFlags::P_SUBNORMAL; | |||
11059 | unsigned Mask = CC == ISD::SETOEQ ? IsInfMask : IsFiniteMask; | |||
11060 | return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0), | |||
11061 | DAG.getConstant(Mask, SL, MVT::i32)); | |||
11062 | } | |||
11063 | } | |||
11064 | ||||
11065 | return SDValue(); | |||
11066 | } | |||
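
// Illustrative sketch (hypothetical helper, not in the upstream file,
// assumes <cmath> and <limits>): the comparison that the isinf rewrite above
// replaces with a single v_cmp_class against an infinity mask.
static inline bool isInfModel(float X) {
  return std::fabs(X) == std::numeric_limits<float>::infinity();
}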
11067 | ||||
11068 | SDValue SITargetLowering::performCvtF32UByteNCombine(SDNode *N, | |||
11069 | DAGCombinerInfo &DCI) const { | |||
11070 | SelectionDAG &DAG = DCI.DAG; | |||
11071 | SDLoc SL(N); | |||
11072 | unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0; | |||
11073 | ||||
11074 | SDValue Src = N->getOperand(0); | |||
11075 | SDValue Shift = N->getOperand(0); | |||
11076 | ||||
11077 | // TODO: Extend type shouldn't matter (assuming legal types). | |||
11078 | if (Shift.getOpcode() == ISD::ZERO_EXTEND) | |||
11079 | Shift = Shift.getOperand(0); | |||
11080 | ||||
11081 | if (Shift.getOpcode() == ISD::SRL || Shift.getOpcode() == ISD::SHL) { | |||
11082 | // cvt_f32_ubyte1 (shl x, 8) -> cvt_f32_ubyte0 x | |||
11083 | // cvt_f32_ubyte3 (shl x, 16) -> cvt_f32_ubyte1 x | |||
11084 | // cvt_f32_ubyte0 (srl x, 16) -> cvt_f32_ubyte2 x | |||
11085 | // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x | |||
11086 | // cvt_f32_ubyte0 (srl x, 8) -> cvt_f32_ubyte1 x | |||
11087 | if (auto *C = dyn_cast<ConstantSDNode>(Shift.getOperand(1))) { | |||
11088 | SDValue Shifted = DAG.getZExtOrTrunc(Shift.getOperand(0), | |||
11089 | SDLoc(Shift.getOperand(0)), MVT::i32); | |||
11090 | ||||
11091 | unsigned ShiftOffset = 8 * Offset; | |||
11092 | if (Shift.getOpcode() == ISD::SHL) | |||
11093 | ShiftOffset -= C->getZExtValue(); | |||
11094 | else | |||
11095 | ShiftOffset += C->getZExtValue(); | |||
11096 | ||||
11097 | if (ShiftOffset < 32 && (ShiftOffset % 8) == 0) { | |||
11098 | return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + ShiftOffset / 8, SL, | |||
11099 | MVT::f32, Shifted); | |||
11100 | } | |||
11101 | } | |||
11102 | } | |||
11103 | ||||
11104 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
11105 | APInt DemandedBits = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8); | |||
11106 | if (TLI.SimplifyDemandedBits(Src, DemandedBits, DCI)) { | |||
11107 | // We simplified Src. If this node is not dead, visit it again so it is | |||
11108 | // folded properly. | |||
11109 | if (N->getOpcode() != ISD::DELETED_NODE) | |||
11110 | DCI.AddToWorklist(N); | |||
11111 | return SDValue(N, 0); | |||
11112 | } | |||
11113 | ||||
11114 | // Handle (or x, (srl y, 8)) pattern when known bits are zero. | |||
11115 | if (SDValue DemandedSrc = | |||
11116 | TLI.SimplifyMultipleUseDemandedBits(Src, DemandedBits, DAG)) | |||
11117 | return DAG.getNode(N->getOpcode(), SL, MVT::f32, DemandedSrc); | |||
11118 | ||||
11119 | return SDValue(); | |||
11120 | } | |||
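
// Illustrative sketch (hypothetical helper, not in the upstream file): the
// byte renumbering behind the shift fold above. CVT_F32_UBYTEn reads byte n
// of a 32-bit source, so a constant shift by a multiple of 8 just selects a
// different opcode, e.g. cvt_f32_ubyte0 (srl x, 8) == cvt_f32_ubyte1 x.
static inline uint8_t cvtUByteNModel(uint32_t X, unsigned N) {
  return uint8_t((X >> (8 * N)) & 0xff); // N in [0, 3]
}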
11121 | ||||
11122 | SDValue SITargetLowering::performClampCombine(SDNode *N, | |||
11123 | DAGCombinerInfo &DCI) const { | |||
11124 | ConstantFPSDNode *CSrc = dyn_cast<ConstantFPSDNode>(N->getOperand(0)); | |||
11125 | if (!CSrc) | |||
11126 | return SDValue(); | |||
11127 | ||||
11128 | const MachineFunction &MF = DCI.DAG.getMachineFunction(); | |||
11129 | const APFloat &F = CSrc->getValueAPF(); | |||
11130 | APFloat Zero = APFloat::getZero(F.getSemantics()); | |||
11131 | if (F < Zero || | |||
11132 | (F.isNaN() && MF.getInfo<SIMachineFunctionInfo>()->getMode().DX10Clamp)) { | |||
11133 | return DCI.DAG.getConstantFP(Zero, SDLoc(N), N->getValueType(0)); | |||
11134 | } | |||
11135 | ||||
11136 | APFloat One(F.getSemantics(), "1.0"); | |||
11137 | if (F > One) | |||
11138 | return DCI.DAG.getConstantFP(One, SDLoc(N), N->getValueType(0)); | |||
11139 | ||||
11140 | return SDValue(CSrc, 0); | |||
11141 | } | |||
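
// Illustrative sketch (hypothetical helper, not in the upstream file,
// assumes <cmath>): the constant fold above as scalar logic. Under DX10
// clamp mode NaN inputs clamp to zero; otherwise NaN passes through.
static inline float clampModel(float F, bool DX10Clamp) {
  if (F < 0.0f || (std::isnan(F) && DX10Clamp))
    return 0.0f;
  return F > 1.0f ? 1.0f : F;
}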
11142 | ||||
11143 | ||||
11144 | SDValue SITargetLowering::PerformDAGCombine(SDNode *N, | |||
11145 | DAGCombinerInfo &DCI) const { | |||
11146 | if (getTargetMachine().getOptLevel() == CodeGenOpt::None) | |||
11147 | return SDValue(); | |||
11148 | switch (N->getOpcode()) { | |||
11149 | case ISD::ADD: | |||
11150 | return performAddCombine(N, DCI); | |||
11151 | case ISD::SUB: | |||
11152 | return performSubCombine(N, DCI); | |||
11153 | case ISD::ADDCARRY: | |||
11154 | case ISD::SUBCARRY: | |||
11155 | return performAddCarrySubCarryCombine(N, DCI); | |||
11156 | case ISD::FADD: | |||
11157 | return performFAddCombine(N, DCI); | |||
11158 | case ISD::FSUB: | |||
11159 | return performFSubCombine(N, DCI); | |||
11160 | case ISD::SETCC: | |||
11161 | return performSetCCCombine(N, DCI); | |||
11162 | case ISD::FMAXNUM: | |||
11163 | case ISD::FMINNUM: | |||
11164 | case ISD::FMAXNUM_IEEE: | |||
11165 | case ISD::FMINNUM_IEEE: | |||
11166 | case ISD::SMAX: | |||
11167 | case ISD::SMIN: | |||
11168 | case ISD::UMAX: | |||
11169 | case ISD::UMIN: | |||
11170 | case AMDGPUISD::FMIN_LEGACY: | |||
11171 | case AMDGPUISD::FMAX_LEGACY: | |||
11172 | return performMinMaxCombine(N, DCI); | |||
11173 | case ISD::FMA: | |||
11174 | return performFMACombine(N, DCI); | |||
11175 | case ISD::AND: | |||
11176 | return performAndCombine(N, DCI); | |||
11177 | case ISD::OR: | |||
11178 | return performOrCombine(N, DCI); | |||
11179 | case ISD::XOR: | |||
11180 | return performXorCombine(N, DCI); | |||
11181 | case ISD::ZERO_EXTEND: | |||
11182 | return performZeroExtendCombine(N, DCI); | |||
11183 | case ISD::SIGN_EXTEND_INREG: | |||
11184 | return performSignExtendInRegCombine(N , DCI); | |||
11185 | case AMDGPUISD::FP_CLASS: | |||
11186 | return performClassCombine(N, DCI); | |||
11187 | case ISD::FCANONICALIZE: | |||
11188 | return performFCanonicalizeCombine(N, DCI); | |||
11189 | case AMDGPUISD::RCP: | |||
11190 | return performRcpCombine(N, DCI); | |||
11191 | case AMDGPUISD::FRACT: | |||
11192 | case AMDGPUISD::RSQ: | |||
11193 | case AMDGPUISD::RCP_LEGACY: | |||
11194 | case AMDGPUISD::RCP_IFLAG: | |||
11195 | case AMDGPUISD::RSQ_CLAMP: | |||
11196 | case AMDGPUISD::LDEXP: { | |||
11197 | // FIXME: This is probably wrong. If src is an sNaN, it won't be quieted | |||
11198 | SDValue Src = N->getOperand(0); | |||
11199 | if (Src.isUndef()) | |||
11200 | return Src; | |||
11201 | break; | |||
11202 | } | |||
11203 | case ISD::SINT_TO_FP: | |||
11204 | case ISD::UINT_TO_FP: | |||
11205 | return performUCharToFloatCombine(N, DCI); | |||
11206 | case AMDGPUISD::CVT_F32_UBYTE0: | |||
11207 | case AMDGPUISD::CVT_F32_UBYTE1: | |||
11208 | case AMDGPUISD::CVT_F32_UBYTE2: | |||
11209 | case AMDGPUISD::CVT_F32_UBYTE3: | |||
11210 | return performCvtF32UByteNCombine(N, DCI); | |||
11211 | case AMDGPUISD::FMED3: | |||
11212 | return performFMed3Combine(N, DCI); | |||
11213 | case AMDGPUISD::CVT_PKRTZ_F16_F32: | |||
11214 | return performCvtPkRTZCombine(N, DCI); | |||
11215 | case AMDGPUISD::CLAMP: | |||
11216 | return performClampCombine(N, DCI); | |||
11217 | case ISD::SCALAR_TO_VECTOR: { | |||
11218 | SelectionDAG &DAG = DCI.DAG; | |||
11219 | EVT VT = N->getValueType(0); | |||
11220 | ||||
11221 | // v2i16 (scalar_to_vector i16:x) -> v2i16 (bitcast (any_extend i16:x)) | |||
11222 | if (VT == MVT::v2i16 || VT == MVT::v2f16) { | |||
11223 | SDLoc SL(N); | |||
11224 | SDValue Src = N->getOperand(0); | |||
11225 | EVT EltVT = Src.getValueType(); | |||
11226 | if (EltVT == MVT::f16) | |||
11227 | Src = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Src); | |||
11228 | ||||
11229 | SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Src); | |||
11230 | return DAG.getNode(ISD::BITCAST, SL, VT, Ext); | |||
11231 | } | |||
11232 | ||||
11233 | break; | |||
11234 | } | |||
11235 | case ISD::EXTRACT_VECTOR_ELT: | |||
11236 | return performExtractVectorEltCombine(N, DCI); | |||
11237 | case ISD::INSERT_VECTOR_ELT: | |||
11238 | return performInsertVectorEltCombine(N, DCI); | |||
11239 | case ISD::LOAD: { | |||
11240 | if (SDValue Widened = widenLoad(cast<LoadSDNode>(N), DCI)) | |||
11241 | return Widened; | |||
11242 | LLVM_FALLTHROUGH; | |||
11243 | } | |||
11244 | default: { | |||
11245 | if (!DCI.isBeforeLegalize()) { | |||
11246 | if (MemSDNode *MemNode = dyn_cast<MemSDNode>(N)) | |||
11247 | return performMemSDNodeCombine(MemNode, DCI); | |||
11248 | } | |||
11249 | ||||
11250 | break; | |||
11251 | } | |||
11252 | } | |||
11253 | ||||
11254 | return AMDGPUTargetLowering::PerformDAGCombine(N, DCI); | |||
11255 | } | |||
11256 | ||||
11257 | /// Helper function for adjustWritemask | |||
11258 | static unsigned SubIdx2Lane(unsigned Idx) { | |||
11259 | switch (Idx) { | |||
11260 | default: return ~0u; | |||
11261 | case AMDGPU::sub0: return 0; | |||
11262 | case AMDGPU::sub1: return 1; | |||
11263 | case AMDGPU::sub2: return 2; | |||
11264 | case AMDGPU::sub3: return 3; | |||
11265 | case AMDGPU::sub4: return 4; // Possible with TFE/LWE | |||
11266 | } | |||
11267 | } | |||
11268 | ||||
11269 | /// Adjust the writemask of MIMG instructions | |||
11270 | SDNode *SITargetLowering::adjustWritemask(MachineSDNode *&Node, | |||
11271 | SelectionDAG &DAG) const { | |||
11272 | unsigned Opcode = Node->getMachineOpcode(); | |||
11273 | ||||
11274 | // Subtract 1 because the vdata output is not a MachineSDNode operand. | |||
11275 | int D16Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::d16) - 1; | |||
11276 | if (D16Idx >= 0 && Node->getConstantOperandVal(D16Idx)) | |||
11277 | return Node; // not implemented for D16 | |||
11278 | ||||
11279 | SDNode *Users[5] = { nullptr }; | |||
11280 | unsigned Lane = 0; | |||
11281 | unsigned DmaskIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::dmask) - 1; | |||
11282 | unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx); | |||
11283 | unsigned NewDmask = 0; | |||
11284 | unsigned TFEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::tfe) - 1; | |||
11285 | unsigned LWEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::lwe) - 1; | |||
11286 | bool UsesTFC = (int(TFEIdx) >= 0 && Node->getConstantOperandVal(TFEIdx)) || | |||
11287 | (int(LWEIdx) >= 0 && Node->getConstantOperandVal(LWEIdx)); | |||
11290 | unsigned TFCLane = 0; | |||
11291 | bool HasChain = Node->getNumValues() > 1; | |||
11292 | ||||
11293 | if (OldDmask == 0) { | |||
11294 | // These are folded out, but on the off chance it happens, don't assert. | |||
11295 | return Node; | |||
11296 | } | |||
11297 | ||||
11298 | unsigned OldBitsSet = countPopulation(OldDmask); | |||
11299 | // Work out which is the TFE/LWE lane if that is enabled. | |||
11300 | if (UsesTFC) { | |||
11301 | TFCLane = OldBitsSet; | |||
11302 | } | |||
11303 | ||||
11304 | // Try to figure out the used register components | |||
11305 | for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end(); | |||
11306 | I != E; ++I) { | |||
11307 | ||||
11308 | // Don't look at users of the chain. | |||
11309 | if (I.getUse().getResNo() != 0) | |||
11310 | continue; | |||
11311 | ||||
11312 | // Abort if we can't understand the usage | |||
11313 | if (!I->isMachineOpcode() || | |||
11314 | I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG) | |||
11315 | return Node; | |||
11316 | ||||
11317 | // Lane means which subreg of %vgpra_vgprb_vgprc_vgprd is used. | |||
11318 | // Note that subregs are packed, i.e. Lane==0 is the first bit set | |||
11319 | // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit | |||
11320 | // set, etc. | |||
11321 | Lane = SubIdx2Lane(I->getConstantOperandVal(1)); | |||
11322 | if (Lane == ~0u) | |||
11323 | return Node; | |||
11324 | ||||
11325 | // Check if the use is for the TFE/LWE generated result at VGPRn+1. | |||
11326 | if (UsesTFC && Lane == TFCLane) { | |||
11327 | Users[Lane] = *I; | |||
11328 | } else { | |||
11329 | // Set which texture component corresponds to the lane. | |||
11330 | unsigned Comp; | |||
11331 | for (unsigned i = 0, Dmask = OldDmask; (i <= Lane) && (Dmask != 0); i++) { | |||
11332 | Comp = countTrailingZeros(Dmask); | |||
11333 | Dmask &= ~(1 << Comp); | |||
11334 | } | |||
11335 | ||||
11336 | // Abort if we have more than one user per component. | |||
11337 | if (Users[Lane]) | |||
11338 | return Node; | |||
11339 | ||||
11340 | Users[Lane] = *I; | |||
11341 | NewDmask |= 1 << Comp; | |||
11342 | } | |||
11343 | } | |||
11344 | ||||
11345 | // Don't allow 0 dmask, as hardware assumes one channel enabled. | |||
11346 | bool NoChannels = !NewDmask; | |||
11347 | if (NoChannels) { | |||
11348 | if (!UsesTFC) { | |||
11349 | // No uses of the result and not using TFC. Then do nothing. | |||
11350 | return Node; | |||
11351 | } | |||
11352 | // If the original dmask has one channel, there is nothing to do. | |||
11353 | if (OldBitsSet == 1) | |||
11354 | return Node; | |||
11355 | // Use an arbitrary dmask - required for the instruction to work | |||
11356 | NewDmask = 1; | |||
11357 | } | |||
11358 | // Abort if there's no change | |||
11359 | if (NewDmask == OldDmask) | |||
11360 | return Node; | |||
11361 | ||||
11362 | unsigned BitsSet = countPopulation(NewDmask); | |||
11363 | ||||
11364 | // Check for TFE or LWE - increase the number of channels by one to account | |||
11365 | // for the extra return value | |||
11366 | // This will need adjustment for D16 if this is also included in | |||
11367 | // adjustWriteMask (this function) but at present D16 are excluded. | |||
11368 | unsigned NewChannels = BitsSet + UsesTFC; | |||
11369 | ||||
11370 | int NewOpcode = | |||
11371 | AMDGPU::getMaskedMIMGOp(Node->getMachineOpcode(), NewChannels); | |||
11372 | assert(NewOpcode != -1 && | |||
11373 | NewOpcode != static_cast<int>(Node->getMachineOpcode()) && | |||
11374 | "failed to find equivalent MIMG op"); | |||
11375 | ||||
11376 | // Adjust the writemask in the node | |||
11377 | SmallVector<SDValue, 12> Ops; | |||
11378 | Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx); | |||
11379 | Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32)); | |||
11380 | Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end()); | |||
11381 | ||||
11382 | MVT SVT = Node->getValueType(0).getVectorElementType().getSimpleVT(); | |||
11383 | ||||
11384 | MVT ResultVT = NewChannels == 1 ? | |||
11385 | SVT : MVT::getVectorVT(SVT, NewChannels == 3 ? 4 : | |||
11386 | NewChannels == 5 ? 8 : NewChannels); | |||
11387 | SDVTList NewVTList = HasChain ? | |||
11388 | DAG.getVTList(ResultVT, MVT::Other) : DAG.getVTList(ResultVT); | |||
11389 | ||||
11390 | ||||
11391 | MachineSDNode *NewNode = DAG.getMachineNode(NewOpcode, SDLoc(Node), | |||
11392 | NewVTList, Ops); | |||
11393 | ||||
11394 | if (HasChain) { | |||
11395 | // Update chain. | |||
11396 | DAG.setNodeMemRefs(NewNode, Node->memoperands()); | |||
11397 | DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), SDValue(NewNode, 1)); | |||
11398 | } | |||
11399 | ||||
11400 | if (NewChannels == 1) { | |||
11401 | assert(Node->hasNUsesOfValue(1, 0)); | |||
11402 | SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY, | |||
11403 | SDLoc(Node), Users[Lane]->getValueType(0), // Users[Lane] is set by the use loop; the analyzer flags a possible null deref here. | |||
11404 | SDValue(NewNode, 0)); | |||
11405 | DAG.ReplaceAllUsesWith(Users[Lane], Copy); | |||
11406 | return nullptr; | |||
11407 | } | |||
11408 | ||||
11409 | // Update the users of the node with the new indices | |||
11410 | for (unsigned i = 0, Idx = AMDGPU::sub0; i < 5; ++i) { | |||
11411 | SDNode *User = Users[i]; | |||
11412 | if (!User) { | |||
11413 | // Handle the special case of NoChannels. We set NewDmask to 1 above, but | |||
11414 | // Users[0] is still nullptr because channel 0 doesn't really have a use. | |||
11415 | if (i || !NoChannels) | |||
11416 | continue; | |||
11417 | } else { | |||
11418 | SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32); | |||
11419 | DAG.UpdateNodeOperands(User, SDValue(NewNode, 0), Op); | |||
11420 | } | |||
11421 | ||||
11422 | switch (Idx) { | |||
11423 | default: break; | |||
11424 | case AMDGPU::sub0: Idx = AMDGPU::sub1; break; | |||
11425 | case AMDGPU::sub1: Idx = AMDGPU::sub2; break; | |||
11426 | case AMDGPU::sub2: Idx = AMDGPU::sub3; break; | |||
11427 | case AMDGPU::sub3: Idx = AMDGPU::sub4; break; | |||
11428 | } | |||
11429 | } | |||
11430 | ||||
11431 | DAG.RemoveDeadNode(Node); | |||
11432 | return nullptr; | |||
11433 | } | |||
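
// Illustrative sketch (hypothetical helper, not in the upstream file): the
// lane-to-component mapping used by adjustWritemask above. Result lanes are
// packed, so lane k corresponds to the k-th set bit of the dmask.
static inline unsigned laneToComponentModel(unsigned Dmask, unsigned Lane) {
  unsigned Comp = 0;
  for (unsigned I = 0; I <= Lane && Dmask != 0; ++I) {
    Comp = countTrailingZeros(Dmask); // llvm::countTrailingZeros
    Dmask &= Dmask - 1;               // clear the lowest set bit
  }
  return Comp;
}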
11434 | ||||
11435 | static bool isFrameIndexOp(SDValue Op) { | |||
11436 | if (Op.getOpcode() == ISD::AssertZext) | |||
11437 | Op = Op.getOperand(0); | |||
11438 | ||||
11439 | return isa<FrameIndexSDNode>(Op); | |||
11440 | } | |||
11441 | ||||
11442 | /// Legalize target independent instructions (e.g. INSERT_SUBREG) | |||
11443 | /// with frame index operands. | |||
11444 | /// LLVM assumes that inputs to these instructions are registers. | |||
11445 | SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node, | |||
11446 | SelectionDAG &DAG) const { | |||
11447 | if (Node->getOpcode() == ISD::CopyToReg) { | |||
11448 | RegisterSDNode *DestReg = cast<RegisterSDNode>(Node->getOperand(1)); | |||
11449 | SDValue SrcVal = Node->getOperand(2); | |||
11450 | ||||
11451 | // Insert a copy to a VReg_1 virtual register so LowerI1Copies doesn't have | |||
11452 | // to try understanding copies to physical registers. | |||
11453 | if (SrcVal.getValueType() == MVT::i1 && DestReg->getReg().isPhysical()) { | |||
11454 | SDLoc SL(Node); | |||
11455 | MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); | |||
11456 | SDValue VReg = DAG.getRegister( | |||
11457 | MRI.createVirtualRegister(&AMDGPU::VReg_1RegClass), MVT::i1); | |||
11458 | ||||
11459 | SDNode *Glued = Node->getGluedNode(); | |||
11460 | SDValue ToVReg | |||
11461 | = DAG.getCopyToReg(Node->getOperand(0), SL, VReg, SrcVal, | |||
11462 | SDValue(Glued, Glued ? Glued->getNumValues() - 1 : 0)); | |||
11463 | SDValue ToResultReg | |||
11464 | = DAG.getCopyToReg(ToVReg, SL, SDValue(DestReg, 0), | |||
11465 | VReg, ToVReg.getValue(1)); | |||
11466 | DAG.ReplaceAllUsesWith(Node, ToResultReg.getNode()); | |||
11467 | DAG.RemoveDeadNode(Node); | |||
11468 | return ToResultReg.getNode(); | |||
11469 | } | |||
11470 | } | |||
11471 | ||||
11472 | SmallVector<SDValue, 8> Ops; | |||
11473 | for (unsigned i = 0; i < Node->getNumOperands(); ++i) { | |||
11474 | if (!isFrameIndexOp(Node->getOperand(i))) { | |||
11475 | Ops.push_back(Node->getOperand(i)); | |||
11476 | continue; | |||
11477 | } | |||
11478 | ||||
11479 | SDLoc DL(Node); | |||
11480 | Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, | |||
11481 | Node->getOperand(i).getValueType(), | |||
11482 | Node->getOperand(i)), 0)); | |||
11483 | } | |||
11484 | ||||
11485 | return DAG.UpdateNodeOperands(Node, Ops); | |||
11486 | } | |||
11487 | ||||
11488 | /// Fold the instructions after selecting them. | |||
11489 | /// Returns null if users were already updated. | |||
11490 | SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node, | |||
11491 | SelectionDAG &DAG) const { | |||
11492 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); | |||
11493 | unsigned Opcode = Node->getMachineOpcode(); | |||
11494 | ||||
11495 | if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() && | |||
11496 | !TII->isGather4(Opcode) && | |||
11497 | AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::dmask) != -1) { | |||
11498 | return adjustWritemask(Node, DAG); | |||
11499 | } | |||
11500 | ||||
11501 | if (Opcode == AMDGPU::INSERT_SUBREG || | |||
11502 | Opcode == AMDGPU::REG_SEQUENCE) { | |||
11503 | legalizeTargetIndependentNode(Node, DAG); | |||
11504 | return Node; | |||
11505 | } | |||
11506 | ||||
11507 | switch (Opcode) { | |||
11508 | case AMDGPU::V_DIV_SCALE_F32_e64: | |||
11509 | case AMDGPU::V_DIV_SCALE_F64_e64: { | |||
11510 | // Satisfy the operand register constraint when one of the inputs is | |||
11511 | // undefined. Ordinarily each undef value will have its own implicit_def of | |||
11512 | // a vreg, so force these to use a single register. | |||
11513 | SDValue Src0 = Node->getOperand(1); | |||
11514 | SDValue Src1 = Node->getOperand(3); | |||
11515 | SDValue Src2 = Node->getOperand(5); | |||
11516 | ||||
11517 | if ((Src0.isMachineOpcode() && | |||
11518 | Src0.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) && | |||
11519 | (Src0 == Src1 || Src0 == Src2)) | |||
11520 | break; | |||
11521 | ||||
11522 | MVT VT = Src0.getValueType().getSimpleVT(); | |||
11523 | const TargetRegisterClass *RC = | |||
11524 | getRegClassFor(VT, Src0.getNode()->isDivergent()); | |||
11525 | ||||
11526 | MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); | |||
11527 | SDValue UndefReg = DAG.getRegister(MRI.createVirtualRegister(RC), VT); | |||
11528 | ||||
11529 | SDValue ImpDef = DAG.getCopyToReg(DAG.getEntryNode(), SDLoc(Node), | |||
11530 | UndefReg, Src0, SDValue()); | |||
11531 | ||||
11532 | // src0 must be the same register as src1 or src2, even if the value is | |||
11533 | // undefined, so make sure we don't violate this constraint. | |||
11534 | if (Src0.isMachineOpcode() && | |||
11535 | Src0.getMachineOpcode() == AMDGPU::IMPLICIT_DEF) { | |||
11536 | if (Src1.isMachineOpcode() && | |||
11537 | Src1.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) | |||
11538 | Src0 = Src1; | |||
11539 | else if (Src2.isMachineOpcode() && | |||
11540 | Src2.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) | |||
11541 | Src0 = Src2; | |||
11542 | else { | |||
11543 | assert(Src1.getMachineOpcode() == AMDGPU::IMPLICIT_DEF); | |||
11544 | Src0 = UndefReg; | |||
11545 | Src1 = UndefReg; | |||
11546 | } | |||
11547 | } else | |||
11548 | break; | |||
11549 | ||||
11550 | SmallVector<SDValue, 9> Ops(Node->op_begin(), Node->op_end()); | |||
11551 | Ops[1] = Src0; | |||
11552 | Ops[3] = Src1; | |||
11553 | Ops[5] = Src2; | |||
11554 | Ops.push_back(ImpDef.getValue(1)); | |||
11555 | return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops); | |||
11556 | } | |||
11557 | default: | |||
11558 | break; | |||
11559 | } | |||
11560 | ||||
11561 | return Node; | |||
11562 | } | |||
11563 | ||||
11564 | // Any MIMG instructions that use tfe or lwe require an initialization of the | |||
11565 | // result register that will be written in the case of a memory access failure. | |||
11566 | // The required code is also added to tie this init code to the result of the | |||
11567 | // img instruction. | |||
11568 | void SITargetLowering::AddIMGInit(MachineInstr &MI) const { | |||
11569 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); | |||
11570 | const SIRegisterInfo &TRI = TII->getRegisterInfo(); | |||
11571 | MachineRegisterInfo &MRI = MI.getMF()->getRegInfo(); | |||
11572 | MachineBasicBlock &MBB = *MI.getParent(); | |||
11573 | ||||
11574 | MachineOperand *TFE = TII->getNamedOperand(MI, AMDGPU::OpName::tfe); | |||
11575 | MachineOperand *LWE = TII->getNamedOperand(MI, AMDGPU::OpName::lwe); | |||
11576 | MachineOperand *D16 = TII->getNamedOperand(MI, AMDGPU::OpName::d16); | |||
11577 | ||||
11578 | if (!TFE && !LWE) // intersect_ray | |||
11579 | return; | |||
11580 | ||||
11581 | unsigned TFEVal = TFE ? TFE->getImm() : 0; | |||
11582 | unsigned LWEVal = LWE ? LWE->getImm() : 0; // guard: LWE may be absent even when TFE is present | |||
11583 | unsigned D16Val = D16 ? D16->getImm() : 0; | |||
11584 | ||||
11585 | if (!TFEVal && !LWEVal) | |||
11586 | return; | |||
11587 | ||||
11588 | // At least one of TFE or LWE is non-zero. | |||
11589 | // We have to insert a suitable initialization of the result value and | |||
11590 | // tie this to the dest of the image instruction. | |||
11591 | ||||
11592 | const DebugLoc &DL = MI.getDebugLoc(); | |||
11593 | ||||
11594 | int DstIdx = | |||
11595 | AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata); | |||
11596 | ||||
11597 | // Calculate which dword we have to initialize to 0. | |||
11598 | MachineOperand *MO_Dmask = TII->getNamedOperand(MI, AMDGPU::OpName::dmask); | |||
11599 | ||||
11600 | // Check that the dmask operand was found. | |||
11601 | assert(MO_Dmask && "Expected dmask operand in instruction"); | |||
11602 | ||||
11603 | unsigned dmask = MO_Dmask->getImm(); | |||
11604 | // Determine the number of active lanes taking into account the | |||
11605 | // Gather4 special case | |||
11606 | unsigned ActiveLanes = TII->isGather4(MI) ? 4 : countPopulation(dmask); | |||
11607 | ||||
11608 | bool Packed = !Subtarget->hasUnpackedD16VMem(); | |||
11609 | ||||
11610 | unsigned InitIdx = | |||
11611 | D16Val && Packed ? ((ActiveLanes + 1) >> 1) + 1 : ActiveLanes + 1; | |||
11612 | ||||
11613 | // Abandon the attempt if the dst size isn't large enough - this is | |||
11614 | // in fact an error, but it is picked up elsewhere and | |||
11615 | // reported correctly. | |||
11616 | uint32_t DstSize = TRI.getRegSizeInBits(*TII->getOpRegClass(MI, DstIdx)) / 32; | |||
11617 | if (DstSize < InitIdx) | |||
11618 | return; | |||
11619 | ||||
11620 | // Create a register for the initialization value. | |||
11621 | Register PrevDst = MRI.createVirtualRegister(TII->getOpRegClass(MI, DstIdx)); | |||
11622 | unsigned NewDst = 0; // Final initialized value will be in here | |||
11623 | ||||
11624 | // If PRTStrictNull feature is enabled (the default) then initialize | |||
11625 | // all the result registers to 0, otherwise just the error indication | |||
11626 | // register (VGPRn+1) | |||
11627 | unsigned SizeLeft = Subtarget->usePRTStrictNull() ? InitIdx : 1; | |||
11628 | unsigned CurrIdx = Subtarget->usePRTStrictNull() ? 0 : (InitIdx - 1); | |||
11629 | ||||
11630 | BuildMI(MBB, MI, DL, TII->get(AMDGPU::IMPLICIT_DEF), PrevDst); | |||
11631 | for (; SizeLeft; SizeLeft--, CurrIdx++) { | |||
11632 | NewDst = MRI.createVirtualRegister(TII->getOpRegClass(MI, DstIdx)); | |||
11633 | // Initialize dword | |||
11634 | Register SubReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); | |||
11635 | BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), SubReg) | |||
11636 | .addImm(0); | |||
11637 | // Insert into the super-reg | |||
11638 | BuildMI(MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewDst) | |||
11639 | .addReg(PrevDst) | |||
11640 | .addReg(SubReg) | |||
11641 | .addImm(SIRegisterInfo::getSubRegFromChannel(CurrIdx)); | |||
11642 | ||||
11643 | PrevDst = NewDst; | |||
11644 | } | |||
11645 | ||||
11646 | // Add as an implicit operand | |||
11647 | MI.addOperand(MachineOperand::CreateReg(NewDst, false, true)); | |||
11648 | ||||
11649 | // Tie the just added implicit operand to the dst | |||
11650 | MI.tieOperands(DstIdx, MI.getNumOperands() - 1); | |||
11651 | } | |||
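
// Illustrative sketch (hypothetical helper, not in the upstream file): the
// number of result dwords AddIMGInit zero-initializes. One extra dword holds
// the TFE/LWE status, and packed D16 results need half the data dwords.
static inline unsigned imgInitDwordsModel(unsigned ActiveLanes, bool D16,
                                          bool Packed) {
  return (D16 && Packed) ? ((ActiveLanes + 1) >> 1) + 1 : ActiveLanes + 1;
}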
11652 | ||||
11653 | /// Assign the register class depending on the number of | |||
11654 | /// bits set in the writemask | |||
11655 | void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI, | |||
11656 | SDNode *Node) const { | |||
11657 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); | |||
11658 | ||||
11659 | MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); | |||
11660 | ||||
11661 | if (TII->isVOP3(MI.getOpcode())) { | |||
11662 | // Make sure constant bus requirements are respected. | |||
11663 | TII->legalizeOperandsVOP3(MRI, MI); | |||
11664 | ||||
11665 | // Prefer VGPRs over AGPRs in mAI instructions where possible. | |||
11666 | // This saves a chain-copy of registers and better balances register | |||
11667 | // use between vgpr and agpr, as agpr tuples tend to be big. | |||
11668 | if (MI.getDesc().OpInfo) { | |||
11669 | unsigned Opc = MI.getOpcode(); | |||
11670 | const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); | |||
11671 | for (auto I : { AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0), | |||
11672 | AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) }) { | |||
11673 | if (I == -1) | |||
11674 | break; | |||
11675 | MachineOperand &Op = MI.getOperand(I); | |||
11676 | if (!Op.isReg() || !Op.getReg().isVirtual()) | |||
11677 | continue; | |||
11678 | auto *RC = TRI->getRegClassForReg(MRI, Op.getReg()); | |||
11679 | if (!TRI->hasAGPRs(RC)) | |||
11680 | continue; | |||
11681 | auto *Src = MRI.getUniqueVRegDef(Op.getReg()); | |||
11682 | if (!Src || !Src->isCopy() || | |||
11683 | !TRI->isSGPRReg(MRI, Src->getOperand(1).getReg())) | |||
11684 | continue; | |||
11685 | auto *NewRC = TRI->getEquivalentVGPRClass(RC); | |||
11686 | // All uses of agpr64 and agpr32 can also accept vgpr except for | |||
11687 | // v_accvgpr_read, but we do not produce agpr reads during selection, | |||
11688 | // so no use checks are needed. | |||
11689 | MRI.setRegClass(Op.getReg(), NewRC); | |||
11690 | } | |||
11691 | ||||
11692 | // Resolve the rest of AV operands to AGPRs. | |||
11693 | if (auto *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2)) { | |||
11694 | if (Src2->isReg() && Src2->getReg().isVirtual()) { | |||
11695 | auto *RC = TRI->getRegClassForReg(MRI, Src2->getReg()); | |||
11696 | if (TRI->isVectorSuperClass(RC)) { | |||
11697 | auto *NewRC = TRI->getEquivalentAGPRClass(RC); | |||
11698 | MRI.setRegClass(Src2->getReg(), NewRC); | |||
11699 | if (Src2->isTied()) | |||
11700 | MRI.setRegClass(MI.getOperand(0).getReg(), NewRC); | |||
11701 | } | |||
11702 | } | |||
11703 | } | |||
11704 | } | |||
11705 | ||||
11706 | return; | |||
11707 | } | |||
11708 | ||||
11709 | // Replace unused atomics with the no return version. | |||
11710 | int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI.getOpcode()); | |||
11711 | if (NoRetAtomicOp != -1) { | |||
11712 | if (!Node->hasAnyUseOfValue(0)) { | |||
11713 | int CPolIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), | |||
11714 | AMDGPU::OpName::cpol); | |||
11715 | if (CPolIdx != -1) { | |||
11716 | MachineOperand &CPol = MI.getOperand(CPolIdx); | |||
11717 | CPol.setImm(CPol.getImm() & ~AMDGPU::CPol::GLC); | |||
11718 | } | |||
11719 | MI.removeOperand(0); | |||
11720 | MI.setDesc(TII->get(NoRetAtomicOp)); | |||
11721 | return; | |||
11722 | } | |||
11723 | ||||
11724 | // For mubuf_atomic_cmpswap, we need to have tablegen use an extract_subreg | |||
11725 | // instruction, because the return type of these instructions is a vec2 of | |||
11726 | // the memory type, so it can be tied to the input operand. | |||
11727 | // This means these instructions always have a use, so we need to add a | |||
11728 | // special case to check if the atomic has only one extract_subreg use, | |||
11729 | // which itself has no uses. | |||
11730 | if ((Node->hasNUsesOfValue(1, 0) && | |||
11731 | Node->use_begin()->isMachineOpcode() && | |||
11732 | Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG && | |||
11733 | !Node->use_begin()->hasAnyUseOfValue(0))) { | |||
11734 | Register Def = MI.getOperand(0).getReg(); | |||
11735 | ||||
11736 | // Change this into a noret atomic. | |||
11737 | MI.setDesc(TII->get(NoRetAtomicOp)); | |||
11738 | MI.removeOperand(0); | |||
11739 | ||||
11740 | // If we only remove the def operand from the atomic instruction, the | |||
11741 | // extract_subreg will be left with a use of a vreg without a def. | |||
11742 | // So we need to insert an implicit_def to avoid machine verifier | |||
11743 | // errors. | |||
11744 | BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), | |||
11745 | TII->get(AMDGPU::IMPLICIT_DEF), Def); | |||
11746 | } | |||
11747 | return; | |||
11748 | } | |||
11749 | ||||
11750 | if (TII->isMIMG(MI) && !MI.mayStore()) | |||
11751 | AddIMGInit(MI); | |||
11752 | } | |||
11753 | ||||
11754 | static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL, | |||
11755 | uint64_t Val) { | |||
11756 | SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32); | |||
11757 | return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0); | |||
11758 | } | |||
11759 | ||||
11760 | MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG, | |||
11761 | const SDLoc &DL, | |||
11762 | SDValue Ptr) const { | |||
11763 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); | |||
11764 | ||||
11765 | // Build the half of the subregister with the constants before building the | |||
11766 | // full 128-bit register. If we are building multiple resource descriptors, | |||
11767 | // this will allow CSEing of the 2-component register. | |||
11768 | const SDValue Ops0[] = { | |||
11769 | DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32), | |||
11770 | buildSMovImm32(DAG, DL, 0), | |||
11771 | DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32), | |||
11772 | buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32), | |||
11773 | DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32) | |||
11774 | }; | |||
11775 | ||||
11776 | SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, | |||
11777 | MVT::v2i32, Ops0), 0); | |||
11778 | ||||
11779 | // Combine the constants and the pointer. | |||
11780 | const SDValue Ops1[] = { | |||
11781 | DAG.getTargetConstant(AMDGPU::SGPR_128RegClassID, DL, MVT::i32), | |||
11782 | Ptr, | |||
11783 | DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32), | |||
11784 | SubRegHi, | |||
11785 | DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32) | |||
11786 | }; | |||
11787 | ||||
11788 | return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1); | |||
11789 | } | |||
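// The dword layout the two REG_SEQUENCE nodes above assemble, as a plain-C++
// sketch with no LLVM dependencies: dwords 0-1 carry the 64-bit base pointer,
// and dwords 2-3 carry the constant half (zero plus the high half of the
// default resource data format). The helper below is illustrative only.

#include <array>
#include <cstdint>

static std::array<uint32_t, 4> wrapAddr64RsrcWords(uint64_t Ptr,
                                                   uint64_t DefaultFormat) {
  return {static_cast<uint32_t>(Ptr),        // sub0: pointer low dword
          static_cast<uint32_t>(Ptr >> 32),  // sub1: pointer high dword
          0u,                                // sub2: constant zero
          static_cast<uint32_t>(DefaultFormat >> 32)}; // sub3: format bits
}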
11790 | ||||
11791 | /// Return a resource descriptor with the 'Add TID' bit enabled | |||
11792 | /// The TID (Thread ID) is multiplied by the stride value (bits [61:48] | |||
11793 | /// of the resource descriptor) to create an offset, which is added to | |||
11794 | /// the resource pointer. | |||
11795 | MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL, | |||
11796 | SDValue Ptr, uint32_t RsrcDword1, | |||
11797 | uint64_t RsrcDword2And3) const { | |||
11798 | SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr); | |||
11799 | SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr); | |||
11800 | if (RsrcDword1) { | |||
11801 | PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi, | |||
11802 | DAG.getConstant(RsrcDword1, DL, MVT::i32)), | |||
11803 | 0); | |||
11804 | } | |||
11805 | ||||
11806 | SDValue DataLo = buildSMovImm32(DAG, DL, | |||
11807 |                                   RsrcDword2And3 & UINT64_C(0xFFFFFFFF)); | |||
11808 | SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32); | |||
11809 | ||||
11810 | const SDValue Ops[] = { | |||
11811 | DAG.getTargetConstant(AMDGPU::SGPR_128RegClassID, DL, MVT::i32), | |||
11812 | PtrLo, | |||
11813 | DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32), | |||
11814 | PtrHi, | |||
11815 | DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32), | |||
11816 | DataLo, | |||
11817 | DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32), | |||
11818 | DataHi, | |||
11819 | DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32) | |||
11820 | }; | |||
11821 | ||||
11822 | return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops); | |||
11823 | } | |||
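// buildRSRC differs from wrapAddr64Rsrc in two ways visible above: the
// caller-supplied RsrcDword1 (e.g. the stride and 'Add TID' bits) is OR'd
// into the pointer's high dword, and dwords 2-3 come from RsrcDword2And3.
// A hypothetical flat restatement of that packing:

#include <array>
#include <cstdint>

static std::array<uint32_t, 4> buildRsrcWords(uint64_t Ptr,
                                              uint32_t RsrcDword1,
                                              uint64_t RsrcDword2And3) {
  return {static_cast<uint32_t>(Ptr),
          static_cast<uint32_t>(Ptr >> 32) | RsrcDword1,
          static_cast<uint32_t>(RsrcDword2And3 & UINT64_C(0xFFFFFFFF)),
          static_cast<uint32_t>(RsrcDword2And3 >> 32)};
}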
11824 | ||||
11825 | //===----------------------------------------------------------------------===// | |||
11826 | // SI Inline Assembly Support | |||
11827 | //===----------------------------------------------------------------------===// | |||
11828 | ||||
11829 | std::pair<unsigned, const TargetRegisterClass *> | |||
11830 | SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI_, | |||
11831 | StringRef Constraint, | |||
11832 | MVT VT) const { | |||
11833 | const SIRegisterInfo *TRI = static_cast<const SIRegisterInfo *>(TRI_); | |||
11834 | ||||
11835 | const TargetRegisterClass *RC = nullptr; | |||
11836 | if (Constraint.size() == 1) { | |||
11837 | const unsigned BitWidth = VT.getSizeInBits(); | |||
11838 | switch (Constraint[0]) { | |||
11839 | default: | |||
11840 | return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); | |||
11841 | case 's': | |||
11842 | case 'r': | |||
11843 | switch (BitWidth) { | |||
11844 | case 16: | |||
11845 | RC = &AMDGPU::SReg_32RegClass; | |||
11846 | break; | |||
11847 | case 64: | |||
11848 | RC = &AMDGPU::SGPR_64RegClass; | |||
11849 | break; | |||
11850 | default: | |||
11851 | RC = SIRegisterInfo::getSGPRClassForBitWidth(BitWidth); | |||
11852 | if (!RC) | |||
11853 | return std::make_pair(0U, nullptr); | |||
11854 | break; | |||
11855 | } | |||
11856 | break; | |||
11857 | case 'v': | |||
11858 | switch (BitWidth) { | |||
11859 | case 16: | |||
11860 | RC = &AMDGPU::VGPR_32RegClass; | |||
11861 | break; | |||
11862 | default: | |||
11863 | RC = TRI->getVGPRClassForBitWidth(BitWidth); | |||
11864 | if (!RC) | |||
11865 | return std::make_pair(0U, nullptr); | |||
11866 | break; | |||
11867 | } | |||
11868 | break; | |||
11869 | case 'a': | |||
11870 | if (!Subtarget->hasMAIInsts()) | |||
11871 | break; | |||
11872 | switch (BitWidth) { | |||
11873 | case 16: | |||
11874 | RC = &AMDGPU::AGPR_32RegClass; | |||
11875 | break; | |||
11876 | default: | |||
11877 | RC = TRI->getAGPRClassForBitWidth(BitWidth); | |||
11878 | if (!RC) | |||
11879 | return std::make_pair(0U, nullptr); | |||
11880 | break; | |||
11881 | } | |||
11882 | break; | |||
11883 | } | |||
11884 | // We actually support i128, i16 and f16 as inline parameters | |||
11885 | // even if they are not reported as legal | |||
11886 | if (RC && (isTypeLegal(VT) || VT.SimpleTy == MVT::i128 || | |||
11887 | VT.SimpleTy == MVT::i16 || VT.SimpleTy == MVT::f16)) | |||
11888 | return std::make_pair(0U, RC); | |||
11889 | } | |||
11890 | ||||
11891 | if (Constraint.startswith("{") && Constraint.endswith("}")) { | |||
11892 | StringRef RegName(Constraint.data() + 1, Constraint.size() - 2); | |||
11893 | if (RegName.consume_front("v")) { | |||
11894 | RC = &AMDGPU::VGPR_32RegClass; | |||
11895 | } else if (RegName.consume_front("s")) { | |||
11896 | RC = &AMDGPU::SGPR_32RegClass; | |||
11897 | } else if (RegName.consume_front("a")) { | |||
11898 | RC = &AMDGPU::AGPR_32RegClass; | |||
11899 | } | |||
11900 | ||||
11901 | if (RC) { | |||
11902 | uint32_t Idx; | |||
11903 | if (RegName.consume_front("[")) { | |||
11904 | uint32_t End; | |||
11905 | bool Failed = RegName.consumeInteger(10, Idx); | |||
11906 | Failed |= !RegName.consume_front(":"); | |||
11907 | Failed |= RegName.consumeInteger(10, End); | |||
11908 | Failed |= !RegName.consume_back("]"); | |||
11909 | if (!Failed) { | |||
11910 | uint32_t Width = (End - Idx + 1) * 32; | |||
11911 | MCRegister Reg = RC->getRegister(Idx); | |||
11912 | if (SIRegisterInfo::isVGPRClass(RC)) | |||
11913 | RC = TRI->getVGPRClassForBitWidth(Width); | |||
11914 | else if (SIRegisterInfo::isSGPRClass(RC)) | |||
11915 | RC = TRI->getSGPRClassForBitWidth(Width); | |||
11916 | else if (SIRegisterInfo::isAGPRClass(RC)) | |||
11917 | RC = TRI->getAGPRClassForBitWidth(Width); | |||
11918 | if (RC) { | |||
11919 | Reg = TRI->getMatchingSuperReg(Reg, AMDGPU::sub0, RC); | |||
11920 | return std::make_pair(Reg, RC); | |||
11921 | } | |||
11922 | } | |||
11923 | } else { | |||
11924 | bool Failed = RegName.getAsInteger(10, Idx); | |||
11925 | if (!Failed && Idx < RC->getNumRegs()) | |||
11926 | return std::make_pair(RC->getRegister(Idx), RC); | |||
11927 | } | |||
11928 | } | |||
11929 | } | |||
11930 | ||||
11931 | auto Ret = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); | |||
11932 | if (Ret.first) | |||
11933 | Ret.second = TRI->getPhysRegClass(Ret.first); | |||
11934 | ||||
11935 | return Ret; | |||
11936 | } | |||
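// How these constraints are spelled from user code. A hedged sketch using
// GCC-style inline asm for amdgcn; the mnemonic is real, but the snippet is
// illustrative and assumes Clang forwards the constraint strings unchanged.
// 'v' picks a VGPR class sized to the operand, 's' an SGPR class, and a
// braced range such as "{v[2:3]}" names the exact 64-bit register pair
// resolved by the bracket-parsing code above.

void constraintSketch(float &Out, float In) {
  float Tmp;
  asm("v_mov_b32 %0, %1" : "=v"(Tmp) : "v"(In)); // 32-bit VGPR constraint
  Out = Tmp;
}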
11937 | ||||
11938 | static bool isImmConstraint(StringRef Constraint) { | |||
11939 | if (Constraint.size() == 1) { | |||
11940 | switch (Constraint[0]) { | |||
11941 | default: break; | |||
11942 | case 'I': | |||
11943 | case 'J': | |||
11944 | case 'A': | |||
11945 | case 'B': | |||
11946 | case 'C': | |||
11947 | return true; | |||
11948 | } | |||
11949 | } else if (Constraint == "DA" || | |||
11950 | Constraint == "DB") { | |||
11951 | return true; | |||
11952 | } | |||
11953 | return false; | |||
11954 | } | |||
11955 | ||||
11956 | SITargetLowering::ConstraintType | |||
11957 | SITargetLowering::getConstraintType(StringRef Constraint) const { | |||
11958 | if (Constraint.size() == 1) { | |||
11959 | switch (Constraint[0]) { | |||
11960 | default: break; | |||
11961 | case 's': | |||
11962 | case 'v': | |||
11963 | case 'a': | |||
11964 | return C_RegisterClass; | |||
11965 | } | |||
11966 | } | |||
11967 | if (isImmConstraint(Constraint)) { | |||
11968 | return C_Other; | |||
11969 | } | |||
11970 | return TargetLowering::getConstraintType(Constraint); | |||
11971 | } | |||
11972 | ||||
11973 | static uint64_t clearUnusedBits(uint64_t Val, unsigned Size) { | |||
11974 | if (!AMDGPU::isInlinableIntLiteral(Val)) { | |||
11975 | Val = Val & maskTrailingOnes<uint64_t>(Size); | |||
11976 | } | |||
11977 | return Val; | |||
11978 | } | |||
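// Worked example of clearUnusedBits: inlinable integer literals (the range
// [-16, 64] checked by AMDGPU::isInlinableIntLiteral) pass through whole, so
// sign-extended values like -1 keep their high bits; anything else is masked
// down to the operand width. A standalone restatement:

#include <cstdint>

static bool isInlinableIntLiteralSketch(int64_t V) {
  return V >= -16 && V <= 64; // same range as AMDGPU::isInlinableIntLiteral
}

static uint64_t clearUnusedBitsSketch(uint64_t Val, unsigned Size) {
  if (!isInlinableIntLiteralSketch(static_cast<int64_t>(Val)))
    Val &= Size >= 64 ? ~UINT64_C(0) : (UINT64_C(1) << Size) - 1;
  return Val;
}
// clearUnusedBitsSketch(uint64_t(-1), 16) == 0xFFFF'FFFF'FFFF'FFFF (kept),
// clearUnusedBitsSketch(0x12345, 16)      == 0x2345 (masked to 16 bits).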
11979 | ||||
11980 | void SITargetLowering::LowerAsmOperandForConstraint(SDValue Op, | |||
11981 | std::string &Constraint, | |||
11982 | std::vector<SDValue> &Ops, | |||
11983 | SelectionDAG &DAG) const { | |||
11984 | if (isImmConstraint(Constraint)) { | |||
11985 | uint64_t Val; | |||
11986 | if (getAsmOperandConstVal(Op, Val) && | |||
11987 | checkAsmConstraintVal(Op, Constraint, Val)) { | |||
11988 | Val = clearUnusedBits(Val, Op.getScalarValueSizeInBits()); | |||
11989 | Ops.push_back(DAG.getTargetConstant(Val, SDLoc(Op), MVT::i64)); | |||
11990 | } | |||
11991 | } else { | |||
11992 | TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); | |||
11993 | } | |||
11994 | } | |||
11995 | ||||
11996 | bool SITargetLowering::getAsmOperandConstVal(SDValue Op, uint64_t &Val) const { | |||
11997 | unsigned Size = Op.getScalarValueSizeInBits(); | |||
11998 | if (Size > 64) | |||
11999 | return false; | |||
12000 | ||||
12001 | if (Size == 16 && !Subtarget->has16BitInsts()) | |||
12002 | return false; | |||
12003 | ||||
12004 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { | |||
12005 | Val = C->getSExtValue(); | |||
12006 | return true; | |||
12007 | } | |||
12008 | if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) { | |||
12009 | Val = C->getValueAPF().bitcastToAPInt().getSExtValue(); | |||
12010 | return true; | |||
12011 | } | |||
12012 | if (BuildVectorSDNode *V = dyn_cast<BuildVectorSDNode>(Op)) { | |||
12013 | if (Size != 16 || Op.getNumOperands() != 2) | |||
12014 | return false; | |||
12015 | if (Op.getOperand(0).isUndef() || Op.getOperand(1).isUndef()) | |||
12016 | return false; | |||
12017 | if (ConstantSDNode *C = V->getConstantSplatNode()) { | |||
12018 | Val = C->getSExtValue(); | |||
12019 | return true; | |||
12020 | } | |||
12021 | if (ConstantFPSDNode *C = V->getConstantFPSplatNode()) { | |||
12022 | Val = C->getValueAPF().bitcastToAPInt().getSExtValue(); | |||
12023 | return true; | |||
12024 | } | |||
12025 | } | |||
12026 | ||||
12027 | return false; | |||
12028 | } | |||
12029 | ||||
12030 | bool SITargetLowering::checkAsmConstraintVal(SDValue Op, | |||
12031 | const std::string &Constraint, | |||
12032 | uint64_t Val) const { | |||
12033 | if (Constraint.size() == 1) { | |||
12034 | switch (Constraint[0]) { | |||
12035 | case 'I': | |||
12036 | return AMDGPU::isInlinableIntLiteral(Val); | |||
12037 | case 'J': | |||
12038 | return isInt<16>(Val); | |||
12039 | case 'A': | |||
12040 | return checkAsmConstraintValA(Op, Val); | |||
12041 | case 'B': | |||
12042 | return isInt<32>(Val); | |||
12043 | case 'C': | |||
12044 | return isUInt<32>(clearUnusedBits(Val, Op.getScalarValueSizeInBits())) || | |||
12045 | AMDGPU::isInlinableIntLiteral(Val); | |||
12046 | default: | |||
12047 | break; | |||
12048 | } | |||
12049 | } else if (Constraint.size() == 2) { | |||
12050 | if (Constraint == "DA") { | |||
12051 | int64_t HiBits = static_cast<int32_t>(Val >> 32); | |||
12052 | int64_t LoBits = static_cast<int32_t>(Val); | |||
12053 | return checkAsmConstraintValA(Op, HiBits, 32) && | |||
12054 | checkAsmConstraintValA(Op, LoBits, 32); | |||
12055 | } | |||
12056 | if (Constraint == "DB") { | |||
12057 | return true; | |||
12058 | } | |||
12059 | } | |||
12060 |   llvm_unreachable("Invalid asm constraint"); | |||
12061 | } | |||
12062 | ||||
12063 | bool SITargetLowering::checkAsmConstraintValA(SDValue Op, | |||
12064 | uint64_t Val, | |||
12065 | unsigned MaxSize) const { | |||
12066 | unsigned Size = std::min<unsigned>(Op.getScalarValueSizeInBits(), MaxSize); | |||
12067 | bool HasInv2Pi = Subtarget->hasInv2PiInlineImm(); | |||
12068 | if ((Size == 16 && AMDGPU::isInlinableLiteral16(Val, HasInv2Pi)) || | |||
12069 | (Size == 32 && AMDGPU::isInlinableLiteral32(Val, HasInv2Pi)) || | |||
12070 | (Size == 64 && AMDGPU::isInlinableLiteral64(Val, HasInv2Pi))) { | |||
12071 | return true; | |||
12072 | } | |||
12073 | return false; | |||
12074 | } | |||
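// What the 'A' constraint accepts in the 32-bit case, by example: the bit
// patterns of 0.0, +/-0.5, +/-1.0, +/-2.0, +/-4.0 and the small integers
// -16..64 are inline constants (plus 1/(2*pi) when hasInv2PiInlineImm()).
// A tiny helper to inspect the patterns involved:

#include <cstdint>
#include <cstring>

static uint32_t bitsOf(float F) {
  uint32_t U;
  std::memcpy(&U, &F, sizeof(U)); // safe type pun
  return U;
}
// bitsOf(1.0f) == 0x3f800000 -> inlinable, satisfies 'A';
// bitsOf(1.5f) == 0x3fc00000 -> not inlinable, must be a literal operand.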
12075 | ||||
12076 | static int getAlignedAGPRClassID(unsigned UnalignedClassID) { | |||
12077 | switch (UnalignedClassID) { | |||
12078 | case AMDGPU::VReg_64RegClassID: | |||
12079 | return AMDGPU::VReg_64_Align2RegClassID; | |||
12080 | case AMDGPU::VReg_96RegClassID: | |||
12081 | return AMDGPU::VReg_96_Align2RegClassID; | |||
12082 | case AMDGPU::VReg_128RegClassID: | |||
12083 | return AMDGPU::VReg_128_Align2RegClassID; | |||
12084 | case AMDGPU::VReg_160RegClassID: | |||
12085 | return AMDGPU::VReg_160_Align2RegClassID; | |||
12086 | case AMDGPU::VReg_192RegClassID: | |||
12087 | return AMDGPU::VReg_192_Align2RegClassID; | |||
12088 | case AMDGPU::VReg_224RegClassID: | |||
12089 | return AMDGPU::VReg_224_Align2RegClassID; | |||
12090 | case AMDGPU::VReg_256RegClassID: | |||
12091 | return AMDGPU::VReg_256_Align2RegClassID; | |||
12092 | case AMDGPU::VReg_512RegClassID: | |||
12093 | return AMDGPU::VReg_512_Align2RegClassID; | |||
12094 | case AMDGPU::VReg_1024RegClassID: | |||
12095 | return AMDGPU::VReg_1024_Align2RegClassID; | |||
12096 | case AMDGPU::AReg_64RegClassID: | |||
12097 | return AMDGPU::AReg_64_Align2RegClassID; | |||
12098 | case AMDGPU::AReg_96RegClassID: | |||
12099 | return AMDGPU::AReg_96_Align2RegClassID; | |||
12100 | case AMDGPU::AReg_128RegClassID: | |||
12101 | return AMDGPU::AReg_128_Align2RegClassID; | |||
12102 | case AMDGPU::AReg_160RegClassID: | |||
12103 | return AMDGPU::AReg_160_Align2RegClassID; | |||
12104 | case AMDGPU::AReg_192RegClassID: | |||
12105 | return AMDGPU::AReg_192_Align2RegClassID; | |||
12106 | case AMDGPU::AReg_256RegClassID: | |||
12107 | return AMDGPU::AReg_256_Align2RegClassID; | |||
12108 | case AMDGPU::AReg_512RegClassID: | |||
12109 | return AMDGPU::AReg_512_Align2RegClassID; | |||
12110 | case AMDGPU::AReg_1024RegClassID: | |||
12111 | return AMDGPU::AReg_1024_Align2RegClassID; | |||
12112 | default: | |||
12113 | return -1; | |||
12114 | } | |||
12115 | } | |||
12116 | ||||
12117 | // Figure out which registers should be reserved for stack access. Only after | |||
12118 | // the function is legalized do we know all of the non-spill stack objects or if | |||
12119 | // calls are present. | |||
12120 | void SITargetLowering::finalizeLowering(MachineFunction &MF) const { | |||
12121 | MachineRegisterInfo &MRI = MF.getRegInfo(); | |||
12122 | SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); | |||
12123 | const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); | |||
12124 | const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); | |||
12125 | const SIInstrInfo *TII = ST.getInstrInfo(); | |||
12126 | ||||
12127 | if (Info->isEntryFunction()) { | |||
12128 | // Callable functions have fixed registers used for stack access. | |||
12129 | reservePrivateMemoryRegs(getTargetMachine(), MF, *TRI, *Info); | |||
12130 | } | |||
12131 | ||||
12132 |   assert(!TRI->isSubRegister(Info->getScratchRSrcReg(), | |||
12133 |                              Info->getStackPtrOffsetReg())); | |||
12134 | if (Info->getStackPtrOffsetReg() != AMDGPU::SP_REG) | |||
12135 | MRI.replaceRegWith(AMDGPU::SP_REG, Info->getStackPtrOffsetReg()); | |||
12136 | ||||
12137 |   // We still need to handle replacing the default register with itself, in | |||
12138 |   // case MIR testcases are missing the MFI. | |||
12139 | if (Info->getScratchRSrcReg() != AMDGPU::PRIVATE_RSRC_REG) | |||
12140 | MRI.replaceRegWith(AMDGPU::PRIVATE_RSRC_REG, Info->getScratchRSrcReg()); | |||
12141 | ||||
12142 | if (Info->getFrameOffsetReg() != AMDGPU::FP_REG) | |||
12143 | MRI.replaceRegWith(AMDGPU::FP_REG, Info->getFrameOffsetReg()); | |||
12144 | ||||
12145 | Info->limitOccupancy(MF); | |||
12146 | ||||
12147 | if (ST.isWave32() && !MF.empty()) { | |||
12148 | for (auto &MBB : MF) { | |||
12149 | for (auto &MI : MBB) { | |||
12150 | TII->fixImplicitOperands(MI); | |||
12151 | } | |||
12152 | } | |||
12153 | } | |||
12154 | ||||
12155 | // FIXME: This is a hack to fixup AGPR classes to use the properly aligned | |||
12156 | // classes if required. Ideally the register class constraints would differ | |||
12157 | // per-subtarget, but there's no easy way to achieve that right now. This is | |||
12158 | // not a problem for VGPRs because the correctly aligned VGPR class is implied | |||
12159 | // from using them as the register class for legal types. | |||
12160 | if (ST.needsAlignedVGPRs()) { | |||
12161 | for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) { | |||
12162 | const Register Reg = Register::index2VirtReg(I); | |||
12163 | const TargetRegisterClass *RC = MRI.getRegClassOrNull(Reg); | |||
12164 | if (!RC) | |||
12165 | continue; | |||
12166 | int NewClassID = getAlignedAGPRClassID(RC->getID()); | |||
12167 | if (NewClassID != -1) | |||
12168 | MRI.setRegClass(Reg, TRI->getRegClass(NewClassID)); | |||
12169 | } | |||
12170 | } | |||
12171 | ||||
12172 | TargetLoweringBase::finalizeLowering(MF); | |||
12173 | } | |||
12174 | ||||
12175 | void SITargetLowering::computeKnownBitsForFrameIndex( | |||
12176 | const int FI, KnownBits &Known, const MachineFunction &MF) const { | |||
12177 | TargetLowering::computeKnownBitsForFrameIndex(FI, Known, MF); | |||
12178 | ||||
12179 | // Set the high bits to zero based on the maximum allowed scratch size per | |||
12180 | // wave. We can't use vaddr in MUBUF instructions if we don't know the address | |||
12181 | // calculation won't overflow, so assume the sign bit is never set. | |||
12182 | Known.Zero.setHighBits(getSubtarget()->getKnownHighZeroBitsForFrameIndex()); | |||
12183 | } | |||
12184 | ||||
12185 | static void knownBitsForWorkitemID(const GCNSubtarget &ST, GISelKnownBits &KB, | |||
12186 | KnownBits &Known, unsigned Dim) { | |||
12187 | unsigned MaxValue = | |||
12188 | ST.getMaxWorkitemID(KB.getMachineFunction().getFunction(), Dim); | |||
12189 | Known.Zero.setHighBits(countLeadingZeros(MaxValue)); | |||
12190 | } | |||
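// Worked example of the known-bits math above: if the maximum workitem ID in
// a dimension is 1023, every possible value fits in 10 bits, so the top 22
// bits of the 32-bit ID are known zero. A standalone equivalent of the
// countLeadingZeros call:

#include <cstdint>

static unsigned knownHighZeroBits(uint32_t MaxValue) {
  // __builtin_clz(0) is undefined; a maximum of 0 means every bit is zero.
  return MaxValue == 0 ? 32u : static_cast<unsigned>(__builtin_clz(MaxValue));
}
// knownHighZeroBits(1023) == 22, knownHighZeroBits(63) == 26.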
12191 | ||||
12192 | void SITargetLowering::computeKnownBitsForTargetInstr( | |||
12193 | GISelKnownBits &KB, Register R, KnownBits &Known, const APInt &DemandedElts, | |||
12194 | const MachineRegisterInfo &MRI, unsigned Depth) const { | |||
12195 | const MachineInstr *MI = MRI.getVRegDef(R); | |||
12196 | switch (MI->getOpcode()) { | |||
12197 | case AMDGPU::G_INTRINSIC: { | |||
12198 | switch (MI->getIntrinsicID()) { | |||
12199 | case Intrinsic::amdgcn_workitem_id_x: | |||
12200 | knownBitsForWorkitemID(*getSubtarget(), KB, Known, 0); | |||
12201 | break; | |||
12202 | case Intrinsic::amdgcn_workitem_id_y: | |||
12203 | knownBitsForWorkitemID(*getSubtarget(), KB, Known, 1); | |||
12204 | break; | |||
12205 | case Intrinsic::amdgcn_workitem_id_z: | |||
12206 | knownBitsForWorkitemID(*getSubtarget(), KB, Known, 2); | |||
12207 | break; | |||
12208 | case Intrinsic::amdgcn_mbcnt_lo: | |||
12209 | case Intrinsic::amdgcn_mbcnt_hi: { | |||
12210 | // These return at most the wavefront size - 1. | |||
12211 | unsigned Size = MRI.getType(R).getSizeInBits(); | |||
12212 | Known.Zero.setHighBits(Size - getSubtarget()->getWavefrontSizeLog2()); | |||
12213 | break; | |||
12214 | } | |||
12215 | case Intrinsic::amdgcn_groupstaticsize: { | |||
12216 |       // We can report every bit above the maximum size as 0. We can't report | |||
12217 |       // based on the actual size because we don't know if it's accurate or not | |||
12218 |       // at any given point. | |||
12219 | Known.Zero.setHighBits(countLeadingZeros(getSubtarget()->getLocalMemorySize())); | |||
12220 | break; | |||
12221 | } | |||
12222 | } | |||
12223 | break; | |||
12224 | } | |||
12225 | case AMDGPU::G_AMDGPU_BUFFER_LOAD_UBYTE: | |||
12226 | Known.Zero.setHighBits(24); | |||
12227 | break; | |||
12228 | case AMDGPU::G_AMDGPU_BUFFER_LOAD_USHORT: | |||
12229 | Known.Zero.setHighBits(16); | |||
12230 | break; | |||
12231 | } | |||
12232 | } | |||
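// The two buffer-load cases are simple zero-extensions: an unsigned byte or
// short load fills the rest of the 32-bit result with zeros, which is exactly
// what setHighBits(24) and setHighBits(16) record. Illustration:

#include <cstdint>

static uint32_t loadUByteSketch(uint8_t Mem) { return Mem; }   // top 24 bits zero
static uint32_t loadUShortSketch(uint16_t Mem) { return Mem; } // top 16 bits zero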
12233 | ||||
12234 | Align SITargetLowering::computeKnownAlignForTargetInstr( | |||
12235 | GISelKnownBits &KB, Register R, const MachineRegisterInfo &MRI, | |||
12236 | unsigned Depth) const { | |||
12237 | const MachineInstr *MI = MRI.getVRegDef(R); | |||
12238 | switch (MI->getOpcode()) { | |||
12239 | case AMDGPU::G_INTRINSIC: | |||
12240 | case AMDGPU::G_INTRINSIC_W_SIDE_EFFECTS: { | |||
12241 | // FIXME: Can this move to generic code? What about the case where the call | |||
12242 | // site specifies a lower alignment? | |||
12243 | Intrinsic::ID IID = MI->getIntrinsicID(); | |||
12244 | LLVMContext &Ctx = KB.getMachineFunction().getFunction().getContext(); | |||
12245 | AttributeList Attrs = Intrinsic::getAttributes(Ctx, IID); | |||
12246 | if (MaybeAlign RetAlign = Attrs.getRetAlignment()) | |||
12247 | return *RetAlign; | |||
12248 | return Align(1); | |||
12249 | } | |||
12250 | default: | |||
12251 | return Align(1); | |||
12252 | } | |||
12253 | } | |||
12254 | ||||
12255 | Align SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const { | |||
12256 | const Align PrefAlign = TargetLowering::getPrefLoopAlignment(ML); | |||
12257 | const Align CacheLineAlign = Align(64); | |||
12258 | ||||
12259 |   // Pre-GFX10 targets did not benefit from loop alignment. | |||
12260 | if (!ML || DisableLoopAlignment || | |||
12261 | (getSubtarget()->getGeneration() < AMDGPUSubtarget::GFX10) || | |||
12262 | getSubtarget()->hasInstFwdPrefetchBug()) | |||
12263 | return PrefAlign; | |||
12264 | ||||
12265 |   // On GFX10 the I$ consists of 4 cache lines of 64 bytes each. | |||
12266 |   // By default the prefetcher keeps one cache line behind and reads two | |||
12267 |   // ahead. We can modify this with S_INST_PREFETCH so that larger loops keep | |||
12268 |   // two lines behind and one ahead. | |||
12269 |   // Therefore we can benefit from aligning loop headers if the loop fits in | |||
12270 |   // 192 bytes. If the loop fits in 64 bytes it always spans no more than two | |||
12271 |   // cache lines and does not need alignment. If the loop is at most 128 | |||
12272 |   // bytes we do not need to modify the prefetch settings; if it is at most | |||
12273 |   // 192 bytes we need the prefetcher to keep two lines behind. | |||
12274 | ||||
12275 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); | |||
12276 | const MachineBasicBlock *Header = ML->getHeader(); | |||
12277 | if (Header->getAlignment() != PrefAlign) | |||
12278 | return Header->getAlignment(); // Already processed. | |||
12279 | ||||
12280 | unsigned LoopSize = 0; | |||
12281 | for (const MachineBasicBlock *MBB : ML->blocks()) { | |||
12282 |     // If an inner loop block is aligned, assume on average half of the | |||
12283 |     // alignment size is added as nops. | |||
12284 | if (MBB != Header) | |||
12285 | LoopSize += MBB->getAlignment().value() / 2; | |||
12286 | ||||
12287 | for (const MachineInstr &MI : *MBB) { | |||
12288 | LoopSize += TII->getInstSizeInBytes(MI); | |||
12289 | if (LoopSize > 192) | |||
12290 | return PrefAlign; | |||
12291 | } | |||
12292 | } | |||
12293 | ||||
12294 | if (LoopSize <= 64) | |||
12295 | return PrefAlign; | |||
12296 | ||||
12297 | if (LoopSize <= 128) | |||
12298 | return CacheLineAlign; | |||
12299 | ||||
12300 |   // If any of the parent loops is surrounded by prefetch instructions, do not | |||
12301 |   // insert new ones for the inner loop; that would reset the parent's settings. | |||
12302 | for (MachineLoop *P = ML->getParentLoop(); P; P = P->getParentLoop()) { | |||
12303 | if (MachineBasicBlock *Exit = P->getExitBlock()) { | |||
12304 | auto I = Exit->getFirstNonDebugInstr(); | |||
12305 | if (I != Exit->end() && I->getOpcode() == AMDGPU::S_INST_PREFETCH) | |||
12306 | return CacheLineAlign; | |||
12307 | } | |||
12308 | } | |||
12309 | ||||
12310 | MachineBasicBlock *Pre = ML->getLoopPreheader(); | |||
12311 | MachineBasicBlock *Exit = ML->getExitBlock(); | |||
12312 | ||||
12313 | if (Pre && Exit) { | |||
12314 | auto PreTerm = Pre->getFirstTerminator(); | |||
12315 | if (PreTerm == Pre->begin() || | |||
12316 | std::prev(PreTerm)->getOpcode() != AMDGPU::S_INST_PREFETCH) | |||
12317 | BuildMI(*Pre, PreTerm, DebugLoc(), TII->get(AMDGPU::S_INST_PREFETCH)) | |||
12318 | .addImm(1); // prefetch 2 lines behind PC | |||
12319 | ||||
12320 | auto ExitHead = Exit->getFirstNonDebugInstr(); | |||
12321 | if (ExitHead == Exit->end() || | |||
12322 | ExitHead->getOpcode() != AMDGPU::S_INST_PREFETCH) | |||
12323 | BuildMI(*Exit, ExitHead, DebugLoc(), TII->get(AMDGPU::S_INST_PREFETCH)) | |||
12324 | .addImm(2); // prefetch 1 line behind PC | |||
12325 | } | |||
12326 | ||||
12327 | return CacheLineAlign; | |||
12328 | } | |||
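// The loop-size policy above, restated as one standalone decision function
// (a sketch; "Default" stands for the generic preferred alignment, and the
// S_INST_PREFETCH rewrite only happens in the 129..192-byte band, subject to
// the parent-loop check above):

enum class LoopAlignChoice { Default, CacheLine64, CacheLine64WithPrefetch };

static LoopAlignChoice pickLoopAlignment(unsigned LoopSizeBytes) {
  if (LoopSizeBytes <= 64)
    return LoopAlignChoice::Default;          // spans <= 2 lines regardless
  if (LoopSizeBytes <= 128)
    return LoopAlignChoice::CacheLine64;      // default prefetch suffices
  if (LoopSizeBytes <= 192)
    return LoopAlignChoice::CacheLine64WithPrefetch; // S_INST_PREFETCH pair
  return LoopAlignChoice::Default;            // too large to benefit
}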
12329 | ||||
12330 | LLVM_ATTRIBUTE_UNUSED | |||
12331 | static bool isCopyFromRegOfInlineAsm(const SDNode *N) { | |||
12332 |   assert(N->getOpcode() == ISD::CopyFromReg); | |||
12333 | do { | |||
12334 | // Follow the chain until we find an INLINEASM node. | |||
12335 | N = N->getOperand(0).getNode(); | |||
12336 | if (N->getOpcode() == ISD::INLINEASM || | |||
12337 | N->getOpcode() == ISD::INLINEASM_BR) | |||
12338 | return true; | |||
12339 | } while (N->getOpcode() == ISD::CopyFromReg); | |||
12340 | return false; | |||
12341 | } | |||
12342 | ||||
12343 | bool SITargetLowering::isSDNodeSourceOfDivergence( | |||
12344 | const SDNode *N, FunctionLoweringInfo *FLI, | |||
12345 | LegacyDivergenceAnalysis *KDA) const { | |||
12346 | switch (N->getOpcode()) { | |||
12347 | case ISD::CopyFromReg: { | |||
12348 | const RegisterSDNode *R = cast<RegisterSDNode>(N->getOperand(1)); | |||
12349 | const MachineRegisterInfo &MRI = FLI->MF->getRegInfo(); | |||
12350 | const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); | |||
12351 | Register Reg = R->getReg(); | |||
12352 | ||||
12353 | // FIXME: Why does this need to consider isLiveIn? | |||
12354 | if (Reg.isPhysical() || MRI.isLiveIn(Reg)) | |||
12355 | return !TRI->isSGPRReg(MRI, Reg); | |||
12356 | ||||
12357 | if (const Value *V = FLI->getValueFromVirtualReg(R->getReg())) | |||
12358 | return KDA->isDivergent(V); | |||
12359 | ||||
12360 |     assert(Reg == FLI->DemoteRegister || isCopyFromRegOfInlineAsm(N)); | |||
12361 | return !TRI->isSGPRReg(MRI, Reg); | |||
12362 | } | |||
12363 | case ISD::LOAD: { | |||
12364 | const LoadSDNode *L = cast<LoadSDNode>(N); | |||
12365 | unsigned AS = L->getAddressSpace(); | |||
12366 | // A flat load may access private memory. | |||
12367 | return AS == AMDGPUAS::PRIVATE_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS; | |||
12368 | } | |||
12369 | case ISD::CALLSEQ_END: | |||
12370 | return true; | |||
12371 | case ISD::INTRINSIC_WO_CHAIN: | |||
12372 | return AMDGPU::isIntrinsicSourceOfDivergence( | |||
12373 | cast<ConstantSDNode>(N->getOperand(0))->getZExtValue()); | |||
12374 | case ISD::INTRINSIC_W_CHAIN: | |||
12375 | return AMDGPU::isIntrinsicSourceOfDivergence( | |||
12376 | cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()); | |||
12377 | case AMDGPUISD::ATOMIC_CMP_SWAP: | |||
12378 | case AMDGPUISD::ATOMIC_INC: | |||
12379 | case AMDGPUISD::ATOMIC_DEC: | |||
12380 | case AMDGPUISD::ATOMIC_LOAD_FMIN: | |||
12381 | case AMDGPUISD::ATOMIC_LOAD_FMAX: | |||
12382 | case AMDGPUISD::BUFFER_ATOMIC_SWAP: | |||
12383 | case AMDGPUISD::BUFFER_ATOMIC_ADD: | |||
12384 | case AMDGPUISD::BUFFER_ATOMIC_SUB: | |||
12385 | case AMDGPUISD::BUFFER_ATOMIC_SMIN: | |||
12386 | case AMDGPUISD::BUFFER_ATOMIC_UMIN: | |||
12387 | case AMDGPUISD::BUFFER_ATOMIC_SMAX: | |||
12388 | case AMDGPUISD::BUFFER_ATOMIC_UMAX: | |||
12389 | case AMDGPUISD::BUFFER_ATOMIC_AND: | |||
12390 | case AMDGPUISD::BUFFER_ATOMIC_OR: | |||
12391 | case AMDGPUISD::BUFFER_ATOMIC_XOR: | |||
12392 | case AMDGPUISD::BUFFER_ATOMIC_INC: | |||
12393 | case AMDGPUISD::BUFFER_ATOMIC_DEC: | |||
12394 | case AMDGPUISD::BUFFER_ATOMIC_CMPSWAP: | |||
12395 | case AMDGPUISD::BUFFER_ATOMIC_CSUB: | |||
12396 | case AMDGPUISD::BUFFER_ATOMIC_FADD: | |||
12397 | case AMDGPUISD::BUFFER_ATOMIC_FMIN: | |||
12398 | case AMDGPUISD::BUFFER_ATOMIC_FMAX: | |||
12399 | // Target-specific read-modify-write atomics are sources of divergence. | |||
12400 | return true; | |||
12401 | default: | |||
12402 | if (auto *A = dyn_cast<AtomicSDNode>(N)) { | |||
12403 | // Generic read-modify-write atomics are sources of divergence. | |||
12404 | return A->readMem() && A->writeMem(); | |||
12405 | } | |||
12406 | return false; | |||
12407 | } | |||
12408 | } | |||
12409 | ||||
12410 | bool SITargetLowering::denormalsEnabledForType(const SelectionDAG &DAG, | |||
12411 | EVT VT) const { | |||
12412 | switch (VT.getScalarType().getSimpleVT().SimpleTy) { | |||
12413 | case MVT::f32: | |||
12414 | return hasFP32Denormals(DAG.getMachineFunction()); | |||
12415 | case MVT::f64: | |||
12416 | case MVT::f16: | |||
12417 | return hasFP64FP16Denormals(DAG.getMachineFunction()); | |||
12418 | default: | |||
12419 | return false; | |||
12420 | } | |||
12421 | } | |||
12422 | ||||
12423 | bool SITargetLowering::denormalsEnabledForType(LLT Ty, | |||
12424 | MachineFunction &MF) const { | |||
12425 | switch (Ty.getScalarSizeInBits()) { | |||
12426 | case 32: | |||
12427 | return hasFP32Denormals(MF); | |||
12428 | case 64: | |||
12429 | case 16: | |||
12430 | return hasFP64FP16Denormals(MF); | |||
12431 | default: | |||
12432 | return false; | |||
12433 | } | |||
12434 | } | |||
12435 | ||||
12436 | bool SITargetLowering::isKnownNeverNaNForTargetNode(SDValue Op, | |||
12437 | const SelectionDAG &DAG, | |||
12438 | bool SNaN, | |||
12439 | unsigned Depth) const { | |||
12440 | if (Op.getOpcode() == AMDGPUISD::CLAMP) { | |||
12441 | const MachineFunction &MF = DAG.getMachineFunction(); | |||
12442 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); | |||
12443 | ||||
12444 | if (Info->getMode().DX10Clamp) | |||
12445 | return true; // Clamped to 0. | |||
12446 | return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1); | |||
12447 | } | |||
12448 | ||||
12449 | return AMDGPUTargetLowering::isKnownNeverNaNForTargetNode(Op, DAG, | |||
12450 | SNaN, Depth); | |||
12451 | } | |||
12452 | ||||
12453 | // Global FP atomic instructions have a hardcoded FP mode; they do not support | |||
12454 | // FP32 denormals and only support v2f16 denormals. | |||
12455 | static bool fpModeMatchesGlobalFPAtomicMode(const AtomicRMWInst *RMW) { | |||
12456 | const fltSemantics &Flt = RMW->getType()->getScalarType()->getFltSemantics(); | |||
12457 | auto DenormMode = RMW->getParent()->getParent()->getDenormalMode(Flt); | |||
12458 | if (&Flt == &APFloat::IEEEsingle()) | |||
12459 | return DenormMode == DenormalMode::getPreserveSign(); | |||
12460 | return DenormMode == DenormalMode::getIEEE(); | |||
12461 | } | |||
12462 | ||||
12463 | TargetLowering::AtomicExpansionKind | |||
12464 | SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const { | |||
12465 | unsigned AS = RMW->getPointerAddressSpace(); | |||
12466 | if (AS == AMDGPUAS::PRIVATE_ADDRESS) | |||
12467 | return AtomicExpansionKind::NotAtomic; | |||
12468 | ||||
12469 | auto ReportUnsafeHWInst = [&](TargetLowering::AtomicExpansionKind Kind) { | |||
12470 | OptimizationRemarkEmitter ORE(RMW->getFunction()); | |||
12471 | LLVMContext &Ctx = RMW->getFunction()->getContext(); | |||
12472 | SmallVector<StringRef> SSNs; | |||
12473 | Ctx.getSyncScopeNames(SSNs); | |||
12474 | auto MemScope = SSNs[RMW->getSyncScopeID()].empty() | |||
12475 | ? "system" | |||
12476 | : SSNs[RMW->getSyncScopeID()]; | |||
12477 | ORE.emit([&]() { | |||
12478 |       return OptimizationRemark(DEBUG_TYPE, "Passed", RMW) | |||
12479 | << "Hardware instruction generated for atomic " | |||
12480 | << RMW->getOperationName(RMW->getOperation()) | |||
12481 | << " operation at memory scope " << MemScope | |||
12482 | << " due to an unsafe request."; | |||
12483 | }); | |||
12484 | return Kind; | |||
12485 | }; | |||
12486 | ||||
12487 | switch (RMW->getOperation()) { | |||
12488 | case AtomicRMWInst::FAdd: { | |||
12489 | Type *Ty = RMW->getType(); | |||
12490 | ||||
12491 | // We don't have a way to support 16-bit atomics now, so just leave them | |||
12492 | // as-is. | |||
12493 | if (Ty->isHalfTy()) | |||
12494 | return AtomicExpansionKind::None; | |||
12495 | ||||
12496 | if (!Ty->isFloatTy() && (!Subtarget->hasGFX90AInsts() || !Ty->isDoubleTy())) | |||
12497 | return AtomicExpansionKind::CmpXChg; | |||
12498 | ||||
12499 | if ((AS == AMDGPUAS::GLOBAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS) && | |||
12500 | Subtarget->hasAtomicFaddInsts()) { | |||
12501 | if (Subtarget->hasGFX940Insts()) | |||
12502 | return AtomicExpansionKind::None; | |||
12503 | ||||
12504 | // The amdgpu-unsafe-fp-atomics attribute enables generation of unsafe | |||
12505 | // floating point atomic instructions. May generate more efficient code, | |||
12506 | // but may not respect rounding and denormal modes, and may give incorrect | |||
12507 | // results for certain memory destinations. | |||
12508 | if (RMW->getFunction() | |||
12509 | ->getFnAttribute("amdgpu-unsafe-fp-atomics") | |||
12510 | .getValueAsString() != "true") | |||
12511 | return AtomicExpansionKind::CmpXChg; | |||
12512 | ||||
12513 | if (Subtarget->hasGFX90AInsts()) { | |||
12514 | if (Ty->isFloatTy() && AS == AMDGPUAS::FLAT_ADDRESS) | |||
12515 | return AtomicExpansionKind::CmpXChg; | |||
12516 | ||||
12517 | auto SSID = RMW->getSyncScopeID(); | |||
12518 | if (SSID == SyncScope::System || | |||
12519 | SSID == RMW->getContext().getOrInsertSyncScopeID("one-as")) | |||
12520 | return AtomicExpansionKind::CmpXChg; | |||
12521 | ||||
12522 | return ReportUnsafeHWInst(AtomicExpansionKind::None); | |||
12523 | } | |||
12524 | ||||
12525 | if (AS == AMDGPUAS::FLAT_ADDRESS) | |||
12526 | return AtomicExpansionKind::CmpXChg; | |||
12527 | ||||
12528 | return RMW->use_empty() ? ReportUnsafeHWInst(AtomicExpansionKind::None) | |||
12529 | : AtomicExpansionKind::CmpXChg; | |||
12530 | } | |||
12531 | ||||
12532 | // DS FP atomics do respect the denormal mode, but the rounding mode is | |||
12533 | // fixed to round-to-nearest-even. | |||
12534 |   // The only exception is DS_ADD_F64, which never flushes regardless of mode. | |||
12535 | if (AS == AMDGPUAS::LOCAL_ADDRESS && Subtarget->hasLDSFPAtomicAdd()) { | |||
12536 | if (!Ty->isDoubleTy()) | |||
12537 | return AtomicExpansionKind::None; | |||
12538 | ||||
12539 | if (fpModeMatchesGlobalFPAtomicMode(RMW)) | |||
12540 | return AtomicExpansionKind::None; | |||
12541 | ||||
12542 | return RMW->getFunction() | |||
12543 | ->getFnAttribute("amdgpu-unsafe-fp-atomics") | |||
12544 | .getValueAsString() == "true" | |||
12545 | ? ReportUnsafeHWInst(AtomicExpansionKind::None) | |||
12546 | : AtomicExpansionKind::CmpXChg; | |||
12547 | } | |||
12548 | ||||
12549 | return AtomicExpansionKind::CmpXChg; | |||
12550 | } | |||
12551 | default: | |||
12552 | break; | |||
12553 | } | |||
12554 | ||||
12555 | return AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(RMW); | |||
12556 | } | |||
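// A minimal restatement of the global f32 fadd gating above for a subtarget
// with hasAtomicFaddInsts() (leaving aside the GFX90A/GFX940 special cases):
// the hardware instruction is only used when the function opts in through the
// "amdgpu-unsafe-fp-atomics"="true" attribute, the address is not flat, and
// the atomic's result is unused; every other combination expands to a
// compare-and-swap loop. Names below are ad-hoc, not LLVM API.

enum class FAddExpansion { HWInstruction, CmpXChgLoop };

static FAddExpansion expandGlobalF32FAdd(bool UnsafeFPAtomicsAttr,
                                         bool FlatAddress, bool ResultUnused) {
  if (!UnsafeFPAtomicsAttr)
    return FAddExpansion::CmpXChgLoop; // default: exact, but slower
  if (FlatAddress)
    return FAddExpansion::CmpXChgLoop; // flat may alias private memory
  return ResultUnused ? FAddExpansion::HWInstruction
                      : FAddExpansion::CmpXChgLoop;
}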
12557 | ||||
12558 | TargetLowering::AtomicExpansionKind | |||
12559 | SITargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const { | |||
12560 | return LI->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS | |||
12561 | ? AtomicExpansionKind::NotAtomic | |||
12562 | : AtomicExpansionKind::None; | |||
12563 | } | |||
12564 | ||||
12565 | TargetLowering::AtomicExpansionKind | |||
12566 | SITargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const { | |||
12567 | return SI->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS | |||
12568 | ? AtomicExpansionKind::NotAtomic | |||
12569 | : AtomicExpansionKind::None; | |||
12570 | } | |||
12571 | ||||
12572 | TargetLowering::AtomicExpansionKind | |||
12573 | SITargetLowering::shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *CmpX) const { | |||
12574 | return CmpX->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS | |||
12575 | ? AtomicExpansionKind::NotAtomic | |||
12576 | : AtomicExpansionKind::None; | |||
12577 | } | |||
12578 | ||||
12579 | const TargetRegisterClass * | |||
12580 | SITargetLowering::getRegClassFor(MVT VT, bool isDivergent) const { | |||
12581 | const TargetRegisterClass *RC = TargetLoweringBase::getRegClassFor(VT, false); | |||
12582 | const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); | |||
12583 | if (RC == &AMDGPU::VReg_1RegClass && !isDivergent) | |||
12584 | return Subtarget->getWavefrontSize() == 64 ? &AMDGPU::SReg_64RegClass | |||
12585 | : &AMDGPU::SReg_32RegClass; | |||
12586 | if (!TRI->isSGPRClass(RC) && !isDivergent) | |||
12587 | return TRI->getEquivalentSGPRClass(RC); | |||
12588 | else if (TRI->isSGPRClass(RC) && isDivergent) | |||
12589 | return TRI->getEquivalentVGPRClass(RC); | |||
12590 | ||||
12591 | return RC; | |||
12592 | } | |||
12593 | ||||
12594 | // FIXME: This is a workaround for DivergenceAnalysis not understanding always | |||
12595 | // uniform values (as produced by the mask results of control flow intrinsics) | |||
12596 | // used outside of divergent blocks. The phi users need to also be treated as | |||
12597 | // always uniform. | |||
12598 | static bool hasCFUser(const Value *V, SmallPtrSet<const Value *, 16> &Visited, | |||
12599 | unsigned WaveSize) { | |||
12600 |   // FIXME: We assume we never cast the mask results of a control flow | |||
12601 |   // intrinsic. | |||
12602 |   // As a compile-time shortcut, exit early if the type cannot be the mask. | |||
12603 | IntegerType *IT = dyn_cast<IntegerType>(V->getType()); | |||
12604 | if (!IT || IT->getBitWidth() != WaveSize) | |||
12605 | return false; | |||
12606 | ||||
12607 | if (!isa<Instruction>(V)) | |||
12608 | return false; | |||
12609 | if (!Visited.insert(V).second) | |||
12610 | return false; | |||
12611 | bool Result = false; | |||
12612 | for (auto U : V->users()) { | |||
12613 | if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(U)) { | |||
12614 | if (V == U->getOperand(1)) { | |||
12615 | switch (Intrinsic->getIntrinsicID()) { | |||
12616 | default: | |||
12617 | Result = false; | |||
12618 | break; | |||
12619 | case Intrinsic::amdgcn_if_break: | |||
12620 | case Intrinsic::amdgcn_if: | |||
12621 | case Intrinsic::amdgcn_else: | |||
12622 | Result = true; | |||
12623 | break; | |||
12624 | } | |||
12625 | } | |||
12626 | if (V == U->getOperand(0)) { | |||
12627 | switch (Intrinsic->getIntrinsicID()) { | |||
12628 | default: | |||
12629 | Result = false; | |||
12630 | break; | |||
12631 | case Intrinsic::amdgcn_end_cf: | |||
12632 | case Intrinsic::amdgcn_loop: | |||
12633 | Result = true; | |||
12634 | break; | |||
12635 | } | |||
12636 | } | |||
12637 | } else { | |||
12638 | Result = hasCFUser(U, Visited, WaveSize); | |||
12639 | } | |||
12640 | if (Result) | |||
12641 | break; | |||
12642 | } | |||
12643 | return Result; | |||
12644 | } | |||
12645 | ||||
12646 | bool SITargetLowering::requiresUniformRegister(MachineFunction &MF, | |||
12647 | const Value *V) const { | |||
12648 | if (const CallInst *CI = dyn_cast<CallInst>(V)) { | |||
12649 | if (CI->isInlineAsm()) { | |||
12650 | // FIXME: This cannot give a correct answer. This should only trigger in | |||
12651 | // the case where inline asm returns mixed SGPR and VGPR results, used | |||
12652 | // outside the defining block. We don't have a specific result to | |||
12653 | // consider, so this assumes if any value is SGPR, the overall register | |||
12654 | // also needs to be SGPR. | |||
12655 | const SIRegisterInfo *SIRI = Subtarget->getRegisterInfo(); | |||
12656 | TargetLowering::AsmOperandInfoVector TargetConstraints = ParseConstraints( | |||
12657 | MF.getDataLayout(), Subtarget->getRegisterInfo(), *CI); | |||
12658 | for (auto &TC : TargetConstraints) { | |||
12659 | if (TC.Type == InlineAsm::isOutput) { | |||
12660 | ComputeConstraintToUse(TC, SDValue()); | |||
12661 | const TargetRegisterClass *RC = getRegForInlineAsmConstraint( | |||
12662 | SIRI, TC.ConstraintCode, TC.ConstraintVT).second; | |||
12663 | if (RC && SIRI->isSGPRClass(RC)) | |||
12664 | return true; | |||
12665 | } | |||
12666 | } | |||
12667 | } | |||
12668 | } | |||
12669 | SmallPtrSet<const Value *, 16> Visited; | |||
12670 | return hasCFUser(V, Visited, Subtarget->getWavefrontSize()); | |||
12671 | } | |||
12672 | ||||
12673 | std::pair<InstructionCost, MVT> | |||
12674 | SITargetLowering::getTypeLegalizationCost(const DataLayout &DL, | |||
12675 | Type *Ty) const { | |||
12676 | std::pair<InstructionCost, MVT> Cost = | |||
12677 | TargetLoweringBase::getTypeLegalizationCost(DL, Ty); | |||
12678 | auto Size = DL.getTypeSizeInBits(Ty); | |||
12679 |   // The widest load or store handles 8 dwords for the scalar unit and 4 for | |||
12680 |   // the vector ALU. Assume anything above 8 dwords is expensive even if | |||
12681 |   // legal. | |||
12682 | if (Size <= 256) | |||
12683 | return Cost; | |||
12684 | ||||
12685 | Cost.first += (Size + 255) / 256; | |||
12686 | return Cost; | |||
12687 | } | |||
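// Worked example of the size penalty above: with a 256-bit (8-dword) budget,
// a 512-bit type adds (512 + 255) / 256 == 2 to the base legalization cost
// and a 1024-bit type adds 4; anything at or under 256 bits adds nothing.

#include <cstdint>

static uint64_t extraLegalizationCost(uint64_t SizeInBits) {
  return SizeInBits <= 256 ? 0 : (SizeInBits + 255) / 256;
}
// extraLegalizationCost(256) == 0, (512) == 2, (1024) == 4.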
12688 | ||||
12689 | bool SITargetLowering::hasMemSDNodeUser(SDNode *N) const { | |||
12690 | SDNode::use_iterator I = N->use_begin(), E = N->use_end(); | |||
12691 | for (; I != E; ++I) { | |||
12692 | if (MemSDNode *M = dyn_cast<MemSDNode>(*I)) { | |||
12693 | if (getBasePtrIndex(M) == I.getOperandNo()) | |||
12694 | return true; | |||
12695 | } | |||
12696 | } | |||
12697 | return false; | |||
12698 | } | |||
12699 | ||||
12700 | bool SITargetLowering::isReassocProfitable(SelectionDAG &DAG, SDValue N0, | |||
12701 | SDValue N1) const { | |||
12702 | if (!N0.hasOneUse()) | |||
12703 | return false; | |||
12704 |   // Reassociate when it gives us a chance to keep N0 uniform. | |||
12705 | if (N0->isDivergent() || !N1->isDivergent()) | |||
12706 | return true; | |||
12707 |   // Check whether we have a good chance of forming the memory access | |||
12708 |   // pattern from this base and constant offset. | |||
12709 | return (DAG.isBaseWithConstantOffset(N0) && | |||
12710 | hasMemSDNodeUser(*N0->use_begin())); | |||
12711 | } | |||
12712 | ||||
12713 | MachineMemOperand::Flags | |||
12714 | SITargetLowering::getTargetMMOFlags(const Instruction &I) const { | |||
12715 | // Propagate metadata set by AMDGPUAnnotateUniformValues to the MMO of a load. | |||
12716 | if (I.getMetadata("amdgpu.noclobber")) | |||
12717 | return MONoClobber; | |||
12718 | return MachineMemOperand::MONone; | |||
12719 | } |